diff --git "a/5877.jsonl" "b/5877.jsonl" new file mode 100644--- /dev/null +++ "b/5877.jsonl" @@ -0,0 +1,635 @@ +{"seq_id":"79168589","text":"#!/usr/bin/env python\n\nimport numpy as np\n\n\ndef get_sepsis_score(data, model):\n feature_matrix = data\n feature_matrix[np.isnan(feature_matrix)]=-1\n\n # Use model parameters\n ESNtools = model['f']\n\n ## ESN Generation parameters\n N = model['N_def'] # Neurons\n mem = model['mem_def'] # memory\n scale = model['scale_def'] # scaling factor\n\n ## Nonlinear mapping function\n sigmoid_exponent = model['exponent_def'] # sig exponent\n func = ESNtools.sigmoid\n \n ## Mask parameters\n # M = 2*np.random.rand(np.shape(feature_matrix)[1],N)-1\n # Mb = 2*np.random.rand(1,N)-1\n M = model['M']\n Mb = model['Mb'] \n\n\n ##Weights and thresholds\n w = model['w']\n th_max = model['th_max'] \n th_min = model['th_min']\n th_scale = model['th_scale']\n\n ## Perform ESN feed\n # Apply backwards interpolation\n for f in range(feature_matrix.shape[1]):\n if np.sum(np.isnan(feature_matrix[:, f])) < len(feature_matrix[:, f]):\n ESNtools.nan_bounds(feature_matrix[:, f])\n ESNtools.nan_interpolate(feature_matrix[:, f])\n else:\n feature_matrix[:, f] = np.nan_to_num(feature_matrix[:, f], -1)\n ESN = ESNtools.feedESN(feature_matrix, N, M, Mb, scale, mem, func, sigmoid_exponent)\n\n del feature_matrix\n \n ## Compute class prediction\n\n Y_pred = (np.matmul(ESN[-1, :], w))\n scores = (Y_pred - th_min) / th_scale\n labels = np.asarray(Y_pred > th_max, dtype = np.int)\n if scores > 1.0:\n scores = 1.0\n elif scores < 0.0:\n scores = 0.0\n\n return scores, labels\n\n\ndef load_sepsis_model():\n import scipy.linalg as linalg\n \n # Random seed\n np.random.seed(seed=0)\n class ESNT:\n \"\"\"\n ESN tools module\n \"\"\"\n \n ### Map data ################################################################\n @staticmethod\n def sigmoid(x, exponent):\n \"\"\"Apply a [-0.5, 0.5] sigmoid function.\"\"\"\n \n return 1/(1+np.exp(-exponent*x))-0.5\n \n ### Feed data into Echo State Network #######################################\n @staticmethod\n def feedESN(features, neurons, mask, mask_bias, scale, mem, func, f_arg):\n \"\"\"Feeds data into a ring Echo State Network. Returns ESN state.\n Adds extra (1) neuron for Ax + b = Y linear system.\n \n Parameters\n ----------\n features : (np.array) feature matrix original data (samples,features)\n \n neurons : (int) number of neurons to use\n \n mask : (np.array) input weights mask matrix (usually randomly generated)\n \n mask_bias : (np.array) initialisation bias per neuron\n \n scale : (float) input scaling factor\n \n mem : (float) memory feedback factor\n \n func : (function) nonlinear mapping function\n \n f_arg : (float) function parameter. 
sigmoid exponent or slope in rect\n \"\"\"\n \n ESN = np.hstack((np.matmul(features, mask), np.ones((np.shape(features)[0],1), dtype=np.double)))\n p = np.zeros((1,neurons),dtype=np.double)\n \n for i in range(np.shape(features)[0]):\n in_val = scale * (ESN[i,:-1] + mask_bias) + p * mem\n \n ## Apply transform\n ESN[i,:-1] = func(in_val, f_arg)\n \n ## Connect preceding neighbour \n p = np.copy(np.roll(ESN[i,:-1],1))\n return ESN\n\n # Fix boundary nans (replicate head/tail vals)\n @staticmethod\n def nan_bounds(feats):\n nanidx = np.where(np.isnan(feats))[0]\n pointer_left = 0\n pointer_right = len(feats) - 1\n fix_left = pointer_left in nanidx\n fix_right = pointer_right in nanidx\n while fix_left:\n if pointer_left in nanidx:\n pointer_left += 1\n # print(\"pointer_left:\", pointer_left)\n else:\n val_left = feats[pointer_left]\n feats[:pointer_left] = val_left * np.ones((1, pointer_left), dtype=np.float)\n fix_left = False\n\n while fix_right:\n if pointer_right in nanidx:\n pointer_right -= 1\n # print(\"pointer_right:\", pointer_right)\n else:\n val_right = feats[pointer_right]\n feats[pointer_right + 1:] = val_right * np.ones((1, len(feats) - pointer_right - 1), dtype=np.float)\n fix_right = False\n\n # nan interpolation\n @staticmethod\n def nan_interpolate(feats):\n nanidx = np.where(np.isnan(feats))[0]\n nan_remain = len(nanidx)\n nanid = 0\n while nan_remain > 0:\n nanpos = nanidx[nanid]\n nanval = feats[nanpos - 1]\n nan_remain -= 1\n\n nandim = 1\n initpos = nanpos\n\n # Check whether it extends\n while nanpos + 1 in nanidx:\n nanpos += 1\n nanid += 1\n nan_remain -= 1\n nandim += 1\n # Average sides\n if np.isfinite(feats[nanpos + 1]):\n nanval = 0.5 * (nanval + feats[nanpos + 1])\n\n # Single value average\n if nandim == 1:\n nanval = 0.5 * (nanval + feats[nanpos + 1])\n feats[initpos:initpos + nandim] = nanval * np.ones((1, nandim), dtype=np.double)\n nanpos += 1\n nanid += 1\n\n esnt = ESNT()\n model = dict()\n with open('w.txt') as file:\n w = (np.loadtxt(file, skiprows=1))\n \n # Model parameters\n model['N_def'] = 100 # Neurons\n model['scale_def'] = 0.0001 # scaling\n model['mem_def'] = 1.0 # memory\n model['exponent_def'] = 1.0 # sigmoid exponent\n\n # Thresholds\n model['th_max'] = 0.1153\n model['th_min'] = -1.3363\n model['th_scale'] = 27.3891\n \n # Model functions\n model['f'] = esnt\n model['type'] = 'ESN'\n model['w'] = w\n\n # Model Mask\n model['M'] = 2*np.random.rand(40, model['N_def'])-1\n model['Mb'] = 2*np.random.rand(1, model['N_def'])-1\n\n return model\n","sub_path":"official_submissions/prepare_submit_20190824/get_sepsis_score.py","file_name":"get_sepsis_score.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"216520545","text":"import argparse\nfrom collections import namedtuple\nimport json\nimport os\nfrom PIL import Image\nimport numpy as np\n\nfrom yolo import YOLO\nfrom pycocotools.coco import COCO\n\nCOCOResultItem = namedtuple('COCOResultItem', ['image_id', 'category_id', 'bbox', 'score'])\n\nimg_dirs = {\n 'test-dev2017': 'test2017',\n 'val2017': 'val2017',\n}\n\nanno_names = {\n 'test-dev2017': 'image_info_test-dev2017.json',\n 'val2017': 'instances_val2017.json',\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--coco_dir', type=str, default='/home/cwq/data/COCO/2017')\n parser.add_argument('--dataset', type=str, default='test-dev2017', choices=['test-dev2017', 'val2017'])\n\n flags, _ = 
parser.parse_known_args()\n flags.img_dir = os.path.join(flags.coco_dir, img_dirs[flags.dataset])\n flags.anno_file = os.path.join(flags.coco_dir, 'annotations', anno_names[flags.dataset])\n return flags\n\n\nif __name__ == '__main__':\n \"\"\"\n Run yolo on coco test-dev, and save result file.\n \"\"\"\n flags = parse_args()\n coco = COCO(flags.anno_file)\n\n img_paths = []\n img_ids = coco.getImgIds()\n for i in img_ids:\n img_paths.append(os.path.join(flags.img_dir, '%.12d.jpg' % i))\n\n category_ids = []\n for item in coco.dataset['categories']:\n category_ids.append(item['id'])\n\n results = []\n yolo = YOLO()\n for j, img_path in enumerate(img_paths):\n img_id = img_ids[j]\n image = Image.open(img_path)\n _, boxes, scores, classes = yolo.detect_image(image, draw=False)\n\n for i in range(len(boxes)):\n top, left, bottom, right = boxes[i]\n box = [float(left), float(top), float(right - left), float(bottom - top)]\n\n category_id = category_ids[classes[i]]\n r = COCOResultItem(img_id, category_id, box, float(scores[i]))\n results.append(r._asdict())\n\n print(\"%d/%d\" % (j, len(img_paths)), end='\\r')\n\n yolo.close_session()\n\n file_path = 'coco/result/detections_{}_kyolov3_results.json'.format(flags.dataset)\n with open(file_path, 'w') as f:\n json.dump(results, f)\n","sub_path":"yolo_coco.py","file_name":"yolo_coco.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"223236841","text":"from __future__ import print_function\nimport sys\nfrom gunpowder import *\nfrom gunpowder.tensorflow import *\nfrom lsd.gp import AddLocalShapeDescriptor\nimport os\nimport math\nimport json\nimport tensorflow as tf\nimport numpy as np\nfrom micron import read_train_config\n\n\ndef train_until(max_iteration,\n training_container,\n raw_dset,\n gt_dset):\n\n \"\"\"\n max_iteration [int]: Number of training iterations\n\n data_dir [string]: Training data base directory\n\n samples [list of strings]: hdf5 files holding the training data. 
Each \n file is expected to have a dataset called\n *raw* holding the raw image data and \n a dataset called *tracing* holding the microtubule\n tracings.\n \"\"\"\n\n if tf.train.latest_checkpoint('.'):\n trained_until = int(tf.train.latest_checkpoint('.').split('_')[-1])\n else:\n trained_until = 0\n if trained_until >= max_iteration:\n return\n\n with open('train_net.json', 'r') as f:\n config = json.load(f)\n\n soft_mask = ArrayKey('SOFT_MASK')\n pred_maxima = ArrayKey('PRED_MAXIMA')\n pred_reduced_maxima = ArrayKey('PRED_REDUCED_MAXIMA')\n\n voxel_size = Coordinate(config['voxel_size'])\n input_size = Coordinate(config['input_shape'])*voxel_size\n output_size = Coordinate(config['output_shape'])*voxel_size\n\n request = BatchRequest()\n request.add(soft_mask, input_size)\n \n data_sources = tuple(\n Hdf5Source(\n container,\n datasets = {\n soft_mask: raw_dset\n },\n array_specs = {\n soft_mask: ArraySpec(interpolatable=True)\n }\n ) +\n Normalize(soft_mask) +\n Pad(soft_mask, None) +\n RandomLocation()\n for container in training_container\n )\n\n\n train_pipeline = (\n data_sources +\n RandomProvider() +\n PreCache(\n cache_size=40,\n num_workers=10) +\n Train(\n 'train_net',\n optimizer=config['optimizer'],\n loss=config['loss'],\n inputs={\n config['soft_mask']: soft_mask,\n },\n outputs={\n config['soft_mask']: soft_mask,\n config['derivatives']: derivatives,\n config['loss_weights_lsds']: loss_weights_lsds,\n config['gt_maxima']: gt_maxima,\n config['gt_reduced_maxima']: gt_reduced_maxima,\n config['pred_maxima']: pred_maxima,\n config['pred_reduced_maxima']: pred_reduced_maxima\n },\n gradients={},\n summary=config['summary'],\n log_dir='log',\n save_every=10000) +\n IntensityScaleShift(raw, 0.5, 0.5) +\n Snapshot({\n raw: 'raw',\n tracing: 'tracing',\n gt_lsds: 'gt_lsds',\n soft_mask: 'soft_mask',\n derivatives: 'derivatives',\n loss_weights_lsds: 'loss_weights_lsds',\n gt_maxima: 'gt_maxima',\n gt_reduced_maxima: 'gt_reduced_maxima',\n pred_maxima: 'pred_maxima',\n pred_reduced_maxima: 'pred_reduced_maxima'\n },\n dataset_dtypes={\n tracing: np.uint64\n },\n every=1000,\n output_filename='batch_{iteration}.hdf',\n additional_request=snapshot_request) +\n PrintProfilingStats(every=10)\n )\n\n print(\"Starting training...\")\n with build(train_pipeline) as b:\n for i in range(max_iteration - trained_until):\n b.request_batch(request)\n print(\"Training finished\")\n\nif __name__ == \"__main__\":\n iteration = int(sys.argv[1])\n train_config = read_train_config(\"./train_config.ini\")\n train_config[\"max_iteration\"] = iteration\n\n train_until(**train_config)\n","sub_path":"grid_search/hela_2_block_2/01_train/setup_t0/train_pipeline.py","file_name":"train_pipeline.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"524972245","text":"from chainer import serializers\n\nfrom model import AttSeq2Seq\nfrom model import DataConverter\n\nclass Predict:\n def __init__(self, model_file, gpu):\n self.data_converter = DataConverter(batch_col_size=20) # データコンバーター\n model = AttSeq2Seq(input_size=200, hidden_size=200, batch_col_size=20)\n serializers.load_npz(f'./{model_file}', model)\n if gpu >= 0:\n model.to_gpu(0)\n self.model = model\n\n def __call__(self, query):\n self.model.reset()\n enc_query = self.data_converter.sentence2vectors(query, train=False)\n dec_response = self.model(enc_words=enc_query, train=False)\n response = self.data_converter.vectors2sentences(dec_response)\n 
print(query, \"=>\", response)\n return response","sub_path":"server/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"365943629","text":"\"\"\"Grover's algorithm and helper functions.\n\nTodo:\n\n* W gate construction (or perhaps -W gate based on Mermin's book)\n* Implement _represent_ZGate in OracleGate\n\"\"\"\n\nfrom __future__ import print_function, division\n\nfrom sympy import floor, pi, sqrt, sympify, eye\nfrom sympy.core.compatibility import range\nfrom sympy.core.numbers import NegativeOne\nfrom sympy.physics.quantum.qapply import qapply\nfrom sympy.physics.quantum.qexpr import QuantumError\nfrom sympy.physics.quantum.hilbert import ComplexSpace\nfrom sympy.physics.quantum.operator import UnitaryOperator\nfrom sympy.physics.quantum.gate import Gate\nfrom sympy.physics.quantum.qubit import IntQubit, Qubit\n\n__all__ = [\n 'OracleGate',\n 'WGate',\n 'superposition_basis',\n 'grover_iteration',\n 'apply_grover',\n 'random_oracle',\n 'g_bbht_search'\n]\n\n\ndef superposition_basis(nqubits):\n \"\"\"Creates an equal superposition of the computational basis.\n\n Parameters\n ==========\n\n nqubits : int\n The number of qubits.\n\n Returns\n =======\n\n state : Qubit\n An equal superposition of the computational basis with nqubits.\n\n Examples\n ========\n\n Create an equal superposition of 2 qubits::\n\n >>> from sympy.physics.quantum.grover import superposition_basis\n >>> superposition_basis(2)\n |0>/2 + |1>/2 + |2>/2 + |3>/2\n \"\"\"\n\n amp = 1/sqrt(2**nqubits)\n return sum([amp*IntQubit(n, nqubits) for n in range(2**nqubits)])\n\n\nclass OracleGate(Gate):\n \"\"\"A black box gate.\n\n The gate marks the desired qubits of an unknown function by flipping\n the sign of the qubits. The unknown function returns true when it\n finds its desired qubits and false otherwise.\n\n Parameters\n ==========\n\n qubits : int\n Number of qubits.\n\n oracle : callable\n A callable function that returns a boolean on a computational basis.\n\n Examples\n ========\n\n Apply an Oracle gate that flips the sign of ``|2>`` on different qubits::\n\n >>> from sympy.physics.quantum.qubit import IntQubit\n >>> from sympy.physics.quantum.qapply import qapply\n >>> from sympy.physics.quantum.grover import OracleGate\n >>> f = lambda qubits: qubits == IntQubit(2)\n >>> v = OracleGate(2, f)\n >>> qapply(v*IntQubit(2))\n -|2>\n >>> qapply(v*IntQubit(3))\n |3>\n \"\"\"\n\n gate_name = u'V'\n gate_name_latex = u'V'\n\n #-------------------------------------------------------------------------\n # Initialization/creation\n #-------------------------------------------------------------------------\n\n @classmethod\n def _eval_args(cls, args):\n # TODO: args[1] is not a subclass of Basic\n if len(args) != 2:\n raise QuantumError(\n 'Insufficient/excessive arguments to Oracle. 
Please ' +\n 'supply the number of qubits and an unknown function.'\n )\n sub_args = (args[0],)\n sub_args = UnitaryOperator._eval_args(sub_args)\n if not sub_args[0].is_Integer:\n raise TypeError('Integer expected, got: %r' % sub_args[0])\n\n if not callable(args[1]):\n raise TypeError('Callable expected, got: %r' % args[1])\n return (sub_args[0], args[1])\n\n @classmethod\n def _eval_hilbert_space(cls, args):\n \"\"\"This returns the smallest possible Hilbert space.\"\"\"\n return ComplexSpace(2)**args[0]\n\n #-------------------------------------------------------------------------\n # Properties\n #-------------------------------------------------------------------------\n\n @property\n def search_function(self):\n \"\"\"The unknown function that helps find the sought after qubits.\"\"\"\n return self.label[1]\n\n @property\n def targets(self):\n \"\"\"A tuple of target qubits.\"\"\"\n return sympify(tuple(range(self.args[0])))\n\n #-------------------------------------------------------------------------\n # Apply\n #-------------------------------------------------------------------------\n\n def _apply_operator_Qubit(self, qubits, **options):\n \"\"\"Apply this operator to a Qubit subclass.\n\n Parameters\n ==========\n\n qubits : Qubit\n The qubit subclass to apply this operator to.\n\n Returns\n =======\n\n state : Expr\n The resulting quantum state.\n \"\"\"\n if qubits.nqubits != self.nqubits:\n raise QuantumError(\n 'OracleGate operates on %r qubits, got: %r'\n % (self.nqubits, qubits.nqubits)\n )\n # If function returns 1 on qubits\n # return the negative of the qubits (flip the sign)\n if self.search_function(qubits):\n return -qubits\n else:\n return qubits\n\n #-------------------------------------------------------------------------\n # Represent\n #-------------------------------------------------------------------------\n\n def _represent_ZGate(self, basis, **options):\n \"\"\"\n Represent the OracleGate in the computational basis.\n \"\"\"\n nbasis = 2**self.nqubits # compute it only once\n matrixOracle = eye(nbasis)\n # Flip the sign given the output of the oracle function\n for i in range(nbasis):\n if self.search_function(IntQubit(i, self.nqubits)):\n matrixOracle[i, i] = NegativeOne()\n return matrixOracle\n\n\nclass WGate(Gate):\n \"\"\"General n qubit W Gate in Grover's algorithm.\n\n The gate performs the operation ``2|phi> = (tensor product of n Hadamards)*(|0> with n qubits)``\n\n Parameters\n ==========\n\n nqubits : int\n The number of qubits to operate on\n\n \"\"\"\n\n gate_name = u'W'\n gate_name_latex = u'W'\n\n @classmethod\n def _eval_args(cls, args):\n if len(args) != 1:\n raise QuantumError(\n 'Insufficient/excessive arguments to W gate. 
Please ' +\n 'supply the number of qubits to operate on.'\n )\n args = UnitaryOperator._eval_args(args)\n if not args[0].is_Integer:\n raise TypeError('Integer expected, got: %r' % args[0])\n return args\n\n #-------------------------------------------------------------------------\n # Properties\n #-------------------------------------------------------------------------\n\n @property\n def targets(self):\n return sympify(tuple(reversed(range(self.args[0]))))\n\n #-------------------------------------------------------------------------\n # Apply\n #-------------------------------------------------------------------------\n\n def _apply_operator_Qubit(self, qubits, **options):\n \"\"\"\n qubits: a set of qubits (Qubit)\n Returns: quantum object (quantum expression - QExpr)\n \"\"\"\n if qubits.nqubits != self.nqubits:\n raise QuantumError(\n 'WGate operates on %r qubits, got: %r'\n % (self.nqubits, qubits.nqubits)\n )\n\n # See 'Quantum Computer Science' by David Mermin p.92 -> W|a> result\n # Return (2/(sqrt(2^n)))|phi> - |a> where |a> is the current basis\n # state and phi is the superposition of basis states (see function\n # create_computational_basis above)\n basis_states = superposition_basis(self.nqubits)\n change_to_basis = (2/sqrt(2**self.nqubits))*basis_states\n return change_to_basis - qubits\n\n\ndef grover_iteration(qstate, oracle):\n \"\"\"Applies one application of the Oracle and W Gate, WV.\n\n Parameters\n ==========\n\n qstate : Qubit\n A superposition of qubits.\n oracle : OracleGate\n The black box operator that flips the sign of the desired basis qubits.\n\n Returns\n =======\n\n Qubit : The qubits after applying the Oracle and W gate.\n\n Examples\n ========\n\n Perform one iteration of grover's algorithm to see a phase change::\n\n >>> from sympy.physics.quantum.qapply import qapply\n >>> from sympy.physics.quantum.qubit import IntQubit\n >>> from sympy.physics.quantum.grover import OracleGate\n >>> from sympy.physics.quantum.grover import superposition_basis\n >>> from sympy.physics.quantum.grover import grover_iteration\n >>> numqubits = 2\n >>> basis_states = superposition_basis(numqubits)\n >>> f = lambda qubits: qubits == IntQubit(2)\n >>> v = OracleGate(numqubits, f)\n >>> qapply(grover_iteration(basis_states, v))\n |2>\n\n \"\"\"\n wgate = WGate(oracle.nqubits)\n return wgate*oracle*qstate\n\n\ndef apply_grover(oracle, nqubits, iterations=None):\n \"\"\"Applies grover's algorithm.\n\n Parameters\n ==========\n\n oracle : callable\n The unknown callable function that returns true when applied to the\n desired qubits and false otherwise.\n\n Returns\n =======\n\n state : Expr\n The resulting state after Grover's algorithm has been iterated.\n\n Examples\n ========\n\n Apply grover's algorithm to an even superposition of 2 qubits::\n\n >>> from sympy.physics.quantum.qapply import qapply\n >>> from sympy.physics.quantum.qubit import IntQubit\n >>> from sympy.physics.quantum.grover import apply_grover\n >>> f = lambda qubits: qubits == IntQubit(2)\n >>> qapply(apply_grover(f, 2))\n |2>\n\n \"\"\"\n if nqubits <= 0:\n raise QuantumError(\n 'Grover\\'s algorithm needs nqubits > 0, received %r qubits'\n % nqubits\n )\n if iterations is None:\n iterations = floor(sqrt(2**nqubits)*(pi/4))\n\n v = OracleGate(nqubits, oracle)\n iterated = superposition_basis(nqubits)\n for iter in range(iterations):\n iterated = grover_iteration(iterated, v)\n iterated = qapply(iterated)\n\n return iterated\n\n\ndef random_oracle(nqubits, min_img=1, max_img=1, q_type='bin'):\n 
\"\"\"Create a random OracleGate under the given parameter\n\n Parameters\n ==========\n\n nqubits : int\n The number of qubits for OracleGate\n min_pic : int\n Minimum number of inverse images that are mapped to 1\n max_pic : int\n Maximum number of inverse images that are mapped to 1\n q_type : OracleGate\n Type of the Qubits that the oracle should be applied on.\n Can be 'bin' for binary (Qubit()) or 'int' for integer (IntQubit()).\n\n Returns\n =======\n\n OracleGate : random OracleGate under the given parameter\n\n Examples\n ========\n\n Generate random OracleGate that outputs 1 for 2-4 inputs::\n\n >>> from sympy.physics.quantum.grover import random_oracle\n >>> oracle = random_oracle(4, min_img=2, max_img=4, q_type=\"bin\")\n\n \"\"\"\n if q_type != 'bin' and q_type != 'int':\n raise QuantumError(\"q_type must be 'int' or 'bin'\")\n\n if min_img < 1 or max_img < 1:\n raise QuantumError(\"min_pic, max_pic must be > 0\")\n\n if min_img > max_img:\n raise QuantumError(\"max_pic must be >= min_pic\")\n\n if min_img >= 2 ** nqubits or max_img > 2 ** nqubits:\n raise QuantumError(\"min_pic must be < 2**nqubits and max_pic must be <= 2**nqubits\")\n\n import random\n pics = random.randint(min_img, max_img)\n integers = random.sample(range(2 ** nqubits), pics)\n if q_type == \"int\":\n items = [IntQubit(i) for i in integers]\n else:\n items = [Qubit(IntQubit(i)) for i in integers]\n\n return OracleGate(nqubits, lambda qubits: qubits in items)\n\n\ndef g_bbht_search(qstate, oracle):\n \"\"\"G-BBHT-Search\n\n G-BBHT-Search is an algorithm based on Grover's algorithm,\n but designed for an unknown oracle function that returns 1\n for an unknown number of qubit states. It is based on the\n paper: Boyer, M., Brassard, G., Hoyer, P., Tapp, A. (1998).\n Tight bounds on quantum searching: Progress of Physics,\n 46(4-5), 493-505.\n\n Parameters\n ==========\n\n qstate : Qubit\n State the G-BBHT-Search should be applied on\n oracle : OracleGate\n The black box operator that flips the sign of the desired basis qubits.\n\n Returns\n =======\n\n (Qubit, int) : (single Qubit that fulfills oracle, number of grover iterations applied)\n\n Examples\n ========\n\n G-BBHT-Search for an orcale that returns 1 for an unkown number of statesbetween 5 - 15::\n\n >>> from sympy.physics.quantum.grover import random_oracle, superposition_basis, g_bbht_search\n >>> basis_states = superposition_basis(4)\n >>> oracle = random_oracle(4, min_img=5, max_img=15, q_type=\"bin\")\n >>> x = g_bbht_search(basis_states, oracle)[0]\n >>> oracle.search_function(x)\n True\n\n \"\"\"\n import random\n from sympy.physics.quantum.qubit import measure_all_oneshot\n\n max_iterations = 1\n factor_iterations = 6.0 / 5.0\n count_grover_iterations = 0\n\n while True:\n for _ in range(1 if max_iterations == 1 else random.randint(0, int(max_iterations))):\n count_grover_iterations += 1\n qstate = qapply(grover_iteration(qstate, oracle))\n\n measure = measure_all_oneshot(qstate)\n max_iterations *= factor_iterations\n if oracle.search_function(measure) is True:\n return measure, count_grover_iterations\n","sub_path":"sympy/physics/quantum/grover.py","file_name":"grover.py","file_ext":"py","file_size_in_byte":13222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"338492372","text":"\"\"\"Copyright 2021 Google LLC.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\n\nYou may obtain a copy of the License 
at\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nimport importlib\nimport json\nimport os\nimport re\nfrom inspect import isclass\nfrom typing import List, Union\n\nfrom pyangbind.lib import yangtypes\nfrom pyangbind.lib.base import PybindBase\nfrom pyangbind.lib.serialise import pybindJSONDecoder\n\nfrom oc_config_validate import models, target\n\n\nclass Error(Exception):\n \"\"\"Base Exception raised by this module.\"\"\"\n\n\ndef decodeJson(json_text: Union[str, bytes], obj: PybindBase):\n \"\"\"Decode a JSON text into a PybindBase object.\n\n This method is used to validate the JSON text adheres to the OC schema.\n\n Args:\n json_text: The JSON-IETF text to decode.\n obj: The PybindBase object to decode the texto into.\n\n Raises:\n Error if unable to parse the JSON text.\n \"\"\"\n json_text = removeOpenConfigPrefix(json_text)\n pybindJSONDecoder.load_ietf_json(json.loads(json_text), None, None, obj)\n\n\ndef removeOpenConfigPrefix(json_text: Union[str, bytes]) -> Union[str, bytes]:\n \"\"\"Remove open-config prefixed so JSON text can be processed by PyBind.\n\n When JSON-IETF is used for a gNMI response, type references will\n prepend the corresponding model name(ie. openconfig-aaa:RADIUS) when\n referencing a server of type RADIUS in a leaf. These must be removed\n before being processed by PyBind.\n\n Args:\n json_text: The JSON-IETF text to correct.\n\n Returns:\n The string that can be fed directly to pybindJSONDecoder.\n \"\"\"\n # https://regex101.com/r/xiZj4Q/1\n if isinstance(json_text, bytes):\n return re.sub(b'(openconfig(-[a-z]+)+\\:)', b'',\n json_text) # noqa\n return re.sub(r'(openconfig(-[a-z]+)+\\:)', '', json_text)\n\n\ndef ocContainerFromPath(model: str, xpath: str) -> PybindBase:\n \"\"\"Create an empty PybindBase instance of the model for the path.\n\n This method look for the model class in the oc_config_validate.models\n package\n\n Args:\n model: the OC model class name in the oc_config_validate.models\n package, as `module.class`.\n xpath: the xpath to the OC container to create.\n\n Returns:\n An PybindBase object of the class.\n\n Raises:\n Error if unable to find the Python class or if the class is not derived\n from PybindBase.\n target.XpathError if the xpath is invalid.\n AttributeError if unable to find an xpath element in the OC class.\n \"\"\"\n\n parts = model.split('.')\n if len(parts) != 2:\n raise Error(\"%s is not module.class\" % model)\n model_mod = importlib.import_module(\n \"oc_config_validate.models.\" + parts[0])\n model_cls = getattr(model_mod, parts[1])\n if not isclass(model_cls):\n raise Error(\n \"%s is not a class in oc_config_validate.models package\" % model)\n\n gnmi_xpath = target.parsePath(xpath)\n\n model_inst = model_cls()\n for e in gnmi_xpath.elem:\n model_inst = getattr(model_inst, yangtypes.safe_name(e.name))\n if e.key:\n save_key = {}\n for k, v in e.key.items():\n save_key[yangtypes.safe_name(k)] = v\n model_inst = model_inst.add(**save_key)\n if not issubclass(model_inst.__class__, PybindBase):\n raise Error(\"%s:%s is not a valid container class\" % (model, xpath))\n return model_inst\n\n\ndef fixSubifIndex(json_value: dict):\n \"\"\"Rewrite the index of a pybindJSON-produced subinterface as 
int.\n\n pybindJSON dumps the index as a str value, instead of int.\n\n https://github.com/robshakir/pyangbind/issues/139\n\n \"\"\"\n index = json_value['openconfig-interfaces:subinterfaces'][\n 'subinterface'][0]['index']\n json_value['openconfig-interfaces:subinterfaces']['subinterface'][0][\n 'index'] = int(index)\n\n\ndef getOcModelsVersions() -> List[str]:\n \"\"\"Returns a list of the OC models versions used.\n\n Returns an empty list if unable to read the models/versions file.\n \"\"\"\n versions_file = os.path.join(models.__path__[0], \"versions\")\n if os.path.isfile(versions_file):\n with open(versions_file) as f:\n return [line.strip() for line in f]\n return []\n","sub_path":"oc_config_validate/oc_config_validate/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"506403665","text":"from Utils.MapObjectBase import MapObjectBase, TiledObjectItem\nfrom Utils.UserEvents import *\nimport Utils.DirHelper\nimport pygame\nimport os\n\nclass PlayerProps():\n Loop = \"Loop\"\n Start = \"Start\"\n\nclass MusicPlayer(MapObjectBase):\n \"\"\"Implements the music player as map object.\"\"\"\n SongDictionary = {}\n\n def __init__(self):\n super().__init__()\n pygame.mixer.init()\n pygame.mixer.music.set_endevent(EVENT_MUSIC_ENDED)\n self._songs = []\n self._loop = None\n self._stop = False\n self._pause = False\n self._currentSongIndex = None\n\n def configure(self, configuration):\n assert isinstance(configuration, TiledObjectItem), \"Expected configuration of type TiledObjectItem.\"\n self.configureProperties(configuration.properties)\n return super().configure(configuration)\n\n def initializeObject(self, parent):\n parent.musicPlayer = self\n super().initializeObject(parent)\n if self._start:\n self.play()\n\n def configureProperties(self, properties):\n \"\"\"Configure the properties from TMX properties.\"\"\"\n trueStringList = ['True', 'true', '1', 1]\n songlist = []\n\n for prop in properties:\n if prop == PlayerProps.Loop:\n self._loop = properties[prop] in trueStringList\n elif prop == PlayerProps.Start:\n self._start = properties[prop] in trueStringList\n else:\n song = {\"Name\" : prop, \"FileName\" : properties[prop]}\n self._songs.append(song)\n \n\n def play(self, index = 0):\n \"\"\"Plays the song index.\"\"\"\n if index != None:\n if len(self._songs) > 0:\n musicFile = Utils.DirHelper.getSongResourceFile(self._songs[index][\"FileName\"])\n if os.path.isfile(musicFile):\n pygame.mixer.music.load(musicFile)\n print(\"Playing song: \", self._songs[index][\"FileName\"])\n self._currentSongIndex = 0\n pygame.mixer.music.play()\n\n pass\n\n def playNextSong(self):\n if self._songs and len(self._songs) > 0 and self._stop == False and self._currentSongIndex != None:\n if self._currentSongIndex + 1 < len(self._songs):\n self._currentSongIndex += 1\n self.play(self._currentSongIndex)\n else:\n if self._loop:\n self._currentSongIndex = 0\n self.play(self._currentSongIndex)\n\n def stop(self):\n self._stop = True\n pygame.mixer.music.stop()\n\n def togglePause(self):\n if self._pause:\n self._pause = False\n pygame.mixer.music.pause()\n else:\n self._pause = True\n pygame.mixer.music.unpause()\n pass\n\n \n\n\n","sub_path":"SimpleGame/SimpleGame/Src/MapObjects/MusicPlayer.py","file_name":"MusicPlayer.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
+{"seq_id":"422437308","text":"# exec(open(\"gui.py\").read(), globals())\nimport sys\nfrom foldercount import numfile\n\nimport gui\n\nexec(open('gatherimages.py').read())\n\ntrain_q = gui.QApplication([])\ntrain_app = gui.App('train')\n# Continue training while there are images left in /images\nwhile numfile('./training_data/like') + numfile('./training_data/dislike') < numfile('./images/'):\n\ttrain_app.photoChange('./images/')\ntrain_q.quit()\n\npredict_q = gui.QApplication([])\npredict_app = gui.App('predict')\n\n# sys.argv = ['gui.py','train']\n# exec(open('gui.py').read())\n\n# sys.argv = ['gui.py','predict']\n# exec(open('gui.py').read())","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"567076704","text":"import os\nfrom src.request_handler.state.command_handler import CommandHandler\n\n\nclass CharacterRequestHandler(CommandHandler):\n def __init__(self):\n super().__init__()\n\n def handle_request(self, command, world_state):\n response = super().handle_request(command, world_state)\n try:\n characters = self.get_characters(command)\n except:\n return response.set_response_success(False)\n\n response.args['characters'] = characters\n return response.set_response_success(True)\n\n def get_characters(self, command):\n characters = []\n for root, _, files in os.walk(f'./{self.ACCOUNTS_DIRECTORY}/{command.ident}/characters/'):\n for file_data in files:\n if file_data.endswith('.character'):\n with open(root + file_data, 'r') as data_file:\n characters.append(data_file.read())\n return characters\n","sub_path":"server/src/request_handler/state/character_request_handler.py","file_name":"character_request_handler.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"149681170","text":"import threading\nimport time\n\ndef run(n):\n print(\"task\",n)\n time.sleep(1)\n\n# for x in range(3):\n# t=threading.Thread(target=run,args=(5,))\n# t.start()\n\n#当主线程休眠的时间过长时,其他线程可能已经结束 所以当前活跃线程数不确定\n# time.sleep(0.5)\n# print(threading.activeCount())\nprint(\"-\"*50)\n#将子线程设为守护线程\nfor x in range(3):\n t=threading.Thread(target=run,args=(3,))\n t.setDaemon(True)\n t.start()\ntime.sleep(0.5)\nprint(threading.activeCount())","sub_path":"python - 核心编程/第二章 高级主题/第十八章 多线程编程/统计当前活跃的线程数.py","file_name":"统计当前活跃的线程数.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"424107058","text":"from collections import OrderedDict\nimport numpy as np\nfrom network.layer import *\nfrom utility.gradient import *\n\nclass MultiLayerNet:\n \"\"\"\n (class) MultiLayerNet\n ---------------------\n - The multi layer neural network\n\n Parameter\n ---------\n - num_input_node : number of input node\n - num_hidden_node_list : number of hidden node list\n - num_output_node : number of output node\n - activation : activation function type (default = relu)\n - relu, sigmoid\n - weight_init_std : weight sigma value (default = he)\n - he, xavier\n - weight_decay_lambda : weight decay panelty parameter (default = 0)\n - use_dropout : dropout flag (default = False)\n - dropout_ratio : dropout probability (default = 0.5)\n - use_batchnorm : batch normalization flag (default = False)\n \"\"\"\n # Object initializer\n def __init__(self, num_input_node, num_hidden_node_list, num_output_node, \n activation='relu', 
weight_init_std='he', weight_decay_lambda=0,\n use_dropout=False, dropout_ratio=0.5, use_batchnorm=False):\n # Initialize parameters\n self.num_input_node = num_input_node # input node information\n self.num_hidden_node_list = num_hidden_node_list # hidden node information\n self.num_hidden_layer = len(num_hidden_node_list) # number of hidden layers\n self.num_output_node = num_output_node # output node information\n self.weight_decay_lambda = weight_decay_lambda # weight decay lambda parameter\n self.use_dropout = use_dropout # use flag for dropout\n self.dropout_ratio = dropout_ratio # dropout probability\n self.use_batchnorm = use_batchnorm # use flag for batch normalization\n\n # Initialize network parameters \n self.__init_weight(weight_init_std)\n\n # Create network architecture\n net_layer_var = {'sigmoid': Sigmoid, 'relu': Relu} # supported network variables\n self.__create_network(net_layer_var, activation)\n\n # Initialize weights\n def __init_weight(self, weight_init_std):\n net_architecture = [self.num_input_node] + self.num_hidden_node_list + [self.num_output_node]\n self.params = {} # network parameters\n for idx in range(1, len(net_architecture)):\n scale = weight_init_std\n if str(weight_init_std).lower() in ('relu', 'he'):\n scale = np.sqrt(2.0 / net_architecture[idx - 1]) # for ReLU\n elif str(weight_init_std).lower() in ('sigmoid', 'xavier'):\n scale = np.sqrt(1.0 / net_architecture[idx - 1]) # for sigmoid\n self.params['W' + str(idx)] = scale * np.random.randn(net_architecture[idx - 1], net_architecture[idx])\n self.params['b' + str(idx)] = np.zeros(net_architecture[idx])\n\n # Create a network\n def __create_network(self, net_layer_var, activation):\n self.layers = OrderedDict()\n for idx in range(1, self.num_hidden_layer + 1):\n self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)], self.params['b' + str(idx)])\n if self.use_batchnorm:\n self.params['gamma' + str(idx)] = np.ones(self.num_hidden_node_list[idx - 1])\n self.params['beta' + str(idx)] = np.zeros(self.num_hidden_node_list[idx - 1])\n self.layers['BatchNorm' + str(idx)] = BatchNorm(self.params['gamma' + str(idx)], self.params['beta' + str(idx)])\n self.layers['Activation_function' + str(idx)] = net_layer_var[activation]()\n if self.use_dropout:\n self.layers['Dropout' + str(idx)] = Dropout(self.dropout_ratio)\n idx = self.num_hidden_layer + 1\n self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)], self.params['b' + str(idx)])\n self.lastLayer = SoftmaxWithLoss()\n\n # Predict a response\n def predict(self, x, train_flag=False):\n for key, layer in self.layers.items():\n if \"Dropout\" in key or \"BatchNorm\" in key:\n x = layer.forward(x, train_flag)\n else:\n x = layer.forward(x)\n\n return x\n\n # Calculate a loss value\n def loss(self, x, t, train_flag=False):\n y = self.predict(x, train_flag)\n panelty = 0\n for idx in range(1, self.num_hidden_layer + 2):\n W = self.params['W' + str(idx)]\n panelty += 0.5 * self.weight_decay_lambda * np.sum(W**2)\n\n return self.lastLayer.forward(y, t) + panelty\n\n # Calculate an accuracy\n def accuracy(self, x, t):\n y = self.predict(x, train_flag=False)\n y = np.argmax(y, axis=1)\n if t.ndim != 1:\n t = np.argmax(t, axis=1)\n accuracy = np.sum(y == t) / float(x.shape[0])\n \n return accuracy\n\n # Calculate numerical gradients\n def numerical_gradient(self, x, t):\n # Do forward computations\n loss_W = lambda W: self.loss(x, t, train_flag=True)\n\n # Calculate gradients\n grads = {}\n for idx in range(1, self.num_hidden_layer + 2):\n 
grads['W' + str(idx)] = numerical_gradient(loss_W, self.params['W' + str(idx)])\n grads['b' + str(idx)] = numerical_gradient(loss_W, self.params['b' + str(idx)])\n if self.use_batchnorm and idx != self.num_hidden_layer + 1:\n grads['gamma' + str(idx)] = numerical_gradient(loss_W, self.params['gamma' + str(idx)])\n grads['beta' + str(idx)] = numerical_gradient(loss_W, self.params['beta' + str(idx)])\n\n return grads\n\n # Calculate gradients using backpropagations\n def backprop_gradient(self, x, t):\n # Do forward computations\n self.loss(x, t, train_flag=True)\n\n # Do backward computations\n dout = 1\n dout = self.lastLayer.backward(dout) \n layers = list(self.layers.values())\n layers.reverse()\n for layer in layers:\n dout = layer.backward(dout)\n\n # Save the gradients\n grads = {}\n for idx in range(1, self.num_hidden_layer + 2):\n grads['W' + str(idx)] = self.layers['Affine' + str(idx)].dW + self.weight_decay_lambda * self.layers['Affine' + str(idx)].W\n grads['b' + str(idx)] = self.layers['Affine' + str(idx)].db\n if self.use_batchnorm and idx != self.num_hidden_layer + 1:\n grads['gamma' + str(idx)] = self.layers['BatchNorm' + str(idx)].dgamma\n grads['beta' + str(idx)] = self.layers['BatchNorm' + str(idx)].dbeta\n\n return grads","sub_path":"network/multi_layer_net.py","file_name":"multi_layer_net.py","file_ext":"py","file_size_in_byte":6533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"286002388","text":"\"\"\"07.12.2016\r\nMert Unsal\r\nPython 3.5 de calistirilmasi gerekmektedir.\r\n2016-2017 Tubitak Projesi icin sigma notasyonlarinin sonuclarinin dogrulugunu\r\nkontrol eden python kodu\"\"\"\r\n#Asagidaki fonksiyon r'nin herhangi bir degeri icin (1-r)+(1-r)*r^2+(1-r)*r^4..\r\n#seklinde uzayan sigma notasyonunu hesapliyor.\r\ndef sigma2():\r\n payda=int(input(\"Oranin paydasini giriniz: \"))\r\n toplam=0\r\n for sayi in range(1,10000):\r\n if (1-(-1)**sayi)==0:\r\n toplam+=0\r\n else:\r\n toplam+=((1-(1/payda))*((1/payda)**(sayi-1))*((1-(-1)**sayi)/2))\r\n#sonsuza kadar gidiyorsa uzun uzun basamaklari yazdirmamasi icin gereken kod\r\n if len(str(toplam))>16: \r\n print(str(toplam)[0:16])\r\n else:\r\n print(toplam)\r\nwhile True:\r\n sigma2()\r\n \r\n \r\n","sub_path":"KODLAR/sigma2.py","file_name":"sigma2.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"502557938","text":"from bottle import route, run, request\nfrom random import randint\n\ndef htmlify(text,title):\n page = \"\"\"\n \n \n \n \n \n \"\"\" + title + \"\"\"\n \n \n \n \"\"\" + text + \"\"\"\n \n \n\n \"\"\"\n return page\n\nTHENUMBER = randint(0,20)\n\ndef home_page():\n html = \"\"\"
\n        
Choose a number:\n        \"\"\"\n    for number in range(0,21):\n        html = html + \"\"\"\n        <a href=\"/guess?number=%d\">
%d\"\"\" % (number,number)\n return htmlify(html,\"20 questions\")\n\ndef guess_page():\n guess = request.GET[\"number\"]\n global THENUMBER\n guessi = int(guess)\n if guessi == THENUMBER:\n return htmlify(\"You guessed CORRECT! Have chocolate.\",\"OK\")\n elif guessi > THENUMBER:\n return htmlify(\"You guessed high.\",\"high\")\n else:\n return htmlify(\"You guessed low.\",\"low\")\n\nroute('/','GET',home_page)\nroute('/guess','GET',guess_page)\nrun(debug=True)\n\n\n\n\n\n\n\n\n","sub_path":"BIL103E - Intr. to Inf. Syst.&Comp. Eng./Course Files/OtherExamples/Intro to Bottle/20questions0.py","file_name":"20questions0.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"549003287","text":"import pytest\r\nimport time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom _pytest.mark import expression\r\nimport unittest \r\n\r\n\r\n\r\n\r\ndef applyFiltersOnHomePage():\r\n driver = webdriver.Chrome('./webDrivers/chromedriver')\r\n url = \"https://www.airbnb.com/\"\r\n # url1 = \"https://www.airbnb.com/s/Rome--Italy/homes?tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&checkin=2021-01-16&checkout=2021-01-23&adults=2&children=1&source=structured_search_input_header&search_type=unknown&place_id=ChIJw0rXGxGKJRMRAIE4sppPCQM&map_toggle=true\"\r\n driver.get(url)\r\n driver.maximize_window()\r\n driver.implicitly_wait(10)\r\n # Now entering location Rome, Italy on Home page\r\n ele_EnterLocation = driver.find_element_by_xpath(\"//*[@id='bigsearch-query-detached-query']\") # Rome, Italy Location element \r\n ele_EnterLocation.send_keys(\"Rome, Italy\")\r\n # element = driver.find_element_by_xpath(\"/html/body/div[4]/div/div/div/div[1]/div[1]/div/header/div/div[2]/div[2]/div/div/div/form/div/div/div[3]/div[4]/section/div/div/div[1]/div/div/div/div[2]/div[2]/div/div[2]/div/table/tbody/tr[3]/td[7]/div/div/div\")\r\n # element.send_keys(\"Rome, Italy\")\r\n # time.sleep(1)\r\n # Now Clicking on the In date on home page\r\n ele_EnterDateField = driver.find_element_by_xpath(\"/html/body/div[4]/div/div/div/div[1]/div[1]/div/header/div/div[2]/div[2]/div/div/div/form/div/div/div[3]/div[1]/div[1]\") # CheckIn field element\r\n ele_EnterDateField.click()\r\n time.sleep(1)\r\n # Now entering Check In date on home page\r\n ele_EnterCheckInDate = driver.find_element_by_xpath(\"/html/body/div[4]/div/div/div/div[1]/div[1]/div/header/div/div[2]/div[2]/div/div/div/form/div/div/div[3]/div[4]/section/div/div/div[1]/div/div/div/div[2]/div[2]/div/div[2]/div/table/tbody/tr[3]/td[7]/div/div/div\") # CheckIn date element\r\n ele_EnterCheckInDate.click()\r\n time.sleep(1)\r\n # Now entering CheckOut date on home page\r\n ele_EnterCheckOutDate = driver.find_element_by_xpath(\"/html/body/div[4]/div/div/div/div[1]/div[1]/div/header/div/div[2]/div[2]/div/div/div/form/div/div/div[3]/div[4]/section/div/div/div[1]/div/div/div/div[2]/div[2]/div/div[2]/div/table/tbody/tr[4]/td[7]/div/div/div\") # Check out date element\r\n ele_EnterCheckOutDate.click()\r\n time.sleep(1)\r\n # Now clicking on the guest field of home page\r\n ele_GuestField = driver.find_element_by_xpath(\"/html/body/div[4]/div/div/div/div[1]/div[1]/div/header/div/div[2]/div[2]/div/div/div/form/div/div/div[5]/div[1]/div\") # Guest Field Element\r\n ele_GuestField.click()\r\n time.sleep(1)\r\n\r\n # Now 
adding adults as guests on home page\r\n ele_AdultGuest = driver.find_element_by_xpath(\"//*[@id='stepper-adults']/button[2]\") # Adult guests alement on home page\r\n ele_AdultGuest.click()\r\n # time.sleep(1)\r\n ele_AdultGuest.click()\r\n time.sleep(1)\r\n # Now entering child Guest on home page\r\n ele_ChildGuest = driver.find_element_by_xpath(\"//*[@id='stepper-children']/button[2]\") # Child guest element on home page\r\n ele_ChildGuest.click()\r\n # time.sleep(2)\r\n\r\n # Now clicking on the element Search Button\r\n ele_SearchField = driver.find_element_by_xpath(\"//button[@class='_1mzhry13' and @type='button']\") # Button search element on home page\r\n ele_SearchField.click()\r\n time.sleep(1)\r\n return driver\r\n\r\n\r\ndef test_verifyIsNumberofGuestsAccomodate(): \r\n driver = applyFiltersOnHomePage()\r\n time.sleep(10)\r\n ele_guests = driver.find_elements_by_xpath(\"//div[@class='_kqh46o' and @style='margin-top: 9px;']\")\r\n print(\"Length of total cards is :: --->> \" , len(ele_guests))\r\n\r\n guests = []\r\n i=0\r\n for x in ele_guests:\r\n t = x.text\r\n parts = t.split(\"guests\")\r\n temp = parts[0]\r\n guests.append(temp)\r\n i = i + 1\r\n\r\n for y in guests:\r\n assert int(y) >= 3 , \"Property Does Not Accomudate the Number of Guests\"\r\n print(y)\r\n\r\n \r\n\r\ndef test_VerifyAppliedFiltersAreCorrect():\r\n\r\n driver = applyFiltersOnHomePage()\r\n time.sleep(10)\r\n\r\n location = \"Rome, Italy\"\r\n checkin = \"Jan 16\"\r\n checkout = \"Jan 23\"\r\n guests = 3\r\n\r\n \r\n\r\n ################################### Getting Filters from Container by Opening code Starts ####################################################################################################################################### \r\n\r\n # Clicking on the top Header filter to open and get filters details\r\n ele_DetailContainer = driver.find_element_by_xpath(\"/html/body/div[4]/div/div/div/div[1]/div[1]/div/header/div/div[2]/div[1]\") #clicking on filter of Detailed Container element\r\n ele_DetailContainer.click()\r\n time.sleep(2) \r\n\r\n \r\n # Now getting Location by Opening Container\r\n ele_DetailContainer_Location = driver.find_element_by_xpath(\"//*[@id='bigsearch-query-detached-query']\") # Detailed Container location element after opening\r\n ele_DetailContainer_Location_Text = ele_DetailContainer_Location.get_attribute(\"value\")\r\n print(\"by opening ---> \", ele_DetailContainer_Location_Text)\r\n # time.sleep(2) \r\n\r\n # Now getting CheckIn Date by Opening Container\r\n ele_DetailContainer_Date_Checkin = driver.find_element_by_xpath(\"/html/body/div[4]/div/div/div/div[1]/div[1]/div/header/div/div[2]/div[2]/div/div/div/form/div/div/div[3]/div[1]/div[1]/div/div[2]\") # Detailed Container location Date CheckIn by Opening\r\n ele_DetailContainer_Date_Checkin_Text = ele_DetailContainer_Date_Checkin.text\r\n print(\"by opening ---> \",ele_DetailContainer_Date_Checkin_Text)\r\n # time.sleep(2) \r\n\r\n # Now getting CheckOut Date by Opening Container\r\n ele_DetailContainer_Date_Checkout = driver.find_element_by_xpath(\"/html/body/div[4]/div/div/div/div[1]/div[1]/div/header/div/div[2]/div[2]/div/div/div/form/div/div/div[3]/div[3]/div[1]/div/div[2]\") # Detailed Container location Date Checkout by Opening\r\n ele_DetailContainer_Date_Checkout_Text = ele_DetailContainer_Date_Checkout.text\r\n print(\"by opening ---> \",ele_DetailContainer_Date_Checkout_Text)\r\n time.sleep(5) \r\n # Now getting Guests by Opening Container\r\n saasa = 
driver.find_element_by_xpath(\"/html/body/div[4]/div/div/div/div[1]/div[1]/div/header/div/div[2]/div[2]/div/div/div/form/div/div/div[5]/div[1]/div/div[1]\")\r\n saasa.click()\r\n time.sleep(2)\r\n ele_GuestsAdultsElement = driver.find_element_by_xpath(\"//div[@class='_1665lvv']//span[@data-testid='stepper-adults-value']\")\r\n ele_GuestsAdultsElementText = ele_GuestsAdultsElement.text\r\n print(\"By Opening Adults are: --->\", ele_GuestsAdultsElementText)\r\n\r\n ele_GuestsChildernElement = driver.find_element_by_xpath(\"//div[@class='_1665lvv']//span[@data-testid='stepper-children-value']\")\r\n ele_GuestsChildernElementText = ele_GuestsChildernElement.text\r\n print(\"By Opening Childerns are: --->\" ,ele_GuestsChildernElementText)\r\n\r\n ele_DetailContainer_Guest_Text = int(ele_GuestsAdultsElementText) + int(ele_GuestsChildernElementText) \r\n ##################\r\n # ele_DetailContainer_Guest = driver.find_element_by_xpath(\"/html/body/div[4]/div/div/div/div[1]/div[1]/div/header/div/div[2]/div[2]/div/div/div/form/div/div/div[5]/div[1]/div/div[2]\") # Detailed Container Guests element\r\n # ele_DetailContainer_Guest_Text = ele_DetailContainer_Guest.text\r\n # print(\"by opening ---> \",ele_DetailContainer_Guest_Text)\r\n time.sleep(1)\r\n ####################################### Getting Filters from Container by Opening code Ends ############################################################################################################################\r\n\r\n\r\n ########### Applying assert commands //////////\r\n assert ele_DetailContainer_Location_Text == location , \"Failed Due to not matched\"\r\n assert ele_DetailContainer_Date_Checkin_Text == checkin , \"Failed Due to not matched\"\r\n assert ele_DetailContainer_Date_Checkout_Text == checkout , \"Failed Due to not matched\"\r\n assert ele_DetailContainer_Guest_Text == guests , \"Failed Due to not matched\"\r\n\r\n\r\n\r\ndef test_VerifyDetailedPageMatchesExtraFilters ():\r\n driver = applyFiltersOnHomePage()\r\n\r\n # Now clicking on option More Filters to open the more filters pop-up\r\n ele_MoreFilters = driver.find_element_by_xpath(\"//*[@id='menuItemButton-dynamicMoreFilters']/button\") # More filters pop-op opening element\r\n ele_MoreFilters.click()\r\n time.sleep(1)\r\n\r\n # Now Locating Element to add number of beds\r\n ele_Bedrooms = driver.find_element_by_xpath(\"//*[@id='filterItem-rooms_and_beds-stepper-min_bedrooms-0']/button[2]\") # Bedroom element on morefilter pop-up\r\n \r\n ele_Bedrooms.click()\r\n time.sleep(0.5)\r\n ele_Bedrooms.click()\r\n time.sleep(0.5)\r\n ele_Bedrooms.click()\r\n time.sleep(0.5)\r\n ele_Bedrooms.click()\r\n time.sleep(0.5)\r\n ele_Bedrooms.click()\r\n time.sleep(2)\r\n\r\n # Now findind element Pool \r\n ele_Pool = driver.find_element_by_xpath(\"//*[@id='filterItem-facilities-checkbox-amenities-7']\") # pool element on more filters pop-up\r\n #Scrooling to pool item \r\n ele_Pool.location_once_scrolled_into_view\r\n # ele_PoolValue = ele_Pool.get_attribute(\"value\")\r\n # print(ele_PoolValue)\r\n ele_Pool.click()\r\n time.sleep(2)\r\n\r\n # Finding button Show Stays on more filters pop-up\r\n ele_ShowStays = driver.find_element_by_xpath(\"//button[@data-testid='more-filters-modal-submit-button']\") # Button show stays button Element \r\n ele_ShowStays.click()\r\n time.sleep(10)\r\n\r\n ################### ---------------------------------------------------------\r\n ### assert list accumodate the number of bedrooms\r\n ################### 
---------------------------------------------------------\r\n\r\n ############### Starts --> Opening detail of first property and verify pool is available on new page ###############################################\r\n # window_before = driver.window_handles[0]\r\n\r\n # Opening detail of first property on new page\r\n ele_link = driver.find_element_by_xpath(\"//*[@id='ExploreLayoutController']/div/div[1]/div[1]/div[2]/div/div[2]/div/div/div[2]/div/div/div/div/div[1]/div/div/div/div/div[2]/a\")\r\n time.sleep(2)\r\n ele_link.click()\r\n window_after = driver.window_handles[1]\r\n time.sleep(2)\r\n # Navigates the driver to the new opened page\r\n driver.switch_to_window(window_after)\r\n time.sleep(2)\r\n # print(ele_link.get_attribute(\"href\"))\r\n print(driver.current_url)\r\n time.sleep(2)\r\n\r\n # find element \"Show all Aminites\"\r\n ele_AllAminites = driver.find_element_by_xpath(\"//*[@id='site-content']/div/div/div[3]/div[1]/div/div[5]/div/div[2]/div[3]/a\") # button Show all aminites\"\"\r\n ele_AllAminites.click()\r\n time.sleep(2)\r\n\r\n # Opeing The pop-up Amenites\r\n ele_PoolUnderFacilities = driver.find_element_by_xpath(\"/html/body/div[11]/section/div/div/div[3]/div/div/section/div[2]/div[11]/div[4]/div\")\r\n # Getting text Pool on the Aminites pop-up\r\n ele_PoolUnderFacilitiesText = ele_PoolUnderFacilities.text\r\n print(\" ---asasasasas----> \",ele_PoolUnderFacilitiesText)\r\n\r\n assert ele_PoolUnderFacilitiesText == \"Pool\" , \"Pool is not available under the Facilites Heading\"\r\n\r\n time.sleep(10)\r\n\r\n ############### Ends --> Opening detail of first property and verify pool is available on new page ###############################################\r\n\r\n\r\n\r\n\r\ndef test_VerifyPropertiesCanAccommodateNumberOfGuests ():\r\n driver = applyFiltersOnHomePage()\r\n time.sleep(8)\r\n \r\n ele_bedrooms = driver.find_elements_by_xpath(\"//div[@class='_kqh46o' and @style='margin-top: 9px;']\")\r\n print(\"Length of total cards is :: --->> \" , len(ele_bedrooms))\r\n\r\n rooms = []\r\n i=0\r\n for x in ele_bedrooms:\r\n t = x.text\r\n parts = t.split(\" \")\r\n temp = parts[3]\r\n print(temp)\r\n # temp1 = parts[4]\r\n # print(temp1)\r\n rooms.append(temp)\r\n i = i + 1\r\n\r\n for y in rooms:\r\n assert int(y) >= 5 , \"Property Does Not Accomudate the Number of Bedrooms\"\r\n print(y)\r\n\r\n\r\ndef test_VerifyPropertyDisplayedOnMapCorrectly ():\r\n driver = applyFiltersOnHomePage()\r\n time.sleep(5)\r\n # ele_to_hover_over =driver.find_element_by_xpath(\"//*[@id='ExploreLayoutController']/div/div[1]/div[1]/div[2]/div/div[2]/div/div/div[2]/div/div/div/div/div[1]/div/div/div/div/div[2]\")\r\n # hover = ActionChains(driver).move_to_element(ele_to_hover_over)\r\n # hover.perform()\r\n\r\n ########################## Getting data to compare with the property on the map ############################\r\n \r\n\r\n ele_PropertyName = driver.find_element_by_xpath(\"//*[@id='ExploreLayoutController']/div/div[1]/div[1]/div[2]/div/div[2]/div/div/div[2]/div/div/div/div/div[1]/div/div/div/div/div[2]/div[2]/div[1]/div/div[1]/div\")\r\n propertyNameText = ele_PropertyName.text\r\n print(propertyNameText)\r\n\r\n ele_PropertyLocation = driver.find_element_by_xpath(\"//*[@id='ExploreLayoutController']/div/div[1]/div[1]/div[2]/div/div[2]/div/div/div[2]/div/div/div/div/div[1]/div/div/div/div/div[2]/div[2]/div[1]/div/div[2]\")\r\n propertyLocationText = ele_PropertyLocation.text\r\n print(propertyLocationText)\r\n\r\n # ele_PropertyRating = 
driver.find_element_by_xpath(\"//*[@id='ExploreLayoutController']/div/div[1]/div[1]/div[2]/div/div[2]/div/div/div[2]/div/div/div/div/div[1]/div/div/div/div/div[2]/div[2]/div[5]/div[1]/span/span[2]\")\r\n # propertyRatingText = ele_PropertyRating.text\r\n # print(propertyRatingText)\r\n\r\n # ele_PropertyReviews = driver.find_element_by_xpath(\"//*[@id='ExploreLayoutController']/div/div[1]/div[1]/div[2]/div/div[2]/div/div/div[2]/div/div/div/div/div[1]/div/div/div/div/div[2]/div[2]/div[5]/div[1]/span/span[3]\")\r\n # propertyReviewsText = ele_PropertyReviews.text\r\n # print(propertyReviewsText)\r\n\r\n # ele_PropertyPrice = driver.find_element_by_xpath(\"//*[@id='ExploreLayoutController']/div/div[1]/div[1]/div[2]/div/div[2]/div/div/div[2]/div/div/div/div/div[1]/div/div/div/div/div[2]/div[2]/div[5]/div[2]/div/div[1]/span/span\")\r\n # propertyPriceText = ele_PropertyPrice.text\r\n # print(propertyPriceText)\r\n ############################################################################################################ \r\n print(\"\\n ------------ \\n Reached End of Page \\n ---------------\\n\")\r\n\r\n action = ActionChains(driver)\r\n\r\n ele_to_mouse_hover = driver.find_elements_by_xpath(\"//div[@class='_1nz9l7j']\")\r\n print(len(ele_to_mouse_hover))\r\n action.move_to_element(ele_to_mouse_hover[0]).perform()\r\n # time.sleep(10)\r\n\r\n time.sleep(15)\r\n ele_on_map = driver.find_element_by_xpath(\"//div[@style='align-items: center; background-color: rgb(34, 34, 34); border-radius: 28px; box-shadow: rgba(0, 0, 0, 0.24) 0px 0px 0px 1px inset, rgba(0, 0, 0, 0.18) 0px 1px 2px; color: rgb(255, 255, 255); display: flex; height: 28px; justify-content: center; padding: 0px 8px; position: relative; white-space: nowrap;']\")\r\n # time.sleep(2)\r\n ele_on_map.click()\r\n\r\n # WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.CSS_SELECTOR, \".reply-button\"))).click()\r\n\r\n # hello = driver.find_element_by_xpath(\"//div[@class='_1q6k59c']//span[@class='_11ry7lz' and @aria-hidden='true']\").text\r\n # ele_hello_text = ele_hello.text\r\n # print (hello)\r\n # print(len(ele_hello))\r\n\r\n mapPropertyNameText= driver.find_elements_by_xpath(\"//div[@class='_v3gzda1']//ol[@class='_194e2vt2']//li\")\r\n print(mapPropertyNameText[0].text)\r\n \r\n mapPropertyLocationText = driver.find_element_by_xpath(\"//div[@class='_1q6k59c']//div[@class='_1isz8pdq']\").text\r\n print(mapPropertyLocationText)\r\n\r\n # ppxpxx = driver.find_element_by_xpath(\"//div[@class='_1q6k59c']//span[@class='_11ry7lz']']\").text\r\n # print(ppxpxx)\r\n\r\n print(\"\\n ------------ \\n Reached End of Page \\n ---------------\\n\")\r\n time.sleep(5)\r\n\r\n\r\n assert propertyNameText == mapPropertyNameText, \"Location Doesnot matches on MAP\"\r\n\r\n assert propertyLocationText == mapPropertyLocationText, \"Location Doesnot matches on MAP\"","sub_path":"asim.py","file_name":"asim.py","file_ext":"py","file_size_in_byte":16189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"648236582","text":"import libreria\n\ndef paciente():\n nombre=libreria.pedir_nombre(\"ingrese el nombre del paciente que desea ver :\")\n habitacion=libreria.pedir_numero(\"ingrese en numero de habitacion en donde se encuentra:\",0,40)\n contenido= (nombre +\"-\"+ \"-\" + str(habitacion) + \"\\n\")\n libreria.guardar_datos(\"enfermeria.txt\", contenido,\"a\")\n print(\"datos guardados con exito\")\n\ndef mostrar_datos_guardados():\n datos=libreria.obtener_datos(\"enfermeria.txt\")\n 
if (datos != \"\"):\n        print(datos)\n    else:\n        print(\"file has no data\")\n\n\nopc = 0\nopcion_salir = 3\n\nwhile (opc != opcion_salir):\n    print(\"##########MENU##########\")\n    print(\"# 1. patient           #\")\n    print(\"# 2. show saved data   #\")\n    print(\"# 3. exit              #\")\n    print(\"########################\")\n\n    opc = libreria.pedir_numero(\"Choose the option you want: \", 1, 3)\n    if (opc == 1):\n        paciente()\n    if (opc == 2):\n        mostrar_datos_guardados()\n\n\n# end of menu\nprint(\"end of the operations performed\")\n\n","sub_path":"app4.py","file_name":"app4.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"126032853","text":"# -*- coding: utf-8 -*-\nimport math\n\nfrom openerp.osv import fields, osv\n\nclass barcode(osv.osv):\n    _name = \"wf_barcode.barcode\"\n    _columns = {\n\t'wf_barcode': fields.char('Barcode', size=64),\n\t'wf_product_tmpl_id': fields.many2one('product.template','Product Template'),\n    }\n    _sql_constraints = [('wf_barcode_uniq', 'unique(wf_barcode)', 'The reference must be unique !')]\t\n\nbarcode()\n\nclass product_template(osv.osv):\n    _inherit = \"product.template\"\n    _columns = {\n        'wf_barcodes_id': fields.one2many('wf_barcode.barcode', 'wf_product_tmpl_id', 'Barcode'),\n        'state': fields.selection([('',''),\n                                ('draft', 'In Development'),\n                                ('sellable','Normal'),\n                                ('end','End of Lifecycle'),\n                                ('obsolete','Obsolete'),\n                                ('filiale','Store article')], 'state', help=\"Tells the user if he can use the product or not.\"),\n        'wf_standard_price': fields.float('Costs 2'),\n        'wf_sup_extra_costs': fields.float('Default Extra Costs'),\n        'sale_ok': fields.boolean('Can be Sold', help=\"Specify if the product can be selected in a sales order line.\", track_visibility='onchange'),\n    }\n    _defaults = {\n        'type': 'product',\n        'cost_method': 'average',\n    }\nproduct_template()\n","sub_path":"wf_product/wf_barcode.py","file_name":"wf_barcode.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"216163365","text":"\"\"\"\nStore information in Firebase\nInformation: number of lenders, total amount borrowed, monthly repayment amount, user ID, each step\nLawyer registration\nAutomatic sending to lawyers\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport time\nimport requests\nfrom flask import Flask, request\nfrom datetime import datetime\nfrom Flow import Flow\nfrom Get_started import Getstarted\n\napp = Flask(__name__)\n\nACCESS_TOKEN = os.environ[\"ACCESS_TOKEN\"]\nVERIFY_TOKEN = os.environ[\"VERIFY_TOKEN\"]\n\nflow = Flow()\n\ngetstarted = Getstarted()\ngetstarted.send_get_started(ACCESS_TOKEN)\n\n\n@app.route('/', methods=['GET'])\ndef verify():\n    if request.args.get(\"hub.mode\") == \"subscribe\" and request.args.get(\"hub.challenge\"):\n        if not request.args.get(\"hub.verify_token\") == VERIFY_TOKEN:\n            return \"Verification token mismatch\", 403\n        return request.args[\"hub.challenge\"], 200\n\n    return \"Hello world\", 200\n\n\n@app.route('/', methods=['POST'])\ndef webhook():\n\n    data = request.get_json()\n    print('***** post data *****')\n    print(data)\n\n    if data[\"object\"] == \"page\":\n\n        for entry in data[\"entry\"]:\n            for messaging_event in entry[\"messaging\"]:\n\n                if messaging_event.get(\"message\"):\n\n                    sender_id = messaging_event[\"sender\"][\"id\"]\n\n                    if messaging_event[\"message\"].get(\"text\"):\n                        message_text = messaging_event[\"message\"][\"text\"]\n\n                        send_typing_on(sender_id)\n\n                        flow.execute_method(sender_id, 
message_text, ACCESS_TOKEN)\n\n else:\n flow.execute_method(sender_id, \"error\", ACCESS_TOKEN)\n\n if messaging_event.get(\"delivery\"):\n pass\n\n if messaging_event.get(\"optin\"):\n pass\n\n if messaging_event.get(\"postback\"):\n\n sender_id = messaging_event[\"sender\"][\"id\"]\n message_text = messaging_event[\"postback\"][\"title\"]\n return_id = messaging_event[\"postback\"][\"payload\"]\n\n send_typing_on(sender_id)\n\n flow.execute_method(sender_id, message_text, ACCESS_TOKEN)\n\n return \"ok\", 200\n\n\ndef send_typing_on(recipient_id):\n params = {\n \"access_token\": ACCESS_TOKEN\n }\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n data = json.dumps({\n \"recipient\": {\n \"id\": recipient_id\n },\n \"sender_action\": \"typing_on\"\n })\n\n requests.post(\"https://graph.facebook.com/v2.6/me/messages\", params=params, headers=headers, data=data)\n time.sleep(0.5)\n\n\nif __name__ == '__main__':\n port = int(os.getenv(\"PORT\", 5000))\n app.run(host=\"0.0.0.0\", port=port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"213249714","text":"from frequency_analysis import frequency_analysis\nfrom most_common import most_common_letters\nfrom input_for_key import input_for_key\nfrom tkinter import filedialog, END, INSERT, messagebox\nfrom output import output\nimport os\n\n\ndef encoding_function(entry, initial_mes, key_txt):\n key = entry.get()\n ciphered_mes = str()\n for it in range(len(initial_mes)):\n ciphered_mes += chr(ord(initial_mes[it]) + ord(key[it % len(key)]))\n\n key_txt.delete(1.0, END)\n key_txt.insert(INSERT, key)\n\n return ciphered_mes\n\n\ndef vigenere_encoding(initial_mes, windows, screen_width, screen_height, output_txt, key_txt):\n input_for_key(initial_mes, windows, screen_width, screen_height, output_txt, encoding_function, key_txt)\n\n\ndef vigenere_decoding(ciphered_mes, output_txt):\n key = str()\n path_to_key = filedialog.askopenfilename()\n if os.path.exists(path_to_key):\n with open(path_to_key, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n key += line\n\n decrypt_mes = str()\n for it in range(len(ciphered_mes)):\n decrypt_mes += chr(ord(ciphered_mes[it]) - ord(key[it % len(key)]))\n\n output(output_txt, decrypt_mes)\n\n\ndef vigenere_decoding_with_frequency_analysis(ciphered_mes, var_eng, var_rus, output_txt):\n most_common = most_common_letters(var_rus, var_eng)\n\n for dekey_len in range(2, 21):\n table = [str()] * dekey_len\n for it in range(len(ciphered_mes)):\n table[it % dekey_len] += ciphered_mes[it]\n\n dekey = [None] * dekey_len\n for letter in range(dekey_len):\n dekey = frequency_analysis(table[letter], dekey, letter, most_common)\n\n decrypt_mes = str()\n for i in range(len(ciphered_mes)):\n if dekey[i % dekey_len] is not None:\n decrypt_mes += chr(ord(ciphered_mes[i]) + dekey[i % dekey_len])\n else:\n break\n\n if len(decrypt_mes) == len(ciphered_mes):\n output(output_txt, decrypt_mes[:min(100, len(decrypt_mes))])\n\n cont = messagebox.askquestion('Continuous Application', 'Do you want to continue?', icon='warning')\n if cont == \"no\":\n output(output_txt, decrypt_mes)\n return\n","sub_path":"GUI Cipher/vigenere.py","file_name":"vigenere.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"298066559","text":"import seamless\nfrom seamless import context, cell, reactor, 
transformer\nfrom seamless.lib.gui.gl import glprogram\nimport numpy as np\n\nctx = context()\n\n# Shaders\nvertex_code = \"\"\"\n    void main()\n    {\n        gl_Position = vec4(1.0, 1.0, 1.0, 1.0);\n    } \"\"\"\n\nfragment_code = \"\"\"\n    void main()\n    {\n        gl_FragColor = vec4(1.0, 1.0, 1.0, 1.0);\n    } \"\"\"\n\nctx.vertex_shader = cell((\"text\", \"code\", \"vertexshader\")).set(vertex_code)\nctx.fragment_shader = cell((\"text\", \"code\", \"fragmentshader\")).set(fragment_code)\n\n# Program\nprogram = {\n    \"arrays\": [],\n    \"uniforms\": {},\n    \"render\": {\n        \"command\": \"points\",\n        \"glstate\": {},\n        \"attributes\": {},\n    },\n}\n\nctx.pre_program = cell(\"json\").set(program)\nctx.gen_program = transformer({\"program\": {\"pin\": \"input\", \"dtype\": \"json\"},\n                               \"result\": {\"pin\": \"output\", \"dtype\": \"json\"}})\nctx.pre_program.connect(ctx.gen_program.program)\nctx.gen_program.code.cell().set(\"return program\")\nctx.program = ctx.gen_program.result.cell()\nctx.equilibrate()\n\np = ctx.glprogram = glprogram(ctx.program)\np.uniforms.cell().set({})\nctx.vertex_shader.connect(p.vertex_shader)\nctx.fragment_shader.connect(p.fragment_shader)\n\np.painted.cell().connect(p.update) # if this connection is broken, no more crash!\n\n\"\"\"\nBUG:\nctx.program.touch() will always re-create the window\nctx.pre_program.touch() will occasionally result in a Qt crash\nThis is much more likely if ctx.program.touch() has not occurred yet,\n and if the GL window has not been killed beforehand\n\nPARTIAL SOLUTION: let Qt flush its event loop whenever work is done\nThis solves the issue for the current program.\nBut this does not solve the same issue in fireworks.py...\nRemove run_qt() in seamless/init.py:run_work to reproduce the bug\n\nFULL SOLUTION: in addition, forbid QOpenGLWidget from calling self.update()\n more than once from within self.paintGL (._painting attribute to check this)\nSee cell-glwindow.py in lib/gui/gl.\nThis prevents all crashes, but it must be combined with the partial\nsolution above, else the window will freeze.\n\nAs of now, no more issues.\n\nUPDATE: .rendered was renamed .painted and moved to glwindow from Renderer\n This apparently eliminates the need for either solution, at least for this case\n\"\"\"\n","sub_path":"OLD/tests/test-gl-BUG.py","file_name":"test-gl-BUG.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"314655859","text":"import math\r\n\r\ndef find_location(x1, y1, r1, x2, y2, r2):\r\n    d = math.sqrt((x2-x1)**2 + (y2-y1)**2)\r\n\r\n    # when the two centers are the same\r\n    if x1 == x2 and y1 == y2 :\r\n        if r1 == r2 :\r\n            return -1\r\n        else :\r\n            return 0\r\n\r\n    # when the circles meet at exactly one point\r\n    if (r1 + r2) == d or abs(r2 - r1) == d:\r\n        return 1\r\n\r\n    # when the circles do not meet\r\n    if r1 + r2 < d or abs(r2 - r1) > d:\r\n        return 0\r\n\r\n    # in every other case the circles meet at two points\r\n    return 2\r\n    \r\nnum = int(input())\r\n\r\nfor idx in range(num):\r\n    a,b,c,d,e,f = map(int, input().split())\r\n    print(find_location(a,b,c,d,e,f))\r\n","sub_path":"Algorithm/python/b1002.py","file_name":"b1002.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"343885071","text":"import pytest\nfrom tfjfbvDHT.dhti import *\n\nIPPORTFILENAME = '/path/to/IPPORT.txt'\n\n@pytest.fixture(scope='function')\ndef new_dhti():\n    cfg = DHTInterface(ipport_file=IPPORTFILENAME)\n    yield cfg\n    cfg.close_connection()\n\ndef test_owns(new_dhti):\n    cfg = new_dhti\n    with 
open(IPPORTFILENAME,'r') as f:\n line = f.readline().strip()\n realIP = line.split(':')[0]\n realPort = int(line.split(':')[1])\n owns_key = realIP + ':' + str(realPort)\n \n own = cfg.owns(owns_key)\n\n assert own == owns_key\n\ndef test_exists(new_dhti):\n cfg = new_dhti\n\n key_for_insert = 'iamtestkey'\n value_for_insert = 'testingtesting'\n\n ins = cfg.insert(key_for_insert,value_for_insert)\n\n exi = cfg.exists(key_for_insert)\n assert exi == 'T'\n\ndef test_fail_exists(new_dhti):\n cfg = new_dhti\n\n key_that_doesnt_exist = 'Isuredonotexistinthesystem'\n\n exi = cfg.exists(key_that_doesnt_exist)\n assert exi == 'F'\n\ndef test_insert(new_dhti):\n cfg = new_dhti\n\n key_for_insert = 'iamtestkeyasdf;lkj'\n value_for_insert = 'testingtesting'\n\n ins = cfg.insert(key_for_insert,value_for_insert)\n\n exi = cfg.exists(key_for_insert)\n assert ins == 'T'\n assert exi == 'T'\n\ndef test_remove(new_dhti):\n cfg = new_dhti\n\n key_for_insert = 'iamtestkey9428ewfijo'\n value_for_insert = 'testingtesting'\n\n ins = cfg.insert(key_for_insert,value_for_insert)\n\n exi = cfg.exists(key_for_insert)\n assert ins == 'T'\n assert exi == 'T'\n\n rem = cfg.remove(key_for_insert)\n\n exi = cfg.exists(key_for_insert)\n assert exi == 'F'\n assert rem == 'T'\n\ndef test_fail_remove(new_dhti):\n cfg = new_dhti\n\n key_that_doesnt_exist = 'Isuredonotexistinthesystem'\n\n rem = cfg.remove(key_that_doesnt_exist)\n assert rem == 'F'\n\ndef test_get(new_dhti):\n cfg = new_dhti\n\n key_for_insert = 'iamtestkey'\n value_for_insert = 'testingtesting'\n\n ins = cfg.insert(key_for_insert,value_for_insert)\n\n exi = cfg.exists(key_for_insert)\n assert ins == 'T'\n assert exi == 'T'\n\n get = cfg.get(key_for_insert)\n\n assert get[0] == 'T'\n assert get[1].decode() == value_for_insert\n\ndef test_fail_get(new_dhti):\n cfg = new_dhti\n\n key_for_get_dne = 'iamtestgetkeythatdoesntexistatall'\n\n get = cfg.get(key_for_get_dne)\n\n assert get[0] == 'F'\n","sub_path":"tests/test_dhti.py","file_name":"test_dhti.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"336741634","text":"#this module will emulate a unix like approach for browsing through a file repository\n#NOTE: cmd line functionality is very rudimentary\n#3/23/2014 MAJOR REVISIONS. The smeta database has been abandoned in favor of a SQL database. Browsing the file tree will be done entirely in memory, and a music file cache will keep disk reads to a minimum.\n#9/21/2014 MAJOR REVISIONS to MAJOR REVISIONS. The SQL database is abandoned in favor of mongo. why? 
bcus its easier to access with javascript and java.\n\n\nimport ctypes # this library wraps lower level c++ functions that allow us to interact with windows.\nfrom threading import Timer #callback functionality\nimport urllib2\nimport re\nimport os\nimport msvcrt\nimport sys\nimport subprocess\nimport time\nimport datetime\nimport operator #our sorter\nimport codecs #for saving funny characters\nfrom xml.etree import ElementTree as etree\n\n#===============================VARS===============================================\n\n#cpp libraries that allow us to query the system for proc names\nEnumWindows = ctypes.windll.user32.EnumWindows\nEnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))\nGetWindowText = ctypes.windll.user32.GetWindowTextW\nGetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW\nIsWindowVisible = ctypes.windll.user32.IsWindowVisible\n\n#cmd line formating\nbackspaces = 150 #used when backspace is hit or to clear lines for input\nlsColumns = 4 #ls command displays files more than one file per line\ndirCellWidth = 31 #the cell space for displaying multiple folders with the ls command. this number determines how they all line up and look uniform\n\n#selecting songs to be played(queues)\nsongsBeingPlayed = list() #song objects that should mirror winamps current playlist\nm3u = list() #empty at start up the list of songs to send to winamp\nsongsOnDisplay = list() #the songs the user is currently looking over\n\n#monitoring playback habits\nwinampTaskTitleRegExFormat = '\\d*.*- Winamp' #this is the pattern that appears in windows task manager under col \"Task\" this is very sensitive to change!!!\nmusicMonitorDaemon = None #Proc of the monitoring daemon that checks for music selection changes\nmonitorMusicWait = 7 #in seconds, controls how often winampac checks for music selection changes\ncurrentSongIndex = -1\nwinampDaemon = None\n\nmonitorWeatherWait = 3600 #in seconds, poll every hour, then fetch xml file\nweatherDaemon = None\nweatherXml = ''\nweatherXmlUrl = 'http://w1.weather.gov/xml/current_obs/KDAA.xml' #hard coded for ft. belvoir. TODO: make more robust by using ip geolocation services.\nweatherXmlTag_temp = 'temp_f'\nweatherXmlTag_pressure = 'pressure_mb'\nweatherXmlTag_condition = 'weather'\n\nkeepAlive = True\n\n#caching song files on a more reliable hd\n#timeStampFormat = '%Y-%m-%d %H:%M:%S' #deprecated\nfileCodec = 'utf8'\nfileTreePath = 'filetree.xml' #the xml mapping file paths in a tree structure\ncopyCacherPath = 'java -cp .;mongo-java-driver-2.12.3.jar HitOrMissCopyCacher'\ncachePath = 'C:\\\\MusicCache\\\\'\nplaylistPath = cachePath + 'playlist.m3u'\n\nxipathPaddingWidth = 3 #i.e. 002 010 003 001 011 111 233 ...\ncopyCacherCommand = copyCacherPath\n\n\n#============================ENTRY / EXIT============================================\n\ndef init():\n global fileTree #The root Element obj of the xml tree\n global fileNode #used to explore the xml tree\n global xipath #a stack to keep track of where we are\n\n try:\n myparser = etree.XMLParser(encoding='ISO-8859-1')\n\n xmlTree = etree.parse(fileTreePath, parser=myparser)\n \n fileTree = xmlTree.getroot()\n fileNode = fileTree\n xipath = list()\n\n except:\n printf('could not parse filetree. 
songs cannot be played!')\n stop()\n\n launchMongo()\n #create code that executes regurlarly checking the OS task mgr list for changes in a specific pattern\n #monitorMusic()\n #monitorWeather()\n\ndef launchMongo():\n sendToSystem('mongod --quiet --dbpath ' + cachePath)\n\n\ndef test():\n pass\n\ndef stop():\n global keepAlive\n keepAlive = False\n #weatherDaemon.cancel()\n #winampDaemon.cancel()\n exit(code=1)\n #musicMonitorDaemon.terminate()\n \n \n\n#=========================OUTPUT====================================================\ndef printf(line):\n line = line.encode(encoding='UTF-8',errors='ignore')\n sys.stdout.write(line)\n\ndef showSongs(songs):\n index = 0\n for song in songs:\n printf(padNumber(index, xipathPaddingWidth) + '***' + stripFileExtension(song.getName()) + '\\n')\n index = index + 1\n \n#============================SQL METHODS===========================================\ndef connectToDB():\n data = mysql.connect(\"localhost\", \"opo\", \"s33msiam\", \"audiodio\")\n return data\n\ndef wipeDataBase():\n db = connectToDB()\n data = db.cursor()\n data.execute(\"DELETE FROM song;\")\n #data.execute(\"DELETE FROM playback;\") #never delete this Table\n db.commit()\n db.close()\n\ndef insertSong(xipath, name, album, artist, genre):\n db = connectToDB()\n data = db.cursor()\n line = \"(\\\"\" + xipath + \"\\\", \\\"\" + name + \"\\\", \\\"\" + album + \"\\\", \\\"\" + artist + \"\\\", \\\"\" + genre +\"\\\")\"\n line = line.encode(encoding='UTF-8',errors='ignore')\n #print(line)#TESTING!!!\n data.execute(\"INSERT INTO song VALUES\" + line)\n db.commit()\n db.close()\n\ndef insertPlayback(xipath):\n global weatherXml\n db = connectToDB()\n data = db.cursor()\n #check for xml file\n\n try:\n head = xmlTree.fromstring(weatherXml)\n\n node = head.find(weatherXmlTag_temp)\n temp = node.text\n\n node = head.find(weatherXmlTag_pressure)\n pressure = node.text\n\n node = head.find(weatherXmlTag_condition)\n condit = node.text\n\n except:\n temp = 'N/A'\n pressure = 'N/A'\n condit = 'N/A'\n print('weather could not be monitored')\n \n line = \"(\\\"\" + xipath + \"\\\", \\\"\" + pressure + \"\\\", \\\"\" + temp + \"\\\", \\\"\" + condit + \"\\\")\"\n\n data.execute(\"INSERT INTO playback (xipath, weather_pressure, weather_temperature, weather_condition) VALUES\" + line)\n db.commit()\n db.close()\n\n\ndef findXipathBy(artist, song):\n db = connectToDB()\n data = db.cursor()\n data.execute('SELECT xipath FROM song WHERE name LIKE \\'%' + song + '%\\' AND artist LIKE \\'%' + artist + '%\\'')\n xipath = data.fetchone()\n \n db.commit()\n db.close()\n \n \n if(not xipath == None):\n return xipath[0]\n else:\n return ''\n\n\ndef conditionStringForSQL(line):\n line = line.encode(encoding='UTF-8',errors='ignore')\n line = line.replace('\\'', '\\\\\\'')\n return line\n \n#============================SHELL METHODS===========================================\ndef clrline(): #adhoc solution to clear the visible line\n global backspaces\n #printf(' ')#messes up tab complete\n printf('\\b' * backspaces)\n \n\ndef prompt(): #TODO: figure out the nonsense of stdout stdin\n printf('\\n))')#,end='')\n \ndef shell():\n line = '' #we build up the line with input char by char\n global m3u\n tabtries = 0 #this will keep track of the attempts to tab complete\n tabcompleted = ''\n prompt()\n while(1):\n bit = str(msvcrt.getwche()) #<=========INPUT using Vc++ runtime\n #asciichar = chr(ord(bit)) #useful when using getch()\n \n if(bit == '\\r'):\n if(tabtries > 0):\n execute(tabcompleted)\n else:\n 
execute(line)\n line = ''\n tabtries = 0\n tabcompleted = ''\n prompt()\n elif(bit == '\\t'):\n tabcompleted = tabcomplete(line, tabtries)\n tabtries += 1\n clrline()\n prompt()\n printf(tabcompleted)\n elif(bit == '\\b'): #TODO: remove the last visible char and the last recorded char\n clrline()\n line = ''\n prompt()\n tabtries = 0\n tabcompleted = ''\n\n elif(not bit == '\\xff'):\n line += bit #build up the command to be interpreted\n tabcompleted += bit\n\ndef execute(cmd):\n global songsOnDisplay\n global songsBeingPlayed\n \n line = cmd\n words = cmd.split() #nothing like the actual yak and lex of a real shell\n\n if(len(words) == 0):\n return\n cmd = words[0]\n #args = list2str(words[1:]) #<--------- args is a string!!!!!\n args = \" \".join(words[1:])\n \n if('ls' in words[0]):\n ls(args)\n elif(words[0] == 'cd' and len(args) > 0):\n changeDir(args)\n elif('history' in cmd):\n pass #TODO: query the database\n elif('reset' in cmd):\n pass#explore()#TODO: ask are you sure?!?!\n elif('play' == cmd):\n if(len(args) == 0):\n play(songsOnDisplay)\n else:\n temp = list()\n for index in getIndices(len(songsOnDisplay),args):\n temp.append(songsOnDisplay[index])\n \n play(temp)\n \n elif('cl' == cmd):\n wipeSongList()\n elif('addon' == cmd):\n if(len(args) == 0):\n #winamps list is going to be expanded\n addon(songsOnDisplay)\n else:\n temp = list()\n for index in getIndices(len(songsOnDisplay),args):\n temp.append(songsOnDisplay[index])\n \n addon(temp)\n\n elif('cwd' in cmd):\n cwd()\n elif('test' in cmd):\n test()\n elif('goto' in cmd):\n takeMeThere(args)\n elif('find' in cmd):\n findSong(args)\n elif('quit' == cmd or 'exit' in cmd):\n stop()\n\n#emulate pythons indice selection format : 3:4 etc\ndef getIndices(size, args):\n indices = args.split(':')\n\n if(len(indices) > 1):\n start = indices[0]\n stop = indices[1]\n if(len(stop) == 0): #5: i.e\n stop = size\n if(len(start) == 0): #:5 i.e\n start = 0\n \n return range(int(start), int(stop))\n else: \n temp = list()\n temp.append(int(args))\n return temp\n\ndef tabcomplete(line, tries):\n if(len(line) == 0):\n return ''\n #TODO split line and find last word\n matches = list() #empty list\n words = line.split()\n numwords = len(words)\n dirfiles = dirContents()\n numfiles = len(dirfiles)\n\n completeMe = words[numwords - 1] # the word to comp \n for f in range(numfiles): #find best possible matches for tab complete\n if(completeMe.lower() in dirfiles[f].lower()):\n matches.append(dirfiles[f])\n \n if(len(matches) == 0):\n return line #no matches\n else:\n completeMe = matches[tries % len(matches) - 1] #emulate multiple possible matches action\n words[numwords - 1] = completeMe\n #line = list2str(words)\n line = \" \".join(words)\n return line\n \n\n#======================================Browse Directory Tree methods========================================= \ndef ls(args):\n global songsOnDisplay\n global directoryWidth\n global xipath\n \n dividerSize = 1\n divider = '|'\n cap = '|\\n'\n line = ''\n word = ''\n c = 1\n \n temp = None\n name = ''\n node = None\n #show all directories first\n for index in range(len(fileNode)):\n node = fileNode[index]\n if('dir' in node.tag):\n word = node.get('name')\n blanks = dirCellWidth - len(word) - dividerSize\n if(blanks < 0):\n word = word[:(dirCellWidth - dividerSize)]\n else:\n for x in range(blanks): #add whitespace to the word to provide uniformity in output\n word = word + ' '\n \n line = line + divider + word\n if((c % lsColumns) == 0):\n printf(line + cap)\n line = ''\n\n c = c + 1\n \n 
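# the manual blank-padding loop above is equivalent to str.ljust; a sketch\n    # using the same dirCellWidth / dividerSize values:\n    #\n    #     width = dirCellWidth - dividerSize\n    #     word = word[:width].ljust(width)\n    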
printf(line + cap)\n #show current rank\n for x in range(dirCellWidth * lsColumns + 1):\n printf('^')\n printf('\\n')\n cwd()\n printf('\\n')\n #show all song files\n songsOnDisplay = list()\n for index in range(len(fileNode)):\n temp = list(xipath)\n temp.append(index)\n if('file' in fileNode[index].tag): #only add if a file\n name = fileNode[index].get('name')\n songsOnDisplay.append(song(name, xipathToString(temp) + getFileExtension(name)))\n\n\n showSongs(songsOnDisplay) \n\n\n\ndef cwd():\n global xipath\n #TODO: working forwards, follow the xipath down the tree to create the cwd string\n cwd = ''\n node = fileTree\n if(len(xipath) == 0):\n printf(fileNode.get('name'))\n else:\n for index in xipath:\n node = node[index] #INCREment\n cwd += node.get('name') + '\\\\'\n \n printf(cwd)\n \ndef getNameFromPath(path):\n node = fileTree\n name = ''\n for index in path:\n node = node[index]\n name = node.get('name')\n\n return name\n\ndef takeMeThere(artist): #TODO:\n pass\n\ndef findSong(song):\n global songsOnDisplay\n songsOnDisplay = list()\n crumbs = list()\n DFS(crumbs, fileTree, song)\n showSongs(songsOnDisplay)\n \ndef explore():\n crumbs = list()\n exploreInsert(crumbs, fileTree)\n\n \ndef exploreInsert(breadCrumbs, node):\n nBreadCrumbs = None\n if('dir' in node.tag):\n for index in range(len(node)): #explore subdirectory\n nBreadCrumbs = list(breadCrumbs)\n nBreadCrumbs.append(index)\n exploreInsert(nBreadCrumbs, node[index])\n elif('file' in node.tag):\n xmlPath = list(breadCrumbs)\n xmlPath.pop()\n album = getNameFromPath(xmlPath)\n xmlPath.pop()\n artist = getNameFromPath(xmlPath)\n xmlPath.pop()\n genre = getNameFromPath(xmlPath)\n name = node.get('name')\n insertSong(xipathToString(breadCrumbs), stripFileExtension(name), album, artist, genre)\n \n#stack, xmlElement, string\ndef DFS(breadCrumbs, node, key):\n global songsOnDisplay\n nBreadCrumbs = None\n if('dir' in node.tag):\n for index in range(len(node)): #explore subdirectory\n nBreadCrumbs = list(breadCrumbs)\n nBreadCrumbs.append(index)\n DFS(nBreadCrumbs, node[index], key)\n else:\n name = node.get('name')\n if(key in node.get('name').lower()):\n songsOnDisplay.append(song(stripFileExtension(name), xipathToString(breadCrumbs) + getFileExtension(name)))\n #return\n \ndef getNode(path):\n node = fileTree\n for index in path:\n node = node[index]\n\n return node\n\ndef changeDir(line):\n global fileNode\n global xipath\n \n node = fileNode\n if('..' 
in line):\n if(len(xipath) > 0):\n xipath.pop()\n fileNode = getNode(xipath)\n else: #find the dir in the children of fileNode\n lsCount = range(len(fileNode))\n for index in lsCount:\n node = fileNode[index]\n if(line.lower() in node.get('name').lower()):\n if('dir' in node.tag):\n xipath.append(index)\n fileNode = fileNode[index]\n \n else:\n printf('that was not a dir....derp')\n \n break #short circuit\n\n \n\n#========================PLAYING SONGS============================================ \n\ndef padNumber(sumnum, width):\n line = str(sumnum)\n while(len(line) < width):\n line = '0' + line\n\n return line\n\ndef xipathToString(path):\n line = ''\n for index in path:\n line += (padNumber(index, xipathPaddingWidth))\n\n return line\n\n\nclass song:\n name = ''\n cachename = ''\n def __init__(self, n, cn):\n self.name = n\n self.cachename = cn\n\n def getName(self):\n return self.name\n def getCacheName(self):#xipath\n return self.cachename\n \ndef cache(paths):\n line = copyCacherCommand\n\n rv = 0\n for path in paths:\n line = copyCacherCommand + ' ' + stripFileExtension(path.getCacheName())\n rv = os.system(line)\n if(rv < 0):\n break\n\n return rv\n\ndef play(songs):\n global m3u\n global songsBeingPlayed\n backup = songsBeingPlayed\n wipeSongSession() #songsBeingPlayed should be cleared\n \n \n rv = enqueue(songs)\n \n if(rv < 0): #something failed and the current play list will remain the same\n songsBeingPlayed = backup\n return rv\n \n writeM3U()\n\n \n if(len(m3u) == 0):\n return -1\n else:\n wipeSongList()\n line = 'winamp \\\"' + playlistPath + '\\\"'\n sendToSystem(line)\n \n return 0\n \n \ndef addon(songs):\n global m3u\n rv = enqueue(songs)\n if(rv < 0):\n return rv\n \n writeM3U()\n \n if(len(m3u) == 0):\n return - 1\n else:\n line = 'winamp /ADD \\\"' + playlistPath + '\\\"'\n sendToSystem(line)\n wipeSongList()\n return 0\n\n \ndef sendToSystem(cmd):\n subprocess.Popen(cmd)\n\n\n\n#songs is a list of strings(xipaths)\n#a call is made to the cache method of audiodio which couples with a program written in C#. the cache method returns after files are copied.\n#this is where songs are queued up within audiodio..... 
the system is told what song files to play.\ndef enqueue(songs): # returns -1 if error occurred\n global m3u\n global songsBeingPlayed\n \n if(len(songs) == 0):\n return -1\n \n \n pf = cache(songs)\n \n for song in songs:\n m3u.append(song.getCacheName())\n songsBeingPlayed.append(song)\n\n return pf\n\ndef monitorWeather():\n global weatherXml\n global weatherDaemon\n \n try:\n htmlObj = urllib2.urlopen(weatherXmlUrl)\n weatherXml = htmlObj.read()\n\n except:\n weatherXml = ''\n print('could not connect to nws server')\n\n weatherDaemon = Timer(monitorWeatherWait, monitorWeather)\n weatherDaemon.start() #loop recursively\n\n return None\n\ndef monitorMusic():\n global winampDaemon\n EnumWindows(EnumWindowsProc(isThisProcWinamp), 0)\n winampDaemon = Timer(monitorMusicWait, monitorMusic, ())\n winampDaemon.start() #recursively loop\n \n return None#continue main thread \n \n\ndef isThisProcWinamp(hwnd, lParam):\n global currentSongIndex\n if IsWindowVisible(hwnd):\n length = GetWindowTextLength(hwnd)\n buff = ctypes.create_unicode_buffer(length + 1)\n GetWindowText(hwnd, buff, length + 1)\n temp1 = winampTaskTitleRegExFormat\n temp2 = buff.value\n if(re.search(temp1, temp2)):#check for expected format\n line = buff.value\n nw = line.split('.') #get rid of the song number\n #parts = nw[1].split('-') # get the parts\n if(len(nw) < 1): #if not enough parts short circuit\n return\n \n try:\n songIndex = int(nw[0]) - 1 #offset by one\n except:\n songIndex = -1\n\n #TODO: Double insertion(successive insertion) can occur in instances of exiting and restarting audiodio while the same song still plays. Resolve this.\n if(not currentSongIndex == songIndex):\n currentSongIndex = songIndex\n if(songIndex < len(songsBeingPlayed) and songIndex >= 0):\n insertPlayback(stripFileExtension(songsBeingPlayed[songIndex].getCacheName()))\n #print songsBeingPlayed[songIndex].getName()\n #sys.stdout.flush()\n return\n \ndef files2str(songs): #aggregate a list of strings into one str, inverse of split()\n if(len(songs) == 0):\n return ''\n \n line = songs[0]\n nsongs = len(songs)\n\n for c in range(1,nsongs):\n line += ' ' + songs[c].getTitle()\n\n return line\ndef list2str(songs): #aggregate a list of strings into one str, inverse of split()\n if(len(songs) == 0):\n return ''\n \n line = songs[0]\n nsongs = len(songs)\n\n for c in range(1,nsongs):\n line += ' ' + songs[c]\n\n return line\ndef list2csv(songs): #aggregate a list of strings into one str, inverse of split()\n if(len(songs) == 0):\n return ''\n \n line = songs[0]\n nsongs = len(songs)\n\n for c in range(1,nsongs):\n line += ', ' + songs[c]\n\n return line\ndef wipeSongList():\n global m3u\n m3u = list() #wipe current list\n\ndef wipeSongSession():\n global currentSongIndex\n global songsBeingPlayed\n currentSongIndex = -1\n songsBeingPlayed = list()\n \ndef getTimeStamp():\n return datetime.datetime.now().strftime(timeStampFormat)\n\ndef updatePlayback():\n global m3u # list of xipaths\n for song in m3u:\n insertPlayback(stripFileExtension(song))\n \ndef writeM3U():\n global m3u\n f = open(playlistPath, 'w')\n for song in m3u:\n f.write(song + '\\n')\n \n f.close()\n#========================File Extension methods============================================\n \ndef isSongFile(file): #return bool\n for e in Extensions: #find complement\n if(e in file):\n return True\n\n return False\ndef stripFileExtension(file): #remove the file extension in a file name\n bits = list(file)\n if(not '.' 
in bits):\n return file\n \n c = len(bits) - 1 #size\n count = 1\n bit = bits[c] #last char in string file\n while((not bit == '.') and c >= 0): # work backwards\n c = c - 1\n bit = bits[c]\n count = count + 1\n \n del bits[c:] \n \n\n return ''.join(bits)\ndef getFileExtension(file):\n bits = list(file)\n if(not '.' in bits):\n return file\n \n c = len(bits) - 1 #size\n count = 1\n bit = bits[c] #last char in string file\n while((not bit == '.') and c >= 0): # work backwards\n c = c - 1\n bit = bits[c]\n count = count + 1\n\n line = bits[c:]\n \n return ''.join(line)\n\ndef dirContents():\n songs = list()\n for child in fileNode:\n songs.append(child.get('name'))\n \n return songs\n\n#create a deamon process that monitors Winamps playing states by querying the underlying win32 OS for process titles\n# #process is a forklike method so this only executes one time, and not in the spawned process\n# musicMonitorDaemon = multiprocessing.Process(target=monitorMusic, args=(monitorMusicWait,))\n# musicMonitorDaemon.start() #and its off!\nif __name__ == '__main__':\n init()\n shell()\n","sub_path":"winampac.py","file_name":"winampac.py","file_ext":"py","file_size_in_byte":22570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"6224253","text":"import tensorflow as tf\n# from readDate import *\nimport numpy as np\nslim = tf.contrib.slim\nimport tensorflow as tf\nimport glob\nimport os\nimport scipy.io\nimport random\nimport matplotlib.pyplot as plt\nimport math\n\n####model\ndef vgg_16(inputs, num_classes=2,is_training=True,dropout_keep_prob=0.5,scope='vgg_16'):\n with tf.variable_scope(scope, 'vgg_16', [inputs],reuse=tf.AUTO_REUSE) as sc:\n end_points_collection = sc.name + '_end_points'\n with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],\n outputs_collections=end_points_collection):\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\n # weights_regularizer=slim.l2_regularizer(0.00005)\n ):\n net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1', trainable=True)\n net = slim.max_pool2d(net, [2, 2], scope='pool1')\n net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2', trainable=True)\n net = slim.max_pool2d(net, [2, 2], scope='pool2')\n net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3', trainable=True)\n net = slim.max_pool2d(net, [2, 2], scope='pool3')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4', trainable=True)\n net = slim.max_pool2d(net, [2, 2], scope='pool4')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5', trainable=True)\n net = slim.max_pool2d(net, [2, 2], scope='pool5')\n # Use conv2d instead of fully_connected layers.\n net = slim.conv2d(net, 1024, [7, 7], padding='VALID', scope='fc6', trainable=True)\n net = slim.dropout(net, dropout_keep_prob, is_training=True,\n scope='dropout6')\n net = slim.conv2d(net, 1024, [1, 1], scope='fc7')\n net = slim.dropout(net, dropout_keep_prob, is_training=True,\n scope='dropout7')\n net = slim.conv2d(net, num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n net = tf.squeeze(net,[1,2])\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n return net, end_points\n####train\nx = tf.placeholder(tf.float32, [None, 224, 224, 1] , name='input')\nlabel_one_hot = tf.placeholder(tf.float32, [None, 2], name='label')\npredict, _ = vgg_16(x)\n####loss\nglobal_step = tf.Variable(0,trainable=False)\ncross_entropy = 
tf.losses.softmax_cross_entropy(onehot_labels=label_one_hot, logits=predict)\n# l2_loss = tf.losses.get_regularization_loss()\nloss_all = tf.losses.get_total_loss()\n####train\n#################learning_rate_config\nlearning_rate = tf.train.exponential_decay(1e-6,global_step,500,0.9,staircase=True)\n# learning_rates = [1e-1,1e-3,1e-5,1e-7]\n# boundaries = [10000,50000,100000]\n# learning_rate = tf.train.piecewise_constant(global_step,boundaries=boundaries,values=learning_rates)\n###########Move_averages\n# variables_averages = tf.train.ExponentialMovingAverage(0.99, global_step)\n# variables_averages_op = variables_averages.apply(tf.trainable_variables())\n# train_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss,global_step=global_step)\n# train_op = tf.group([train_step, variables_averages_op])\ntrain_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy,global_step=global_step)\n\ncorret_pred = tf.equal(tf.arg_max(predict, 1), tf.arg_max(label_one_hot, 1))\naccuracy = tf.reduce_mean(tf.cast(corret_pred, tf.float32))\ntf.summary.scalar('acc',accuracy)\ntf.summary.scalar('loss',loss_all)\ntf.summary.scalar('lr',learning_rate)\n\n# tf.summary.image('image',x)\n\n\n\n\n\n\nmodel_vars = tf.model_variables()\nprint(model_vars)\nvgg_16_vars = [var for var in model_vars if 'fc' not in var.name and 'conv1' not in var.name]\nsaver_vgg = tf.train.Saver(vgg_16_vars)\n\nsaver = tf.train.Saver()\nmodel_vars = tf.trainable_variables()\nprint(model_vars)\n\nall_TP = all_FP = all_TN = all_FN = 0\nLabel_list = []\nLogits_list = []\nwith tf.Session() as sess:\n\n tf.global_variables_initializer().run()\n saver_vgg.restore(sess,\n '/mnt/data/fast-neural-style-tensorflow-master/pretrained/vgg16.ckpt')\n writer = tf.summary.FileWriter('/mnt/data/VGG_ultrasound/record/train', sess.graph)\n\n writer_valid = tf.summary.FileWriter(\n '/mnt/data/VGG_ultrasound/record/val')\n\n\n saver.restore(sess,'/mnt/data/VGG_ultrasound/RF/spectrogram/RF3/ill/5_cross_vaildation/model/1/model.ckpt-16001')\n\n## 39001 0.86\n img_path_ill = '/mnt/data/VGG_ultrasound/RF/spectrogram/RF3/test/ill/1/'\n imgList_ill = os.listdir(img_path_ill)\n img_path_normal = '/mnt/data/VGG_ultrasound/RF/spectrogram/RF3/test/normal/'\n imgList_normal = os.listdir(img_path_normal)\n imgList = imgList_ill+imgList_normal\n merge_writer = tf.summary.merge_all()\n i=0\n for img_name in imgList:\n for name,labelname in [['ill', np.array([0, 1])], ['normal', np.array([1, 0])]]:\n######################## dataread\n\n\n img_dir = os.path.join(img_path, img_name)\n\n img_dir = os.path.join(img_dir, name)\n mat_list = os.listdir(img_dir)\n\n for mat_name in mat_list:\n img_final_name = os.path.join(img_dir, mat_name)\n print(img_dir)\n data = scipy.io.loadmat(img_final_name)\n inter_name = name + '_Array'\n data = data[inter_name]\n data = (data-data.min())/(data.max()-data.min())\n\n w = data.shape[1]\n h = data.shape[0]\n\n expend = math.ceil(224 / w)\n data = np.tile(data, (1, expend))\n data_resize = data[:, 0:224]\n Img = np.resize(data_resize, (224, 224))\n\n\n Img = np.expand_dims(Img, axis=0)\n Img = np.expand_dims(Img, axis=3)\n\n Label_one_hot_train = labelname\n Label_one_hot_train = np.expand_dims(Label_one_hot_train, axis=0)\n\n\n\n Loss,Predict = sess.run([loss_all,predict], feed_dict={x: Img, label_one_hot: Label_one_hot_train})\n print('The training step is [{0}], The loss is [{1}]'.format(i, Loss))\n # print('The training step is [{0}], The l2loss is [{1}]'.format(i, l2loss))\n 
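# the manual ROC sweep at the bottom of this script could also be computed\n                # with scikit-learn (a sketch; assumes sklearn is installed and that\n                # Label_list / Logits_list are filled exactly as below):\n                #\n                #     from sklearn.metrics import roc_curve, auc\n                #     fpr, tpr, thresholds = roc_curve(Label_list, Logits_list)\n                #     roc_auc = auc(fpr, tpr)\n                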
print(Predict)\n\n\n logits_reshape = np.reshape(Predict, [1, -1])[0][0]\n Logits_list.append(logits_reshape)\n\n\n label_reshape = np.reshape(Label_one_hot_train, [1, -1])[0][0]\n Label_list.append(label_reshape)\n\n Predict = np.argmax(Predict, axis=1)\n Label_one_hot_train = np.argmax(Label_one_hot_train, axis=1)\n TP = np.sum(\n np.logical_and(np.equal(Label_one_hot_train, 1), np.equal(Predict, 1)).astype(int)\n )\n TN = np.sum(\n np.logical_and(np.equal(Label_one_hot_train, 0), np.equal(Predict, 0)).astype(int)\n )\n FP = np.sum(\n np.logical_and(np.equal(Label_one_hot_train, 0), np.equal(Predict, 1)).astype(int)\n )\n FN = np.sum(\n np.logical_and(np.equal(Label_one_hot_train, 1), np.equal(Predict, 0)).astype(int)\n )\n all_TP = all_TP + TP\n all_FP = all_FP + FP\n all_TN = all_TN + TN\n all_FN = all_FN + FN\n print('label{} ======= predict{}'.format(Label_one_hot_train, Predict))\n print('TP:{}'.format(TP))\n print('FP:{}'.format(FP))\n print('TN:{}'.format(TN))\n print('FN:{}'.format(FN))\n i=i+1\n precision = all_TP / (all_TP + all_FP)\n recall = all_TP / (all_TP + all_FN)\n sensitivity = all_TP / (all_TP + all_FN)\n specificity = all_TN / (all_TN + all_FP)\n ACC = (all_TP + all_TN) / (all_TP + all_TN + all_FP + all_FN)\n\n print('all_TP:{}'.format(all_TP))\n print('all_FP:{}'.format(all_FP))\n print('all_TN:{}'.format(all_TN))\n print('all_FN:{}'.format(all_FN))\n\n print('ACC:{}'.format(ACC))\n\n print('the precision is [{}]'.format(precision))\n print('the recall is [{}]'.format(recall))\n print('the sensitivity is [{}]'.format(sensitivity))\n print('the specificity is [{}]'.format(specificity))\n\n ###\n threshold = sorted(Logits_list, reverse=True)\n print(threshold)\n\n\n def fuc(x, i):\n if x >= i:\n x = 1\n else:\n x = 0\n return x\n\n\n y = [0]\n x = [0]\n\n for i in threshold:\n pred = list(map(fuc, Logits_list, [i] * len(Logits_list)))\n\n tp = np.logical_and(np.equal(pred, 1), np.equal(Label_list, 1))\n tp = tp.astype(np.int16).sum()\n fp = np.logical_and(np.equal(pred, 1), np.equal(Label_list, 0))\n fp = fp.astype(np.int16).sum()\n\n tn = np.logical_and(np.equal(pred, 0), np.equal(Label_list, 0))\n tn = tn.astype(np.int16).sum()\n fn = np.logical_and(np.equal(pred, 0), np.equal(Label_list, 1))\n fn = fn.astype(np.int16).sum()\n\n sensitivity = tp / (tp + fn)\n specificity = tn / (fp + tn)\n tpr = tp / (tp + fn)\n fpr = fp / (fp + tn)\n y.append(tpr)\n x.append(fpr)\n\n plt.figure(figsize=(10, 10))\n plt.plot(x, y, color='darkorange')\n plt.title('ROC', fontsize=20)\n plt.xlim(-0.05, 1.05)\n plt.ylim(-0.05, 1.05)\n plt.xlabel('False Positive Rate', fontsize=20)\n plt.ylabel('True Positive Rate', fontsize=20)\n plt.savefig('/mnt/data/VGG_ultrasound/RF/spectrogram/roc.jpg')","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":9747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"256252841","text":"#!/usr/bin/env python\n__author__ = \"Rob Cartwright\"\n\nimport requests\nimport datetime\nimport calendar\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\nclass NapData:\n\n def __init__(self):\n self.save_dir = \"C:/Users/Robert Cartwright/Dropbox/MathsyBoyz/ugly_betting/\"\n self.url_base = 'http://racing.betting-directory.com/naps/'\n self.curr_table = pd.DataFrame()\n\n def create_date_url(self, date):\n \"\"\"\n integer date yyyymmdd\n creates url for historic data date\n \"\"\"\n assert type(date) == int\n date = str(date)\n year = date[:4]\n month = date[4:6]\n month = 
calendar.month_name[int(month)]\n day = int(date[6:])\n if 4 <= day <= 20 or 24 <= day <= 30:\n suffix = \"th\"\n else:\n suffix = [\"st\", \"nd\", \"rd\"][day % 10 - 1]\n day = str(day) + suffix\n str_date = day + '-' + month + '-' + year\n url = self.url_base + str_date + '.php'\n\n return url\n\n def get_table_html(self, url):\n \"\"\"\n gets the relevant part of html\n \"\"\"\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n table_html = soup.find_all('table')[0]\n\n return table_html\n\n def parse_html_table(self, table):\n \"\"\"\n converts html table to dataframe\n \"\"\"\n n_columns = 0\n n_rows = 0\n column_names = []\n\n # Find number of rows and columns\n # we also find the column titles if we can\n for row in table.find_all('tr'):\n\n # Determine the number of rows in the table\n td_tags = row.find_all('td')\n if len(td_tags) > 0:\n n_rows += 1\n if n_columns == 0:\n # Set the number of columns for our table\n n_columns = len(td_tags)\n\n # Handle column names if we find them\n th_tags = row.find_all('th')\n if len(th_tags) > 0 and len(column_names) == 0:\n for th in th_tags:\n column_names.append(th.get_text())\n\n # Safeguard on Column Titles\n if len(column_names) > 0 and len(column_names) != n_columns:\n raise Exception(\"Column titles do not match the number of columns\")\n\n columns = column_names if len(column_names) > 0 else range(0, n_columns)\n df = pd.DataFrame(columns=columns,\n index=range(0, n_rows))\n row_marker = 0\n for row in table.find_all('tr'):\n column_marker = 0\n columns = row.find_all('td')\n for column in columns:\n df.iat[row_marker, column_marker] = column.get_text()\n column_marker += 1\n if len(columns) > 0:\n row_marker += 1\n\n # Convert to float if possible\n for col in df:\n try:\n df[col] = df[col].astype(float)\n except ValueError:\n pass\n\n df.columns = df.iloc[0].values\n df = df.iloc[1:]\n try:\n df['Result'] = df['Result'].astype(str)\n except:\n KeyError\n df['Odds'] = df['Odds'].astype(str)\n df.drop(['Silk'], axis=1, inplace=True)\n\n return df\n\n def table_on_date(self, date):\n \"\"\"\n integer date yyyymmdd\n wraps functions to return data df on date\n \"\"\"\n date_url = self.create_date_url(date)\n table_html = self.get_table_html(date_url)\n table_on_date = self.parse_html_table(table_html)\n\n return table_on_date\n\n def today_table(self):\n \"\"\"\n gets today's table - url is different for live vs. 
historic\n \"\"\"\n today_url = 'http://racing.betting-directory.com/daily-naps.php'\n table_html = self.get_table_html(today_url)\n today_table = self.parse_html_table(table_html)\n\n return today_table\n\n def save_table(self, date):\n \"\"\"\n integer date yyyymmdd\n saves table to csv\n \"\"\"\n path = self.save_dir + str(date) + '.csv'\n table_on_date = self.table_on_date(date)\n table_on_date.to_csv(path, index=False)\n\n def update_today_table(self):\n \"\"\"\n writes a new file with time stamp if table data has changed\n \"\"\"\n curr_dttime = str(datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n path = self.save_dir + curr_dttime + '.csv'\n curr_table = self.today_table()\n if self.curr_table.equals(curr_table):\n pass\n else:\n curr_table.to_csv(path, index=False)\n self.curr_table = curr_table\n","sub_path":"src/data_collect/get_nap_data.py","file_name":"get_nap_data.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"223338487","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2022, Arcangelo Massari \n#\n# Permission to use, copy, modify, and/or distribute this software for any purpose\n# with or without fee is hereby granted, provided that the above copyright notice\n# and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\n# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,\n# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,\n# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS\n# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS\n# SOFTWARE.\n\n\nfrom setup_and_tests import \\\n save, already_done, get_setup, tests_time_w_out_cache_w_out_index, tests_time_w_out_cache_w_index, \\\n tests_time_w_cache_w_out_index, tests_time_w_cache_w_index, create_statistics_file, save_baseline, \\\n OUTPUT_PATH, TRIPLESTORES\nfrom triplestore_manager import TriplestoreManager\nimport re\nimport timeit\n\n\niterations = 1\nrepetitions = 10\nparameters:dict = {\n 'w_cache_w_out_index': tests_time_w_cache_w_out_index,\n # 'w_out_cache_w_out_index': tests_time_w_out_cache_w_out_index,\n # 'w_out_cache_w_index': tests_time_w_out_cache_w_index,\n # 'w_cache_w_index': tests_time_w_cache_w_index\n}\n\n\nfor triplestore in TRIPLESTORES:\n # output_path = OUTPUT_PATH.replace('.json', f'_{triplestore}.json')\n # create_statistics_file(output_path=output_path)\n for parameter, tests in parameters.items(): \n config_filepath = f'config/{triplestore.lower()}/{parameter}.json'\n setup_no_cache, setup_cache = get_setup(triplestore)\n for test_name, test_entities in tests.items():\n for test_entity in test_entities:\n entity_match = re.search('https://github.com/opencitations/time-agnostic-library/br/(\\d+)', test_entity)\n entity = f':br/{entity_match.group(1)}' if entity_match else 'p_o'\n # work_done = already_done(parameter, test_name, 'time', entity, repetitions, output_path=output_path)\n # if work_done == repetitions:\n # continue\n # repetitions_to_do = repetitions - work_done\n test = f\"CONFIG = '{config_filepath}'\\n\" + test_entity\n # if 'w_cache' in parameter:\n # if repetitions_to_do == repetitions:\n # TriplestoreManager.clear_cache(triplestore)\n # setup_to_use = setup_cache\n # else:\n # 
setup_to_use = setup_no_cache\n # save_baseline(setup_to_use, test, 'time', test_name, entity, output_path, config_filepath)\n for _ in range(1):\n duration = timeit.timeit(stmt=test, setup=setup_cache, number=1)\n # save(data=duration, test_name=test_name, parameter=parameter, benchmark='time', entity=entity, output_path=output_path)\n","sub_path":"benchmark/benchmark_time.py","file_name":"benchmark_time.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"483498243","text":"import re\nimport os\nimport time\nfrom datetime import datetime\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.common.exceptions import NoSuchElementException\nfrom bs4 import BeautifulSoup\nimport traceback\nimport json\nfrom os.path import exists\nfrom collections import defaultdict\n\n\nclass BaiDuWaiMaiCrawler:\n def __init__(self):\n self.comment_root_url = \"http://waimai.baidu.com/shopui/?qt=shopcomment&shop_id=\"\n self.comment_root_path = \"files/baiduwaimai_comments-%s.json\" % datetime.now().strftime(\"%Y-%m-%d\")\n self.browser = Firefox()\n self.ids = defaultdict(list)\n self.crawled_ids = []\n self.crawled_id_filepath = \"files/crawled_ids.txt\"\n self.get_crawled_ids()\n\n def __del__(self):\n self.browser.quit()\n\n def get_crawled_ids(self):\n if exists(self.crawled_id_filepath):\n with open(self.crawled_id_filepath, encoding=\"utf-8\") as f:\n for line in f:\n self.crawled_ids.append(line.strip())\n\n def record_crawled_id(self, shop_id):\n with open(self.crawled_id_filepath, mode=\"a\", encoding=\"utf-8\") as f:\n f.write(\"%s\\n\" % shop_id)\n\n @staticmethod\n def get_address_urls_from_file():\n urls = []\n pattern = re.compile(\"\\s+\")\n with open(\"files/baiduwaimai_address_urls.txt\") as f:\n for line in f:\n results = pattern.split(line.strip())\n if len(results) >= 2:\n urls.append(results[0])\n print(\"从文件内得到所有地址的url\")\n return urls\n\n def get_shop_ids_from_file(self, filepath, encoding=\"utf-8\"):\n pattern = re.compile(\"\\s+\")\n with open(filepath, encoding=encoding) as f:\n for line in f:\n results = pattern.split(line.strip())\n if len(results) >= 2:\n self.ids[results[0]] = results[1].split(\",\")\n\n def get_shop_ids_from_net(self):\n address_urls = self.get_address_urls_from_file()\n for index, url in enumerate(address_urls):\n self.shop_urls_at_a_address(url, index)\n\n def shop_urls_at_a_address(self, url, line_index):\n self.browser.get(url)\n self.browser.maximize_window()\n for i in range(10):\n self.browser.find_element_by_id(\"baiducopy\").click()\n time.sleep(2)\n page_source = self.browser.page_source\n # self.browser.close()\n\n soup = BeautifulSoup(page_source, \"html.parser\")\n if soup.find(\"ul\", class_=\"shopcards-list\"):\n for li in soup.find(\"ul\", class_=\"shopcards-list\").find_all(\"li\", class_=\"list-item\"):\n key = li.get(\"class\")[2][4:]\n address_id = str(line_index)\n self.ids[key].append(address_id)\n\n def get_comments_in_one_shop(self, shop_id):\n self.browser.get(\"%s%s\" % (self.comment_root_url, shop_id))\n self.browser.maximize_window()\n while True:\n footer = self.browser.find_element_by_xpath(\"//div[@class='footer-items']\")\n for i in range(2):\n ActionChains(self.browser).move_to_element(footer).perform()\n time.sleep(1)\n\n page_source = self.browser.page_source\n soup = BeautifulSoup(page_source, \"html.parser\")\n div = soup.find(\"section\", 
\"comment-list\").find(\"div\", \"comment-con\")\n if div.find(\"div\", class_=\"no-result\") is not None:\n break\n else:\n for a_div in div.find_all(\"div\", class_=\"list clearfix\"):\n self.get_one_comment(a_div, shop_id)\n try:\n the_next = self.browser.find_element_by_xpath(\n \"//div[@class='pagination']//a[@class='mod-page-item mod-page-item-next']\")\n the_next.click()\n time.sleep(2)\n except NoSuchElementException:\n break\n # self.browser.close()\n print(\"爬完ID为 '\", shop_id, \"' 的餐厅的评论信息。\")\n self.record_crawled_id(shop_id)\n self.crawled_ids.append(shop_id)\n\n def get_one_comment(self, div, shop_id):\n try:\n comment_info = {\"shop_id\": shop_id}\n top_sec = div.find(\"div\", class_=\"top-section\").get_text(\"|\", strip=True).split(\"|\")\n comment_info[\"user_name\"] = top_sec[0] # a_div.find(\"span\", class_=\"user-name\").string.strip()\n comment_info[\"mark\"] = top_sec[1][:-1]\n comment_info[\"delivery_time\"] = top_sec[2] # a_div.find(\"span\", class_=\"delivery-time\").string.strip()\n comment_info[\"comment_time\"] = top_sec[3] # a_div.find(\"span\", class_=\"fr\").string.strip()\n comment_info[\"content\"] = div.find(\"div\", class_=\"mid-section\").get_text(strip=True)\n if div.find(\"div\", class_=\"btm-section\") is not None:\n comment_info[\"recommend\"] = div.find(\"div\", class_=\"btm-section\").get_text(\n \"|\", strip=True).split(\"|\")[1:]\n else:\n comment_info[\"recommend\"] = []\n with open(self.comment_root_path, mode=\"a\", encoding=\"utf-8\") as f:\n a_json = json.dumps(comment_info, ensure_ascii=False, separators=(\",\", \":\"))\n f.write(\"%s\\n\" % a_json)\n except:\n print(\"id为'%s'的餐厅有Bug,请检查。\", shop_id)\n traceback.print_exc()\n\n def get_all_shop_comments(self):\n for shop_id in self.ids.keys():\n self.get_comments_in_one_shop(shop_id)\n\n def test(self):\n self.get_shop_ids_from_file(\"files/baiduwaimai_shop_urls.txt\")\n self.get_all_shop_comments()\n\n\ndef get_pos_and_neg_corpus():\n \"\"\"\n get the positive and negative f_corpus according to the command mark\n \"\"\"\n def string_is_too_short(a_string):\n \"\"\"\n judge if the f_corpus is too short or the chinese characters are few\n if True, write to the abandoned file\n :param a_string: a f_corpus\n :return: True or False\n \"\"\"\n if len(a_string) < 5:\n write_into_rubbish_corpus_file(\"Too short:\", a_string)\n return True\n if len(re.findall(r'[\\u4e00-\\u9fa5]', a_string)) <= len(a_string) * 0.4:\n write_into_rubbish_corpus_file(\"Few words:\", a_string)\n return True\n return False\n\n def string_is_numeric(a_string):\n \"\"\"\n judge if the f_corpus's characters are all or almost numbers\n if True, write to the abandoned file\n :param a_string: a f_corpus\n :return: True or False\n \"\"\"\n match = (re.match(r'\\d+', a_string))\n if match is not None and len(match.group()) >= len(a_string) * 0.75:\n write_into_rubbish_corpus_file(\"Is numeric:\", a_string)\n return True\n return False\n\n def string_is_english(a_string):\n \"\"\"\n judge if the f_corpus's characters are all English\n if True, write to the abandoned file\n :param a_string: a f_corpus\n :return: True or False\n \"\"\"\n match = re.match(r\"[a-zA-Z]+\", a_string)\n if match is not None and len(match.group()) >= len(a_string) * 0.75:\n write_into_rubbish_corpus_file(\"Is english:\", a_string)\n return True\n return False\n\n def string_is_word_repeat(a_string):\n \"\"\"\n check if the f_corpus is always the repeat word\n :param a_string: a f_corpus\n :return: True or False\n \"\"\"\n repeat_words, length = [], 0\n 
for word in a_string:\n if a_string.count(word) >= 4 and word not in repeat_words:\n repeat_words.append(word)\n length += content.count(word)\n if length > len(content) / 2:\n write_into_rubbish_corpus_file(\"Word repeat:\", a_string)\n return True\n return False\n\n def string_is_sentence_repeat(filepath, a_string):\n \"\"\"\n judge if the string is the same as the another string in the lines\n :param filepath: file path\n :param a_string: a f_corpus\n :return: True or False\n \"\"\"\n repeat = False\n with open(filepath, \"r\", encoding=\"utf-8\") as check_f:\n for a_line in check_f:\n if a_line.strip() in a_string and len(a_line.strip()) * 2 >= len(a_string):\n repeat = True\n if a_string in a_line.strip() and len(a_string) * 2 >= len(a_line.strip()):\n repeat = True\n if repeat:\n write_into_rubbish_corpus_file(\"Sentence repeat:\", a_string)\n write_into_rubbish_corpus_file(\"Sentence repeat:\", a_line.strip())\n break\n return repeat\n\n def write_into_rubbish_corpus_file(type_string, a_string):\n \"\"\"\n write the f_corpus into the rubbish f_corpus file\n :param type_string: the rubbish type\n :param a_string: a f_corpus\n \"\"\"\n with open(abandoned_filepath, \"a\", encoding=\"utf-8\") as abandoned_f:\n abandoned_f.write(type_string + \"\\n\\t\" + str(a_string) + \"\\n\")\n\n def write_into_corpus_file(filepath, a_string):\n \"\"\"\n write the f_corpus into the corresponding file if there is no repeat,\n otherwise, write it into the abandoned file\n :param filepath: file path\n :param a_string: a f_corpus\n \"\"\"\n repeat = string_is_sentence_repeat(filepath, a_string)\n if not repeat:\n with open(filepath, \"a\", encoding=\"utf-8\") as final_corpus_f:\n final_corpus_f.write(str(a_string) + \"\\n\")\n\n waimai_corpus_root_path = \"waimai/2015-11-05/\"\n abandoned_filepath = \"waimai/abandoned/abandoned_corpus.txt\"\n positive_filepath = \"waimai/pos/positive_corpus_v2.txt\"\n negative_filepath = \"waimai/neg/negative_corpus_v2.txt\"\n four_mark_filepath = \"waimai/handle/four_mark_corpus.txt\"\n runout_filepath = \"f_runout/get_waimai_pos_and_neg_corpus.txt\"\n\n open(abandoned_filepath, \"w\", encoding=\"utf-8\")\n open(positive_filepath, \"w\", encoding=\"utf-8\")\n open(negative_filepath, \"w\", encoding=\"utf-8\")\n open(four_mark_filepath, \"w\", encoding=\"utf-8\")\n\n start_time = time.clock()\n total_index, useful_index = 0, 0\n for filename in os.listdir(waimai_corpus_root_path):\n if \"comment\" in filename:\n with open(waimai_corpus_root_path+filename, \"r\", encoding=\"utf-8\") as corpus_f:\n for line in corpus_f:\n total_index += 1\n print(\"finish the number of %d f_corpus in total.\" % total_index)\n\n a_comment = json.loads(line.strip())\n content = \",\".join(re.split(r\"\\s+\", a_comment[\"content\"]))\n\n if string_is_too_short(content):\n continue\n if string_is_numeric(content):\n continue\n if string_is_english(content):\n continue\n if string_is_word_repeat(content):\n continue\n\n try:\n mark = int(\"\".join(re.findall(\"\\d+\", a_comment[\"mark\"])))\n\n if mark == 5:\n write_into_corpus_file(positive_filepath, content)\n\n if mark == 4:\n write_into_corpus_file(four_mark_filepath, content)\n\n if mark <= 3:\n write_into_corpus_file(negative_filepath, content)\n\n useful_index += 1\n print(\"finish the number of %d f_corpus useful.\" % useful_index)\n except ValueError:\n write_into_rubbish_corpus_file(\"ValueError:\", a_comment)\n\n end_time = time.clock()\n with open(runout_filepath, \"w\", encoding=\"utf-8\") as runout_f:\n runout_f.write(\"total 
f_corpus: %d\\n\" % total_index)\n runout_f.write(\"useful f_corpus: %d\\n\" % useful_index)\n runout_f.write(\"time used: \" + str(end_time - start_time))\n\n\nif __name__ == \"__main__\":\n crawler = BaiDuWaiMaiCrawler()\n crawler.test()\n\n\n\n","sub_path":"fomc/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":12233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"270358735","text":"from liir.nlp.representation.Word import Predicate\nfrom liir.nlp.representation.Word import Word\n\n__author__ = 'quynhdo'\n\nimport re\n\n\n# this class define a sentence which is a list of Word\nclass Sentence(list):\n def __init__(self): # value should be a list of Word\n list.__init__(self)\n\n def getPredicates(self):\n return [w for w in self if isinstance(w, Predicate)]\n\n def printfConll2009(self):\n s=\"\"\n lst=[]\n for w in self:\n s_w=str(w.id +1) + \"\\t\" + w.form +\"\\t\" +w.lemma +\"\\t\"+ w.lemma+\"\\t\"+w.pos+ \"\\t\" +w.pos + \"\\t\" +\"_\"+ \"\\t\" +\"_\"+ \"\\t\" +str(w.head+1) + \"\\t\" +str(w.head+1)+\"\\t\" + w.deprel + \"\\t\" +w.deprel + \"\\t\"\n if isinstance(w, Predicate):\n o = \"Y\"\n s_w=s_w + o + \"\\t\" + w.lemma + \".\" +str(w.sense)\n\n else:\n s_w=s_w + \"_\" + \"\\t\" + \"_\"\n\n lst.append(s_w)\n\n preds= self.getPredicates()\n\n for pred in preds:\n poss=[]\n\n for arg in pred.arguments.keys():\n position=arg.id\n lst[position]=lst[position] +\"\\t\" + pred.arguments[arg]\n poss.append(position)\n for i in range(len(lst)):\n if not i in set(poss):\n lst[i]=lst[i] + \"\\t\" + \"_\"\n\n for sss in lst:\n s=s+sss + '\\n'\n\n\n return s\n\n\n\n# Sentence in Conll 2009\nclass SentenceConll2009(Sentence):\n\n def __init__(self, data_lines=None, read_label=True, use_gold=False):\n '''\n\n :param data_lines:\n :param read_label: read srl labels or not\n :param use_gold: use gold annotation or not\n :return:\n '''\n Sentence.__init__(self)\n if data_lines is None:\n return\n if not isinstance(data_lines, list):\n return\n pred_id = 0\n dt = []\n for line in data_lines:\n temps=re.split(\"\\\\s+\", line)\n dt.append(temps)\n w = Word(int(temps[0])-1, temps[1])\n #w.word = temps[1].lower()\n if not use_gold:\n w.lemma = temps[3]\n w.pos = temps[5]\n w.head = int(temps[9])-1\n w.deprel = temps[11]\n else:\n w.lemma = temps[2]\n w.pos = temps[4]\n w.head = int(temps[8])-1\n w.deprel = temps[10]\n\n if read_label:\n if \"Y\" in set(temps[12]):\n w.__class__ = Predicate\n w.sense = temps[13].split(\".\")[1]\n\n self.append(w)\n if read_label:\n # read srl information\n for pred in self:\n if isinstance(pred, Predicate):\n args={}\n for j in range(len(data_lines)):\n tmps = dt[j]\n lbl = tmps[14+pred_id]\n if lbl != \"_\":\n args[self[int(tmps[0])-1]]=lbl\n\n pred.arguments = args\n pred_id += 1\n\n for w in self:\n w.sentence = self\n\n\nclass SentenceConll2009POS(Sentence):\n def __init__(self, data_lines=None):\n Sentence.__init__(self)\n if data_lines is None:\n return\n if not isinstance(data_lines, list):\n return\n dt = []\n for line in data_lines:\n temps=re.split(\"\\\\s+\", line)\n dt.append(temps)\n w = Word(int(temps[0])-1, temps[1])\n w.word = temps[1].lower()\n w.pos = temps[4]\n self.append(w)\n for w in self:\n w.sentence = self\n\n\n\n\nclass SentenceConll2005(Sentence): # Sentence in Conll 2005\n\n def __init__(self, data_lines=None):\n Sentence.__init__(self)\n if data_lines is None:\n return\n if not isinstance(data_lines, list):\n return\n pred_id = 0\n idx=0\n dt = []\n for line in 
data_lines:\n temps=re.split(\"\\\\s+\", line)\n # print (temps)\n dt.append(temps)\n w = Word(idx, temps[0], False, True)\n pos = temps[1]\n\n if pos == \"(\":\n pos= \"-lrb-\"\n if pos == \")\":\n pos = \"-rrb-\"\n w.pos = pos\n w.parsebit = temps[2]\n w.word = temps[0].lower()\n\n if temps[5] != \"-\":\n w.__class__= Predicate\n w.sense = temps[5]+\".\"+temps[4]\n self.append(w)\n idx += 1\n\n # read srl information\n for pred in self:\n if isinstance(pred, Predicate):\n args=[]\n j = 0\n while j < len(data_lines):\n tmps = dt[j]\n lbl = tmps[6+pred_id]\n match = re.match('\\((.+)\\*\\)', lbl)\n if match:\n args.append(\"B-\"+match.group(1))\n j += 1\n else:\n match = re.match('\\((.+)\\*', lbl)\n if match:\n args.append(\"B-\"+match.group(1))\n for k in range(j+1, len(data_lines)):\n l1 = data_lines[k]\n tmps1 = re.split(\"\\\\s+\",l1)\n match1 = re.match('\\*\\)', tmps1[6+pred_id])\n args.append(\"I-\"+match.group(1))\n if match1:\n j = k+1\n break\n else:\n args.append(\"O\")\n j += 1\n\n pred.arguments = args\n pred_id += 1\n for w in self:\n w.sentence = self\n\n\n","sub_path":"liir/nlp/representation/Sentence.py","file_name":"Sentence.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"391398609","text":"#!/usr/bin/python\n\nimport pyglet\nfrom pyglet.gl import *\nfrom pyglet.window import key\nimport math\n\nwindow = pyglet.window.Window(fullscreen=True)\n\npyglet.resource.path.append('./images')\npyglet.resource.reindex()\n\ncenter_x = int(window.width/2)\ncenter_y = int(window.height/2)\n\n\ndef center_anchor(img):\n img.anchor_x = img.width // 2\n img.anchor_y = img.height // 2\n\ndef wrap(value, width):\n if width == 0:\n return 0\n if value > width:\n value -= width\n if value < 0:\n value += width\n return value\n\nradians_in_circle = math.pi * 2\ndef to_radians(degrees):\n return math.pi * degrees / 180.0\n\nplanet_image = pyglet.resource.image('mars.png')\ncenter_anchor(planet_image)\nship_image = pyglet.resource.image('ship.png')\ncenter_anchor(ship_image)\nship_image_on = pyglet.resource.image('ship_on.png')\ncenter_anchor(ship_image_on)\nbullet_image = pyglet.resource.image('bullet.png')\ncenter_anchor(bullet_image)\n\nclass Planet(pyglet.sprite.Sprite):\n def __init__(self, image, x=0, y=0, batch=None):\n super(Planet, self).__init__(image, x, y, batch=batch)\n self.x = x\n self.y = y\n self.mass = 5000000 # experiment!\n self.radius = (self.image.height + self.image.width) / 4\n \n # Planet pulls ship in with gravity\n def dist_vec_to(self, target):\n dx = self.x - target.x\n dy = self.y - target.y\n sqr_distance = dx**2 + dy**2\n distance = math.sqrt(sqr_distance)\n \n angle = math.acos(float(dx) / distance)\n if dy < 0:\n angle = 2*math.pi - angle\n return (distance, angle)\n \n def force_on(self, target):\n G = 1 # experiment!\n distance, angle = self.dist_vec_to(target)\n return ((G * self.mass) / (distance**2), angle)\n \n def update(self, dt):\n # Check collisions\n distance, angle = self.dist_vec_to(ship)\n if distance <= ship.radius + self.radius:\n ship.reset()\n ship.alive = False\n \n # Gravity!\n force, angle = self.force_on(ship)\n force_x = force * math.cos(angle) * dt\n force_y = force * math.sin(angle) * dt\n ship.dx += force_x\n ship.dy += force_y\n\n \nclass Ship(pyglet.sprite.Sprite, key.KeyStateHandler):\n def __init__(self, image, x=0, y=0, dx=0, dy=0, rotv=0, batch=None):\n super(Ship, self).__init__(image, x, y, batch=batch)\n self.x = x\n self.y = 
y\n self.dx = dx\n self.dy = dy\n self.rotation = rotv\n self.thrust = 150.0\n self.rot_spd = 100.0\n self.alive = True\n self.radius = self.image.width / 2\n self.max_speed = 100\n self.shot_timer = 0.1\n self.reload_timer = self.shot_timer\n self.bullets = []\n \n def reset(self):\n self.life_timer = 2.0 # seconds until respawn (was 'ship.life_timer', which relied on the module-level instance)\n self.x = center_x + 300; self.y = center_y\n self.dx = 0; self.dy = 150\n self.rotation = -90\n \n def update(self, dt):\n self.image = ship_image\n \n if not self.alive:\n #print \"Dead! Respawn in %s\" % self.life_timer\n self.life_timer -= dt\n if self.life_timer > 0:\n return\n else:\n self.reset()\n self.alive = True\n \n # Update rotation\n if self[key.LEFT]:\n self.rotation -= self.rot_spd * dt\n if self[key.RIGHT]:\n self.rotation += self.rot_spd * dt\n self.rotation = wrap(self.rotation, 360.)\n \n # Get x/y components of orientation\n rotation_x = math.cos(to_radians(self.rotation))\n rotation_y = math.sin(to_radians(-self.rotation))\n\n # Update velocity\n if self[key.UP]:\n self.image = ship_image_on\n self.dx += self.thrust * rotation_x * dt\n self.dy += self.thrust * rotation_y * dt\n \n # Shoot bullets\n if self.reload_timer > 0:\n self.reload_timer -= dt\n elif self[key.SPACE]:\n self.bullets.append(Bullet(self.x, self.y, rotation_x*500+self.dx, rotation_y*500+self.dy, bullets))\n self.reload_timer = self.shot_timer\n \n self.x += self.dx * dt\n self.y += self.dy * dt\n self.x = wrap(self.x, window.width)\n self.y = wrap(self.y, window.height)\n \n self.velocity = abs(self.dx) + abs(self.dy)\n speedometer.text = \"Speed: %.02f %d\" % (self.velocity, len(self.bullets))\n if self.velocity < self.max_speed * 0.8:\n speedometer.color = (0, 255, 0, 255)\n elif self.velocity < self.max_speed:\n speedometer.color = (255, 255, 0, 255)\n else:\n speedometer.color = (255, 0, 0, 255)\n\n\nclass Bullet(pyglet.sprite.Sprite):\n\n def __init__(self, x=0, y=0, dx=0, dy=0, batch=None):\n super(Bullet, self).__init__(bullet_image, x, y, batch=batch)\n self.x = x\n self.y = y\n self.dx = dx\n self.dy = dy\n self.radius = self.image.width / 2\n self.timer = 5.0\n \n def update(self, dt):\n self.x += self.dx * dt\n self.y += self.dy * dt\n self.x = wrap(self.x, window.width)\n self.y = wrap(self.y, window.height)\n \n self.timer -= dt\n # collide with planet, or remove after 5 seconds\n distance, angle = planet.dist_vec_to(self)\n if distance <= planet.radius or self.timer < 0:\n ship.bullets.remove(self)\n \n \nplanet = Planet(planet_image, center_x, center_y)\nbullets = pyglet.graphics.Batch()\nship = Ship(ship_image)\nship.reset()\n\nspeedometer = pyglet.text.Label('Speed: 0',\n font_name='Arial',\n font_size=36,\n x=10, y=10,\n anchor_x='left', anchor_y='bottom')\n\n\n@window.event\ndef on_draw():\n window.clear()\n planet.draw()\n bullets.draw()\n if ship.alive:\n ship.draw()\n speedometer.draw()\n\n# Call update 60 times a second\ndef update(dt):\n planet.update(dt)\n ship.update(dt)\n for bullet in ship.bullets:\n bullet.update(dt)\n \nwindow.push_handlers(ship)\npyglet.clock.schedule_interval(update, 1/60.0)\npyglet.app.run()\n","sub_path":"hello_python_source_py3/chapter 09/ship-5-shooting.py","file_name":"ship-5-shooting.py","file_ext":"py","file_size_in_byte":6279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"498011861","text":"import json\nimport cv2\nimport requests\n\nLIMIT_PX = 1024\nLIMIT_BYTE = 1024 * 1024 # 1MB\nLIMIT_BOX = 40\n\nREST_API_KEY = 
'0c6b943595fb299f1edd9a630623de49'\n\n\ndef kakao_ocr_resize(image_path):\n image = cv2.imread(image_path)\n height, width, _ = image.shape\n if LIMIT_PX < height or LIMIT_PX < width:\n ratio = float(LIMIT_PX) / max(height, width)\n image = cv2.resize(image, None, fx=ratio, fy=ratio)\n image_path = \"{}_resized.jpg\".format(image_path)\n cv2.imwrite(image_path, image)\n return image_path\n return None\n\n\ndef kakao_ocr(image_path, rest_api_key): # function that performs OCR\n API_URL = 'https://dapi.kakao.com/v2/vision/text/ocr'\n headers = {'Authorization': 'KakaoAK {}'.format(rest_api_key)}\n return requests.post(API_URL, headers=headers, files={\"image\": open(image_path, 'rb')})\n\n\ndef main():\n image_path = 'hello.jpg'\n # resize the image if it exceeds 1024x1024\n resize_impath = kakao_ocr_resize(image_path)\n if resize_impath is not None:\n image_path = resize_impath\n print(\"Using the resized image instead of the original.\")\n\n output = kakao_ocr(image_path, REST_API_KEY).json()\n print(\"[OCR] output:\\n{}\\n\".format(output))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Example3/KakaoAPI/kakao_vision_ocr.py","file_name":"kakao_vision_ocr.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"16587951","text":"import collections \r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom operator import itemgetter\r\n\r\nmreza = nx.read_weighted_edgelist(\"tez_edg.edges\" , delimiter = ' ' )\r\nG=nx.Graph(mreza)\r\nnx.draw(G)\r\nplt.show()\r\n\r\ncvorovi = np.float64(G.number_of_nodes())\r\nveze = np.float64(G.number_of_edges())\r\n\r\ndeg = [val for (node, val) in G.degree(weight='weight')]\r\n\r\ncnt = []\r\nfor i in range(len(deg)): # one x position per node (was a hard-coded 30)\r\n cnt.append(i)\r\n\r\nfig, ax = plt.subplots()\r\nplt.bar(cnt, deg, color='b')\r\n\r\nplt.title(\"Degree Histogram\")\r\nplt.ylabel(\"Degree\")\r\nplt.xlabel(\"Nodes\")\r\n#plt.xticks(deg, fontsize=9, rotation=45)\r\n#ax.set_xticks([d + 0.4 for d in deg])\r\n#ax.set_xticklabels(deg)\r\nplt.savefig('degree_histogram.png', dpi=200)\r\n\r\n\r\nplt.show()\r\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"88857166","text":"# -*- coding: utf-8 -*-\n#\n# MongoDB documentation build configuration file, created by\n# sphinx-quickstart on Mon Oct 3 09:58:40 2011.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n\nimport sys\nimport os\nimport datetime\n\nfrom sphinx.errors import SphinxError\n\nfrom giza.config.runtime import RuntimeStateConfig\nfrom giza.config.helper import fetch_config, get_versions, get_manual_path\nfrom giza.config.project import get_current_path\n\nconf = fetch_config(RuntimeStateConfig())\nsconf = conf.system.files.data.sphinx_local\n\nsys.path.append(os.path.join(conf.paths.projectroot, conf.paths.buildsystem, 'sphinxext'))\n\n# -- General configuration ----------------------------------------------------\n\nneeds_sphinx = '1.0'\n\nextensions = [\n 'sphinx.ext.extlinks',\n 'sphinx.ext.todo',\n 'mongodb',\n 'directives',\n \"hieroglyph\",\n \"intermanual\"\n]\n\ntodo_include_todos = True\nlocale_dirs = [ conf.paths.locale ]\ngettext_compact = False\n\ntemplates_path = ['.templates']\nexclude_patterns = []\n\nsource_suffix = '.txt'\n\nmaster_doc = sconf.master_doc\nlanguage = 'en'\nproject = sconf.project\ncopyright = 
u'2011-{0}'.format(datetime.date.today().year)\nversion = conf.version.branch\nrelease = conf.version.release\n\nrst_epilog = '\\n'.join([\n '.. |branch| replace:: ``{0}``'.format(conf.git.branches.current),\n '.. |copy| unicode:: U+000A9',\n '.. |year| replace:: {0}'.format(datetime.date.today().year),\n '.. |ent-build| replace:: MongoDB Enterprise',\n '.. |hardlink| replace:: {0}/{1}'.format(conf.project.url, conf.git.branches.current)\n])\n\npygments_style = 'sphinx'\n\nextlinks = {\n 'issue': ('https://jira.mongodb.org/browse/%s', '' ),\n 'wiki': ('http://www.mongodb.org/display/DOCS/%s', ''),\n 'api': ('http://api.mongodb.org/%s', ''),\n 'source': ('https://github.com/mongodb/mongo/blob/master/%s', ''),\n 'docsgithub' : ( 'http://github.com/mongodb/docs/blob/{0}/%s'.format(conf.git.branches.current), ''),\n 'hardlink' : ( 'http://docs.mongodb.org/{0}/%s'.format(conf.git.branches.current), ''),\n 'manual': ('http://docs.mongodb.org/manual%s', ''),\n 'ecosystem': ('http://docs.mongodb.org/ecosystem%s', ''),\n 'meta-driver': ('http://docs.mongodb.org/meta-driver/latest%s', ''),\n 'mms': ('https://mms.mongodb.com/help%s', ''),\n 'mms-hosted': ('https://mms.mongodb.org/help-hosted%s', ''),\n 'about': ('http://www.mongodb.org/about%s', '')\n}\n\n\nintersphinx_mapping = {}\nif 'intersphinx' in conf.system.files.data:\n for i in conf.system.files.data.intersphinx:\n intersphinx_mapping[i.name] = ( i.url, os.path.join(conf.paths.projectroot,\n conf.paths.output,\n i.path))\n\nlanguages = [\n (\"ar\", \"Arabic\"),\n (\"cn\", \"Chinese\"),\n (\"cs\", \"Czech\"),\n (\"de\", \"German\"),\n (\"es\", \"Spanish\"),\n (\"fr\", \"French\"),\n (\"hu\", \"Hungarian\"),\n (\"id\", \"Indonesian\"),\n (\"it\", \"Italian\"),\n (\"jp\", \"Japanese\"),\n (\"ko\", \"Korean\"),\n (\"lt\", \"Lithuanian\"),\n (\"pl\", \"Polish\"),\n (\"pt\", \"Portuguese\"),\n (\"ro\", \"Romanian\"),\n (\"ru\", \"Russian\"),\n (\"tr\", \"Turkish\"),\n (\"uk\", \"Ukrainian\")\n]\n\n# -- Options for Slides output ---------------------------------------------------\n\nslide_theme = 'internal'\n\nslide_theme_path = [ os.path.join(conf.paths.projectroot, 'themes') ]\n\nslide_numbers = True\n\nslide_link_html_to_slides = True\nslide_relative_path = \"slides/\"\nslide_link_to_html = True\nslide_html_relative_path = \"../\"\n\n# To add\n#slide_footer = \"MongoDB Internal Contents - Confidential\" # Default is right aligned, need centered\n#slide_relative_path = \"../slides/\" #default\n\n# -- Options for HTML output ---------------------------------------------------\n\nhtml_theme = sconf.theme.name\nhtml_theme_path = [ os.path.join(conf.paths.projectroot, conf.paths.buildsystem, 'themes') ]\nhtml_title = conf.project.title\nhtmlhelp_basename = 'MongoDBdoc'\n\nhtml_logo = sconf.logo\nhtml_static_path = sconf.paths.static\n\nhtml_copy_source = False\nhtml_use_smartypants = True\nhtml_domain_indices = True\nhtml_use_index = True\nhtml_split_index = False\nhtml_show_sourcelink = False\nhtml_show_sphinx = True\nhtml_show_copyright = True\n\nmanual_edition_path = '{0}/{1}/{2}'.format(conf.project.url,\n conf.git.branches.current,\n sconf.theme.book_path_base)\n\nhtml_theme_options = {\n 'branch': conf.git.branches.current,\n 'pdfpath': manual_edition_path + '.pdf',\n 'epubpath': manual_edition_path + '.epub',\n 'manual_path': get_manual_path(conf),\n 'translations': languages,\n 'language': language,\n 'repo_name': sconf.theme.repo,\n 'jira_project': sconf.theme.jira,\n 'google_analytics': sconf.theme.google_analytics,\n 'project': 
sconf.theme.project,\n 'version': version,\n 'sitename': sconf.theme.sitename,\n 'nav_excluded': sconf.theme.nav_excluded,\n}\n\nhtml_sidebars = sconf.sidebars\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_documents = []\nif 'pdfs' in conf.system.files.data:\n for pdf in conf.system.files.data.pdfs:\n latex_documents.append((pdf.source, pdf.output, pdf.title, pdf.author, pdf.doc_class))\n\n\n\nlatex_preamble_elements = [ r'\\DeclareUnicodeCharacter{FF04}{\\$}',\n r'\\DeclareUnicodeCharacter{FF0E}{.}',\n r'\\PassOptionsToPackage{hyphens}{url}',\n r'\\usepackage{graphicx}',\n# r'\\setkeys{Gin}{width=\\linewidth}',\n r'\\usepackage{upquote}',\n r'\\pagestyle{plain}',\n r'\\pagenumbering{arabic}',\n r'\\usepackage{titlesec}',\n r'\\newcommand{\\sectionbreak}{\\clearpage}',\n# r'\\newcommand{\\subsectionbreak}{\\clearpage}',\n r'\\renewcommand{\\bottomtitlespace}{12\\baselineskip}',\n# r'\\definecolor{TitleColor}{rgb}{0.421,0.640,0.222}',\n r'\\definecolor{TitleColor}{rgb}{0.0,0.0,0.0}',\n# r'\\definecolor{InnerLinkColor}{rgb}{0.421,0.640,0.222}',\n r'\\definecolor{InnerLinkColor}{rgb}{0.421,0.421,0.421}',\n r'\\usepackage{tocloft}',\n # r'\\makeatletter',\n # r'\\renewcommand{\\l@section}{\\@dottedtocline{1}{1.0em}{1.5em}}',\n # r'\\renewcommand{\\l@subsection}{\\@dottedtocline{1.5}{4.0em}{3.0em}}',\n # r'\\renewcommand{\\l@subsubsection}{\\@dottedtocline{3}{7.4em}{4.5em}}',\n # r'\\makeatother',\n r'\\setcounter{tocdepth}{2}',\n r'\\renewcommand{\\cfttoctitlefont}{\\Large\\sffamily\\bfseries}',\n r'\\renewcommand{\\cftsecfont}{\\normalfont\\sffamily\\bfseries}',\n r'\\renewcommand{\\cftsubsecfont}{\\normalfont\\sffamily}',\n r'\\renewcommand{\\cftsubsubsecfont}{\\normalfont\\sffamily}',\n r'\\renewcommand{\\cftsecpagefont}{\\normalfont\\sffamily}',\n r'\\renewcommand{\\cftsubsecpagefont}{\\normalfont\\sffamily}',\n r'\\renewcommand{\\cftsubsubsecpagefont}{\\normalfont\\sffamily}',\n r'\\titleformat*{\\section}{\\Large\\sffamily\\bfseries}',\n r'\\titleformat*{\\subsection}{\\large\\sffamily\\bfseries}',\n r'\\titleformat*{\\subsubsection}{\\sffamily\\bfseries}',\n# r'\\usepackage[right=2.7in]{geometry}'\n ]\n\nlatex_elements = {\n 'preamble': '\\n'.join(latex_preamble_elements),\n 'pointsize': '10pt',\n 'papersize': 'letterpaper'\n}\n\nlatex_paper_size = 'letter'\nlatex_use_parts = False\nlatex_show_pagerefs = True\nlatex_show_urls = 'footnote'\nlatex_domain_indices = False\nlatex_logo = None\nlatex_appendices = []\n\n# -- Options for manual page output --------------------------------------------\n\nman_pages = []\nif 'manpages' in conf.system.files.data:\n for mp in conf.system.files.data.manpages:\n man_pages.append((mp.file, mp.name, mp.title, mp.authors, mp.section))\n# -- Options for Epub output ---------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = conf.project.title\nepub_author = u'MongoDB University'\nepub_publisher = u'MongoDB, Inc.'\nepub_copyright = copyright\nepub_theme = 'epub_mongodb'\nepub_tocdup = True\nepub_tocdepth = 3\nepub_language = language\nepub_scheme = 'url'\nepub_identifier = ''.join([conf.project.url, '/', conf.git.branches.current])\nepub_exclude_files = []\n\nepub_pre_files = []\nepub_post_files = []\n\n\n# put it into your conf.py\ndef setup(app):\n # disable versioning for speed\n from sphinx.builders.gettext import I18nBuilder\n I18nBuilder.versioning_method = 'none'\n\n def doctree_read(app, doctree):\n if not isinstance(app.builder, I18nBuilder):\n return\n from 
docutils import nodes\n from sphinx.versioning import add_uids\n list(add_uids(doctree, nodes.TextElement))\n app.connect('doctree-read', doctree_read)\n","sub_path":"conf-internal.py","file_name":"conf-internal.py","file_ext":"py","file_size_in_byte":9378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"112304626","text":"import sys\nimport subprocess\nimport platform\nif sys.version_info[0] == 2:\n import Tkinter as tk\nelse:\n import tkinter as tk\n\n\nif platform.system() == \"Linux\":\n praat_path = \"/usr/bin/praat\"\n script_path = \"\"\n list_path = \"./lists/\"\nelif platform.system() == \"Darwin\":\n praat_path = \"/Applications/Praat.app/Contents/MacOS/Praat\"\n script_path = \"\"\n list_path = \"./lists/\"\nelif platform.system() == \"Windows\":\n praat_path = \"C:\\\\Users\\\\User\\\\Desktop\\\\Praat6.exe\"\n script_path = \"D:\\\\Tim\\\\MMAT_slider\\\\\"\n list_path = \"D:\\\\Tim\\\\MMAT_slider\\\\lists\\\\\" # fixed a missing backslash escape before 'MMAT_slider'\nelse:\n print(\"Incompatible operating system detected.\")\n sys.exit()\nslider_range = 100\nslider_start = slider_range / 2\nerb_shift_range = 4\n\n\nclass Experiment(tk.Tk):\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n container = tk.Frame(self)\n container.pack(expand=True)\n self.attributes(\"-fullscreen\", True)\n self.configure(background=\"darkgray\")\n self.bind(\"<Escape>\", self.exitFullScreen) # event name reconstructed; the angle-bracketed sequence was stripped in the source and <Escape> is assumed\n self.frames = {}\n for f in [StartPage, TrialPage]:\n frame = f(container, self)\n frame.config(bg=\"darkgray\")\n self.frames[f] = frame\n frame.grid(row=1, column=1)\n self.showFrame(StartPage)\n\n def exitFullScreen(self, event):\n self.attributes(\"-fullscreen\", False)\n\n def showFrame(self, cont):\n frame = self.frames[cont]\n frame.tkraise()\n\n def quitExperiment():\n app.destroy()\n\n\nclass StartPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.grid_columnconfigure(0, minsize=800)\n # self.grid_rowconfigure(0, minsize=600)\n welcome_text = \"Welcome to this listening experiment. In this experiment you will hear 3 versions of the same word each time. The first version you hear is the original recording of the word. The second and third versions have been manipulated.\\n\\nYour task is to adjust the third version of the word so that the difference between version A and version B is as large as the difference between version B and version C. You do this by moving the slider that appears on the screen. Each time you move the slider you will hear the three versions. 
You may move the slider as often as you like.\\n\\nWhen you are happy with your manipulation, you can click through to the next word.\"\n msg = tk.Message(self, text=welcome_text, font=(\"Arial\", 24), bg=\"darkgray\", fg=\"white\")\n msg.grid(row=0, column=0)\n button = tk.Button(self, text=\"Ok\", bg=\"darkgray\", command=lambda: controller.showFrame(TrialPage))\n button.grid(row=1, column=0)\n\n\nclass TrialPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.grid_rowconfigure(0, minsize=100)\n self.grid_rowconfigure(1, minsize=200)\n self.grid_rowconfigure(2, minsize=200)\n self.grid_columnconfigure(0, minsize=800)\n self.current_stim = 0\n self.num_adjustments = 0\n # Stimulus name\n # self.current_stim_var = tk.StringVar()\n self.current_stim_text = tk.Text(self, height=2, wrap=\"word\", font=(\"Arial\", 18), bg=\"darkgray\", fg=\"white\", highlightthickness=0)\n self.current_stim_text.grid(row=0, column=0)\n self.updateText()\n # Slider\n self.slider = tk.Scale(self, from_=0, to=slider_range, orient=\"horizontal\", showvalue=False, length=600, width=30, troughcolor=\"lightgray\")\n self.slider.set(slider_start)\n self.slider.bind(\"<ButtonRelease-1>\", self.sendToPraat) # event name reconstructed; the original was stripped and <ButtonRelease-1> is assumed\n self.slider.grid(row=1, column=0)\n # Next Button\n self.next_button = tk.Button(self, text=\"Next\", bg=\"darkgray\", command=self.nextTrial)\n self.next_button.grid(row=2, column=0)\n\n def sendToPraat(self, event):\n self.slider.unbind(\"<ButtonRelease-1>\")\n self.slider.config(state=\"disabled\")\n self.next_button.config(state=\"disabled\")\n self.new_value = float(self.slider.get())\n self.min_erb = erb_shift_list[self.current_stim] - (erb_shift_range / 2)\n self.slider_prop = self.new_value / slider_range\n self.erb_shift = self.min_erb + (erb_shift_range * self.slider_prop)\n print(\"Slider pos: \" + str(self.new_value) + \"\\nERB shift: \" + str(self.erb_shift))\n subprocess.call([praat_path, \"--run\", script_path + \"playStimuli.praat\", stimulus_list[self.current_stim], \"c_b_a\", str(self.erb_shift)])\n self.num_adjustments += 1\n self.after(500, self.bindSlider)\n\n def bindSlider(self):\n self.slider.bind(\"<ButtonRelease-1>\", self.sendToPraat)\n self.slider.config(state=\"normal\")\n self.next_button.config(state=\"normal\")\n\n def updateText(self):\n self.current_stim_text.config(state=\"normal\")\n self.current_stim_text.delete(1.0, \"end\")\n self.current_stim_text.insert(\"end\", \"Move the slider so that the difference between the first and second version of \" + stimulus_list[self.current_stim] + \" is as large as the difference between the second and third version.\")\n # 79 is the length of the prefix before the stimulus word (was 76 for the original Dutch prompt)\n self.tag_end = \"1.\" + str(79 + len(stimulus_list[self.current_stim]))\n self.current_stim_text.tag_add(\"red\", \"1.79\", self.tag_end)\n self.current_stim_text.tag_configure(\"red\", foreground=\"red\")\n self.current_stim_text.tag_add(\"center\", 1.0, \"end\")\n self.current_stim_text.tag_configure(\"center\", justify='center')\n self.current_stim_text.config(state=\"disabled\")\n\n def writeInput(self):\n g.write(str(self.current_stim + 1) + \",\" + stimulus_list[self.current_stim] + \",\" + str(self.erb_shift) + \",\" + str(self.num_adjustments) + \"\\n\")\n\n def nextTrial(self, event=None):\n self.slider.set(slider_start)\n self.writeInput()\n self.num_adjustments = 0\n if self.current_stim >= (len(stimulus_list) - 1):\n Experiment.quitExperiment()\n else:\n self.current_stim += 1\n self.updateText()\n # self.current_stim_var.set(stimulus_list[self.current_stim])\n\n\ndef callbackPP():\n callbackPP.pp_num = 
int(getParticipantInfo.ent.get())\n infoLoop.window.destroy()\n\n\ndef getParticipantInfo():\n getParticipantInfo.lbl = tk.Label(infoLoop.window, text=\"Participant number:\")\n getParticipantInfo.ent = tk.Entry(infoLoop.window)\n getParticipantInfo.btn = tk.Button(infoLoop.window, text=\"Ok\", command=callbackPP)\n getParticipantInfo.lbl.grid(row=0)\n getParticipantInfo.ent.grid(row=0, column=1)\n getParticipantInfo.btn.grid(row=1, column=1, sticky=\"e\")\n\n\ndef infoLoop():\n infoLoop.window = tk.Tk()\n infoLoop.window.title(\"Enter Participant Info\")\n getParticipantInfo()\n infoLoop.window.mainloop()\n\n\ndef loadLists():\n if callbackPP.pp_num < 10:\n list_name = \"list0\" + str(callbackPP.pp_num) + \".csv\"\n else:\n list_name = \"list\" + str(callbackPP.pp_num) + \".csv\"\n f = open(list_path + list_name, \"r\")\n stim_list = []\n shift_list = []\n counter = 0\n for line in f:\n counter += 1\n if counter == 1:\n continue\n else:\n line_list = line[:-1].split(\",\")\n stim_list.append(line_list[0])\n shift_list.append(float(line_list[1]))\n f.close()\n return stim_list, shift_list, callbackPP.pp_num\n\n\ninfoLoop()\nstimulus_list, erb_shift_list, pp = loadLists()\ng = open(\"./logs/\" + str(pp) + \"_results.csv\", \"w\")\ng.write(\"trial,stimulus,erb_shift,num_adjustments\\n\")\n\napp = Experiment()\napp.mainloop()\ng.close()\n","sub_path":"slider/MMAT_slider_pilot1_mac.py","file_name":"MMAT_slider_pilot1_mac.py","file_ext":"py","file_size_in_byte":7594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"93222660","text":"from django.contrib import admin\nfrom models import *\nfrom forms import *\n\n\nclass LikeInline(admin.TabularInline):\n model = Like\n extra = 0\n\n\nclass RatingInline(admin.TabularInline):\n model = Rating\n extra = 0\n\n\nclass JokeAdmin(admin.ModelAdmin):\n list_display = ('__str__', 'created_at', 'approved', 'get_likes_count', 'get_rating_score')\n list_editable = ('approved',)\n list_filter = ('approved',)\n form = JokeAdminForm\n\n inlines = [\n LikeInline,\n RatingInline,\n ]\n\nadmin.site.register(Joke, JokeAdmin)\nadmin.site.register(Category)\n","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"603103682","text":"from django.conf.urls import patterns, include, url\n\nurlpatterns = patterns('exercise.views',\n # url(r'^JavaScript/', include('JavaScript.foo.urls')),\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n url(r'^$','esercise_main',name=\"exercise\"),\n url(r'^01/$','ex01',name=\"ex01\"),\n url(r'^02/$','ex02',name=\"ex02\"),\n url(r'^37/$','ex37',name=\"ex37\"),\n url(r'^38/$','ex38',name=\"ex38\"),\n url(r'^42/$','ex42',name=\"ex42\"),\n url(r'^43/$','ex43',name=\"ex43\"),\n url(r'^44/$','ex44',name=\"ex44\"),\n url(r'^45/$','ex45',name=\"ex45\"),\n url(r'^47/$','ex47',name=\"ex47\"),\n url(r'^48/$','ex48',name=\"ex48\"),\n url(r'^49/$','ex49',name=\"ex49\"),\n url(r'^50/$','ex50',name=\"ex50\"),\n url(r'^51/$','ex51',name=\"ex51\"),\n url(r'^52/$','ex52',name=\"ex52\"),\n url(r'^53/$','ex53',name=\"ex53\"),\n url(r'^54/$','ex54',name=\"ex54\"),\n url(r'^55/$','ex55',name=\"ex55\"),\n url(r'^56/$','ex56',name=\"ex56\"),\n url(r'^57/$','ex57',name=\"ex57\"),\n 
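# NOTE (added comment): django.conf.urls.patterns() was deprecated in Django 1.8\n # and removed in 1.10, so this URLconf targets an older Django release.\n 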
url(r'^58/$','ex58',name=\"ex58\"),\n url(r'^59/$','ex59',name=\"ex59\"),\n url(r'^60/$','ex60',name=\"ex60\"),\n url(r'^61/$','ex61',name=\"ex61\"),\n)","sub_path":"liaoxuefeng/JavaScript/exercise/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"182710043","text":"def power(a,b,m):\n result = 1\n print(a,m,'a')\n a = a%m\n while b>0:\n if b%2==1:\n result = (result*a)%m\n a = (a*a) % m\n b = b//2\n\n return result%m\n\n\nwhile True:\n try:\n a = input()\n if a=='':\n pass\n else:\n b = int(input())\n m = int(input())\n a = int(a)\n print(power(a,b,m))\n\n except EOFError:\n break\n\n","sub_path":"Data Structure & Algorithm/Binary Exponentiation/UVa 374 - Big Mod.py","file_name":"UVa 374 - Big Mod.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"76203915","text":"#!/usr/bin/python3\n\"\"\" doctest unittest \"\"\"\nimport unittest\nimport pep8\nfrom models.city import City\nimport os\n\n\nclass TestBase(unittest.TestCase):\n \"\"\" test \"\"\"\n\n def test_pep8(self):\n \"\"\" test pep8 \"\"\"\n style = pep8.StyleGuide(quiet=True)\n file_city = \"models/city.py\"\n file_test_city = \"tests/test_models/test_city.py\"\n check = style.check_files([file_city, file_test_city])\n self.assertEqual(check.total_errors, 0,\n \"Found code style errors (and warning).\")\n\n @classmethod\n def setUpClass(cls):\n \"\"\" first set up\n check = style.check_files([file_city, file_test_city])\n \"\"\"\n cls.ins = City()\n\n @classmethod\n def teardown(cls):\n \"\"\" final statement \"\"\"\n del cls.ins\n try:\n os.remove(\"file.json\")\n except:\n pass\n\n def test_Userdoc(self):\n \"\"\" test base model documentation\n self.assertNotEqual(len(models.__doc__), 0)\n self.assertNotEqual(len(models.base_model.__doc__), 0)\n\n \"\"\"\n self.assertNotEqual(len(City.__doc__), 0)\n\n def test_BaseModelAttr(self):\n \"\"\" test basemodel attributes \"\"\"\n self.assertEqual(hasattr(self.ins, \"state_id\"), True)\n self.assertEqual(hasattr(self.ins, \"name\"), True)\n\n def test_isinstance(self):\n self.assertTrue(isinstance(self.ins, City))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_models/test_city.py","file_name":"test_city.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"192474446","text":"#!/usr/bin/env python3\n# Advent of Code 2016 - Day 21, Part Two\n\nimport sys\nimport re\n\n\ndef swap_position(string, x, y):\n string[x], string[y] = string[y], string[x]\n return string\n\n\ndef swap_letter(string, x, y):\n for i, c in enumerate(string):\n if c == x:\n string[i] = y\n elif c == y:\n string[i] = x\n return string\n\n\ndef rotate_right(string, n):\n n = n % len(string)\n return string[-n:] + string[:-n]\n \n\ndef rotate_left(string, n):\n return rotate_right(string, -n)\n\n\ndef rotate_position(string, x):\n i = string.index(x)\n n = i // 2 + (i % 2 == 1 or i == 0 and 1 or 5)\n return rotate_left(string, n)\n\ndef reverse(string, x, y):\n string[x:y+1] = string[x:y+1][::-1]\n return string\n\n\ndef move(string, x, y):\n c = string[x]\n del string[x]\n string.insert(y, c)\n return string\n\n\ndef getints(string):\n return map(int, re.findall(r'(\\d+)', string))\n\n\ndef getchars(string):\n z = re.findall(r'(?:^|\\s)([a-z])(?:\\s|$)', string)\n return 
z\n\n\ndef main(argv):\n if len(argv) < 2:\n print(\"Usage: {} puzzle.txt\".format(argv[0]))\n return 1\n with open(argv[1]) as f:\n string = list('fbgdceah')\n lines = reversed(f.readlines()) # replay the scramble in reverse, inverting each operation\n for line in lines:\n head, tail = line.split(maxsplit=1)\n if head == 'swap':\n head, tail = tail.split(maxsplit=1)\n if head == 'position':\n x, y = getints(tail)\n string = swap_position(string, x, y)\n elif head == 'letter':\n x, y = getchars(tail)\n string = swap_letter(string, x, y)\n elif head == 'rotate':\n head, tail = tail.split(maxsplit=1)\n if head == 'left':\n n = list(getints(tail))[0]\n string = rotate_right(string, n)\n elif head == 'right':\n n = list(getints(tail))[0]\n string = rotate_left(string, n)\n else:\n x = getchars(tail)[0]\n string = rotate_position(string, x)\n elif head == 'reverse':\n x, y = getints(tail)\n string = reverse(string, x, y)\n elif head == 'move':\n x, y = getints(tail)\n string = move(string, y, x)\n print(''.join(string))\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n","sub_path":"2016/day21/day21-pt2.py","file_name":"day21-pt2.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"612905994","text":"import numpy as np\nfrom mnist import MNIST\nimport os\n\ndef normalize(x):\n return x.astype(np.float64)/255.0 # np.float was removed from NumPy; use an explicit dtype\n\ndef load_data(test_image_path, test_labels_path):\n mndata = MNIST('data/', return_type=\"numpy\")\n mndata.gz = True\n train_image, train_labels = mndata.load_training()\n train_image = normalize(train_image)\n randomize = np.arange(len(train_image))\n np.random.shuffle(randomize)\n train_image = train_image[randomize]\n train_labels = train_labels[randomize]\n valid_image = train_image[:10000]\n valid_labels = train_labels[:10000]\n train_image = train_image[10000:] # was [10000-1:-1], which overlapped the validation split and dropped the last sample\n train_labels = train_labels[10000:]\n\n\n test_image, test_labels = mndata.load(os.path.join('data/', test_image_path), os.path.join('data/', test_labels_path))\n test_image = normalize(np.array(test_image))\n test_labels = np.array(test_labels)\n\n return train_image, train_labels, valid_image, valid_labels, test_image, test_labels\n","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"294255274","text":"import sys\n\ndef main(argv):\n # convert the input into a form we can work with\n with open(argv[0], \"r\") as f:\n data = f.read()\n lines = data.split('\\n')\n h, w = map(int, lines[0].split())\n c = []\n sx = 0\n sy = 0\n gx = 0\n gy = 0\n before_step = [2, 2]\n for i in range(1, h + 1):\n temp_c = []\n for j in range(w):\n temp_c.append(lines[i][j])\n if lines[i][j] == 's':\n sx = i - 1\n sy = j\n elif lines[i][j] == 'g':\n gx = i - 1\n gy = j\n c.append(temp_c)\n ans = clear_maze(sx, sy, gx, gy, c, before_step)\n print(ans)\n\n\n# function that searches the maze\ndef clear_maze(sx, sy, gx, gy, maze, before_step):\n INF = 100000000\n\n field_x_length = len(maze)\n field_y_length = len(maze[0])\n distance = [[INF for i in range(field_y_length)] for j in range(field_x_length)] # fixed: distance is indexed as distance[x][y], so rows must follow x\n\n def bfs():\n queue = []\n\n queue.insert(0, (sx, sy))\n\n distance[sx][sy] = 0\n\n while len(queue):\n x, y = queue.pop()\n\n if x == gx and y == gy:\n break\n for i in range(0, 4):\n nx, ny = x + [1, 0, -1, 0][i], y + [0, 1, 0, -1][i]\n if (0 <= nx and nx < field_x_length and 0 <= ny and ny < field_y_length and maze[nx][ny] != '#' and distance[nx][ny] == INF):\n queue.insert(0, (nx, ny))\n 
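# NOTE (added comment): each step costs 1, but the branch below discounts a\n # step whose direction differs from before_step - this mirrors the original\n # puzzle's scoring and is deliberately left unchanged.\n 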
distance[nx][ny] = distance[x][y] + 1\n if before_step != [nx - x, ny - y]:\n distance[nx][ny] -= 1\n before_step[0], before_step[1] = nx - x, ny - y\n\n return distance[gx][gy]\n\n return bfs()\n\nif __name__ == '__main__':\n main(sys.argv[1:])","sub_path":"python/test/li/2019in/q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"184840691","text":"\"\"\"\nExercise 2\n\nRead the documentation of the dictionary method setdefault and use it to write\na more concise version of invert_dict. Solution:\nhttp://thinkpython2.com/code/invert_dict.py.\n\"\"\"\n\ndef invert_dict(d):\n res = {}\n for key in d:\n res.setdefault(d[key], key)\n\n return res\n\ntest = { \"cat\": 'one', 'dog': 'two', 'fish': 'three'}\n\nprint(invert_dict(test))\n","sub_path":"ch11/ex2-invert_dict.py","file_name":"ex2-invert_dict.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"612692534","text":"import apptool\nimport unittest\n\nfrom apptool import args, environ, package, perms, storage # 'storage' added: StorageTestCase references apptool.storage\nfrom unittest.mock import patch\n\ntest_adb_path = \"test_adb_path\"\ntest_package = \"test_package\"\n\nclass ArgParserTestCase(unittest.TestCase):\n @patch(\"apptool.args.argparse.ArgumentParser\")\n def test_parse(self, mock_argument_parser):\n parser = apptool.args.ArgParser(version=\"1.0\")\n parser.parse()\n \n parser.parser.parse_args.assert_called_once()\n\nclass EnvVarsTestCase(unittest.TestCase):\n def test_str(self):\n env = apptool.environ.EnvVars(test_adb_path, test_package)\n expected = \"const val EnvVar ={\\n adb_path: %s, \\n app_package: %s\\n}\" % \\\n (test_adb_path, test_package)\n self.assertEqual(expected, str(env))\n def test_is_valid_fails(self):\n env = apptool.environ.EnvVars(None, test_package)\n self.assertFalse(env.is_valid())\n def test_is_valid_succeeds(self):\n env = apptool.environ.EnvVars(test_adb_path, test_package)\n self.assertTrue(env.is_valid())\n\nclass PackageManagerTestCase(unittest.TestCase):\n @patch(\"apptool.package.subprocess.run\")\n def test_remove(self, mock_run):\n env = apptool.environ.EnvVars(test_adb_path, test_package)\n package = apptool.package.PackageManager(env)\n package.remove()\n\n mock_run.assert_called_with([test_adb_path, \"shell\", \"pm\", \"uninstall\", test_package])\n\nclass PermsTestCase(unittest.TestCase):\n @patch(\"apptool.perms.subprocess.run\")\n def test_grant(self, mock_run):\n env = apptool.environ.EnvVars(test_adb_path, test_package)\n perms = apptool.perms.Perms(env)\n perms.grant(\"camera\")\n\n mock_run.assert_called_with([test_adb_path, \"shell\", \"pm\", \"grant\", test_package, apptool.perms.VALID_PERMS[\"camera\"]])\n\nclass StorageTestCase(unittest.TestCase):\n @patch(\"apptool.storage.subprocess.run\")\n def test_clear(self, mock_run):\n env = apptool.environ.EnvVars(test_adb_path, test_package)\n storage = apptool.storage.Storage(env)\n storage.clear()\n\n mock_run.assert_called_with([test_adb_path, \"shell\", \"pm\", \"clear\", test_package])","sub_path":"tests/test_apptool.py","file_name":"test_apptool.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"446433986","text":"from tensorflow.keras.datasets import cifar100\r\nfrom tensorflow.keras.utils import to_categorical\r\nfrom tensorflow.keras.models import Sequential\r\nfrom 
tensorflow.keras.layers import Dense, Conv2D,LSTM\r\nfrom tensorflow.keras.layers import Flatten, MaxPooling2D\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n(x_train, y_train), (x_test, y_test) = cifar100.load_data()\r\n\r\nprint(x_train.shape, x_test.shape)#(50000, 32, 32, 3) (10000, 32, 32, 3)\r\nprint(y_train.shape, y_test.shape)#(50000, 1) (10000, 1)\r\n\r\n# plt.imshow(x_train[0])\r\n# plt.show()\r\n\r\n# data preprocessing 1. one-hot encoding\r\ny_train = to_categorical(y_train)\r\ny_test = to_categorical(y_test)\r\n\r\n\r\n# scaling applied here\r\n\r\nx_train = x_train.reshape(50000, 32*32*3).astype('float32')/255. # type cast\r\nx_test = x_test.reshape(10000, 32*32*3).astype('float32')/255.\r\n\r\n\r\nx_predict = x_test[0:10]\r\ny_answer = y_test[0:10]\r\n \r\n\r\nmodel = Sequential()\r\nmodel.add(Dense(100, input_shape=(32*32*3,)))\r\nmodel.add(Dense(100, activation='relu'))\r\nmodel.add(Dense(100, activation='relu'))\r\nmodel.add(Dense(100, activation='relu'))\r\nmodel.add(Dense(100, activation='relu'))\r\nmodel.add(Dense(100, activation='softmax'))\r\n\r\n\r\nmodel.summary()\r\n\r\n# compile and train\r\nfrom tensorflow.keras.callbacks import EarlyStopping\r\nearly_stopping = EarlyStopping(monitor='loss', patience=10, mode='auto')\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\nhistory = model.fit(x_train, y_train, epochs=1, batch_size=32, verbose=1, callbacks=[early_stopping])\r\n\r\nloss, accuracy = model.evaluate(x_test, y_test, batch_size=32)\r\nprint(\"loss : \", loss)\r\nprint(\"acc : \", accuracy)\r\n\r\ny_predict = model.predict([x_predict])\r\ny_predict = np.argmax(y_predict, axis=1)\r\ny_answer = np.argmax(y_answer, axis=1)\r\n\r\nprint(\"predicted:\", y_predict)\r\nprint(\"actual: \", y_answer)\r\n\r\n'''\r\nss: 0.4447 - accuracy: 0.9008\r\nloss : 0.44470536708831787\r\nacc : 0.9007999897003174\r\npredicted: [9 0 0 3 0 2 7 2 5 5]\r\nactual: [9 0 0 3 0 2 7 2 5 5]\r\n\r\n'''","sub_path":"keras41_cifar_100_3_dnn.py","file_name":"keras41_cifar_100_3_dnn.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"540640838","text":"#! 
/usr/bin/env python3\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport re\nimport bql\nimport bqviz as bqv\nbq = bql.Service()\n\n# Set up some convenience settings\npd.set_option('display.max_rows', 200)\npd.set_option('display.max_seq_items', 200)\n\n# Read the main database into d\nkwargs = {'io': '325Universe.xlsx',\n 'sheet_name': 'Sheet1',\n 'na_filter': True}\n\nd = pd.read_excel(**kwargs)\nd = d.set_index('ticker')\n\nreasonable_eps = (d.ep_total_to_market_cap >= 1.5) & (d.ep_total_to_market_cap <= 15)\nhigh_roic_potential = d.roic_high_5 > d.roic_high_5.quantile(q = 0.75)\nhigh_roic_today = d.roic_ltm > d.roic_ltm.quantile(q = .75)\ntarget_industries = (~d.sector.isin(['Retailing', 'Pharmaceuticals & Biotechnology', 'Travel & Leisure'])) & (~d.business.isin(['Retailing', 'Pharmaceuticals & Biotechnology', 'Travel & Leisure']))\nep_display = ['ep_market_cap', 'ep_value_from_fcfe', 'ep_value_from_ebitda', 'ep_value_from_roic', 'ep_total_est_value', 'ep_total_to_market_cap']\ndisplay = ['name', 'sector', 'business', 'last_work','revenue_ltm', 'em_ltm', 'ev_to_ebitda_ltm', 'net_debt_to_ebitda_ltm', 'roic_ltm','roic_high_5','ep_total_to_market_cap']\npd.set_option('max_colwidth', 25)\npd.set_option('precision', 2)\npd.set_option('display.float_format', '{:.2f}'.format)\n","sub_path":"bplayf.py","file_name":"bplayf.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"405103553","text":"# -*- coding: utf-8 -*-\nimport os\nimport argparse\n\nfrom crate_vbb_importer.commands.async_insert import Command as AsyncInsertCommand\nfrom crate_vbb_importer.commands.clean import Command as CleanCommand\nfrom crate_vbb_importer.commands.sync_insert import Command as SyncInsertCommand\n\nfrom tests.utils import test_pg_connection, test_crate_cursor\nfrom tests.expected_data import EXPECTED_ASYNC_INSERT, EXPECTED_CLEAN, EXPECTED_SYNC_INSERT\n\n\ndef test_clean():\n parser = argparse.ArgumentParser()\n CleanCommand.prepare_argparser(parser)\n raw_args = ('--host', 'test_host', '-d', os.path.join('tests', 'testdata'))\n args, remaining_args = parser.parse_known_args(raw_args)\n args.command_name = 'clean'\n command = CleanCommand(args, remaining_args, parser)\n command.run()\n assert test_pg_connection.queries == EXPECTED_CLEAN\n test_pg_connection.queries = []\n\n\ndef test_async_insert():\n parser = argparse.ArgumentParser()\n AsyncInsertCommand.prepare_argparser(parser)\n raw_args = ('--host', 'test_host', '-d', os.path.join('tests', 'testdata'), '-b', '2')\n args, remaining_args = parser.parse_known_args(raw_args)\n args.command_name = 'async_insert'\n command = AsyncInsertCommand(args, remaining_args, parser)\n command.run()\n assert test_pg_connection.queries == EXPECTED_ASYNC_INSERT\n test_pg_connection.queries = []\n\n\ndef test_sync_insert():\n parser = argparse.ArgumentParser()\n SyncInsertCommand.prepare_argparser(parser)\n raw_args = ('--host', 'test_host', '-d', os.path.join('tests', 'testdata'), '-b', '2')\n args, remaining_args = parser.parse_known_args(raw_args)\n args.command_name = 'sync_insert'\n command = SyncInsertCommand(args, remaining_args, parser)\n command.run()\n assert test_crate_cursor.queries == EXPECTED_SYNC_INSERT\n test_crate_cursor.queries = 
[]\n","sub_path":"tests/test_commands.py","file_name":"test_commands.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"576346828","text":"from utility import make_cmd, make_request, Config\nfrom json import loads\nfrom time import sleep\nfrom meross_iot.api import MerossHttpClient\nfrom logging import exception\n\n\ndef get_nvidia_info():\n exclude_list = [\"\", \"|\", \"/\"]\n gpus = []\n response = make_cmd(\"nvidia-smi -L\")\n if response['cmd_err'] == \"\":\n rows = response[\"cmd_out\"].split(\"\\n\")\n for r in rows[:-1]:\n gpu_info = {\n 'uuid': r.split(\"GPU-\")[1].split(\")\")[0].replace(\"-\", \"\")\n }\n gpus.append(gpu_info)\n response = make_cmd(\"nvidia-smi\")\n if response['cmd_err'] == \"\":\n rows = response[\"cmd_out\"].split(\"\\n\")\n i = 0\n j = 0\n for r in rows:\n if r.__contains__(\"%\"):\n gpus[j]['indice'] = i\n j += 1\n i += 1\n for gpu in gpus:\n value = []\n for r in rows[gpu['indice']].split(\" \"):\n if r not in exclude_list:\n value.append(r)\n gpu['fan'] = value[0]\n gpu['temp'] = value[1]\n gpu['power'] = value[3]\n gpu['mem_used'] = value[5].replace(\"MiB\", \"\") + \"MB\"\n gpu['mem_free'] = str(int(value[6].replace(\"MiB\", \"\")) - int(value[5].replace(\"MiB\", \"\"))) + \"MB\"\n gpu['load'] = value[7]\n return gpus\n\n\ndef calculate_uptime(total_second):\n day = total_second // 86400\n remaning = total_second - (day * 86400)\n hour = remaning // 3600\n remaning = remaning - (hour * 3600)\n minute = remaning // 60\n sec = remaning - (minute * 60)\n uptime = ''\n if day > 0:\n uptime = str(day) + \" days \"\n if hour > 0:\n uptime = uptime + str(hour) + \" hours \"\n if minute > 0:\n uptime = uptime + str(minute) + \" minutes \"\n if sec > 0:\n uptime = uptime + str(sec) + \" seconds\"\n return uptime\n\n\ndef get_trex_info():\n response = make_request(\"http://127.0.0.1:4067/summary\")\n trex_info = {}\n if response['state'] is True:\n trex = loads(response['response'])\n trex_info['uptime'] = calculate_uptime(trex['uptime'])\n trex_info['pool_url'] = trex['active_pool']['url']\n trex_info['wallet_id'] = trex['active_pool']['user']\n trex_info['total_reported_hashrate'] = str(round(trex['hashrate'] / 1000000, 1)) + \" MH/s\"\n trex_info['total_share'] = str(trex['accepted_count'])\n trex_info['share_min'] = str(trex['sharerate'])\n trex_info['share_min_avg'] = str(trex['sharerate_average'])\n trex_info['cur_trex_profile'] = find_trex_profile(trex_info['pool_url'], trex_info['wallet_id'])\n trex_info['gpus'] = []\n i = 0\n for gpu in trex['gpus']:\n gpu_info = {\n 'intensity': str(gpu['intensity']),\n 'reported_hashrate': str(round(gpu['hashrate'] / 1000000, 1)) + \" MH/s\",\n 'gpu_name': gpu['vendor'] + \" \" + gpu['name'],\n 'gpu_efficency': 'N/A',\n 'accepted_count': str(trex['stat_by_gpu'][i]['accepted_count']),\n 'uuid': gpu['uuid']\n }\n if 'efficiency' in gpu:\n gpu_info['gpu_efficency'] = gpu['efficiency']\n trex_info['gpus'].append(gpu_info)\n i += 1\n return trex_info\n\n\ndef get_balance_info(crypto, wallet_ids):\n res_conv = make_request(\"https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest?CMC_PRO_API_KEY=e06c6aea-b4a6-422d-9f76-6ac205a5eae1&convert=EUR&slug=\" + crypto)\n balance = {\n 'walletts': {},\n 'conv_eur': list(loads(res_conv['response'])['data'].values())[0]['quote']['EUR']['price'],\n 'tot_eur_value': 0.00000000,\n 'tot_crypto_value': 0.00000000\n }\n if res_conv['state'] is True:\n for key, value in 
wallet_ids.items():\n res = make_request(Config.settings['cryptos'][crypto]['api_balance'] + value)\n if res['state'] is True:\n crypto_value = eval(Config.settings['cryptos'][crypto]['function_balance'])\n balance['walletts'][key] = {\n 'id': value,\n 'eur_value': str(round(balance['conv_eur'] * crypto_value, 2)) + \" €\",\n 'crypto_value': str(round(crypto_value, 6)) + \" \" + Config.settings['cryptos'][crypto]['crypto']\n }\n balance['tot_eur_value'] = balance['tot_eur_value'] + balance['conv_eur'] * crypto_value\n balance['tot_crypto_value'] = balance['tot_crypto_value'] + crypto_value\n balance['tot_eur_value'] = round(balance['tot_eur_value'], 2)\n balance['tot_crypto_value'] = str(round(balance['tot_crypto_value'], 6)) + \" \" + Config.settings['cryptos'][crypto]['crypto']\n balance['conv_eur'] = str(round(balance['conv_eur'], 6)) + \" €\"\n return balance\n\n\ndef get_miner_info(cur_trex_profile, wallet_id):\n if Config.settings['trex']['profiles'][cur_trex_profile]['api_domain'].find(\"2miners\") != -1:\n to_ret = get_2miners_info(cur_trex_profile, wallet_id)\n else:\n to_ret = get_ethermine_info(cur_trex_profile, wallet_id)\n return to_ret\n\n\ndef calculate_avg_hasrate(current_time, stats, time_key, hashrate_key):\n old_time = current_time - 21600\n sum_hash = 0\n i = 0\n for stat in stats:\n if stat[time_key] >= old_time:\n sum_hash = sum_hash + stat[hashrate_key]\n i += 1\n return str(round(sum_hash / (i * 1000000), 1))\n\n\ndef get_ethermine_info(cur_trex_profile, wallet_id):\n ethermine_info = {}\n trex_profile = Config.settings['trex']['profiles'][cur_trex_profile]\n crypto = Config.settings['cryptos'][trex_profile['crypto']]\n response_dash = make_request(trex_profile['api_domain'] + \"/miner/\" + wallet_id + \"/dashboard\")\n response_pay = make_request(trex_profile['api_domain'] + \"/miner/\" + wallet_id + \"/dashboard/payouts\")\n if response_dash['state'] is True and response_pay['state'] is True:\n dashboard = loads(response_dash['response'])\n payouts = loads(response_pay['response'])\n if 'unconfirmed' in dashboard['data']['currentStatistics']:\n ethermine_info['immature_balance'] = str(round(dashboard['data']['currentStatistics']['unconfirmed'] / pow(10, crypto['pow_divisor']), 5)) + \" \" + crypto['crypto']\n ethermine_info['unpaid_balance'] = str(round(dashboard['data']['currentStatistics']['unpaid'] / pow(10, crypto['pow_divisor']), 5)) + \" \" + crypto['crypto']\n ethermine_info['estimated_earning'] = str(round(payouts['data']['estimates']['coinsPerMin'] * 1440, 5)) + \" \" + crypto['crypto']\n ethermine_info['current_hashrate'] = str(round(dashboard['data']['currentStatistics']['currentHashrate'] / 1000000, 1)) + \" MH/s\"\n ethermine_info['average_hashrate'] = calculate_avg_hasrate(dashboard['data']['currentStatistics']['time'], dashboard['data']['statistics'], 'time', 'currentHashrate') + \" MH/s\"\n ethermine_info['active_worker'] = str(dashboard['data']['currentStatistics']['activeWorkers'])\n ethermine_info['valid_shares'] = str(dashboard['data']['currentStatistics']['validShares'])\n ethermine_info['stale_shares'] = str(dashboard['data']['currentStatistics']['staleShares'])\n ethermine_info['invalid_shares'] = str(dashboard['data']['currentStatistics']['invalidShares'])\n return ethermine_info\n\n\ndef get_2miners_info(cur_trex_profile, wallet_id):\n two_miners_info = {}\n trex_profile = Config.settings['trex']['profiles'][cur_trex_profile]\n crypto = Config.settings['cryptos'][trex_profile['crypto']]\n response_dash = 
make_request(trex_profile['api_domain'] + \"/\" + wallet_id)\n if response_dash['state'] is True:\n dashboard = loads(response_dash['response'])\n if 'immature' in dashboard['stats']:\n two_miners_info['immature_balance'] = str(round(dashboard['stats']['immature'] / pow(10, crypto['pow_divisor']), 5)) + \" \" + crypto['crypto']\n two_miners_info['unpaid_balance'] = str(round(dashboard['stats']['balance'] / pow(10, crypto['pow_divisor']), 5)) + \" \" + crypto['crypto']\n two_miners_info['estimated_earning'] = str(round((dashboard['sumrewards'][0]['reward'] / pow(10, crypto['pow_divisor'])) * 24, 5)) + \" \" + crypto['crypto']\n two_miners_info['current_hashrate'] = str(round(dashboard['currentHashrate'] / 1000000, 1)) + \" MH/s\"\n two_miners_info['average_hashrate'] = calculate_avg_hasrate(dashboard['updatedAt'] // 1000, dashboard['minerCharts'], 'x', 'minerHash') + \" MH/s\"\n two_miners_info['active_worker'] = str(dashboard['workersOnline'])\n return two_miners_info\n\n\ndef get_meross_info():\n device_info = {\n 'state': False\n }\n try:\n http_handler = MerossHttpClient(email=Config.settings['meross']['email'], password=Config.settings['meross']['password'])\n devices = http_handler.list_devices()\n i = 0\n for dev in devices:\n if dev['devName'] == Config.settings['meross']['device_name'] and dev['onlineStatus'] == 1:\n devices = http_handler.list_supported_devices()\n device_info['power'] = str(round(devices[i].get_electricity()['electricity']['power'] / 1000, 2)) + \" W\"\n device_info['state'] = True\n break\n i += 1\n except Exception as e:\n exception(e)\n return device_info\n\n\ndef be_stop_miner():\n response = make_request(\"http://127.0.0.1:4067/control?command=shutdown\")\n if response['state'] is True:\n if loads(response['response'])['success'] == 1:\n ret_str = \"SPENTO MINER\"\n else:\n ret_str = \"ERRORE DURANTE L'ARRESTO DEL MINER\"\n else:\n ret_str = response['response']\n return ret_str\n\n\ndef find_trex_profile(pool_url, wallet_id):\n key = \"\"\n for key, value in Config.settings['trex']['profiles'].items():\n if value['pool_url'] == pool_url and value['wallet'] == wallet_id:\n break\n return key\n\n\ndef be_set_trex_profile(profile):\n try:\n f = open('trex/config_temp.json', 'r')\n data_file = f.read()\n f.close()\n trex_profile = Config.settings['trex']['profiles'][profile]\n zero_string = \"\"\n dev_string = \"\"\n for i in range(Config.settings['trex']['gpu_number']):\n zero_string = zero_string + \"0, \"\n dev_string = dev_string + str(i) + \", \"\n zero_string = zero_string[:-2]\n dev_string = dev_string[:-2]\n crypto = Config.settings['cryptos'][trex_profile['crypto']]\n data_file = data_file.replace(\"BUILD_MODE\", zero_string)\n data_file = data_file.replace(\"DEVICES_ID\", dev_string)\n data_file = data_file.replace(\"KERNEL_LIST\", zero_string)\n data_file = data_file.replace(\"LOW_LOAD_LIST\", zero_string)\n data_file = data_file.replace(\"ALGORITMO\", crypto['algo'])\n data_file = data_file.replace(\"POOL_URL\", trex_profile['pool_url'])\n data_file = data_file.replace(\"WALLET_ID\", trex_profile['wallet'])\n data_file = data_file.replace(\"INTENSITA\", str(trex_profile['intensity'])[1:-1])\n data_file = data_file.replace(\"WORKER_NAME\", Config.settings['trex']['worker_name'])\n f = open('trex/config.json', 'w')\n f.write(data_file)\n f.close()\n ret_str = \"SETTATO PROFILO: %s\" % profile\n except Exception as e:\n exception(e)\n ret_str = \"ERRORE: \" + str(e)\n return ret_str\n\n\ndef be_set_gpu_speed_fan(name, speed):\n try:\n file_path = 
Config.settings['afterburner']['path'] + 'Profiles\\\\' + Config.settings['afterburner']['gpus'][name]['config_file']\n f = open(file_path, 'r')\n data_file = f.read()\n f.close()\n actual_speed = data_file.split(\"[Profile1]\")[1].split(\"FanSpeed=\")[1].split(\"\\n\")[0]\n data_file = data_file.replace(\"FanSpeed=\" + actual_speed, \"FanSpeed=\" + speed)\n f = open(file_path, 'w')\n f.write(data_file)\n f.close()\n make_cmd(\"start \\\"\\\" \\\"\" + Config.settings['afterburner']['path'] + \"MSIAfterburner.exe\\\" -Profile1\", sys=True)\n sleep(5)\n response = make_cmd(\"taskkill /F /IM MSIAfterburner.exe /T\")\n if response['cmd_err'] == \"\" and response[\"cmd_out\"].find(\"terminato\") > 0:\n ret_str = \"VELOCITA DELLA VENTOLA MODIFICATA\"\n else:\n ret_str = \"ERRORE NELLA MODIFICA DELLA VELOCITA DELLA VENTOLA\"\n except Exception as e:\n exception(e)\n ret_str = \"ERRORE: \" + str(e)\n return ret_str\n\n\ndef get_program_status(program, name):\n response = make_cmd(\"tasklist | find \\\"\" + program + \"\\\"\")\n if response['cmd_err'] == \"\":\n if response['cmd_out'] == \"\":\n ret_str = \"SERVIZIO \" + name + \" SPENTO\"\n else:\n ret_str = \"SERVIZIO \" + name + \" ACCESO\"\n else:\n ret_str = \"ERRORE: \" + response['cmd_err']\n return ret_str\n\n\ndef be_shutdown_system():\n response = make_cmd(\"shutdown /s /t 3\")\n if response['cmd_err'] == \"\":\n ret_str = \"SERVER SPENTO\"\n else:\n ret_str = \"ERRORE: \" + response[\"cmd_err\"]\n return ret_str\n\n\ndef be_get_public_ip():\n response = make_cmd(\"nslookup myip.opendns.com resolver1.opendns.com\")\n if response['cmd_err'] == \"\":\n ret_str = response[\"cmd_out\"].split(\"Address:\")[2].strip()\n else:\n ret_str = \"ERRORE: \" + response[\"cmd_err\"]\n return ret_str\n\n\ndef be_get_file_ovpn():\n try:\n f = open('ovpn/client_temp.ovpn', 'r')\n data_file = f.read()\n f.close()\n response = be_get_public_ip()\n if response.find(\"ERRORE\") == -1:\n data_file = data_file.replace(\"IPADDRESS\", response)\n f = open('ovpn/client.ovpn', 'w')\n f.write(data_file)\n f.close()\n response = \"OK\"\n except Exception as e:\n exception(e)\n response = \"ERRORE: \" + str(e)\n return response\n\n\ndef be_stop_server_vpn():\n response = make_cmd(\"openvpn-gui --command disconnect server.ovpn\")\n if response['cmd_err'] == \"\":\n response = make_cmd(\"openvpn-gui --command exit\")\n if response['cmd_err'] == \"\":\n ret_str = \"SPENTO SERVER VPN\"\n else:\n ret_str = \"ERRORE: \" + response[\"cmd_err\"]\n else:\n ret_str = \"ERRORE: \" + response[\"cmd_err\"]\n return ret_str\n\n\ndef be_start_access_point():\n response = make_cmd(\"netsh wlan start hostednetwork\")\n if response['cmd_err'] == \"\":\n response = make_cmd(\"netsh interface ip set address \\\"\" + Config.settings['access_point']['interface_name'] + \"\\\" static \" + Config.settings['access_point']['ip'] + \" 255.255.255.0\")\n if response['cmd_err'] == \"\":\n f = open(\"dhcp/dhcpsrv_temp.ini\", \"r\")\n data_file = f.read()\n f.close()\n data_file = data_file.replace(\"SERVER_IP\", Config.settings['access_point']['ip'])\n f = open('dhcp/dhcpsrv.ini', 'w')\n f.write(data_file)\n f.close()\n make_cmd(\"\\\"dhcp/dhcpsrv.exe\\\" -configfirewall\")\n make_cmd(\"start /d dhcp dhcpsrv.exe -runapp\", sys=True)\n ret_str = \"ACCESS POINT AVVIATO\"\n else:\n ret_str = \"ERRORE: \" + response[\"cmd_err\"]\n else:\n ret_str = \"ERRORE: \" + response[\"cmd_err\"]\n return ret_str\n\n\ndef be_stop_access_point():\n response = make_cmd(\"netsh wlan stop hostednetwork\")\n if 
response['cmd_err'] == \"\":\n response = make_cmd(\"taskkill /F /IM dhcpsrv.exe\")\n if response['cmd_err'] == \"\":\n ret_str = \"ACCESS POINT SPENTO\"\n else:\n ret_str = \"ERRORE: \" + response[\"cmd_err\"]\n else:\n ret_str = \"ERRORE: \" + response[\"cmd_err\"]\n return ret_str\n\n\ndef get_mac_and_ip(client_number, cmd_out_split):\n to_ret = {}\n clients = {}\n if client_number > 0:\n response_arp = make_cmd(\"arp -an \" + Config.settings['access_point']['ip'])\n if response_arp['cmd_err'] == \"\":\n response_arp_split = response_arp['cmd_out'].split(\"\\n\")\n for i in range(client_number):\n mac_adrress = cmd_out_split[i].split(\"Autenticato\")[0].strip()\n mac_address_to_find = mac_adrress.replace(\":\", \"-\")\n found = False\n for line in response_arp_split:\n if line.find(mac_address_to_find) != -1:\n clients[mac_adrress] = line.split(mac_address_to_find)[0].strip()\n found = True\n if found is False:\n clients[mac_adrress] = 'undefined'\n to_ret['state'] = True\n to_ret['status'] = clients\n else:\n to_ret['state'] = False\n to_ret['status'] = \"ERRORE: \" + response_arp[\"cmd_err\"]\n else:\n to_ret['state'] = False\n to_ret['status'] = \"ERRORE: numero client minore o uguale a zero\"\n return to_ret\n\n\ndef be_get_access_point_status():\n response = make_cmd(\"netsh wlan show hostednetwork\")\n to_ret = {\n 'state': True\n }\n if response['cmd_err'] == \"\":\n cmd_out_split = response['cmd_out'].split(\"\\n\")\n to_ret['name'] = cmd_out_split[4].split(\"\\\"\")[1].split(\"\\\"\")[0]\n to_ret['status'] = cmd_out_split[11].split(\":\")[1].strip()\n if to_ret['status'] == 'Avviato':\n to_ret['client'] = cmd_out_split[15].split(\":\")[1].strip()\n mac_ip = get_mac_and_ip(int(to_ret['client']), cmd_out_split[16:])\n if mac_ip['state'] is True:\n to_ret['clients'] = mac_ip['status']\n else:\n to_ret = mac_ip\n if to_ret['state'] is True:\n response = make_cmd(\"netsh wlan show hostednetwork setting=security\")\n if response['cmd_err'] == \"\":\n to_ret['password'] = response['cmd_out'].split(\"\\n\")[6].split(\":\")[1].strip()\n to_ret['state'] = True\n else:\n to_ret['state'] = False\n to_ret['status'] = \"ERRORE: \" + response[\"cmd_err\"]\n else:\n to_ret['state'] = False\n to_ret['status'] = \"ERRORE: \" + response[\"cmd_err\"]\n return to_ret\n","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":18101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"83174223","text":"import numpy as np\nfrom scipy.spatial import distance\nimport math\nimport random\n\ndef kCCost(data, centers, r):\n\t#Calculates minimum distances\n\tdist = distance.cdist(data, np.array(centers))\n\tdist = np.amin(dist, axis = 1)\n\t#print(dist[0:100])\n\t#Counting how many > 2r\n\tr2 = 2*r\n\n\t#count = (dist > r2).sum()\n\tcount = np.count_nonzero(dist > r2)\n\tprint(count)\n\treturn count\n\t\n\t\ndef kCPrecRecall(sd, wins):\n\thit = np.zeros(sd.k)\n\ttp = 0\n\tfp = 0\n\tfn = 0\n\tcenters = sd.data[:sd.k]\n\tr2 = 2*sd.s\n\t\n\tdists = distance.cdist(wins, centers)\n\t\n\tfor i in range(len(dists)):\n\t\tard = dists[i]\n\t\th = False\n\t\tfor j in range(len(ard)):\n\t\t\tif(ard[j] <= r2):\n\t\t\t\tif(hit[j] == 0):\n\t\t\t\t\thit[j] = 1\n\t\t\t\t\ttp += 1\n\t\t\t\t\th = True\n\t\tif(not h):\n\t\t\tfp += 1\n\t\n\tfn = sd.k - np.sum(hit)\n\n\tprec = tp/(sd.k)\n\trecall = tp/(sd.k)\n\n\tprint(prec, recall)\n\n\treturn prec, 
recall\n\t\t\t\n","sub_path":"lib/kcenterAux.py","file_name":"kcenterAux.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"164616539","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport matplotlib.dates as mdates\n\ndef loadSampleStockData():\n with open('stock_data.pickle', 'rb') as pickle_in:\n return pickle.load(pickle_in)\n\ndef bytespdate2num(fmt, encoding='utf-8'):\n strconverter = mdates.strpdate2num(fmt)\n def bytesconverter(b):\n s = b.decode(encoding)\n return strconverter(s)\n return bytesconverter\n\ndef graph_data(stock_data):\n # %Y = full year. 2015\n # %y = partial year 15\n # %m = number month\n # %d = number day\n # %H = hours\n # %M = minutes\n # %S = seconds\n # 12-06-2014\n # %m-%d-%Y\n date, closep, highp, lowp, openp, adj_closep, volume = np.loadtxt(stock_data, delimiter=',', unpack=True, converters={0: bytespdate2num('%Y-%m-%d')})\n\n fig = plt.figure()\n ax1 = plt.subplot2grid((1,1), (0,0))\n ax1.plot_date(date, closep, '-', label='Price')\n for label in ax1.xaxis.get_ticklabels():\n label.set_rotation(45)\n ax1.grid(True)\n\n plt.xlabel('Date')\n plt.ylabel('Price')\n plt.title('Interesting Graph\\nCheck it out')\n plt.legend()\n plt.subplots_adjust(left=0.09, bottom=0.20, right=0.94, top=0.90, wspace=0.2, hspace=0)\n plt.show()\n\ngraph_data(loadSampleStockData())","sub_path":"10_BasicCustomization.py","file_name":"10_BasicCustomization.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"512964479","text":"\"\"\"djangolite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom .views import currentf,fcastedit,addroom,fcastall,testodbc\nfrom .email import sendSimpleEmail\n\n\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('fcast',currentf),\n # path('fcast/',fcastedit),\n path('fcast/all',fcastedit),\n path('fcast/add/',addroom),\n path('fcast-all',fcastall),\n path('sendmail',sendSimpleEmail),\n path('test',testodbc),\n path('test/',testodbc)\n\n]\n","sub_path":"src/djangolite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"203343388","text":"from astropy.io import fits\nimport os\nimport argparse\nimport time\nfrom datetime import date\nimport getpass\nimport sys\nimport numpy as np\nimport re\nimport jwst_update_dict\nfrom shutil import copyfile\nimport operator\n\ndef check_filename(filename):\n \"\"\"\n Uses the dictionary filename_to_ev to prepend a descriptive string to each\n filename, which states it's directory of origin, and returns the new filename\n \"\"\"\n checker = False\n for k,v in jwst_update_dict.filename_to_ev.iteritems():\n if re.search(k,filename) != None:\n checker = True\n new_filename = v + \"$\" + filename\n return new_filename\n if not checker:\n print (\"Path for {} not found, not able to prepend Environmental Variable to filename\".format(filename))\n return filename\n\ndef get_all_files():\n \"\"\"\n Gets all files from the pandeia directory, as well as all of its subdirectories.\n This then returns 4 arrays, one for each of the following attributes: time of\n access, file's compname, file's name, and the DESCRIP of the file\n \"\"\"\n time_array = []\n compname = []\n filename_array = []\n comment = []\n\n all_files = {}\n all_instruments = [\"miri/\",\"nircam/\",\"niriss/\",\"nirspec/\",\"telescope\"]\n all_sub_dirs = [\"blaze/\",\"detector/\",\"dispersion/\",\"filters/\",\"optical/\",\"psfs/\",\"qe/\",\"wavepix/\",\"xtras/\"]\n old_directory = \"/grp/hst/cdbs/work/jwst/delivery/pandeia/\"\n for instr_dir in all_instruments:\n for instr_sub_dir in all_sub_dirs:\n if instr_dir != \"telescope\":\n directory = old_directory + instr_dir + instr_sub_dir\n else:\n directory = old_directory + instr_dir\n print (directory)\n if os.path.isdir(directory) or os.path.exists(directory):\n for filename in os.listdir(directory):\n if filename.endswith(\".fits\"):\n new_path = str(os.path.join(directory, filename))\n all_files[new_path] = filename\n all_files = check_dup_comp_values(all_files)\n new_all_files = sorted(all_files.items(), key=operator.itemgetter(1))\n\n for df, f in new_all_files:\n print (\"-------------------------------------------------------------\")\n print (\"Checking {}\".format(f))\n\n (temp_time, temp_compname, temp_filename, temp_comment) = update_columns(df, f)\n time_array.append(temp_time)\n compname.append(temp_compname)\n filename_array.append(temp_filename)\n comment.append(temp_comment)\n\n return (time_array, compname, filename_array, comment)\n\ndef get_all_files_chosen_dir(directory):\n \"\"\"\n Gets all files from the pandeia directory, as well as all of its subdirectories.\n This then returns 4 arrays, one for each of the following attributes: time of\n access, file's compname, file's name, and the DESCRIP of the file\n \"\"\"\n time_array = []\n compname = []\n filename_array = []\n comment = []\n\n all_files = {}\n if 
os.path.isdir(directory) or os.path.exists(directory):\n for filename in os.listdir(directory):\n if filename.endswith(\".fits\"):\n new_path = str(os.path.join(directory, filename))\n all_files[new_path] = filename\n all_files = check_dup_comp_values(all_files)\n for df, f in all_files.iteritems():\n print (\"-------------------------------------------------------------\")\n print (\"Checking {}\".format(f))\n\n (temp_time, temp_compname, temp_filename, temp_comment) = update_columns(df, f)\n time_array.append(temp_time)\n compname.append(temp_compname)\n filename_array.append(temp_filename)\n comment.append(temp_comment)\n\n return (time_array, compname, filename_array, comment)\n\ndef check_dup_comp_values(all_files):\n \"\"\"\n Checks to see if anyfiles within a certain directory contain duplicate COMPNAMEs\n \"\"\"\n all_files_new = {}\n comp_with_filenames = {}\n for df,f in all_files.iteritems():\n hdulist = fits.open(df)\n if \"COMPNAME\" not in hdulist[0].header:\n print (\"ERROR: COMPNAME not found in {}, exiting program\".format(f))\n sys.exit()\n elif hdulist[0].header[\"COMPNAME\"] not in comp_with_filenames:\n all_files_new[df] = f\n comp_with_filenames[hdulist[0].header[\"COMPNAME\"]] = f\n else:\n print (\"ERROR: {} and {} have same COMPNAME values, exiting program\".format(f, comp_with_filenames[hdulist[0].header[\"COMPNAME\"]]))\n sys.exit()\n return all_files_new\n\ndef check_valid_values(hdulist):\n \"\"\"\n Checks to make sure all necessary headers are present\n \"\"\"\n valid_values = {'INSTRUME':'JWST', 'DBTABLE': 'CRCOMPLIST', 'DESCRIP': 'A test TMC for JWST', 'PEDIGREE':'DUMMY',\\\n 'AUTHOR': 'Jesse A', 'HISTORY': 'Jesse added this test TMC file', 'COMPNAME':'acs_block1'}\n\n for key,value in valid_values.iteritems():\n if key in hdulist[0].header:\n if hdulist[0].header[key] == '' and value == '':\n hdulist[0].header[key] = \"Testing\"\n else:\n hdulist[0].header[key] = value\n else:\n hdulist[0].header[key] = value\n\n time=[]\n compname=[]\n filename=[]\n comment=[]\n col1= fits.Column(name='TIME', format='26A', array=time,\n disp='26A')\n col2= fits.Column(name='COMPNAME', format='18A', array=compname,\n disp='18A')\n col3= fits.Column(name='FILENAME', format='68A', array=filename,\n disp='68A')\n col4= fits.Column(name='COMMENT', format='68A', array=comment,\n disp='68A')\n\n cols=fits.ColDefs([col1,col2,col3,col4])\n tbhdu = fits.BinTableHDU.from_columns(cols)\n thdulist = fits.HDUList([hdulist[0],tbhdu])\n thdulist.writeto(\"testing_file2.fits\", clobber = True)\n #hdulist.writeto(\"testing_file.fits\", clobber = True)\n\ndef update_file(hdulist, writeto_file, is_test, time_array, compname_array, filename_array, comment):\n \"\"\"\n Takes the 4 arrays created earlier and adds them to the new JWST_TMC file\n \"\"\"\n test = False\n if is_test == \"y\":\n test = True\n\n tbdata = hdulist[1].data\n\n today = date.today()\n new_useafter = time.strftime(\"%b %d %Y\") + \" \" + time.strftime(\"%H:%M:%S\")\n hdulist[0].header[\"USEAFTER\"] = new_useafter\n\n col1= fits.Column(name='TIME', format='26A', array=time_array,\n disp='26A')\n col2= fits.Column(name='COMPNAME', format='18A', array=compname_array,\n disp='18A')\n col3= fits.Column(name='FILENAME', format='68A', array=filename_array,\n disp='68A')\n col4= fits.Column(name='COMMENT', format='68A', array=comment,\n disp='68A')\n\n cols = fits.ColDefs([col1,col2,col3,col4])\n tbhdu = fits.BinTableHDU.from_columns(cols)\n\n if not test:\n username = raw_input(\"Please input your name: \")\n reason_for_change = 
raw_input(\"Please state the reason for this update: \")\n if username == \"\":\n username = getpass.getuser()\n hdulist[0].header[\"AUTHOR\"] = username\n hdulist[0].header[\"HISTORY\"] = reason_for_change\n\n thdulist = fits.HDUList([hdulist[0],tbhdu])\n thdulist.writeto(writeto_file, clobber = True)\n print (\"A new TMC file {} has been created with data up-to-date as of {}\".format(\"testing_file2.fits\",new_useafter))\n\ndef update_columns(input_files_dir, input_files_name):\n \"\"\"\n As the files are looped through, this method extracts information from the file,\n such as its COMPNAME, filename and DESCRIP, as well as the date and time this\n file was accessed\n \"\"\"\n file_hdu = fits.open(input_files_dir)\n files_compname = file_hdu[0].header[\"COMPNAME\"]\n\n today = date.today()\n new_useafter = time.strftime(\"%b %d %Y\") + \" \" + time.strftime(\"%H:%M:%S\")\n hdulist[0].header[\"USEAFTER\"] = new_useafter\n\n return (new_useafter, file_hdu[0].header[\"COMPNAME\"].lower(), check_filename(input_files_name), file_hdu[0].header[\"DESCRIP\"])\n\n################################################################################\n# Main\n################################################################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"chosen_directory\", help=\"the directory of fits files to be run (type default for pandeia directories)\")\nparser.add_argument(\"old_tmc\", help=\"the old tmc file\")\nparser.add_argument(\"new_tmc\", help=\"the newer tmc file\")\nargs = parser.parse_args()\n\nhdulist = fits.open(args.old_tmc)\n\ntime_array = []\ncompname = []\nfilename_array = []\ncomment = []\n\nis_test = raw_input(\"Is this a test? (y/n)\")\n\nif args.chosen_directory == \"default\":\n (time_array, compname, filename_array, comment) = get_all_files()\nelse:\n (time_array, compname, filename_array, comment) = get_all_files_chosen_dir(args.chosen_directory)\n\nupdate_file(hdulist, args.new_tmc, is_test, time_array, compname, filename_array, comment)\n\nprint (hdulist[0].header)\nprint (hdulist[1].header)\n","sub_path":"jwst_update_tmc.py","file_name":"jwst_update_tmc.py","file_ext":"py","file_size_in_byte":8966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"133054009","text":"#!/usr/bin/env python\n\"\"\"ROS node for the Wit.ai API\"\"\"\n\nimport roslib\nroslib.load_manifest('wit_ros')\n\nglobal APIKEY\nAPIKEY = None\n\nimport rospy\nimport requests\n\nfrom wit_ros.srv import Interpret, InterpretResponse\nfrom wit_ros.msg import Outcome, Entity\n\ndef interpret(rosrequest):\n rospy.logdebug(\"Interpreting {0}\".format(rosrequest.sentence))\n httpresponse = requests.get('https://api.wit.ai/message?q={sentence}'.format(sentence=rosrequest.sentence), \n headers={\"Authorization\":\"Bearer {key}\".format(key=APIKEY)})\n data = httpresponse.json()\n rospy.logdebug(\"Data: {0}\".format(data))\n\n entities = []\n for name, json in data[\"outcome\"][\"entities\"].iteritems():\n entity = Entity(name = str(name),\n body = str(json[\"body\"]),\n start = int(json[\"start\"]),\n end = int(json[\"end\"]),\n value = str(json[\"value\"]))\n entities += [entity]\n\n outcome = Outcome( confidence = float(data[\"outcome\"][\"confidence\"]), \n entities = entities,\n intent = str(data[\"outcome\"][\"intent\"]))\n\n response = InterpretResponse( msg_body = str(data[\"msg_body\"]),\n msg_id = str(data[\"msg_id\"]),\n outcome = outcome)\n return response\n\nif __name__ == \"__main__\":\n 
rospy.init_node(\"wit_ros\")\n\n if rospy.has_param('~api_key'):\n APIKEY = rospy.get_param(\"~api_key\")\n\n rospy.Service('wit/interpret', Interpret, interpret)\n\n rospy.spin()\n\n else:\n rospy.logerr(\"No API key set (via parameter server). Please set one. \" +\n \"API keys can be obtained via the http://www.wit.ai\")","sub_path":"src/wit_ros/wit_ros.py","file_name":"wit_ros.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"269914944","text":"#common stuff \nimport pinocchio as pin\nfrom pinocchio.utils import *\nimport numpy as np\nfrom numpy import nan\nimport math\nimport time as tm\n\nimport os\nfrom base_controller.utils.common_functions import *\nfrom base_controller.utils.ros_publish import RosPub\nfrom base_controller.utils.kin_dyn_utils import RNEA \nfrom base_controller.utils.kin_dyn_utils import getM\nfrom base_controller.utils.kin_dyn_utils import getg\nfrom base_controller.utils.kin_dyn_utils import getC\n\n\nimport ex_1_conf as conf\n\n#instantiate graphic utils\nos.system(\"killall rosmaster rviz\")\nros_pub = RosPub(\"ur4\")\nrobot = getRobotModel(\"ur4\")\n\n\n# Init variables\nzero = np.array([0.0, 0.0, 0.0, 0.0])\ntime = 0.0\n\n# Init loggers\nq_log = np.empty((4))*nan\nq_des_log = np.empty((4))*nan\nqd_log = np.empty((4))*nan\nqd_des_log = np.empty((4))*nan\nqdd_log = np.empty((4))*nan\nqdd_des_log = np.empty((4))*nan\ntau_log = np.empty((4))*nan\nf_log = np.empty((3,0))*nan\nx_log = np.empty((3,0))*nan\ntime_log = np.empty((0,0))*nan\n\n# M_log = np.empty((4,4))*nan\n# g_log = np.empty ((4,1))*nan\n# C_log = np.empty((4,1))*nan \n\nq = conf.q0\nqd = conf.qd0\nqdd = conf.qdd0\n\nq_des = zero\nqd_des = zero\nqdd_des = zero # joint reference acceleration\n\n# get the ID corresponding to the frame we want to control\nassert(robot.model.existFrame(conf.frame_name))\nframe_ee = robot.model.getFrameId(conf.frame_name)\n\nerror = np.array([1, 1, 1, 1])\n\n\n# Main loop to simulate dynamics\nwhile any(i >= 0.01 for i in np.abs(error)):\n \n # initialize Pinocchio variables\n robot.computeAllTerms(q, qd) \n\n #######################\n # # Exercise 3.1\n #######################\n\n # # compute RNEA with Pinocchio\n # taup = pin.rnea(robot.model, robot.data, q, qd, qdd) \n\n # # compute RNEA with your function\n # tau_ = RNEA(9.81,q,qd,qdd)\n\n # print taup - tau_\n\n # gravity terms \n # Pinocchio\n #g = robot.gravity(q) \n g = getg(q,robot)\n \n\n # Exercise 3.2\n # compute joint space inertia matrix with Pinocchio \n #M = robot.mass(q, False)\n M = getM(q,robot)\n \n # compute joint space intertia matrix with built-in pinocchio rnea\n M_new = np.zeros((4,4))\n for i in range(4):\n ei = np.array([0.0, 0.0, 0.0, 0.0])\n ei[i] = 1\n taup = pin.rnea(robot.model, robot.data, q,np.array([0,0,0,0]) ,ei)\n M_new[:4,i] = taup - g \n \n # Pinocchio bias terms \n #h = robot.nle(q, qd, False) \n C = getC(q,qd,robot) \n # viscous friction to stop the motion\n damping = - 20*qd\n\n x = robot.framePlacement(q, frame_ee).translation \n # compute jacobian of the end effector (in the WF) \n J6 = robot.frameJacobian(q, frame_ee, False, pin.ReferenceFrame.LOCAL_WORLD_ALIGNED) \n # take first 3 rows of J6 cause we have a point contact \n J = J6[:3,:] \n\n #SIMULATION of the forward dynamics \n M_inv = np.linalg.inv(M)\n # Pinocchio\n #qdd = M_inv.dot(damping-h)\n qdd = M_inv.dot(damping -C -g) \n \n # Forward Euler Integration \n qd = qd + qdd*conf.dt \n q = q + conf.dt*qd + 
0.5*conf.dt*conf.dt*qdd \n\n    # Log Data into a vector\n    time_log = np.append(time_log, time)\t\n    q_log = np.vstack((q_log, q ))\n    q_des_log= np.vstack((q_des_log, q_des))\n    qd_log= np.vstack((qd_log, qd))\n    qd_des_log= np.vstack((qd_des_log, qd_des))\n    qdd_log= np.vstack((qdd_log, qdd))\n    qdd_des_log= np.vstack((qdd_des_log, qdd_des))\n\n    # M_log = np.dstack((M_log, M)) \n    # C_log = np.dstack((C_log, C)) \n    # g_log = np.dstack((g_log, g)) \n    \n    # update time\n    time = time + conf.dt \n\n    \n    \n    #publish joint variables\n    ros_pub.publish(robot, q, qd,damping) \n    \n    tm.sleep(conf.dt*conf.SLOW_FACTOR)\n    \n    # stops the while loop if you prematurely hit CTRL+C \n    if ros_pub.isShuttingDown():\n        print (\"Shutting Down\") \n        break;\n    \n#raw_input(\"Robot came to a stop. Press Enter to continue\")\nros_pub.deregister_node()\n \n\n \n# # plot joint variables \n# plotJoint('position', 0, time_log, q_log, q_des_log, qd_log, qd_des_log, qdd_log, qdd_des_log, tau_log)\n# # plotJoint('velocity', 1, time_log, q_log, q_des_log, qd_log, qd_des_log, qdd_log, qdd_des_log, tau_log)\n# # plotJoint('acceleration', 2, time_log, q_log, q_des_log, qd_log, qd_des_log, qdd_log, qdd_des_log, tau_log)\n#plotJoint('torque', 3, time_log, q_log, q_des_log, qd_log, qd_des_log, qdd_log, qdd_des_log, tau_log)\n\n# raw_input(\"Press Enter to continue\")\n\n\n\n\n\n","sub_path":"L1_2_dynamics.py","file_name":"L1_2_dynamics.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"94303086","text":"# coding:utf-8\n\"\"\"\nbased on python2\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport os\nimport json\nimport urllib\nimport shutil\nimport cmd\nimport copy\n\n\nOUTPUT_PATH = \"\"\n\n\ndef replace_slash(input_str):\n    \"\"\"\n    replace all slash in string\n    \"\"\"\n    output_str = \"\"\n    for i in input_str:\n        if i == \"/\":\n            i = \"\"\n        output_str = output_str + i\n    return output_str\n\n\ndef video_process(video_path):\n    # page data\n    print(\"start page_data: {}\".format(video_path), file=sys.stderr)\n    \n    with open(os.path.join(video_path, 'entry.json')) as fh:\n        entry = json.load(fh)\n    title = replace_slash(entry['title'].encode('utf-8'))\n    cover_url = entry['cover']\n    page_data = entry['page_data']\n    part = replace_slash(page_data['part'].encode('utf-8'))\n\n    print(\"title: {}\\tpart: {}\".format(title, part), file=sys.stderr)\n\n    output_folder = os.path.join(OUTPUT_PATH, title)\n    if not os.path.exists(output_folder):\n        os.mkdir(output_folder)\n\n    img_name = \"{}-{}.jpg\".format(title, part)\n    img_file = os.path.join(output_folder, img_name)\n    print(\"begin download cover_img: {}\".format(cover_url), file=sys.stderr)\n    urllib.urlretrieve(cover_url, img_file)\n\n    # src folder\n    video_src_folder_ = [os.path.join(video_path, x) for x in os.listdir(video_path) if os.path.isdir(os.path.join(video_path, x))][0]\n    video_src_folder = copy.deepcopy(video_src_folder_)\n    # dst folder\n    video_dst_folder_temp = os.path.join(output_folder, \"{}-{}\".format(title, part))\n    if not os.path.exists(video_dst_folder_temp):\n        os.makedirs(video_dst_folder_temp)\n    # dst video_name\n    video_dst_name = \"{}-{}\".format(title, part)\n    # rename and copy sub_video to dst_folder\n    video_sub_files = os.listdir(video_src_folder)\n    video_sub_videos = [v for v in video_sub_files if v.endswith('.blv') ]\n    for video_src_sub_name in video_sub_videos:\n        video_src_sub_file = os.path.join(video_src_folder, video_src_sub_name)\n        video_sub_idx = 
video_src_sub_name.split('.')[0]\n video_dst_sub_name = \"{}-{}.flv\".format(video_dst_name, video_sub_idx)\n video_dst_sub_file = os.path.join(video_dst_folder_temp, video_dst_sub_name)\n print(\"begin copy file, dst: {}\".format(video_dst_sub_name), file=sys.stderr)\n shutil.copyfile(video_src_sub_file, video_dst_sub_file)\n\n\ndef main(root_path):\n video_folders = os.listdir(root_path)\n for video_folder in video_folders:\n if video_folder == \".DS_Store\":\n continue\n video_path = os.path.join(root_path, video_folder)\n video_process(video_path)\n\n\nif __name__ == \"__main__\":\n try:\n root_path = sys.argv[1]\n except:\n print(\"Please select a folder\")\n if not OUTPUT_PATH:\n OUTPUT_PATH = root_path.rsplit('/', 1)[0]\n main(root_path)\n","sub_path":"get-bilibili-via-android-single.py","file_name":"get-bilibili-via-android-single.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"327892681","text":"from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nimport pickle\nfrom math import log\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom pymorphy2 import MorphAnalyzer\nfrom nltk.corpus import stopwords\nfrom gensim.models import KeyedVectors\nimport tarfile\n\nstop_words = set(stopwords.words('russian'))\n\n\n# preprocessing function\ndef preprocess(rawtext):\n ans = ''\n for char in rawtext:\n if char.isalpha() or char == ' ':\n ans += char\n else:\n ans += ' '\n words = ans.split(\" \")\n w = []\n for i in words:\n if i != '':\n p = morph.parse(i.lower())[0].normal_form\n if p not in stop_words:\n w.append(p)\n return \" \".join(w)\n\n\nmorph = MorphAnalyzer()\n\n\n# function for corpora creation\ndef work_files():\n answers = pd.read_excel(\"answers_base.xlsx\")\n questions = pd.read_excel(\"queries_base.xlsx\")\n reply_base = pd.DataFrame([answers['Номер связки'], answers[\"Текст ответа\"]]).transpose()\n reply_base = reply_base.to_numpy()\n d = {}\n for i in reply_base:\n d[i[0]] = i[1]\n train, test1 = train_test_split(questions, test_size=0.3)\n ans = pd.concat([pd.Series(row['Номер связки'], row[\"Текст вопросов\"].split('\\n'))\n for _, row in answers.iterrows()]).reset_index()\n new = pd.DataFrame([train['Номер связки\\n'], train[\"Текст вопроса\"]]).transpose()\n new = new.rename(columns={'Номер связки\\n': 'i', \"Текст вопроса\": 'k'}, inplace=False)\n ans = ans.rename(columns={0: 'i', \"index\": 'k'}, inplace=False)\n train = new.append(ans, ignore_index=True)\n train = train.dropna()\n train['f'] = train['i'].apply(lambda x: d[x])\n answers = pd.DataFrame([train['i'], train['f']]).transpose()\n train['proc'] = train['k'].apply(lambda x: preprocess(x))\n return answers, train['proc']\n\n\n# functions for different matrix types\ndef make_tf_idf(corpus):\n vectorizer = TfidfVectorizer()\n td_matrix = pd.DataFrame(vectorizer.fit_transform(corpus).A, columns=vectorizer.get_feature_names())\n return td_matrix, vectorizer\n\n\ndef bm25(tfmtrx, avgdl, N, q, idx, ld):\n k = 2.0\n b = 0.75\n n = np.count_nonzero(tfmtrx[q])\n tf = tfmtrx[q][idx]\n idf = log((N-n+0.5)/(n+0.5))\n score = idf*((tf*(k+1))/(tf+k*(1-b+b*ld/avgdl)))\n return score\n\n\ndef make_bm25(corpus):\n vectorizer = CountVectorizer()\n Y = vectorizer.fit_transform(corpus).toarray()\n names = vectorizer.get_feature_names()\n tfmtrx = pd.DataFrame(data=Y, columns=names)\n N = len(corpus)\n lds = [len(corp) for corp in corpus]\n avgdl = 
sum(lds)/len(lds)\n    bm25mtrx = pd.DataFrame(columns=names)\n    for i, text in enumerate(corpus):\n        current = []\n        t = text.split()\n        for w in names:\n            if w in t:\n                ld = len(t)\n                current.append(bm25(tfmtrx, avgdl, N, w, i, ld))\n            else:\n                current.append(0)\n        bm25mtrx = bm25mtrx.append(pd.Series(current, index=bm25mtrx.columns), ignore_index=True)\n    return bm25mtrx\n\n\ndef normalize_vec(v):\n    return v / np.sqrt(np.sum(v ** 2))\n\n\ndef create_matrix(corpus, model):\n    matrix = []\n    for i in corpus:\n        lemmas = i.split(' ')\n        lemmas_vectors = np.zeros((len(lemmas), model.vector_size))\n        for idx, lemma in enumerate(lemmas):\n            try:\n                if lemma in model:\n                    lemmas_vectors[idx] = model[lemma]\n            except:\n                continue\n        if lemmas_vectors.shape[0] != 0:\n            vec = np.mean(lemmas_vectors, axis=0)\n            matrix.append(normalize_vec(vec))\n    return np.array(matrix)\n\n\ndef create_doc_matrix(docs, model):\n    matrix = []\n    for text in docs:\n        lemmas = text.split(\" \")\n        lemmas_vectors = np.zeros((len(lemmas), model.vector_size))\n        for idx, lemma in enumerate(lemmas):\n            try:\n                if lemma in model:\n                    lemmas_vectors[idx] = normalize_vec(model[lemma])\n            except:\n                continue\n        matrix.append(lemmas_vectors)\n    return matrix\n\n\n# making all the necessary files\ndef make_files():\n    tar = tarfile.open(\"araneum_none_fasttextcbow_300_5_2018.tgz\", \"r\")\n    tar.extractall()\n    model_file = 'araneum_none_fasttextcbow_300_5_2018.model'\n\n    answers, train = work_files()\n\n    model = KeyedVectors.load(model_file)\n\n    tf_matrix, vectorizer = make_tf_idf(train)\n    output = open('tfidf.pkl', 'wb')\n    pickle.dump(tf_matrix, output)\n    output.close()\n    output = open('vectorizer.pkl', 'wb')\n    pickle.dump(vectorizer, output)\n    output.close()\n\n    bm25_matrix = make_bm25(train)\n    bm25_matrix.to_csv('bm25.csv', index=False)\n\n    w2v_matrix = create_matrix(train, model)\n    output = open('w2vclass.pkl', 'wb')\n    pickle.dump(w2v_matrix, output)\n    output.close()\n\n    w2v_matrix = create_doc_matrix(train, model)\n    output = open('w2vexp.pkl', 'wb')\n    pickle.dump(w2v_matrix, output)\n    output.close()\n\n    output = open('answers.pkl', 'wb')\n    pickle.dump(answers, output)\n    output.close()\n\n\ndef main():\n    make_files()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"search engine project/indexation.py","file_name":"indexation.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"58658364","text":"#! 
/usr/bin/python3\n\nimport struct\nimport fcntl\nfrom xxd import xxd_bin\nfrom random import randint\nfrom fcntl import ioctl\n\ndef reset_memory(file_name):\n \"\"\"\n reset memory\n \"\"\"\n FIBER_IOCRESET = 0x6d00\n\n fiber_fp = open(file_name,\"wb\",0)\n ioctl(fiber_fp.fileno(),FIBER_IOCRESET)\n\n fiber_fp.close()\n\n return\n\ndef alloc_memory(file_name,size):\n \"\"\"\n use ioctl alloc memory for fp\n \"\"\"\n FIBER_IOC_ALLOC_MEMORY = 0xc0046d01\n\n fiber_fp = open(file_name,\"wb\",0)\n ioctl(fiber_fp.fileno(),FIBER_IOC_ALLOC_MEMORY,size)\n\n fiber_fp.close()\n\n return\n\ndef recv_data(file_name,size):\n \"\"\"\n this function try recv message\n \"\"\"\n fiber_fp = open(file_name,\"rb\",0)\n\n try:\n #size = 0xff7\n #size = 0xff\n b = fiber_fp.read(size)\n if b:\n print(\"has read %d num from %s\" % (len(b),file_name))\n xxd_bin(b)\n else:\n print(\"none data read from %s\" % file_name)\n print()\n\n finally:\n fiber_fp.close()\n return\n\nif __name__ == \"__main__\":\n a1_size = 0x15\n a2_size = 0x14\n a3_size = 0x4e1\n a4_size = 0x136\n a5_size = 0xc5\n\n reset_memory(\"/dev/fibera1\")\n\n alloc_memory(\"/dev/fibera1\",a1_size)\n alloc_memory(\"/dev/fibera2\",a2_size)\n alloc_memory(\"/dev/fibera3\",a3_size)\n alloc_memory(\"/dev/fibera4\",a4_size)\n alloc_memory(\"/dev/fibera5\",a5_size)\n\n alloc_memory(\"/dev/fiberb1\",a1_size)\n alloc_memory(\"/dev/fiberb2\",a2_size)\n alloc_memory(\"/dev/fiberb3\",a3_size)\n alloc_memory(\"/dev/fiberb4\",a4_size)\n alloc_memory(\"/dev/fiberb5\",a5_size)\n\n recv_data(\"/dev/fibera1\",a1_size)\n recv_data(\"/dev/fibera2\",a2_size)\n recv_data(\"/dev/fibera3\",a3_size)\n recv_data(\"/dev/fibera4\",a4_size)\n recv_data(\"/dev/fibera5\",a5_size)\n\n recv_data(\"/dev/fiberb1\",a1_size)\n recv_data(\"/dev/fiberb2\",a2_size)\n recv_data(\"/dev/fiberb3\",a3_size)\n recv_data(\"/dev/fiberb4\",a4_size)\n recv_data(\"/dev/fiberb5\",a5_size)\n","sub_path":"driver/fiber/recv_test.py","file_name":"recv_test.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"58194633","text":"from OpenGL.GLUT import *\nfrom OpenGL.GLU import * \nfrom OpenGL.GL import *\nimport math\nimport png\n\ndef desenha():\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n glRotatef(2,1,3,0)\n desenhaEsfera()\n glutSwapBuffers()\n \ndef timer(i):\n glutPostRedisplay()\n glutTimerFunc(50,timer,1)\n\ndef getPonto(i, j):\n teta = ((i*math.pi)/nEsfera) - (math.pi/r)\n phi = j*r*math.pi/nRotacao\n\n x = r*math.cos(teta)*math.cos(phi)\n y = r*math.sin(teta)\n z = r*math.cos(teta)*math.sin(phi)\n return [x,y,z]\n\ndef desenhaEsfera():\n glBegin(GL_QUAD_STRIP)\n for i in range(0,nEsfera):\n #glColor3fv(cores[i])\n for j in range(0, nRotacao): \n glTexCoord2f(i/nEsfera, -j/nRotacao)\n glVertex3fv(getPonto(i,j))\n glTexCoord2f((i+1)/nEsfera, -j/nRotacao)\n glVertex3fv(getPonto(i+1,j))\n glTexCoord2f(i/nEsfera, -(j+1)/nRotacao)\n glVertex3fv(getPonto(i,j+1))\n glTexCoord2f((i+1)/nEsfera, -(j+1)/nRotacao)\n glVertex3fv(getPonto(i+1,j+1))\n \n glEnd()\n\ndef LoadTextures():\n global texture\n texture = glGenTextures(1)\n\n glBindTexture(GL_TEXTURE_2D, texture)\n reader = png.Reader(filename='mapa_obesidade.png')\n w, h, pixels, metadata = reader.read_flat()\n if(metadata['alpha']):\n modo = GL_RGBA\n else:\n modo = GL_RGB\n glPixelStorei(GL_UNPACK_ALIGNMENT,1)\n glTexImage2D(GL_TEXTURE_2D, 0, modo, w, h, 0, modo, GL_UNSIGNED_BYTE, pixels.tolist())\n# glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, 
GL_CLAMP)\n# glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\n\n\n\n\n# PROGRAMA PRINCIPAL\n#cores = ( (1,0,0),(1,1,0),(0,1,0),(0,1,1),(0,0,1),(1,0,1),(0.5,1,1),(1,0,0.5) )\n\nnEsfera = 25 #i\nnRotacao = 25 #j\nr = 2\n\nglutInit(sys.argv)\nglutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH | GLUT_MULTISAMPLE)\nglutInitWindowSize(800,600)\nglutCreateWindow(\"Adultos Obesos em 2016 (%)\")\nglutDisplayFunc(desenha)\nglEnable(GL_MULTISAMPLE)\nglEnable(GL_DEPTH_TEST)\nglClearColor(0.,0.,0.,1.)\n\nglClearDepth(1.0)\nglDepthFunc(GL_LESS) \nglEnable(GL_DEPTH_TEST) \nglShadeModel(GL_SMOOTH) \nglMatrixMode(GL_PROJECTION)\n\ngluPerspective(45,800.0/600.0,0.1,50.0)\n\nglMatrixMode(GL_MODELVIEW)\n\nglTranslatef(0.0,0.0,-8)\nglRotatef(1,1,1,1)\nglutTimerFunc(50,timer,1)\nLoadTextures()\nglEnable(GL_TEXTURE_2D)\nglutMainLoop()","sub_path":"mapa_obesidade.py","file_name":"mapa_obesidade.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"384212065","text":"import copy\r\n\r\nfrom queue import Queue, Empty\r\n\r\nfrom types import FunctionType\r\nfrom typing import Union, List\r\n\r\nfrom .types import Input, Options\r\nfrom .context import opt_set\r\nfrom .exceptions import ParameterError, DefinitionError, PARAMETER_TYPE, MISS_PARAMETER\r\n\r\n\r\nSKIP_GETVALUE = ('Input', )\r\n\r\n\r\ndef assert_int(a):\r\n if not a.isdigit():\r\n error = f'\"{a}\" is not a int'\r\n raise ParameterError(PARAMETER_TYPE, arg=a, msg=error)\r\n return int(a)\r\n\r\n\r\ndef assert_float(a):\r\n try:\r\n arg = float(a)\r\n except ValueError:\r\n error = f'\"{a}\" is not a float'\r\n raise ParameterError(PARAMETER_TYPE, arg=a, msg=error)\r\n return arg\r\n\r\n\r\nclass BaseOptionsHandler:\r\n options_type = None\r\n\r\n def process_options(self, func: FunctionType, options):\r\n \"\"\"\r\n Process parameters and return a list or dict\r\n\r\n :param func: The command method\r\n :param options: Parameter container specified by `options_type`\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n\r\nclass SimpleOptionsHandler(BaseOptionsHandler):\r\n \"\"\" A simple parameters handler \"\"\"\r\n options_type = 'Queue'\r\n\r\n @staticmethod\r\n def validate_options(func: FunctionType, options: Queue, args: list):\r\n annotation = func.__annotations__\r\n al = []\r\n\r\n for a in args:\r\n arg_type = annotation.get(a)\r\n if hasattr(arg_type, '__name__') and arg_type.__name__ in SKIP_GETVALUE:\r\n arg = None\r\n if arg_type.__name__ == 'Input':\r\n default = func.__kwdefaults__.get(a) if func.__kwdefaults__ else None\r\n arg = Input(a, default=default)\r\n\r\n al.append(arg)\r\n continue\r\n\r\n try:\r\n arg = options.get(False)\r\n except Empty:\r\n break\r\n\r\n if arg_type == int:\r\n arg = assert_int(arg)\r\n elif arg_type == float:\r\n arg = assert_float(arg)\r\n\r\n al.append(arg)\r\n\r\n options.full()\r\n for i in al:\r\n options.put(i)\r\n\r\n def process_options(self, func: FunctionType, options: Queue) -> Union[list, dict, None]:\r\n args_count = func.__code__.co_argcount\r\n if 'args' in func.__code__.co_varnames:\r\n args_count += 1\r\n # pop up self & variables.\r\n args = 
list(func.__code__.co_varnames[1: args_count])\r\n\r\n if func.__kwdefaults__:\r\n for k in func.__kwdefaults__.keys():\r\n args.append(k)\r\n\r\n if not args:\r\n return\r\n\r\n if 'args' in args:\r\n res = []\r\n while True:\r\n try:\r\n res.append(options.get(False))\r\n except Empty:\r\n break\r\n else:\r\n self.validate_options(func, options, args)\r\n\r\n res = {}\r\n for arg in args:\r\n try:\r\n res[arg] = options.get(False)\r\n except Empty:\r\n if func.__defaults__:\r\n if len(func.__defaults__) == len(args):\r\n break\r\n elif func.__kwdefaults__:\r\n if arg in func.__kwdefaults__:\r\n break\r\n else:\r\n raise ParameterError(MISS_PARAMETER, arg=arg)\r\n return res\r\n\r\n\r\nclass OptionsTagHandler(BaseOptionsHandler):\r\n options_type = 'list'\r\n\r\n @staticmethod\r\n def process_context(func):\r\n annotation = func.__annotations__\r\n\r\n tag_model_context = {}\r\n for arg, t in annotation.items():\r\n if issubclass(t, Options):\r\n if isinstance(t.tag, str):\r\n common_tag = t.tag\r\n elif isinstance(t.tag, (tuple, list)) and t.tag:\r\n common_tag = t.tag[0]\r\n else:\r\n common_tag = str(t.tag)\r\n\r\n tag_model_context[arg] = {\r\n 'arglen': t.arglen,\r\n 'tag': t.tag,\r\n 'common_tag': common_tag,\r\n }\r\n\r\n tag_context = opt_set.get(func.__name__)\r\n if tag_context:\r\n tag_context.update(tag_model_context)\r\n else:\r\n tag_context = tag_model_context\r\n\r\n tmp_tag_context = copy.deepcopy(tag_context)\r\n\r\n tag_list = []\r\n if tag_context:\r\n for k, v in tmp_tag_context.items():\r\n if isinstance(v['tag'], str):\r\n if v['tag'] in tag_list:\r\n msg = f'Duplicate tag: {v[\"tag\"]}'\r\n raise DefinitionError(msg)\r\n\r\n tag_list.append(v['tag'])\r\n elif isinstance(v['tag'], (tuple, list)):\r\n for i in v['tag']:\r\n if i in tag_list:\r\n msg = f'Duplicate tag: {i}'\r\n raise DefinitionError(msg)\r\n\r\n tag_list.append(v['tag'])\r\n\r\n tag_context[k]['required'] = True\r\n if func.__kwdefaults__ and k in func.__kwdefaults__:\r\n tag_context[k]['required'] = False\r\n return tag_context\r\n\r\n @staticmethod\r\n def validate_type(func, args: dict):\r\n annotation = func.__annotations__\r\n result = {}\r\n\r\n for k, v in args.items():\r\n arg_type = annotation.get(k)\r\n arg = v\r\n\r\n if hasattr(arg_type, '__name__') and arg_type.__name__ in SKIP_GETVALUE:\r\n arg = None\r\n if arg_type.__name__ == 'Input':\r\n default = func.__kwdefaults__.get(k)\r\n arg = Input(k, default=default)\r\n\r\n result[k] = arg\r\n continue\r\n\r\n if isinstance(v, str):\r\n if arg_type == int:\r\n arg = assert_int(v)\r\n elif arg_type == float:\r\n arg = assert_float(v)\r\n\r\n elif isinstance(v, list):\r\n arg = []\r\n for i in v:\r\n if arg_type == int:\r\n i = assert_int(i)\r\n elif arg_type == float:\r\n i = assert_float(i)\r\n arg.append(i)\r\n\r\n result[k] = arg\r\n return result\r\n\r\n def get_positional_parameters(self, context, opts, func):\r\n args_count = func.__code__.co_argcount\r\n if func.__kwdefaults__:\r\n args_count += len(func.__kwdefaults__)\r\n\r\n varnames = list(func.__code__.co_varnames[1: args_count])\r\n\r\n for key in context.keys():\r\n varnames.remove(key)\r\n\r\n pos_args = []\r\n for p in opts:\r\n if self.find_tag(p, context):\r\n break\r\n pos_args.append(p)\r\n\r\n args = {}\r\n\r\n for i in varnames[:]:\r\n var_type = func.__annotations__.get(i)\r\n if var_type and hasattr(var_type, '__name__') and var_type.__name__ == 'Input':\r\n args[i] = None\r\n varnames.remove(i)\r\n\r\n for index, var in enumerate(varnames):\r\n try:\r\n args[var] = 
pos_args[index]\r\n except IndexError:\r\n raise ParameterError(MISS_PARAMETER, arg=var)\r\n return args\r\n\r\n def get_tag_parameters(self, context, opts):\r\n tag_args = {}\r\n tmp_context = context.copy()\r\n for k, v in context.items():\r\n tag = v['tag']\r\n index = None\r\n if isinstance(tag, str):\r\n if tag in opts:\r\n index = opts.index(tag)\r\n elif isinstance(tag, (tuple, list)):\r\n for t in tag:\r\n if t in opts:\r\n index = opts.index(t)\r\n\r\n if index is None:\r\n if v.get('required', False) is False:\r\n continue\r\n else:\r\n raise ParameterError(MISS_PARAMETER, arg=k)\r\n\r\n params = []\r\n for p in opts[index + 1:]:\r\n if self.find_tag(p, tmp_context):\r\n break\r\n params.append(p)\r\n\r\n tag_args[k] = params\r\n return tag_args\r\n\r\n @staticmethod\r\n def find_tag(p, context):\r\n found = False\r\n for ctx in context.values():\r\n tag = ctx['tag']\r\n if isinstance(tag, str):\r\n if p == tag:\r\n found = True\r\n break\r\n elif isinstance(tag, (tuple, list)):\r\n if p in tag:\r\n found = True\r\n break\r\n return found\r\n\r\n @staticmethod\r\n def validate_tag_parameters(tags, context):\r\n args = {}\r\n for k, v in context.items():\r\n tag_args_list = tags.get(k)\r\n\r\n if tag_args_list:\r\n s = ''\r\n if len(tag_args_list) > v['arglen']:\r\n if v['arglen'] > 1:\r\n s = 's'\r\n\r\n msg = f'\"{k}\" takes {v[\"arglen\"]} parameter{s} but {len(tag_args_list)} were given'\r\n raise DefinitionError(msg)\r\n elif len(tag_args_list) < v['arglen']:\r\n if v[\"arglen\"] - len(tag_args_list):\r\n s = 's'\r\n\r\n msg = f'\"{k}\"[{v[\"common_tag\"]}] missing {v[\"arglen\"] - len(tag_args_list)} required parameter{s}'\r\n raise DefinitionError(msg)\r\n else:\r\n if len(tag_args_list) == 1:\r\n args[k] = tag_args_list[0]\r\n else:\r\n args[k] = tag_args_list.copy()\r\n else:\r\n if v['required']:\r\n raise ParameterError(MISS_PARAMETER, arg=k)\r\n return args\r\n\r\n def process_options(self, func: FunctionType, options: List[str]) -> dict:\r\n args_count = func.__code__.co_argcount\r\n if func.__kwdefaults__:\r\n args_count += len(func.__kwdefaults__)\r\n\r\n if args_count != len(func.__code__.co_varnames):\r\n msg = 'Parameters after `*` need to define default value'\r\n raise DefinitionError(msg)\r\n\r\n context = self.process_context(func)\r\n\r\n has_tag = False\r\n for var in func.__code__.co_varnames[1: args_count]:\r\n found = False if not context.get(var) else True\r\n if found is False:\r\n if has_tag:\r\n msg = 'Cannot define positional parameter after parameter decorated by `Options`.'\r\n raise DefinitionError(msg)\r\n else:\r\n has_tag = found\r\n\r\n pos_args = self.get_positional_parameters(context, options, func)\r\n tag_args = self.get_tag_parameters(context, options)\r\n args = self.validate_tag_parameters(tag_args, context)\r\n\r\n args.update(pos_args)\r\n args = self.validate_type(func, args)\r\n\r\n return args\r\n","sub_path":"likeshell/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":11082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"420163724","text":"import sqlite3, logging, sys, os\nfrom flask import Flask, jsonify, json, render_template, request, url_for, redirect, flash, Response\nfrom werkzeug.exceptions import abort, HTTPException\n\nfrom http import HTTPStatus\n\nhomepage_view = 0\n\n# Function to get a database connection.\n# This function connects to database with the name `database.db`\ndef get_db_connection():\n connection = 
sqlite3.connect('database.db')\n    connection.row_factory = sqlite3.Row\n    return connection\n\n\n# Function to get a post using its ID\ndef get_post(post_id):\n    connection = get_db_connection()\n    post = connection.execute('SELECT * FROM posts WHERE id = ?',\n                        (post_id,)).fetchone()\n    connection.close()\n    return post\n\n\n# Function to count the number of posts in the database\ndef count_post():\n    connection = sqlite3.connect('database.db')\n    cursor = connection.cursor()\n    post_count = cursor.execute('SELECT COUNT(id) FROM posts').fetchone()\n    connection.close()\n    return post_count[0]\n\n\n# Function to count the total connection to the database\ndef count_db_connection():\n    connection = sqlite3.connect('database.db')\n    cursor = connection.cursor()\n    db_connection_count = cursor.execute('SELECT SUM(article_view) FROM posts').fetchall()\n    connection.close()\n    db_connection_final_count = db_connection_count[0][0] + homepage_view\n    return db_connection_final_count\n\n\n# Function to increment database connection by 1 per article visit\ndef update_db_connection(post_id):\n    connection = get_db_connection()\n    cur = connection.cursor()\n    cur.execute('UPDATE posts SET article_view = article_view + 1 WHERE id = ?',\n                        (post_id,)).fetchone()\n    connection.commit()\n    connection.close()\n\n\n# Define the Flask application\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'your secret key'\n\n# Define the main route of the web application\n@app.route('/')\ndef index():\n    connection = get_db_connection()\n    # Add connection count from homepage view\n    global homepage_view\n    homepage_view += 1\n    posts = connection.execute('SELECT * FROM posts').fetchall()\n    connection.close()\n    return render_template('index.html', posts=posts)\n\n\n''' \nDefine how each individual article is rendered and increment the db connection count by 1 per article visit.\nIf the post ID is not found a 404 page is shown \n'''\n@app.route('/<int:post_id>')\ndef post(post_id):\n    post = get_post(post_id)\n    # Update the db connection\n    update_db_connection(post_id)\n    if post is None:\n        # Log accessing non-existing article\n        app.logger.error('A non-existing article was accessed! 
\"404\"')\n return render_template('404.html'), 404\n else:\n # Log accessing existing article\n app.logger.info('Article ' + '\"' + post['title'] + '\"' + ' retrieved!')\n return render_template('post.html', post=post)\n\n\n# Define the About Us page\n@app.route('/about')\ndef about():\n # Log accessing About Us page\n app.logger.info('\"About Us\" page was retrieved!')\n return render_template('about.html')\n\n\n# Define the post creation functionality\n@app.route('/create', methods=('GET', 'POST'))\ndef create():\n if request.method == 'POST':\n title = request.form['title']\n content = request.form['content']\n\n if not title:\n flash('Title is required!')\n else:\n connection = get_db_connection()\n connection.execute('INSERT INTO posts (title, content, article_view ) VALUES (?, ?, ?)',\n (title, content, '0'))\n connection.commit()\n connection.close()\n\n # Log newly created article\n app.logger.info('A new article ' + '\"' + title + '\"' + ' was created!')\n return redirect(url_for('index'))\n\n return render_template('create.html')\n\n\n# Define the Healthcheck endpoint\n@app.route('/healthz')\ndef healthz():\n try:\n connection = get_db_connection()\n connection.execute('SELECT * FROM posts').fetchall()\n response = app.response_class(\n response=json.dumps({\"result\": \"OK - healthy\"}),\n status=200,\n mimetype='application/json'\n )\n except sqlite3.OperationalError as err:\n response = app.response_class(\n response=json.dumps({\"result\": \"ERROR - unhealthy\"}),\n status=500,\n mimetype='application/json'\n )\n return response\n\n# Define the metrics endpoint\n@app.route('/metrics')\ndef metrics():\n response = app.response_class(\n response=json.dumps({\"db_connection_count\": count_db_connection(), \"post_count\": count_post()}),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\n# start the application on port 3111\nif __name__ == \"__main__\":\n loglevel = os.getenv(\"LOGLEVEL\", \"DEBUG\").upper()\n loglevel = (\n getattr(logging, loglevel)\n if loglevel in [\"CRITICAL\", \"DEBUG\", \"ERROR\", \"INFO\", \"WARNING\", ]\n else logging.DEBUG\n )\n\n # Set logger to handle STDOUT and STDERR\n stdout_handler = logging.StreamHandler(sys.stdout)\n stderr_handler = logging.StreamHandler(sys.stderr)\n handlers = [stderr_handler, stdout_handler]\n\n # Create the log file and format each log\n logging.basicConfig(\n format='%(levelname)s:%(name)s:%(asctime)s, %(message)s',\n level=loglevel,\n datefmt='%m-%d-%Y, %H:%M:%S',\n handlers=handlers\n )\n\n app.run(host='0.0.0.0', port='3111')\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"169428039","text":"import os\nimport sys\nimport tempfile\nimport unittest\n\nfrom m42.common import yamlutil\n\n\nclass TestYamlUtil(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n self.yaml_filename\\\n = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test.yaml')\n\n\n def test_load(self):\n \"\"\" Test that yaml.load works properly. 
\"\"\"\n d = yamlutil.load(self.yaml_filename)\n expected_tempdir = tempfile.gettempdir()\n config_tempdir = d.get('tmp-dir')\n self.assertTrue(expected_tempdir, config_tempdir)\n\n\n def test_load_file_or_url(self):\n d = yamlutil.load_file_or_url(self.yaml_filename)\n d = yamlutil.load_file_or_url(\n 'http://vmimages.milestone42.com/krasue.zbox')\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"m42/common/test/test_yamlutil.py","file_name":"test_yamlutil.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"528672051","text":"from pommerman.agents import BaseAgent\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical\nfrom a3c_mlp import A3C_MLP_NET\nfrom utils import *\nfrom features import *\nfrom action_prune import *\n\n\nclass ISSCAgent(BaseAgent):\n def __init__(self, modelpath, *args, **kwargs):\n super(ISSCAgent, self).__init__(*args, **kwargs)\n model = A3C_MLP_NET()\n model = load_checkpoint(modelpath, model)\n # self.me = id\n self.model = model\n self.reward = None\n self.rewards = []\n self.values = []\n self.logProbs = []\n self.entropies = []\n self.train = False\n self.stage_signal = 0\n self.prev_actions = [None for _ in range(2)]\n self.prev_positions = [None for _ in range(2)]\n self._recently_visited_positions = []\n self._recently_visited_length = 6\n self._prev_direction = None\n self.prev_states = [None for _ in range(2)]\n\n def clear_actions(self):\n self.values = []\n self.logProbs = []\n self.rewards = []\n self.entropies = []\n self.prev_states = [None for _ in range(2)]\n self.prev_actions = [None for _ in range(2)]\n self.prev_postions = [None for _ in range(2)]\n return self\n\n def act(self, obs, action_space):\n\n def convert_bombs(bomb_map):\n '''Flatten outs the bomb array'''\n ret = []\n locations = np.where(bomb_map > 0)\n for r, c in zip(locations[0], locations[1]):\n ret.append({\n 'position': (r, c),\n 'blast_strength': int(bomb_map[(r, c)])\n })\n return ret\n\n self.my_position = tuple(obs['position'])\n self.board = np.array(obs['board'])\n self.bombs = convert_bombs(np.array(obs['bomb_blast_strength']))\n self.enemies = [constants.Item(e) for e in obs['enemies']]\n self.ammo = int(obs['ammo'])\n self.blast_strength = int(obs['blast_strength'])\n self.items, self.dist, self.prev = self._djikstra(\n self.board, self.my_position, self.bombs, self.enemies, depth=10)\n safe_actions = get_filtered_actions(obs, self.prev_states) #, self.prev_actions, self.prev_positions\n data = get_feature(self.board, safe_actions, self.my_position, self.dist, self.items, self.prev, self.enemies)\n input_tmp = torch.tensor(data).float()\n logit,value = self.model(input_tmp)\n logit_cp = copy.deepcopy(logit.clone().detach().numpy())\n for idx in range(6):\n if idx not in safe_actions:\n logit_cp[0][idx] = -float(\"inf\")\n\n if self.train:\n prob_cp = F.softmax(torch.Tensor(logit_cp), dim=-1)\n action = Categorical(prob_cp).sample().unsqueeze(0)\n else:\n action = torch.argmax(torch.Tensor(logit_cp), dim=-1)\n act = action.item()\n\n if near_target(self.my_position, self.items, self.enemies, self.dist, self.prev, 3) != None:\n if act not in safe_actions:\n act = safe_actions[random.randint(0,len(safe_actions)-1)]\n self.stage_signal = 1\n else:\n self.stage_signal = 0\n while True:\n # Move if we are in an unsafe place.\n unsafe_directions = self._directions_in_range_of_bomb(\n self.board, self.my_position, self.bombs, self.dist)\n 
if unsafe_directions:\n directions = self._find_safe_directions(\n self.board, self.my_position, unsafe_directions, self.bombs, self.enemies)\n act = random.choice(directions).value\n if act not in safe_actions:\n act = safe_actions[random.randint(0, len(safe_actions) - 1)]\n break\n\n\n # Lay pomme if we are adjacent to an enemy.\n if self._is_adjacent_enemy(self.items, self.dist, self.enemies) and self._maybe_bomb(\n self.ammo, self.blast_strength, self.items, self.dist, self.my_position):\n act = constants.Action.Bomb.value\n break\n\n # Move towards an enemy if there is one in exactly three reachable spaces.\n direction = self._near_enemy(self.my_position, self.items, self.dist, self.prev, self.enemies, 3)\n if direction is not None and (self._prev_direction != direction or\n random.random() < .5):\n self._prev_direction = direction\n act = direction\n if act not in safe_actions:\n act = safe_actions[random.randint(0, len(safe_actions) - 1)]\n break\n # return direction.value\n\n # Move towards a good item if there is one within two reachable spaces.\n direction = self._near_good_powerup(self.my_position, self.items, self.dist, self.prev, 2)\n if direction is not None:\n act = direction\n if act not in safe_actions:\n act = safe_actions[random.randint(0, len(safe_actions) - 1)]\n break\n # return direction.value\n\n # Maybe lay a bomb if we are within a space of a wooden wall.\n if self._near_wood(self.my_position, self.items, self.dist, self.prev, 1):\n if self._maybe_bomb(self.ammo, self.blast_strength, self.items, self.dist, self.my_position):\n act = constants.Action.Bomb.value\n break\n else:\n act = constants.Action.Stop.value\n break\n\n # Move towards a wooden wall if there is one within two reachable spaces and you have a bomb.\n direction = self._near_wood(self.my_position, self.items, self.dist, self.prev, 2)\n if direction is not None:\n directions = self._filter_unsafe_directions(self.board, self.my_position,\n [direction], self.bombs)\n if directions:\n act = directions[0].value\n if act not in safe_actions:\n act = safe_actions[random.randint(0, len(safe_actions) - 1)]\n break\n # return directions[0].value\n\n # Choose a random but valid direction.\n directions = [\n constants.Action.Stop, constants.Action.Left,\n constants.Action.Right, constants.Action.Up, constants.Action.Down\n ]\n valid_directions = self._filter_invalid_directions(\n self.board, self.my_position, directions, self.enemies)\n directions = self._filter_unsafe_directions(self.board, self.my_position,\n valid_directions, self.bombs)\n directions = self._filter_recently_visited(\n directions, self.my_position, self._recently_visited_positions)\n if len(directions) > 1:\n directions = [k for k in directions if k != constants.Action.Stop]\n if not len(directions):\n directions = [constants.Action.Stop]\n\n # Add this position to the recently visited uninteresting positions so we don't return immediately.\n self._recently_visited_positions.append(self.my_position)\n self._recently_visited_positions = self._recently_visited_positions[\n -self._recently_visited_length:]\n\n act = random.choice(directions).value\n if act not in safe_actions:\n act = safe_actions[random.randint(0, len(safe_actions) - 1)]\n break\n prob = F.softmax(logit, dim=-1)\n log_prob = F.log_softmax(logit, dim=-1)\n entropy = -(log_prob * prob).sum(1)\n log_prob = log_prob.gather(1, torch.LongTensor([[act]]))\n self.entropies.append(entropy)\n self.values.append(value)\n self.logProbs.append(log_prob)\n self.prev_actions[:-1] = 
self.prev_actions[1:]\n self.prev_actions[-1] = act\n self.prev_positions[:-1] = self.prev_positions[1:]\n self.prev_positions[-1] = self.my_position\n self.prev_states[0] = self.prev_states[1]\n self.prev_states[1] = obs\n return act\n\n @staticmethod\n def _djikstra(board, my_position, bombs, enemies, depth=None, exclude=None):\n assert (depth is not None)\n\n if exclude is None:\n exclude = [\n constants.Item.Fog, constants.Item.Rigid, constants.Item.Flames\n ]\n\n def out_of_range(p_1, p_2):\n '''Determines if two points are out of rang of each other'''\n x_1, y_1 = p_1\n x_2, y_2 = p_2\n return abs(y_2 - y_1) + abs(x_2 - x_1) > depth\n\n items = defaultdict(list)\n dist = {}\n prev = {}\n Q = queue.Queue()\n\n my_x, my_y = my_position\n for r in range(max(0, my_x - depth), min(len(board), my_x + depth)):\n for c in range(max(0, my_y - depth), min(len(board), my_y + depth)):\n position = (r, c)\n if any([\n out_of_range(my_position, position),\n utility.position_in_items(board, position, exclude),\n ]):\n continue\n\n prev[position] = None\n item = constants.Item(board[position])\n items[item].append(position)\n\n if position == my_position:\n Q.put(position)\n dist[position] = 0\n else:\n dist[position] = np.inf\n\n for bomb in bombs:\n if bomb['position'] == my_position:\n items[constants.Item.Bomb].append(my_position)\n\n while not Q.empty():\n position = Q.get()\n\n if utility.position_is_passable(board, position, enemies):\n x, y = position\n val = dist[(x, y)] + 1\n for row, col in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n new_position = (row + x, col + y)\n if new_position not in dist:\n continue\n\n if val < dist[new_position]:\n dist[new_position] = val\n prev[new_position] = position\n Q.put(new_position)\n elif (val == dist[new_position] and random.random() < .5):\n dist[new_position] = val\n prev[new_position] = position\n\n return items, dist, prev\n\n def _directions_in_range_of_bomb(self, board, my_position, bombs, dist):\n ret = defaultdict(int)\n\n x, y = my_position\n for bomb in bombs:\n position = bomb['position']\n distance = dist.get(position)\n if distance is None:\n continue\n\n bomb_range = bomb['blast_strength']\n if distance > bomb_range:\n continue\n\n if my_position == position:\n # We are on a bomb. 
All directions are in range of bomb.\n for direction in [\n constants.Action.Right,\n constants.Action.Left,\n constants.Action.Up,\n constants.Action.Down,\n ]:\n ret[direction] = max(ret[direction], bomb['blast_strength'])\n elif x == position[0]:\n if y < position[1]:\n # Bomb is right.\n ret[constants.Action.Right] = max(\n ret[constants.Action.Right], bomb['blast_strength'])\n else:\n # Bomb is left.\n ret[constants.Action.Left] = max(ret[constants.Action.Left],\n bomb['blast_strength'])\n elif y == position[1]:\n if x < position[0]:\n # Bomb is down.\n ret[constants.Action.Down] = max(ret[constants.Action.Down],\n bomb['blast_strength'])\n else:\n # Bomb is down.\n ret[constants.Action.Up] = max(ret[constants.Action.Up],\n bomb['blast_strength'])\n return ret\n\n def _find_safe_directions(self, board, my_position, unsafe_directions,\n bombs, enemies):\n\n def is_stuck_direction(next_position, bomb_range, next_board, enemies):\n '''Helper function to do determine if the agents next move is possible.'''\n Q = queue.PriorityQueue()\n Q.put((0, next_position))\n seen = set()\n\n next_x, next_y = next_position\n is_stuck = True\n while not Q.empty():\n dist, position = Q.get()\n seen.add(position)\n\n position_x, position_y = position\n if next_x != position_x and next_y != position_y:\n is_stuck = False\n break\n\n if dist > bomb_range:\n is_stuck = False\n break\n\n for row, col in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n new_position = (row + position_x, col + position_y)\n if new_position in seen:\n continue\n\n if not utility.position_on_board(next_board, new_position):\n continue\n\n if not utility.position_is_passable(next_board,\n new_position, enemies):\n continue\n\n dist = abs(row + position_x - next_x) + abs(col + position_y - next_y)\n Q.put((dist, new_position))\n return is_stuck\n\n # All directions are unsafe. Return a position that won't leave us locked.\n safe = []\n\n if len(unsafe_directions) == 4:\n next_board = board.copy()\n next_board[my_position] = constants.Item.Bomb.value\n\n for direction, bomb_range in unsafe_directions.items():\n next_position = utility.get_next_position(\n my_position, direction)\n next_x, next_y = next_position\n if not utility.position_on_board(next_board, next_position) or \\\n not utility.position_is_passable(next_board, next_position, enemies):\n continue\n\n if not is_stuck_direction(next_position, bomb_range, next_board,\n enemies):\n # We found a direction that works. The .items provided\n # a small bit of randomness. So let's go with this one.\n return [direction]\n if not safe:\n safe = [constants.Action.Stop]\n return safe\n\n x, y = my_position\n disallowed = [] # The directions that will go off the board.\n\n for row, col in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n position = (x + row, y + col)\n direction = utility.get_direction(my_position, position)\n\n # Don't include any direction that will go off of the board.\n if not utility.position_on_board(board, position):\n disallowed.append(direction)\n continue\n\n # Don't include any direction that we know is unsafe.\n if direction in unsafe_directions:\n continue\n\n if utility.position_is_passable(board, position,\n enemies) or utility.position_is_fog(\n board, position):\n safe.append(direction)\n\n if not safe:\n # We don't have any safe directions, so return something that is allowed.\n safe = [k for k in unsafe_directions if k not in disallowed]\n\n if not safe:\n # We don't have ANY directions. 
So return the stop choice.\n return [constants.Action.Stop]\n\n return safe\n\n @staticmethod\n def _is_adjacent_enemy(items, dist, enemies):\n for enemy in enemies:\n for position in items.get(enemy, []):\n if dist[position] == 1:\n return True\n return False\n\n @staticmethod\n def _has_bomb(obs):\n return obs['ammo'] >= 1\n\n @staticmethod\n def _maybe_bomb(ammo, blast_strength, items, dist, my_position):\n \"\"\"Returns whether we can safely bomb right now.\n\n Decides this based on:\n 1. Do we have ammo?\n 2. If we laid a bomb right now, will we be stuck?\n \"\"\"\n # Do we have ammo?\n if ammo < 1:\n return False\n\n # Will we be stuck?\n x, y = my_position\n for position in items.get(constants.Item.Passage):\n if dist[position] == np.inf:\n continue\n\n # We can reach a passage that's outside of the bomb strength.\n if dist[position] > blast_strength:\n return True\n\n # We can reach a passage that's outside of the bomb scope.\n position_x, position_y = position\n if position_x != x and position_y != y:\n return True\n\n return False\n\n @staticmethod\n def _nearest_position(dist, objs, items, radius):\n nearest = None\n dist_to = max(dist.values())\n\n for obj in objs:\n for position in items.get(obj, []):\n d = dist[position]\n if d <= radius and d <= dist_to:\n nearest = position\n dist_to = d\n\n return nearest\n\n @staticmethod\n def _get_direction_towards_position(my_position, position, prev):\n if not position:\n return None\n\n next_position = position\n while prev[next_position] != my_position:\n next_position = prev[next_position]\n\n return utility.get_direction(my_position, next_position)\n\n @classmethod\n def _near_enemy(cls, my_position, items, dist, prev, enemies, radius):\n nearest_enemy_position = cls._nearest_position(dist, enemies, items,\n radius)\n return cls._get_direction_towards_position(my_position,\n nearest_enemy_position, prev)\n\n @classmethod\n def _near_good_powerup(cls, my_position, items, dist, prev, radius):\n objs = [\n constants.Item.ExtraBomb, constants.Item.IncrRange,\n constants.Item.Kick\n ]\n nearest_item_position = cls._nearest_position(dist, objs, items, radius)\n return cls._get_direction_towards_position(my_position,\n nearest_item_position, prev)\n\n @classmethod\n def _near_wood(cls, my_position, items, dist, prev, radius):\n objs = [constants.Item.Wood]\n nearest_item_position = cls._nearest_position(dist, objs, items, radius)\n return cls._get_direction_towards_position(my_position,\n nearest_item_position, prev)\n\n @staticmethod\n def _filter_invalid_directions(board, my_position, directions, enemies):\n ret = []\n for direction in directions:\n position = utility.get_next_position(my_position, direction)\n if utility.position_on_board(\n board, position) and utility.position_is_passable(\n board, position, enemies):\n ret.append(direction)\n return ret\n\n @staticmethod\n def _filter_unsafe_directions(board, my_position, directions, bombs):\n ret = []\n for direction in directions:\n x, y = utility.get_next_position(my_position, direction)\n is_bad = False\n for bomb in bombs:\n bomb_x, bomb_y = bomb['position']\n blast_strength = bomb['blast_strength']\n if (x == bomb_x and abs(bomb_y - y) <= blast_strength) or \\\n (y == bomb_y and abs(bomb_x - x) <= blast_strength):\n is_bad = True\n break\n if not is_bad:\n ret.append(direction)\n return ret\n\n @staticmethod\n def _filter_recently_visited(directions, my_position,\n recently_visited_positions):\n ret = []\n for direction in directions:\n if not utility.get_next_position(\n 
my_position, direction) in recently_visited_positions:\n ret.append(direction)\n\n if not ret:\n ret = directions\n return ret\n","sub_path":"issc_agent/issc_agent.py","file_name":"issc_agent.py","file_ext":"py","file_size_in_byte":21216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"349250189","text":"#! /usr/bin/python\n# by pts@fazekas.hu at Tue Oct 11 13:12:47 CEST 2016\n# modified by oneirag@yahoo.es on 2018/04/16\n\n\"\"\":\" #megapubdl: Download public files from MEGA (mega.nz).\n\n\nmegapubdl is command-line tool for Unix implemented as a Python script to\ndownload public files (with a public URL) from MEGA (mega.nz, mega.co.nz).\nIt works with Python 3.6 and needs only the requests and `openssl' external tool or\nPyCrypto installed.\n\nUsage:\n\n megapubdl.py \"https://mega.nz/#!...\"\n\"\"\"\n\n#\n# TODO(pts): Improve error handling (especially socket errors and parse errors).\n#\n\nimport base64\nimport os\nimport random\nimport re\nimport socket\nimport stat\nimport struct\nimport subprocess\nimport sys\nimport traceback\nimport json as builtin_json # From Python 2.6.\nimport requests\n\n\ndef import_get(module, name, default):\n try:\n __import__(module)\n except ImportError:\n return default\n return getattr(__import__('sys').modules[module], name, default)\n\n\ndef parse_json(data):\n return builtin_json.loads(data)\n\n\ndef dump_json(obj):\n return builtin_json.dumps(obj)\n\n\n# --- Crypto.\n\nopenssl_prog = False\n\nif import_get('Crypto.Cipher.AES', 'MODE_CBC', None) is not None:\n # PyCrypto, implemented in C (no Python implementation). Tested and found\n # working with pycrypto-2.3.\n def aes_cbc(is_encrypt, data, key, iv='\\0' * 16):\n if len(key) != 16:\n raise ValueError\n if len(iv) != 16:\n raise ValueError\n from Crypto.Cipher import AES\n aes_obj = AES.new(key, AES.MODE_CBC, iv)\n if is_encrypt:\n return aes_obj.encrypt(data)\n else:\n return aes_obj.decrypt(data)\nelse:\n openssl_prog = True\n\n\n def aes_cbc(is_encrypt, data, key, iv='\\0' * 16):\n if len(key) != 16:\n raise ValueError\n if len(iv) != 16:\n raise ValueError\n encdec = ('-d', '-e')[bool(is_encrypt)]\n p = subprocess.Popen(\n (openssl_prog, 'enc', encdec, '-aes-128-cbc', '-nopad',\n '-K', key.encode('hex'), '-iv', iv.encode('hex')),\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n try:\n got, _ = p.communicate(data)\n finally:\n p.stdin.close()\n exitcode = p.wait()\n if exitcode:\n raise ValueError('Error running openssl enc.')\n if len(got) != len(data):\n raise ValueError('openssl enc output size mismatch.')\n assert len(got) == len(data)\n return got\n\n\nif import_get('Crypto.Cipher.AES', 'MODE_CTR', None) is not None:\n # PyCrypto, implemented in C (no Python implementation). 
Tested and found\n # working with pycrypto-2.3.\n def yield_aes_ctr(data_iter, key, iv='\\0' * 16, bufsize=None):\n if len(key) != 16:\n raise ValueError\n if len(iv) != 16:\n raise ValueError\n if isinstance(data_iter, (str, bytes)):\n data_iter = (data_iter,)\n data_iter = iter(data_iter)\n # PyCrypto, implemented in C (no Python implementation).\n from Crypto.Cipher import AES\n from Crypto.Util import Counter\n counter = Counter.new(8 * len(key), initial_value=int(iv.hex(), 16))\n aes_obj = AES.new(key, AES.MODE_CTR, counter=counter)\n yield '' # This is important, it signifies that decryption has started.\n encrypt = aes_obj.encrypt # .encrypt and .decrypt do the same.\n for data in data_iter:\n yield encrypt(data)\n\n\n\n\ndef aes_cbc_encrypt_a32(data, key):\n return str_to_a32(aes_cbc(True, a32_to_str(data), a32_to_str(key)))\n\n\ndef aes_cbc_decrypt_a32(data, key):\n return str_to_a32(aes_cbc(False, a32_to_str(data), a32_to_str(key)))\n\n\ndef stringhash(str, aeskey):\n s32 = str_to_a32(str)\n h32 = [0, 0, 0, 0]\n for i in range(len(s32)):\n h32[i % 4] ^= s32[i]\n for r in range(0x4000):\n h32 = aes_cbc_encrypt_a32(h32, aeskey)\n return a32_to_base64((h32[0], h32[2]))\n\n\ndef encrypt_key(a, key):\n return sum(\n (aes_cbc_encrypt_a32(a[i:i + 4], key)\n for i in range(0, len(a), 4)), ())\n\n\ndef decrypt_key(a, key):\n return sum(\n (aes_cbc_decrypt_a32(a[i:i + 4], key)\n for i in range(0, len(a), 4)), ())\n\n\ndef decrypt_attr(attr, key):\n attr = aes_cbc(False, attr, a32_to_str(key)).rstrip(b'\\0').decode()\n return attr.startswith('MEGA{\"') and parse_json(attr[4:])\n\n\ndef a32_to_str(a):\n return struct.pack('>%dI' % len(a), *a)\n\n\ndef str_to_a32(b):\n if len(b) % 4:\n # pad to multiple of 4\n b += '\\0' * (4 - len(b) % 4)\n return struct.unpack('>%dI' % (len(b) / 4), b)\n\n\ndef base64_url_decode(data):\n data += '=='[(2 - len(data) * 3) % 4:]\n for search, replace in (('-', '+'), ('_', '/'), (',', '')):\n data = data.replace(search, replace)\n return base64.b64decode(data)\n\n\ndef base64_to_a32(s):\n return str_to_a32(base64_url_decode(s))\n\n\ndef base64_url_encode(data):\n data = base64.b64encode(data)\n for search, replace in ((b'+', b'-'), (b'/', b'_'), (b'=', b'')):\n data = data.replace(search, replace)\n return data\n\n\ndef a32_to_base64(a):\n return base64_url_encode(a32_to_str(a))\n\n\n# more general functions\ndef make_id(length):\n possible = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'\n return ''.join(random.choice(possible) for _ in range(length))\n\n\ndef send_http_request(url, data=None, timeout=None, params=None, stream=False):\n r = requests.get(url, data=data, timeout=timeout, params=params, stream=stream)\n return r\n\n\nMEGA_ERRORS = {\n 0: 'API_OK', # Success\n -1: 'API_EINTERNAL',\n # An internal error has occurred. Please submit a bug report, detailing the exact circumstances in which this error occurred.\n -2: 'API_EARGS', # You have passed invalid arguments to this command.\n -3: 'API_EAGAIN',\n # (always at the request level): A temporary congestion or server malfunction prevented your request from being processed. No data was altered. Retry. Retries must be spaced with exponential backoff.\n -4: 'API_ERATELIMIT',\n # You have exceeded your command weight per time quota. Please wait a few seconds, then try again (this should never happen in sane real-life applications).\n -5: 'API_EFAILED', # The upload failed. 
Please restart it from scratch.\n -6: 'API_ETOOMANY', # Too many concurrent IP addresses are accessing this upload target URL.\n -7: 'API_ERANGE', # The upload file packet is out of range or not starting and ending on a chunk boundary.\n -8: 'API_EEXPIRED', # The upload target URL you are trying to access has expired. Please request a fresh one.\n -9: 'API_EOENT', # Object (typically, node or user) not found\n -10: 'API_ECIRCULAR', # Circular linkage attempted\n -11: 'API_EACCESS', # Access violation (e.g., trying to write to a read-only share)\n -12: 'API_EEXIST', # Trying to create an object that already exists\n -13: 'API_EINCOMPLETE', # Trying to access an incomplete resource\n -14: 'API_EKEY', # A decryption operation failed (never returned by the API)\n -15: 'API_ESID', # Invalid or expired user session, please relogin\n -16: 'API_EBLOCKED', # User blocked\n -17: 'API_EOVERQUOTA', # Request over quota\n -18: 'API_ETEMPUNAVAIL', # Resource temporarily not available, please try again later\n -19: 'API_ETOOMANYCONNECTIONS', # Too many connections on this resource\n -20: 'API_EWRITE', # Write failed\n -21: 'API_EREAD', # Read failed\n -22: 'API_EAPPKEY', # Invalid application key; request not processed\n}\n\n\nclass RequestError(ValueError):\n \"\"\"Error in API request.\"\"\"\n\n\nclass Mega(object):\n def __init__(self, options=None):\n self.bufsize = 65536\n self.schema = 'https'\n self.domain = 'mega.co.nz'\n self.timeout = 160 # max time (secs) to wait for resp from api requests\n self.sid = None\n self.sequence_num = random.randint(0, 0xFFFFFFFF)\n self.request_id = make_id(10)\n\n if options is None:\n options = {}\n self.options = options\n\n def _login(self):\n master_key = [random.randint(0, 0xFFFFFFFF)] * 4\n password_key = [random.randint(0, 0xFFFFFFFF)] * 4\n session_self_challenge = [random.randint(0, 0xFFFFFFFF)] * 4\n\n user = self._api_request({\n 'a': 'up',\n 'k': a32_to_base64(encrypt_key(master_key, password_key)).decode(),\n 'ts': base64_url_encode(a32_to_str(session_self_challenge) +\n a32_to_str(encrypt_key(session_self_challenge, master_key))).decode()\n })\n\n resp = self._api_request({'a': 'us', 'user': user})\n # if numeric error code response\n if isinstance(resp, int):\n raise RequestError(resp)\n encrypted_master_key = base64_to_a32(resp['k'])\n self.master_key = decrypt_key(encrypted_master_key, password_key)\n if 'tsid' not in resp:\n raise RequestError('Missing tsid.')\n tsid = base64_url_decode(resp['tsid'])\n key_encrypted = a32_to_str(\n encrypt_key(str_to_a32(tsid[:16]), self.master_key))\n if key_encrypted == tsid[-16:]:\n self.sid = resp['tsid']\n\n def _api_request(self, data, folder_id=None):\n params = {}\n if folder_id:\n params['n'] = folder_id\n params['id'] = self.sequence_num\n\n self.sequence_num += 1\n\n if self.sid:\n params.update({'sid': self.sid})\n\n # ensure input data is a list\n if not isinstance(data, list):\n data = [data]\n\n url = '%s://g.api.%s/cs' % (self.schema, self.domain)\n hr = send_http_request(url, data=dump_json(data), timeout=self.timeout, params=params)\n if hr.status_code != 200:\n raise RequestError('HTTP not OK: %s %s' % (hr.status_code, hr.reason))\n json_resp = hr.json()\n if isinstance(json_resp, int):\n raise RequestError('%s (%s)' % (MEGA_ERRORS.get(json_resp), json_resp))\n if isinstance(json_resp[0], int):\n raise RequestError('%s (%s)' % (MEGA_ERRORS.get(json_resp[0]), json_resp[0]))\n return json_resp[0]\n\n @classmethod\n def _parse_url(self, url):\n \"\"\"Returns (file_id, file_key.\"\"\"\n i = 
url.find('/#!')\n if i < 0:\n raise RequestError('Key missing from URL.')\n path = url[i + 3:].split('!')\n return path[:2]\n\n @classmethod\n def get_file_id(self, url):\n return self._parse_url(url)[0]\n\n def list_files(self, folder_url, filter_func=None):\n \"\"\"\n Get a list of files in a public folder link of mega\n Example use:\n mega = Mega()\n list = mega.list_files('https://mega.nz/#F!O4YA2JgD!n2b4iSHQDruEsYUvTQP5_w')\n for name, val in list.iteritems():\n mega.download_public_file(val)\n\n :param folder_url: a public link such as 'https://mega.nz/#F!O4YA2JgD!n2b4iSHQDruEsYUvTQP5_w'\n :param filter_func: a function to filter the names of the files, returning True for the files to keep in the list\n :return: a dictionary. Key is the name of the file\n file_info['folder_id'] = folder_id (to pass to download file)\n file_info['ts'] = file timestamp\n file_info['url'] = public_link (to pass to download file)\n file_info['filename'] = file name\n\n \"\"\"\n if self.sid is None:\n self._login()\n\n folder_id, orig_folder_key = folder_url.split(\"!\")[1:] # TODO: error check\n\n folder_key = base64_to_a32(orig_folder_key) # if is_public:\n\n if len(folder_key) == 4:\n k = folder_key\n elif len(folder_key) == 8:\n k = (folder_key[0] ^ folder_key[4],\n folder_key[1] ^ folder_key[5],\n folder_key[2] ^ folder_key[6],\n folder_key[3] ^ folder_key[7])\n else:\n raise Exception(\"Invalid key, please verify your MEGA url.\")\n\n if len(folder_key) > 4:\n iv = folder_key[4:6] + (0, 0)\n meta_mac = folder_key[6:8]\n\n retval = {}\n folder_data = self._api_request({'a': 'f', 'c': 1, 'ca': 1, 'r': 1}, folder_id)\n for node in folder_data['f']:\n if node['t'] == 0: # Just files\n if node['k']:\n node_k = node['k']\n file_key = node_k[node_k.find(':') + 1:]\n file_key = decrypt_key(base64_to_a32(file_key), folder_key)\n public_link_key = base64_url_encode(a32_to_str(file_key))\n public_link = \"https://mega.nz/#!{}!{}\".format(\n node['h'],\n public_link_key.decode()\n )\n file_key = (file_key[0] ^ file_key[4],\n file_key[1] ^ file_key[5],\n file_key[2] ^ file_key[6],\n file_key[3] ^ file_key[7])\n attribs = decrypt_attr(base64_url_decode(node['a']), file_key)\n file_name = attribs['n']\n if filter_func:\n if not filter_func(file_name):\n continue\n file_info = {}\n file_info['folder_id'] = folder_id\n file_info['ts'] = node['ts']\n file_info['url'] = public_link\n file_info['filename'] = file_name\n retval[file_name] = file_info\n return retval\n\n def download_public_file(self, file_info, target_dir=\"\"):\n\n dl = self.download_url(file_info['url'], file_info['folder_id'], is_public=True)\n dl_info = next(dl)\n print(dl_info['name'], dl_info['size']) # TODO: use logging\n next(dl) # Start the download.\n file_name = os.path.join(target_dir, dl_info['name'])\n if not os.path.exists(file_name):\n with open(file_name, 'wb') as f:\n for data in dl:\n f.write(data)\n\n def download_url(self, url, folder_id=None, is_public=True):\n \"\"\"Starts downloading a file from Mega, based on URL.\n\n Example usage:\n\n mega = Mega()\n dl = mega.download_url('https://mega.nz/#!ptJElSYC!qEPvI7qJkjvreVxpLU7CoJc4sxF3X7p1DH5WEMmPs5U')\n dl_info = dl.next()\n print (dl_info['name'], dl_info['size'])\n dl.next() # Start the download.\n f = open(dl_info['name'], 'wb')\n try:\n for data in dl:\n f.write(data)\n finally:\n f.close()\n \"\"\"\n if self.sid is None:\n self._login()\n file_id, file_key = self._parse_url(url)\n file_key = base64_to_a32(file_key) #\n if is_public:\n file_data = self._api_request({'a': 'g', 
'g': 1, 'n': file_id}, folder_id)\n else:\n file_data = self._api_request({'a': 'g', 'g': 1, 'p': file_id}, folder_id)\n k = (file_key[0] ^ file_key[4], file_key[1] ^ file_key[5],\n file_key[2] ^ file_key[6], file_key[3] ^ file_key[7])\n iv = file_key[4:6] + (0, 0)\n meta_mac = file_key[6:8]\n\n # Seems to happen sometimes... When this occurs, files are\n # inaccessible also in the official web app.\n # Strangely, files can come back later.\n if 'g' not in file_data:\n raise RequestError('File not accessible now.')\n file_url = file_data['g'] # Can be non-ASCII UTF-8.\n file_size = int(file_data['s']) # Was already an int.\n attribs = base64_url_decode(file_data['at'])\n attribs = decrypt_attr(attribs, k)\n file_name = attribs['n'] # Can be non-ASCII UTF-8.\n key_str = a32_to_str(k)\n assert len(key_str) == 16\n iv_str = struct.pack('>LLLL', iv[0], iv[1], 0, 0)\n assert len(iv_str) == 16\n\n yield {'name': file_name, 'size': file_size, 'url': file_url, 'key': key_str, 'iv': iv_str, 'id': file_id}\n\n hr = send_http_request(file_url, timeout=self.timeout, stream=True)\n if hr.status_code != 200:\n raise RequestError('HTTP download link not OK: %s %s' % (hr.status_code, hr.reason))\n ct = hr.headers['content-type'].lower()\n if ct.startswith('text/'): # Typically 'application/octet-stream'.\n raise RequestError('Unexpected content-type: %s' % ct)\n yield_size = 0\n for pdata in yield_aes_ctr(\n hr.iter_content(self.bufsize),\n key_str, iv_str, self.bufsize):\n yield pdata\n yield_size += len(pdata)\n if yield_size != file_size:\n raise RequestError('File size mismatch: got=%d expected=%d' %\n (yield_size, file_size))\n\n\ndef get_module_docstring():\n return __doc__\n\n\ndef get_doc(doc=None):\n if doc is None:\n doc = get_module_docstring()\n doc = doc.rstrip()\n doc = re.sub(r'\\A:\"\\s*#', '', doc, 1)\n doc = re.sub(r'\\n(\\ntype python.*)+\\nexec python -- .*', '', doc, 1)\n return doc\n\n\ndef fix_ext(filename):\n a, b = os.path.splitext(filename)\n return a + b.lower()\n\n\ndef download_mega_url(url, mega):\n print('info: Downloading URL: %s' % url, file=sys.stderr)\n file_id = mega.get_file_id(url)\n prefix = 'mega_%s_' % file_id\n entries = [e for e in os.listdir('.') if e.startswith(prefix) and not e.endswith('.tmpdl')]\n if entries:\n for entry in entries:\n print('info: Already present, keeping %s bytes in file: %s' % (\n os.stat(entry).st_size, entry), file=sys.stderr)\n return\n dl = mega.download_url(url)\n try:\n dl_info = next(dl)\n except RequestError as e:\n if str(e).startswith('API_EOENT ('): # File not found on MEGA.\n open(prefix + 'not_found.err', 'wb').close()\n raise\n filename = prefix + fix_ext('_'.join(dl_info['name'].split()))\n try:\n st = os.stat(filename)\n except OSError as e:\n st = None\n if st and stat.S_ISREG(st.st_mode) and st.st_size == dl_info['size']:\n print('info: Already downloaded, keeping %s bytes in file: %s' % (\n dl_info['size'], filename), file=sys.stderr)\n return\n print('info: Saving file of %s bytes to file: %s' % (dl_info['size'], filename), file=sys.stderr)\n marker = next(dl) # Start the download.\n assert marker == ''\n filename_tmpdl = filename + '.tmpdl'\n try:\n f = open(filename_tmpdl, 'wb')\n try:\n for data in dl:\n f.write(data)\n finally:\n f.close()\n os.rename(filename_tmpdl, filename)\n filename_tmpdl = '' # Don't attempt to remove it.\n finally:\n if filename_tmpdl:\n try:\n os.remove(filename_tmpdl)\n except OSError:\n pass\n\n\ndef main(argv):\n if len(argv) < 2 or argv[1] == '--help':\n print(get_doc())\n 
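# No URL arguments (or --help) on the command line: the module docstring
# doubles as the usage text, so print it and exit 0. The loop below instead
# exits 2 when any of the given URLs fails to download.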
sys.exit(0)\n mega = Mega()\n had_error = False\n for url in argv[1:]:\n try:\n download_mega_url(url, mega)\n except (socket.error, IOError, OSError, ValueError):\n traceback.print_exc()\n had_error = True\n sys.exit(2 * bool(had_error))\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n","sub_path":"megapubdl.py","file_name":"megapubdl.py","file_ext":"py","file_size_in_byte":19337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"540781548","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 13 11:54:08 2019\n\n@author: Yung-Yu Tsai\n\nMemory bitmap setting for memory fault mapping\n\"\"\"\n\nimport numpy as np\n \nclass bitmap:\n \"\"\"\n The bitmap of a buffer for memory fault tolerance analysis.\n \n \"\"\"\n\n def __init__(self, row, col, wl=None):\n \"\"\"\n # Arguments\n row: Integer. Number of rows in memory.\n col: Integer. Number of columns in memory.\n wl: Integer. The word length of memory\n fault_num: Integer. Number of faults in memory.\n fault_dict: Dictionary. The fault information {location : fault type}\n \n \"\"\"\n self.row=row\n self.col=col\n self.wl=wl\n self.fault_num=None\n self.fault_dict=dict()\n\n def fault_num_gen_mem(self, fault_rate):\n \"\"\"\n Generate the number of faults\n \"\"\"\n self.fault_num=int(self.row * self.col * fault_rate)\n \n \n def addr_gen_mem(self,distribution='uniform',poisson_lam=None):\n \"\"\"Generate the fault location in a memory\n\n # Arguments\n distribution: String. The distribution type of location in memory. Must be one of 'uniform', 'poisson', 'normal'.\n poisson_lam: Integer. The lambda of poisson distribution.\n \n # Returns\n The location index Tuple(Integer).\n \"\"\"\n if distribution=='uniform':\n row_tmp=np.random.randint(self.row)\n col_tmp=np.random.randint(self.col)\n elif distribution=='poisson':\n if not isinstance(poisson_lam,tuple) or len(poisson_lam)!=2:\n raise TypeError('Poisson distribution lambda setting must be a tuple has length of 2 (row, col).')\n \n if isinstance(poisson_lam[0],int) and poisson_lam[0]>=0 and poisson_lam[0]<=self.row:\n row_tmp=np.random.poisson(poisson_lam[0])\n else:\n raise ValueError('Poisson distribution Lambda must within feature map shape. Feature map shape %s but got lambda input %s'%(str((self.row,self.col)),str(poisson_lam)))\n \n if isinstance(poisson_lam[1],int) and poisson_lam[1]>=0 and poisson_lam[1]<=self.col:\n col_tmp=np.random.poisson(poisson_lam[1])\n else:\n raise ValueError('Poisson distribution Lambda must within feature map shape. Feature map shape %s but got lambda input %s'%(str((self.row,self.col)),str(poisson_lam)))\n \n elif distribution=='normal':\n pass \n '''TO BE DONE''' \n else:\n raise NameError('Invalid type of random generation distribution. Please choose between uniform, poisson, normal.')\n \n return (row_tmp,col_tmp)\n \n def addr_gen_mem_fast(self,fault_num,distribution='uniform',poisson_lam=None):\n \"\"\"Generate the fault location in a memory\n Faster generation may have repetitive fault addr.\n\n # Arguments\n distribution: String. The distribution type of location in memory. Must be one of 'uniform', 'poisson', 'normal'.\n poisson_lam: Integer. 
The lambda of poisson distribution.\n \n # Returns\n The location index Tuple(Integer).\n \"\"\"\n if distribution=='uniform':\n row_tmp=np.random.randint(self.row,size=fault_num)\n col_tmp=np.random.randint(self.col,size=fault_num)\n elif distribution=='poisson':\n if not isinstance(poisson_lam,tuple) or len(poisson_lam)!=2:\n raise TypeError('Poisson distribution lambda setting must be a tuple has length of 2 (row, col).')\n \n if isinstance(poisson_lam[0],int) and poisson_lam[0]>=0 and poisson_lam[0]<=self.row:\n row_tmp=np.random.poisson(poisson_lam[0],size=fault_num)\n else:\n raise ValueError('Poisson distribution Lambda must within feature map shape. Feature map shape %s but got lambda input %s'%(str((self.row,self.col)),str(poisson_lam)))\n \n if isinstance(poisson_lam[1],int) and poisson_lam[1]>=0 and poisson_lam[1]<=self.col:\n col_tmp=np.random.poisson(poisson_lam[1],size=fault_num)\n else:\n raise ValueError('Poisson distribution Lambda must within feature map shape. Feature map shape %s but got lambda input %s'%(str((self.row,self.col)),str(poisson_lam)))\n return \"{}:{:>02}:{:>05.2f}\".format(h, m, s)\n# End hms_string\n\n\n\nclass WikiPage():\n\n def __init__(self, title, degree, targetWord, count):\n self.title = title\n self.degree = degree\n self.targetWord = targetWord\n self.count = count\n","sub_path":"Pikmin/Pikmin_Search[John].py","file_name":"Pikmin_Search[John].py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"268413926","text":"import MySQLdb\nimport discord\nfrom discord.ext import commands\nimport asyncio\nfrom pokemonlist import pokemon, pokejson, base_stats, cp_multipliers\nfrom config import bot_channel, token, host, user, password, database, website, log_channel, raids_channel, spawn_channel\nimport datetime\nimport calendar\nimport math\nimport sys\nimport traceback\n\nbot = commands.Bot(command_prefix = '.')#set prefix to .\n\ndatabase = MySQLdb.connect(host,user,password,database)\ndatabase.ping(True)\ncursor = database.cursor()\n\ndef find_pokemon_id(name):\n if name == 'Nidoran-F':\n return 29\n elif name == 'Nidoran-M':\n return 32\n elif name == 'Mr-Mime':\n return 122\n elif name == 'Ho-Oh':\n return 250\n elif name == 'Mime-Jr':\n return 439\n else:\n name = name.split('-')[0]\n for k in pokejson.keys():\n v = pokejson[k]\n if v == name:\n return int(k)\n return 0\n \ndef calculate_cp(pokemon, level, iv_attack, iv_defense, iv_stamina):\n stats = base_stats[str(pokemon)]\n cpm = cp_multipliers[str(level)]\n\n return math.floor(\n (cpm * cpm *\n (stats['attack'] + iv_attack)\n * math.sqrt((stats['defense'] + iv_defense))\n * math.sqrt((stats['stamina'] + iv_stamina))) / 10)\n\ndef get_time(minute):\n future = datetime.datetime.utcnow() + datetime.timedelta(minutes=minute)\n return calendar.timegm(future.timetuple())\n\n#raid function\n@bot.command(pass_context=True)\nasync def raid(ctx, arg, arg2, arg3, arg4): # arg = gym name, arg2 = pokemon name, arg3 = level, arg4 = time remaining\n if ctx and ctx.message.channel.id == str(bot_channel) and str(arg2).lower() in pokemon:\n pokemon_id = find_pokemon_id(str(arg2).capitalize())\n time = get_time(int(arg4))\n try:\n cursor.execute(\"SELECT url FROM forts WHERE name LIKE '\" + str(arg) + \"%';\")\n image = str(cursor.fetchall())\n image = image.split(',')\n image = image[0].split(\"'\")\n cursor.execute(\"SELECT name FROM forts WHERE name LIKE '\" + str(arg) + \"%';\")\n gym_title = str(cursor.fetchall())\n if '\"' in gym_title:\n gym_title = gym_title.split('\"')\n elif \"'\" in gym_title:\n gym_title = gym_title.split(\"'\")\n cursor.execute(\"SELECT lat FROM forts WHERE name LIKE '\" + str(arg) + \"%';\")\n lat = str(cursor.fetchall())\n lat = lat.split(',')\n lat = lat[0].split('(')\n cursor.execute(\"SELECT lon FROM forts WHERE name LIKE '\" + str(arg) + \"%';\")\n lon = str(cursor.fetchall())\n lon = lon.split(',')\n lon = lon[0].split('(')\n cursor.execute(\"SELECT id FROM forts WHERE name LIKE '\" + str(arg) + \"%';\")\n gym_id = str(cursor.fetchall())\n 
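# As with url/lat/lon above, the gym id is recovered by str()-ing the raw
# fetchall() result and splitting on punctuation, so gym_id[1] further down is
# just the middle piece of that split. fetchall() already returns a sequence
# of row tuples, so a more direct form (hypothetical, untested against this
# schema) would be:
#
#     row = cursor.fetchone()
#     gym_id = row[0] if row else None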
gym_id = gym_id.split(',')\n gym_id = gym_id[0].split('((')\n raid_embed = discord.Embed(\n title=(str(gym_title[1])),\n url=(\"https://www.google.com/maps/?q=\" + str(lat[2]) + \",\" + str(lon[2])),\n description=str(arg2).capitalize() + \" raid is available on the live map!\\n\"\n \"**Level:** \" + str(arg3) + \"\\n\"\n \"**L20 100%:** \" + str(calculate_cp(pokemon_id, 20, 15, 15, 15)) + \"\\n\"\n \"**L25 100%:** \" + str(calculate_cp(pokemon_id, 25, 15, 15, 15)) + \"\\n\"\n \"**Minutes Remaining:** \" + str(arg4) + \"\\n\"\n \"**Live Map:** \"+ str(website),\n color=3447003\n )\n raid_embed.set_thumbnail(url=image[1])\n raid_embed.set_image(url=\"http://www.pokestadium.com/sprites/xy/\" + str(arg2).lower() + \".gif\")\n cursor.execute(\"INSERT INTO raids(\"\n \"id, external_id, fort_id , level, \"\n \"pokemon_id, move_1, move_2, time_spawn, \"\n \"time_battle, time_end, cp)\"\n \"VALUES \"\n \"(null, null, %s, \"\n \"%s, %s, null, null, \"\n \"null, null, %s, null);\"\n , (str(gym_id[1]), str(arg3), str(pokemon_id), str(time)))\n database.commit()\n await bot.say('Successfully added your raid to the live map.')\n await bot.send_message(discord.Object(id=raids_channel), embed=raid_embed)\n await bot.send_message(discord.Object(id=log_channel),\n str(ctx.message.author.name) + ' said there was a ' + str(arg2) +\n ' raid going on at ' + str(arg)) and print(\n str(ctx.message.author.name) + ' said there was a ' + str(arg2) +\n ' raid going on at ' + str(arg))\n except:\n database.rollback()\n tb = traceback.print_exc(file=sys.stdout)\n print(tb)\n await bot.say('Unsuccessful in database query, your raid was not added to the live map.')\n \n@bot.command(pass_context=True)\nasync def spawn(ctx, arg, arg2, arg3):\n if ctx and ctx.message.channel.id == str(bot_channel) and str(arg).lower() in pokemon:\n pokemon_id = find_pokemon_id(str(arg).capitalize())\n time = get_time(15)\n try:\n cursor.execute(\"INSERT INTO sightings(\"\n \"id, pokemon_id, spawn_id, expire_timestamp, encounter_id, lat, lon, \"\n \"atk_iv, def_iv, sta_iv, move_1, move_2, gender, \"\n \"form, cp, level, updated, weather_boosted_condition, weather_cell_id, weight) \"\n \"VALUES (null, %s, null, %s, null, %s , %s\"\n \", null, null, null, null, null, null,\"\n \" null, null, null, null, null, null, null);\"\n , (str(pokemon_id), str(time), str(arg2), str(arg3)))\n database.commit()\n await bot.say('Successfully added your spawn to the live map.\\n'\n '*Pokemon timers are automatically given 15 minutes since the timer is unknown.*')\n spawn_embed=discord.Embed(\n title='Click for directions!',\n url=(\"https://www.google.com/maps/?q=\" + str(arg2) + \",\" + str(arg3)),\n description=('A wild ' + str(arg).capitalize() + ' is available!\\n\\n'\n '**Time Remaining:** ~15 minutes.\\n'\n '**Spotted by:** ' + str(ctx.message.author.name) + '!'),\n color=3447003\n )\n spawn_embed.set_image(url=\"http://www.pokestadium.com/sprites/xy/\" + str(arg).lower() + \".gif\")\n await bot.send_message(discord.Object(id=spawn_channel), embed=spawn_embed)\n await bot.send_message(discord.Object(id=log_channel), str(ctx.message.author.name) + ' said there was a wild ' + str(arg) +\n ' at these coordinates: ' + str(arg2) + ', ' + str(arg3)) and print(str(ctx.message.author.name) + ' said there was a wild ' + str(arg) +\n ' at these coordinates: ' + str(arg2) + ', ' + str(arg3))\n except:\n tb = traceback.print_exc(file=sys.stdout)\n print(tb)\n await bot.say('Unsuccessful in database query, your reported spawn was not added to the live 
map.')\n@bot.command(pass_context=True)\nasync def map(ctx):\n if ctx:\n await bot.say('Hey! Visit ' + str(website) + ' to see our crowd-sourced raids and spawns!')\n\n@bot.command(pass_context=True)\nasync def helpme(ctx):\n if ctx:\n help_embed=discord.Embed(\n title='CSPM Help',\n description='**Mapping Raids:**\\n'\n 'To add a raid to the live map, use the following command:\\n'\n '`.raid `\\n'\n 'Example: `.raid \"Fave Bird Mural\" Lugia 5 45`\\n\\n'\n '**Mapping Spawns:**\\n'\n 'To add a spawn to the live map, use the following command:\\n'\n '`.spawn `\\n'\n 'Example: `.spawn larvitar 34.101085 -118.287312`\\n\\n'\n '*To see raids that are crowdsourced, please make sure you tick the raids option in layers (top right)*',\n color=3447003\n )\n await bot.say(embed=help_embed)\n \nbot.run(token)\n","sub_path":"cspm.py","file_name":"cspm.py","file_ext":"py","file_size_in_byte":8798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"522601804","text":"\"\"\"\nThis example shows how to read an mzXML file exported by the Merlin software\nand make a contourf plots.\n\nAuthor: James E. T. Smith \nDate: 3/29/2020\n\"\"\"\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport matplotlib.colors as colors # For log color scale\nfrom pyopenms import MSExperiment, MzXMLFile\n\nfrom msanalysis.data_extraction import read_mzXML\nfrom msanalysis.plotting import add_custom_ticks\nfrom msanalysis.plotting.contour import contourf\nfrom msanalysis.sample_data import get_mzXML_sample_path, get_csv_sample_path\n\n#\n# User specified variables\n#\n\nlabview_file = get_csv_sample_path()\nmzXML_file = get_mzXML_sample_path()\n# Users can specify their own path like the lines below\n# labview_file = \"/home/james/Downloads/20200228_TP.csv\"\n# mzXML_file = \"/home/james/Downloads/20200228_1175.mzXML\"\n# labview_file = \"20200612_TP.csv\"\n# mzXML_file = \"20200612_2735.mzXML\"\n\n\n#\n# Read CSV Data from LabView\n#\ncols = [\"time\", \"b\", \"temp\", \"d\", \"e\", \"f\", \"g\", \"h\"]\ndf = pd.read_csv(labview_file, names=cols)\ndf[\"time\"] -= df[\"time\"][0]\nlast_lv_time = np.array(df[\"time\"])[-1]\n\n#\n# Read in mzXML\n#\ndata = read_mzXML(mzXML_file)\nmz, intensities, times = data[\"mz\"], data[\"intensities\"], data[\"times\"]\n# Only go as far as LabView data (which we are assuming is always shut off after the mass spec)\nsubset = np.where(times <= last_lv_time)[0]\ntimes = times[subset]\nintensities = intensities[subset]\n\n#\n# Use timestamps from mzXML and Labview to interpolate temperature for each scan\n#\ntemp_interp = np.interp(times, df[\"time\"], df[\"temp\"])\n\n#\n# Select a subset of MZ range and plot intensities as a contour plot\n#\nmz_lb, mz_ub = (130, 180)\nkeep_ith_scan = 1\nX, Y, Z = contourf(mz, intensities, mz_lb, mz_ub, keep_ith_scan=keep_ith_scan)\nprint(X.shape, Y.shape, Z.shape)\n\n\n#\n# Plot contour\n#\n\nplt.figure()\nplt.contourf(X * keep_ith_scan, Y, Z)\ncbar = plt.colorbar()\ncbar.ax.set_ylabel(\"Intensity\", rotation=270, fontsize=12, labelpad=15)\nadd_custom_ticks(plt.gca(), temp_interp)\nplt.xlabel(\"Temperature $^o$ C\")\nplt.title(\"M/Z Intensity as a Function of Temperature\")\nplt.savefig(\"figures/ex2_contour.png\", dpi=600)\n\n#\n# Plot contour with log scale\n#\nplt.figure()\nZ += 1e-5\nplt.contourf(X * keep_ith_scan, Y, Z, norm=colors.LogNorm(vmin=1e-3, vmax=Z.max()))\ncbar = plt.colorbar()\ncbar.ax.set_ylabel(\"Log(Intensity)\", rotation=270, fontsize=12, 
labelpad=15)\nadd_custom_ticks(plt.gca(), temp_interp)\nplt.xlabel(\"Temperature $^o$ C\")\nplt.title(\"Log(M/Z Intensity) as a Function of Temperature\")\nplt.savefig(\"figures/ex2_contour_logscale.png\", dpi=600)\n","sub_path":"examples/02_contour.py","file_name":"02_contour.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"198956720","text":"goods = [[\"1.iphone X\",7399],[\"2.vivo X21\",3598],[\"3.利用Python进行数据分析\",72],[\"4.迪士尼滑板车\", 179],[\"5.索尼无线蓝牙耳机\", 1999]]\nhavabuys = []\n\nsalarytem = input(\"请输入你带多少钱来购物:\")\nif not salarytem.isdigit():\n exit()\n\nyour = int(salarytem)\n\nprint(\"购物车中的商品有:\")\nfor i in goods:\n print(i)\n\ngoodsindex = \"\"\n\n# print(\"需要购买的商品序号,'q'退出\")\nwhile True:\n goodsindex = input(\"需要购买的商品序号,'q'退出:\")\n if goodsindex == \"q\" or goodsindex == \"Q\":\n if not havabuys:\n print(\"你个搓比,啥都没有买!gun~~~\")\n else:\n print(\"您所购买的商品有:\")\n for i in havabuys:\n print(i)\n print(\"您所剩余额:\", your )\n exit()\n elif goodsindex.isdigit():\n buys = int(goodsindex)\n if buys < 0 or buys > len(goods):\n print(\"您选择的物品不存在\")\n exit()\n price = int(goods[buys-1][1])\n\n # print(type(your))\n # print(type(price))\n\n if your >= price:\n havabuys.append(goods[buys-1][0])\n your = your -price\n print(\"购买成功,购买商品为:\\n\" + goods[buys - 1][0])\n else:\n print(\"\\033你个穷B,剩余:%.2f \\033,还差:%.2f \\033,买不起就滚...\" % (your, price-your))\n","sub_path":"python练习/购物车.py","file_name":"购物车.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"142788186","text":"from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, String, DateTime, Float\n\n\nclass Base:\n '''Class describing the model for items table in database'''\n itemId = Column(String, primary_key=True)\n url = Column(String)\n price_amount = Column(Float)\n price_currency = Column(String)\n title = Column(String)\n category = Column(String)\n\n\nBase = declarative_base(cls=Base)\n\n\nclass EbayItem(Base):\n __tablename__ = 'ebay'\n expire = Column(DateTime)\n\n\nclass AmazonItem(Base):\n __tablename__ = 'amazon'\n","sub_path":"adapters/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"38777910","text":"# -*- coding: utf-8 -*-\n__author__ = 'jiahuixing'\n\nimport os\nimport sys\n# import pexpect\nimport time\n# noinspection PyPep8Naming\nfrom xml.etree import ElementTree as ET\nimport urllib2\nimport json\n# import memcache\nimport threading\nimport subprocess\nimport hashlib\nimport traceback\nimport datetime\nimport calendar\n\n\n# noinspection PyClassHasNoInit\nclass Constants:\n # miui start year\n miui_start_year = 2010\n # get adb device and fastboot device\n adb_device_flag = 'device'\n fastboot_device_flag = 'fastboot'\n\n\ndef debug_msg(m_msg):\n if m_msg is not None:\n print('-' * 60)\n print(m_msg)\n print('-' * 60)\n\n\ndef read_xml_file(file_path, tag, attr_key=''):\n m_command_list = list()\n if isinstance(file_path, list):\n path = file_path[0]\n os.chdir(path)\n file_name = file_path[1]\n debug_msg('file_name=%s,tag=%s,attr_key=%s' % (file_name, tag, attr_key))\n debug_msg('abs=%s' % os.path.abspath(file_name))\n if os.path.exists(file_name):\n root = ET.parse(file_name)\n debug_msg('root=%s' % root)\n m_contents = root.findall(tag)\n debug_msg('tmp_s=%s' % 
m_contents)\n for m_content in m_contents:\n if isinstance(m_content, ET.Element):\n # children = tmp.getchildren()\n emp_dict = dict()\n # debug('emp_dict=%s' % emp_dict)\n if m_content.attrib == emp_dict or m_content.attrib['type'] == attr_key:\n children = list(m_content)\n for child in children:\n if isinstance(child, ET.Element):\n # msg = 'tag:%s,text=%s' % (child.tag, child.text)\n # debug(msg)\n m_command = child.text\n # debug(tmp_cmd)\n m_command_list.append(m_command)\n else:\n print('not exists.')\n else:\n print('file_path not list.')\n debug_msg(m_command_list)\n return m_command_list\n\n\ndef json_analyse():\n url_result = urllib2.urlopen('http://fm.duokanbox.com/category').read()\n debug_msg('url_result=%s' % url_result)\n json_r = json.loads(url_result)\n debug_msg('json_r=%s' % json_r)\n # debug(type(json_r))\n if isinstance(json_r, dict):\n keys = json_r.keys()\n debug_msg('keys=%s' % keys)\n for item in json_r.items():\n # debug(type(item))\n # debug(item)\n # for i in xrange(len(item)):\n debug_msg('%s:%s' % (item[0], item[len(item) - 1]))\n\n\n# noinspection PyClassHasNoInit\nclass Color:\n COLOR_START = '\\033[%s;'\n COLOR_END = '\\033[0m'\n # 显示方式: DISPLAY_TYPE_DEFAULT\n # 0(默认值)、1(高亮)、22(非粗体)、4(下划线)、24(非下划线)、\n # 5(闪烁)、25(非闪烁)、7(反显)、27(非反显)\n DISPLAY_TYPE_DEFAULT = 0\n DISPLAY_TYPE_BRIGHT = 1\n DISPLAY_TYPE_UNDERLINE = 4\n DISPLAY_TYPE_NON_UNDERLINE = 24\n DISPLAY_TYPE_FLICKER = 5\n DISPLAY_TYPE_NONE_FLICKER = 25\n DISPLAY_TYPE_NONE_BOLD = 22\n DISPLAY_TYPE_REVERSE = 7\n DISPLAY_TYPE_NONE_REVERSE = 27\n BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = xrange(8)\n\n\ndef color_msg(m_msg, m_foreground=Color.GREEN, m_background=None, display_type=Color.DISPLAY_TYPE_DEFAULT):\n m_color = list()\n if m_foreground is not None:\n m_color_fg = '3%d' % m_foreground\n m_color.append(m_color_fg)\n if m_background is not None:\n m_color_bg = '4%d' % m_background\n m_color.append(m_color_bg)\n if len(m_color) > 0:\n m_color_str = ';'.join(m_color)\n m_msg_start = Color.COLOR_START % display_type\n m_msg_end = Color.COLOR_END\n m_msg_body = '%sm%s' % (m_color_str, m_msg)\n m_msg = '%s%s%s' % (m_msg_start, m_msg_body, m_msg_end)\n return m_msg\n\n\ndef get_adb_device_list():\n m_adb_device_list = None\n m_adb_device_count = 0\n m_command = 'adb devices'\n m_popen = popen_system_command(m_command, show_debug_msg=0)\n while True:\n m_line = m_popen.stdout.readline().strip('\\n').strip('\\r')\n if not m_line:\n break\n m_split_line = m_line.split()\n m_split_line_length = len(m_split_line)\n if m_split_line_length == 2 and Constants.adb_device_flag in m_split_line:\n m_adb_device_count += 1\n if m_adb_device_list is None:\n m_adb_device_list = list()\n m_adb_device_list.append(m_split_line)\n else:\n m_adb_device_list.append(m_split_line)\n return m_adb_device_count, m_adb_device_list\n\n\ndef get_fastboot_device_list():\n m_fastboot_device_list = None\n m_fastboot_device_count = 0\n m_command = 'fastboot devices'\n m_popen = popen_system_command(m_command, show_debug_msg=0)\n while True:\n m_line = m_popen.stdout.readline().strip('\\n').strip('\\r')\n if not m_line:\n break\n m_split_line = m_line.split()\n m_split_line_length = len(m_split_line)\n if m_split_line_length == 2 and Constants.fastboot_device_flag in m_split_line:\n m_fastboot_device_count += 1\n if m_fastboot_device_list is None:\n m_fastboot_device_list = list()\n m_fastboot_device_list.append(m_split_line)\n else:\n m_fastboot_device_list.append(m_split_line)\n return m_fastboot_device_count, 
m_fastboot_device_list\n\n\ndef get_ro_build_product(m_device_id):\n m_ro_build_product = None\n m_command = 'adb -s %s shell getprop ro.build.product' % m_device_id\n m_popen = popen_system_command(m_command, show_debug_msg=0)\n while True:\n m_line = m_popen.stdout.readline().strip('\\n').strip('\\r')\n if not m_line:\n break\n m_ro_build_product = m_line\n return m_ro_build_product\n\n\ndef get_ro_product_device(m_device_id):\n m_ro_product_device = None\n m_command = 'adb -s %s shell getprop ro.product.device' % m_device_id\n m_popen = popen_system_command(m_command, show_debug_msg=0)\n while True:\n m_line = m_popen.stdout.readline().strip('\\n').strip('\\r')\n if not m_line:\n break\n m_ro_product_device = m_line\n return m_ro_product_device\n\n\ndef get_ro_build_version(m_device_id):\n m_ro_build_version = None\n m_command = 'adb -s %s shell getprop ro.build.version.incremental' % m_device_id\n m_popen = popen_system_command(m_command, show_debug_msg=0)\n while True:\n m_line = m_popen.stdout.readline().strip('\\n').strip('\\r')\n if not m_line:\n break\n m_ro_build_version = m_line\n return m_ro_build_version\n\n\ndef get_system_language(m_device_id):\n m_system_language = None\n m_command = 'adb -s %s shell getprop persist.sys.language' % m_device_id\n m_popen = popen_system_command(m_command)\n while True:\n m_line = m_popen.stdout.readline().strip('\\n').strip('\\r')\n if not m_line:\n break\n m_system_language = m_line\n return m_system_language\n\n\ndef get_fastboot_var_product(m_device_id):\n m_fastboot_var_product = None\n m_command = 'fastboot -s %s getvar product' % m_device_id\n m_popen = popen_system_command(m_command, show_debug_msg=0)\n while True:\n # m_line = m_popen.stdout.readline().strip('\\n').strip('\\r')\n m_line = m_popen.stderr.readline().strip('\\n').strip('\\r')\n if not m_line:\n break\n m_line = m_line.split(':')\n if len(m_line) == 2:\n m_fastboot_var_product = m_line[1]\n break\n return m_fastboot_var_product\n\n\ndef adb_root_device(m_device_id):\n m_command = 'adb -s %s root' % m_device_id\n popen_system_command(m_command)\n\n\ndef adb_remount_device(m_device_id):\n m_remount = False\n m_command = 'adb -s %s remount' % m_device_id\n m_popen = popen_system_command(m_command)\n while True:\n m_line = m_popen.stdout.readline().strip('\\n').strip('\\r')\n if not m_line:\n break\n if 'succeeded' in m_line:\n debug_msg(color_msg('remount succeeded.'))\n m_remount = True\n break\n return m_remount\n\n\ndef adb_root_remount_device(m_device_id):\n adb_root_device(m_device_id)\n while True:\n m_remount = adb_remount_device(m_device_id)\n if m_remount:\n break\n else:\n waiting_for(3)\n\n\ndef adb_or_mdb():\n m_command_line = ''\n m_choose = 0\n if m_choose == 1:\n which_adb = 'which adb'\n which_mdb = 'which mdb'\n results = subprocess.Popen(which_adb, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n m_path_name = os.path.dirname(results[0])\n # debug_msg(tmp)\n if os.path.exists(m_path_name):\n m_command_line = 'adb'\n results = subprocess.Popen(which_mdb, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n m_path_name = os.path.dirname(results[0])\n # debug_msg(tmp)\n if os.path.exists(m_path_name):\n m_command_line = 'mdb'\n else:\n m_command_line = 'adb'\n return m_command_line\n\n\ndef get_date():\n m_year, m_month, m_day = int(time.strftime('%Y')) - 2010, int(time.strftime('%m')), int(time.strftime('%d'))\n m_date = '%s.%s.%s' % (m_year, m_month, m_day)\n return m_date\n\n\ndef 
get_date_weekday(m_date):\n m_weekday_list = [\n '星期一',\n '星期二',\n '星期三',\n '星期四',\n '星期五',\n '星期六',\n '星期日',\n ]\n m_split_date = str.split(m_date, '.')\n m_year = int(m_split_date[0]) + Constants.miui_start_year\n m_month = int(m_split_date[1])\n m_day = int(m_split_date[2])\n m_week = datetime.datetime(m_year, m_month, m_day).weekday()\n m_week = m_weekday_list[m_week]\n debug_msg('m_date = %s\\n'\n 'm_year = %s, m_month = %s, m_day = %s\\n'\n 'm_week = %s'\n % (m_date, m_year, m_month, m_day, m_week))\n\n\ndef is_date_valid(m_goal_date, m_this_date):\n m_goal_date_tuple = str.split(m_goal_date, '.')\n m_this_date_tuple = str.split(m_this_date, '.')\n m_goal_year = int(m_goal_date_tuple[0]) + Constants.miui_start_year\n m_goal_month = int(m_goal_date_tuple[1])\n m_goal_day = int(m_goal_date_tuple[2])\n m_this_year = int(m_this_date_tuple[0]) + Constants.miui_start_year\n m_this_month = int(m_this_date_tuple[1])\n m_this_day = int(m_this_date_tuple[2])\n if not (m_goal_year == m_this_year):\n return False\n m_valid_month = 12\n m_valid_day_of_month = calendar.monthrange(m_goal_year, m_goal_month)[1]\n if m_goal_month <= 0 or m_goal_month > m_valid_month:\n return False\n if m_goal_day <= 0 or m_goal_day > m_valid_day_of_month:\n return False\n # debug_msg('m_date = %s, this_date = %s' % (m_date, this_date))\n m_goal_datetime = datetime.datetime(m_goal_year, m_goal_month, m_goal_day)\n m_this_datetime = datetime.datetime(m_this_year, m_this_month, m_this_day)\n m_del_days = (m_goal_datetime - m_this_datetime).days\n if m_del_days > 5:\n return False\n else:\n return True\n\n\ndef get_time():\n m_time_time = int(time.time())\n return m_time_time\n\n\ndef get_ymd():\n m_ymd = time.strftime('%Y-%m-%d')\n return m_ymd\n\n\ndef get_hms(display=0):\n m_hms = time.strftime('%H:%M:%S')\n if display != 0:\n m_hms = time.strftime('%H-%M-%S')\n return m_hms\n\n\ndef run_system_command(m_command):\n debug_msg(color_msg('m_command = %s' % m_command))\n os.system(m_command)\n\n\ndef popen_system_command(m_command, m_thread=0, show_debug_msg=1):\n if show_debug_msg == 1:\n if m_thread == 0:\n debug_msg(color_msg('popen_system_command + %s') % m_command)\n else:\n debug_msg(color_msg('popen_system_command with thread + %s') % m_command)\n m_popen = subprocess.Popen(m_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n return m_popen\n\n\ndef thread_popen_system_command(m_command, m_thread=1, timeout=3):\n m_thread = threading.Thread(target=popen_system_command, args=(m_command, m_thread))\n m_thread.start()\n m_thread.join(timeout)\n\n\ndef waiting_for(m_waiting_seconds):\n debug_msg(color_msg('waiting for %s seconds.' 
% m_waiting_seconds))\n time.sleep(m_waiting_seconds)\n\n\ndef get_current_method_info():\n try:\n raise KeyError\n except KeyError:\n pass\n finally:\n exc_info = sys.exc_info()\n m_trace_obj = exc_info[2]\n m_frame_obj = m_trace_obj.tb_frame\n m_up_frame = m_frame_obj.f_back\n # m_file_name = m_up_frame.f_code.co_filename\n m_method_name = m_up_frame.f_code.co_name\n # m_line_number = m_up_frame.f_lineno\n # return m_file_name, m_method_name, m_line_number\n debug_msg(color_msg(m_method_name, Color.CYAN))\n return m_method_name\n\n\ndef kill_package(m_device_id, m_package_name):\n m_command = 'adb -s %s shell am kill %s' % (m_device_id, m_package_name)\n popen_system_command(m_command)\n m_command = 'adb -s %s shell am force-stop %s' % (m_device_id, m_package_name)\n popen_system_command(m_command)\n\n\ndef kill_running_monkey(m_device_id):\n m_monkey_process_id = None\n m_command = 'adb -s %s shell ps' % m_device_id\n m_popen = popen_system_command(m_command)\n while True:\n m_line = m_popen.stdout.readline().strip('\\n').strip('\\r')\n if not m_line:\n break\n if 'monkey' in m_line:\n m_line = m_line.split()\n debug_msg(color_msg(m_line))\n m_monkey_process_id = m_line[1]\n if m_monkey_process_id is not None:\n m_command = 'adb -s %s shell kill %s' % (m_device_id, m_monkey_process_id)\n popen_system_command(m_command)\n\n\ndef write_file(m_file_name, m_msg):\n m_mode = 'a+'\n m_file_obj = open(m_file_name, mode=m_mode)\n m_file_obj.writelines(m_msg)\n m_file_obj.close()\n\n\ndef init_package_lists():\n m_package_lists = list()\n m_soundrecorder = ['com.android.soundrecorder']\n m_compass = ['com.miui.compass']\n m_fm = ['com.miui.fm', 'com.miui.fmradio']\n m_notes = ['com.miui.notes']\n m_calculator = ['com.android.calculator2']\n m_frame = ['com.android.soundrecorder', 'com.miui.compass', 'com.miui.fm', 'com.miui.fmradio',\n 'com.miui.notes', 'com.android.calculator2']\n # m_all = None\n m_package_lists.append(m_soundrecorder)\n m_package_lists.append(m_compass)\n m_package_lists.append(m_fm)\n m_package_lists.append(m_notes)\n m_package_lists.append(m_calculator)\n m_package_lists.append(m_frame)\n # m_package_lists.append(m_all)\n\n\ndef is_alpha_rom(m_device_id):\n m_is_alpha = False\n m_command = 'adb -s %s shell getprop ro.product.mod_device' % m_device_id\n m_popen = popen_system_command(m_command)\n while True:\n m_line = m_popen.stdout.readline().strip('\\n').strip('\\r')\n if not m_line:\n break\n if 'alpha' in m_line:\n m_is_alpha = True\n break\n return m_is_alpha\n\n\ndef set_strict_mode(m_device_id):\n m_command = 'adb -s %s shell setprop persist.sys.strictmode.visual true' % m_device_id\n if is_alpha_rom(m_device_id):\n debug_msg(color_msg('alpha ROM,开启strict_mode.'))\n popen_system_command(m_command)\n else:\n debug_msg(color_msg('不是alpha ROM.'))\n\n\ndef get_md5(m_string):\n m_hash = hashlib.md5()\n m_hash.update(m_string)\n m_md5 = m_hash.hexdigest()\n debug_msg(color_msg('m_string = %s\\n'\n 'm_md5 = %s' % (m_string, m_md5)))\n return m_md5\n\n\ndef get_file_md5(m_file_name):\n m_md5 = None\n if not os.path.isfile(m_file_name):\n return m_md5\n m_hash = hashlib.md5()\n m_open_file = file(m_file_name, 'rb')\n while True:\n m_buffer = m_open_file.read(8096)\n if not m_buffer:\n break\n m_hash.update(m_buffer)\n m_open_file.close()\n m_md5 = m_hash.hexdigest().lower()\n return m_md5\n\n\ndef make_folders(m_folder_name):\n if not os.path.exists(m_folder_name):\n debug_msg(color_msg('folder == %s not exist, make dir.' 
% m_folder_name, Color.RED))\n os.makedirs(m_folder_name)\n return m_folder_name\n\n\ndef show_error_trace(m_error_msg):\n if m_error_msg is not None:\n m_error_traceback = traceback.format_exc()\n print(m_error_traceback)\n\n\ndef get_main_user_path():\n m_user_path = os.path.expanduser('~')\n debug_msg(color_msg('m_user_path = %s' % m_user_path))\n return m_user_path\n\n\ndef update_svn_files():\n # debug_msg(color_msg(get_current_method_info()))\n get_current_method_info()\n m_current_path = os.getcwd()\n os.chdir(sys.path[0])\n m_command = 'svn update'\n os.system(m_command)\n os.chdir(m_current_path)\n","sub_path":"common_libs.py","file_name":"common_libs.py","file_ext":"py","file_size_in_byte":16975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"210388365","text":"#coding:utf-8\nimport re\nimport math\nimport sqlite3 as sqlite\n#import jieba\nimport bpnn\n\n\n\ndef getwords(doc):\n splitter = re.compile('\\\\W*')\n #根据非字母字符进行单词拆分\n words = [s.lower() for s in splitter.split(doc)\n if len(s)>2 and len(s)<20]\n #返回一组不重复的单词\n return dict([(w,1) for w in words])\n\n#中文分词 \n''' \ndef getchinese(doc):\n ignorewords = [',', '/', ':', '.', '。', ','] \n generator = jieba.cut(doc)\n words = ('##'.join(generator)).split('##')\n words = [w for w in words if w not in ignorewords]\n return dict([(w,1) for w in words])\n''' \n \n \ndef sampletrain(cl):\n cl.train('Nobody owns the water.','good')\n cl.train('the quick rabbit jumps fences','good')\n cl.train('buy pharmaceuticals now','bad')\n cl.train('make quick money at the online casino','bad')\n cl.train('the quick brown fox jumps','good')\n \n \nclass classifier:\n def __init__(self,getfearures,filename=None):\n #统计特征/分类组合的数量\n self.fc = {}\n #统计每个分类中的文档数量\n self.cc = {}\n self.getfearures = getfearures\n self.thresholds = {}\n \n def setdb(self, dbfile):\n self.con = sqlite.connect(dbfile)\n self.con.execute('create table if not exists fc(feature,category,count)')\n self.con.execute('create table if not exists cc(category,count)')\n \n def setthreshold(self,cat,t):\n self.thresholds[cat] = t\n \n def getthreshold(self, cat):\n if cat not in self.thresholds: return 1.0\n return self.thresholds[cat]\n \n #增加对特征/分类组合的计数值\n def incf(self, f, cat):\n count = self.fcount(f,cat)\n if count == 0:\n self.con.execute(\"insert into fc values ('%s','%s',1)\"%(f,cat))\n else:\n self.con.execute(\n \"update fc set count=%d where feature='%s' and category='%s'\"%(count+1,f,cat))\n \n #增加对某一分类的计数值\n def incc(self, cat):\n count = self.catcount(cat)\n if count == 0:\n self.con.execute(\"insert into cc values ('%s',1)\"%cat)\n else:\n self.con.execute(\n \"update cc set count=%d where category='%s'\"%(count+1,cat))\n \n #某一特征出现于某一分类中的次数\n def fcount(self, f, cat):\n res = self.con.execute(\"select count from fc where feature='%s' and category='%s'\"%(f,cat)).fetchone()\n if res == None: return 0\n else: return float(res[0])\n \n #属于某一分类的内容项数量\n def catcount(self, cat):\n res = self.con.execute(\"select count from cc where category='%s'\"%cat).fetchone()\n if res == None: return 0\n else: return float(res[0])\n \n #所有内容项的数量\n def totalcount(self):\n res = self.con.execute(\"select sum(count) from cc\").fetchone()\n if res == None: return 0\n return res[0]\n \n #所有分类的列表\n def categories(self):\n cur = self.con.execute('select category from cc')\n return [d[0] for d in cur]\n \n def fprob(self,f,cat):\n if self.catcount(cat) == 0: return 0\n #特征在分类中出现的总次数,除以分类中包含内容项的总数\n return self.fcount(f,cat) / 
def weightedprob(self, f, cat, prf, weight=1.0, ap=0.5):\n # compute the current probability\n basicprob = prf(f, cat)\n # count how many times this feature appears across all categories\n totals = sum([self.fcount(f,c) for c in self.categories()])\n # compute the weighted average\n bp = ((weight*ap) + (totals*basicprob)) / (weight+totals)\n return bp\n \n def train(self, item, cat):\n features = self.getfearures(item)\n # increment the count of every feature for this category\n for f in features: self.incf(f, cat)\n self.incc(cat)\n self.con.commit()\n \n def classify(self, item, default=None):\n probs = {}\n # find the category with the highest probability\n max = 0.0\n for cat in self.categories():\n probs[cat] = self.prob(item, cat)\n if probs[cat] > max:\n max = probs[cat]\n best = cat\n \n # make sure the probability exceeds threshold * second-best probability\n for cat in probs:\n if cat == best: continue\n if probs[cat] * self.getthreshold(best) > probs[best]: return default\n return best\n \n# Naive Bayes \nclass naivebayes(classifier):\n def docprob(self, item, cat):\n features = self.getfearures(item)\n \n # multiply the probabilities of all the features together\n p = 1\n for f in features: p *= self.weightedprob(f, cat, self.fprob)\n return p\n \n def prob(self, item, cat):\n catprob = self.catcount(cat) / self.totalcount()\n docprob = self.docprob(item, cat)\n return docprob*catprob\n\n# Fisher classifier \nclass fisherclassifier(classifier):\n def __init__(self,getfearures):\n classifier.__init__(self, getfearures)\n self.minimums = {}\n \n def setminimum(self, cat, min):\n self.minimums[cat] = min\n \n def getminimum(self, cat):\n if cat not in self.minimums: return 0\n return self.minimums[cat]\n \n def cprob(self, f, cat):\n # frequency of the feature in this category\n clf = self.fprob(f, cat)\n if clf == 0: return 0\n \n # frequency of the feature across all categories\n freqsum = sum([self.fprob(f,c) for c in self.categories()])\n \n # the probability is the in-category frequency divided by the overall frequency\n p = clf / (freqsum)\n \n return p\n \n def fisherprob(self, item, cat):\n # multiply all the probabilities together\n p = 1\n features = self.getfearures(item)\n for f in features:\n p *= (self.weightedprob(f, cat, self.cprob))\n # take the natural log and multiply by -2\n fscore = -2 * math.log(p)\n # use the inverse chi-squared function to obtain a probability\n return self.invchi2(fscore, len(features)*2)\n \n def invchi2(self, chi, df):\n m = chi / 2.0\n sum = term = math.exp(-m)\n for i in range(1, df//2):\n term *= m / i\n sum += term\n return min(sum, 1.0)\n \n def classify(self, item, default=None):\n # loop through the categories looking for the best result\n best = default\n max = 0.0\n for c in self.categories():\n p = self.fisherprob(item, c)\n # make sure it exceeds the lower bound\n if p>self.getminimum(c) and p>max:\n best = c\n max = p\n return best\n\n# BPNN classifier \nclass bpnnclassifier():\n def __init__(self, getfearures):\n self.net = bpnn.bpnn('bpnnclassifier.db')\n self.getfearures = getfearures\n self.net.maketables()\n \n def train(self, item, cat):\n features = self.getfearures(item)\n # map the features and the candidate categories to network node ids\n inputs = [self.net.getinputid(i) for i in features]\n outputs = [self.net.getoutputid(o) for o in ['good','bad']]\n selectid = self.net.getoutputid(cat)\n self.net.trainquery(inputs, outputs, selectid)\n \n def classify(self, item, default=None):\n features = self.getfearures(item)\n inputs = [self.net.getinputid(i) for i in features]\n outputs = [self.net.getoutputid(o) for o in ['good','bad']]\n return self.net.getresult(inputs, outputs)\n","sub_path":"docclass.py","file_name":"docclass.py","file_ext":"py","file_size_in_byte":7577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"100615326","text":"import pandas as pd\nimport sys\nimport json \nimport numpy as np\nimport math as m\nimport gensim\nimport pandas as pd\nimport numpy as np\nfrom keras import backend as K\nfrom keras.layers import Conv1D, MaxPooling1D\nfrom keras.layers import Input, 
Dense, Activation\nfrom keras.models import Sequential, Model , load_model\nfrom keras.layers import Dense, Dropout, Activation, Flatten, LSTM, TimeDistributed, RepeatVector, Input, BatchNormalization, GRU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers.advanced_activations import LeakyReLU\nimport tensorflow as tf # f1() below relies on tf.where / tf.is_nan (TF 1.x API)\n\ndef f1(y_true, y_pred):\n #y_pred = K.round(y_pred)\n # y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), THRESHOLD), K.floatx())\n tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)\n tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)\n fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)\n fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)\n\n p = tp / (tp + fp + K.epsilon())\n r = tp / (tp + fn + K.epsilon())\n\n f1 = 2*p*r / (p+r+K.epsilon())\n f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)\n return K.mean(f1)\n
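\n# As a sketch, f1 can be attached as a custom Keras metric; the optimizer and\n# loss named here are illustrative assumptions, not taken from this script:\n#   model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=[f1])\n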
\nword2vec_model = gensim.models.Word2Vec.load(\"word2vec_train+test.model\")\nmodel = load_model(sys.argv[2])\n########################################### read json file ################################################\ntest_dict = []\ntest_data = open(sys.argv[1]).readlines()\nfor sentence in test_data : test_dict.append(json.loads(sentence))\n\n\n########################################### data processing ################################################\ntest_tokens = []\ntest_span = []\ntest_nodes_dict = []\n#test_nodes = []\ntest_edges = []\n\nfor dic in test_dict : \n\ttest_tokens.append(dic[\"tokens\"])\n\tnodes_dict = {}\n\tnode = []\n\tspan_list = []\n\tedges_list = []\n\tlast_end = 0\n\tfor nodes in dic[\"nodes\"] : \n\t\tstart , end = nodes[0][0] , nodes[0][1]\n\t\tfor i in range(start - last_end) : span_list.append((dic[\"tokens\"][last_end + i] , \"fuck\"))\n\t\ts = dic[\"tokens\"][start]\n\t\tfor i in range(1 , end - start) : s += (\" \" + dic[\"tokens\"][start + i])\n\t\tnodes_dict[tuple(nodes[0])] = (s , list(nodes[1].keys())[0])\t\t\n\t\tspan_list.append((s , list(nodes[1].keys())[0]))\n\t\tlast_end = end\n\ttest_nodes_dict.append(nodes_dict)\n\ttest_span.append(span_list)\n\tfor edges in dic[\"edges\"] : edges_list.append((nodes_dict[tuple(edges[0])][0] , nodes_dict[tuple(edges[1])][0] , list(edges[2].keys())[0]))\n\ttest_edges.append(edges_list)\n\ntemp = pd.Series([\"fuck\", \"value\" , \"agent\" , \"condition\" , \"theme\", \"theme_mod\" , \"quant_mod\" , \n\t\"co_quant\", \"null\", \"location\" , \"whole\" , \"source\" , \"reference_time\" , \"quant\" , \"manner\" , \"time\" , \"cause\" , \"+\" , \"-\" ])\nnode_label = pd.get_dummies(temp)\n\n\nsentence_vector = []\nfor line in test_span:\n\ttemp = []\n\tfor word in line:\n\t\ttemp.append(word2vec_model[word[0]])\n\tsentence_vector.append(temp)\nsentence_vector = np.array(sentence_vector)\nedges_vector = []\nsentence_len = len(sentence_vector)\nfor i in range(sentence_len):\n\tfor pairs in test_edges[i]:\n\t\tfor k in range(len(test_span[i])):\n\t\t\tif test_span[i][k][0] == pairs[0] : index1 = k\n\t\t\tif test_span[i][k][0] == pairs[1] : index2 = k\n\t\t\n\t\tvec = [] \n\t\tfor index in range(len(test_span[i])):\n\t\t\ttemp = []\n\t\t\ttemp.append(index - index1)\n\t\t\ttemp.append(index - index2)\n\t\t\tif test_span[i][index][0] == pairs[0]:\n\t\t\t\ttemp.append(1)\n\t\t\telse : temp.append(0)\n\t\t\tif test_span[i][index][0] == pairs[1]:\n\t\t\t\ttemp.append(1)\n\t\t\telse : temp.append(0)\n\t\t\ttemp += list(node_label[test_span[i][index][1]])\n\t\t\tvec.append(temp)\n\t\tvec = np.array(vec)\n\t\tedges_vector.append(np.hstack((sentence_vector[i] , vec)))\n\ntest_x = pad_sequences(edges_vector, maxlen = 50, dtype = 'float64', padding = 'post', truncating = 'post', value = np.zeros(len(edges_vector[0][0])))\n\n\ny_predict = model.predict(test_x)\ny = y_predict.argmax(axis = -1).reshape(test_x.shape[0],1)\n\ntemp = []\nacc = 0\ntotal = 0\nans = []\nfor i in test_edges:\n\tfor j in i:\n\t\tans.append(j[2])\nfor i in range(len(y)):\n\tif y[i][0] == 0:\n\t\ttemp.append('analogy')\n\telif y[i][0] == 1:\n\t\ttemp.append('eq')\n\telse:\n\t\ttemp.append('fact')\n\tif temp[i][0] == ans[i][0]:\n\t\tacc += 1\n\ntpf = 0\nfpf = 0\ntnf = 0\nfnf = 0\ntpa = 0\nfpa = 0\ntna = 0\nfna = 0\ntpe = 0\nfpe = 0\ntne = 0\nfne = 0\nfor i in range(len(y)):\n\tif temp[i][0] == 'f':\n\t\tif ans[i][0] == 'f':\n\t\t\ttpf += 1\n\t\telse:\n\t\t\tfpf += 1\n\telse:\n\t\tif ans[i][0] != 'f':\n\t\t\ttnf += 1\n\t\telse:\n\t\t\tfnf += 1\n\n\tif temp[i][0] == 'e':\n\t\tif ans[i][0] == 'e':\n\t\t\ttpe += 1\n\t\telse:\n\t\t\tfpe += 1\n\telse:\n\t\tif ans[i][0] != 'e':\n\t\t\ttne += 1\n\t\telse:\n\t\t\tfne += 1\n\n\tif temp[i][0] == 'a':\n\t\tif ans[i][0] == 'a':\n\t\t\ttpa += 1\n\t\telse:\n\t\t\tfpa += 1\n\telse:\n\t\tif ans[i][0] != 'a':\n\t\t\ttna += 1\n\t\telse:\n\t\t\tfna += 1\n#print(tpa, tpe, tpf)\naf1 = 2*tpa/(2*tpa + fna + fpa)\nff1 = 2*tpf/(2*tpf + fnf + fpf)\nef1 = 2*tpe/(2*tpe + fne + fpe)\nprint(\"F1 Score : \",(af1 + ff1 + ef1)/3)\n#print(acc/len(y))\n\n","sub_path":"Final_Project/two-layer_Bidirectional_LSTM/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"21756693","text":"\n\nfrom __future__ import division, print_function, absolute_import\nimport numpy as np\nfrom .qmr import qmr\nfrom .wongsandler import ws_nrtl, ws_wilson, ws_unifac, ws_rk\nfrom .mhv import mhv_nrtl, mhv_wilson, mhv_nrtlt, mhv_unifac, mhv_rk\nfrom .alphas import alpha_soave, alpha_sv, alpha_rk\nfrom ..constants import R\n\nclass cubicm():\n '''\n Mixture Cubic EoS Object\n \n This object has implemented methods for phase equilibrium \n as well as for interfacial properties calculations.\n \n Parameters\n ----------\n mix : object\n mixture created with mixture class\n c1, c2 : float\n constants of cubic EoS\n oma, omb : float\n constants of cubic EoS\n alpha_eos : function\n function that gives thermal functionality to the attractive term of the EoS\n mixrule : function\n computes mixture attractive and cohesive terms\n \n Attributes\n ----------\n Tc: array_like\n critical temperature in K\n Pc: array_like\n critical pressure in bar\n w: array_like\n acentric factor\n cii : array_like\n influence factor for SGT\n nc : int\n number of components of mixture\n \n Methods\n -------\n a_eos : computes the attractive term of cubic eos.\n Zmix : computes the roots of the compressibility factor polynomial.\n density : computes density of mixture.\n logfugef : computes effective fugacity coefficients.\n logfugmix : computes the mixture fugacity coefficient.\n a0ad : computes dimensionless Helmholtz density energy.\n muad : computes dimensionless chemical potential.\n dOm : computes dimensionless Thermodynamic Grand Potential.\n ci : computes influence parameters matrix for SGT.\n sgt_adim : computes dimensionless factors for SGT.\n\n '''\n \n 
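# Typical use, as a sketch (the mixture setup and the numbers are assumptions):\n #   eos = prmix(mix, mixrule='qmr')   # prmix is defined at the bottom of this module\n #   Zl = min(eos.Zmix(np.array([0.3, 0.7]), 320.0, 1.01325))   # liquid root at T [K], P [bar]\n \n 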
def __init__(self, mix, c1, c2, oma, omb, alpha_eos, mixrule):\n \n self.c1 = c1\n self.c2 = c2\n self.oma = oma\n self.omb = omb\n self.alpha_eos = alpha_eos \n self.emin = 2+self.c1+self.c2+2*np.sqrt((1+self.c1)*(1+self.c2))\n\n \n self.Tc = np.array(mix.Tc, ndmin = 1) \n self.Pc = np.array(mix.Pc, ndmin = 1)\n self.w = np.array(mix.w, ndmin = 1)\n self.cii = np.array(mix.cii, ndmin = 1) \n self.b = self.omb*R*self.Tc/self.Pc\n self.nc = mix.nc\n self.beta = np.zeros([self.nc, self.nc])\n \n if mixrule == 'qmr':\n self.mixrule = qmr \n if hasattr(mix, 'kij'):\n self.kij = mix.kij\n self.mixruleparameter = (mix.kij,)\n else: \n self.kij = np.zeros([self.nc, self.nc])\n self.mixruleparameter = (self.kij, )\n \n elif mixrule == 'mhv_nrtl':\n self.mixrule = mhv_nrtl \n if hasattr(mix, 'g') and hasattr(mix, 'alpha'):\n # this is used with mhv_nrtl\n self.nrtl = (mix.alpha, mix.g, mix.g1)\n self.mixruleparameter = (self.c1,self.c2, \n mix.alpha, mix.g, mix.g1)\n else: \n raise Exception('NRTL parameters needed')\n \n elif mixrule == 'mhv_nrtlt':\n self.mixrule = mhv_nrtlt \n if hasattr(mix, 'g') and hasattr(mix, 'alpha') and hasattr(mix, 'rkternario'):\n self.nrtlt = (mix.alpha, mix.g, mix.g1, mix.rkternario)\n self.mixruleparameter = (self.c1,self.c2, mix.alpha, mix.g,\n mix.g1, mix.rkternario)\n else: \n raise Exception('NRTL/ternary parameters needed')\n \n elif mixrule == 'mhv_wilson':\n self.mixrule = mhv_wilson \n if hasattr(mix, 'Aij'):\n # this is used with mhv_wilson\n self.wilson = (mix.Aij, mix.vlrackett)\n self.mixruleparameter = (self.c1,self.c2, mix.Aij, mix.vlrackett)\n else: \n raise Exception('Wilson parameters needed')\n \n elif mixrule == 'mhv_unifac':\n self.mixrule = mhv_unifac \n if hasattr(mix, 'actmodelp'):\n mix.unifac()\n self.unifac = mix.actmodelp\n self.mixruleparameter = (self.c1,self.c2, *mix.actmodelp)\n else: \n raise Exception('Unifac parameters needed')\n \n elif mixrule == 'mhv_rk':\n\n self.mixrule = mhv_rk \n if hasattr(mix, 'rkp') and hasattr(mix, 'rkpT'):\n self.rk = (mix.rkp, mix.rkpT, mix.combinatory)\n self.mixruleparameter = (self.c1,self.c2, mix.rkp, mix.rkpT,\n mix.combinatory)\n else:\n raise Exception('RK parameters needed')\n\n \n elif mixrule == 'ws_nrtl':\n self.mixrule = ws_nrtl \n if hasattr(mix, 'Kijws'):\n self.Kijws = mix.Kijws\n else:\n self.Kijws = np.zeros([self.nc, self.nc]) \n\n if hasattr(mix, 'g') and hasattr(mix, 'alpha'):\n # this is used with mhv_nrtl\n c1, c2 = self.c1, self.c2\n C = np.log((1+c1)/(1+c2))/(c1-c2)\n self.nrtl = (mix.alpha, mix.g, mix.g1)\n self.mixruleparameter = (C, self.Kijws, \n mix.alpha, mix.g, mix.g1)\n else: \n raise Exception('NRTL parameters needed')\n \n elif mixrule == 'ws_wilson':\n self.mixrule = ws_wilson \n if hasattr(mix, 'Kijws'):\n self.Kijws = mix.Kijws\n else:\n self.Kijws = np.zeros([self.nc, self.nc]) \n if hasattr(mix, 'Aij'):\n # this is used with mhv_wilson\n c1, c2 = self.c1, self.c2\n C = np.log((1+c1)/(1+c2))/(c1-c2)\n self.wilson = (mix.Aij, mix.vlrackett)\n self.mixruleparameter = (C, self.Kijws, mix.Aij, mix.vlrackett)\n else: \n raise Exception('Wilson parameters needed')\n \n elif mixrule == 'ws_rk':\n self.mixrule = ws_rk\n if hasattr(mix, 'Kijws'):\n self.Kijws = mix.Kijws\n else:\n self.Kijws = np.zeros([self.nc, self.nc])\n \n if hasattr(mix, 'rkp') and hasattr(mix, 'rkpT'):\n c1, c2 = self.c1, self.c2\n C = np.log((1+c1)/(1+c2))/(c1-c2)\n self.rk = (mix.rkp, mix.rkpT, mix.combinatoria)\n self.mixruleparameter = (C, self.Kijws, mix.rkp, mix.rkpT,\n mix.combinatoria)\n else:\n raise Exception('RK parameters needed')\n \n elif mixrule == 'ws_unifac':\n self.mixrule = ws_unifac \n if 
hasattr(mix, 'Kijws'):\n self.Kijws = mix.Kijws\n else:\n self.Kijws = np.zeros([self.nc, self.nc]) \n\n c1, c2 = self.c1, self.c2\n C = np.log((1+c1)/(1+c2))/(c1-c2)\n mix.unifac()\n self.unifac = mix.actmodelp\n self.mixruleparameter = (C, self.Kijws,*self.unifac)\n else: \n raise Exception('Mixrule not valid')\n \n \n #Cubic EoS methods \n def a_eos(self,T):\n \"\"\" \n a_eos(T)\n \n Method that computes atractive term of cubic eos at fixed T (in K)\n \n Parameters\n ----------\n \n T : float\n absolute temperature in K\n \n Returns\n -------\n a : array_like\n atractive term array\n \"\"\"\n alpha = self.alpha_eos(T,self.k,self.Tc)\n a = self.oma*(R*self.Tc)**2*alpha/self.Pc\n return a\n \n def _Zroot(self,A,B):\n a1 = (self.c1+self.c2-1)*B-1\n a2 = self.c1*self.c2*B**2-(self.c1+self.c2)*(B**2+B)+A\n a3 = -B*(self.c1*self.c2*(B**2+B)+A)\n Zpol=[1,a1,a2,a3]\n Zroots = np.roots(Zpol)\n Zroots = np.real(Zroots[np.imag(Zroots) == 0])\n Zroots = Zroots[Zroots>B]\n return Zroots\n \n def Zmix(self, X, T, P):\n '''\n Zmix (X, T, P)\n \n Method that computes the roots of the compresibility factor polynomial\n at given mole fractions (X), Temperature (T) and Pressure (P)\n \n Parameters\n ----------\n \n X : array_like\n mole fraction vector\n T : float\n absolute temperature in K\n P : float\n pressure in bar\n\n Returns\n ------- \n Z : array_like\n roots of Z polynomial\n '''\n a = self.a_eos(T)\n am, bm, ep, ap, bp = self.mixrule(X,T, a, self.b,*self.mixruleparameter)\n A = am*P/(R*T)**2\n B = bm*P/(R*T)\n return self._Zroot(A,B)\n\n def density(self, X, T, P, state):\n \"\"\" \n density(X, T, P, state)\n Method that computes the density of the mixture at X, T, P\n\n \n Parameters\n ----------\n \n X : array_like\n mole fraction vector\n T : float\n absolute temperature in K\n P : float\n pressure in bar\n state : string\n 'L' for liquid phase and 'V' for vapour phase\n\n Returns\n -------\n density: array_like\n density vector of the mixture in moll/cm3\n \"\"\"\n if state == 'L':\n Z=min(self.Zmix(X,T,P))\n elif state == 'V':\n Z=max(self.Zmix(X,T,P))\n return X*P/(R*T*Z)\n \n def logfugef(self, X, T, P, state, v0 = None):\n \"\"\" \n logfugef(X, T, P, state)\n \n Method that computes the effective fugacity coefficients at given\n composition, temperature and pressure. \n\n Parameters\n ----------\n \n X : array_like, mole fraction vector\n T : absolute temperature in K\n P : pressure in bar\n state : 'L' for liquid phase and 'V' for vapour phase\n \n Returns\n -------\n logfug: array_like\n effective fugacity coefficients\n v0 : float\n volume of phase, if calculated\n \"\"\"\n b = self.b\n a = self.a_eos(T)\n am, bm, ep, ap, bp = self.mixrule(X, T, a, b, *self.mixruleparameter)\n if state == 'V':\n Z=max(self.Zmix(X,T,P))\n elif state == 'L':\n Z=min(self.Zmix(X,T,P))\n \n B=(bm*P)/(R*T)\n \n logfug=(Z-1)*(bp/bm) - np.log(Z-B)\n logfug -= (ep/(self.c2-self.c1))*np.log((Z+self.c2*B)/(Z+self.c1*B))\n return logfug, v0\n \n def logfugmix(self, X, T, P, state, v0 = None):\n \n \"\"\" \n logfugmix(X, T, P, state)\n \n Method that computes the mixture fugacity coefficient at given\n composition, temperature and pressure. 
\n\n Parameters\n ----------\n \n X : array_like\n mole fraction vector\n T : float\n absolute temperature in K\n P : float\n pressure in bar\n state : string\n 'L' for liquid phase and 'V' for vapour phase\n \n Returns\n -------\n lofgfug : array_like\n effective fugacity coefficients\n \"\"\"\n\n a = self.a_eos(T)\n am, bm, ep, ap, bp = self.mixrule(X,T,a,self.b,*self.mixruleparameter)\n if state == 'V':\n Z=max(self.Zmix(X,T,P))\n elif state == 'L':\n Z=min(self.Zmix(X,T,P))\n \n B=(bm*P)/(R*T)\n A=(am*P)/(R*T)**2\n \n logfug=Z - 1 - np.log(Z-B)\n logfug -= (A/(self.c2-self.c1)/B)*np.log((Z+self.c2*B)/(Z+self.c1*B))\n \n return logfug, v0\n \n def a0ad(self, roa, T):\n \n \"\"\" \n a0ad(roa, T)\n \n Method that computes the adimenstional Helmholtz density energy at given\n density and temperature.\n\n Parameters\n ----------\n \n roa : array_like\n adimentional density vector\n T : float\n absolute temperature in K\n\n Returns\n ------- \n a0ad: float\n adimenstional Helmholtz density energy\n \"\"\"\n \n c1 = self.c1\n c2 = self.c2\n ai = self.a_eos(T)\n bi = self.b\n a = ai[0]\n b = bi[0]\n ro = np.sum(roa)\n X = roa/ro\n \n am, bm, ep, ap, bp = self.mixrule(X, T, ai, bi, *self.mixruleparameter)\n Prefa=1*b**2/a\n Tad = R*T*b/a\n ama = am/a\n bma = bm/b\n \n a0 = np.sum(np.nan_to_num(Tad*roa*np.log(roa/ro)))\n a0 += -Tad*ro*np.log(1-bma*ro)\n a0 += -Tad*ro*np.log(Prefa/(Tad*ro))\n a0 += -ama*ro*np.log((1+c2*ro*bma)/(1+c1*ro*bma))/((c2-c1)*bma)\n \n return a0\n \n def muad(self, roa, T):\n \n \"\"\" \n muad(roa, T)\n \n Method that computes the adimenstional chemical potential at given\n density and temperature.\n\n Parameters\n ----------\n \n roa : array_like\n adimentional density vector\n T : float\n absolute temperature in K\n\n Returns\n ------- \n muad : array_like\n adimentional chemical potential vector\n \"\"\"\n \n c1 = self.c1\n c2 = self.c2\n ai = self.a_eos(T)\n bi = self.b\n a = ai[0]\n b = bi[0]\n ro = np.sum(roa)\n X = roa/ro\n \n am, bm, ep, ap, bp = self.mixrule(X,T, ai, bi,*self.mixruleparameter)\n Prefa=1*b**2/a\n Tad = R*T*b/a\n apa = ap/a\n ama = am/a\n bma = bm/b\n bad = bp/b\n \n mui = -Tad*np.log(1-bma*ro)\n mui += -Tad*np.log(Prefa/(Tad*roa))+Tad\n mui += bad*Tad*ro/(1-bma*ro)\n \n mui -= ro*(ama+apa) * np.log((1+c2*bma*ro)/(1+c1*bma*ro)) / ((c2-c1)*bma*ro)\n mui += bad*ama * np.log((1+c2*bma*ro)/(1+c1*bma*ro)) / ((c2-c1)*bma**2)\n mui -= bad*ama*ro / ((1+c2*bma*ro)*(1+c1*bma*ro)*bma)\n \n return mui\n \n \n def dOm(self, roa, T, mu, Psat):\n \"\"\" \n dOm(roa, T, mu, Psat)\n \n Method that computes the adimenstional Thermodynamic Grand potential at given\n density and temperature.\n\n Parameters\n ----------\n \n roa : array_like\n adimentional density vector\n T : float\n absolute temperature in K\n mu : array_like\n adimentional chemical potential at equilibrium\n Psat : float\n adimentional pressure at equilibrium\n\n Returns\n ------- \n dom: float\n Thermodynamic Grand potential\n \"\"\"\n dom = self.a0ad(roa, T) - np.sum(np.nan_to_num(roa*mu)) + Psat\n return dom\n \n def _lnphi0(self, T, P):\n \n nc = self.nc\n a_puros = self.a_eos(T)\n Ai = a_puros*P/(R*T)**2\n Bi = self.b*P/(R*T)\n pols = np.array([Bi-1,-3*Bi**2-2*Bi+Ai,(Bi**3+Bi**2-Ai*Bi)])\n Zs = np.zeros([nc,2])\n for i in range(nc):\n zroot = np.roots(np.hstack([1,pols[:,i]]))\n zroot = zroot[zroot>Bi[i]]\n Zs[i,:]=np.array([max(zroot),min(zroot)])\n \n\n logphi=Zs - 1 - np.log(Zs.T-Bi)\n logphi -= (Ai/(self.c2-self.c1)/Bi)*np.log((Zs.T+self.c2*Bi)/(Zs.T+self.c1*Bi))\n logphi = 
np.amin(logphi,axis=0)\n \n return logphi\n \n def beta_sgt(self, beta):\n self.beta = beta\n \n def ci(self, T):\n '''\n ci(T)\n \n Method that evaluates the polynomials for the influence parameters used\n in the SGT theory for surface tension calculations.\n \n Parameters\n ----------\n T : float\n absolute temperature in K\n\n Returns\n ------- \n cij: array_like\n matrix of influence parameters with geomtric mixing rule.\n '''\n\n n=self.nc\n ci=np.zeros(n)\n for i in range(n):\n ci[i]=np.polyval(self.cii[i],T)\n self.cij=np.sqrt(np.outer(ci,ci))*(1-self.beta)\n return self.cij\n \n def sgt_adim(self, T):\n '''\n sgt_adim(T)\n \n Method that evaluates adimentional factor for temperature, pressure, \n density, tension and distance for interfacial properties computations with\n SGT.\n \n Parameters\n ----------\n T : absolute temperature in K\n \n Returns\n ------- \n Tfactor : float\n factor to obtain dimentionless temperature (K -> adim)\n Pfactor : float\n factor to obtain dimentionless pressure (bar -> adim)\n rofactor : float\n factor to obtain dimentionless density (mol/cm3 -> adim)\n tenfactor : float\n factor to obtain dimentionless surface tension (mN/m -> adim)\n zfactor : float\n factor to obtain dimentionless distance (Amstrong -> adim)\n \n '''\n a0 = self.a_eos(T)[0]\n b0 = self.b[0]\n ci = self.ci(T)[0,0]\n Tfactor = R*b0/a0\n Pfactor = b0**2/a0\n rofactor = b0\n tenfactor = 1000*np.sqrt(a0*ci)/b0**2*(np.sqrt(101325/1.01325)*100**3) \n zfactor = np.sqrt(a0/ci*10**5/100**6)*10**-10 \n return Tfactor, Pfactor, rofactor, tenfactor, zfactor\n \n# Peng Robinson EoS \nc1pr = 1-np.sqrt(2)\nc2pr = 1+np.sqrt(2)\nomapr = 0.4572355289213825\nombpr = 0.07779607390388854\nclass prmix(cubicm): \n def __init__(self, mix, mixrule = 'qmr'):\n cubicm.__init__(self, mix,c1 = c1pr, c2 = c2pr,\n oma = omapr, omb = ombpr, alpha_eos = alpha_soave, mixrule = mixrule )\n \n self.k = 0.37464 + 1.54226*self.w - 0.26992*self.w**2\n \n# Peng Robinson SV EoS \nclass prsvmix(cubicm): \n def __init__(self, mix, mixrule = 'qmr'):\n cubicm.__init__(self, mix, c1 = c1pr, c2 = c2pr,\n oma = omapr, omb = ombpr, alpha_eos = alpha_sv, mixrule = mixrule )\n if np.all(mix.ksv == 0):\n self.k = np.zeros([self.nc,2])\n self.k[:,0] = 0.378893+1.4897153*self.w-0.17131838*self.w**2+0.0196553*self.w**3\n else:\n self.k = np.array(mix.ksv) \n\n# RK - EoS\nc1rk = 0\nc2rk = 1\nomark = 0.42748\nombrk = 0.08664\nclass rksmix(cubicm): \n def __init__(self, mix, mixrule = 'qmr'):\n cubicm.__init__(self, mix, c1 = c1rk, c2 = c2rk,\n oma = omark, omb = ombrk, alpha_eos = alpha_soave, mixrule = mixrule)\n self.k = 0.47979 + 1.5476*self.w - 0.1925*self.w**2 + 0.025*self.w**3\n \n#RKS- EoS \nclass rkmix(cubicm): \n def __init__(self, mix, mixrule = 'qmr'):\n cubicm.__init__(self, mix, c1 = c1rk, c2 = c2rk,\n oma = omark, omb = ombrk, alpha_eos = alpha_rk, mixrule = mixrule)\n def a_eos(self,T):\n alpha=self.alpha_eos(T, self.Tc)\n return self.oma*(R*self.Tc)**2*alpha/self.Pc\n\n","sub_path":"build/lib.win-amd64-3.7/phasepy/cubic/cubicmix.py","file_name":"cubicmix.py","file_ext":"py","file_size_in_byte":19089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"147772359","text":"import fnmatch\r\nimport glob\r\nimport os\r\nimport shutil\r\nimport sys\r\nimport zipfile\r\n\r\nfrom lxml import etree\r\n\r\nfrom pyro.CommandArguments import CommandArguments\r\nfrom pyro.ElementHelper import ElementHelper\r\nfrom pyro.Logger import Logger\r\nfrom pyro.PapyrusProject import 
PapyrusProject\r\nfrom pyro.PathHelper import PathHelper\r\nfrom pyro.ProcessManager import ProcessManager\r\n\r\n\r\nclass PackageManager(Logger):\r\n def __init__(self, ppj: PapyrusProject) -> None:\r\n self.ppj = ppj\r\n self.options = ppj.options\r\n\r\n self.extension: str = '.ba2' if self.options.game_type == 'fo4' else '.bsa'\r\n\r\n self.package_paths: list = []\r\n\r\n def _fix_package_extension(self, package_name: str) -> str:\r\n if not package_name.casefold().endswith(('.ba2', '.bsa')):\r\n return '%s%s' % (package_name, self.extension)\r\n return '%s%s' % (os.path.splitext(package_name)[0], self.extension)\r\n\r\n def _populate_include_paths(self, parent_node: etree.ElementBase, root_path: str) -> list:\r\n include_paths: list = []\r\n\r\n for include_node in parent_node:\r\n if not include_node.tag.endswith('Include'):\r\n continue\r\n\r\n no_recurse: bool = self.ppj._get_attr_as_bool(include_node, 'NoRecurse')\r\n wildcard_pattern: str = '*' if no_recurse else '**\\*'\r\n\r\n include_text: str = self.ppj.parse(include_node.text)\r\n\r\n if include_text == os.curdir or include_text == os.pardir:\r\n PackageManager.log.warning('Include paths cannot be equal to \".\" or \"..\"')\r\n continue\r\n\r\n if include_text.startswith('.'):\r\n PackageManager.log.warning('Include paths cannot start with \".\"')\r\n continue\r\n\r\n # populate files list using simple glob patterns\r\n if '*' in include_text:\r\n search_path: str = os.path.join(root_path, wildcard_pattern)\r\n files: list = [f for f in glob.iglob(search_path, recursive=not no_recurse) if os.path.isfile(f)]\r\n matches: list = fnmatch.filter(files, include_text)\r\n if not matches:\r\n PackageManager.log.warning('No files in \"%s\" matched glob pattern: %s' % (search_path, include_text))\r\n include_paths.extend(matches)\r\n continue\r\n\r\n include_path: str = os.path.normpath(include_text)\r\n\r\n # populate files list using absolute paths\r\n if os.path.isabs(include_path) and os.path.exists(include_path):\r\n if root_path not in include_path:\r\n PackageManager.log.warning('Cannot include path outside RootDir: \"%s\"' % include_path)\r\n continue\r\n include_paths.append(include_path)\r\n continue\r\n\r\n # populate files list using relative file path\r\n test_path = os.path.join(root_path, include_path)\r\n if not os.path.isdir(test_path):\r\n include_paths.append(test_path)\r\n continue\r\n\r\n # populate files list using relative folder path\r\n search_path = os.path.join(root_path, include_path, wildcard_pattern)\r\n include_paths.extend([f for f in glob.iglob(search_path, recursive=not no_recurse) if os.path.isfile(f)])\r\n\r\n return PathHelper.uniqify(include_paths)\r\n\r\n def build_commands(self, containing_folder: str, output_path: str) -> str:\r\n \"\"\"Returns arguments for BSArch as a string\"\"\"\r\n arguments = CommandArguments()\r\n\r\n arguments.append_quoted(self.options.bsarch_path)\r\n arguments.append('pack')\r\n arguments.append_quoted(containing_folder)\r\n arguments.append_quoted(output_path)\r\n\r\n if self.options.game_type == 'fo4':\r\n arguments.append('-fo4')\r\n elif self.options.game_type == 'sse':\r\n arguments.append('-sse')\r\n else:\r\n arguments.append('-tes5')\r\n\r\n return arguments.join()\r\n\r\n def create_packages(self) -> None:\r\n if self.ppj.packages_node is None:\r\n return\r\n\r\n # clear temporary data\r\n if os.path.exists(self.options.temp_path):\r\n shutil.rmtree(self.options.temp_path, ignore_errors=True)\r\n\r\n # ensure package path exists\r\n if not 
os.path.exists(self.options.package_path):\r\n os.makedirs(self.options.package_path, exist_ok=True)\r\n\r\n for i, package_node in enumerate(self.ppj.packages_node):\r\n if not package_node.tag.endswith('Package'):\r\n continue\r\n\r\n default_name: str = self.ppj.project_name if i == 0 else '%s (%s)' % (self.ppj.project_name, i)\r\n package_name: str = self.ppj.parse(package_node.get('Name', default=default_name))\r\n package_name = self._fix_package_extension(package_name)\r\n\r\n package_root: str = self.ppj.parse(package_node.get('RootDir', default=self.ppj.project_path))\r\n\r\n PackageManager.log.info('Creating \"%s\"...' % package_name)\r\n\r\n package_data: list = self._populate_include_paths(package_node, package_root)\r\n\r\n if not package_data:\r\n PackageManager.log.info('No includes found for package: \"%s\"' % package_name)\r\n continue\r\n\r\n PackageManager.print_list('Includes found:', package_data)\r\n\r\n for source_path in package_data:\r\n target_path: str = os.path.join(self.options.temp_path, os.path.relpath(source_path, package_root))\r\n\r\n os.makedirs(os.path.dirname(target_path), exist_ok=True)\r\n shutil.copy2(source_path, target_path)\r\n\r\n package_path: str = os.path.join(self.options.package_path, package_name)\r\n self.package_paths.append(package_path)\r\n\r\n # run bsarch\r\n commands: str = self.build_commands(self.options.temp_path, package_path)\r\n ProcessManager.run(commands, use_bsarch=True)\r\n\r\n # clear temporary data\r\n if os.path.exists(self.options.temp_path):\r\n shutil.rmtree(self.options.temp_path, ignore_errors=True)\r\n\r\n def create_zip(self) -> None:\r\n if self.ppj.zipfile_node is None:\r\n return\r\n\r\n zip_data: list = self._populate_include_paths(self.ppj.zipfile_node, self.ppj.zip_root_path)\r\n if not zip_data:\r\n PackageManager.log.error('No includes found for ZIP file: \"%s\"' % self.ppj.zip_file_name)\r\n return\r\n\r\n PackageManager.log.info('Creating \"%s\"...' 
% self.ppj.zip_file_name)\r\n\r\n PackageManager.print_list('Includes found:', zip_data)\r\n\r\n # ensure that zip output folder exists\r\n zip_output_path: str = os.path.join(self.options.zip_output_path, self.ppj.zip_file_name)\r\n os.makedirs(os.path.dirname(zip_output_path), exist_ok=True)\r\n\r\n try:\r\n with zipfile.ZipFile(zip_output_path, mode='w', compression=self.ppj.compress_type) as z:\r\n for include_path in zip_data:\r\n arcname: str = os.path.relpath(include_path, self.ppj.zip_root_path)\r\n z.write(include_path, arcname, compress_type=self.ppj.compress_type)\r\n\r\n PackageManager.log.info('Wrote ZIP file to: \"%s\"' % zip_output_path)\r\n except PermissionError:\r\n PackageManager.log.error('Cannot open ZIP file for writing: \"%s\"' % zip_output_path)\r\n sys.exit(1)\r\n","sub_path":"pyro/PackageManager.py","file_name":"PackageManager.py","file_ext":"py","file_size_in_byte":7467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"80858970","text":"# Main Controller of the backend\n# determine application logic from each url\n# Authors : Tuyen, Khang\n# Last Date: 29/03/2018\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseServerError\nfrom django.template import loader, RequestContext\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework import viewsets\nfrom .serializers import UserSerializer, GroupSerializer, CreateFormSerializer\nfrom django.contrib.auth.models import User, Group\nfrom django.http import JsonResponse\nfrom .views import *\nimport htmlmin\nfrom Utility.networks import RocketSetting, RocketUsersAPI, ActionLinkPrep, ActionLinkBuilder, ActionParameters\nfrom .default_data.rocket_data import RCLoginDataDefault\nfrom user.default_data.rocket_data import RocketUserData\nfrom Utility.default_data.rocket_data import RCAPI\nfrom Utility.models import RocketAPIAuthentication\nfrom attendance_app.models import Attendance\nimport random\n# viewsets: create, edit, delete, post, get, list\n#1. welcome user to create attendance\n#2. create attendance\n#3. allow student to check attendance\n#4. list all attendances\n#5. list all checks of an attendance\n#6. 
http://www.django-rest-framework.org/api-guide/views/\n\n# API:\n# X-Auth-Token: pl59Z7F1S7c5MGIMi8ZtQ6d1XAtvafqwCoc1VFoyRCN\n# X-User-Id: KEPvCAsPtzniBTdYB\nadmin = 'admin'\n\n\nclass APIViews:\n path = '/attendance_app'\n confirm_create_attendance = '/confirm_create_attendance'\n confirm_submit = '/confirm_submit'\n def __init__(self):\n self.data = ''\n self.rocketPath = 'views/'\n self.appControllers = AppControllers()\n self.app_view = AppViews()\n\n def templateResponseDictionary(self, request):\n return {\n \"username\": \"Attendance\",\n \"icon_emoji\": \":ghost:\",\n }\n def buildURL(self, request):\n return request.scheme + \"://\" + request.get_host() + APIViews.path\n\n def authenticate(self, params):\n source = params.get('source')\n username = 'attendance'\n password = 'attendance'\n api_authentication = RocketAPIAuthentication.objects.getRocketAPIAuth(source)\n rocket_setting = RocketSetting()\n rocket_setting.url = source + RocketSetting.API_PATH\n if (api_authentication):\n if api_authentication.rocket_chat_user_id != None:\n rocket_setting.user_id = api_authentication.rocket_chat_user_id\n\n if api_authentication.rocket_chat_auth_token != None:\n rocket_setting.auth_token = api_authentication.rocket_chat_auth_token\n\n if rocket_setting.user_id == None or rocket_setting.auth_token == None:\n rocket_api = RocketUsersAPI(rocket_setting)\n login_result = rocket_api.login(username, password)\n if (login_result.is_success()):\n rocket_setting.user_id = login_result.get_uid()\n rocket_setting.auth_token = login_result.get_auth_token()\n api_authentication.set_user_id(login_result.get_uid()) \\\n .set_auth_token(login_result.get_auth_token()) \\\n .save()\n else :\n rocket_setting = None\n else:\n rocket_api = RocketUsersAPI(rocket_setting)\n login_result = rocket_api.login(username, password)\n if (login_result.is_success()):\n rocket_setting.user_id = login_result.get_uid()\n rocket_setting.auth_token = login_result.get_auth_token()\n RocketAPIAuthentication.objects.createRocketAPIAuth(source, login_result.get_uid(),\n login_result.get_auth_token())\n else :\n rocket_setting = None\n return rocket_setting\n\n def format_html(self, response):\n html = response.getvalue().decode('utf-8')\n minified_html_value = htmlmin.minify(html, remove_empty_space = True)\n return minified_html_value\n\n @csrf_exempt\n def createAttendance(self, request):\n submitURL = self.appControllers.urlToConfirmCreateAttendance(request, AppViews.path)\n instructor = self.appControllers.createUserProfileIfNeeded(request)\n context = self.appControllers.contextForCreateAttendanceHTML(instructor, submitURL)\n res_html = self.format_html(self.app_view.createAttendance(request))\n rocket_setting = self.authenticate(request.GET)\n if (rocket_setting):\n rc_api = RocketUsersAPI(rocket_setting)\n response = rc_api.get_user_by_username(instructor.username)\n if (response.is_success()):\n submit_link = ActionLinkPrep('Confirm Create Attendance', 'name=submit').buildActionLink()\n act_params = ActionParameters(self.buildURL(request) + APIViews.confirm_create_attendance, \"post\")\n source = request.GET.get('source')\n act_params.config_optional({'source': source, 'username': instructor.username})\n act_params.config_optional({RCAPI.CLIENT_SERVER: RCAPI.SERVER_ONLY})\n params = act_params.buildActionParameters()\n\n act_link_obj = ActionLinkBuilder(act_links = [submit_link],\n act_params = params).buildObject()\n rc_api.post_message(response.user_data._id, res_html, act_link_obj)\n else:\n print('Fail to obtain user 
id.')\n return HttpResponse()\n\n @csrf_exempt\n def confirmCreateAttendance(self, request):\n params = request.POST\n instructor_username = params.get('username')\n rocket_setting = self.authenticate(params)\n if (rocket_setting):\n rc_api = RocketUsersAPI(rocket_setting)\n response = rc_api.get_users()\n if (response.is_success()):\n users = response.get_users()\n users = list(filter(lambda user : user.name != None and user.username != None, users))\n res = self.app_view.confirmCreateAttendanceAPI(request)\n if (not res[0]):\n res_html = self.format_html(res[1])\n channels = list(map(lambda user : user._id, users))\n responses = rc_api.post_message(text = res_html, channel = instructor_username)\n else:\n print(\"confirm create attendance params: \", params)\n response_delete = rc_api.delete_message(params.get(RocketUserData.CHANNEL), params.get(RocketUserData.MESSAGE_ID))\n\n res_html_student = self.format_html(res[1])\n channels = [user._id for user in users if user.username != instructor_username]\n random_answers = random.sample(range(1, 11), 5)\n answer_links = []\n\n for answer in random_answers:\n answer_link = ActionLinkPrep('' + str(answer), 'value=' + str(answer)).buildActionLink()\n answer_links.append(answer_link)\n\n correct_answer_index = random.randint(0, 4)\n correct_answer = random_answers[correct_answer_index]\n\n act_params = ActionParameters(self.buildURL(request) + APIViews.confirm_submit, \"post\")\n source = request.POST.get('source')\n act_params = act_params.config_optional({'source': source,\n 'username': instructor_username,\n 'instructor_username': instructor_username,\n 'answer': str(correct_answer),\n 'attendance_id': res[0]})\n act_params.config_optional({RCAPI.CLIENT_SERVER: RCAPI.SERVER_ONLY})\n\n act_params = act_params.buildActionParameters()\n\n act_link_obj = ActionLinkBuilder(act_links = answer_links,\n act_params = act_params).buildObject()\n responses = rc_api.post_message(text = res_html_student, channel = channels, opt = act_link_obj)\n\n res_html_instructor = self.format_html(self.app_view.viewAttendance(request, {'attendance_id' : res[0], 'answer': str(correct_answer)})[1])\n instructor_channel = [user._id for user in users if user.username == instructor_username]\n response_instructor = rc_api.post_message(text = res_html_instructor, channel = instructor_channel)\n if response_instructor.is_success():\n Attendance.objects.set_message_id(res[0], response_instructor.msg[0]._id)\\\n .set_room_id(res[0], response_instructor.msg[0].rid)\n else :\n print('Fail to send message to instructor. 
Error message ', response_instructor.get_err())\n else:\n print('Fail to obtain user list.')\n return HttpResponse()\n\n @csrf_exempt\n def confirmSubmit(self, request):\n params = request.POST\n rocket_setting = self.authenticate(params)\n if (rocket_setting):\n rc_api = RocketUsersAPI(rocket_setting)\n\n print(\"confirmSubmit: \", params)\n response_delete = rc_api.delete_message(params.get(RocketUserData.CHANNEL), params.get(RocketUserData.MESSAGE_ID))\n\n res = self.app_view.confirmSubmitAPI(request)\n res_html = self.format_html(res[1])\n channel = params.get('channel')\n confirm_res = rc_api.post_message(channel = channel, text = res_html)\n if res[0] is True:\n ins_res = self.app_view.viewAttendance(request)\n ins_res_html = self.format_html(ins_res[1])\n attendance = ins_res[0]\n if attendance is not None:\n rc_api.update_message(attendance.roomid,\n attendance.messageid,\n ins_res_html)\n return HttpResponse()\n\n @csrf_exempt\n def viewAttendance(self, request):\n params = request.POST\n res = self.app_view.viewAttendance(request)\n res_html = self.format_html(res)\n rocket_setting = self.authenticate(params)\n if (rocket_setting):\n rc_api = RocketUsersAPI(rocket_setting)\n response = rc_api.get_user_by_username(params.get('username'))\n if (response.is_success()):\n channel = response.user_data._id\n confirm_res = rc_api.post_message(channel = channel, text = res_html)\n else :\n print('Fail to get individual user info.')\n return HttpResponse()\n\n","sub_path":"attendance_app/api_views.py","file_name":"api_views.py","file_ext":"py","file_size_in_byte":11230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"521025171","text":"#!/usr/bin/env python3\n\"\"\"\nthe idea here is to collect and zip every directory alone in a specific directory,\nthen zip them all together in one file.\n\ncollect the directories\nzip collected directories\nadd the zipped directories to a zip file with the dir name\n\"\"\"\n\nimport os, sys, string, time, subprocess\n\n# GET DIRECTS IN THE CURRENT DIRECTORY\nos.system(\"ls -latd */ |grep \\\"d\\\"|awk '{print $9}' |tr -d \\\"/\\\" > .dirList.txt\")\n\ndlist = \".dirList.txt\"\ndirListReader = open(dlist, \"r\")\n\nfor dir in dirListReader:\n\tdirClean = dir.strip()\n\ttime.sleep(0.2)\n\tprint(\"- COMPRESSING \\\"\" + dirClean.upper() + \"\\\"\")\n\tos.system(\"zip -rq {0}.zip {0}\".format(dirClean))\n\tos.system(\"echo {}.zip >> zipedFiles.txt\".format(dirClean)) # append, to use later in zipping from a file\n\t\n\n# get the current directory name into a variable\nworkingDir = subprocess.check_output(\"pwd |tr '/' ' ' |awk \\'{print $NF}\\'\" ,shell=True)\nwDirStripped = workingDir.strip() # Clean it\nwDirCPy = wDirStripped.decode('ascii') # decode it so it gets printed properly\nprint(\"- ZIPPING ALL ZIP FILES IN ONE FILE CALLED \" + wDirCPy + \".zip...\") \ntime.sleep(1)\nos.system(\"wDirC=`pwd |tr '/' ' ' |awk '{print $NF}'`; zip -q $wDirC.zip *[a-Z0-9].zip\")\nprint(\"DONE...\")\n\n\n","sub_path":"zipper.py","file_name":"zipper.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"344627503","text":"import asyncio\nimport websockets\nimport json\nimport math\nimport time\nimport threading\nimport collections\nimport random\nimport copy\nfrom urllib.parse import urlparse\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\n\n
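# Handoff design: dataProducer() appends telemetry to both deques below;\n# recordData() drains toHistorical and webSockServer() drains toRealTime.\n# deque.append/popleft are atomic in CPython, so this single-producer,\n# single-consumer handoff needs no extra locking (the lock below only\n# protects the dataRecord dict).\n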
toHistorical = collections.deque()\ntoRealTime = collections.deque()\n\ndataRecordLock = threading.Lock()\ndataRecord = {\n \"prop.fuel\":[],\n \"prop.thrusters\":[],\n \"comms.recd\":[],\n \"comms.sent\":[],\n \"pwr.temp\":[],\n \"pwr.c\":[],\n \"Generator.Voltage\":[],\n}\n\ndef recordData():\n while True:\n while toHistorical:\n datum = toHistorical.popleft()\n with dataRecordLock:\n dataRecord[datum['id']].append(datum)\n time.sleep(.05)\n\ndef getData(value, start, end):\n data = []\n # Only show up to 15 minutes worth of data\n if end - start > 899*1000:\n start = end - 899*1000\n\n with dataRecordLock:\n for datum in dataRecord[value]:\n if datum['timestamp'] > start and datum['timestamp'] < end:\n data.append(datum)\n \n return json.dumps(data)\n\nclass serverHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n self.send_response(200)\n urlParams = urlparse(self.path)\n if urlParams[2].find('history') != 1:\n self.end_headers()\n self.wfile.write(b'Unrecognized request :(')\n return\n # Split the query part of the url into the key value pairs\n query = dict(x.split('=') for x in urlParams[4].split('&'))\n if \"start\" not in query:\n self.end_headers()\n self.wfile.write(b'Missing start in query :(')\n return\n if \"end\" not in query:\n self.end_headers()\n self.wfile.write(b'Missing end in query :(')\n return\n \n self.end_headers()\n data = getData(urlParams[2].split('/')[2], int(query['start']), int(query['end']))\n self.wfile.write(data.encode('utf-8'))\n \n def end_headers(self):\n self.send_header('Content-Type','application/json; charset=utf-8')\n self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n BaseHTTPRequestHandler.end_headers(self)\n\nasync def webSockServer(websocket, path):\n print(\"start of webSockServer\")\n subs = []\n\n toRealTime.clear()\n while True:\n try:\n while True:\n receiveMessage = await asyncio.wait_for(websocket.recv(), 0.001)\n if receiveMessage != \"\":\n receiveMessage = receiveMessage.split(' ')\n if receiveMessage[0] == \"subscribe\":\n subs.append(receiveMessage[1])\n elif receiveMessage[0] == \"unsubscribe\":\n subs.remove(receiveMessage[1])\n else:\n print(f\"Unrecognized command received {receiveMessage[0]}\")\n except asyncio.TimeoutError:\n #print(\"No commands received\")\n pass\n except Exception as e:\n print(f\"Error in getting commands from client - {e}\")\n\n while toRealTime:\n newData = toRealTime.popleft()\n if newData['id'] in subs:\n await websocket.send(json.dumps(newData))\n\n await asyncio.sleep(1) # time.sleep here would block the whole event loop\n\ndef dataProducer():\n print(\"Starting data producer\")\n fuelDefault = 99\n fuel = fuelDefault\n thrustersDefault = \"ON\"\n recdDefault = 1\n sentDefault = 1\n tempDefault = 100\n currentDefault = 10\n voltageDefault = 28\n\n while True:\n time.sleep(1)\n\n datum = {}\n datum['timestamp'] = int(time.time()*1000)\n\n # Fuel can decrease at most by 2% per second\n fuel = random.uniform(fuel*.98, fuel)\n datum['value'] = fuel\n datum['id'] = \"prop.fuel\"\n toHistorical.append(copy.deepcopy(datum))\n toRealTime.append(copy.deepcopy(datum))\n\n datum['value'] = thrustersDefault\n datum['id'] = \"prop.thrusters\"\n toHistorical.append(copy.deepcopy(datum))\n toRealTime.append(copy.deepcopy(datum))\n\n datum['value'] = recdDefault\n datum['id'] = \"comms.recd\"\n toHistorical.append(copy.deepcopy(datum))\n toRealTime.append(copy.deepcopy(datum))\n\n datum['value'] = sentDefault\n datum['id'] = \"comms.sent\"\n toHistorical.append(copy.deepcopy(datum))\n toRealTime.append(copy.deepcopy(datum))\n \n temp = random.uniform(tempDefault*.9, 
tempDefault*1.1)\n datum['value'] = temp\n datum['id'] = \"pwr.temp\"\n toHistorical.append(copy.deepcopy(datum))\n toRealTime.append(copy.deepcopy(datum))\n\n current = random.uniform(currentDefault*.9, currentDefault*1.05)\n datum['value'] = current\n datum['id'] = \"pwr.c\"\n toHistorical.append(copy.deepcopy(datum))\n toRealTime.append(copy.deepcopy(datum))\n\n voltage = random.uniform(voltageDefault*.95, voltageDefault*1.05)\n datum['value'] = voltage\n datum['id'] = \"Generator.Voltage\"\n toHistorical.append(copy.deepcopy(datum))\n toRealTime.append(copy.deepcopy(datum))\n\ndef main():\n global toHistorical\n global toRealTime\n global dataRecord\n\n httpPort = 8090\n webSocketPort = 8091\n\n server = HTTPServer(('localhost', httpPort), serverHandler)\n threading.Thread(target=server.serve_forever).start()\n threading.Thread(target=recordData).start()\n threading.Thread(target=dataProducer).start()\n\n websocketServer = websockets.serve(webSockServer, 'localhost', webSocketPort)\n print(\"Created websocket server\")\n asyncio.get_event_loop().run_until_complete(websocketServer)\n asyncio.get_event_loop().run_forever()\n #asyncio.get_event_loop().close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"openmct_python_example/python-server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"282280750","text":"import os, sys\nsys.path.append(os.path.abspath('../'))\nfrom solutions import tacoma_animation as tc\nfrom functions.HelperFunctions import Tacoma\nfrom functions.StepFunctions import FehlbergStepFunction\nimport numpy as np\n# Fehlberg metoden\n#plotte y(t) og theta(t)\n\n\nic = [0, 0, 0.001, 0]\ninter = [0, 1000]\nn = 25000\n\nanim = tc.Animator(title=\"Task 2\")\ntacoma = Tacoma(W=80)\nstep_func = FehlbergStepFunction(tacoma.F,TOL=1.0E-12)\nh = (inter[1] - inter[0]) / n\n\nt = []\ny = []\n\ny.append(ic)\nt.append(inter[0])\n\nt, y = step_func.run_steps(t, y, h, n)\n\n\n#anim.animate_bridge(t, y, step=20, duration=5) # to animate the bridge\nanim.animate_raw(t,y[:,2]) # theta\nprint(\"Max of theta is\", np.max(y[:,2]))\n#anim.animate_raw(t,y[:,0]) # height, y\nanim.show()\n","sub_path":"TDAT3024/tacoma_narrows_bridge/solutions/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"634658536","text":"#!/usr/bin/python\n\nfrom sqlalchemy.ext.declarative import declarative_base\n\nOrmModel = declarative_base()\nclass Model(object):\n\tdef dict(self, exclude=None, keys=None):\n\t\t\"\"\"Only serialize public variables (for objects) (those that don't start with '_')\"\"\"\n\t\tdictionary = self.__dict__\n\n\t\t# Return immediately if the user only wants certain keys\n\t\tif keys:\n\t\t\tdictionary = {i: dictionary[i] for i in keys if i in dictionary}\n\t\t\treturn dictionary\n\n\t\tif exclude:\n\t\t\tdictionary = {key: dictionary[key] for key, _ in dictionary.items() if key not in exclude}\n\n\t\tdictionary = {key: dictionary[key] for key, _ in dictionary.items() if not key.startswith('_')}\n\t\treturn dictionary\n","sub_path":"api/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"34847355","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport math\nimport 
colorsys\n# import PIL import Image\n\n\ndef RGB2HSI(R, G, B):\n # Convert a single RGB pixel (0-255 ints) to HSI: H in degrees, S in %, I in 0-255.\n eps = 1E-6 # to not divide with 0 !\n d = float(R+G+B) + eps\n r = float(R)/d # normalise so that r + g + b == 1\n g = float(G)/d\n b = float(B)/d\n numer = float(0.5*((r-g)+(r-b)))\n denom = math.sqrt((r-g)*(r-g)+(r-b)*(g-b)) + eps\n ratio = max(-1.0, min(1.0, numer/denom)) # keep acos inside its domain\n h = math.acos(ratio)\n if(b>g):\n h = (2*math.pi) - h\n s = 1 - (3*min(r,g,b))\n i = float(R+G+B)/float(3*255)\n # The HSI in corrected numbers!\n H = h*(180/math.pi)\n S = s*100\n I = i*255\n return H, S, I\n\n# Loads image (OpenCV reads pixels in BGR order)\nimage = cv2.imread('pyramid.png')\nheight, width = image.shape[:2]\nHSI = np.zeros(image.shape, dtype=np.float64)\n\n# Examine each pixel in the image and convert it\nfor y in range(0, height):\n for x in range(0, width):\n [b, g, r] = image[y, x]\n HSI[y, x] = RGB2HSI(int(r), int(g), int(b))\n\ncv2.imshow('image', image)\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()\n\n","sub_path":"RGB2HSI-MINIPROJECT.py","file_name":"RGB2HSI-MINIPROJECT.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"311238026","text":"import sys\nimport json\nimport random\nimport linecache\nimport operator\n\n\ntraining_file = sys.argv[1]\nmodel_file = sys.argv[2]\n\nf_training = open(training_file, 'r', errors='ignore')\nf_model = open(model_file, 'w')\n\n\narray = []\nweight = {}\ncatchWeight = {}\nactivation = {}\nbias = {}\ncatchBias = {}\nc = 1\ntagClass = [\"it's\", \"its\", \"you're\", \"your\", \"they're\", \"their\", \"loose\", \"lose\", \"to\", \"too\"]\n\n\n\ndef test(w, i, words):\n for tag in tagClass:\n if w == tag: \n return 5\n if i + 1 >= len(words): # no token to the right; avoid an IndexError on the last word\n return 4\n if w == \"it\" and words[i + 1].lower() == \"'s\": \n return 1\n elif w == \"you\" and words[i + 1].lower() ==\"'re\": \n return 2\n elif w == \"they\" and words[i + 1].lower() == \"'re\" : \n return 3\n else: \n return 4\n\n
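# Feature template: each confusable pair gets five feature families per class,\n# all kept in one flat weight dict -- '<class>l:<left word>', '<class>r:<right word>'\n# and '<class>c:<word itself>' -- built from a two-word window on either side.\n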
#initialize tag class and word3\nsentence = f_training.read()\nwords = sentence.split()\n_length = len(words)\nflagi = 0\nfor i in range(_length):\n\n if flagi == 1:\n flagi = 0\n continue\n\n flag = test(words[i].lower(), i, words)\n\n\n if flag != 4:\n word = words[i].lower()\n if flag == 5:\n for tag in tagClass:\n if tag == word:\n tag = word\n break\n j = i + 1\n k = i + 2\n length = _length \n \n elif flag == 1 or flag == 2 or flag==3:\n j = i + 2\n k = i + 3\n length = _length - 1\n if flag == 1: tag = \"it's\"\n if flag == 2: tag = \"you're\"\n if flag == 3: tag = \"they're\"\n flagi = 1 \n\n if length >1:\n if i == 0:\n wordl1 = 'bos1'\n wordl2 = 'bos2'\n elif i == 1:\n wordl1 = 'bos1'\n wordl2 = words[i-1].lower()\n else:\n wordl1 = words[i-2].lower()\n wordl2 = words[i-1].lower()\n\n if length - i == 2:\n wordr1 = words[j].lower()\n wordr2 = 'eos2'\n elif length - i == 1:\n wordr1 = 'eos1'\n wordr2 = 'eos2'\n else:\n wordr1 = words[j].lower()\n wordr2 = words[k].lower()\n elif length == 1:\n wordl1 = 'bos1'\n wordl2 = 'bos2'\n wordr1 = 'eos1'\n wordr2 = 'eos2'\n \n if tag == \"it's\" or tag == \"its\":\n temp1 = \"it's\"\n temp2 = \"its\"\n if tag == \"you're\" or tag == \"your\":\n temp1 = \"you're\"\n temp2 = \"your\"\n if tag == \"they're\" or tag == \"their\":\n temp1 = \"they're\"\n temp2 = \"their\"\n if tag == \"loose\" or tag == \"lose\":\n temp1 = \"loose\"\n temp2 = \"lose\"\n if tag == \"to\" or tag == \"too\":\n temp1 = \"to\"\n temp2 = \"too\"\n \n weight[temp1 + \"l:\" + wordl1] = 0 \n weight[temp2 + \"l:\" + wordl1] = 0 \n weight[temp1 + \"l:\" + wordl2] = 0 \n weight[temp2 + \"l:\" + wordl2] = 0 \n weight[temp1 + \"r:\" + wordr1] = 0 \n weight[temp2 + \"r:\" + wordr1] = 0 \n weight[temp1 + \"r:\" + wordr2] = 0 \n weight[temp2 + \"r:\" + wordr2] = 0 \n weight[temp1 + \"c:\" + word] = 0 \n weight[temp2 + \"c:\" + word] = 0 \n\n catchWeight[temp1 + \"l:\" + wordl1] = 0 \n catchWeight[temp2 + \"l:\" + wordl1] = 0 \n catchWeight[temp1 + \"l:\" + wordl2] = 0 \n catchWeight[temp2 + \"l:\" + wordl2] = 0 \n catchWeight[temp1 + \"r:\" + wordr1] = 0 \n catchWeight[temp2 + \"r:\" + wordr1] = 0 \n catchWeight[temp1 + \"r:\" + wordr2] = 0 \n catchWeight[temp2 + \"r:\" + wordr2] = 0 \n catchWeight[temp1 + \"c:\" + word] = 0 \n catchWeight[temp2 + \"c:\" + word] = 0 \n \n\nfor theclass in tagClass:\n bias[theclass] = 0\n catchBias[theclass] = 0\n\naaa = 1\n\nfor x in range(10):\n print(\"iteration\" + str(aaa))\n aaa = aaa + 1\n f_training.close()\n f_training = open(training_file, 'r', errors='ignore')\n sentence = f_training.read()\n words = sentence.split()\n _length = len(words)\n flagi = 0\n for i in range(_length):\n\n if flagi == 1:\n flagi = 0\n continue\n\n flag = test(words[i].lower(), i, words)\n\n if flag != 4:\n word = words[i].lower()\n if flag == 5:\n for tag in tagClass:\n if tag == word:\n tag = word\n break\n j = i + 1\n k = i + 2\n length = _length \n \n elif flag == 1 or flag == 2 or flag==3:\n j = i + 2\n k = i + 3\n length = _length - 1\n if flag == 1: tag = \"it's\"\n if flag == 2: tag = \"you're\"\n if flag == 3: tag = \"they're\"\n flagi = 1 \n\n\n feature1 = {}\n feature2 = {}\n activation = {}\n if length >1:\n if i == 0:\n wordl1 = 'bos1'\n wordl2 = 'bos2'\n elif i == 1:\n wordl1 = 'bos1'\n wordl2 = words[i-1].lower()\n else:\n wordl1 = words[i-2].lower()\n wordl2 = words[i-1].lower()\n\n if length - i == 2:\n wordr1 = words[j].lower()\n wordr2 = 'eos2'\n elif length - i == 1:\n wordr1 = 'eos1'\n wordr2 = 'eos2'\n else:\n wordr1 = words[j].lower()\n wordr2 = words[k].lower()\n elif length == 1:\n wordl1 = 'bos1'\n wordl2 = 'bos2'\n wordr1 = 'eos1'\n wordr2 = 'eos2'\n \n if tag == \"it's\" or tag == \"its\":\n temp1 = \"it's\"\n temp2 = \"its\"\n if tag == \"you're\" or tag == \"your\":\n temp1 = \"you're\"\n temp2 = \"your\"\n if tag == \"they're\" or tag == \"their\":\n temp1 = \"they're\"\n temp2 = \"their\"\n if tag == \"loose\" or tag == \"lose\":\n temp1 = \"loose\"\n temp2 = \"lose\"\n if tag == \"to\" or tag == \"too\":\n temp1 = \"to\"\n temp2 = \"too\"\n\n activation[temp1] = 0\n activation[temp2] = 0\n\n feature1[0] = temp1 + \"l:\" + wordl1\n feature2[0] = temp2 + \"l:\" + wordl1\n feature1[1] = temp1 + \"l:\" + wordl2\n feature2[1] = temp2 + \"l:\" + wordl2\n feature1[2] = temp1 + \"r:\" + wordr1\n feature2[2] = temp2 + \"r:\" + wordr1\n feature1[3] = temp1 + \"r:\" + wordr2\n feature2[3] = temp2 + \"r:\" + wordr2\n feature1[4] = temp1 + \"c:\" + word\n feature2[4] = temp2 + \"c:\" + word\n\n for v1 in feature1.values():\n activation[temp1] += weight[v1]\n for v2 in feature2.values():\n activation[temp2] += weight[v2]\n\n activation[temp1] += bias[temp1] \n activation[temp2] += bias[temp2] \n predict = max(activation.items(), key = operator.itemgetter(1))[0]\n if predict != tag:\n if predict != temp1:\n 
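# swap so temp1 names the wrongly-predicted class: its weights are pushed down below, the true class's pushed up\n 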
temp = temp1\n temp1 = temp2\n temp2 = temp\n for w in feature1.values(): \n weight[w] -= 1 \n catchWeight[w] -= c\n for w in feature2.values():\n weight[w] += 1\n catchWeight[w] += c\n bias[temp1] -= 1\n bias[temp2] += 1\n catchBias[temp1] -= c \n catchBias[temp2] += c \n c = c + 1\n for a in bias.keys():\n bias[a] = bias[a] - catchBias[a] / c\n\n\nfor i in weight.keys():\n weight[i] = weight[i] - catchWeight[i] / c\n\njson.dump(weight, f_model)\n\n\nf_training.close()\nf_model.close()\n\n\n\n","sub_path":"hw3/sentence/nelearn2.py","file_name":"nelearn2.py","file_ext":"py","file_size_in_byte":7187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"55321539","text":"def cmb(n, r, p):\n if (r < 0) or (n < r):\n return 0\n r = min(r, n - r)\n return fact[n] * factinv[r] * factinv[n-r] % p\n\np = 10**9 + 7\nN = 10**6 # N は必要分だけ用意する\nfact = [1, 1] # fact[n] = (n! mod p)\nfactinv = [1, 1] # factinv[n] = ((n!)^(-1) mod p)\ninv = [0, 1] # factinv 計算用\n \nfor i in range(2, N+1):\n fact.append((fact[-1] * i) % p)\n inv.append((-inv[p % i] * (p // i)) % p)\n factinv.append((factinv[-1] * inv[-1]) % p)\n\n \nx,y=map(int,input().split())\nif ((x+y)%3!=0):\n print(0)\nelse:\n xx=(2*x-y)//3\n yy=(2*y-x)//3\n if (xx<0 or yy<0):\n print(0)\n else:\n ans=cmb(xx+yy,xx,p)\n print(ans)","sub_path":"Python_codes/p02862/s247540410.py","file_name":"s247540410.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"448245775","text":"\"\"\"foamsite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import include, path\n\nfrom . 
import views\n\nurlpatterns = [\n\tpath('', views.Cart.as_view(), name='cart'),\n path('delete/', views.DeleteCartEntry.as_view(), name='deletecartentry'),\n path('empty', views.EmptyCart.as_view(), name='empty'),\n \n path('payment_complete', views.PaypalPaymentComplete.as_view(), name=\"payment_complete\"), ##Paypal payment complete\n path('payment_successful', views.PaymentSuccessful.as_view(), name=\"payment_successful\"), #Paypal payment successful\n\n path('checkout', views.Checkout.as_view(), name='checkout'),\n path('checkout_stripe', views.CheckoutStripe.as_view(), name='checkout_stripe'),\n path('checkout/complete', views.StripePaymentComplete.as_view(), name='success'),\n path('checkout/cancel', views.CheckoutCancel.as_view(), name='cancel')\n]","sub_path":"checkout/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"385001745","text":"class Solution:\n def minDistance(self, houses: List[int], k: int) -> int:\n @lru_cache(None)\n def dfs(start, k):\n if start==n: return 0\n if k==0: return float('inf')\n res=float('inf')\n for i in range(start, n):\n res=min(res, costs[start][i]+dfs(i+1, k-1))\n return res\n \n n=len(houses)\n houses=sorted(houses)\n costs=[[0]*n for i in range(n)]\n for i in range(n):\n for j in range(i+1, n):\n m=(i+j)//2\n for h in range(i, j+1):\n costs[i][j]+=abs(houses[h]-houses[m])\n return dfs(0, k)\n\n","sub_path":"python/allocate-mailboxes.py","file_name":"allocate-mailboxes.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"352168873","text":"'''Simple terminal pomodoro timer.\nBy default a 25 minute, then 5 minute timer on loop.\n'''\nfrom functools import partial\nfrom platform import system\nfrom itertools import cycle\nfrom copy import deepcopy\nimport argparse\nimport _thread\nimport termios\nimport select\nimport signal\nimport shutil\nimport pyglet\nimport time\nimport sys\nimport os\n\nREFRESH_RATE = 0.05\nGOODBYE_DELAY = 0.2\nFLASH_TIME = 0.75\n\nDEFAULT_SOUNDPATH = os.path.join(\n 'siren_noise_soundbible_shorter_fadeout.wav'\n)\nREAL_DIRNAME = os.path.dirname(os.path.realpath(__file__))\npyglet.resource.path = [os.path.join(REAL_DIRNAME, 'data')]\nTIME_FORMAT = '{:02d}:{:02d} {} {:02d}:00'\n\n\nTERMINAL_WIDTH = None\nCHANGED = False\n\nTERM_HIDE_CHAR, TERM_SHOW_CHAR = '\\033[?25l', '\\033[?25h'\nSAVE_TERM, RESTORE_TERM = '\\033[?47h', '\\033[?47l'\nINVERT_ON, INVERT_OFF = '\\033[7m', '\\033[27m'\nBOLD_ON, BOLD_OFF = '\\033[1m', '\\033[21m'\nBLUE, DEFAULT = '\\033[34m', '\\033[39m'\n\nPYGLET_VOLUME_LIB_REQ = '1.4.0b1'\nif pyglet.version < PYGLET_VOLUME_LIB_REQ:\n import warnings\n version_warning_string = (\n 'Volume not supported on pyglet < {}, you have {}'\n ''.format(PYGLET_VOLUME_LIB_REQ, pyglet.version)\n )\n warnings.warn(version_warning_string, UserWarning)\n\n\ndef get_terminal_width():\n return shutil.get_terminal_size((80, 20)).columns\n\n\ndef setup_terminal():\n # The following stops the interrupt character (or other special chars)\n # being echoed into the terminal, along with the cursor.\n sys.stdout.write(TERM_HIDE_CHAR)\n\n # This prevents user input being echoed out into the terminal, so it can\n # be exclusively used as input to the program.\n fd = sys.stdin.fileno()\n old = termios.tcgetattr(fd)\n new = deepcopy(old)\n new[3] = new[3] & ~termios.ECHO\n termios.tcsetattr(fd, termios.TCSADRAIN, new)\n\n # 
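# [Editor's sketch - illustrative, not part of the original dataset record.]
# The minDistance solution above rests on a classic fact: a sorted group
# houses[i..j] served by ONE mailbox is served optimally from its median, so
# costs[i][j] can be precomputed, after which the memoized dfs() tries every
# split point. A standalone mini version of the same algorithm:
from functools import lru_cache

def min_distance(houses, k):
    houses = sorted(houses)
    n = len(houses)
    cost = [[0] * n for _ in range(n)]
    for i in range(n):
        for j in range(i + 1, n):
            m = houses[(i + j) // 2]               # median house of the group
            cost[i][j] = sum(abs(h - m) for h in houses[i:j + 1])

    @lru_cache(None)
    def dfs(start, k):
        if start == n:
            return 0
        if k == 0:
            return float('inf')
        return min(cost[start][end] + dfs(end + 1, k - 1) for end in range(start, n))

    return dfs(0, k)

assert min_distance([1, 4, 8, 10, 20], 3) == 5     # LeetCode 1478 sample input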
This saves the contents of the current terminal.\n sys.stdout.write(SAVE_TERM)\n\n def reset_terminal():\n # Reset all at the end, echoing, showing special chars, and previous\n # terminal contents.\n try:\n sys.stdout.write(TERM_SHOW_CHAR + RESTORE_TERM)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old)\n\n return reset_terminal\n\n\nclass CycleAction(argparse.Action):\n def __call__(self, parser, args, values, option_string=None):\n setattr(args, self.dest, cycle(values))\n\n\nclass VolumeAction(argparse.Action):\n def __call__(self, parser, args, value, option_string=None):\n if pyglet.version < PYGLET_VOLUME_LIB_REQ and value is not None:\n print(\n 'Setting volume requires pyglet {}, you have pyglet {}.'\n ''.format(PYGLET_VOLUME_LIB_REQ, pyglet.version).center(\n TERMINAL_WIDTH\n ),\n )\n print('Will ignore value of --volume'.center(TERMINAL_WIDTH))\n print('Enter to continue Ctrl + C to exit.'.center(TERMINAL_WIDTH))\n input()\n value = None\n elif value is not None:\n value = float(value)\n\n setattr(args, self.dest, value)\n\n\ndef build_parser():\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\n 'countdowns', type=int, nargs='*',\n action=CycleAction,\n default=cycle((25, 5)),\n help='Cycle through countdown of this many minutes.'\n )\n\n parser.add_argument(\n '--sound-path', type=str,\n default=DEFAULT_SOUNDPATH,\n action=SoundPathAction,\n help='Path to alarm sound.'\n )\n\n parser.add_argument(\n '--volume', type=float,\n default=(0.05 if pyglet.version >= PYGLET_VOLUME_LIB_REQ else None),\n action=VolumeAction,\n help='Volume from 0 to 1.'\n )\n\n return parser\n\n\ndef run_sound(sound_path, volume=None):\n sound = pyglet.resource.media(sound_path)\n player = sound.play()\n\n if volume is not None and volume < 1:\n player.volume = volume\n\n # This is kind of an abuse of pyglet\n pyglet.clock.schedule_once(lambda x: pyglet.app.exit(), sound.duration)\n pyglet.app.run()\n\n\ndef minutes_seconds_elapsed(elapsed):\n minutes, seconds = divmod(elapsed, 60)\n\n return int(minutes), int(seconds)\n\n\ndef print_time(minutes, seconds, total_minutes, paused=False):\n print('\\r', end='')\n\n separator = u'\\u23F8' if paused else '/'\n\n time_str = TIME_FORMAT.format(minutes, seconds, separator, total_minutes)\n time_str = time_str.center(TERMINAL_WIDTH)\n if paused:\n time_str = ''.join((BOLD_ON, BLUE, time_str, BOLD_OFF, DEFAULT))\n print(time_str, end='')\n\n\ndef clear_if_changed():\n global CHANGED\n if CHANGED:\n print()\n # This might give you some garbage characters depending\n # on the value of $TERM. They should be hidden anyway.\n # Also won't work on Windows. 
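# [Editor's sketch - illustrative, not part of the original dataset record.]
# CycleAction above shows the general argparse customisation hook: subclass
# argparse.Action and override __call__ to post-process parsed values (here,
# wrapping them in itertools.cycle so the countdown lengths repeat forever).
# A minimal standalone demo of the same pattern; names are invented:
import argparse
from itertools import cycle, islice

class CycleDemoAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, cycle(values))

demo = argparse.ArgumentParser()
demo.add_argument('mins', type=int, nargs='*', action=CycleDemoAction,
                  default=cycle((25, 5)))
args = demo.parse_args(['10', '2'])
assert list(islice(args.mins, 5)) == [10, 2, 10, 2, 10]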
But nor will most of this...\n os.system('clear')\n CHANGED = False\n\n\ndef pause_thread(pause_obj):\n while pause_obj.alive:\n while sys.stdin in select.select([sys.stdin], [], [], REFRESH_RATE)[0]:\n sys.stdin.readline()\n pause_obj.toggle_pause()\n\n\nclass PauseObject():\n def __init__(self):\n self.paused = 0\n self.state_changed = False\n\n self.current_pause_time = 0\n self.total_pause_time = 0\n\n self.pause_start = None\n\n self.alive = True\n\n def toggle_pause(self):\n self.paused = not self.paused\n self.state_changed = True\n\n def event(self):\n if self.state_changed:\n self.state_changed = False\n return True\n else:\n return False\n\n def poll(self):\n if self.paused:\n if self.event():\n self.pause_start = time.time()\n self.current_pause_time = time.time() - self.pause_start\n else:\n if self.event():\n self.total_pause_time += self.current_pause_time\n self.current_pause_time = 0\n\n def pause_time(self):\n if self.paused:\n return self.current_pause_time + self.total_pause_time\n else:\n return self.total_pause_time\n\n def kill(self):\n self.alive = False\n\n\ndef countdown(minutes_total):\n global TERMINAL_WIDTH\n\n clear_if_changed()\n\n upper_limit = minutes_total * 60\n start_time = time.time()\n\n pause_obj = PauseObject()\n _thread.start_new_thread(pause_thread, (pause_obj,))\n\n while True:\n pause_obj.poll()\n\n elapsed = time.time() - start_time - pause_obj.pause_time()\n timer_numbers = (*minutes_seconds_elapsed(elapsed), minutes_total)\n\n print_time(*timer_numbers, paused=pause_obj.paused)\n time.sleep(REFRESH_RATE)\n\n clear_if_changed()\n\n if elapsed >= upper_limit:\n pause_obj.kill()\n sys.stdout.flush()\n break\n\n\nclass SoundPathAction(argparse.Action):\n def __call__(self, parser, args, values, option_string=None):\n setattr(args, self.dest, self.check_soundpath(values))\n\n @staticmethod\n def check_soundpath(sound_path):\n if os.path.isfile(sound_path):\n sound_dir = os.path.dirname(os.path.realpath(sound_path))\n pyglet.resource.path.append(sound_dir)\n\n return os.path.basename(sound_path)\n\n for path in pyglet.resource.path:\n if os.path.isfile(os.path.join(path, sound_path)):\n return sound_path\n else:\n raise FileNotFoundError('Could not locate {}'.format(sound_path))\n\n\ndef resize_handler(*args):\n global TERMINAL_WIDTH, CHANGED\n\n TERMINAL_WIDTH = get_terminal_width()\n CHANGED = True\n\n\ndef exit(reset_terminal, *args, code=0):\n try:\n print('', end='\\r')\n print('Goodbye!'.center(TERMINAL_WIDTH))\n\n # Hack to stop strange callback happening on exit\n pyglet.media.drivers.get_audio_driver().delete()\n\n time.sleep(GOODBYE_DELAY)\n finally:\n reset_terminal()\n sys.exit(code)\n\n\ndef input_thread(input_recorder):\n input_recorder.append(input())\n\n\ndef format_reset_string(string):\n return ''.join(\n (BLUE, INVERT_ON, BOLD_ON, string, BOLD_OFF, INVERT_OFF, DEFAULT)\n )\n\n\ndef reset_loop():\n input_list = []\n _thread.start_new_thread(input_thread, (input_list,))\n\n even = True\n time_since_flash = 0\n while True:\n clear_if_changed()\n print('', end='\\r')\n string = 'Return to reset'.center(TERMINAL_WIDTH)\n reset_string = format_reset_string(string) if even else string\n\n print(reset_string, end='')\n\n time.sleep(REFRESH_RATE)\n time_since_flash += REFRESH_RATE\n if time_since_flash >= FLASH_TIME:\n even = not even\n time_since_flash = 0\n\n if len(input_list) > 0:\n break\n\n\ndef main_loop(countdowns, sound_path, volume=None):\n for countdown_amount in countdowns:\n countdown(countdown_amount)\n run_sound(sound_path, 
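# [Editor's sketch - illustrative, not part of the original dataset record.]
# pause_thread() above polls stdin without blocking: select() with a timeout
# reports whether a full line is waiting, so the timer loop keeps refreshing
# while still reacting to Enter. POSIX-only (select on stdin does not work on
# Windows). The idiom in isolation:
import select
import sys

def enter_pressed(timeout=0.05):
    """Return True if a line is ready on stdin (and consume it)."""
    ready, _, _ = select.select([sys.stdin], [], [], timeout)
    if ready:
        sys.stdin.readline()
        return True
    return False

# Typical use inside a refresh loop (left commented so this is side-effect free):
# while True:
#     if enter_pressed():
#         print('toggle pause')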
volume=volume)\n\n # Clear standard input incase user was pressing things before\n # return message is displayed.\n termios.tcflush(sys.stdin, termios.TCIOFLUSH)\n reset_loop()\n else:\n # Shouldn't actually get here.\n print('Out of countdowns!'.center(TERMINAL_WIDTH))\n\n\ndef check_tty():\n is_tty = os.isatty(sys.stdout.fileno())\n if is_tty:\n return\n\n print(\n 'Can only operate on a tty, are you piping or redirecting output?',\n file=sys.stderr\n )\n sys.exit(1)\n\n\ndef check_os():\n platform_string = system().lower()\n if 'linux' in platform_string:\n return\n elif 'win32' in platform_string:\n # Just give up... There is a plenty of posix / linux stuff here.\n # Feel free to remove this and try it out if you'd like to push\n # towards Windows support, but it's not on my radar.\n raise OSError(\n 'System is {}. Windows is not supported.'\n ''.format(platform_string)\n )\n elif 'darwin' in platform_string:\n import warnings\n version_warning_string = (\n 'System is {}. Mac may not work as expected. Support is planned.'\n ' If this doesn\\'t work for you please report your issue.'\n ''.format(platform_string)\n )\n warnings.warn(version_warning_string, UserWarning)\n else:\n import warnings\n version_warning_string = (\n 'System is {}. This may not work as expected.'\n ' Support is not planned.'\n ' If you would like support, and this tool doesn\\'t work for you'\n ' please report your issue, with details of your system.'\n ''.format(platform_string)\n )\n warnings.warn(version_warning_string, UserWarning)\n\n\ndef main():\n check_os()\n check_tty()\n try:\n reset_terminal = setup_terminal()\n exit_partial_app = partial(exit, reset_terminal)\n\n resize_handler()\n\n signal.signal(signal.SIGWINCH, resize_handler)\n signal.signal(signal.SIGINT, exit_partial_app)\n\n args = build_parser().parse_args()\n\n main_loop(args.countdowns, args.sound_path, args.volume)\n except Exception as e:\n print('Exception was raised: {}'.format(e).center(TERMINAL_WIDTH))\n print('Cleaning up'.center(TERMINAL_WIDTH))\n exit_partial_app(code=1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"py_alarm.py","file_name":"py_alarm.py","file_ext":"py","file_size_in_byte":11337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"463492607","text":"from selenium import webdriver\nimport math\n\n\ndef calc(x):\n return str(math.log(abs(12 * math.sin(int(x)))))\n\n\nbrowser = webdriver.Chrome()\nlink = \"http://SunInJuly.github.io/execute_script.html\"\nbrowser.get(link)\n\n# Считать х и посчитать\nx_element = browser.find_element_by_id('input_value').text\ny = calc(x_element)\n\n\n# Записать значение\noutput1 = browser.find_element_by_id(\"answer\")\noutput1.send_keys(y)\n\n\n# Ставим чекбокс\noutput2 = browser.find_element_by_id(\"robotCheckbox\")\nbrowser.execute_script(\"return arguments[0].scrollIntoView(true);\", output2) # JS мотает страницу до элемента output2\noutput2.click()\n\n\n# Ставим радиобат\noutput3 = browser.find_element_by_id(\"robotsRule\")\nbrowser.execute_script(\"return arguments[0].scrollIntoView(true);\", output3)\noutput3.click()\n\n\n# Кликаем кнопку\nbutton = browser.find_element_by_tag_name(\"button\")\nbrowser.execute_script(\"return arguments[0].scrollIntoView(true);\", button)\nbutton.click()\nassert True\n","sub_path":"files/JS.py","file_name":"JS.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"572438717","text":"from 
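# [Editor's sketch - illustrative, not part of the original dataset record.]
# main() above wires cleanup into signal handlers: functools.partial bakes the
# reset_terminal callback into exit() so the registered handler still matches
# the (signum, frame) signature that signal.signal() expects. A standalone
# sketch of the same wiring (SIGWINCH, used above for resizes, is POSIX-only):
import signal
from functools import partial

def on_interrupt(cleanup, signum, frame):
    cleanup()
    raise SystemExit(0)

def demo_cleanup():
    print('terminal restored (demo placeholder)')

signal.signal(signal.SIGINT, partial(on_interrupt, demo_cleanup))
# Ctrl+C now runs demo_cleanup() before exiting; a resize handler can be
# attached the same way via signal.SIGWINCH where the platform defines it.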
__future__ import absolute_import, division, print_function, unicode_literals\nimport time\nimport datetime\n\n# colab mode\n# try:\n# %tensorflow_version 2.x\n# except Exception:\n# pass\n# !pip install tensorflow_probability==0.8.0rc0 --upgrade\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport numpy as np\nimport tensorflow as tf\nfrom utils import Mask, CustomSchedule, Trainer, translate\nfrom data_loader import DataLoader\nimport datetime\nfrom model import *\n\n# hyper paramaters\nTRAIN_RATIO = 0.9\nD_POINT_WISE_FF = 2048\nD_MODEL = 512\nENCODER_COUNT = DECODER_COUNT = 6\nEPOCHS = 20\nATTENTION_HEAD_COUNT = 8\nDROPOUT_PROB = 0.1\nBATCH_SIZE = 32\nSEQ_MAX_LEN_SOURCE = 100\nSEQ_MAX_LEN_TARGET = 100\nBPE_VOCAB_SIZE = 32000\n\n\ndata_loader = DataLoader(\n dataset_name='wmt14/en-de',\n data_dir='./datasets'\n)\n\nsource_data, target_data = data_loader.load_test(index=3)\ndata = zip(source_data, target_data)\n\ntransformer = Transformer(\n input_vocab_size=BPE_VOCAB_SIZE,\n target_vocab_size=BPE_VOCAB_SIZE,\n encoder_count=ENCODER_COUNT,\n decoder_count=DECODER_COUNT,\n attention_head_count=ATTENTION_HEAD_COUNT,\n d_model=D_MODEL,\n d_point_wise_ff=D_POINT_WISE_FF,\n dropout_prob=DROPOUT_PROB\n)\n\ntrainer = Trainer(model=transformer, checkpoint_dir='./checkpoints')\nif trainer.checkpoint_manager.latest_checkpoint:\n print(\"Restored from {}\".format(trainer.checkpoint_manager.latest_checkpoint))\nelse:\n print(\"Initializing from scratch.\")\n\ntrainer.checkpoint.restore(\n trainer.checkpoint_manager.latest_checkpoint\n)\n\ndef do_translate(input):\n index = input[0]\n source = input[1][0]\n target = input[1][1]\n print(index)\n output = translate(source, data_loader, trainer, SEQ_MAX_LEN_TARGET)\n res = data_loader.sequences_to_texts([output.numpy().tolist()], mode='target')\n return {\n 'source': source,\n 'target': target,\n 'output': res\n }\n\ntranslated_data = []\n\nfor input in data:\n res = do_translate(input)\n translated_data.append(res)\n\nimport pickle\nwith open('translated_data.pickle', 'wb') as f:\n pickle.dump(translated_data, f)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"26859901","text":"# main.py\n\nfrom cash_desk import Bill, BatchBill, CashDesk\n\n\ndef main():\n # a = Bill(10)\n # b = Bill(5)\n # c = Bill(10)\n #\n # print(int(a))\n #\n # print(a)\n # print(str(a))\n #\n # print(a == b)\n # print(a == c)\n #\n # money_holder = {}\n #\n # money_holder[a] = 1\n #\n # if c in money_holder:\n # money_holder[c] += 1\n #\n # print(money_holder)\n\n values = [10, 20, 50, 100, 100, 100]\n\n bills = [Bill(value) for value in values]\n\n batch = BatchBill(bills)\n\n print(batch.total())\n\n for bill in batch:\n print(bill)\n\n desk = CashDesk()\n\n desk.take_money(batch)\n desk.take_money(Bill(10))\n\n print(desk.total())\n\n print(Bill(5).to_list())\n print(batch.to_list())\n\n print(desk.inspect())\n\nif __name__ == '__main__':\n main()\n","sub_path":"week_2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"9572399","text":"# -*- coding: utf-8 -*-\n# __author__ = 'budurli'\nfrom __future__ import unicode_literals\nfrom django.core.validators import MinValueValidator\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils.translation import 
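# [Editor's sketch - the cash_desk module itself is not part of this record;
# this is one plausible Bill implementation consistent with how main() above
# uses it, not the author's actual code.]
# For money_holder[a] and `c in money_holder` to behave as the comments hint,
# __eq__ and __hash__ must agree: equal bills must hash equally.
class Bill:
    def __init__(self, value):
        if value <= 0:
            raise ValueError('bill value must be positive')
        self.value = value

    def __int__(self):
        return self.value

    def __str__(self):
        return '{} bill'.format(self.value)

    def __eq__(self, other):
        return isinstance(other, Bill) and self.value == other.value

    def __hash__(self):
        return hash(self.value)

a, c = Bill(10), Bill(10)
holder = {a: 1}
assert c in holder   # distinct objects, equal value: equality plus hashing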
ugettext_lazy as _\nfrom apps.accounts.models import Account\nfrom apps.edus.models import FieldOfStudy\nfrom apps.core import GenericModel, nullable, FileMixin\nfrom apps.core.consts import semesters_choices\n\n\n@python_2_unicode_compatible\nclass Project(GenericModel):\n title = models.CharField(_('title'), max_length=120)\n description = models.TextField(_('description'), **nullable)\n\n field_of_study = models.ForeignKey(\n FieldOfStudy,\n verbose_name=_('field of study'),\n related_name='projects',\n **nullable\n )\n\n semester = models.IntegerField(_('semester'), choices=semesters_choices, default=0)\n year = models.IntegerField(_('year'), validators=[MinValueValidator(1990)])\n\n chief = models.ForeignKey(\n Account,\n verbose_name=_('chief'),\n related_name='as_chief',\n limit_choices_to={'is_teacher': True}\n )\n\n class Meta:\n verbose_name = _('project')\n verbose_name_plural = _('projects')\n\n def __str__(self):\n return self.title\n\n\n@python_2_unicode_compatible\nclass ProjectFile(FileMixin, GenericModel):\n project = models.ForeignKey(Project, verbose_name=_('project'), related_name='files')\n\n class Meta:\n verbose_name = _('project file')\n verbose_name_plural = _('projects files')\n\n def __str__(self):\n return self.filename\n\n\n@python_2_unicode_compatible\nclass ProjectResult(GenericModel):\n project = models.ForeignKey(Project, verbose_name=_('project'), related_name='results')\n student = models.ForeignKey(\n Account,\n verbose_name=_('student'),\n related_name='projects',\n limit_choices_to={'is_student': True}\n\n )\n rating = models.IntegerField(_('rating'), **nullable)\n\n class Meta:\n unique_together = ('project', 'student')\n verbose_name = _('project result')\n verbose_name_plural = _('projects results')\n\n def __str__(self):\n return '%s - %s' % (self.project, self.student)\n\n","sub_path":"apps/projects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"393299036","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 16 17:43:42 2018\n\n@author: lwuag\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 16 14:25:24 2018\n\n@author: lwuag\n\"\"\"\nimport numpy as np\nfrom graph import *\nfrom algorithm import *\nfrom New_end_point_mid import *\nfrom index_2_xy import *\nfrom xy_2_index import *\n\n# star_point = Start_city[0] * col_num + Start_city[1]\n# end_point = Target_city[0] * col_num + Target_city[1]\n\ndef Path_design(Data, star_point, end_point, end_point_replace, height):\n high_num = int(Data.shape[0])\n row_num = int(Data.shape[1])\n col_num = int(Data.shape[2]) \n thre_wind = 15\n end_x = end_point // col_num\n end_y = end_point % col_num\n if Data[height, end_x, end_y] >= thre_wind:\n end_point_replace = New_end_point_mid(Data[height,:,:], star_point, end_point, col_num, thre_wind) # if the end_point is unaccessible, choose a new end \n else:\n end_point_replace = end_point\n graph = Graph()\n for i in range(row_num):\n for j in range(col_num):\n index = i * col_num + j\n# graph.add_node(index)\n if i - 1 >= 0 and Data[height,i - 1, j] < thre_wind:\n index_next = (i - 1) * col_num + j\n graph.add_edge(index, index_next, {'cost': 2})\n if i + 1 < row_num and Data[height, i + 1, j] < thre_wind:\n index_next = (i + 1) * col_num + j\n graph.add_edge(index, index_next, {'cost': 2})\n if j - 1 >= 0 and Data[height, i, j - 1] < thre_wind:\n index_next = i * col_num + (j - 1)\n graph.add_edge(index, 
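# [Editor's sketch - illustrative, not part of the original dataset record.]
# The four if-branches in the adjacency loop here enumerate the 4-neighbours
# of grid cell (i, j), flatten them to index i * col_num + j, and add an edge
# whenever the neighbour's wind value is below the threshold. The same
# enumeration as one reusable helper:
def grid_neighbours(i, j, rows, cols):
    """Yield flat indices of the 4-neighbours of (i, j) in a rows x cols grid."""
    for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        ni, nj = i + di, j + dj
        if 0 <= ni < rows and 0 <= nj < cols:
            yield ni * cols + nj

assert sorted(grid_neighbours(0, 0, 3, 3)) == [1, 3]   # corner cell has 2 neighbours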
index_next, {'cost': 2})\n if j + 1 < col_num and Data[height, i, j + 1] < thre_wind:\n index_next = i * col_num + (j + 1)\n graph.add_edge(index, index_next, {'cost': 2})\n cost_func_1 = lambda u, v, e, prev_e: e['cost']\n heuristic_func_1 = lambda u, v, e, prev_e: e['cost']\n PathInfo = find_path(graph, star_point, end_point_replace, cost_func=cost_func_1, heuristic_func=heuristic_func_1)\n Stop = False\n Fail_pos = 0\n Height_pos = 0\n if height == high_num - 1:\n return PathInfo.nodes\n else:\n while index in range(0, len(PathInfo.nodes)) and not Stop:\n z_id = index // 30\n x_id = PathInfo[index].nodes // col_num\n y_id = PathInfo[index].nodes % col_num\n if Data[z_id, x_id, y_id] >= thre_wind:\n Stop = True\n Fail_pos = index\n Height_pos = z_id\n if Stop:\n end_point_replace = end_point\n return PathInfo[0:(Height_pos*30-1)].nodes + Path_design(Data, PathInfo[Height_pos*30-1].nodes, end_point, end_point_replace, Height_pos)\n else:\n return PathInfo.nodes\n \n","sub_path":"Functions/Linlong/Path_design.py","file_name":"Path_design.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"77347342","text":"from libcloud.container.base import ContainerImage\nfrom libcloud.container.types import Provider\nfrom libcloud.container.providers import get_driver\n\ncls = get_driver(Provider.ECS)\n\nconn = cls(\n access_id=\"SDHFISJDIFJSIDFJ\",\n secret=\"THIS_IS)+_MY_SECRET_KEY+I6TVkv68o4H\",\n region=\"ap-southeast-2\",\n)\n\nfor cluster in conn.list_clusters():\n print(cluster.name)\n if cluster.name == \"my-cluster\":\n conn.list_containers(cluster=cluster)\n container = conn.deploy_container(\n name=\"my-simple-app\",\n image=ContainerImage(\n id=None, name=\"simple-app\", path=\"simple-app\", version=None, driver=conn\n ),\n cluster=cluster,\n )\n","sub_path":"docs/examples/container/working_with_clusters.py","file_name":"working_with_clusters.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"147668448","text":"# coding: utf-8\n# Módulo diminutivo_sentence\n# Converte um diminutivo masculino padrão, dentro de uma sentença, para uma forma do falar nordestino.\n# Melhorias na função diminutivo2, criada por João Vitor e Pedro Muniz\ndef diminutivo_sentence():\n sentence: str = input(\"Digite uma sentença, contendo um diminutivo masculino na forma padrão, para convertê-lo em um falar nordestino: \")\n if \"inho\" in sentence:\n sentence = sentence.replace(\"inho\", \"im\")\n print(\"Sentença convertida: \", sentence)\n else:\n print(\"Sua sentença não contém um diminutivo na forma masculina padrão. Por favor, tente novamente.\")\n\ndiminutivo_sentence()\n\ndef next():\n query = input(\"Deseja Prosseguir? 
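# [Editor's sketch - illustrative, not part of the original dataset record;
# the credentials, cluster name, and image name below are placeholders.]
# The libcloud snippet above follows the provider-driver pattern: resolve a
# driver class from a Provider constant, instantiate it with credentials, then
# use the uniform container API (list_clusters / deploy_container). Wrapped up:
from libcloud.container.base import ContainerImage
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver

def deploy_to_cluster(access_id, secret, region, cluster_name, image_name):
    conn = get_driver(Provider.ECS)(access_id=access_id, secret=secret, region=region)
    for cluster in conn.list_clusters():
        if cluster.name == cluster_name:
            image = ContainerImage(id=None, name=image_name, path=image_name,
                                   version=None, driver=conn)
            return conn.deploy_container(name=image_name, image=image, cluster=cluster)
    raise LookupError('cluster %r not found' % cluster_name)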
[sim/não]: \")\n while query in \"sim\":\n diminutivo_sentence()\n next()\n if query not in \"sim\":\n exit()\n\nnext()\n","sub_path":"exercicio-1/diminutivo_sentence.py","file_name":"diminutivo_sentence.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"120884014","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.2 (3180)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/ally/core/http/impl/processor/parsing_multipart.py\n# Compiled at: 2013-10-02 09:54:40\n\"\"\"\nCreated on Aug 30, 2012\n\n@package: ally core http\n@copyright: 2012 Sourcefabric o.p.s.\n@license: http://www.gnu.org/licenses/gpl-3.0.txt\n@author: Gabriel Nistor\n\nProvides the multipart content parsing based on RFC1341.\n@see: http://www.w3.org/Protocols/rfc1341/7_2_Multipart.html\n\"\"\"\nfrom ally.container.ioc import injected\nfrom ally.core.http.spec.codes import MUTLIPART_NO_BOUNDARY\nfrom ally.core.impl.processor.parsing import ParsingHandler, Request, RequestContent, Response\nfrom ally.design.processor.assembly import Assembly\nfrom ally.design.processor.attribute import requires, defines\nfrom ally.design.processor.context import Context\nfrom ally.design.processor.execution import Chain, Processing\nfrom ally.design.processor.processor import Included\nfrom ally.exception import DevelError\nfrom ally.support.util_io import IInputStream, IClosable\nfrom collections import Callable\nfrom io import BytesIO\nimport codecs, logging, re\nlog = logging.getLogger(__name__)\nFLAG_CONTENT_END = 2\nFLAG_MARK_START = 4\nFLAG_MARK_END = 8\nFLAG_HEADER_END = 16\nFLAG_CLOSED = 32\nFLAG_MARK = FLAG_MARK_START | FLAG_MARK_END\nFLAG_END = FLAG_CONTENT_END | FLAG_MARK\n\nclass RequestPopulate(Context):\n \"\"\"\n The request context used in populating the request content.\n \"\"\"\n headers = defines(dict, doc='\\n @rtype: dictionary{string, string}\\n The raw headers.\\n ')\n\n\nclass RequestContentMultiPart(RequestContent):\n \"\"\"\n The request content context.\n \"\"\"\n typeAttr = requires(dict)\n source = requires(IInputStream)\n fetchNextContent = defines(Callable, doc='\\n @rtype: callable()\\n The callable used to fetch the next request content, only use this after you have finalized the work with the\\n current request content. 
It will not take any argument.\\n ')\n previousContent = defines(object, doc='\\n @rtype: RequestContentMultiPart\\n The reference to the previous content, this will be available only after the fetch method has been used.\\n ')\n\n\nclass ResponseMultiPart(Response):\n \"\"\"\n The response context.\n \"\"\"\n status = defines(int)\n\n\n@injected\nclass DataMultiPart:\n \"\"\"\n Contains the data required by the multi part stream.\n \"\"\"\n charSet = 'UTF8'\n formatMarkStart = '--%s\\r\\n'\n formatMarkEnd = '--%s--\\r\\n'\n markHeaderEnd = '\\r\\n\\r\\n'\n trimBodyAtEnd = '\\r\\n'\n separatorHeader = ':'\n packageSize = 1024\n\n def __init__(self):\n assert isinstance(self.charSet, str), 'Invalid character set %s' % self.charSet\n assert isinstance(self.formatMarkStart, str), 'Invalid format mark start %s' % self.formatMarkStart\n assert isinstance(self.formatMarkEnd, str), 'Invalid format mark end %s' % self.formatMarkEnd\n assert isinstance(self.markHeaderEnd, str), 'Invalid header end %s' % self.markHeaderEnd\n assert isinstance(self.trimBodyAtEnd, str), 'Invalid trim body at end %s' % self.trimBodyAtEnd\n assert isinstance(self.separatorHeader, str), 'Invalid separator header %s' % self.separatorHeader\n assert isinstance(self.packageSize, int), 'Invalid package size %s' % self.packageSize\n self.markHeaderEnd = bytes(self.markHeaderEnd, self.charSet)\n self.trimBodyAtEnd = bytes(self.trimBodyAtEnd, self.charSet)\n\n\n@injected\nclass ParsingMultiPartHandler(ParsingHandler, DataMultiPart):\n \"\"\"\n Provides the multipart content handler parsing.\n @see: http://www.w3.org/Protocols/rfc1341/7_2_Multipart.html\n \"\"\"\n regexMultipart = '^multipart($|\\\\/.)'\n attrBoundary = 'boundary'\n populateAssembly = Assembly\n\n def __init__(self):\n assert isinstance(self.regexMultipart, str), 'Invalid multi part regex %s' % self.regexMultipart\n assert isinstance(self.attrBoundary, str), 'Invalid attribute boundary name %s' % self.attrBoundary\n assert isinstance(self.populateAssembly, Assembly), 'Invalid populate assembly %s' % self.populateAssembly\n DataMultiPart.__init__(self)\n ParsingHandler.__init__(self, Included(self.populateAssembly).using(request=RequestPopulate))\n self._reMultipart = re.compile(self.regexMultipart)\n\n def process(self, chain, populate, parsing, request: Request, requestCnt: RequestContentMultiPart, response: ResponseMultiPart, **keyargs):\n \"\"\"\n @see: ParsingHandler.process\n \n Parse the request content.\n \"\"\"\n assert isinstance(chain, Chain), 'Invalid processors chain %s' % chain\n assert isinstance(populate, Processing), 'Invalid processing %s' % populate\n assert isinstance(parsing, Processing), 'Invalid processing %s' % parsing\n assert isinstance(request, Request), 'Invalid request %s' % request\n assert isinstance(requestCnt, RequestContentMultiPart), 'Invalid request content %s' % requestCnt\n assert isinstance(response, ResponseMultiPart), 'Invalid response %s' % response\n chain.proceed()\n if response.isSuccess is False:\n return\n else:\n isMultipart = requestCnt.type and self._reMultipart.match(requestCnt.type)\n if isMultipart:\n if not log.debug('Content type %s is multi part', requestCnt.type):\n assert True\n boundary = requestCnt.typeAttr.pop(self.attrBoundary, None)\n if not boundary:\n response.code, response.status, response.isSuccess = MUTLIPART_NO_BOUNDARY\n return\n else:\n assert isinstance(requestCnt.source, IInputStream), 'Invalid request content source %s' % requestCnt.source\n stream = StreamMultiPart(self, 
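# [Editor's sketch - illustrative, not part of the original dataset record.]
# The handler above gates on two things: the Content-Type must match the
# regex '^multipart($|\/.)', and its attributes must carry a 'boundary',
# which RFC 1341 requires for delimiting the parts (otherwise it answers
# with the MUTLIPART_NO_BOUNDARY code). The same check in isolation:
import re

_RE_MULTIPART = re.compile(r'^multipart($|\/.)')

def multipart_boundary(content_type, type_attrs):
    """Return the boundary string, or None if this is not usable multipart."""
    if not _RE_MULTIPART.match(content_type):
        return None
    return type_attrs.get('boundary')

assert multipart_boundary('multipart/form-data', {'boundary': 'xYz'}) == 'xYz'
assert multipart_boundary('text/plain', {}) is None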
requestCnt.source, boundary)\n requestCnt = NextContent(requestCnt, response, populate, self, stream)()\n if requestCnt is None:\n pass\n response.code, response.status, response.isSuccess = MUTLIPART_NO_BOUNDARY\n return\n if not request.decoder:\n if isMultipart:\n chain.update(requestCnt=requestCnt)\n return\n if self.processParsing(parsing, request=request, requestCnt=requestCnt, response=response, **keyargs):\n if requestCnt.fetchNextContent is not None:\n nextContent = requestCnt.fetchNextContent()\n if nextContent is not None:\n assert isinstance(nextContent, RequestContentMultiPart), 'Invalid request content %s' % nextContent\n chain.update(requestCnt=nextContent)\n else:\n chain.update(requestCnt=requestCnt)\n return\n\n\nclass StreamMultiPart(IInputStream, IClosable):\n \"\"\"\n Provides the muti part stream content.\n \"\"\"\n __slots__ = ('_data', '_stream', '_markStart', '_markEnd', '_extraSize', '_flag',\n '_buffer')\n\n def __init__(self, data, stream, boundary):\n \"\"\"\n Constructs the multi part content stream.\n \n @param data: DataMultiPart\n The data used for multi part content processing.\n @param stream: IInputStream\n The stream that contains the multi part.\n @param boundary: string\n The boundary used for identifying the multi part bodies.\n \"\"\"\n assert isinstance(data, DataMultiPart), 'Invalid data %s' % data\n assert isinstance(stream, IInputStream), 'Invalid content stream %s' % stream\n assert isinstance(boundary, str), 'Invalid boundary %s' % boundary\n self._data = data\n self._stream = stream\n self._markStart = bytes(data.formatMarkStart % boundary, data.charSet)\n self._markEnd = bytes(data.formatMarkEnd % boundary, data.charSet)\n self._extraSize = max(len(self._markStart), len(self._markEnd), len(data.markHeaderEnd))\n self._flag = 0\n self._buffer = bytearray()\n\n def read(self, nbytes=None):\n \"\"\"\n @see: IInputStream.read\n \"\"\"\n if self._flag & FLAG_CLOSED:\n raise ValueError('I/O operation on a closed content file')\n if self._flag & FLAG_END:\n return b''\n if nbytes:\n if nbytes <= self._data.packageSize:\n return self._readToMark(nbytes)\n data = bytearray()\n while True:\n data.extend(self._readToMark(min(nbytes - len(data), self._data.packageSize)))\n if len(data) >= nbytes or self._flag & FLAG_END:\n break\n\n else:\n data = bytearray()\n while 1:\n data.extend(self._readToMark(self._data.packageSize))\n if self._flag & FLAG_END:\n break\n\n return bytes(data)\n\n def close(self):\n \"\"\"\n @see: IClosable.close\n \"\"\"\n self._flag |= FLAG_CLOSED\n\n def _readInBuffer(self, nbytes):\n \"\"\"\n Reads in the instance buffer the specified number of bytes, always when reading it will read in the buffer\n additional bytes for the mark processing. 
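# [Editor's sketch - illustrative, not part of the original dataset record.]
# _readInBuffer() always keeps `extra` bytes beyond what the caller asked for,
# where extra = max(len(mark_start), len(mark_end), len(header_end)). That
# tail guarantees a delimiter can never straddle two reads and be missed by
# find(). The same idea reduced to a generator over a byte stream:
import io

def chunks_until(stream, mark, package=8):
    buf = bytearray()
    extra = len(mark)
    while True:
        data = stream.read(package + extra - len(buf))
        buf.extend(data)
        idx = buf.find(mark)
        if idx >= 0:
            yield bytes(buf[:idx])
            return
        if not data:                      # stream exhausted, no mark found
            yield bytes(buf)
            return
        yield bytes(buf[:package])        # safe: the mark cannot start here
        del buf[:package]

body = io.BytesIO(b'hello multipart world--MARK--trailing')
assert b''.join(chunks_until(body, b'--MARK--')) == b'hello multipart world'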
It will adjust the flags if END is encountered.\n \"\"\"\n assert not self._flag & FLAG_CONTENT_END, 'End reached, cannot read anymore'\n data = self._stream.read(nbytes + self._extraSize - len(self._buffer))\n if data:\n self._buffer.extend(data)\n if not self._buffer:\n self._flag |= FLAG_CONTENT_END\n\n def _readToMark(self, nbytes):\n \"\"\"\n Read the provided number of bytes or read until a mark separator is encountered (including the end separator).\n It will adjust the flags according to the findings.\n \n @return: bytes\n The bytes read.\n \"\"\"\n assert not self._flag & FLAG_MARK, 'Already at a mark, cannot read until flag is reset'\n self._readInBuffer(nbytes)\n if not self._buffer:\n return b''\n indexSep = self._buffer.find(self._markStart)\n if indexSep >= 0:\n self._flag |= FLAG_MARK_START\n indexBody = indexSep - len(self._data.trimBodyAtEnd)\n if not self._buffer.endswith(self._data.trimBodyAtEnd, indexBody, indexSep):\n indexBody = indexSep\n data = self._buffer[:indexBody]\n del self._buffer[:indexSep + len(self._markStart)]\n else:\n nbytes = max(len(self._buffer), nbytes)\n data = self._buffer[:nbytes]\n del self._buffer[:nbytes]\n indexEnd = data.find(self._markEnd)\n if indexEnd >= 0:\n self._flag |= FLAG_MARK_END\n indexBody = indexEnd - len(self._data.trimBodyAtEnd)\n if not data.endswith(self._data.trimBodyAtEnd, indexBody, indexEnd):\n indexBody = indexEnd\n data = data[:indexBody]\n self._buffer = data[indexEnd + len(self._markEnd):]\n return data\n\n def _readToHeader(self, nbytes):\n \"\"\"\n Read the provided number of bytes or read until the mark header is encountered.\n It will adjust the flags according to the findings.\n \n @return: bytes\n The bytes read.\n \"\"\"\n assert not self._flag & FLAG_HEADER_END, 'Already at header end, cannot read until flag is reset'\n self._readInBuffer(nbytes)\n if not self._buffer:\n return b''\n indexHeader = self._buffer.find(self._data.markHeaderEnd)\n if indexHeader >= 0:\n self._flag |= FLAG_HEADER_END\n data = self._buffer[:indexHeader]\n del self._buffer[:indexHeader + len(self._data.markHeaderEnd)]\n else:\n nbytes = max(len(self._buffer), nbytes)\n data = self._buffer[:nbytes]\n del self._buffer[:nbytes]\n return data\n\n def _pullHeaders(self):\n \"\"\"\n Pull the multi part headers, it will leave the content stream attached to the header reader at the body begin.\n \n @return: dictionary{string, string}\n The multi part headers.\n \"\"\"\n assert self._flag & FLAG_MARK_START, 'Not at a separator mark position, cannot process headers'\n data = bytearray()\n while 1:\n data.extend(self._readToHeader(self._data.packageSize))\n if self._flag & FLAG_HEADER_END:\n self._flag ^= FLAG_HEADER_END\n break\n if self._flag & FLAG_CONTENT_END:\n raise DevelError('No empty line after multi part header')\n continue\n\n reader = codecs.getreader(self._data.charSet)(BytesIO(data))\n headers = {}\n while True:\n line = reader.readline()\n if line == '':\n break\n hindex = line.find(self._data.separatorHeader)\n if hindex < 0:\n raise DevelError(\"Invalid multi part header '%s'\" % line)\n headers[line[:hindex]] = line[hindex + 1:].strip()\n\n self._flag ^= FLAG_MARK_START\n return headers\n\n\nclass NextContent:\n \"\"\"\n Callable used for processing the next request content.\n \"\"\"\n __slots__ = ('_requestCnt', '_response', '_processing', '_data', '_stream', '_nextCnt')\n\n def __init__(self, requestCnt, response, processing, data, stream):\n \"\"\"\n Construct the next callable.\n \n @param requestCnt: 
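# [Editor's sketch - illustrative, not part of the original dataset record.]
# _pullHeaders() above reads bytes up to the blank line ('\r\n\r\n'), decodes
# them with the configured charset, and splits each line once on ':' into a
# header dict. The parsing step in isolation:
def parse_header_block(raw, charset='utf8', separator=':'):
    headers = {}
    for line in raw.decode(charset).splitlines():
        if not line:
            continue
        name, sep, value = line.partition(separator)
        if not sep:
            raise ValueError('invalid header line %r' % line)
        headers[name] = value.strip()
    return headers

block = b'Content-Disposition: form-data; name="file"\r\nContent-Type: text/plain'
assert parse_header_block(block)['Content-Type'] == 'text/plain'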
RequestContentMultiPart\n The current request content.\n @param response: ResponseMultiPart\n The response context.\n @param processing: Processing\n The processing used for populating the next request content.\n @param data: DataMultiPart\n The multi part data.\n @param stream: StreamMultiPart\n The stream that contains the multi part.\n @return: RequestContent\n The next content.\n \"\"\"\n assert isinstance(requestCnt, RequestContentMultiPart), 'Invalid request content %s' % requestCnt\n assert isinstance(response, ResponseMultiPart), 'Invalid response %s' % response\n assert isinstance(processing, Processing), 'Invalid processing %s' % processing\n assert isinstance(data, DataMultiPart), 'Invalid data %s' % data\n assert isinstance(stream, StreamMultiPart), 'Invalid stream %s' % stream\n self._requestCnt = requestCnt\n self._response = response\n self._processing = processing\n self._data = data\n self._stream = stream\n self._nextCnt = None\n return\n\n def __call__(self):\n \"\"\"\n Provides the next multi part request content based on the provided multi part stream.\n \"\"\"\n if self._nextCnt is not None:\n return self._nextCnt\n else:\n stream, processing = self._stream, self._processing\n assert isinstance(stream, StreamMultiPart), 'Invalid stream %s' % stream\n assert isinstance(processing, Processing), 'Invalid processing %s' % processing\n if not stream._flag & (FLAG_CONTENT_END | FLAG_MARK_END):\n if not stream._flag & FLAG_MARK_START:\n while True:\n stream._readToMark(self._data.packageSize)\n if stream._flag & FLAG_MARK_START:\n break\n if stream._flag & FLAG_END:\n return\n\n req = processing.ctx.request()\n self._nextCnt = reqCnt = self._requestCnt.__class__()\n assert isinstance(req, RequestPopulate), 'Invalid request %s' % req\n assert isinstance(reqCnt, RequestContentMultiPart), 'Invalid request content %s' % reqCnt\n req.headers = stream._pullHeaders()\n if stream._flag & FLAG_CLOSED:\n stream._flag ^= FLAG_CLOSED\n reqCnt.source = stream\n reqCnt.fetchNextContent = NextContent(reqCnt, self._response, self._processing, self._data, stream)\n reqCnt.previousContent = self._requestCnt\n chain = Chain(self._processing).process(request=req, requestCnt=reqCnt, response=self._response)\n return chain.doAll().arg.requestCnt\n return","sub_path":"pycfiles/ally_py-0.9.0-py3.2/parsing_multipart.cpython-32.py","file_name":"parsing_multipart.cpython-32.py","file_ext":"py","file_size_in_byte":16520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"190213984","text":"for j in range(3,9,2):\n print (f'Valor de j={j}')\n\n#va del limite superior-1 'range'\nfor i in range(9):\n print (f'Valor de i={i}')\n\n\nfrase = 'Hardware es a lo que le pegas y software es la razon por la que le pegas'\nfor s in frase[:-6:-1]:\n print(f\"letra:{s.upper()}\")\n\n\nlista=[1,\"hola\",True,2.46,[1,2]]\nfor l in lista[:-3:-1]:\n print (l)\n\nlistota=[[\"Pera\",\"Manzana\",\"Uva\"],[\"Rojo\",\"Verde\",\"Azul\"],[1,2,3]]\nprint(listota)\nfor elem in listota:\n for e in elem:\n print(e)\n\nfor i in range(len(listota)):\n for j in range (len(listota[i])):\n print (listota[i][j])\n","sub_path":"PrimeraClase/For.py","file_name":"For.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"279946160","text":"#\n# Example file for working with conditional statements\n#\n\n\ndef main():\n x, y = 1000, 100\n\n # conditional flow uses if, elif, else\n if(x < y):\n st = 
\"x is less then y\"\n elif(x == y):\n st = \"x is the same as y\"\n else:\n st = \"x is more then y\"\n\n print(st)\n # conditional statements let you use \"a if C else b\"\n print(\"Now, using a single line\")\n st = \"x is less then y\" if (x < y) else \"y is less then x\"\n print(st)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Ch2/conditionals_start.py","file_name":"conditionals_start.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"132264118","text":"import supriya.osc\nfrom supriya.commands.Request import Request\n\n\nclass GroupNewRequest(Request):\n \"\"\"\n A /g_new request.\n\n ::\n\n >>> import supriya.commands\n >>> import supriya.realtime\n >>> request = supriya.commands.GroupNewRequest(\n ... add_action=supriya.realtime.AddAction.ADD_TO_TAIL,\n ... node_id=1001,\n ... target_node_id=1000,\n ... )\n >>> request\n GroupNewRequest(\n add_action=AddAction.ADD_TO_TAIL,\n node_id=1001,\n target_node_id=1000,\n )\n\n ::\n\n >>> message = request.to_osc_message()\n >>> message\n OscMessage(21, 1001, 1, 1000)\n\n ::\n\n >>> message.address == supriya.commands.RequestId.GROUP_NEW\n True\n\n \"\"\"\n\n ### CLASS VARIABLES ###\n\n __slots__ = (\n '_add_action',\n '_node_id',\n '_target_node_id',\n )\n\n ### INITIALIZER ###\n\n def __init__(\n self,\n add_action=None,\n node_id=None,\n target_node_id=None,\n ):\n import supriya.realtime\n Request.__init__(self)\n self._add_action = supriya.realtime.AddAction.from_expr(add_action)\n self._node_id = node_id\n self._target_node_id = target_node_id\n\n ### PUBLIC METHODS ###\n\n def to_osc_message(self, with_textual_osc_command=False):\n if with_textual_osc_command:\n request_id = self.request_command\n else:\n request_id = int(self.request_id)\n add_action = int(self.add_action)\n node_id = int(self.node_id)\n target_node_id = int(self.target_node_id)\n message = supriya.osc.OscMessage(\n request_id,\n node_id,\n add_action,\n target_node_id,\n )\n return message\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def add_action(self):\n return self._add_action\n\n @property\n def node_id(self):\n return self._node_id\n\n @property\n def target_node_id(self):\n return self._target_node_id\n\n @property\n def response_specification(self):\n import supriya.commands\n return {\n supriya.commands.NodeInfoResponse: {\n 'action': supriya.commands.NodeAction.NODE_CREATED,\n 'node_id': self.node_id,\n },\n }\n\n @property\n def request_id(self):\n import supriya.commands\n return supriya.commands.RequestId.GROUP_NEW\n","sub_path":"supriya/commands/GroupNewRequest.py","file_name":"GroupNewRequest.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"554240736","text":"\"\"\"\nProgram:\n--------\n Program 3 - \n\nDescription:\n------------\n Read in Json files to get a adjusted x and y \n coordinate to print out earthquake data to the screen\n \nName: Matthew Trebing\nDate: 22 June 2017\n\"\"\"\nimport pygame\nimport sys,os\nimport json\nimport random\nimport time\n\ndef clean_area(screen,origin,width,height,color):\n \"\"\"\n Prints a color rectangle (typically white) to \"erase\" an area on the screen.\n Could be used to erase a small area, or the entire screen.\n \"\"\"\n ox,oy = origin\n points = [(ox,oy),(ox+width,oy),(ox+width,oy+height),(ox,oy+height),(ox,oy)]\n pygame.draw.polygon(screen, color, points, 0)\n\nif __name__=='__main__':\n DIRPATH 
= os.path.dirname(os.path.realpath(__file__))\n background_colour = (255,255,255)\n black = (0,0,0)\n\n (width, height) = (1024,512)\n pygame.init()\n bg=pygame.image.load(DIRPATH +\"\\\\World Map.png\" )\n screen = pygame.display.set_mode((width, height))\n pygame.display.set_caption('MBRs')\n screen.fill(background_colour)\n screen.blit(bg, (0, 0))\n pygame.display.flip()\n f = open(DIRPATH +'/'+'Adjusted_JSON_Files'+'/'+'quakes-adjusted.json','r')\n points = json.loads(f.read())\n orange=(255,165,0)\n\n running = True\n delay=1\n while running:\n for p in points[:delay]:\n pygame.draw.circle(screen, orange, p, 1,0)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n pygame.image.save(screen,DIRPATH+'/'+'screen_shot.png')\n# ''' if event.type == pygame.MOUSEBUTTONDOWN:\n# clean_area(screen,(0,0),width,height,(255,255,255))'''\n delay+=1\n pygame.display.flip()\n pygame.time.wait(45)","sub_path":"Assignments/Program_3/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"525596694","text":"import requests\nfrom bs4 import BeautifulSoup as BS\nimport re\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'}\nurl = 'https://www.google.com/search?q=eur+to+byr&oq=eur+to+byr&aqs=chrome..69i57j0i10i22i30j0i22i30l5j0i10i22i30j0i22i30l2.3575j1j4&sourceid=chrome&ie=UTF-8'\n\nr = requests.get(url, timeout=5, headers=headers)\n\nsoup = BS(r.content, 'html.parser')\nexchange_rate =float(soup.find('span', class_=\"DFlfde SwHCTb\")['data-value'])\n\n\ndef main(rate):\n user_choice = input('Please choose.\\n'\n '1. EUR to BYR\\n'\n '2.BYR to EUR\\n'\n '0. Exit\\n'\n '--> ')\n if user_choice == '1':\n eur_to_byr(rate)\n if user_choice == '2':\n byr_to_eur(rate)\n if user_choice == '0':\n print('Good bye')\n exit()\n else:\n print('Choice is out of range')\n main(rate)\n\ndef eur_to_byr(rate):\n try:\n ammount = float(input('Please enter amount in EUR: '))\n except:\n print('Amount you entered is not numeric, please try again!')\n eur_to_byr(rate)\n else:\n print(ammount * rate)\n main(rate)\n\ndef byr_to_eur(rate):\n try:\n ammount = float(input('Please enter amount in EUR: '))\n except:\n print('Amount you entered is not numeric, please try again!')\n eur_to_byr(rate)\n else:\n print(ammount / rate)\n main(rate)\n\nmain(exchange_rate)","sub_path":"014_001_001.py","file_name":"014_001_001.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"41362364","text":"#! 
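# [Editor's note - the converter above re-enters main() recursively after
# every conversion, so the call stack grows until exit(); byr_to_eur also
# prompts for an amount "in EUR" and falls back to eur_to_byr on bad input.
# An iterative sketch with exact-match dispatch:]
def converter_loop(rate):
    while True:
        choice = input('1. EUR->BYN  2. BYN->EUR  0. Exit\n--> ').strip()
        if choice == '0':
            print('Good bye')
            return
        if choice not in ('1', '2'):
            print('Choice is out of range')
            continue
        try:
            amount = float(input('Please enter amount: '))
        except ValueError:
            print('Amount you entered is not numeric, please try again!')
            continue
        print(amount * rate if choice == '1' else amount / rate)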
/usr/bin/env python\n\ndef load_hex_file( filename ):\n arr = []\n with open( filename, 'r' ) as fd:\n for line in fd:\n line = line.strip()\n if len( line ) > 0:\n arr.append( [ int( x, 16 ) for x in line.split() ] )\n return arr\n\ndef to_bin( byte_arr ):\n '''Converts byte array 1-d/2-d into binary array'''\n if type( byte_arr ) == list and type( byte_arr[0] ) == int:\n return sum( [ [ ( n >> i ) & 0x01 for i in range( 7, -1, -1 ) ] for n in byte_arr ], [] )\n else:\n return [ to_bin( sub_arr ) for sub_arr in byte_arr ]\n\ndef rotate_right( arr, r ):\n r %= len( arr )\n return arr[-r:] + arr[:-r]\n","sub_path":"bautil.py","file_name":"bautil.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"130957906","text":"# coding: utf-8\n# Copyright 2013 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests that walk through Course Builder pages.\"\"\"\n\n__author__ = 'Sean Lip'\n\nimport csv\nimport datetime\nimport json\nimport logging\nimport os\nimport re\nimport shutil\nimport urllib\nimport appengine_config\nfrom controllers import lessons\nfrom controllers import sites\nfrom controllers import utils\nfrom controllers.utils import XsrfTokenManager\nfrom models import config\nfrom models import jobs\nfrom models import models\nfrom models import vfs\nfrom models.utils import get_all_scores\nfrom models.utils import get_score\nfrom modules.announcements.announcements import AnnouncementEntity\nfrom tools import verify\nimport actions\nfrom actions import assert_contains\nfrom actions import assert_contains_all_of\nfrom actions import assert_does_not_contain\nfrom actions import assert_equals\nfrom google.appengine.api import namespace_manager\n\n\nclass InfrastructureTest(actions.TestBase):\n \"\"\"Test core infrastructure classes agnostic to specific user roles.\"\"\"\n\n def test_utf8_datastore(self):\n \"\"\"Test writing to and reading from datastore using UTF-8 content.\"\"\"\n event = models.EventEntity()\n event.source = 'test-source'\n event.user_id = 'test-user-id'\n event.data = u'Test Data (тест данные)'\n event.put()\n\n stored_event = models.EventEntity().get_by_id([event.key().id()])\n assert 1 == len(stored_event)\n assert event.data == stored_event[0].data\n\n def assert_queriable(self, entity, name, date_type=datetime.datetime):\n \"\"\"Create some entities and check that single-property queries work.\"\"\"\n for i in range(1, 32):\n item = entity(\n key_name='%s_%s' % (date_type.__class__.__name__, i))\n setattr(item, name, date_type(2012, 1, i))\n item.put()\n\n # Descending order.\n items = entity.all().order('-%s' % name).fetch(1000)\n assert len(items) == 31\n assert getattr(items[0], name) == date_type(2012, 1, 31)\n\n # Ascending order.\n items = entity.all().order('%s' % name).fetch(1000)\n assert len(items) == 31\n assert getattr(items[0], name) == date_type(2012, 1, 1)\n\n def test_indexed_properties(self):\n \"\"\"Test whether 
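# [Editor's sketch - illustrative, not part of the original dataset record.]
# to_bin() above unpacks each byte MSB-first with (n >> i) & 1 for i = 7..0
# and flattens the per-byte lists with sum(..., []). The core expansion, plus
# the slice identity behind rotate_right:
def byte_to_bits(n):
    return [(n >> i) & 0x01 for i in range(7, -1, -1)]

assert byte_to_bits(0xA5) == [1, 0, 1, 0, 0, 1, 0, 1]
assert byte_to_bits(0x01) == [0, 0, 0, 0, 0, 0, 0, 1]
# rotate_right(arr, r) is a plain slice rotation: arr[-r:] + arr[:-r]
assert [1, 2, 3][-1:] + [1, 2, 3][:-1] == [3, 1, 2]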
entities support specific query types.\"\"\"\n\n # A 'DateProperty' or 'DateTimeProperty' of each persistent entity must\n # be indexed. This is true even if the application doesn't execute any\n # queries relying on the index. The index is still critically important\n # for managing data, for example, for bulk data download or for\n # incremental computations. Using index, the entire table can be\n # processed in daily, weekly, etc. chunks and it is easy to query for\n # new data. If we did not have an index, chunking would have to be done\n # by the primary index, where it is impossible to separate recently\n # added/modified rows from the rest of the data. Having this index adds\n # to the cost of datastore writes, but we believe it is important to\n # have it. Below we check that all persistent date/datetime properties\n # are indexed.\n\n self.assert_queriable(AnnouncementEntity, 'date', datetime.date)\n self.assert_queriable(models.EventEntity, 'recorded_on')\n self.assert_queriable(models.Student, 'enrolled_on')\n self.assert_queriable(models.StudentAnswersEntity, 'updated_on')\n self.assert_queriable(jobs.DurableJobEntity, 'updated_on')\n\n def test_assets_and_date(self):\n \"\"\"Verify semantics of all asset and data files.\"\"\"\n\n def echo(unused_message):\n pass\n\n warnings, errors = verify.Verifier().load_and_verify_model(echo)\n assert not errors and not warnings\n\n def test_config_visible_from_any_namespace(self):\n \"\"\"Test that ConfigProperty is visible from any namespace.\"\"\"\n\n assert (\n config.UPDATE_INTERVAL_SEC.value ==\n config.UPDATE_INTERVAL_SEC.default_value)\n new_value = config.UPDATE_INTERVAL_SEC.default_value + 5\n\n # Add datastore override for known property.\n prop = config.ConfigPropertyEntity(\n key_name=config.UPDATE_INTERVAL_SEC.name)\n prop.value = str(new_value)\n prop.is_draft = False\n prop.put()\n\n # Check visible from default namespace.\n config.Registry.last_update_time = 0\n assert config.UPDATE_INTERVAL_SEC.value == new_value\n\n # Check visible from another namespace.\n old_namespace = namespace_manager.get_namespace()\n try:\n namespace_manager.set_namespace(\n 'ns-test_config_visible_from_any_namespace')\n\n config.Registry.last_update_time = 0\n assert config.UPDATE_INTERVAL_SEC.value == new_value\n finally:\n namespace_manager.set_namespace(old_namespace)\n\n\nclass AdminAspectTest(actions.TestBase):\n \"\"\"Test site from the Admin perspective.\"\"\"\n\n def test_python_console(self):\n \"\"\"Test access rights to the Python console.\"\"\"\n\n email = 'test_python_console@google.com'\n\n # Check normal user has no access.\n actions.login(email)\n response = self.testapp.get('/admin?action=console')\n assert_equals(response.status_int, 302)\n\n response = self.testapp.post('/admin?action=console')\n assert_equals(response.status_int, 302)\n\n # Check delegated admin has no access.\n os.environ['gcb_admin_user_emails'] = '[%s]' % email\n actions.login(email)\n response = self.testapp.get('/admin?action=console')\n assert_equals(response.status_int, 200)\n assert_contains(\n 'You must be an actual admin user to continue.', response.body)\n\n response = self.testapp.get('/admin?action=console')\n assert_equals(response.status_int, 200)\n assert_contains(\n 'You must be an actual admin user to continue.', response.body)\n\n del os.environ['gcb_admin_user_emails']\n\n # Check actual admin has access.\n actions.login(email, True)\n response = self.testapp.get('/admin?action=console')\n assert_equals(response.status_int, 200)\n\n 
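# [Editor's sketch - illustrative, not part of the original dataset record.]
# test_config_visible_from_any_namespace below uses the standard
# save / set / try / finally-restore dance around namespace_manager. That
# pattern generalises to any getter/setter pair; here a plain dict stands in
# for the App Engine namespace_manager so the demo runs without the SDK:
from contextlib import contextmanager

@contextmanager
def temporary(getter, setter, value):
    """Save the current value, set a new one, and always restore on exit."""
    old = getter()
    setter(value)
    try:
        yield
    finally:
        setter(old)

_state = {'ns': ''}
with temporary(lambda: _state['ns'], lambda v: _state.update(ns=v), 'ns-test'):
    assert _state['ns'] == 'ns-test'
assert _state['ns'] == ''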
response.form.set('code', 'print \"foo\" + \"bar\"')\n response = self.submit(response.form)\n assert_contains('foobar', response.body)\n\n def test_non_admin_has_no_access(self):\n \"\"\"Test non admin has no access to pages or REST endpoints.\"\"\"\n\n email = 'test_non_admin_has_no_access@google.com'\n actions.login(email)\n\n # Add datastore override.\n prop = config.ConfigPropertyEntity(\n key_name='gcb_config_update_interval_sec')\n prop.value = '5'\n prop.is_draft = False\n prop.put()\n\n # Check user has no access to specific pages and actions.\n response = self.testapp.get('/admin?action=settings')\n assert_equals(response.status_int, 302)\n\n response = self.testapp.get(\n '/admin?action=config_edit&name=gcb_admin_user_emails')\n assert_equals(response.status_int, 302)\n\n response = self.testapp.post(\n '/admin?action=config_reset&name=gcb_admin_user_emails')\n assert_equals(response.status_int, 302)\n\n # Check user has no rights to GET verb.\n response = self.testapp.get(\n '/rest/config/item?key=gcb_config_update_interval_sec')\n assert_equals(response.status_int, 200)\n json_dict = json.loads(response.body)\n assert json_dict['status'] == 401\n assert json_dict['message'] == 'Access denied.'\n\n # Check user has no rights to PUT verb.\n payload_dict = {}\n payload_dict['value'] = '666'\n payload_dict['is_draft'] = False\n request = {}\n request['key'] = 'gcb_config_update_interval_sec'\n request['payload'] = json.dumps(payload_dict)\n\n # Check XSRF token is required.\n response = self.testapp.put('/rest/config/item?%s' % urllib.urlencode(\n {'request': json.dumps(request)}), {})\n assert_equals(response.status_int, 200)\n assert_contains('\"status\": 403', response.body)\n\n # Check user still has no rights to PUT verb even if he somehow\n # obtained a valid XSRF token.\n request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(\n 'config-property-put')\n response = self.testapp.put('/rest/config/item?%s' % urllib.urlencode(\n {'request': json.dumps(request)}), {})\n assert_equals(response.status_int, 200)\n json_dict = json.loads(response.body)\n assert json_dict['status'] == 401\n assert json_dict['message'] == 'Access denied.'\n\n def test_admin_list(self):\n \"\"\"Test delegation of admin access to another user.\"\"\"\n\n email = 'test_admin_list@google.com'\n actions.login(email)\n\n # Add environment variable override.\n os.environ['gcb_admin_user_emails'] = '[%s]' % email\n\n # Add datastore override.\n prop = config.ConfigPropertyEntity(\n key_name='gcb_config_update_interval_sec')\n prop.value = '5'\n prop.is_draft = False\n prop.put()\n\n # Check user has access now.\n response = self.testapp.get('/admin?action=settings')\n assert_equals(response.status_int, 200)\n\n # Check overrides are active and have proper management actions.\n assert_contains('gcb_admin_user_emails', response.body)\n assert_contains('[test_admin_list@google.com]', response.body)\n assert_contains(\n '/admin?action=config_override&name=gcb_admin_user_emails',\n response.body)\n assert_contains(\n '/admin?action=config_edit&name=gcb_config_update_interval_sec',\n response.body)\n\n # Check editor page has proper actions.\n response = self.testapp.get(\n '/admin?action=config_edit&name=gcb_config_update_interval_sec')\n assert_equals(response.status_int, 200)\n assert_contains('/admin?action=config_reset', response.body)\n assert_contains('name=gcb_config_update_interval_sec', response.body)\n\n # Remove override.\n del os.environ['gcb_admin_user_emails']\n\n # Check user has no access.\n 
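# [Editor's sketch - illustrative, not part of the original dataset record.]
# The REST tests above always send a single url-encoded 'request' parameter
# whose value is a JSON envelope {key, payload (itself JSON), xsrf_token};
# the server answers 403 unless the token is present and valid. Building that
# query string (urllib.parse here; the Python 2 tests use urllib.urlencode):
import json
from urllib.parse import urlencode

def rest_query(key, payload_dict, xsrf_token=None):
    request = {'key': key, 'payload': json.dumps(payload_dict)}
    if xsrf_token:
        request['xsrf_token'] = xsrf_token
    return urlencode({'request': json.dumps(request)})

q = rest_query('gcb_config_update_interval_sec', {'value': '666', 'is_draft': False})
assert q.startswith('request=%7B')   # the opening '{' arrives percent-encoded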
response = self.testapp.get('/admin?action=settings')\n assert_equals(response.status_int, 302)\n\n def test_access_to_admin_pages(self):\n \"\"\"Test access to admin pages.\"\"\"\n\n # assert anonymous user has no access\n response = self.testapp.get('/admin?action=settings')\n assert_equals(response.status_int, 302)\n\n # assert admin user has access\n email = 'test_access_to_admin_pages@google.com'\n name = 'Test Access to Admin Pages'\n\n actions.login(email, True)\n actions.register(self, name)\n\n response = self.testapp.get('/admin')\n assert_contains('Power Searching with Google', response.body)\n assert_contains('All Courses', response.body)\n\n response = self.testapp.get('/admin?action=settings')\n assert_contains('gcb_admin_user_emails', response.body)\n assert_contains('gcb_config_update_interval_sec', response.body)\n assert_contains('All Settings', response.body)\n\n response = self.testapp.get('/admin?action=perf')\n assert_contains('gcb-admin-uptime-sec:', response.body)\n assert_contains('In-process Performance Counters', response.body)\n\n response = self.testapp.get('/admin?action=deployment')\n assert_contains('application_id: testbed-test', response.body)\n assert_contains('About the Application', response.body)\n\n actions.unregister(self)\n actions.logout()\n\n # assert not-admin user has no access\n actions.login(email)\n actions.register(self, name)\n response = self.testapp.get('/admin?action=settings')\n assert_equals(response.status_int, 302)\n\n def test_multiple_courses(self):\n \"\"\"Test courses admin page with two courses configured.\"\"\"\n\n courses = 'course:/foo:/foo-data, course:/bar:/bar-data:nsbar'\n os.environ[sites.GCB_COURSES_CONFIG_ENV_VAR_NAME] = courses\n\n email = 'test_multiple_courses@google.com'\n\n actions.login(email, True)\n response = self.testapp.get('/admin')\n assert_contains('Course Builder > Admin > Courses', response.body)\n assert_contains('Total: 2 item(s)', response.body)\n\n # Check ocurse URL's.\n assert_contains('', response.body)\n assert_contains('', response.body)\n\n # Check content locations.\n assert_contains('/foo-data', response.body)\n assert_contains('/bar-data', response.body)\n\n # Check namespaces.\n assert_contains('gcb-course-foo-data', response.body)\n assert_contains('nsbar', response.body)\n\n\nclass CourseAuthorAspectTest(actions.TestBase):\n \"\"\"Tests the site from the Course Author perspective.\"\"\"\n\n def test_dashboard(self):\n \"\"\"Test course dashboard.\"\"\"\n\n email = 'test_dashboard@google.com'\n name = 'Test Dashboard'\n\n # Non-admin does't have access.\n actions.login(email)\n response = self.get('dashboard')\n assert_equals(response.status_int, 302)\n\n actions.register(self, name)\n assert_equals(response.status_int, 302)\n actions.logout()\n\n # Admin has access.\n actions.login(email, True)\n response = self.get('dashboard')\n assert_contains('Google > Dashboard > Outline', response.body)\n\n # Tests outline view.\n response = self.get('dashboard')\n assert_contains('Unit 3 - Advanced techniques', response.body)\n\n # Test assets view.\n response = self.get('dashboard?action=assets')\n assert_contains('Google > Dashboard > Assets', response.body)\n assert_contains('data/lesson.csv', response.body)\n assert_contains('assets/css/main.css', response.body)\n assert_contains('assets/img/Image1.5.png', response.body)\n assert_contains('assets/js/activity-3.2.js', response.body)\n\n # Test settings view.\n response = self.get('dashboard?action=settings')\n assert_contains(\n 'Google > 
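# [Editor's sketch - the default-namespace rule below is inferred from the
# assertions in test_multiple_courses (content folder /foo-data maps to
# namespace gcb-course-foo-data), not from shown framework source.]
# That test feeds a config string of comma-separated entries of the form
# 'course:<url-prefix>:<content-folder>[:<namespace>]'. A parser sketch:
def parse_courses_config(cfg):
    out = []
    for entry in cfg.split(','):
        parts = entry.strip().split(':')
        kind, prefix, folder = parts[0], parts[1], parts[2]
        ns = parts[3] if len(parts) > 3 else 'gcb-course' + folder.replace('/', '-')
        out.append((kind, prefix, folder, ns))
    return out

cfg = 'course:/foo:/foo-data, course:/bar:/bar-data:nsbar'
assert parse_courses_config(cfg) == [
    ('course', '/foo', '/foo-data', 'gcb-course-foo-data'),
    ('course', '/bar', '/bar-data', 'nsbar'),
]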
Dashboard > Settings', response.body)\n assert_contains('course.yaml', response.body)\n assert_contains('title: \\'Power Searching with Google\\'', response.body)\n assert_contains('locale: \\'en_US\\'', response.body)\n\n # Tests student statistics view.\n response = self.get('dashboard?action=students')\n assert_contains(\n 'Google > Dashboard > Students', response.body)\n assert_contains('have not been calculated yet', response.body)\n\n compute_form = response.forms['gcb-compute-student-stats']\n response = self.submit(compute_form)\n assert_equals(response.status_int, 302)\n assert len(self.taskq.GetTasks('default')) == 1\n\n response = self.get('dashboard?action=students')\n assert_contains('is running', response.body)\n\n self.execute_all_deferred_tasks()\n\n response = self.get('dashboard?action=students')\n assert_contains('were last updated on', response.body)\n assert_contains('currently enrolled: 1', response.body)\n assert_contains('total: 1', response.body)\n\n # Tests assessment statistics.\n old_namespace = namespace_manager.get_namespace()\n namespace_manager.set_namespace(self.namespace)\n try:\n for i in range(5):\n student = models.Student(key_name='key-%s' % i)\n student.is_enrolled = True\n student.scores = json.dumps({'test-assessment': i})\n student.put()\n finally:\n namespace_manager.set_namespace(old_namespace)\n\n response = self.get('dashboard?action=students')\n compute_form = response.forms['gcb-compute-student-stats']\n response = self.submit(compute_form)\n\n self.execute_all_deferred_tasks()\n\n response = self.get('dashboard?action=students')\n assert_contains('currently enrolled: 6', response.body)\n assert_contains(\n 'test-assessment: completed 5, average score 2.0', response.body)\n\n def test_trigger_sample_announcements(self):\n \"\"\"Test course author can trigger adding sample announcements.\"\"\"\n email = 'test_announcements@google.com'\n name = 'Test Announcements'\n\n actions.login(email, True)\n actions.register(self, name)\n\n response = actions.view_announcements(self)\n assert_contains('Example Announcement', response.body)\n assert_contains('Welcome to the final class!', response.body)\n assert_does_not_contain('No announcements yet.', response.body)\n\n def test_manage_announcements(self):\n \"\"\"Test course author can manage announcements.\"\"\"\n email = 'test_announcements@google.com'\n name = 'Test Announcements'\n\n actions.login(email, True)\n actions.register(self, name)\n\n # add new\n response = actions.view_announcements(self)\n add_form = response.forms['gcb-add-announcement']\n response = self.submit(add_form)\n assert_equals(response.status_int, 302)\n\n # check added\n response = actions.view_announcements(self)\n assert_contains('Sample Announcement (Draft)', response.body)\n\n # delete draft\n response = actions.view_announcements(self)\n delete_form = response.forms['gcb-delete-announcement-1']\n response = self.submit(delete_form)\n assert_equals(response.status_int, 302)\n\n # check deleted\n assert_does_not_contain('Welcome to the final class!', response.body)\n\n def test_announcements_rest(self):\n \"\"\"Test REST access to announcements.\"\"\"\n email = 'test_announcements_rest@google.com'\n name = 'Test Announcements Rest'\n\n actions.login(email, True)\n actions.register(self, name)\n\n response = actions.view_announcements(self)\n assert_does_not_contain('My Test Title', response.body)\n\n # REST GET existing item\n items = AnnouncementEntity.all().fetch(1)\n for item in items:\n response = 
self.get('rest/announcements/item?key=%s' % item.key())\n            json_dict = json.loads(response.body)\n            assert json_dict['status'] == 200\n            assert 'message' in json_dict\n            assert 'payload' in json_dict\n\n            payload_dict = json.loads(json_dict['payload'])\n            assert 'title' in payload_dict\n            assert 'date' in payload_dict\n\n            # REST PUT item\n            payload_dict['title'] = u'My Test Title Мой заголовок теста'\n            payload_dict['date'] = '2012/12/31'\n            payload_dict['is_draft'] = True\n            request = {}\n            request['key'] = str(item.key())\n            request['payload'] = json.dumps(payload_dict)\n\n            # Check XSRF is required.\n            response = self.put('rest/announcements/item?%s' % urllib.urlencode(\n                {'request': json.dumps(request)}), {})\n            assert_equals(response.status_int, 200)\n            assert_contains('\"status\": 403', response.body)\n\n            # Check PUT works.\n            request['xsrf_token'] = json_dict['xsrf_token']\n            response = self.put('rest/announcements/item?%s' % urllib.urlencode(\n                {'request': json.dumps(request)}), {})\n            assert_equals(response.status_int, 200)\n            assert_contains('\"status\": 200', response.body)\n\n            # Confirm change is visible on the page.\n            response = self.get('announcements')\n            assert_contains(\n                u'My Test Title Мой заголовок теста (Draft)', response.body)\n\n        # REST GET not-existing item\n        response = self.get('rest/announcements/item?key=not_existent_key')\n        json_dict = json.loads(response.body)\n        assert json_dict['status'] == 404\n\n\nclass StudentAspectTest(actions.TestBase):\n    \"\"\"Test the site from the Student perspective.\"\"\"\n\n    def test_view_announcements(self):\n        \"\"\"Test student aspect of announcements.\"\"\"\n        email = 'test_announcements@google.com'\n        name = 'Test Announcements'\n\n        actions.login(email)\n        actions.register(self, name)\n\n        # Check no announcements yet.\n        response = actions.view_announcements(self)\n        assert_does_not_contain('Example Announcement', response.body)\n        assert_does_not_contain('Welcome to the final class!', response.body)\n        assert_contains('No announcements yet.', response.body)\n        actions.logout()\n\n        # Login as admin and add announcements.\n        actions.login('admin@sample.com', True)\n        actions.register(self, 'admin')\n        response = actions.view_announcements(self)\n        actions.logout()\n\n        # Check we can see non-draft announcements.\n        actions.login(email)\n        response = actions.view_announcements(self)\n        assert_contains('Example Announcement', response.body)\n        assert_does_not_contain('Welcome to the final class!', response.body)\n        assert_does_not_contain('No announcements yet.', response.body)\n\n        # Check no access to draft announcements via the REST handler.\n        items = AnnouncementEntity.all().fetch(1000)\n        for item in items:\n            response = self.get('rest/announcements/item?key=%s' % item.key())\n            if item.is_draft:\n                json_dict = json.loads(response.body)\n                assert json_dict['status'] == 401\n            else:\n                assert_equals(response.status_int, 200)\n\n    def test_registration(self):\n        \"\"\"Test student registration.\"\"\"\n        email = 'test_registration@example.com'\n        name1 = 'Test Student'\n        name2 = 'John Smith'\n        name3 = u'Pavel Simakov (тест данные)'\n\n        actions.login(email)\n\n        actions.register(self, name1)\n        actions.check_profile(self, name1)\n\n        actions.change_name(self, name2)\n        actions.unregister(self)\n\n        actions.register(self, name3)\n        actions.check_profile(self, name3)\n\n    def test_registration_closed(self):\n        \"\"\"Test student registration when course is full.\"\"\"\n\n        email = 'test_registration_closed@example.com'\n        name = 'Test Registration Closed'\n\n        # Override course.yaml 
settings by patching app_context.\n get_environ_old = sites.ApplicationContext.get_environ\n\n def get_environ_new(self):\n environ = get_environ_old(self)\n environ['reg_form']['can_register'] = False\n return environ\n\n sites.ApplicationContext.get_environ = get_environ_new\n\n # Try to login and register.\n actions.login(email)\n try:\n actions.register(self, name)\n raise actions.ShouldHaveFailedByNow(\n 'Expected to fail: new registrations should not be allowed '\n 'when registration is closed.')\n except actions.ShouldHaveFailedByNow as e:\n raise e\n except:\n pass\n\n # Clean up app_context.\n sites.ApplicationContext.get_environ = get_environ_old\n\n def test_permissions(self):\n \"\"\"Test student permissions, and which pages they can view.\"\"\"\n email = 'test_permissions@example.com'\n name = 'Test Permissions'\n\n actions.login(email)\n\n actions.register(self, name)\n actions.Permissions.assert_enrolled(self)\n\n actions.unregister(self)\n actions.Permissions.assert_unenrolled(self)\n\n actions.register(self, name)\n actions.Permissions.assert_enrolled(self)\n\n def test_login_and_logout(self):\n \"\"\"Test if login and logout behave as expected.\"\"\"\n email = 'test_login_logout@example.com'\n\n actions.Permissions.assert_logged_out(self)\n actions.login(email)\n\n actions.Permissions.assert_unenrolled(self)\n\n actions.logout()\n actions.Permissions.assert_logged_out(self)\n\n def test_lesson_activity_navigation(self):\n \"\"\"Test navigation between lesson/activity pages.\"\"\"\n\n email = 'test_lesson_activity_navigation@example.com'\n name = 'Test Lesson Activity Navigation'\n\n actions.login(email)\n actions.register(self, name)\n\n response = self.get('unit?unit=1&lesson=1')\n assert_does_not_contain('Previous Page', response.body)\n assert_contains('Next Page', response.body)\n\n response = self.get('unit?unit=2&lesson=3')\n assert_contains('Previous Page', response.body)\n assert_contains('Next Page', response.body)\n\n response = self.get('unit?unit=3&lesson=5')\n assert_contains('Previous Page', response.body)\n assert_does_not_contain('Next Page', response.body)\n assert_contains('End', response.body)\n\n def test_attempt_activity_event(self):\n \"\"\"Test activity attempt generates event.\"\"\"\n\n email = 'test_attempt_activity_event@example.com'\n name = 'Test Attempt Activity Event'\n\n actions.login(email)\n actions.register(self, name)\n\n # Enable event recording.\n config.Registry.db_overrides[\n lessons.CAN_PERSIST_ACTIVITY_EVENTS.name] = True\n\n # Prepare event.\n request = {}\n request['source'] = 'test-source'\n request['payload'] = json.dumps({'Alice': u'Bob (тест данные)'})\n\n # Check XSRF token is required.\n response = self.post('rest/events?%s' % urllib.urlencode(\n {'request': json.dumps(request)}), {})\n assert_equals(response.status_int, 200)\n assert_contains('\"status\": 403', response.body)\n\n # Check PUT works.\n request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(\n 'event-post')\n response = self.post('rest/events?%s' % urllib.urlencode(\n {'request': json.dumps(request)}), {})\n assert_equals(response.status_int, 200)\n assert not response.body\n\n # Check event is properly recorded.\n old_namespace = namespace_manager.get_namespace()\n namespace_manager.set_namespace(self.namespace)\n try:\n events = models.EventEntity.all().fetch(1000)\n assert 1 == len(events)\n assert_contains(\n u'Bob (тест данные)', json.loads(events[0].data)['Alice'])\n finally:\n namespace_manager.set_namespace(old_namespace)\n\n # Clean up.\n 
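# The config registry is shared, in-process state, so reset the override\n        # to keep event persistence from leaking into later tests.\n        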
config.Registry.db_overrides = {}\n\n    def test_two_students_dont_see_each_other_pages(self):\n        \"\"\"Test a user can't see another user's pages.\"\"\"\n        email1 = 'user1@foo.com'\n        name1 = 'User 1'\n        email2 = 'user2@foo.com'\n        name2 = 'User 2'\n\n        # Login as one user and view 'unit' and other pages, which are not\n        # cached.\n        actions.login(email1)\n        actions.register(self, name1)\n        actions.Permissions.assert_enrolled(self)\n        response = actions.view_unit(self)\n        assert_contains(email1, response.body)\n        actions.logout()\n\n        # Login as another user and check that 'unit' and other pages show\n        # the correct new email.\n        actions.login(email2)\n        actions.register(self, name2)\n        actions.Permissions.assert_enrolled(self)\n        response = actions.view_unit(self)\n        assert_contains(email2, response.body)\n        actions.logout()\n\n    def test_xsrf_defence(self):\n        \"\"\"Test defense against XSRF attack.\"\"\"\n\n        email = 'test_xsrf_defence@example.com'\n        name = 'Test Xsrf Defence'\n\n        actions.login(email)\n        actions.register(self, name)\n\n        response = self.get('student/home')\n        response.form.set('name', 'My New Name')\n        response.form.set('xsrf_token', 'bad token')\n\n        response = response.form.submit(expect_errors=True)\n        assert_equals(response.status_int, 403)\n\n\nclass StaticHandlerTest(actions.TestBase):\n    \"\"\"Check serving of static resources.\"\"\"\n\n    def test_static_files_cache_control(self):\n        \"\"\"Test static/zip handlers use proper Cache-Control headers.\"\"\"\n\n        # Check static handler.\n        response = self.get('/assets/css/main.css')\n        assert_equals(response.status_int, 200)\n        assert_contains('max-age=600', response.headers['Cache-Control'])\n        assert_contains('public', response.headers['Cache-Control'])\n        assert_does_not_contain('no-cache', response.headers['Cache-Control'])\n\n        # Check zip file handler.\n        response = self.get(\n            '/static/inputex-3.1.0/src/inputex/assets/skins/sam/inputex.css')\n        assert_equals(response.status_int, 200)\n        assert_contains('max-age=600', response.headers['Cache-Control'])\n        assert_contains('public', response.headers['Cache-Control'])\n        assert_does_not_contain('no-cache', response.headers['Cache-Control'])\n\n\nclass AssessmentTest(actions.TestBase):\n    \"\"\"Test for assessments.\"\"\"\n\n    def submit_assessment(self, name, args):\n        \"\"\"Test student taking an assessment.\"\"\"\n\n        response = self.get('assessment?name=%s' % name)\n        assert_contains(\n            '' % name,\n            response.body)\n\n        # Extract XSRF token from the page.\n        match = re.search(r'assessmentXsrfToken = [\\']([^\\']+)', response.body)\n        assert match\n        xsrf_token = match.group(1)\n        args['xsrf_token'] = xsrf_token\n\n        response = self.post('answer', args)\n        assert_equals(response.status_int, 200)\n        return response\n\n    def test_course_pass(self):\n        \"\"\"Test student passing final exam.\"\"\"\n        email = 'test_pass@google.com'\n        name = 'Test Pass'\n\n        post = {'assessment_type': 'postcourse', 'score': '100.00'}\n\n        # Register.\n        actions.login(email)\n        actions.register(self, name)\n\n        # Submit answer.\n        response = self.submit_assessment('Post', post)\n        assert_equals(response.status_int, 200)\n        assert_contains('Your score is 70%', response.body)\n        assert_contains('you have passed the course', response.body)\n\n        # Check that the result shows up on the profile page.\n        response = actions.check_profile(self, name)\n        assert_contains('70', response.body)\n        assert_contains('100', response.body)\n\n    def test_assessments(self):\n        \"\"\"Test assessment scores are properly submitted and summarized.\"\"\"\n        email = 
'test_assessments@google.com'\n name = 'Test Assessments'\n\n pre_answers = [{'foo': 'bar'}, {'Alice': u'Bob (тест данные)'}]\n pre = {\n 'assessment_type': 'precourse', 'score': '1.00',\n 'answers': json.dumps(pre_answers)}\n mid = {'assessment_type': 'midcourse', 'score': '2.00'}\n post = {'assessment_type': 'postcourse', 'score': '3.00'}\n second_mid = {'assessment_type': 'midcourse', 'score': '1.00'}\n second_post = {'assessment_type': 'postcourse', 'score': '100000'}\n\n # Register.\n actions.login(email)\n actions.register(self, name)\n\n old_namespace = namespace_manager.get_namespace()\n namespace_manager.set_namespace(self.namespace)\n try:\n # Check that no scores exist right now.\n student = models.Student.get_enrolled_student_by_email(email)\n assert len(get_all_scores(student)) == 0 # pylint: disable=C6411\n\n # Submit assessments and check the numbers of scores recorded.\n self.submit_assessment('Pre', pre)\n student = models.Student.get_enrolled_student_by_email(email)\n assert len(get_all_scores(student)) == 1\n\n self.submit_assessment('Mid', mid)\n student = models.Student.get_enrolled_student_by_email(email)\n assert len(get_all_scores(student)) == 2\n\n self.submit_assessment('Post', post)\n student = models.Student.get_enrolled_student_by_email(email)\n\n # Check final score also includes overall_score.\n assert len(get_all_scores(student)) == 4\n\n # Check assessment answers.\n answers = json.loads(\n models.StudentAnswersEntity.get_by_key_name(\n student.user_id).data)\n assert pre_answers == answers['precourse']\n\n # pylint: disable-msg=g-explicit-bool-comparison\n assert [] == answers['midcourse']\n assert [] == answers['postcourse']\n # pylint: enable-msg=g-explicit-bool-comparison\n\n # Check that scores are recorded properly.\n student = models.Student.get_enrolled_student_by_email(email)\n assert int(get_score(student, 'precourse')) == 1\n assert int(get_score(student, 'midcourse')) == 2\n assert int(get_score(student, 'postcourse')) == 3\n assert (int(get_score(student, 'overall_score')) ==\n int((0.30 * 2) + (0.70 * 3)))\n\n # Try posting a new midcourse exam with a lower score;\n # nothing should change.\n self.submit_assessment('Mid', second_mid)\n student = models.Student.get_enrolled_student_by_email(email)\n assert int(get_score(student, 'precourse')) == 1\n assert int(get_score(student, 'midcourse')) == 2\n assert int(get_score(student, 'postcourse')) == 3\n assert (int(get_score(student, 'overall_score')) ==\n int((0.30 * 2) + (0.70 * 3)))\n\n # Now try posting a postcourse exam with a higher score and note\n # the changes.\n self.submit_assessment('Post', second_post)\n student = models.Student.get_enrolled_student_by_email(email)\n assert int(get_score(student, 'precourse')) == 1\n assert int(get_score(student, 'midcourse')) == 2\n assert int(get_score(student, 'postcourse')) == 100000\n assert (int(get_score(student, 'overall_score')) ==\n int((0.30 * 2) + (0.70 * 100000)))\n finally:\n namespace_manager.set_namespace(old_namespace)\n\n\n# TODO(psimakov): if mixin method names overlap, we don't run them all; must fix\nclass CourseUrlRewritingTest(\n StudentAspectTest, AssessmentTest, CourseAuthorAspectTest, AdminAspectTest):\n \"\"\"Run existing tests using rewrite rules for '/courses/pswg' base URL.\"\"\"\n\n def setUp(self): # pylint: disable-msg=g-bad-name\n super(CourseUrlRewritingTest, self).setUp()\n\n self.base = '/courses/pswg'\n self.namespace = 'gcb-courses-pswg-tests-ns'\n\n courses = 'course:%s:/:%s' % (self.base, self.namespace)\n 
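# Config entries use the course:<url_prefix>:<data_folder>[:<namespace>]\n        # form seen elsewhere in these tests (e.g. course:/bar:/bar-data:nsbar).\n        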
os.environ[sites.GCB_COURSES_CONFIG_ENV_VAR_NAME] = courses\n\n    def tearDown(self):  # pylint: disable-msg=g-bad-name\n        del os.environ[sites.GCB_COURSES_CONFIG_ENV_VAR_NAME]\n\n        super(CourseUrlRewritingTest, self).tearDown()\n\n    def canonicalize(self, href, response=None):\n        \"\"\"Canonicalize URLs using either the response's base tag or self.base.\"\"\"\n        # Check if already canonicalized.\n        if href.startswith(\n                self.base) or utils.ApplicationHandler.is_absolute(href):\n            pass\n        else:\n            # Look for a base tag in the response to compute the canonical URL.\n            if response:\n                return super(CourseUrlRewritingTest, self).canonicalize(\n                    href, response)\n\n            # Prepend self.base to compute the canonical URL.\n            if not href.startswith('/'):\n                href = '/%s' % href\n            href = '%s%s' % (self.base, href)\n\n        self.audit_url(href)\n        return href\n\n\ndef remove_dir(dir_name):\n    \"\"\"Delete a directory.\"\"\"\n\n    logging.info('removing folder: %s', dir_name)\n    if os.path.exists(dir_name):\n        shutil.rmtree(dir_name)\n        if os.path.exists(dir_name):\n            raise Exception('Failed to delete directory: %s' % dir_name)\n\n\ndef clean_dir(dir_name):\n    \"\"\"Clean a directory.\"\"\"\n\n    remove_dir(dir_name)\n\n    logging.info('creating folder: %s', dir_name)\n    os.makedirs(dir_name)\n    if not os.path.exists(dir_name):\n        raise Exception('Failed to create directory: %s' % dir_name)\n\n\ndef clone_canonical_course_data(src, dst):\n    \"\"\"Makes a copy of canonical course content.\"\"\"\n    clean_dir(dst)\n\n    def copytree(name):\n        shutil.copytree(\n            os.path.join(src, name),\n            os.path.join(dst, name))\n\n    copytree('assets')\n    copytree('data')\n    copytree('views')\n\n    shutil.copy(\n        os.path.join(src, 'course.yaml'),\n        os.path.join(dst, 'course.yaml'))\n\n    # Make all files writable.\n    for root, unused_dirs, files in os.walk(dst):\n        for afile in files:\n            fname = os.path.join(root, afile)\n            os.chmod(fname, 0o777)\n\n\nclass GeneratedCourse(object):\n    \"\"\"A helper class for a dynamically generated course content.\"\"\"\n\n    @classmethod\n    def set_data_home(cls, test):\n        \"\"\"All data for this test will be placed here.\"\"\"\n        cls.data_home = '/tmp/experimental/coursebuilder/test-data/%s' % (\n            test.__class__.__name__)\n\n    def __init__(self, ns):\n        self.path = ns\n\n    @property\n    def namespace(self):\n        return 'ns%s' % self.path\n\n    @property\n    def title(self):\n        return u'Power title-%s Searching with Google (тест данные)' % self.path\n\n    @property\n    def unit_title(self):\n        return u'Interpreting unit-title-%s results (тест данные)' % self.path\n\n    @property\n    def lesson_title(self):\n        return u'Word lesson-title-%s order matters (тест данные)' % self.path\n\n    @property\n    def head(self):\n        return '' % self.path\n\n    @property\n    def css(self):\n        return '' % self.path\n\n    @property\n    def home(self):\n        return os.path.join(self.data_home, 'data-%s' % self.path)\n\n    @property\n    def email(self):\n        return 'walk_the_course_named_%s@google.com' % self.path\n\n    @property\n    def name(self):\n        return 'Walk The Course Named %s' % self.path\n\n\nclass MultipleCoursesTestBase(actions.TestBase):\n    \"\"\"Configures several courses for running concurrently.\"\"\"\n\n    def modify_file(self, filename, find, replace):\n        \"\"\"Read, modify and write back the file.\"\"\"\n\n        text = open(filename, 'r').read().decode('utf-8')\n\n        # Make sure target text is not in the file.\n        assert replace not in text\n        text = text.replace(find, replace)\n        assert replace in text\n\n        open(filename, 'w').write(text.encode('utf-8'))\n\n    def modify_canonical_course_data(self, course):\n        \"\"\"Modify canonical content by adding 
unique bits to it.\"\"\"\n\n self.modify_file(\n os.path.join(course.home, 'course.yaml'),\n 'title: \\'Power Searching with Google\\'',\n 'title: \\'%s\\'' % course.title)\n\n self.modify_file(\n os.path.join(course.home, 'data/unit.csv'),\n ',Interpreting results,',\n ',%s,' % course.unit_title)\n\n self.modify_file(\n os.path.join(course.home, 'data/lesson.csv'),\n ',Word order matters,',\n ',%s,' % course.lesson_title)\n\n self.modify_file(\n os.path.join(course.home, 'data/lesson.csv'),\n ',Interpreting results,',\n ',%s,' % course.unit_title)\n\n self.modify_file(\n os.path.join(course.home, 'views/base.html'),\n '',\n '\\n%s' % course.head)\n\n self.modify_file(\n os.path.join(course.home, 'assets/css/main.css'),\n 'html {',\n '%s\\nhtml {' % course.css)\n\n def prepare_course_data(self, course):\n \"\"\"Create unique course content for a course.\"\"\"\n\n clone_canonical_course_data(self.bundle_root, course.home)\n self.modify_canonical_course_data(course)\n\n def setUp(self): # pylint: disable-msg=g-bad-name\n \"\"\"Configure the test.\"\"\"\n\n super(MultipleCoursesTestBase, self).setUp()\n\n GeneratedCourse.set_data_home(self)\n\n self.course_a = GeneratedCourse('a')\n self.course_b = GeneratedCourse('b')\n self.course_ru = GeneratedCourse('ru')\n\n # Override BUNDLE_ROOT.\n self.bundle_root = appengine_config.BUNDLE_ROOT\n appengine_config.BUNDLE_ROOT = GeneratedCourse.data_home\n\n # Prepare course content.\n clean_dir(GeneratedCourse.data_home)\n self.prepare_course_data(self.course_a)\n self.prepare_course_data(self.course_b)\n self.prepare_course_data(self.course_ru)\n\n # Setup one course for I18N.\n self.modify_file(\n os.path.join(self.course_ru.home, 'course.yaml'),\n 'locale: \\'en_US\\'',\n 'locale: \\'ru_RU\\'')\n\n # Configure courses.\n courses = '%s, %s, %s' % (\n 'course:/courses/a:/data-a:nsa',\n 'course:/courses/b:/data-b:nsb',\n 'course:/courses/ru:/data-ru:nsru')\n os.environ[sites.GCB_COURSES_CONFIG_ENV_VAR_NAME] = courses\n\n def tearDown(self): # pylint: disable-msg=g-bad-name\n \"\"\"Clean up.\"\"\"\n\n del os.environ[sites.GCB_COURSES_CONFIG_ENV_VAR_NAME]\n appengine_config.BUNDLE_ROOT = self.bundle_root\n super(MultipleCoursesTestBase, self).tearDown()\n\n def walk_the_course(\n self, course, first_time=True, is_admin=False, logout=True):\n \"\"\"Visit a course as a Student would.\"\"\"\n\n # Check normal user has no access.\n actions.login(course.email, is_admin)\n\n # Test schedule.\n if first_time:\n response = self.testapp.get('/courses/%s/preview' % course.path)\n else:\n response = self.testapp.get('/courses/%s/course' % course.path)\n assert_contains(course.title, response.body)\n assert_contains(course.unit_title, response.body)\n assert_contains(course.head, response.body)\n\n # Tests static resource.\n response = self.testapp.get(\n '/courses/%s/assets/css/main.css' % course.path)\n assert_contains(course.css, response.body)\n\n if first_time:\n # Test registration.\n response = self.get('/courses/%s/register' % course.path)\n assert_contains(course.title, response.body)\n assert_contains(course.head, response.body)\n response.form.set('form01', course.name)\n response.form.action = '/courses/%s/register' % course.path\n response = self.submit(response.form)\n\n assert_contains(course.title, response.body)\n assert_contains(course.head, response.body)\n assert_contains(course.title, response.body)\n assert_contains(\n '//groups.google.com/group/My-Course-Announce', response.body)\n assert_contains(\n '//groups.google.com/group/My-Course', 
response.body)\n\n # Check lesson page.\n response = self.testapp.get(\n '/courses/%s/unit?unit=1&lesson=5' % course.path)\n assert_contains(course.title, response.body)\n assert_contains(course.lesson_title, response.body)\n assert_contains(course.head, response.body)\n\n if logout:\n actions.logout()\n\n\nclass MultipleCoursesTest(MultipleCoursesTestBase):\n \"\"\"Test several courses running concurrently.\"\"\"\n\n def test_courses_are_isolated(self):\n \"\"\"Test each course serves its own assets, views and data.\"\"\"\n\n # Pretend students visit courses.\n self.walk_the_course(self.course_a)\n self.walk_the_course(self.course_b)\n self.walk_the_course(self.course_a, False)\n self.walk_the_course(self.course_b, False)\n\n # Check course namespaced data.\n self.validate_course_data(self.course_a)\n self.validate_course_data(self.course_b)\n\n # Check default namespace.\n assert (\n namespace_manager.get_namespace() ==\n appengine_config.DEFAULT_NAMESPACE_NAME)\n\n assert not models.Student.all().fetch(1000)\n\n def validate_course_data(self, course):\n \"\"\"Check course data is valid.\"\"\"\n\n old_namespace = namespace_manager.get_namespace()\n namespace_manager.set_namespace(course.namespace)\n try:\n students = models.Student.all().fetch(1000)\n assert len(students) == 1\n for student in students:\n assert_equals(course.email, student.key().name())\n assert_equals(course.name, student.name)\n finally:\n namespace_manager.set_namespace(old_namespace)\n\n\nclass I18NTest(MultipleCoursesTestBase):\n \"\"\"Test courses running in different locales and containing I18N content.\"\"\"\n\n def test_csv_supports_utf8(self):\n \"\"\"Test UTF-8 content in CSV file is handled correctly.\"\"\"\n\n title_ru = u'Найди факты быстрее'\n\n csv_file = os.path.join(self.course_ru.home, 'data/unit.csv')\n self.modify_file(\n csv_file, ',Find facts faster,', ',%s,' % title_ru)\n self.modify_file(\n os.path.join(self.course_ru.home, 'data/lesson.csv'),\n ',Find facts faster,', ',%s,' % title_ru)\n\n rows = []\n for row in csv.reader(open(csv_file)):\n rows.append(row)\n assert title_ru == rows[6][3].decode('utf-8')\n\n response = self.get('/courses/%s/preview' % self.course_ru.path)\n assert_contains(title_ru, response.body)\n\n # Tests student perspective.\n self.walk_the_course(self.course_ru, first_time=True)\n self.walk_the_course(self.course_ru, first_time=False)\n\n # Test course author dashboard.\n self.walk_the_course(\n self.course_ru, first_time=False, is_admin=True, logout=False)\n\n def assert_page_contains(page_name, text_array):\n dashboard_url = '/courses/%s/dashboard' % self.course_ru.path\n response = self.get('%s?action=%s' % (dashboard_url, page_name))\n for text in text_array:\n assert_contains(text, response.body)\n\n assert_page_contains('', [\n title_ru, self.course_ru.unit_title, self.course_ru.lesson_title])\n assert_page_contains(\n 'assets', [self.course_ru.title, self.course_ru.home])\n assert_page_contains(\n 'settings', [self.course_ru.title, self.course_ru.home])\n\n # Clean up.\n actions.logout()\n\n def test_i18n(self):\n \"\"\"Test course is properly internationalized.\"\"\"\n response = self.get('/courses/%s/preview' % self.course_ru.path)\n assert_contains_all_of(\n [u'Вход', u'Регистрация', u'Расписание', u'Курс'], response.body)\n\n\nclass VirtualFileSystemTest(\n StudentAspectTest, AssessmentTest, CourseAuthorAspectTest,\n StaticHandlerTest):\n \"\"\"Run existing tests using virtual local file system.\"\"\"\n\n def setUp(self): # pylint: disable-msg=g-bad-name\n 
\"\"\"Configure the test.\"\"\"\n\n super(VirtualFileSystemTest, self).setUp()\n\n GeneratedCourse.set_data_home(self)\n\n # Override BUNDLE_ROOT.\n self.bundle_root = appengine_config.BUNDLE_ROOT\n appengine_config.BUNDLE_ROOT = GeneratedCourse.data_home\n\n # Prepare course content.\n home_folder = os.path.join(GeneratedCourse.data_home, 'data-v')\n clone_canonical_course_data(self.bundle_root, home_folder)\n\n # Configure course.\n self.namespace = 'nsv'\n os.environ[sites.GCB_COURSES_CONFIG_ENV_VAR_NAME] = (\n 'course:/:/data-vfs:%s' % self.namespace)\n\n # Modify app_context filesystem to map /data-v to /data-vfs.\n def after_create(unused_cls, instance):\n # pylint: disable-msg=protected-access\n instance._fs = vfs.LocalReadOnlyFileSystem(\n os.path.join(GeneratedCourse.data_home, 'data-vfs'),\n home_folder)\n\n sites.ApplicationContext.after_create = after_create\n\n def tearDown(self): # pylint: disable-msg=g-bad-name\n \"\"\"Clean up.\"\"\"\n\n del os.environ[sites.GCB_COURSES_CONFIG_ENV_VAR_NAME]\n appengine_config.BUNDLE_ROOT = self.bundle_root\n super(VirtualFileSystemTest, self).tearDown()\n","sub_path":"tests/functional/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":49000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"641171373","text":"import tweepy\nfrom tweepy.streaming import StreamListener\nfrom tweepy import Stream\nimport pandas\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nimport time\nfrom Listning import listener\nimport io\nimport json\n\n\nclass IntegrateTP:\n\n def __init__(self):\n\n my_consumer_key = 'X2a7K6yn6u1tj1mqHWf8polXn'\n my_consumer_secret = 'WMKOUbcYFaay7Jl6Cir9CWmFAWmPNJufKkh0UpoKVrNWVdJbhY'\n\n self.auth = tweepy.OAuthHandler(consumer_key=my_consumer_key, consumer_secret=my_consumer_secret)\n self.auth.set_access_token('2789964445-DIRIzS2esF7jtXEbH77fgdEti8VeRFQbVIZL3IT',\n 'FONFNATobd9Vp2p1rEQR9TDGsfuQXpufK6EwhX3PlUwb0')\n\n # self.api = tweepy.API(self.auth)\n self.results = []\n self.data_set = pandas.DataFrame()\n\n # start_time = time.time()\n # keyword_list = ['cat']\n # l = listener(start_time, time_limit=20)\n # self.twitter_stream = Stream(self.auth, l)\n # self.twitter_stream.filter(track=keyword_list, languages=['en'])\n\n def get_tweets(self):\n tweets_data = []\n tweets_file = open('raw_tweets.txt', \"r\")\n for line in tweets_file:\n try:\n print(line)\n self.results.append(line)\n except:\n print('didnt work')\n continue\n print(len(self.results))\n trump = 'Trump2016'\n sanders = 'Sanders2016'\n clinton = 'Clinton'\n # for tweet in tweepy.Cursor(self.twitter_stream.search, q=clinton).items(50):\n # self.results.append(tweet)\n # print(len(self.results))\n\n def tweets_to_d_frame(self, tweets):\n\n self.data_set['tweetID'] = [tweet.id for tweet in tweets]\n self.data_set['tweetText'] = [tweet.text for tweet in tweets]\n self.data_set['tweetRetweetCt'] = [tweet.retweet_count for tweet in tweets]\n self.data_set['tweetFavoriteCt'] = [tweet.favorite_count for tweet in tweets]\n self.data_set['tweetSource'] = [tweet.source for tweet in tweets]\n self.data_set['tweetCreated'] = [tweet.created_at for tweet in tweets]\n self.data_set['userID'] = [tweet.user.id for tweet in tweets]\n self.data_set['userScreen'] = [tweet.user.screen_name for tweet in tweets]\n self.data_set['userName'] = [tweet.user.name for tweet in tweets]\n self.data_set['userCreateDt'] = [tweet.user.created_at for tweet in tweets]\n self.data_set['userDesc'] = 
[tweet.user.description for tweet in tweets]\n        self.data_set['userFollowerCt'] = [tweet.user.followers_count for tweet in tweets]\n        self.data_set['userFriendsCt'] = [tweet.user.friends_count for tweet in tweets]\n        self.data_set['userLocation'] = [tweet.user.location for tweet in tweets]\n        self.data_set['userTimezone'] = [tweet.user.time_zone for tweet in tweets]\n        self.data_set = self.data_set[self.data_set.userLocation.notnull()]\n        print(len(self.data_set))\n\n        return self.data_set\n\n    def graph_it(self):\n\n        ti = self.data_set\n        tzs = self.data_set['userLocation'].value_counts()[:5]\n        print(tzs)\n\n\n        # data = [\n        #     go.Bar(\n        #         x=['giraffes', 'orangutans', 'monkeys'],\n        #         y=[20, 14, 23]\n        #     )\n        # ]\n        # plot_url = py.plot(data, filename='bernie-bar')\n\nin_one = IntegrateTP()\nin_one.get_tweets()\n\nin_one.tweets_to_d_frame(in_one.results)\n\n# in_one.graph_it()\n\n\n\n\n","sub_path":"MineTest.py","file_name":"MineTest.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"228698253","text":"#\n# Author: Henrique Pereira Coutada Miranda\n# Run a GW+BSE calculation using Yambo\n#\nfrom __future__ import print_function\nfrom yambopy import *\nfrom qepy import *\n\nyambo = 'yambo'\n\nif not os.path.isdir('database'):\n    os.mkdir('database')\n\n#check if the nscf cycle is present\nif os.path.isdir('nscf/si.save'):\n    print('nscf calculation found!')\nelse:\n    print('nscf calculation not found!')\n    exit()\n\n#check if the SAVE folder is present\nif not os.path.isdir('database/SAVE'):\n    print('preparing yambo database')\n    os.system('cd nscf/si.save; p2y')\n    os.system('cd nscf/si.save; yambo')\n    os.system('mv nscf/si.save/SAVE database')\n\nif not os.path.isdir('gw_bse'):\n    os.mkdir('gw_bse')\n    os.system('cp -r database/SAVE gw_bse')\n\n#create the gw yambo input file\ny = YamboIn('%s -d -p c -g n -V all'%yambo,folder='gw_bse')\nQPKrange,_ = y['QPkrange']\ny['QPkrange'] = [QPKrange[:2]+[4,5],'']\ny['FFTGvecs'] = [15,'Ry']\ny['NGsBlkXs'] = [1,'Ry']\ny['BndsRnXs'] = [[1,30],'']\ny.arguments.append('WFbuffIO')\ny.write('gw_bse/yambo_run.in')\n\nprint('running gw')\nos.system('cd gw_bse; %s -F yambo_run.in -J yambo'%yambo)\n\n#create the bse input file\ny = YamboIn('%s -b -o b -k sex -y d -V all'%yambo,folder='gw_bse')\ny['FFTGvecs'] = [15,'Ry']\ny['NGsBlkXs'] = [1,'Ry']\ny['BndsRnXs'] = [[1,30],'']\ny['BSEBands'] = [[4,5]]\ny['KfnQPdb'] = 'E < yambo/ndb.QP'\ny.write('gw_bse/yambo_run.in')\n\n#run the bse calculation using the dielectric function from gw\nprint('running bse')\nos.system('cd gw_bse; %s -F yambo_run.in -J yambo'%yambo)\n","sub_path":"tutorial/si/gw_bse_si.py","file_name":"gw_bse_si.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"398705050","text":"\"\"\"\nQuiz 5 code file\nName: Kyaw Za Zaw\nCourse: COMP123-04\nProfessor: Lauren Milne\n\"\"\"\n\nfrom imageTools import *\n\n\n# -----------------------------------------------------\n# Question 1\n\n\ndef changeToRed(pic):\n    \"\"\"takes a picture object as input. It should modify pixels\n    where the amount of color in each of the three channels is less than 100 i.e. 
(r < 100 and g < 100 and b < 100) and change them to red\"\"\"\n for (x, y) in pic:\n r, g, b = pic.getColor(x, y)\n if r < 100 and g < 100 and b < 100:\n pic.setColor(x, y, (255, 0, 0))\n\n\n# -----------------------------------------------------\n# Question 2\n\n\ndef addVerticalLines(pic):\n \"\"\"takes a picture object as input. It should\n modify the image by changing each pixel of every 10th column to black\"\"\"\n for (x, y) in pic:\n if x % 10 == 0:\n pic.setColor(x, y, (0, 0, 0))\n\n\nif __name__ == '__main__':\n # pic1 = Picture(\"jellyfish.jpg\")\n # changeToRed(pic1)\n # pic1.show()\n\n # pic2 = Picture(\"theCulprit.jpg\")\n # addVerticalLines(pic2)\n # pic2.show()\n\n n = input(\"Press enter to continue...\")\n","sub_path":"quiz5/quiz5ACode.py","file_name":"quiz5ACode.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"632677349","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\nfrom PIL import Image, ImageTk\nimport time\n#from AnimatedGif import *\nimport AvgPosGen as avg\nfrom _thread import start_new_thread\nfrom multiprocessing import Process\nimport threading\nfrom tkinter import messagebox as mb\navg.logOpen()\nglobalFlag = False\ntoStart = False\n\ndef quitapp():\n quit()\n\ndef ShowDialog(title, text):\n t=threading.Thread(target=mb.showerror, args=(title, text))\n t.start()\n\ndef gifStart():\n global globalFlag\n imagelist=[]\n for i in range (1,76):\n imagelist.append('Images/drone ('+str(i)+').gif')\n # extract width and height info\n photo = ImageTk.PhotoImage(file=imagelist[0])\n width = photo.width()\n height = photo.height()\n canvas = tk.Canvas(width=width, height=height,highlightthickness=0)\n canvas.place(x=200, y=160)\n canvas.configure(background='white')\n # create a list of image objects\n giflist = []\n for imagefile in imagelist:\n photo = ImageTk.PhotoImage(file=imagefile)\n giflist.append(photo)\n # loop through the gif image objects for a while\n\n while globalFlag == True:\n for gif in giflist:\n canvas.delete(ALL)\n canvas.create_image(width/2.0, height/2.0, image=gif)\n canvas.update()\n time.sleep(0.04)\n## ##print (\"cycle\")\n canvas.place_forget()\n return None\n\ndef startPos(opt,value,COM):\n global globalFlag\n globalFlag=True\n pross=avg.start_pos(opt,value,COM)\n## pross=True\n if pross==None:\n app.show_frame(Stopped)\n else:\n app.show_frame(Done)\n globalFlag=False\n \n \n\ndef background_init(frame):\n load = Image.open(\"Images/background.png\")\n render = ImageTk.PhotoImage(load)\n\n # labels can be text or images\n img = tk.Label(frame, image=render)\n img.image = render\n img.place(x=0, y=0)\n\ndef set_btn_bg(btn,path):\n load = Image.open(path)\n render = ImageTk.PhotoImage(load)\n btn.config(image=render)\n btn.image = render\n return btn\n\nclass AvgPos(tk.Tk):\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n container = tk.Frame(self)\n\n container.pack(side=\"top\", fill=\"both\", expand=True)\n\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n\n #Chief icon:\n self.iconbitmap('Images/chief.ico')\n\n #Window label:\n self.title(\"Average Position Generator\")\n\n self.frames = {}\n\n for F in (StartPage, inProgress, Stopped, Done):\n frame = F(container, self)\n\n self.frames[F] = frame\n\n frame.grid(row=0, column=0, sticky=\"nsew\")\n\n self.show_frame(StartPage)\n\n def show_frame(self, cont):\n frame = self.frames[cont]\n 
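# Lift the requested frame above its siblings so it becomes visible.\n        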
frame.tkraise()\n\n\nclass StartPage(tk.Frame):\n\n    def __init__(self, parent, controller):\n\n        def onClick_start ():\n            global toStart\n            global globalFlag\n            opt=getV()\n            ##print(opt)\n            ##print(txtBox.get())\n            COM=txtBox2.get()\n            try:\n                value = int(txtBox.get())\n                if value>0:\n                    controller.show_frame(inProgress)\n                    startPos(opt,value,COM)\n                else:\n                    avg.logWrite(\" [ERROR]: Unaccepted value - Negative values\\n\")\n                    #droneAnimation(False)\n                    globalFlag=False\n                    controller.show_frame(Stopped)\n                    ShowDialog(\"Error\",'Unaccepted value - Negative values')\n            except Exception:\n                avg.logWrite(\" [ERROR]: Unaccepted value - string or float has passed\\n\")\n                globalFlag=False\n                controller.show_frame(Stopped)\n                ShowDialog(\"Error\",'Unaccepted value - Enter numbers only,\\nPlease follow instructions above!')\n\n        tk.Frame.__init__(self, parent)\n\n        background_init(self)\n\n        #instruction label:\n        label = tk.Label(self, text=\"*Make sure that you have a static IP\\n\"\n                                    \"*Make sure the mast DEBUG cable is connected\\n\",\n                         justify=\"left\")\n        label.config(bg=\"white\")\n        label.place(x=180 , y=70)\n\n        # radio buttons label\n        radioLabel=tk.Label(self, text=\"Choose one option:\\n\")\n        radioLabel.config(bg=\"white\",font = \"Arial 12 bold underline\")\n        radioLabel.place(x=220 , y=120)\n\n        # radio buttons setup:\n        timeChoises = [\n            (\"Seconds\\n(40-400)\", 3),\n            (\"Minutes\\n(1-60)\", 2),\n            (\"Hours\\n(0.1-48)\", 1),\n        ]\n        v = tk.IntVar()\n        v.set(1)\n        def getV():\n            return v.get()\n\n        for txt, val in timeChoises:\n            radio=tk.Radiobutton(self,\n                                 text=txt,\n                                 padx=20,\n                                 variable=v,\n                                 value=val,\n                                 command=getV)\n            x = int(val * 100)+50\n            radio.config(bg=\"white\")\n            radio.place(x=x, y=150)\n\n        # text box\n        value=tk.StringVar()\n        value.set(\"\")\n\n        txtLable=tk.Label(self, text=\"Time:\")\n        txtLable.config(font=\"Arial 9 bold underline\", bg=\"white\")\n        txtLable.place(x=220, y=255)\n        txtBox=tk.Entry(self)\n        txtBox.place(x=270, y=255)\n\n        txtLable2=tk.Label(self, text=\"COM Number:\")\n        txtLable2.config(font=\"Arial 9 bold underline\", bg=\"white\")\n        txtLable2.place(x=178, y=230)\n        txtBox2=tk.Entry(self)\n        txtBox2.insert(END, '/dev/ttyr03')\n        txtBox2.place(x=270, y=230)\n\n\n        # start button\n        button = ttk.Button(self, text=\"\",\n                            command=onClick_start)\n        button = set_btn_bg(button, \"Images/start_btn.png\")\n        button.place(x=200,y=320)\n\n        # quit button\n        button2 = ttk.Button(self, text=\"\",\n                             command=quitapp)\n        button2=set_btn_bg(button2,\"Images/quit_btn.png\")\n        button2.place(x=310 , y=320)\n\n\nclass inProgress(tk.Frame):\n    \n    def __init__(self, parent, controller):\n        global toStart\n        tk.Frame.__init__(self, parent)\n\n        background_init(self)\n\n        t=threading.Thread(target=gifStart)\n        t.start()\n\n        # in progress label:\n        label = tk.Label(self, text=\"IN PROGRESS ...\\n\")\n        label.config(bg=\"white\", fg = \"red\",font = \"Times 12 bold\")\n        label.place(x=225, y=95)\n\n        label = tk.Label(self, text=\"*DO NOT disconnect the DEBUG cable\\n\"\n                                    \"*DO NOT turn off the computer\\n\",\n                         justify=\"left\")\n        label.config(bg=\"white\")\n        label.place(x=200, y=120)\n\n        # back button\n        button = ttk.Button(self, text=\"back\",\n                            command=lambda: controller.show_frame(Stopped))\n        button = set_btn_bg(button, \"Images/back_btn.png\")\n        button.place(x=200,y=320)\n\n        # quit button\n        button2 = ttk.Button(self, text=\"Quit\",\n                             command=quitapp)\n        button2 = set_btn_bg(button2, \"Images/quit_btn.png\")\n        button2.place(x=310 , y=320)\n        \n\n\n    \n\n\nclass Stopped(tk.Frame):\n    def __init__(self, parent, controller):\n\n        \n        
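# Error screen: shown when a run is aborted or the input is rejected.\n        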
tk.Frame.__init__(self, parent)\n\n        background_init(self)\n\n        # \"you have stopped\" label:\n        label = tk.Label(self, text=\"The process has stopped\\n\"\n                                    \"You may close the window, reopen it or press \\\"try again\\\"\\n\"\n                                    \"Make sure you enter the right values on your next try...\\n\\n\"\n                                    \"DO NOT CALL US!!!\")\n        label.config(bg=\"white\", fg=\"red\", font=\"Times 12 bold\")\n        label.place(x=120, y=95)\n\n        # start again button\n        button1 = ttk.Button(self,\n                             command=lambda: controller.show_frame(StartPage))\n        button1 = set_btn_bg(button1, \"Images/start_again_btn.png\")\n        button1.place(x=200,y=320)\n\n        # quit button\n        button2 = ttk.Button(self,\n                             command=quitapp)\n        button2 = set_btn_bg(button2, \"Images/quit_btn.png\")\n        button2.place(x=310 , y=320)\n\n\nclass Done(tk.Frame):\n    def __init__(self, parent, controller):\n\n        \n        tk.Frame.__init__(self, parent)\n\n        background_init(self)\n\n        # \"all done\" label:\n        label = tk.Label(self, text=\"ALL DONE\\n\"\n                                    \"See log file for more details\\n\"\n                                    \"If you ran into a problem...\\n\\n\"\n                                    \"DO NOT CALL US!!!\")\n        label.config(bg=\"white\", fg=\"red\", font=\"Times 12 bold\")\n        label.place(x=210, y=110)\n\n        # start again button\n        button1 = ttk.Button(self,\n                             command=lambda: controller.show_frame(StartPage),state=DISABLED)\n        button1 = set_btn_bg(button1, \"Images/start_again_btn.png\")\n        button1.place(x=200,y=320)\n\n        # quit button\n        button2 = ttk.Button(self,\n                             command=quitapp)\n        button2 = set_btn_bg(button2, \"Images/quit_btn.png\")\n        button2.place(x=310 , y=320)\n\n        \n        \n        \n\n\n\napp = AvgPos()\n\n\n#Window size settings\napp.minsize(600,450)\napp.maxsize(600,450)\n\n\napp.mainloop()\n\n","sub_path":"Linux/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":9430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"400899458","text":"import logging\n\nimport numpy as np\nfrom cvxpy import mul_elemwise\nfrom django.core.management.base import BaseCommand\nfrom django.utils.timezone import now\n\nfrom portfolios.algorithms.markowitz import markowitz_optimizer_3\nfrom portfolios.calculation import MIN_PORTFOLIO_PCT, get_core_constraints, get_instruments, \\\n    INSTRUMENT_TABLE_EXPECTED_RETURN_LABEL\nfrom portfolios.markowitz_scale import get_risk_curve\nfrom portfolios.providers.data.django import DataProviderDjango\n\nlogger = logging.getLogger(\"markowitz_finder\")\n# logger.setLevel(logging.DEBUG)\n\n\nclass Command(BaseCommand):\n    help = 'Calculate all the optimal portfolios for ' \\\n           'all the goals in the system.'\n\n    def handle(self, *args, **options):\n        # find extremes\n        data_provider = DataProviderDjango()\n        # Get the funds from the instruments table\n        covars, funds, masks = get_instruments(data_provider)\n        logger.debug(\"Using instruments:\\n {}\\n\\n with covars:\\n{}\".format(funds, covars))\n        sigma = covars.values\n\n        mu = funds[INSTRUMENT_TABLE_EXPECTED_RETURN_LABEL].values\n\n        # Get the instruments with the best BL ER.\n        perfix = np.argmax(mu)\n        itms = np.argwhere(mu == mu[perfix])\n        ilist = [i[0] for i in itms.tolist()]\n        logger.info(\"Found largest ER instruments: {} at index: {}, ilist: {}\".format(funds.index[itms], itms, ilist))\n\n        xs, constraints = get_core_constraints(funds.shape[0])\n\n        constraints += [xs >= 0]\n\n        # Find the lambda that gives only the best BL ER.\n        lowerb = 0.0\n        upperb = 100000000.0\n        mval = 10\n        while upperb - lowerb > .001:  # We want lambda to 3 decimal places\n            weights, cost = markowitz_optimizer_3(xs, sigma, mval, mu, constraints)\n            
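# Bisection step: if any instrument outside the max-ER set still gets\n            # weight, this lambda is too small, so raise the lower bound;\n            # otherwise shrink the search window from above.\n            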
changed = False\n for ix, weight in enumerate(weights):\n # print(\"ix={}, weight={}\".format(ix, weight))\n if ix not in itms and weight > MIN_PORTFOLIO_PCT:\n lowerb = mval\n mval = min(mval * 2, mval + ((upperb - mval) / 2))\n changed = True\n break\n if not changed:\n upperb = mval\n mval -= ((mval - lowerb) / 2)\n\n max_lambda = round(mval, 3)\n logger.debug(\"Weights at max_lambda: {}\".format(weights))\n logger.info(\"Found MAX_LAMBDA: {}\".format(max_lambda))\n\n # Find the least variance portfolio.\n constraints.append(mul_elemwise(mu, xs) >= 0)\n weights, cost = markowitz_optimizer_3(xs, sigma, 0.0, mu, constraints)\n # Remove any below minimum percent and round to find the target portfolio\n weights[weights < MIN_PORTFOLIO_PCT] = 0\n target = np.round(weights, 2)\n\n # Find the lambda that gives the same portfolio as the target.\n lowerb = 0.0\n upperb = max_lambda\n mval = max_lambda / 2\n while upperb - lowerb > .001: # We want lambda to 3 decimal places\n weights, cost = markowitz_optimizer_3(xs, sigma, mval, mu,\n constraints)\n weights[weights < MIN_PORTFOLIO_PCT] = 0\n comp = np.round(weights, 2)\n if np.allclose(target, comp):\n lowerb = mval\n mval += ((upperb - mval) / 2)\n else:\n upperb = mval\n mval -= ((mval - lowerb) / 2)\n\n min_lambda = round(mval, 3)\n logger.info(\"Found MIN_LAMBDA: {}\".format(min_lambda))\n\n vals = get_risk_curve(min_lambda, max_lambda)\n\n data_provider.set_markowitz_scale(dt=now().today(),\n mn=min_lambda,\n mx=max_lambda,\n a=vals[0],\n b=vals[1],\n c=vals[2])\n","sub_path":"portfolios/management/commands/markowitz_finder.py","file_name":"markowitz_finder.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"205057699","text":"from bs4 import BeautifulSoup\nimport re\n\n#soup = BeautifulSoup(open('/Users/jbbinder/Projects/Data-Science-Program/raw_data/dc_washington_259972-blue-duck-tavern.html', 'r'), \"lxml\")\nsoup = BeautifulSoup(open('/Users/jbbinder/Projects/Data-Science-Program/raw_data/ca_los-angeles_224421-in--out-burger.html', 'r'), \"lxml\")\n\n\nw=0\nnum=0\n\n#if soup.findAll(attrs = {\"itemprop\": \"ratingValue\"}):\na = soup.findAll(attrs = {\"itemprop\": \"ratingValue\"})\nprint(len(a))\nfor i in a:\n w=w+int(i['content'])\n num=num+1\n print(i['content'])\n\n print(w/float(num))\n\n\n#print(soup.findChildren(\"meta\"))\n#print(soup.find(\"meta\", {\"itemprop\": \"latitude\"}))\n#tmp = str(soup.find(\"meta\", {\"itemprop\": \"latitude\"}))\n#print(type(tmp))\n\n#print(float(re.findall(r'\"(.*?)\"', tmp)[0]))\n","sub_path":"test_bs4.py","file_name":"test_bs4.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"147571675","text":"\"\"\"Plotting class \"\"\"\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport pandas as pd\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nfrom platform import python_version\r\n\r\n# print version of packages\r\nprint(\"Import PyPlt:\")\r\nprint(\"python version: \", python_version())\r\nprint(\"numpy version: \", np.version.version)\r\nprint(\"pandas version: \", pd.__version__)\r\nprint(\"matplotlib version: \", matplotlib.__version__)\r\nprint(\"Seaborn version: \", sns.__version__)\r\n\r\n\r\nclass MyPlt:\r\n \"\"\"My python plotter library.\r\n This does the drawing using matplotlib and/or seaborn\r\n data (data_x, data_y, data_y2) can be passed in a list, numpy array or\r\n pandas series 
with same size.\r\n\r\n    # Arguments\r\n        data_x: data for x axis\r\n        data_y: data for y on left y axis\r\n        data_y2: data for y2 on right y axis (optional), default = None\r\n        xlabel, ylabel, y2label: label for data_x, data_y , data_y2,\r\n            default = None if numpy array or list, or\r\n            name of data if input data is a pandas series\r\n        fonts, fontm, fontl: font size small, medium, large\r\n        xmin, xmax, ymin, ymax, y2min, y2max: min and max of x, y, y2 axis,\r\n            default = min and max of input data\r\n        xpadding, ypadding:\r\n            effective only when xmin, xmax, ymin, or ymax is none (default),\r\n            use padding to add more space above/below the max and min of data\r\n            add (x.max-x.min)*xpadding to the x.min or x.max\r\n            add (y.max-y.min)*ypadding to the y.min or y.max\r\n        legend_pos, legend_loc, legend_pad:\r\n            legend position, location and pad\r\n            for example, legend_pos=(1.04,1) and legend_loc=\"upper left\"\r\n            means to place the legend outside the axes, such that the\r\n            \"upper left\" corner of the legend is at position (1.04,1)\r\n            in axes coordinates.\r\n            legend_loc can be:\r\n                'best','right','center'\r\n                'upper left', 'upper right'\r\n                'lower left', 'lower right',\r\n                'center left', 'center right',\r\n                'lower center', 'upper center',\r\n            legend_pad is the pad between the axes and legend border.\r\n        title: title for figure, default = None\r\n        savefig: save the figure to a png file\r\n            name = title + \".png\", default = False\r\n        **fig_kw: keywords that are passed to matplotlib.pyplot.plot\r\n    # Date\r\n        20190206\r\n    \"\"\"\r\n\r\n    def __init__(self, data_x, data_y, data_y2=None,\r\n                 xmin=None, xmax=None,\r\n                 ymin=None, ymax=None, y2min=None, y2max=None,\r\n                 xpadding=0.1, ypadding=0.1,\r\n                 xlabel=None, ylabel=None, y2label=None,\r\n                 fonts=14, fontm=16, fontl=18,\r\n                 legend_pos=(1, 1), legend_loc='upper right',\r\n                 legend_pad=0.5,\r\n                 title=None, savefig=False):\r\n\r\n        # check if 2 y axis\r\n        self.double_y = False\r\n        if data_y2 is not None:\r\n            self.double_y = True\r\n\r\n        # check dimension of input data\r\n        N = len(data_x)\r\n        assert len(data_y) == N\r\n        if self.double_y:\r\n            assert len(data_y2) == N\r\n        self.data_size = N\r\n\r\n        # assign values\r\n        self.x = data_x\r\n        self.y = data_y\r\n        self.xmin = xmin\r\n        self.xmax = xmax\r\n        self.ymin = ymin\r\n        self.ymax = ymax\r\n        self.xlabel = xlabel\r\n        self.ylabel = ylabel\r\n        self.title = title\r\n        self.savefig = savefig\r\n        self.fonts = fonts\r\n        self.fontm = fontm\r\n        self.fontl = fontl\r\n        self.legend_pos = legend_pos\r\n        self.legend_loc = legend_loc\r\n        self.legend_pad = legend_pad\r\n\r\n        if self.double_y:\r\n            self.y2 = data_y2\r\n            self.y2min = y2min\r\n            self.y2max = y2max\r\n            self.y2label = y2label\r\n\r\n        # convert list to numpy array\r\n        if isinstance(self.x, list):\r\n            self.x = np.array(self.x)\r\n        if isinstance(self.y, list):\r\n            self.y = np.array(self.y)\r\n        if self.double_y and isinstance(self.y2, list):\r\n            self.y2 = np.array(self.y2)\r\n\r\n        # set min and max of axis\r\n        padding_x = abs(self.x.max() - self.x.min())*xpadding\r\n        if self.xmin is None:\r\n            self.xmin = self.x.min() - padding_x\r\n        if self.xmax is None:\r\n            self.xmax = self.x.max() + padding_x\r\n\r\n        padding_y = abs(self.y.max() - self.y.min())*ypadding\r\n        if self.ymin is None:\r\n            self.ymin = self.y.min() - padding_y\r\n        if self.ymax is None:\r\n            self.ymax = self.y.max() + padding_y\r\n\r\n        if (self.double_y) and (self.y2min is None):\r\n            padding_y = abs(self.y2.max() - self.y2.min())*ypadding\r\n            self.y2min = self.y2.min() - padding_y\r\n        if (self.double_y) and 
(self.y2max is None):\r\n            padding_y = abs(self.y2.max() - self.y2.min())*ypadding\r\n            self.y2max = self.y2.max() + padding_y\r\n\r\n        # size of font\r\n        plt.rc('font', size=self.fonts)  # controls default text sizes\r\n        plt.rc('axes', titlesize=self.fonts)  # fontsize of the axes title\r\n        plt.rc('axes', labelsize=self.fontm)  # fontsize of the x, y labels\r\n        plt.rc('xtick', labelsize=self.fonts)  # fontsize of the tick labels\r\n        plt.rc('ytick', labelsize=self.fonts)  # fontsize of the tick labels\r\n        plt.rc('legend', fontsize=self.fonts)  # legend fontsize\r\n        plt.rc('figure', titlesize=self.fontl)  # fontsize of the figure title\r\n\r\n        # set label\r\n        if (isinstance(self.x, pd.Series)) and (self.xlabel is None):\r\n            self.xlabel = self.x.name\r\n        if (isinstance(self.y, pd.Series)) and (self.ylabel is None):\r\n            self.ylabel = self.y.name\r\n        if self.double_y:\r\n            if (isinstance(self.y2, pd.Series)) and (self.y2label is None):\r\n                self.y2label = self.y2.name\r\n\r\n        # set ticks\r\n        self.tkw = dict(size=4, width=1.5)\r\n\r\n    def scatter_plt(self, hue=None, size=None, style=None,\r\n                    set_ylabel=None,\r\n                    markersize=50, edgecolor='k', legend='brief',\r\n                    **fig_kw):\r\n        \"\"\"Scatter plot.\r\n\r\n        # Arguments\r\n            hue, size, style : grouping variable that produce points with\r\n                different colors, size or style\r\n            markersize: marker size, default = 50. It has no effect if size is\r\n                used.\r\n            edgecolor : color of marks, default = 'k' (black)\r\n            set_ylabel: used for set_ylabel to set label for y axis\r\n                None: don't set_ylabel, default\r\n                'self': use set_ylabel(self.ylabel)\r\n                other string: use set_ylabel(set_ylabel)\r\n            legend : “brief”, “full”, or False, default = 'brief'\r\n            **fig_kw: keywords that are passed to matplotlib.pyplot.plot\r\n        # Example\r\n            (1)\r\n            fig, ax = plt.subplots(2,1,figsize=(14,16))\r\n            a = PyPlt.MyPlt(x, y, xmin=0.1, xmax=5, ymin=2, ymax=8)\r\n            a.scatter_plt(style=y,markersize=100,edgecolor='red',legend='brief',ax=ax[0])\r\n            a.scatter_plt(hue=x,ax=ax[1])\r\n\r\n            (2) 2 scatter plots on 1 figure\r\n            fig, ax = plt.subplots(5,1,figsize=(8,16))\r\n            a = PyPlt.MyPlt(x, y, xmin=0.1, xmax=5, ymin=2, ymax=18,\r\n                            xlabel='x', ylabel='y ylabel', title='a title')\r\n            a.scatter_plt(markersize=100, edgecolor='red', legend='brief',\r\n                          ax=ax[0])\r\n            a = PyPlt.MyPlt(x, y2, xmin=0.1, xmax=5, ymin=2, ymax=18,\r\n                            xlabel='x', ylabel='y2 ylabel', title='a title')\r\n            a.scatter_plt(markersize=100, edgecolor='blue', legend='brief',\r\n                          ax=ax[0])\r\n        # Date\r\n            20191127\r\n        \"\"\"\r\n        plot_ylabel = self.ylabel\r\n\r\n        if hue is not None:\r\n            #assert len(hue) == self.data_size\r\n            plot_ylabel = None\r\n        if size is not None:\r\n            assert len(size) == self.data_size\r\n            plot_ylabel = None\r\n        if style is not None:\r\n            assert len(style) == self.data_size\r\n            plot_ylabel = None\r\n\r\n        # label here will be shown in the legend\r\n        plot = sns.scatterplot(self.x, self.y,\r\n                               hue=hue, size=size, style=style,\r\n                               s=markersize, edgecolor=edgecolor,\r\n                               legend=legend, label=plot_ylabel,\r\n                               **fig_kw)\r\n\r\n        # set x, y limit\r\n        plot.set_xlim(self.xmin, self.xmax)\r\n        plot.set_ylim(self.ymin, self.ymax)\r\n\r\n        # set labels, shown on axis\r\n        plot.set_xlabel(self.xlabel)\r\n\r\n        if set_ylabel is None:\r\n            plot.set_ylabel('')\r\n        elif set_ylabel == 'self':\r\n            plot.set_ylabel(self.ylabel)\r\n        else:\r\n            plot.set_ylabel(set_ylabel)\r\n\r\n        # set ticks\r\n        plot.tick_params(axis='x', **self.tkw)\r\n        plot.tick_params(axis='y', **self.tkw)\r\n\r\n        # set title\r\n        if self.title is not None:\r\n            
plot.set_title(self.title)\r\n\r\n        # set legend\r\n        plot.legend(bbox_to_anchor=self.legend_pos,\r\n                    loc=self.legend_loc,\r\n                    borderaxespad=self.legend_pad)\r\n\r\n        # save figures\r\n        if self.savefig:\r\n            fig = plot.get_figure()\r\n            if self.title is None:\r\n                file_title = 'scatter_plt'\r\n            else:\r\n                file_title = self.title\r\n            fig.savefig(file_title + \".png\", transparent=False,\r\n                        dpi=100, bbox_inches='tight')\r\n\r\n    def box_plt(self, hue=None,\r\n                linewidth=2.5, width=0.8,\r\n                markersize=5, boxvalue=True,\r\n                **fig_kw):\r\n        \"\"\"Box plot.\r\n        For box plots, the range shown on the axis differs from self's min\r\n        and max, so it is deliberately not set with set_xlim and set_ylim.\r\n\r\n        # Arguments\r\n            hue: grouping variable that produce points with different style\r\n            linewidth: width of gray lines that frame the plot elements,\r\n                default = 2.5\r\n            width: box width, default = 0.8\r\n            markersize: marker size of outlier observations, default = 5.\r\n            boxvalue: if True, annotate each box with its quartile, extreme\r\n                and median values, default = True\r\n\r\n            **fig_kw: keywords that are passed to matplotlib.pyplot.plot\r\n        # Example\r\n            with sharey, the y (or x) range can still be set !!!\r\n\r\n            fig, ax = plt.subplots(2,1,figsize=(19,10),sharey=True)\r\n            xmin=-1\r\n            xmax=12\r\n            ymin=0\r\n            ymax=2000\r\n            Eth_r = PyPlt.MyPlt(df_Eth['r']-1, df_Eth['Eth'],\r\n                                xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)\r\n            Eth_r.scatter_plt(hue=df_Eth['case'],markersize=10,ax=ax[0])\r\n            Eth_rint = PyPlt.MyPlt(df_Eth['r_int']-1,\r\n                                   df_Eth['Eth'].astype('int64'),\r\n                                   xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)\r\n            Eth_rint.box_plt(ax=ax[1], hue=df_Eth['case'], boxvalue=True,\r\n                             linewidth=1, width=0.7)\r\n        # Date\r\n            20191127\r\n        \"\"\"\r\n        # if hue is not None:\r\n        #     assert len(hue) == self.data_size\r\n\r\n        plot = sns.boxplot(self.x, self.y,\r\n                           hue=hue, fliersize=markersize,\r\n                           linewidth=linewidth, width=width,\r\n                           **fig_kw)\r\n\r\n        # set labels, shown on axis\r\n        plot.set_xlabel(self.xlabel)\r\n        plot.set_ylabel(self.ylabel)\r\n\r\n        # set ticks\r\n        plot.tick_params(axis='x', **self.tkw)\r\n        plot.tick_params(axis='y', **self.tkw)\r\n\r\n        # set title\r\n        if self.title is not None:\r\n            plot.set_title(self.title)\r\n\r\n        # value on box\r\n        if boxvalue:\r\n            axe = plot.axes\r\n            lines = axe.get_lines()\r\n            NumBox = int(np.ceil(len(lines)/6))  # number of box in the figure\r\n            for ibox in range(NumBox):\r\n                # x_l, x_r = position (left and right) of box\r\n                # 25%, 75%, minimum, maximum, median, others\r\n                (x_l, y_25), (x_r, _) = lines[0 + ibox*6].get_xydata()\r\n                (x_l, y_75), (x_r, _) = lines[1 + ibox*6].get_xydata()\r\n                (x_l, y_min), (x_r, _) = lines[2 + ibox*6].get_xydata()\r\n                (x_l, y_max), (x_r, _) = lines[3 + ibox*6].get_xydata()\r\n                (x_l, y_med), (x_r, _) = lines[4 + ibox*6].get_xydata()\r\n                x_center = (x_l + x_r)/2\r\n                # text on box\r\n                axe.text(x_center, y_25, f'{y_25}')\r\n                axe.text(x_center, y_75, f'{y_75}')\r\n                axe.text(x_center, y_min, f'{y_min}')\r\n                axe.text(x_center, y_max, f'{y_max}')\r\n                axe.text(x_center, y_med, f'{y_med}')\r\n\r\n        # set legend\r\n        plot.legend(bbox_to_anchor=self.legend_pos,\r\n                    loc=self.legend_loc,\r\n                    borderaxespad=self.legend_pad)\r\n\r\n        # save figures\r\n        if self.savefig:\r\n            fig = plot.get_figure()\r\n            if self.title is None:\r\n                file_title = 'box_plt'\r\n            else:\r\n                file_title = self.title\r\n            fig.savefig(file_title + \".png\", transparent=False,\r\n                        dpi=100, bbox_inches='tight')\r\n\r\n    def xyy_plt(self, ycolor='blue', y2color='red', legend='brief',\r\n                marker='o', markersize=10,\r\n                set_ylabel=None, set_y2label=None,\r\n                **fig_kw):\r\n        \"\"\"plot data_y (on left y axis) and 
data_y2\r\n (on right y axis) with same data_x (on x axis).\r\n data_x, data_y1, data_y2 are 1-d arrays with same length\r\n\r\n # Arguments\r\n ycolor, y2color: color of left and right y axis,\r\n defautl = 'blue', 'red'\r\n use None to allow multiple lines with different colors in\r\n a single figure.\r\n marker: marker of line, default = 'o'\r\n markersize: marker size, default = 10. 0 means no markers.\r\n legend : “brief”, “full”, or False, default = 'brief'\r\n set_ylabel, set_y2label: for set_ylabel to set label for y axis\r\n None: don't set_ylabel, default\r\n 'self': use set_ylabel(self.ylabel)\r\n other string: use set_ylabel(set_ylabel)\r\n **fig_kw: keywords that are passed to matplotlib.pyplot.plot\r\n # Example\r\n (1) x and 2 y axis (left and right):\r\n b = PyPlt.MyPlt(df['x'], df['y'], df['y2'],\r\n xmin=-1, xmax=5, ymin=0, ymax=10,\r\n y2min=3, y2max=10, title='b title')\r\n b.xyy_plt(legend=False, set_ylabel='its y', set_y2label='its y2',\r\n ax=ax[1])\r\n\r\n (2) multiple data on left y axis\r\n c = PyPlt.MyPlt(df['x'], df['y'], ylabel='y',xmin=0.1, xmax=5,\r\n ymin=2, ymax=12)\r\n c.xyy_plt(marker='v',ax=ax[2])\r\n c = PyPlt.MyPlt(df['x'], df['y2'],ylabel='y2',xmin=0.1, xmax=5,\r\n ymin=2, ymax=12)\r\n c.xyy_plt(marker='o',ax=ax[2],set_ylabel='Data')\r\n # Date\r\n 20191127\r\n \"\"\"\r\n\r\n plot_ylabel = self.ylabel\r\n # lebel here will be shown in legend\r\n ploty = sns.lineplot(self.x, self.y,\r\n color=ycolor, label=plot_ylabel,\r\n legend=legend, marker=marker,\r\n markersize=markersize,\r\n **fig_kw)\r\n\r\n if self.double_y:\r\n plot_y2label = self.y2label\r\n ax_y2 = ploty.twinx()\r\n ploty2 = sns.lineplot(self.x, self.y2,\r\n color=y2color, label=plot_y2label,\r\n legend=legend, marker=marker,\r\n markersize=markersize,\r\n ax=ax_y2)\r\n\r\n # set x, y limit\r\n ploty.set_xlim(self.xmin, self.xmax)\r\n ploty.set_ylim(self.ymin, self.ymax)\r\n if self.double_y:\r\n ploty2.set_ylim(self.y2min, self.y2max)\r\n\r\n # set labels, shown on axis\r\n ploty.set_xlabel(self.xlabel)\r\n\r\n if set_ylabel is None:\r\n ploty.set_ylabel('')\r\n elif set_ylabel == 'self':\r\n ploty.set_ylabel(self.ylabel)\r\n else:\r\n ploty.set_ylabel(set_ylabel)\r\n\r\n if self.double_y:\r\n if set_y2label is None:\r\n ploty2.set_ylabel('')\r\n elif set_y2label == 'self':\r\n ploty2.set_ylabel(self.y2label)\r\n else:\r\n ploty2.set_ylabel(set_y2label)\r\n\r\n # set axis color\r\n if self.double_y:\r\n # Need to use ploty2 here for left and right spines !!\r\n ploty2.spines['left'].set_color(ploty.get_lines()[0].get_color())\r\n ploty2.spines['right'].set_color(ploty2.get_lines()[0].get_color())\r\n else:\r\n # use ploty if only 1 y axis is used !!\r\n # ploty.spines['left'].set_color(ploty.get_lines()[0].get_color())\r\n ploty.spines['left'].set_color('black')\r\n\r\n # set color for y labels\r\n if self.double_y:\r\n ploty.yaxis.label.set_color(ploty.get_lines()[0].get_color())\r\n ploty2.yaxis.label.set_color(ploty2.get_lines()[0].get_color())\r\n else:\r\n ploty.yaxis.label.set_color('black')\r\n\r\n # set ticks\r\n tkw = dict(size=4, width=1.5)\r\n ploty.tick_params(axis='x', **tkw)\r\n if self.double_y:\r\n ploty.tick_params(axis='y',\r\n colors=ploty.get_lines()[0].get_color(),\r\n **tkw)\r\n ploty2.tick_params(axis='y',\r\n colors=ploty2.get_lines()[0].get_color(),\r\n **tkw)\r\n else:\r\n ploty.tick_params(axis='y',\r\n colors='black',\r\n **tkw)\r\n\r\n # set title\r\n if self.title is not None:\r\n ploty.set_title(self.title)\r\n\r\n # set legend\r\n 
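 # note: this repositions the left-axis legend only; the twin right-axis legend keeps seaborn's default placement\r\n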
ploty.legend(bbox_to_anchor=self.legend_pos,\r\n loc=self.legend_loc,\r\n borderaxespad=self.legend_pad)\r\n\r\n # save figures\r\n if self.savefig:\r\n fig = ploty.get_figure()\r\n if self.title is None:\r\n file_title = 'xyy_plt'\r\n else:\r\n file_title = self.title\r\n fig.savefig(file_title + \".png\", transparent=False,\r\n dpi=100, bbox_inches='tight')\r\n\r\n def bar_plt(self, hue=None, orient='v',\r\n order=None, hue_order=None, ci=None, edgecolor='k',\r\n linewidth=1,\r\n set_ylabel='self', **fig_kw):\r\n \"\"\"Show point estimates and confidence intervals as rectangular bars.\r\n\r\n A bar plot represents an estimate of central tendency for a numeric\r\n variable with the height of each rectangle and provides some indication\r\n of the uncertainty around that estimate using error bars. Bar plots\r\n include 0 in the quantitative axis range, and they are a good choice\r\n when 0 is a meaningful value for the quantitative variable, and you\r\n want to make comparisons against it.\r\n\r\n It is also important to keep in mind that a bar plot shows only the\r\n mean (or other estimator) value, but in many cases it may be more\r\n informative to show the distribution of values at each level of the\r\n categorical variables. In that case, other approaches such as a box or\r\n violin plot may be more appropriate.\r\n\r\n # Arguments\r\n x, y, hue : Inputs for plotting long-form data.\r\n orient: orientation of the plot (vertical 'v' or horizontal 'h'),\r\n sometimes we need to change orient and switch x, y to make it\r\n work.\r\n set_ylabel: used for set_ylabel to set label for y axis\r\n None: don't set_ylabel\r\n 'self': use set_ylabel(self.ylabel), default\r\n other string: use set_ylabel(set_ylabel)\r\n order, hue_order : lists of strings, optional\r\n Order to plot the categorical levels in, otherwise the levels\r\n are inferred from the data objects.\r\n x, y limit is not working here, but we can use order to add\r\n space to the plot (for legend).\r\n ci: float, 'sd', or None(default),\r\n size of confidence intervals to draw around estimated values.\r\n If “sd”, skip bootstrapping and draw the standard deviation of\r\n the observations.\r\n If None, no bootstrapping will be performed, and error bars\r\n will not be drawn.\r\n edgecolor: color of bar, default = 'k' (black).\r\n linewidth: width of edge of bar, default = 1.\r\n **fig_kw: keywords that are passed to matplotlib.pyplot.plot\r\n\r\n # Example\r\n fig, ax = plt.subplots(1,1,figsize=(4,4),sharey=True)\r\n var_pre = PyPlt.MyPlt(df_1d['mt'],df_1d[var])\r\n var_pre.bar_plt(hue=df_1d['wb?'],order=[10.0,20.0,40.0,'',''],\r\n hue_order=[14.0,2.0])\r\n ('' in order to add space for legend)\r\n # Date\r\n 20191219\r\n \"\"\"\r\n\r\n plot = sns.barplot(self.x, self.y, hue=hue, orient=orient,\r\n order=order, hue_order=hue_order, ci=ci,\r\n linewidth=linewidth, edgecolor=edgecolor,\r\n **fig_kw)\r\n\r\n # only show legend if hue is used.\r\n if hue is not None:\r\n # set tile in legend\r\n if (isinstance(hue, pd.Series)):\r\n legend_title = hue.name\r\n # set legend\r\n plot.legend(bbox_to_anchor=self.legend_pos,\r\n loc=self.legend_loc,\r\n borderaxespad=self.legend_pad,\r\n title=legend_title)\r\n\r\n # set labels, shown on axis\r\n plot.set_xlabel(self.xlabel)\r\n\r\n if set_ylabel is None:\r\n plot.set_ylabel('')\r\n elif set_ylabel == 'self':\r\n plot.set_ylabel(self.ylabel)\r\n else:\r\n plot.set_ylabel(set_ylabel)\r\n\r\n # set title\r\n if self.title is not None:\r\n plot.set_title(self.title)\r\n\r\n # save figures\r\n 
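 # as in the other plot methods, self.title doubles as the output file name\r\n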
if self.savefig:\r\n fig = plot.get_figure()\r\n if self.title is None:\r\n file_title = 'bar_plt'\r\n else:\r\n file_title = self.title\r\n fig.savefig(file_title + \".png\", transparent=False,\r\n dpi=100, bbox_inches='tight')\r\n","sub_path":"PyPlt.py","file_name":"PyPlt.py","file_ext":"py","file_size_in_byte":23142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"268775362","text":"from MovieLens import MovieLens\nfrom ContentKNNAlgorithm import ContentKNNAlgorithm\nfrom Evaluator import Evaluator\nfrom surprise import NormalPredictor\n\nimport random\nimport numpy as np\n\n# Colors for text output\nBeginRED = '\\033[91m'\nEndRED = '\\033[0m'\n\nBeginBgRED = '\\033[41m'\nEndBgRED = '\\033[0m'\n\nBeginGREEN = '\\033[92m'\nEndGREEN = '\\033[0m'\n\nBeginBgGREEN ='\\033[42m'\nEndBgGREEN ='\\033[0m'\n\nBeginYELLO = '\\033[93m'\nEndYELLO = '\\033[0m'\n\nBeginBgYELLO = '\\033[43m'\nEndBgYELLO = '\\033[0m'\n\nBeginBLUE = '\\033[94m'\nEndBLUE = '\\033[0m'\n\nBeginBgBLUE = '\\033[44m'\nEndBgBLUE = '\\033[0m'\n\ndef LoadMovieLensData():\n ml = MovieLens()\n print(BeginGREEN +\"Loading movie ratings...\" + EndGREEN)\n data = ml.loadMovieLensLatestSmall()\n print(BeginGREEN +\"Computing movie popularity ranks so we can measure novelty later...\" + EndGREEN)\n rankings = ml.getPopularityRanks()\n return (ml, data, rankings)\n\nnp.random.seed(0)\nrandom.seed(0)\n\n# Load up common data set for the recommender algorithms\n(ml, evaluationData, rankings) = LoadMovieLensData()\n\n# Construct an Evaluator\nevaluator = Evaluator(evaluationData, rankings)\n\ncontentKNN = ContentKNNAlgorithm()\nevaluator.AddAlgorithm(contentKNN, BeginBgBLUE + \"ContentKNN\" + EndBgBLUE) \n\n# Just make random recommendations\nRandom = NormalPredictor()\nevaluator.AddAlgorithm(Random, BeginBgBLUE + \"Random\" + EndBgBLUE)\n\nevaluator.Evaluate(False)\n\nevaluator.SampleTopNRecs(ml)\n\n\n","sub_path":"ContentBased/ContentRecs.py","file_name":"ContentRecs.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"119104416","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport atexit\nimport time\nimport RPi.GPIO as GPIO\nimport spi\n\n# ensure the GPIO cleanup function runs when the script exits\natexit.register(GPIO.cleanup)\n\n# use logical (BCM) pin numbering\nGPIO.setmode(GPIO.BCM)\n\nDISPLAY = [17, 4, 9, 11, 7, 27, 22, 10]\n\nSPI_CLK = 18\nSPI_MISO = 23\nSPI_MOSI = 24\nSPI_CS = 25\nconversor_ad = spi.Mcp3008(SPI_CLK, SPI_MISO, SPI_MOSI, SPI_CS)\n\nCANAL_POTENCIOMETRO = 1\n\nfor led in DISPLAY[:6]:\n GPIO.setup(led, GPIO.OUT)\n GPIO.output(led, 0)\n\nwhile True:\n for led in DISPLAY[:6]:\n GPIO.output(led, 1)\n atraso = conversor_ad.read(CANAL_POTENCIOMETRO)/1000.0\n time.sleep(atraso)\n GPIO.output(led, 0)\n","sub_path":"experiments/rpi/dojo/dojo_display7.py","file_name":"dojo_display7.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"389262270","text":"\"\"\"\nComputer Vision I - Assignment 02 \nTask 3\n\nGroup members: Emilio Brambilla, Lasse Haffke, Moritz Lahann\n\"\"\"\n\n\"\"\"\nDifferences to integral image approach:\nThe integral image approach with center-surround and surround-center\ndifference-of-boxes (DoB) filters approximates the\ndifference-of-gaussian (DoG) approach we used here.\nFiltering is done on one image resolution vs. multiple\nin pyramids. ␍
There is effectively only one filter size\nused in the integral image approach. Since filters respond\nto size of features in the image, this means we may lose out \non features that our filter size is too large or small for.\nImage pyramids essentially emulate different filter sizes by\ndownsampling the image progressively.\n\nAdvantages of using image pyramids:\nImage pyramids use multiple image resolutions (progressively more smoothed\nand downsampled). This means both large-scale\nand small-scale features are taken into account.\nThey are also computationally less expensive than using\nlarge filters and filter with multiple sizes.\nSmoothing and downsampling are cheaper operations than\nlarge convolutions.\n\"\"\"\n\n# Imports\nfrom skimage.transform.pyramids import pyramid_gaussian\nfrom skimage.transform import resize\nfrom skimage.io import imread, imshow\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Function for plotting image pyramids\n# Shows layers as a single row\n# layers are scaled to the same dimensions in the plot\ndef show_pyramid(pyramid):\n images = []\n titles = []\n for idx, layer in enumerate(pyramid):\n images.append(layer)\n titles.append(\"Layer {}\".format(idx))\n\n fig, ax = plt.subplots(1, len(images))\n for idx, title in enumerate(titles):\n ax[idx].imshow(images[idx])\n ax[idx].set_title(title)\n ax[idx].set_axis_off()\n return fig\n\n\n# Function for calculating a contrast pyramid\n# Output is an ndarray clipped to [0, 1]\ndef contrast_pyramid(minuend, subtrahend):\n contrast_pyramid = []\n for idx, m in enumerate(minuend):\n m = np.array(m)\n s = np.array(subtrahend[idx])\n a = m - s\n contrast_pyramid.append(np.clip(a, 0, 1))\n return np.array(contrast_pyramid)\n\n\n# Function for calculating the feature map of an image pyramid\n# Resizes layers to the size of the 1st layer\n# Averages layers pixelwise\ndef feature_map(pyramid):\n feature_map = []\n for idx in range(pyramid.shape[0]):\n if idx > 1:\n r = resize(pyramid[idx], pyramid[1].shape)\n feature_map.append(r)\n else:\n feature_map.append(pyramid[idx])\n\n # Remove layer 0 (original image)\n feature_map = np.array(feature_map[1:])\n\n # Average pixelwise\n feature_map = np.mean(feature_map, axis=0)\n return feature_map\n\n\n# Main function\nif __name__ == \"__main__\":\n\n # Load image and convert to grayscale\n visual_attention = imread(\n \"C:/Users/Moritz Lahann/Desktop/STUDIUM/Module IAS/Computer Vision/cv_exercises/cv_ex02_03/visual_attention.png\",\n as_gray=True,\n )\n fig, ax = plt.subplots()\n ax.imshow(visual_attention)\n plt.title(\"Input image\")\n plt.show()\n\n # Calculate center pyramid\n center_pyramid = tuple(pyramid_gaussian(visual_attention, max_layer=4, sigma=9))\n\n # Visualize center pyramid\n fig = show_pyramid(center_pyramid)\n fig.suptitle(\"Center Pyramid\")\n plt.show()\n\n # Calculate surround pyramid\n surround_pyramid = tuple(pyramid_gaussian(visual_attention, max_layer=4, sigma=16))\n\n # Visualize surround pyramid\n fig = show_pyramid(surround_pyramid)\n fig.suptitle(\"Surround Pyramid\")\n plt.show()\n\n # Calculate on-off contrast pyramid\n on_off = contrast_pyramid(center_pyramid, surround_pyramid)\n\n # Calculate off-on contrast pyramid\n off_on = contrast_pyramid(surround_pyramid, center_pyramid)\n\n # Visualize on-off contrast pyramid\n fig = show_pyramid(on_off)\n fig.suptitle(\"On-Off Contrast Pyramid\")\n plt.show()\n\n # Visualize off-on contrast pyramid\n fig = show_pyramid(off_on)\n fig.suptitle(\"Off-On Contrast Pyramid\")\n plt.show()\n\n # 
Calculate and visualize feature map for on-off\n on_off_map = feature_map(on_off)\n fig, ax = plt.subplots()\n ax.imshow(on_off_map)\n plt.title(\"On-Off Feature Map\")\n plt.show()\n\n # Calculate and visualize feature map for off-on\n off_on_map = feature_map(off_on)\n fig, ax = plt.subplots()\n ax.imshow(off_on_map)\n plt.title(\"Off-On Feature Map\")\n plt.show()\n\n # Calculate and show saliency map\n saliency_map = (on_off_map + off_on_map) / 2\n fig, ax = plt.subplots()\n ax.imshow(saliency_map)\n plt.title(\"Saliency Map\")\n plt.savefig(\"saliency_map.png\")\n plt.show()\n","sub_path":"CV1_2/cv_ex02_03/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"461446517","text":"#! /usr/bin/env python3\n\n# <>\n# Copyright 2022, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <>\n\ndescription = \"\"\"\nThis module creates an ACE file from a GNDS file that has been processed for Monte Carlo transport.\n\"\"\"\n\n__doc__ = description\n\nimport pathlib\n\nfrom fudge import reactionSuite as reactionSuiteModule\nfrom fudge import styles as stylesModule\n\nfrom brownies.LANL.toACE import reactionSuite\nfrom brownies.LANL.toACE import reaction\nfrom brownies.LANL.toACE import production\nfrom brownies.LANL.toACE import channels\nfrom brownies.LANL.toACE import product\nfrom brownies.LANL.toACE import multiplicity\nfrom brownies.LANL.toACE import angularEnergy\nfrom brownies.LANL.toACE import energy\nfrom brownies.LANL.toACE import energyAngular\nfrom brownies.LANL.toACE import KalbachMann\n\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser( description = description )\nparser.add_argument( '-a', '--annotate', action = 'store_true', help = 'If present, annotation is added to the ACE file.' )\nparser.add_argument( '-i', '--ID', action = 'store', type = int, required = True, help = 'The evaluation identification.' )\nparser.add_argument( '-s', '--style', type = str, default = None, help = 'The griddedCrossSection style to convert to ACE.' )\nparser.add_argument( '--NILm1', type = int, default = 20, help = 'Number of angular equal probable bins for TNSL inelastic scattering. Note this is NIL - 1.' )\nparser.add_argument( '--NCL', type = int, default = 20, help = 'Number of angular equal probable bins for TNSL elastic scattering.' )\nparser.add_argument( '-v', '--verbose', action = 'count', default = 0, help = 'Verbose mode.' )\nparser.add_argument( '--skipURR', action='store_true', help = 'Do not write URR probability tables even if they are present in GNDS.' )\nparser.add_argument('--skipILF_logic', action='store_true', help = 'If present, the URR ILF and ILO flags are only set to -1. This is mainly for testing.')\nparser.add_argument( 'gnds', type = str, help = 'gnds file to convert to ACE.' )\nparser.add_argument( 'output', type = str, help = 'name of the outputted ACE file.' )\n\nargs = parser.parse_args( )\n\nif args.verbose > 0:\n print('Reading GNDS file.')\ngnds = reactionSuiteModule.read(args.gnds, lazyParsing=True)\n\nif args.style is None:\n styleOptions = []\n for style in gnds.styles :\n if isinstance( style, stylesModule.GriddedCrossSection ): styleOptions.append( style.label )\n if len( styleOptions ) == 0: raise Exception( 'GNDS file does not contain Monte Carlo processed data.' 
)\n if len( styleOptions ) > 1:\n print( ' %16s | Temperature (%s)' % (\"Style\", gnds.styles[styleOptions[0]].temperature.unit))\n for style in styleOptions : print( ' %16s | %g' % ( style, gnds.styles[style].temperature ) )\n raise Exception( 'GNDS file contains multiple Monte Carlo processed data. Please select one of the above styles using option \"-s\"' )\n args.style = styleOptions[0]\nif args.style not in gnds.styles: raise Exception( 'GNDS file does not contain style \"%s\".' % args.style )\nif not isinstance(gnds.styles[args.style], stylesModule.GriddedCrossSection):\n raise Exception(\"Selected style must be an instance of 'GriddedCrossSection', not %s\" % type(gnds.styles[args.style]))\n\npath = pathlib.Path(args.output)\nif not path.parent.exists():\n path.parent.mkdir(parents=True)\n\nif args.verbose > 0:\n print('Calling toACE.')\ngnds.toACE(args, args.style, args.output, args.ID, addAnnotation=args.annotate, verbose=args.verbose, skipURR=args.skipURR, skipILF_logic=args.skipILF_logic)\n","sub_path":"brownies/LANL/toACE/toACE.py","file_name":"toACE.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"580159983","text":"import numpy as np\nimport sys\n\n\nclass ART2:\n a = 10\n b = 10\n c = 0.1\n d = 0.9\n e = sys.float_info.epsilon\n theta = 0\n alpha = 0\n vigilance = 0.98\n B = list()\n T = list()\n\n classes = 0\n\n def __init__(self, M, N):\n self.theta = 0.3 #/np.sqrt(M)\n self.alpha = 1/np.sqrt(M)\n\n self.T = np.zeros([N, M])\n self.B = np.random.rand(N, M) * (1/(1-self.d) * self.alpha)\n\n def present(self, s, learn):\n norm = self.norm\n classes = self.classes\n\n w = s\n x = np.divide(w, (norm(w) + self.e))\n v = self.f(x)\n\n u = np.divide(v, (norm(v) + self.e))\n w = s + self.a * u\n x = np.divide(w, (norm(w) + self.e))\n p = u\n q = np.divide(p, (norm(p) + self.e))\n v = self.f(x) + self.f(q) * self. 
b\n\n y = np.dot(self.B, p)[0:max(classes, 1)]\n reset = True\n J = 0\n while reset:\n if (np.max(y) == -1) or (classes == 0):\n if classes == len(self.B) - 1:\n return -1\n else:\n self.classes += 1\n classes += 1\n J = classes\n reset = False\n else:\n J = np.argmax(y)\n u = np.divide(v, (norm(v) + self.e))\n p = u + self.T[J] * self.d\n r = np.add(u, self.c * p) / (self.e + norm(u) + self.c * norm(p))\n n = norm(r)\n if n < (self.vigilance - self.e):\n y[J] = -1\n else:\n reset = False\n if learn:\n self.T[J] = self.alpha * self.d * u + (1 + self.alpha * self.d * (self.d - 1)) * self.T[J]\n self.B[J] = self.alpha * self.d * u + (1 + self.alpha * self.d * (self.d - 1)) * self.B[J]\n\n return J\n\n def f(self, vector):\n return np.array([v if np.abs(v) > self.theta else 0 for v in vector])\n\n @staticmethod\n def norm(vector):\n return np.sqrt(np.power(vector, 2).sum())\n","sub_path":"Art2Network.py","file_name":"Art2Network.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"553391005","text":"import pickle\nimport re\nimport sqlite3\n\nimport numpy as np\nimport tensorflow as tf\nfrom gensim.models import word2vec\nfrom sklearn.model_selection import train_test_split\n\nclass Parser(object):\n '''\n store the configure infos using yaml\n '''\n\n def __init__(self, sql_in='../data/kbzy.db', embedding_size=50, seq_length=100, test_size=0.2, validate_size=0.1):\n self.sql_in = sql_in\n self.corpus = []\n self.labels = []\n self.action_id = {}\n self.embedding_size = embedding_size\n self.model = None\n self.regx = re.compile(r'board_layer/board.*?')\n self.seq_length = seq_length\n\n self.X_train = None\n self.X_test = None\n self.X_validate = None\n self.Y_train = None\n self.Y_test = None\n self.Y_validate = None\n self.ops_length_train = None\n self.ops_length_test = None\n self.ops_length_validate = None\n self.test_size = test_size\n self.validate_size = validate_size\n\n def sql_data_base_parse(self, *file_out, day=1):\n '''\n replace the chat op by a substitute \n Args:\n self.sql_in (string): sqlite3 data base filepath\n day (int): which day to extract\n file_out (list): [file path for corpus, file path for labels]\n Returns:\n corpus (list): shape = (num of users, ops length)\n labels (list): shape = (num of users, 2)\n '''\n assert len(file_out) == 2\n conn = sqlite3.connect(self.sql_in)\n c = conn.cursor()\n query_sql = \"SELECT user_id, action, num_days_played \\\n FROM maidian WHERE current_day = {} ORDER BY user_id, relative_timestamp\".format(day)\n\n self.corpus = []\n self.labels = []\n ops = []\n previous_userid = None\n self.action_id = {}\n i = 1\n for row in c.execute(query_sql):\n user_id = row[0]\n action = 'substitite_for_chat' if self.regx.match(\n row[1]) else row[1]\n num_days_played = row[2]\n\n if action not in self.action_id:\n self.action_id[action] = i\n i += 1\n if previous_userid is not None and user_id != previous_userid:\n label = [0, 1] if num_days_played == day else [1, 0]\n self.labels.append(label)\n self.corpus.append(ops)\n ops = [self.action_id[action]]\n else:\n ops.append(self.action_id[action])\n previous_userid = user_id\n\n with open(file_out[0], 'wb') as f_ops, open(file_out[1], 'wb') as f_labels:\n pickle.dump(self.corpus, f_ops)\n pickle.dump(self.labels, f_labels)\n\n def word2vec_training(self, file_out):\n sentences = []\n for ops in self.corpus:\n sentences.append([str(op) for op in ops])\n self.model = word2vec.Word2Vec(\n sentences, 
self.embedding_size, min_count=1)\n self.model.save(file_out)\n\n def data_generator(self, *file_in):\n '''\n fix file_in -> *file_in with assert\n conditions\n rid ops if the length < 16\n Args:\n file_in: ops, labels, wv \n Returns:\n '''\n assert len(file_in) == 3\n ops_length = []\n X = []\n Y = []\n # 'list' object has no attribute '_load_specials'\n self.model = word2vec.Word2Vec.load(file_in[0])\n with open(file_in[1], 'rb') as f_ops, open(file_in[2], 'rb') as f_labels:\n self.corpus = pickle.load(f_ops)\n self.labels = pickle.load(f_labels)\n #self.seq_length = 100\n padding_vector = np.random.normal(size=self.embedding_size)\n\n def convert_to_wv(op_id):\n return self.model.wv[str(op_id)] if op_id != 0 else padding_vector\n\n for ops, label in zip(self.corpus, self.labels):\n mask = [0] * self.seq_length\n if len(ops) < self.seq_length:\n ops_length.append(len(ops))\n for i in range(len(ops)):\n mask[i] = ops[i]\n else:\n ops_length.append(self.seq_length)\n for i in range(len(ops[-self.seq_length:])):\n mask[i] = ops[i]\n\n line = list(map(convert_to_wv, mask))\n X.append(line)\n Y.append(label)\n #return np.array(X), np.array(Y), ops_length\n return X, Y, ops_length\n\n def data_split(self):\n ''' \n reference: https://stackoverflow.com/questions/31467487\n '''\n X, Y, ops_length = self.data_generator(\n '../temp/wv.bin', '../temp/fc_ops.pkl', '../temp/fc_labels.pkl')\n # print(np.shape(X))\n # print(np.shape(Y))\n # print(np.shape(ops_length))\n length = len(X)\n validate_indice = int(length - (self.validate_size + self.test_size) * length)\n test_indice = int(length - self.validate_size * length)\n self.X_train = X[:validate_indice]\n self.Y_train = Y[:validate_indice]\n self.ops_length_train = ops_length[:validate_indice] \n self.X_validate = X[validate_indice:test_indice]\n self.Y_validate = Y[validate_indice:test_indice]\n self.ops_length_validate = ops_length[validate_indice:test_indice] \n self.X_test = X[test_indice:]\n self.Y_test = Y[test_indice:]\n self.ops_length_test = ops_length[test_indice:] \n\n # use train_test_split may cause Memory Error\n # X_train, self.X_test, Y_train, self.Y_test, ops_length_train, self.ops_length_test = train_test_split(\n # X, Y, ops_length, test_size=self.test_size)\n # self.X_train, self.X_validate, self.Y_train, self.Y_validate, self.ops_length_train, self.ops_length_validate = train_test_split(\n # X_train, Y_train, ops_length_train, test_size=self.validate_size)\n\n\ndef batch_iter(data, labels, ops_length, batch_size, epochs, shuffle):\n '''\n Use random.shuffle instead \n Args:\n data (list)\n labels (list)\n '''\n data_size = len(data) \n # data = np.array(data)\n # labels = np.array(labels)\n # data_size = len(data) # like len(list)\n num_batches_per_epoch = int(len(data) / batch_size)\n\n for epoch in range(epochs):\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n shuffled_label = labels[shuffle_indices]\n #shuffled_length = ops_length[shuffle_indices]\n shuffled_length = [ops_length[i] for i in shuffle_indices]\n else:\n shuffled_data = data\n shuffled_label = labels\n shuffled_length = ops_length\n\n for batch_num in range(num_batches_per_epoch):\n start = batch_num * batch_size\n end = min((batch_num + 1) * batch_size, data_size)\n yield shuffled_data[start: end], shuffled_label[start: end], shuffled_length[start: end]\n\nif __name__ == '__main__':\n parse = Parser()\n parse.sql_data_base_parse('../temp/fc_ops.pkl', '../temp/fc_labels.pkl')\n 
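 # word2vec_training reuses parse.corpus, which was populated by the call above\n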
parse.word2vec_training('../temp/wv.bin')\n ","sub_path":"utils/data_parse.py","file_name":"data_parse.py","file_ext":"py","file_size_in_byte":7225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"121135052","text":"import os\nfrom os.path import basename\nimport numpy as np\nfrom PyQt5.QtWidgets import QToolButton, QPushButton, QLineEdit, QDialogButtonBox, QFileDialog, QDialog, QMessageBox, QTabWidget, QWidget, QTreeWidgetItem, QTreeWidget, QRadioButton\nfrom pulse.utils import error\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtGui import QColor, QBrush\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import uic\nimport configparser\nfrom shutil import copyfile\nfrom pulse.utils import error, remove_bc_from_file\n\nclass RadiationImpedanceInput(QDialog):\n def __init__(self, project, opv, *args, **kwargs):\n super().__init__(*args, **kwargs)\n uic.loadUi('data/user_input/ui/Model/Setup/Acoustic/radiationImpedanceInput.ui', self)\n\n icons_path = 'data\\\\icons\\\\'\n self.icon = QIcon(icons_path + 'pulse.png')\n self.setWindowIcon(self.icon)\n\n self.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.setWindowModality(Qt.WindowModal)\n\n self.opv = opv\n self.opv.setInputObject(self)\n self.transform_points = self.opv.transformPoints\n\n self.project = project\n self.preprocessor = project.preprocessor\n self.before_run = self.preprocessor.get_model_checks()\n\n self.userPath = os.path.expanduser('~')\n self.new_load_path_table = \"\"\n self.project_folder_path = project.project_folder_path\n self.acoustic_bc_info_path = project.file._node_acoustic_path\n\n self.nodes = project.preprocessor.nodes\n self.radiation_impedance = None\n self.nodes_typed = []\n \n self.remove_acoustic_pressure = False\n\n self.lineEdit_nodeID = self.findChild(QLineEdit, 'lineEdit_nodeID')\n\n self.radioButton_anechoic = self.findChild(QRadioButton, 'radioButton_anechoic')\n self.radioButton_flanged = self.findChild(QRadioButton, 'radioButton_flanged')\n self.radioButton_unflanged = self.findChild(QRadioButton, 'radioButton_unflanged')\n self.radioButton_anechoic.toggled.connect(self.radioButtonEvent)\n self.radioButton_flanged.toggled.connect(self.radioButtonEvent)\n self.radioButton_unflanged.toggled.connect(self.radioButtonEvent)\n self.flag_anechoic = self.radioButton_anechoic.isChecked()\n self.flag_flanged = self.radioButton_flanged.isChecked()\n self.flag_unflanged = self.radioButton_unflanged.isChecked()\n\n self.tabWidget_radiation_impedance = self.findChild(QTabWidget, \"tabWidget_radiation_impedance\")\n self.tabWidget_radiation_impedance.currentChanged.connect(self.tabEvent_radiation_impedance)\n\n self.tab_model = self.tabWidget_radiation_impedance.findChild(QWidget, \"tab_model\")\n self.tab_remove = self.tabWidget_radiation_impedance.findChild(QWidget, \"tab_remove\")\n\n self.treeWidget_radiation_impedance = self.findChild(QTreeWidget, 'treeWidget_radiation_impedance')\n self.treeWidget_radiation_impedance.setColumnWidth(1, 20)\n self.treeWidget_radiation_impedance.setColumnWidth(2, 80)\n self.treeWidget_radiation_impedance.itemClicked.connect(self.on_click_item)\n self.treeWidget_radiation_impedance.itemDoubleClicked.connect(self.on_doubleclick_item)\n\n self.pushButton_confirm = self.findChild(QPushButton, 'pushButton_confirm')\n self.pushButton_confirm.clicked.connect(self.check_radiation_impedance_type)\n\n self.pushButton_remove_bc_confirm = self.findChild(QPushButton, 'pushButton_remove_bc_confirm')\n 
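 # both remove buttons (one per tab) are wired to the same removal handler\n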
self.pushButton_remove_bc_confirm.clicked.connect(self.check_remove_bc_from_node)\n\n self.pushButton_remove_bc_confirm_2 = self.findChild(QPushButton, 'pushButton_remove_bc_confirm_2')\n self.pushButton_remove_bc_confirm_2.clicked.connect(self.check_remove_bc_from_node)\n \n self.writeNodes(self.opv.getListPickedPoints())\n self.load_nodes_info()\n self.exec_()\n\n def keyPressEvent(self, event):\n if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:\n if self.tabWidget_radiation_impedance.currentIndex()==0:\n self.check_radiation_impedance_type()\n elif event.key() == Qt.Key_Delete:\n if self.tabWidget_radiation_impedance.currentIndex()==1:\n self.check_remove_bc_from_node()\n elif event.key() == Qt.Key_Escape:\n self.close()\n\n def radioButtonEvent(self):\n self.flag_anechoic = self.radioButton_anechoic.isChecked()\n self.flag_flanged = self.radioButton_flanged.isChecked()\n self.flag_unflanged = self.radioButton_unflanged.isChecked()\n\n def tabEvent_radiation_impedance(self):\n self.current_tab = self.tabWidget_radiation_impedance.currentIndex()\n if self.current_tab == 1:\n self.lineEdit_nodeID.setDisabled(True)\n else:\n self.lineEdit_nodeID.setDisabled(False)\n\n def writeNodes(self, list_node_ids):\n text = \"\"\n for node in list_node_ids:\n text += \"{}, \".format(node)\n self.lineEdit_nodeID.setText(text)\n\n def check_radiation_impedance_type(self):\n\n lineEdit_nodeID = self.lineEdit_nodeID.text()\n self.stop, self.nodes_typed = self.before_run.check_input_NodeID(lineEdit_nodeID)\n if self.stop:\n return\n\n try:\n if self.flag_anechoic:\n type_id = 0\n elif self.flag_unflanged:\n type_id = 1\n elif self.flag_flanged:\n type_id = 2\n self.radiation_impedance = type_id\n self.project.set_radiation_impedance_bc_by_node(self.nodes_typed, type_id)\n self.transform_points(self.nodes_typed)\n self.close()\n except:\n return\n\n def text_label(self, value):\n text = \"\"\n if isinstance(value, complex):\n value_label = str(value)\n elif isinstance(value, np.ndarray):\n value_label = 'Table'\n text = \"{}\".format(value_label)\n return text\n\n def on_click_item(self, item):\n self.lineEdit_nodeID.setText(item.text(0))\n\n def on_doubleclick_item(self, item):\n self.lineEdit_nodeID.setText(item.text(0))\n self.check_remove_bc_from_node()\n\n def check_remove_bc_from_node(self):\n\n lineEdit_nodeID = self.lineEdit_nodeID.text()\n self.stop, self.nodes_typed = self.before_run.check_input_NodeID(lineEdit_nodeID)\n if self.stop:\n return\n\n key_strings = [\"radiation impedance\"]\n message = \"The radiation impedance attributed to the {} node(s) have been removed.\".format(self.nodes_typed)\n remove_bc_from_file(self.nodes_typed, self.acoustic_bc_info_path, key_strings, message)\n self.project.preprocessor.set_radiation_impedance_bc_by_node(self.nodes_typed, None)\n self.transform_points(self.nodes_typed)\n self.treeWidget_radiation_impedance.clear()\n self.load_nodes_info()\n # self.close()\n\n def load_nodes_info(self):\n for node in self.project.preprocessor.nodes_with_radiation_impedance:\n if node.radiation_impedance_type == 0:\n text = \"Anechoic\"\n elif node.radiation_impedance_type == 1:\n text = \"Unflanged\"\n elif node.radiation_impedance_type == 2:\n text = \"Flanged\"\n new = QTreeWidgetItem([str(node.external_index), text])\n new.setTextAlignment(0, Qt.AlignCenter)\n new.setTextAlignment(1, Qt.AlignCenter)\n self.treeWidget_radiation_impedance.addTopLevelItem(new)\n \n def update(self):\n 
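 # refresh the node ID field with the points currently picked in the render window\n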
self.writeNodes(self.opv.getListPickedPoints())","sub_path":"data/user_input/model/setup/acoustic/radiationImpedanceInput.py","file_name":"radiationImpedanceInput.py","file_ext":"py","file_size_in_byte":7611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"257796203","text":"# Please see instructions.pdf for the description of this problem.\nfrom fixed_size_array import FixedSizeArray\nfrom cs5112_hash import cs5112_hash1\n\n# An implementation of a hash table that uses chaining to handle collisions.\nclass HashTable:\n def __init__(self, initial_size=10, load_factor=.75):\n # DO NOT EDIT THIS CONSTRUCTOR\n if (initial_size < 0) or (load_factor <= 0) or (load_factor > 1):\n raise Exception(\"size must be greater than zero, and load factor must be between 0 and 1\")\n self.array_size = initial_size\n self.load_factor = load_factor\n self.item_count = 0\n self.array = FixedSizeArray(initial_size)\n\n def hashed_key(self,key):\n return cs5112_hash1(key) % self.array_size\n\n\n # Inserts the `(key, value)` pair into the hash table, overwriting any value\n # previously associated with `key`.\n # Note: Neither `key` nor `value` may be None (an exception will be raised)\n def insert(self, key=None, value=None):\n\n try:\n if key is None or value is None:\n raise KeyError()\n\n if value is None:\n raise ValueError()\n\n #Before inserting an item, check if array needs resizing:\n # print('check if proportion of item_count:array_size',str(self.item_count/self.array_size),' exceeds load_factor',str(self.load_factor))\n if self.load_factor <= self.item_count/self.array_size:\n # print('number of elements in array:',self.item_count)\n # print('current table_size:',str(self.array_size))\n self._resize_array()\n # print('resized_array size:',str(self.array_size))\n # print('number of elements in array:',self.item_count)\n\n h_key = self.hashed_key(key)\n print('hashed_key val of',str(key),':',str(h_key))\n\n results = self.array.get(h_key)\n print('results of self.array.get('+str(h_key)+'):',str(results))\n\n if results is None:\n\n self.array.set(index=h_key,elem=[(key,value)])\n self.item_count += 1\n print('items in table:',str(self.item_count))\n else:\n if key in [k[0] for k in results]:\n print('value exists for key',str(key),'...replacing val for this key')\n #code will decrement item_count st removing and dded v into same k will delta 0 self.item_count\n self.remove(key)\n\n results += [(key,value)]\n # print('results of self.array.get('+str(h_key)+'):',str(results))\n self.item_count += 1\n # print('items in table:',str(self.item_count))\n\n except KeyError:\n print('Error inserting value with None Key. Insertion aborted.')\n\n except ValueError:\n print('Error inserting None value. 
Insertion aborted.')\n\n\n#put resize into insert\n\n # self.array.insert(index=key,elem=value)\n # raise NotImplementedError()\n\n # Returns the value associated with `key` in the hash table, or None if no\n # such value is found.\n # Note: `key` may not be None (an exception will be raised)\n\n def get(self, key=None):\n try:\n if key is None:\n raise KeyError()\n # YOUR CODE HERE\n h_key = self.hashed_key(key)\n results = self.array.get(h_key)\n\n # if results is not None:\n if results:\n for r in results:\n if r[0] == key:\n return r[1]\n #if reached this line, either no list of results or no results with this h_key\n print('No value stored with key',key)\n return\n except KeyError:\n print('Error retrieving value with None key.')\n\n\n # Removes the `(key, value)` pair matching the given `key` from the map, if it\n # exists. If such a pair exists in the map, the return value will be the value\n # that was removed. If no such value exists, the method will return None.\n # Note: `key` may not be None (an exception will be raised)\n def remove(self, key):\n try:\n if key is None:\n raise KeyError()\n\n results = self.array.get(self.hashed_key(key))\n # if results is not None:\n if results:\n for (k,v) in results:\n if k == key:\n results.remove((k,v))\n self.item_count -= 1\n #only one instance of k\n #return updated array\n return self.array\n\n print('No value stored with key',key)\n return self.array\n except KeyError:\n print('Error removing None Key')\n\n\n\n\n # Returns the number of elements in the hash table.\n def size(self):\n return self.item_count\n\n # Internal helper function for resizing the hash table's array once the ratio\n # of stored mappings to array size exceeds the specified load factor.\n def _resize_array(self):\n # item_count = self.item_count\n # if self.load_factor <= self.item_count/self.array_size:\n new_table = HashTable(initial_size=self.array_size*2)\n\n # for elem_list in self.array.items:\n for i in range(self.array_size):\n elem_list = self.array.get(i)\n if elem_list is not None:\n for (k,v) in elem_list:\n new_table.insert(key=k,value=v)\n\n self.array_size = new_table.array_size\n self.load_factor = new_table.load_factor\n self.item_count = new_table.item_count\n self.array = new_table.array\n\n # Internal helper function for accessing the array underlying the hash table.\n def _get_array(self):\n # DO NOT EDIT THIS METHOD\n return self.array\n\n def __str__(self):\n # print(self.array.items)\n for elem_list in self.array.items:\n if elem_list is not None:\n # return str(elem_list)\n print (str(self.array.items.index(elem_list)),':',str(elem_list))\n return \"\"\n # for (k,v) in elem_list:\n # new_table.insert(key=k,value=v)\n","sub_path":"HW2/hashtable_linear_probing_legacy.py","file_name":"hashtable_linear_probing_legacy.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"520167412","text":"# flake8: noqa E501\nfrom asyncio import get_event_loop\nfrom enum import Enum\nfrom pathlib import PurePath\nfrom types import GeneratorType\nfrom typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Set, Tuple, Union\n\nfrom pydantic.json import ENCODERS_BY_TYPE\nfrom pydantic.main import BaseModel\n\nfrom example.client import models as m\n\nSetIntStr = Set[Union[int, str]]\nDictIntStrAny = Dict[Union[int, str], Any]\n\n\ndef generate_encoders_by_class_tuples(type_encoder_map: Dict[Any, Callable]) -> Dict[Callable, Tuple]:\n encoders_by_classes: 
Dict[Callable, List] = {}\n for type_, encoder in type_encoder_map.items():\n encoders_by_classes.setdefault(encoder, []).append(type_)\n encoders_by_class_tuples: Dict[Callable, Tuple] = {}\n for encoder, classes in encoders_by_classes.items():\n encoders_by_class_tuples[encoder] = tuple(classes)\n return encoders_by_class_tuples\n\n\nencoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE)\n\n\ndef jsonable_encoder(\n obj: Any,\n include: Union[SetIntStr, DictIntStrAny] = None,\n exclude=None,\n by_alias: bool = True,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n include_none: bool = True,\n custom_encoder=None,\n sqlalchemy_safe: bool = True,\n) -> Any:\n if exclude is None:\n exclude = set()\n if custom_encoder is None:\n custom_encoder = {}\n if include is not None and not isinstance(include, set):\n include = set(include)\n if exclude is not None and not isinstance(exclude, set):\n exclude = set(exclude)\n if isinstance(obj, BaseModel):\n encoder = getattr(obj.Config, \"json_encoders\", {})\n if custom_encoder:\n encoder.update(custom_encoder)\n obj_dict = obj.dict(\n include=include, exclude=exclude, by_alias=by_alias, exclude_unset=bool(exclude_unset or skip_defaults),\n )\n\n return jsonable_encoder(\n obj_dict, include_none=include_none, custom_encoder=encoder, sqlalchemy_safe=sqlalchemy_safe,\n )\n if isinstance(obj, Enum):\n return obj.value\n if isinstance(obj, PurePath):\n return str(obj)\n if isinstance(obj, (str, int, float, type(None))):\n return obj\n if isinstance(obj, dict):\n encoded_dict = {}\n for key, value in obj.items():\n if (\n (not sqlalchemy_safe or (not isinstance(key, str)) or (not key.startswith(\"_sa\")))\n and (value is not None or include_none)\n and ((include and key in include) or key not in exclude)\n ):\n encoded_key = jsonable_encoder(\n key,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n encoded_value = jsonable_encoder(\n value,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n encoded_dict[encoded_key] = encoded_value\n return encoded_dict\n if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):\n encoded_list = []\n for item in obj:\n encoded_list.append(\n jsonable_encoder(\n item,\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n )\n return encoded_list\n\n if custom_encoder:\n if type(obj) in custom_encoder:\n return custom_encoder[type(obj)](obj)\n else:\n for encoder_type, encoder in custom_encoder.items():\n if isinstance(obj, encoder_type):\n return encoder(obj)\n\n if type(obj) in ENCODERS_BY_TYPE:\n return ENCODERS_BY_TYPE[type(obj)](obj)\n for encoder, classes_tuple in encoders_by_class_tuples.items():\n if isinstance(obj, classes_tuple):\n return encoder(obj)\n\n errors: List[Exception] = []\n try:\n data = dict(obj)\n except Exception as e:\n errors.append(e)\n try:\n data = vars(obj)\n except Exception as e:\n errors.append(e)\n raise ValueError(errors)\n return jsonable_encoder(\n data,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n\n\nif TYPE_CHECKING:\n from example.client.api_client import ApiClient\n\n\nclass _StoreApi:\n 
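 # Shared request builders: each _build_for_* method returns an awaitable that\n
 # AsyncStoreApi awaits directly and SyncStoreApi runs to completion on the event loop.\n
 # Usage sketch (illustrative only; the ApiClient constructor arguments are assumed here):\n
 #   client = ApiClient(host=\"https://petstore.example/v2\")\n
 #   inventory = SyncStoreApi(client).get_inventory()\n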
def __init__(self, api_client: \"ApiClient\"):\n self.api_client = api_client\n\n def _build_for_delete_order(self, order_id: int) -> Awaitable[None]:\n \"\"\"\n For valid response try integer IDs with positive integer value. Negative or non-integer values will generate API errors\n \"\"\"\n path_params = {\"orderId\": str(order_id)}\n\n return self.api_client.request(\n type_=None, method=\"DELETE\", url=\"/store/order/{orderId}\", path_params=path_params,\n )\n\n def _build_for_get_inventory(self,) -> Awaitable[Dict[str, int]]:\n \"\"\"\n Returns a map of status codes to quantities\n \"\"\"\n return self.api_client.request(type_=Dict[str, int], method=\"GET\", url=\"/store/inventory\",)\n\n def _build_for_get_order_by_id(self, order_id: int) -> Awaitable[m.Order]:\n \"\"\"\n For valid response try integer IDs with value >= 1 and <= 10. Other values will generated exceptions\n \"\"\"\n path_params = {\"orderId\": str(order_id)}\n\n return self.api_client.request(\n type_=m.Order, method=\"GET\", url=\"/store/order/{orderId}\", path_params=path_params,\n )\n\n def _build_for_place_order(self, body: m.Order) -> Awaitable[m.Order]:\n body = jsonable_encoder(body)\n\n return self.api_client.request(type_=m.Order, method=\"POST\", url=\"/store/order\", json=body)\n\n\nclass AsyncStoreApi(_StoreApi):\n async def delete_order(self, order_id: int) -> None:\n \"\"\"\n For valid response try integer IDs with positive integer value. Negative or non-integer values will generate API errors\n \"\"\"\n return await self._build_for_delete_order(order_id=order_id)\n\n async def get_inventory(self,) -> Dict[str, int]:\n \"\"\"\n Returns a map of status codes to quantities\n \"\"\"\n return await self._build_for_get_inventory()\n\n async def get_order_by_id(self, order_id: int) -> m.Order:\n \"\"\"\n For valid response try integer IDs with value >= 1 and <= 10. Other values will generated exceptions\n \"\"\"\n return await self._build_for_get_order_by_id(order_id=order_id)\n\n async def place_order(self, body: m.Order) -> m.Order:\n return await self._build_for_place_order(body=body)\n\n\nclass SyncStoreApi(_StoreApi):\n def delete_order(self, order_id: int) -> None:\n \"\"\"\n For valid response try integer IDs with positive integer value. Negative or non-integer values will generate API errors\n \"\"\"\n coroutine = self._build_for_delete_order(order_id=order_id)\n return get_event_loop().run_until_complete(coroutine)\n\n def get_inventory(self,) -> Dict[str, int]:\n \"\"\"\n Returns a map of status codes to quantities\n \"\"\"\n coroutine = self._build_for_get_inventory()\n return get_event_loop().run_until_complete(coroutine)\n\n def get_order_by_id(self, order_id: int) -> m.Order:\n \"\"\"\n For valid response try integer IDs with value >= 1 and <= 10. 
Other values will generated exceptions\n \"\"\"\n coroutine = self._build_for_get_order_by_id(order_id=order_id)\n return get_event_loop().run_until_complete(coroutine)\n\n def place_order(self, body: m.Order) -> m.Order:\n coroutine = self._build_for_place_order(body=body)\n return get_event_loop().run_until_complete(coroutine)\n","sub_path":"example/client/api/store_api.py","file_name":"store_api.py","file_ext":"py","file_size_in_byte":8389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"49152754","text":"import re\n\nfrom rest_framework.serializers import ValidationError\n\n\nclass FieldRegexValidator:\n \"\"\"\n Do Regex validation for form answer entries\n \"\"\"\n\n def __init__(self, regex, error_code, identifier):\n self._regex = regex\n self._error_code = error_code\n self._identifier = identifier\n\n def __call__(self, value):\n for entry in value[\"entries\"]:\n if entry[\"field\"].identifier == self._identifier:\n if not re.search(self._regex, entry[\"value\"]):\n raise ValidationError(code=self._error_code)\n\n\nclass RequiredFormFieldValidator:\n def __call__(self, value):\n for section in value[\"form\"].sections.all():\n for field in section.fields.all():\n if not field.required:\n continue\n found = False\n for entry in value[\"entries\"]:\n if entry[\"field\"].id == field.id and entry[\"value\"] != \"\":\n found = True\n continue\n if not found:\n raise ValidationError(code=\"required\")\n","sub_path":"forms/validators/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"40101883","text":"from collections import OrderedDict\nnontoxicproducts = OrderedDict()\nnontoxicproducts['2104']='Terra Nova
Poli Crème'\nnontoxicproducts['2103']='Terra Nova␍
Nettoyeur à Meubles'\nnontoxicproducts['2102']='Terra Nova␍
Pierre Ponce Liquide'\nnontoxicproducts['2101']='Terra Nova
Restaurateur à Métal'\nnontoxicproducts['2100']='Terra Nova␍
NaturOil'\nnontoxicproducts['1823']='Circa 1850
Béton Nu'\nnontoxicproducts['1822']='Circa 1850␍
Métal Nu'\nnontoxicproducts['1821']='Circa 1850␍
Plancher Net'\nproducts = {\n'nontoxic' : nontoxicproducts,\n#'oils':oilsproducts,\n#'removers':removersproducts,\n#'primer':primerproducts,\n#'special':specialproducts,\n#'waxes':waxesproducts,\n#'stain':stainproducts,\n#'accessories':accessoriesproducts\n}\n","sub_path":"static/frenchcategoryproducts.py","file_name":"frenchcategoryproducts.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"606450930","text":"from django.conf.urls import url\n\nfrom django.contrib.auth.decorators import login_required\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^accounts/profile/$', login_required(views.ViewHistoryList.as_view()), \n name='profile'),\n url(r'^register/', views.CreateNewUser.as_view(), name='register'),\n url(r'^create_report/', login_required(views.CreateReport.as_view()), \n name='createreport'),\n url(r'^add_movie/', login_required(views.AddMovie.as_view()), \n name='addmovie'),\n url(r'^select_movie/', login_required(views.SelectMovie.as_view()), \n name='selectmovie'),\n url(r'^watch_movie/', login_required(views.WatchMovie.as_view()), \n name='watchmovie'),\n]","sub_path":"mysite/movietracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"418281157","text":"import warnings\nwarnings.filterwarnings('ignore')\nimport scipy.signal as sig\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport scipy.io as sio\n\ndef vertical_flaten(a):\n \n return a.reshape(a.shape[0],1)\n\nfig_sz_x = 5\nfig_sz_y = 4\nfig_dpi = 80 # dpi\n\nfig_font_family = 'Ubuntu'\nfig_font_size = 10\n\nsio.whosmat('ECG_TP4.mat')\nmat_struct = sio.loadmat('ECG_TP4.mat')\n\necg_one_lead = mat_struct['ecg_lead']\necg_one_lead = ecg_one_lead.flatten(1)\ncant_muestras = len(ecg_one_lead)\n\nfs = 1000 # Hz\nnyq_frec = fs / 2\n\n\n# filter design\nripple = 0.5 # dB\natenuacion = 40 # dB\n\nws1 = 1.0 #Hz\nwp1 = 3.0 #Hz\nwp2 = 15.0 #Hz\nws2 = 35.0 #Hz\n\nfrecs = np.array([0.0, ws1, wp1, wp2, ws2, nyq_frec ]) / nyq_frec\ngains = np.array([-atenuacion, -atenuacion, -ripple, -ripple, -atenuacion, -atenuacion])\ngains = 10**(gains/20)\n\n\nbp_sos_butter = sig.iirdesign(wp=np.array([wp1, wp2]) / nyq_frec, ws=np.array([ws1, ws2]) / nyq_frec, gpass=0.5, gstop=40., analog=False, ftype='butter', output='sos')\nbp_sos_cheby = sig.iirdesign(wp=np.array([wp1, wp2]) / nyq_frec, ws=np.array([ws1, ws2]) / nyq_frec, gpass=0.5, gstop=40., analog=False, ftype='cheby1', output='sos')\nbp_sos_cauer = sig.iirdesign(wp=np.array([wp1, wp2]) / nyq_frec, ws=np.array([ws1, ws2]) / nyq_frec, gpass=0.5, gstop=40., analog=False, ftype='ellip', output='sos')\n\ncant_coef = 501\n\nnum_firls = sig.firls(cant_coef, frecs, gains, fs=fs)\nnum_remez = sig.remez(cant_coef, frecs, gains[::2], fs=fs)\nnum_win = sig.firwin2(cant_coef, frecs, gains , window='blackmanharris' )\n\nden = 1.0\n\n######################################################################################################\n\nplt.rcParams.update({'font.size':fig_font_size})\nplt.rcParams.update({'font.family':fig_font_family})\n\nw, h_butter = sig.sosfreqz(bp_sos_butter)\n_, h_cheby = sig.sosfreqz(bp_sos_cheby)\n_, h_cauer = sig.sosfreqz(bp_sos_cauer)\n_, hh_firls = sig.freqz(num_firls, den)\n_, hh_remez = sig.freqz(num_remez, den)\n_, hh_win = sig.freqz(num_win, den)\n\nw = w / np.pi * 
nyq_frec\n\n\n#plt.figure(figsize=(fig_sz_x, fig_sz_y), dpi= fig_dpi, facecolor='w', edgecolor='k')\n\n#plt.plot(w, 20*np.log10(np.abs(h_butter)), label='IIR-Butter' )\n#plt.plot(w, 20*np.log10(np.abs(h_cheby)), label='IIR-Cheby' )\n#plt.plot(w, 20*np.log10(np.abs(h_cauer)), label='IIR-Cauer' )\n#plt.plot(w, 20 * np.log10(abs(hh_firls)), label='FIR-ls')\n#plt.plot(w, 20 * np.log10(abs(hh_remez)), label='FIR-remez')\n#plt.plot(w, 20 * np.log10(abs(hh_win)), label='FIR-Win')\n#plt.plot(frecs * nyq_frec, 20*np.log10(gains), 'rx', label='plantilla' )\n\n#plt.title('FIR diseñado por métodos directos')\n#plt.xlabel('Frequencia [Hz]')\n#plt.ylabel('Modulo [dB]')\n#plt.axis([0, nyq_frec, -60, 5 ]);\n\n#plt.grid()\n\n#axes_hdl = plt.gca()\n#axes_hdl.legend()\n\n#plt.show()\n\n###############################################################################################\n\nECG_f_butt = sig.sosfiltfilt(bp_sos_butter, ecg_one_lead)\nECG_f_cheb = sig.sosfiltfilt(bp_sos_cheby, ecg_one_lead)\nECG_f_cauer = sig.sosfiltfilt(bp_sos_cauer, ecg_one_lead)\n\nECG_f_ls = sig.filtfilt(num_firls, den, ecg_one_lead)\nECG_f_remez = sig.filtfilt(num_remez, den, ecg_one_lead)\nECG_f_win = sig.filtfilt(num_win, den, ecg_one_lead)\n\n# Segmentos de interés\nregs_interes = ( \n np.array([0, 1000]), # minutos a muestras\n )\n\nfor ii in regs_interes:\n \n # intervalo limitado de 0 a cant_muestras\n zoom_region = np.arange(np.max([0, ii[0]]), np.min([cant_muestras, ii[1]]), dtype='uint')\n \n plt.figure(figsize=(fig_sz_x, fig_sz_y), dpi= fig_dpi, facecolor='w', edgecolor='k')\n plt.plot(zoom_region, ecg_one_lead[zoom_region], label='ECG', lw=2)\n #plt.plot(zoom_region, ECG_f_butt[zoom_region], label='Butter')\n #plt.plot(zoom_region, ECG_f_cheb[zoom_region], label='Cheby')\n #plt.plot(zoom_region, ECG_f_cauer[zoom_region], label='Cauer')\n #plt.plot(zoom_region, ECG_f_remez[zoom_region], label='Remez')\n #plt.plot(zoom_region, ECG_f_ls[zoom_region], label='LS')\n #plt.plot(zoom_region, ECG_f_win[zoom_region], label='Win')\n \n plt.title('ECG filtering example from ' + str(ii[0]) + ' to ' + str(ii[1]) )\n plt.ylabel('Adimensional')\n plt.xlabel('Muestras (#)')\n \n axes_hdl = plt.gca()\n axes_hdl.legend()\n axes_hdl.set_yticks(())\n \n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"TP4/prueba2.py","file_name":"prueba2.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"633096664","text":"import logging\nimport os\nfrom os.path import expanduser\nfrom pathlib import Path\nimport sys\nfrom textwrap import dedent\nimport traceback\n\nfrom prompt_toolkit import HTML, print_formatted_text as print, prompt\nimport toml\n\nfrom .log import get_logger\nfrom .ptdb import Column, Db, DbError, NoResultFound, Model, Schema\nfrom .table import mktable\nfrom . import __version__\n\n\n# Note the 'root' is not actually at the root directory and is a special case\nDB_FILE = os.environ.get('NEM_ROOT_DB', str(Path.home().absolute() / '.config' / '.nem.toml'))\nDB_FILE = str(Path(DB_FILE).absolute())\n\n\nlog = get_logger(__name__)\n\n\nclass CODE:\n EXEC = 1\n\n\nclass Ignore(Model):\n __table__ = 'ignore'\n\n item = Column() # eg. ~/.nem.toml, git*\n type = Column() # eg. 
dbfile, cmdrule\n\n\n\nclass Command(Model):\n __table__ = 'cmds'\n\n cmd = Column()\n code = Column()\n desc = Column()\n\n def __repr__(self):\n return f''\n\n\nclass NemSchema(Schema):\n version = __version__\n cmds = Command\n\n\ndef mkresp(out='', code=0, ctx=None):\n log.info(f'creating response \"{out}\" {code} {ctx}')\n ctx = ctx or {}\n return (out, code, ctx)\n\n\ndef err(**kwargs):\n kwargs.update(code=-1)\n return mkresp(**kwargs)\n\n\ndef mkcode(cmd, codes):\n if not cmd:\n return ''\n\n def _pick_letter(word):\n l = list(word)\n while l:\n if l[0].isalpha():\n return l[0]\n elif l[0] in ['{']:\n return ''\n else:\n l = l[1:]\n return ''\n\n code = ''.join([_pick_letter(s) for s in str(cmd).split(' ')])\n while code in codes:\n code += 'f'\n return code\n\n\nclass Resource:\n __resname__ = 'Resource'\n\n def help(self, cmd, args, ctx):\n return mkresp(out=f'TODO: help')\n\n @property\n def _attrs(self):\n return [attr for attr in dir(self) if not attr.startswith('_')]\n\n @property\n def _doc(self):\n resname = self.__resname__.lower()\n # doc = f' {resname[0]}{resname[1:]}\\n'\n doc = ''\n for attr in self._attrs:\n m = getattr(self, attr)\n doc += f' {resname[0]}{resname[1:]}\\n'\n doc += f' {attr[0]}{attr[1:]}:\\n'\n doc += f' {dedent((m.__doc__ or \"TODO\").strip())}'\n doc += '\\n'\n doc = doc[0:-1]\n return doc\n\n def _handle(self, cmd, args, ctx):\n try:\n if not cmd:\n handler = self.help\n else:\n attrs = [attr for attr in self._attrs if attr.startswith(cmd[0])]\n handler = getattr(self, attrs[0])\n return handler(cmd[1:], args, ctx)\n except Exception:\n log.warn(f'failed on command {cmd}', exc_info=True)\n return err(out=f'command {cmd} failed or does not exist on resource {self.__class__.__resname__}')\n\n\nclass Help(Resource):\n __resname__ = 'Help'\n\n @property\n def _resource_docs(self):\n docs = ''\n for resource in Resources.resources():\n docs += resource._doc\n return docs\n\n def help(self, cmd, args, ctx):\n doc =\\\n \"\"\"\\\n MNEMONICS\n ttmytabptb\n trying to make your terminal a better place to be\n\n /\n {resource_docs}\n \"\"\"\n doc = dedent(doc)\n doc = doc.format(resource_docs=self._resource_docs)\n return mkresp(out=doc)\n\n\nclass CmdRes(Resource):\n __resname__ = 'Command'\n\n # @arg_parse parses args for\n def create(self, opts, args, ctx):\n \"\"\"\n Creates a command\n \"\"\"\n pwd = ctx.get('pwd')\n db = ctx.get('db')\n cmds = db.query(Command).all(in_dbs=[db.closest])\n codes_cmds = { cmd.code: cmd.cmd for cmd in cmds }\n cmd = ' '.join(args)\n code = mkcode(cmd, codes_cmds)\n db.add(Command(cmd=cmd, code=code, desc=''), in_dbs=['closest'])\n return mkresp(out=f'added command: {code} = {cmd} to {db.closest}')\n\n def document(self, opts, args, ctx):\n \"\"\"\n Document a command\n \"\"\"\n pass\n\n def edit(self, opts, args, ctx):\n db = ctx.get('db')\n code = args[0]\n new_code = args[1]\n try:\n cmd = db.query(Command).filter_by(code=code).one()\n cmd.code = new_code\n return mkresp(out=f'command {cmd.cmd} code updated {code} -> {new_code}')\n except NoResultFound:\n return err(out=f'code {code} not found')\n\n def find(self, opts, args, ctx):\n pass\n\n def list(self, opts, args, ctx):\n # Want most relevant (closer) dbfiles listed at bottom\n db = ctx.get('db')\n codes = resolve_codes(db)\n\n table_rows = []\n codes_left = set(codes.keys())\n rev_dbnames = list(db.dbnames)\n rev_dbnames.reverse()\n for dbname in rev_dbnames:\n dbcodes = [code for code, meta in codes.items() if dbname in meta['dbs'] and code in codes_left]\n 
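 # mark these codes as listed so dbs later in the loop do not repeat them\n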
codes_left = codes_left - set(dbcodes)\n for code in dbcodes:\n meta = codes[code]\n cmd = meta['cmd']\n desc = meta['desc']\n dbids = [str(db.dbname_to_i(dbname)) for dbname in meta['dbs']]\n if 'v' in opts:\n sources = ','.join(dbids) if len(dbids) > 1 else ''\n sources = f'({sources})' if sources else ''\n table_rows.append([f'{cmd} {sources}', f'[{code}]', desc])\n else:\n table_rows.append([f'{cmd}', f'[{code}]'])\n\n if 'v' in opts:\n headers = ['command', 'code', 'description']\n else:\n headers = ['command', 'code']\n\n table = mktable(table_rows, headers=headers)\n table = table.replace('[', '[')\n table = table.replace(']', ']')\n if 'v' in opts:\n dbs = '\\n'.join([f'(db {db.dbname_to_i(dbname)}) {dbname}' for dbname in db.dbnames])\n else:\n dbs = ''\n return mkresp(out=f'{dbs}\\n{table}')\n\n def remove(self, opts, args, ctx):\n db = ctx.get('db')\n code = args[0]\n try:\n cmd = db.query(Command).filter_by(code=code, _in_dbs=[db.closest]).one()\n db.delete(cmd, in_dbs=[db.closest])\n return mkresp(out=f'removed command {cmd.cmd} with code {cmd.code}')\n except NoResultFound:\n return err(out=f'command for code {code} not found')\n\n\nclass Resources:\n class _Resources:\n commands = CmdRes()\n help = Help()\n\n @classmethod\n def resourcenames(cls):\n return [r for r in dir(cls._Resources) if not r.startswith('__')]\n\n @classmethod\n def resources(cls):\n return [getattr(cls._Resources, resname) for resname in cls.resourcenames()]\n\n @classmethod\n def hasresource(cls, name):\n return hasattr(cls._Resources, name)\n\n @classmethod\n def _interpret(cls, cmd, args, ctx):\n resname = [r for r in cls.resourcenames() if r.startswith(cmd[0])]\n if not resname or not cls.hasresource(resname[0]):\n return None\n resource = getattr(cls._Resources, resname[0])\n return resource._handle(cmd[1:], args, ctx)\n\n\ndef cmd_w_args(cmd, args):\n kwargs = {\n f'arg{i+1}': arg for i, arg in enumerate(args)\n }\n return cmd.format(**kwargs)\n\n\ndef handle_req(args, ctx):\n db = ctx.get('db')\n if len(args) < 1:\n args.append('/cl')\n\n cmd = args[0]\n\n if cmd.startswith('/'):\n resp = Resources._interpret(cmd[1:], args[1:], ctx)\n if resp:\n return resp\n\n # if finding a resource falls through, interpret the first arg\n # as a code\n code = args[0]\n\n try:\n codes = resolve_codes(db)\n ex_cmd = codes[code]['cmd']\n except IndexError:\n return err(out=f'unknown command: {code}')\n\n print(ex_cmd)\n # if there are args, fill them in\n if len(args) > 1:\n ex_cmd = cmd_w_args(ex_cmd, args[1:])\n return mkresp(out=f'exec: {ex_cmd}', code=CODE.EXEC, ctx={'cmd': ex_cmd})\n\n\ndef gather_dbfiles():\n dbs = []\n d = Path(os.environ.get('PWD'))\n while str(d) != '/':\n log.debug(f'searching for config directory {d}')\n db_file = d / '.nem.toml'\n if os.path.exists(db_file) and os.path.isfile(db_file): # and not in block list\n dbs.append(str(db_file))\n d = d.parent\n\n if not os.path.exists(DB_FILE):\n print(HTML(f'db file {DB_FILE} does not exist'))\n if prompt('create it [y/n]? 
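cmd_w_args maps the positional arguments onto {arg1}, {arg2}, ... placeholders through str.format, so a stored command doubles as a template. With a hypothetical stored command:

```python
# Hypothetical stored command using positional placeholders:
cmd = 'scp {arg1} {arg2}:/tmp/'
print(cmd_w_args(cmd, ['notes.txt', 'myhost']))   # scp notes.txt myhost:/tmp/
```

Note that str.format raises KeyError when the template names a placeholder that was not supplied, so callers should pass at least as many arguments as the template expects.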
') == 'y':\n dbs.append(DB_FILE)\n log.debug(f'gathered dbs {dbs}')\n return dbs\n\n\ndef resolve_codes(db):\n cursor = db.query(Command)\n codes = {}\n for dbname, row in cursor.rows():\n row_code = row['code']\n if row_code not in codes:\n codes[row_code] = dict(\n dbs=[dbname],\n cmd=row['cmd'],\n desc=row['desc'],\n )\n elif codes[row_code]['cmd'] == row['cmd']:\n # duplicate entry, add to the dbs list\n codes[row_code]['dbs'].append(dbname)\n else:\n # for conflicts, postfix db code to code until no conflicts\n i = db.dbname_to_i(dbname)\n code = f'{row_code}{i}'\n while code not in codes:\n code = f'{code}{i}'\n codes[code] = dict(\n dbs=[dbname],\n cmd=row['cmd'],\n desc=row['desc'],\n )\n return codes\n\n\ndef nem():\n try:\n dbs = gather_dbfiles()\n\n if not dbs:\n print(HTML('could not find a db file to use!'))\n return\n\n db = Db(toml, NemSchema, dbfiles=dbs)\n db.load()\n\n # codes = resolve_codes(db)\n\n ctx = {\n 'pwd': os.environ.get('PWD'),\n 'db': db,\n }\n args = sys.argv[1:]\n (out, code, ctx) = handle_req(args, ctx)\n\n if code == CODE.EXEC:\n print(HTML(out))\n os.system(ctx['cmd'])\n else:\n print(HTML(out))\n\n db.commit()\n except DbError:\n log.error('', exc_info=True)\n print(HTML('a database error has occurred:\\n{traceback.format_exc()}'))\n except KeyboardInterrupt:\n pass\n","sub_path":"nem/nem.py","file_name":"nem.py","file_ext":"py","file_size_in_byte":10873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"283035979","text":"from django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\n\nUserModel = get_user_model()\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n if not UserModel.objects.filter(email='joshbaker286.jb@gmail.com').exists():\n UserModel.objects.create_superuser('joshbaker286.jb@gmail.com',\n 'p1234567',\n first_name='Josh',\n last_name='Baker')\n","sub_path":"account/management/commands/create_super_user.py","file_name":"create_super_user.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"223502155","text":"#!/usr/bin/env python3.6\nimport argparse\nimport re\n\nclass Vertex:\n def __init__(self, vertex_number):\n self.vertex_number = vertex_number\n self.edges = []\n\n def add_edge(self, vertex_number):\n if vertex_number not in self.edges:\n self.edges.append(vertex_number)\n\n\nclass Graph:\n def __init__(self):\n self.vertices = dict()\n\n def add_vertex(self, vertex_number, edges):\n if vertex_number not in self.vertices:\n self.vertices[vertex_number] = Vertex(vertex_number)\n for edge in edges:\n self.vertices[vertex_number].add_edge(edge)\n if edge not in self.vertices:\n self.vertices[edge] = Vertex(edge)\n self.vertices[edge].add_edge(vertex_number)\n\n def find_connected_components(self, start_vertex):\n stack = []\n connected_components = set()\n\n stack.append(start_vertex)\n while stack:\n vertex = stack.pop()\n if vertex not in connected_components:\n connected_components.add(vertex)\n for edge in self.vertices[vertex].edges:\n stack.append(edge)\n\n return connected_components\n\n def count_groups(self):\n all_visited = set()\n groups = 0\n\n for vertex in self.vertices.keys():\n if vertex not in all_visited:\n connected_components = self.find_connected_components(vertex)\n all_visited = all_visited | connected_components\n groups += 1\n\n return groups\n\n\ndef parse_line(line):\n parse_string = '(\\d+) <-> 
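find_connected_components above is an iterative depth-first search (an explicit stack instead of recursion, so large inputs cannot hit the recursion limit), and count_groups runs it once per unvisited vertex. The same group count can also be had from a union-find structure; a minimal standalone sketch, independent of the classes above:

```python
# Union-find alternative for counting connected components.
# `edges` is a list of (a, b) pairs like those parsed from "a <-> b, c".
def count_groups_uf(edges):
    parent = {}

    def find(x):
        parent.setdefault(x, x)
        while parent[x] != x:
            parent[x] = parent[parent[x]]   # path halving
            x = parent[x]
        return x

    for a, b in edges:
        parent[find(a)] = find(b)
    return len({find(x) for x in parent})

assert count_groups_uf([(0, 2), (1, 1), (3, 4)]) == 3
```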
(.*)'\n\n m = re.search(parse_string, line)\n groups = m.groups()\n vertex_number = int(groups[0])\n edges = [int(edge) for edge in groups[1].split(',')]\n\n return vertex_number, edges\n\n\nparser = argparse.ArgumentParser(\n description='Solution for part 1 of day 12')\nparser.add_argument('file', metavar='file', type=str)\n\nargs = parser.parse_args()\n\ngraph = Graph()\nwith open(args.file) as f:\n for line in f:\n vertex_number, edges = parse_line(line.strip())\n graph.add_vertex(vertex_number, edges)\n\n groups = graph.count_groups()\n print(groups)\n","sub_path":"day12/groups/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"170464016","text":"x = input(\"Enter positive integer to find all primes up to and including the integer: \")\n\ndef sieve(num):\n marked = []\n\n for i in range (2, num + 1):\n if i not in marked:\n print(i)\n for j in range(i*i, num + 1, i):\n marked.append(j)\n\nsieve(x)","sub_path":"src/sieve.py","file_name":"sieve.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"426992707","text":"import re\n\nmock_response = '{\"Response\":\"No Response Set\"}'\nmock_path = ''\nmock_method = ''\nmock_request = ''\n\ndef mock_call(method,path,request):\n \"\"\"\n Handles local testing, not intended to be called directly.\n \"\"\"\n global mock_method \n mock_method = method\n global mock_path\n mock_path = path\n pat = re.compile('\"number\":\"([0-9]+)\"')\n matches = pat.findall(request)\n if matches.count('4000100011112224') != len(matches):\n raise Exception('Please only use test card 4000100011112224 with the mockhandler')\n global mock_request\n mock_request = request\n global mock_response\n return mock_response\n ","sub_path":"mockhandler.py","file_name":"mockhandler.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"66600286","text":"# 8. 
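The sieve above has a Python 3 bug: input() returns a string, so num + 1 inside sieve raises a TypeError until the value is converted with int(). Membership tests on the growing marked list are also O(n) each; a boolean array makes them O(1). A corrected sketch:

```python
# Sieve of Eratosthenes with int() conversion and an O(1) lookup table.
def sieve(num):
    composite = [False] * (num + 1)     # composite[j]: j already marked
    for i in range(2, num + 1):
        if not composite[i]:
            print(i)
            for j in range(i * i, num + 1, i):
                composite[j] = True

sieve(int(input("Enter a positive integer: ")))
```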
Find closest pair points by Divide And Conquer (DAC) approach.\n# Atinesh Singh-15IT409\n \nfrom __future__ import generators\nimport matplotlib.pyplot as plt\nimport math\n \ndef ecdist(p,q):\n return math.sqrt((p[0]-q[0])*(p[0]-q[0])+(p[1]-q[1])*(p[1]-q[1]))# Without sqrt it'll still work\n \ndef testpair(p,q):\n d=ecdist(p,q)\n if(d=len(B) or (i= prob, colName] = \"s\"\n\n\n###### Export Output\npred_colName = VI_idx + \"_\" + smooth + \"_\" + model + \"_batchNumber\" + batch_no + \"_preds\"\nout_name = out_dir + pred_colName + \".csv\"\npredictions.to_csv(out_name, index=False)\n\nprint(\"--------------------------------------------------------------\")\nprint(date.today(), \"-\", datetime.now().strftime(\"%H:%M:%S\"))\nprint(\"--------------------------------------------------------------\")\n","sub_path":"NASA/Python_codes/drivers/Kamiak_ML_Oct17/01_trend_preds/trend_ML_preds.py","file_name":"trend_ML_preds.py","file_ext":"py","file_size_in_byte":6512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"269195423","text":"import os\nimport argparse\nimport yaml\nimport chainer\nfrom chainer import training\nfrom chainer.training import extensions\nfrom model import build_model, KeypointsDetector\nfrom data.transforms import build_transforms\nfrom data.keypoints_dataset import KeypointsDataset\nfrom loss import build_loss\nfrom config.utils import get_outdir\nimport matplotlib\nmatplotlib.use('Agg')\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('config', type=str,\n help='Path to the config file of pose estimation.')\n parser.add_argument('device', type=int,\n help='Device ID.')\n parser.add_argument('--progress', action='store_true',\n help='Action is `store_true`.')\n args = parser.parse_args()\n return args\n\n\ndef freeze_extractor(model):\n for l in model.predictor.extractor.children():\n l.disable_update()\n\n\ndef train():\n args = parse_args()\n with open(args.config, 'r') as fp:\n cfg = yaml.load(fp)\n\n if 'val_img_dir' in cfg.keys() and 'val_keypoints_dir' in cfg.keys():\n flag_val = True\n else:\n flag_val = False\n if flag_val and 'early_stop' in cfg.keys() and cfg['early_stop']:\n flag_early_stop = True\n else:\n flag_early_stop = False\n\n # `outdir` is path to the directory saved log and model\n outdir = get_outdir(cfg)\n if not os.path.isdir(outdir):\n os.makedirs(outdir)\n\n # setup model\n detector = build_model(cfg)\n loss = build_loss(cfg, args.device)\n model = KeypointsDetector(detector, loss)\n\n if args.device >= 0:\n chainer.cuda.get_device_from_id(args.device).use() # Make a specified GPU current\n model.to(args.device)\n\n # setup dataset\n transforms = build_transforms((224, 224), (56, 56), cfg['extractor'])\n train_dataset = KeypointsDataset(\n os.path.expanduser(cfg['train_img_dir']),\n os.path.expanduser(cfg['train_keypoints_dir']), transforms)\n\n optimizer = chainer.optimizers.SGD(lr=cfg['lr'])\n optimizer.setup(model)\n if cfg['extractor_freeze']:\n print('Freeze Extractor params: True')\n freeze_extractor(model)\n else:\n print('Freeze Extractor params: False')\n\n train_iter = chainer.iterators.SerialIterator(train_dataset, cfg['bs'])\n\n updater = training.updaters.StandardUpdater(\n train_iter, optimizer, device=args.device)\n\n if flag_val and flag_early_stop:\n print(\"Early Stopping: True\")\n early_stop = training.triggers.EarlyStoppingTrigger(\n monitor='validation/main/loss', max_trigger=(cfg['epoch'], 'epoch'),\n patients=10)\n trainer = 
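freeze_extractor above relies on Chainer's disable_update(), which tells the optimizer to skip every parameter under a link, so the pretrained extractor stays fixed while the new layers train. Since disable_update() already recurses through a link hierarchy, calling it on the extractor itself is equivalent to looping over its children; a toy sketch (the model is hypothetical, not the repo's detector):

```python
# Hypothetical two-part chain: frozen extractor + trainable head.
import chainer
import chainer.links as L

class Toy(chainer.Chain):
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.extractor = L.Linear(None, 16)   # pretrained part
            self.head = L.Linear(16, 2)           # freshly trained part

model = Toy()
model.extractor.disable_update()   # optimizer now only updates model.head
```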
training.Trainer(updater, stop_trigger=early_stop, out=outdir)\n else:\n print(\"Early Stopping: False\")\n trainer = training.Trainer(updater, (cfg['epoch'], 'epoch'), out=outdir)\n\n if flag_val:\n print('Validation: True')\n val_dataset = KeypointsDataset(\n os.path.expanduser(cfg['val_img_dir']),\n os.path.expanduser(cfg['val_keypoints_dir']), transforms)\n val_iter = chainer.iterators.SerialIterator(\n val_dataset, cfg['bs'], repeat=False, shuffle=False)\n trainer.extend(training.extensions.Evaluator(\n val_iter, model, device=args.device))\n else:\n print('Validation: False')\n\n # trainer extensions\n trainer.extend(extensions.LogReport())\n if flag_val:\n trainer.extend(extensions.PlotReport(\n ['main/loss', 'validation/main/loss'],\n 'epoch', file_name='loss.png', marker=None))\n else:\n trainer.extend(extensions.PlotReport(\n ['main/loss', 'validation/main/loss'],\n 'epoch', file_name='loss.png', marker=None))\n trainer.extend(extensions.snapshot(),\n trigger=(cfg['step_save_model'], 'epoch'))\n if args.progress:\n trainer.extend(extensions.ProgressBar(update_interval=20))\n\n print('============= Start Training =============')\n trainer.run()\n print('============= End Training =============')\n\n\nif __name__ == '__main__':\n train()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"569989608","text":"'''\n模拟硬件设备\n每隔一秒 朝文件写入一次数据\n\n\n- 程序模拟出 30 个风机\n'''\n\nimport time\nimport random\n\n# 生成初始数据,用于数据预测\ndef data_init():\n data_list = []\n '''\n with open('data_test.txt', 'a') as f:\n for i in range(30):\n data_list.append(i)\n f.write(str(i))\n f.write('\\n')\n '''\n with open('data_test.txt', 'r') as f:\n for line in f.readlines():\n data_list.append( float(line) )\n\n return data_list\n\n\n# 求数字列表最后 60 个(提高运行速度)的平均值\ndef get_mean(data_list):\n sum = 0\n for i in data_list[:60]:\n sum += i\n return round(sum / len(data_list), 2 )\n\n\n\n# 生成固定个数 的 模拟风机\n# 并将生成的 模拟风机 写入文件 记录\ndef gen_fake_motor(num):\n motor_list = []\n for i in range(num):\n motor = {}\n motor['motor_id'] = i\n motor['motor_ip'] = str(i) + '.' + str(i) + '.' + str(i) + '.' 
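One wrinkle in the extension block above: both branches of the if flag_val test register an identical PlotReport, and the key 'validation/main/loss' is requested even when no Evaluator was attached. If the intent was to plot validation loss only when a validation set exists, a single unconditional call with a filtered key list is probably what was meant (a hedged sketch, not the author's confirmed intent):

```python
# Possible intent of the duplicated PlotReport branches above:
keys = ['main/loss'] + (['validation/main/loss'] if flag_val else [])
trainer.extend(extensions.PlotReport(
    keys, 'epoch', file_name='loss.png', marker=None))
```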
+ str(i)\n motor['motor_name'] = 'motor ' + str(i)\n motor['motor_sum_volt'] = random.random() * 100\n motor['motor_info'] = 'info ' + str(i)\n status = 0\n if random.random() > 0.5:\n status = 1\n motor['motor_status'] = status\n\n motor_list.append(motor)\n\n\n # 将风机 写入文件 记录\n with open('motor_test2.txt', 'w') as f:\n for m in motor_list:\n f.write( str(m['motor_id']) )\n f.write(',')\n f.write(m['motor_ip'])\n f.write(',')\n f.write(m['motor_name'])\n f.write(',')\n f.write( str(m['motor_sum_volt']) )\n f.write(',')\n f.write(m['motor_info'])\n f.write(',')\n f.write( str(m['motor_status']) )\n\n f.write('\\n')\n print('-> 写入 模拟硬件 记录到文件 成功')\n\n return motor_list\n\n\n\n# 为 给定的 模拟风机 生成模拟数据\ndef gen_stimulate_data(motor_list):\n for motor in motor_list:\n tmp_volt = random.random() * 100\n print('-> tmp_volt: ' + str(tmp_volt))\n motor['motor_sum_volt'] = tmp_volt\n\n return motor_list\n\n\n# 将生成的每一个 模拟风机 的 模拟数据 写入文件,以供 flask 接口使用\ndef write_stimulate_file(motor_list):\n with open('data_test2.txt', 'a') as f:\n for motor in motor_list:\n f.write( str(motor['motor_id']) )\n f.write(',')\n f.write(motor['motor_ip'])\n f.write(',')\n f.write(motor['motor_name'])\n f.write(',')\n f.write( str(motor['motor_sum_volt']) )\n f.write(',')\n f.write(motor['motor_info'])\n f.write(',')\n f.write( str(motor['motor_status']) )\n\n f.write('\\n')\n print('-> write_stimulate_file Down.')\n\n\n\nif __name__ == '__main__':\n # 生成 30 个模拟 硬件\n fake_motor_list = gen_fake_motor(30)\n while True:\n # 为 模拟 硬件 生成 模拟数据\n fake_data_motor_list = gen_stimulate_data( fake_motor_list )\n # 将 模拟硬件 列表 的 数据 写入文件\n write_stimulate_file(fake_data_motor_list)\n\n time.sleep(1)\n\n\n '''\n while True:\n data_list = data_init()\n new_data = get_mean(data_list)\n # 实现随机化\n new_data += random.random()\n if random.random() > 0.5:\n new_data += 5.9\n else:\n new_data -= 4.3\n\n # 写入新数据 到文件\n with open('data_test.txt', 'a') as f:\n f.write(str(new_data))\n f.write('\\n')\n\n print('-> 写入 ' + str(new_data) + ' succ')\n time.sleep(1)\n '''\n","sub_path":"data_api/hardware_model.py","file_name":"hardware_model.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"89582278","text":"import requests\nimport datetime\nimport os\nfrom profiler.models import File\n\nbatch_id = '6af969dc5a604ca695410d7de8ccc1c6'\nbatch_created = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\nip = \"localhost\"\nport = 5000\n\n\nupload_files = list(File.objects.filter(batch_id__exact=batch_id))\nurl = \"http://{0}:{1}/api/uploads\".format(ip, port)\nfor f in upload_files:\n form = {'batch_id': batch_id, 'filename': os.path.splitext(os.path.basename(f.file.path)), 'created': batch_created}\n files = [('file', open(f.file.path, 'rb'))]\n requests.post(url, data=form, files=files)\nurl = \"http://127.0.0.1:5000/api/profiling/{}\".format(batch_id)\nrequests.get(url)\n\n\nurl = \"http://127.0.0.1:5000/api/profiles/{}\".format(batch_id)\nrequests.get(url)\n\nurl = \"http://127.0.0.1:5000/api/dendrograms/{}/{}\".format(\"png\", batch_id)\nrequests.get(url)\n","sub_path":"test/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"265707650","text":"from django.urls import path, include\nfrom . 
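Both writer functions in the simulator above emit comma-separated records through long chains of paired f.write calls. Python's csv module produces the same line with one writerow and handles quoting and newlines for free; a sketch using the field order from the code above:

```python
# csv-module equivalent of the manual f.write chains above.
import csv

def write_motors(motor_list, path='motor_test2.txt'):
    fields = ['motor_id', 'motor_ip', 'motor_name',
              'motor_sum_volt', 'motor_info', 'motor_status']
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        for motor in motor_list:
            writer.writerow([motor[k] for k in fields])
```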
import views\n\nurlpatterns = [\n #home page\n path('', views.homepage, name='homepage'),\n\n #authorization\n path('register', views.registration, name='register'),\n path('login', views.login_page, name='login'),\n path('logout', views.logout_user, name='logout'),\n\n #feedback\n path('contact', views.contact_us, name='contact'),\n\n # Urls for customer cart\n path('cart/add//', views.cart_add, name='cart_add'),\n path('cart/item_clear//', views.item_clear, name='item_clear'),\n path('cart/item_increment//', views.item_increment, name='item_increment'),\n path('cart/item_decrement//', views.item_decrement, name='item_decrement'),\n path('cart/cart_clear/', views.cart_clear, name='cart_clear'),\n path('cart/cart-detail/',views.cart_detail,name='cart_detail'),\n # Urls for customer order\n path('order_create', views.order_create, name=\"order_create\"),\n]","sub_path":"eshopapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"316776359","text":"# --------------------------------------------------------\n# Swin Transformer\n# Copyright (c) 2021 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ze Liu\n# --------------------------------------------------------\n\nfrom torch import optim as optim\n\n\ndef build_optimizer(config, model):\n \"\"\"\n Build optimizer, set weight decay of normalization to 0 by default.\n \"\"\"\n\n parameters = list(model.named_parameters())\n for n, v in parameters:\n if (\"score\" not in n) and v.requires_grad:\n print(n, \"weight_para\")\n for n, v in parameters:\n if (\"score\" in n) and v.requires_grad:\n print(n, \"score_para\")\n weight_params = [v for n, v in parameters if (\"score\" not in n) and v.requires_grad]\n score_params = [v for n, v in parameters if (\"score\" in n) and v.requires_grad]\n\n skip = {}\n skip_keywords = {}\n if hasattr(model, 'no_weight_decay'):\n skip = model.no_weight_decay()\n if hasattr(model, 'no_weight_decay_keywords'):\n skip_keywords = model.no_weight_decay_keywords()\n weight_params = set_weight_decay([(n, v) for n, v in parameters if (\"score\" not in n) and v.requires_grad], skip, skip_keywords)\n opt_lower = config.TRAIN.OPTIMIZER.NAME.lower()\n optimizer, score_optimizer = None, None\n if opt_lower == 'sgd':\n if config.train_weights_at_the_same_time:\n optimizer = optim.SGD(weight_params, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,\n lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)\n score_optimizer = optim.Adam(score_params, lr=12e-3, weight_decay=0)\n else:\n optimizer = optim.SGD(weight_params, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,\n lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)\n elif opt_lower == 'adamw':\n if config.train_weights_at_the_same_time:\n optimizer = optim.AdamW(weight_params, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,\n lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)\n score_optimizer = optim.Adam(score_params, lr=12e-3, weight_decay=0)\n else:\n optimizer = optim.AdamW(weight_params, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,\n lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)\n return optimizer, score_optimizer, weight_params\n\n\ndef set_weight_decay(weight_params, skip_list=(), skip_keywords=()):\n has_decay = []\n no_decay = []\n\n for name, param in weight_params:\n if not 
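The cart routes above appear to have lost their angle-bracket path converters to HTML stripping: patterns like cart/add// point at per-item views, so the originals presumably captured an item id. A reconstructed sketch (the converter name and type are assumptions, not recovered from the source):

```python
# Likely original form of the stripped patterns; <int:id> is assumed.
path('cart/add/<int:id>/', views.cart_add, name='cart_add'),
path('cart/item_clear/<int:id>/', views.item_clear, name='item_clear'),
path('cart/item_increment/<int:id>/', views.item_increment, name='item_increment'),
path('cart/item_decrement/<int:id>/', views.item_decrement, name='item_decrement'),
```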
param.requires_grad:\n continue # frozen weights\n if len(param.shape) == 1 or name.endswith(\".bias\") or (name in skip_list) or \\\n check_keywords_in_name(name, skip_keywords):\n no_decay.append(param)\n # print(f\"{name} has no weight decay\")\n # if len(param.shape) == 1:\n # print(\"1\")\n # elif name.endswith(\".bias\"):\n # print(\"2\")\n # elif (name in skip_list):\n # print(\"3\")\n # elif check_keywords_in_name(name, skip_keywords):\n # print(\"4\")\n else:\n has_decay.append(param)\n return [{'params': has_decay},\n {'params': no_decay, 'weight_decay': 0.}]\n\n\ndef check_keywords_in_name(name, keywords=()):\n isin = False\n for keyword in keywords:\n if keyword in name:\n isin = True\n return isin\n","sub_path":"optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"249869213","text":"# coding: utf-8\n# Copyright (c) 2018-present, Qikun Lu..\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\n'''Featurn engineer function\nEnvironment\n python 3.6\n matplotlib2.0.2\n numpy 1.12.1\n seaborn 0.7.1\n scipy 0.19.1\n scikit-learn 0.19.0\n lightgbm 2.1.2\n py-xgboost 0.60\n\nThis is for finishing feature engineer.\n'''\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['font.family'] = 'sans-serif' # 解决负号是方块\nfrom scipy import stats\nfrom scipy.stats import norm, skew\nfrom scipy.special import boxcox1p\n\nfrom sklearn.preprocessing import LabelEncoder\n\n\nclass Abnormal_value_handle():\n '''\n 异常值处理\n '''\n def plot_abnormalvalue(self, dataset_used1):\n fig, ax = plt.subplots()\n ax.scatter(x=dataset_used1.ix[:, 'size_house_edit1'], y=dataset_used1.ix[:, 'smeter_price_edit1'])\n plt.ylabel('SalePrice', fontsize=13)\n plt.xlabel('GrLivArea', fontsize=13)\n plt.show()\n\n return None\n\n def handle_abnormalvalue(self, dataset_used1):\n '''\n 删除离群点\n :param dataset_used1: pd.DataFrame\n :return: pd.DataFrame\n '''\n dataset_used1 = dataset_used1.drop(dataset_used1[(dataset_used1['size_house_edit1'] > 1900) & (\n dataset_used1['smeter_price_edit1'] < 1250000)].index)\n fig, ax = plt.subplots()\n ax.scatter(x=dataset_used1.ix[:, 'size_house_edit1'], y=dataset_used1.ix[:, 'smeter_price_edit1'])\n plt.ylabel('SalePrice', fontsize=13)\n plt.xlabel('GrLivArea', fontsize=13)\n plt.show()\n\n return dataset_used1\n\n\nclass Pred_feature_handle():\n '''\n 目标变量处理\n '''\n def goalvalue_check_normal(self, dataset_used1):\n '''\n 目标变量处理——满足整体分布\n 目标值处理:线性的模型需要正态分布的目标值才能发挥最大的作用。\n 我们需要检测房价什么时候偏离正态分布。使用probplot函数,即正态概率图:\n :param dataset_used1:\n :return: None\n '''\n # 绘制正态分布图\n fig5 = plt.figure(figsize=(6, 6))\n sns.distplot(dataset_used1['smeter_price_edit1'], fit=norm)\n # 正态分布拟合\n (mu, sigma) = norm.fit(dataset_used1['smeter_price_edit1'])\n print('\\n mu = {:.2f} and sigma = 
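set_weight_decay above implements the standard convention of exempting one-dimensional parameters (biases, LayerNorm scales) and explicitly skipped names from L2 regularization by routing them into a group with weight_decay=0; the optimizer applies its default decay only to the first group. A compressed PyTorch sketch of the same split:

```python
# Compressed version of the decay / no-decay split used above.
import torch

def param_groups(model, weight_decay):
    decay, no_decay = [], []
    for name, p in model.named_parameters():
        if not p.requires_grad:
            continue                      # frozen weights
        if p.ndim == 1 or name.endswith('.bias'):
            no_decay.append(p)            # biases and norm parameters
        else:
            decay.append(p)
    return [{'params': decay, 'weight_decay': weight_decay},
            {'params': no_decay, 'weight_decay': 0.0}]

# optimizer = torch.optim.AdamW(param_groups(model, 0.05), lr=1e-3)
```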
{:.2f}\\n'.format(mu, sigma))\n\n # 绘制QQ图 看是否与理论的一致\n fig5 = plt.figure(figsize=(6, 6))\n\n # 绘图\n plt.legend(['Normal dist. ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)],\n loc='best')\n plt.ylabel('Frequency')\n plt.title('SalePrice distribution')\n\n # 原始数据分布绘图\n res = stats.probplot(dataset_used1['smeter_price_edit1'], plot=plt)\n plt.show()\n\n return None\n\n def goalvalue_handle_normal(self, dataset_used1):\n '''\n 目标变量不满足正态分布情况下的变换处理\n :param dataset_used1: pd.DataFrame\n :return dataset_used1: pd.DataFrame\n '''\n # 使用log1p函数完成log(1+x)变换\n dataset_used1['smeter_price_edit1'] = np.log1p(dataset_used1['smeter_price_edit1'])\n\n # 正态分布拟合\n (mu, sigma) = norm.fit(dataset_used1['smeter_price_edit1'])\n fig6 = plt.figure(figsize=(6, 6))\n\n # 绘图\n plt.legend(['Normal dist. ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)],\n loc='best')\n plt.ylabel('Frequency')\n plt.title('SalePrice distribution')\n\n # log变换之后的数据分布绘图\n res = stats.probplot(dataset_used1['smeter_price_edit1'], plot=plt)\n plt.show()\n\n return dataset_used1\n\n\nclass Missing_value_handle():\n '''\n 缺失值处理\n '''\n def missingvalue_check(self, dataset_used1):\n '''\n 检查各个列变量中的缺失值情况。\n 由观察得知缺失变量\n unit_house\n years_house_year_edit1,num\n years_house_type_edit1\n type_house_edit1\n direction_edit1\n decoration_edit1\n elevator_edit1\n watch_time_edit1,num\n submit_period_edit1,num\n interests_house_edit1,num\n :param dataset_used1: pd.DataFrame\n :return: None\n '''\n dataset_used1.info()\n\n # Percent missing data by feature-连续(-1)\n all_data_na3 = (dataset_used1[dataset_used1 == -1].sum() / len(dataset_used1)) * 100\n all_data_na3 = all_data_na3.drop(all_data_na3[all_data_na3 == 0].index).sort_values(ascending=False)[:30]\n f, ax = plt.subplots(figsize=(8, 8))\n plt.xticks(rotation='90')\n sns.barplot(x=all_data_na3.index, y=all_data_na3)\n plt.xlabel('Features', fontsize=15)\n plt.ylabel('Percent of missing values', fontsize=15)\n plt.title('Percent missing data by feature', fontsize=15)\n\n # Percent missing data by feature-类别\n temp1 = dataset_used1.dtypes\n temp2 = temp1[temp1 == 'object'].index\n temp3 = dataset_used1[temp2] == 'nodata'\n all_data_na2 = (temp3.sum() / len(dataset_used1[temp2])) * 100\n all_data_na2 = all_data_na2.drop(all_data_na2[all_data_na2 == 0].index).sort_values(ascending=False)[:30]\n f, ax = plt.subplots(figsize=(8, 8))\n plt.xticks(rotation='90')\n sns.barplot(x=all_data_na2.index, y=all_data_na2)\n plt.xlabel('Features', fontsize=15)\n plt.ylabel('Percent of missing values', fontsize=15)\n plt.title('Percent missing data by feature', fontsize=15)\n\n # Percent missing data by feature-连续(0)\n all_data_na = (dataset_used1.isnull().sum() / len(dataset_used1)) * 100\n all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]\n f, ax = plt.subplots(figsize=(8, 8))\n plt.xticks(rotation='90')\n sns.barplot(x=all_data_na.index, y=all_data_na)\n plt.xlabel('Features', fontsize=15)\n plt.ylabel('Percent of missing values', fontsize=15)\n plt.title('Percent missing data by feature', fontsize=15)\n return None\n\n def missingvalue_handle(self, dataset_used1):\n '''\n 处理缺失值\n :param dataset_used1: pd.DataFrame\n :return dataset_used1: pd.DataFrame\n '''\n dataset_used1['unit_house'] = dataset_used1['unit_house'].fillna('nodata')\n dataset_used1['years_house_year_edit1'] = dataset_used1['years_house_year_edit1'].fillna(0)\n # 关于年份的处理,将其处理成距今2018的连续型年值\n dataset_used1['years_house_year_edit2'] = 
dataset_used1['years_house_year_edit1'].apply(\n lambda x: 0.0 if x == 0.0 else (2018.0 - x))\n\n return dataset_used1\n\n\nclass Other_feature_engineer():\n '''\n 其他特征工程\n '''\n def feature_eg_other(self, dataset_used1, threshold=0.75):\n # 1、有许多特征实际上是类别型的特征,但给出来的是数字,所以需要将其转换成类别型。\n dataset_used1['years_house_year_edit1'] = dataset_used1['years_house_year_edit1'].astype(int).apply(str)\n\n # 2、接下来 LabelEncoder,对部分类别的特征进行编号。\n temp1 =dataset_used1.dtypes\n temp2 = temp1[temp1=='object'].index\n # 使用LabelEncoder做变换\n for c in temp2:\n lbl = LabelEncoder()\n lbl.fit(list(dataset_used1[c].unique()))\n dataset_used1[c] = lbl.transform(list(dataset_used1[c].values))\n\n # 3、检查变量的正态分布情况\n ###### 检查\n total_price = dataset_used1['total_price']\n dataset_used1.drop('total_price', axis=1, inplace=True)\n numeric_feats = ['size_house_edit1', 'watch_time_edit1', 'interests_house_edit1', 'submit_period_edit1']\n\n # 对所有数值型的特征都计算skew,即计算一下偏度\n skewed_feats = dataset_used1[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)\n print(\"\\nSkew in numerical features: \\n\")\n skewness = pd.DataFrame({'Skew' :skewed_feats})\n skewness.head()\n\n ###### 变换处理\n skewness = skewness[abs(skewness) > threshold] # 关于临界值,如何定,不知??\n print(\"总共有 {} 数值型的特征做变换\".format(skewness.shape[0]))\n\n skewed_features = skewness.index\n lam = 0.15\n for feat in skewed_features:\n #all_data[feat] += 1\n dataset_used1[feat] = boxcox1p(dataset_used1[feat], lam)\n\n ###### 哑变量处理\n temp1 = dataset_used1.dtypes\n temp2 = temp1[temp1 == 'int64'].index\n for name in temp2:\n dataset_used1[name] = dataset_used1[name].astype(str)\n\n temp2_2 = temp1[temp1 == 'float64'].index\n for name in temp2_2:\n dataset_used1[name] = dataset_used1[name].astype(float)\n\n temp_ds_use1 = dataset_used1.drop(['community_house', 'years_house_year_edit1'], axis=1)\n all_usedata = pd.get_dummies(temp_ds_use1)\n useful_dataset = all_usedata.sample(frac=0.1, random_state=123)\n\n all_usedata = None\n del all_usedata, temp_ds_use1, dataset_used1\n\n return useful_dataset\n\n\n\n\n","sub_path":"data_science_lianjia/feature_engineer.py","file_name":"feature_engineer.py","file_ext":"py","file_size_in_byte":9950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"74479330","text":"# -*- coding: utf-8 -*-\nimport os,time\nimport fasttext.FastText as fasttext\nfrom test1 import evaluate_line\nfrom pprint import pprint\nfrom intention import get_intention\nfrom collections import OrderedDict\nimport tensorflow as tf\n\ndir_path = os.getcwd()\n\ncla_model = os.path.join('model','data_dim100_lr05_iter5.model')\n\ncws_model_path = os.path.join(dir_path,'model','cws.model') # 分词模型路径,模型名称为`cws.model`\n\npos_model_path = os.path.join(dir_path,'model','pos.model') # 词性标注模型路径,模型名称为`pos.model`\n\nner_model_path = os.path.join(dir_path,'model','ckpt') # 实体识别模型路径,模型名称为`ner.model`\nNer = evaluate_line(ner_model_path) #加载实体识别模型\nclassifier = fasttext.load_model(cla_model) #加载文本分类模型\n\nfrom pyltp import Segmentor, Postagger\n\n\nclass PyltpTool:\n def __init__(self,sentence):\n self.sentence = sentence\n\n self.segmentor = Segmentor() # 初始化实例\n self.segmentor.load(cws_model_path) # 加载模型\n self.postagger = Postagger()\n self.postagger.load(pos_model_path)\n self.seg_sentences = []\n self.pos_tags = []\n self.ner_sentence = []\n self.id = 0\n self.a = []\n\n def get_seg_sentences(self):\n seg_sentence = self.segmentor.segment(self.sentence)\n self.seg_sentences.extend(list(seg_sentence))\n 
self.a.extend([i[0] for i in classifier.predict(' '.join(seg_sentence))])\n\n def get_pos_tags(self):\n # for seg_sentence in self.seg_sentences:\n pos_tag = self.postagger.postag(list(self.seg_sentences))\n self.pos_tags.extend(list(pos_tag))\n # print(self.pos_tags)\n\n\n\n def ner(self):\n ner_content = Ner.pridict(self.sentence)\n for i in range(len(self.seg_sentences)):\n s=OrderedDict()\n s['cont'] = self.seg_sentences[i]\n s['pos'] = self.pos_tags[i]\n if i!=0:\n self.id+=len(self.seg_sentences[i-1])\n if ner_content[0][self.id]=='O':\n s['ner'] = ner_content[0][self.id]\n else:\n s['ner'] = ner_content[0][self.id].split('-')[1]\n s['id'] = self.id\n self.a.append(s)\n return self.a\n\n\n def realease(self):\n self.segmentor.release()\n self.postagger.release()\n\nif __name__=='__main__':\n # 输入存放一行一行句子的文件,分别输出分词、词性标注和依存分析三个文件。\n while True:\n text = input('请输入:')\n # text = '刘伟,帮我打开厨房空调。'\n t1 = time.time()\n ctb6_pyltp_tool = PyltpTool(text)\n ctb6_pyltp_tool.get_seg_sentences()\n ctb6_pyltp_tool.get_pos_tags()\n ctb6_pyltp_tool.realease()\n ltp_result = ctb6_pyltp_tool.ner()\n\n # pprint(ltp_result)\n Intention = get_intention()\n intention_result, summary = Intention.extract_intention(ltp_result)\n print('意图识别的结果:\\n')\n pprint(intention_result)\n print('具体的分词信息\\n')\n pprint(summary)\n print('{}秒'.format(time.time()-t1))\n\n\n # if not text:\n # break\n","sub_path":"lltp.py","file_name":"lltp.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"312191","text":"import pygame as pg\r\n\r\n'''REQUIRED FUNCTIONS'''\r\ndef visuals():\r\n\twindow.fill(WHITE)\r\n\tpg.draw.rect(window, LIGHT_GREY, player1)\r\n\tpg.draw.rect(window, LIGHT_GREY, player2)\r\n\tpg.draw.ellipse(window, LIGHT_GREY, ball)\r\n\tpg.draw.aaline(window, LIGHT_GREY,(w/2,0), (w/2,h))\r\n\t\r\ndef player1_animation():\r\n\tplayer1.y += player_speed\r\n\tif player1.top <= 0:\r\n\t\tplayer1.top = 0\r\n\tif player1.bottom > h:\r\n\t\tplayer1.bottom = h\r\n\r\ndef player2_ai():\r\n\tif player2.top < ball.y:\r\n\t\tplayer2.top += player2_speed\r\n\tif player2.top > ball.y:\r\n\t\tplayer2.top -= player2_speed\r\n\tif player2.bottom > h:\r\n\t\tplayer2.bottom = h\r\n\r\ndef ball_animation():\r\n\tglobal ball_speed_x, ball_speed_y\r\n\tball.x += ball_speed_x\r\n\tball.y += ball_speed_y\r\n\t\r\n\tif ball.top < 0 or ball.bottom >= h:\r\n\t\tball_speed_y *= -1\r\n\tif ball.left <= 0 or ball.right >= w:\r\n\t\tball_speed_x *= -1\r\n\r\n\tif ball.colliderect(player1) or ball.colliderect(player2):\r\n\t\tball_speed_x *= -1\r\n\r\n# GAME INITIALIZATION\r\npg.init()\r\n\r\n# CONFIGS\r\nw, h = 600, 400\r\nWHITE = (255, 255, 255)\r\nLIGHT_GREY = (200, 200, 200)\r\n\r\n# SCREEN\r\nwindow = pg.display.set_mode((w, h))\r\npg.display.set_caption('Ping-pong')\r\npg.display.set_icon(pg.image.load(\"ping-pong.jpg\"))\r\n\r\n# SPRITES\r\nball = pg.Rect(w/2 - 15,h/2 - 15,30,30)\r\nplayer1 = pg.Rect(w - 20,h/2 - 70,10,140)\r\nplayer2 = pg.Rect(10, h/2 - 70,10,140)\r\n\r\n# SPEEDS\r\nball_speed_x = 5\r\nball_speed_y = 5\r\nplayer_speed = 0\r\nplayer2_speed = 7\r\n\r\n# VARS\r\ngame = True\r\nclock = pg.time.Clock()\r\nFPS = 60\r\n\r\n# MAIN LOOP\r\nwhile game:\r\n\t# HANDLING INPUT\r\n\tfor event in pg.event.get():\r\n\t\tif event.type == pg.QUIT:\r\n\t\t\tgame = False\r\n\r\n\t\t# DOWN\r\n\t\tif event.type == pg.KEYDOWN:\r\n\t\t\tif event.key == pg.K_DOWN:\r\n\t\t\t\tplayer_speed += 7\r\n\t\t\tif event.key == pg.K_UP:\r\n\t\t\t\tplayer_speed -= 
7\r\n\t\t\r\n\t\t# UP\r\n\t\tif event.type == pg.KEYUP:\r\n\t\t\tif event.key == pg.K_DOWN:\r\n\t\t\t\tplayer_speed -= 7\r\n\t\t\tif event.key == pg.K_UP:\r\n\t\t\t\tplayer_speed += 7\t\r\n\r\n\t# START\r\n\tball_animation()\r\n\tvisuals()\r\n\tplayer1_animation()\r\n\tplayer2_ai()\r\n\t\r\n\t# UPDATE\r\n\tclock.tick(FPS)\r\n\tpg.display.update()\r\n","sub_path":"Ping-Pong/ping-pong.py","file_name":"ping-pong.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"373763273","text":"from collections import deque\n\ndef brute(pat):\n n = len(pat)\n opts = deque([pat])\n for i in range(n):\n sz = len(opts)\n for _ in range(sz):\n x = opts.popleft()\n if x[i] == '?':\n start = 1 if (i == 0 and n > 1) else 0\n cands = range(start, 10)\n for d in cands:\n opts.append(x[:i] + str(d) + x[i + 1:])\n else:\n opts.append(x)\n\n return [int(x) for x in opts]\n\ndef can_match(exp, pat):\n return len(exp) == len(pat) and all(b == '?' or a == b for a, b in zip(exp, pat))\n\nans = 0\nwords = input().split()[::2]\nwords.sort(key=lambda s: s.count('?'))\nfor ba in brute(words[0]):\n for bb in brute(words[1]):\n exp = ba ^ bb\n if can_match(str(exp), words[2]):\n ans += 1\nif any(word[0] == '0' and len(word) > 1 for word in words):\n ans = 0\nprint(ans)\n","sub_path":"kattis/xorequation.py","file_name":"xorequation.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"49464075","text":"#!/usr/bin/env python3\n# -*- coding=utf-8 -*-\n\"\"\"\n@author:Wllen\n@file:练习题.py\n@time:2018/8/24 23:45\n\"\"\"\n# 2、编写一个简单的文本处理工具,具备三个任务,一个接收用户输入,一个将用户输入的内容格式化成大写,一个将格式化后的结果存入文件\nfrom threading import Thread\n\nmsg_l = []\nformat_l = []\ndef get():\n while True:\n data = input('>>>>:').strip()\n if not data:continue\n msg_l.append(data)\n\n\ndef upper():\n while True:\n if msg_l:\n res = msg_l.pop()\n format_l.append(res.upper())\n\ndef save():\n while True:\n if format_l:\n with open('threadfile.txt', 'a', encoding='utf-8') as f:\n res = format_l.pop()\n f.write('%s\\n' % res)\n\n\nif __name__ == '__main__':\n t1 = Thread(target=get)\n t2 = Thread(target=upper,)\n t3 = Thread(target=save)\n t1.start()\n t2.start()\n t3.start()\n\n","sub_path":"learning/第七章/13多线程练习题/练习题.py","file_name":"练习题.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"82103578","text":"'''\r\nDate: 8/8/2016\r\nPurpose: Run updated sensitivity analysis for Lancet revision\r\n'''\r\nimport pandas as pd \r\nimport numpy as np \r\nfrom scipy.stats.mstats import gmean\r\nimport sys\r\n\r\nfrom getpass import getuser\r\nif getuser() == 'strUser':\r\n SDG_REPO = \"/homes/strUser/sdg-capstone-paper-2015\"\r\nif getuser() == 'strUser':\r\n SDG_REPO = '/homes/strUser/sdgs/sdg-capstone-paper-2015'\r\nif getuser() == 'strUser':\r\n SDG_REPO = ('/ihme/code/test/strUser/under_development'\r\n '/sdg-capstone-paper-2015')\r\nsys.path.append(SDG_REPO)\r\nimport sdg_utils.draw_files as dw\r\nimport sdg_utils.queries as qry\r\nimport sdg_utils.tests as sdg_test\r\n\r\nindicators_master = '/home/j/WORK/10_gbd/04_journals/gbd2015_capstone_lancet_SDG/02_inputs/indicator_ids.csv'\r\n\r\ndef arithmetic_mean(arr):\r\n return np.mean(arr)\r\n\r\ndef geometric_mean(arr):\r\n return gmean(arr)\r\n\r\ndef take_min(arr):\r\n\treturn np.min(arr)\r\n\r\ndef floor(arr):\r\n\tarr[arr < 0.01] = 
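The three-thread pipeline in the exercise above shares bare lists and spins in while True busy-wait loops, which burns a core per thread and races on pop(). queue.Queue provides the same producer-to-consumer handoff with a blocking get() and no polling; a sketch of the middle (upper-casing) stage:

```python
# Blocking queues instead of shared lists and busy-waiting.
import queue
import threading

msg_q, fmt_q = queue.Queue(), queue.Queue()

def upper():
    while True:
        data = msg_q.get()            # blocks until the reader puts a line
        fmt_q.put(data.upper())

threading.Thread(target=upper, daemon=True).start()
```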
0.01\r\n\treturn arr\r\n\r\ndef copyit(arr):\r\n\treturn arr\r\n\r\ndef apply_methods(version, target_method, summary_method):\r\n\t'''\r\n\tversion: int, SDG version\r\n\ttarget_method: function, how to summarize over target\r\n\tsummary_method: function, how to summarize across target summaries\r\n\r\n\treturns dataframe with correct summary methods applied\r\n\t'''\r\n\t# Load data and merge on targets\r\n\tdata = pd.read_csv('/home/j/WORK/10_gbd/04_journals/gbd2015_capstone_lancet_SDG/'\\\r\n\t\t+'04_outputs/indicator_values/indicators_scaled_{}.csv'.format(version))\r\n\tmeta = pd.read_csv(indicators_master)\r\n\tdata = data.merge(meta, on = 'indicator_id', how = 'left')\r\n\tdata = data[['indicator_id','indicator_target','location_id','year_id','mean_val']]\r\n\r\n\t#Apply the floor\r\n\tdata['mean_val'] = floor(data['mean_val'].values)\r\n\r\n\t# Apply summary method by target - geometric or arithmetic mean\r\n\tif target_method != copyit:\r\n\t\tdata = data.groupby(['indicator_target','location_id','year_id'], as_index = False).aggregate(target_method)\r\n\t\r\n\tdata['mean_val'] = floor(data['mean_val'].values)\r\n\r\n\t# Save intermediates\r\n\tif target_method == arithmetic_mean:\r\n\t\ttag = 'arithmetic_mean'\r\n\telif target_method == geometric_mean:\r\n\t\ttag = 'geometric_mean'\r\n\r\n\tif target_method != copyit:\r\n\t\tdata.to_csv('/home/j/WORK/10_gbd/04_journals/gbd2015_capstone_lancet_SDG/'\\\r\n\t\t\t+'04_outputs/sensitivity_analysis/{v}_{t}_intermediates.csv'.format(t=tag, v=version), index = False)\r\n\r\n\tdata.drop('indicator_target', axis = 1, inplace = True)\r\n\r\n\t# Now summarize across all the targets\r\n\tdata = data.groupby(['location_id','year_id'], as_index = False).aggregate(summary_method)\r\n\r\n\tdata = data[['location_id','year_id','mean_val']]\r\n\r\n\treturn data\r\n\r\ndef run_all(version):\r\n\tarith = apply_methods(version, arithmetic_mean, arithmetic_mean)\r\n\tgeom1 = apply_methods(version, geometric_mean, geometric_mean)\r\n\tgeom2 = apply_methods(version, geometric_mean, take_min)\r\n\trawgeom = apply_methods(version, copyit, geometric_mean)\r\n\r\n\ttogether = arith.merge(geom1, on = ['location_id','year_id'], suffixes = ('_arith','_geom'))\\\r\n\t\t\t\t\t.merge(geom2, on = ['location_id','year_id'])\\\r\n\t\t\t\t\t.merge(rawgeom, on = ['location_id','year_id'], suffixes = ('_geom_min','_raw_geom'))\r\n\t#together = together.rename(columns = {'mean_val':'mean_val_geom_min'})\r\n\r\n\t# Generate ranks for each mean\r\n\ttogether = together.sort_values(by = ['year_id','mean_val_arith'], ascending = False)\\\r\n\t\t\t\t\t\t.reset_index(drop=True).reset_index()\r\n\ttogether = together.rename(columns = {'index':'rank_arithmetic_mean'})\r\n\r\n\ttogether = together.sort_values(by = ['year_id','mean_val_geom'], ascending = False)\\\r\n\t\t\t\t\t\t.reset_index(drop=True).reset_index()\r\n\ttogether = together.rename(columns = {'index':'rank_geometric_mean'})\r\n\r\n\ttogether = together.sort_values(by = ['year_id','mean_val_geom_min'], ascending = False)\\\r\n\t\t\t\t\t\t.reset_index(drop=True).reset_index()\r\n\ttogether = together.rename(columns = {'index':'rank_geometric_min'})\r\n\r\n\ttogether = together.sort_values(by = ['year_id','mean_val_raw_geom'], ascending = False)\\\r\n\t\t\t\t\t\t.reset_index(drop=True).reset_index()\r\n\ttogether = together.rename(columns = {'index':'rank_raw_geom'})\r\n\r\n\tfor col in ['rank_arithmetic_mean','rank_geometric_mean','rank_geometric_min','rank_raw_geom']:\r\n\t\ttogether[col] = together[col] + 1 - 
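The 0.01 floor applied before aggregation is what keeps a single near-zero indicator from dominating a geometric mean: gmean multiplies its inputs, so one tiny value drags the whole target down while the arithmetic mean barely moves. A small numeric illustration using the floor helper defined above:

```python
import numpy as np
from scipy.stats.mstats import gmean

vals = np.array([0.9, 0.8, 1e-6])
print(np.mean(vals))               # ~0.567: arithmetic mean barely reacts
print(gmean(vals))                 # ~0.009: the tiny value dominates
print(gmean(floor(vals.copy())))   # ~0.193 after flooring to 0.01
```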
192*((2015-together['year_id'])/5)\r\n\r\n\t# Save data\r\n\ttogether.to_csv('/home/j/WORK/10_gbd/04_journals/gbd2015_capstone_lancet_SDG/'\\\r\n\t\t+'04_outputs/sensitivity_analysis/different_means_{}.csv'.format(version), index = False)\r\n\r\nif __name__ == '__main__':\r\n\tsdg_vers = sys.argv[1] # argparse is only useful for more complicated things\r\n\trun_all(sdg_vers)","sub_path":"sdg/indicator_calculations/sensitivity_update.py","file_name":"sensitivity_update.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"314202650","text":"\nfrom urllib.request import urlopen\n#The urllib.request module defines functions and classes which \n#help in opening URLs (mostly HTTP) in a complex world — \n#basic and digest authentication, redirections, cookies and more.\nfrom bs4 import BeautifulSoup\n\n\ndef fetching_url():\n\n\thtml = urlopen(\"http://shakespeare.mit.edu/lll/full.html\")\n\tbsobj = BeautifulSoup(html.read(), \"html.parser\")\n\t# print(bsobj.h3)\n\n\t# .get_text() strips all tags from the document you are working\n\t# with and returns a string containing the text only.\n\t# Calling .get_text() should always be the last thing you do, immedi‐\n\t# ately before you print, store, or manipulate your final data.\n \n\th3 = bsobj.findAll(\"h3\")\n\tfor tag in h3:\n\t\tprint(tag.get_text())\n\n\n\nfetching_url()","sub_path":"selecting_header.py","file_name":"selecting_header.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"229592563","text":"#!/usr/bin/env python\n\nimport rospy\nfrom gazebo_msgs.srv import GetModelState\nfrom geometry_msgs.msg import Pose\n\n\ndef talker():\n pub = rospy.Publisher('person_pose', Pose, queue_size=10)\n rospy.init_node('person_publisher', anonymous=True)\n rate = rospy.Rate(10) # 10hz\n serviceTag = '/gazebo/get_model_state'\n rospy.wait_for_service(serviceTag)\n model_coordinates = rospy.ServiceProxy(serviceTag, GetModelState)\n while not rospy.is_shutdown():\n resp_coordinates = model_coordinates('person', 'world')\n rospy.loginfo('publishing')\n pub.publish(resp_coordinates.pose)\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n talker()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"hri_arvr/scripts/human_publisher.py","file_name":"human_publisher.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"399058552","text":"import kfp.dsl as dsl\nfrom kfp import components\nfrom kubernetes import client as k8s_client\nimport json\n\nfrom random import randint\n\ndkube_preprocess_op = components.load_component_from_file(\"../components/preprocess/component.yaml\")\ndkube_training_op = components.load_component_from_file(\"../components/training/component.yaml\")\ndkube_serving_op = components.load_component_from_file(\"../components/serving/component.yaml\")\ndkube_viewer_op = components.load_component_from_file('../components/viewer/component.yaml')\n\n@dsl.pipeline(\n name='dkube-face-detection-pl',\n description='face detection pipeline with dkube components'\n)\ndef d3pipeline(\n auth_token,\n training_container=json.dumps({'image':'docker.io/ocdr/dkube-datascience-tf-cpu:v1.14', 'username':'', 'password': ''}),\n training_program=\"facedetection\",\n preprocess_program=\"facedetection\",\n training_script=\"python model.py\",\n 
training_datasets=json.dumps([\"faces\"]),\n training_gpus=0,\n preprocess_script=\"python preprocessing.py\",\n serving_device='cpu'):\n\n preprocess = dkube_preprocess_op(auth_token,'binfaces', training_container,\n program=preprocess_program, run_script=preprocess_script,\n datasets=training_datasets)\n\n train = dkube_training_op(auth_token, training_container,\n program=training_program, run_script=training_script,\n datasets=training_datasets, ngpus=training_gpus).after(preprocess)\n # serving = dkube_servinMODEL_DIR = os.getenv('DKUBE_JOB_OUTPUT_S3', None)\n\nif __name__ == '__main__':\n import kfp.compiler as compiler\n compiler.Compiler().compile(d3pipeline, 'face_detection.tar.gz')","sub_path":"pipeline/src/face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"506673232","text":"#!/usr/bin/env python3\n###############################################################################\n#_TITLE isis3_to_pds4_LOLA_pysis.py\n#\n#_ARGS\n# input.cub\n# output_config.txt\n#\n#_REQUIRES\n# Python 3.x\n# pysis library: https://github.com/wtolson/pysis (for install see page)\n# --recommended Anaconda Python 3.x environment w/ gdal, once installed:\n# $ conda install -c conda-forge gdal \n#\n#_DESC\n# Create a configuration for a pds4 conversion using gdal_translate.\n# Requires an existing PDS4 XML template with known variables.\n# The default PDS4 template for GDAL is available in GDAL_DATA\n# https://trac.osgeo.org/gdal/wiki/FAQInstallationAndBuilding#WhatisGDAL_DATAenvironmentvariable\n#\n#_HIST\n# Oct 18 2017 - Trent Hare (thare@usgs.gov) - original version\n#\n#_LICENSE\n# Public domain (unlicense)\n# \n#_END\n##############################################################################\nimport sys, os\n#import pysis\nfrom pysis.isis import getkey\nimport gdal\n\ndef Usage(theApp):\n print( 'Usage:\\npython {} input.cub output.config'.format(theApp))\n print( ' optional: to run gdal_translate send: -run')\n print( ' optional: to set a PDS4 XML template send: -template my.xml')\n print( ' by default the template used is from $GDALDATA/pds4_template.xml')\n print( '\\nUsage: {} -run -template myTemplate.xml in.cub out.config'.format(theApp))\n print( 'Note: Requires an existing PDS4 XML template with known variables.\\n')\n sys.exit(1)\n\ndef EQUAL(a, b):\n return a.lower() == b.lower()\n\n#/************************************************************************/\n#/* main() */\n#/************************************************************************/\ndef main( argv = None ):\n\n inputlbl = None\n outputConfig = None\n run = None\n template = None\n\n if argv is None:\n argv = sys.argv\n\n argv = gdal.GeneralCmdLineProcessor( argv )\n if argv is None:\n return 1\n\n nArgc = len(argv)\n\n#/* -------------------------------------------------------------------- */\n#/* Parse arguments. 
*/\n#/* -------------------------------------------------------------------- */\n i = 1\n while i < nArgc:\n\n if EQUAL(argv[i], '-run'):\n run = True\n elif EQUAL(argv[i], '-template'):\n i = i + 1\n template = argv[i]\n elif inputlbl is None:\n inputlbl = argv[i]\n elif outputConfig is None:\n outputConfig = argv[i]\n else:\n return Usage(argv[0])\n i = i + 1\n\n if inputlbl is None:\n return Usage(argv[0])\n if outputConfig is None:\n return Usage(argv[0])\n if template is None:\n template = 'pds4_template.xml'\n\n #open output config file\n fileConfig = open(outputConfig, 'w')\n print('writing {}'.format(outputConfig))\n\n #Write first comment line\n theLine = '#{0} {1} {2}\\n'.format(sys.argv[0], sys.argv[1], sys.argv[2])\n fileConfig.write(theLine)\n\n #Next lines are not available in ISIS3 label\n theLine = '-co VAR_TARGET_TYPE=Satellite\\n'\n fileConfig.write(theLine)\n\n theLine = '-co VAR_INVESTIGATION_AREA_LID_REFERENCE=\"urn:nasa:pds:context:instrument_host:spacecraft.lro\"\\n'\n fileConfig.write(theLine)\n\n try:\n target = getkey(from_=inputlbl, keyword='TargetName', grp='Mapping')\n theLine = '-co VAR_TARGET={}'.format(target)\n fileConfig.write(theLine)\n except KeyError:\n print('No Target in ISIS3 Label')\n\n try:\n mission = getkey(from_=inputlbl, keyword='InstrumentHostName', grp='Archive')\n theLine = '-co VAR_INVESTIGATION_AREA_NAME=\"{}\"\\n'.format(mission.rstrip())\n fileConfig.write(theLine)\n except KeyError:\n print('No InstrumentHostName in ISIS3 Label')\n\n try:\n dataSetID = getkey(from_=inputlbl, keyword='DataSetId', grp='Archive')\n theLine = '-co VAR_LOGICAL_IDENTIFIER={}'.format(dataSetID)\n fileConfig.write(theLine)\n except KeyError:\n print('No DataSetId in ISIS3 Label')\n\n try:\n observeID = getkey(from_=inputlbl, keyword='InstrumentId', grp='Archive')\n theLine = '-co VAR_OBSERVING_SYSTEM_NAME={}'.format(observeID)\n fileConfig.write(theLine)\n except KeyError:\n print('No InstrumentId in ISIS3 Label')\n\n try:\n fileName = getkey(from_=inputlbl, keyword='ProductId', grp='Archive')\n theLine = '-co VAR_TITLE={}'.format(fileName)\n fileConfig.write(theLine)\n except KeyError:\n print('No ProductId in ISIS3 Label')\n\n fileConfig.close()\n\n #write out helper line for gdal - can run from here too\n outPDS4 = inputlbl.replace('.cub','_pds4.xml')\n\n theCmd='gdal_translate -of PDS4 -co IMAGE_FORMAT=GEOTIFF -co TEMPLATE={0} --optfile {1} {2} {3}'.format(template, outputConfig, inputlbl, outPDS4)\n\n if run is None:\n print('\\nRecommended gdal run:')\n print('{}\\n'.format(theCmd))\n else: #run gdal\n os.system(theCmd)\n\n\nif __name__ == '__main__':\n #version_num = int(gdal.VersionInfo('VERSION_NUM'))\n #if version_num < 2200: # because of PDS4 support\n # print('ERROR: Python bindings of GDAL 2.2.0 or later required')\n # sys.exit(1)\n\n sys.exit(main(sys.argv))\n\n","sub_path":"PDS4gdal/isis3_to_pds4_LOLA_pysis.py","file_name":"isis3_to_pds4_LOLA_pysis.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"569567556","text":"import random\n\n\nclass NQPosition:\n\n def __init__(self, n):\n self.n = n\n self.queens_with_rows = [random.randrange(self.n) for _ in range(self.n)]\n # every queen has it's own column, int in array is row position\n\n def value(self) -> int:\n # calculate number of conflicts (pairs of queens that can capture each other)\n value = 0\n for i, q in enumerate(self.queens_with_rows):\n for i2 in range(i + 1, self.n):\n q2 = 
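The script assembles the gdal_translate invocation as a single shell string and hands it to os.system. subprocess.run with an argument list sidesteps shell quoting problems (paths with spaces, for instance) and can raise on a non-zero exit; a sketch of the same call using the script's variables:

```python
# subprocess alternative to os.system(theCmd) above.
import subprocess

subprocess.run(
    ['gdal_translate', '-of', 'PDS4',
     '-co', 'IMAGE_FORMAT=GEOTIFF',
     '-co', 'TEMPLATE=' + template,
     '--optfile', outputConfig,
     inputlbl, outPDS4],
    check=True,   # raises CalledProcessError on a non-zero exit status
)
```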
self.queens_with_rows[i2]\n # captures horizontally (same row) or captures diagonally\n if q == q2 or abs(q - q2) == abs(i - i2):\n value += 1\n break\n return value\n\n def make_move(self, queen_to_move: int, move: int) -> None:\n self.queens_with_rows[queen_to_move] = move\n\n def best_move(self) -> tuple:\n # find the best move and the value function after making that move\n queen_to_move = 0\n best_move = 0\n value = self.value()\n\n for i, q in enumerate(self.queens_with_rows):\n for move in range(self.n):\n if move != q: # new position not the same\n self.make_move(i, move)\n new_value = self.value()\n if new_value < value: # if better value, remember the move and queen\n queen_to_move = i\n best_move = move\n value = new_value\n self.make_move(i, q) # restore position before move\n return queen_to_move, best_move, value\n\n def print_board(self) -> None:\n board = []\n for row in range(self.n):\n board_row = []\n for q in self.queens_with_rows:\n board_row.append('Q' if q == row else '.')\n board.append(board_row)\n\n print('\\n'.join([''.join(row) for row in board]))\n print()\n\n\ndef hill_climbing(pos: NQPosition):\n curr_value = pos.value()\n while True:\n queen_to_move, move, new_value = pos.best_move()\n if new_value >= curr_value:\n # no improvement, give up\n return pos, curr_value\n else:\n # position improves, keep searching\n curr_value = new_value\n pos.make_move(queen_to_move, move)\n\n\ndef n_queen_problem(n: int):\n best_value = n\n try_nr = 1\n pos = None\n while best_value != 0: # random restart\n pos = NQPosition(n)\n best_pos, best_value = hill_climbing(pos)\n try_nr += 1\n if try_nr > 200:\n print(f\"No solution for N={n} or unable to solve!\")\n break\n print(f\"Restart nr: {try_nr}\")\n print(f\"Final value {best_value}\\n\")\n if best_value == 0:\n pos.print_board()\n\n\nif __name__ == '__main__':\n n_queen_problem(8)\n","sub_path":"hw3/n_queen.py","file_name":"n_queen.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"122508554","text":"#This is my Python Project -- start 12/12/2020\n\n\n# best ways to code/learn --> #1 follow documentations and build your own structure and stuff (best for full-understanding and scaling)\n# #2 go along a video or website and thouroughly investigate their methods along with documentation (what I am doing ) == best for speed and learning\n# #3 go along a video build with them and change parameters/code to explore effects (good for analyzing results and broad concepts)\n# #4 cheat. 
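A subtlety in NQPosition.value() above: the comment promises the number of capturing pairs, but the break after the first hit means each queen contributes at most one, so the method actually counts queens that have at least one conflict. Either objective works for hill climbing (zero means solved in both), but if the true pair count is wanted, drop the break:

```python
def pair_conflicts(queens):
    """Number of attacking pairs (no early break)."""
    n, conflicts = len(queens), 0
    for i in range(n):
        for j in range(i + 1, n):
            if queens[i] == queens[j] or abs(queens[i] - queens[j]) == j - i:
                conflicts += 1
    return conflicts

assert pair_conflicts([0, 0, 0]) == 3   # three queens on one row: 3 pairs
```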
You get very little out of it but you can still learn about concepts (not good lol)\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation, Flatten, Dropout, LSTM\nfrom tensorflow.keras.layers import LayerNormalization\nfrom tensorflow.keras.metrics import RootMeanSquaredError\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error, explained_variance_score, r2_score\nfrom tensorflow.keras.wrappers.scikit_learn import KerasRegressor\nfrom sklearn.preprocessing import MinMaxScaler\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport bs4 as bs\nimport pickle\nimport requests\nimport datetime as dt\nimport os\nimport pandas as pd\nimport pandas_datareader.data as web\nfrom datetime import date\n\n\nimport bs4 as bs\nimport pickle\nimport requests\n\ndef save_tickers():\n resp=requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\n soup=bs.BeautifulSoup(resp.text)\n table=soup.find('table',{'class':'wikitable sortable'})\n tickers=[]\n\n for row in table.findAll('tr')[1:]:\n ticker=row.findAll('td')[0].text[:-1]\n tickers.append(ticker)\n\n files = ['TRANS', 'XLF', 'SMH', 'SPY', 'IWM' ]\n for x in files:\n tickers.append(x)\n with open(\"tickers.pickle\",'wb') as f:\n pickle.dump(tickers, f)\n return tickers\n\ntickers=[]\ntickers = save_tickers()\n\nprint(tickers)\ntoday = date.today()\n\ndef fetch_data():\n with open(\"tickers.pickle\",'rb') as f:\n tickers=pickle.load(f)\n\nif not os.path.exists('stock_details'):\n os.makedirs('stock_details')\n count=200\nstart= dt.datetime(2017,1,1)\nend=today\ncount=0\n\nif os.path.getctime('stock_details') == dt.datetime.strptime(today.ctime(), \"%a %b %d %H:%M:%S %Y\"):\n for ticker in tickers:\n count+=1\n print(ticker)\n\n try:\n df=web.DataReader(ticker, 'yahoo', start, end)\n df.to_csv('stock_details/{}.csv'.format(ticker))\n except:\n print(\"Error\")\n continue\n\nfetch_data()\n# transportation Banking Semiconductor\n\n# for f in files:\n# temp = pd.read_csv(f)\n# temp = temp.drop('Date', axis=1)\n# temp = temp.drop(' Volume', axis=1) #put back eventually\n#\n# # print(y_min, coeff)\n# # raise Exception(\"stop\")\n# normalized_temp=(temp-temp.min())/(temp.max()-temp.min())\n# temp = normalized_temp\n# temp['MA'] = temp[' Close/Last'].values[ ::-1]\n# temp['STD'] = temp[' Close/Last'].values[ ::-1]\n# temp['MA'] = temp['MA'].rolling(window=10).mean()\n# temp['STD'] = temp['STD'].rolling(window=10).std()\n# temp['MA'] = temp['MA'].values[ ::-1]\n# temp['STD'] = temp['STD'].values[ ::-1]\n# df = df.join(temp, rsuffix='_'+f)\ndf = None\nfor ticker in tickers:\n if not os.path.exists('../Video #1/stock_details/{}.csv'.format(ticker)):\n continue\n temp = pd.read_csv('../Video #1/stock_details/{}.csv'.format(ticker))\n\n temp = temp.drop('Date', axis=1)\n temp = temp.drop('Volume', axis=1) #put back eventually\n temp = temp.drop('Adj Close', axis=1)\n\n if ticker == 'SPY':\n y_min = temp['Close'].min()\n coeff = temp['Close'].max() - temp['Close'].min()\n #NORMALIZE male nail polish\n normalized_temp=(temp-temp.min())/(temp.max()-temp.min())\n temp = normalized_temp\n temp['MA'] = temp['Close'].values[ ::-1]\n temp['STD'] = temp['Close'].values[ ::-1]\n temp['MA'] = temp['MA'].rolling(window=10).mean()\n temp['STD'] = 
temp['STD'].rolling(window=10).std()\n temp['MA'] = temp['MA'].values[ ::-1]\n temp['STD'] = temp['STD'].values[ ::-1]\n if df is not None:\n df = df.join(temp, rsuffix='_'+ticker+'.csv')\n else:\n df = temp\n\n#2. Do classifier dataset(SPY)\ndf['spy'] = df['Close_SPY.csv']\n\ndf['spy'] = df['spy'].shift(1) #in order to train we need to offset by 1 so it can \"see\" the next day's closing / opening in future implem.\ndf = df.dropna() #drop any NaN values\n\nX = df.drop('spy',axis=1)\nY = df['spy'].shift(1)\n# y_min = Y.min()\n# coeff = Y.max() - Y.min()\n# normalized_temp=(Y-y_min)/coeff # ( Y.max() - Y.min()) + Y.min()\n# Y = normalized_temp\n# print(X)\n# print(Y)\n#timeInput, batch, epoch\n# timeInputs = [1, 2, 3, 4, 5, 6, 7, 8, 10, 20, 25]\n# batchs = [1, 5, 10, 50, 100, 500, 1000]\n# epochs = [ 1, 5, 10, 20, 50, 100, 500, 1000]\n\n\ntimeInputs = [5,7 ]\nbatchs = [1000]\nepochs = [500]\nmax_R2S = -1\nmax_XVS = -1\nbest = [-1,-1,-1]\nboo = []\nY_train = []\nx_vec = X.to_numpy()\ny_vec = Y.to_numpy()\n\n\n\nfor timeNum in timeInputs:\n boo.clear()\n Y_train.clear()\n\n for i in range(timeNum,len(X)):\n boo.append(x_vec[i-timeNum:i,:])\n Y_train.append(y_vec[i])\n b, y = np.array(boo), np.array(Y_train)\n ################### ML MODEL CREATION ############################################\n X_train, X_test, y_train, y_test = train_test_split(b, y, test_size=0.2)\n del b, y\n\n #sets the data to 3d for LSTM model\n # boo = []\n # Y_train = []\n # x_vec = X_train.to_numpy()\n # y_vec = y_train.to_numpy()\n # for i in range(10,len(X_train)):\n # boo.append(x_vec[i-10:i,:])\n # Y_train.append(y_vec[i])\n # b, y_train = np.array(boo), np.array(Y_train)\n #b = b[:,np.newaxis, : ] # [datarows, time_interval, features]\n print(X_train.shape)\n print(y_train.shape)\n\n\n\n\n #Okay so now I have my data set in 3d witha time interval of 1 (day).\n\n # #Create the LSTM RNN Model\n def model():\n mod=Sequential()\n mod.add(LSTM(units = 64, dropout = 0.3, return_sequences=True, input_shape = (timeNum, 3042)))\n mod.add(LayerNormalization())\n mod.add(LSTM(units = 64))\n mod.add(LayerNormalization())\n mod.add((Dense(64, activation='tanh')))\n mod.add(Dropout(0.4))\n mod.add(Dense(1, activation='linear'))\n\n mod.compile(loss='mean_squared_error', optimizer='adam', metrics=[RootMeanSquaredError(),'mean_squared_error'])\n mod.summary()\n return mod\n\n # #Create the ANN Model\n def ann_model():\n mod=Sequential()\n mod.add(Dense(32, kernel_initializer='normal',input_dim = 24, activation='relu'))\n mod.add(Dense(64, kernel_initializer='normal',activation='relu'))\n mod.add(Dropout(0.4))\n mod.add(Dense(128, kernel_initializer='normal',activation='relu'))\n mod.add(Dropout(0.4))\n mod.add(Dense(256, kernel_initializer='normal',activation='relu'))\n mod.add(Dropout(0.4))\n mod.add(Dense(1, kernel_initializer='normal',activation='linear')) # needed for it to quantify the number for output\n\n mod.compile(loss='mean_absolute_error', optimizer='adam', metrics=['accuracy','mean_absolute_error'])\n mod.summary()\n return mod\n\n for epochNum in epochs:\n for batchNum in batchs:\n # regressor = KerasRegressor(build_fn=ann_model, batch_size=4,epochs=500)\n # callback=tf.keras.callbacks.ModelCheckpoint(filepath='regression_model',\n # monitor='mean_absolute_error',\n # verbose=1,\n # save_best_only=True,\n # save_weights_only=False,\n # mode='auto')\n # results=regressor.fit(X_train,y_train,callbacks=[callback])\n\n RNN_model = model()\n callback=tf.keras.callbacks.ModelCheckpoint(filepath='./RNN_model.h5',\n 
monitor= 'mean_squared_error',\n verbose=1,\n save_best_only=True,\n save_weights_only=False,\n mode='auto',\n save_freq='epoch')\n RNN_model.fit(X_train, y_train, epochs = epochNum , batch_size = batchNum,callbacks=[callback])\n #RNN_model = tf.keras.models.load_model('./RNN_model.h5')\n\n #############################################\n # # testing # #\n #############################################\n\n y_pred = []\n y_valu = []\n y_pred = RNN_model.predict(X_test)\n y_pred = (y_pred * coeff) + y_min #reverse the normalization\n\n y_valu = y_test\n y_valu = (y_valu * coeff) + y_min\n\n print(y_pred)\n print(y_valu)\n\n XVS = explained_variance_score(y_valu, y_pred)\n R2S = r2_score(y_valu, y_pred)\n\n if max_R2S < R2S:\n max_R2S = R2S\n max_XVS = XVS\n best = [timeNum, epochNum, batchNum]\n print(\"ran BEST: \")\n print(best)\n print(max_R2S, max_XVS)\n\n print(best)\n print(max_R2S, max_XVS)\n\n print(\" _______________________________________\")\n print(\" ____________ Parameters : t, E, B ____\")\n print(\" _______________________________________\")\n\n print(timeNum, epochNum, batchNum)\n\n\n # plotting the line 1 points\n\n print(best)\n print(max_R2S, max_XVS)\n # plt.plot(np.arange(0, 45), y_spy, label = \"prediction\")\n #\n # # plotting the line 2 points\n # plt.plot(np.arange(0, 45), y_test, label = \"test\")\n # plt.xlim(0,100)\n # # naming the x axis\n # plt.xlabel('x - axis')\n # # naming the y axis\n # plt.ylabel('y - axis')\n # # giving a title to my graph\n # plt.title('Two lines on same graph!')\n #\n # # show a legend on the plot\n # plt.legend()\n\n # function to show the plot\n #plt.show()\n","sub_path":"stonk_bot.py","file_name":"stonk_bot.py","file_ext":"py","file_size_in_byte":10731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"399291080","text":"import json\nimport unittest\n\nfrom SensuOutputBuilder import SensuOutputBuilder\n\nclass TestSequenceFunctions(unittest.TestCase):\n \"\"\"\n Test the SensuOutputBuilder\n \"\"\"\n\n def setUp(self):\n\n self.sensu_healthy_job_output = {'status' : 'running'}\n self.sensu_healthy_json_output='{\"status\": 0, \"name\": \"test job\", \"handlers\": [\"default\"], \"monit data status\": \"running\", \"subscribers\": \"cfmonit\", \"output\": \"Monit Job test job is running\", \"type\": \"monit\"}'\n self.sensu_unhealthy_job_output = {'status' : 'not monitored'}\n self.sensu_pending_stop_action_job_output = {'status' : 'stop pending'}\n self.sensu_pending_start_action_job_output = {'status' : 'start pending'}\n self.sensu_pending_restart_action_job_output = {'status' : 'restart pending'}\n\n def test_sensu_output_contains_the_correct_data(self):\n \"\"\"\n Make sure the basic parser returns the correct JSON\n \"\"\"\n sensu_output_healthy = SensuOutputBuilder().create_output_for_job('test job', self.sensu_healthy_job_output)\n self.assertEqual(sensu_output_healthy, self.sensu_healthy_json_output)\n\n def test_sensu_output_contains_all_required_fields(self):\n \"\"\"\n Make sure the summary parser returns the correct number of jobs.\n \"\"\"\n sensu_output_healthy = SensuOutputBuilder().create_output_for_job('test job', self.sensu_healthy_job_output)\n sensu_output_healthy_json = json.loads(sensu_output_healthy)\n self.assertTrue('type' in sensu_output_healthy_json)\n self.assertTrue('output' in sensu_output_healthy_json)\n self.assertTrue('handlers' in sensu_output_healthy_json)\n self.assertTrue('subscribers' in sensu_output_healthy_json)\n self.assertTrue('name' in 
sensu_output_healthy_json)\n self.assertTrue('status' in sensu_output_healthy_json)\n\n def test_sensu_output_returns_healthy_status_for_healthy_jobs(self):\n \"\"\"\n Make sure the output builder uses the correct status code for a healthy job\n \"\"\"\n sensu_output_healthy = SensuOutputBuilder().create_output_for_job('test job', self.sensu_healthy_job_output)\n sensu_output_healthy_json = json.loads(sensu_output_healthy)\n self.assertTrue('status' in sensu_output_healthy_json)\n self.assertEqual(sensu_output_healthy_json['status'], 0)\n\n def test_sensu_output_returns_error_status_for_failed_jobs(self):\n \"\"\"\n Make sure the output builder uses the correct status code for a failed job\n \"\"\"\n sensu_output_dead = SensuOutputBuilder().create_output_for_job('test job', self.sensu_unhealthy_job_output)\n sensu_output_dead_json = json.loads(sensu_output_dead)\n self.assertTrue('status' in sensu_output_dead_json)\n self.assertEqual(sensu_output_dead_json['status'], 2)\n\n def test_sensu_output_returns_warning_status_for_pending_stop_jobs(self):\n \"\"\"\n Make sure the output builder uses the correct status code for a pending stop job\n \"\"\"\n sensu_output_pending = SensuOutputBuilder().create_output_for_job('test job', self.sensu_pending_stop_action_job_output)\n sensu_output_pending_json = json.loads(sensu_output_pending)\n self.assertTrue('status' in sensu_output_pending_json)\n self.assertEqual(sensu_output_pending_json['status'], 1)\n\n def test_sensu_output_returns_warning_status_for_pending_start_jobs(self):\n \"\"\"\n Make sure the output builder uses the correct status code for a pending start job\n \"\"\"\n sensu_output_pending = SensuOutputBuilder().create_output_for_job('test job', self.sensu_pending_start_action_job_output)\n sensu_output_pending_json = json.loads(sensu_output_pending)\n self.assertTrue('status' in sensu_output_pending_json)\n self.assertEqual(sensu_output_pending_json['status'], 1)\n\n def test_sensu_output_returns_warning_status_for_pending_restart_jobs(self):\n \"\"\"\n Make sure the output builder uses the correct status code for a pending restart job\n \"\"\"\n sensu_output_pending = SensuOutputBuilder().create_output_for_job('test job', self.sensu_pending_restart_action_job_output)\n sensu_output_pending_json = json.loads(sensu_output_pending)\n self.assertTrue('status' in sensu_output_pending_json)\n self.assertEqual(sensu_output_pending_json['status'], 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/sensu_client/testSensuOutputBuilder.py","file_name":"testSensuOutputBuilder.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"177822883","text":"# torch\nimport torch\nfrom torch.nn.utils import parameters_to_vector, vector_to_parameters\nfrom torch import from_numpy\n\nimport gym, gym_ToricCode\n# python lib\nimport numpy as np \nimport random\nfrom copy import deepcopy\n# from file \nfrom src.util import action_type\nfrom src.util_actor import generateTransitionParallel, selectActionParallel, computePrioritiesParallel #,selectAction, computePriorities, generateTransition\n\n# Quality of life\nfrom src.nn.torch.NN import NN_11, NN_17\nfrom src.EnvSet import EnvSet\n \n\ndef actor(args):\n \n device = args[\"device\"]\n\n discount_factor = args[\"discount_factor\"]\n\n # local buffer of fixed size to store transitions before sending\n n_step = args[\"n_step\"]\n size_local_memory_buffer = args[\"size_local_memory_buffer\"] + n_step\n 
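 # NOTE: the list-based n-step buffers just below are superseded further
 # down in actor(), where size_local_memory_buffer and the local_buffer_*
 # variables are re-initialised as numpy arrays of shape
 # (no_envs, buffer_size); those numpy buffers are the ones actually
 # filled each step and shipped to the learner over MPI.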
local_buffer_T = [None] * size_local_memory_buffer # Transitions\n local_buffer_Q = [None] * size_local_memory_buffer # Q values\n buffer_idx = 0\n n_step_S = [None] * n_step # State\n n_step_A = [None] * n_step # Actions\n n_step_Q = [None] * n_step # Q values\n n_step_R = [0 ] * n_step # Rewards\n n_step_idx = 0 # index\n\n # set network to eval mode\n NN = args[\"model\"]\n if NN == NN_11 or NN == NN_17:\n NN_config = args[\"model_config\"]\n model = NN(NN_config[\"system_size\"], NN_config[\"number_of_actions\"], args[\"device\"])\n else:\n model = NN()\n \n model.to(device)\n model.eval()\n \n # env and env params\n no_envs = args[\"no_envs\"]\n env = gym.make(args[\"env\"], config=args[\"env_config\"])\n envs = EnvSet(env, no_envs)\n size = env.system_size\n\n no_actions = int(env.action_space.high[-1])\n grid_shift = int(env.system_size/2)\n \n epsilon = args[\"epsilon\"]\n \n \n transition_type = np.dtype([('perspective', (np.int, (2,size,size))),\n ('action', action_type),\n ('reward', np.float),\n ('next_perspective', (np.int, (2,size,size))),\n ('terminal',np.bool)])\n\n # startup\n state = envs.resetAll()\n steps_per_episode = np.zeros(no_envs)\n\n\n # Local buffer of fixed size to store transitions before sending.\n size_local_memory_buffer = args[\"size_local_memory_buffer\"] + 1\n local_buffer_T = np.empty((no_envs, size_local_memory_buffer), dtype=transition_type) # Transitions\n local_buffer_A = np.empty((no_envs, size_local_memory_buffer, 4), dtype=np.int) # A values\n local_buffer_Q = np.empty((no_envs, size_local_memory_buffer), dtype=(np.float, 3)) # Q values\n local_buffer_R = np.empty((no_envs, size_local_memory_buffer)) # R values\n buffer_idx = 0\n\n # Get initial network params\n base_comm = args[\"mpi_base_comm\"]\n learner_rank = args[\"mpi_learner_rank\"]\n msg = None\n msg = base_comm.bcast(msg, root=learner_rank)\n \n msg, weights = msg\n if msg != \"weights\":\n weights = None\n\n # load weights\n vector_to_parameters(weights.to(device), model.parameters())\n\n # init counters\n steps_counter = 0\n update_counter = 1\n local_memory_index = 0\n # main loop over training steps\n \n print(\"Actor start loop\")\n while True:\n \n\n steps_per_episode += 1 \n # select action using epsilon greedy policy\n\n action, q_values = selectActionParallel(number_of_actions=no_actions,\n epsilon=epsilon,\n grid_shift=grid_shift,\n toric_size = size,\n state = state,\n model = model,\n device = device)\n next_state, reward, terminal_state, _ = envs.step(action)\n \n transition = generateTransitionParallel(action,\n reward, \n state,\n next_state, \n terminal_state,\n grid_shift,\n transition_type)\n\n local_buffer_T[:, buffer_idx] = transition\n local_buffer_A[:, buffer_idx] = action\n local_buffer_Q[:, buffer_idx] = q_values\n local_buffer_R[:, buffer_idx] = reward\n buffer_idx += 1\n\n \n # If buffer full, send transitions\n if buffer_idx >= size_local_memory_buffer:\n print(\"Actor Sync\")\n # receive new weights\n msg = base_comm.bcast(msg, root=learner_rank)\n msg, weights = msg\n if msg == \"weights\":\n vector_to_parameters(weights.to(device), model.parameters())\n elif msg == \"terminate\":\n break\n\n priorities = computePrioritiesParallel(local_buffer_A[:,:-1],\n local_buffer_R[:,:-1],\n local_buffer_Q[:,:-1],\n np.roll(local_buffer_Q, -1, axis=1)[:,:-1],\n discount_factor)\n\n to_send = [*zip(local_buffer_T[:,:-1].flatten(), priorities.flatten())]\n\n # send buffer to learner\n base_comm.gather(to_send, root=learner_rank)\n buffer_idx = 0\n\n too_many_steps = 
steps_per_episode > args[\"max_actions_per_episode\"]\n if np.any(terminal_state) or np.any(too_many_steps):\n \n # Reset terminal envs\n idx = np.argwhere(np.logical_or(terminal_state, too_many_steps)).flatten()\n reset_states = envs.resetTerminalEnvs(idx)\n\n # Reset n_step buffers\n next_state[idx] = reset_states\n steps_per_episode[idx] = 0\n \n state = next_state\n\n\n \n","sub_path":"src/mpi/Actor_mpi.py","file_name":"Actor_mpi.py","file_ext":"py","file_size_in_byte":6279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"137603592","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: sms/base.py\n# Compiled at: 2014-08-01 07:08:33\nimport os\nfrom abc import ABCMeta, abstractmethod\nfrom django.utils import six\nfrom django.template import TemplateDoesNotExist\nfrom mail_factory.mails import BaseMail\n\nclass BaseSMS(BaseMail, six.with_metaclass(ABCMeta)):\n\n def get_template_part(self, part, lang=None):\n templates = []\n localized = os.path.join('sms', self.template_name, lang or self.lang, part)\n templates.append(localized)\n fallback = os.path.join('sms', self.template_name, part)\n templates.append(fallback)\n return templates\n\n def mail_admins(self, attachments=None, from_email=None):\n raise NotImplementedError()\n\n def create_sms_msg(self, lang=None):\n try:\n body = self._render_part('body.txt', lang=lang)\n except TemplateDoesNotExist:\n raise TemplateDoesNotExist('Txt template have not been found')\n\n return body\n\n @abstractmethod\n def send(self, to_phone, from_phone=None):\n pass","sub_path":"pycfiles/django-mailfactory-extras-0.25.tar/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"445542833","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models\n\nclass Partner(models.Model):\n _inherit = 'res.partner'\n \n province_id = fields.Many2one('res.country.province', string='Province')\n district_id = fields.Many2one('res.country.district', string='District')\n\n @api.onchange('state_id')\n def _onchange_state_id(self):\n if self.state_id:\n if not self.country_id:\n self.country_id=self.state_id.country_id.id\n return {'domain': {'province_id': [('state_id', '=', self.state_id.id)]}}\n else:\n return {'domain': {'province_id': []}}\n \n \n @api.onchange('province_id')\n def _onchange_province_id(self):\n if self.province_id:\n if not self.state_id:\n self.state_id=self.province_id.state_id.id\n return {'domain': {'district_id': [('province_id', '=', self.province_id.id)]}}\n else:\n return {'domain': {'district_id': []}}\n \n @api.onchange('district_id')\n def _onchange_district_id(self):\n if self.district_id:\n if not self.province_id:\n self.province_id=self.district_id.province_id.id\n self.zip= self.district_id.code and self.district_id.code[2:] or False\n self.city = self.district_id.name ","sub_path":"models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"502154097","text":"from django.core.serializers.json import DjangoJSONEncoder\nfrom jsonrpc import jsonrpc_method\nfrom jsonrpc.site import JsonRpcSite\n\nfrom waescrow.escrow_api import SQL_ESCROW_API\n\nfrom . 
import escrow_api\n\n# MONKEY-PATCH django-jsonrpc package so that it uses Extended Json on responses\nfrom bson.json_util import dumps, loads\nfrom jsonrpc import site\n\nassert site.loads\nsite.loads = loads\nassert site.dumps\nsite.dumps = dumps\n\nclass ExtendedDjangoJSONEncoder(DjangoJSONEncoder):\n def default(self, o):\n try:\n return super().default(o)\n except TypeError:\n return \"\" % o # Just to please jsonrpc _response_dict() method...\n\n\nextended_jsonrpc_site = JsonRpcSite(json_encoder=ExtendedDjangoJSONEncoder)\n\"\"\"\n\n@jsonrpc_method(\"waescrow.sayhelloworld\")\ndef sayhelloworld(request):\n return \"Hello world\"\n\n\n@jsonrpc_method(\"generate_keypair(str) -> str\")\ndef get_public_key(request, algo):\n pem_keypair = key_generation.generate_assymetric_keypair(uid=None, key_type=algo)\n return pem_keypair[\"public_key\"]\n\n\"\"\"\n\n@jsonrpc_method(\"get_public_key\", site=extended_jsonrpc_site)\ndef get_public_key(request, keychain_uid, key_type):\n del request\n return SQL_ESCROW_API.get_public_key(keychain_uid=keychain_uid, key_type=key_type)\n\n@jsonrpc_method(\"get_message_signature\", site=extended_jsonrpc_site)\ndef get_message_signature(request, keychain_uid, message, key_type, signature_algo):\n del request\n return SQL_ESCROW_API.get_message_signature(\n keychain_uid=keychain_uid, message=message, key_type=key_type, signature_algo=signature_algo\n )\n\n@jsonrpc_method(\"decrypt_with_private_key\", site=extended_jsonrpc_site)\ndef decrypt_with_private_key(request, keychain_uid, key_type, encryption_algo, cipherdict):\n del request\n return SQL_ESCROW_API.decrypt_with_private_key(\n keychain_uid=keychain_uid,\n key_type=key_type,\n encryption_algo=encryption_algo,\n cipherdict=cipherdict,\n )\n","sub_path":"src/waescrow/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"231853698","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\nimport re\nimport os\nimport sys\nsys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + \"/lib\" )\n\n## Load WeChall Exploit Framework\nimport wcef\n\n## Pass over a string of Cookie to the WeChall object if it is given from command line\nif len(sys.argv) == 2:\n\twechall = wcef.WeChall(sys.argv[1])\nelse:\n\twechall = wcef.WeChall()\n\n## Open the problem page as a Mechanize object\nproblem_url = \"http://www.wechall.net/challenge/training/encodings/ascii/index.php\"\nwechall.open( problem_url )\n\n## Extract a line that includes a sequence of ASCII codes from the problem page\nascii_code = re.search(r'(\\d{1,3}, ){3,}\\d{1,3}', wechall.text).group().split(\", \")\n\n## Convert each ASCII codes into characters\nascii_text = ''.join( [chr(int(code)) for code in ascii_code] )\n\n## Extract only the necessary part from the solution text\nsolution = ascii_text.split(\": \")[1]\n\n## Submit solution to the WeChall server\nwechall.submit(solution)\n\n## Print the solution\nprint(\"The solution was: %s\" % solution)\n\n# [EOF]\n","sub_path":"exploit-ascii.py","file_name":"exploit-ascii.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"620827466","text":"from ftw.zipexport.generation import ZipGenerator\nfrom ftw.zipexport.utils import normalize_path\nfrom opengever.meeting.command import AgendaItemListOperations\nfrom opengever.meeting.command import CreateGeneratedDocumentCommand\nfrom 
opengever.meeting.command import MergeDocxProtocolCommand\nfrom opengever.meeting.command import ProtocolOperations\nfrom opengever.meeting.exceptions import AgendaItemListMissingTemplate\nfrom opengever.meeting.interfaces import IMeetingWrapper\nfrom Products.CMFPlone.utils import safe_unicode\nfrom Products.Five.browser import BrowserView\nfrom StringIO import StringIO\nfrom ZPublisher.Iterators import filestream_iterator\nimport json\nimport os\nimport pytz\n\n\nclass MeetingZipExport(BrowserView):\n \"\"\"Iterate over meeting contents and return results in a .zip archive.\"\"\"\n\n def __init__(self, context, request):\n super(MeetingZipExport, self).__init__(context, request)\n self.model = self.context.model\n\n def __call__(self):\n # Download zip file\n return self.generate_zip()\n\n def visible_in_actions_menu(self):\n \"\"\"Returns ``True`` when the zip export action should be displayed\n in the actions menu.\n\n The action should only appear when we are on a meeting view..\n \"\"\"\n return IMeetingWrapper.providedBy(self.context)\n\n def generate_zip(self):\n response = self.request.response\n\n with ZipGenerator() as generator:\n # Protocol\n generator.add_file(*self.get_protocol())\n\n # Agenda items\n self.add_agenda_items_attachments(generator)\n self.add_agenda_item_proposal_documents(generator)\n\n # Agenda items list\n try:\n generator.add_file(*self.get_agendaitem_list())\n except AgendaItemListMissingTemplate:\n pass\n\n generator.add_file(*self.get_meeting_json())\n\n # Return zip\n zip_file = generator.generate()\n filename = '{}.zip'.format(normalize_path(self.model.title))\n response.setHeader(\n \"Content-Disposition\",\n 'inline; filename=\"{0}\"'.format(\n safe_unicode(filename).encode('utf-8')))\n response.setHeader(\"Content-type\", \"application/zip\")\n response.setHeader(\n \"Content-Length\",\n os.stat(zip_file.name).st_size)\n\n return filestream_iterator(zip_file.name, 'rb')\n\n def get_protocol(self):\n if self.model.has_protocol_document():\n protocol = self.model.protocol_document.resolve_document()\n protocol_modified = protocol.modified().asdatetime().astimezone(\n pytz.utc)\n\n if self.model.modified < protocol_modified:\n # Return current protocol\n return (u'{}.docx'.format(safe_unicode(protocol.Title())),\n protocol.file.open())\n\n # Create new protocol\n operations = ProtocolOperations()\n command = MergeDocxProtocolCommand(\n self.context,\n self.model,\n operations,\n lock_document_after_creation=False)\n\n filename = u'{}.docx'.format(operations.get_title(self.model))\n return (filename, StringIO(command.generate_file_data()))\n\n def add_agenda_item_proposal_documents(self, generator):\n for agenda_item in self.model.agenda_items:\n if not agenda_item.has_document:\n continue\n\n document = agenda_item.resolve_document()\n if not document:\n continue\n\n path = agenda_item.get_document_filename_for_zip(document)\n generator.add_file(path, document.file.open())\n\n def add_agenda_items_attachments(self, generator):\n for agenda_item in self.model.agenda_items:\n if not agenda_item.has_submitted_documents():\n continue\n\n for document in agenda_item.proposal.resolve_submitted_documents():\n path = agenda_item.get_document_filename_for_zip(document)\n generator.add_file(path, document.file.open())\n\n def get_agendaitem_list(self):\n if self.model.has_agendaitem_list_document():\n agendaitem_list = self.model.agendaitem_list_document.resolve_document()\n agendaitem_list_modified = agendaitem_list.modified().asdatetime().astimezone(\n 
pytz.utc)\n\n if self.model.modified < agendaitem_list_modified:\n # Return current protocol\n return (u'{}.docx'.format(safe_unicode(agendaitem_list.Title())),\n agendaitem_list.file.open())\n\n # Create new protocol\n operations = AgendaItemListOperations()\n command = CreateGeneratedDocumentCommand(\n self.context,\n self.model,\n operations,\n )\n\n filename = u'{}.docx'.format(operations.get_title(self.model))\n return (filename, StringIO(command.generate_file_data()))\n\n def get_meeting_json(self):\n json_data = {\n 'version': '1.0.0',\n 'meetings': [self.context.get_data_for_zip_export()],\n }\n return 'meeting.json', StringIO(json.dumps(json_data,\n sort_keys=True,\n indent=4))\n","sub_path":"opengever/meeting/browser/meetings/zipexport.py","file_name":"zipexport.py","file_ext":"py","file_size_in_byte":5393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"244747640","text":"class baseTower:\n def __init__(self, canvas, pos):\n self.canvas = canvas\n self.inBattle = False\n self.x, self.y = pos[0], pos[1]\n self.hpbarBackground = canvas.create_rectangle(\n self.x - 25, self.y - 30, self.x + 25, self.y - 40, fill=\"gray\"\n )\n self.hpbar = canvas.create_rectangle(\n self.x - 24, self.y - 31, self.x + 24, self.y - 39, fill=\"red\"\n )\n\n def update(self):\n if not self.inBattle:\n enemy = self.nearEnemy()\n if enemy != None:\n self.attackAction(self.canvas.unitList[enemy])\n\n def const(self):\n difficulty = self.canvas.parent.difficulty.get()\n if difficulty == \"쉬움\":\n return 3\n elif difficulty == \"보통\":\n return 2\n else:\n return 1\n\n def attacked(self, attack):\n if self.HP <= attack:\n self.HP = 0\n self.canvas.parent.money += self.maxHP * self.const() / 50\n self.canvas.delete(self.id)\n self.canvas.delete(self.hpbar)\n self.canvas.delete(self.hpbarBackground)\n self.inBattle = False\n del self.canvas.towerList[self.canvas.towerList.index(self.parent)]\n else:\n self.HP -= attack\n self.canvas.parent.money += attack * (self.const() - 1) / 5\n (x1, y1, x2, y2) = self.canvas.coords(self.hpbar)\n x2 = x1 + self.HP / self.maxHP * 48\n self.canvas.coords(self.hpbar, x1, y1, x2, y2)\n\n def attackAction(self, unit):\n if unit in self.canvas.unitList and self.HP > 0 and self.distance(unit.unit) < self.range:\n unit.unit.attacked(self.attack)\n self.canvas.after(int(1000 * self.attackRate), lambda: self.attackAction(unit))\n else:\n self.inBattle = False\n\n def distance(self, other):\n x1, y1, x2, y2 = self.canvas.coords(self.id)\n z1, w1, z2, w2 = other.canvas.coords(other.id)\n return 0.5 * ((x1 + x2 - z1 - z2) ** 2 + (y1 + y2 - w1 - w2) ** 2) ** 0.5\n\n def nearEnemy(self):\n if self.canvas.unitList:\n dist = [-1, self.range]\n for i in range(len(self.canvas.unitList)):\n unit = self.canvas.unitList[i].unit\n newdist = self.distance(unit)\n if newdist < dist[1]:\n dist[1] = newdist\n dist[0] = i\n if dist[0] != -1:\n self.inBattle = True\n return dist[0]\n","sub_path":"tower/baseTower.py","file_name":"baseTower.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"214476741","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2019/3/29 17:21\r\n# @Author : Sidian\r\n# @Email : sidian305@163.com\r\n# @File : 002.py\r\n# @Software: PyCharm\r\n'''\r\n2. 
The Three-Color Ball Problem\r\nThere are balls of three colours: 3 red, 3 yellow and 6 green.\r\nMix the 12 balls together in a box, draw 8 of them at random, and compute every possible colour combination of the drawn balls.\r\n'''\r\nred = ['r0','r1','r2']\r\nyellow = ['y0','y1','y2']\r\ngreen = ['g0','g1','g2','g3','g4','g5']\r\nfor r in red:\r\n for y in yellow:\r\n for g in green:\r\n print(r,y,g)\r\n# The approach above is wrong: it only lists one ball of each colour, not all the ways 8 balls can be drawn. Keep thinking.\r\n\r\nfor red in range(0, 4):\r\n for yellow in range(0, 4):\r\n for green in range(2, 7):\r\n if red + yellow + green == 8:\r\n # Note: the line below is not string concatenation, so no \"+\" is needed\r\n print(red, '\t', yellow, '\t', green)","sub_path":"009/002.py","file_name":"002.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"107927514","text":"\"\"\"Raymond Healy\"\"\"\r\n\r\ndef divisible(a, b):\r\n if not (a > 0) or not (b > 0):\r\n print(\"Inputs must be positive integers!\")\r\n elif a == b:\r\n print(\"Both inputs are equal\")\r\n elif a > b:\r\n if a % b == 0:\r\n print(a, \" is evenly divisible by \", b)\r\n else:\r\n print(a, \" is not evenly divisible by \", b)\r\n else:\r\n if b % a == 0:\r\n print(b, \" is evenly divisible by \", a)\r\n else:\r\n print(b, \" is not evenly divisible by \", a)\r\n\r\n\r\ndef test_divisible():\r\n divisible(1, 2)\r\n divisible(2000, 150)\r\n divisible(20, 40)\r\n divisible(123123, 213123)\r\n divisible(7918470982137, 123412349128469128734)\r\n divisible(8754, 12341)\r\n\r\n\r\ndef squared(a, b):\r\n if a ** 2 == b:\r\n print(a, \" squared is \", b)\r\n else:\r\n print(a, \" squared is not \", b)\r\n\r\n\r\ndef test_squared():\r\n squared(1, 1)\r\n squared(2, 4)\r\n squared(15, 225)\r\n squared(1, 1)\r\n squared(7, 98)\r\n squared(12, 123123)\r\n\r\n\r\ndef main():\r\n test_divisible()\r\n test_squared()\r\n\r\n\r\nmain()\r\n","sub_path":"CS 1/Homeworks/Homework 2/Task 4.py","file_name":"Task 4.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"215299946","text":"import concurrent.futures\nimport itertools\nimport os\nimport subprocess\n\ninputdir = \"GORILA-Images\"\noutputdir = \"GORILA-Inscriptions-Facsimiles\"\nif not os.path.isdir(outputdir):\n os.makedirs(outputdir)\n\ndef processFile(file_components):\n file_name = file_components[1]\n file_number = int(file_name[12:15])\n volume = file_name[7:11]\n if volume not in image_start:\n return \"\"\n if file_number < image_start[volume]:\n return \"\"\n\n outputdir = file_components[0]\n outputfile = file_components[0] + os.sep + file_components[1]\n inputfile = inputdir + os.sep + file_name\n print(\"Started \" + inputfile)\n subprocess.check_call(['./multicrop2', '-d', '900', inputfile, outputfile], stdout=subprocess.DEVNULL)\n return \"Completed \" + inputfile\n\nimage_start = { \"Vol1\": 38, \"Vol2\": 64, \"Vol3\" : 26}\n#image_start = { \"Vol3\" : 26}\nfor subdir, dirs, files in os.walk(inputdir):\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for result in executor.map(processFile, zip(itertools.repeat(outputdir), files)):\n print(result)\n","sub_path":"000-Manage-Images/000 - Old/010 - Vols 1 - 3/020-split_gorila_images_with_multicrop.py","file_name":"020-split_gorila_images_with_multicrop.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"155430543","text":"\n# import the necessary packages\nimport os\nimport cv2\nimport imutils\nimport argparse\nimport datetime\nimport numpy as np\nfrom modules import distancing_config 
as config\nfrom modules.detection import detect_people\nfrom scipy.spatial import distance as dist\n\n\n\n# Construct the argument parser and parse the arguments for command line operations\nap = argparse.ArgumentParser()\nap.add_argument(\"-o\", \"--output\", type=str, default=\"\",\n\thelp=\"path to (optional) output video file\")\nargs = vars(ap.parse_args())\n\nlabelsPath = \"./yolov3/classes.names\"\nLABELS = open(labelsPath).read().strip().split(\"\\n\")\n\nnp.random.seed(42)\nCOLORS = np.random.randint(0, 255, size=(len(LABELS), 3),\n\tdtype=\"uint8\")\n\nweightsPath = \"./yolov3/yolov3_custom_final.weights\"\nconfigPath = \"./yolov3/yolov3_test.cfg\"\n\nthreshold=170\n\n# Load YOLOv3 object detector trained on COCO dataset (80 classes)\nprint(\"Loading YOLOv3 weights from the disk...\")\nnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\n\n\n# Check if we are going to use GPU\nif config.USE_GPU:\n\t# Set CUDA as the preferable backend and target\n\tprint(\"Setting-up preferable backend and target to CUDA...\")\n\tnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\n\tnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n\n# Determine the *output* layer names that is needed from YOLOv3\nln = net.getLayerNames()\nln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n#Initialize the video stream and pointer to output video file\nprint(\"Accessing video stream...\")\nvs = cv2.VideoCapture('./testing/new_clip_1.mp4')\nwriter = None\n\n\nfps_start = datetime.datetime.now()\nfps = 0\ncount_frames = 0\n\n# loop over the video stream\nwhile True:\n\t# read video stream\n\t(grabbed, frame) = vs.read()\n\n\t# if the frame was not grabbed, terminate the loop\n\tif not grabbed:\n\t\tbreak\n\n\t# resize frame and then detect people in the frame\n\tframe = imutils.resize(frame, width=700)\n\tcount_frames = count_frames + 1\n\tresults = detect_people(frame, net, ln, personIdx=LABELS.index(\"Actors\"))\n\n\t# initialize the set of indexes that violate the minimum social distance\n\tviolate = set()\n\n\t# ensure there are *at least* two people detections (required in order to compute our pairwise distance maps)\n\tif len(results) >= 2:\n\t\t# extract all centroids from the results and compute the Euclidean distances between all pair of the centroids\n\t\tcentroids = np.array([r[2] for r in results])\n\t\tD = dist.cdist(centroids, centroids, metric=\"euclidean\")\n\n\t\t# loop over the upper triangular of the distance matrix\n\t\tfor i in range(0, D.shape[0]):\n\t\t\tfor j in range(i + 1, D.shape[1]):\n\t\t\t\t# check if the distance between any two centroid pairs is less than the configured number of pixels\n\t\t\t\tif D[i, j] < threshold:\n\t\t\t\t\t# update violation set with the indexes of the centroids\n\t\t\t\t\tviolate.add(i)\n\t\t\t\t\tviolate.add(j)\n\n\t# loop over the results\n\tfor (i, (prob, bbox, centroid)) in enumerate(results):\n\t\t# extract the bounding box and centroid coordinates, then initialize the color of the annotation\n\t\t(startX, startY, endX, endY) = bbox\n\t\t(cX, cY) = centroid\n\t\tcolor = (0, 255, 0)\n\n\t\t# if index pair exist in the violation set - update the color\n\t\tif i in violate:\n\t\t\tcolor = (0, 0, 255)\n\n\t\t# draw (1) a bounding box around the person and (2) the centroid coordinates of the person,\n\t\tcv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\n\n\t#Show social distancing violations on the output frame\n\ttext = \"Violations: {}\".format(len(violate))\n\n\tfps_end = datetime.datetime.now()\n\tabs_time = fps_end - 
fps_start\n\tif abs_time.seconds == 0:\n\t\tfps = 0.0\n\telse:\n\t\tfps = (count_frames / abs_time.seconds)\n\tcurrent_fps = \"FPS: {:.2f}\".format(fps)\n\n\tif (fps*480)<=0:\n\t\tlat = 0.0\n\telse:\n\t\tlat = np.around((1 / (fps*480)),5)\n\tlatency = \"Latency: {}\".format(lat)\n\n\tif (len(results))==0:\n\t\tpred = 0\n\telse:\n\t\tpred = len(results)\n\tpredict = \"Total People: {}\".format(pred)\n\n\n\tcv2.putText(frame, predict, (2, frame.shape[0] - 61), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 255), 2)\n\tcv2.putText(frame, current_fps, (2, frame.shape[0] - 41), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 165, 255), 2)\n\tcv2.putText(frame, latency, (2, frame.shape[0] - 23), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (255, 255, 0), 2)\n\tcv2.putText(frame, text, (2, frame.shape[0] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)\n\n\n\n\t# if output file path has been supplied and the video writer has not been initialized, do it now\n\tif args[\"output\"] != \"\" and writer is None:\n\t\t# initialize our video writer\n\t\tfourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n\t\twriter = cv2.VideoWriter(args[\"output\"], fourcc, 25,\n\t\t\t(frame.shape[1], frame.shape[0]), True)\n\n\t# if the video writer is not None, write the frame to the output video file\n\tif writer is not None:\n\t\twriter.write(frame)\n\nvs.release()\ncv2.destroyAllWindows()\n","sub_path":"social_distancing_violations_trained_yolov3/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"608844373","text":"'''\nhttps://gist.github.com/ramusus/4343464\nadminreverse from here http://djangosnippets.org/snippets/2032/\nchanged for working with ForeignKeys\n'''\n'''\nreverseadmin\n============\nModule that makes django admin handle OneToOneFields in a better way.\n \nA common use case for one-to-one relationships is to \"embed\" a model\ninside another one. For example, a Person may have multiple foreign\nkeys pointing to an Address entity, one home address, one business\naddress and so on. Django admin displays those relations using select\nboxes, letting the user choose which address entity to connect to a\nperson. A more natural way to handle the relationship is using\ninlines. However, since the foreign key is placed on the owning\nentity, django admins standard inline classes can't be used. 
Which is\nwhy I created this module that implements \"reverse inlines\" for this\nuse case.\n \nExample:\n \n from django.db import models\n class Address(models.Model):\n street = models.CharField(max_length = 255)\n zipcode = models.CharField(max_length = 10)\n city = models.CharField(max_length = 255)\n class Person(models.Model):\n name = models.CharField(max_length = 255)\n business_addr = models.ForeignKey(Address,\n related_name = 'business_addr')\n home_addr = models.OneToOneField(Address, related_name = 'home_addr')\n other_addr = models.OneToOneField(Address, related_name = 'other_addr')\n \nThis is how standard django admin renders it:\n \n http://img9.imageshack.us/i/beforetz.png/\n \nHere is how it looks when using the reverseadmin module:\n \n http://img408.imageshack.us/i/afterw.png/\n \nYou use reverseadmin in the following way:\n \n from django.contrib import admin\n from django import forms\n from models import Person\n from reverseadmin import ReverseModelAdmin\n class AddressForm(forms.ModelForm):\n pass\n class PersonAdmin(ReverseModelAdmin):\n inline_type = 'tabular'\n inline_reverse = ('business_addr', ('home_addr', AddressForm), ('other_addr', {\n 'form': OtherForm,\n 'exclude': ()\n }))\n admin.site.register(Person, PersonAdmin)\n \ninline_type can be either \"tabular\" or \"stacked\" for tabular and\nstacked inlines respectively.\n \nThe module is designed to work with Django 1.1.1. Since it hooks into\nthe internals of the admin package, it may not work with later Django\nversions.\n'''\nfrom django.contrib.admin import helpers, ModelAdmin\nfrom django.contrib.admin.options import InlineModelAdmin\nfrom django.contrib.admin.util import flatten_fieldsets\nfrom django.db import transaction, models\nfrom django.db.models import OneToOneField, ForeignKey\nfrom django.forms import ModelForm\nfrom django.forms.formsets import all_valid\nfrom django.forms.models import BaseModelFormSet, modelformset_factory\nfrom django.utils.encoding import force_unicode\nfrom django.utils.functional import curry\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext as _\nfrom django.core.exceptions import PermissionDenied\n \nclass ReverseInlineFormSet(BaseModelFormSet):\n '''\n A formset with either a single object or a single empty\n form. 
Since the formset is used to render a required OneToOne\n relation, the forms must not be empty.\n '''\n model = None\n parent_fk_name = ''\n def __init__(self,\n data = None,\n files = None,\n instance = None,\n prefix = None,\n queryset = None,\n save_as_new = False):\n try:\n object = getattr(instance, self.parent_fk_name)\n except AttributeError:\n qs = self.model.objects.filter(pk = -1)\n self.extra = 1\n else:\n qs = self.model.objects.filter(pk = object.id)\n super(ReverseInlineFormSet, self).__init__(data, files,\n prefix = prefix,\n queryset = qs)\n for form in self.forms:\n form.empty_permitted = False\n \ndef reverse_inlineformset_factory(parent_model,\n model,\n parent_fk_name,\n form = ModelForm,\n fields = None,\n exclude = None,\n formfield_callback = lambda f: f.formfield()):\n kwargs = {\n 'form': form,\n 'formfield_callback': formfield_callback,\n 'formset': ReverseInlineFormSet,\n 'extra': 0,\n 'can_delete': False,\n 'can_order': False,\n 'fields': fields,\n 'exclude': exclude,\n 'max_num': 1,\n 'fields': '__all__',\n }\n FormSet = modelformset_factory(model, **kwargs)\n FormSet.parent_fk_name = parent_fk_name\n return FormSet\n \nclass ReverseInlineModelAdmin(InlineModelAdmin):\n '''\n Use the name and the help_text of the owning models field to\n render the verbose_name and verbose_name_plural texts.\n '''\n def __init__(self,\n parent_model,\n parent_fk_name,\n model, admin_site,\n inline_type):\n self.template = 'admin/edit_inline/%s.html' % inline_type\n self.parent_fk_name = parent_fk_name\n self.model = model\n field_descriptor = getattr(parent_model, self.parent_fk_name)\n field = field_descriptor.field\n \n self.verbose_name_plural = field.verbose_name.title()\n self.verbose_name = field.help_text\n if not self.verbose_name:\n self.verbose_name = self.verbose_name_plural\n super(ReverseInlineModelAdmin, self).__init__(parent_model, admin_site)\n \n def get_formset(self, request, obj = None, **kwargs):\n if self.declared_fieldsets:\n fields = flatten_fieldsets(self.declared_fieldsets)\n else:\n fields = None\n if self.exclude is None:\n exclude = []\n else:\n exclude = list(self.exclude)\n # if exclude is an empty list we use None, since that's the actual\n # default\n exclude = (exclude + kwargs.get(\"exclude\", [])) or None\n defaults = {\n \"form\": self.form,\n \"fields\": fields,\n \"exclude\": exclude,\n \"formfield_callback\": curry(self.formfield_for_dbfield, request=request),\n }\n defaults.update(kwargs)\n return reverse_inlineformset_factory(self.parent_model,\n self.model,\n self.parent_fk_name,\n **defaults)\n \nclass ReverseModelAdmin(ModelAdmin):\n '''\n Patched ModelAdmin class. 
The add_view method is overridden to\n allow the reverse inline formsets to be saved before the parent\n model.\n '''\n def __init__(self, model, admin_site):\n \n super(ReverseModelAdmin, self).__init__(model, admin_site)\n if self.exclude is None:\n self.exclude = []\n \n inline_instances = []\n for field_name in self.inline_reverse:\n \n kwargs = {}\n if isinstance(field_name, tuple):\n if isinstance(field_name[1], dict):\n kwargs = field_name[1]\n elif isinstance(field_name[1], ModelForm):\n kwargs['form'] = field_name[1]\n field_name = field_name[0]\n \n field = model._meta.get_field(field_name)\n if isinstance(field, (OneToOneField, ForeignKey)):\n name = field.name\n parent = field.related.parent_model\n inline = ReverseInlineModelAdmin(model,\n name,\n parent,\n admin_site,\n self.inline_type)\n if kwargs:\n inline.__dict__.update(kwargs)\n inline_instances.append(inline)\n self.exclude.append(name)\n self.tmp_inline_instances = inline_instances\n \n def get_inline_instances(self, request, obj=None):\n return self.tmp_inline_instances + super(ReverseModelAdmin, self).get_inline_instances(request, obj)\n","sub_path":"utils/reverseadmin.py","file_name":"reverseadmin.py","file_ext":"py","file_size_in_byte":8417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"487487104","text":"from BaseControlPlots import BaseControlPlots\n\n# Requirements:\n# event.muons\n# event.electrons\n\nclass LeptonControlPlots(BaseControlPlots):\n \"\"\"A class to create control plots for leptons\"\"\"\n\n def __init__(self, dir=None, dataset=None, mode=\"plots\"):\n # create output file if needed. If no file is given, it means it is delegated\n BaseControlPlots.__init__(self, dir=dir, purpose=\"leptons\", dataset=dataset, mode=mode)\n\n def beginJob(self):\n # declare histograms\n self.add(\"NMuons\",\";N muons (p_{T}>20 GeV);N\",10,0,10)\n self.add(\"Muon1Pt\",\";Muon Pt;N\",100,0,500)\n self.add(\"Muon1Eta\",\";Muon Eta;N\",50,-2.5,2.5)\n self.add(\"Muon2Pt\",\";Muon Pt;N\",100,0,500)\n self.add(\"Muon2Eta\",\";Muon Eta;N\",50,-2.5,2.5)\n self.add(\"NElectrons\",\";N electrons (p_{T}>20 GeV);N\",10,0,10)\n self.add(\"Electron1Pt\",\";Electron Pt;N\",100,0,500)\n self.add(\"Electron1Eta\",\";Electron Eta;N\",50,-2.5,2.5)\n self.add(\"Electron2Pt\",\";Electron Pt;N\",100,0,500)\n self.add(\"Electron2Eta\",\";Electron Eta;N\",50,-2.5,2.5)\n\n def process(self, event):\n #get information\n result = { }\n result[\"NMuons\"] = len([m for m in event.muons if m.PT>20])\n result[\"Muon1Pt\"] = [ ]\n result[\"Muon1Eta\"] = [ ]\n result[\"Muon2Pt\"] = [ ]\n result[\"Muon2Eta\"] = [ ]\n if event.muons.GetEntries()>0:\n result[\"Muon1Pt\"].append(event.muons[0].PT)\n result[\"Muon1Eta\"].append(event.muons[0].Eta)\n if event.muons.GetEntries()>1:\n result[\"Muon2Pt\"].append(event.muons[1].PT)\n result[\"Muon2Eta\"].append(event.muons[1].Eta)\n result[\"NElectrons\"] = len([e for e in event.electrons if e.PT>20])\n result[\"Electron1Pt\"] = [ ]\n result[\"Electron1Eta\"] = [ ]\n result[\"Electron2Pt\"] = [ ]\n result[\"Electron2Eta\"] = [ ]\n if event.electrons.GetEntries()>0:\n result[\"Electron1Pt\"].append(event.electrons[0].PT)\n result[\"Electron1Eta\"].append(event.electrons[0].Eta)\n if event.electrons.GetEntries()>1:\n result[\"Electron2Pt\"].append(event.electrons[1].PT)\n result[\"Electron2Eta\"].append(event.electrons[1].Eta)\n return result\n\nif __name__==\"__main__\":\n import sys\n from DelphesAnalysis.BaseControlPlots import runTest\n 
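 # runTest (from the shared BaseControlPlots helpers) is expected to loop
 # over the events in the ROOT file named on the command line and fill the
 # histograms declared in beginJob() above.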
runTest(sys.argv[1], LeptonControlPlots())\n\n","sub_path":"python/Examples/LeptonControlPlots_old2.py","file_name":"LeptonControlPlots_old2.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"424750131","text":"from rest_framework.response import Response\nfrom rest_framework import viewsets\nfrom rest_framework import status\n\n######################## ABSTRACT VIEWS #################################\nclass AbstractListApiView(viewsets.ModelViewSet):\n lookup_url_kwarg = \"username\"\n\n def get(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n serializer = self.serializer_class(queryset, many=True)\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def list(self, request, *args, **kwargs):\n queryset = self.filter_queryset(self.get_queryset()) \n serializer = self.serializer_class(queryset, many=True)\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n \n def create(self, request, *args, **kwargs):\n data = request.data.get(self.json_name, {})\n username = self.kwargs.get(self.lookup_url_kwarg)\n serializer = self.serializer_class(\n data=data,\n context={'username': username, 'created_by': request.user.user_id},\n partial=True,\n # many=True\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(\"Success\", status=status.HTTP_200_OK)\n\n def get_queryset(self, *args, **kwargs):\n username = self.kwargs.get(self.lookup_url_kwarg)\n queryset = self.class_name.objects.filter(user__user_name__exact=username).all()\n queryset = queryset.filter(delete_ind=\"N\")\n return queryset","sub_path":"api/views/common_views.py","file_name":"common_views.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"64941592","text":"import matplotlib.pyplot as plt\nimport random\n\n\ndef showChart(x, y, title, range_, limit=5000):\n fig, ax1 = plt.subplots()\n c1 = ''\n c2 = ''\n c3 = ''\n c4 = ''\n c5 = ''\n c6 = ''\n c7 = ''\n c8 = ''\n c9 = ''\n c10 = ''\n c11 = ''\n c12 = ''\n c13 = ''\n c14 = ''\n comp1 = 0\n comp2 = 1\n # comp3 = 3\n\n colors = ['magenta', 'turquoise', 'darkorange', 'navajowhite', 'salmon'\n , 'azure', 'blue', 'brown', 'cadetblue', 'limegreen', 'maroon'\n , 'cornsilk', 'gold', 'black']\n\n for i in random.sample(range(range_), limit):\n if y[i] == 'bg':\n c1 = ax1.scatter(x[i, comp1], x[i, comp2], label='bg', color=colors[0])\n if y[i] == 'mk':\n c2 = ax1.scatter(x[i, comp1], x[i, comp2], label='mk', color=colors[1])\n if y[i] == 'bs':\n c3 = ax1.scatter(x[i, comp1], x[i, comp2], label='bs', color=colors[2])\n if y[i] == 'hr':\n c4 = ax1.scatter(x[i, comp1], x[i, comp2], label='hr', color=colors[3])\n if y[i] == 'sr':\n c5 = ax1.scatter(x[i, comp1], x[i, comp2], label='sr', color=colors[4])\n if y[i] == 'cz':\n c6 = ax1.scatter(x[i, comp1], x[i, comp2], label='cz', color=colors[5])\n if y[i] == 'sk':\n c7 = ax1.scatter(x[i, comp1], x[i, comp2], label='sk', color=colors[6])\n if y[i] == 'es-AR':\n c8 = ax1.scatter(x[i, comp1], x[i, comp2], label='es_AR', color=colors[7])\n if y[i] == 'es-ES':\n c9 = ax1.scatter(x[i, comp1], x[i, comp2], label='es_ES', color=colors[8])\n if y[i] == 'pt-BR':\n c10 = ax1.scatter(x[i, comp1], x[i, comp2], label='pt-BR', color=colors[9])\n if y[i] == 'pt-PT':\n c11 = ax1.scatter(x[i, 
comp1], x[i, comp2], label='pt-PT', color=colors[10])\n if y[i] == 'id':\n c12 = ax1.scatter(x[i, comp1], x[i, comp2], label='id', color=colors[11])\n if y[i] == 'my':\n c13 = ax1.scatter(x[i, comp1], x[i, comp2], label='my', color=colors[12])\n if y[i] == 'xx':\n c14 = ax1.scatter(x[i, comp1], x[i, comp2], label='xx', color=colors[13])\n\n scatters = [c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14]\n\n ax1.legend(handles=scatters)\n ax1.grid(True)\n ax1.set_title(title)\n plt.show()","sub_path":"show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"415632134","text":"import json\n\nfrom flask import Flask, Response, request\nfrom flask_cors import CORS\nfrom flask.json import JSONEncoder\nfrom decimal import Decimal\nfrom datetime import datetime, date\nfrom response import *\n\nfrom view import RankView, CRDView\n\n\nclass CustomJSONEncoder(JSONEncoder):\n def default(self, obj):\n if isinstance(obj, set):\n return list(obj)\n if isinstance(obj, Decimal):\n return float(obj)\n if isinstance(obj, bytes):\n return obj.decode(\"utf-8\")\n if isinstance(obj, datetime):\n return obj.strftime('%Y-%m-%d %H:%M:%S')\n if isinstance(obj, date):\n return obj.strftime('%Y-%m-%d')\n return JSONEncoder.default(self, obj)\n\ndef create_app():\n# def create_app(test_config=None):\n app = Flask(__name__)\n app.json_encoder = CustomJSONEncoder\n app.config.from_pyfile('config.py')\n app.register_blueprint(RankView.rank_app)\n app.register_blueprint(CRDView.crd_app)\n CORS(app, resources={'*': {'origins': '*'}}, expose_header='Authorization')\n return app\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"583454482","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2018-2020 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis contains the utility objects used by the StellarGraph library.\n\n\"\"\"\n\nimport warnings\nimport sys\nimport types\n\nfrom .. 
import calibration, ensemble\nfrom ..calibration import *\nfrom ..ensemble import *\nfrom ..interpretability import *\nfrom .history import *\n\n_E = \"ensemble\"\n_C = \"calibration\"\n_I = \"interpretability\"\n\n_MAPPING = {\n # modules\n \"calibration\": (None, calibration),\n \"ensemble\": (None, ensemble),\n \"saliency_maps\": (None, saliency_maps),\n \"integrated_gradients\": (None, saliency_maps.integrated_gradients),\n \"integrated_gradients_gat\": (None, saliency_maps.integrated_gradients_gat),\n \"saliency_gat\": (None, saliency_maps.saliency_gat),\n # calibration\n \"IsotonicCalibration\": (_C, IsotonicCalibration),\n \"TemperatureCalibration\": (_C, TemperatureCalibration),\n \"expected_calibration_error\": (_C, expected_calibration_error),\n \"plot_reliability_diagram\": (_C, plot_reliability_diagram),\n # ensembles\n \"Ensemble\": (_E, Ensemble),\n \"BaggingEnsemble\": (_E, BaggingEnsemble),\n # interpretability\n \"IntegratedGradients\": (_I, IntegratedGradients),\n \"IntegratedGradientsGAT\": (_I, IntegratedGradientsGAT),\n \"GradientSaliencyGAT\": (_I, GradientSaliencyGAT),\n}\n\n\nclass _Wrapper(types.ModuleType):\n def __init__(self, current):\n super().__init__(current.__name__, current.__doc__)\n self.__package__ = current.__package__\n self.__loader__ = current.__loader__\n self.__path__ = current.__path__\n\n plot_history = staticmethod(plot_history)\n\n def __getattr__(self, attr):\n try:\n new_module_name, new_value = _MAPPING[attr]\n except KeyError:\n # don't know about it, so do the normal access\n return super().__getattribute__(attr)\n else:\n # this attribute looks like one of the deprecated ones!\n if isinstance(new_value, types.ModuleType):\n new_location = new_value.__name__\n else:\n new_location = f\"stellargraph.{new_module_name}.{new_value.__name__}\"\n\n warnings.warn(\n f\"'stellargraph.utils.{attr}' has been moved to '{new_location}'\",\n DeprecationWarning,\n )\n return new_value\n\n\nsys.modules[__name__] = _Wrapper(sys.modules[__name__])\n","sub_path":"stellargraph/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"92931593","text":"\nimport numpy as np\nimport torch\nfrom siamese_network import SIAMESE\nimport cv2\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms\n\n\nall_error = []\nall_error_2 = []\nsample_length = 80\nwrite_video = 1\nshow_hr = 1\n\nmodel = SIAMESE()\nmodel.load_state_dict(torch.load('./model/Siamese_noShareWeights -24 -11.840457027668016.pkl'))\nmodel.cuda()\nmodel.eval()\n# print(model)\n\n#total_path_face = np.load('./npy_path/test/face.npy')\ntotal_path_forehead = np.load('./save_npy/all_val_forehead.npy')\n#total_path_check = np.load('./npy_path/test/check.npy')\ntotal_gt = np.load('./save_npy/all_valid_gts.npy')\n#print(total_path_forehead)\n\n\n\n\nresult_path = './result/'\ntest_fold = './test_video/'\n\n\ngt_array = []\nans_array = []\nall_e = 0\nall_e2 = 0\n\n\nfor nth_subject in range(len(total_path_forehead)):\n forehead_paths = total_path_forehead[nth_subject]\n gt = total_gt[nth_subject]\n\n error = []\n forehead_imgs = []\n #check_imgs = []\n\n transform = transforms.Compose([\n transforms.ToTensor()])\n # ---------------------------\n #change video\n # ---------------------------\n if write_video == 1:\n if '/0.jpg' in total_path_forehead[nth_subject][0]:\n if nth_subject != 0:\n print(predict_array)\n plt.subplot(211)\n 
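 # Top panel: predicted heart rate (red) against the ground truth (green)
 # for the subject just finished; bottom panel: per-frame absolute error
 # next to the ground-truth trace (what the plot calls below draw).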
plt.plot(predict_array, 'r')\n plt.plot(gt_array, 'g')\n plt.legend((nth_subject,'predict', 'ground truth'), shadow=True, loc=(0.01, 0.01))\n plt.subplot(212)\n plt.plot(error_array, 'r')\n plt.plot(ans_array, 'g')\n plt.legend(('error', 'ground truth'), shadow=True, loc=(0.01, 0.01))\n plt.pause(0.01)\n plt.savefig(result_path + fold_id + '_Siamese_result.jpg')\n plt.close()\n predict_array = []\n ans_array = []\n gt_array = []\n error_array = []\n pp = total_path_forehead[nth_subject][0]\n print(pp)\n #if gt>=100:\n # fold_id = pp[30:-37]\n #else:\n # fold_id = pp[15:-36]\n #if nth_subject\n fold_id = pp[30:-11]\n print(fold_id)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n video_path = result_path + fold_id + '_result.avi'\n videowriter = cv2.VideoWriter(video_path, fourcc, 30.0, (480, 640))\n # write_video_forehead = cv2.VideoWriter(forehead_video_path, fourcc, 20.0, (140, 40))\n # ---------------------------\n gt_array.append(gt)\n ans_array.append(gt)\n for nth_img in range(1, len(forehead_paths)):\n forehead_path = total_path_forehead[nth_subject][nth_img]\n #check_path = total_path_check[nth_subject][nth_img]\n #face_path = total_path_face[nth_subject][nth_img]\n\n\n imgBGR = cv2.imread(forehead_path)\n forehead_img = Image.open(forehead_path)\n #check_img = Image.open(check_path)\n forehead_img = transform(forehead_img)\n forehead_img = forehead_img.cuda()\n forehead_img = torch.unsqueeze(forehead_img, 1)\n #check_img = transform(check_img)\n #check_img = check_img.cuda()\n #check_img = torch.unsqueeze(check_img, 1)\n\n if nth_img == 1:\n video_forehead = forehead_img\n #video_check = check_img\n else:\n video_forehead = torch.cat([video_forehead, forehead_img], 1)\n #video_check = torch.cat([video_check, check_img], 1)\n\n # ---------------------------\n # make every 80 frame together\n # ---------------------------\n video_forehead = torch.unsqueeze(video_forehead, 0)\n #video_check = torch.unsqueeze(video_check, 0)\n aa = model(video_forehead)\n aa = aa.squeeze(-1)\n aa = aa.squeeze(-1)\n aa = aa.squeeze(-1)\n aa = aa.squeeze(-1)\n ans = int(aa)\n\n # ---------------------------\n # check the ans\n # ---------------------------\n error = abs(ans - gt)\n predict_array.append(ans)\n error_array.append(error)\n # ---------------------------\n # show the testing result\n # ---------------------------\n # ---------------------------\n # show the testing result\n # ---------------------------\n if show_hr == 1:\n imgBGR = cv2.putText(imgBGR, 'predict : ' + str(np.round(ans, 3)), (5, 15), cv2.FONT_HERSHEY_PLAIN,\n 0.7,\n (0, 255, 255))\n imgBGR = cv2.putText(imgBGR, 'GT : ' + str(int(np.round(gt))), (5, 25), cv2.FONT_HERSHEY_PLAIN, 0.7,\n (0, 255, 255))\n if error > 3:\n color = (0, 0, 255)\n all_e = all_e + 1\n if error > 5:\n all_e2 = all_e2 + 1\n color = (0, 0, 255)\n else:\n color = color = (0, 255, 255)\n imgBGR = cv2.putText(imgBGR, 'error : ' + str(np.round(error, 3)), (5, 35), cv2.FONT_HERSHEY_PLAIN,\n 0.7, color)\n # ---------------------------\n # write video\n # ---------------------------\n if write_video == 1:\n imgBGR = cv2.resize(imgBGR, (480, 640))\n video = videowriter.write(imgBGR)\n # ---------------------------\n\n cv2.imshow('win', imgBGR)\n cv2.waitKey(1)\n\n\n\n all_error.append(np.mean(error_array))\n all_error_2.append((np.mean(error_array) / np.mean(ans_array) * 100))\n\nprint(np.mean(all_error))\nprint(np.mean(all_error_2))\nprint(all_e / len(gt_array))\nprint(all_e2 / 
len(gt_array))\n","sub_path":"RGB_heart_rate_detection/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"621057796","text":"###############################################################################\n#\n# Copyright 2011-2012 Pants Developers (see AUTHORS.txt)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n###############################################################################\n\"\"\"\nStreaming connection channel. Used to develop streaming client and\nserver connection handlers.\n\"\"\"\n\n###############################################################################\n# Imports\n###############################################################################\n\nimport errno\nimport functools\nimport os\nimport re\nimport socket\nimport ssl\nimport struct\n\nfrom pants._channel import _Channel, HAS_IPV6\nfrom pants.engine import Engine\n\n\n###############################################################################\n# Constants\n###############################################################################\n\nRegexType = type(re.compile(\"\"))\nStruct = struct.Struct\n\n\n###############################################################################\n# Logging\n###############################################################################\n\nimport logging\nlog = logging.getLogger(\"pants\")\n\n\n###############################################################################\n# Stream Class\n###############################################################################\n\nclass Stream(_Channel):\n \"\"\"\n The stream-oriented connection channel.\n\n A :class:`~pants.stream.Stream` instance represents either a local\n connection to a remote server or a remote connection to a local\n server over a streaming, connection-oriented protocol such as TCP.\n\n ================= ================================================\n Keyword Argument Description\n ================= ================================================\n engine *Optional.* The engine to which the channel\n should be added. Defaults to the global engine.\n socket *Optional.* A pre-existing socket to wrap. This\n can be a regular :obj:`~socket.socket` or an\n :obj:`~ssl.SSLSocket`. If a socket is not\n provided, a new socket will be created for the\n channel when required.\n ssl_options *Optional.* If provided,\n :meth:`~pants.stream.Stream.startSSL` will be\n called with these options once the stream is\n ready. 
By default, SSL will not be enabled.\n ================= ================================================\n \"\"\"\n SEND_STRING = 0\n SEND_FILE = 1\n SEND_SSL_HANDSHAKE = 2\n\n def __init__(self, **kwargs):\n sock = kwargs.get(\"socket\", None)\n if sock and sock.type != socket.SOCK_STREAM:\n raise TypeError(\"Cannot create a %s with a socket type other than SOCK_STREAM.\"\n % self.__class__.__name__)\n\n _Channel.__init__(self, **kwargs)\n\n # Socket\n self._remote_address = None\n self._local_address = None\n\n # I/O attributes\n self._read_delimiter = None\n self._recv_buffer = \"\"\n self._recv_buffer_size_limit = self._buffer_size\n self._send_buffer = []\n\n # Channel state\n self.connected = False\n self.connecting = False\n self._closing = False\n\n # SSL state\n self.ssl_enabled = False\n self._ssl_enabling = False\n self._ssl_socket_wrapped = False\n self._ssl_handshake_done = False\n self._ssl_call_on_connect = False\n if isinstance(kwargs.get(\"socket\", None), ssl.SSLSocket):\n self._ssl_socket_wrapped = True\n self.startSSL()\n elif kwargs.get(\"ssl_options\", None) is not None:\n self.startSSL(kwargs[\"ssl_options\"])\n\n ##### Properties ##########################################################\n\n @property\n def remote_address(self):\n \"\"\"\n The remote address to which the channel is connected.\n\n By default, this will be the value of ``socket.getpeername`` or\n None. It is possible for user code to override the default\n behaviour and set the value of the property manually. In order\n to return the property to its default behaviour, user code then\n has to delete the value. Example::\n\n # default behaviour\n channel.remote_address = custom_value\n # channel.remote_address will return custom_value now\n del channel.remote_address\n # default behaviour\n \"\"\"\n if self._remote_address is not None:\n return self._remote_address\n elif self._socket:\n try:\n return self._socket.getpeername()\n except socket.error:\n return None\n else:\n return None\n\n @remote_address.setter\n def remote_address(self, val):\n self._remote_address = val\n\n @remote_address.deleter\n def remote_address(self):\n self._remote_address = None\n\n @property\n def local_address(self):\n \"\"\"\n The address of the channel on the local machine.\n\n By default, this will be the value of ``socket.getsockname`` or\n None. It is possible for user code to override the default\n behaviour and set the value of the property manually. In order\n to return the property to its default behaviour, user code then\n has to delete the value. Example::\n\n # default behaviour\n channel.local_address = custom_value\n # channel.local_address will return custom_value now\n del channel.local_address\n # default behaviour\n \"\"\"\n if self._local_address is not None:\n return self._local_address\n elif self._socket:\n try:\n return self._socket.getsockname()\n except socket.error:\n return None\n else:\n return None\n\n @local_address.setter\n def local_address(self, val):\n self._local_address = val\n\n @local_address.deleter\n def local_address(self):\n self._local_address = None\n\n @property\n def read_delimiter(self):\n \"\"\"\n The magical read delimiter which determines how incoming data is\n buffered by the stream.\n\n As data is read from the socket, it is buffered internally by\n the stream before being passed to the\n :meth:`~pants.stream.Stream.on_read` callback. The value of the\n read delimiter determines when the data is passed to the\n callback. 
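For example (``channel`` here stands for any connected ``Stream``;\n        the delimiter value is purely illustrative)::\n\n            channel.read_delimiter = \"\\r\\n\"  # buffer one CRLF-terminated line at a time\n\n        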
Valid values are ``None``, a string, an integer/long,\n a compiled regular expression, or an instance of :class:`struct.Struct`.\n\n When the read delimiter is ``None``, data will be passed to\n :meth:`~pants.stream.Stream.on_read` immediately after it is\n read from the socket. This is the default behaviour.\n\n When the read delimiter is a string, data will be buffered\n internally until that string is encountered in the incoming\n data. All data up to and including the read delimiter is then\n passed to :meth:`~pants.stream.Stream.on_read`.\n\n When the read delimiter is an integer or a long, it is treated\n as the number of bytes to read before passing the data to\n :meth:`~pants.stream.Stream.on_read`.\n\n When the read delimiter is a :class:`struct.Struct` instance, the\n Struct's ``size`` is fully buffered and the data is unpacked using the\n Struct before its sent to :meth:`on_read`. Unlike other types of read\n delimiters, this can result in more than one argument being passed to\n on_read. Example::\n\n import struct\n from pants import Stream\n\n class Example(Stream):\n def on_connect(self):\n self.read_delimiter = struct.Struct(\"!LLH\")\n\n def on_read(self, packet_type, length, id):\n pass\n\n When the read delimiter is a compiled regular expression, there\n are two possible behaviors, selected by the value of\n :attr:`~pants.stream.Stream.regex_search`. If ``regex_search``\n is True, as is default, the delimiter's ``search`` method is\n used, and if a match is found, the string before that match is\n passed to :meth:`~pants.stream.Stream.on_read` while all data up\n to the end of the matched content is removed from the buffer.\n\n If ``regex_search`` is False, the delimiter's ``match`` method\n is used instead, and if a match is found, the match object\n itself will be passed to :meth:`~pants.stream.Stream.on_read`,\n giving you access to the capture groups. Again, all data up to\n the end of the matched content is removed from the buffer.\n\n Attempting to set the read delimiter to any other value will\n raise a :exc:`TypeError`.\n\n The effective use of the read delimiter can greatly simplify the\n implementation of certain protocols.\n \"\"\"\n return self._read_delimiter\n\n @read_delimiter.setter\n def read_delimiter(self, value):\n if value is None or isinstance(value, basestring) or \\\n isinstance(value, RegexType):\n self._read_delimiter = value\n self._recv_buffer_size_limit = self._buffer_size\n\n elif isinstance(value, (int, long)):\n self._read_delimiter = value\n self._recv_buffer_size_limit = max(self._buffer_size, value)\n\n elif isinstance(value, Struct):\n self._read_delimiter = value\n self._recv_buffer_size_limit = max(self._buffer_size, value.size)\n\n else:\n raise TypeError(\"Attempted to set read_delimiter to a value with an invalid type.\")\n\n # Setting these at the class level makes them easy to override on a\n # per-class basis.\n regex_search = True\n _buffer_size = 2 ** 16 # 64kb\n\n @property\n def buffer_size(self):\n \"\"\"\n The maximum size, in bytes, of the internal buffer used for\n incoming data.\n\n When buffering data it is important to ensure that inordinate\n amounts of memory are not used. Setting the buffer size to a\n sensible value can prevent coding errors or malicious use from\n causing your application to consume increasingly large amounts\n of memory. By default, a maximum of 64kb of data will be stored.\n\n The buffer size is mainly relevant when using a string value for\n the :attr:`~pants.stream.Stream.read_delimiter`. 
Because you\n cannot guarantee that the string will appear, having an upper\n limit on the size of the data is appropriate.\n\n If the read delimiter is set to a number larger than the buffer\n size, the buffer size will be increased to accommodate the read\n delimiter.\n\n When the internal buffer's size exceeds the maximum allowed, the\n :meth:`~pants.stream.Stream.on_overflow_error` callback will be\n invoked.\n\n Attempting to set the buffer size to anything other than an\n integer or long will raise a :exc:`TypeError`.\n \"\"\"\n return self._buffer_size\n\n @buffer_size.setter\n def buffer_size(self, value):\n if not isinstance(value, (long, int)):\n raise TypeError(\"buffer_size must be an int or a long\")\n self._buffer_size = value\n if isinstance(self._read_delimiter, (int, long)):\n self._recv_buffer_size_limit = max(value, self._read_delimiter)\n elif isinstance(self._read_delimiter, Struct):\n self._recv_buffer_size_limit = max(value,\n self._read_delimiter.size)\n else:\n self._recv_buffer_size_limit = value\n\n ##### Control Methods #####################################################\n\n def startSSL(self, ssl_options={}):\n \"\"\"\n Enable SSL on the channel and perform a handshake at the next\n opportunity.\n\n SSL is only enabled on a channel once all currently pending data\n has been written. If a problem occurs at this stage,\n :meth:`~pants.stream.Stream.on_ssl_error` is called. Once SSL\n has been enabled, the SSL handshake begins - this typically\n takes some time and may fail, in which case\n :meth:`~pants.stream.Stream.on_ssl_handshake_error` will be\n called. When the handshake is successfully completed,\n :meth:`~pants.stream.Stream.on_ssl_handshake` is called and the\n channel is secure.\n\n Typically, this method is called before\n :meth:`~pants.stream.Stream.connect`. In this case,\n :meth:`~pants.stream.Stream.on_ssl_handshake` will be called\n before :meth:`~pants.stream.Stream.on_connect`. If\n :meth:`~pants.stream.Stream.startSSL` is called after\n :meth:`~pants.stream.Stream.connect`, the reverse is true.\n\n It is possible, although unusual, to start SSL on a channel that\n is already connected and active. In this case, as noted above,\n SSL will only be enabled and the handshake performed after all\n currently pending data has been written.\n\n The SSL options argument will be passed through to\n :func:`ssl.wrap_socket` as keyword arguments - see the\n :mod:`ssl` documentation for further information. You will\n typically want to provide the ``keyfile``, ``certfile`` and\n ``ca_certs`` options. 
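For illustration only (the key and certificate paths below are\n        hypothetical)::\n\n            channel = Stream()\n            channel.startSSL({\"keyfile\": \"server.key\", \"certfile\": \"server.crt\"})\n            channel.connect((\"example.com\", 443))\n\n        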
The ``do_handshake_on_connect`` option\n        **must** be ``False``, or a :exc:`ValueError` will be raised.\n\n        Attempting to enable SSL on a closed channel or a channel that\n        already has SSL enabled on it will raise a :exc:`RuntimeError`.\n\n        Returns the channel.\n\n        ============ ===================================================\n        Arguments    Description\n        ============ ===================================================\n        ssl_options  *Optional.* Keyword arguments to pass to\n                     :func:`ssl.wrap_socket`.\n        ============ ===================================================\n        \"\"\"\n        if self.ssl_enabled or self._ssl_enabling:\n            raise RuntimeError(\"startSSL() called on SSL-enabled %r\" % self)\n\n        if self._closed or self._closing:\n            raise RuntimeError(\"startSSL() called on closed %r\" % self)\n\n        # Work on a copy so that a caller's dict (or the shared default\n        # argument) is never mutated by the setdefault() below.\n        ssl_options = dict(ssl_options)\n        if ssl_options.setdefault(\"do_handshake_on_connect\", False) is not False:\n            raise ValueError(\"SSL option 'do_handshake_on_connect' must be False.\")\n\n        self._ssl_enabling = True\n        self._send_buffer.append((Stream.SEND_SSL_HANDSHAKE, ssl_options))\n\n        if self.connected:\n            self._process_send_buffer()\n\n        return self\n\n    def connect(self, address, native_resolve=True):\n        \"\"\"\n        Connect the channel to a remote socket.\n\n        The given ``address`` is resolved and used by the channel to\n        connect to the remote server. If an error occurs at any stage in\n        this process, :meth:`~pants.stream.Stream.on_connect_error` is\n        called. When a connection is successfully established,\n        :meth:`~pants.stream.Stream.on_connect` is called.\n\n        Addresses can be represented in a number of different ways. A\n        single string is treated as a UNIX address. A single integer is\n        treated as a port and converted to a 2-tuple of the form\n        ``('', port)``. A 2-tuple is treated as an IPv4 address and a\n        4-tuple is treated as an IPv6 address. See the :mod:`socket`\n        documentation for further information on socket addresses.\n\n        If no socket exists on the channel, one will be created with a\n        socket family appropriate for the given address.\n\n        An error will occur during the connection if the given address\n        is not of a valid format or of an inappropriate format for the\n        socket (e.g. if an IP address is given to a UNIX socket).\n\n        Calling :meth:`connect()` on a closed channel or a channel that\n        is already connected will raise a :exc:`RuntimeError`.\n\n        Returns the channel.\n\n        =============== ===============================================\n        Arguments       Description\n        =============== ===============================================\n        address         The remote address to connect to.\n        native_resolve  *Optional.* If True, use Python's builtin\n                        address resolution. 
Otherwise, Pants'\n non-blocking address resolution will be used.\n =============== ===============================================\n \"\"\"\n if self.connected or self.connecting:\n raise RuntimeError(\"connect() called on active %r.\" % self)\n\n if self._closed or self._closing:\n raise RuntimeError(\"connect() called on closed %r.\" % self)\n\n self.connecting = True\n\n address, family = self._format_address(address)\n if native_resolve:\n self._do_connect(address, family)\n else:\n self._resolve_address(address, family, self._do_connect)\n\n return self\n\n def close(self, flush=True):\n \"\"\"\n Close the channel.\n \"\"\"\n if self._closed:\n return\n\n if flush and self._send_buffer:\n self._closing = True\n return\n\n self.read_delimiter = None\n self._recv_buffer = \"\"\n self._send_buffer = []\n\n self.connected = False\n self.connecting = False\n\n self.ssl_enabled = False\n self._ssl_enabling = False\n self._ssl_socket_wrapped = False\n self._ssl_handshake_done = False\n self._ssl_call_on_connect = False\n\n self._safely_call(self.on_close)\n\n self._remote_address = None\n self._local_address = None\n\n _Channel.close(self)\n\n self._closing = False\n\n ##### I/O Methods #########################################################\n\n def write(self, data, flush=False):\n \"\"\"\n Write data to the channel.\n\n Data will not be written immediately, but will be buffered\n internally until it can be sent without blocking the process.\n\n Calling :meth:`write()` on a closed or disconnected channel will\n raise a :exc:`RuntimeError`.\n\n ========== ===================================================\n Arguments Description\n ========== ===================================================\n data A string of data to write to the channel.\n flush *Optional.* If True, flush the internal write\n buffer. See :meth:`~pants.stream.Stream.flush` for\n details.\n ========== ===================================================\n \"\"\"\n if self._closed or self._closing:\n raise RuntimeError(\"write() called on closed %r.\" % self)\n\n if not self.connected:\n raise RuntimeError(\"write() called on disconnected %r.\" % self)\n\n if self._send_buffer and self._send_buffer[-1][0] == Stream.SEND_STRING:\n data_type, existing_data = self._send_buffer.pop(-1)\n data = existing_data + data\n\n self._send_buffer.append((Stream.SEND_STRING, data))\n\n if flush:\n self._process_send_buffer()\n else:\n self._start_waiting_for_write_event()\n\n def write_file(self, sfile, nbytes=0, offset=0, flush=False):\n \"\"\"\n Write a file to the channel.\n\n The file will not be written immediately, but will be buffered\n internally until it can be sent without blocking the process.\n\n Calling :meth:`write_file()` on a closed or disconnected channel\n will raise a :exc:`RuntimeError`.\n\n ========== ====================================================\n Arguments Description\n ========== ====================================================\n sfile A file object to write to the channel.\n nbytes *Optional.* The number of bytes of the file to\n write. If 0, all bytes will be written.\n offset *Optional.* The number of bytes to offset writing\n by.\n flush *Optional.* If True, flush the internal write\n buffer. 
See :meth:`~pants.stream.Stream.flush` for\n details.\n ========== ====================================================\n \"\"\"\n if self._closed or self._closing:\n raise RuntimeError(\"write_file() called on closed %r.\" % self)\n\n if not self.connected:\n raise RuntimeError(\"write_file() called on disconnected %r.\" % self)\n\n self._send_buffer.append((Stream.SEND_FILE, (sfile, offset, nbytes)))\n\n if flush:\n self._process_send_buffer()\n else:\n self._start_waiting_for_write_event()\n\n def write_packed(self, *data, **kwargs):\n \"\"\"\n Write packed binary data to the channel.\n\n If the current :attr:`read_delimiter` is an instance of\n :class:`struct.Struct` the format will be read from that Struct,\n otherwise you will need to provide a ``format``.\n\n ========== ====================================================\n Argument Description\n ========== ====================================================\n *data Any number of values to be passed through\n :mod:`struct` and written to the remote host.\n flush *Optional.* If True, flush the internal write\n buffer. See :meth:`~pants.stream.Stream.flush`\n for details.\n format *Optional.* A formatting string to pack the\n provided data with. If one isn't provided, the read\n delimiter will be used.\n ========== ====================================================\n \"\"\"\n format = kwargs.get(\"format\")\n if format:\n self.write(struct.pack(format, *data), kwargs.get(\"flush\", False))\n elif not isinstance(self._read_delimiter, Struct):\n raise ValueError(\"No format is available for writing packed data.\")\n else:\n self.write(self._read_delimiter.pack(*data),\n kwargs.get(\"flush\", False))\n\n def flush(self):\n \"\"\"\n Attempt to immediately write any internally buffered data to the\n channel without waiting for a write event.\n\n This method can be fairly expensive to call and should be used\n sparingly.\n\n Calling :meth:`flush()` on a closed or disconnected channel will\n raise a :exc:`RuntimeError`.\n \"\"\"\n if self._closed or self._closing:\n raise RuntimeError(\"flush() called on closed %r.\" % self)\n\n if not self.connected:\n raise RuntimeError(\"flush() called on disconnected %r.\" % self)\n\n if not self._send_buffer:\n return\n\n self._stop_waiting_for_write_event()\n self._process_send_buffer()\n\n ##### Public Event Handlers ###############################################\n\n def on_ssl_handshake(self):\n \"\"\"\n Placeholder. Called after the channel has finished its SSL\n handshake.\n \"\"\"\n pass\n\n ##### Public Error Handlers ###############################################\n\n def on_ssl_handshake_error(self, exception):\n \"\"\"\n Placeholder. Called when an error occurs during the SSL\n handshake.\n\n By default, logs the exception and closes the channel.\n\n ========== ============\n Argument Description\n ========== ============\n exception The exception that was raised.\n ========== ============\n \"\"\"\n log.exception(exception)\n self.close(flush=False)\n\n def on_ssl_error(self, exception):\n \"\"\"\n Placeholder. 
Called when an error occurs in the underlying SSL\n implementation.\n\n By default, logs the exception and closes the channel.\n\n ========== ============\n Argument Description\n ========== ============\n exception The exception that was raised.\n ========== ============\n \"\"\"\n log.exception(exception)\n self.close(flush=False)\n\n ##### Internal Methods ####################################################\n\n def _do_connect(self, address, family, error=None):\n \"\"\"\n A callback method to be used with\n :meth:`~pants._channel._Channel._resolve_addr` - either connects\n immediately or notifies the user of an error.\n\n ========= =====================================================\n Argument Description\n ========= =====================================================\n address The address to connect to or None if address\n resolution failed.\n family The detected socket family or None if address\n resolution failed.\n error *Optional.* Error information or None if no error\n occurred.\n ========= =====================================================\n \"\"\"\n if not address:\n self.connecting = False\n e = StreamConnectError(*error)\n self._safely_call(self.on_connect_error, e)\n return\n\n if self._socket:\n if self._socket.family != family:\n self.engine.remove_channel(self)\n self._socket_close()\n self._closed = False\n\n sock = socket.socket(family, socket.SOCK_STREAM)\n self._socket_set(sock)\n self.engine.add_channel(self)\n\n try:\n connected = self._socket_connect(address)\n except socket.error as err:\n self.close(flush=False)\n e = StreamConnectError(err.errno, err.strerror)\n self._safely_call(self.on_connect_error, e)\n return\n\n if connected:\n self._handle_connect_event()\n\n ##### Internal Event Handler Methods ######################################\n\n def _handle_read_event(self):\n \"\"\"\n Handle a read event raised on the channel.\n \"\"\"\n if self.ssl_enabled and not self._ssl_handshake_done:\n self._ssl_do_handshake()\n return\n\n while True:\n try:\n data = self._socket_recv()\n except socket.error as err:\n self._safely_call(self.on_read_error, err)\n return\n\n if not data:\n break\n else:\n self._recv_buffer += data\n\n if len(self._recv_buffer) > self._recv_buffer_size_limit:\n # Try processing the buffer to reduce its length.\n self._process_recv_buffer()\n\n # If the buffer's still too long, overflow error.\n if len(self._recv_buffer) > self._recv_buffer_size_limit:\n e = StreamBufferOverflow(\"Buffer length exceeded upper limit on %r.\" % self)\n self._safely_call(self.on_overflow_error, e)\n return\n\n self._process_recv_buffer()\n\n # This block was moved out of the above loop to address issue #41.\n if data is None:\n self.close(flush=False)\n\n def _handle_write_event(self):\n \"\"\"\n Handle a write event raised on the channel.\n \"\"\"\n if self.ssl_enabled and not self._ssl_handshake_done:\n self._ssl_do_handshake()\n return\n\n if not self.connected:\n self._handle_connect_event()\n\n if not self._send_buffer:\n return\n\n self._process_send_buffer()\n\n def _handle_error_event(self):\n \"\"\"\n Handle an error event raised on the channel.\n \"\"\"\n if self.connecting:\n # That's no moon...\n self._handle_connect_event()\n else:\n _Channel._handle_error_event(self)\n\n def _handle_connect_event(self):\n \"\"\"\n Handle a connect event raised on the channel.\n \"\"\"\n self.connecting = False\n err, errstr = self._get_socket_error()\n if err == 0:\n self.connected = True\n if self._ssl_enabling:\n self._ssl_call_on_connect = True\n 
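# startSSL() was requested before the connection finished: flushing\n                # the send buffer now runs the queued SSL handshake, and on_connect\n                # fires once that handshake completes (via _ssl_call_on_connect).\n                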
self._process_send_buffer()\n else:\n self._safely_call(self.on_connect)\n else:\n # ... it's a space station!\n e = StreamConnectError(err, errstr)\n self._safely_call(self.on_connect_error, e)\n\n ##### Internal Processing Methods #########################################\n\n def _process_recv_buffer(self):\n \"\"\"\n Process the :attr:`~pants.stream.Stream._recv_buffer`, passing\n chunks of data to :meth:`~pants.stream.Stream.on_read`.\n \"\"\"\n while self._recv_buffer:\n delimiter = self.read_delimiter\n\n if delimiter is None:\n data = self._recv_buffer\n self._recv_buffer = \"\"\n self._safely_call(self.on_read, data)\n\n elif isinstance(delimiter, (int, long)):\n if len(self._recv_buffer) < delimiter:\n break\n data = self._recv_buffer[:delimiter]\n self._recv_buffer = self._recv_buffer[delimiter:]\n self._safely_call(self.on_read, data)\n\n elif isinstance(delimiter, basestring):\n mark = self._recv_buffer.find(delimiter)\n if mark == -1:\n break\n data = self._recv_buffer[:mark]\n self._recv_buffer = self._recv_buffer[mark + len(delimiter):]\n self._safely_call(self.on_read, data)\n\n elif isinstance(delimiter, Struct):\n if len(self._recv_buffer) < delimiter.size:\n break\n data = self._recv_buffer[:delimiter.size]\n self._recv_buffer = self._recv_buffer[delimiter.size:]\n\n # Safely unpack it. This should *probably* never error.\n try:\n data = delimiter.unpack(data)\n except struct.error:\n log.exception(\"Unable to unpack data on %r.\" % self)\n self.close()\n break\n\n # Unlike most on_read calls, this one sends every variable of\n # the parsed data as its own argument.\n self._safely_call(self.on_read, *data)\n\n elif isinstance(delimiter, RegexType):\n # Depending on regex_search, we could do this two ways.\n if self.regex_search:\n match = delimiter.search(self._recv_buffer)\n if not match:\n break\n\n data = self._recv_buffer[:match.start()]\n self._recv_buffer = self._recv_buffer[match.end():]\n\n else:\n # Require the match to be at the beginning.\n data = delimiter.match(self._recv_buffer)\n if not data:\n break\n\n self._recv_buffer = self._recv_buffer[data.end():]\n\n # Send either the string or the match object.\n self._safely_call(self.on_read, data)\n\n else:\n # The safeguards in the read delimiter property should\n # prevent this from happening unless people start\n # getting too crafty for their own good.\n err = InvalidReadDelimiterError(\"Invalid read delimiter on %r.\" % self)\n self._safely_call(self.on_error, err)\n break\n\n if self._closed or not self.connected:\n break\n\n def _process_send_buffer(self):\n \"\"\"\n Process the :attr:`~pants.stream.Stream._send_buffer`, passing\n outgoing data to :meth:`~pants._channel._Channel._socket_send`\n or :meth:`~pants._channel._Channel._socket_sendfile` and calling\n :meth:`~pants.stream.Stream.on_write` when sending has finished.\n \"\"\"\n while self._send_buffer:\n data_type, data = self._send_buffer.pop(0)\n\n if data_type == Stream.SEND_STRING:\n bytes_sent = self._process_send_string(data)\n elif data_type == Stream.SEND_FILE:\n bytes_sent = self._process_send_file(*data)\n elif data_type == Stream.SEND_SSL_HANDSHAKE:\n bytes_sent = self._process_send_ssl_handshake(data)\n\n if bytes_sent == 0:\n break\n\n if not self._closed and not self._send_buffer:\n self._safely_call(self.on_write)\n\n if self._closing:\n self.close(flush=False)\n\n def _process_send_string(self, data):\n \"\"\"\n Send data from a string to the remote socket.\n \"\"\"\n try:\n bytes_sent = self._socket_send(data)\n except 
socket.error as err:\n self._safely_call(self.on_write_error, err)\n return 0\n\n if len(data) > bytes_sent:\n self._send_buffer.insert(0, (Stream.SEND_STRING, data[bytes_sent:]))\n\n return bytes_sent\n\n def _process_send_file(self, sfile, offset, nbytes):\n \"\"\"\n Send data from a file to the remote socket.\n \"\"\"\n try:\n bytes_sent = self._socket_sendfile(sfile, offset, nbytes)\n except socket.error as err:\n self._safely_call(self.on_write_error, err)\n return 0\n\n offset += bytes_sent\n\n if nbytes > 0:\n if nbytes - bytes_sent > 0:\n nbytes -= bytes_sent\n else:\n # Reached the end of the segment.\n return bytes_sent\n\n # TODO This is awful. Find a better way.\n if os.fstat(sfile.fileno()).st_size - offset <= 0:\n # Reached the end of the file.\n return bytes_sent\n\n self._send_buffer.insert(0, (Stream.SEND_FILE, (sfile, offset, nbytes)))\n\n return bytes_sent\n\n def _process_send_ssl_handshake(self, ssl_options):\n \"\"\"\n Enable SSL and begin the handshake.\n \"\"\"\n self._ssl_enabling = False\n\n if not self._ssl_socket_wrapped:\n try:\n self._socket = ssl.wrap_socket(self._socket, **ssl_options)\n except ssl.SSLError as err:\n self._ssl_enabling = True\n self._safely_call(self.on_ssl_error, err)\n return 0\n else:\n self._ssl_socket_wrapped = True\n\n self.ssl_enabled = True\n\n try:\n bytes_sent = self._ssl_do_handshake()\n except Exception as err:\n self._safely_call(self.on_ssl_handshake_error, err)\n return 0\n\n # Unlike strings and files, the SSL handshake is not re-added to\n # the queue. This is because the stream's state has been\n # modified and the handshake will continue until it's complete.\n return bytes_sent\n\n ##### SSL Implementation ##################################################\n\n def _socket_recv(self):\n \"\"\"\n Receive data from the socket.\n\n Returns a string of data read from the socket. The data is None if\n the socket has been closed.\n\n Overrides :meth:`pants._channel._Channel._socket_recv` to handle\n SSL-specific behaviour.\n \"\"\"\n try:\n return _Channel._socket_recv(self)\n except ssl.SSLError as err:\n if err.args[0] == ssl.SSL_ERROR_WANT_READ:\n return ''\n else:\n raise\n\n def _socket_send(self, data):\n \"\"\"\n Send data to the socket.\n\n Returns the number of bytes that were sent to the socket.\n\n Overrides :meth:`pants._channel._Channel._socket_send` to handle\n SSL-specific behaviour.\n\n ========= ============\n Argument Description\n ========= ============\n data The string of data to send.\n ========= ============\n \"\"\"\n try:\n bytes_sent = _Channel._socket_send(self, data)\n except ssl.SSLError as err:\n if err.args[0] == ssl.SSL_ERROR_WANT_WRITE:\n self._start_waiting_for_write_event()\n return 0\n else:\n raise\n\n # SSLSocket.send() can return 0 rather than raise an exception\n # if it needs a write event.\n if self.ssl_enabled and bytes_sent == 0:\n self._start_waiting_for_write_event()\n return bytes_sent\n\n def _socket_sendfile(self, sfile, offset, nbytes):\n \"\"\"\n Send data from a file to a remote socket.\n\n Returns the number of bytes that were sent to the socket.\n\n Overrides :meth:`pants._channel._Channel._socket_sendfile` to\n handle SSL-specific behaviour.\n\n ========= ============\n Argument Description\n ========= ============\n sfile The file to send.\n offset The number of bytes to offset writing by.\n nbytes The number of bytes of the file to write. 
If 0, all bytes will be written.\n ========= ============\n \"\"\"\n return _Channel._socket_sendfile(self, sfile, offset, nbytes, self.ssl_enabled)\n\n def _ssl_do_handshake(self):\n \"\"\"\n Perform an asynchronous SSL handshake.\n \"\"\"\n try:\n self._socket.do_handshake()\n except ssl.SSLError as err:\n if err.args[0] == ssl.SSL_ERROR_WANT_READ:\n return 0\n elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:\n self._start_waiting_for_write_event()\n return 0\n elif err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):\n self.close(flush=False)\n return 0\n elif err.args[0] == ssl.SSL_ERROR_SSL:\n self._safely_call(self.on_ssl_handshake_error, err)\n return 0\n else:\n raise\n except socket.error as err:\n if err.args[0] in (errno.ECONNRESET, errno.EPIPE):\n self.close(flush=False)\n return 0\n else:\n raise\n else:\n self._ssl_handshake_done = True\n self._safely_call(self.on_ssl_handshake)\n if self._ssl_call_on_connect:\n self._safely_call(self.on_connect)\n return None\n\n\n###############################################################################\n# Exceptions\n###############################################################################\n\nclass StreamBufferOverflow(Exception):\n \"\"\"\n Raised when a stream's internal buffer has exceeded its maximum\n allowed size.\n \"\"\"\n pass\n\nclass StreamConnectError(Exception):\n \"\"\"\n Raised when an error has occurred during an attempt to connect a\n stream to a remote host.\n \"\"\"\n pass\n\nclass InvalidReadDelimiterError(Exception):\n \"\"\"\n Raised when a channel tries to process incoming data with an\n invalid read delimiter.\n \"\"\"\n pass\n","sub_path":"pants/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":39823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"493054422","text":"#!/usr/bin/env python\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport requests\nfrom multiprocessing import Pool\nfrom mongoengine import ValidationError, NotUniqueError\nimport sys\nimport re\nimport datetime\nfrom pymongo import MongoClient\n\ntry:\n\tfrom jupiter.sentient.reviews.models.model import Reviews, Record\n\tfrom jupiter.sentient.reviews.nlp import Senti\nexcept Exception as e:\n\tfrom reviews.models.model import Reviews, Record\n\tfrom reviews.nlp import Senti\n\n#except:\n#\tfrom reviews.models.model import Reviews,Recor,AspectQ\n#\tfrom reviews.nlp import Senti\n\nfrom jupiter._config import mongo_params, mongo_dbi\n\n# Get database\nclient = MongoClient(mongo_params['host'], mongo_params['port'])\ndb = client[mongo_dbi]\n\n\nclass Booking(object):\n\t\"\"\"docstring for\"\"\"\n\tdef __init__(self, url, survey_id, provider=\"booking\"):\n\t\tself.url = url\n\t\tself.p = provider\n\t\tself.sid = survey_id\n\t\tself.base_url = 'http://www.booking.com'\n\n\tdef get_total_review(self):\n\t\tresponse = urlopen(self.url).read()\n\t\tsoup = BeautifulSoup(response, \"html.parser\")\n\t\ttotal = int(soup.find('a', {'class': 'hp_nav_reviews_link toggle_review track_review_link_zh'}).text.strip().replace('(', '').replace(')' ,'').split()[-1])\n\t\treturn total\n\n\tdef get_next_link(self, soup):\n\t\tnext_page = soup.find('a', {'id': 'review_next_page_link'})\n\t\tnew_url = self.base_url+next_page['href'] if next_page!=None else None\n\t\treturn new_url\n\n\tdef get_reviews(self, soup, time_reviewed, soup_url):\n\t\tlinks = []\n\t\ttotal_reviews_collected = 0\n\t\twhile True:\n\t\t\tnew_data = urlopen(soup_url)\n\t\t\tnew_soup 
= BeautifulSoup(new_data, 'html.parser')\n\t\t\tprint(\"collecting reviews from url: \" + soup_url)\n\t\t\tlists = new_soup.find_all('li', {'class': 'review_item'})\n\t\t\tfor li in lists:\n\t\t\t\treview_date = li.find('meta', {'itemprop': 'datePublished'})\n\t\t\t\treview_date = review_date['content'] if review_date is not None else None\n\t\t\t\tif review_date is None:\n\t\t\t\t\tcontinue  # a review item without a publish date cannot be filtered by time\n\t\t\t\tparsed_date = datetime.datetime.strptime(review_date, '%Y-%m-%d')\n\t\t\t\tprint(time_reviewed, \"|\", parsed_date)\n\t\t\t\tif parsed_date >= time_reviewed:\n\t\t\t\t\treview = li.find('div', {'class': 'review_item_review'})\n\t\t\t\t\tcontent = review.find('div', {'class': 'review_item_review_content'}).find_all('span', {'itemprop': 'reviewBody'})\n\t\t\t\t\theader = review.find('div', {'class': 'review_item_review_header'})\n\t\t\t\t\trating = str(float(header.find('meta', {'itemprop': 'ratingValue'})['content']) / 2.0)\n\t\t\t\t\tsentiment = Senti(review).sent(rating)\n\t\t\t\t\treview_identifier = header.find('span', {'itemprop': 'name'}).text.strip()\n\t\t\t\t\tfor texts in content:\n\t\t\t\t\t\ttexts = texts.text.strip() if texts is not None else None\n\t\t\t\t\t\treview_identifier += texts[0:10]\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsave = Reviews(survey_id=self.sid, provider=self.p,\n\t\t\t\t\t\t\t\treview=texts, review_identifier=review_identifier,\n\t\t\t\t\t\t\t\trating=rating, sentiment=sentiment).save()\n\t\t\t\t\t\t\tprint(\"reviews saved identified by:\", review_identifier)\n\t\t\t\t\t\t\ttotal_reviews_collected += 1\n\t\t\t\t\t\texcept NotUniqueError:\n\t\t\t\t\t\t\tprint(\"NotUniqueError\")\n\t\t\t\t\t\t\traise NotUniqueError(\"A non unique error found. Skipping review collection\")\n\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\tprint(\"An exception occurred, ignoring: \", e)\n\t\t\t\telse:\n\t\t\t\t\tprint('review is older than the cutoff date, skipping')\n\t\t\tlinks.append(soup_url)\n\t\t\tnext_url = self.get_next_link(new_soup)\n\t\t\tif next_url is None:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tsoup_url = next_url\n\t\tprint(\"total reviews collected =\", total_reviews_collected)\n\t\tRecord(survey_id=self.sid, provider=\"booking\", links=set(links)).save()\n\n\tdef get_data(self):\n\t\tprint(\"Getting data for \", self.sid)\n\t\tpage_no = 1\n\t\tcurrent_url = self.url\n\t\tresponse = urlopen(current_url)\n\t\tsoup = BeautifulSoup(response, \"html.parser\")\n\t\tobj = db.aspect_q.find_one({'survey_id': self.sid})\n\t\ttime_review = obj['time_review']\n\t\tlast_update = obj['last_update']\n\n\t\tif last_update is not None:\n\t\t\ttime_reviewed = time_review if (time_review >= last_update) else last_update\n\t\telse:\n\t\t\ttime_reviewed = time_review\n\t\tstart = soup.find_all('a', {\"class\": \"show_all_reviews_btn\"})\n\t\t# print(\"\\n start: \", start)\n\t\tif start:\n\t\t\tstart_url = self.base_url + start[0]['href']\n\t\t\t# print(\"\\nStart URL: \", start_url)\n\t\t\ttry:\n\t\t\t\tself.get_reviews(soup, time_reviewed, start_url)\n\t\t\texcept NotUniqueError:\n\t\t\t\tpass\n\t\telse:\n\t\t\tprint(\"total reviews collected 0\")\n\n\nif __name__ == '__main__':\n\ttest_url = \"http://www.booking.com/hotel/in/swissa-tel-goa.html?label=gen173nr-1FCAEoggJCAlhYSDNiBW5vcmVmaGyIAQGYATG4AQ_IAQ_YAQHoAQH4AQKoAgM;sid=69a80e530b4bd33d6a2ce0e128061485;dcid=12;dest_id=4127;dest_type=region;dist=0;group_adults=2;room1=A%2CA;sb_price_type=total;srfid=39183e2a96da4c1e9df94a8b1555fa36ec6494ddX4;type=total;ucfs=1&#\"\n\ttest = Booking(test_url, \"2WzzBWZAvVKoJonJvW2\")\n\tr = 
test.get_data()\n\tprint(\"end\")\n","sub_path":"jupiter/deprecated/sentient_1/reviews/bookingpool.py","file_name":"bookingpool.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}{"seq_id":"152392611","text":"class Solution(object):\n    def countDigitOne(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n        k=1\n        res=0\n        r=n\n        while r>0:\n            r=n//k\n            res+=(r+8)//10*k\n            if r%10==1:\n                res+=n%k+1\n            k*=10\n        return res\n\nprint(Solution().countDigitOne(14))\n","sub_path":"_NumberofDigitOne.py","file_name":"_NumberofDigitOne.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}{"seq_id":"488853110","text":"import random\nimport datetime\nimport time\n\n\nclass player():\n    def __init__(self, name):\n        \"\"\"\n        creates a player instance\n        :param name: player name\n        board is initialized once the player is attached to a game\n        \"\"\"\n        self.name = name\n        self.board = None\n\n    def get_player(self):\n        \"\"\"\n        get player details\n        :return: dict of player name and board\n        \"\"\"\n        return {'Name': self.name, 'Board': str(self.board.get_display_matrix())}\n\n    def is_bingo(self):\n        \"\"\"\n        check bingo for the player\n        :return: boolean, whether the player got bingo or not\n        \"\"\"\n        return self.board.is_bingo\n\n    def cross_number(self, number):\n        \"\"\"\n        cross the particular number in the player's board\n        :param number: number selected by the player\n        :return: NA\n        \"\"\"\n        self.board.cross(number)\n\n    def print_board(self):\n        \"\"\"\n        print the board\n        :return: NA\n        \"\"\"\n        self.board.print_matrix()\n\n    def is_selected(self, number):\n        \"\"\"\n        check whether the number is already selected or not\n        :param number: number to check\n        :return: boolean\n        \"\"\"\n        return self.board.is_selected(number)\n\n    def pick_number(self):\n        \"\"\"\n        pick a random/possible number from the board for next move\n        :return: number picked from the board\n        \"\"\"\n        return self.board.pick_number()\n\n\nclass board():\n    def __init__(self, dimension):\n        self.dimension = dimension\n        self.matrix = self.get_new_board()\n        self.is_bingo = False\n\n    def get_display_matrix(self):\n        display_matrix = list()\n        for i in range(0, self.dimension):\n            display_matrix.append(self.matrix[str(i)][0])\n        return display_matrix\n\n    def get_new_board(self):\n        n = self.dimension\n        num_list = [i for i in range(1, (n * n) + 1)]\n\n        num_list = random.sample(num_list, k=len(num_list))\n\n        def divide_chunks(l, n):\n            for i in range(0, len(l), n):\n                yield l[i:i + n]\n\n        mat = list(divide_chunks(num_list, n))\n        _row1 = [mat[i][i] for i in range(0, len(mat))]\n        _row2 = [mat[i][len(mat) - i - 1] for i in range(0, len(mat))]\n        for i in range(0, n):\n            _row = [mat[j][i] for j in range(0, n)]\n            mat.append(_row)\n        mat.append(_row1)\n        mat.append(_row2)\n        mat_dict = dict()\n        for i in range(0, len(mat)):\n            mat_dict[str(i)] = [mat[i], [0 for i in range(0, len(mat[i]))], 0]\n        return mat_dict\n\n    def pick_number(self):\n        mat = self.matrix\n        sum_list = [mat[i][2] for i in mat]\n        max_cut = max(sum_list)\n        sel_num = list()\n\n        while len(sel_num) == 0:\n            for i in mat:\n                if mat[i][2] == max_cut:\n                    num_list = list()\n                    for j in range(0, len(mat[i][0])):\n                        if mat[i][1][j] == 0:\n                            num_list.append(mat[i][0][j])\n                    sel_num = sel_num + num_list\n            sum_list.remove(max_cut)\n            max_cut = max(sum_list)\n        # print(sel_num)\n        return random.choice(sel_num)\n\n    def cross(self, n):\n        mat = self.matrix\n        for i in mat:\n            if n 
in mat[i][0]:\n                if mat[i][1][mat[i][0].index(n)] == 0:\n                    mat[i][1][mat[i][0].index(n)] = 1\n                    mat[i][2] = mat[i][2] + 1\n        self.matrix = mat\n        self.is_bingo = self.check_bingo()\n\n    def check_bingo(self):\n        mat = self.matrix\n        sum_list = [mat[i][2] for i in mat]\n        return self.dimension == max(sum_list) <= sum_list.count(max(sum_list))\n\n    def is_selected(self, n):\n        mat = self.matrix\n        for i in mat:\n            if n in mat[i][0]:\n                return 1 == mat[i][1][mat[i][0].index(n)]\n        return False\n\n    def get_position(self, n):\n        matrix = self.matrix\n        for i in matrix:\n            for j in range(0, len(matrix[i][0])):\n                if n == matrix[i][0][j]:\n                    return (int(i) + 1, j + 1)\n        return (None, None)\n\n    def print_matrix(self):\n        def strike(text):\n            result = ''\n            for c in text:\n                result = result + c + '\\u0336'\n            return result\n\n        mat = self.matrix\n        n = int((len(mat) - 2) / 2)\n        for i in range(0, n):\n            print_string = [strike(str(num).rjust(2, '0')) if self.is_selected(num) else str(num).rjust(2, '0') for num in\n                            mat[str(i)][0]]\n            print(print_string)\n\n\nclass bingo():\n    def __init__(self, dimension=5):\n        self.game_id = datetime.datetime.now().strftime('%Y%m%d%H%M%S') + str(random.randint(100, 999))\n        self.dimension = dimension\n        self.players = list()\n        self.winners = list()\n        self.log = dict()\n        self.round = 0\n        self.put_log(\"GameID\", self.game_id)\n        self.put_log(\"Dimension\", self.dimension)\n        self.put_log(\"Players\", [])\n        self.put_log(\"Rounds\", [])\n        self.put_log(\"Result\", [])\n        self.start_time = time.time()\n\n    def put_log(self, key, value):\n        if key in self.log and isinstance(self.log[key], list):\n            self.log[key].append(value)\n        else:\n            self.log[key] = value\n\n    def is_number_valid(self, number):\n        return number in [i for i in range(1, (self.dimension * self.dimension) + 1)]\n\n    def select_order(self):\n        selected_one = random.randint(0, len(self.players) - 1)\n        reordered_players = list()\n        for i in range(0, len(self.players)):\n            reordered_players.append(self.players[(i + selected_one) % len(self.players)])\n        self.players = reordered_players\n        return selected_one\n\n    def get_log(self):\n        return self.log\n\n    def add_player(self, player_name):\n        new_player = player(player_name)\n        new_player.board = board(self.dimension)\n        self.players.append(new_player)\n        self.put_log(\"Players\", new_player.get_player())\n        return new_player\n\n    def remove_player(self, player):\n        self.players.remove(player)\n\n    def play(self, player, number):\n        if not self.is_game_over():\n            self.round += 1\n            self.put_log(\"Rounds\", {\"RoundNumber\": self.round, 'PlayerName': player.name, 'SelectedNumber': number,\n                                    'SelectedPosition': player.board.get_position(number)})\n            for p in self.players:\n                p.cross_number(number)\n        return\n\n    def is_game_over(self):\n        for player in self.players:\n            if player.is_bingo() and self.winners.count(player) == 0:\n                self.winners.append(player)\n                self.put_log(\"Result\", player.name)\n                self.put_log(\"GameTime\", (time.time()-self.start_time))\n        return len(self.winners) > 0\n\n    def auto_play(self):\n        # random.shuffle(self.players)\n        self.select_order()\n        while not self.is_game_over():\n            for player in self.players:\n                num = player.board.pick_number()\n                # for player in self.players:\n                #     player.cross_number(num)\n                self.play(player, num)\n                if self.is_game_over():\n                    break\n\n    def print_result(self):\n        if len(self.winners) == len(self.players):\n            print(\"\\nIt's a tie!\\n\")\n        elif self.is_game_over():\n            print(\"\\nWinner[s] is[are]: \\n\")\n            for player in self.winners:\n                print(player.name)\n
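# separate the winner list from the per-player board printouts below\n            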
print(\"\")\n else:\n print(\"\\n The game is in progress \\n\")\n return\n for player in self.players:\n print(player.name + \"'s board\")\n player.print_board()\n print(\"\")\n\n\nif __name__ == '__main__':\n game = bingo()\n print(game.game_id)\n player1 = game.add_player('Bot1')\n player2 = game.add_player('Bot2')\n player3 = game.add_player('Bot3')\n\n game.auto_play()\n game.print_result()\n","sub_path":"bingo.py","file_name":"bingo.py","file_ext":"py","file_size_in_byte":8174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"65022973","text":"from sqlite3.dbapi2 import DatabaseError\nfrom static.model import student\nfrom database.csv_editor import csv_editor as editor\nimport sqlite3\n\nclass data_handle:\n\n\n def __init__(self,student_1,student_2,location,date,time):\n self.student_1 = student_1 \n self.student_2 = student_2 \n self.location = location \n self.date = date \n self.time = time\n \n def found(self):\n fieldnames = ['student_1','student_2','location','date','time']\n csv_file = editor('database/finds.csv',fieldnames)\n data = {\n 'student_1' : self.student_1,\n 'student_2' : self.student_2,\n 'location' : self.location,\n 'date' : self.date,\n 'time' : self.time\n }\n csv_file.append_file(data)\n\n def store_to_DB(self):\n \"\"\"\n write code to store data to db on runtime\n \"\"\"\n name2 = \"\"\n if len(self.student_2) > 0:\n name2 = self.student_2\n else:\n name2 = 'NULL'\n with sqlite3.connect('database/student.db') as conn:\n _query = f\"\"\"\n INSERT INTO finds (student_1,student_2,Location,date,time)\n VALUES({self.student_1},{name2},'{self.location}','{self.date}','{self.time}');\n \"\"\"\n row = []\n try:\n cursor = conn.execute(_query)\n conn.commit()\n row = cursor.lastrowid()\n print(\"row is added swith success\",end='')\n print(row)\n except DatabaseError as identifier:\n print(\"error \" + str(identifier))\n finally:\n return row\n # make it a seperate thread\n def getData(sid):\n row = []\n conn = sqlite3.connect('database/student.db')\n # print(\"Opened database successfully\")\n cursor = conn.execute(\"SELECT first_name,last_name,sid,email,age,gender From student WHERE first_name = \" + '\"' + sid + '\"')\n row = cursor.fetchone()\n # print(row)\n conn.close()\n if row:\n return {\n 'first_name' : row[0] ,\n 'last_name' : row[1],\n 'sid': row[2],\n 'email' : row[3],\n 'age' : row[4],\n 'gender' : row[5]\n }\n return None\n","sub_path":"database/student_info.py","file_name":"student_info.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"326882874","text":"#pylint: disable=invalid-name,unused-argument\r\n\"\"\"A set of classes to fake a backend webservice.\"\"\"\r\n\r\nimport threading\r\nimport http.server\r\nimport queue\r\nimport json\r\n\r\nfrom dungeonz.util.webservice import WebserviceException\r\n\r\n\r\nclass ReplayWebservice(object):\r\n \"\"\"Manages the worker thread and passing messages to it.\"\"\"\r\n def __init__(self, port=18000):\r\n self.request_queue = queue.Queue()\r\n self.worker = WebserviceThread(self.request_queue, port)\r\n\r\n def add_response(self, response_type, endpoint, response, status_code=200,\r\n persist=False):\r\n self.request_queue.put((response_type, endpoint, response, status_code,\r\n persist))\r\n self.request_queue.join()\r\n\r\n\r\nclass CustomHandler(http.server.BaseHTTPRequestHandler):\r\n def do_GET(self):\r\n \"\"\"Respond to a GET request.\"\"\"\r\n rm 
= self.server.service.response_manager\r\n status_code, response = rm.get('GET', self.path)\r\n\r\n self.send_response(status_code)\r\n self.send_header(\"Content-type\", \"text/html\")\r\n self.end_headers()\r\n self.wfile.write(response.encode(encoding='UTF-8'))\r\n\r\n def do_POST(self):\r\n \"\"\"Respond to a POST request.\"\"\"\r\n rm = self.server.service.response_manager\r\n status_code, response = rm.get('POST', self.path)\r\n\r\n self.send_response(status_code)\r\n self.send_header(\"Content-type\", \"text/html\")\r\n self.end_headers()\r\n self.wfile.write(response.encode(encoding='UTF-8'))\r\n\r\n def do_PATCH(self):\r\n \"\"\"Respond to a PATCH request.\"\"\"\r\n rm = self.server.service.response_manager\r\n status_code, response = rm.get('PATCH', self.path)\r\n\r\n self.send_response(status_code)\r\n self.send_header(\"Content-type\", \"text/html\")\r\n self.end_headers()\r\n self.wfile.write(response.encode(encoding='UTF-8'))\r\n\r\n def log_message(self, message_format, *args):\r\n \"\"\"Be quiet\"\"\"\r\n return\r\n\r\n\r\nclass WebserviceThread(threading.Thread):\r\n \"\"\" A worker thread that handles HTTP requests and response additions.\"\"\"\r\n def __init__(self, request_q, port):\r\n super(WebserviceThread, self).__init__()\r\n self.httpd = None\r\n self.request_q = request_q\r\n self.stoprequest = threading.Event()\r\n self.response_manager = ResponseManager()\r\n self.init_webserver(port)\r\n\r\n def run(self):\r\n while not self.stoprequest.isSet():\r\n try:\r\n # Check for new response additions\r\n response_type, endpoint, response, status_code, persist = \\\r\n self.request_q.get(True, 0.25)\r\n self.response_manager.put(response_type, endpoint, response,\r\n status_code=status_code,\r\n persist=persist)\r\n self.request_q.task_done()\r\n except queue.Empty:\r\n pass\r\n\r\n # Handle any requests\r\n self.httpd.handle_request()\r\n self.httpd.server_close()\r\n\r\n def join(self, timeout=None):\r\n self.stoprequest.set()\r\n super(WebserviceThread, self).join(timeout)\r\n\r\n def init_webserver(self, port):\r\n HandlerClass = CustomHandler\r\n ServerClass = http.server.HTTPServer\r\n Protocol = \"HTTP/1.0\"\r\n\r\n server_address = ('127.0.0.1', port)\r\n\r\n HandlerClass.protocol_version = Protocol\r\n self.httpd = ServerClass(server_address, HandlerClass)\r\n self.httpd.timeout = 0.005\r\n self.httpd.service = self\r\n\r\n\r\nclass ResponseManager(object):\r\n def __init__(self):\r\n self.responses = {\r\n 'GET': {},\r\n 'POST': {},\r\n 'PATCH': {}\r\n }\r\n\r\n def put(self, response_type, endpoint, response, **kwargs):\r\n status_code = kwargs.get('status_code', 200)\r\n persist = kwargs.get('persist', False)\r\n\r\n self.responses[response_type][endpoint] = (json.dumps(response),\r\n status_code, persist)\r\n\r\n def get(self, response_type, endpoint):\r\n responses = self.responses[response_type]\r\n\r\n try:\r\n response, status_code, persist = responses[endpoint]\r\n if not persist:\r\n del responses[endpoint]\r\n return (status_code, response)\r\n except KeyError:\r\n return (404, json.dumps({'url': endpoint,\r\n 'method': response_type}))\r\n\r\n\r\nclass MockWebservice(object):\r\n \"\"\"Emulates the webservice module by wrapping around a ResponseManager.\"\"\"\r\n def __init__(self):\r\n self.response_manager = ResponseManager()\r\n\r\n def get(self, url, headers=None, auth=None, absolute=False):\r\n return self._handle_response('GET', url)\r\n\r\n def post(self, url, data, headers=None, auth=None, absolute=False):\r\n return 
self._handle_response('POST', url)\r\n\r\n    def patch(self, url, data, headers=None, auth=None, absolute=False):\r\n        return self._handle_response('PATCH', url)\r\n\r\n    def _handle_response(self, method, url, data=None, absolute=False):\r\n        status_code, response = self.response_manager.get(method, url)\r\n        if status_code >= 400:\r\n            raise WebserviceException(status_code, url=url,\r\n                                      data=None, detail=response)\r\n        return json.loads(response)\r\n\r\n    def set(self, *args, **kwargs):\r\n        \"\"\"Forwards a call to add a response to the response manager.\"\"\"\r\n        self.response_manager.put(*args, **kwargs)\r\n","sub_path":"dungeonz/util/replay_webservice.py","file_name":"replay_webservice.py","file_ext":"py","file_size_in_byte":5617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}{"seq_id":"544259294","text":"\n# even parity computation\ndef computeParity(num):\n    result = 0\n    while num:\n        result = result ^ (num & 1)\n        num = num >> 1\n    return result\n\ndef ones(num):\n    count = 0\n    while num:\n        if num & 1:\n            count += 1\n        num = num >> 1\n    return count\n\n\ndef RLEncoder(text):\n    if not text:\n        raise ValueError('Incorrect input')\n    code = ''\n    prev = text[0]\n    count = 1\n    for char in text[1:]:\n        if prev != char:\n            code += str(count) + prev\n            prev = char\n            count = 1\n        else:\n            count += 1\n    # write the last char and count\n    code += str(count) + prev\n    return code\n","sub_path":"practice/practice_201711.py","file_name":"practice_201711.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}{"seq_id":"328501909","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nfrom helper.TestSuiteGen import TestSuiteGen\nfrom TitanicDisasterTestSuite import TitanicDisasterTestSuite\nfrom ProcessManagerTest import ProcessManagerTest\n\n\"\"\"\n    @author : Fatih Hamurcu @date: 2015\n    @project: Titanic Machine Learning from Disaster\n\"\"\"\nclass ProcessManagerTestALL(unittest.TestSuite, TitanicDisasterTestSuite):\n    def __init__(self):\n        self.__testSuite = unittest.TestSuite()\n        self.__testRunner = unittest.TextTestRunner(verbosity=2)\n        self.__testClasses = [ProcessManagerTest]\n    \n    def start(self):\n        self.__loadTestSuite()\n        self.__testRunner.run(self.__testSuite)\n    \n    def __loadTestSuite(self):\n        suiteGen = TestSuiteGen(self.__testClasses)\n        \n        suiteGen.start()\n        self.__testSuite = suiteGen.getTestSuite()\n    \n    \nif __name__ == '__main__':\n    ProcessManagerTestALL().start()","sub_path":"TitanicMLfromDisaster/testCode/ProcessManagerTest/ProcessManagerTestALL.py","file_name":"ProcessManagerTestALL.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}{"seq_id":"354573333","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 20 19:47:10 2020\n\n@author: Max\n\"\"\"\n\nfrom datetime import datetime\nimport backtrader as bt \n\n#define class representing trading strategy\nclass SmaSignal(bt.Signal):\n    params = (('period', 20), )\n    def __init__(self):\n        self.lines.signal = self.data - bt.ind.SMA(period=self.p.period)\n#data\ndata = bt.feeds.YahooFinanceData(dataname='AAPL',\n                                 fromdate = datetime(2018,1,1),\n                                 todate = datetime(2018,12,31)\n                                 )\n#set-up\ncerebro = bt.Cerebro(stdstats = False)\n\ncerebro.adddata(data)\ncerebro.broker.setcash(1000.0)\ncerebro.add_signal(bt.SIGNAL_LONG, 
SmaSignal)\ncerebro.addobserver(bt.observers.BuySell)\ncerebro.addobserver(bt.observers.Value)\n\n\n#Run\nprint(f'Starting Portfolio Value: {cerebro.broker.getvalue():.2f}')\ncerebro.run()\nprint(f'Final Portfolio Value: {cerebro.broker.getvalue():.2f}')\n\ncerebro.plot(iplot=True, volume=False)","sub_path":"Ch2/BacktestingStrategySignal.py","file_name":"BacktestingStrategySignal.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"113908687","text":"import pyglet\n\nimport sequences\nimport physics\n\nMAGIC_PLATFORM_NUMBER = 1\n\n#Create Super Class Bullet\nclass Bullets:\n BULLET_SPEED = 40\n def __init__(self,x, y, x_path, y_path):\n self.sprite = pyglet.sprite.Sprite(\n sequences.PLAYER['BASIC_BULLET'], x, y\n )\n self.x_path = x_path\n self.y_path = y_path\n self.done = False\n\n def update(self):\n self.sprite.x += self.BULLET_SPEED * self.x_path\n self.sprite.y += self.BULLET_SPEED * self.y_path\n\nclass Player:\n INITIAL_Y_VELOCITY = -80\n X_MOVEMENT = 15\n MOVING_SEQUENCE_LEN = 4\n LEFT_LIMIT = 40\n RIGHT_LIMIT = 320\n\n def __init__(self, controls, lives):\n self.shield = 0\n self.lives = lives\n self._alive = True\n self.in_mid_air = False\n self.hanging = False\n self.climbing = False\n self.stand()\n self.y_velocity = 0\n self._sequence = 0\n self.destruct = 0\n self.looking_forward = True\n self.aim_forward()\n self.cancel_y_aim()\n self.stage = None\n self.platform_id = 0\n self.controls = controls\n self.bullets = []\n\n self.speed = (0, 0)\n\n looking_forward_int = int(self.looking_forward)\n standing_sequence = sequences.PLAYER['1']['STANDING'][\n looking_forward_int\n ]\n self.sprite = pyglet.sprite.Sprite(standing_sequence, 0, 0)\n\n def react(self, keyboard):\n action = 'nope'\n if keyboard.get(self.controls.up_key):\n action = 'up'\n if keyboard.get(self.controls.down_key):\n action = 'down'\n if keyboard.get(self.controls.left_key):\n action = 'left'\n if keyboard.get(self.controls.rigth_key):\n action = 'right'\n if keyboard.get(self.controls.jump_key):\n action = 'jump'\n if keyboard.get(self.controls.fire_key):\n action = self.fire\n return action\n\n def fire(self):\n if len(self.bullets) < 12:\n\n bullet_x_direction = int(self.looking_forward)\n new_bullet_x_position = (\n self.sprite.x + (bullet_x_direction * 85) - 18\n )\n new_bullet_y_position = (\n self.sprite.y + (self.aiming_y * 40) + 48\n )\n if self.in_mid_air:\n new_bullet_x_position = (\n self.sprite.x + (self.aiming_x * 28) + 12\n )\n new_bullet_y_position = (\n self.sprite.y + (self.aiming_y * 28) + 12\n )\n elif self.is_ducking():\n new_bullet_x_position = (\n self.sprite.x + (bullet_x_direction * 100) - 12\n )\n new_bullet_y_position = self.sprite.y + 6\n elif self.aiming_x == 0 and self.aiming_y == 1:\n new_bullet_x_position = (\n self.sprite.x + (bullet_x_direction * 4) + 20\n )\n new_bullet_y_position = self.sprite.y + 112\n new_bullet = Bullets(\n new_bullet_x_position, new_bullet_y_position,\n self.aiming_x, self.aiming_y\n )\n self.bullets.append(new_bullet)\n\n def draw(self):\n looking_forward_int = int(self.looking_forward)\n action_name = 'STANDING'\n action_sequence = looking_forward_int\n new_x = self.sprite.x\n if self._alive:\n is_looking_up = self.aiming_y == 1\n is_looking_down = self.aiming_y == -1\n if is_looking_up:\n action_name = 'STANDING_LOOK_UP'\n if self.is_running() or self.in_mid_air:\n looking_direction_label = 'BACK'\n if self.looking_forward:\n looking_direction_label = 
'FRONT'\n action_name = looking_direction_label\n if self.in_mid_air:\n action_name = 'JUMPING_' + looking_direction_label\n elif is_looking_up:\n action_name = looking_direction_label + '_LOOKING_UP'\n elif is_looking_down:\n action_name = looking_direction_label + '_LOOKING_DOWN'\n action_sequence = self._sequence\n elif self.is_ducking():\n action_name = 'DUCKING'\n else:\n action_name = 'DEAD'\n if self.in_mid_air:\n action_name = 'DYING'\n action_sequence = self._sequence\n if not self.looking_forward:\n new_x = self.sprite.x + self.X_MOVEMENT\n if new_x > Player.RIGHT_LIMIT:\n new_x = Player.RIGHT_LIMIT\n else:\n action_name = action_name + '_BACK'\n new_x = self.sprite.x - self.X_MOVEMENT\n if new_x < 0:\n new_x = 0\n self.fall()\n else:\n self.destruct += 1\n player_sequence = (\n sequences.PLAYER['1'][action_name][action_sequence]\n )\n if self.destruct != 9:\n self.sprite = pyglet.sprite.Sprite(\n player_sequence, new_x, self.sprite.y\n )\n self._sequence = (self._sequence + 1) % self.MOVING_SEQUENCE_LEN\n else:\n self.lives -= 1\n self.revive()\n self.sprite.draw()\n\n def move_forward(self, stage):\n self.looking_forward = True\n self.aim_forward()\n self.stand()\n\n self.speed = (self.X_MOVEMENT, 0)\n new_x = self.sprite.x + self.speed[0]\n if new_x > Player.RIGHT_LIMIT:\n new_x = Player.RIGHT_LIMIT\n self.sprite.x = new_x\n\n def aim_up(self):\n self.aiming_y = 1\n\n def aim_down(self):\n self.aiming_y = -1\n\n def cancel_y_aim(self):\n self.aiming_y = 0\n\n def aim_forward(self):\n self.aiming_x = 1\n\n def aim_backward(self):\n self.aiming_x = -1\n\n def cancel_x_aim(self):\n self.aiming_x = 0\n\n def is_in_right_limit(self):\n return (self.sprite.x + self.sprite.width/2) > Player.RIGHT_LIMIT\n\n def is_running(self):\n return self.speed[0] != 0\n\n def is_moving_right(self):\n return self.speed[0] > 0\n\n def move_backward(self, stage):\n self.looking_forward = False\n self.aim_backward()\n self.stand()\n\n self.speed = (-1 * self.X_MOVEMENT, 0)\n new_x = self.sprite.x + self.speed[0]\n if new_x < 0:\n new_x = 0\n self.sprite.x = new_x\n\n def is_ducking(self):\n return self.ducking\n\n def duck(self):\n self.ducking = True\n\n def stand(self):\n self.ducking = False\n\n def jump(self):\n self.stand()\n if not self.in_mid_air:\n self.in_mid_air = True\n self.y_velocity = self.INITIAL_Y_VELOCITY\n\n def collision_platform(self):\n platforms = self.stage.get_platforms()\n self.platform_id = 0\n on_platform = False\n if self.in_mid_air and self.y_velocity > 0:\n for p in platforms:\n if self.sprite.x + 70 > p.x and self.sprite.x + 10 < p.x + p.width and self.sprite.y < p.y and self.sprite.y + self.y_velocity + MAGIC_PLATFORM_NUMBER > p.y:\n if not on_platform:\n on_platform = True\n self.platform_id = platforms.index(p)\n else:\n if platforms[self.platform_id].y + platforms[self.platform_id].height < p.y + p.height:\n self.platform_id = platforms.index(p)\n if on_platform:\n self.in_mid_air = False\n self.sprite.y = platforms[self.platform_id].y\n self.y_velocity = 0\n\n def hazard_collision(self, hazards):\n if self.shield > 15:\n if not self.is_ducking() or not self.in_mid_air:\n x_hitbox_offset = self.sprite.width / 4\n else:\n x_hitbox_offset = 0\n for h in hazards:\n if h.__class__.__name__ == \"Bullets_Enemy\" or h.__class__.__name__ == \"Cannon_Ball\":\n if not h.done:\n if self.sprite.x > h.sprite.x - self.sprite.width + 1 + x_hitbox_offset and self.sprite.x < h.sprite.x + h.sprite.width - 1 - x_hitbox_offset and self.sprite.y > h.sprite.y - self.sprite.height + 1 and 
self.sprite.y < h.sprite.y + h.sprite.height:\n self._alive = False\n self.jump()\n h.done = True\n else:\n if h.__class__.__name__ == \"EnemyDog\":\n if h.alive and h.state == 2:\n if self.sprite.x > h.sprite.x - self.sprite.width + 1 and self.sprite.x < h.sprite.x + h.sprite.width - 1 and self.sprite.y > h.sprite.y - self.sprite.height + 1 and self.sprite.y < h.sprite.y + h.sprite.height:\n self._alive = False\n self.jump()\n elif h.alive:\n if self.sprite.x > h.sprite.x - self.sprite.width + 1 and self.sprite.x < h.sprite.x + h.sprite.width - 1 and self.sprite.y > h.sprite.y - self.sprite.height + 1 and self.sprite.y < h.sprite.y + h.sprite.height:\n self._alive = False\n self.jump()\n\n def platform_walk(self):\n platforms = self.stage.get_platforms()\n if not (self.sprite.x + 70 > platforms[self.platform_id].x and self.sprite.x + 10 < platforms[self.platform_id].x + platforms[self.platform_id].width):\n self.in_mid_air = True\n\n def fall(self):\n self.y_velocity += physics.GRAVITY_ACCELERATION\n self.sprite.y -= self.y_velocity\n self.collision_platform()\n\n def drop(self):\n if self.stage.get_platforms()[self.platform_id].solid:\n self.jump()\n else:\n self.sprite.y -= 1\n self.in_mid_air = True\n\n def is_alive(self):\n return self._alive\n\n def revive(self):\n self.shield = 0\n self.destruct = 0\n self._alive = True\n self.in_mid_air = True\n self.looking_forward = True\n self.platform_id = 0\n looking_forward_int = int(self.looking_forward)\n standing_sequence = sequences.PLAYER['1']['STANDING'][\n looking_forward_int\n ]\n self.sprite = pyglet.sprite.Sprite(standing_sequence, 20, 300)\n\n def get_position(self):\n return (self.sprite.x + self.sprite.width/2, self.sprite.y + self.sprite.height/2)\n\n def get_speed(self):\n return self.speed\n\n def clear_speed(self):\n self.speed = (0, 0)\n","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":10759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"348979076","text":"import numpy as np\r\nimport source_coding.discus.ldpc_design as code_design\r\nimport source_coding.discus.binaryDSC as dsc_binary_codec\r\nimport source_coding.discus.nonbinaryDSC as dsc_nonbinary_codec\r\n\r\nclass DISCUSEngine:\r\n def __init__(self):\r\n self._num_chks = -1\r\n self._pxy = None\r\n self._py = None\r\n\r\n self._dsc = None\r\n self._ldpc_codes = None\r\n self._alphabet_size = 2\r\n self._bits_per_symbol = 1\r\n\r\n def initialize(self, alphabet_size, ldpc_codes):\r\n self._alphabet_size = alphabet_size\r\n self._ldpc_codes = ldpc_codes\r\n\r\n # check if alphabet size is a power of 2\r\n if (alphabet_size < 2) or ((alphabet_size & (alphabet_size - 1)) != 0):\r\n alphabet_size = int(2 ** (np.ceil(np.log2(alphabet_size))))\r\n self._alphabet_size = alphabet_size\r\n\r\n # initialize the probability distributions\r\n self._bits_per_symbol = int(np.log2(self._alphabet_size).item())\r\n\r\n\r\n if self._alphabet_size == 2:\r\n self._dsc = dsc_binary_codec.BinaryDSC()\r\n else:\r\n self._dsc = dsc_nonbinary_codec.NonbinaryDSC()\r\n\r\n self.set_distributions()\r\n\r\n\r\n # =========================================================================\r\n # set the parameters of the DISCUS algorithm\r\n def set_distributions(self, pxy=None, py=None):\r\n if pxy is None:\r\n pxy = np.ones(shape=[self._alphabet_size, self._alphabet_size], dtype=np.float) / self._alphabet_size\r\n\r\n if py is None:\r\n py = np.ones(shape=self._alphabet_size, dtype=np.float) / 
self._alphabet_size\r\n\r\n self._pxy = pxy\r\n self._py = py\r\n\r\n h = -np.dot(np.sum(self._pxy * np.log2(self._pxy + 1e-12), axis=0), self._py) / np.log2(self._alphabet_size)\r\n\r\n num_vars = self._ldpc_codes['N']\r\n if h > 0.9:\r\n num_checks = num_vars\r\n var_index = np.arange(0, num_vars).astype(np.uint32)\r\n chk_index = np.arange(0, num_checks).astype(np.uint32)\r\n else:\r\n num_checks = int(1.1 * h * num_vars)\r\n index = np.where(self._ldpc_codes['M'] > num_checks)\r\n idx = np.argmin(self._ldpc_codes['M'][index])\r\n index = index[0][idx]\r\n num_checks = self._ldpc_codes['M'][index].item(0)\r\n chk_name = 'chk_index_{}'.format(index)\r\n var_name = 'var_index_{}'.format(index)\r\n chk_index = self._ldpc_codes[chk_name]\r\n var_index = self._ldpc_codes[var_name]\r\n\r\n if (num_checks != self._num_chks):\r\n self._num_chks = num_checks\r\n # new code should be used\r\n if self._alphabet_size == 2:\r\n self._dsc.initialize(num_checks, num_vars, chk_index, var_index)\r\n else:\r\n h_values = np.random.randint(1, self._alphabet_size, size=len(chk_index)).astype(np.uint8)\r\n self._dsc.initialize(self._bits_per_symbol, num_checks, num_vars, chk_index, var_index, h_values)\r\n\r\n # =========================================================================\r\n # get the alphabet size of the codec\r\n @property\r\n def alphabet_size(self):\r\n return self._alphabet_size\r\n\r\n # =========================================================================\r\n # encoding function\r\n def encode(self, x):\r\n code = self._dsc.encode(input=x)\r\n code_len = self._bits_per_symbol * code.size\r\n\r\n return code, code_len\r\n\r\n # =================================================================================================================\r\n # decoding function\r\n def decode(self, code, si_sequence):\r\n x = self._dsc.decode(input_code=code, si_sequence=si_sequence.astype(np.uint8), px_y=self._pxy, max_iter=100)\r\n\r\n return x\r\n","sub_path":"dsc/discus_engine.py","file_name":"discus_engine.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"87636868","text":"#!/usr/bin/env python3\nimport argparse\nimport codecs\nimport difflib\nimport html\nimport logging\nimport os\nimport subprocess\nimport sys\n\nfrom string import Template\n\nimport feedgenerator\nimport requests\n\nfrom webdiff.database import Database\nfrom webdiff.config import load_config\n\n\nDESCRIPTION = \"\"\"\\\nMonitor an url for changes and generates an RSS feed for it.\n\"\"\"\n\n\nDESCRIPTION_TEMPLATE = \"\"\"\n
<pre>\n$diff\n</pre>
\n\"\"\"\n\n\ndef external_diff(tool, old_path, new_path):\n command = Template(tool).substitute(old=old_path, new=new_path)\n return subprocess.check_output(command, shell=True)\n\n\ndef internal_diff(old_path, new_path):\n with codecs.open(old_path, 'r', 'utf-8') as fp:\n old_lines = fp.readlines()\n with codecs.open(new_path, 'r', 'utf-8') as fp:\n new_lines = fp.readlines()\n diff_lines = difflib.unified_diff(old_lines, new_lines, old_path, new_path)\n return ''.join(diff_lines).encode('utf-8')\n\n\ndef generate_diff(config, old_snapshot, new_snapshot):\n old_path = old_snapshot.content_path\n new_path = new_snapshot.content_path\n\n diff_tool = config.get('diff_tool')\n if diff_tool:\n diff = external_diff(diff_tool, old_path, new_path)\n else:\n diff = internal_diff(old_path, new_path)\n\n with open(new_snapshot.content_diff_path, 'wb') as f:\n f.write(diff)\n\n\ndef regen(config, db):\n logging.info('Regenerating feed')\n feed = feedgenerator.Rss201rev2Feed(\n title=config['title'],\n description=config['description'],\n link=config['url'])\n\n description_template = Template(DESCRIPTION_TEMPLATE)\n\n snapshots = db.get_snapshots()\n previous_snapshot = snapshots[0]\n for snapshot in snapshots[1:]:\n content_diff = snapshot.content_diff_path\n if not os.path.exists(content_diff):\n logging.info('Generating diff for {}'.format(\n snapshot.content_path))\n generate_diff(config, previous_snapshot, snapshot)\n previous_snapshot = snapshot\n\n with codecs.open(content_diff, 'r', 'utf-8') as fp:\n diff = fp.read()\n\n if not diff:\n # No real diff, do not create feed entry\n continue\n\n title = 'Changes from {}'.format(snapshot.date_str)\n description = description_template.substitute(diff=html.escape(diff))\n pubdate = snapshot.date\n\n feed.add_item(title=title,\n link='dummy',\n description=description,\n pubdate=pubdate)\n\n with open(config['rssfile'], 'wb') as fp:\n feed.write(fp, 'utf-8')\n\n\ndef update(config, db):\n content = requests.get(config['url']).text\n try:\n previous_snapshot = db.get_snapshots()[-1]\n except IndexError:\n previous_snapshot = None\n if previous_snapshot and content == previous_snapshot.get_content():\n # Content is the same, no need to save a new snapshot\n logging.info('Not creating a new snapshot, content is the same')\n return False\n\n snapshot = db.create_snapshot()\n logging.info('Creating snapshot in {}'.format(snapshot.content_path))\n\n os.makedirs(snapshot.dir, exist_ok=True)\n with codecs.open(snapshot.content_path, 'w', 'utf-8') as fp:\n fp.write(content)\n return True\n\n\ndef setup_logger(logfile):\n FORMAT = '%(asctime)s %(levelname)s: %(message)s'\n log_config = dict(format=FORMAT, level=logging.DEBUG)\n if logfile:\n log_config['filename'] = logfile\n logging.basicConfig(**log_config)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.description = DESCRIPTION\n\n parser.add_argument('-c', '--config', dest='config', required=True,\n help='Read config from CONFIG_FILE',\n metavar='CONFIG_FILE')\n\n parser.add_argument('action', choices=['update', 'regen'],\n help='Action to perform. `update` fetches the url '\n 'and updates the RSS feed. 
`regen` only '\n 'regenerates the RSS feed.')\n\n args = parser.parse_args()\n\n config = load_config(args.config)\n\n setup_logger(config['logfile'])\n\n try:\n db = Database(config['snapshots_dir'])\n\n logging.info('Started with action {}'.format(args.action))\n\n if args.action == 'regen':\n regen(config, db)\n\n elif args.action == 'update':\n if update(config, db):\n regen(config, db)\n\n return 0\n except Exception:\n logging.exception('Stopping because of an exception')\n return 1\n\n\nif __name__ == '__main__':\n sys.exit(main())\n# vi: ts=4 sw=4 et\n","sub_path":"webdiff/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"80632403","text":"'''\nPlot the solution for the case where the ionisation front collapses\nSam Geen, March 2016\n'''\n\nimport Hamu\n\nfrom ragacompare import * # eeeehhhhh\n\ndef findrs(flux,n):\n return (flux * 3.0 / (4.0*np.pi*beta2*n**2.0))**(1.0/3.0)\n\ndef findtff(profile):\n n0 = profile._n0\n rho0 = n0*mH/X\n return np.sqrt(3.0*np.pi / (32.0*G*rho0))\n\ndef analytic(sim):\n flux = 10.0**float(sim.Name()[1:3])\n profile = ProfAna(sim)\n tff = findtff(profile)\n rs = findrs(flux,profile._n0)\n t = np.arange(0,3,0.05)*Myrins\n psi = 4.0/7.0\n r = rs*(cs / rs / psi * t * (1 - 0.5*t/tff))**psi\n return t/Myrins, r / pcincm\n\ndef run():\n tstart = 2.531\n simname = \"N49-NSN\"\n sim = Hamu.Simulation(simname)\n timeplot.starts[simname] = tstart\n # Simulation result\n tsim, rsim = timeplot.funcovertime(sim,radiimodule.MedianRadiusProfile)\n plt.plot(tsim,rsim,\"k-\",label=\"Simulation\")\n # Analytic solution (no vext)\n t, r = analytic(sim)\n plt.plot(t,r,\"k--\",label=\"Analytic ($v_{ext} = 0$)\")\n # Numerical solution (dN values are unused variables)\n tn, rn, d1, d2, d3, d4, d5, d6 = ComputeExpansion(sim,\"full\",\"ana\",\n collapsemodel=True)\n plt.plot(tn,rn,\"k:\",label=\"Analytic ($v_{ext} = v_{vir}$)\")\n plt.xlim([0,3])\n plt.ylabel(\"Radius / pc\")\n plt.xlabel(\"Time / Myr\")\n plt.legend(fontsize=\"small\",frameon=False,loc=\"upper right\")\n plt.savefig(\"../plots/collapsesolution.pdf\")\n \n\nif __name__==\"__main__\":\n run()\n","sub_path":"HIISN/scripts/collapsesolution.py","file_name":"collapsesolution.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"347912617","text":"import logging\n\nlogging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n#logging.disable(logging.CRITICAL)\n\ndef main():\n N = int(input())\n\n A = []\n x = []\n y = []\n\n for i in range(N):\n Ai = int(input())\n xi = []\n yi = []\n A.append(Ai)\n x.append(xi)\n y.append(yi)\n\n for j in range(Ai):\n xij, yij = map(int, input().split(' '))\n xi.append(xij)\n yi.append(yij)\n\n logging.info(A)\n logging.info(x)\n logging.info(y)\n\n ok_list = []\n for bits in range(2 ** N):\n ok_flag = True\n for i in range(N):\n if (bits >> i) & 1 == 0:\n continue\n\n for j in range(A[i]):\n if (bits >> (x[i][j] - 1) & 1) != y[i][j]:\n ok_flag = False\n\n if ok_flag:\n ok_list.append(bin(bits).count('1'))\n\n print(max(ok_list))\n\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"BeginnerContest_147/C/solver2.py","file_name":"solver2.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"473819164","text":"import json\nimport typing as tp\nfrom pathlib 
import Path\nfrom tempfile import TemporaryDirectory\n\nfrom gemmi import cif\nfrom pyobjcryst import loadCrystal\nfrom pyobjcryst.crystal import Crystal\n\nfrom .tools import get_value\n\n\ndef cif_to_dict(cif_file: str, mmjson: bool = False) -> tp.Generator:\n \"\"\"Convert cif file to a dictionary.\"\"\"\n cif_path = Path(cif_file)\n doc = cif.read_file(str(cif_path))\n dct: dict = json.loads(\n doc.as_json(mmjson=mmjson)\n )\n if not mmjson:\n for block_name, block_dct in dct.items():\n block_dct['name'] = block_name\n block_dct['cif_file'] = str(cif_path.absolute())\n yield block_dct\n else:\n yield dct\n\n\ndef to_crystal(dct: dict, keys=(\"genresults\", 0, \"stru_str\")) -> Crystal:\n \"\"\"Load the information in the dictionary to a crystasl object. The info is a string of cif file.\"\"\"\n with TemporaryDirectory() as temp_dir:\n cif_file = Path(temp_dir) / \"temp.cif\"\n cif_file.write_text(get_value(dct, keys))\n crystal = loadCrystal(str(cif_file))\n return crystal\n","sub_path":"pdfstream/parsers/ciffile.py","file_name":"ciffile.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"9959430","text":"import operator\nimport xapian\n\nfrom django.db import models\nfrom django.utils.functional import curry\n\nclass X(models.Q):\n pass\n\nclass CompositeDecider(xapian.MatchDecider):\n # operators map\n # lookup type: (operator, reverse operands)\n op_map = {\n 'exact': (operator.eq, False),\n 'in': (operator.contains, True),\n 'gt': (operator.gt, False),\n 'gte': (operator.ge, False),\n 'lt': (operator.lt, False),\n 'lte': (operator.le, False),\n }\n\n def __init__(self, model, tags, filter, exclude):\n xapian.MatchDecider.__init__(self)\n\n self._model = model\n self._tags = tags\n self._values_map = dict([(t.prefix, t.number) for t in tags])\n self._filter = filter\n self._exclude = exclude\n\n def __call__(self, document):\n if self._filter and not self._do_x(self._filter, document):\n return False\n\n if self._exclude and self._do_x(self._exclude, document):\n return False\n\n return True\n\n def get_tag(self, index):\n for tag in self._tags:\n if tag.number == index:\n return tag\n raise ValueError(\"No tag with number '%s'\" % index)\n\n def _do_x(self, field, document):\n for child in field.children:\n if isinstance(child, X):\n result = self._do_x(child, document)\n else:\n result = self._do_field(child[0], child[1], document)\n\n if (result and field.connector == 'OR')\\\n or (not result and field.connector == 'AND'):\n break\n\n if field.negated:\n return not result\n else:\n return result\n\n def _do_field(self, lookup, value, document):\n if '__' in lookup:\n field, op = lookup.split('__', 1)\n else:\n field, op = lookup, 'exact'\n\n if op not in self.op_map:\n raise ValueError(\"Unknown lookup operator '%s'\" % op)\n\n op, reverse = self.op_map[op]\n\n doc_value = document.get_value(self._values_map[field])\n\n convert = curry(\n self.get_tag(self._values_map[field]).convert,\n model=self._model\n )\n\n if isinstance(value, (list, tuple)):\n value = map(convert, value)\n else:\n value = convert(value)\n\n operands = [\n doc_value,\n value,\n ]\n\n if reverse:\n operands.reverse()\n\n return reduce(op, operands)\n","sub_path":"djapian/decider.py","file_name":"decider.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"274219244","text":"#!/usr/bin/python\nimport sys\nimport 
os\nsys.path.insert(0,\"/usr/local\")\nclass PlotParameters :\n probeName = ['ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF']\n confidence = False\n aggregate = False\n originalPlots = False\n aggrParam = ''\n fileName = 'experiment3_throughput_clients'\n type = 'Param'\n campaignId = '2136'\n xLabel = 'load'\n yLabel = 'mean of Aggregated throughput [Bit/s]'\n confidenceLevel = 0.95\n yLabel = 'mean of Aggregated throughput [Bit/s]'\n parameterName = 'load'\n probeEntry = 'mean'\n useXProbe = False\n useYProbe = True\n filterExpression = 'load in [0.01, 0.025, 0.0375, 0.05, 0.075, 0.1, 0.125, 0.15, 0.175, 0.2, 0.225, 0.25, 0.3, 0.4, 0.5, 0.75, 1.0] and experiment in [\"Experiment2\", \"Experiment3\"] and nStations in [2, 10, 50]'\n scaleFactorX = 1 #1/1e6 #bit to MBit\n scaleFactorY = 1 #1/1e6 #bit to MBit\n doClip = True\n minX = 0.0 * scaleFactorX \n maxX = 1.0 * scaleFactorX \n minY = 0.0 * scaleFactorY \n maxY = 2500000.0 * scaleFactorY \n moveX = 0\n moveY = 0\n grid = (True, False, True, False)\n scale = ('linear', None, 'linear', None)\n marker = '.'\n legend = False\n legendPosition = 'best' #alternatives: upper right, upper left, lower left, lower right, right, center left, center right, lower center, upper center, center or (x,y) with x,y in [0-1]\n showTitle = False\n figureTitle = 'Parameter Figure '\n color = True\n legendLabelMapping = {\n \"ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF; experiment: Experiment2; nStations: 2\":\"ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF; experiment: Experiment2; nStations: 2\" , #graph 0\n \"ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF; experiment: Experiment2; nStations: 10\":\"ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF; experiment: Experiment2; nStations: 10\" , #graph 1\n \"ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF; experiment: Experiment2; nStations: 50\":\"ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF; experiment: Experiment2; nStations: 50\" , #graph 2\n \"ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF; experiment: Experiment3; nStations: 2\":\"ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF; experiment: Experiment3; nStations: 2\" , #graph 3\n \"ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF; experiment: Experiment3; nStations: 10\":\"ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF; experiment: Experiment3; nStations: 10\" , #graph 4\n \"ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF; experiment: Experiment3; nStations: 50\":\"ip.endToEnd.window.aggregated.bitThroughput_DLL.StationType_client_PDF; experiment: Experiment3; nStations: 50\" , #graph 5\n }\n plotOrder = [0, 1, 2, 3, 4, 5]\n #plotOrder = [0,2,1,3] # plot first graph 0 , then graph 2 , ...\n color_styles = ['b-', 'g-', 'r-', 'c-', 'm-','b--', 'r--', 'g--', 'c--', 'm--']\n bw_markers = ['+','.','*','x','o','v','^','<','>','s','p','*','h','H','D','d',',','|']\n #additional plots are defined as done here\n additional_plots = [\n #{'x': [1,400], 'y':[200,200], 'label':'a horizontal line' , 'style':'b--'},\n #{'x': [1,400], 'y':[1*.98,400*0.98], 'label':'98 % line' , 'style':'g--'}\n ]\n outputFormats = [ 'png', 'pdf']\n\nimport openwns.wrowser.FigurePlotter\nif __name__ == '__main__': 
openwns.wrowser.FigurePlotter.loadCampaignAndPlotGraphs(PlotParameters)\n\n","sub_path":"usersGuide/modules/dll/glue/funTutorial/images/results/experiment3_throughput_clients.py","file_name":"experiment3_throughput_clients.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"543932159","text":"#author yang\n\n#create the hash table of a simple directed acyclic graph(DAG)\ngraph={}\ngraph[\"start\"]={}\ngraph[\"start\"][\"a\"]=6\ngraph[\"start\"][\"b\"]=2\ngraph[\"a\"]={}\ngraph[\"a\"][\"fin\"]=1\ngraph[\"b\"]={}\ngraph[\"b\"][\"a\"]=3\ngraph[\"b\"][\"fin\"]=5\ngraph[\"fin\"]={}\n\n#create the hash table of costs from the start node.\n\ncosts={}\n#the expression of infinity in python.\ninfinity=float(\"inf\")\ncosts[\"a\"]=6\ncosts[\"b\"]=2\ncosts[\"fin\"]=infinity\n\n#create a hash table of parent nodes\nparents={}\nparents[\"a\"]=\"start\"\nparents[\"b\"]=\"start\"\nparents[\"fin\"]=None\n\n#create an array of storing processed nodes in costs hash tables;\nprocessed=[]\n\n#the function to find the lowest cost node in costs hash table\ndef find_lowest_cost_node(costs):\n lowest_cost = float(\"inf\")\n lowest_cost_node = None\n for node in costs:\n cost=costs[node]\n if cost<lowest_cost and node not in processed:\n lowest_cost=cost\n lowest_cost_node=node\n return lowest_cost_node\n\nnode=find_lowest_cost_node(costs)\nwhile node is not None:\n cost=costs[node]\n neighbors=graph[node]\n for n in neighbors.keys():\n new_cost=cost+neighbors[n]\n if costs[n]>new_cost:\n costs[n]=new_cost\n parents[n]=node\n processed.append(node)\n node=find_lowest_cost_node(costs)\n\n\n\n\n\n","sub_path":"algorithm/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"553316511","text":"#\n# @lc app=leetcode.cn id=283 lang=python3\n#\n# [283] Move Zeroes\n#\n# https://leetcode-cn.com/problems/move-zeroes/description/\n#\n# algorithms\n# Easy (57.81%)\n# Likes: 446\n# Dislikes: 0\n# Total Accepted: 87.1K\n# Total Submissions: 150K\n# Testcase Example: '[0,1,0,3,12]'\n#\n# Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.\n# \n# Example:\n# \n# Input: [0,1,0,3,12]\n# Output: [1,3,12,0,0]\n# \n# Note:\n# \n# \n# You must do this in-place without making a copy of the array.\n# Minimize the total number of operations.\n# \n# \n#\n\n# @lc code=start\nfrom typing import List\n\n\nclass Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n\n j = 0\n\n for i, num in enumerate(nums):\n if num:\n nums[i], nums[j] = nums[j], nums[i]\n j += 1\n\n# @lc code=end\n","sub_path":"easy/283.移动零.py","file_name":"283.移动零.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"119754449","text":"#!/usr/bin/env python\nimport boto3\nimport time\n\ncohesitycluster = 'mycluster'\naccesskey = 'Oorht1mLTr39TmsxWTh3PQsHsVYfjGVWKgoXd0kAtqQ'\nsecretkey = '_xRTQHHrAMDB3g7hQAJktlu6aevwQ-MERzlw8B7RwKrk'\nviewname = 'mys3view'\nfilename = '200MB.zip'\n\ns3 = boto3.resource('s3',\n endpoint_url='https://%s:3000' % cohesitycluster,\n aws_access_key_id=accesskey,\n aws_secret_access_key=secretkey)\n\nbucket = s3.Bucket(viewname)\n\n# list contents of bucket\nfor obj in bucket.objects.all():\n print(obj.key)\n\n# download file\nstart = time.time()\ntry:\n s3.Object(viewname, filename).download_file(filename)\nexcept Exception as e:\n pass\nelapsed_time = time.time() - start\nmilli_secs = int(round(elapsed_time * 1000))\nprint(milli_secs)\n","sub_path":"python/s3test/s3download.py","file_name":"s3download.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"331801895","text":"# image handling API\nimport 
os\nimport random\nfrom . import db\nfrom bson.binary import Binary\nfrom cStringIO import StringIO\n\nimport Image as PIL # too many image variables...\n\ndef add_image(data, img_format):\n img_binary = Binary(data)\n pic_id = db.images.count()\n new_img = {'img_format' : img_format,\n 'img_data' : img_binary, \n 'comments' : [],\n 'pic_id' : pic_id}\n db.images.insert(new_img)\n return pic_id\n\ndef get_image(num, for_thumbnail=False):\n img = db.images.find_one({\"pic_id\" : num})\n # sending the binary data in a response will work \n img_binary_data = img['img_data']\n if for_thumbnail:\n # resize the image for a thumbnail\n width = 100\n height = 100\n # need to wrap the binary img data in a StringIO for PIL to be able to read it\n img_IO = StringIO(img_binary_data)\n PIL_img = PIL.open(img_IO)\n resized_img = PIL_img.resize((width, height), PIL.ANTIALIAS)\n output = StringIO()\n # format will only be PNG since that is the only image format accepted\n resized_img.save(output, format='PNG')\n return output.getvalue()\n else:\n return img_binary_data\n\ndef get_latest_image():\n last_id = db.images.count() - 1\n img = db.images.find_one({\"pic_id\" : last_id} )\n return img['img_data']\n \ndef add_comment(pic_id, comment):\n db.images.update({\"pic_id\" : pic_id}, {\"$push\" : {\"comments\": comment}}) \n\ndef get_comments(pic_id):\n img = db.images.find_one({\"pic_id\" : pic_id})\n return img[\"comments\"]\n\ndef get_num_of_images():\n return db.images.count()\n","sub_path":"flask_app/flask_imageapp/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"226172761","text":"from django.urls import path, re_path\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\napp_name = 'shop'\n\n\nurlpatterns = [\n path('', views.product_list, name='product_list'),\n path('search', views.search_result_view, name='search_results'),\n path('shop//', views.product_list,\n name='product_list_by_category'),\n path('shop///', views.product_detail,\n name='product_detail'),\n]\n\nif settings.DEBUG:\n urlpatterns += static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n urlpatterns += static(\n settings.STATIC_URL, document_root=settings.STATIC_ROOT\n )\n","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"490262308","text":"import numpy as np\r\nimport h5py\r\n\r\ndef load_data() :\r\n train_data = h5py.File('train_catvnoncat.h5', \"r\")\r\n\r\n train_set_x_orig = np.array(train_data[\"train_set_x\"][:])\r\n train_set_y_orig = np.array(train_data[\"train_set_y\"][:])\r\n\r\n test_data = h5py.File('test_catvnoncat.h5', \"r\")\r\n\r\n test_set_x_orig = np.array(test_data[\"test_set_x\"][:])\r\n test_set_y_orig = np.array(test_data[\"test_set_y\"][:])\r\n\r\n classes = np.array(test_data[\"list_classes\"][:])\r\n\r\n\r\n train_set_y_orig = train_set_y_orig.reshape(1, train_set_y_orig.shape[0])\r\n test_set_y_orig = test_set_y_orig.reshape(1, test_set_y_orig.shape[0])\r\n\r\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes\r\n","sub_path":"load_dataset.py","file_name":"load_dataset.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"493966914","text":"def 
from_binary_to_base_ten(Pineapple):\n power = len(Pineapple) - 1\n result = counter = 0\n binary_dictionary = {0:0, 1:1}\n for a in Pineapple:\n result += (binary_dictionary[int(Pineapple[counter])])*(2**power)\n power -= 1\n counter += 1\n print(result)\n\ndef from_base_ten_to_binary(Pineapple):\n Pineapple = int(Pineapple)\n result = \"\"\n while Pineapple:\n result = \"01\"[Pineapple % 2] + result\n Pineapple -= 1\n Pineapple = int(-~Pineapple/2)\n print (result or 0)\n\ndef from_base_ten_to_balanced_ternary(Pineapple):\n Pineapple = Nineapple = int(Pineapple)\n result = negative_result = \"\"\n if Pineapple < 0:\n Pineapple *= -1\n while Pineapple:\n result = \"0+-\"[Pineapple % 3] + result\n Pineapple = int(-~Pineapple/3)\n if Nineapple < 0:\n for x in result:\n negative_result += \"-\" if x == \"+\" else \"+\" if x == \"-\" else \"0\"\n print (negative_result)\n else: print(result or 0)\n\ndef from_balanced_ternary_to_base_ten(Pineapple):\n power = len(Pineapple) - 1\n result = counter = 0\n balanced_ternary_dictionary = {\"+\":1, \"0\":0, \"-\":-1}\n for a in Pineapple:\n result += (balanced_ternary_dictionary[Pineapple[counter]]*(3**power))\n power -= 1\n counter += 1\n print (result)\n\ndef Jacob_Stupid_Face(Prompt, Supplied_Function):\n while 1:\n apple = input(Prompt)\n if apple == \"break\": return\n elif apple == \"quit\": quit()\n elif apple == \"help\":\n print(\"Enter 'break' to change modes (Return to menu)\\nEnter 'quit' to quit the program\\n\")\n return\n try:\n Supplied_Function(apple)\n except OverflowError:\n print(\"Error: Number too large\")\n pass\n except ValueError:\n print(\"Error: Invalid input\")\n pass\n except KeyError:\n print(\"Error: Invalid input\")\n pass\n\nwhile 1:\n print (\"Enter 'help' for help\\n(a) Base-10 to Binary\\n(b) Binary to Base-10\\n(c) Base-10 to Balanced Ternary\\n(d) Balanced Ternary to Base-10\")\n option = input().lower()\n if option == \"quit\":\n quit()\n elif option == \"help\":\n print (\"\\nEnter 'break' to change modes (Return to menu)\\nEnter 'quit' to quit the program\\n\")\n elif option == \"a\":\n Jacob_Stupid_Face(\"Base-10: \", from_base_ten_to_binary)\n elif option == \"b\":\n Jacob_Stupid_Face(\"Binary: \", from_binary_to_base_ten)\n elif option == \"c\":\n Jacob_Stupid_Face(\"Base-10: \", from_base_ten_to_balanced_ternary)\n elif option == \"d\":\n Jacob_Stupid_Face(\"Balanced Ternary: \", from_balanced_ternary_to_base_ten)","sub_path":"JayRequizoCodeA/Sandbox/Random Stuff/BalancedTernary_Base10 Converter (1).py","file_name":"BalancedTernary_Base10 Converter (1).py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"507445126","text":"import os\nimport numpy as np\nimport pickle\nimport random\n#import concurrent.futures\nfrom multiprocessing import Pool, TimeoutError, cpu_count\nfrom multiprocessing.dummy import Pool as DummyPool\n\nfrom iot_hub import DeviceManager, D2CMessageSender\nfrom device import Device\nfrom azure.storage.table import TableService, Entity, TablePermissions\nimport datetime\nimport time\n\nSTORAGE_ACCOUNT_NAME = os.environ['STORAGE_ACCOUNT_NAME']\nSTORAGE_ACCOUNT_KEY = os.environ['STORAGE_ACCOUNT_KEY']\n\nIOT_HUB_NAME = os.environ['IOT_HUB_NAME']\nIOT_HUB_OWNER_KEY = os.environ['IOT_HUB_OWNER_KEY']\nIOT_HUB_DEVICE_KEY = os.environ['IOT_HUB_DEVICE_KEY']\n\ntable_service = TableService(account_name=STORAGE_ACCOUNT_NAME, account_key=STORAGE_ACCOUNT_KEY)\n\nownerConnectionString 
='HostName=%s.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=%s' % (IOT_HUB_NAME, IOT_HUB_OWNER_KEY)\ndeviceConnectionString = 'HostName=%s.azure-devices.net;SharedAccessKeyName=device;SharedAccessKey=%s' % (IOT_HUB_NAME, IOT_HUB_DEVICE_KEY)\n\ndm = DeviceManager(ownerConnectionString)\n\nsender = D2CMessageSender(deviceConnectionString)\n\ndef get_target_speed(device):\n asset = table_service.get_entity('equipment', device.make, device.device_id)\n return asset['Speed']\n\ndef process(device):\n # sending iothub message\n pl = pickle.dumps(device.next_state())\n return sender.sendD2CMsg(device.device_id, pl)\n\n\nif __name__ == '__main__':\n\n assets = table_service.query_entities('equipment')\n \n for asset in assets:\n deviceId = asset.RowKey\n dm.createDeviceId(deviceId)\n print(dm.retrieveDeviceId(deviceId))\n\n print(dm.listDeviceIds())\n\n devices = []\n for asset in assets:\n devices.append(\n Device(asset.RowKey, make=asset.PartitionKey, W = (1, 2, 3, 4, 5, 12, 15), A = (5, 8, 2/3, 9, 8, 13, 5))\n )\n\n devices[3].pressure_factor = 1.5\n print('CPU count: {0}'.format(cpu_count()))\n pool = Pool(processes=cpu_count())\n dummyPool = DummyPool(100)\n\n async_result = None\n target_speeds = None\n\n #for _ in range(19):\n while True:\n interval_start = time.time()\n\n states = None\n\n if async_result is None:\n #start = time.time()\n async_result = dummyPool.map_async(get_target_speed, devices)\n \n if target_speeds is None or async_result.ready():\n target_speeds = async_result.get()\n async_result = None\n\n print(target_speeds)\n\n for device, target_speed in zip(devices, target_speeds):\n device.set_speed((target_speed + device.get_speed()) / 2)\n\n start = time.time()\n devices = pool.map(Device.next_state_device, devices)\n end = time.time()\n print(end - start)\n\n start = time.time()\n dummyPool.starmap(process, zip(devices))\n end = time.time()\n print(end - start)\n\n time_elapsed = time.time() - interval_start\n print('Cadence: {0}'.format(time_elapsed))\n time.sleep(max(1 - time_elapsed, 0))\n\n # for device, state in zip(devices, states):\n # process(device.device_id, state)\n # from scipy.io.wavfile import write\n # write('test-{0}.wav'.format(device.device_id), 8000, wave_data)\n","sub_path":"src/WebApp/App_Data/jobs/continuous/Simulator/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"560504516","text":"from unittest import TestCase\nfrom unittest import main\n\nfrom reversi import Game\nfrom reversi import Position\nfrom reversi import BLACK\nfrom reversi import WHITE\nfrom reversi.direction import (LEFT,\n RIGHT,\n TOP,\n BOTTOM,\n TOP_LEFT,\n TOP_RIGHT,\n BOTTOM_LEFT,\n BOTTOM_RIGHT)\n\ndefault_board = \"\"\"\n. . . . . . . .\n. . . . . . . .\n. . . . . . . .\n. . . B W . . .\n. . . W B . . .\n. . . . . . . .\n. . . . . . . .\n. . . . . . . 
.\n\"\"\"\n\ndefault_player = BLACK\n\ndefault_game = default_board + default_player\n\n\nclass TestGame(TestCase):\n def test_current_player_returns_BLACK_on_black_s_turn(self):\n game = Game(default_game)\n\n player = game.current_player()\n\n self.assertEqual(player, BLACK)\n\n def test_current_player_returns_WHITE_on_white_s_turn(self):\n game = Game(default_board + WHITE)\n\n player = game.current_player()\n\n self.assertEqual(player, WHITE)\n\n def test_current_opponent_returns_WHITE_on_black_s_turn(self):\n game = Game(default_game)\n\n opponent = game.current_opponent()\n\n self.assertEqual(opponent, WHITE)\n\n def test_current_opponent_returns_BLACK_on_white_s_turn(self):\n game = Game(default_board + WHITE)\n\n opponent = game.current_opponent()\n\n self.assertEqual(opponent, BLACK)\n\n def test_player_at_returns_the_player_at_the_given_position(self):\n game = Game(default_game)\n\n player = game.player_at('D4')\n\n self.assertEqual(player, BLACK)\n\n def test_player_at_returns_None_when_no_player_at_the_given_position(self):\n game = Game(default_game)\n\n player = game.player_at('C4')\n\n self.assertEqual(player, None)\n\n def test_neighbour_opponent_positions(self):\n game = Game(default_game)\n\n positions = game.neighbour_opponent_positions('D4')\n\n self.assertEqual(positions, set(('D5', 'E4')))\n\n def test_left_valid_position_when_opponant_exists(self):\n game = Game(default_game)\n position = game.valid_position_in_direction('E5', LEFT)\n\n self.assertEqual(position, 'C5')\n\n def test_left_valid_position_when_no_opponant_exists(self):\n game = Game(default_game)\n position = game.valid_position_in_direction('D4', LEFT)\n\n self.assertIsNone(position)\n\n def test_right_valid_position_when_opponant_exists(self):\n game = Game(default_game)\n position = game.valid_position_in_direction('D4', RIGHT)\n\n self.assertEqual(position, 'F4')\n\n def test_top_valid_position_when_opponant_exists(self):\n game = Game(default_game)\n position = game.valid_position_in_direction('E5', TOP)\n\n self.assertEqual(position, 'E3')\n\n def test_bottom_valid_position_when_opponant_exists(self):\n game = Game(default_game)\n position = game.valid_position_in_direction('D4', BOTTOM)\n\n self.assertEqual(position, 'D6')\n\n def test_vertical_and_horizontal_valid_positions(self):\n game = Game(default_game)\n positions = game.valid_positions()\n\n self.assertEqual(positions, set(('C5', 'D6', 'E3', 'F4')))\n\n\nclass TestPosition(TestCase):\n\n def test_left_of_D4_is_C4(self):\n position = Position('D4') + LEFT\n\n self.assertEqual(position, 'C4')\n\n def test_left_of_A1_is_None(self):\n position = Position('A1') + LEFT\n\n self.assertEqual(position, None)\n\n def test_right_of_C4_is_D4(self):\n position = Position('C4') + RIGHT\n\n self.assertEqual(position, 'D4')\n\n def test_right_of_H8_is_None(self):\n position = Position('H8') + RIGHT\n\n self.assertEqual(position, None)\n\n def test_top_of_D4_is_D3(self):\n position = Position('D4') + TOP\n\n self.assertEqual(position, 'D3')\n\n def test_top_of_A1_is_None(self):\n position = Position('A1') + TOP\n\n self.assertEqual(position, None)\n\n def test_bottom_of_D4_is_D5(self):\n position = Position('D4') + BOTTOM\n\n self.assertEqual(position, 'D5')\n\n def test_bottom_of_H8_is_None(self):\n position = Position('H8') + BOTTOM\n\n self.assertEqual(position, None)\n\n def test_D4_neighbour_positions(self):\n d4 = Position('D4')\n\n neighbours = d4.neighbour_positions()\n\n self.assertEqual(\n neighbours,\n set(('C4', 'C3', 'D3', 'E3', 'E4', 'E5', 
'D5', 'C5'))\n )\n\n def test_A1_neighbour_positions(self):\n d4 = Position('A1')\n\n neighbours = d4.neighbour_positions()\n\n self.assertEqual(\n neighbours,\n set(('B1', 'B2', 'A2'))\n )\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"81863049","text":"import connexion\nimport six\nfrom openapi_server import query_manager\nfrom openapi_server.utils.vars import INTERVENTION_TYPE_NAME, INTERVENTION_TYPE_URI\n\nfrom openapi_server.models.intervention import Intervention # noqa: E501\nfrom openapi_server import util\n\ndef interventions_get(username=None, label=None, page=None, per_page=None): # noqa: E501\n \"\"\"List all instances of Intervention\n\n Gets a list of all instances of Intervention (more information in https://w3id.org/okn/o/sdm#Intervention) # noqa: E501\n\n :param username: Name of the user graph to query\n :type username: str\n :param label: Filter by label\n :type label: str\n :param page: Page number\n :type page: int\n :param per_page: Items per page\n :type per_page: int\n\n :rtype: List[Intervention]\n \"\"\"\n\n\n return query_manager.get_resource(\n username=username,\n label=label,\n page=page,\n per_page=per_page,\n rdf_type_uri=INTERVENTION_TYPE_URI,\n rdf_type_name=INTERVENTION_TYPE_NAME, \n kls=Intervention)\n\ndef interventions_id_delete(id, user=None): # noqa: E501\n \"\"\"Delete an existing Intervention\n\n Delete an existing Intervention (more information in https://w3id.org/okn/o/sdm#Intervention) # noqa: E501\n\n :param id: The ID of the Intervention to be retrieved\n :type id: str\n :param user: Username\n :type user: str\n\n :rtype: None\n \"\"\"\n\n\n return query_manager.delete_resource(id=id,\n user=user,\n rdf_type_uri=INTERVENTION_TYPE_URI,\n rdf_type_name=INTERVENTION_TYPE_NAME, \n kls=Intervention)\n\ndef interventions_id_get(id, username=None): # noqa: E501\n \"\"\"Get a single Intervention by its id\n\n Gets the details of a given Intervention (more information in https://w3id.org/okn/o/sdm#Intervention) # noqa: E501\n\n :param id: The ID of the Intervention to be retrieved\n :type id: str\n :param username: Name of the user graph to query\n :type username: str\n\n :rtype: Intervention\n \"\"\"\n\n\n return query_manager.get_resource(id=id,\n username=username,\n rdf_type_uri=INTERVENTION_TYPE_URI,\n rdf_type_name=INTERVENTION_TYPE_NAME, \n kls=Intervention)\n\ndef interventions_id_put(id, user=None, intervention=None): # noqa: E501\n \"\"\"Update an existing Intervention\n\n Updates an existing Intervention (more information in https://w3id.org/okn/o/sdm#Intervention) # noqa: E501\n\n :param id: The ID of the Intervention to be retrieved\n :type id: str\n :param user: Username\n :type user: str\n :param intervention: An old Interventionto be updated\n :type intervention: dict | bytes\n\n :rtype: Intervention\n \"\"\"\n\n if connexion.request.is_json:\n intervention = Intervention.from_dict(connexion.request.get_json()) # noqa: E501\n\n return query_manager.put_resource(id=id,\n user=user,\n body=intervention,\n rdf_type_uri=INTERVENTION_TYPE_URI,\n rdf_type_name=INTERVENTION_TYPE_NAME, \n kls=Intervention)\n\ndef interventions_post(user=None, intervention=None): # noqa: E501\n \"\"\"Create one Intervention\n\n Create a new instance of Intervention (more information in https://w3id.org/okn/o/sdm#Intervention) # noqa: E501\n\n :param user: Username\n :type user: 
str\n :param intervention: Information about the Interventionto be created\n :type intervention: dict | bytes\n\n :rtype: Intervention\n \"\"\"\n\n if connexion.request.is_json:\n intervention = Intervention.from_dict(connexion.request.get_json()) # noqa: E501\n\n return query_manager.post_resource(\n user=user,\n body=intervention,\n rdf_type_uri=INTERVENTION_TYPE_URI,\n rdf_type_name=INTERVENTION_TYPE_NAME, \n kls=Intervention)\n","sub_path":"server/openapi_server/controllers/intervention_controller.py","file_name":"intervention_controller.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"314668952","text":"from keras.datasets import reuters\nfrom keras.utils import to_categorical\nfrom keras import models\nfrom keras import layers\nimport numpy as np\n\n(train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=10000)\n\n\ndef vectorize_sequences(sequences, dimension=10000):\n results = np.zeros((len(sequences), dimension))\n for i, sequence in enumerate(sequences):\n results[i, sequence] = 1\n return results\n\n\ninput_train = vectorize_sequences(train_data)\ninput_test = vectorize_sequences(test_data)\n\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)\n\ninput_validation = input_train[:1000]\nsplit_input_train = input_train[1000:]\n\ntarget_validation = train_labels[:1000]\nsplit_target_train = train_labels[1000:]\n\nmodel = models.Sequential()\nmodel.add(layers.Dense(64, activation='relu', input_shape=(10000,)))\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dense(46, activation='softmax'))\n\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])\n\nhistory = model.fit(split_input_train,\n split_target_train,\n epochs=10,\n batch_size=512,\n validation_data=(input_validation, target_validation))\ntest_model = model.evaluate(input_test, test_labels)","sub_path":"Custom_NeuralNet/Custom Model with Reuters.py","file_name":"Custom Model with Reuters.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"452005424","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\ncreated by me for task1 to Write a program to reads in the FASTQ file, reverse\ncomplements each sequence, and writes the reverse complemented sequences out as\na FASTA-formatted file\n\"\"\"\nfrom Bio import SeqRecord\nfrom Bio import SeqIO\nimport argparse\n\n\ndef get_parser():\n \"\"\"\n using argparse to takes the list as input\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--inputfile\", required=True)\n parser.add_argument(\"--outputfile\", required=True)\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = get_parser()\n outfile = open(args.outputfile, \"w\")\n with open(args.inputfile, \"r\") as infile:\n for record in SeqIO.parse(infile, 'fastq'):\n SeqIO.write([SeqRecord.SeqRecord(\n record.seq.reverse_complement(),\n id=record.id, name=record.name,\n description=record.description)],\n outfile, 'fasta')\n\nif __name__ == '__main__':\n main()\n","sub_path":"answers/mforoozani1/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"318191557","text":"import os\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_iter', 
default=1000, type=int)\nparser.add_argument('--style_root', required=True, type=str)\nparser.add_argument('--checkpoints_dir', required=True, type=str)\nargs = parser.parse_args()\n\n# style_root = '/home/biomag/tivadar/pytorch-CycleGAN-and-pix2pix/datasets/csaba/styles'\n\nfor style in os.listdir(args.style_root):\n style_path = os.path.join(args.style_root, style)\n model_name = style\n command = \"\"\"\n python train.py \\\n --dataroot %s \\\n --name %s \\\n --model pix2pix \\\n --which_model_netG unet_256 \\\n --which_direction BtoA \\\n --lambda_A 100 \\\n --dataset_mode aligned \\\n --no_lsgan \\\n --norm batch \\\n --pool_size 0 \\\n --save_epoch_freq %d \\\n --niter %d \\\n --checkpoints_dir %s\n \"\"\" % (style_path, model_name, args.n_iter + 99,args.n_iter, args.checkpoints_dir)\n\n os.system(command)\n","sub_path":"biomag-kaggle/src/2_DL/style-transfer/pytorch-CycleGAN-and-pix2pix-etasnadi/learn_styles.py","file_name":"learn_styles.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"358366934","text":"import torch\nimport torch.nn.functional as F\nfrom scipy.ndimage import gaussian_filter\nfrom torch.nn import Module, Parameter\n\nfrom backbones import backbone_models, backbone_kinds\nfrom utils.dataloader_utils import get_dataloader, get_transformations\n\n\nclass PaDiMBase(Module):\n\n def __init__(self, params: dict, backbone_params: dict, device):\n super().__init__()\n\n self.device = device\n\n self.params = params\n self.crop_size = self.params[\"crop_size\"]\n\n self.backbone = backbone_models[backbone_params[\"backbone\"]](**backbone_params)\n backbone_kind = backbone_kinds[backbone_params[\"backbone\"]]\n\n if backbone_kind == \"vae\":\n state_dict = torch.load(backbone_params[\"pretrained_file_path\"], map_location=self.device)[\"state_dict\"]\n self.backbone.load_state_dict(state_dict, strict=False)\n\n self.backbone.to(device)\n self.backbone.eval()\n\n transform = get_transformations(backbone_kind=backbone_kind, crop_size=self.crop_size)\n\n normal_data_dataloader = get_dataloader(self.params, split=\"train\", abnormal_data=False, shuffle=True,\n transform=transform)\n\n test_batch = next(iter(normal_data_dataloader))[0].to(device)\n feature_1, feature_2, feature_3 = self.backbone(test_batch)\n\n self.number_of_patches = feature_1.size(2) * feature_1.size(3)\n embeddings_size = feature_1.size(1) + feature_2.size(1) + feature_3.size(1)\n\n if \"number_of_embeddings\" in self.params:\n self.number_of_embeddings = params[\"number_of_embeddings\"]\n\n self.embedding_ids = Parameter(\n torch.randperm(embeddings_size, device=self.device)[:self.number_of_embeddings],\n requires_grad=False\n )\n else:\n self.number_of_embeddings = embeddings_size\n\n self.embedding_ids = Parameter(\n torch.arange(0, self.number_of_embeddings, device=self.device),\n requires_grad=False\n )\n\n self.n = 0\n\n\n def calculate_means_and_covariances(self):\n raise NotImplementedError()\n\n def forward(self, x, min_max_norm: bool = True):\n raise NotImplementedError()\n\n def _calculate_dist_list(self, embedding, embedding_dimensions: tuple):\n raise NotImplementedError()\n\n def calculate_score_map(self, embedding, embedding_dimensions: tuple, min_max_norm: bool) -> torch.Tensor:\n dist_list = self._calculate_dist_list(embedding, embedding_dimensions)\n\n # Upsample\n score_map = F.interpolate(dist_list.unsqueeze(1), size=self.crop_size, mode='bilinear',\n 
align_corners=False).squeeze().cpu().numpy()\n\n # Apply gaussian smoothing on the score map\n for i in range(score_map.shape[0]):\n score_map[i] = gaussian_filter(score_map[i], sigma=4)\n\n # Normalization\n if min_max_norm:\n max_score = score_map.max()\n min_score = score_map.min()\n scores = (score_map - min_score) / (max_score - min_score)\n else:\n scores = score_map\n\n return torch.tensor(scores, device=self.device)\n","sub_path":"models/padim_base.py","file_name":"padim_base.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"358153152","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect,HttpResponse,JsonResponse\nfrom Article.models import *\nimport hashlib\nfrom django.core.paginator import Paginator\n# Create your views here.\n# password encryption\ndef setPassword(password):\n md5=hashlib.md5()\n md5.update(password.encode())\n result=md5.hexdigest()\n return result\n# registration\ndef register(requset):\n if requset.method==\"POST\":\n error_msg=''\n email=requset.POST.get('email')\n password=requset.POST.get(\"password\")\n if email:\n # check whether the email already exists\n loginuser=Author.objects.filter(email=email).first()\n if not loginuser:\n # does not exist, write it to the database\n user=Author()\n user.email=email\n user.name=email\n user.password=setPassword(password)\n user.user_type=0\n user.save()\n else:\n error_msg=\"This email is already registered, please log in\"\n else:\n error_msg=\"The email cannot be empty\"\n return render(requset,'article/register.html',locals())\n# login decorator\ndef LoginVaild(func):\n #1. get username and email from the cookies\n #2. check username and email\n #3. on success, redirect\n #4. on failure, login.html\n def inner(request,*args,**kwargs):\n username=request.COOKIES.get('username')\n\n # get the session\n session_username=request.session.get('username')\n if username and session_username and username==session_username:\n return func(request,*args,**kwargs)\n else:\n return HttpResponseRedirect('/Article/login/')\n return inner\n# login\n\nimport datetime\nimport time\ndef login(request):\n if request.method == \"POST\":\n error_msg = \"\"\n email = request.POST.get(\"email\")\n password = request.POST.get(\"password\")\n if email:\n user = Author.objects.filter(email=email,user_type=0).first()\n if user:\n ## the user exists\n if user.password == setPassword(password):\n ## login succeeded\n ## redirect to another page\n # error_msg = \"login succeeded\"\n # return HttpResponseRedirect('/index/')\n ## set the cookies\n response = HttpResponseRedirect(\"/Article/index/\")\n response.set_cookie(\"username\",user.name)\n response.set_cookie(\"userid\",user.id)\n request.session['username'] = user.name ## set the session\n return response\n else:\n error_msg = \"Wrong password\"\n else:\n error_msg = \"The user does not exist\"\n else:\n error_msg = \"The email cannot be empty\"\n return render(request,\"article/login.html\",locals())\n\n# home page\n@LoginVaild\ndef index(request):\n # loginuser=LoginUser.objects.get(id=1)\n return render(request,'article/index.html')\n # return render(request, 'vuedemo.html')\n# logout\ndef logout(request):\n # delete the cookies and the session\n respose=HttpResponseRedirect('/Article/login/')\n # respose.delete_cookie('kename')\n keys=request.COOKIES.keys()\n for one in keys:\n respose.delete_cookie(one)\n del request.session['username']\n return respose\n# goods list, with pagination:\ndef goods_list(request,status,page=1):\n\n \"\"\"\n :param request:\n :param status: whether to fetch goods on sale or off the shelf; pass 1 for on sale, 0 for off the shelf\n :param page: page number\n :return:\n \"\"\"\n # first time\n page=int(page)\n # second time\n\n user_id = request.COOKIES.get(\"userid\")\n print(user_id)\n # user = Author.objects.filter(id=user_id).first()\n user=Author.objects.get(id=user_id)\n print(user)\n # get the author of an article: does not work\n # writer = Article.objects.filter()\n # get the author's articles\n\n if status == \"0\":\n # goods off the shelf\n goods_obj = user.article_set.filter(status=0).order_by(\"-id\")\n # goods_obj = Article.objects.filter(status=0).order_by('id')\n else:\n # goods on sale\n goods_obj = user.article_set.filter(status=1).order_by(\"-id\")\n # goods_obj = Article.objects.filter(status=1).order_by('id')\n\n # ar=user.article_set.all()\n # print(ar)\n # ar=user.Article_set.all() wrong\n\n # for one in ar:\n # if one.status==\"0\":\n # # goods off the shelf\n # goods_obj=ar.\n # goods_obj=Article.objects.filter(status=0).order_by('id')\n # else:\n # # goods on sale\n # goods_obj=Article.objects.filter(status=1).order_by('id')\n\n\n # goods_obj=Goods.objects.all().order_by('goods_number')\n # print(goods_obj)\n goods_all=Paginator(goods_obj,10)\n goods_list=goods_all.page(page)\n # goods_list=Goods.objects.all()\n # no need to return data; index is now the page being visited\n # return render(request, \"vue_goods_list.html\")\n return render(request,'article/goods_list.html',locals())\n## goods status\ndef goods_status(request,status,id):\n# \"\"\"\n# to take goods off the shelf, set status to 0\n# to put them on sale, set status to 1\n# the status argument passed in is up or down\n# :param request:\n# :param status: operation, up = put on sale, down = take off the shelf\n# :param id: goods id\n# :return:\n# \"\"\"\n id=int(id)\n goods=Article.objects.get(id=id)\n if status==\"up\":\n # put on sale\n goods.status=1\n else:\n goods.status=0\n goods.save()\n# after the operation on this object completes and the status is changed, the page should stay on the current page\n # return HttpResponseRedirect('/goods_list/1/1/')\n url=request.META.get(\"HTTP_REFERER\",\"/Article/goods_list/1/1/\")\n return HttpResponseRedirect(url)\n# personal info\n@LoginVaild\ndef personal_info(request):\n user_id=request.COOKIES.get(\"userid\")\n print(user_id)\n user=Author.objects.filter(id=user_id).first()\n if request.method==\"POST\":\n # get the data and save it\n data=request.POST\n print(data.get(\"email\"))\n user.name=data.get(\"username\")\n user.phone_number=data.get(\"phone_number\")\n user.age=data.get(\"age\")\n user.gender=data.get(\"gender\")\n user.age=data.get(\"age\")\n user.gender = data.get(\"gender\")\n user.address = data.get(\"address\")\n print(request.FILES.get('photo'))\n user.photo = request.FILES.get(\"photo\")\n user.save()\n print (data)\n return render(request,\"article/personal_info.html\",locals())\n\n# seller adds goods\n@LoginVaild\ndef goods_add(request):\n ## handle the POST request: get the data, save it, return a response\n goods_type=Type.objects.all()\n if request.method==\"POST\":\n data=request.POST\n print(data)\n goods=Article()\n goods.title = data.get(\"title\")\n goods.content = data.get(\"content\")\n goods.description = data.get(\"description\")\n goods.recommend = data.get(\"recommend\")\n goods.click = data.get(\"click\")\n goods.picture=request.FILES.get('picture')\n goods.status=1\n goods.save()\n # the fields above are saved first\n # below, add the type foreign key\n # this is the column the foreign key is added to\n goods_type=request.POST.get(\"goods_type\")# the value of a select tag is a string\n print(goods_type)\n goods_type=[goods_type]\n # two ways to do a forward query\n # first way: the foreign key equals the one above\n # goods.goods_type_id=int(goods_type)\n # goods.type=Type.objects.get(id=goods_type)# save the type\n goods.type.set(goods_type)\n # save the shop\n # get the user info from the cookies\n user_id=request.COOKIES.get(\"userid\")\n goods.author=Author.objects.get(id=user_id)\n goods.save()\n return render(request,\"article/goods_add.html\",locals())\n\n\n\n\n\n","sub_path":"ArticleBlog/Article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"190666593","text":"import os\nimport json\nimport 
copy\nimport requests\nimport pattern.en\n\nfrom flask import current_app as app\nfrom server.models import Word, Category\nfrom server.database import db\n\n\n# adds words to memory if they don't already exist\ndef add_words_to_memory(words):\n db_words = []\n\n if app.config['DEBUG']:\n print('********************\\n')\n print('Now adding words to the database...\\n')\n\n for w in words:\n\n # checks for existing word and creates one if needed\n word = Word.query.filter_by(string=w).first()\n\n # checks if conjugated\n conjugated = False\n infinitive = pattern.en.conjugate(w, 'INFINITIVE')\n if infinitive != w:\n conjugated = True\n\n # checks if plural\n plural = False\n singularized = pattern.en.singularize(w)\n if singularized != w:\n plural = True\n w = singularized\n\n if not word:\n\n # adds the word to the database\n word = Word(string=w)\n db.session.add(word)\n\n # adds the plural form\n if plural:\n plural_word = Word(string=pattern.en.pluralize(w))\n db.session.add(plural_word)\n\n # gets the word from oxford dictionaries api\n url = f\"https://od-api.oxforddictionaries.com/api/v2/entries/en-us/{w.lower()}\"\n r = requests.get(\n url,\n headers={\n \"app_id\": app.config['OXFORD_APP_ID'],\n \"app_key\": app.config['OXFORD_APP_KEY']\n }\n )\n\n # to json\n r = r.json()\n\n # gets the lexical categories\n for entry in r['results'][0]['lexicalEntries']:\n\n # sets the lexical category\n category = entry['lexicalCategory']['id']\n\n # checks for existing category\n c = Category.query.filter_by(string=category).first()\n if not c:\n c = Category(string=category)\n db.session.add(c)\n\n word.categories.append(c)\n\n # handles plural\n if plural:\n plural_word.categories.append(c)\n\n if app.config['DEBUG']:\n print(f'Added {word} to the {category} category')\n\n # plural\n if plural:\n print(\n f'Added {plural_word} to the {category} category')\n\n # adds verb category if conjugated\n if conjugated:\n verb = Category.query.filter_by(string='verb').first()\n\n if not verb:\n verb = Category(string='verb')\n db.session.add(verb)\n\n word.categories.append(verb)\n\n if app.config['DEBUG']:\n print(f'Added {word} to the {verb} category')\n\n db.session.commit()\n\n if app.config['DEBUG']:\n print(f'Added {word} to the database')\n\n # debug\n else:\n\n if app.config['DEBUG']:\n print(f'Already have {word} in the database')\n\n db_words.append(word)\n\n if app.config['DEBUG']:\n print('\\n')\n\n return db_words\n","sub_path":"server/actions/memory/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"336139979","text":"from unittest import mock\nfrom .base import VaultHelperTest\nimport vaulthelpers\n\n\nclass VaultAuthenticationTest(VaultHelperTest):\n\n @mock.patch('os.getpid')\n @mock.patch('hvac.Client')\n def test_process_forking(self, Client, getpid):\n # Mock the PID, so that we can test process forking\n getpid.return_value = 1\n\n # Construct a mock Vault client\n mockClient = mock.MagicMock()\n Client.return_value = mockClient\n\n # Client should not have been created yet\n self.assertEqual(Client.call_count, 0)\n\n # Construct a client\n vaulthelpers.common.get_vault_auth().authenticated_client()\n\n # Client should have been created called once\n self.assertEqual(Client.call_count, 1)\n\n # Construct a client again\n vaulthelpers.common.get_vault_auth().authenticated_client()\n\n # Existing client should have been recycled, rather than recreated\n 
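# NOTE: illustrative sketch (an assumption, not vaulthelpers' actual source)
# of the PID-keyed cache this test exercises: a module-level client is reused
# until os.getpid() changes, which signals a fork and forces a rebuild so the
# parent and child processes never share one client and its sockets.
#
#     _client, _client_pid = None, None
#
#     def authenticated_client():
#         global _client, _client_pid
#         if _client is None or _client_pid != os.getpid():
#             _client = hvac.Client()          # hypothetical: rebuild after fork
#             _client_pid = os.getpid()
#         return _client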
self.assertEqual(Client.call_count, 1)\n\n # Change the PID, simulating the process getting forked after a client already exists\n getpid.return_value = 2\n\n # Construct a client again\n vaulthelpers.common.get_vault_auth().authenticated_client()\n\n # Since the PID changed, the existing client should have been discarded and a new one created.\n self.assertEqual(Client.call_count, 2)\n\n\n def test_token_recycling(self):\n # Construct a client and record the token it gets after logging in\n vcl = vaulthelpers.common.get_vault_auth().authenticated_client()\n token_A = vcl.token\n\n # Construct a client again and get it's token.\n vcl = vaulthelpers.common.get_vault_auth().authenticated_client()\n token_B = vcl.token\n\n # Token should be reused, not regenerated.\n self.assertEqual(token_A, token_B)\n\n # Force the client to be destroyed, then create it again.\n vaulthelpers.common.reset_vault()\n vcl = vaulthelpers.common.get_vault_auth().authenticated_client()\n token_C = vcl.token\n\n # Token should be reused again, this time due to file caching.\n self.assertEqual(token_A, token_C)\n\n # Purge the file cache, force the client to be destroyed, then create the client again.\n vaulthelpers.common.get_vault_auth().purge_token_cache()\n vaulthelpers.common.reset_vault()\n vcl = vaulthelpers.common.get_vault_auth().authenticated_client()\n token_D = vcl.token\n\n # Token should be different this time.\n self.assertNotEqual(token_A, token_D)\n\n # Once more, destroy the client and get a token\n vaulthelpers.common.reset_vault()\n vcl = vaulthelpers.common.get_vault_auth().authenticated_client()\n token_E = vcl.token\n\n # Cache should have recycled token_D again\n self.assertEqual(token_D, token_E)\n","sub_path":"src/vaulthelpers/tests/test_authentication.py","file_name":"test_authentication.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"74084786","text":"\n# coding: utf-8\n\n# In[1]:\n\n#initializing everything\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn import preprocessing\nfrom sklearn.cross_validation import KFold\nimport scipy as sp\nimport pandas as pd\nimport numpy as np\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import SVC\nfrom sklearn import svm\nfrom sklearn.metrics import accuracy_score\nimport random\nimport requests \nfrom yandex_translate import YandexTranslate\nfrom scipy.spatial.distance import cosine\nimport gensim\nimport re\nfrom nltk.corpus import stopwords \nimport nltk \nfrom random import shuffle\n\npath = \"c:\\\\hackaton\\\\files\\\\\"\npunct = ['.', ',', ')', '(', '/', ';', ':', '?', '!', '*', '-', '[', ']', '+', '$', ',']\n#nltk.download(\"stopwords\") \nstop_words = stopwords.words(\"russian\")\n\n# In[ ]:\n\n# подготовка датасета (ОБУЧЕНИЕ)\ndef prepare_dataset(filename):\n df = pd.read_csv(filename, header=None, sep='\\t')\n df['splitted'] = df[1].astype(str).map(lambda x : x.split(\" \"))\n print('Dataset has been prepared')\n return df\n\n\n# In[ ]:\n\n# получаем тэги\ndef get_tags(image_url):\n api_key = '' \n api_secret = '' \n response = requests.get('https://api.imagga.com/v1/tagging?url=%s' % image_url, auth=(\"acc_b11e78b93103f70\", \"16f8377cec177e135a427a67533d9442\")) \n results = response.json()['results']\n tags = results[0]['tags']\n strtags = []\n for i in range(1,len(tags)): \n strtags.append(tags[i]['tag'])\n print('Tags from image have been collected')\n 
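# NOTE: the Imagga v1 response shape assumed by the parsing above
# (reconstructed from the indexing, not taken from Imagga's docs):
#     {'results': [{'tags': [{'tag': 'person', 'confidence': 99.1}, ...]}]}
# The loop starts at range(1, len(tags)), so the first (top-confidence) tag
# in tags[0] is silently dropped; iterating the whole list would keep it:
#     strtags = [t['tag'] for t in tags]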
print(strtags)\n return strtags\n\n\n# In[ ]:\n\n# перевод тегов\ndef translate_tags(strtags):\n translate = YandexTranslate('trnsl.1.1.20170310T212620Z.40f26af3fc2c4aef.12a7643f85bbbf6214784f2f098cdfaaecce5df1')\n s = \"\"\n picture_tags = []\n picture_tags = translate.translate(strtags, 'en-ru')['text']\n picture_tags = list(set(picture_tags))\n print('Tags have been translated')\n return picture_tags\n\n\n# In[ ]:\n\n# формирование словаря для марковского генератора (ОБУЧЕНИЕ)\ndef dict_generator(df):\n ngrams = {}\n for index, r in df.iterrows():\n row = r['splitted']\n #print(row)\n for i in range(0, len(row)-2):\n if not (row[i], row[i+1]) in ngrams.keys():\n ngrams[(row[i], row[i+1])] = []\n ngrams[(row[i], row[i+1])].append((row[i+1], row[i+2]))\n \n return ngrams\n\n#генерация бреда (не обучение, писать в бесконечном цикле бота)\ndef bullshit_generator(ngrams, picture_tags):\n matched = []\n if 'люди' in picture_tags:\n picture_tags.remove('люди')\n for key in ngrams.keys():\n for t in picture_tags:\n if t in key:\n matched.append(key)\n\n index = random.randint(0, len(matched)-1)\n starter = matched[index]\n \n aphorism = []\n aphorism.append(starter)\n while (starter in ngrams) and (len(aphorism) < 15):\n if len(ngrams[starter]) > 1:\n r = random.randint(0, len(ngrams[starter])-1)\n next_key = ngrams[starter][r]\n else:\n next_key = ngrams[starter][0]\n aphorism.append(next_key)\n starter = next_key\n\n sentence = aphorism[0][0] + \" \"\n for t in range(1, len(aphorism)):\n sentence += aphorism[t][0] + \" \"\n\n sentence += aphorism[t][1] \n \n return sentence\n\n\n# In[ ]:\n\ndef get_string_from_list(lst):\n s = \"\"\n for l in lst:\n s += l + \" \"\n return s\n\n\n# In[ ]:\n\n# создание тфайдиэф матрицы (ОБУЧЕНИЕ)\ndef make_vectorizer(df):\n vectorizer = TfidfVectorizer(ngram_range=(1,1), max_df=0.8, min_df=2, stop_words=stop_words)\n trainv = vectorizer.fit_transform(df[0])\n return vectorizer, trainv\n\n# матчинг цитат (не обучение, писать в бесконечном цикле)\ndef tfidf_matcher(vectorizer, trainv, picture_tags, df):\n s = get_string_from_list(picture_tags)\n testv = vectorizer.transform([s])\n coss = []\n for i in range(trainv.shape[0]):\n c = 1 - cosine(trainv[i].toarray(), testv[0].toarray())\n if not np.isnan(c):\n coss.append(c)\n else:\n coss.append(-1)\n \n index0 = np.argsort(coss)[-8:]\n shuffle(index0)\n \n index1 = index0[:3]\n \n for i in index1:\n print(df[0].loc[i])\n return (index1)\n\n\n# In[ ]:\n\ndataset = prepare_dataset(\"all.csv\")\ndd = dict_generator(dataset)\nvvv = make_vectorizer(dataset)\n\n\nimport telepot\n\nbot = telepot.Bot('337114057:AAFhqf7xR4Q10V6Ew-ADPUrMaQPjAEGHAdM')\n\n\n\nlast_len = len(bot.getUpdates())\nh = []\nwhile True:\n print('cycle')\n if len(bot.getUpdates()) > last_len:\n #if len(bot.getUpdates()) > 1:\n response = bot.getUpdates()[-1]\n #for response in h:\n #print(\" h: \", len(h))\n url = ''\n bot.sendMessage(response[\"message\"][\"from\"][\"id\"], \"ХЭЙ, БРО, ПОДОЖДИ, Я ОБРАБОТАЮ ФОТКУ. 
СПС\")\n if \"document\" in response[\"message\"]:\n lll = bot.getFile(response[\"message\"][\"document\"][\"file_id\"])\n url_end = lll[\"file_path\"]\n url = \"https://api.telegram.org/file/bot337114057:AAFhqf7xR4Q10V6Ew-ADPUrMaQPjAEGHAdM/\"+url_end\n #bot.sendMessage(response[\"message\"][\"from\"][\"id\"], \"Wait, dude, your photo is in process\")\n elif \"photo\" in response[\"message\"]:\n lll = bot.getFile(response[\"message\"][\"photo\"][2][\"file_id\"])\n url_end = lll[\"file_path\"]\n url = \"https://api.telegram.org/file/bot337114057:AAFhqf7xR4Q10V6Ew-ADPUrMaQPjAEGHAdM/\"+url_end\n #bot.sendMessage(response[\"message\"][\"from\"][\"id\"], url)\n print('*'*15)\n output = []\n print('URL:')\n print(url)\n english_tags = get_tags(url)\n tags = translate_tags(english_tags)\n print('*'*15)\n print(tags)\n print('*'*15)\n \n tf = tfidf_matcher(vvv[0], vvv[1], tags, dataset)\n bot.sendMessage(response[\"message\"][\"from\"][\"id\"],\"-----------\")\n bot.sendMessage(response[\"message\"][\"from\"][\"id\"],\"ЭТО Я НАШЕЛ В ОДНОМ ИЗ ВАНИЛЬНЫХ ПАБЛИКОВ В ВК:\")\n for i in tf:\n bot.sendMessage(response[\"message\"][\"from\"][\"id\"], dataset[0].loc[i])\n bot.sendMessage(response[\"message\"][\"from\"][\"id\"],\"-----------\")\n bot.sendMessage(response[\"message\"][\"from\"][\"id\"],\"А ЭТО Я САМ СОЧИНИЛ ПРО ТЕБЯ:\")\n bot.sendMessage(response[\"message\"][\"from\"][\"id\"], bullshit_generator(dd, tags))\n bot.sendMessage(response[\"message\"][\"from\"][\"id\"], bullshit_generator(dd, tags))\n \n print('end of message')\n last_len = len(bot.getUpdates())\n \n\n\n\n","sub_path":"ai_hackathon/last_stable.py","file_name":"last_stable.py","file_ext":"py","file_size_in_byte":6872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"268244114","text":"# Licensed to Cloudera, Inc. under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. Cloudera, Inc. licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport posixpath\nimport threading\nimport time\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\nfrom django.utils.translation import ugettext as _\n\nfrom desktop.conf import TIME_ZONE\nfrom desktop.lib.rest.http_client import HttpClient\n\nfrom connection import Connection, SqoopConnectionException\nfrom connector import Connector\nfrom framework import Framework\nfrom job import Job, SqoopJobException\nfrom submission import Submission, SqoopSubmissionException\nfrom resource import SqoopResource\n\n\nLOG = logging.getLogger(__name__)\nDEFAULT_USER = 'hue'\nAPI_VERSION = 'v1'\n\n_JSON_CONTENT_TYPE = 'application/json'\n\n\nclass SqoopClient(object):\n \"\"\"\n Sqoop client\n \"\"\"\n STATUS_GOOD = ('FINE', 'ACCEPTABLE')\n STATUS_BAD = ('UNACCEPTABLE', 'FAILURE_ON_SUBMIT')\n\n def __init__(self, url, username, language='en'):\n self._url = url\n self._client = HttpClient(self._url, logger=LOG)\n self._root = SqoopResource(self._client)\n self._language = language\n self._username = username\n\n def __str__(self):\n return \"SqoopClient at %s\" % self._url\n\n @property\n def url(self):\n return self._url\n\n @property\n def headers(self):\n return {\n 'Accept': 'application/json',\n 'Accept-Language': self._language,\n 'sqoop-user-name': self._username\n }\n\n def get_version(self):\n return self._root.get('version', headers=self.headers)\n\n def get_framework(self):\n resp_dict = self._root.get('%s/framework/all' % API_VERSION, headers=self.headers)\n framework = Framework.from_dict(resp_dict)\n return framework\n\n def get_connectors(self):\n resp_dict = self._root.get('%s/connector/all' % API_VERSION, headers=self.headers)\n connectors = [ Connector.from_dict(connector_dict, resp_dict['resources-connector']) for connector_dict in resp_dict['all'] ]\n return connectors\n\n def get_connector(self, connector_id):\n resp_dict = self._root.get('%s/connector/%d/' % (API_VERSION, connector_id), headers=self.headers)\n if resp_dict['all']:\n return Connector.from_dict(resp_dict['all'][0], resp_dict['resources-connector'])\n return None\n\n def get_connections(self):\n resp_dict = self._root.get('%s/connection/all' % API_VERSION, headers=self.headers)\n connections = [Connection.from_dict(conn_dict) for conn_dict in resp_dict['all']]\n return connections\n\n def get_connection(self, connection_id):\n resp_dict = self._root.get('%s/connection/%d/' % (API_VERSION, connection_id), headers=self.headers)\n if resp_dict['all']:\n return Connection.from_dict(resp_dict['all'][0])\n return None\n\n def create_connection(self, connection):\n if not connection.connector:\n connection.connector = self.get_connectors()[0].con_forms\n if not connection.framework:\n connection.framework = self.get_framework().con_forms\n connection.created = int( round(time.time() * 1000) )\n connection.updated = connection.created\n connection_dict = connection.to_dict()\n request_dict = {\n 'all': [connection_dict]\n }\n resp = self._root.post('%s/connection/' % API_VERSION, data=json.dumps(request_dict), headers=self.headers)\n if 'id' not in resp:\n raise 
SqoopConnectionException.from_dict(resp)\n connection.id = resp['id']\n return connection\n\n def update_connection(self, connection):\n \"\"\" Update a connection \"\"\"\n if not connection.connector:\n connection.connector = self.get_connectors()[0].con_forms\n if not connection.framework:\n connection.framework = self.get_framework().con_forms\n connection.updated = int( round(time.time() * 1000) )\n connection_dict = connection.to_dict()\n request_dict = {\n 'all': [connection_dict]\n }\n resp = self._root.put('%s/connection/%d/' % (API_VERSION, connection.id), data=json.dumps(request_dict), headers=self.headers)\n if resp['connector']['status'] in SqoopClient.STATUS_BAD or resp['framework']['status'] in SqoopClient.STATUS_BAD:\n raise SqoopConnectionException.from_dict(resp)\n return connection\n\n def delete_connection(self, connection):\n resp = self._root.delete('%s/connection/%d/' % (API_VERSION, connection.id), headers=self.headers)\n return None\n\n def get_jobs(self):\n resp_dict = self._root.get('%s/job/all' % API_VERSION, headers=self.headers)\n jobs = [Job.from_dict(job_dict) for job_dict in resp_dict['all']]\n return jobs\n\n def get_job(self, job_id):\n resp_dict = self._root.get('%s/job/%d/' % (API_VERSION, job_id), headers=self.headers)\n if resp_dict['all']:\n return Job.from_dict(resp_dict['all'][0])\n return None\n\n def create_job(self, job):\n if not job.connector:\n job.connector = self.get_connectors()[0].job_forms[job.type.upper()]\n if not job.framework:\n job.framework = self.get_framework().job_forms[job.type.upper()]\n job.created = int( round(time.time() * 1000) )\n job.updated = job.created\n job_dict = job.to_dict()\n request_dict = {\n 'all': [job_dict]\n }\n resp = self._root.post('%s/job/' % API_VERSION, data=json.dumps(request_dict), headers=self.headers)\n if 'id' not in resp:\n raise SqoopJobException.from_dict(resp)\n job.id = resp['id']\n return job\n\n def update_job(self, job):\n if not job.connector:\n job.connector = self.get_connectors()[0].job_forms[job.type.upper()]\n if not job.framework:\n job.framework = self.get_framework().job_forms[job.type.upper()]\n job.updated = int( round(time.time() * 1000) )\n job_dict = job.to_dict()\n request_dict = {\n 'all': [job_dict]\n }\n resp = self._root.put('%s/job/%d/' % (API_VERSION, job.id), data=json.dumps(request_dict), headers=self.headers)\n if resp['connector']['status'] in SqoopClient.STATUS_BAD or resp['framework']['status'] in SqoopClient.STATUS_BAD:\n raise SqoopJobException.from_dict(resp)\n return job\n\n def delete_job(self, job):\n resp_dict = self._root.delete('%s/job/%d/' % (API_VERSION, job.id), headers=self.headers)\n return None\n\n def get_job_status(self, job):\n resp_dict = self._root.get('%s/submission/action/%d/' % (API_VERSION, job.id), headers=self.headers)\n return Submission.from_dict(resp_dict['all'][0])\n\n def start_job(self, job):\n resp_dict = self._root.post('%s/submission/action/%d/' % (API_VERSION, job.id), headers=self.headers)\n if resp_dict['all'][0]['status'] in SqoopClient.STATUS_BAD:\n raise SqoopSubmissionException.from_dict(resp_dict['all'][0])\n return Submission.from_dict(resp_dict['all'][0])\n\n def stop_job(self, job):\n resp_dict = self._root.delete('%s/submission/action/%d/' % (API_VERSION, job.id), headers=self.headers)\n return Submission.from_dict(resp_dict['all'][0])\n\n def get_submissions(self):\n resp_dict = self._root.get('%s/submission/history/all' % API_VERSION, headers=self.headers)\n submissions = [Submission.from_dict(submission_dict) for 
submission_dict in resp_dict['all']]\n return submissions\n\n def set_user(self, user):\n self._user = user\n\n def set_language(self, language):\n self._language = language\n","sub_path":"apps/sqoop/src/sqoop/client/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"151812292","text":"from functions import *\nimport csv\n\nsubmission = r.submission(id='7l5yax')\nshortlink = submission.shortlink\nbotDisclaimerText = bot_disclaimer()\n\nfinalistsCSV = 'SubmissionsArchive/finalists2017.csv'\n\nf = open(finalistsCSV, 'r')\nreader = csv.reader(f)\ntitle_to_finalist = 'The Annual Best Map of the Year contest is now live!'\nmessage_to_finalist = ('**The Annual Best Map of the Year contest is now live!** \\nThank you for contributing a map. [The voting on the contest is open now at this link.]('\n + shortlink + ') \\n' + botDisclaimerText)\n\nfor row in reader:\n try:\n print(row[3])\n r.redditor(row[3]).message(title_to_finalist, message_to_finalist)\n except:\n print('Error sending message to ' + row[3])\nprint(shortlink)\n","sub_path":"dummy.py","file_name":"dummy.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"410983413","text":"__author__ = 'HY'\nfrom tkinter import *\nfrom tkinter import colorchooser\n\nimport math\nfrom math import *\nfrom operator import *\n\n\ncolor=colorchooser.askcolor()\n\ndef Color():\n\n Color = str(color[1])\n return Color\n\nfrom random import randrange\n\n\n\n\ndef frame(root, side):\n\n\n w = Frame(root,relief=FLAT,bg=\"black\")\n\n\n w.pack(side=side, expand=YES, fill=BOTH)\n\n\n return w\n\n\n\n\n\n\n\ndef button(root, side, text, command=None):\n\n\n w = Button(root, text=text, command=command, width=4, height=3,bg=Color(),fg=\"white\" )\n\n\n w.pack(side=side, expand=YES, fill=BOTH)\n\n\n return w\n\n\n\n\n\n\n\n\n\n\n\nclass Calculator(Frame):\n\n\n\n\n def __init__(self):\n\n\n Frame.__init__(self)\n\n\n self.option_add('*Font', 'AERIAL 12 bold')\n\n\n self.pack(expand=YES, fill=BOTH)\n\n\n self.master.title('Made by JJJMH')\n\n\n display = StringVar()\n\n\n quote = StringVar()\n\n\n msg1=[\"This calculator is for Civil Engineering, so good luck guys\"]\n\n irand = randrange(0, 1)\n\n\n quote.set(msg1[irand])\n\n\n\n\n\n Message( self, textvariable=quote, relief=SUNKEN, font=('times', 12),width=500,bg=\"black\",fg=\"white\").pack(side=TOP,expand=YES,fill=BOTH)\n\n\n\n\n\n trig = frame(self,LEFT)\n\n\n\n\n\n extra = frame(self,RIGHT)\n\n\n\n\n\n opsF = frame(self,RIGHT)\n\n\n Entry(self, relief=SUNKEN, textvariable=display).pack(side=TOP, expand=YES, fill=BOTH)\n\n\n\n\n for key in (\"123\", \"456\", \"789\", \"0.\"):\n\n keyF = frame(self, TOP)\n for char in key:\n button(keyF, LEFT, char,\n lambda w=display, c=char: w.set(w.get()+c))\n\n\n\n\n\n\n button(opsF, TOP, \"<-\",\n\n\n lambda w=display, c=char: w.set(w.get()[:-1]))\n\n\n\n button(extra, TOP, 'Clr', lambda w=display: w.set(''))\n\n button(extra, TOP, \"sq\",\n\n\n lambda w=display, c=char: w.set(pow(float(w.get()),2)))\n\n\n button(extra, TOP, \"sqrt\",\n\n\n lambda w=display, c=char: w.set(math.sqrt(float(w.get()))))\n\n\n button(extra, TOP, \"e\",\n\n\n lambda w=display, c=char: w.set(math.exp(float(w.get()))))\n\n\n button(extra, TOP, \"log10\",\n\n\n lambda w=display, c=char: w.set(math.log10(float(w.get()))))\n\n\n\n\n\n button(trig, TOP, \"sin\",\n\n\n lambda 
w=display, c=char: w.set(math.sin(math.radians(float(w.get())))))\n\n\n button(trig, TOP, \"cos\",\n\n\n lambda w=display, c=char: w.set(math.cos(math.radians(float(w.get())))))\n\n\n button(trig, TOP, \"tan\",\n\n\n lambda w=display, c=char: w.set(math.tan(math.radians(float(w.get())))))\n\n\n button(trig, TOP, \"n!\",\n\n\n lambda w=display, c=char: w.set(math.factorial(float(w.get()))))\n\n button(trig, TOP, \"ln\",\n\n\n lambda w=display, c=char: w.set(math.log(float(w.get()),math.e)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n button(extra, TOP, ')', lambda w=display, c=char: w.set(w.get()+ ')'))\n\n\n\n\n\n\n\n for char in \"+-*/=(\":\n\n\n if char == '=':\n\n\n btn = button(keyF, LEFT, char)\n\n\n\n btn.bind('',\n\n lambda e, s=self, w=display: s.calc(w), '+')\n\n\n else:\n\n\n btn = button(opsF, TOP, char,\n\n\n lambda w=display, s='%s'%char: w.set(w.get()+s))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n clearF = frame(self, BOTTOM)\n\n\n\n\n\n\n\n def calc(self, display):\n\n\n try:\n\n\n display.set(eval(display.get()))\n\n\n except:\n\n\n display.set(\"ERROR\")\n\n\n\n\n\n\nif __name__ == '__main__':\n\n\n Calculator().mainloop()\n","sub_path":"civil-final/2nd_presentation/8조/8group Calculator.py","file_name":"8group Calculator.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"532778256","text":"from django.shortcuts import render\nfrom django.utils import timezone\nfrom website.forms import ProductForm\nfrom website.models.product_model import Product\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\n# import base64\n\ndef sell_product(request):\n \"\"\"\n This method is invoked to post a product to sell\n\n ---Arguments---\n None\n\n ---GET---\n Renders create.html\n ---Context---\n 'product_form': the form from product_form.py\n\n ---POST---\n Renders success/product_added_to_sell_links.html\n\n ---Context---\n 'posted_object': 'Your Product added to Sell'\n 'posted_object_identifier': The product's title\n\n Author: Jessica Younker\n \"\"\"\n\n if request.method == 'GET':\n product_form = ProductForm()\n template_name = 'create.html'\n return render(request, template_name, {'product_form': product_form,})\n\n elif request.method == 'POST':\n form = ProductForm(request.POST, request.FILES)\n form_data = request.POST\n try: \n form_data['local_delivery'] == 'on'\n local_delivery_boolean = True\n location_data = form_data['location']\n except KeyError: \n local_delivery_boolean = False\n location_data = None\n if form.is_valid():\n # b64_image = base64.b64encode(request.FILES['image'].read())\n p = Product(\n seller = request.user,\n title = form_data['title'],\n description = form_data['description'],\n price = form_data['price'],\n current_inventory = form_data['current_inventory'],\n product_category_id = form_data['product_category'],\n date_added = timezone.now(),\n local_delivery = local_delivery_boolean,\n image = form.cleaned_data['image'],\n is_active = 1,\n location = location_data\n )\n p.save()\n # template_name = 'success/product_added_to_sell_links.html'\n # return render(request, template_name, {\n # 'posted_object': 'Your Product added to Sell', \n # 'posted_object_identifier': p.title})\n return HttpResponseRedirect(reverse('website:product_detail', \n args=[p.id]))\n else:\n return HttpResponseRedirect(reverse('website:sell' \n 
))\n\n","sub_path":"website/views/sell_product_view.py","file_name":"sell_product_view.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"247147386","text":"#!/usr/bin/python\nfrom sys import stdin, stdout\nimport xml.etree.ElementTree as ET\nfrom lxml import html\n\n# debug\n# f = open(\"feed.xml\")\n# content = f.read()\n# f.close()\n\ncontent = stdin.read()\n\nroot = ET.fromstring(content)\n\nfor entry in root.findall('{http://www.w3.org/2005/Atom}entry'):\n title = entry.find('{http://www.w3.org/2005/Atom}title')\n content = entry.find('{http://www.w3.org/2005/Atom}content')\n r = html.document_fromstring(content.text)\n t = r.xpath('//div[@class=\"title\"]/a[2]')[0].get('title')\n title.text += \" [%s]\" % t\n\nET.dump(root)\n","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"243614291","text":"import re\nimport math\nimport json\nimport time\nimport random\nfrom threading import Thread\nfrom articlesite.models import Article\nfrom django.shortcuts import render\nfrom django.shortcuts import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom visitorecord.recordvisitorinfo import recordinfo\n# Create your views here.\n\nnav = ['
<li class=\"\"><a href=\"...\">首页</a></li>',\n       '<li class=\"\"><a href=\"...\">全部</a></li>',\n       '<li class=\"\"><a href=\"...\">手机</a></li>',\n       '<li class=\"\"><a href=\"...\">BAT</a></li>',\n       '<li class=\"\"><a href=\"...\">共享单车</a></li>',\n       '<li class=\"\"><a href=\"...\">人工智能</a></li>',\n       '<li class=\"\"><a href=\"...\">硬件</a></li>']\n\ndef index(request):\n    t = Thread(target=recordinfo, args=(request,))\n    t.start()\n    ctx = {}\n    contents = Article.objects.all()\n    # contents = postgresql.Connpsql().readfromtable('article_article')\n    recommend = []\n    if len(contents) < 5:\n        recommend = [[content.article_title, content.page_urlcode] for content in contents]\n        articleindatabase = []\n    else:\n        while len(recommend) < 5:\n            content = random.choice(contents)\n            recommend.append([content.article_title, content.page_urlcode])\n        articleindatabase = contents[:3]\n    articles = []\n    for content in articleindatabase:\n        articlepostime = caculocaltime(content.release_time_detected)\n        articles.append([content.article_title, content.article_url, articlepostime, content.article_excerpt, content.article_content, content.page_urlcode])\n    ctx['articles'] = articles\n    ctx['recommend'] = recommend\n    navout = changenav(nav, '首页')\n    ctx['nav'] = ''.join(navout)\n    return render(request, 'index.html', ctx)\n\n@csrf_exempt\ndef articleclasses(request, classesname):\n    t = Thread(target=recordinfo, args=(request,))\n    t.start()\n    ctx = {}\n    if request.method=='GET':\n        # The selected nav item is highlighted server-side, so the nav HTML has to be rewritten here\n        navout = changenav(nav, classesname)\n        articles = Article.objects.filter(article_class__contains=classesname)\n        articlelist = []\n        for articleinfo in articles[:10]:\n            articlepostime = caculocaltime(articleinfo.release_time_detected)\n            articlelist.append([articleinfo.article_title, articleinfo.article_url, articlepostime, articleinfo.article_excerpt, articleinfo.article_content, articleinfo.page_urlcode])\n        ctx['articles'] = articlelist\n        if len(articles) > 10:\n            ctx['class_name'] = classesname\n            maxpage = int(math.ceil(len(articles) / 10))\n            pagenumlist = [x for x in range(1, maxpage + 1)]\n            ctx['pagelist'] = pagenumlist\n            ctx['maxpagenum'] = maxpage\n            ctx['currentpagenum'] = 1\n        else:\n            ctx['class_name'] = classesname\n        ctx['nav'] = ''.join(navout)\n        return render(request, 'classes.html', ctx)\n    elif request.method=='POST':\n        postargs = request.POST\n        nextpage = int(postargs.get('targetindex', 1))\n        maxpage = int(postargs.get('maxpagenum', 1))\n        if nextpage < 1:\n            nextpage = 1\n        if nextpage > maxpage:\n            nextpage = maxpage\n        if maxpage:\n            ctx['pagelist'] = [x for x in range(1, maxpage + 1)]\n        start = 10*(nextpage-1)\n        stop = 10*(nextpage-1)+10\n        articlecontents = Article.objects.filter(article_class__contains=classesname).order_by('-release_time_detected')[start: stop]\n        return_json = {}\n        articledictlist = []\n        for content in articlecontents:\n            articlepostime = caculocaltime(content.release_time_detected)\n            articledictlist.append({'article_title':content.article_title, 'article_url':content.article_url, 'release_time': articlepostime, 'article_excerpt':content.article_excerpt, 'article_content':content.article_content, 'page_urlcode': content.page_urlcode})\n        return_json['articles'] = articledictlist\n        return_json['maxpagenum'] = maxpage\n        return_json['currentpagenum'] = nextpage\n        return HttpResponse(json.dumps(return_json), content_type='application/json')\n    else:\n        return render(request, 'classes.html')\n\ndef caculocaltime(intunixtime):\n    structime = time.localtime(intunixtime)\n    articlepostime = '-'.join((str(structime.tm_year), str(structime.tm_mon), str(structime.tm_mday)))\n    return articlepostime\n\ndef aboutpage(request):\n    t = Thread(target=recordinfo, args=(request,))\n    t.start()\n    return render(request, 'about.html')\n\ndef changenav(navin, classesname):\n    for i in range(0, len(navin)):\n        thiscontent = navin[i]\n        if 
re.search(classesname, thiscontent):\n navin[i] = re.sub('class=\\\"\\\"', 'class=\"am-active\"', thiscontent)\n else:\n navin[i] = re.sub('class=\"am-active\"', 'class=\\\"\\\"', thiscontent)\n return navin\n\nif __name__ == '__main__':\n index('')\n","sub_path":"blogsite/articlelistpages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"383920650","text":"# coding:utf-8\n\n# Reverse a single linked list\n# Difficulty: easy\ndef reverse_single_list(head):\n '''double pointer'''\n # 1.定义两个指针:prev和cur,一前一后,从第一个节点开始,遍历每一个节点,\n # 2.在cur.next改变指向前,先把cur.next保存到next_temp中\n # 3.在cur指向next_temp前,先把prev指向cur\n prev = None\n cur = head\n while cur != None:\n next_temp = cur.next\n cur.next = prev\n prev = cur\n cur = next_temp\n return prev\n","sub_path":"linked_list/206-反转链表.py","file_name":"206-反转链表.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"159159585","text":"from flask.ext.script import Manager, Server\nfrom app import app, db\n\nmanager = Manager(app)\n\nmanager.add_command(\"runserver\",\n Server(host = \"0.0.0.0\", port = 5000, use_debugger = True))\n\n@manager.command\ndef create_all():\n \"\"\"create db tables\"\"\"\n db.create_all()\n\nif __name__ == '__main__':\n manager.run()","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"74792054","text":"# Copyright 2018 Tensorforce Team. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom collections import OrderedDict\nimport importlib\nimport json\nfrom math import sqrt\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops import cond_v2\nfrom tensorflow.python.ops import while_v2\n\nfrom tensorforce import TensorforceError, util\n\n\ntf.enable_resource_variables()\n\n\nclass Module(object):\n \"\"\"\n Base class for modules.\n\n Args:\n name (string): Module name\n (internal use).\n device (string): Device name\n (default: inherit value of parent module).\n summary_labels ('all' | iter[string]): Labels of summaries to record\n (default: inherit value of parent module).\n l2_regularization (float >= 0.0): Scalar controlling L2 regularization\n (default: inherit value of parent module).\n \"\"\"\n\n global_scope = None\n global_tensors_spec = None\n global_tensors = None # per agent, main module, or so\n global_summary_step = None\n\n @staticmethod\n def register_tensor(name, spec, batched):\n if '/' in name:\n raise TensorforceError.value(name='name', value=name)\n\n if Module.global_scope is None: # ???\n raise TensorforceError.unexpected()\n\n scoped_name = name\n\n # if scoped_name in Module.global_tensors_spec:\n # raise 
TensorforceError(\"Global tensor already exists: {}.\".format(scoped_name))\n\n # optional? better to put in spec?\n spec = dict(spec)\n spec['batched'] = batched\n\n if scoped_name in Module.global_tensors_spec and \\\n spec != Module.global_tensors_spec[scoped_name]:\n raise TensorforceError.mismatch(\n name='tensor-spec', value1=spec, value2=Module.global_tensors_spec[scoped_name]\n )\n\n if not util.valid_value_spec(value_spec=spec):\n raise TensorforceError.unexpected()\n\n if 'batched' in spec and spec['batched'] != batched:\n raise TensorforceError.unexpected()\n\n Module.global_tensors_spec[scoped_name] = spec\n\n @staticmethod\n def get_tensor_spec(name):\n if name not in Module.global_tensors_spec:\n raise TensorforceError.value(name='name', value=name)\n\n spec = dict(Module.global_tensors_spec[name])\n spec.pop('batched')\n\n return spec\n\n @staticmethod\n def update_tensor(name, tensor):\n # for n in range(len(Module.global_scope) + 1):\n # partial_scope = Module.global_scope[:len(Module.global_scope) - n]\n # scoped_name = util.join_scopes(*partial_scope, name)\n # if scoped_name in Module.global_tensors_spec:\n # break\n # else:\n # raise TensorforceError(\"Global tensor is not registered: {}.\".format(name))\n if name not in Module.global_tensors_spec:\n raise TensorforceError(\"Global tensor is not registered: {}.\".format(name))\n\n scoped_name = name\n spec = Module.global_tensors_spec[scoped_name]\n\n if not util.is_consistent_with_value_spec(value_spec=spec, x=tensor):\n raise TensorforceError(\"Invalid overwriting tensor: {}.\".format(tensor))\n\n scoped_name = util.join_scopes(*Module.global_scope, name)\n\n previous = Module.global_tensors.get(scoped_name)\n Module.global_tensors[scoped_name] = tensor\n\n return previous\n\n @staticmethod\n def update_tensors(**kwargs):\n for name, tensor in kwargs.items():\n Module.update_tensor(name=name, tensor=tensor)\n\n @staticmethod\n def retrieve_tensor(name):\n # for n in range(len(Module.global_scope) + 1):\n # partial_scope = Module.global_scope[:len(Module.global_scope) - n]\n # scoped_name = util.join_scopes(*partial_scope, name)\n # if scoped_name in Module.global_tensors_spec:\n # break\n # else:\n # raise TensorforceError(\"Global tensor is not registered: {}.\".format(name))\n if name not in Module.global_tensors_spec:\n raise TensorforceError(\"Global tensor is not registered: {}.\".format(name))\n\n for n in range(len(Module.global_scope) + 1):\n partial_scope = Module.global_scope[:len(Module.global_scope) - n]\n scoped_name = util.join_scopes(*partial_scope, name)\n if scoped_name in Module.global_tensors:\n break\n else:\n raise TensorforceError(\"Global tensor is not set: {}.\".format(name))\n\n # scoped_name = util.join_scopes(*Module.global_scope, name)\n\n # if scoped_name not in Module.global_tensors:\n # raise TensorforceError(\"Global tensor is not set: {}.\".format(scoped_name))\n\n return Module.global_tensors[scoped_name]\n\n is_add_module = False\n\n # Set internal attributes\n set_parent = None\n\n # Inherit arguments\n inherit_l2_regularization = None\n inherit_summary_labels = None\n\n def __init__(self, name, device=None, summary_labels=None, l2_regularization=None):\n # Internal attributes\n self.parent = Module.set_parent\n self.scope = None\n self.is_subscope = None\n self.modules = OrderedDict()\n self.trainable_modules = OrderedDict()\n self.saved_modules = OrderedDict()\n self.is_initialized = False\n self.variables = None\n self.trainable_variables = None\n self.saved_variables = None\n 
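# NOTE: usage sketch for the global-tensor registry defined above
# (illustrative only; it assumes util.join_scopes joins scope names with '/'):
#
#     Module.register_tensor(name='timestep', spec=dict(type='long', shape=()), batched=False)
#     Module.global_scope = ['agent', 'policy']
#     Module.update_tensor(name='timestep', tensor=t)  # stored as 'agent/policy/timestep'
#     Module.retrieve_tensor(name='timestep')
#
# retrieve_tensor resolves from the innermost scope outwards: it tries
# 'agent/policy/timestep', then 'agent/timestep', then plain 'timestep', and
# returns the first name found in Module.global_tensors.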
self.output_tensors = None\n self.query_tensors = None\n self.available_summaries = None\n\n # name\n if not util.is_valid_name(name=name):\n raise TensorforceError.value(name='module', argument='name', value=name)\n # summary_labels\n if summary_labels is not None and \\\n not all(isinstance(label, str) for label in summary_labels):\n raise TensorforceError.type(\n name='module', argument='summary_labels', value=summary_labels\n )\n # device\n # ???\n\n self.name = name\n self.device = device\n if summary_labels is None:\n # Otherwise inherit arguments\n self.summary_labels = Module.inherit_summary_labels\n elif summary_labels == 'all':\n self.summary_labels = summary_labels\n else:\n self.summary_labels = set(summary_labels)\n\n if not Module.is_add_module:\n Module.global_scope = list()\n Module.global_tensors_spec = OrderedDict()\n\n if Module.inherit_l2_regularization is None and l2_regularization is None:\n self.l2_regularization = None # otherwise infinite recursion\n elif l2_regularization is not None:\n from tensorforce.core import parameter_modules\n self.l2_regularization = None # for first module\n self.l2_regularization = self.add_module(\n name='l2-regularization', module=l2_regularization, modules=parameter_modules,\n is_trainable=False, dtype='float'\n )\n else:\n # Otherwise inherit arguments\n self.l2_regularization = Module.inherit_l2_regularization\n\n def tf_initialize(self):\n pass\n\n def tf_regularize(self):\n zero = tf.constant(value=0.0, dtype=util.tf_dtype(dtype='float'))\n\n if len(self.trainable_variables) == 0:\n regularization_loss = zero\n\n else:\n l2_regularization = self.l2_regularization.value()\n\n def no_l2_regularization():\n return zero\n\n def apply_l2_regularization():\n l2_variables = list()\n for variable in self.trainable_variables.values():\n if variable.dtype != util.tf_dtype(dtype='float'):\n variable = tf.dtypes.cast(x=variable, dtype=util.tf_dtype(dtype='float'))\n l2_variables.append(tf.reduce_sum(input_tensor=tf.square(x=variable)))\n return l2_regularization * tf.math.add_n(inputs=l2_variables)\n\n skip_l2_regularization = tf.math.equal(x=l2_regularization, y=zero)\n regularization_loss = self.cond(\n pred=skip_l2_regularization, true_fn=no_l2_regularization,\n false_fn=apply_l2_regularization # , use_cond_v2=True\n )\n\n for module in self.trainable_modules.values():\n regularization_loss += module.regularize()\n\n return regularization_loss\n\n def initialize(self):\n # Check whether module is already initialized\n if self.is_initialized:\n raise TensorforceError(message=\"Module already initialized.\")\n\n # Set internal attributes\n self.is_initialized = True\n self.variables = OrderedDict()\n self.trainable_variables = OrderedDict()\n self.saved_variables = OrderedDict()\n self.output_tensors = dict()\n self.query_tensors = dict()\n self.available_summaries = set()\n\n if self.parent is None:\n Module.global_scope = list()\n Module.global_summary_step = 'timestep'\n\n Module.global_scope.append(self.name)\n\n if self.parent is None:\n # Global timestep\n self.global_timestep = self.add_variable(\n name='global-timestep', dtype='long', shape=(), is_trainable=False,\n initializer='zeros', shared='global-timestep'\n )\n collection = tf.get_collection(key=tf.GraphKeys.GLOBAL_STEP)\n if len(collection) == 0:\n tf.add_to_collection(\n name=tf.GraphKeys.GLOBAL_STEP, value=self.global_timestep\n )\n\n if self.summarizer_spec is not None:\n with tf.name_scope(name='summarizer'):\n\n directory = self.summarizer_spec['directory']\n if 
os.path.isdir(directory):\n directories = sorted(\n d for d in os.listdir(directory)\n if os.path.isdir(os.path.join(directory, d))\n and d.startswith('summary-')\n )\n else:\n os.makedirs(directory)\n directories = list()\n max_summaries = self.summarizer_spec.get('max-summaries', 5)\n if len(directories) > max_summaries - 1:\n for subdir in directories[:-max_summaries + 1]:\n subdir = os.path.join(directory, subdir)\n os.remove(os.path.join(subdir, os.listdir(subdir)[0]))\n os.rmdir(subdir)\n\n logdir = os.path.join(directory, time.strftime('summary-%Y%m%d-%H%M%S'))\n flush_millis = (self.summarizer_spec.get('flush', 10) * 1000)\n self.summarizer = tf.contrib.summary.create_file_writer(\n logdir=logdir, flush_millis=flush_millis, max_queue=None,\n filename_suffix=None\n )\n self.summarizer_init = self.summarizer.init()\n self.summarizer_flush = self.summarizer.flush()\n self.summarizer_close = self.summarizer.close()\n default_summarizer = self.summarizer.as_default()\n default_summarizer.__enter__()\n\n if 'frequency' in self.summarizer_spec:\n if isinstance(self.summarizer_spec['frequency'], int):\n record_summaries = \\\n tf.contrib.summary.record_summaries_every_n_global_steps(\n n=self.summarizer_spec['frequency']\n )\n elif 'variables' in self.summarizer_spec['frequency']:\n record_summaries = \\\n tf.contrib.summary.record_summaries_every_n_global_steps(\n n=self.summarizer_spec['frequency']['variables']\n )\n else:\n record_summaries = tf.contrib.summary.never_record_summaries()\n else:\n record_summaries = tf.contrib.summary.always_record_summaries()\n record_summaries.__enter__()\n\n # TensorFlow device and variable scope\n if self.device is not None:\n self.device = tf.device(device_name_or_function=self.device)\n self.device.__enter__()\n self.scope = tf.variable_scope(name_or_scope=self.name, use_resource=True)\n\n with self.scope:\n if self.parent is None:\n # with tf.device(device_name_or_function=(self.global_model.device if self.global_model else self.device)):\n\n # Global timestep before summarizer, otherwise problems with\n # record_summaries_every_n_global_steps\n\n # Global episode\n self.global_episode = self.add_variable(\n name='global-episode', dtype='long', shape=(), is_trainable=False,\n initializer='zeros', shared='global-episode'\n )\n\n # Global update\n self.global_update = self.add_variable(\n name='global-update', dtype='long', shape=(), is_trainable=False,\n initializer='zeros', shared='global-update'\n )\n\n Module.global_tensors = OrderedDict(\n timestep=self.global_timestep, episode=self.global_episode,\n update=self.global_update\n )\n\n # if self.summarizer_spec is not None:\n # if 'steps' in self.summarizer_spec:\n # record_summaries = tf.contrib.summary.record_summaries_every_n_global_steps(\n # n=self.summarizer_spec['steps'],\n # global_step=self.global_timestep\n # )\n # else:\n # record_summaries = tf.contrib.summary.always_record_summaries()\n # record_summaries.__enter__()\n\n for module in self.modules.values():\n module.initialize()\n self.tf_initialize()\n\n if self.device is not None:\n self.device.__exit__(None, None, None)\n\n Module.global_scope.pop()\n\n if self.parent is None:\n assert len(Module.global_scope) == 0\n Module.global_tensors = None\n Module.global_scope = None\n Module.global_summary_step = None\n\n num_variables = len(tf.trainable_variables())\n\n # Internal TensorFlow functions, prefixed by 'tf_'\n for attribute in sorted(dir(self)):\n if attribute.startswith('tf_') and attribute != 'tf_initialize':\n function_name 
= attribute[3:]\n\n if not util.is_valid_name(name=function_name):\n raise TensorforceError.value(name='TF-function name', value=function_name)\n if hasattr(self, function_name):\n raise TensorforceError.exists(name='TF-function', value=function_name)\n\n tf_function = getattr(self, attribute)\n if not callable(tf_function):\n raise TensorforceError.exists(name='TF-function', value=tf_function)\n\n function = self.create_tf_function(\n name='{}.{}'.format(self.name, function_name), tf_function=tf_function\n )\n\n setattr(self, function_name, function)\n\n # API TensorFlow functions, prefixed by 'api_'\n for attribute in sorted(dir(self)):\n if attribute.startswith('api_'):\n function_name = attribute[4:]\n assert hasattr(self, 'config')\n if self.config is not None and 'api_functions' in self.config and \\\n function_name not in self.config['api_functions']:\n continue\n\n # Todo: own every_n_step implementation, plus maybe per function steps argument\n fct_record_summaries = None\n if self.summarizer_spec is not None and 'frequency' in self.summarizer_spec:\n if isinstance(self.summarizer_spec['frequency'], int):\n if function_name in ('observe', 'update'):\n fct_record_summaries = tf.contrib.summary.always_record_summaries()\n elif function_name in self.summarizer_spec['frequency']:\n if function_name in ('observe', 'update'):\n step = self.global_update\n else:\n step = self.global_timestep\n fct_record_summaries = \\\n tf.contrib.summary.record_summaries_every_n_global_steps(\n n=self.summarizer_spec['frequency'][function_name], global_step=step\n )\n else:\n fct_record_summaries = tf.contrib.summary.never_record_summaries()\n if fct_record_summaries is not None:\n fct_record_summaries.__enter__()\n\n if not util.is_valid_name(name=function_name):\n raise TensorforceError.value(name='API-function name', value=function_name)\n if hasattr(self, function_name):\n raise TensorforceError.exists(name='API-function', value=function_name)\n\n api_function = getattr(self, attribute)\n if not callable(api_function):\n raise TensorforceError.exists(name='API-function', value=tf_function)\n\n function = self.create_api_function(\n name='{}.{}'.format(self.name, function_name), api_function=api_function\n )\n\n setattr(self, function_name, function)\n\n if fct_record_summaries is not None:\n fct_record_summaries.__exit__(None, None, None)\n\n assert num_variables == len(tf.trainable_variables())\n\n if self.parent is None:\n # if self.summarizer_spec is not None:\n # record_summaries.__exit__(None, None, None)\n\n if self.summary_labels is not None and \\\n (self.summary_labels == 'all' or 'graph' in self.summary_labels):\n self.available_summaries.add('graph')\n with tf.name_scope(name='summarizer'):\n # summarizer_init = tf.contrib.summary.summary_writer_initializer_op()\n # assert len(summarizer_init) == 1\n # initialization = (tf.global_variables_initializer(), summarizer_init[0])\n graph_def = self.graph.as_graph_def()\n graph_str = tf.constant(\n value=graph_def.SerializeToString(), dtype=tf.string, shape=()\n )\n self.graph_summary = tf.contrib.summary.graph(\n param=graph_str, step=self.global_timestep # episode?\n )\n else:\n self.graph_summary = None\n\n if self.summarizer_spec is not None:\n record_summaries.__exit__(None, None, None)\n default_summarizer.__exit__(None, None, None)\n\n def create_tf_function(self, name, tf_function):\n # Call internal TensorFlow function\n def fn(*args, **kwargs):\n if self.is_subscope:\n Module.global_scope.append(self.name)\n if self.device is not 
None:\n self.device.__enter__()\n with tf.name_scope(name=name):\n results = tf_function(*args, **kwargs)\n if self.device is not None:\n self.device.__exit__(None, None, None)\n if self.is_subscope:\n Module.global_scope.pop()\n return results\n\n return fn\n\n def create_api_function(self, name, api_function):\n # Call API TensorFlow function\n Module.global_scope = list()\n Module.global_tensors = OrderedDict()\n Module.global_summary_step = 'timestep'\n if self.device is not None:\n self.device.__enter__()\n with tf.name_scope(name=name):\n results = api_function()\n self.output_tensors[name[name.index('.') + 1:]] = sorted(\n x.name[len(name) + 1: -9] for x in util.flatten(xs=results)\n )\n\n # Function-level identity operation for retrieval\n query_tensors = set()\n for scoped_name, tensor in Module.global_tensors.items():\n if not scoped_name.startswith('cond/') and '/cond/' not in scoped_name and \\\n not scoped_name.startswith('while/') and '/while/' not in scoped_name:\n util.identity_operation(x=tensor, operation_name=(scoped_name + '-output'))\n assert scoped_name not in query_tensors\n query_tensors.add(scoped_name)\n self.query_tensors[name[name.index('.') + 1:]] = sorted(query_tensors)\n\n if self.device is not None:\n self.device.__exit__(None, None, None)\n Module.global_tensors = None\n Module.global_scope = None\n Module.global_summary_step = None\n\n def fn(query=None, **kwargs):\n # Feed_dict dictionary\n feed_dict = dict()\n for key, arg in kwargs.items():\n if arg is None:\n continue\n elif isinstance(arg, dict):\n # Support single nesting (for states, internals, actions)\n for key, arg in arg.items():\n feed_dict[util.join_scopes(self.name, key) + '-input:0'] = arg\n else:\n feed_dict[util.join_scopes(self.name, key) + '-input:0'] = arg\n if not all(isinstance(x, str) and x.endswith('-input:0') for x in feed_dict):\n raise TensorforceError.unexpected()\n\n # Fetches value/tuple\n fetches = util.fmap(function=(lambda x: x.name), xs=results)\n if query is not None:\n # If additional tensors are to be fetched\n query = util.fmap(\n function=(lambda x: util.join_scopes(name, x) + '-output:0'), xs=query\n )\n if util.is_iterable(x=fetches):\n fetches = tuple(fetches) + (query,)\n else:\n fetches = (fetches, query)\n if not util.reduce_all(\n predicate=(lambda x: isinstance(x, str) and x.endswith('-output:0')), xs=fetches\n ):\n raise TensorforceError.unexpected()\n\n # TensorFlow session call\n fetched = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)\n\n return fetched\n\n return fn\n\n def cond(self, pred, true_fn, false_fn, use_cond_v2=False):\n Module.global_scope.append('cond')\n if use_cond_v2:\n x = cond_v2.cond_v2(pred=pred, true_fn=true_fn, false_fn=false_fn)\n else:\n x = tf.cond(pred=pred, true_fn=true_fn, false_fn=false_fn)\n Module.global_scope.pop()\n return x\n\n def while_loop(\n self, cond, body, loop_vars, shape_invariants=None, parallel_iterations=10,\n back_prop=False, swap_memory=False, maximum_iterations=None, return_same_structure=False,\n use_while_v2=False\n ):\n Module.global_scope.append('while')\n if maximum_iterations is not None and maximum_iterations.dtype is not tf.int32:\n maximum_iterations = tf.dtypes.cast(x=maximum_iterations, dtype=tf.int32)\n if use_while_v2:\n x = while_v2.while_loop(\n cond=cond, body=body, loop_vars=loop_vars, shape_invariants=shape_invariants,\n maximum_iterations=maximum_iterations, return_same_structure=return_same_structure\n )\n else:\n x = tf.while_loop(\n cond=cond, body=body, 
loop_vars=loop_vars, shape_invariants=shape_invariants,\n parallel_iterations=parallel_iterations, back_prop=back_prop,\n swap_memory=swap_memory, maximum_iterations=maximum_iterations,\n return_same_structure=return_same_structure\n )\n\n Module.global_scope.pop()\n return x\n\n def add_variable(\n self, name, dtype, shape, is_trainable, initializer='zeros', is_saved=True, summarize=None,\n shared=None\n ):\n # name\n if not util.is_valid_name(name=name):\n raise TensorforceError.value(name='variable', argument='name', value=name)\n elif name in self.variables:\n raise TensorforceError.exists(name='variable', value=name)\n # dtype\n if not util.is_valid_type(dtype=dtype):\n raise TensorforceError.value(name='variable', argument='dtype', value=dtype)\n # shape\n if not util.is_iterable(x=shape) or not all(isinstance(dims, int) for dims in shape):\n raise TensorforceError.type(name='variable', argument='shape', value=shape)\n elif not all(dims > 0 for dims in shape):\n raise TensorforceError.value(name='variable', argument='shape', value=shape)\n # is_trainable\n if not isinstance(is_trainable, bool):\n raise TensorforceError.type(\n name='variable', argument='is_trainable', value=is_trainable\n )\n elif is_trainable and dtype != 'float':\n raise TensorforceError.unexpected()\n # initializer\n initializer_names = (\n 'normal', 'normal-relu', 'orthogonal', 'orthogonal-relu', 'zeros', 'ones'\n )\n if not isinstance(initializer, (util.py_dtype(dtype=dtype), np.ndarray, tf.Tensor)) and \\\n initializer not in initializer_names:\n raise TensorforceError.value(\n name='variable', argument='initializer', value=initializer\n )\n elif isinstance(initializer, np.ndarray) and \\\n initializer.dtype != util.np_dtype(dtype=dtype):\n raise TensorforceError.type(\n name='variable', argument='initializer', value=initializer\n )\n elif isinstance(initializer, tf.Tensor) and util.dtype(x=initializer) != dtype:\n raise TensorforceError.type(\n name='variable', argument='initializer', value=initializer\n )\n # is_saved\n if not isinstance(is_saved, bool):\n raise TensorforceError.type(name='variable', argument='is_saved', value=is_saved)\n # summarize\n if summarize is not None and not isinstance(summarize, bool):\n raise TensorforceError.type(name='variable', argument='summarize', value=summarize)\n # shared\n if shared is not None and not isinstance(shared, str):\n raise TensorforceError.type(name='variable', argument='shared', value=shared)\n\n variable = None\n\n if shared is not None and len(tf.get_collection(key=shared)) > 0:\n # Retrieve shared variable from TensorFlow\n collection = tf.get_collection(key=shared)\n if len(collection) > 1:\n raise TensorforceError.unexpected()\n variable = collection[0]\n\n else:\n tf_dtype = util.tf_dtype(dtype=dtype)\n\n # Variable initializer\n if isinstance(initializer, util.py_dtype(dtype=dtype)):\n initializer = tf.constant(value=initializer, dtype=tf_dtype, shape=shape)\n elif isinstance(initializer, np.ndarray):\n if initializer.shape != shape:\n raise TensorforceError(\n \"Invalid variable initializer shape: {}.\".format(initializer.shape)\n )\n initializer = tf.constant(value=initializer, dtype=tf_dtype)\n elif isinstance(initializer, tf.Tensor):\n if util.shape(x=initializer) != shape:\n raise TensorforceError(\n \"Invalid variable initializer shape: {}.\".format(util.shape(x=initializer))\n )\n initializer = initializer\n elif not isinstance(initializer, str):\n raise TensorforceError(\"Invalid variable initializer: {}\".format(initializer))\n elif 
initializer[:6] == 'normal':\n if dtype != 'float':\n raise TensorforceError(\n message=\"Invalid variable initializer value for non-float variable: {}.\".format(\n initializer\n )\n )\n if initializer[6:] == '-relu':\n stddev = min(0.1, sqrt(2.0 / util.product(xs=shape[:-1])))\n else:\n stddev = min(0.1, sqrt(2.0 / (util.product(xs=shape[:-1]) + shape[-1])))\n initializer = tf.random.normal(shape=shape, stddev=stddev, dtype=tf_dtype)\n elif initializer[:10] == 'orthogonal':\n if dtype != 'float':\n raise TensorforceError(\n message=\"Invalid variable initializer value for non-float variable: {}.\".format(\n initializer\n )\n )\n if len(shape) < 2:\n raise TensorforceError(\n message=\"Invalid variable initializer value for 0/1-rank variable: {}.\".format(\n initializer\n )\n )\n normal = np.random.normal(size=(util.product(xs=shape[:-1]), shape[-1]))\n u, _, v = np.linalg.svd(a=normal, full_matrices=False)\n orthogonal = u if u.shape[1] == shape[-1] else v\n if initializer[10:] == '-relu':\n orthogonal = orthogonal * sqrt(2.0)\n initializer = tf.constant(value=orthogonal.reshape(shape), dtype=tf_dtype)\n elif initializer == 'zeros':\n initializer = tf.zeros(shape=shape, dtype=tf_dtype)\n elif initializer == 'ones':\n initializer = tf.ones(shape=shape, dtype=tf_dtype)\n\n # Variable\n variable = tf.Variable(\n initial_value=initializer, trainable=is_trainable, validate_shape=True, name=name,\n dtype=tf_dtype, expected_shape=shape, use_resource=True\n ) # collections=\n\n # Register shared variable with TensorFlow\n if shared is not None:\n tf.add_to_collection(name=shared, value=variable)\n\n # Register variable\n self.variables[name] = variable\n if is_trainable:\n self.trainable_variables[name] = variable\n if is_saved:\n self.saved_variables[name] = variable\n\n # Add summary\n if (summarize is None and is_trainable) or summarize:\n variable = self.add_summary(\n label=('variables', 'variables-full'), name=name, tensor=variable,\n mean_variance=True\n )\n variable = self.add_summary(label='variables-full', name=name, tensor=variable)\n\n return variable\n\n def add_placeholder(self, name, dtype, shape, batched, default=None):\n # name\n name = name + '-input'\n if not util.is_valid_name(name=name):\n raise TensorforceError.value(name='placeholder', argument='name', value=name)\n # dtype\n if not util.is_valid_type(dtype=dtype):\n raise TensorforceError.value(name='placeholder', argument='dtype', value=dtype)\n # shape\n if not util.is_iterable(x=shape) or not all(isinstance(dims, int) for dims in shape):\n raise TensorforceError.type(name='placeholder', argument='shape', value=shape)\n elif not all(dims > 0 for dims in shape):\n raise TensorforceError.value(name='placeholder', argument='shape', value=shape)\n # batched\n if not isinstance(batched, bool):\n raise TensorforceError.type(name='placeholder', argument='batched', value=batched)\n # default\n if default is not None:\n # if batched:\n # raise TensorforceError.unexpected()\n if not isinstance(default, tf.Tensor):\n raise TensorforceError.unexpected()\n elif util.dtype(x=default) != dtype:\n raise TensorforceError.unexpected()\n\n # Placeholder\n if batched:\n shape = (None,) + shape\n if default is None:\n dtype = util.tf_dtype(dtype=dtype)\n placeholder = tf.placeholder(dtype=dtype, shape=shape, name=name)\n else:\n # check dtype and shape !!!\n placeholder = tf.placeholder_with_default(input=default, shape=shape, name=name)\n\n return placeholder\n\n def add_summary(\n self, label, name, tensor, pass_tensors=None, step=None, 
return_summaries=False,\n mean_variance=False, enumerate_last_rank=False\n ):\n # should be \"labels\" !!!\n # label\n if util.is_iterable(x=label):\n if not all(isinstance(x, str) for x in label):\n raise TensorforceError.type(name='summary', argument='label', value=label)\n else:\n if not isinstance(label, str):\n raise TensorforceError.type(name='summary', argument='label', value=label)\n # name\n if not isinstance(name, str):\n raise TensorforceError.type(name='summary', argument='name', value=name)\n # tensor\n if not isinstance(tensor, (tf.Tensor, tf.Variable)):\n raise TensorforceError.type(name='summary', argument='tensor', value=tensor)\n # pass_tensors\n if util.is_iterable(x=pass_tensors):\n if not all(isinstance(x, (tf.Tensor, tf.IndexedSlices)) for x in pass_tensors):\n raise TensorforceError.type(\n name='summary', argument='pass_tensors', value=pass_tensors\n )\n elif pass_tensors is not None:\n if not isinstance(pass_tensors, tf.Tensor):\n raise TensorforceError.type(\n name='summary', argument='pass_tensors', value=pass_tensors\n )\n # step\n # enumerate_last_rank\n if not isinstance(enumerate_last_rank, bool):\n raise TensorforceError.type(\n name='summary', argument='enumerate_last_rank', value=tensor\n )\n\n if pass_tensors is None:\n pass_tensors = tensor\n\n # Check whether summaries are logged\n if self.summary_labels is None:\n return pass_tensors\n\n # Check whether not in while loop\n if 'while' in Module.global_scope: # 'cond' in Module.global_scope\n return pass_tensors\n\n # Check whether given label is logged\n if util.is_iterable(x=label):\n if self.summary_labels != 'all' and all(x not in self.summary_labels for x in label):\n return pass_tensors\n self.available_summaries.update(label)\n else:\n if self.summary_labels != 'all' and label not in self.summary_labels:\n return pass_tensors\n self.available_summaries.add(label)\n\n # Handle enumerate_last_rank\n if enumerate_last_rank:\n dims = util.shape(x=tensor)[-1]\n tensors = OrderedDict([(name + str(n), tensor[..., n]) for n in range(dims)])\n else:\n tensors = OrderedDict([(name, tensor)])\n\n if mean_variance:\n for name in list(tensors):\n tensor = tensors.pop(name)\n mean, variance = tf.nn.moments(x=tensor, axes=tuple(range(util.rank(x=tensor))))\n tensors[name + '-mean'] = mean\n tensors[name + '-variance'] = variance\n\n # TensorFlow summaries\n summaries = list()\n if step is None:\n assert Module.global_summary_step is not None\n step = Module.retrieve_tensor(name=Module.global_summary_step)\n else:\n step = Module.retrieve_tensor(name=step)\n for name, tensor in tensors.items():\n shape = util.shape(x=tensor)\n if shape == () or shape == (-1,):\n # Scalar\n summaries.append(tf.contrib.summary.scalar(name=name, tensor=tensor, step=step))\n elif shape == (1,) or shape == (-1, 1):\n # Single-value tensor as scalar\n tensor = tf.squeeze(input=tensor, axis=-1)\n summaries.append(tf.contrib.summary.scalar(name=name, tensor=tensor, step=step))\n else:\n # General tensor as histogram\n summaries.append(tf.contrib.summary.histogram(name=name, tensor=tensor, step=step))\n\n with tf.control_dependencies(control_inputs=summaries):\n return util.fmap(function=util.identity_operation, xs=pass_tensors)\n\n @staticmethod\n def get_module_class_and_kwargs(\n name, module=None, modules=None, default_module=None, **kwargs\n ):\n # name\n if not util.is_valid_name(name=name):\n raise TensorforceError.value(name='module', argument='name', value=name)\n # module\n # ???\n # modules\n if modules is not None and not 
isinstance(modules, dict):\n raise TensorforceError.type(name='module', argument='modules', value=modules)\n # default_module\n # ???\n if isinstance(module, dict):\n # Dictionary module specification (type either given via 'type' or 'default_module')\n util.deep_disjoint_update(target=kwargs, source=module)\n module = kwargs.pop('type', default_module)\n return Module.get_module_class_and_kwargs(\n name=name, module=module, modules=modules, default_module=default_module, **kwargs\n )\n\n elif isinstance(module, str):\n if os.path.isfile(module):\n # JSON file module specification\n with open(module, 'r') as fp:\n module = json.load(fp=fp)\n return Module.get_module_class_and_kwargs(\n name=name, module=module, modules=modules, default_module=default_module,\n **kwargs\n )\n\n elif '.' in module:\n # Library module specification\n library_name, module_name = module.rsplit('.', 1)\n library = importlib.import_module(name=library_name)\n module = getattr(library, module_name)\n return Module.get_module_class_and_kwargs(\n name=name, module=module, modules=modules, default_module=default_module,\n **kwargs\n )\n\n elif modules is not None and module in modules:\n # Keyword module specification\n return Module.get_module_class_and_kwargs(\n name=name, module=modules[module], default_module=default_module, **kwargs\n )\n\n elif 'default' in modules or default_module is not None:\n # Default module specification\n if '_first_arg' in kwargs:\n raise TensorforceError.value(name='module kwargs', value='_first_arg')\n if module is not None:\n kwargs['_first_arg'] = module\n if default_module is None:\n default_module = modules['default']\n return Module.get_module_class_and_kwargs(\n name=name, module=default_module, modules=modules, **kwargs\n )\n\n else:\n raise TensorforceError.value(name='module specification', value=module)\n\n elif not callable(module) and ('default' in modules or default_module is not None):\n # Default module specification\n if '_first_arg' in kwargs:\n raise TensorforceError.value(name='module kwargs', value='_first_arg')\n if module is not None:\n kwargs['_first_arg'] = module\n if default_module is None:\n default_module = modules['default']\n return Module.get_module_class_and_kwargs(\n name=name, module=default_module, modules=modules, **kwargs\n )\n\n elif callable(module):\n for key, arg in kwargs.items():\n assert arg is not None, (key, arg)\n if arg is None:\n assert False\n kwargs.pop(key)\n first_arg = kwargs.pop('_first_arg', None)\n return module, first_arg, kwargs\n\n else:\n raise TensorforceError.value(name='module specification', value=module)\n\n def add_module(\n self, name, module=None, modules=None, default_module=None, is_trainable=True,\n is_saved=True, is_subscope=False, **kwargs\n ):\n # name\n if name in self.modules:\n raise TensorforceError.exists(name='sub-module', value=name)\n # is_trainable\n if not isinstance(is_trainable, bool):\n raise TensorforceError.type(name='module', argument='is_trainable', value=is_trainable)\n # is_saved\n if not isinstance(is_saved, bool):\n raise TensorforceError.type(name='module', argument='is_saved', value=is_saved)\n\n module_cls, first_arg, kwargs = Module.get_module_class_and_kwargs(\n name=name, module=module, modules=modules, default_module=default_module, **kwargs\n )\n\n # Final callable module specification\n if Module.global_scope is None:\n raise TensorforceError.unexpected()\n\n # Global scope handling\n Module.is_add_module = True\n if is_subscope:\n Module.global_scope.append(name)\n\n # Set 
internal attributes\n Module.set_parent = self\n\n # Inherit arguments\n Module.inherit_l2_regularization = self.l2_regularization\n Module.inherit_summary_labels = self.summary_labels\n\n # Module constructor\n if first_arg is None:\n module = module_cls(name, **kwargs)\n else:\n module = module_cls(name, first_arg, **kwargs)\n\n # Reset\n Module.set_parent = None\n Module.inherit_l2_regularization = None\n Module.inherit_summary_labels = None\n\n # Global scope handling\n if is_subscope:\n Module.global_scope.pop()\n Module.is_add_module = False\n\n # Internal attributes\n module.is_subscope = is_subscope\n\n # Register module\n self.modules[name] = module\n if is_trainable:\n self.trainable_modules[name] = module\n if is_saved:\n self.saved_modules[name] = module\n\n return module\n\n def get_variables(self, only_trainable=False, only_saved=False):\n # only_trainable\n if not isinstance(only_trainable, bool):\n raise TensorforceError.type(name='get_variables', argument='only_trainable', value=only_trainable)\n # only_saved\n if not isinstance(only_saved, bool):\n raise TensorforceError.type(name='get_variables', argument='only_saved', value=only_saved)\n # not both\n if only_trainable and only_saved:\n raise TensorforceError.unexpected()\n\n if only_trainable:\n # Only trainable variables\n variables = list(self.trainable_variables.values())\n for module in self.trainable_modules.values():\n variables.extend(module.get_variables(only_trainable=only_trainable))\n\n elif only_saved:\n # Only saved variables\n variables = list(self.saved_variables.values())\n for module in self.saved_modules.values():\n variables.extend(module.get_variables(only_saved=only_saved))\n\n else:\n # All variables\n variables = list(self.variables.values())\n for module in self.modules.values():\n variables.extend(module.get_variables(only_trainable=only_trainable))\n\n return variables\n\n def get_available_summaries(self):\n summaries = set(self.available_summaries)\n for module in self.modules.values():\n summaries.update(module.get_available_summaries())\n return sorted(summaries)\n","sub_path":"tensorforce/core/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":45484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"57189659","text":"#!/usr/bin/env python\n\nfrom swift.common.utils import whataremyips\nfrom swift.common.ring import Ring\n\n\ndef whataremyactivelabels(ring_files=None, swift_conf_dir=\"/etc/swift\"):\n \"\"\"Read ring_files and extract a set of paths that should be on the\nhost running this script.\n\n Keyword arguments:\n ring_files -- a list of filenames containing the rings to scan.\n (default: [\"/etc/swift/object.ring.gz,\n \"/etc/swift/account.ring.gz,\n \"/etc/swift/container.ring.gz\"]\n swift_conf_dir -- the base path for swift config. 
(default: \"/etc/swift\")\n    \"\"\"\n    # takes an optional list of filenames to use as the ring files\n    # returns a list of paths that should be mounted under swift_path\n    if ring_files is None:\n        ring_files = [\"/etc/swift/%s.ring.gz\" % thing for thing in\n                      [\"object\", \"container\", \"account\"]]\n    my_ips = whataremyips()\n    devices = set()\n    for ring_file in ring_files:\n        ring = Ring(ring_file)\n        devices.update([entry['device'] for entry in ring.devs\n                        if float(entry['weight']) > 0 and\n                        entry['ip'] in my_ips])\n    return devices\n","sub_path":"swiftscripts/utils/rings.py","file_name":"rings.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"268478093","text":"# coding: utf-8\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import RequestContext\nimport random\nimport string\nimport simplejson\n\nfrom django.contrib import auth\n\nfrom django.contrib.auth.models import User\n\nfrom gait_analysis_service.user_management.models import Gait_user\nfrom gait_analysis_service.user_management.forms import userLoginoutForm, userModifyForm\n\nfrom django.template.loader import get_template\n\ndef noneIfEmptyString(value):\n    if value == \"\":\n        return None\n    return value\ndef noneIfNoKey(dict, key):\n    if key in dict:\n        value = dict[key]\n        if value == \"\":\n            return None\n        return value\n\n    return None\nclass myError(Exception):\n    def __init__(self, value):\n        self.value = value\n\n    def __str__(self):\n        return repr(self.value)\n\n\ndef index(request):\n    user_name = request.COOKIES.get('gait_user_name', '')\n    user_password = request.COOKIES.get('gait_user_pwd', '')\n    template = get_template('homepage/login.html')\n    html = template.render({'username': user_name, 'password': user_password}, request)\n    return HttpResponse(html)\n\n\ndef register(request):\n    try:\n        if request.method == \"POST\":\n            user_name = request.POST['user_name']\n            user_pwd1 = request.POST['user_pwd1']\n            user_pwd2 = request.POST['user_pwd2']\n            user_nickname = request.POST['user_nickname']\n\n            user_sex = request.POST['user_sex']\n            user_height = request.POST['user_height']\n            user_weight = request.POST['user_weight']\n            user_birthday = request.POST['year'] + request.POST['month']\n            user_email = request.POST['user_email']\n            errors = {}\n\n            # FORM VALIDATION\n            form = userLoginoutForm(request.POST)\n            if not form.is_valid():\n                errors = form.errors\n                template = get_template('homepage/register.html')\n                html = template.render({'errors': errors, 'form': form}, request)\n                return HttpResponse(html)\n            if user_pwd1 != user_pwd2:\n                errors['error_pwd'] = '两次输入的密码不一致!'\n                template = get_template('homepage/register.html')\n                html = template.render({'errors': errors, 'form': form}, request)\n                return HttpResponse(html)\n\n            filterUser = User.objects.filter(username=user_name)\n            if len(filterUser) > 0:\n                errors['error_username'] = '用户名已存在'\n                template = get_template('homepage/register.html')\n                html = template.render({'errors': errors, 'form': form}, request)\n                return HttpResponse(html)\n\n            user = User()\n            user.username = user_name\n            user.set_password(user_pwd1)\n            user.first_name = user_nickname\n            user.email = user_email\n            user.save()\n\n            gaituser = Gait_user(user=user, role=1, sex=user_sex, height=user_height, weight=user_weight,\n                                 birthday=user_birthday)\n            gaituser.save()\n\n            new_user = auth.authenticate(username=user_name, password=user_pwd1)\n            if new_user is not None:\n                auth.login(request, new_user)\n                return 
HttpResponseRedirect('/analysis/index/')\n except Exception:\n response = HttpResponse()\n response.write(\"\")\n return response\n\n template = get_template('homepage/register.html')\n html = template.render({}, request)\n return HttpResponse(html)\n\n\ndef userRegister_mobile(request):\n try:\n data = simplejson.loads(request.body)\n\n user_name = data['user']['name']\n user_pwd1 = data['user']['password']\n user_nickname = noneIfNoKey(data['user'], 'real_name')\n\n user_sex = noneIfNoKey(data['user'], 'sex')\n user_height = noneIfNoKey(data['user'], 'height')\n user_weight = noneIfNoKey(data['user'], 'weight')\n user_birthday = noneIfNoKey(data['user'], 'birthday')\n user_email = noneIfNoKey(data['user'], 'email')\n\n if user_nickname is None:\n user_nickname = 'user'\n if user_email is None:\n user_email = 'no email'\n\n user = User()\n user.username = user_name\n user.first_name = user_nickname\n user.set_password(user_pwd1)\n user.email = user_email\n user.save()\n\n gaituser = Gait_user(user=user, role=1, sex=user_sex, height=user_height, weight=user_weight,\n birthday=user_birthday)\n gaituser.save()\n\n result = {\n 'successful': True,\n 'error': {\n 'id': '',\n 'message': ''\n }\n }\n\n except Exception as e:\n result = {\n 'successful': False,\n 'error': {\n 'id': '1024',\n 'message': e.args\n }\n }\n finally:\n return HttpResponse(simplejson.dumps(result), content_type=\"application/json\")\n\n\ndef userLogin(request):\n if request.method == \"POST\":\n user_name = request.POST.get('user_name', '')\n user_password = request.POST.get('user_pwd', '')\n user_remember = request.POST.get('remember_pwd', '')\n\n try:\n user = auth.authenticate(username=user_name, password=user_password)\n if user and user.is_active:\n auth.login(request, user)\n response = HttpResponseRedirect('/analysis/index/')\n if user_remember == 'user_select':\n response.set_cookie('gait_user_name', user_name, 60 * 60 * 24 * 7)\n response.set_cookie('gait_user_pwd', user_password, 60 * 60 * 24 * 7)\n return response\n else:\n response = HttpResponse()\n response.write(\"\")\n return response\n except Exception as e:\n print('login error: %s', e)\n response = HttpResponse()\n response.write(\"\")\n return response\n else:\n template = get_template('homepage/index.html')\n html = template.render({}, request)\n return HttpResponse(html)\n\n\ndef userLogin_mobile(request):\n try:\n data = simplejson.loads(request.body)\n u_name = data['user']['name']\n u_password = data['user']['password']\n\n customerUser = auth.authenticate(username=u_name, password=u_password)\n\n if customerUser:\n customerToken = ''.join(random.sample(string.ascii_letters + string.digits, 30))\n User.objects.filter(username=u_name).update(last_name=customerToken)\n\n result = {\n 'data': {\n 'token': customerToken,\n 'expire': -1\n },\n 'successful': True,\n 'error': {\n 'id': '',\n 'message': ''\n }\n }\n except Exception as e:\n result = {\n 'successful': False,\n 'error': {\n 'id': '1024',\n 'message': e.args\n }\n }\n finally:\n return HttpResponse(simplejson.dumps(result), content_type=\"application/json\")\n\n\ndef userLogout(request):\n auth.logout(request)\n response = HttpResponseRedirect('/homepage/index/')\n # response.delete_cookie('gait_user_name')\n # response.delete_cookie('gait_user_pwd')\n return response\n\n\ndef userLogout_mobile(request):\n try:\n data = simplejson.loads(request.body)\n # 获取用户令牌\n customerToken = data['token']\n token = User.objects.get(last_name=customerToken)\n token.delete()\n result = {\n 'successful': 
True,\n 'error': {\n 'id': '',\n 'message': ''\n }\n }\n except Exception as e:\n result = {\n 'successful': False,\n 'error': {\n 'id': '1024',\n 'message': e.args\n }\n }\n finally:\n return HttpResponse(simplejson.dumps(result), content_type=\"application/json\")\n\n\ndef checkUsername(request):\n if request.method == 'GET':\n try:\n user = User.objects.get(username=request.GET['user_name'])\n if user is not None:\n return HttpResponse(simplejson.dumps({'result': 0}))\n except Exception:\n return HttpResponse(simplejson.dumps({'result': 1}))\n\n\ndef modify(request):\n try:\n if request.user.is_authenticated:\n user = request.user\n gaituser = Gait_user.objects.get(user=user)\n\n user_sex = gaituser.sex\n\n user_birthday = gaituser.birthday\n year = str(user_birthday)[0:4]\n month = str(user_birthday)[4:]\n\n result = {}\n result['nickname'] = user.first_name\n result['user_weight'] = gaituser.weight\n result['user_height'] = gaituser.height\n result['email'] = user.email\n result['year'] = year\n result['month'] = month\n\n template = get_template('homepage/modify.html')\n html = template.render({'result': result, 'sex': simplejson.dumps(user_sex)}, request)\n return HttpResponse(html)\n else:\n response = HttpResponse()\n response.write(\"\")\n return response\n\n except Exception as e:\n response = HttpResponse()\n response.write(\"\")\n return response\n\n\ndef queryInformation_mobile(request):\n try:\n data = simplejson.loads(request.body)\n\n customerUser = User.objects.get(last_name=data['token'])\n gaituser = Gait_user.objects.get(user=customerUser)\n result = {\n 'user': {\n \"name\": customerUser.username,\n \"real_name\": customerUser.first_name,\n \"height\": gaituser.height,\n \"weight\": gaituser.weight,\n \"sex\": gaituser.sex,\n \"birthday\": gaituser.birthday,\n \"email\": customerUser.email\n },\n 'successful': True,\n 'error': {\n 'id': '',\n 'message': ''\n }\n }\n except Exception as e:\n result = {\n 'successful': False,\n 'error': {\n 'id': '1024',\n 'message': e.args\n }\n }\n finally:\n return HttpResponse(simplejson.dumps(result), content_type=\"application/json\")\n\n\ndef modifyPassword(request):\n if request.user.is_authenticated():\n try:\n if request.method == \"POST\":\n old_pwd = request.POST['oldpwd']\n new_pwd1 = request.POST['newpwd1']\n new_pwd2 = request.POST['newpwd2']\n\n response = HttpResponse()\n if not old_pwd:\n response.write(\"\")\n return response\n\n if new_pwd1 != new_pwd2:\n response.write(\n \"\")\n return response\n\n user = auth.authenticate(username=request.user, password=old_pwd)\n\n if user and user.is_active:\n user.set_password(new_pwd1)\n user.save()\n response.write(\"\")\n return response\n else:\n response.write(\"\")\n return response\n\n except Exception as e:\n print(e)\n response = HttpResponse()\n response.write(\"\")\n return response\n else:\n response = HttpResponse()\n response.write(\"\")\n return response\n\n\ndef modifyPassword_mobile(request):\n try:\n data = simplejson.loads(request.body)\n\n user_token = User.objects.get(last_name=data['token'])\n user = auth.authenticate(username=request.user, password=data['user']['old_password'])\n\n if user and user.is_active and user == user_token:\n user.set_password(data['user']['new_password'])\n user.save()\n\n result = {\n 'successful': True,\n 'error': {\n 'id': '',\n 'message': ''\n }\n }\n else:\n raise myError('原密码输入错误!')\n except myError as e:\n result = {\n 'successful': False,\n 'error': {\n 'id': '3',\n 'message': e.value\n }\n }\n except Exception as e:\n 
result = {\n 'successful': False,\n 'error': {\n 'id': '1024',\n 'message': e.args\n }\n }\n finally:\n return HttpResponse(simplejson.dumps(result), content_type=\"application/json\")\n\n\ndef modifyInformation(request):\n if request.user.is_authenticated():\n try:\n if request.method == \"POST\":\n nickname = request.POST['user_nickname']\n user_sex = request.POST['user_sex']\n user_birthday = request.POST['year'] + request.POST['month']\n user_weight = request.POST['user_weight']\n user_height = request.POST['user_height']\n email = request.POST['user_email']\n\n form = userModifyForm(request.POST)\n if not form.is_valid():\n print(form.errors)\n response = HttpResponse()\n response.write(\"\")\n return response\n else:\n user = request.user\n gaituser = Gait_user.objects.get(user=request.user)\n\n user.first_name = nickname\n user.email = email\n gaituser.weight = user_weight\n gaituser.height = user_height\n gaituser.sex = user_sex\n gaituser.birthday = user_birthday\n\n user.save()\n gaituser.save()\n\n response = HttpResponse()\n response.write(\"\")\n return response\n\n except Exception as e:\n print(e)\n response = HttpResponse()\n response.write(\"\")\n return response\n\n else:\n response = HttpResponse()\n response.write(\"\")\n return response\n\n\ndef modifyInformation_mobile(request):\n try:\n data = simplejson.loads(request.body)\n user = User.objects.get(last_name=data['token'])\n gaituser = Gait_user.objects.get(user=request.user)\n if 'real_name' in data['user']:\n user.first_name = noneIfEmptyString(data['user']['real_name'])\n if 'height' in data['user']:\n gaituser.height = noneIfEmptyString(data['user']['height'])\n if 'weight' in data['user']:\n gaituser.weight = noneIfEmptyString(data['user']['weight'])\n if 'sex' in data['user']:\n gaituser.sex = noneIfEmptyString(data['user']['sex'])\n if 'birthday' in data['user']:\n gaituser.birthday = noneIfEmptyString(data['user']['birthday'])\n if 'email' in data['user']:\n user.email = noneIfEmptyString(data['user']['email'])\n user.save()\n result = {\n 'successful': True,\n 'error': {\n 'id': '',\n 'message': ''\n }\n }\n except Exception as e:\n result = {\n 'successful': False,\n 'error': {\n 'id': '1024',\n 'message': e.args\n }\n }\n finally:\n return HttpResponse(simplejson.dumps(result), content_type=\"application/json\")\n","sub_path":"gait_analysis_service/user_management/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"529937232","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('principal', '0008_auto_20141120_1138'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Pases',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('langcode', models.CharField(max_length=10)),\n ('enurl', models.CharField(max_length=100)),\n ('esurl', models.CharField(max_length=100)),\n ('activo', models.BooleanField(default=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterField(\n model_name='items',\n name='tipo',\n field=models.CharField(default=b'I', max_length=1),\n ),\n 
]\n","sub_path":"principal/migrations/0009_auto_20141124_1505.py","file_name":"0009_auto_20141124_1505.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"17000834","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Nov 5 12:34:36 2019\n\n@author: suman.choudhury\n\"\"\"\n\nimport sys\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nimport imutils\nimport os\nimport glob\nimport json\nfrom src.apparel_detector import ApparelDetector\nfrom src.color_detector import ColorDetector\nfrom src.get_product_details import *\nimport argparse\nimport csv\n\nglobal graph\ngraph = tf.get_default_graph()\n\n\nclass ApparelColorDetector():\n\n \"\"\"\n Apparel and its corresponding color detector\n\n \"\"\"\n\n def __init__(self):\n\n with open(\"config/config.json\", \"r\") as f:\n config = json.load(f)\n f.close()\n self.model_apparel_detection = ApparelDetector(config)\n self.model_color_detection = ColorDetector(config)\n\n def predict(self,image_arr):\n\n test_image_path = image_arr\n\n # getting the file name with .jpg extension\n #base_file_name = os.path.basename(test_image_path)\n\n # reading an image\n image = test_image_path\n\n # getting the width and height\n [h, w] = image.shape[:2]\n\n\n # calling the apparel detector and getting the boxes and scores\n (boxes, scores, classes, num_detections) = self.model_apparel_detection.model_predict(image)\n\n # getting the total number of products and predictions\n totalcount, product_name, product_confidence, product_image = results(image, h, w, boxes, scores, classes,\n num_detections)\n resp={}\n resp[\"number_of_products\"] = totalcount\n\n for num in range(0, totalcount):\n product_number = num + 1\n name = product_name[num]\n\n conf = product_confidence[num]\n cropped_image = product_image[num]\n with graph.as_default():\n color_class_name, color_class_score = self.model_color_detection.model_predict(cropped_image)\n aa = \"Blue : {:.2f}% Green : {:.2f}% Red : {:.2f}%\".format(color_class_score[0], color_class_score[1],\n color_class_score[2])\n\n resp['Product_' + str(product_number)] = name\n resp['Product_' + str(product_number) + '' + '_confidence'] = conf\n resp['Product_' + str(product_number) + '' + '_predicted color'] = color_class_name\n resp['Product_' + str(product_number) + '' + '_color_percentages'] = aa\n resp['Product_' + str(product_number) + '' + '_name'] = name\n result = resp\n return result\n\n\n\n","sub_path":"src/apparel_color_detector.py","file_name":"apparel_color_detector.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"205681063","text":"'''Plugin entry point.'''\n\n# pylint: disable=broad-except\nimport re\nimport pynvim # type: ignore\nfrom gdb.common import BaseCommon, Common\nfrom gdb.app import App\nfrom gdb.config import Config\nfrom gdb.logger import Logger\n\n\n@pynvim.plugin\nclass Gdb(Common):\n '''Plugin implementation.'''\n def __init__(self, vim):\n common = BaseCommon(vim, Logger(), None)\n super().__init__(common)\n self.apps = {}\n self.ansi_escaper = re.compile(r'\\x1B[@-_][0-?]*[ -/]*[@-~]')\n\n def _get_app(self):\n return self.apps[self.vim.current.tabpage.handle]\n\n @pynvim.function('GdbInit', sync=True)\n def gdb_init(self, args):\n '''Command GdbInit.'''\n # Prepare configuration: keymaps, hooks, parameters etc.\n common = BaseCommon(self.vim, 
self.logger, Config(self))\n app = App(common, *args)\n self.apps[self.vim.current.tabpage.handle] = app\n app.start()\n\n @pynvim.function('GdbCleanup', sync=True)\n def gdb_cleanup(self, _):\n '''Command GdbCleanup.'''\n tab = self.vim.current.tabpage.handle\n try:\n app = self.apps[tab]\n app.cleanup()\n finally:\n del self.apps[tab]\n\n @pynvim.function('GdbCheckTab', sync=True)\n def gdb_check_tab(self, _):\n '''Command GdbCheckTab.'''\n try:\n return self.vim.current.tabpage.handle in self.apps\n except Exception as ex:\n self.log(\"GdbCheckTab: \" + str(ex))\n\n @pynvim.function('GdbHandleEvent', sync=True)\n def gdb_handle_event(self, args):\n '''Command GdbHandleEvent.'''\n try:\n app = self._get_app()\n handler = getattr(app, args[0])\n handler()\n except Exception as ex:\n self.log(\"GdbHandleEvent: \" + str(ex))\n\n @pynvim.function('GdbSend', sync=True)\n def gdb_send(self, args):\n '''Command GdbSend.'''\n try:\n app = self._get_app()\n app.send(*args)\n except Exception as ex:\n self.log(\"GdbSend: \" + str(ex))\n\n @pynvim.function('GdbBreakpointToggle', sync=True)\n def gdb_breakpoint_toggle(self, _):\n '''Command GdbBreakpointToggle.'''\n try:\n app = self._get_app()\n app.breakpoint_toggle()\n except Exception as ex:\n self.log('GdbBreakpointToggle: ' + str(ex))\n\n @pynvim.function('GdbBreakpointClearAll', sync=True)\n def gdb_breakpoint_clear_all(self, _):\n '''Command GdbBreakpointClearAll.'''\n try:\n app = self._get_app()\n app.breakpoint_clear_all()\n except Exception as ex:\n self.log('GdbBreakpointClearAll: ' + str(ex))\n\n @pynvim.function('GdbParserFeed')\n def gdb_parser_feed(self, args):\n '''Command GdbParserFeed.'''\n\n try:\n tab = args[0]\n app = self.apps[tab]\n content = args[1]\n for i, ele in enumerate(content):\n content[i] = self.ansi_escaper.sub('', ele)\n app.parser.feed(content)\n except Exception as ex:\n self.log('GdbParserFeed: ' + str(ex))\n\n @pynvim.function('GdbCallAsync')\n def gdb_call_async(self, args):\n '''Command GdbCallAsync.'''\n try:\n obj = self._get_app()\n for name in args[0].split('.'):\n obj = getattr(obj, name)\n obj(*args[1:])\n except Exception as ex:\n self.log('GdbCallAsync: ' + str(ex))\n\n @pynvim.function('GdbCall', sync=True)\n def gdb_call(self, args):\n '''\n Reads a period separated list of words and invokes the corresponding\n method on the `App` class.\n e.g.\n self.gdb_call(['custom_command'] + args)\n maps to\n self.app.custom_command(args)\n '''\n try:\n obj = self._get_app()\n for name in args[0].split('.'):\n obj = getattr(obj, name)\n if callable(obj):\n return obj(*args[1:])\n return obj\n except Exception as ex:\n self.log('GdbCall: ' + str(ex))\n return None\n\n @pynvim.function('GdbCustomCommand', sync=True)\n def gdb_custom_command(self, args):\n '''Command GdbCustomCommand.'''\n return self.gdb_call([\"custom_command\"] + args)\n\n @pynvim.function('GdbCreateWatch', sync=True)\n def gdb_create_watch(self, args):\n '''Command GdbCreateWatch.'''\n return self.gdb_call([\"create_watch\"] + args)\n\n @pynvim.function('GdbTestPeek', sync=True)\n def gdb_test_peek(self, args):\n '''Command GdbTestPeek.'''\n try:\n obj = self._get_app()\n for i, arg in enumerate(args):\n obj = getattr(obj, arg)\n if callable(obj):\n return obj(*args[i+1:])\n return obj\n except Exception as ex:\n self.log('GdbTestPeek: ' + str(ex))\n return None\n\n @pynvim.function('GdbTestPeekConfig', sync=True)\n def gdb_test_peek_config(self, _):\n '''Command GdbTestPeekConfig.'''\n try:\n app = self._get_app()\n config = {k: v for k, v 
in app.config.config.items()}\n for key, val in config.items():\n if callable(val):\n config[key] = str(val)\n return config\n except Exception as ex:\n self.log('GdbTestPeekConfig: ' + str(ex))\n return None\n","sub_path":"rplugin/python3/gdb/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"458673393","text":"from tkinter import *\r\nimport pyttsx3\r\n\r\nroot = Tk()\r\nroot.title(\"Text To Speech\")\r\nroot.geometry(\"600x500\")\r\nroot.iconbitmap('C:/Users/HP/Desktop')\r\n\r\ndef talk():\r\n e = pyttsx3.init()\r\n e.say(my_entry.get())\r\n e.runAndWait()\r\n my_entry.delete(0,END)\r\n\r\nmy_entry = Entry(root,font=(\"Helvetica\",28))\r\nmy_entry.pack(pady=20)\r\n\r\nmy_button=Button(root,text=\"Speak\",bg=\"#bfde23\",padx=20,pady=9,command=talk)\r\nmy_button.pack(padx=20)\r\n\r\nroot.mainloop()","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"291442159","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThe script calculates temperature difference between Sodankyla and Helsinki stations.\r\nCalculates mean value and standard deviation for summer temperatures for both stations.\r\n\r\n@author: Pavel\r\n\"\"\"\r\n# Import pandas\r\nimport pandas as pd\r\n\r\n# Import monthly data of Helsinki station\r\nfrom temperature_anomalies import monthlyData as monthlyDataHel, monthes_dictionary as monthes_dictionary\r\n\r\n# Read datafile\r\ndata = pd.read_csv(\"1308661.txt\", sep=\"\\s+\", names=['STATION','STATION_NAME','STATION_NAME_1','STATION_NAME_2','ELEVATION','LATITUDE','LONGITUDE','DATE','PRCP','TMAX','TMIN'], skiprows=2, na_values=['-9999'])\r\n\r\n# Merge station name to single column\r\ndata['STATION_NAME'] = data['STATION_NAME'] + ' ' + data['STATION_NAME_1'] + ' ' + data['STATION_NAME_2'] \r\n\r\n# Drop unnecessary columns\r\ndata = data.drop(['STATION_NAME_1','STATION_NAME_2'],axis=1)\r\n\r\n# Calculate average temperature\r\ndata['TAVG'] = data[['TMAX','TMIN']].mean(axis=1,skipna=False)\r\n\r\n# The function converts temperature from Fahrenheit to Celsius\r\ndef fahrenheitToCelsius(fahr_temp):\r\n converted_temp = (fahr_temp - 32) / 1.8\r\n return converted_temp\r\n \r\n# Create year-month, month columns\r\ndata['YM'] = (data['DATE'].astype(str)).str.slice(start=0,stop=6)\r\ndata['monthNumber'] = (data['DATE'].astype(str)).str.slice(start=4,stop=6)\r\n\r\n# Convert temperatures to Celsius\r\ndata['TAVG_Celsius'] = fahrenheitToCelsius(data['TAVG'])\r\n\r\n# Create empty Dataframe\r\nmonthlyData = pd.DataFrame()\r\n\r\n# Group Dataframe\r\ngrouped_month = data.groupby('YM')\r\n\r\n# Aggregate data\r\nfor key, group in grouped_month:\r\n mean_value = group[['TAVG_Celsius']].mean()\r\n mean_value['YM'] = key\r\n mean_value['monthNumber'] = key[4:6]\r\n monthlyData = monthlyData.append(mean_value, ignore_index=True)\r\n \r\n# Reorder columns\r\nmonthlyData = monthlyData[['YM','monthNumber','TAVG_Celsius']]\r\n\r\n# Create empty Dataframe\r\nreferenceTemps = pd.DataFrame()\r\n\r\n# Group by month\r\ngrouped_data = data.groupby('monthNumber')\r\n\r\n# Iterate groups\r\nfor key, group in grouped_data:\r\n row = group[['TAVG_Celsius']].mean()\r\n row['monthNumber'] = key\r\n referenceTemps = referenceTemps.append(row,ignore_index=True)\r\n \r\n# Rename columns\r\nreferenceTemps = 
referenceTemps.rename(columns={'TAVG_Celsius':'avgTempsC'})\r\n \r\n# Merge with dictionary\r\nreferenceTemps = referenceTemps.merge(monthes_dictionary,on='monthNumber')\r\n\r\n# Join monthlyData and referenceTemps\r\nmonthlyData = monthlyData.merge(referenceTemps,how='left', on='monthNumber',sort=False)\r\n\r\n# Compare temperatures\r\nmonthlyData['Diff'] = monthlyData['TAVG_Celsius'] - monthlyData['avgTempsC']\r\n\r\n# Merge monthly temperatures in Sodankyla Lokka and Helsinki\r\nmonthlyData = monthlyData.merge(monthlyDataHel,how='inner', on='YM',sort=False)\r\n\r\n# Calculate difference\r\nmonthlyData['Diff_SodHel'] = monthlyData['TAVG_Celsius_x'] - monthlyData['TAVG_Celsius_y']\r\n\r\n# Choose summer temperatures\r\nsummer = monthlyData.loc[(monthlyData['monthNumber_x'] == '06') | (monthlyData['monthNumber_x'] == '07') | (monthlyData['monthNumber_x'] == '08')]\r\n\r\n# Output summer temperatures to csv\r\nsummer.to_csv(\"Summer_SodHel.csv\", sep=',', index=False, float_format='%.3f')\r\n\r\n# Calculate mean summer temperatures for Sodankyla Lokka station\r\nmeanSummerSod = summer['TAVG_Celsius_x'].mean()\r\n\r\n# Calculate mean summer temperatures for Helsinki station\r\nmeanSummerHel = summer['TAVG_Celsius_y'].mean()\r\n\r\n# Calculate std summer temperatures for Sodankyla Lokka station\r\nstdSummerSod = summer['TAVG_Celsius_x'].std()\r\n\r\n# Calculate std summer temperatures for Helsinki station\r\nstdSummerHel = summer['TAVG_Celsius_y'].std()\r\n\r\n\r\n","sub_path":"weather_comparisons.py","file_name":"weather_comparisons.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"382954295","text":"from tkinter import Tk, StringVar, Button, Text, Entry, LabelFrame, Radiobutton, IntVar, Toplevel\nimport xlrd\nimport random\n\nwb = xlrd.open_workbook('ynm3000.xls')\nws0 = wb.sheet_by_index(0)\n\nwin = Tk()\nwin.title('exam')\nsw = win.winfo_screenwidth()\nsh = win.winfo_screenheight()\nww = 250\nwh = 180\nx = (sw - ww) / 4\ny = (sh - wh) / 2\nwin.geometry(\"%dx%d+%d+%d\" % (ww, wh, x, y))\n\nvar2 = StringVar()\nflag0 = 0\nr = list(range(1, 3037))\nvar2.set(5)\n\n\ndef Reset(event=0):\n    global r\n    global flag0\n    r = list(range(1, 3037))\n    flag0 = 0\n\n\ndef Copy(event=0):\n    global num\n    global numlasat\n    numlasat = num\n    word0 = t.selection_get()\n    for i in range(2, ws0.nrows + 1):\n        if ws0.cell(i - 1, 4).value == word0:\n            num = i\n    show()\n    win.clipboard_clear()\n    win.clipboard_append(t.selection_get())\n    # print(t.selection_get())\n\n\ndef Exam(event=0):\n    global r\n    global flag0\n    flag2 = 0\n    n = var2.get()\n    n = int(n)\n    if len(r) < n + 1:\n        random.shuffle(r)\n        flag2 = 1\n    else:\n        l = random.sample(r, n)\n        r = [x for x in r if x not in set(l)]\n    t.delete('1.0', 'end')\n    if flag2:\n        if flag0:\n            t.insert('end', 'The End')\n        else:\n            flag0 = 1\n            for i in r:\n                t.insert('end', ws0.cell(i, 4).value)\n                t.insert('end', '\\n')\n    else:\n        for i in l:\n            t.insert('end', ws0.cell(i, 4).value)\n            t.insert('end', '\\n')\n\n\nframe1 = LabelFrame(win)\nframe1.pack(side='left')\nb = Button(frame1, text='Exam', font=(\n    'Times New Roman', 12), command=Exam)\n# b.pack(side='left')\nb.pack()\nb1 = Button(frame1, text='Reset', font=(\n    'Times New Roman', 12), command=Reset)\n# b1.pack(side='left')\nb1.pack()\nb2 = Button(frame1, text='Copy', font=(\n    'Times New Roman', 12), command=Copy)\nb2.pack()\ne1 = Entry(frame1, width=5, textvariable=var2,\n           justify='center', font=('Times New Roman', 12))\n# 
e.pack(side='left')\ne1.pack()\nt = Text(win, font=('Times New Roman', 18))\nt.pack(side='left')\nt.bind('', Copy)\nt.bind('', Copy)\nwin.bind('', Exam)\n\n\nwin2 = Toplevel(win)\nwin2.title('word')\nww = 500\nwh = 400\nx = (sw - ww) / 2\ny = (sh - wh) / 2\nwin2.geometry(\"%dx%d+%d+%d\" % (ww, wh, x, y))\n\nvar = StringVar()\nnum = 0\nflag = 0\nnumlasat = 0\nnnum = 0\n\n\ndef show():\n var.set(ws0.cell(num - 1, 4).value)\n t1.delete('1.0', 'end')\n t1.insert('end', '难度:')\n t1.insert('end', ws0.cell(num - 1, 0).value)\n t1.insert('end', '\\n\\n')\n t1.insert('end', '考法义:\\n')\n t1.insert('end', ws0.cell(num - 1, 5).value)\n t1.insert('end', '\\n\\n')\n t1.insert('end', '例句:\\n')\n t1.insert('end', ws0.cell(num - 1, 6).value)\n if ws0.cell(num - 1, 2).value != '':\n t1.insert('end', '\\n\\n')\n t1.insert('end', '记忆法:\\n')\n t1.insert('end', ws0.cell(num - 1, 2).value)\n if ws0.cell(num - 1, 3).value != '':\n t1.insert('end', '\\n\\n')\n t1.insert('end', '派生词:\\n')\n t1.insert('end', ws0.cell(num - 1, 3).value)\n if ws0.cell(num - 1, 7).value != '':\n t1.insert('end', '\\n\\n')\n t1.insert('end', '近义词:\\n')\n t1.insert('end', ws0.cell(num - 1, 7).value)\n if ws0.cell(num - 1, 8).value != '':\n t1.insert('end', '\\n\\n')\n t1.insert('end', '反义词:\\n')\n t1.insert('end', ws0.cell(num - 1, 8).value)\n if ws0.cell(num - 1, 9).value != '':\n t1.insert('end', '\\n\\n')\n t1.insert('end', '填空:\\n')\n t1.insert('end', ws0.cell(num - 1, 9).value)\n\n\ndef Next(event=0):\n global num\n global nnum\n global numlasat\n numlasat = num\n if v2.get() == 1:\n nnum = ws0.cell(num - 1, 12).value\n while True:\n nnum += 1\n for i in range(1, ws0.nrows):\n if ws0.cell(i - 1, 12).value == nnum:\n num = i\n if ws0.cell(num - 1, 0).value == v.get() or v.get() == 0 \\\n or (v.get() == 4 and (ws0.cell(num - 1, 0).value == 2 or ws0.cell(num - 1, 0).value == 3)):\n break\n else:\n while True:\n num += 1\n if ws0.cell(num - 1, 0).value == v.get() or v.get() == 0 \\\n or (v.get() == 4 and (ws0.cell(num - 1, 0).value == 2 or ws0.cell(num - 1, 0).value == 3)):\n break\n show()\n\n\ndef Prev(event=0):\n global num\n global numlasat\n numlasat = num\n if v2.get() == 1:\n nnum = ws0.cell(num - 1, 12).value\n while True:\n nnum -= 1\n for i in range(1, ws0.nrows):\n if ws0.cell(i - 1, 12).value == nnum:\n num = i\n if ws0.cell(num - 1, 0).value == v.get() or v.get() == 0 \\\n or (v.get() == 4 and (ws0.cell(num - 1, 0).value == 2 or ws0.cell(num - 1, 0).value == 3)):\n break\n else:\n while True:\n num -= 1\n if ws0.cell(num - 1, 0).value == v.get() or v.get() == 0 \\\n or (v.get() == 4 and (ws0.cell(num - 1, 0).value == 2 or ws0.cell(num - 1, 0).value == 3)):\n break\n show()\n\n\ndef Rand(event=0):\n global num\n global numlasat\n numlasat = num\n while True:\n num = random.randint(1, ws0.nrows)\n if ws0.cell(num - 1, 0).value == v.get() or v.get() == 0 \\\n or (v.get() == 4 and (ws0.cell(num - 1, 0).value == 2 or ws0.cell(num - 1, 0).value == 3)):\n break\n show()\n\n\ndef Last(event=0):\n global num\n global numlasat\n num, numlasat = numlasat, num\n show()\n\n\ndef Hide(event=0):\n global flag\n if flag == 0:\n win2.geometry('230x80')\n flag = 1\n else:\n win2.geometry('500x400')\n flag = 0\n\n\ndef Find(event):\n global num\n global numlasat\n numlasat = num\n word0 = e.get()\n # find_word(word0, ws0)\n for i in range(2, ws0.nrows + 1):\n if ws0.cell(i - 1, 4).value == word0:\n num = i\n show()\n\n\ndef Copy2(event=0):\n global num\n global numlasat\n numlasat = num\n word0 = t1.selection_get()\n for i in range(2, 
ws0.nrows + 1):\n if ws0.cell(i - 1, 4).value == word0:\n num = i\n show()\n\n\ngroup0 = LabelFrame(win2)\ngroup0.pack(expand='yes', fill='both')\n\nframe1 = LabelFrame(group0)\nframe1.pack(side='left')\nframe2 = LabelFrame(group0)\nframe2.pack(side='right')\n\nb = Button(frame1, text='Hide', font=(\n 'Times New Roman', 12), command=Hide)\n# b.pack(expand='yes', fill='both')\nb.pack(side='left')\nspan = 2\n# b.grid(row=1, column=1)\n# b.place(x=30, y=0)\n\nb2 = Button(frame1, text='Rand', font=(\n 'Times New Roman', 12), command=Rand)\nb2.pack(padx=0, side='right')\n\nb3 = Button(frame2, text='Next', font=(\n 'Times New Roman', 12), command=Next)\nb3.pack(padx=0, side='left')\n\nb4 = Button(frame2, text='Prev', font=(\n 'Times New Roman', 12), command=Prev)\nb4.pack(side='right')\n\nb5 = Button(frame1, text='Last', font=(\n 'Times New Roman', 12), command=Last)\nb5.pack(side='right')\n\n\ne = Entry(win2, width=10, textvariable=var, bd=2, font=('Times New Roman', 20))\ne.pack(expand='yes', fill='both')\n\nt1 = Text(win2, width=40, font=('Times New Roman', 16))\nt1.pack(side='left')\nt1.bind('', Prev)\nt1.bind('', Next)\nt1.bind('', Rand)\nt1.bind('', Hide)\nt1.bind('', Last)\nt1.bind('', Copy2)\ne.bind('', Find)\n\ngroup = LabelFrame(win2, text='难度')\ngroup.pack(side='top', expand='yes', fill='both')\n\nDIFF = [('全部', 0), ('简单', 1), ('可记', 2), ('难记', 3), ('要记', 4)]\nv = IntVar()\nv.set(0)\nfor texts, nums in DIFF:\n r1 = Radiobutton(group, text=texts, variable=v, value=nums)\n r1.pack()\n\ngroup2 = LabelFrame(win2, text='排序')\ngroup2.pack(side='bottom', expand='yes', fill='both')\n\nDIFF2 = [('顺序', 0), ('逆序', 1)]\nv2 = IntVar()\nv2.set(0)\nfor texts, nums in DIFF2:\n r2 = Radiobutton(group2, text=texts, variable=v2, value=nums)\n r2.pack()\n\n\nwin.mainloop()\n","sub_path":"exam.py","file_name":"exam.py","file_ext":"py","file_size_in_byte":7983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"492725564","text":"from django.contrib.contenttypes.models import ContentType\nfrom django.test import TestCase, override_settings, modify_settings\nfrom django.urls import reverse\nfrom django.contrib.auth.models import User\nfrom wagtailcache.settings import wagtailcache_settings\nfrom wagtailcache.cache import CacheControl, Status, clear_cache\nfrom wagtail.core import hooks\nfrom wagtail.core.models import PageViewRestriction\n\nfrom home.models import (\n CachedPage,\n CacheControlPage,\n CallableCacheControlPage,\n WagtailPage,\n)\n\n\ndef hook_true(obj, is_cacheable: bool) -> bool:\n return True\n\n\ndef hook_false(obj, is_cacheable: bool) -> bool:\n return False\n\n\ndef hook_any(obj, is_cacheable: bool):\n return obj\n\n\nclass WagtailCacheTest(TestCase):\n @classmethod\n def get_content_type(cls, modelname: str):\n ctype, _ = ContentType.objects.get_or_create(\n model=modelname, app_label=\"home\"\n )\n return ctype\n\n @classmethod\n def setUpClass(cls):\n cls.header_name = wagtailcache_settings.WAGTAIL_CACHE_HEADER\n # Create an admin user.\n cls.user = User.objects.create(\n username=\"admin\",\n is_superuser=True,\n )\n # Create some pages.\n cls.page_cachedpage = CachedPage(\n title=\"CachedPage\",\n slug=\"cachedpage\",\n content_type=cls.get_content_type(\"cachedpage\"),\n )\n cls.page_cachedpage_restricted = CachedPage(\n title=\"CachedPage\",\n slug=\"cachedpage-restricted\",\n content_type=cls.get_content_type(\"cachedpage\"),\n )\n cls.page_cachecontrolpage = CacheControlPage(\n title=\"CacheControlPage\",\n 
slug=\"cachecontrolpage\",\n content_type=cls.get_content_type(\"cachecontrolpage\"),\n )\n cls.page_callablecachecontrolpage = CallableCacheControlPage(\n title=\"CachedPage\",\n slug=\"callablecachecontrolpage\",\n content_type=cls.get_content_type(\"callablecachecontrolpage\"),\n )\n cls.page_wagtailpage = WagtailPage.objects.get(slug=\"home\")\n cls.page_wagtailpage.add_child(instance=cls.page_cachedpage)\n cls.page_wagtailpage.add_child(instance=cls.page_cachedpage_restricted)\n cls.page_wagtailpage.add_child(instance=cls.page_cachecontrolpage)\n cls.page_wagtailpage.add_child(\n instance=cls.page_callablecachecontrolpage\n )\n\n # Create the view restriction.\n cls.view_restriction = PageViewRestriction.objects.create(\n page=cls.page_cachedpage_restricted,\n restriction_type=PageViewRestriction.PASSWORD,\n password=\"the cybers\",\n )\n\n # List of pages to test.\n cls.should_cache_pages = [\n cls.page_wagtailpage,\n cls.page_cachedpage,\n ]\n cls.skip_cache_pages = [\n cls.page_cachedpage_restricted,\n cls.page_cachecontrolpage,\n cls.page_callablecachecontrolpage,\n ]\n\n @classmethod\n def tearDownClass(cls):\n # Delete view restriction.\n cls.view_restriction.delete()\n # Delete pages.\n cls.page_cachedpage.delete()\n cls.page_cachecontrolpage.delete()\n cls.page_callablecachecontrolpage.delete()\n # Delete user.\n cls.user.delete()\n\n def tearDown(self):\n # Clear the cache and log out between each test.\n clear_cache()\n self.client.logout()\n # Delete any hooks.\n try:\n del hooks._hooks[\"is_request_cacheable\"]\n except KeyError:\n pass\n try:\n del hooks._hooks[\"is_response_cacheable\"]\n except KeyError:\n pass\n\n # --- UTILITIES ------------------------------------------------------------\n\n def get_hit(self, url: str):\n \"\"\"\n Gets a page and tests that it was served from the cache.\n \"\"\"\n # HEAD\n response = self.client.head(url)\n self.assertEqual(response.get(self.header_name, None), Status.HIT.value)\n # GET\n response = self.client.get(url)\n self.assertEqual(response.get(self.header_name, None), Status.HIT.value)\n return response\n\n def get_miss(self, url: str):\n \"\"\"\n Gets a page and tests that it was not served from the cache.\n \"\"\"\n # HEAD\n response = self.client.head(url)\n self.assertEqual(\n response.get(self.header_name, None), Status.MISS.value\n )\n # GET\n response = self.client.get(url)\n self.assertEqual(\n response.get(self.header_name, None), Status.MISS.value\n )\n return response\n\n def get_skip(self, url: str):\n \"\"\"\n Gets a page and tests that it was intentionally not served from\n the cache.\n \"\"\"\n # HEAD\n response = self.client.head(url)\n self.assertEqual(\n response.get(self.header_name, None), Status.SKIP.value\n )\n self.assertTrue(\n CacheControl.NOCACHE.value in response.get(\"Cache-Control\", \"\")\n or CacheControl.PRIVATE.value in response.get(\"Cache-Control\", \"\")\n )\n # GET\n response = self.client.get(url)\n self.assertEqual(\n response.get(self.header_name, None), Status.SKIP.value\n )\n self.assertTrue(\n CacheControl.NOCACHE.value in response.get(\"Cache-Control\", \"\")\n or CacheControl.PRIVATE.value in response.get(\"Cache-Control\", \"\")\n )\n return response\n\n # ---- TEST PAGES ----------------------------------------------------------\n\n def test_page_miss(self):\n for page in self.should_cache_pages:\n self.get_miss(page.get_url())\n\n def test_page_hit(self):\n for page in self.should_cache_pages:\n # First get should miss cache.\n self.get_miss(page.get_url())\n # Second get should 
hit cache.\n            self.get_hit(page.get_url())\n\n    def test_page_skip(self):\n        for page in self.skip_cache_pages:\n            # First get should skip cache.\n            self.get_skip(page.get_url())\n            # Second get should continue to skip.\n            self.get_skip(page.get_url())\n\n    def test_page_restricted(self):\n        auth_url = \"/_util/authenticate_with_password/%d/%d/\" % (\n            self.view_restriction.id,\n            self.page_cachedpage_restricted.id,\n        )\n        response = self.client.post(\n            auth_url,\n            {\n                \"password\": \"the cybers\",\n                \"return_url\": self.page_cachedpage_restricted.get_url(),\n            },\n        )\n        self.assertRedirects(\n            response, self.page_cachedpage_restricted.get_url()\n        )\n        # First get should skip cache, and also be set to private.\n        response = self.get_skip(self.page_cachedpage_restricted.get_url())\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(\n            response.get(\"Cache-Control\", None), CacheControl.PRIVATE.value\n        )\n        # Second get should continue to skip and also be set to private.\n        response = self.get_skip(self.page_cachedpage_restricted.get_url())\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(\n            response.get(\"Cache-Control\", None), CacheControl.PRIVATE.value\n        )\n\n    def test_page_404(self):\n        # 404s should also be cached.\n        self.get_miss(\"/gimme-a-404/\")\n        self.get_hit(\"/gimme-a-404/\")\n\n    @modify_settings(\n        MIDDLEWARE={\n            \"remove\": \"django.contrib.auth.middleware.AuthenticationMiddleware\",  # noqa\n        }\n    )\n    def test_page_miss_without_auth(self):\n        self.test_page_miss()\n\n    @modify_settings(\n        MIDDLEWARE={\n            \"remove\": \"django.contrib.auth.middleware.AuthenticationMiddleware\",  # noqa\n        }\n    )\n    def test_page_hit_without_auth(self):\n        self.test_page_hit()\n\n    @modify_settings(\n        MIDDLEWARE={\n            \"remove\": \"django.contrib.auth.middleware.AuthenticationMiddleware\",  # noqa\n        }\n    )\n    def test_page_skip_without_auth(self):\n        self.test_page_skip()\n\n    @modify_settings(\n        MIDDLEWARE={\n            \"remove\": \"django.contrib.auth.middleware.AuthenticationMiddleware\",  # noqa\n        }\n    )\n    def test_page_restricted_without_auth(self):\n        self.test_page_restricted()\n\n    @modify_settings(\n        MIDDLEWARE={\n            \"remove\": \"django.contrib.auth.middleware.AuthenticationMiddleware\",  # noqa\n        }\n    )\n    def test_page_404_without_auth(self):\n        self.test_page_404()\n\n    # ---- TEST VIEWS ----------------------------------------------------------\n    # Views use the decorators and should work without the middleware.\n\n    @modify_settings(\n        MIDDLEWARE={\n            \"remove\": [\n                \"wagtailcache.cache.UpdateCacheMiddleware\",\n                \"wagtailcache.cache.FetchFromCacheMiddleware\",\n            ],\n        }\n    )\n    def test_view_miss(self):\n        # First get should miss cache.\n        self.get_miss(reverse(\"cached_view\"))\n\n    @modify_settings(\n        MIDDLEWARE={\n            \"remove\": [\n                \"wagtailcache.cache.UpdateCacheMiddleware\",\n                \"wagtailcache.cache.FetchFromCacheMiddleware\",\n            ],\n        }\n    )\n    def test_view_hit(self):\n        # First get should miss cache.\n        self.get_miss(reverse(\"cached_view\"))\n        # Second get should hit cache.\n        self.get_hit(reverse(\"cached_view\"))\n\n    @modify_settings(\n        MIDDLEWARE={\n            \"remove\": [\n                \"wagtailcache.cache.UpdateCacheMiddleware\",\n                \"wagtailcache.cache.FetchFromCacheMiddleware\",\n            ],\n        }\n    )\n    def test_view_skip(self):\n        # First get should skip cache.\n        self.get_skip(reverse(\"nocached_view\"))\n        # Second get should continue to skip.\n        self.get_skip(reverse(\"nocached_view\"))\n\n    # ---- ADMIN VIEWS ---------------------------------------------------------\n\n    def 
test_admin(self):\n self.client.force_login(self.user)\n response = self.client.get(reverse(\"wagtailcache:index\"))\n self.client.logout()\n self.assertEqual(response.status_code, 200)\n\n def test_admin_clearcache(self):\n # First get should miss cache.\n self.get_miss(self.page_cachedpage.get_url())\n # Second get should hit cache.\n self.get_hit(self.page_cachedpage.get_url())\n # Now log in as admin and clear the cache.\n self.client.force_login(self.user)\n response = self.client.get(reverse(\"wagtailcache:clearcache\"))\n self.client.logout()\n self.assertEqual(response.status_code, 200)\n # Now the page should miss cache.\n self.get_miss(self.page_cachedpage.get_url())\n\n # ---- ALTERNATE SETTINGS --------------------------------------------------\n\n @override_settings(WAGTAIL_CACHE=True)\n def test_enable_wagtailcache(self):\n # Intentionally enable wagtail-cache, make sure it works.\n response = self.client.get(self.page_cachedpage.get_url())\n self.assertIsNotNone(response.get(self.header_name, None))\n\n @override_settings(WAGTAIL_CACHE=False)\n def test_disable_wagtailcache(self):\n # Intentionally disable wagtail-cache, make sure it is inactive.\n response = self.client.get(self.page_cachedpage.get_url())\n self.assertIsNone(response.get(self.header_name, None))\n\n @override_settings(WAGTAIL_CACHE_BACKEND=\"zero\")\n def test_zero_timeout(self):\n # Wagtail-cache should ignore the page when a timeout is zero.\n response = self.client.get(self.page_cachedpage.get_url())\n self.assertIsNone(response.get(self.header_name, None))\n # Second should also not cache.\n response = self.client.get(self.page_cachedpage.get_url())\n self.assertIsNone(response.get(self.header_name, None))\n # Load admin panel to render the zero timeout.\n self.test_admin()\n\n # ---- HOOKS ---------------------------------------------------------------\n\n def test_request_hook_true(self):\n # A POST should never be cached.\n response = self.client.post(reverse(\"cached_view\"))\n self.assertEqual(\n response.get(self.header_name, None), Status.SKIP.value\n )\n response = self.client.post(reverse(\"cached_view\"))\n self.assertEqual(\n response.get(self.header_name, None), Status.SKIP.value\n )\n\n # Register hook and assert it was actually registered.\n hooks.register(\"is_request_cacheable\", hook_true)\n hook_fns = hooks.get_hooks(\"is_request_cacheable\")\n self.assertEqual(hook_fns, [hook_true])\n\n # Setting `is_request_cacheale=True` does not really do much, because\n # the response still has the final say in whether or not the response is\n # cached. The no-cache page will still not be cached due to the\n # response. 
However a simple POST request will now be checked against\n # the cache, although once again, it will probably not get cached due to\n # the response.\n response = self.client.post(reverse(\"cached_view\"))\n self.assertEqual(\n response.get(self.header_name, None), Status.MISS.value\n )\n response = self.client.post(reverse(\"cached_view\"))\n self.assertEqual(\n response.get(self.header_name, None), Status.MISS.value\n )\n\n def test_request_hook_false(self):\n # Register hook and assert it was actually registered.\n hooks.register(\"is_request_cacheable\", hook_false)\n hook_fns = hooks.get_hooks(\"is_request_cacheable\")\n self.assertEqual(hook_fns, [hook_false])\n # The cached page should be force skipped due to the hook returning\n # false.\n self.get_skip(self.page_cachedpage.get_url())\n self.get_skip(self.page_cachedpage.get_url())\n\n def test_request_hook_any(self):\n # Register hook and assert it was actually registered.\n hooks.register(\"is_request_cacheable\", hook_any)\n hook_fns = hooks.get_hooks(\"is_request_cacheable\")\n self.assertEqual(hook_fns, [hook_any])\n # The page should be cached normally due to hook returning garbage.\n self.test_page_hit()\n\n def test_response_hook_true(self):\n # Register hook and assert it was actually registered.\n hooks.register(\"is_response_cacheable\", hook_true)\n hook_fns = hooks.get_hooks(\"is_response_cacheable\")\n self.assertEqual(hook_fns, [hook_true])\n # The no-cache page should be force cached due to the hook returning\n # true.\n self.get_miss(self.page_cachecontrolpage.get_url())\n self.get_hit(self.page_cachecontrolpage.get_url())\n\n def test_response_hook_false(self):\n # Register hook and assert it was actually registered.\n hooks.register(\"is_response_cacheable\", hook_false)\n hook_fns = hooks.get_hooks(\"is_response_cacheable\")\n self.assertEqual(hook_fns, [hook_false])\n # The cached page should be force skipped due to the hook returning\n # false.\n self.get_skip(self.page_cachedpage.get_url())\n self.get_skip(self.page_cachedpage.get_url())\n\n def test_response_hook_any(self):\n # Register hook and assert it was actually registered.\n hooks.register(\"is_response_cacheable\", hook_any)\n hook_fns = hooks.get_hooks(\"is_response_cacheable\")\n self.assertEqual(hook_fns, [hook_any])\n # The page should be cached normally due to hook returning garbage.\n self.test_page_hit()\n","sub_path":"testproject/home/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":15654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"62872630","text":"import os\n\n\npath_dir= os.path.dirname(os.path.dirname(__file__)).replace('\\\\', '/')\n\ntitle1 = ['基础数据','列车信息']\n\ntitle2 = ['基础数据','故障单信息']\n\ntitle3 = ['基础数据','维修级别信息维护']\n\ntitle4 = ['基础数据','逻辑决断图节点信息维护']\n\ntitle5 = ['基础数据','逻辑决断图后果维护']\n\ntitle6 = ['基础数据','逻辑决断图维护']\n\ntitle7 = ['基础数据','自定义车组']\n\ntitle8 = ['基础数据','自定义编码生成器']\n\ntitle9 = ['RCM分析及计算','寿命特性分析']\n\ntitle10 = ['RCM分析及计算','RCM计算']\n\ntitle11 = ['RCM分析及计算','RCM分析']\n\ntitle12 = ['修程修制','车辆维修信息']","sub_path":"scheme_selenium/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"445596450","text":"__author__ = 'yuwenhao'\n\nfrom rllab.uposi.policy_split_rl_evaluation import *\nimport joblib\nimport numpy as np\n\n\ndef get_flat_gradient(algo, samples_data):\n all_input_values = tuple(ext.extract(\n samples_data,\n 
\"observations\", \"actions\", \"advantages\"\n ))\n agent_infos = samples_data[\"agent_infos\"]\n state_info_list = [agent_infos[k] for k in algo.policy.state_info_keys]\n dist_info_list = [agent_infos[k] for k in algo.policy.distribution.dist_info_keys]\n all_input_values += tuple(state_info_list) + tuple(dist_info_list)\n\n grad = sliced_fun(algo.optimizer._opt_fun[\"f_grad\"], 1)(\n tuple(all_input_values), tuple())\n\n return grad\n\nif __name__ == '__main__':\n num_parallel = 4\n\n directory = 'data/trained/gradient_temp/rl_split_reacher_3modelsexp1_alivepenalty_tasksplit_taskinput_6432net_sd1_vanbaseline_splitstd_accumgrad_40k_0_1_unweighted_accumulate_gradient/'\n policy = joblib.load(directory + '/final_policy_0.0.pkl')\n test_trajs = 50\n pathlength = 1000\n\n env_name = \"DartReacher3d-v1\"\n\n env = normalize(GymEnv(env_name, record_log=False, record_video=False))\n\n baseline = LinearFeatureBaseline(env_spec=env.spec, additional_dim=0)\n\n algo = TRPO( # _MultiTask(\n env=env,\n policy=policy,\n baseline=baseline,\n batch_size=40000,\n max_path_length=pathlength,\n n_itr=5,\n\n discount=0.995,\n step_size=0.02,\n gae_lambda=0.97,\n whole_paths=False,\n )\n algo.init_opt()\n\n from rllab.sampler import parallel_sampler\n\n parallel_sampler.initialize(n_parallel=num_parallel)\n parallel_sampler.set_seed(0)\n\n algo.start_worker()\n\n paths = algo.sampler.obtain_samples(0)\n algo.sampler.process_samples(0, paths)\n samples_data_ori = algo.sampler.process_samples(0, paths)\n\n print('Original return', dict(logger._tabular)['AverageReturn'])\n\n sample_grads = []\n sample_grad_van = []\n for i in range(100):\n samples_data = {}\n indices = np.arange(len(samples_data_ori['observations']))\n np.random.shuffle(indices)\n samples_data[\"observations\"] = samples_data_ori[\"observations\"][indices[0:35000]]\n samples_data[\"actions\"] = samples_data_ori[\"actions\"][indices[0:35000]]\n samples_data[\"rewards\"] = samples_data_ori[\"rewards\"][indices[0:35000]]\n samples_data[\"advantages\"] = samples_data_ori[\"advantages\"][indices[0:35000]]\n samples_data[\"agent_infos\"] = {}\n samples_data[\"agent_infos\"][\"log_std\"] = samples_data_ori[\"agent_infos\"][\"log_std\"][\n indices[0:35000]]\n samples_data[\"agent_infos\"][\"mean\"] = samples_data_ori[\"agent_infos\"][\"mean\"][\n indices[0:35000]]\n grad = get_flat_gradient(algo, samples_data)\n sample_grads.append(grad)\n sample_grad_van.append(get_gradient(algo, samples_data, False))\n\n grad_variance = np.var(sample_grads, axis=0)\n\n sorted_grad_var = np.copy(grad_variance)\n sorted_grad_var.sort()\n\n mat_grads = []\n for k in range(len(sample_grad_van[0])):\n one_grad = []\n for i in range(len(sample_grad_van)):\n one_grad.append(sample_grad_van[i][k])\n mat_grads.append(np.var(one_grad, axis=0))\n\n for j in range(len(mat_grads)):\n plt.figure()\n plt.title(policy.get_params()[j].name)\n if len(mat_grads[j].shape) == 2:\n plt.imshow(mat_grads[j])\n plt.colorbar()\n elif len(mat_grads[j].shape) == 1:\n plt.plot(mat_grads[j])\n plt.savefig(directory + policy.get_params()[\n j].name + '_grad_variance.png')\n\n max_var = np.max(grad_variance)\n\n perturbed_performances = []\n old_params = np.copy(policy.get_param_values())\n for i in range(20):\n start = sorted_grad_var[int(0.05 * i * len(grad_variance))]\n end = sorted_grad_var[int(0.05 * (i+1) * len(grad_variance))-1]\n new_params = np.copy(old_params)\n num = 0\n for j in range(len(new_params)):\n if grad_variance[j] >= start and grad_variance[j] < end:\n new_params[j] += 
np.random.normal(0, np.sqrt(max_var))\n num += 1\n policy.set_param_values(new_params)\n print('NUM: ', num, max_var)\n paths = algo.sampler.obtain_samples(0)\n algo.sampler.process_samples(0, paths, False)\n perturbed_performances.append(dict(logger._tabular)['AverageReturn'])\n\n plt.figure()\n plt.plot(perturbed_performances)\n plt.savefig(directory+'/perturbed_performances.png')\n\n algo.shutdown_worker()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"uposi_script/rl_sensitivity_test.py","file_name":"rl_sensitivity_test.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"318826912","text":"from flask import request\nfrom flask_restful import Resource\nfrom database import db\n\n\nclass Location(db.Model):\n __tablename__ = 'location'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String)\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return ''.format(name=self.name)\n\n\nclass LocationList(Resource):\n def get(self):\n locations = Location.query.all()\n location_info_list = []\n for location in locations:\n location_info = dict()\n\n location_info['id'] = location.id\n location_info['name'] = location.name\n\n location_info_list.append(location_info)\n\n return {'locations': location_info_list}\n\n def post(self):\n location = Location(\n name=request.form.get('name')\n )\n db.session.add(location)\n db.session.commit()\n return {\n \"id\": location.id,\n }\n","sub_path":"model/location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"231022735","text":"# MIT License\n#\n# Copyright (c) 2020-2021 CNRS\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nfrom __future__ import annotations\n\nimport multiprocessing\nimport sys\nimport warnings\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import List, Optional, Text\n\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, Dataset, IterableDataset\nfrom torch.utils.data._utils.collate import default_collate\nfrom torch_audiomentations.core.transforms_interface import BaseWaveformTransform\n\nfrom pyannote.audio.utils.protocol import check_protocol\nfrom pyannote.database import Protocol\n\n\n# Type of machine learning problem\nclass Problem(Enum):\n BINARY_CLASSIFICATION = 0\n MONO_LABEL_CLASSIFICATION = 1\n MULTI_LABEL_CLASSIFICATION = 2\n REPRESENTATION = 3\n REGRESSION = 4\n # any other we could think of?\n\n\n# A task takes an audio chunk as input and returns\n# either a temporal sequence of predictions\n# or just one prediction for the whole audio chunk\nclass Resolution(Enum):\n FRAME = 1 # model outputs a sequence of frames\n CHUNK = 2 # model outputs just one vector for the whole chunk\n\n\n@dataclass\nclass Specifications:\n problem: Problem\n resolution: Resolution\n\n # chunk duration in seconds.\n # use None for variable-length chunks\n duration: Optional[float] = None\n\n # (for classification tasks only) list of classes\n classes: Optional[List[Text]] = None\n\n # whether classes are permutation-invariant (e.g. diarization)\n permutation_invariant: bool = False\n\n def __len__(self):\n # makes it possible to do something like:\n # multi_task = len(specifications) > 1\n # because multi-task specifications are stored as {task_name: specifications} dict\n return 1\n\n def __getitem__(self, key):\n if key is not None:\n raise KeyError\n return self\n\n def items(self):\n yield None, self\n\n def keys(self):\n yield None\n\n def __iter__(self):\n yield None\n\n\nclass TrainDataset(IterableDataset):\n def __init__(self, task: Task):\n super().__init__()\n self.task = task\n\n def __iter__(self):\n return self.task.train__iter__()\n\n def __len__(self):\n return self.task.train__len__()\n\n\nclass ValDataset(Dataset):\n def __init__(self, task: Task):\n super().__init__()\n self.task = task\n\n def __getitem__(self, idx):\n return self.task.val__getitem__(idx)\n\n def __len__(self):\n return self.task.val__len__()\n\n\nclass Task(pl.LightningDataModule):\n \"\"\"Base task class\n\n A task is the combination of a \"problem\" and a \"dataset\".\n For example, here are a few tasks:\n - voice activity detection on the AMI corpus\n - speaker embedding on the VoxCeleb corpus\n - end-to-end speaker diarization on the VoxConverse corpus\n\n A task is expected to be solved by a \"model\" that takes an\n audio chunk as input and returns the solution. Hence, the\n task is in charge of generating (input, expected_output)\n samples used for training the model.\n\n Parameters\n ----------\n protocol : Protocol\n pyannote.database protocol\n duration : float, optional\n Chunk duration in seconds. Defaults to two seconds (2.).\n min_duration : float, optional\n Sample training chunk durations uniformly between `min_duration`\n and `duration`. Defaults to `duration` (i.e. 
fixed length chunks).\n batch_size : int, optional\n Number of training samples per batch. Defaults to 32.\n num_workers : int, optional\n Number of workers used for generating training samples.\n Defaults to multiprocessing.cpu_count() // 2.\n pin_memory : bool, optional\n If True, data loaders will copy tensors into CUDA pinned\n memory before returning them. See pytorch documentation\n for more details. Defaults to False.\n augmentation : BaseWaveformTransform, optional\n torch_audiomentations waveform transform, used by dataloader\n during training.\n\n Attributes\n ----------\n specifications : Specifications or dict of Specifications\n Task specifications (available after `Task.setup` has been called.)\n For multi-task learning, this should be a dictionary where keys are\n task names and values are corresponding Specifications instances.\n \"\"\"\n\n def __init__(\n self,\n protocol: Protocol,\n duration: float = 2.0,\n min_duration: float = None,\n batch_size: int = 32,\n num_workers: int = None,\n pin_memory: bool = False,\n augmentation: BaseWaveformTransform = None,\n ):\n super().__init__()\n\n # dataset\n self.protocol = check_protocol(protocol)\n\n # batching\n self.duration = duration\n self.min_duration = duration if min_duration is None else min_duration\n self.batch_size = batch_size\n\n # multi-processing\n if num_workers is None:\n num_workers = multiprocessing.cpu_count() // 2\n\n if (\n num_workers > 0\n and sys.platform == \"darwin\"\n and sys.version_info[0] >= 3\n and sys.version_info[1] >= 8\n ):\n warnings.warn(\n \"num_workers > 0 is not supported with macOS and Python 3.8+: \"\n \"setting num_workers = 0.\"\n )\n num_workers = 0\n\n self.num_workers = num_workers\n self.pin_memory = pin_memory\n self.augmentation = augmentation\n\n def prepare_data(self):\n \"\"\"Use this to download and prepare data\n\n This is where we might end up downloading datasets\n and transform them so that they are ready to be used\n with pyannote.database. 
But for now, the API assumes\n that we directly provide a pyannote.database.Protocol.\n\n Notes\n -----\n Called only once.\n \"\"\"\n pass\n\n def setup(self, stage=None):\n \"\"\"Called at the beginning of fit and test just before Model.setup()\n\n Parameters\n ----------\n stage : \"fit\" or \"test\"\n Whether model is being trained (\"fit\") or used for inference (\"test\").\n\n Notes\n -----\n This hook is called on every process when using DDP.\n\n If `specifications` attribute has not been set in `__init__`,\n `setup` is your last chance to set it.\n \"\"\"\n pass\n\n def setup_loss_func(self):\n pass\n\n def setup_validation_metric(self):\n pass\n\n @property\n def is_multi_task(self) -> bool:\n \"\"\"Check whether multiple tasks are addressed at once\"\"\"\n return len(self.specifications) > 1\n\n def train__iter__(self):\n # will become train_dataset.__iter__ method\n msg = f\"Missing '{self.__class__.__name__}.train__iter__' method.\"\n raise NotImplementedError(msg)\n\n def train__len__(self):\n # will become train_dataset.__len__ method\n msg = f\"Missing '{self.__class__.__name__}.train__len__' method.\"\n raise NotImplementedError(msg)\n\n def collate_fn(self, batch):\n collated_batch = default_collate(batch)\n if self.augmentation is not None:\n collated_batch[\"X\"] = self.augmentation(\n collated_batch[\"X\"], sample_rate=self.model.hparams.sample_rate\n )\n return collated_batch\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(\n TrainDataset(self),\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=True,\n collate_fn=self.collate_fn,\n )\n\n def default_loss(self, specifications: Specifications, y, y_pred) -> torch.Tensor:\n \"\"\"Guess and compute default loss according to task specification\"\"\"\n\n if specifications.problem == Problem.BINARY_CLASSIFICATION:\n loss = F.binary_cross_entropy(y_pred.squeeze(dim=-1), y.float())\n\n elif specifications.problem == Problem.MONO_LABEL_CLASSIFICATION:\n loss = F.nll_loss(y_pred.view(-1, len(specifications.classes)), y.view(-1))\n\n elif specifications.problem == Problem.MULTI_LABEL_CLASSIFICATION:\n loss = F.binary_cross_entropy(y_pred, y.float())\n\n else:\n msg = \"TODO: implement for other types of problems\"\n raise NotImplementedError(msg)\n\n return loss\n\n # default training_step provided for convenience\n # can obviously be overridden for each task\n def training_step(self, batch, batch_idx: int):\n \"\"\"Default training_step according to task specification\n\n * binary cross-entropy loss for binary or multi-label classification\n * negative log-likelihood loss for regular classification\n\n In case of multi-tasking, it will default to summing loss of each task.\n\n Parameters\n ----------\n batch : (usually) dict of torch.Tensor\n Current batch.\n batch_idx: int\n Batch index.\n\n Returns\n -------\n loss : {str: torch.tensor}\n {\"loss\": loss} with additional \"loss_{task_name}\" keys for multi-task models.\n \"\"\"\n\n X, y = batch[\"X\"], batch[\"y\"]\n y_pred = self.model(X)\n\n if self.is_multi_task:\n loss = dict()\n for task_name, specifications in self.specifications.items():\n loss[task_name] = self.default_loss(\n specifications, y[task_name], y_pred[task_name]\n )\n self.model.log(\n f\"{task_name}@train_loss\",\n loss[task_name],\n on_step=True,\n on_epoch=True,\n prog_bar=False,\n logger=False,\n )\n\n loss[\"loss\"] = sum(loss.values())\n self.model.log(\n f\"{self.ACRONYM}@train_loss\",\n loss[\"loss\"],\n on_step=True,\n 
on_epoch=True,\n prog_bar=True,\n logger=True,\n )\n return loss\n\n loss = self.default_loss(self.specifications, y, y_pred)\n self.model.log(\n f\"{self.ACRONYM}@train_loss\",\n loss,\n on_step=True,\n on_epoch=True,\n prog_bar=True,\n logger=True,\n )\n return {\"loss\": loss}\n\n def val__getitem__(self, idx):\n # will become val_dataset.__getitem__ method\n msg = f\"Missing '{self.__class__.__name__}.val__getitem__' method.\"\n raise NotImplementedError(msg)\n\n def val__len__(self):\n # will become val_dataset.__len__ method\n msg = f\"Missing '{self.__class__.__name__}.val__len__' method.\"\n raise NotImplementedError(msg)\n\n def val_dataloader(self) -> Optional[DataLoader]:\n return DataLoader(\n ValDataset(self),\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=False,\n )\n\n # default validation_step provided for convenience\n # can obviously be overridden for each task\n def validation_step(self, batch, batch_idx: int):\n \"\"\"Guess default validation_step according to task specification\n\n * binary cross-entropy loss for binary or multi-label classification\n * negative log-likelihood loss for regular classification\n\n In case of multi-tasking, it will default to summing loss of each task.\n\n Parameters\n ----------\n batch : (usually) dict of torch.Tensor\n Current batch.\n batch_idx: int\n Batch index.\n\n Returns\n -------\n loss : {str: torch.tensor}\n {\"loss\": loss} with additional \"{task_name}\" keys for multi-task models.\n \"\"\"\n\n X, y = batch[\"X\"], batch[\"y\"]\n y_pred = self.model(X)\n\n if self.is_multi_task:\n loss = dict()\n for task_name, specifications in self.specifications.items():\n loss[task_name] = self.default_loss(\n specifications, y[task_name], y_pred[task_name]\n )\n self.model.log(f\"{task_name}@val_loss\", loss[task_name])\n\n loss[\"loss\"] = sum(loss.values())\n self.model.log(\n f\"{self.ACRONYM}@val_loss\",\n loss[\"loss\"],\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n )\n return loss\n\n loss = self.default_loss(self.specifications, y, y_pred)\n self.model.log(\n f\"{self.ACRONYM}@val_loss\",\n loss,\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n )\n return {\"loss\": loss}\n\n def validation_epoch_end(self, outputs):\n pass\n\n @property\n def val_monitor(self):\n \"\"\"Quantity (and direction) to monitor\n\n Useful for model checkpointing or early stopping.\n\n Returns\n -------\n monitor : str\n Name of quantity to monitor.\n mode : {'min', 'max'}\n Whether the quantity should be minimized or maximized.\n\n See also\n --------\n pytorch_lightning.callbacks.ModelCheckpoint\n pytorch_lightning.callbacks.EarlyStopping\n \"\"\"\n\n return f\"{self.ACRONYM}@val_loss\", \"min\"\n","sub_path":"pyannote/audio/core/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":14454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
+{"seq_id":"555828656","text":"from tourapi.list import TourAPI\nfrom tourapi.config import ServiceKey, MobileOS, MobileApp, Languages\nfrom mysql_config import MysqlHost, MysqlUser, MysqlPass, MysqlDB\nimport pymysql\nimport json\nimport re\n\n\ndef cat2url(name_eng):\n ''' Build a unique URL from the category's English name\n '''\n url = name_eng.lower()\n url = re.sub('/^\s+|\s+$/g', '', url) # strip leading/trailing whitespace\n url = url.replace('-', '_').replace(' (', '-').replace('/', '-').replace(' & ', '-').replace(')', '').replace(' ', '_')\n url = re.sub('/\s/g', \"-\", url) # replace remaining whitespace with '-' (not working) \n return url\n\nconn = pymysql.connect(host = MysqlHost, user = MysqlUser, 
password = MysqlPass, db = MysqlDB)\ncurs = conn.cursor(pymysql.cursors.DictCursor)\n\n### Adjust missing English categories and duplicate categories\n\ncat_dict = [\n {'name_kor': '추천코스', 'name_eng': 'Recommended Course'},\n {'name_kor': '가족코스', 'name_eng': 'Family course'},\n {'name_kor': '나홀로코스', 'name_eng': 'Alone course'},\n {'name_kor': '힐링코스', 'name_eng': 'Healing course'},\n {'name_kor': '도보코스', 'name_eng': 'On foot course'},\n {'name_kor': '캠핑코스', 'name_eng': 'Camping course'},\n {'name_kor': '맛코스', 'name_eng': 'Taste course'},\n {'name_kor': '육상레포츠', 'name_eng': 'Land Leports'},\n {'name_kor': '수상레포츠', 'name_eng': 'Water Leports'},\n {'name_kor': '항공레포츠', 'name_eng': 'Sky Leports'},\n {'name_kor': '기타행사', 'name_eng': 'Other Events'},\n]\n\nfor cat in cat_dict:\n curs.execute(\"UPDATE category_code SET name_eng=%s WHERE name_kor=%s\",\n (cat['name_eng'], cat['name_kor']) )\nconn.commit()\n\n### Read and process all categories\n\ncurs.execute(\"SELECT * FROM category_code\")\nconn.commit()\n\nrows = curs.fetchall()\n\nfor row in rows:\n if type(row['name_eng']) == str:\n url = cat2url(row['name_eng'])\n else:\n url = ''\n\n curs.execute(\"UPDATE category_code SET url=%s WHERE code=%s\",\n (url, row['code']))\n print(row['name_eng'], '=>', url)\n\nconn.commit()\nconn.close()","sub_path":"cat2url.py","file_name":"cat2url.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
+{"seq_id":"426898900","text":"import sys\nimport numpy as np\nimport cv2\nimport sqlite3\n\n\n\nsys.path.append('/usr/local/lib/python2.7/site-packages')\n\n\n\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\ncap = cv2.VideoCapture(0)\n\n\n\ndef insertOrUpdate(Id,Name):\n\n\n conn=sqlite3.connect(\"FaceData.db\")\n\n cursor = conn.cursor()\n # cmd= INSERT INTO StudentsFaces(ID,Name)Values((Id),(Name))\n cmd = \"SELECT * FROM StudentsData WHERE ID = \" + Id\n cursor = conn.execute(cmd)\n isRecordExist = 0\n for row in cursor: \n isRecordExist = 1\n if isRecordExist == 1:\n conn.execute(\"UPDATE StudentsData SET Name = ? 
WHERE ID = ?\",(Name,Id))\n else:\n conn.execute(\"INSERT INTO StudentsData(ID,Name)Values(?,?)\",(Id,Name))\n conn.commit()\n conn.close()\n\n\n\n\nId = input(\"enter users id :\")\nName = input(\"enter the name:\")\n\n\n\nsampleNum = 0\n\ninsertOrUpdate(Id,Name)\n\nwhile True:\n # Read the frame\n ret, img = cap.read()\n # Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Detect the faces\n faces = face_cascade.detectMultiScale(gray,1.3,5)\n # Draw the rectangle around each face\n for (x, y, w, h) in faces:\n sampleNum=sampleNum+1\n\n cv2.imwrite(\"dataSet/Users.\"+str(Id)+\".\"+str(sampleNum)+\".jpg\",gray[y:y+h,x:x+w])\n cv2.rectangle(img, (x,y), (x+w, y+h),(255,0,0),2)\n cv2.putText(img,\"Face Detected\",(x,y+h+30),cv2.FONT_HERSHEY_SIMPLEX,1,255)\n cv2.waitKey(300)\n \n # Display\n cv2.imshow(\"Face\", img)\n # Stop if escape key is pressed\n cv2.waitKey(1)\n# Release the VideoCapture object\n if(sampleNum>20):\n cap.release()\n break\n","sub_path":"DatasetCreator1stVersion.py","file_name":"DatasetCreator1stVersion.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"502503432","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom wagtail.admin import urls as wagtailadmin_urls\nfrom wagtail.core import urls as wagtail_urls\nfrom wagtail.documents import urls as wagtaildocs_urls\nfrom django.urls import path\nfrom backend import views as backendviews\nfrom home import views as views\nfrom search import views as search_views\n\nurlpatterns = [\n url(r'^django-admin/', admin.site.urls),\n\n url(r'^admin/', include(wagtailadmin_urls)),\n url(r'^documents/', include(wagtaildocs_urls)),\n # url('/', views.home, name='home'),\n path('', views.home, name='home'),\n # path('contacts/', views.contactPage),\n # path('contact/', views.contact, name = 'contact'),\n # path('personaldetails/', views.Personal.as_view(), name = 'personal'),\n # path('ajax/contact', views.postContact, name ='contact_submit'),\n # url(r'home/', views.home, name='home'),\n url(r'^create$', views.create, name='create'),\n url(r'^createnextofkin$', views.createnextofkin, name='createnextofkin'),\n url(r'^createemploymentinfo$', views.createemploymentinfo, name='createemploymentinfo'),\n # url(r'^personaldetails/$', views.personaldetails, name='personaldetails'),\n url(r'^search/$', search_views.search, name='search'),\n url('loantype/', views.loantype, name='loantype'),\n url('loansummary/', views.loansummary, name='loansummary'),\n url('employmentinfo/', views.employmentinfo, name='employmentinfo'),\n url('paymentinfo/', views.paymentinfo, name='personalinfo'),\n url('acknowledgement/', views.acknowledgement, name='acknowledgement'),\n url('bvnerror/', views.bvnerror, name='bvnerror'),\n url('bvnaccepted/', views.bvnaccepted, name='bvnaccepted'),\n url('verify/', views.verifybvn, name='verifybvn'),\n url('nextofkin/', views.nextofkin, name='nextofkin'),\n url('otherdetails/', views.otherdetails, name='otherdetails'),\n url('summary/', views.summary, name='summary'),\n\n # Upload File \n url(r'^terms$', views.acknowledgement_form_upload, name='acknowledgement_form_upload'),\n\n #Frontend Reg\n #url(r'^create_user/$',(CreateView.as_view(model=BluecreditUser, get_success_url =lambda: reverse('pending'), form_class=UserCreationForm, template_name=\"register\")), name='register'),\n #User Dashboard\n 
url('creditcheck/', views.creditcheck, name='creditcheck'),\n url('loanhistory/', views.loanhistory, name='loanhistory'),\n url('repaymenthistory/', views.repaymenthistory, name='repaymenthistory'),\n url('repaymenthistory_doc/',views.repaymenthistory_doc,name='repaymenthistory_doc'),\n\n # Backend Urls \n #Auth\n # path('signup/', backendviews.signup, name='signup'),\n path('register/', backendviews.signup, name='register'),\n path('pending/', backendviews.pending, name='pending'),\n # url('logout/',backendviews.login,name='logout'),\n #dashboard\n path('bluecredit/', backendviews.index, name='Home'),\n path('results/', backendviews.results, name='results'),\n path('applicant/', backendviews.personal, name='applicant'),\n path('loandetails/', backendviews.loandetails, name='loandetails'),\n path('accounts/', include('django.contrib.auth.urls'))\n\n\n # url(r'^signup/$', core_views.signup, name='signup'),\n # url(r'^account_activation_sent/$', core_views.account_activation_sent, name='account_activation_sent'),\n # url(r'^activate/(?P[0-9A-Za-z_\\-]+)/(?P[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',\n # core_views.activate, name='activate'),\n\n\n\n # For anything not caught by a more specific rule above, hand over to\n # Wagtail's page serving mechanism. This should be the last pattern in\n # the list:\n # url(r'', include(wagtail_urls)),\n\n # Alternatively, if you want Wagtail pages to be served from a subpath\n # of your site, rather than the site root:\n # url(r'^pages/', include(wagtail_urls)),\n]\n\n\nif settings.DEBUG:\n from django.conf.urls.static import static\n from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n # Serve static and media files from development server\n urlpatterns += staticfiles_urlpatterns()\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"bluehost/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"322042285","text":"# UMLS API requires getting TGT every 8 hours: https://documentation.uts.nlm.nih.gov/rest/authentication.html\n# Or just get every run (below)\nimport getpass\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport pandas as pd\nimport os\nfrom os import path\n\n\nheaders = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n\n# user = input(\"Please enter your username: \")\n# pw = getpass.getpass(\"Please enter your password: \")\nuser = 'jeffma'\npw = 'Mr13812283066'\n\nparams = {\"username\": user,\n \"password\" : pw}\n\nTGT_URL = \"https://utslogin.nlm.nih.gov/cas/v1/tickets\"\n\nresponse = (requests.post(TGT_URL, headers = headers, params = params)).text\nticketgetter = BeautifulSoup(response, 'lxml')\nTGT = ticketgetter.form['action']\n\n\n# icd-query # ICD10: Cauda equina syndrome; Brain Injuries Traumatic\ndef search_icd(icd):\n \"\"\"\n return dict {CUI,file_path, paired_results}\n\n \"\"\"\n\n # request info\n f_search = {'paired_results': {}, 'file_path': ''}\n headers_ST = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n params = {\"service\": \"http://umlsks.nlm.nih.gov\"}\n\n # search the query\n queryString = icd\n ST = requests.post(TGT, headers=headers_ST, params=params)\n URL = \"https://uts-ws.nlm.nih.gov/rest/search/current?string={}&ticket={}\".format(queryString, ST.text)\n response = requests.get(URL)\n j = json.loads(response.text)\n\n\n # parse cui & select the first cui\n\n # TODO we can keep searching the 
cui, until finding the most accurate one.\n try:\n mth_cui = []\n for i in j['result']['results']:\n if i['rootSource'] == 'MTH':\n mth_cui.append((i['ui'], i['name']))\n\n CUI = mth_cui[0][0]\n f_search['CUI'] = CUI\n\n ST = requests.post(TGT, headers=headers_ST, params=params)\n URL_cui = \"https://uts-ws.nlm.nih.gov/rest/content/current/CUI/{}/atoms?sabs=ICD10CM,MSH&ticket={}\".format(CUI,\n ST.text)\n response_2 = requests.get(URL_cui)\n r2 = json.loads(response_2.text)\n\n except:\n return None\n\n\n\n # find related files\n Mesh_terms = []\n icd_terms = []\n found_file = 'Sorry, we can not find the local files'\n\n for atom in r2['result']:\n if atom['rootSource'] == 'MSH':\n for i in r2['result']:\n if i['rootSource'] == 'ICD10CM':\n Mesh_terms.append(atom[\"name\"])\n icd_terms.append(i[\"name\"])\n\n\n # Print all the matched pairs\n paired_results = list(zip(Mesh_terms, icd_terms))\n # print(f'Here we found {len(paired_results)} paired results')\n for i, v in enumerate(paired_results):\n # print(f'Matched: ICD10CM({v[1]}) ---> MeSH({v[0]})')\n f_search['paired_results'][i] = 'ICD10: ' + v[1] + ' with MeSH :' + v[0]\n # f_search['paired_results'][i] = f'Matched: ICD10CM({v[1]}) with MeSH({v[0]})'\n\n # return the files for the required Mesh terms\n for item in paired_results:\n mesh_path = ' '.join(item[0].split(',')) + '.csv'\n try:\n found_file = pd.read_csv(os.path.join('/Users/marong/Desktop/big_data_project/Mesh_terms', mesh_path))\n f_search['file_path'] = os.path.join('/Users/marong/Desktop/big_data_project/Mesh_terms', mesh_path)\n break\n except:\n pass\n # print(f'MeSH term: {mesh_path} not_found')\n\n if f_search['file_path'] == '':\n if paired_results:\n f_search['file_path'] = f\"https://www.ncbi.nlm.nih.gov/pubmed/?term={'+'.join(paired_results[0][0].split(' '))}\"\n else:\n f_search['file_path'] = f\"https://www.ncbi.nlm.nih.gov/pubmed/?term={'+'.join(queryString.split(' '))}\"\n\n return f_search\n\n","sub_path":"pubmed/pubmed/search_icd.py","file_name":"search_icd.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"276783699","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nScript that extracts the speech recognition results of users A and B.\nTwo columns: ID and content.\nID uniquely identifies which user produced the utterance; content is the utterance text.\nUntil a new utterance arrives, the previous utterance is carried forward.\n\"\"\"\nfrom __future__ import print_function\n\n__author__ = \"Hayato Katayama\"\n__date__ = \"20190907\"\n\nimport pandas as pd\nfrom datetime import timedelta\nimport sys,time\nimport argparse\nsys.path.append(\"..\")\nfrom util.frame_generator import FrameGenerator\nfrom util.time_keeper import set_time, TimeKeeper\nfrom util.file_reader import FileReader\n\nfrom glob import glob\n\nutterance_labels = [\"None\", \"Passive\", \"Active\", \"Nod\"]\ntarget_labels = [\"A\", \"B\"]\n\nclass EventLog(object):\n\n def __init__(self, filename):\n self.raw_data = pd.read_csv(filename, header=None, names=('time', 'action', 'topic', 'target', 'utterance'),\n dtype={'time' : str, 'action' : str, 'topic' : str, 'target' : str, 'utterance' : str})\n self.start_row = 0\n self.end_row = 0\n self.start_time = 0\n self.end_time = 0\n self.datetime = \"\" # use the conversation start time as an identifier\n self.data = self.split(filename)\n\n def split(self, filename):\n '''\n Cut out the log from the start of the conversation to its end\n :return: the corresponding portion of the log (pandas.DataFrame)\n '''\n for i, v in self.raw_data.iterrows():\n if v['action'] == 'start':\n self.start_row = i\n self.datetime = v['time'].split(\".\")[0]\n self.start_time = set_time(v['time'])\n elif 
v['action'] == 'end':\n self.end_row = i\n self.end_time = set_time(v['time'])\n break\n\n return (self.raw_data[self.start_row:self.end_row+1].loc[(self.raw_data.action!=\"change_topic\")])\\\n .loc[(self.raw_data.action!=\"change_genre\")].loc[self.raw_data.utterance!=\"Recognizing\"]\\\n\n\n def to_list(self, dataframe):\n return dataframe.as_matrix().tolist()\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dir', '-d', type=str, default='/mnt/aoni02/katayama/dataset/RawDATA/*',\n help='specify the conversation folder PATH')\n parser.add_argument('--out', '-o', type=str, default='/mnt/aoni02/katayama/dataset/DATA2019/decode_new/',\n help='specify the label output folder PATH')\n args = parser.parse_args()\n print('Extraction Folder : {}'.format(args.dir))\n print('Output Folder : {}'.format(args.out))\n directory = glob(args.dir)\n output = args.out\n\n for i in directory:\n number = glob(i+\"/*\")\n for num in number:\n act_file = glob(num+\"/*[!A].csv\")[0]\n eventlog = EventLog(act_file)\n tk = TimeKeeper(act_file)\n\n fo = open(output + \"{}.decode.csv\".format(tk.recording_datetime), \"w\")\n print(\"pre_ID,ID,pre_content,content\", file=fo)\n f_genenrator = FrameGenerator(tk.start_time, tk.end_time,frame_rate=100)\n\n target = \"A\"\n action = \"\"\n utterance_ = pre_utter = \"0\"\n ID = pre_ID = 0\n lkcount = 0\n event_list = eventlog.data.as_matrix().tolist()\n for f_time in f_genenrator:# for each frame\n log_time = set_time(event_list[0][0])# next event in the log\n\n if f_time >= log_time:\n event = event_list.pop(0)\n if event[1] == \"SpReco\":\n pre_utter = utterance_\n pre_ID = ID\n person = {\"A\":1,\"B\":2}\n ID = person[event[3][0]]\n utterance_ = event[4]#[0].encode('utf-8')\n print(\"{},{},{},{}\".format(pre_ID,ID,pre_utter,utterance_), file=fo)\n else:\n print(\"{},{},{},{}\".format(pre_ID,ID,pre_utter,utterance_), file=fo)\n\n else:\n print(\"{},{},{},{}\".format(pre_ID,ID,pre_utter,utterance_), file=fo)\n \n fo.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"label/generate_speechrecognition_summarize.py","file_name":"generate_speechrecognition_summarize.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
+{"seq_id":"23696884","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nimport logging\n\nclass DataQualityOperator(BaseOperator):\n\n ui_color = '#89DA59'\n\n @apply_defaults\n def __init__(self,\n conn_id,\n tests,\n test_descriptions,\n record_validations,\n *args, **kwargs):\n\n super(DataQualityOperator, self).__init__(*args, **kwargs)\n self.conn_id = conn_id\n self.tests = tests\n self.test_descriptions = test_descriptions\n self.record_validations = record_validations\n\n def execute(self, context):\n redshift_hook = PostgresHook(self.conn_id)\n for test, description, record_validation in zip(self.tests, self.test_descriptions, self.record_validations):\n records = redshift_hook.get_records(test)\n if record_validation(records):\n logging.error(f\"Test '{description}' failed for '{test}'\")\n raise ValueError(f\"Test '{description}' failed for '{test}'\")\n else:\n logging.info(f\"Test '{description}' passed\")\n \n 
","sub_path":"P5_data_pipelines_airflow/airflow/plugins/operators/data_quality.py","file_name":"data_quality.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"235249905","text":"'''\nCreated on Aug 25, 2013\n\n@author: I072190\n'''\n\nfrom pe3 import is_prime\n\ndef nth_prime(n):\n i = 2\n while n>0:\n if is_prime(i):\n n -= 1\n if n == 0:\n print(i)\n i += 1\n\nnth_prime(10001)","sub_path":"pe7.py","file_name":"pe7.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"155653506","text":"# Given a set of distinct integers, nums, return all possible subsets.\n\n# Note: The solution set must not contain duplicate subsets.\n\n# For example,\n# If nums = [1,2,3], a solution is:\n\n# [\n# [3],\n# [1],\n# [2],\n# [1,2,3],\n# [1,3],\n# [2,3],\n# [1,2],\n# []\n# ]\ndef subsets2(arr):\n\tif not arr:\n\t\treturn []\n\n\tresult = []\n\tdfs2(0, arr, [], result)\n\treturn result\n\ndef dfs2(start, arr, path, result):\n\tif start == len(arr):\n\t\tresult.append(path)\n\t\treturn\n\n\tdfs2(start + 1, arr, path, result)\n\tdfs2(start + 1, arr, path + [arr[start]], result)\n\nprint(subsets2([1, 2, 3]))\ndef subsets(arr):\n\tif not arr:\n\t\treturn []\n\n\tresult = []\n\tdfs(0, arr, [], result)\n\treturn result\n\n\ndef dfs(start, arr, path, result):\n\tresult.append(path[:])\n\tfor i in range(start, len(arr)):\n\t\tpath.append(arr[i])\n\t\tdfs(i + 1, arr, path, result)\n\t\tpath.pop()\n\n\nprint(subsets([1, 2, 3]))\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Facebook/Subsets.py","file_name":"Subsets.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"538462108","text":"from tkinter import *\r\nfrom tkinter.ttk import *\r\nimport tkinter.messagebox\r\nimport mysql.connector\r\nimport itertools\r\nimport login\r\nimport testing\r\nimport shipping\r\nimport receiving\r\nimport accounting\r\nimport admin\r\nimport inventory\r\n\r\n\r\nclass Assembly:\r\n def __init__(self, master, emp_id):\r\n self.master = master\r\n self.wo = \"\"\r\n self.wonum = \"Work Order: \" + self.wo\r\n self.emp_id = \"Worker ID: \" + emp_id\r\n self.mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"root\",\r\n passwd=\"Razgriz!949\",\r\n database=\"inventory_system\"\r\n )\r\n self.cursor = self.mydb.cursor(buffered=True)\r\n self.assyWindow()\r\n\r\n\r\n def assyWindow(self):\r\n chassis_query = \"SELECT part_no FROM inventory WHERE part_type = \\\"Chassis\\\";\"\r\n engine_query = \"SELECT part_no FROM inventory WHERE part_type = \\\"Engine\\\";\"\r\n wheel_query = \"SELECT part_no FROM inventory WHERE part_type = \\\"Wheel\\\";\"\r\n\r\n self.cursor.execute(chassis_query)\r\n chassis_list = self.cursor.fetchall()\r\n chassis_list = list(itertools.chain(*chassis_list))\r\n\r\n self.cursor.execute(engine_query)\r\n engine_list = self.cursor.fetchall()\r\n engine_list = list(itertools.chain(*engine_list))\r\n\r\n self.cursor.execute(wheel_query)\r\n wheel_list = self.cursor.fetchall()\r\n wheel_list = list(itertools.chain(*wheel_list))\r\n\r\n chassis = StringVar()\r\n chassis.set(chassis_list[0])\r\n\r\n engine = StringVar()\r\n engine.set(engine_list[0])\r\n\r\n wheel = StringVar()\r\n wheel.set(wheel_list[0])\r\n\r\n # Instantiating text variables\r\n self.partNum1 = StringVar()\r\n self.partNum2 = StringVar()\r\n self.partNum3 = 
StringVar()\r\n self.partNum1Qty = StringVar()\r\n self.partNum2Qty = StringVar()\r\n self.partNum3Qty = StringVar()\r\n self.partNum1Stock = StringVar()\r\n self.partNum2Stock = StringVar()\r\n self.partNum3Stock = StringVar()\r\n\r\n # Instantiates style for ttk buttons\r\n style = Style()\r\n\r\n #orderNumber_label = Label(self.master, text=\"Work Order Number: \", font=(\"arial\", 12, \"bold\"))\r\n #workerID_label = Label(self.master, text=\"Worker ID: \", font=(\"arial\", 12, \"bold\"))\r\n \r\n #Creating file menu\r\n self.FileMenu = Menu(self.master)\r\n self.master.config(menu=self.FileMenu)\r\n self.subMenu = Menu(self.master)\r\n self.subMenu2 = Menu(self.master)\r\n self.FileMenu.add_cascade(label=\"File\", menu=self.subMenu)\r\n self.subMenu.add_command(label=\"Exit\", command=self.master.destroy)\r\n self.subMenu.add_command(label=\"Logout\", command=self.Logoff)\r\n\r\n self.testing_label = Label(self.master, text=\"ASSEMBLY\", relief=\"solid\", width=10.5,\r\n font=(\"arial\", 25, \"bold\"))\r\n qty_label = Label(self.master, text=\"Qty\", font=(\"arial\", 13, \"bold\"))\r\n stock_label = Label(self.master, text=\"Stock\", font=(\"arial\", 13, \"bold\"))\r\n partNum1_label = Label(self.master, text=\"Chassis: \", font=(\"arial\", 13, \"bold\"))\r\n partNum2_label = Label(self.master, text=\"Engine: \", font=(\"arial\", 13, \"bold\"))\r\n partNum3_label = Label(self.master, text=\"Wheel: \", font=(\"arial\", 13, \"bold\"))\r\n self.part1Stock_label = Label(self.master, text=\"\", font=(\"arial\", 13, \"bold\"))\r\n self.part2Stock_label = Label(self.master, text=\"\", font=(\"arial\", 13, \"bold\"))\r\n self.part3Stock_label = Label(self.master, text=\"\", font=(\"arial\", 13, \"bold\"))\r\n\r\n style.configure('C.TButton', padding=0, font=(\"arial\", 12), background='gray',\r\n foreground='Green')\r\n enter_button = Button(self.master, text=\"Enter\",\r\n command=lambda: self.onClick(tree, chassis.get(), partNum1_qty.get(), engine.get(),\r\n partNum2_qty.get(),\r\n wheel.get(), partNum3_qty.get()), style='C.TButton')\r\n reset_button = Button(self.master, text=\"Reset\", command=self.reset, style='C.TButton')\r\n stock_button = Button(self.master, text=\"Stock\",\r\n command=lambda: self.stock(chassis.get(), engine.get(), wheel.get()), style='C.TButton')\r\n\r\n partNum1_entry = OptionMenu(self.master, chassis, *chassis_list)\r\n partNum2_entry = OptionMenu(self.master, engine, *engine_list)\r\n partNum3_entry = OptionMenu(self.master, wheel, *wheel_list)\r\n partNum1_qty = Entry(self.master, width=5, textvariable=self.partNum1Qty)\r\n partNum2_qty = Entry(self.master, width=5, textvariable=self.partNum2Qty)\r\n partNum3_qty = Entry(self.master, width=5, textvariable=self.partNum3Qty)\r\n\r\n self.testing_label.place(x=240, y=10)\r\n id = Label(self.master, text=self.emp_id, font=(\"arial\", 12, \"bold\"))\r\n id.place(x=5, y=65)\r\n\r\n qty_label.place(x=205, y=115)\r\n stock_label.place(x=265, y=115)\r\n\r\n partNum1_label.place(x=5, y=140)\r\n partNum2_label.place(x=5, y=165)\r\n partNum3_label.place(x=5, y=190)\r\n\r\n reset_button.place(x=480, y=95, width=80)\r\n enter_button.place(x=480, y=125, width=80)\r\n stock_button.place(x=480, y=155, width=80)\r\n\r\n partNum1_entry.place(x=85, y=140)\r\n partNum2_entry.place(x=85, y=165)\r\n partNum3_entry.place(x=85, y=190)\r\n partNum1_qty.place(x=205, y=140)\r\n partNum2_qty.place(x=205, y=165)\r\n partNum3_qty.place(x=205, y=190)\r\n\r\n #Selection Grid\r\n tree = Treeview(self.master, column=(\"column\", \"column1\", 
\"column2\", \"column3\", \"column4\"))\r\n searchQuery = StringVar()\r\n selection = StringVar()\r\n\r\n load_button = Button(self.master, text=\"Select Work Order\", command=lambda: self.select(selection.get()), style='C.TButton')\r\n selection_entry = Entry(self.master, width=15, textvariable=selection)\r\n\r\n selection_entry.place(x=400, y=68)\r\n load_button.place(x=500, y=67)\r\n\r\n tree.column(\"#0\", minwidth=0, width=0, stretch=False)\r\n tree.heading(\"#1\", text=\"Work Order\", command=lambda: self.sort_column(tree, 0, \"Assembly\", False))\r\n tree.column(\"#1\", minwidth=0, width=100, stretch=False)\r\n tree.heading(\"#2\", text=\"Department\", command=lambda: self.sort_column(tree, 1, \"Assembly\", False))\r\n tree.column(\"#2\", minwidth=0, width=100, stretch=False)\r\n tree.heading(\"#3\", text=\"Customer\", command=lambda: self.sort_column(tree, 2, \"Assembly\", False))\r\n tree.column(\"#3\", minwidth=0, width=150, stretch=False)\r\n tree.heading(\"#4\", text=\"Received Date\", command=lambda: self.sort_column(tree, 3, \"Assembly\", False))\r\n tree.column(\"#4\", minwidth=0, width=150, stretch=False)\r\n tree.heading(\"#5\", text=\"Estimated Ship\", command=lambda: self.sort_column(tree, 4, \"Assembly\", False))\r\n tree.column(\"#5\", minwidth=0, width=150, stretch=False)\r\n\r\n tree.configure(height=5)\r\n tree.place(x=7, y=240)\r\n\r\n printAll = \"SELECT * FROM work_in_progress WHERE status = \\\"\" + \"Assembly\" + \"\\\"\"\r\n cursor = self.mydb.cursor()\r\n cursor.execute(printAll)\r\n records = cursor.fetchall()\r\n\r\n for row in records:\r\n custQuery = \"SELECT name FROM customer WHERE cust_id = %s\"\r\n self.cursor.execute(custQuery, [row[4]])\r\n cust = self.cursor.fetchone()\r\n print(cust)\r\n tree.insert('', 'end', values=\r\n (row[0], row[1], cust[0], row[2], row[3]))\r\n\r\n #logout fucntion\r\n def Logoff(self):\r\n answer = tkinter.messagebox.askquestion(\"Logout\", \"Are you sure you want to logout? 
\")\r\n\r\n if answer == \"yes\":\r\n tkinter.messagebox.showinfo(\"Logout\", \"Goodbye\")\r\n self.master.destroy()\r\n\r\n root = Tk()\r\n root.geometry(\"350x200\")\r\n root.title(\"Login\")\r\n root.resizable(False, False)\r\n login1 = login.Login(root)\r\n root.mainloop()\r\n if login1.dept == \"Receiving\":\r\n root = Tk()\r\n root.geometry(\"400x500\")\r\n root.title(\"Receiving\")\r\n root.resizable(False, False)\r\n root.configure(bg=\"light gray\")\r\n receiving1 = receiving.Receiving(root)\r\n root.mainloop()\r\n elif login1.dept == \"Assembly\":\r\n root = Tk()\r\n root.geometry(\"665x380\")\r\n root.title(\"Assembly\")\r\n root.resizable(False, False)\r\n app = Assembly(root, \"0002\")\r\n root.mainloop()\r\n elif login1.dept == \"Testing\":\r\n root = Tk()\r\n root.geometry(\"680x500\")\r\n root.title(\"Testing\")\r\n root.resizable(False, False)\r\n app = testing.TestingWindow(root, \"0003\")\r\n root.mainloop()\r\n elif login1.dept == \"Shipping\":\r\n root = Tk()\r\n root.geometry(\"665x380\")\r\n root.title(\"Shipping\")\r\n root.resizable(False, False)\r\n shipping.Shipping(root, \"0004\")\r\n root.mainloop()\r\n elif login1.dept == \"Accounting\":\r\n root = Tk()\r\n root.geometry(\"500x300\")\r\n root.title(\"Accounting\")\r\n root.resizable(False, False)\r\n close_window = Button(root, text=\"Close\", command=root.quit)\r\n close_window.place(x=90, y=230)\r\n app = accounting(root, \"0005\")\r\n root.mainloop()\r\n elif login1.dept == \"Admin\":\r\n root = Tk()\r\n root.geometry(\"600x500\")\r\n root.title(\"New User\")\r\n root.resizable(False, False)\r\n root.configure(bg=\"light gray\")\r\n admin1 = admin.Admin(root)\r\n root.mainloop()\r\n elif login1.dept == \"Inventory\":\r\n root = Tk()\r\n root.geometry(\"620x500\")\r\n root.title(\"Inventory\")\r\n root.resizable(False, False)\r\n app = inventory.Inventory(root)\r\n root.mainloop()\r\n \r\n def reset(self):\r\n self.partNum1.set(\"\")\r\n self.partNum2.set(\"\")\r\n self.partNum3.set(\"\")\r\n self.partNum1Qty.set(\"\")\r\n self.partNum2Qty.set(\"\")\r\n self.partNum3Qty.set(\"\")\r\n return\r\n\r\n def onClick(self, tree, chassisNum, chassisQty, engNum, engQty, whlNum, whlQty):\r\n if chassisNum == \"\" or chassisQty == \"\" or engNum == \"\" or engQty == \"\" or whlNum == \"\" or whlQty == \"\":\r\n tkinter.messagebox.showinfo(\"Failed\", \"Fields are required\")\r\n return\r\n\r\n answer = tkinter.messagebox.askquestion(\"Confirmation\", \"Assembly completed? 
\")\r\n chas_query = \"UPDATE INVENTORY SET qty = qty - \" + str(\r\n chassisQty) + \" WHERE part_no = \\\"\" + chassisNum + \"\\\";\"\r\n eng_query = \"UPDATE INVENTORY SET qty = qty - \" + str(\r\n engQty) + \" WHERE part_no = \\\"\" + engNum + \"\\\";\"\r\n whl_query = \"UPDATE INVENTORY SET qty = qty - \" + str(\r\n whlQty) + \" WHERE part_no = \\\"\" + whlNum + \"\\\";\"\r\n update_status = \"UPDATE work_in_progress SET status = \\\"Testing\\\" WHERE wo_number = \\\"\" + self.wo + \"\\\";\"\r\n if answer == \"yes\":\r\n self.cursor.execute(chas_query)\r\n self.cursor.execute(eng_query)\r\n self.cursor.execute(whl_query)\r\n self.cursor.execute(update_status)\r\n self.mydb.commit()\r\n tkinter.messagebox.showinfo(\"Confirmation\", \"Changes made\")\r\n\r\n\r\n printAll = \"SELECT * FROM work_in_progress WHERE status = \\\"\" + \"Assembly\" + \"\\\"\"\r\n cursor = self.mydb.cursor()\r\n cursor.execute(printAll)\r\n records = cursor.fetchall()\r\n\r\n for i in tree.get_children():\r\n tree.delete(i)\r\n\r\n for row in records:\r\n custQuery = \"SELECT name FROM customer WHERE cust_id = %s\"\r\n self.cursor.execute(custQuery, [row[4]])\r\n cust = self.cursor.fetchone()\r\n print(cust)\r\n tree.insert('', 'end', values=\r\n (row[0], row[1], cust[0], row[2], row[3]))\r\n\r\n def stock(self, chassis, engine, wheel):\r\n part1_query = \"SELECT qty FROM inventory WHERE part_no = \\\"\" + chassis + \"\\\";\"\r\n part2_query = \"SELECT qty FROM inventory WHERE part_no = \\\"\" + engine + \"\\\";\"\r\n part3_query = \"SELECT qty FROM inventory WHERE part_no = \\\"\" + wheel + \"\\\";\"\r\n self.cursor.execute(part1_query)\r\n part1_qty = self.cursor.fetchone()\r\n self.cursor.execute(part2_query)\r\n part2_qty = self.cursor.fetchone()\r\n self.cursor.execute(part3_query)\r\n part3_qty = self.cursor.fetchone()\r\n\r\n self.part1Stock_label.destroy()\r\n self.part2Stock_label.destroy()\r\n self.part3Stock_label.destroy()\r\n self.part1Stock_label = Label(self.master, text=part1_qty, font=(\"arial\", 13, \"bold\"))\r\n self.part2Stock_label = Label(self.master, text=part2_qty, font=(\"arial\", 13, \"bold\"))\r\n self.part3Stock_label = Label(self.master, text=part3_qty, font=(\"arial\", 13, \"bold\"))\r\n self.part1Stock_label.place(x=260, y=135)\r\n self.part2Stock_label.place(x=260, y=160)\r\n self.part3Stock_label.place(x=260, y=185)\r\n\r\n def sort_column(self, tree, col, department, reverse):\r\n tree.delete(*tree.get_children())\r\n\r\n connection = mysql.connector.connect(host=\"localhost\",\r\n user=\"root\", password=\"Razgriz!949\",\r\n auth_plugin=\"mysql_native_password\", database=\"inventory_system\")\r\n db_Info = connection.get_server_info()\r\n cursor = connection.cursor()\r\n cursor.execute(\"select database()\")\r\n records = cursor.fetchone()\r\n\r\n if (col == 0):\r\n querySort = \"SELECT * FROM work_in_progress WHERE status = \\\"\" + department + \"\\\" ORDER BY wo_number\"\r\n elif (col == 1):\r\n querySort = \"SELECT * FROM work_in_progress WHERE status = \\\"\" + department + \"\\\" ORDER BY status\"\r\n elif (col == 2):\r\n querySort = \"SELECT * FROM work_in_progress WHERE status = \\\"\" + department + \"\\\" ORDER BY date_recv\"\r\n elif (col == 3):\r\n querySort = \"SELECT * FROM work_in_progress WHERE status = \\\"\" + department + \"\\\" ORDER BY eta\"\r\n elif (col == 4):\r\n querySort = \"SELECT * FROM work_in_progress WHERE status = \\\"\" + department + \"\\\" ORDER BY cust_id\"\r\n\r\n cursor.execute(querySort)\r\n records = cursor.fetchall()\r\n for row in 
records:\r\n print(\"Work Number: \", row[0])\r\n print(\"Status: \", row[1])\r\n print(\"Date Received: \", row[2])\r\n print(\"ETA: \", row[3])\r\n print(\"Customer ID: \", row[4])\r\n\r\n for row in records:\r\n custQuery = \"SELECT name FROM customer WHERE cust_id = %s\"\r\n self.cursor.execute(custQuery, [row[4]])\r\n cust = self.cursor.fetchone()\r\n print(cust)\r\n tree.insert('', 'end', values=\r\n (row[0], row[1], cust[0], row[2], row[3]))\r\n\r\n cursor.close()\r\n connection.close()\r\n\r\n def select(self, wo):\r\n self.wonum = \"Work Order: \" + wo\r\n self.wo = wo\r\n number = Label(self.master, text=self.wonum, font=(\"arial\", 12, \"bold\"))\r\n number.place(x=7, y=85)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n root = Tk()\r\n root.geometry(\"665x380\")\r\n root.title(\"Assembly\")\r\n app = Assembly(root, \"0001\")\r\n root.mainloop()\r\n","sub_path":"assembly_class.py","file_name":"assembly_class.py","file_ext":"py","file_size_in_byte":15845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"360569014","text":"# Do not change these lines.\n__winc_id__ = \"a2bc36ea784242e4989deb157d527ba0\"\n__human_name__ = \"superpy\"\n\n# Your code below this line.\n\nfrom arguments import arguments\nfrom make_report import make_report\nfrom buying_products import buy_product\nfrom selling_products import sell_product\nfrom advance_time import advance_date\nfrom rich.console import Console\n\n\ndef main(args):\n args = arguments()\n if args.command == \"report\":\n make_report(args)\n elif args.command == \"buy\":\n buy_product(args)\n elif args.command == \"sell\":\n sell_product(args)\n elif args.command == \"advance\":\n advance_date(args)\n\n\n\nif __name__ == \"__main__\":\n myconsole = Console()\n myconsole.print(\"-\" * 80)\n\n args = arguments()\n main(args)\n\n myconsole.print(\"-\" * 80)\n myconsole.print(\"# Arguments\", args)\n myconsole.print(\"-\" * 80)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"654069098","text":"from tkinter import *\nfrom PIL import ImageTk,Image\nroot=Tk()\nroot.title(\"Learn to code\")\nroot.iconbitmap('f:iron.ico')\n\n\ndef open():\n top=Toplevel()\n c=Button(top,text=\"Destroy Window\",command=top.destroy).pack()\n\nb=Button(root,text=\"Open Second Window\",command=open).pack()\n\n\n\n\n\nroot.mainloop()","sub_path":"tk9.py","file_name":"tk9.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"37341456","text":"from __future__ import division, print_function\n\n'''\nFunctionality for handling citations\n'''\n\nimport string\n\nimport libtbx.phil\nfrom libtbx import str_utils\nfrom libtbx.utils import to_unicode\n\n# =============================================================================\n# PHIL definition for citations\nmaster_citation_phil_str = '''\ncitation\n .multiple = True\n{\n article_id = None\n .type = str\n .optional = False\n caption = None\n .type = str\n authors = None\n .type = str\n title = None\n .type = str\n year = None\n .type = int\n journal = None\n .type = str\n volume = None\n .type = str\n pages = None\n .type = str\n pmid = None\n .type = int\n doi_id = None\n .type = str\n url = None\n .type = str\n}\n'''\nmaster_citation_phil = libtbx.phil.parse(master_citation_phil_str)\n\n# 
-----------------------------------------------------------------------------\n# PHIL definition for journals\n# This is used for providing information in CIF blocks\nmaster_journal_phil_str = '''\njournal\n .multiple = True\n{\n name = None\n .type = str\n .multiple = True\n name_full = None\n .type = str\n abbrev_CAS = None\n .type = str\n .help = Abbreviated name of the cited journal as given in the \\\n Chemical Abstracts Service Source Index.\n id_ASTM = None\n .type = str\n .help = The American Society for Testing and Materials (ASTM) code \\\n assigned to the journal cited (also referred to as the CODEN \\\n designator of the Chemical Abstracts Service).\n id_CSD = None\n .type = str\n .help = The Cambridge Structural Database (CSD) code assigned to the \\\n journal cited.\n id_ISSN = None\n .type = str\n .help = The International Standard Serial Number (ISSN) code assigned to \\\n the journal cited.\n}\n'''\nmaster_journal_phil = libtbx.phil.parse(master_journal_phil_str)\n\n# -----------------------------------------------------------------------------\n# Construct common database of citations and journals\n# This prevents duplication of citations in individual programs if methods from\n# different references are used in multiple programs\n\ncitations_and_journals = libtbx.phil.read_default(__file__)\ncitations = master_citation_phil.fetch(source=citations_and_journals).extract()\ncitations_db = dict( [ (c.article_id, c) for c in citations.citation ] )\n\njournals_db = dict()\njournals = master_journal_phil.fetch(source=citations_and_journals).extract()\nfor journal in journals.journal:\n for name in journal.name:\n journals_db[name] = journal\n\n# =============================================================================\ndef format_citation (article) :\n authors = article.authors\n author_list = authors.split(\", \")\n if len(author_list) == 1 :\n authors_out = authors\n else :\n authors_out = \", \".join(author_list[:-1]) + \", and %s\" % author_list[-1]\n output = \"%s.\" % authors\n if article.year is not None : output += \" (%d)\" % article.year\n title = article.title\n if (title is not None) :\n title = title.strip()\n if (not title.endswith(\".\")) :\n title += \".\"\n output += \" %s\" % title\n if article.journal is not None : output += \" %s\" % article.journal\n if article.volume is not None :\n if article.journal is not None and 'Acta Cryst. ' in article.journal:\n # special case for Acta Cryst journals to get e.g.:\n # Acta Cryst. D66\n output += \"%s\" % article.volume\n else:\n output += \" %s\" % article.volume\n if article.pages is not None :\n if article.volume is not None : output += \":%s\" % article.pages\n else : output += \", pp. %s\" % article.pages\n if output[-1] != '.' 
: output += \".\"\n return output\n\n# -----------------------------------------------------------------------------\ndef author_list_with_periods (authors, initials_first=False) :\n author_list = authors.split(\", \")\n authors_formatted = []\n for author in author_list :\n names = author.split(\" \")\n if len(names) == 1 :\n authors_formatted.append(names[0])\n else :\n initials = names[-1]\n new_initials = \"\"\n for letter in initials :\n if letter in string.letters :\n new_initials += (\"%s.\" % letter)\n else : # usually '-'\n new_initials += letter\n if initials_first :\n reformatted = \"%s %s\" % (new_initials, \" \".join(names[:-1]))\n else :\n reformatted = \"%s %s\" % (\" \".join(names[:-1]), new_initials)\n authors_formatted.append(reformatted)\n return authors_formatted\n\n# -----------------------------------------------------------------------------\ndef format_citation_cell (article) :\n author_list = author_list_with_periods(article.authors)\n if len(author_list) == 1 :\n authors_out = author_list[0]\n else :\n authors_out = \", \".join(author_list[:-1]) + \", and %s\" % author_list[-1]\n output = \"%s\" % authors_out # XXX no extra period at end!\n if article.year is not None : output += \" (%d).\" % article.year\n title = article.title\n if (title is not None) :\n title = title.strip()\n if (not title.endswith(\".\")) :\n title += \".\"\n output += \" %s\" % title\n if article.journal is not None : output += \" %s\" % article.journal\n if article.volume is not None :\n if article.journal is not None and 'Acta Cryst. ' in article.journal:\n # special case for Acta Cryst journals to get e.g.:\n # Acta Cryst. D66\n output += \"%s\" % article.volume\n else:\n output += \" %s\" % article.volume\n if article.pages is not None :\n if article.volume is not None : output += \", %s\" % article.pages\n else : output += \", pp. %s\" % article.pages\n if output[-1] != '.' : output += \".\"\n return output\n\n# -----------------------------------------------------------------------------\ndef format_citation_iucr (article) :\n author_list = author_list_with_periods(article.authors)\n if len(author_list) == 1 :\n authors_out = author_list[0]\n else :\n authors_out = \", \".join(author_list[:-1]) + \", & %s\" % author_list[-1]\n output = \"%s\" % authors_out\n if article.year is not None : output += \" (%d).\" % article.year\n if article.journal is not None : output += \" %s\" % article.journal\n if article.volume is not None :\n if article.journal is not None and 'Acta Cryst. ' in article.journal:\n # special case for Acta Cryst journals to get e.g.:\n # Acta Cryst. D66\n output += \"%s\" % article.volume\n else:\n output += \" %s\" % article.volume\n if article.pages is not None :\n if article.volume is not None : output += \", %s\" % article.pages\n else : output += \", pp. %s\" % article.pages\n if output[-1] != '.' : output += \".\"\n return output\n\n# -----------------------------------------------------------------------------\ndef format_citation_html (article) :\n if (article.journal is None) :\n raise ValueError(\"Missing journal name for '%s'.\" % article.article_id)\n author_list = author_list_with_periods(article.authors, initials_first=True)\n if len(author_list) == 1 :\n authors_out = author_list[0]\n else :\n authors_out = \", \".join(author_list[:-1]) + \", and %s\" % author_list[-1]\n title = article.title.strip()\n if (not title.endswith(\".\")) :\n title += \".\"\n output = \"%s %s. \" % (title, authors_out)\n if 'Acta Cryst.' 
in article.journal:\n journal_ref = \"Acta Cryst.\"\n journal_section = article.journal.split(\"Acta Cryst. \")[1]\n else:\n journal_ref = \"%s\" % article.journal\n journal_section = None\n if (article.volume is not None) :\n if journal_section is not None:\n journal_ref += \" %s%s\" %(journal_section, article.volume)\n else:\n journal_ref += \" %s\" % article.volume\n if (article.pages is not None) :\n journal_ref += \", %s\" % article.pages\n if (article.year is not None) :\n journal_ref += \" (%s)\" % article.year\n if (article.url is not None) :\n output += \"\"\"%s.\"\"\" % (article.url, journal_ref)\n elif (article.doi_id is not None) :\n output += \"\"\"%s.\"\"\" % (article.doi_id,\n journal_ref)\n elif (article.pmid is not None) :\n output += \"\"\"%s.\"\"\" % \\\n (article.pmid, journal_ref)\n else :\n output += \" %s.\" % journal_ref\n return output\n\n# -----------------------------------------------------------------------------\ndef show_citation(article, out=None, max_width=79, format='default'):\n if format == 'default' :\n output = format_citation(article)\n elif format == 'iucr' :\n output = format_citation_iucr(article)\n elif format == 'cell' :\n output = format_citation_cell(article)\n if max_width is None or max_width < 1 :\n print(to_unicode(output), file=out)\n else :\n for line in str_utils.line_breaker(output, max_width) :\n print(to_unicode(line), file=out)\n print(to_unicode(''), file=out)\n\ndef show_citations(articles, out=None, max_width=79, sort_by_name=True,\n format='default'):\n if (sort_by_name):\n articles.sort(lambda x, y: cmp(x.authors, y.authors))\n for article in articles:\n show_citation(article, out, max_width, format)\n\n# -----------------------------------------------------------------------------\ndef citations_as_cif_block(articles, cif_block=None):\n import iotbx.cif.model\n if cif_block is None:\n cif_block = iotbx.cif.model.block()\n def replace_none_with_question_mark(s):\n if s is None: return '?'\n return s\n citation_loop = iotbx.cif.model.loop(header=(\n '_citation.id', '_citation.title', '_citation.journal_abbrev',\n '_citation.journal_volume', '_citation.page_first', '_citation.page_last',\n '_citation.year', '_citation.journal_id_ASTM', '_citation.journal_id_ISSN',\n '_citation.journal_id_CSD'))\n for article in articles:\n if article.pages is None:\n first_page, last_page = \"?\", \"?\"\n else:\n pages = article.pages.split('-')\n first_page = pages[0]\n if len(pages) == 1:\n last_page = '?'\n else:\n assert len(pages) == 2\n last_page = pages[1]\n journal = journals_db.get(article.journal)\n assert journal is not None\n citation_loop.add_row(\n {'_citation.id': article.article_id,\n '_citation.title': article.title,\n '_citation.journal_abbrev': journal.abbrev_CAS,\n '_citation.journal_volume': article.volume,\n '_citation.page_first': first_page,\n '_citation.page_last': last_page,\n '_citation.year': article.year,\n '_citation.journal_id_ASTM':\n replace_none_with_question_mark(journal.id_ASTM),\n '_citation.journal_id_ISSN':\n replace_none_with_question_mark(journal.id_ISSN),\n '_citation.journal_id_CSD':\n replace_none_with_question_mark(journal.id_CSD),\n })\n cif_block.add_loop(citation_loop)\n return cif_block\n\n# =============================================================================\n# end\n","sub_path":"libtbx/citations.py","file_name":"citations.py","file_ext":"py","file_size_in_byte":10955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
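The citations.py record above builds its citation string incrementally, appending each optional field (year, title, journal, volume, pages) only when it is present. Below is a minimal standalone sketch of that accumulate-if-present pattern; the `Article` namedtuple is a hypothetical stand-in for the PHIL-extracted citation objects, not part of libtbx:

```python
from collections import namedtuple

# Hypothetical stand-in for the PHIL-extracted citation objects used above.
Article = namedtuple("Article", "authors year title journal volume pages")

def format_citation_sketch(article):
    # Assemble "Authors. (Year) Title. Journal Volume:Pages." from optional fields.
    parts = ["%s." % article.authors]
    if article.year is not None:
        parts.append("(%d)" % article.year)
    if article.title is not None:
        title = article.title.strip()
        parts.append(title if title.endswith(".") else title + ".")
    if article.journal is not None:
        parts.append(article.journal)
    if article.volume is not None:
        pages = ":%s" % article.pages if article.pages is not None else ""
        parts.append("%s%s" % (article.volume, pages))
    output = " ".join(parts)
    return output if output.endswith(".") else output + "."

print(format_citation_sketch(
    Article("Smith J, and Doe A", 2010, "A method", "J. Test", "12", "34-56")))
# -> Smith J, and Doe A. (2010) A method. J. Test 12:34-56.
```

The real `format_citation` additionally special-cases Acta Cryst. journal names (fusing the volume onto the abbreviation) and rejoins multi-author lists with ", and"; the sketch keeps only the core field-by-field assembly.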
+{"seq_id":"150365656","text":"import sys\r\nimport cv2\r\nimport numpy as np\r\nimport tkinter as tk\r\nimport tkinter.ttk as ttk\r\nimport math\r\nfrom PIL import Image, ImageTk\r\nfrom tkinter import filedialog\r\n\r\ntry:\r\n import Tkinter as tk\r\nexcept ImportError:\r\n import tkinter as tk\r\n\r\ntry:\r\n import ttk\r\n py3 = False\r\nexcept ImportError:\r\n import tkinter.ttk as ttk\r\n py3 = True\r\n\r\nimport mam_support\r\n\r\n\r\ndef vp_start_gui():\r\n '''Starting point when module is the main routine.'''\r\n global val, w, root\r\n root = tk.Tk()\r\n top = Toplevel1 (root)\r\n mam_support.init(root, top)\r\n root.mainloop()\r\n\r\nw = None\r\ndef create_Toplevel1(rt, *args, **kwargs):\r\n '''Starting point when module is imported by another module.\r\n Correct form of call: 'create_Toplevel1(root, *args, **kwargs)' .'''\r\n global w, w_win, root\r\n #rt = root\r\n root = rt\r\n w = tk.Toplevel (root)\r\n top = Toplevel1 (w)\r\n mam_support.init(w, top, *args, **kwargs)\r\n return (w, top)\r\n\r\ndef destroy_Toplevel1():\r\n global w\r\n w.destroy()\r\n w = None\r\n\r\nclass Toplevel1:\r\n def __init__(self, top=None):\r\n '''This class configures and populates the toplevel window.\r\n top is the toplevel containing window.'''\r\n _bgcolor = '#d9d9d9' # X11 color: 'gray85'\r\n _fgcolor = '#000000' # X11 color: 'black'\r\n _compcolor = '#d9d9d9' # X11 color: 'gray85'\r\n _ana1color = '#d9d9d9' # X11 color: 'gray85'\r\n _ana2color = '#ececec' # Closest X11 color: 'gray92'\r\n\r\n top.geometry(\"1631x902+218+27\")\r\n top.minsize(148, 1)\r\n top.maxsize(4104, 1055)\r\n top.resizable(1, 1)\r\n top.title(\"New Toplevel\")\r\n top.configure(background=\"#d9d9d9\")\r\n\r\n self.menubar = tk.Menu(top,font=\"TkMenuFont\",bg=_bgcolor,fg=_fgcolor)\r\n top.configure(menu = self.menubar)\r\n\r\n self.Frame1 = tk.Frame(top)\r\n self.Frame1.place(relx=0.006, rely=0.011, relheight=0.969\r\n , relwidth=0.983)\r\n self.Frame1.configure(relief='groove')\r\n self.Frame1.configure(borderwidth=\"2\")\r\n self.Frame1.configure(relief=\"groove\")\r\n self.Frame1.configure(background=\"#d9d9d9\")\r\n self.Frame1.configure(cursor=\"fleur\")\r\n\r\n self.Label1 = tk.Label(self.Frame1)\r\n self.Label1.place(relx=0.014, rely=0.011, height=56, width=1559)\r\n self.Label1.configure(background=\"#408080\")\r\n self.Label1.configure(disabledforeground=\"#a3a3a3\")\r\n self.Label1.configure(font=\"-family {Segoe UI} -size 16 -weight bold -slant italic\")\r\n self.Label1.configure(foreground=\"#ffffff\")\r\n self.Label1.configure(text='''Mammogram Image Enhancement, Diameter/Area Calculations and Predict Tumor Stage''')\r\n\r\n self.browselabel = tk.Label(self.Frame1)\r\n self.browselabel.place(relx=0.019, rely=0.114, height=404, width=477)\r\n self.browselabel.configure(background=\"#ffffff\")\r\n self.browselabel.configure(disabledforeground=\"#a3a3a3\")\r\n self.browselabel.configure(foreground=\"#000000\")\r\n\r\n self.enhanceimage = tk.Label(self.Frame1)\r\n self.enhanceimage.place(relx=0.347, rely=0.114, height=404, width=475)\r\n self.enhanceimage.configure(activebackground=\"#f9f9f9\")\r\n self.enhanceimage.configure(activeforeground=\"black\")\r\n self.enhanceimage.configure(background=\"#ffffff\")\r\n self.enhanceimage.configure(disabledforeground=\"#a3a3a3\")\r\n self.enhanceimage.configure(foreground=\"#000000\")\r\n self.enhanceimage.configure(highlightbackground=\"#d9d9d9\")\r\n self.enhanceimage.configure(highlightcolor=\"black\")\r\n\r\n self.segImg = tk.Label(self.Frame1)\r\n 
self.segImg.place(relx=0.682, rely=0.114, height=404, width=475)\r\n self.segImg.configure(activebackground=\"#f9f9f9\")\r\n self.segImg.configure(activeforeground=\"black\")\r\n self.segImg.configure(background=\"#ffffff\")\r\n self.segImg.configure(disabledforeground=\"#a3a3a3\")\r\n self.segImg.configure(foreground=\"#000000\")\r\n self.segImg.configure(highlightbackground=\"#d9d9d9\")\r\n self.segImg.configure(highlightcolor=\"black\")\r\n\r\n self.browseimg = tk.Button(self.Frame1)\r\n self.browseimg.place(relx=0.039, rely=0.604, height=33, width=366)\r\n self.browseimg.configure(activebackground=\"#ececec\")\r\n self.browseimg.configure(activeforeground=\"#000000\")\r\n self.browseimg.configure(background=\"#d9d9d9\")\r\n self.browseimg.configure(command=self.browse)\r\n self.browseimg.configure(disabledforeground=\"#a3a3a3\")\r\n self.browseimg.configure(foreground=\"#000000\")\r\n self.browseimg.configure(highlightbackground=\"#d9d9d9\")\r\n self.browseimg.configure(highlightcolor=\"black\")\r\n self.browseimg.configure(pady=\"0\")\r\n self.browseimg.configure(text='''Browse Image''')\r\n\r\n self.enhance = tk.Button(self.Frame1)\r\n self.enhance.place(relx=0.373, rely=0.604, height=33, width=366)\r\n self.enhance.configure(activebackground=\"#ececec\")\r\n self.enhance.configure(activeforeground=\"#000000\")\r\n self.enhance.configure(background=\"#d9d9d9\")\r\n self.enhance.configure(cursor=\"fleur\")\r\n self.enhance.configure(disabledforeground=\"#a3a3a3\")\r\n self.enhance.configure(foreground=\"#000000\")\r\n self.enhance.configure(highlightbackground=\"#d9d9d9\")\r\n self.enhance.configure(highlightcolor=\"black\")\r\n self.enhance.configure(pady=\"0\")\r\n self.enhance.configure(text='''Enhance Image''')\r\n\r\n self.segment = tk.Button(self.Frame1)\r\n self.segment.place(relx=0.723, rely=0.595, height=33, width=366)\r\n self.segment.configure(activebackground=\"#ececec\")\r\n self.segment.configure(activeforeground=\"#000000\")\r\n self.segment.configure(command=self.segment)\r\n self.segment.configure(background=\"#d9d9d9\")\r\n self.segment.configure(disabledforeground=\"#a3a3a3\")\r\n self.segment.configure(foreground=\"#000000\")\r\n self.segment.configure(highlightbackground=\"#d9d9d9\")\r\n self.segment.configure(highlightcolor=\"black\")\r\n self.segment.configure(pady=\"0\")\r\n self.segment.configure(text='''Segment Image''')\r\n\r\n self.opening = tk.Label(self.Frame1)\r\n self.opening.place(relx=0.355, rely=0.686, height=28, width=197)\r\n self.opening.configure(background=\"#d9d9d9\")\r\n self.opening.configure(disabledforeground=\"#a3a3a3\")\r\n self.opening.configure(foreground=\"#000000\")\r\n self.opening.configure(text='''Opening :''')\r\n\r\n self.Scale1 = tk.Scale(self.Frame1, from_=0.0, to=50.0)\r\n self.Scale1.place(relx=0.48, rely=0.664, relwidth=0.145, relheight=0.0\r\n , height=48, bordermode='ignore')\r\n self.Scale1.configure(activebackground=\"#ececec\")\r\n self.Scale1.configure(background=\"#d9d9d9\")\r\n self.Scale1.configure(foreground=\"#000000\")\r\n self.Scale1.configure(highlightbackground=\"#d9d9d9\")\r\n self.Scale1.configure(highlightcolor=\"black\")\r\n self.Scale1.configure(orient=\"horizontal\")\r\n self.Scale1.configure(troughcolor=\"#d9d9d9\")\r\n\r\n self.Label2 = tk.Label(self.Frame1)\r\n self.Label2.place(relx=0.387, rely=0.767, height=27, width=111)\r\n self.Label2.configure(background=\"#d9d9d9\")\r\n self.Label2.configure(disabledforeground=\"#a3a3a3\")\r\n self.Label2.configure(foreground=\"#000000\")\r\n 
self.Label2.configure(text='''Closing :''')\r\n\r\n self.Scale2 = tk.Scale(self.Frame1, from_=0.0, to=50.0)\r\n self.Scale2.place(relx=0.48, rely=0.744, relwidth=0.147, relheight=0.0\r\n , height=47, bordermode='ignore')\r\n self.Scale2.configure(activebackground=\"#ececec\")\r\n self.Scale2.configure(background=\"#d9d9d9\")\r\n self.Scale2.configure(foreground=\"#000000\")\r\n self.Scale2.configure(highlightbackground=\"#d9d9d9\")\r\n self.Scale2.configure(highlightcolor=\"black\")\r\n self.Scale2.configure(orient=\"horizontal\")\r\n self.Scale2.configure(troughcolor=\"#d9d9d9\")\r\n\r\n self.Label3 = tk.Label(self.Frame1)\r\n self.Label3.place(relx=0.387, rely=0.847, height=27, width=113)\r\n self.Label3.configure(background=\"#d9d9d9\")\r\n self.Label3.configure(disabledforeground=\"#a3a3a3\")\r\n self.Label3.configure(foreground=\"#000000\")\r\n self.Label3.configure(text='''Erosion :''')\r\n\r\n self.Scale3 = tk.Scale(self.Frame1, from_=0.0, to=50.0)\r\n self.Scale3.place(relx=0.48, rely=0.824, relwidth=0.147, relheight=0.0\r\n , height=47, bordermode='ignore')\r\n self.Scale3.configure(activebackground=\"#ececec\")\r\n self.Scale3.configure(background=\"#d9d9d9\")\r\n self.Scale3.configure(foreground=\"#000000\")\r\n self.Scale3.configure(highlightbackground=\"#d9d9d9\")\r\n self.Scale3.configure(highlightcolor=\"black\")\r\n self.Scale3.configure(orient=\"horizontal\")\r\n self.Scale3.configure(troughcolor=\"#d9d9d9\")\r\n\r\n self.Label4 = tk.Label(self.Frame1)\r\n self.Label4.place(relx=0.031, rely=0.744, height=26, width=171)\r\n self.Label4.configure(background=\"#d9d9d9\")\r\n self.Label4.configure(disabledforeground=\"#a3a3a3\")\r\n self.Label4.configure(foreground=\"#000000\")\r\n self.Label4.configure(text='''Diameter of the Tumor :''')\r\n\r\n self.Label4_1 = tk.Label(self.Frame1)\r\n self.Label4_1.place(relx=0.031, rely=0.812, height=26, width=171)\r\n self.Label4_1.configure(activebackground=\"#f9f9f9\")\r\n self.Label4_1.configure(activeforeground=\"black\")\r\n self.Label4_1.configure(background=\"#d9d9d9\")\r\n self.Label4_1.configure(disabledforeground=\"#a3a3a3\")\r\n self.Label4_1.configure(foreground=\"#000000\")\r\n self.Label4_1.configure(highlightbackground=\"#d9d9d9\")\r\n self.Label4_1.configure(highlightcolor=\"black\")\r\n self.Label4_1.configure(text='''2d Area of the Tumor :''')\r\n\r\n self.Label4_2 = tk.Label(self.Frame1)\r\n self.Label4_2.place(relx=0.05, rely=0.881, height=26, width=172)\r\n self.Label4_2.configure(activebackground=\"#f9f9f9\")\r\n self.Label4_2.configure(activeforeground=\"black\")\r\n self.Label4_2.configure(background=\"#d9d9d9\")\r\n self.Label4_2.configure(disabledforeground=\"#a3a3a3\")\r\n self.Label4_2.configure(foreground=\"#000000\")\r\n self.Label4_2.configure(highlightbackground=\"#d9d9d9\")\r\n self.Label4_2.configure(highlightcolor=\"black\")\r\n self.Label4_2.configure(text='''Tumor stage :''')\r\n\r\n self.diameter = tk.Label(self.Frame1)\r\n self.diameter.place(relx=0.15, rely=0.744, height=27, width=302)\r\n self.diameter.configure(activebackground=\"#f9f9f9\")\r\n self.diameter.configure(activeforeground=\"black\")\r\n self.diameter.configure(background=\"#ffffff\")\r\n self.diameter.configure(cursor=\"fleur\")\r\n self.diameter.configure(disabledforeground=\"#a3a3a3\")\r\n self.diameter.configure(foreground=\"#000000\")\r\n self.diameter.configure(highlightbackground=\"#d9d9d9\")\r\n self.diameter.configure(highlightcolor=\"black\")\r\n\r\n self.area = tk.Label(self.Frame1)\r\n self.area.place(relx=0.15, rely=0.812, 
height=27, width=302)\r\n self.area.configure(activebackground=\"#f9f9f9\")\r\n self.area.configure(activeforeground=\"black\")\r\n self.area.configure(background=\"#ffffff\")\r\n self.area.configure(cursor=\"fleur\")\r\n self.area.configure(disabledforeground=\"#a3a3a3\")\r\n self.area.configure(foreground=\"#000000\")\r\n self.area.configure(highlightbackground=\"#d9d9d9\")\r\n self.area.configure(highlightcolor=\"black\")\r\n\r\n self.tumorstage = tk.Label(self.Frame1)\r\n self.tumorstage.place(relx=0.15, rely=0.881, height=27, width=302)\r\n self.tumorstage.configure(activebackground=\"#f9f9f9\")\r\n self.tumorstage.configure(activeforeground=\"black\")\r\n self.tumorstage.configure(background=\"#ffffff\")\r\n self.tumorstage.configure(disabledforeground=\"#a3a3a3\")\r\n self.tumorstage.configure(foreground=\"#000000\")\r\n self.tumorstage.configure(highlightbackground=\"#d9d9d9\")\r\n self.tumorstage.configure(highlightcolor=\"black\")\r\n\r\n\t# -------------------------------------Image browser function----------------------------------------------------------\r\n def browse(self):\r\n img = Image.open(filedialog.askopenfilename())\r\n val = img.size\r\n\t\t# Actual size to resized image percentage calculation\r\n calc_area_percentage = ((477*404)/(val[0]*val[1]))*100\r\n\t\t\r\n\t\t# Resizing the image\r\n resized = img.resize((477,404), Image.ANTIALIAS)\r\n\t\t\r\n\t\t# convert the image to ImageTk format\r\n imgtk = ImageTk.PhotoImage(image=resized)\r\n\t\t\r\n\t\t# storing image in the browselabel\r\n self.browselabel.image = imgtk\r\n self.browselabel.configure(image=imgtk)\r\n \r\n\t\t# calling enhance_img function to enhance the image\r\n self.enhance_img(resized,calc_area_percentage)\r\n sys.stdout.flush()\r\n\r\n\t# -----------------------------------Image enhancement function-----------------------------------------------------------\r\n def enhance_img(self,img,area):\r\n\t\t# convert image to grayscale image\r\n resized_gray = cv2.cvtColor(np.float32(img), cv2.COLOR_BGR2GRAY)\r\n \r\n\t\t# apply thresholding\r\n thresh,img_bin = cv2.threshold(resized_gray,145,255,cv2.THRESH_BINARY)\r\n\r\n\t\t# getting opening, closing, erosion kernel values from the scale bars in the GUI\r\n val1 = self.Scale1.get()\r\n val2 = self.Scale2.get()\r\n val3 = self.Scale3.get()\r\n\t\t\t\r\n\t\t# IMPLEMENTING KERNELS\r\n kernel = np.ones((val1,val1), np.uint8)\r\n kernel2 = np.ones((val2,val2), np.uint8)\r\n \r\n\t\t# APPLYING MORPHOLOGICAL OPERATIONS TO THE IMAGE\r\n opening = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel)\r\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel2)\r\n erosion = cv2.erode(closing,cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(val3,val3)))\r\n\t\t\r\n\t\t# CROP IMAGE TO EXTRACT THE TUMOR PART\r\n crop_img = erosion[50:400, 60:477]\r\n\t\t\r\n\t\t# convert the image to ImageTk format\r\n image1 = Image.fromarray(crop_img)\r\n imgtk = ImageTk.PhotoImage(image=image1)\r\n\t\t\r\n\t\t# storing image in the enhanceimage label\r\n self.enhanceimage.image = imgtk\r\n self.enhanceimage.configure(image=imgtk)\r\n\t\t\r\n\t\t# writing the enhanced image to the working directory\r\n cv2.imwrite(\"img.png\", crop_img)\r\n\t\t\r\n\t\t# calling the image segmenting function\r\n self.segment_img(area)\r\n sys.stdout.flush()\r\n\r\n \r\n\t\r\n\t#----------------------------------------Image segmentation function---------------------------------------------------------- \r\n def segment_img(self,area):\r\n thresh = cv2.imread('img.png',0)\r\n\t\t# check if the image 
is black without any detected tumors in white objects\r\n\t\r\n if np.mean(thresh) == 0:\r\n\t\t\t# convert the image to ImageTk format\r\n image1 = Image.fromarray(thresh)\r\n imgtk = ImageTk.PhotoImage(image=image1)\r\n\t\t\t\r\n\t\t\t# storing image in the segImg label\r\n self.segImg.image = imgtk\r\n self.segImg.configure(image=imgtk)\r\n\t\t\t\r\n\t\t\t# return messages to the following labels\r\n self.diameter.configure(text=\"No Tumor Detected\")\r\n self.area.configure(text=\"No Tumor Detected\")\r\n self.tumorstage.configure(text=\"No Tumor Detected\")\r\n else:\r\n\t\t\t# find contours\r\n contours,hierarchy = cv2.findContours(thresh,2,1) #include another variable before 'contuors' if you get an error saying \"ValueError: not enough values to unpack (expected 3, got 2)\"\r\n\t\t\t# convert image to grayscale image\r\n thresh = cv2.cvtColor(thresh, cv2.COLOR_GRAY2RGB)\r\n cnt = contours\r\n\t\t\t# draw contours on the image\r\n cv2.drawContours(thresh, cnt, -1, (36, 255, 12), 2)\r\n\r\n arr = [0 for i in cnt]\r\n \r\n\t\t\t# finding the radius of the segmented objects\r\n for i in range (len(cnt)):\r\n (x,y),radius = cv2.minEnclosingCircle(cnt[i])\r\n center = (int(x),int(y))\r\n radius = int(radius)\r\n cv2.circle(thresh,center,radius,(0,255,0),2)\r\n\t\t\t# populating the diameters of the objects to arr\r\n arr[i] = radius*0.26458333\r\n\r\n # convert the image to ImageTk format\r\n image1 = Image.fromarray(thresh)\r\n imgtk = ImageTk.PhotoImage(image=image1)\r\n\t # storing image in the segImg label\r\n self.segImg.image = imgtk\r\n self.segImg.configure(image=imgtk)\t\t\t\r\n\t # sorting the array in descending order\r\n arr.sort(reverse=True)\r\n\t # find the area of the biggest tumor in the image\r\n area_circle = math.pi * (arr[0]*arr[0])\r\n\t # finding the area for the actual sized image\r\n actual_circle_area = area_circle*(100/area)\r\n\t # calculating the diameter from the actual circle area\r\n diameter_result1 = round(math.sqrt((actual_circle_area/math.pi))*2, ndigits=2)\r\n diameter_result2 = \"(Actual image) Diameter of the biggest tumor is \" + str(diameter_result1) + \"mm\"\r\n\t\t\t\r\n\t # storing the diameter in the diameter label\r\n self.diameter.configure(text=diameter_result2)\r\n result = round(actual_circle_area, ndigits=2)\r\n resultant_area = \"(Actual image) Area of the biggest tumor is \" + str(result) + \"mm\"\r\n\t\t\t\r\n\t\t\t# storing the area of the tumor in the area label\r\n self.area.configure(text=resultant_area)\t\r\n\t\t\t# calling the tumor_st function\r\n self.tumor_st(float(diameter_result1/10))\r\n sys.stdout.flush()\r\n\r\n\t# --------------Tumor stage identifying function--------------\r\n def tumor_st(self,diameter):\r\n if diameter == 0:\r\n self.tumorstage.configure(text=\"Tumor Stage T0\")\r\n\r\n elif diameter > 0 and diameter <= 2:\r\n if diameter > 0 and diameter <= 0.1:\r\n self.tumorstage.configure(text=\"Tumor Stage T1mi\")\r\n elif diameter > 0.1 and diameter <= 0.5:\r\n self.tumorstage.configure(text=\"Tumor Stage T1a\")\r\n elif diameter > 0.5 and diameter <= 1:\r\n self.tumorstage.configure(text=\"Tumor Stage T1b\")\r\n else:\r\n self.tumorstage.configure(text=\"Tumor Stage T1c\")\r\n\r\n elif (diameter > 2) and (diameter <= 5):\r\n self.tumorstage.configure(text=\"Tumor Stage T2\")\r\n elif diameter > 5:\r\n self.tumorstage.configure(text=\"Tumor Stage T3\")\r\n else:\r\n self.tumorstage.configure(text=\"Tumor Stage T4\")\r\n sys.stdout.flush()\r\n\r\nif __name__ == '__main__':\r\n 
vp_start_gui()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Mammogram Project CS314 (S16533)/mam.py","file_name":"mam.py","file_ext":"py","file_size_in_byte":18798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"488439864","text":"import os\nimport time\nimport shutil\nimport torch\nimport torchvision\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport cv2\nimport utils.transforms as tf\nimport numpy as np\nimport models\nfrom models import sync_bn\nimport dataset as ds\nfrom options.options import parser\nimport numpy as np\n\nbest_mIoU = 0\n\n\ndef main():\n global args, best_mIoU\n args = parser.parse_args()\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(gpu) for gpu in args.gpus)\n args.gpus = len(args.gpus)\n\n if args.no_partialbn:\n sync_bn.Synchronize.init(args.gpus)\n\n if args.dataset == 'VOCAug' or args.dataset == 'VOC2012' or args.dataset == 'COCO':\n num_class = 21\n ignore_label = 255\n scale_series = [10, 20, 30, 60]\n elif args.dataset == 'Cityscapes':\n num_class = 19\n ignore_label = 255 # 0\n scale_series = [15, 30, 45, 90]\n elif args.dataset == 'ApolloScape':\n num_class = 37 # merge the noise and ignore labels\n ignore_label = 255 # 0\n else:\n raise ValueError('Unknown dataset ' + args.dataset)\n\n model = models.PSPNet(num_class, base_model=args.arch, dropout=args.dropout, partial_bn=not args.no_partialbn)\n input_mean = model.input_mean\n input_std = model.input_std\n policies = model.get_optim_policies()\n model = torch.nn.DataParallel(model, device_ids=range(args.gpus)).cuda()\n\n if args.resume:\n if os.path.isfile(args.resume):\n print((\"=> loading checkpoint '{}'\".format(args.resume)))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_mIoU = checkpoint['best_mIoU']\n torch.nn.Module.load_state_dict(model, checkpoint['state_dict'])\n print((\"=> loaded checkpoint '{}' (epoch {})\".format(args.evaluate, checkpoint['epoch'])))\n else:\n print((\"=> no checkpoint found at '{}'\".format(args.resume)))\n\n\n cudnn.benchmark = True\n cudnn.fastest = True\n\n # Data loading code\n\n test_loader = torch.utils.data.DataLoader(\n getattr(ds, args.dataset.replace(\"ApolloScape\", \"VOCAug\") + 'DataSet')(data_list=args.val_list, transform=[\n torchvision.transforms.Compose([\n tf.GroupRandomScaleRatio(size=(1692, 1692, 505, 505), interpolation=(cv2.INTER_LINEAR, cv2.INTER_NEAREST)),\n tf.GroupNormalize(mean=(input_mean, (0, )), std=(input_std, (1, ))),]), \n torchvision.transforms.Compose([\n tf.GroupRandomScaleRatio(size=(1861, 1861, 556, 556), interpolation=(cv2.INTER_LINEAR, cv2.INTER_NEAREST)),\n tf.GroupNormalize(mean=(input_mean, (0, )), std=(input_std, (1, ))),]), \n torchvision.transforms.Compose([\n tf.GroupRandomScaleRatio(size=(1624, 1624, 485, 485), interpolation=(cv2.INTER_LINEAR, cv2.INTER_NEAREST)),\n tf.GroupNormalize(mean=(input_mean, (0, )), std=(input_std, (1, ))),]),\n torchvision.transforms.Compose([\n tf.GroupRandomScaleRatio(size=(2030, 2030, 606, 606), interpolation=(cv2.INTER_LINEAR, cv2.INTER_NEAREST)),\n tf.GroupNormalize(mean=(input_mean, (0, )), std=(input_std, (1, ))),])\n ]), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False)\n\n # define loss function (criterion) optimizer and evaluator\n weights = [1.0 for _ in range(37)]\n weights[0] = 0.05\n weights[36] = 0.05\n class_weights = torch.FloatTensor(weights).cuda()\n criterion = torch.nn.NLLLoss(ignore_index=ignore_label, 
weight=class_weights).cuda()\n for group in policies:\n print(('group: {} has {} params, lr_mult: {}, decay_mult: {}'.format(group['name'], len(group['params']), group['lr_mult'], group['decay_mult'])))\n optimizer = torch.optim.SGD(policies, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n evaluator = EvalSegmentation(num_class, ignore_label)\n\n ### evaluate ###\n validate(test_loader, model, criterion, 0, evaluator)\n return\n\ndef cal_model_output(model, img_scale_dict, cnt, img_h_list, img_w_list):\n\n input_img = img_scale_dict[str(cnt)]\n # with torch.no_grad():\n input_var = torch.autograd.Variable(input_img, volatile=True) \n input_var_1 = input_var[:, :, :int(args.test_size / 3), :args.test_size]\n input_var_2 = input_var[:, :, :int(args.test_size / 3), (img_w_list[cnt] - args.test_size):]\n input_var_3 = input_var[:, :, (img_h_list[cnt] - int(args.test_size / 3)):, :args.test_size]\n input_var_4 = input_var[:, :, (img_h_list[cnt] - int(args.test_size / 3)):, (img_w_list[cnt] - args.test_size):]\n\n # compute output\n output_1 = model(input_var_1)\n output_2 = model(input_var_2)\n output_3 = model(input_var_3)\n output_4 = model(input_var_4)\n\n # measure accuracy and record loss\n\n pred_1 = output_1.data.cpu().numpy()#.transpose(0, 2, 3, 1)\n pred_2 = output_2.data.cpu().numpy()#.transpose(0, 2, 3, 1)\n pred_3 = output_3.data.cpu().numpy()#.transpose(0, 2, 3, 1)\n pred_4 = output_4.data.cpu().numpy()#.transpose(0, 2, 3, 1)\n\n pred = np.zeros((args.batch_size, 37, img_h_list[cnt], img_w_list[cnt]))\n pred[:, :, :int(args.test_size / 3), :args.test_size] += pred_1\n pred[:, :, :int(args.test_size / 3), (img_w_list[cnt] - args.test_size):] += pred_2\n pred[:, :, (img_h_list[cnt] - int(args.test_size / 3)):, :args.test_size] += pred_3\n pred[:, :, (img_h_list[cnt] - int(args.test_size / 3)):, (img_w_list[cnt] - args.test_size):] += pred_4\n\n return pred\n\n\ndef validate(val_loader, model, criterion, iter, evaluator, logger=None):\n\n batch_time = AverageMeter()\n losses = AverageMeter()\n IoU = AverageMeter()\n mIoU = 0\n val_img_list = []\n img_w_list = [1692, 1861, 1624, 2030] #[1692, 1861, 1624, 1590, 2030]\n img_h_list = [505, 556, 485, 606] #[505, 556, 485, 475, 606]\n #with open('/home/houyuenan/remote/ApolloScapes/list/test_img.txt', 'r') as f:\n # for line in f.readlines():\n # val_img_list.append(line.strip().split(' ')[0])\n # switch to evaluate mode\n model.eval()\n end = time.time()\n \n for i, (input, input_2, input_3, input_4, img_name) in enumerate(val_loader): #, input_5\n # target = target.cuda(async=True)\n img_scale_dict = {'0':input, '1':input_2, '2':input_3, '3':input_4} #, '4':input_5}\n if i == 0:\n freq_mat = np.zeros((img_h_list[0], img_w_list[0]))\n freq_mat[:int(args.test_size / 3), :args.test_size] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat[:int(args.test_size / 3), (img_w_list[0] - args.test_size):] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat[(img_h_list[0] - int(args.test_size / 3)):, :args.test_size] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat[(img_h_list[0] - int(args.test_size / 3)):, (img_w_list[0] - args.test_size):] += np.ones((int(args.test_size / 3), args.test_size))\n\n freq_mat_1 = np.zeros((img_h_list[1], img_w_list[1]))\n freq_mat_1[:int(args.test_size / 3), :args.test_size] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat_1[:int(args.test_size / 3), (img_w_list[1] - args.test_size):] += np.ones((int(args.test_size / 3), args.test_size))\n 
freq_mat_1[(img_h_list[1] - int(args.test_size / 3)):, :args.test_size] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat_1[(img_h_list[1] - int(args.test_size / 3)):, (img_w_list[1] - args.test_size):] += np.ones((int(args.test_size / 3), args.test_size))\n\n freq_mat_2 = np.zeros((img_h_list[2], img_w_list[2]))\n freq_mat_2[:int(args.test_size / 3), :args.test_size] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat_2[:int(args.test_size / 3), (img_w_list[2] - args.test_size):] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat_2[(img_h_list[2] - int(args.test_size / 3)):, :args.test_size] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat_2[(img_h_list[2] - int(args.test_size / 3)):, (img_w_list[2] - args.test_size):] += np.ones((int(args.test_size / 3), args.test_size))\n\n freq_mat_3 = np.zeros((img_h_list[3], img_w_list[3]))\n freq_mat_3[:int(args.test_size / 3), :args.test_size] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat_3[:int(args.test_size / 3), (img_w_list[3] - args.test_size):] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat_3[(img_h_list[3] - int(args.test_size / 3)):, :args.test_size] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat_3[(img_h_list[3] - int(args.test_size / 3)):, (img_w_list[3] - args.test_size):] += np.ones((int(args.test_size / 3), args.test_size))\n\n '''freq_mat_4 = np.zeros((img_h_list[4], img_w_list[4]))\n freq_mat_4[:int(args.test_size / 3), :args.test_size] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat_4[:int(args.test_size / 3), (img_w_list[4] - args.test_size):] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat_4[(img_h_list[4] - int(args.test_size / 3)):, :args.test_size] += np.ones((int(args.test_size / 3), args.test_size))\n freq_mat_4[(img_h_list[4] - int(args.test_size / 3)):, (img_w_list[4] - args.test_size):] += np.ones((int(args.test_size / 3), args.test_size))'''\n freq_scale_dict = {'0':freq_mat, '1':freq_mat_1, '2':freq_mat_2, '3':freq_mat_3} #, '4':freq_mat_4}\n pred_final = np.zeros((args.batch_size, 37, img_h_list[0], img_w_list[0]))\n for cnt in range(4):#5\n pred = cal_model_output(model, img_scale_dict, cnt, img_h_list, img_w_list)\n pred = pred / freq_scale_dict[str(cnt)]\n # print(pred.shape)\n if cnt > 0:\n for num in range(args.batch_size):\n pred_copy = cv2.resize(pred[num].transpose(1, 2, 0), dsize=(img_w_list[0], img_h_list[0]), interpolation=cv2.INTER_LINEAR)\n # pred_copy = np.expand_dims(pred_copy, axis=0)\n pred_copy = pred_copy.transpose(2, 0, 1)\n pred_final[num] += pred_copy\n pred = pred_final / 4.0 #5.0\n pred = pred.transpose(0, 2, 3, 1)\n\n pred = np.argmax(pred, axis=3).astype(np.uint8)\n pred = pred + 1\n for cnt in range(len(img_name)):\n np.save('road05_tmp/' + img_name[cnt].split('/')[5].replace('jpg', 'npy'), pred[cnt]) #split('/')[5]\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if (i + 1) % args.print_freq == 0:\n print(('Test: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'.format(i, len(val_loader), batch_time=batch_time)))\n\n print('finished, #test:{}'.format(i))\n return mIoU\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = None\n self.avg = None\n self.sum = None\n self.count = None\n\n def update(self, val, n=1):\n if self.val is None:\n self.val = val\n self.sum = val * n\n 
self.count = n\n self.avg = self.sum / self.count\n else:\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass EvalSegmentation(object):\n def __init__(self, num_class, ignore_label=None):\n self.num_class = num_class\n self.ignore_label = ignore_label\n\n def __call__(self, pred, gt):\n assert (pred.shape == gt.shape)\n gt = gt.flatten().astype(int)\n pred = pred.flatten().astype(int)\n locs = (gt != self.ignore_label)\n sumim = gt + pred * self.num_class\n hs = np.bincount(sumim[locs], minlength=self.num_class**2).reshape(self.num_class, self.num_class)\n return hs\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test_pspnet_multi_scale.py","file_name":"test_pspnet_multi_scale.py","file_ext":"py","file_size_in_byte":12080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"424796277","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ilp - a tag based file indexer\n#\n# Author: slowpoke \n#\n# This program is Free Software under the non-terms\n# of the Anti-License. Do whatever the fuck you want.\n\nimport os\nimport subprocess\n\nfrom distutils.util import strtobool\n\nimport plac\n\nfrom .. import database\nfrom ..utils import file\nfrom ..utils import funcset\nfrom ..utils import check_program\n\nfrom ..index import Index\n\n\nclass ILP:\n\n \"\"\"A tag based file indexer.\"\"\"\n\n commands = (\n \"index\",\n \"forget\",\n \"tag\",\n \"untag\",\n \"deltag\",\n \"list\",\n \"info\",\n \"search\",\n \"clear\",\n \"show\")\n\n def __init__(self):\n home = os.getenv(\"HOME\")\n self._confdir = os.path.join(home, \".ilp\")\n if not file.exists(self._confdir):\n os.mkdir(self._confdir)\n self._dbfile = os.path.join(self._confdir, \"database\")\n self._index = Index()\n\n def __enter__(self):\n self._database = database.ShelveDB(self._dbfile)\n if \"index\" in self._database:\n self._index = self._database.retrieve(\"index\")\n else:\n self._index = Index()\n self._database.store(\"index\", self._index)\n return self\n\n def __exit__(self, etype, exc, tb):\n self._database.update(\"index\", self._index)\n #self._database.close()\n pass\n\n def index(self,\n recursive: (\"recursively add files below a given directory\",\n \"flag\",\n \"r\"),\n *paths: \"files or directories to add to the index\"):\n for path in paths:\n if file.isdir(path):\n if not recursive:\n yield \"Is a directory: {}\".format(path)\n yield \"(To recursively add files to the index, pass -r)\"\n else:\n for subpath in file.walk(path):\n for output in self.index(False, subpath):\n yield output\n continue\n full_path = os.path.abspath(path)\n hashstring = None\n try:\n hashstring = file.hash(full_path)\n except Exception as e:\n yield \"Couldn't read file: {}\".format(full_path, e)\n continue\n self._index = self._index.add_file(full_path, hashstring)\n yield \"Adding to the index: {}\".format(path)\n\n def forget(self,\n recursive: (\"recursively remove files below a given directory\",\n \"flag\",\n \"r\"),\n *paths: \"files or directories to remove from the index\"):\n for path in paths:\n if file.isdir(path):\n if not recursive:\n yield \"Is a directory: {}\".format(path)\n yield \"(To recursively remove files from the index, pass -r)\"\n else:\n for subpath in file.walk(path):\n for output in self.forget(False, subpath):\n yield output\n continue\n full_path = os.path.abspath(path)\n if full_path in self._index.files:\n self._index = self._index.remove_file(full_path)\n yield \"Removing from the index: 
{}\".format(path)\n else:\n yield \"{} doesn't exist in the index.\".format(path)\n\n def tag(self,\n path: \"file or directory to apply tags to\",\n recursive: (\"recursively add tags to all files below a given directory\",\n \"flag\",\n \"r\"),\n *tags: \"tags to add to the file or directory\"):\n full_path = os.path.abspath(path)\n if file.isdir(full_path):\n if recursive:\n for subpath in file.walk(path):\n for output in self.tag(subpath, False, *tags):\n yield output\n else:\n yield \"Is a directory: {}\".format(path)\n yield \"(To recursively tag files in a directory, pass -r)\"\n else:\n for tag_name in tags:\n if not tag_name in self._index.tags:\n self._index = self._index.add_tag(tag_name)\n yield \"New tag added: {}\".format(tag_name)\n if not self._index.file_has_tag(full_path, tag_name):\n self._index = self._index.tag_file(full_path, tag_name)\n yield \"{} → {}\".format(tag_name, path)\n else:\n yield \"{} is already tagging {}\".format(tag_name, path)\n\n def untag(self,\n path: \"file or directory to remove tags from\",\n recursive: (\"recursively remove tags from all files below a given directory\",\n \"flag\",\n \"r\"),\n *tags: \"tags to remove from the file or directory\"):\n full_path = os.path.abspath(path)\n if file.isdir(full_path):\n if recursive:\n for subpath in file.walk(path):\n for output in self.untag(subpath, False, *tags):\n yield output\n else:\n yield \"Is a directory: {}\".format(path)\n yield \"(To recursively untag files in a directory, pass -r)\"\n else:\n for tag_name in tags:\n if not tag_name in self._index.tags:\n yield \"Tag doesn't exist: {}\".format(tag_name)\n if self._index.file_has_tag(full_path, tag_name):\n self._index = self._index.untag_file(full_path, tag_name)\n yield \"Removed {} from {}\".format(tag_name, path)\n else:\n yield \"{} isn't tagging {}\".format(tag_name, path)\n\n def deltag(self,\n *tag_names: \"tags to delete\"):\n for tag_name in tag_names:\n if not tag_name in self._index.tags:\n yield \"Tag not in the index: {}\".format(tag_name)\n else:\n self._index = self._index.remove_tag(tag_name)\n yield \"Deleted {}\".format(tag_name)\n\n def list(self,\n what: (\"items to list\",\n \"positional\",\n None,\n None,\n (\"files\", \"tags\"),\n None)):\n if what == \"files\":\n for path in self._index.files:\n yield path\n elif what == \"tags\":\n for tag_name in self._index.tags:\n yield tag_name\n else:\n yield \"Can't list {}.\".format(what)\n\n def info(self,\n item: \"file or tag to get information about\",\n tag: (\"get information on tags\",\n \"flag\",\n \"t\")):\n # for better code readability and less accidental collisions\n is_tag = tag\n # get all tags tagging the given path\n if not is_tag:\n path = item\n full_path = os.path.abspath(path)\n if not full_path in self._index.files:\n yield \"File not in the index: {}\".format(path)\n else:\n for tag_name in self._index.tags_of_file(full_path):\n yield(tag_name)\n # get all paths this tag is tagging\n else:\n tag_name = item\n if not tag_name in self._index.tags:\n yield \"Tag not in the index: {}\".format(tag_name)\n else:\n tag = self._index.tags.get(tag_name)\n for hashstring in tag:\n for path in self._index.hashes.get(hashstring):\n yield(path)\n\n def search(self,\n *query: \"the search query (see ILP(1) for info about syntax)\"):\n try:\n result = self._build_search_result(\n funcset(self._index.hashes), \"and\", query)\n except KeyError as e:\n yield e.args[0]\n return\n for hashstring in result:\n try:\n for path in self._index.hashes.get(hashstring):\n yield 
path\n except KeyError:\n yield \"Unknown hash: {}\".format(hashstring)\n\n def _build_search_result(self, result, operator, tail):\n if len(tail) == 0:\n return result\n else:\n item, tail = tail[0], tail[1:]\n if item in (\"and\", \"or\", \"not\", \"xor\"):\n operator = item\n return self._build_search_result(result, operator, tail)\n else:\n if item not in self._index.tags:\n raise KeyError(\"Tag not in index: {}\".format(item))\n operation = {\n \"or\": result.__or__,\n \"and\": result.__and__,\n \"not\": result.__sub__,\n \"xor\": result.__xor__}[operator]\n tag = self._index.tags.get(item)\n result = operation(tag)\n return self._build_search_result(result, operator, tail)\n\n def clear(self,\n yes: (\"Confirm deletion\",\n \"flag\",\n \"y\")):\n \"\"\"Clear the database.\"\"\"\n if not yes:\n yield \"This command purges the database.\"\n yield \"You will LOSE ALL DATA!\"\n yield \"If you really want this, pass --yes/-y to this command.\"\n else:\n yield \"Purging the database.\"\n self._index = Index()\n\n def show(self,\n path: \"the file to display\"):\n \"\"\"Try to open and display a file using xdg-open.\"\"\"\n if not check_program(\"xdg-open\"):\n yield \"xdg-open is not available, is xdg-utils installed?\"\n else:\n subprocess.call([\"xdg-open\", path])\n\n\ndef main():\n plac.Interpreter.call(ILP, prompt=\"ilp> \")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ilp/cmdline/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"110569621","text":"from scipy import stats\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nx1 = np.array([-7, -5, 1, 4, 5], dtype=float)\nkde1 = stats.gaussian_kde(x1)\nkde2 = stats.gaussian_kde(x1, bw_method='silverman')\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nax.plot(x1, np.zeros(x1.shape), 'b+', ms=20) # rug plot\nx_eval = np.linspace(-10, 10, num=200)\nax.plot(x_eval, kde1(x_eval), 'k-', label=\"Scott's Rule\")\nax.plot(x_eval, kde2(x_eval), 'r-', label=\"Silverman's Rule\")\n\nplt.show()\n","sub_path":"doc/scipy-html-1.1.0/tutorial/stats-1.py","file_name":"stats-1.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"240722714","text":"import json\nimport os\nimport sys\nimport git\nimport requests\nfrom pathlib import Path\n\ndef main_scraper():\n out_path = \"/data/s1/haritz/scraped\"\n with open('links/links-between-papers-and-code.json') as f:\n data = json.load(f)\n for i in range(len(data)):\n url = data[i]['repo_url']\n aid = data[i]['paper_arxiv_id']\n yymm = '1902'\n if aid is not None and aid.startswith(yymm):\n path = os.path.join(out_path, \"repos\", aid)\n if not os.path.isdir(path):\n print(f\"Cloning {url} into {path}\")\n try:\n git.Repo.clone_from(url, path)\n except git.exc.GitError:\n print(f'ERROR! {url} does not exist')\n else:\n print(f\"Folder with repo {url} already exists. Skipping.\")\n tex_path = os.path.join(out_path, f\"{yymm} pwc\", aid)\n if not os.path.isdir(tex_path):\n os.makedirs(tex_path)\n print(f\"Downloading {aid} from arXiv\")\n a_url = f\"https://arxiv.org/e-print/{aid}\"\n r = requests.get(a_url, allow_redirects=True)\n open(f'{tex_path}/{aid}', 'wb').write(r.content)\n else:\n print(f\"Folder with paper {aid} already exists. 
Skipping.\")\n\nif __name__==\"__main__\":\n main_scraper()\n","sub_path":"scraper_standalone.py","file_name":"scraper_standalone.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"602176588","text":"\"\"\"\nMethods for downloading movie data from http://omdbapi.com.\n\"\"\"\n\nimport json\n\n# Python 3\n# import http.client\n# from urllib.parse import quote\n\n# Python 2.7\nimport httplib\nfrom urllib import quote\n\n\ndef get_movie_data(title):\n\n \"\"\"Downloads movie data from imdbapi.com.\n Returns a dictionary with the movie's data.\"\"\"\n\n # Create a connection object that will talk to the server.\n server = \"www.omdbapi.com\"\n\n # connection = http.client.HTTPConnection(server) # Python 3\n connection = httplib.HTTPConnection(server) # Python 2.7\n\n # Quote the string to make it safe for use in a URL\n url_encoded_title = quote(title)\n\n # Dictionary with the request parameters.\n query_parameters = {\n 'r': 'JSON', # Asking for in result to me in JSON format\n 't': url_encoded_title\n }\n\n # Use list comprehension to build a list the key values pairs as strings: ['k0=v0', 'k1=v1', 'k2=v2']\n query_items = [k + \"=\" + v for k, v in query_parameters.items()]\n\n # Start a query string and join all the query items with '&' between item.\n query = \"?\" + \"&\".join(query_items)\n\n # Configure the request with the HTTP command and the url.\n path = \"/\"\n url = path + query\n connection.request(\"GET\", url)\n\n # Make the call to the server\n response = connection.getresponse()\n\n raw_bytes = bytes()\n movie_info = None\n\n # Read the response data only if the status is good.\n if response.status == 200:\n\n # Read the bytes from the response convert it to a unicode string.\n raw_bytes = response.read()\n data_string = raw_bytes.decode(\"utf-8\")\n\n # The data_string is a string in JSON format.\n # Convert the string to a dictionary using the loads method from the json module.\n movie_info = json.loads(data_string)\n\n else:\n print(\"Error. 
Status: {0} Message: {1}\".format(response.status, response.msg))\n\n return movie_info\n\n\nif __name__ == \"__main__\":\n\n data = get_movie_data(\"The Matrix\")\n\n for key, value in data.items():\n print(\"{0}: {1}\".format(key, value))\n\n","sub_path":"12_MovieAppOnAppEngine/movie_app/movie_client.py","file_name":"movie_client.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"217998996","text":"\"\"\"\nGiven a non-negative integer n, count all numbers with unique digits, x, where 0 ≤ x < 10^n.\n\nExample:\n\nInput: 2\nOutput: 91 \nExplanation: The answer should be the total numbers in the range of 0 ≤ x < 100, \n excluding 11,22,33,44,55,66,77,88,99\n\"\"\"\n# time complexity -- O(N)\n# space complexity -- O(1)\n# Runtime: 32 ms, faster than 88.89% of Python3 online submissions for Count Numbers with Unique Digits.\n# Memory Usage: 13.9 MB, less than 50.00% of Python3 online submissions for Count Numbers with Unique Digits.\n\n\n\nclass Solution:\n def countNumbersWithUniqueDigits(self, n: int) -> int:\n if n == 0:\n return 1\n if n == 1:\n return 10\n last_digit = 10\n curr_digit = 9 * 9 \n cnt = last_digit + curr_digit\n for i in range(3, n+1):\n last_digit = curr_digit\n curr_digit = last_digit * (9-i+2)\n cnt += curr_digit\n return cnt\n \n \n\n\n","sub_path":"Widen/LC357_Count_Numbers_with_Unique_Digits.py","file_name":"LC357_Count_Numbers_with_Unique_Digits.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"53245832","text":"import time\n\nepoch_origin = time.time()\nprint(\"\\tTime since epoch_origin: {}\".format(epoch_origin))\nprint(\"Local time from epoch_origin: {}\".format(time.ctime(epoch_origin)))\n\nst_local = time.localtime(epoch_origin)\nepoch_regenerate = time.mktime(st_local)\nprint(\"\\tTime since epoch_regenerate, with seconds resolution: {}\".format(epoch_regenerate))\n\nprint(\"Local datetime with direct access: {}/{}/{} {}:{}:{}\".format(\n st_local.tm_year,\n st_local.tm_mon,\n st_local.tm_mday,\n st_local.tm_hour,\n st_local.tm_min,\n st_local.tm_sec))\n\nst_utc = time.gmtime(epoch_origin)\nprint(\"UTC datetime with direct access: {}/{}/{} {}:{}:{}\".format(\n st_utc.tm_year,\n st_utc.tm_mon,\n st_utc.tm_mday,\n st_utc.tm_hour,\n st_utc.tm_min,\n st_utc.tm_sec))\n\nfmt = \"It's %A, %B %d, %Y, local time %I:%M:%S%p\"\nt = time.localtime(time.time())\nprint(time.strftime(fmt, t))\n\nfmt2 = \"%Y-%m-%d\"\nst_parsed = time.strptime(\"2019-03-21\", fmt2)\nprint(\"Parsed struct_time with direct access: {}/{}/{}\".format(\n st_parsed.tm_year,\n st_parsed.tm_mon,\n st_parsed.tm_mday))\n","sub_path":"10system/py_time.py","file_name":"py_time.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"230942873","text":"inputfile = open(\"cholera_DNA.txt\", \"r\")\ndna = inputfile.read().upper()\n# print(dna)\n\nlength = len(dna)\nprint(\"length of dna:\", length)\n\n\ndef PatternCount( patt, text ):\n i = 0\n count = 0\n n = len(patt)\n while i < len(text)+1-n:\n if text[i:i+n] == patt:\n count += 1\n i += 1\n return count\npattern = \"TGATCA\"\nprint( \"Searching for pattern\", pattern )\nres = PatternCount( pattern, dna )\nprint( \"... 
found\",res,\"times\" )\n\n\ndef CountDict(Text, k):\n count = {}\n for i in range(len(Text)-k+1):\n Pattern = Text[i:i+k]\n count[i] = PatternCount(Pattern, Text)\n return count\n\n\ndef FrequentWords(Text, k):\n FrequentPatterns = []\n Count = CountDict(Text, k)\n m = max(Count.values())\n for i in Count:\n if Count[i] == m:\n FrequentPatterns.append(Text[i:i+k])\n return FrequentPatterns\n\ndef RemoveDuplicates(Items):\n ItemsNoDuplicates = [] # output variable\n ItemsNoDuplicates = list(set(Items))\n return ItemsNoDuplicates\n\n","sub_path":"toy examples/dna/cholera.py","file_name":"cholera.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"590596596","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Stolen from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow\nclass DeepQNetwork(object):\n def __init__(\n self,\n n_actions,\n n_features,\n learning_rate=0.01,\n reward_decay=0.9,\n e_greedy=0.9,\n replace_target_iter=300,\n memory_size=500,\n batch_size=32,\n e_greedy_increment=None,\n output_graph=False,\n ):\n self.n_actions = n_actions\n self.n_features = n_features\n self.lr = learning_rate\n self.gamma = reward_decay\n self.epsilon_max = e_greedy\n self.replace_target_iter = replace_target_iter\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.epsilon_increment = e_greedy_increment\n self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max\n\n # total learning step\n self.learn_step_counter = 0\n\n # initialize zero memory [s, a, r, s_]\n self.memory = np.zeros((self.memory_size, n_features * 2 + 2))\n\n # consist of [target_net, evaluate_net]\n self._build_net()\n\n t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_net')\n e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='eval_net')\n\n\n with tf.variable_scope('soft_replacement'):\n self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]\n\n self.sess = tf.Session()\n\n if output_graph:\n # $ tensorboard --logdir=logs\n tf.summary.FileWriter(\"logs/\", self.sess.graph)\n\n self.sess.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver()\n self.cost_his = []\n self.reward = []\n self.memory_counter = 0\n\n def _build_net(self):\n # ------------------ all inputs ------------------------\n self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # input State\n self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # input Next State\n self.r = tf.placeholder(tf.float32, [None, ], name='r') # input Reward\n self.a = tf.placeholder(tf.int32, [None, ], name='a') # input Action\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n w_initializer, b_initializer = tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)\n\n # ------------------ build evaluate_net ------------------\n with tf.variable_scope('eval_net'):\n e_z1 = tf.layers.dense(self.s, 6, activation=None, kernel_initializer=w_initializer, bias_initializer=b_initializer, name='e1')\n e_bn1 = tf.layers.batch_normalization(e_z1, training=True)\n e_a1 = tf.nn.relu(e_bn1)\n ### output layer\n self.q_eval = tf.layers.dense(e_a1, self.n_actions, activation=tf.nn.relu, kernel_initializer=w_initializer,\n bias_initializer=b_initializer, name='q')\n\n # ------------------ build target_net ------------------\n\n with tf.variable_scope('target_net'):\n t_z1 = 
tf.layers.dense(self.s_, 6, activation=None, kernel_initializer=w_initializer, bias_initializer=b_initializer, name='t1')\n t_bn1 = tf.layers.batch_normalization(t_z1, training=True)\n t_a1 = tf.nn.relu(t_bn1)\n ### output layer\n self.q_next = tf.layers.dense(t_a1, self.n_actions, activation=tf.nn.relu, kernel_initializer=w_initializer,\n bias_initializer=b_initializer, name='t2')\n\n with tf.variable_scope('q_target'):\n q_target = self.r + self.gamma * tf.reduce_max(self.q_next, axis=1, name='Qmax_s_') # shape=(None, )\n self.q_target = tf.stop_gradient(q_target)\n with tf.variable_scope('q_eval'):\n a_indices = tf.stack([tf.range(tf.shape(self.a)[0], dtype=tf.int32), self.a], axis=1)\n self.q_eval_wrt_a = tf.gather_nd(params=self.q_eval, indices=a_indices) # shape=(None, )\n with tf.variable_scope('loss'):\n self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval_wrt_a, name='TD_error'))\n with tf.variable_scope('train'):\n with tf.control_dependencies(update_ops):\n self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)\n\n def store_transition(self, s, a, r, s_):\n if not hasattr(self, 'memory_counter'):\n self.memory_counter = 0\n\n self.reward.append(r)\n # transform a and r into 1D array\n transition = np.hstack((s, [a], [r], s_))\n # replace the old memory with new memory\n index = self.memory_counter % self.memory_size\n self.memory[index, :] = transition\n self.memory_counter += 1\n\n def choose_action(self, observation, disabled_actions):\n # to have batch dimension when feed into tf placeholder\n observation = observation[np.newaxis, :]\n\n if np.random.uniform() < self.epsilon:\n # forward feed the observation and get q value for every actions\n actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})\n\n # assign to 0 since 0 is the min of relu\n for i in range(0, len(disabled_actions)):\n actions_value[0][disabled_actions[i]] = 0 # may have to change\n\n action = np.argmax(actions_value)\n else:\n action = np.random.randint(0, self.n_actions)\n return action\n\n def learn(self):\n # check to replace target parameters\n if self.learn_step_counter % self.replace_target_iter == 0:\n self.sess.run(self.target_replace_op)\n print('\\ntarget_params_replaced\\n')\n\n # sample batch memory from all memory\n if self.memory_counter > self.memory_size:\n sample_index = np.random.choice(self.memory_size, size=self.batch_size)\n else:\n sample_index = np.random.choice(self.memory_counter, size=self.batch_size)\n\n batch_memory = self.memory[sample_index, :]\n\n _, cost = self.sess.run(\n [self._train_op, self.loss],\n feed_dict={\n self.s: batch_memory[:, :self.n_features],\n self.a: batch_memory[:, self.n_features],\n self.r: batch_memory[:, self.n_features + 1],\n self.s_: batch_memory[:, -self.n_features:],\n })\n\n self.cost_his.append(cost)\n\n # increasing epsilon\n self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max\n self.learn_step_counter += 1\n\n def plot_reward(self, path, save):\n plt.plot(np.arange(len(self.reward)), self.reward)\n plt.ylabel('Reward')\n plt.xlabel('training steps')\n if save:\n plt.savefig(path + '/reward.png')\n plt.show()\n\n def plot_cost(self, path, save):\n plt.plot(np.arange(len(self.cost_his)), self.cost_his)\n plt.ylabel('Cost')\n plt.xlabel('training steps')\n if save:\n plt.savefig(path + '/cost.png')\n plt.show()\n\n def save_model(self, path, count):\n self.saver.save(self.sess, path + '/model.pkl', count)\n\n def load_model(self, 
path):\r\n ckpt = tf.train.get_checkpoint_state(path)\r\n self.saver.restore(self.sess, ckpt.model_checkpoint_path)\r\n return int(ckpt.model_checkpoint_path.split('-')[-1])\r\n","sub_path":"algorithms/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":7592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"127693984","text":"import sys\ninput = sys.stdin.readline\n\n\ndef check(m):\n hp = m\n atk = H_atk\n\n for t, a, h in rooms:\n if t == 1:\n if (h-1)//atk > (hp-1)//a:\n return False\n else:\n hp -= (h-1)//atk * a\n else:\n atk += a\n hp = min(hp + h, m)\n\n return True\n \n\nn, H_atk = map(int, input().split())\n\nrooms = [list(map(int, input().split())) for x in range(n)]\n\nleft = 1\nright = 999999000001 * n\n\nwhile left <= right:\n mid = (left + right)//2\n\n if check(mid):\n right = mid - 1\n else:\n left = mid + 1\n\nprint(left)\n","sub_path":"Hangil/day07_16434_choi.py","file_name":"day07_16434_choi.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"416140499","text":"from django.core import exceptions\nfrom django.http import FileResponse\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.parsers import MultiPartParser\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom shared.audit_log.viewsets import AuditLoggingModelViewSet\n\nfrom applications.api.v1.permissions import (\n ALLOWED_APPLICATION_UPDATE_STATUSES,\n ALLOWED_APPLICATION_VIEW_STATUSES,\n ApplicationPermission,\n get_user_company,\n StaffPermission,\n SummerVoucherPermission,\n)\nfrom applications.api.v1.serializers import (\n ApplicationSerializer,\n AttachmentSerializer,\n SummerVoucherSerializer,\n)\nfrom applications.enums import ApplicationStatus\nfrom applications.models import Application, SummerVoucher\n\n\nclass ApplicationViewSet(AuditLoggingModelViewSet):\n queryset = Application.objects.all()\n serializer_class = ApplicationSerializer\n permission_classes = [IsAuthenticated, ApplicationPermission]\n\n def get_queryset(self):\n \"\"\"\n Fetch all DRAFT status applications of the user & company.\n Should include only 1 application since we don't allow creation of multiple\n DRAFT applications per user & company.\n \"\"\"\n queryset = (\n super()\n .get_queryset()\n .select_related(\"company\")\n .prefetch_related(\"summer_vouchers\")\n )\n\n user = self.request.user\n if user.is_anonymous:\n return queryset.none()\n\n user_company = get_user_company(self.request)\n\n return queryset.filter(\n company=user_company,\n user=user,\n status__in=ALLOWED_APPLICATION_VIEW_STATUSES,\n )\n\n def create(self, request, *args, **kwargs):\n \"\"\"\n Allow only 1 (DRAFT) application per user & company.\n \"\"\"\n if self.get_queryset().filter(status=ApplicationStatus.DRAFT).exists():\n raise ValidationError(\"Company & user can have only one draft application\")\n return super().create(request, *args, **kwargs)\n\n def update(self, request, *args, **kwargs):\n \"\"\"\n Allow updating only DRAFT status applications.\n \"\"\"\n instance = self.get_object()\n if instance.status not in ALLOWED_APPLICATION_UPDATE_STATUSES:\n raise ValidationError(\"Only DRAFT applications can be updated\")\n return 
super().update(request, *args, **kwargs)\n\n def destroy(self, request, *args, **kwargs):\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\nclass SummerVoucherViewSet(AuditLoggingModelViewSet):\n queryset = SummerVoucher.objects.all()\n serializer_class = SummerVoucherSerializer\n permission_classes = [IsAuthenticated, StaffPermission | SummerVoucherPermission]\n\n def get_queryset(self):\n \"\"\"\n Fetch summer vouchers of DRAFT status applications of the user & company.\n \"\"\"\n queryset = (\n super()\n .get_queryset()\n .select_related(\"application\")\n .prefetch_related(\"attachments\")\n )\n\n user = self.request.user\n if user.is_staff:\n return queryset\n elif user.is_anonymous:\n return queryset.none()\n\n user_company = get_user_company(self.request)\n\n return queryset.filter(\n application__company=user_company,\n application__user=user,\n application__status__in=ALLOWED_APPLICATION_VIEW_STATUSES,\n )\n\n def create(self, request, *args, **kwargs):\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def update(self, request, *args, **kwargs):\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def retrieve(self, request, *args, **kwargs):\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def list(self, request, *args, **kwargs):\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def destroy(self, request, *args, **kwargs):\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n @action(\n methods=(\"POST\",),\n detail=True,\n url_path=\"attachments\",\n parser_classes=(MultiPartParser,),\n )\n def post_attachment(self, request, *args, **kwargs):\n \"\"\"\n Upload a single file as attachment\n \"\"\"\n obj = self.get_object()\n\n if obj.application.status not in ALLOWED_APPLICATION_UPDATE_STATUSES:\n raise ValidationError(\n \"Attachments can be uploaded only for DRAFT applications\"\n )\n\n # Validate request data\n serializer = AttachmentSerializer(\n data={\n \"summer_voucher\": obj.id,\n \"attachment_file\": request.data[\"attachment_file\"],\n \"content_type\": request.data[\"attachment_file\"].content_type,\n \"attachment_type\": request.data[\"attachment_type\"],\n }\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n @action(\n methods=(\n \"GET\",\n \"DELETE\",\n ),\n detail=True,\n url_path=\"attachments/(?P[^/.]+)\",\n )\n def handle_attachment(self, request, attachment_pk, *args, **kwargs):\n obj = self.get_object()\n\n if request.method == \"GET\":\n \"\"\"\n Read a single attachment as file\n \"\"\"\n attachment = obj.attachments.filter(pk=attachment_pk).first()\n if not attachment or not attachment.attachment_file:\n return Response(\n {\n \"detail\": format_lazy(\n _(\"File not found.\"),\n )\n },\n status=status.HTTP_404_NOT_FOUND,\n )\n return FileResponse(attachment.attachment_file)\n\n elif request.method == \"DELETE\":\n \"\"\"\n Delete a single attachment as file\n \"\"\"\n if obj.application.status not in ALLOWED_APPLICATION_UPDATE_STATUSES:\n raise ValidationError(\n \"Attachments can be deleted only for DRAFT applications\"\n )\n\n if (\n obj.application.status\n not in AttachmentSerializer.ATTACHMENT_MODIFICATION_ALLOWED_STATUSES\n ):\n return Response(\n {\"detail\": _(\"Operation not allowed for this application status.\")},\n status=status.HTTP_403_FORBIDDEN,\n )\n try:\n instance = obj.attachments.get(id=attachment_pk)\n except exceptions.ObjectDoesNotExist:\n return Response(\n 
{\"detail\": _(\"File not found.\")}, status=status.HTTP_404_NOT_FOUND\n )\n instance.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n","sub_path":"backend/kesaseteli/applications/api/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"187040625","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"Matplotlib Example Program 1: Plotting Data Series.\"\"\"\n\nimport matplotlib.pyplot as plt\n\n\n# Fixed Data.\nxvals = [1, 2, 3, 4, 5]\nyvals = [1, 2, 1, 2, 1]\n\nplt.plot(xvals, yvals)\nplt.title(\"Series 1\")\nplt.show()\n\n# Series Data using Range().\nxvals = [val for val in range(10)]\nyvals = [x*2 for x in xvals]\nplt.plot(xvals, yvals, 'bo-')\nplt.title(\"Series 2\")\nplt.xlabel(\"* Note that range(10) returns values from 0 to 9.\")\nplt.show()\n","sub_path":"02-matplotlib/01_series.py","file_name":"01_series.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"149015657","text":"import csv\nimport pandas as pd\nimport numpy as np \nimport matplotlib.pyplot as plt\ndef write_data(file_name,row):\n with open(file_name, \"w\") as f:\n writer = csv.writer(f)\n writer.writerow([\"Student\",\"Maths\", \"Computers\",\"Physics\",\"Chemistry\"]) #Takes the Subjest names and saves in first row \n for i in range(1,row+1):\n student=input(\"Enter the name of the \"+str(i)+\" student \")\n maths=int(input(\"enter the maths marks of \"+student+\" \"))\n computers=int(input(\"enter the computers marks of \"+student+\" \"))\n physics=int(input(\"enter the physics marks of \"+student+\" \"))\n chemistry=int(input(\"enter the chemistry marks of \"+student+\" \"))\n writer.writerows([[student,maths,computers,physics,chemistry]])\n\n\n\ndef display_data(file_name,n):\n df=pd.read_csv(file_name) #python -m pip install pandas # raeding .csv file from pandas\n print(df) #printing the file\n # code for bargraph\n names = df['Student'].values\n #print(names)\n x = np.arange(len(names)) #same as [0,1,2_,_,_] for x axis from which bar starts\n w = 0.2\n bar1=[i+w for i in x]\n bar2=[i+w for i in bar1]\n bar3=[i+w for i in bar2]\n plt.bar(x, df['Maths'].values, width=w, label='Maths') #syntx plt.bar(x,height,width,bottom,align,data,**keywords)\n plt.bar(bar1, df['Computers'].values, width=w, label='Computers')\n plt.bar(bar2, df['Physics'].values, width=w, label='Physics')\n plt.bar(bar3, df['Chemistry'].values, width=w, label='Chemistry')\n plt.xticks(x+w+0.1, names)\n #plt.ylim([0,100]) # limits y axis upto 100 only\n plt.xlabel('Students')\n plt.ylabel('Marks')\n plt.legend(bbox_to_anchor =(0.75, 1.15),ncol=4)\n figure=n+\".png\" # to convert filename into image format\n plt.savefig(figure, bbox_inches=\"tight\") # to save the image of result bar graph\n plt.show() #To show the bar graph\n\n\ndef main():\n n=input(\"enter the file name to save data in .csv format \")\n file_name=n+\".csv\" # convert file name in csv format\n row=int(input(\"enter how many student data you want to enter \")) #how many students data to enter\n write_data(file_name,row)\n display_data(file_name,n)\n \n\n\nif __name__ == \"__main__\":\n main()","sub_path":"project2.py","file_name":"project2.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"334980998","text":"import sys\r\nimport numpy 
as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\r\nfrom PyQt5.QtWidgets import QApplication, QWidget,QFileDialog\r\n\r\n\r\nclass Canvas(FigureCanvas):\r\n\r\n def __init__(self, parent):\r\n fig, self.ax = plt.subplots(figsize=(5, 4), dpi=200)\r\n super().__init__(fig)\r\n self.setParent(parent)\r\n \r\n filename= QFileDialog.getOpenFileName(None, \"Выберите текст...\",\r\n 'C:/', filter=\"All files (*)\")[0]\r\n self.show()\r\n print(filename)\r\n with open(filename,'r',encoding = 'utf-8') as txt_file:\r\n data=txt_file.read().replace('\\n','')\r\n data =data.lower()\r\n print(data)\r\n \r\n letters=list('абвгдеёжзийклмнопрстуфхцчшщъыьэюя')\r\n hist=[]\r\n for letter in letters:\r\n hist.append((data.count(letter)))\r\n \r\n print(hist)\r\n \r\n \r\n \r\n \r\n plt.bar(np.arange(len(hist)),hist)\r\n \r\n\r\n plt.plot()\r\n print(len(hist))\r\n \r\nclass AppDemo(QWidget) :\r\n def __init__(self):\r\n super().__init__()\r\n self.resize(1600, 800)\r\n\r\n chart = Canvas(self)\r\n\r\napp = QApplication(sys.argv) \r\ndemo = AppDemo()\r\ndemo.show()\r\nsys.exit(app.exec_())\r\n \r\n","sub_path":"09_pract_pyqt/pyp2.py","file_name":"pyp2.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"640971716","text":"import requests, json, os, sys, getpass\nimport pprint\nfrom .cmdutil import cmdutil\nfrom .cog import cog\nfrom .llog import llog\nimport time\nimport logging\n\n\nErrcode='blank'\n\nInitEElog = True\n\nDebug = True\n#Debug = False\n\ndef dprint(obj):\n if (Debug):\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(obj)\n\nclass EarthExplorer(object):\n\n def __init__(self, version='1.4.1'):\n global InitEElog\n self.baseurl = 'https://earthexplorer.usgs.gov/inventory/json/v/%s/' % version\n nameV = 'ee'\n self.logger = logging.getLogger(nameV)\n self.logger.setLevel(logging.DEBUG)\n if InitEElog:\n logfile = '/data/log/' + nameV + '.log'\n fh = logging.FileHandler(logfile)\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n self.logger.addHandler(fh)\n InitEElog=False\n\n #self.logger.debug(\"Logging INIT Earth Explorer\")\n #self.logger.debug(\"Logging INIT Earth Explorer Twice\")\n\n\n\n def _api(self, endpoint='login', body=None):\n global Errcode\n #self.logger.debug(endpoint)\n body = {'jsonRequest': json.dumps(body)} if body else {}\n for retry in range(10):\n r = requests.post(self.baseurl+endpoint, data=body)\n r.raise_for_status()\n dat = r.json()\n if dat.get('error'):\n sys.stderr.write(': '.join([dat.get('errorCode'), dat.get('error')]))\n Errcode = ': '.join([dat.get('errorCode'), dat.get('error')])\n self.logger.error(Errcode)\n self.logger.error(\"Retry %s %s\", retry, endpoint)\n else:\n break\n return dat\n\n @classmethod\n def login(cls, username, password=None):\n if password is None:\n #password = getpass.getpass('Password (%s): ' % username)\n password = geters('/data/home/.ers')\n payload = {'username': username, 'password': password}\n return cls()._api('login', payload).get('data')\n\n @classmethod\n def search(cls, **kwargs):\n return cls()._api('search', kwargs).get('data')\n\n @classmethod\n def download(cls, **kwargs):\n return cls()._api('download', kwargs).get('data')\n\n def grid2ll(self, path, row):\n\n ee_request = 
'{\"gridType\":\"WRS2\",\"responseShape\":\"point\",\"path\":\"%s\",\"row\":\"%s\"}' % (path,row)\n dprint (ee_request)\n\n body = 'jsonRequest=%s' % ee_request\n url = self.baseurl+'grid2ll?'+body\n dprint(url)\n r = requests.post(url)\n r.raise_for_status()\n dat = r.json()\n if dat.get('error'):\n sys.stderr.write(': '.join([dat.get('errorCode'), dat.get('error')]))\n return dat\n\n def set_bucket(self, bucket):\n self.bucket = bucket;\n\n\n\nclass DogFetch(object):\n \"\"\" DogFetch class for higher level earth explorer searches \n such as Path, Row, Dataset \"\"\"\n\n def __init__(self, dogName):\n\n self.cogBucket=get_cog_bucket()\n self.redis=get_redis()\n self.data = '/data/tar/'\n cmd1='mkdir -p /data/tar'\n cmd2='mkdir -p /data/log'\n os.system(cmd1)\n os.system(cmd2)\n self.log = llog.Llog(dogName)\n\n def set_defaults(self, data_dir):\n pass\n\n def make_dir_name(self, product):\n dprint(product)\n a = product.split(\"_\")\n pathrow = a[2]\n path = pathrow[0:3]\n row = pathrow[3:7]\n flavor = a[0]\n dir = 'cog/' + path + '/' + row + '/' + flavor + '/'\n dprint(dir)\n return(dir)\n\n def hunt(self, path, row, product):\n\n self.log.logi (\"%s -- PATH %s ROW %s\" % (product, path,row))\n self.gamelist = []\n \n ee=EarthExplorer()\n\n jdata = ee.grid2ll(path, row)\n dprint(jdata)\n\n coord = jdata['data']['coordinates']\n dprint(coord)\n\n c0 = coord[0]\n ll_lat = c0['latitude']\n ll_long = c0['longitude']\n\n\t# just copy these for other corner - cheat\n\n ur_lat = ll_lat\n ur_long = ll_long\n\n spatial = { \"filterType\": \"mbr\",\n \"lowerLeft\": {\n \"latitude\": ll_lat,\n \"longitude\": ll_long\n },\n \"upperRight\": {\n \"latitude\": ur_lat,\n \"longitude\": ll_long\n }\n }\n \n #spatial = '''{ \"filterType\": \"mbr\", \"lowerLeft\": { \"latitude\": %s, \"longitude\": %s }, \"upperRight\": { \"latitude\": %s, \"longitude\": %s } }''' % (ll_lat, ll_long, ur_lat, ur_long)\n\n dprint(spatial)\n\n rcnt=1000\n api_key = EarthExplorer.login(username='tonybutzer')\n scenes = EarthExplorer.search(apiKey=api_key, datasetName=product, spatialFilter=spatial, maxResults=rcnt)\n\n for item in scenes['results']:\n tup = (item['entityId'], item['displayId'])\n self.gamelist.append(tup)\n\n return(self.gamelist)\n\n def get_products(self):\n prod_list = ['LANDSAT_8_C1','LANDSAT_ETM_C1','LANDSAT_TM_C1']\n return(prod_list)\n\n def logi(self, obj):\n self.log.logi(obj)\n\n def fetch(self, entityId, productId, dataset):\n \"\"\" fetch - gets a tar file from eart explorer to /data/tar dir \"\"\"\n global Errcode\n self.log.logi(\"fetch: %s %s %s\" % (entityId, productId, dataset))\n\n ee = EarthExplorer()\n time.sleep(1)\n api_key = ee.login(username='tonybutzer')\n\n junkjson = ee.download(apiKey=api_key, datasetName=dataset, products='STANDARD', entityIds=entityId)\n\n if junkjson == None:\n self.log.logi(Errcode)\n url = Errcode\n else:\n self.log.logd(junkjson[0]['url'])\n url = junkjson[0]['url']\n url = '\\\"' + url + '\\\"'\n\n #self.log.logi(url)\n\n myout = self.data + productId + '.tar.gz'\n cmd = 'wget -q ' + url + ' -O ' + myout\n self.log.logd(cmd)\n os.system(cmd)\n\n def chew(self, bucket, productId):\n \"\"\" chew - untars the file and syncs the fiels to a destination bucket \"\"\"\n dir = self.make_dir_name(product=productId)\n self.log.logi(\"chew: %s %s %s\" % (bucket, dir, productId))\n\n topdir = '/data/' + productId + '/'\n tmpdir = topdir + dir\n\n print(cmdutil.__file__)\n cmdutil.mkdir(tmpdir)\n os.chdir(tmpdir)\n myin = self.data + productId + '.tar.gz'\n 
cmdutil.untarFile(myin)\n\n def nap(self, bucket, productId):\n \"\"\" nap - create a work order to pass on to the lambda function \"\"\"\n # THIS subroutine is defunct\n dir = self.make_dir_name(product=productId)\n\n tmpdir = '/data/exptop/' + dir\n os.chdir(tmpdir)\n listOfFiles = os.listdir('.')\n myDict = {\n 'bucket':bucket,\n 'prefix':dir,\n 'files':listOfFiles\n }\n\n myJson = json.dumps(myDict, indent=4, sort_keys=True)\n print (myJson)\n\n jfn = productId + '.json'\n jfile = open(jfn,'w') \n jfile.write(myJson) \n jfile.close()\n\n tofile = 's3://' + bucket + '/workorder/' + jfn\n fromfile = jfn\n cmdutil.s3get(fromfile,tofile)\n cmdutil.rm(fromfile)\n\n os.chdir('/data')\n cmdutil.rmdir(tmpdir)\n\n\n def cog(self, bucket, productId):\n dir = self.make_dir_name(product=productId)\n self.log.logi(\"cog: %s %s %s\" % (bucket, dir, productId))\n\n\n topdir = '/data/' + productId + '/'\n tmpdir = topdir + dir\n os.chdir(tmpdir)\n\n listOfFiles = os.listdir('.')\n\n for item in listOfFiles:\n fullFile = tmpdir + item\n self.log.logd(\"cog this %s \" % fullFile)\n if fullFile.endswith('.TIF'):\n cog.build_cog(fullFile)\n\n self.log.logi(\"s3sync: %s %s %s\" % (bucket, topdir, productId))\n cmdutil.s3sync(bucket, topdir)\n os.chdir('/data')\n self.log.logi(\"rmdir: %s\" % (topdir))\n cmdutil.rmdir(topdir)\n\n\n\ndef geters(file):\n\n with open(file) as data_file: \n data = json.load(data_file)\n return(data['greeting'])\n\ndef get_redis():\n\n file = '/data/home/dogBreed.json'\n with open(file) as data_file: \n data = json.load(data_file)\n return(data['redis'])\n\ndef get_cog_bucket():\n\n file = '/data/home/dogBreed.json'\n with open(file) as data_file: \n data = json.load(data_file)\n return(data['cog_bucket'])\n\n\n\nimport subprocess\n\ndef ec2status():\n #print (\"hello from util\")\n command = 'aws ec2 describe-instances --region=us-west-2'\n process = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)\n proc_stdout = process.communicate()[0].strip()\n stupidBytesObject = proc_stdout\n outStr = (stupidBytesObject.decode(\"utf-8\"))\n #print(outStr)\n return(outStr)\n\ndef ec2start(id):\n print (\"start id %s \" % id)\n command = \"aws ec2 start-instances --instance-ids %s --region=us-west-2\" % id\n process = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)\n proc_stdout = process.communicate()[0].strip()\n stupidBytesObject = proc_stdout\n outStr = (stupidBytesObject.decode(\"utf-8\"))\n print(outStr)\n return(outStr)\n\ndef ec2stop(id):\n print (\"stop id %s \" % id)\n command = \"aws ec2 stop-instances --instance-ids %s --region=us-west-2\" % id\n process = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)\n proc_stdout = process.communicate()[0].strip()\n stupidBytesObject = proc_stdout\n outStr = (stupidBytesObject.decode(\"utf-8\"))\n print(outStr)\n return(outStr)\n\n\ndef get_tag_name(theId):\n\n tagName='BOGUS1'\n if 'Tags' in theId:\n tags = theId[\"Tags\"]\n for tg in tags:\n if tg[\"Key\"] == \"Name\":\n tagName = tg[\"Value\"]\n else:\n tagName='BOGUS'\n\n return(tagName)\n\n","sub_path":"dataDog/dogTrainer/llib/earthexplorer.py","file_name":"earthexplorer.py","file_ext":"py","file_size_in_byte":10026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"169614138","text":"# general imports\nimport os\nimport pytest\nfrom pathlib import Path\n\n# DragonPHY imports\nfrom dragonphy import *\n\nTHIS_DIR = Path(__file__).parent.resolve()\nBUILD_DIR = THIS_DIR / 'build'\nif 'FPGA_SERVER' 
in os.environ:\n SIMULATOR = 'vivado'\nelse:\n SIMULATOR = 'ncsim'\n\n@pytest.mark.wip\ndef test_sim():\n deps = get_deps_cpu_sim_new(impl_file=THIS_DIR / 'test.sv')\n print(deps)\n\n DragonTester(\n ext_srcs=deps,\n directory=BUILD_DIR,\n top_module='test',\n inc_dirs=[get_mlingua_dir() / 'samples', get_dir('inc/new_cpu')],\n defines={'DAVE_TIMEUNIT': '1fs', 'NCVLOG': None},\n simulator=SIMULATOR\n ).run()","sub_path":"tests/in_progress/pfd_offset/test_pfd_offset.py","file_name":"test_pfd_offset.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"545399725","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 25 08:28:20 2021\r\n\r\n@author: crjol\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n#df = pd.read_csv(\"dow_jones_index.data\")\r\n#df.to_csv('final_project.csv')\r\n\r\ndata = pd.read_csv('final_project.csv')\r\n\r\nstocks = {'3M':'MMM',\r\n 'American Express':'AXP',\r\n 'Alcoa':'AA',\r\n 'AT&T':'T',\r\n 'Bank of America':'BAC',\r\n 'Boeing':'BA',\r\n 'Caterpillar':'CAT',\r\n 'Chevron':'CVX',\r\n 'Cisco Systems':'CSCO',\r\n 'Coca-Cola':'KO',\r\n 'DuPont':'DD',\r\n 'ExxonMobil':'XOM',\r\n 'General Electric':'GE',\r\n 'Hewlett-Packard':'HPQ',\r\n 'The Home Depot':'HD',\r\n 'Intel':'INTC',\r\n 'IBM':'IBM',\r\n 'Johnson & Johnson':'JNJ',\r\n 'JPMorgan Chase':'JPM',\r\n 'Kraft':'KRFT',\r\n \"McDonald's\":'MCD',\r\n 'Merck':'MRK',\r\n 'Microsoft':'MSFT',\r\n 'Pfizer':'PFE',\r\n 'Procter & Gamble':'PG',\r\n 'Travelers':'TRV',\r\n 'United Technologies':'UTX',\r\n 'Verizon':'VZ',\r\n 'Wal-Mart':'WMT',\r\n 'Walt Disney':'DIS'}\r\n\r\ndef avg_return_next_dividend(df, x):\r\n '''\r\n Parameters\r\n ----------\r\n x : string\r\n stock symbol for a stock in the 2011 Dow Jones Index.\r\n\r\n Returns\r\n -------\r\n avg : float\r\n average percentage of return on the next dividend.\r\n '''\r\n total = 0\r\n count = 0\r\n \r\n for i in range(len(df['stock'])):\r\n if df['stock'][i] == x:\r\n total += df['percent_return_next_dividend'][i]\r\n count += 1\r\n \r\n avg = round(total/count, 9)\r\n return avg\r\n\r\n\r\ndef create_dividend_dict(df, names):\r\n avg_returns = {}\r\n\r\n for key, val in names.items():\r\n avg_return = avg_return_next_dividend(df, val)\r\n avg_returns[key] = avg_return\r\n return avg_returns\r\n \r\n\r\ndef plot_avg_returns(save_file=False):\r\n avg_returns = create_dividend_dict(data, stocks)\r\n avg_returns_df = pd.DataFrame.from_dict(avg_returns, orient = 'index')\r\n avg_returns_df.columns = ['Average Percent Return']\r\n avg_returns_df = avg_returns_df.sort_values('Average Percent Return')\r\n\r\n ax = avg_returns_df.plot.bar(ylabel = 'Average Percent Return', legend = False, figsize=(25, 12))\r\n if save_file:\r\n ax.figure.savefig('./visualizations/dividend_history.png')\r\n return ax\r\n\r\n\r\ndef plot_avg_returns_old(save_file=False):\r\n avg_returns = {}\r\n\r\n for key, val in stocks.items():\r\n avg_return = avg_return_next_dividend(val)\r\n avg_returns[key] = avg_return\r\n\r\n avg_returns_df = pd.DataFrame.from_dict(avg_returns, orient = 'index')\r\n avg_returns_df.columns = ['Average Percent Return']\r\n avg_returns_df = avg_returns_df.sort_values('Average Percent Return')\r\n\r\n ax = avg_returns_df.plot.bar(ylabel = 'Average Percent Return', legend = False, figsize=(22, 8))\r\n if save_file:\r\n ax.figure.savefig('./visualizations/avg_returns.png')\r\n return ax\r\n \r\n \r\n \r\nif __name__ == '__main__':\r\n 
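# (sketch) avg_return_next_dividend() above walks every row once per stock; a
# single pandas groupby yields all the per-stock means in one pass. Column
# names follow the dataset above; the tiny frame here is a stand-in:
import pandas as pd

df = pd.DataFrame({
    "stock": ["MMM", "MMM", "AXP"],
    "percent_return_next_dividend": [0.18, 0.22, 0.30],
})
avg = df.groupby("stock")["percent_return_next_dividend"].mean().round(9)
print(avg["MMM"])   # 0.2, same as avg_return_next_dividend(df, "MMM")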
plot_avg_returns(True)","sub_path":"code/dividend_history.py","file_name":"dividend_history.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"138937207","text":"from six import BytesIO\n\nfrom egnyte.tests.config import IntegrationCase\n\n\nclass TestFiles(IntegrationCase):\n def setUp(self):\n super(TestFiles, self).setUp()\n self.filepath = self.root_folder.path + '/test.txt'\n self.root_folder.create()\n\n def test_create_file_bytesio(self):\n source = BytesIO(b'vijayendra')\n source.seek(0)\n\n self.client.folder(self.root_folder.path).create()\n self.client.file(self.filepath).upload(source)\n\n dest = BytesIO()\n self.client.file(self.filepath).download().write_to(dest)\n\n dest.seek(0)\n source.seek(0)\n\n self.assertEqual(source.read(), dest.read(), \"Uploaded and downloaded file's contents do not match\")\n\n def test_create_file_strings(self):\n source = b'vijayendra'\n self.client.folder(self.root_folder.path).create()\n self.client.file(self.filepath).upload(source)\n\n dest = self.client.file(self.filepath).download().read()\n\n self.assertEqual(source, dest, \"Uploaded and downloaded file's contents do not match\")\n\n def test_create_file_chunked(self):\n source = BytesIO(b'0123456789' * 1024 * 10) # 100k bytes\n source.seek(0)\n self.client.folder(self.root_folder.path).create()\n\n f = self.client.file(self.filepath)\n f.upload_chunk_size = 40000\n f.upload(source)\n\n dest = BytesIO()\n self.client.file(self.filepath).download().write_to(dest)\n\n dest.seek(0)\n source.seek(0)\n\n self.assertEqual(source.read(), dest.read(), \"Uploaded and downloaded file's contents do not match\")\n\n partial_start = 5009\n partial_size = 104\n partial = f.download((partial_start, partial_start + partial_size - 1))\n source.seek(partial_start)\n\n source_content = source.read(partial_size)\n partial_content = partial.read()\n self.assertEqual(source_content, partial_content, \"Partial download content does not match\")\n","sub_path":"egnyte/tests/test_files.py","file_name":"test_files.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"93607945","text":"#!/usr/bin/python\n# ---- coding:utf-8 ----\nimport sys\np = {}\npairs = {}\nclustrs = sys.argv[1].split('\\n')\nclustrs = [eval(x.split('\\t')[0]) for x in clustrs]\n\ndef l2(x1,x2):\n td = 0\n for i in range(len(x1)):\n td += (x2[i]-x1[i])**2\n return td**0.5\n\ndef closest(point,clusters,taken):\n peak = 0\n m_d = 100000000\n p_point = -1\n for cluster in clusters:\n z=abs(point-cluster)\n if z < m_d and peak not in taken:\n m_d = z \n p_point = peak\n peak += 1\n return p_point\n\nfor line in sys.stdin:\n try:\n info = line.split(',')\n zone = info[9:12]\n zone = [int(x) for x in zone]\n if 0 in zone:\n continue\n if info[33] != 'BLK':\n continue\n except:\n continue\n max_d = 10000000000\n best_clstr = -1\n k=0\n for clstr in clustrs:\n zone_reorg = [0 for _ in zone]\n tkn = []\n for point in zone:\n temp_clstr = clstr\n indx_closest = closest(point,temp_clstr,tkn)\n zone_reorg[indx_closest] = point\n tkn.append(indx_closest)\n dist = l2(zone_reorg,clstr)\n if dist < max_d:\n best_clstr = k\n max_d = dist\n k+=1\n if str(clustrs[best_clstr]) not in pairs:\n pairs[str(clustrs[best_clstr])] = []\n pairs[str(clustrs[best_clstr])].append(zone)\n\nfor key,item in pairs.items():\n 
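# (sketch) The Egnyte tests above hinge on calling seek(0) before every read;
# a dependency-free round trip showing why, with io.BytesIO standing in for
# the upload + download pair:
from io import BytesIO

source = BytesIO(b'0123456789' * 4)
dest = BytesIO()

source.seek(0)
dest.write(source.read())       # "upload" then "download"

source.seek(0)
dest.seek(0)
assert source.read() == dest.read(), "contents should match"
print('round trip ok')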
print(str(key)+'\\t'+str(item))\n","sub_path":"q3/mapper_a2.py","file_name":"mapper_a2.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"251801584","text":"#!/usr/bin/python3\n\n'''\n--- Day 12: Rain Risk ---\n\nYour ferry made decent progress toward the island, but the storm came in faster than anyone expected. The ferry needs to take evasive actions!\n\nUnfortunately, the ship's navigation computer seems to be malfunctioning; rather than giving a route directly to safety, it produced extremely circuitous instructions. When the captain uses the PA system to ask if anyone can help, you quickly volunteer.\n\nThe navigation instructions (your puzzle input) consists of a sequence of single-character actions paired with integer input values. After staring at them for a few minutes, you work out what they probably mean:\n\n Action N means to move north by the given value.\n Action S means to move south by the given value.\n Action E means to move east by the given value.\n Action W means to move west by the given value.\n Action L means to turn left the given number of degrees.\n Action R means to turn right the given number of degrees.\n Action F means to move forward by the given value in the direction the ship is currently facing.\n\nThe ship starts by facing east. Only the L and R actions change the direction the ship is facing. (That is, if the ship is facing east and the next instruction is N10, the ship would move north 10 units, but would still move east if the following action were F.)\n\nFor example:\n\nF10\nN3\nF7\nR90\nF11\n\nThese instructions would be handled as follows:\n\n F10 would move the ship 10 units east (because the ship starts by facing east) to east 10, north 0.\n N3 would move the ship 3 units north to east 10, north 3.\n F7 would move the ship another 7 units east (because the ship is still facing east) to east 17, north 3.\n R90 would cause the ship to turn right by 90 degrees and face south; it remains at east 17, north 3.\n F11 would move the ship 11 units south to east 17, south 8.\n\nAt the end of these instructions, the ship's Manhattan distance (sum of the absolute values of its east/west position and its north/south position) from its starting position is 17 + 8 = 25.\n\nFigure out where the navigation instructions lead. What is the Manhattan distance between that location and the ship's starting position?\n--- Part Two ---\n\nBefore you can give the destination to the captain, you realize that the actual action meanings were printed on the back of the instructions the whole time.\n\nAlmost all of the actions indicate how to move a waypoint which is relative to the ship's position:\n\n Action N means to move the waypoint north by the given value.\n Action S means to move the waypoint south by the given value.\n Action E means to move the waypoint east by the given value.\n Action W means to move the waypoint west by the given value.\n Action L means to rotate the waypoint around the ship left (counter-clockwise) the given number of degrees.\n Action R means to rotate the waypoint around the ship right (clockwise) the given number of degrees.\n Action F means to move forward to the waypoint a number of times equal to the given value.\n\nThe waypoint starts 10 units east and 1 unit north relative to the ship. 
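# (sketch) The part-one rules above, replayed on the worked example with
# complex numbers: east is the real axis, north the imaginary one, and a
# right turn is a multiplication by -1j per 90 degrees. Only the example's
# five instructions are hard-coded here.
pos, heading = 0 + 0j, 1 + 0j
for op, val in [('F', 10), ('N', 3), ('F', 7), ('R', 90), ('F', 11)]:
    if op == 'F':
        pos += heading * val
    elif op == 'N':
        pos += 1j * val
    elif op == 'R':
        heading *= (-1j) ** (val // 90)
print(int(abs(pos.real) + abs(pos.imag)))   # 25, matching the example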
The waypoint is relative to the ship; that is, if the ship moves, the waypoint moves with it.\n\nFor example, using the same instructions as above:\n\n F10 moves the ship to the waypoint 10 times (a total of 100 units east and 10 units north), leaving the ship at east 100, north 10. The waypoint stays 10 units east and 1 unit north of the ship.\n N3 moves the waypoint 3 units north to 10 units east and 4 units north of the ship. The ship remains at east 100, north 10.\n F7 moves the ship to the waypoint 7 times (a total of 70 units east and 28 units north), leaving the ship at east 170, north 38. The waypoint stays 10 units east and 4 units north of the ship.\n R90 rotates the waypoint around the ship clockwise 90 degrees, moving it to 4 units east and 10 units south of the ship. The ship remains at east 170, north 38.\n F11 moves the ship to the waypoint 11 times (a total of 44 units east and 110 units south), leaving the ship at east 214, south 72. The waypoint stays 4 units east and 10 units south of the ship.\n\nAfter these operations, the ship's Manhattan distance from its starting position is 214 + 72 = 286.\n\nFigure out where the navigation instructions actually lead. What is the Manhattan distance between that location and the ship's starting position?\n'''\n\nimport os\nimport re\n\nDAY = \"DAY 12\"\n\ndef PrepareList(aList):\n # handle additional list preparation/manipulation\n # ex: numList = [int(item) for item in aList]\n return aList\n\ndef PrintAnswers(aPart1, aPart2):\n print (DAY)\n print (\" Part 1: {}\".format(aPart1))\n print (\" Part 2: {}\".format(aPart2))\n\ndef Answer(aList):\n part1 = 0\n part2 = \"?\"\n\n # DO IT\n regex = \"(N|E|S|W|F|R|L)(\\d+)\"\n locationDict1 = {\"N\":0, \"E\":0, \"S\":0, \"W\":0}\n locationDict2 = {\"N\":0, \"E\":0, \"S\":0, \"W\":0}\n waypointDict = {\"N\":1, \"E\":10, \"S\":0, \"W\":0}\n facing = \"E\"\n for line in aList:\n line = line.strip()\n match = re.search(regex, line)\n if match:\n if match.group(1) in locationDict1.keys():\n locationDict1[match.group(1)] += int(match.group(2))\n waypointDict[match.group(1)] += int(match.group(2))\n elif match.group(1) == \"F\":\n locationDict1[facing] += int(match.group(2))\n for key in locationDict2.keys():\n locationDict2[key] += waypointDict[key] * int(match.group(2))\n elif match.group(1) == \"R\":\n facing = list(locationDict1.keys())[int(list(locationDict1.keys()).index(facing) + (int(match.group(2)) / 90)) % 4]\n tempDict = waypointDict.copy()\n for key in waypointDict.keys():\n newKey = list(locationDict1.keys())[int(list(locationDict1.keys()).index(key) + (int(match.group(2)) / 90)) % 4]\n tempDict[newKey] = waypointDict[key]\n waypointDict = tempDict.copy()\n elif match.group(1) == \"L\":\n facing = list(locationDict1.keys())[int(list(locationDict1.keys()).index(facing) - (int(match.group(2)) / 90)) % 4]\n tempDict = waypointDict.copy()\n for key in waypointDict.keys():\n newKey = list(locationDict1.keys())[int(list(locationDict1.keys()).index(key) - (int(match.group(2)) / 90)) % 4]\n tempDict[newKey] = waypointDict[key]\n waypointDict = tempDict.copy()\n\n part1 = abs(locationDict1[\"N\"] - locationDict1[\"S\"]) + abs(locationDict1[\"E\"] - locationDict1[\"W\"])\n part2 = abs(locationDict2[\"N\"] - locationDict2[\"S\"]) + abs(locationDict2[\"E\"] - locationDict2[\"W\"])\n #\n\n PrintAnswers(part1, part2)\n \ndef Main():\n inputFileName = __file__.replace(\".py\", \".input\")\n if not os.path.isfile(inputFileName):\n print (\"Input file ({}) does not exist.\".format(inputFileName))\n 
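# (sketch) The dict re-keying above implements the part-two waypoint turn; in
# plain (east, north) coordinates the same turn is a swap-and-negate per 90
# degrees, checked here against the R90 step of the worked example:
def rotate_right(east, north, degrees):
    for _ in range((degrees // 90) % 4):
        east, north = north, -east      # 90 degrees clockwise
    return east, north

assert rotate_right(10, 4, 90) == (4, -10)   # 4 east, 10 south
assert rotate_right(10, 4, 360) == (10, 4)
print('rotation identity holds')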
return\n    with open(inputFileName, 'r') as fh:\n        lines = [line.strip() for line in fh]\n\n    # Prepare line list (as necessary)\n    modList = PrepareList(lines)\n    # Part 1/2 function call(s)\n    Answer(modList)\n\n# if run stand-alone\nif __name__ == '__main__':\n    Main()\n","sub_path":"days/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":7278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
+{"seq_id":"556785173","text":"#!/usr/bin/env python\n\n# TestClassChinaCoin.py\n\nimport ClassicalChinaCoin, Square\nimport AncientObject, Metal\nimport SpecialChinaCoin\nimport sys, getopt\n\ndef usage():\n    print('Usage: TestClassChinaCoin.py -h')\n    print('Usage: TestClassChinaCoin.py -a <age> -r <radius> -s <side> -m <material>')\n    print('Usage: TestClassChinaCoin.py --age=<age> --radius=<radius> --side=<side> --material=<material>')\n\ndef main(argv):\n    age = ''\n    radius = ''\n    side = ''\n    material = ''\n\n    try:\n        opts, args = getopt.getopt(argv, \"ha:r:s:m:\", [\"age=\", \"radius=\", \"side=\", \"material=\"])\n    except getopt.GetoptError:\n        usage()\n        sys.exit(2)\n\n    for opt, arg in opts:\n        if opt == '-h':\n            usage()\n            sys.exit()\n        elif opt in (\"-a\", \"--age\"):\n            age = arg\n        elif opt in (\"-r\", \"--radius\"):\n            radius = arg\n        elif opt in (\"-s\", \"--side\"):\n            side = arg\n        elif opt in (\"-m\", \"--material\"):\n            material = arg\n\n    myCoin = SpecialChinaCoin.SpecialChinaCoin(age, radius, side, material)\n\n    print(myCoin.toString() + \"\\n\")\n    myCoin.isValid()\n    myCoin.setAge(700)\n    myCoin.setSide(2)\n    myCoin.setRadius(float(5.0))\n    myCoin.setMaterial(\"Gold\")\n    print(myCoin.toString() + \"\\n\")\n    myCoin.destroy()\n    print(myCoin.toString())\n\nif __name__ == '__main__':\n    main(sys.argv[1:])","sub_path":"TestClassChinaCoin.py","file_name":"TestClassChinaCoin.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
+{"seq_id":"264211596","text":"from sqlalchemy import *\nfrom migrate import *\n\n\ndef upgrade(migrate_engine):\n    # Upgrade operations go here. 
Don't create your own engine; bind\n # migrate_engine to your metadata\n meta = MetaData(bind=migrate_engine)\n t = Table('AnalysisTable', meta, autoload=True)\n t.c.irradiation_position_id.drop()\n\ndef downgrade(migrate_engine):\n # Operations to reverse the above upgrade go here.\n\tmeta = MetaData(bind=migrate_engine)\n\tt = Table('AnalysisTable', meta, autoload=True)\n\tcol = Column('irradiation_position_id', Integer)\n\tcol.create(t)\n","sub_path":"src/database/migrate/isotopedb/versions/012_Removed_irrad_pos_id_from_AnalysisTable.py","file_name":"012_Removed_irrad_pos_id_from_AnalysisTable.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"41630629","text":"import numpy as np\nimport pyaudio\n\nfrom config import CONFIGS\nfrom patterns.default import Default\nfrom visualization import Visualizer\n\n_gamma = np.load(CONFIGS['gamma_table_path'])\n\"\"\"Gamma lookup table used for nonlinear brightness correction\"\"\"\n\n\nclass Music(Default):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.pattern_name = \"Music\"\n\n CONFIGS['n_pixels'] = self.strip_length\n\n # init visualizer\n self.vis = Visualizer(CONFIGS)\n\n # dict used to set the visualizer effect\n self.effect_dict = dict(\n spectrum=self.vis.visualize_spectrum,\n energy=self.vis.visualize_energy,\n scroll=self.vis.visualize_scroll,\n )\n\n # name of the effect to be used\n self.effect = 'spectrum'\n\n self.modifiers = dict(\n visualizer=self.effect,\n )\n\n # attributes for the mic\n self.p = None\n self.stream = None\n self.frames_per_buffer = int(CONFIGS['mic_rate'] / CONFIGS['fps'])\n\n self.setup()\n\n def setup(self):\n \"\"\"\n Setup stream\n \"\"\"\n self.p = pyaudio.PyAudio()\n self.stream = self.p.open(format=pyaudio.paInt16,\n channels=1,\n rate=CONFIGS['mic_rate'],\n input=True,\n frames_per_buffer=self.frames_per_buffer)\n\n @property\n def effect(self):\n return self._effect\n\n @effect.setter\n def effect(self, value):\n \"\"\"\n Set the effect to a certain value and change the visualization effect in the vis calss\n \"\"\"\n try:\n ef = self.effect_dict[value]\n self.vis.visualization_effect = ef\n self._effect = value\n\n except KeyError as e:\n print(f\"Error for key {value}\\n{e}\")\n\n @property\n def rate(self):\n \"\"\"\n Rate should always be zero here9\n \"\"\"\n return 0\n\n @rate.setter\n def rate(self, value):\n pass\n\n def read_audio(self):\n \"\"\"\n Read audio and return it\n \"\"\"\n try:\n y = np.fromstring(self.stream.read(self.frames_per_buffer, exception_on_overflow=False),\n dtype=np.int16)\n y = y.astype(np.float32)\n self.stream.read(self.stream.get_read_available(), exception_on_overflow=False)\n return y\n except IOError:\n print('Audio buffer has overflowed')\n\n def fill(self):\n \"\"\"\n Read from audio stream and set pixels\n \"\"\"\n # read audio input, can also be none when the mic has not started yet\n output = self.read_audio()\n\n try:\n # use visualization\n pixels, _ = self.vis.audio_to_rgb(output)\n # Truncate values and cast to integer\n pixels = np.clip(pixels, 0, 255).astype(int)\n # Optional gamma correction\n pixels = _gamma[pixels]\n\n r, g, b = pixels\n for idx in range(len(r)):\n self.pixels[idx]['color'] = (r[idx], g[idx], b[idx], 255)\n\n except TypeError:\n pass\n\n def stop(self):\n \"\"\"\n Call super method and close audio stream\n \"\"\"\n super(Music, self).stop()\n\n self.stream.stop_stream()\n self.stream.close()\n 
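# (sketch) read_audio() above decodes the PCM chunk with np.fromstring, which
# recent NumPy releases removed; np.frombuffer is the drop-in reading of the
# same bytes. The sample bytes below are made up (four little-endian int16
# samples), not actual microphone data:
import numpy as np

raw = (1000).to_bytes(2, 'little', signed=True) * 4
y = np.frombuffer(raw, dtype='<i2').astype(np.float32)
print(y)   # [1000. 1000. 1000. 1000.]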
self.p.terminate()\n","sub_path":"src/patterns/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"505639555","text":"#!/usr/bin/env python\n\nimport roslib; roslib.load_manifest('wifi_lookup')\nimport rospy, os, re\nfrom wifi_lookup.msg import WifiData, Wifi\n\nclass DataNode():\n\tdef __init__(self):\n\t\tpub = rospy.Publisher('wifi_data', WifiData)\n\n\t\tr = rospy.Rate(rospy.get_param('~rate', 1))\n\t\twhile not rospy.is_shutdown():\n\t\t\tos.system(\"iwlist wlan0 scanning >> datatemp.txt\")\n\n\t\t\twifiraw = open(\"datatemp.txt\").read()\n\t\t\tos.remove(\"datatemp.txt\")\n\n\t\t\tessids = re.findall(\"ESSID:\\\"(.*)\\\"\", wifiraw)\n\t\t\taddresses = re.findall(\"Address: ([0-9A-F:]{17})\", wifiraw)\n\t\t\tsignals = re.findall(\"Signal level=.*?([0-9]+)\", wifiraw)\n\n\t\t\tmsg = WifiData()\n\n\t\t\tfor i in range(len(essids)):\n\t\t\t\tif (essids[i] == rospy.get_param('~ssid', 'restricted.utexas.edu')):\n\t\t\t\t\ttemp = Wifi()\t\t\t \n\t\t\t\t\ttemp.MAC = addresses[i] \n\t\t\t\t\ttemp.dB = int(signals[i])\n\t\t\t\t\tmsg.HotSpots.append(temp)\n\n\t\t\tmsg.length = len(msg.HotSpots)\n\t\t\tpub.publish(msg)\n\t\t\tr.sleep()\n\nif __name__ == '__main__':\n\trospy.init_node('wifi_data')\n\ttry:\n\t\tnode = DataNode()\n\texcept rospy.ROSInterruptException: pass\n\t\n","sub_path":"src/wifi_data.py","file_name":"wifi_data.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"298529816","text":"from os import stat_result\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nfrom tkinter import scrolledtext as scrolledtext\nfrom typing import runtime_checkable\nimport webbrowser\nimport getpass\nimport filter\n\n\nroot = Tk()\nroot.configure(bg=\"white\")\nroot.title(\"TSVFilter\")\nroot.geometry('1000x600')\nroot.resizable(height=False, width=False)\n\n\n##################################################################################################################################\n\n\n#* FUNCTIONS FOR SWITCHING BETWEEN PAGES\n\n# clears content of page 1\ndef page1_clear():\n\n canvas.delete(ALL)\n next_btn.place_forget()\n file_open.place_forget()\n docmbtn.place_forget()\n display.place_forget()\n\n# clears content of page 2\ndef page2_clear():\n\n canvas.delete(ALL)\n prev_btn.place_forget()\n process_btn.place_forget()\n\n rt_check.place_forget()\n updown_check.place_forget()\n fold_check.place_forget()\n pvalue_check.place_forget()\n mz_check.place_forget()\n\n rtmin_entry.place_forget()\n rtmin_desc.place_forget()\n rtmax_entry.place_forget()\n rtmax_desc.place_forget()\n\n updown_entry.place_forget()\n\n foldmin_entry.place_forget()\n foldmin_desc.place_forget()\n foldmax_entry.place_forget()\n foldmax_desc.place_forget()\n\n pvaluemin_entry.place_forget()\n pvaluemin_desc.place_forget()\n pvaluemax_entry.place_forget()\n pvaluemax_desc.place_forget()\n\n mzmin_entry.place_forget()\n mzmin_desc.place_forget()\n mzmax_entry.place_forget()\n mzmax_desc.place_forget()\n\n checkbtn.place_forget()\n uncheckbtn.place_forget()\n\n\n\ndef next_page():\n\n page2()\n\n\ndef prev_page():\n\n page1()\n\n\n\n\n#* ELEMENTS AND WIDGETS WHICH STAY THE SAME FOR BOTH PAGES \n\n\ncanvas = Canvas(\n root,\n height=600,\n width=1000, \n bd=0,\n highlightthickness=0, \n bg=\"white\", \n relief=RIDGE)\ncanvas.place(\n x=0, y=0\n)\n\n\n# tooltip 
bar\ntooltip = Label(\n root,\n text=\"\",\n bg=\"white\",\n bd=1,\n relief=SUNKEN,\n anchor=NW,\n)\ntooltip.place(\n x=0, y=560,\n height=40,\n width=1000\n)\n\n\n#* next button - re-renders root\n\n# tooltip function\ndef next_hover(e):\n\n tooltip.config(\n text=\"Load next page where you may configure tolerance(s).\"\n )\n\n\n\nbtn_texture = PhotoImage(\n file=\"textures/xxx_nextbtn.png\"\n)\n\nnext_btn = Button(\n image=btn_texture,\n highlightthickness=0,\n borderwidth=0,\n command=next_page,\n pady=30,\n bg=\"white\",\n state=DISABLED\n)\n\nnext_btn.bind(\n \"\",\n next_hover\n)\n\n\n#* TEXTURES FOR PAGE 1\n\nfld_texture = PhotoImage(\n file=\"textures/xxx_filedisplay.png\"\n)\n\nlogo = PhotoImage(\n file=\"textures/xxx_logo.png\"\n)\n\ndocmbtn_txtr = PhotoImage(\n file=\"textures/xxx_docmbtn.png\"\n)\n\nopn_texture = PhotoImage(\n file=\"textures/xxx_openfile.png\"\n)\n\n\n#* WIDGETS FOR PAGE 1 AND THEIR FUNCTIONS\n\n#* documentation button\n# open documentation in default browser\ndef opensite():\n\n webbrowser.open_new_tab(\"https://github.com/xdNecron/TSVFilter\") # TODO: #3 change link when the documentation is done\n\n\n# hover function\ndef docmbtn_onhover(e):\n\n tooltip.config(\n text=\"Redirects you to GitHub documentation for the script.\"\n )\n\n\ndocmbtn = Button(\n root,\n image=docmbtn_txtr,\n bd=0,\n borderwidth=0,\n highlightthickness=0,\n command=opensite,\n bg=\"white\"\n)\n\ndocmbtn.bind(\n \"\",\n docmbtn_onhover\n)\n\n\n#* openfile button\ndef openfile():\n\n # obtain current username\n user = getpass.getuser()\n\n global filepath\n\n filepath = filedialog.askopenfilename(\n title=\"Open a TSV/CSV file\",\n filetypes=(\n (\"CSV and TSV files\", \"*.csv *.tsv\"),\n (\"All files\", \"*.*\")\n ),\n initialdir=f\"C:/Users/{user}\" \n )\n\n # send filepath to the filtration script\n filter.obtain_file(filepath)\n\n datafile = open(filepath, 'r')\n\n \n next_btn.config(\n state=NORMAL\n )\n \n \n display.config(\n state=NORMAL\n )\n\n display.delete(\n 1.0, END\n )\n\n display.insert(\n END,\n datafile.read()\n )\n\n display.config(\n state=DISABLED\n )\n\n\ndef opn_hover(e):\n\n tooltip.config(\n text=\"Import source file which the script will take data from.\"\n )\n\n\nfile_open = Button(\n root,\n text=\"Open File\",\n command=openfile,\n image=opn_texture,\n borderwidth=0,\n bg=\"white\",\n pady=30\n)\n\nfile_open.bind(\n \"\", opn_hover\n)\n\n\n#* file display\ndef dpl_hover(e):\n\n tooltip.config(\n text=\"In this window is displayed the content of imported file.\"\n )\n\n\ndisplay = scrolledtext.ScrolledText(\n root,\n borderwidth=0,\n highlightthickness=0,\n height=21,\n width=71,\n padx=5,\n state=DISABLED\n)\n\ndisplay.bind(\n \"\", dpl_hover\n)\n\ndisplay.config(\n state=NORMAL\n)\ndisplay.insert(\n END,\n \"Hi, this is just a temporary text, which will change after imporing\\na file.\"\n)\ndisplay.config(\n state=DISABLED\n)\n\n\n\n\n#* PAGE 2 ELEMENTS AND WIDGETS\n\n#* textures of elements\ntitle_txtr = PhotoImage(\n file=\"textures/page2_title.png\"\n)\n\nmainfield_txtr = PhotoImage(\n file=\"textures/page2_mainfield.png\"\n)\n\nprevbtn_txtr = PhotoImage(\n file=\"textures/page2_prevbtn.png\"\n)\n\nprocessbtn_txtr = PhotoImage(\n file=\"textures/page2_processbtn.png\"\n)\n\nrt_txtr = PhotoImage(\n file=\"textures/page2_rt.png\"\n)\n\nmz_txtr = PhotoImage(\n file=\"textures/page2_mz.png\"\n)\n\nfold_txtr = PhotoImage(\n file=\"textures/page2_fold.png\"\n)\n\nupdown_txtr = PhotoImage(\n file=\"textures/page2_updown.png\"\n)\n\npvalue_txtr = PhotoImage(\n 
file=\"textures/page2_pvalue.png\"\n)\n\ncheckbtn_txtr = PhotoImage(\n file=\"textures/page2_checkall.png\"\n)\n\nuncheckbtn_txtr = PhotoImage(\n file=\"textures/page2_uncheckall.png\"\n)\n\n\n#* widgets which communicate with the filtration script\nrt_var = IntVar()\nupdown_var = IntVar()\nfold_var = IntVar()\npvalue_var = IntVar()\nmz_var = IntVar()\n\nvars = [\n rt_var,\n updown_var,\n fold_var,\n pvalue_var,\n mz_var\n]\n\nrt_check = Checkbutton(\n root,\n bg=\"white\",\n highlightthickness=0,\n highlightcolor=\"white\",\n variable=rt_var,\n bd=3\n)\n\nupdown_check = Checkbutton(\n root,\n bg=\"white\",\n highlightthickness=0,\n highlightcolor=\"white\",\n variable=updown_var,\n bd=3\n)\n\nfold_check = Checkbutton(\n root,\n bg=\"white\",\n highlightthickness=0,\n highlightcolor=\"white\",\n variable=fold_var\n)\n\npvalue_check = Checkbutton(\n root,\n bg=\"white\",\n highlightthickness=0,\n highlightcolor=\"white\",\n variable=pvalue_var,\n bd=3\n)\n\nmz_check = Checkbutton(\n root,\n bg=\"white\",\n highlightthickness=0,\n highlightcolor=\"white\",\n variable=mz_var,\n bd=3\n)\n\n# rt entries and descs\nrtmin_entry = Entry(\n root,\n width=20,\n)\n\nrtmax_entry = Entry(\n root,\n width=20\n)\n\nrtmin_desc = Label(\n root,\n text=\"Minimum\",\n bg=\"white\"\n)\n\nrtmax_desc = Label(\n root,\n text=\"Maximum\",\n bg=\"white\"\n)\n\n# updown dropdown menu\nchoice = StringVar()\nchoice.set(\"UP\")\n\nupdown_entry = OptionMenu(\n root,\n choice,\n \"UP\",\n \"DOWN\",\n)\n\n\n# fold entries and descs\nfoldmin_entry = Entry(\n root,\n width=15\n)\n\nfoldmax_entry = Entry(\n root,\n width=15\n)\n\nfoldmin_desc = Label(\n root,\n text=\"Minimum\",\n bg=\"white\"\n)\n\nfoldmax_desc = Label(\n root,\n text=\"Maxmimum\",\n bg=\"white\"\n)\n\n\n# pvalue entries and descs\npvaluemin_entry = Entry(\n root,\n width=20\n)\n\npvaluemax_entry = Entry(\n root,\n width=20\n)\n\npvaluemin_desc = Label(\n root,\n text=\"Minimum\",\n bg=\"white\"\n)\n\npvaluemax_desc = Label(\n root,\n text=\"Maximum\",\n bg=\"white\"\n)\n\n\n# mz entries and descs\nmzmin_entry = Entry(\n root,\n width=25\n)\n\nmzmax_entry = Entry(\n root,\n width=25\n)\n\nmzmin_desc = Label(\n root,\n text=\"Minimum\",\n bg=\"white\"\n)\n\nmzmax_desc = Label(\n root,\n text=\"Maximum\",\n bg=\"white\"\n)\n\n\n\n# previous button\ndef prev_hover(e):\n\n tooltip.config(\n text=\"Back to file upload.\"\n )\n\n\nprev_btn = Button(\n root,\n image=prevbtn_txtr,\n bd=0,\n borderwidth=0,\n highlightthickness=0,\n bg=\"white\",\n command=prev_page\n)\n\nprev_btn.bind(\n \"\",\n prev_hover\n)\n\n# un/check all buyttons\n\ndef checkall():\n\n for var in vars:\n\n var.set(1)\n\n\ndef check_hover(e):\n\n tooltip.config(\n text=\"Checks all filtration methods.\"\n )\n\n\ndef uncheckall():\n\n for var in vars:\n\n var.set(0)\n\n\ndef uncheck_hover(e):\n\n tooltip.config(\n text=\"Unhecks all filtration methods.\"\n )\n\n\ncheckbtn = Button(\n root,\n bg=\"white\",\n bd=0,\n borderwidth=0,\n highlightthickness=0,\n image=checkbtn_txtr,\n command=checkall\n)\n\ncheckbtn.bind(\n \"\",\n check_hover\n)\n\n\nuncheckbtn = Button(\n root,\n bg=\"white\",\n bd=0,\n borderwidth=0,\n highlightthickness=0,\n image=uncheckbtn_txtr,\n command=uncheckall\n)\n\nuncheckbtn.bind(\n \"\",\n uncheck_hover\n)\n\n\n# process button\n\n\ndef process():\n\n try:\n if rt_var.get() == 1:\n \n rt_min = rtmin_entry.get()\n rt_max = rtmax_entry.get()\n\n rt_min = rt_min.replace(\" \", \"\")\n rt_max = rt_max.replace(\" \", \"\")\n\n if rt_min == '':\n\n pass\n else:\n\n 
filter.filter(\"rtmed\", rt_min, \"\")\n\n if rt_max == '':\n\n pass\n else:\n\n filter.filter(\"rtmed\", \"\", rt_max)\n\n\n if updown_var.get() == 1:\n\n filter.updown(choice.get())\n\n\n\n if fold_var.get() == 1:\n\n fold_min = foldmin_entry.get()\n fold_max = foldmax_entry.get()\n\n fold_min = fold_min.replace(\" \", \"\")\n fold_max = fold_max.replace(\" \", \"\")\n\n if fold_min == '':\n\n pass\n else:\n\n filter.filter(\"fold\", fold_min, \"\")\n\n if fold_max == '':\n\n pass\n else:\n\n filter.filter(\"fold\", \"\", fold_max)\n\n\n\n if pvalue_var.get() == 1:\n\n pvalue_min = pvaluemin_entry.get()\n pvalue_max = pvaluemax_entry.get()\n\n pvalue_min = pvalue_min.replace(\" \", \"\")\n pvalue_max = pvalue_max.replace(\" \", \"\")\n\n if pvalue_min == '':\n\n pass\n else:\n\n filter.filter(\"pvalue\", pvalue_min, \"\")\n\n if pvalue_max == '':\n\n pass\n else:\n\n filter.filter(\"pvalue\", \"\", pvalue_max)\n\n\n\n\n if mz_var.get() == 1:\n \n mz_min = mzmin_entry.get()\n mz_max = mzmax_entry.get()\n\n mz_min = mz_min.replace(\" \", \"\")\n mz_max = mz_max.replace(\" \", \"\")\n\n if mz_min == '':\n\n pass\n else:\n\n filter.filter(\"mzmed\", mz_min, \"\")\n\n if mz_max == '':\n\n pass\n else:\n\n filter.filter(\"mzmed\", \"\", mz_max)\n\n\n\n print(filter.df)\n filter.out_tsv()\n\n messagebox.showinfo(\n \"Filtering done.\",\n\n \"The processing has finished and saved the data to \\\"out.tsv\\\".\\n IMPORTANT: this file is overwritten during each process. If you don't want to lose the result, please move it to another directory.\"\n )\n \n except NameError:\n filter.missing_column(filter.missing)\n\n\n\n filter.obtain_file(filepath)\n\n\ndef process_hover(e):\n\n tooltip.config(\n text=\"Iniciate processing. May take longer based on the amount of data.\"\n )\n\n\nprocess_btn = Button(\n root,\n image=processbtn_txtr,\n bd=0,\n borderwidth=0,\n highlightthickness=0,\n bg=\"white\",\n command=process\n)\n\nprocess_btn.bind(\n \"\",\n process_hover\n)\n\n\n#######################################################################################################################################\n\n\n#* PAGE 1 \n\ndef page1():\n\n tooltip.config(\n text=\"\"\n )\n\n page2_clear()\n\n #* MAIN ELEMENTS for page 1\n canvas.create_image(\n 316, 120,\n anchor=NW,\n image=fld_texture,\n )\n\n\n rctngl = canvas.create_rectangle(\n 0, 0, 1000, 177,\n fill=\"black\" \n )\n\n canvas.tag_lower(rctngl)\n\n\n canvas.create_image(\n 48, 61,\n anchor=NW,\n image=logo\n )\n\n\n\n #* DOCUMENTATION BUTTON\n docmbtn.place(\n x=53, y=305,\n height=35,\n width=173\n )\n\n\n #* NEXT BUTTON\n next_btn.place(\n x=776, y=502,\n width=173,\n height=35\n )\n\n\n #*IMPORTING DATAFILE\n # TODO settings\n file_open.place(\n x=53, y=256,\n width=175,\n height=35,\n )\n\n\n #* FILE DISPLAY\n display.place(\n x=323, y=130\n )\n\n\npage1()\n\n\n###############################################################################################################################\n\n\ndef page2():\n\n page1_clear()\n\n # clear tooltip\n tooltip.config(\n text=\"\"\n )\n\n\n canvas.create_rectangle(\n 0, 0, 1000, 177,\n fill=\"black\"\n )\n\n canvas.create_image(\n 77, 46,\n anchor=NW,\n image=title_txtr\n )\n\n # ! main field - might be removed later\n # ! 
textures are defined outside the fucntion\n canvas.create_image(\n 100, 120,\n anchor=NW,\n image=mainfield_txtr\n )\n\n canvas.create_image(\n 123, 138,\n anchor=NW,\n image=rt_txtr\n )\n\n canvas.create_image(\n 426, 138,\n anchor=NW,\n image=updown_txtr\n )\n\n canvas.create_image(\n 655, 138,\n anchor=NW,\n image=fold_txtr\n )\n\n canvas.create_image(\n 123, 268,\n anchor=NW,\n image=pvalue_txtr\n )\n\n canvas.create_image(\n 426, 268,\n anchor=NW,\n image=mz_txtr\n )\n\n checkbtn.place(\n x=462, y=422\n )\n\n uncheckbtn.place(\n x=655, y=422\n )\n\n prev_btn.place(\n x=77, y=502,\n height=35,\n width=173\n )\n \n\n process_btn.place(\n x=776, y=502,\n width=173,\n height=35,\n )\n\n\n # check buttons\n rt_check.place(\n x=138, y=150,\n width=16,\n height=16\n )\n\n updown_check.place(\n x=444, y=150,\n width=16,\n height=16\n )\n\n fold_check.place(\n x=673, y=150,\n width=16,\n height=16\n )\n\n pvalue_check.place(\n x=138, y=289,\n width=16,\n height=16\n )\n\n mz_check.place(\n x=444, y=284,\n width=16,\n height=16\n )\n\n #* entries and labels\n\n # rt\n rtmin_entry.place(\n x=224, y=181\n )\n\n rtmin_desc.place(\n x=144, y=181\n )\n\n\n rtmax_entry.place(\n x=224, y=214\n )\n\n rtmax_desc.place(\n x=144, y=214\n )\n\n\n # updown\n updown_entry.place(\n x=480, y=188\n )\n\n\n # fold\n foldmin_entry.place(\n x=740, y=181\n )\n\n foldmin_desc.place(\n x=670, y=181\n )\n\n foldmax_entry.place(\n x=740, y=214\n )\n\n foldmax_desc.place(\n x=660, y=214\n )\n\n\n # pvalue\n pvaluemin_entry.place(\n x=224, y=334\n )\n\n pvaluemax_entry.place(\n x=224, y=385\n )\n\n pvaluemin_desc.place(\n x=144, y=334\n )\n\n pvaluemax_desc.place(\n x=144, y=385\n )\n\n\n # mz entires and descs\n mzmin_entry.place(\n x=583, y=320\n )\n\n mzmin_desc.place(\n x=458, y=320\n )\n\n mzmax_entry.place(\n x=583, y=371\n )\n\n mzmax_desc.place(\n x=458, y=371\n )\n\n\n###########################################################################################\n\n\nmainloop()\n\n\n\"\"\"\nTODO:\n- make a button which opens GitHub docunentation\n- create a local README file in case the device has no internet access\n\n\"\"\"","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":15766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"89856953","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as mp\n\nfrom bokeh.io import output_file\nfrom bokeh.plotting import figure, show\nfrom bokeh.models import Dropdown\nfrom bokeh.layouts import widgetbox, row, column\n\n##### A bit of cleanup for the data #####\n\nallData = pd.read_csv(\"15min_EV_PV_homes_only.csv\") # Load all the data\nfilterData = allData[[\"car1\",\"grid\",\"solar\",\"local_15min\",\"dataid\",\"state\"]] # cutting down for the sake of runtime\nfilterData = filterData.rename(columns={\"local_15min\":\"time\"}) # this is a long name, time is better\nfilterData[\"time\"] = pd.to_datetime(filterData[\"time\"]) # change to appropriate data type\n\n##### Function with imput of the desired HOUSE, the kind of DATA, and the time AXIS and output of SERIES with these specifications\n##### House is an int, the rest are str\n##### Note that this reqires that filterData is defined already \n##### The function OUTPUTS a dataframe with columns 'data' and 'axis' to be plot easily. 
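# (sketch) The hour-of-day averaging described above shifts the grouped index
# by 5 or 6 to undo UTC; pandas can instead convert the timestamps before
# grouping. Data, date range, and zone name here are all illustrative:
import numpy as np
import pandas as pd

rng = pd.date_range('2019-05-01', periods=96, freq='15min', tz='UTC')
df = pd.DataFrame({'time': rng, 'solar': np.random.rand(len(rng))})

local = df['time'].dt.tz_convert('US/Eastern')
print(df.groupby(local.dt.hour)['solar'].mean().head())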
\n\ndef makeData(house,data,axis): # Yo caleb if you want to change the variable names to something more intuitive feel free\n houseData = filterData[filterData['dataid'] == house][[data,'time']] # first separate from everything so operations can be faster\n \n if axis == 'hour': \n groupHour = houseData.groupby(houseData['time'].dt.hour)[data].mean() \n groupHour = pd.DataFrame(groupHour)\n state = filterData[filterData['dataid'] == house].at[1,'state']\n \n if state == 'NY': # the timestamps are in UTC so I did an operation and reorganization after the groupby\n groupHour['axis'] = ['5am','6am','7am','8am','9am','10am','11am','12pm','1pm','2pm','3pm','4pm','5pm','6pm','7pm','8pm','9pm','10pm','11pm','12am','1am','2am','3am','4am'] \n groupHour.index = (groupHour.index + 5)%24 #check back on this\n \n if state == 'TX': \n groupHour['axis'] = ['6am','7am','8am','9am','10am','11am','12pm','1pm','2pm','3pm','4pm','5pm','6pm','7pm','8pm','9pm','10pm','11pm','12am','1am','2am','3am','4am''5am']\n groupHour.index = (groupHour.index + 6)%24 #check back on this\n \n groupHour = groupHour.sort_index(ascending = True)\n return groupHour\n \n if axis == 'day':\n groupDay = houseData.groupby(houseData['time'].dt.dayofyear)[data].sum()\n groupDay = pd.DataFrame(groupDay) \n groupDay['day'] = np.zeros([len(groupDay),1]) # so this is initializing a column so the formatted timestamp data can be pieced in\n groupDay['day'] = pd.to_datetime(groupDay['day']) # so the datatypes are the same\n \n for j in groupDay.index:\n groupDay.at[j,'day'] = houseData[houseData['time'].dt.dayofyear == j].at[houseData[houseData['time'].dt.dayofyear == j].index[1],\"time\"]\n # this for loop passes in the timestamps that were lost from the groupby operation\n # groupDay at this point is net values per day for all of the days. \n\n groupDay = groupDay.groupby(groupDay['day'].dt.dayofweek)[data].mean() # now average for the days of the week. 
This is the net average\n groupDay = pd.DataFrame(groupDay)\n groupDay['axis'] = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'] # axis formatting\n\n return groupDay\n\n\n\n\nhouse = 5679\ndata = 'solar'\naxis = 'hour'\n\nnyhouseSolar = makeData(house,data,axis)\n\nhourSolarPlot = figure(x_range = nyhouseSolar['axis'],\n plot_height=250, \n plot_width = 900,\n title='Average solar generation for one house from May to August', \n toolbar_location=None, tools = \"\")\n\nhourSolarPlot.vbar(x=nyhouseSolar['axis'],top = nyhouseSolar[data],width = 1)\nhourSolarPlot.y_range.start = 0\nhourSolarPlot.yaxis.axis_label = \"Solar Generation (KW)\"\n\n\n########\nhouse = 5679\ndata = 'grid'\naxis = 'hour'\n\nnyhouseGrid = makeData(house,data,axis)\n\nhourGridPlot = figure(x_range = nyhouseGrid['axis'],\n plot_height=250, \n plot_width = 900,\n title='Average power grid consumption/generation for one house (May-August)', \n toolbar_location=None, tools = \"\")\n\nhourGridPlot.vbar(x=nyhouseGrid['axis'],top = nyhouseGrid[data],width = 1)\nhourGridPlot.y_range.start = -2\nhourGridPlot.yaxis.axis_label = \"Generation/Consumption (KW)\"\n\n\n########\n\nhouse = 1222\ndata = 'car1'\naxis = 'day'\n\nnyhouseCar = makeData(house,data,axis)\n\nhourCarPlot = figure(x_range = nyhouseCar['axis'],\n plot_height=250, \n plot_width = 450,\n title='Average EV power consumption, net per day (house 1222)', \n toolbar_location=None, tools = \"\")\n\nhourCarPlot.vbar(x=nyhouseCar['axis'],top = nyhouseCar[data],width = 1)\nhourCarPlot.y_range.start = 0\nhourCarPlot.yaxis.axis_label = \"EV Consumption (KW)\"\n\n\n\n#####\n\nhouse = 5679\ndata = 'solar'\naxis = 'day'\n\nnyhousePV = makeData(house,data,axis)\n\ndayPV = figure(x_range = nyhouseCar['axis'],\n plot_height=250, \n plot_width = 450,\n title='Average PV power generation, net per day (house 5679)', \n toolbar_location=None, tools = \"\")\n\ndayPV.vbar(x=nyhousePV['axis'],top = nyhousePV[data],width = 1)\ndayPV.y_range.start = 60\ndayPV.yaxis.axis_label = \"PV Generation (KW)\"\n\n\n\nshow(column(hourSolarPlot,hourGridPlot))\n\nshow(row(hourCarPlot,dayPV))\n\n\n","sub_path":"simplefunction.py","file_name":"simplefunction.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"462444527","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 30 20:30:44 2017\n\n@author: muthu\n\"\"\"\n\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport pandas as pd\nlatLong =pd.read_csv('latlong17.csv')\nlatLong.set_index(['Country'])\n\nmapObject = Basemap(projection='mill',llcrnrlat=-60,urcrnrlat=75,\\\n llcrnrlon=-180,urcrnrlon=180,resolution='c')\nmapObject.drawcoastlines()\nmapObject.drawcountries()\nmapObject.fillcontinents(color='beige',lake_color='lightblue')\nax= plt.subplot(111)\nredPoints = []\namberPoints = []\ngreenPoints = []\nfor countries,data in latLong.iterrows():\n if data['Happiness Score'] < 4.0:\n tup = (data['Longitude'],data['Latitude'])\n redPoints.append(tup)\n elif data['Happiness Score'] > 4.0 and data['Happiness Score'] < 5.5:\n tup = (data['Longitude'],data['Latitude'])\n amberPoints.append(tup)\n else:\n tup = (data['Longitude'],data['Latitude'])\n greenPoints.append(tup)\n\nfor item in redPoints:\n redlons = [point[0] for point in redPoints]\n redlat = [point[1] for point in redPoints]\n redX,redY = mapObject(redlons, redlat)\n ax.plot(redX, redY, 'ro', color='red',markersize=5,alpha=0.1,label='Low 
\nplt.title('Predicted Happiness Map-2017')\nplt.show()","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
{"seq_id":"43547751","text":"class Solution(object):\n    def mergeSort(self, nums):\n        def myMergeSort(nums, low, high):\n            if low >= high:\n                return\n            mid = low + ((high - low) >> 1)\n            myMergeSort(nums, low, mid)\n            myMergeSort(nums, mid + 1, high)\n            merge(nums, low, mid, high)\n        \n        def merge(nums, low, mid, high):\n            temp = []\n            i, j = low, mid + 1\n            while i <= mid and j <= high:\n                if nums[i] <= nums[j]:\n                    temp.append(nums[i])\n                    i += 1\n                else:\n                    temp.append(nums[j])\n                    j += 1\n            while i <= mid:\n                temp.append(nums[i])\n                i += 1\n            while j <= high:\n                temp.append(nums[j])\n                j += 1\n            nums[low:high + 1] = temp\n\n        if nums:\n            myMergeSort(nums, 0, len(nums) - 1)\n        return nums\n\n# print(Solution().mergeSort([4,3,5,2,1]))","sub_path":"Week_08/合并排序.py","file_name":"合并排序.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
{"seq_id":"134327197","text":"# EDITED\nfrom __future__ import absolute_import, division, print_function\n\nimport time\nimport os\nimport numpy as n\nimport tensorflow as t\nt.enable_eager_execution()\n\n# data = t.keras.utils.get_file(\n#     \"shakespeare.txt\", \"https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt\")\n\n# shakespeare.txt is a plain-text dump of Shakespeare, consumed one character at a time\ndata = open(\"shakespeare.txt\", 'rb').read().decode(encoding=\"utf-8\")[:250]\n# for line in data:\n#     print(line)\n\n# Build the vocabulary: every distinct character, sorted.\nuni = sorted(set(data))\n# print(uni)\n# print('{} unique characters'.format(len(uni)))\ncool_obj = {u: i for i, u in enumerate(uni)}  # maps character -> index\n# print(cool_obj)\narr = n.array(uni)  # the vocabulary as an array (index -> character)\n# print(arr)\nnum = n.array([cool_obj[i] for i in data])\n# print(num)\n\n\nin_max = 100\nepochEx = len(data)\nfancyData = t.data.Dataset.from_tensor_slices(\n    num)  # every character of the text, mapped to its index\n# for i in fancyData.take(5):\n#     print(arr[i.numpy()])\nseq = fancyData.batch(in_max+1, drop_remainder=True)  # in groups of in_max+1 characters\n# for i in seq.take(5):\n#     print(repr(''.join(arr[i.numpy()])))\n\n\ndef divide_and_conquer(data):\n    dataIn = data[:-1]  # all but the last character\n    dataOut = data[1:]  # all but the first character\n    return dataIn, dataOut\n# print(divide_and_conquer(\"SARAH\"))\n\n\nbigData = seq.map(divide_and_conquer)\n\n# for inpu, output in bigData.take(1):\n#     print(\"IN\", repr(''.join(arr[inpu.numpy()])))\n#     print(\"OUT\", repr(''.join(arr[output.numpy()])))\n\n\n# for i, (inp, tar) in enumerate(zip(inpu[:5], output[:5])):\n#     print(\"Step {:4d}\".format(i))\n#     print(\"  input: {} ({:s})\".format(inp, repr(arr[inp])))\n#     print(\"  expected output: {} ({:s})\".format(tar, repr(arr[tar])))\n\nba = 1\nepochSt = epochEx//ba\nbuff = 10000\nbigData = 
bigData.shuffle(buff).batch(ba, drop_remainder=True)\n# bigData\n\n\nsz = len(uni)\nembDim = 256\nrnnUni = 1024\n\nif (t.test.is_gpu_available()):\n rNN = t.keras.layers.CuDNNGRU\nelse:\n import functools as f\n rNN = f.partial(t.keras.layers.GRU, recurrent_activation=\"sigmoid\")\n\n\ndef make_NN(sz=sz, embDim=embDim, rnnUni=rnnUni, ba=ba):\n m = t.keras.Sequential([t.keras.layers.Embedding(sz, embDim, batch_input_shape=[ba, None]),\n rNN(rnnUni, return_sequences=True,\n recurrent_initializer=\"glorot_uniform\", stateful=True),\n t.keras.layers.Dense(sz)\n ])\n return m\n\n\nm = make_NN()\nm.summary()\n\n\ndef loss(lab, log):\n return t.keras.losses.sparse_categorical_crossentropy(lab, log, from_logits=True)\n\n\n# for inpu, output in bigData.take(1):\n# print(\"IN\", repr(''.join(arr[inpu.numpy()])))\n# print(\"OUT\", repr(''.join(arr[output.numpy()])))\n# # for i, (inp, tar) in enumerate(zip(inpu[:5], output[:5])):\n# # print(\"Step {:4d}\".format(i))\n# # print(\" input: {} ({:s})\".format(inp, repr(arr[inp])))\n# # print(\" expected output: {} ({:s})\".format(tar, repr(arr[tar])))\nfor inp, tar in bigData.take(1):\n pred = m(inp)\n# print(pred.shape, \"# (batch_size, sequence_length, vocab_size)\")\n# samp = t.random.categorical(pred[0], num_samples=1)\n# samp = t.squeeze(samp,axis=-1).numpy()\n# samp\n# print(\"Input: \\n\", repr(\"\".join(arr[inp[0]])))\n# print()\n# print(\"Next Char Predictions: \\n\", repr(\"\".join(arr[samp ])))\n\n exBL = loss(tar, pred)\n print(exBL.numpy().mean())\n\n\nm.compile(optimizer=t.train.AdamOptimizer(), loss=loss)\n\nhome = \"./train_check\"\nchPre = os.path.join(home, \"ckpt_{epoch}\")\ncallB = t.keras.callbacks.ModelCheckpoint(\n filepath=chPre, save_weights_only=True)\n\nhist = m.fit(bigData.repeat(), epochs=1,\n steps_per_epoch=epochSt, callbacks=[callB])\n\n\ndef generate_text(model, start_string):\n # Evaluation step (generating text using the learned model)\n\n # Number of characters to generate\n num_generate = 1000\n\n # Converting our start string to numbers (vectorizing)\n input_eval = [cool_obj[s] for s in start_string]\n input_eval = t.expand_dims(input_eval, 0)\n\n # Empty string to store our results\n text_generated = []\n\n # Low temperatures results in more predictable text.\n # Higher temperatures results in more surprising text.\n # Experiment to find the best setting.\n temperature = 1.0\n\n # Here batch size == 1\n model.reset_states()\n for i in range(num_generate):\n predictions = model(input_eval)\n # remove the batch dimension\n predictions = t.squeeze(predictions, 0)\n\n # using a multinomial distribution to predict the word returned by the model\n predictions = predictions / temperature\n predicted_id = t.multinomial(predictions, num_samples=1)[-1, 0].numpy()\n\n # We pass the predicted word as the next input to the model\n # along with the previous hidden state\n input_eval = t.expand_dims([predicted_id], 0)\n\n text_generated.append(arr[predicted_id])\n\n return (start_string + ''.join(text_generated))\n\n\nprint(generate_text(m, start_string=u\"SARAH: \"))\n","sub_path":"Folder3.1/Folder3.11/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"542846284","text":"#!/bin/python3\n#This is the initial framework code for a sportsball like game\n\nimport sys, pygame\nfrom Birds import Bird\nfrom Ball import Ball\n\n\n\n\npygame.init()\n\nclockobject = 
pygame.time.Clock()\n\nsize = width, height = 1080, 720\n\n\n\nscreen = pygame.display.set_mode(size)\n\nbackground_image = pygame.image.load(\"Background1.png\")\nscreen.blit(background_image, [0, 0])\n\nbird = Bird()\nball = Ball()\n\n# This is the main loop\n\nwhile True:\n\n    clockobject.tick(30)\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT: sys.exit()\n        # note: calling pygame.event.get() again inside this loop would drain\n        # the queue and drop events, so the queue is read exactly once per frame\n\n        if event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_LEFT :\n                bird.control(-1, 0)\n            if event.key == pygame.K_RIGHT :\n                bird.control(1, 0)\n            if event.key == pygame.K_UP :\n                bird.control(0, -5)\n            if event.key == pygame.K_DOWN :\n                bird.control(0, 1)\n\n\n# Bounding\n\n    if bird.hitbox.left < 0:\n        bird.speedx = +2\n    if bird.hitbox.right > width:\n        bird.speedx = -2\n    if bird.hitbox.top < 0:\n        bird.speedz = +2\n    if (bird.hitbox.bottom > height) and (bird.speedz > 0):\n        bird.speedz = -2\n\n    if ball.rect.left < 0:\n        ball.speedx = -(ball.speedx + 3)\n    if ball.rect.right > width:\n        ball.speedx = -(ball.speedx + 3)\n    if ball.rect.top < 0:\n        ball.speedz = -(ball.speedz + 3)\n    if ball.rect.bottom > height:\n        ball.speedz = -(ball.speedz + 3)\n\n\n    if ball.rect.colliderect(bird.hitbox) == 1:\n        ball.contact(bird.rect.centerx, bird.rect.centery, ball.rect.centerx, ball.rect.centery)\n\n    # print(bird.speedx, bird.speedz)\n    screen.blit(background_image, [0, 0])\n\n    bird.update()\n    ball.update()\n    screen.blit(bird.image, bird.rect)\n    screen.blit(ball.image, ball.rect)\n    pygame.display.flip()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
{"seq_id":"305553972","text":"from keras.layers import Input, Dense\nfrom keras.models import Model\nfrom keras.models import load_model\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nencoding_dim = 64\n\nencoder = load_model('Compiled/variationalAutoencoders_noKL_encoder.h5') # change this path to the encoder you want to evaluate\n\n# Get all of the coded images from the training set\nx_train = np.genfromtxt('CRCHistoDataSets/Detection/nucleis_data.dat', delimiter=',')\nx_train = x_train.astype('float32') / 255.\nx_classi_positive = encoder.predict(x_train)\ny_classi_positive = np.ones(13400)\n\nx_train_neg = np.genfromtxt('CRCHistoDataSets/Detection/nucleis_neg_data.dat', delimiter=',')\nx_train_neg = x_train_neg.astype('float32') / 255.\nx_classi_negative = encoder.predict(x_train_neg)\ny_classi_negative = np.zeros(13400)\n\n# Get all of the coded images from the test set\nx_test = np.genfromtxt('CRCHistoDataSets/Detection/test_nucleis_data.dat', delimiter=',')\nx_test = x_test.astype('float32') / 255.\ntest_classi_positive = encoder.predict(x_test)\n\nx_test_n = np.genfromtxt('CRCHistoDataSets/Detection/test_nucleis_neg_data.dat', delimiter=',')\nx_test_n = x_test_n.astype('float32') / 255.\ntest_classi_negative = encoder.predict(x_test_n)\n\n# Build the new coded, labeled training set\nclassi_sets_x = np.concatenate((x_classi_positive,x_classi_negative), axis=0)\nclassi_sets_y = np.concatenate((y_classi_positive,y_classi_negative), axis=0)\n\n# Build a two-layer classifier on top of the encoding\ninput_img = Input(shape=(64, ))\nclassifier_l1 = Dense(32, activation='relu')(input_img)\nclassifier_l2 = Dense(8, activation='relu')(classifier_l1)\nclassifier_out = Dense(1, activation='sigmoid')(classifier_l2)\nclassifier = Model(input=input_img, output=classifier_out)\n\nclassifier.compile(optimizer='Adadelta', loss='binary_crossentropy')
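\n\n# The fit call below validates on the same arrays it trains on, so the reported\n# val_loss is not an independent estimate. A held-out split would look like this\n# (a sketch only; it assumes scikit-learn is available in this environment):\n#   from sklearn.model_selection import train_test_split\n#   x_tr, x_val, y_tr, y_val = train_test_split(classi_sets_x, classi_sets_y, test_size=0.2)\n#   classifier.fit(x_tr, y_tr, validation_data=(x_val, y_val), nb_epoch=50, batch_size=25)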
\n\nclassifier_history = classifier.fit(classi_sets_x, classi_sets_y,\n                nb_epoch=50,\n                batch_size=25,\n                shuffle=True,\n                validation_data=(classi_sets_x, classi_sets_y))\n\nclassifier.save('Compiled/Classifiers/variationalAutoencoders_noKL.h5') # change this path for the classifier being saved\n\n# Save history\nae_history = np.array(classifier_history.history['loss'])\nnp.savetxt('history_classifier_vA_noKL.csv', ae_history, delimiter=',')\n\n# Training set precision\nresult_neg = classifier.predict(x_classi_negative)\nresult_pos = classifier.predict(x_classi_positive)\nnp.savetxt('Training_rawResult_vA_noKL.csv', np.concatenate((result_pos,result_neg), axis=0), delimiter=',')\nresult_neg[result_neg<0.2] = 0\nresult_pos[result_pos>0.8] = 1\nprint('Training Sets Precision:')\nprint(np.count_nonzero(result_pos == 1) / 13400)\nprint(np.count_nonzero(result_neg == 0) / 13400)\nnp.savetxt('Training_Precision_vA_noKL.csv', np.array([np.count_nonzero(result_pos == 1), np.count_nonzero(result_neg == 0)]), delimiter=',')\n\n# Test set precision\nresult_neg = classifier.predict(test_classi_negative)\nresult_pos = classifier.predict(test_classi_positive)\nnp.savetxt('Test_rawResult_vA_noKL.csv', np.concatenate((result_pos,result_neg), axis=0), delimiter=',')\nresult_neg[result_neg<0.2] = 0\nresult_pos[result_pos>0.8] = 1\nprint('Test Sets Precision:')\nprint(np.count_nonzero(result_pos == 1) / 13400)\nprint(np.count_nonzero(result_neg == 0) / 13400)\nnp.savetxt('Test_Precision_vA_noKL.csv', np.array([np.count_nonzero(result_pos == 1), np.count_nonzero(result_neg == 0)]), delimiter=',')\n","sub_path":"variationalAutoencoders_noKL_detection.py","file_name":"variationalAutoencoders_noKL_detection.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
{"seq_id":"419387348","text":"def solution(heights):\n\tanswer = list()\n\tfor num,height in enumerate(heights):\n\t\treceived = 0\n\t\tfor i in range(num):\n\t\t\tif heights[num-(i+1)] > height:\n\t\t\t\treceived = num-(i+1) + 1\n\t\t\t\tbreak\n\t\tanswer.append(received)\n\treturn answer\n\ni = [5,4,3,2,1]\nprint(solution(i))","sub_path":"42588.py","file_name":"42588.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"602676286","text":"import sys\nfrom sklearn import tree\nimport numpy\n\ndef parseFile(fileName):\n    fileStream = open(fileName, \"r\")\n    tokens = [line.split() for line in fileStream]\n    dataPointsCount = len(tokens)-1\n    dimensionality = 4\n    X = [[0.0 for x in range(dimensionality)] for x in range(dataPointsCount)]\n    Y = [0.0 for x in range(dataPointsCount)]\n    i = 1\n    j = 1\n    while i < len(tokens):\n        while j < dimensionality+2:\n            if(j == dimensionality+1):\n                Y[i-1] = int(tokens[i][j])\n            else:\n                X[i-1][j-1] = int(tokens[i][j])\n            j = j + 1\n        j = 0\n        i = i + 1\n    return X, Y\n\ntrainingFileName = sys.argv[1]\ntestingFileName = sys.argv[2]\n\ntrainingX, trainingY = parseFile(trainingFileName)\ntestingX, testingY = parseFile(testingFileName)\n\nclassifier = tree.DecisionTreeClassifier()\nclassifier = classifier.fit(trainingX, trainingY)\n\npredictedY = classifier.predict(testingX)\ni = 0\nprint(\"Number of movies = \" + str(len(testingY)))\ntruePositives = 0\nfalsePositives = 0\ntrueNegatives = 0\nfalseNegatives = 0\n\nwhile i 0:\n    for j in range(namber_min, number_max):\n        if z < number_max:\n            if i % j == 0:\n                z += 1\n                smallest_multiple = (i-1)\n                
continue\n            else:\n                z = 0\n                break\n        else:\n            i = -1\n            break\n    i += 1\n\nprint(smallest_multiple)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n","sub_path":"Projects_Euler/Smallest_multiple.py","file_name":"Smallest_multiple.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"71718140","text":"#!/usr/bin/python2\n#################################################################################################\n####\n####\tFILE\t\t: SAUVC_qualification_vision.py\n####\tMaintainer\t: Supasan Komonlit\n####\tCreated on\t: 2019 , FEB 23\n####\tPurpose\t\t: Connects the qualification mission to the control part\n####\n#################################################################################################\nfrom __future__ import print_function\n#===============>\n\n################################# EXPAND MISSION SOLUTION #######################################\n####\tThis file is the mission for the SAUVC 2019 qualification round.\n####\tVision only gives us the centre in the x axis to steer by.\n####\tDepth is not a concern here because the pressure sensor marks it.\n####\tFinally, we also decide what to do when vision finds only a single pier.\n#################################################################################################\n\nimport\trospy\nimport\tmath\nimport\ttime\n\nfrom vision_collector\t\timport VisionCollector\n\nfrom standard_mission\t\timport StandardMission\n\nnew_pid = 1\n\nclass MissionQualification( StandardMission ):\n\t\n\tdef __init__( self , name ):\n\t\tself.name = name\n\n\t\tStandardMission.__init__( self , self.name , \"/mission/qualification\" , self.callback )\n\n\t\tself.vision = VisionCollector( \"qualification\" )\n\t\n\t\tself.state = False\n\n\t\tprint(\"MISSION QUALIFICATION FINISHED SETUP\")\n\n\tdef callback( self , message ):\n\n\t\tresult = False\n\n\t\tif( message.data and self.state ):\n\t\t\tself.echo( self.name , \"Mission qualification is already running\")\n\t\t\treturn False\n\t\telif( message.data ):\n\t\t\tself.state = True\n\t\telse:\n\t\t\tself.state = False\n\t\t\treturn False\n\n\t\t# This callback is triggered by the mission switch, so reset the target data first\n\t\tself.reset_target( \"xy\" )\n\t\tself.reset_target( \"yaw\" )\n\t\tself.reset_velocity( \"xy\" )\n\t\tself.fix_z( -0.5 )\n\n\t\tself.echo( self.name , \"START MISSION QUALIFICATION\")\n\n\t\t# At this point we do not know which picture we have to find, but we do know the\n\t\t# starting direction, so go forward until the picture is found.\n\n\t\tself.type_pier = 0 # -1 : 0 : 1 means single left : double pier : single right\n\n\t\tself.collect_state()\n\t\tself.velocity_xy( 0.3 , 0 )\n\t\tcount_have_object = 0\n\t\tstart_time = time.time()\n\t\twhile( self.ok_state() ):\n\t\t\tself.sleep( 0.05 )\n\t\t\tif( ( time.time() - start_time ) < 5 ):\n\t\t\t\tself.echo( self.name , \"Now time is \" + str( time.time() - start_time ))\n\t\t\t\tcontinue\t\n\t\t\tself.vision.analysis_all( \"qualification\" , \"sevinar\" , 5 )\n\t\t\tself.echo_vision( self.vision.echo_data() )\n\t\t\tif( self.vision.have_object() ):\n\t\t\t\tcount_have_object += 1\n\t\t\t\tif( count_have_object == 6 ):\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcount_have_object = 0\n\t\t\tself.echo( self.name , \"distance : count_have_object are \" + \n\t\t\t\t\tstr( self.distance() ) + str( count_have_object ) )\n\t\t\tif( self.distance() > 4.5 ):\n\t\t\t\tself.reset_velocity( \"xy\" )\n\t\t\t\tself.velocity_xy( 0.1 , 0 )\n\t\t# we want to be sure the object was seen over several consecutive rounds; next we move according to the vision data
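\n\t\t# Requiring six consecutive detections (count_have_object == 6) debounces the\n\t\t# vision stream: a single noisy frame cannot start the approach, and any miss\n\t\t# resets the count to zero.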
\n\t\tself.reset_velocity( \"xy\" )\n\t\tself.fix_z( -0.3 )\n\n\t\tself.vision.analysis_all( \"qualification\" , \"sevinar\" , 5 )\n\n\t\t# run_type values:\n\t\t#\t1 <== found only a single pier\t\t# 2 <== found the double pier\n\t\t#\t3 <== last-move solution after a single pier was found\n\t\t#\t0 <== give up and fall through\n\n\t\tif( self.vision.num_object() == 2 ): self.run_type = 2\n\t\telif( self.vision.num_object() == 1 ): self.run_type = 1\n\t\telse: self.run_type = 0 \n\n\t\tself.echo( self.name , \"We will start handling run_type : \" + str( self.run_type) )\n\n\t\twhile( self.ok_state() ):\t\n\t\t\tif( self.run_type == 1 ): self.type_1()\n\t\t\telif( self.run_type == 2 ): self.type_2()\n\t\t\telif( self.run_type == 3 ): self.type_3()\n\t\t\telse: break\n\n\t\tself.echo( self.name , \"Finished run callback; always responds TRUE\")\n\t\treturn True\n\n\tdef type_1( self ):\n\t\tif( self.type_pier == 0 ):\n\t\t\tself.vision.analysis_all( \"qualification\" , \"sevinar\" , 5 )\n\t\t\tself.echo_vision( self.vision.echo_specific() )\n\t\t\tif( self.vision.num_object() == 1 ):\n\t\t\t\tif( self.vision.center_x() < 0 ): \n\t\t\t\t\tself.echo( self.name , \"I decide that this is the left pier\")\n\t\t\t\t\tself.type_pier = -1\n\t\t\t\telse:\n\t\t\t\t\tself.echo( self.name , \"I decide that this is the right pier\")\n\t\t\t\t\tself.type_pier = 1\n\t\t\telif( self.vision.num_object() == 2 ):\n\t\t\t\tself.type_pier = 0\n\t\t\t\tself.echo( self.name , \"Unexpectedly found 2 piers; switching to type_2\")\n\t\t\t\tself.run_type = 2\n\t\t\t\treturn 0\n\t\t\n\t\tcount_not_single = 0\n\t\tcount_not_found = 0\n\t\twhile( self.ok_state() ):\n\t\t\tself.sleep( 0.05 )\n\t\t\tself.vision.analysis_all( \"qualification\" , \"sevinar\" , 5 )\n\t\t\tself.echo_vision( self.vision.echo_data() )\n\t\t\tif( self.vision.num_object() == 1 ):\n\t\t\t\tcount_not_single = 0\n\t\t\t\tcount_not_found = 0\n\t\t\t\tif( abs(self.vision.center_x() ) < 0.3 ):\n\t\t\t\t\tself.velocity( {'x' : 0.10 } )\n\t\t\t\t\tself.echo( self.name , \"type_1 object : \" + str( self.type_pier ) + \n\t\t\t\t\t\t\t\" We decide to move forward\" )\n\t\t\t\telif( self.vision.center_x() < 0 ):\n\t\t\t\t\tself.velocity( {'y' : 0.1 } )\n\t\t\t\t\tself.echo( self.name , \"type_1 object : \" + str( self.type_pier ) + \n\t\t\t\t\t\t\t\" We decide to move left\" )\n\t\t\t\telif( self.vision.center_x() > 0 ):\n\t\t\t\t\tself.velocity( {'y' : -0.1 } )\n\t\t\t\t\tself.echo( self.name , \"type_1 object : \" + str( self.type_pier ) +\n\t\t\t\t\t\t\t\" We decide to move right\" )\n\t\t\t\telse:\n\t\t\t\t\tself.echo( self.name , \"BUG ON LINE 127\" )\n\t\t\t\t\tself.run_type = 0\n\t\t\t\t\tbreak\n\t\t\t\tself.echo_vision( self.vision.echo_specific() )\n\t\t\t\tif( self.vision.distance_x() > 0.1 ):\n\t\t\t\t\tself.echo( self.name , \"Now past the distance threshold; switch to the last move\")\n\t\t\t\t\tself.run_type = 3\n\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\telif( self.vision.num_object() == 2 ):\n\t\t\t\tself.velocity( { 'y' : math.copysign( 0.1 , self.vision.center_x() * -1 ) } )\n\t\t\t\tself.run_type = 2\n\t\t\t\tself.type_pier = 0\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcount_not_found += 1\n\t\t\t\tself.echo( self.name , \"type_1 not_found pier : \" + str( count_not_found ) )\n\t\t\t\tif( count_not_found == 5 ):\n\t\t\t\t\tself.run_type = 0\n\t\t\t\t\tself.relative_xy( 5 , math.copysign( 1.5, self.type_pier ) )\n\t\t\t\t\tself.echo( self.name , \"We decide to last move side is \" + \n\t\t\t\t\t\t\tstr(math.copysign( 1.5 , -1*self.type_pier) ) )
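\n\t\t\t\t\t# FIXME: the commanded side uses copysign(1.5, self.type_pier) while the\n\t\t\t\t\t# echo reports copysign(1.5, -1*self.type_pier); one of the two signs is\n\t\t\t\t\t# likely inverted and should be reconciled.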
\n\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\n\tdef type_2( self ):\n\t\tcount_not_found = 0\n\t\tcount_not_double = 0\n\t\tcurrent_fix_velocity = False\n\t\twhile( self.ok_state() ):\n\t\t\tself.sleep( 0.1 )\n\t\t\tself.vision.analysis_all( \"qualification\" , \"sevinar\" , 5 )\n\t\t\tself.echo_vision( self.vision.echo_data() )\n\t\t\tif( self.vision.num_object() == 2 ):\n\t\t\t\tcount_not_double = 0\n\t\t\t\tcount_not_found = 0\n\t\t\t\tif( abs( self.vision.center_x() ) < 0.10 ):\n\t\t\t\t\tif( not current_fix_velocity ):\n\t\t\t\t\t\tself.velocity_xy( 0.2 , 0 )\n\t\t\t\t\t\tcurrent_fix_velocity = True\n\t\t\t\t\tself.echo( self.name , \"type_2 we move straight at a fixed velocity\")\n\t\t\t\telif( self.vision.center_x() < 0 ):\n\t\t\t\t\tif( current_fix_velocity ):\n\t\t\t\t\t\tcurrent_fix_velocity = False\n\t\t\t\t\t\tself.reset_velocity( \"xy\" )\n\t\t\t\t\tself.velocity( {'y' : 0.1 } )\n\t\t\t\t\tself.echo( self.name , \"type_2 we move left\")\n\t\t\t\telif( self.vision.center_x() > 0 ):\n\t\t\t\t\tif( current_fix_velocity ):\n\t\t\t\t\t\tcurrent_fix_velocity = False\n\t\t\t\t\t\tself.reset_velocity( \"xy\" )\n\t\t\t\t\tself.velocity( {'y' : -0.1})\n\t\t\t\t\tself.echo( self.name , \"type_2 we move right\")\n\t\t\t\tself.echo_vision( self.vision.echo_specific() )\n\t\t\t\tif( self.vision.distance_x() > 1.0 ):\n\t\t\t\t\tself.echo( self.name , \"Now past the distance threshold; we decide to only move forward\")\n\t\t\t\t\tself.run_type = 0\n\t\t\t\t\tif( not current_fix_velocity ):\n\t\t\t\t\t\tself.velocity_xy( 0.3 , 0 )\n\t\t\t\t\t\tself.echo( self.name , \"We order a constant velocity x of 0.3\")\n\t\t\t\t\t\tcurrent_fix_velocity = True\n\t\t\t\t\tbreak\n\t\t\telif( self.vision.num_object() == 1 ):\n\t\t\t\tif( current_fix_velocity ):\n\t\t\t\t\tcurrent_fix_velocity = False\n\t\t\t\t\tself.reset_velocity( \"xy\") \n\t\t\t\tcount_not_double += 1\n\t\t\t\tself.echo( self.name , \"Type 2 we found only a single pier : \" + \n\t\t\t\t\t\tstr( count_not_double) )\n\t\t\t\tif( count_not_double == 5 ):\n\t\t\t\t\tself.echo( self.name , \"We change to mode type 1 single pier\")\n\t\t\t\t\tself.run_type = 1\n\t\t\t\t\tself.type_pier = 0\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcount_not_found += 1\n\t\t\t\tself.echo( self.name , \"Type 2 we did not find a pier : \" + str( count_not_found ) )\n\t\t\t\tif( count_not_found == 5 ):\n\t\t\t\t\tself.echo( self.name , \"Last move\")\n\t\t\t\t\tself.run_type = 0\n\t\t\t\t\tself.type_pier = 0\n\t\t\t\t\tif( not current_fix_velocity ):\n\t\t\t\t\t\tself.velocity_xy( 0.3 , 0 )\n\t\t\t\t\t\tself.echo( self.name , \"I don't know what I should do - FORWARD!\")\n\t\t\t\t\t\tcurrent_fix_velocity = True\n\t\t\t\t\tbreak\n\n\tdef type_3( self ):\t\n\t\tself.echo( self.name , \"We move in last mode type_3 and type pier is \" + \n\t\t\t\tstr(self.type_pier ) )\n\t\t# In this step we slide sideways until the pier is no longer seen, then keep\n\t\t# sliding a little longer before moving forward.\n\t\tcount_not_found = 0\n\t\twhile( self.ok_state() ):\n\t\t\tself.sleep( 0.1 )\n\t\t\tself.vision.analysis_all( \"qualification\" , \"sevinar\" , 5 )\n\t\t\tself.echo( self.name , \"Type 3, sliding until no object is seen : \" + \n\t\t\t\t\tstr(count_not_found) )\n\t\t\tself.velocity( {'y' : math.copysign( 0.10 , self.type_pier )} )\t\n\t\t\tif( self.vision.have_object() ):\n\t\t\t\tcount_not_found = 0\n\t\t\telse:\n\t\t\t\tcount_not_found += 1\n\t\t\tif( count_not_found == 5 ):\n\t\t\t\tbreak\n\t\tstart_time = time.time()\n\t\tself.echo( self.name , \"Move sideways for 5 seconds\")\n\t\twhile( self.ok_state() ):\n\t\t\tself.sleep( 0.1 
)\n\t\t\tdiff_time = time.time() - start_time\n\t\t\tself.velocity( {'y' : math.copysign( 0.10 , self.type_pier )} )\t\n\t\t\tself.echo( self.name , \"Type3 now move time is \" + str( diff_time ) )\n\t\t\tif( diff_time > 5 ): break\n\t\tself.echo( self.name , \"Time out move forward\" )\n\t\tself.velocity_xy( 0.3 , 0 )\n\t\tself.run_type = 0\t\n\t\t\t\nif __name__ == \"__main__\":\n\trospy.init_node(\"mission_qualification\")\n\tMQ = MissionQualification( \"mission_qualification\" )\n\trospy.spin()\t\n","sub_path":"zeabus_mission/scripts/SAUVC_qualification_vision.py","file_name":"SAUVC_qualification_vision.py","file_ext":"py","file_size_in_byte":9525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"193094401","text":"'''\n99. Recover Binary Search Tree\n\nTwo elements of a binary search tree (BST) are swapped by mistake.\n\nRecover the tree without changing its structure.\n'''\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def recoverTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: void Do not return anything, modify root in-place instead.\n \"\"\"\n# idea: use dfs to check whether root.val is between left and right subtree\n if not root:\n return\n if root.left and root.right and root.left.val < root.val < root.right.val: # no need to swap nodes\n self.recoverTree(root.left)\n self.recoverTree(root.right)\n # swap with most right in left subtree\n if root.left and root.val < root.left.val:\n leftSub = root.left\n while leftSub.right:\n leftSub = leftSub.right\n root.val, leftSub.val = leftSub.val, root.val\n self.recoverTree(root.left)\n\n # swap with most right in left subtree\n if root.right and root.val > root.right.val:\n rightSub = root.right\n while rightSub.left:\n rightSub = rightSub.left\n root.val, rightSub.val = rightSub.val, root.val\n self.recoverTree(root.right)\n\nif __name__ == '__main__':\n root = TreeNode(0)\n root.left = TreeNode(1)\n res = Solution().recoverTree(root)\n while res:\n print(res.val)\n res = res.left\n\n","sub_path":"99_recoverTree.py","file_name":"99_recoverTree.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"113959288","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path(\"listagents/\", views.list_agents),\n    path(\"byclient/<client>/\", views.by_client),\n    path(\"bysite/<client>/<site>/\", views.by_site),\n    path(\"overdueaction/\", views.overdue_action),\n    path(\"sendrawcmd/\", views.send_raw_cmd),\n    path(\"<int:pk>/agentdetail/\", views.agent_detail),\n    path(\"<int:pk>/meshtabs/\", views.meshcentral_tabs),\n    path(\"<int:pk>/takecontrol/\", views.take_control),\n    path(\"poweraction/\", views.power_action),\n    path(\"uninstallagent/\", views.uninstall_agent),\n    path(\"editagent/\", views.edit_agent),\n    path(\"<int:pk>/geteventlog/<logtype>/<days>/\", views.get_event_log),\n    path(\"getagentversions/\", views.get_agent_versions),\n    path(\"updateagents/\", views.update_agents),\n    path(\"<int:pk>/getprocs/\", views.get_processes),\n    path(\"<int:pk>/<pid>/killproc/\", views.kill_proc),\n]\n","sub_path":"api/tacticalrmm/agents/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"135308787","text":"\"\"\"\n# Tests for ml-logger.\n\n## Testing with a server\n\nTo test with a live server, first run (in a separate console)\n```\npython -m ml_logger.server --log-dir /tmp/ml-logger-debug\n```\nor do:\n```bash\nmake start-test-server\n```\n\nThen run this test script with the option:\n```bash\npython -m pytest tests --capture=no --log-dir http://0.0.0.0:8081\n```\nor do\n```bash\nmake test-with-server\n```\n\"\"\"\nimport pytest\nfrom time import sleep\nfrom os.path import join as pathJoin\nfrom ml_logger import logger, Color, metrify\nfrom ml_logger.helpers.color_helpers import percent\n\n\n# from tests.conftest import LOCAL_TEST_DIR\n\n\n@pytest.fixture(scope='session')\ndef log_dir(request):\n    return request.config.getoption('--log-dir')\n\n\n@pytest.fixture(scope=\"session\")\ndef setup(log_dir):\n    logger.configure(log_dir, prefix='main_test_script')\n    logger.remove('')\n    logger.log_line('hey')\n    logger.log_data(dict(test=True), \"test-data/dict.pkl\")\n\n    print(f\"logging to {pathJoin(logger.root_dir, logger.prefix)}\")\n\n\ndef test_glob(setup):\n    kwargs = dict(query=\"*.pkl\", wd=\"test-data\")\n    print(f'globbing {kwargs[\"query\"]} under {kwargs[\"wd\"]}')\n    file_paths = logger.glob(**kwargs)\n    print(f\"globbed file paths: {[file_paths]}\")\n\n\ndef test_save_and_load_pkl(setup):\n    import numpy\n    d1 = numpy.random.randn(20, 10)\n    logger.log_data(d1, 'test_file.pkl')\n    sleep(1.0)\n    d2 = numpy.random.randn(20, 10)\n    logger.log_data(d2, 'test_file.pkl')\n    sleep(1.0)\n\n    data = logger.load_pkl('test_file.pkl')\n    assert len(data) == 2, \"data should contain two arrays\"\n    assert numpy.array_equal(data[0], d1), \"first should be the same as d1\"\n    assert numpy.array_equal(data[1], d2), \"first should be the same as d2\"\n\n\n# def __save_unavailable_pkl():\n#     import pickle\n#     from uvpn.domains.sawyer import GoalImg\n#     config = dict(wrappers=[GoalImg, ])\n#     logger.log_data(config, 'demo_config.pkl', overwrite=True)\n#     return pickle.dumps(config)\n#\n#\n# # todo: check if this is actually testing correctly\n# def test_pkl_with_unavailable_modules():\n#     agent = logger.load_pkl('demo_config.pkl')\n\n\ndef test_log_data(setup):\n    import numpy\n    d1 = numpy.random.randn(20, 10)\n    logger.log_data(d1, 'test_file.pkl')\n    sleep(1.0)\n    d2 = numpy.random.randn(20, 10)\n    logger.log_data(d2, 'test_file.pkl', overwrite=True)\n    sleep(1.0)\n\n    data = logger.load_pkl('test_file.pkl')\n    assert len(data) == 1, \"data should contain only one array because we overwrote it.\"\n    assert numpy.array_equal(data[0], d2), 
\"first should be the same as d2\"\n\n\ndef test(setup):\n d = Color(3.1415926, 'red')\n s = \"{:.1}\".format(d)\n\n logger.log_params(G=dict(some_config=\"hey\"))\n logger.log(step=0, some=Color(0.1, 'yellow'))\n logger.log(step=1, some=Color(0.28571, 'yellow', lambda v: \"{:.5f}%\".format(v * 100)))\n logger.log(step=2, some=Color(0.85, 'yellow', percent))\n logger.log({\"some_var/smooth\": 10}, some=Color(0.85, 'yellow', percent), step=3)\n logger.log(step=4, some=Color(10, 'yellow'))\n\n\ndef test_json(setup):\n a = dict(a=0)\n logger.save_json(dict(a=0), \"data/d.json\")\n b = logger.load_json(\"data/d.json\")\n assert a == b, \"a and b should be the same\"\n\n\ndef test_yaml(setup):\n a = dict(a=0)\n logger.save_yaml(a, \"data/d.yaml\")\n b = logger.load_yaml(\"data/d.yaml\")\n assert a == b, \"a and b should be identical\"\n\n\ndef test_image(setup):\n import scipy.misc\n import numpy as np\n\n image_bw = np.zeros((64, 64, 1), dtype=np.uint8)\n image_bw_2 = scipy.misc.face(gray=True)[::4, ::4]\n image_rgb = np.zeros((64, 64, 3), dtype=np.uint8)\n image_rgba = scipy.misc.face()[::4, ::4, :]\n logger.save_image(image_bw, \"black_white.png\")\n logger.save_image(image_bw_2, \"bw_face.png\")\n logger.save_image(image_rgb, 'rgb.png')\n logger.save_image(image_rgba, f'rgba_face_{100}.png')\n logger.save_image(image_bw, f\"bw_{100}.png\")\n logger.save_image(image_rgba, f\"rbga_{100}.png\")\n\n logger.save_image(image_bw[:, :, 0].astype(np.float32), \"black_white_individual.png\", normalize='individual')\n logger.save_image(np.ones([64, 64]), \"black_white_grid.png\", normalize='grid')\n\n\ndef test_pyplot(setup):\n import scipy.misc\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n import numpy as np\n\n face = scipy.misc.face()\n logger.save_image(face, \"face.png\")\n\n fig = plt.figure(figsize=(4, 2))\n xs = np.linspace(0, 5, 1000)\n plt.plot(xs, np.cos(xs))\n logger.savefig(\"face_02.png\", fig=fig)\n plt.close()\n\n fig = plt.figure(figsize=(4, 2))\n xs = np.linspace(0, 5, 1000)\n plt.plot(xs, np.cos(xs))\n logger.savefig('sine.pdf')\n\n\ndef test_video(setup):\n import numpy as np\n\n def im(x, y):\n canvas = np.ones((640, 480), dtype=np.float32) * 0.001\n for i in range(200):\n for j in range(200):\n if x - 5 < i < x + 5 and y - 5 < j < y + 5:\n canvas[i, j] = 1\n return canvas\n\n frames = [im(100 + i, 80) for i in range(20)]\n\n logger.save_video(frames, \"test_video.mp4\")\n\n\ndef test_video_gif(setup):\n import numpy as np\n\n def im(x, y):\n canvas = np.zeros((200, 200))\n for i in range(200):\n for j in range(200):\n if x - 5 < i < x + 5 and y - 5 < j < y + 5:\n canvas[i, j] = 1\n return canvas\n\n frames = [im(100 + i, 80) for i in range(20)]\n\n logger.save_video(frames, \"test_video.gif\")\n\n\ndef test_load_params(setup):\n pass\n\n\ndef test_diff(setup):\n logger.diff()\n\n\ndef test_git_rev(setup):\n print([logger.__head__])\n\n\ndef test_git_tags(setup):\n print([logger.__tags__])\n\n\ndef test_current_branch(setup):\n print([logger.__current_branch__])\n\n\ndef test_hostname(setup):\n assert len(logger.hostname) > 0, 'hostname should be non-trivial'\n print([logger.hostname])\n\n\ndef test_split(setup):\n assert logger.split() is None, 'The first tick should be None'\n assert type(logger.split()) is float, 'Then it should return a a float in the seconds.'\n\n\ndef test_ping(setup):\n print('test ping starts')\n signals = logger.ping('alive', 0.1)\n print(f\"signals => {signals}\")\n sleep(0.2)\n signals = logger.ping('alive', 0.2)\n 
print(f\"signals => {signals}\")\n\n logger.client.send_signal(logger.prefix, signal=\"stop\")\n sleep(0.25)\n logger.client.send_signal(logger.prefix, signal=\"pause\")\n sleep(0.15)\n\n for i in range(4):\n signals = logger.ping('other ping')\n print(f\"signals => {signals}\")\n sleep(0.4)\n\n logger.ping('completed')\n\n\ndef test_metrify():\n import numpy as np\n d = np.array(10)\n assert metrify(d) == 10\n d = np.array(10.0)\n assert metrify(d) == 10.0\n d = np.array([10.0, 2])\n assert metrify(d) == [10.0, 2]\n d = np.array([10.0, 2])\n assert metrify(d) == [10.0, 2]\n\n\ndef test_every():\n acc = sum([i for i in range(100) if logger.every(10)])\n assert acc == sum(list(range(100))[9::10])\n\n i_sum, j_sum = 0, 0\n for i in range(100):\n for j in range(100):\n if logger.every(5, \"j\"):\n j_sum += j\n if logger.every(50, \"i\"):\n i_sum += i\n assert i_sum == 4950 * 2, \"i should be summed twice each iteration\"\n assert j_sum == sum(list(range(4, 100, 5))) * 100, \"j should be the sum ⨉ 100\"\n\n\ndef test_timing():\n with logger.time(\"upload files\"):\n import time\n time.sleep(0.1)\n\n for i in range(100):\n with logger.time(\"upload files\", interval=50):\n import time\n time.sleep(0.001)\n\n\ndef test_capture_error():\n with logger.capture_error():\n raise RuntimeError(\"this should not fail\")\n\n logger.print(\"works!\", color=\"green\")\n\n\nif __name__ == \"__main__\":\n # setup(LOCAL_TEST_DIR)\n # test(None)\n # test_video(None)\n # test_video_gif(None)\n test_every()\n","sub_path":"ml_logger/ml_logger_tests/test_ml_logger.py","file_name":"test_ml_logger.py","file_ext":"py","file_size_in_byte":7879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"518925113","text":"\ndef build_simulation(handler):\n def wrapper(type='full_path', **params):\n if(type == 'last_value'):\n return get_last_values(handler, **params)\n else:\n return get_full_paths(handler, **params)\n return wrapper\n\n\ndef get_full_paths(handler, starting_value=0, iterations=0, steps=0, **params):\n paths = []\n state = {}\n for iteration in range(iterations):\n path = [starting_value]\n for step in range(steps):\n result = handler(**{\n 'previous_value': path[step],\n 'current_path': path,\n **params,\n **state\n })\n path.append(result['value'])\n state = result['state']\n paths.append(path)\n return paths\n\n\ndef get_last_values(handler, starting_value=0, iterations=0, steps=0, **params):\n paths = []\n state = {}\n for iteration in range(iterations):\n last_val = starting_value\n for step in range(steps):\n result = handler(**{'previous_value': last_val, **params, **state})\n last_val = result['value']\n state = result['state']\n paths.append(last_val)\n return paths\n\n\ndef result(value, state={}):\n return {\n 'value': value,\n 'state': state\n }\n","sub_path":"walkabout/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"274717765","text":"import sys\nimport logging\n\nFILE = 1\nCONSOLE = 2\nFILE_CONSOLE = 3\n\n\ndef create_logger(name, mode=CONSOLE, level=logging.DEBUG):\n logger = logging.getLogger(name)\n logger.setLevel(level)\n formatter = logging.Formatter('[%(asctime)s] [%(filename)s %(funcName)s:%(lineno)d] [%(levelname)s] %(message)s')\n\n if mode == 2 or mode == 3:\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n if mode == 1 
or mode == 3:\n fh = logging.FileHandler('output.log')\n fh.setLevel(level)\n\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n if mode == CONSOLE:\n logger.info(\"Initializing logger in console mode\")\n elif mode == FILE:\n logger.info(\"Initializing logger in file mode\")\n elif mode == FILE_CONSOLE:\n logger.info(\"Initializing logger in file and console mode\")\n else:\n raise Exception(\"Invalid logging mode specified\")\n\n logger.info(\"Logger initialized.\")\n\n return logger\n\n\ndef delete_old_log():\n open('output.log', 'w').close()","sub_path":"v0/customlogger.py","file_name":"customlogger.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"485012807","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\" The VGG model is adapted from http://torch.ch/blog/2015/07/30/cifar.html.\nThe best validation accuracy we achieved is about 89% without data augmentation.\nThe performance could be improved by tuning some hyper-parameters, including\nlearning rate, weight decay, max_epoch, parameter initialization, etc.\n\"\"\"\nfrom __future__ import print_function\nfrom builtins import zip\n\nfrom singa import layer\nfrom singa import initializer\nfrom singa import metric\nfrom singa import loss\nfrom singa import net as ffnet\n\n# ffnet.verbose=True\n\n\ndef ConvBnReLU(net, name, nb_filers, sample_shape=None):\n net.add(layer.Conv2D(name + '_1', nb_filers, 3, 1, pad=1,\n input_sample_shape=sample_shape))\n net.add(layer.BatchNormalization(name + '_2'))\n net.add(layer.Activation(name + '_3'))\n\n\ndef create_net(use_cpu=False):\n if use_cpu:\n layer.engine = 'singacpp'\n net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())\n ConvBnReLU(net, 'conv1_1', 64, (3, 32, 32))\n net.add(layer.Dropout('drop1', 0.3))\n ConvBnReLU(net, 'conv1_2', 64)\n net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))\n ConvBnReLU(net, 'conv2_1', 128)\n net.add(layer.Dropout('drop2_1', 0.4))\n ConvBnReLU(net, 'conv2_2', 128)\n net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))\n ConvBnReLU(net, 'conv3_1', 256)\n net.add(layer.Dropout('drop3_1', 0.4))\n ConvBnReLU(net, 'conv3_2', 256)\n net.add(layer.Dropout('drop3_2', 0.4))\n ConvBnReLU(net, 'conv3_3', 256)\n net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))\n ConvBnReLU(net, 'conv4_1', 512)\n net.add(layer.Dropout('drop4_1', 0.4))\n ConvBnReLU(net, 'conv4_2', 512)\n net.add(layer.Dropout('drop4_2', 0.4))\n ConvBnReLU(net, 'conv4_3', 512)\n net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))\n ConvBnReLU(net, 'conv5_1', 512)\n net.add(layer.Dropout('drop5_1', 0.4))\n 
ConvBnReLU(net, 'conv5_2', 512)\n net.add(layer.Dropout('drop5_2', 0.4))\n ConvBnReLU(net, 'conv5_3', 512)\n net.add(layer.MaxPooling2D('pool5', 2, 2, border_mode='valid'))\n net.add(layer.Flatten('flat'))\n net.add(layer.Dropout('drop_flat', 0.5))\n net.add(layer.Dense('ip1', 512))\n net.add(layer.BatchNormalization('batchnorm_ip1'))\n net.add(layer.Activation('relu_ip1'))\n net.add(layer.Dropout('drop_ip2', 0.5))\n net.add(layer.Dense('ip2', 10))\n print('Start intialization............')\n for (p, name) in zip(net.param_values(), net.param_names()):\n print(name, p.shape)\n if 'mean' in name or 'beta' in name:\n p.set_value(0.0)\n elif 'var' in name:\n p.set_value(1.0)\n elif 'gamma' in name:\n initializer.uniform(p, 0, 1)\n elif len(p.shape) > 1:\n if 'conv' in name:\n initializer.gaussian(p, 0, 3 * 3 * p.shape[0])\n else:\n p.gaussian(0, 0.02)\n else:\n p.set_value(0)\n print(name, p.l1())\n\n return net\n","sub_path":"examples/cifar10/vgg.py","file_name":"vgg.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"426110594","text":"# tag\n\nboard=input().split(\",\")\nxNum = sum([sum([x == 'X' for x in line]) for line in board])\noNum = sum([sum([x == 'O' for x in line]) for line in board])\n\nif (xNum != oNum) and (xNum - oNum != 1):\n print(\"False\")\nelse:\n \n # 这一段来自checkIO上x-o-referee最简洁的解法\n cols = map(''.join, zip(*board))\n diag = map(''.join, zip(*[(r[i], r[2 - i]) for i, r in enumerate(board)]))\n lines = board + list(cols) + list(diag)\n\n oWin = True if 'OOO' in lines else False\n xWin = True if 'XXX' in lines else False\n\n if oWin and xWin:\n print(\"False\")\n elif oWin and (xNum != oNum):\n print(\"False\")\n elif xWin and (xNum - oNum != 1):\n print(\"False\")\n else:\n print(\"True\")","sub_path":"Code/CodeRecords/2239/60797/317596.py","file_name":"317596.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"633041554","text":"from django.db.models import Q\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom .models import News, NewsCategory\nfrom .serializers import NewsSerializer, NewsCategorySerializer, SubNewsCategorySerializer\n\n\nclass NewsView(APIView):\n def get(self, request):\n slide_news = News.objects.filter(is_slide=1)\n slideserializer = NewsSerializer(slide_news, many=True)\n top_news = News.objects.all().order_by('create_time')[0:10]\n topserializer = NewsSerializer(top_news, many=True)\n image_news = News.objects.exclude(img_url='').filter(Q(img_url__isnull=False)).order_by('click')[0:4]\n imageserializer = NewsSerializer(image_news, many=True)\n data = {\n 'slide_news': slideserializer.data,\n 'top_news': topserializer.data,\n 'image_news': imageserializer.data,\n }\n return Response(data)\n\n\nclass NewsCategoryView(APIView):\n def get(self, request):\n\n cat = NewsCategory.objects.filter(parent_id=0)\n serializer = SubNewsCategorySerializer(cat, many=True)\n\n for x in serializer.data:\n news = []\n categories = x['id']\n category = NewsCategory.objects.filter(parent_id=categories)\n if len(category) == 3:\n command_news = News.objects.filter(\n Q(category_id=category[0]) | Q(category_id=category[1]) | Q(category_id=category[2])).order_by(\n \"-create_time\")\n else:\n command_news = 
News.objects.filter(\n Q(category_id=category[0]) | Q(category_id=category[1])).order_by(\n \"-create_time\")\n for y in category:\n new = News.objects.filter(Q(category_id=y) & Q(img_url__isnull=False))\n serializer1 = NewsSerializer(new, many=True)\n\n for z in serializer1.data:\n if z['img_url'] == '':\n continue\n news.append(z)\n serializer2 = NewsSerializer(command_news, many=True)\n x['news'] = news[0:4]\n\n x['top8'] = serializer2.data\n\n return Response(serializer.data)\n","sub_path":"cms/cms/apps/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"529236288","text":"import os\nimport sys\nimport pathlib\nimport supervisely_lib as sly\n\nmy_app = sly.AppService()\napi = my_app.public_api\ntask_id = my_app.task_id\n\nroot_source_path = str(pathlib.Path(sys.argv[0]).parents[2])\nsly.logger.info(f\"Root source directory: {root_source_path}\")\nsys.path.append(root_source_path)\nsource_path = str(pathlib.Path(sys.argv[0]).parents[0])\nsly.logger.info(f\"App source directory: {source_path}\")\nsys.path.append(source_path)\nui_sources_dir = os.path.join(source_path, \"ui\")\nsly.logger.info(f\"UI source directory: {ui_sources_dir}\")\nsys.path.append(ui_sources_dir)\nsly.logger.info(f\"Added to sys.path: {ui_sources_dir}\")\n\nowner_id = int(os.environ['context.userId'])\nteam_id = int(os.environ['context.teamId'])\nproject_id = int(os.environ['modal.state.slyProjectId'])\nworkspace_id = int(os.environ['context.workspaceId'])\n\nproject_info = api.project.get_info_by_id(project_id)\nif project_info is None: # for debug\n raise ValueError(f\"Project with id={project_id} not found\")\n\nproject_meta: sly.ProjectMeta = sly.ProjectMeta.from_json(my_app.public_api.project.get_meta(project_id))\n\nmodel_info = None\nmodel_meta: sly.ProjectMeta = None\n\ndef finish_step(step_num):\n next_step = step_num + 1\n fields = [\n {\"field\": f\"data.done{step_num}\", \"payload\": True},\n {\"field\": f\"state.collapsed{next_step}\", \"payload\": False},\n {\"field\": f\"state.disabled{next_step}\", \"payload\": False},\n {\"field\": f\"state.activeStep\", \"payload\": next_step},\n ]\n api.app.set_field(task_id, \"data.scrollIntoView\", f\"step{next_step}\")\n api.app.set_fields(task_id, fields)","sub_path":"src/sly_globals.py","file_name":"sly_globals.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"144504151","text":"\"\"\"\nQ031 Next Permutation\nMedium\n\nArray;\n\nthis solution passed but it's not in place!\n\nImplement next permutation, which rearranges numbers\ninto the lexicographically next greater permutation of numbers.\n(that means the order in dictionary)\n\nIf such arrangement is not possible, it must rearrange it\nas the lowest possible order (ie, sorted in ascending order).\n\nThe replacement must be in-place and use only constant extra\nmemory.\n\nHere are some examples. 
Inputs are in the left-hand column\nand its corresponding outputs are in the right-hand column.\n\n1,2,3 → 1,3,2\n3,2,1 → 1,2,3\n1,1,5 → 1,5,1\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def nextPermutation(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n i = j = len(nums) - 1\n\n while True:\n if i > 0 and nums[i-1] >= nums[i]:\n i -= 1\n else:\n break\n\n while True:\n if j >= 0 and nums[j] <= nums[i-1]:\n j -= 1\n else:\n break\n\n if i == 0:\n nums.reverse()\n else:\n nums[i-1], nums[j] = nums[j], nums[i-1]\n nums[i:] = nums[i:][::-1]\n\n\na = [1,1,1]\nb= [1,2,3]\nc = [2, 1, 3]\nd = [3, 2, 1]\nsol = Solution()\nsol.nextPermutation(d)\nprint(d)","sub_path":"Q031-v2.py","file_name":"Q031-v2.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"481440289","text":"from coffea import hist\nfrom coffea.analysis_objects import JaggedCandidateArray\nimport coffea.processor as processor\nfrom awkward import JaggedArray\nimport numpy as np\n\nclass AnalyzerProcessor(processor.ProcessorABC):\n def __init__(self):\n dataset_axis = hist.Cat(\"dataset\", \"Primary dataset\")\n\n muon_pt_axis = hist.Bin(\"pt\", r\"$p_{T,\\mu}$ [GeV]\", 3000, 0.25, 300)\n muon_eta_axis = hist.Bin(\"eta\", r\"$\\eta_{\\mu}$\", 60, -3.0, 3.0)\n muon_phi_axis = hist.Bin(\"phi\", r\"$\\phi_{\\mu}$\", 70, -3.5, 3.5)\n\n dimu_mass_axis = hist.Bin(\"mass\", r\"$m_{\\mu\\mu}$ [GeV]\", 3600, 0.25, 120)\n dimu_pt_axis = hist.Bin(\"pt\", r\"$p_{T,\\mu\\mu}$ [GeV]\", 3000, 0.25, 300)\n dimu_eta_axis = hist.Bin(\"eta\", r\"$\\eta_{\\mu\\mu}$\", 100, -5.0, 5.0)\n dimu_phi_axis = hist.Bin(\"phi\", r\"$\\phi_{\\mu\\mu}$\", 70, -3.5, 3.5)\n \n self._accumulator = processor.dict_accumulator({\n 'muon_pt': hist.Hist(\"Counts\", dataset_axis, muon_pt_axis),\n 'muon_eta': hist.Hist(\"Counts\", dataset_axis, muon_eta_axis),\n 'muon_phi': hist.Hist(\"Counts\", dataset_axis, muon_phi_axis),\n 'dimu_mass': hist.Hist(\"Counts\", dataset_axis, dimu_mass_axis),\n 'dimu_pt': hist.Hist(\"Counts\", dataset_axis, dimu_pt_axis),\n 'dimu_eta': hist.Hist(\"Counts\", dataset_axis, dimu_eta_axis),\n 'dimu_phi': hist.Hist(\"Counts\", dataset_axis, dimu_phi_axis),\n 'cutflow': processor.defaultdict_accumulator(int),\n })\n \n @property\n def accumulator(self):\n return self._accumulator\n \n def process(self, df):\n output = self.accumulator.identity()\n \n dataset = df['dataset']\n if df['nMuon'].size != 0:\n muons = JaggedCandidateArray.candidatesfromcounts(\n df['nMuon'],\n pt=df['Muon_pt'],\n eta=df['Muon_eta'],\n phi=df['Muon_phi'],\n mass=df['Muon_mass'],\n charge=df['Muon_charge'],\n isGlobal=df['Muon_isGlobal'],\n softId=df['Muon_softId'],\n vtxIdx=df['Muon_vtxIdx'],\n pfRelIso04_all=df['Muon_pfRelIso04_all'],\n x=df['Muon_x'],\n y=df['Muon_y'],\n z=df['Muon_z'],\n )\n else: \n muons = JaggedCandidateArray.candidatesfromcounts(\n np.array([]),\n pt=np.array([]),\n eta=np.array([]),\n phi=np.array([]),\n mass=np.array([]),\n charge=np.array([]),\n isGlobal=np.array([]),\n softId=np.array([]),\n vtxIdx=np.array([]),\n pfRelIso04_all=np.array([]),\n x=np.array([]),\n y=np.array([]),\n z=np.array([]),\n ) \n \n output['cutflow']['all events'] += muons.size\n output['cutflow']['all muons'] += muons.counts.sum()\n \n # global and soft muon\n soft_id = (muons.softId > 0)\n muons = muons[soft_id]\n output['cutflow']['soft muon'] += soft_id.sum().sum()\n\n global_muon = (muons.isGlobal > 
0)\n muons = muons[global_muon]\n output['cutflow']['global muon'] += global_muon.sum().sum()\n\n #pt and eta cuts\n pt_cut = (muons.pt > 3)\n muons = muons[pt_cut]\n output['cutflow']['pt cut'] += pt_cut.sum().sum()\n\n eta_cut = (np.absolute(muons.eta) <= 2.4)\n muons = muons[eta_cut]\n output['cutflow']['eta cut'] += eta_cut.sum().sum()\n\n #isolated muon\n iso_muon = (muons.pfRelIso04_all < 0.4)\n muons = muons[iso_muon]\n output['cutflow']['iso muon'] += iso_muon.sum().sum()\n\n #valid vtx\n valid_vtx = (muons.vtxIdx != -1)\n muons = muons[valid_vtx]\n output['cutflow']['valid vtx'] += valid_vtx.sum().sum()\n\n #dimuon\n twomuons = (muons.counts >= 2)\n output['cutflow']['two muons'] += twomuons.sum()\n \n dimuons = muons[twomuons].distincts()\n\n opposite_charge = (dimuons.i0['charge'] * dimuons.i1['charge'] < 0)\n dimuons = dimuons[opposite_charge]\n output['cutflow']['opposite charge'] += opposite_charge.any().sum()\n\n #same vtx or close in z\n same_vtx = (dimuons.i0['vtxIdx'] == dimuons.i1['vtxIdx']) | (np.absolute(dimuons.i0['z'] - dimuons.i1['z']) < 0.2)\n dimuons = dimuons[same_vtx]\n output['cutflow']['same vtx'] += same_vtx.any().sum()\n \n output['muon_pt'].fill(dataset=dataset, pt=muons.pt.flatten())\n output['muon_eta'].fill(dataset=dataset, eta=muons.eta.flatten())\n output['muon_phi'].fill(dataset=dataset, phi=muons.phi.flatten())\n\n output['dimu_mass'].fill(dataset=dataset,mass=dimuons.mass.flatten())\n output['dimu_pt'].fill(dataset=dataset, pt=dimuons.pt.flatten())\n output['dimu_eta'].fill(dataset=dataset, eta=dimuons.eta.flatten())\n output['dimu_phi'].fill(dataset=dataset, phi=dimuons.phi.flatten())\n \n return output\n\n def postprocess(self, accumulator):\n return accumulator","sub_path":"nanoAODplus_processor/AnalyzerProcessor.py","file_name":"AnalyzerProcessor.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"413223556","text":"import jieba\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\n\nFILE_NAME = 'dying_to_survive.txt'\n\nfObj = open(FILE_NAME, 'r', encoding='utf-8')\ntext = fObj.read()\ncut = jieba.cut(text, cut_all=True)\nsplit_cut = ' '.join(cut)\nwc = WordCloud(font_path='simsun.ttc', width=640, height=480).generate(split_cut)\nmy_word_cloud = wc.generate(split_cut)\nplt.imshow(my_word_cloud)\nplt.axis(\"off\")\nplt.show()","sub_path":"3.DyingtoSurvive/word_cloud.py","file_name":"word_cloud.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"400225547","text":"from django.contrib.auth.models import User\r\nfrom django.utils.encoding import force_text\r\nfrom django.db.models import Q\r\n\r\nfrom dal import autocomplete\r\n\r\nfrom .models import Customer, StaffMember\r\n\r\n\r\nclass UserAutoComplete(autocomplete.Select2QuerySetView):\r\n\r\n def get_queryset(self):\r\n # Filter out results for unauthenticated users.\r\n if not self.request.user.has_perm('core.can_autocomplete_users'):\r\n return User.objects.none()\r\n\r\n qs = User.objects.all()\r\n\r\n if self.q:\r\n words = self.q.split(' ')\r\n lastName = words.pop()\r\n firstName = words.pop() if words else lastName\r\n\r\n qs = qs.filter(\r\n Q(first_name__istartswith=firstName) | Q(last_name__istartswith=lastName) |\r\n Q(email__istartswith=self.q)\r\n )\r\n\r\n return qs\r\n\r\n def get_result_label(self,item):\r\n return force_text(item.get_full_name() + ': ' + 
item.email)\r\n\r\n\r\nclass CustomerAutoComplete(autocomplete.Select2QuerySetView):\r\n\r\n def get_queryset(self):\r\n # Filter out results for unauthenticated users.\r\n if not self.request.user.has_perm('core.can_autocomplete_users'):\r\n return Customer.objects.none()\r\n\r\n qs = Customer.objects.all()\r\n\r\n if self.q:\r\n words = self.q.split(' ')\r\n lastName = words.pop()\r\n firstName = words.pop() if words else lastName\r\n\r\n qs = qs.filter(\r\n Q(first_name__istartswith=firstName) | Q(last_name__istartswith=lastName) |\r\n Q(email__istartswith=self.q)\r\n )\r\n\r\n return qs\r\n\r\n\r\nclass StaffMemberAutoComplete(autocomplete.Select2QuerySetView):\r\n\r\n def get_queryset(self):\r\n # Filter out results for unauthenticated users.\r\n if not self.request.user.has_perm('core.can_autocomplete_staffmembers'):\r\n return StaffMember.objects.none()\r\n\r\n qs = StaffMember.objects.all()\r\n\r\n if self.q:\r\n words = self.q.split(' ')\r\n lastName = words.pop()\r\n firstName = words.pop() if words else lastName\r\n\r\n qs = qs.filter(\r\n Q(firstName__istartswith=firstName) | Q(lastName__istartswith=lastName) |\r\n Q(publicEmail__istartswith=self.q)\r\n )\r\n\r\n return qs\r\n\r\n def create_object(self, text):\r\n ''' Allow creation of staff members using a full name string. '''\r\n if self.create_field == 'fullName':\r\n firstName = text.split(' ')[0]\r\n lastName = ' '.join(text.split(' ')[1:])\r\n return self.get_queryset().create(**{'firstName': firstName, 'lastName': lastName})\r\n else:\r\n return super(StaffMemberAutoComplete,self).create_object(text)\r\n","sub_path":"danceschool/core/autocomplete_light_registry.py","file_name":"autocomplete_light_registry.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"558387998","text":"import pandas as pd\nimport numpy as np\nimport os, re, math, json, nltk, copy\nimport Utilities as utils\nimport sklearn_crfsuite\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom nltk.corpus import stopwords\nfrom attribute_extraction.SuffixTree import SuffixTree\nfrom attribute_extraction.Normalizer import BaseNormalizer\n\nclass Normalizer(BaseNormalizer):\n def __init__(self):\n super(Normalizer, self).__init__()\n \n def normalize(self, out_vals):\n pred_labels = []\n for i in range(len(out_vals)):\n y1 = {}\n\n for j in range(len(out_vals[i])):\n y2 = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", out_vals[i][j], re.IGNORECASE)\n if len(y2) > 0:\n h = y2[0]\n if h not in y1:\n y1[h] = 0\n y1[h] += 1\n\n if len(y1) == 0:\n pred_labels.append('none')\n\n elif len(y1) == 1:\n pred_labels.append(str(list(y1.keys())[0]))\n\n else:\n max_cnt, max_v = 1, None\n for v, cnt in y1.items():\n if cnt > max_cnt:\n max_cnt = cnt\n max_v = v\n\n if max_v is not None:\n pred_labels.append(str(max_v))\n else:\n pred_labels.append('none')\n \n return pred_labels ","sub_path":"ByomKesh/attribute_extraction/extractors/minimum_screen_size/normalizer.py","file_name":"normalizer.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"339672202","text":"class Solution(object):\n def minWindow(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: str\n \"\"\"\n '''\n idea:\n go through the string, record the positions of the characters we want to find. 
Whenever we find all the characters, keep updating the start position to find a substring with local minimum length. After this, update the start position to the next one and then keep looking for the missing character.\n\n Implementation:\n use a dictionary d to store the number of each character we need to find. (negative values mean we have some extra!) ToFind is the total number of characters we still need to find. ind is a list of indices of the characters in d. head is a pointer in ind so ind[head] is the start position of the substring and s[ind[head]] is that character.\n\n Note: I use a small trick to initialize L and R, the start and end position of the minimum window substring. (Their initial values have the properties that R - L = len(s) and s[L:R+1]=\"\" . Therefore, when we find the first window with indices p and q , q - p must be smaller than R - L so we can update L and R. If we cannot find any window to update L and R, it would return an empty string. )\n '''\n d = {}\n for c in t:\n d[c] = d.get(c,0) + 1 # char counts for t; d.get(c,0) defaults to 0\n\n ToFind, ind = len(t), [] # ind holds the indices of chars of s that appear in d\n L, R, head = -len(s)-1, -1, 0\n # initial L and R satisfy R - L == len(s), longer than any real window,\n # and s[L:R+1] is empty, so an empty string is returned when no window exists\n\n for i, c in enumerate(s):\n if c in d:\n ind.append(i)\n d[c] -= 1\n if d[c] >=0:\n ToFind -= 1\n if ToFind == 0:\n\n # skip the repeated left pointer\n while d[s[ind[head]]] < 0: # s[ind[head]] is the character\n d[s[ind[head]]] += 1\n head += 1\n\n # get the min window\n if i - ind[head] < R - L:\n L, R = ind[head], i\n\n\n d[s[ind[head]]] += 1 # need to find the next left (head) c\n ToFind += 1\n head += 1 # move the left to the second left pointer\n\n return s[L:R+1]","sub_path":"minWindow.py","file_name":"minWindow.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"33358919","text":"from math import sqrt\n\nplist = [2]\nfor n in range(3,100000):\n prime = True\n for p in plist:\n if n % p == 0:\n prime = False\n break\n if prime:\n plist.append(n)\n\nT = int(input())\n\nfor case in range(T):\n N,J = map(int,input().split())\n print('Case #',case+1,':',sep='')\n fmstr = '0'+str(N-2)+'b'\n\n i = 0\n anscnt = 0\n while(anscnt < J):\n cands = '1'+format(i,fmstr)+'1'\n outl = []\n for base in range(2,11):\n candi = int(cands,base)\n #print(candi)\n sqc = sqrt(candi)\n for cd in plist:\n if candi % cd == 0:\n outl.append(cd)\n break\n if cd > sqc:\n break\n if len(outl) == 9:\n print(cands,*outl)\n anscnt += 1\n if anscnt == J:\n break\n if '0' not in cands:\n print('reached counter limit')\n break\n i+=1\n","sub_path":"solutions_5738606668808192_1/Python/macmak/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"376039341","text":"\"\"\"\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n@ Instituto Superior Tecnico @\n@@ PRI - 1st Delivery @@\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n@@ Dinis Araújo - 86406 @@\n@@ Inês Lacerda - 86436 @@\n@@ Maria Duarte - 86474 @@\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\"\"\"\nimport nltk\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nimport matplotlib.pyplot as plt\nimport xml.etree.ElementTree as ET\nimport os\nimport datetime\nimport re\nfrom sklearn.feature_extraction import text\n\n\n# Creating topic class to represent its structure\nclass Topic:\n def __init__(self, title, desc, narr):
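\n \"\"\"Container for one topic: title, description and narrative.\"\"\"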
\n self.title = title\n self.desc = desc\n self.narr = narr\n\n# Function that processes the qrels file\ndef read_qrels_file(Q_RELS_TEST=\"qrels.train.txt\"):\n rels_dict = {}\n with open(Q_RELS_TEST) as file:\n for line in file:\n topic_num, doc_id, relevante_bool = line.split(' ')\n relevante_bool = relevante_bool.replace('\\n', '')\n if topic_num in rels_dict:\n if relevante_bool == '1':\n rels_dict[topic_num].append(doc_id)\n else:\n if relevante_bool == '1':\n rels_dict[topic_num] = [doc_id]\n return rels_dict\n\n# Function that processes the topics file\ndef read_topics_file():\n topics_dic = {}\n f = open(Q_TOPICS_PATH, \"r\")\n content = f.read()\n content = content.replace(\"\\n\", \" \")\n\n for topic in content.split(\"</top>\")[:-1]:\n topic = re.split(r'<title>|<desc>|<narr>', topic)\n num = topic[0].replace(\"<top> <num> Number: \", \"\").replace(\" \", \"\")\n title = topic[1]\n desc = topic[2].replace(\"Description: \", \"\")\n narr = topic[3].replace(\"Narrative: \", \"\")\n narr = narr.replace(\" </top>\", \"\")\n\n topics_dic[num] = Topic(title, desc, narr)\n return topics_dic\n\n\n# Function responsible for preprocessing all the tokens:\n# punctuation, lower casing, stopwords, stemming\ndef preprocessing(content):\n # punctuation\n content = re.sub(r'\\W', ' ', content)\n # lower casing\n tokens = nltk.word_tokenize(content.lower())\n # stop words\n if stop_words_flag == 'True':\n stop_words = text.ENGLISH_STOP_WORDS.union(set(stopwords.words('english')))\n else:\n stop_words = []\n # stemming\n ps = PorterStemmer()\n preprocessed_tokens = []\n for t in tokens:\n if t not in stop_words and not re.search(r'\\d', t) and len(t) > 3:\n preprocessed_tokens.append(ps.stem(t))\n return preprocessed_tokens\n\n\n######################\n# Reading collection #\n######################\n\n# Returns 2 XML Lists:\n# train_xmls, test_xmls\ndef read_xml_files(D_PATH):\n #folder = os.listdir(D_PATH)\n train_xmls = {}\n test_xmls = {}\n codes = {}\n #for folder in folders:\n xml_file_names = os.listdir(D_PATH)\n for xml_file_name in xml_file_names:\n print(xml_file_name)\n if os.path.isfile(os.path.join(D_PATH, xml_file_name)) and xml_file_name.find(\n \".xml\") != -1:\n xml_file = ET.parse(D_PATH + xml_file_name)\n year, month, day = [int(x) for x in\n xml_file.getroot().attrib.get('date').split(\n '-')]\n date = datetime.date(year, month, day)\n document = ''\n for tag in ['headline', 'byline', 'dateline']:\n for content in xml_file.getroot().iter(tag):\n if content.text:\n document += ' ' + content.text\n for content in xml_file.getroot().iter('text'):\n for paragraph in content:\n document += ' ' + paragraph.text\n key = xml_file.getroot().attrib.get('itemid')\n for content in xml_file.getroot().iter('code'):\n if key in codes:\n codes[key].append(content.attrib.get('code'))\n else:\n codes[key] = [content.attrib.get('code')]\n if date <= DATE_TRAIN_UNTIL:\n train_xmls[key] = preprocessing(document)\n else:\n test_xmls[key] = preprocessing(document)\n\n return train_xmls, test_xmls, codes\n\n#########################################################\n# Main Code #\n#########################################################\n\n# Input variables used to run our experiments with the analyses.py file\n\nstop_words_flag = 'True'\nQ_PATH = \"topics.txt\"\nQ_TOPICS_PATH = \"topics.txt\"\nQ_RELS_TEST = \"qrels.train.txt\"\nDATE_TRAIN_UNTIL = datetime.date(1996, 9, 30)
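 # documents dated on or before this cutoff belong to the training set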
\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"568719714","text":"# Producer-consumer demo\nimport time\nimport queue\nimport threading\n\nq = queue.Queue(10) # create a queue to hold the buns; capacity is 10\n\ndef productor(i):\n # each chef makes one bun every 2 seconds\n while True:\n q.put(\"厨师%s做的包子!\" % i)\n time.sleep(2)\n\ndef consumer(j):\n # each customer keeps eating one bun per second\n while True:\n print(\"顾客 %s 吃了一个 %s\"%(j,q.get()))\n time.sleep(1)\n\n# instantiate 3 producers (chefs)\nfor i in range(3):\n t = threading.Thread(target=productor, args=(i,))\n t.start()\n# instantiate 10 consumers (customers)\nfor j in range(10):\n v = threading.Thread(target=consumer, args=(j,))\n v.start()\n'''\nSample output:\n顾客 0 吃了一个 厨师0做的包子!\n顾客 1 吃了一个 厨师1做的包子!\n顾客 2 吃了一个 厨师2做的包子!\n顾客 3 吃了一个 厨师2做的包子!\n顾客 4 吃了一个 厨师0做的包子!\n顾客 5 吃了一个 厨师1做的包子!\n顾客 6 吃了一个 厨师1做的包子!\n顾客 7 吃了一个 厨师0做的包子!\n顾客 8 吃了一个 厨师2做的包子!\n顾客 0 吃了一个 厨师1做的包子!\n顾客 1 吃了一个 厨师0做的包子!\n顾客 9 吃了一个 厨师2做的包子!\n'''\n\n\n\n\n\n","sub_path":"algorithm_test/super_python/生产者消费者.py","file_name":"生产者消费者.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"173427928","text":"\"\"\"\nThis module represents the Producer.\n\nComputer Systems Architecture Course\nAssignment 1\nMarch 2021\n\"\"\"\n\nfrom threading import Thread\nimport time\n\nclass Producer(Thread):\n \"\"\"\n Class that represents a producer.\n \"\"\"\n\n def __init__(self, products, marketplace, republish_wait_time, **kwargs):\n \"\"\"\n Constructor.\n\n @type products: List()\n @param products: a list of products that the producer will produce\n\n @type marketplace: Marketplace\n @param marketplace: a reference to the marketplace\n\n @type republish_wait_time: Time\n @param republish_wait_time: the number of seconds that a producer must\n wait until the marketplace becomes available\n\n @type kwargs:\n @param kwargs: other arguments that are passed to the Thread's __init__()\n \"\"\"\n Thread.__init__(self, **kwargs)\n self.products = products\n self.marketplace = marketplace\n self.republish_wait_time = republish_wait_time\n self.id_prod = self.marketplace.register_producer()\n\n def run(self):\n # infinite loop so production never stops\n while True:\n # for each product\n for prod, quant, time_to_sleep in self.products:\n # reset the counter to 0 for each product\n prd = 0\n # while the desired quantity of this product has not been produced\n while prd < quant:\n products_published = self.marketplace.publish(self.id_prod, prod)\n if products_published:\n time.sleep(time_to_sleep)\n prd += 1\n else:\n # the queue is full, so wait before republishing\n time.sleep(self.republish_wait_time)\n ","sub_path":"producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"428461556","text":"def internDetails():\n details = {\n \"name\":\"Agbedor Hope\",\n \"ID\":\"HNG-02110\",\n \"email\":\"martinshope147@gmail.com\",\n \"language\":\"python\"\n }\n\n output = \"Hello World, this is {name} with HNGi7 ID {ID} using {language} for stage 2 task. 
{email}\".format(\n name=details[\"name\"], ID=details[\"ID\"], email=details[\"email\"], language=details[\"language\"])\n\n print(output)\n\n return output\n\n\ninternDetails()\n","sub_path":"scripts/HNG-02110.py","file_name":"HNG-02110.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"157564473","text":"import psycopg2\nfrom datetime import timedelta, date\n\nCONN_POSTGRES = \"host='localhost' dbname='db' user='postgres' password='postgres'\"\n\n\ndef read_mvalidation_on_date(file_path, d_from=date.today() - timedelta(days=1)):\n \"\"\" function load list of rows of table mdata.mvalidation to csv file\n params: file_path - path of target file\n d_from - date for selecting rows (default = yesterday)\"\"\"\n\n # determine end point of date interval\n d_to = d_from + timedelta(days=+1)\n\n # set connection to db\n with psycopg2.connect(CONN_POSTGRES) as conn:\n with conn.cursor() as cur:\n query = 'SELECT * FROM mdata.mvalidation where validation_ts >= %s and validation_ts <= %s'\n\n query_bind = cur.mogrify(query, (d_from, d_to)).decode(\"utf-8\")\n outputquery = \"COPY ({0}) TO STDOUT WITH CSV HEADER\".format(query_bind)\n with open(file_path, 'w') as f:\n cur.copy_expert(outputquery, f)\n print('{} rows were written to csv file'.format(cur.rowcount))\n\n\ndef update_mvalidation_on_date(file_path, d_from=date.today() - timedelta(days=1)):\n \"\"\" function delete rows of table mdata.mvalidation on selected date and then load csv file to table\n params: file_path - path of target file\n d_from - date for selecting rows (default = yesterday)\"\"\"\n\n d_to = d_from + timedelta(days=+1)\n\n with psycopg2.connect(CONN_POSTGRES) as conn:\n with conn.cursor() as cur:\n # !!! 
open the file before deleting rows from the table,\n # because if the file doesn't exist an exception will be raised and no rows will be deleted\n with open(file_path, 'r') as f:\n # read first row for column names\n colnames = f.readline().split(',')\n\n # delete rows from yesterday\n query = \"DELETE FROM mdata.mvalidation where validation_ts >= %s and validation_ts <= %s\"\n cur.execute(query, (d_from, d_to))\n print('{} rows were deleted from table'.format(cur.rowcount))\n\n # insert data from csv file\n cur.copy_from(f, 'mdata.mvalidation', columns=colnames, sep=',')\n print('{} rows were loaded from csv file'.format(cur.rowcount))\n\n\nif __name__ == \"__main__\":\n read_mvalidation_on_date(file_path='yesterday.csv')\n update_mvalidation_on_date(file_path='yesterday.csv')\n","sub_path":"1_python.py","file_name":"1_python.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"457708835","text":"import pandas as pd\nimport numpy as np\n\n\ndef denormalization(x, norm):\n # undo per-column normalization: scale each value back and add the offset\n row,col=np.shape(x)\n for i in range (0,col-1):\n for j in range(0,row):\n x[j][i+1]=x[j][i+1]*norm[i][0]+norm[i][1]\n return x\n\n\n","sub_path":"denormalization.py","file_name":"denormalization.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"597706402","text":"from flask import Flask,render_template,flash,request,redirect,url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField,SubmitField\nfrom wtforms.validators import DataRequired\napp = Flask(__name__)\n\n# database config: connection URI / disable automatic change tracking\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:xxh19971124@127.0.0.1/flask_books'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key='ithie'\n# create the database object\ndb = SQLAlchemy(app)\n'''\n1. Configure the database\n a. import the SQLAlchemy extension\n b. create the db object and configure it\n c. create the database\n2. Add the Book and Author models\n a. models inherit from db.Model\n b. table name via __tablename__\n c. db.Column: fields\n d. db.relationship: relationship reference\n3. Add data\n4. Show database query results with a template\n a. query all authors and pass them to the template\n b. loop over authors and books in the template (books are fetched through the relationship reference)\n5. Show the form with WTF\n a. define a custom form class\n b. render it in the template\n c. set secret_key/csrf_token\n6. 
Implement the add and delete logic\n a. add data\n b. delete data --> deleted from the page --> a click sends the book ID to the delete-book route --> the route must accept a parameter\n usage of url_for / for-else / redirect\n c. delete an author\n'''\n\n# Define the Book and Author models\n# Author model\n\nclass Author(db.Model):\n __tablename__ = 'authors' # table name\n # fields\n id = db.Column(db.Integer,primary_key=True)\n name = db.Column(db.String(16),unique=True)\n # relationship reference\n # books is used by Author itself; the author backref is used by the Book model\n books = db.relationship('Book',backref='author')\n\n def __repr__(self):\n return 'Author:%s' %self.name\n\nclass Book(db.Model):\n __tablename__='books'\n\n id = db.Column(db.Integer,primary_key=True)\n name = db.Column(db.String(16),unique=True)\n author_id = db.Column(db.Integer,db.ForeignKey('authors.id'))\n\n def __repr__(self):\n return 'Book:%s %s' %(self.name,self.author_id)\n\n# custom form class\nclass AuthorForm(FlaskForm):\n author = StringField('作者:',validators=[DataRequired()])\n book = StringField('书籍:',validators=[DataRequired()]) # validators must be an iterable\n submit = SubmitField('提交')\n\n@app.route('/delete_author/<author_id>')\ndef delete_author(author_id):\n # 1. query whether an author with this ID exists; if so delete (books first, then the author), otherwise flash an error\n author = Author.query.get(author_id)\n\n # 2. if the author exists, delete (books first, then the author)\n if author:\n try:\n # delete the books right after querying\n Book.query.filter_by(author_id=author_id).delete()\n # delete the author\n db.session.delete(author)\n db.session.commit()\n except Exception as e:\n print(e)\n flash('删除作者错误')\n db.session.rollback()\n else:\n flash('作者找不到')\n\n return redirect(url_for('index'))\n\n\n@app.route('/delete_book/<book_id>')\ndef delete_book(book_id):\n # 1. query whether a book with this ID exists; if so delete it, otherwise flash an error\n book = Book.query.get(book_id)\n # 2. delete it if it exists\n if book:\n try:\n db.session.delete(book)\n db.session.commit()\n except Exception as e:\n print(e)\n flash('删除书籍出错')\n db.session.rollback()\n # otherwise flash an error\n else:\n flash('书籍找不到')\n # how to return to the current page --> redirect\n # redirect: needs a URL / route address\n # url_for(): takes a view function name and returns the route address for that view\n return redirect(url_for('index'))\n # equivalent to return redirect('/')\n\n\n@app.route('/',methods=['GET','POST'])\ndef index():\n\n # create the custom form\n author_form = AuthorForm()\n # query all author info and pass it to the template\n\n '''\n Validation logic:\n 1. call the WTF helper to validate the form\n 2. if validation passes, read the data\n 3. check whether the author exists\n 4. if the author exists, check whether the book exists; add the book if there is no duplicate, otherwise flash an error\n 5. if the author does not exist, add both the author and the book\n 6. if validation fails, flash an error\n '''\n\n # 1. call the WTF helper to validate the form\n if author_form.validate_on_submit():\n # 2. validation passed, read the data\n author_name = author_form.author.data\n book_name = author_form.book.data\n # 3. check whether the author exists\n author = Author.query.filter_by(name=author_name).first()\n # 4. if the author exists,\n if author:\n # check whether the book exists,\n book = Book.query.filter_by(name=book_name).first()\n # flash an error if it is a duplicate\n if book:\n flash('已存在同名书籍')\n # otherwise add the book\n else:\n try:\n new_book = Book(name=book_name,author_id=author.id)\n db.session.add(new_book)\n db.session.commit()\n except Exception as e:\n print(e)\n flash('添加书籍失败')\n db.session.rollback()\n\n # 5. if the author does not exist, add both the author and the book\n else:\n try:\n new_author = Author(name=author_name)\n db.session.add(new_author)\n db.session.commit()\n\n new_book = Book(name=book_name,author_id=new_author.id)\n db.session.add(new_book)\n db.session.commit()\n except Exception as e:\n print(e)\n flash('添加作者和书籍失败')\n db.session.rollback()\n\n else:\n # 6. validation failed, flash an error\n if request.method=='POST':\n flash('参数不全')\n authors = Author.query.all()\n return render_template('books.html',authors = authors,form = author_form)\n\n\ndb.drop_all()\n\ndb.create_all()\n\n# generate seed data\nau1 = Author(name='老王')\nau2 = Author(name='老惠')\nau3 = Author(name='老刘')\n\n# add the data to the session\ndb.session.add_all([au1,au2,au3])\n# commit the session\ndb.session.commit()\n\nbk1 = Book(name='老王回忆录',author_id=au1.id)\nbk2 = Book(name='我读书少,你别骗我',author_id=au1.id)\nbk3 = 
Book(name='如何才能让自己更骚',author_id=au2.id)\nbk4 = Book(name='如何征服美丽少女',author_id=au3.id)\nbk5 = Book(name='如何征服英俊少男',author_id=au3.id)\n\n# add the data to the session\ndb.session.add_all([bk1,bk2,bk3,bk4,bk5])\ndb.session.commit()\n\n\napp.run(debug=True)\n","sub_path":"Flask_books_project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"622019999","text":"from flask import render_template, flash, redirect, request, g, session, url_for, Markup\r\nfrom flask.ext.rauth import RauthOAuth2, RauthResponse, RauthServiceMixin\r\nfrom rauth.service import OAuth2Service, OAuth1Service, OflyService, Response, parse_utf8_qsl\r\nfrom wtforms import StringField, BooleanField, TextAreaField, SelectField\r\nfrom wtforms.validators import Required, DataRequired\r\nfrom requests import ConnectionError\r\n\r\nfrom app import app\r\nfrom app.forms import GossipRequestForm\r\n\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\n\r\nfrom validate_email import validate_email\r\n\r\nimport boto\r\nimport boto.ses\r\n\r\nimport json\r\n\r\n\r\n# P This is a fix for a bug in the 0.3.2 version of flask_rauth extension.\r\ndef rauthoauth2_fixed_init(self, fapp=None, base_url=None, consumer_key=None, consumer_secret=None, **kwargs):\r\n # P This line is the fix. The property 'name' is referenced before it is set in the stock code\r\n self.name = kwargs['name']\r\n RauthServiceMixin.__init__(self, app=fapp, base_url=base_url)\r\n OAuth2Service.__init__(self, consumer_key=consumer_key, consumer_secret=consumer_secret, **kwargs)\r\n\r\n\r\nRauthOAuth2.__init__ = rauthoauth2_fixed_init\r\n\r\nwith app.app_context():\r\n google = RauthOAuth2(\r\n name='google',\r\n base_url='https://www.googleapis.com/oauth2/v1/email',\r\n access_token_url='https://accounts.google.com/o/oauth2/token',\r\n authorize_url='https://accounts.google.com/o/oauth2/auth'\r\n )\r\n\r\n\r\ndef sendRequestEmailWithSES(requestername, requesteremail, tool, particulars,\r\n senderemail=app.config['REQUEST_EMAIL_SENDEREMAILADDRESS'],\r\n recepientemail=app.config['REQUEST_EMAIL_RECEPIENTEMAILADDRESS']):\r\n \"\"\"\n Formats and sends the particulars of a request for access as an email\n to the Zen Desk intake email address using Amazon's Simple Email Service. 
By default\n the from address will come from the configuration file.\n\n See https://boto.readthedocs.org/en/latest/ses_tut.html#verifying-a-sender-email-address for\n sender email verification SES process to allow AWS message to send messages from the specified\n address\n\n \"\"\"\n\n # P Make sure the email addresses look good\n if not validate_email(requesteremail):\n raise ValueError(\"Requester email '%s' is not valid\" % requesteremail)\n if not validate_email(senderemail):\n raise ValueError(\"Sender email '%s' is not valid\" % senderemail)\n if not validate_email(recepientemail):\n raise ValueError(\"Recepient email '%s' is not valid\" % recepientemail)\n\n # P Get a connection to AWS\n #conn = boto.ses.connect_to_region(app.config['REQUEST_AWS_SES_REGION'],\n # aws_access_key_id=app.config['REQUEST_AWS_SES_ACCESS_KEY_ID'],\n # aws_secret_access_key=app.config['REQUEST_AWS_SES_SECRET_ACCESS_KEY'])\n conn = boto.ses.connect_to_region(app.config['REQUEST_AWS_SES_REGION'])\n\n # P Make sure the sender address has already been validated for sending by SES\n\n blessedSESaddresses = conn.list_verified_email_addresses()\n\n if senderemail not in blessedSESaddresses['ListVerifiedEmailAddressesResponse']['ListVerifiedEmailAddressesResult'][\n 'VerifiedEmailAddresses']:\n raise ValueError(\"Sender email '%s' is not SES list of verified email addresses.\" % senderemail)\n\n # P OK, should be validated. Create the message\n msg = MIMEMultipart()\n\n msg['Subject'] = \"Access Gossip request for access to %s by %s\" % (tool, requestername)\n msg['From'] = senderemail\n msg['To'] = recepientemail\n\n body = \"\"\n for particular in particulars:\n body += \"\\n%s:%s\" % (particular[0], particular[1])\n\n msg.attach(MIMEText(body))\n\n # P Send the message\n\n result = conn.send_raw_email(msg.as_string())\n\n return result if 'ErrorResponse' in result else ''\n\n\n@app.route('/login')\ndef login():\n return google.authorize(\n callback=url_for('authorized', _external=True),\n scope='https://www.googleapis.com/auth/userinfo.email')\n\n\n@app.route('/authorized')\ndef authorized():\n if 'error' in request.args and request.args['error'] == 'access_denied':\n flash(Markup(u\"You declined to allow Access_Gossip to get basic Google account information. \\\n It is required to verify your email. Access can be revoked at any time by vising \\\n <a style='color: #52985b;' href='https://security.google.com/settings/u/2/security/permissions'\\>your Google \\\n security settings</a>\"),\n 'warning')\n return render_template('gossiprequest.html',\n title='Request Access to Shared Secret',\n form=GossipRequestForm(),\n tools=app.config['TOOLS'])\n\n resp = RauthResponse(google.get_access_token(method='POST', data={\n 'code': request.args['code'],\n 'redirect_uri': session.pop(google._session_key('redirect_uri'), None)\n }))\n\n if 'error' in resp.content().keys():\n app.logger.error(\"Error authorizing with google error is %s\" % resp.content()['error'])\n flash(Markup(u\"There was an error authorizing with Google. 
Please contact \\\n <a href='mailto:help@controlgroup.com'>support</a>\"),\r\n 'error')\r\n return render_template('gossiprequest.html',\r\n title='Request Access to Shared Secret',\r\n form=GossipRequestForm(),\r\n tools=app.config['TOOLS'])\r\n\r\n session['access_token'] = resp.content().get('access_token')\r\n\r\n return redirect(url_for('gossiprequest'))\r\n\r\n\r\n@app.route(\"/out\")\r\ndef logout():\r\n session['access_token'] = None\r\n return redirect(url_for('gossiprequest'))\r\n\r\n\r\ndef fieldForFormName(formname, tools):\r\n # P BAH! There needs to be a better way of doing this\r\n\r\n stdify = lambda s: s.lower().replace(' ', '_')\r\n\r\n ftoolname, ffieldname = formname.split('__', 1)\r\n\r\n for tool in tools.values():\r\n if stdify(tool['toolname']) == ftoolname:\r\n for field in tool[\"fields\"]:\r\n if stdify(field[\"name\"]) == ffieldname:\r\n return field\r\n\r\n return None\r\n\r\n\r\n@app.route('/gossiprequest', methods=['GET', 'POST'])\r\ndef gossiprequest():\r\n # P DynamicGossipRequestForm extends GossipRequestForm to allow for tools to add form fields before the web page\r\n # P is rendered and returned to the requester\r\n class DynamicGossipRequestForm(GossipRequestForm):\r\n pass\r\n\r\n templateparams = {}\r\n googleuserinfo = {}\r\n googleuserprofile = {}\r\n\r\n # P The 'blessed domains' are the list of email domains that a user won't have to enter a project contact\r\n templateparams['blesseddomains'] = app.config['BLESSED_DOMAINS']\r\n\r\n # P Get the OAuth2 access token from the request session and grab the user details from the OAuth provider,\r\n # P which for right now is only Google.\r\n access_token = session.get('access_token')\r\n\r\n if access_token is not None:\r\n\r\n try:\r\n userinforequest = google.get('https://www.googleapis.com/oauth2/v3/userinfo', access_token=access_token)\r\n profileinforequest = google.get('https://www.googleapis.com/plus/v1/people/me', access_token=access_token)\r\n except ConnectionError as ce:\r\n app.logger.error(ce)\r\n flash(Markup(u\"There was a problem contacting Google. Please contact \\\r\n <a href='mailto:help@controlgroup.com'>support</a>\"), 'error')\r\n session['access_token'] = None\r\n return redirect(url_for('gossiprequest'))\r\n\r\n if userinforequest.response.status_code != 200:\r\n flash('Google token expired or corrupted. Please login again.', 'warning')\r\n return redirect(url_for('gossiprequest'))\r\n\r\n try:\r\n googleuserinfo = json.loads(userinforequest.response.content)\r\n googleuserprofile = json.loads(profileinforequest.response.content)\r\n except Exception as e:\r\n session['access_token'] = None\r\n flash(Markup(u\"There was a problem processing user details from Google. Please contact \\\r\n <a href='mailto:help@controlgroup.com'>support</a>\"),\r\n 'error')\r\n app.logger.error(\"Could not parse Google user and/or profile JSON. 
Exception is %s\" % e)\r\n return redirect(url_for('gossiprequest'))\r\n\r\n # P These are details for adding some UI flourish to the rendered web page\r\n if \"image\" in googleuserprofile.keys() and 'url' in googleuserprofile['image'].keys():\r\n templateparams['userimgurl'] = googleuserprofile['image']['url']\r\n else:\r\n app.logger.warning(\"Could not find image url value in Google profile information\")\r\n templateparams['userimgurl'] = \"\"\r\n\r\n if \"hd\" in googleuserinfo.keys():\r\n # P the 'hd' value will be the domain name if Google is managing the account, ie not gmail\r\n templateparams['hd'] = googleuserinfo['hd']\r\n else:\r\n templateparams['hd'] = \"UNKNOWN\"\r\n\r\n formify = lambda ftoolname, fieldn: ftoolname.lower().replace(' ', '_') + \"__\" + fieldn.lower().replace(' ', '_')\r\n\r\n if 'toolselect' in request.form.keys() and request.form['toolselect'] is not None:\r\n for tool in app.config['TOOLS'].values():\r\n\r\n # P Only add the fields of the selected tool. If all of the fields for all of the tools in the config.py\r\n # P were added, form validation would barf on any field that was 'required' but the client had not\r\n # P selected\r\n if tool['toolname'].lower().replace(' ', '_') != request.form['toolselect']:\r\n continue\r\n\r\n for tfield in tool['fields']:\r\n\r\n formfield = None\r\n formfieldname = formify(tool['toolname'], tfield['name'])\r\n\r\n if tfield['type'].lower() == 'text':\r\n formfield = StringField(formfield, validators=[DataRequired()])\r\n elif tfield['type'].lower() == 'textarea':\r\n formfield = TextAreaField(formfield, validators=[DataRequired()])\r\n\r\n setattr(DynamicGossipRequestForm, formfieldname, formfield)\r\n\r\n form = DynamicGossipRequestForm()\r\n\r\n form.requesteremail.data = googleuserinfo.get('email', \"\")\r\n form.requestername.data = googleuserinfo.get('name', \"\")\r\n\r\n if form.validate_on_submit():\r\n\r\n try:\r\n\r\n forminputs = []\r\n toolname = request.form['toolselect'].lower().replace(' ', '_')\r\n for toolformfieldname in filter(lambda r: r.startswith(toolname + \"__\"), request.form.keys()):\r\n toolfield = fieldForFormName(toolformfieldname, app.config['TOOLS'])\r\n forminputs.append((toolfield['name'], request.form[toolformfieldname]))\r\n\r\n toolproppername = request.form['toolselect']\r\n for t in app.config['TOOLS'].keys():\r\n if app.config['TOOLS'][t]['toolname'].lower().replace(' ', '_') == request.form['toolselect']:\r\n toolproppername = app.config['TOOLS'][t]['toolname']\r\n break\r\n\r\n sendRequestEmailWithSES(googleuserinfo['name'],\r\n googleuserinfo['email'],\r\n toolproppername,\r\n forminputs)\r\n\r\n # P All is good. Reset the form variables for tool select and its associated fields\r\n form.tool.data = \"\"\r\n form.toolselect.data = \"\"\r\n for toolformfieldname in filter(lambda r: r.startswith(toolname + \"__\"), request.form.keys()):\r\n getattr(form, toolformfieldname).data = \"\"\r\n\r\n flash('Request Sent!', 'success')\r\n except Exception as e:\r\n app.logger.error(e)\r\n flash(Markup(u\"There was an error sending the request. 
Please contact \\\n <a href='mailto:help@controlgroup.com'>support</a>\"),\n 'error')\n\n else:\n for field, errors in form.errors.items():\n for error in errors:\n fielddict = fieldForFormName(field, app.config['TOOLS'])\n if fielddict is not None:\n label = fielddict['name']\n else:\n label = field\n flash(u\"Error in the %s field - %s\" % (label, error), 'error')\n\n return render_template('gossiprequest.html',\n title='Request Access to Project Resource',\n form=form,\n params=templateparams,\n tools=app.config['TOOLS'])\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"609922741","text":"#!/usr/bin/env python\n\n# Given a string s and a dictionary of words dict,\n# determine if s can be break into a space-separated sequence of one or more dictionary words.\n#\n# Example\n# Given s = \"lintcode\", dict = [\"lint\", \"code\"].\n#\n# Return true because \"lintcode\" can be break as \"lint code\".\n#\n# Tags\n# String Dynamic Programming\n# Related Problems\n# Hard Word Break II\n\n\nclass Solution:\n # @param s: A string s\n # @param dict: A dictionary of words dict\n\n # # Solution 1 - DP without optimization\n # def wordBreak(self, s, dict):\n # # write your code here\n # if dict is None:\n # return s is None\n # if len(dict) == 0:\n # return len(s) == 0\n\n # n = len(s)\n # f = [False] * (n + 1)\n # f[0] = True\n\n # for i in range(1, n + 1):\n # for j in range(1, i + 1):\n # if not f[i - j]:\n # continue\n # if s[i - j: i] in dict:\n # f[i] = True\n # break\n #\n # return f[n]\n\n\n # Solution 2 - DP with optimization\n def wordBreak(self, s, dict):\n if dict is None:\n return s is None\n if len(dict) == 0:\n return len(s) == 0\n\n n = len(s)\n f = [False] * (n + 1)\n f[0] = True\n\n maxLen = max([len(word) for word in dict])\n for i in range(1, n + 1):\n for j in range(1, min(i, maxLen) + 1):\n if not f[i - j]:\n continue\n if s[i - j: i] in dict:\n f[i] = True\n break\n\n return f[n]\n\n\ndef test():\n a = Solution()\n\n assert a.wordBreak('', [])\n assert a.wordBreak('', ['hello'])\n assert not a.wordBreak('hello', [])\n assert a.wordBreak('lintcode', ['lint', 'code'])\n assert not a.wordBreak('helloworld', ['hello', 'globe'])\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"Word Break.py","file_name":"Word Break.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"182303393","text":"from sanic_cors import CORS\nfrom sanic_openapi import swagger_blueprint\n\nfrom . 
import api, errors, settings, utils\nfrom .models import Template\n\n\ndef configure(app):\n app.config.API_HOST = app.config.SERVER_NAME = settings.SERVER_NAME\n app.config.API_BASEPATH = \"/\"\n app.config.API_SCHEMES = [settings.SCHEME]\n app.config.API_VERSION = utils.meta.version()\n app.config.API_TITLE = \"Memegen.link\"\n app.config.API_CONTACT_EMAIL = \"support@maketested.com\"\n app.config.API_LICENSE_NAME = \"View the license\"\n app.config.API_LICENSE_URL = (\n \"https://github.com/jacebrowning/memegen/blob/main/LICENSE.txt\"\n )\n app.config.API_SECURITY = [{\"ApiKeyAuth\": []}]\n app.config.API_SECURITY_DEFINITIONS = {\n \"ApiKeyAuth\": {\"type\": \"apiKey\", \"in\": \"header\", \"name\": \"X-API-KEY\"}\n }\n\n swagger_blueprint.url_prefix = \"/docs\"\n app.blueprint(swagger_blueprint)\n\n app.blueprint(api.clients.blueprint)\n app.blueprint(api.memes.blueprint)\n app.blueprint(api.templates.blueprint)\n app.blueprint(api.shortcuts.blueprint) # registered last to avoid collisions\n\n CORS(app)\n app.error_handler = errors.BugsnagErrorHandler()\n\n\ndef get_valid_templates(request, query: str = \"\") -> list[dict]:\n templates = Template.objects.filter(valid=True, _exclude=\"_custom\")\n if query:\n templates = [t for t in templates if t.matches(query)]\n else:\n templates = sorted(templates)\n return [template.jsonify(request.app) for template in templates]\n\n\ndef get_example_images(request, query: str = \"\") -> list[tuple[str, str]]:\n templates = Template.objects.filter(valid=True, _exclude=\"_custom\")\n if query:\n templates = [t for t in templates if t.matches(query)]\n else:\n templates = sorted(templates)\n return [\n (\n template.build_example_url(request.app, \"Memes.text_jpg\"),\n template.build_self_url(request.app),\n )\n for template in templates\n ]\n\n\ndef get_test_images(request) -> list[str]:\n return [\n request.app.url_for(\n f\"Memes.text_{settings.DEFAULT_EXT}\",\n template_id=id,\n text_paths=utils.text.encode(lines),\n )\n for id, lines in settings.TEST_IMAGES\n ]\n","sub_path":"app/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"104513498","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 20 10:50:44 2020\n\n@author: MEvans\n\"\"\"\nimport tensorflow as tf\n\ndef aug_color(img):\n n_ch = tf.shape(img)[-1]\n contra_adj = 0.05\n bright_adj = 0.05\n\n ch_mean = tf.math.reduce_mean(img, axis = (0,1), keepdims = True)\n #ch_mean = np.mean(img, axis=(0, 1), keepdims=True).astype(np.float32)\n\n contra_mul = tf.random.uniform(shape = (1, 1, n_ch),\n minval = 1-contra_adj,\n maxval = 1+contra_adj)\n # contra_mul = np.random.uniform(1 - contra_adj, 1 + contra_adj, (1, 1, n_ch)).astype(\n # np.float32\n # )\n\n bright_mul = tf.random.uniform(shape = (1, 1, n_ch),\n minval = 1 - bright_adj,\n maxval = 1+bright_adj)\n # bright_mul = np.random.uniform(1 - bright_adj, 1 + bright_adj, (1, 1, n_ch)).astype(\n # np.float32\n # )\n\n recolored = (img - ch_mean) * contra_mul + ch_mean * bright_mul\n return recolored\n\ndef augColor(x):\n \"\"\"Color augmentation\n\n Args:\n x: Image\n\n Returns:\n Augmented image\n \"\"\"\n x = tf.image.random_hue(x, 0.08)\n x = tf.image.random_saturation(x, 0.6, 1.6)\n x = tf.image.random_brightness(x, 0.05)\n x = tf.image.random_contrast(x, 0.7, 1.3)\n return x\n \ndef augImg(img):\n \"\"\"\n Perform image augmentation on tfRecords\n Parameters:\n img (TFRecord): 4D tensor\n Returns:\n 3D 
tensor: \n \"\"\"\n outDims = tf.shape(img)[0:1]\n x = tf.image.random_flip_left_right(img)\n x = tf.image.random_flip_up_down(x)\n x = tf.image.rot90(x, tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32))\n #x = zoom(x, outDims)\n #since were gonna map_fn this on a 4d image, output must be 3d, so squeeze the artificial 'sample' dimension\n return tf.squeeze(x)\n\ndef normalize(x, axes=[0, 1, 2], epsilon=1e-8):\n \"\"\"\n Standardize incoming image patches by mean and variance\n \n To standardize each pixel use axes = [2]\n To standardize each channel use axes = [0, 1]\n To standardize globally use axes = [0, 1, 2]\n \n Parameters:\n x (tensor): nD image tensor\n axes (array): Array of ints. Axes along which to compute mean and variance, usually length n-1\n epsilon (float): small number to avoid dividing by zero\n Return:\n tensor: nD image tensor normalized by channels\n \"\"\"\n mean, variance = tf.nn.moments(x, axes=axes)\n x_normed = (x - mean) / tf.sqrt(variance + epsilon) # epsilon to avoid dividing by zero\n return x_normed\n\ndef rescale(img, axes = [2]):\n \"\"\"\n Standardize incoming image patch to [0,1] based on min and max values\n \n To standardize each pixel use axes = [2]\n To standardize each channel use axes = [0, 1]\n To standardize globally use axes = [0, 1, 2]\n \n Args:\n img (tensor): 3D (H,W,C) image tensor\n axes (list): axes along which to calculate min/max for rescaling\n Return:\n tensor: 3D tensor of same shape as input, with values [0,1]\n \"\"\"\n minimum = tf.math.reduce_min(img, axis = axes, keepdims = True)\n maximum = tf.math.reduce_max(img, axis = axes, keepdims = True)\n scaled = tf.divide(tf.subtract(img, minimum), tf.subtract(maximum, minimum))\n return scaled\n\n#def parse_tfrecord(example_proto, ftDict):\n# \"\"\"The parsing function.\n# Read a serialized example into the structure defined by FEATURES_DICT.\n# Args:\n# example_proto: a serialized Example.\n# Returns: \n# A dictionary of tensors, keyed by feature name.\n# \"\"\"\n# return tf.io.parse_single_example(example_proto, ftDict)\n\n\ndef to_tuple(inputs, features, response):\n \"\"\"Function to convert a dictionary of tensors to a tuple of (inputs, outputs).\n Turn the tensors returned by parse_tfrecord into a stack in HWC shape.\n Args:\n inputs (dict): A dictionary of tensors, keyed by feature name. 
Response\n variable must be the last item.\n features (list): List of input feature names\n response (str): response name(s)\n Returns: \n A tuple of (inputs, outputs).\n \"\"\"\n inputsList = [inputs.get(key) for key in features + [response]]\n stacked = tf.stack(inputsList, axis=0)\n # Convert from CHW to HWC\n stacked = tf.transpose(stacked, [1, 2, 0])\n stacked = augImg(stacked)\n # split input bands and labels\n bands = stacked[:,:,:len(features)]\n labels = stacked[:,:,len(features):]\n # in case labels are >1\n labels = tf.where(tf.greater(labels, 1.0), 1.0, labels)\n # perform color augmentation on input features\n bands = aug_color(bands)\n # standardize each patch of bands\n bands = normalize(bands, [0,1])\n # return the features and labels\n return bands, labels\n\ndef get_dataset(files, ftDict):\n \"\"\"Function to read, parse and format to tuple a set of input tfrecord files.\n Get all the files matching the pattern, parse and convert to tuple.\n Args:\n files (list): A list of filenames storing tfrecords\n ftDict (dict): Dictionary of input features in tfrecords\n Returns: \n A tf.data.Dataset\n \"\"\"\n keys = list(ftDict.keys())\n features = keys[:-1]\n response = keys[-1]\n \n def parse_tfrecord(example_proto):\n return tf.io.parse_single_example(example_proto, ftDict)\n \n def tupelize(inputs):\n return to_tuple(inputs, features, response)\n \n dataset = tf.data.TFRecordDataset(files, compression_type='GZIP')\n dataset = dataset.map(parse_tfrecord, num_parallel_calls=5)\n dataset = dataset.map(tupelize, num_parallel_calls=5)\n return dataset\n\ndef get_training_dataset(files, ftDict, buff, batch):\n \"\"\"\n Get the preprocessed training dataset\n Args:\n files (list): list of tfrecord files to be used for training\n ftDict (dict): dictionary of input features in tfrecords\n buff (int): buffer size for shuffle\n batch (int): batch size for training\n Returns: \n A tf.data.Dataset of training data.\n \"\"\"\n dataset = get_dataset(files, ftDict)\n dataset = dataset.shuffle(buff).batch(batch).repeat()\n return dataset\n\ndef get_eval_dataset(files, ftDict):\n \"\"\"\n Get the preprocessed evaluation dataset\n Args:\n files (list): list of tfrecords to be used for evaluation\n Returns: \n A tf.data.Dataset of evaluation data.\n \"\"\"\n dataset = get_dataset(files, ftDict)\n dataset = dataset.batch(1)\n return dataset","sub_path":"utils/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":6304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"543151555","text":"import numpy as np\nimport time\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KernelDensity\nfrom scipy.stats import gaussian_kde\nfrom scipy.stats.distributions import norm\nfrom sklearn.model_selection import GridSearchCV\n\n# The grid we'll use for plotting\nx_grid = np.linspace(0, 1, 1000)\n\n# calculates the pdf from input data and input grid \ndef kde_sklearn(x, x_grid, bandwidth=0.2, **kwargs):\n \"\"\"Kernel Density Estimation with Scikit-learn\"\"\"\n kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n kde_skl.fit(x[:, np.newaxis])\n # score_samples() returns the log-likelihood of the samples\n log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis])\n return np.exp(log_pdf)\n\n\n# datafile\ntemperature = 50\ndatafilename = \"Helixpercentage.csv\"\nKDEfilename = \"KDE\" + str(temperature) + \"C.csv\"\nfigfilename = \"Distribution\" + str(temperature) + \"C.png\"\n\n# load file\ndata = np.genfromtxt(datafilename, delimiter=\",\")\n\n# use estimated bandwidth for 
the KDE\nbandwidth = 0.05\n\nfig, ax = plt.subplots(1, 1, sharey=True, tight_layout=True)\n \nX_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]\nax.hist(data[:,1], bins=16, normed=1)\npdf = kde_sklearn(data[:,1], x_grid, bandwidth=bandwidth)\nax.plot(x_grid, pdf, color='black', alpha=0.5, lw=3)\n\nplt.ylabel('Probablity density')\nplt.xlabel('Alpha helix ratio')\n\n#save KDE data\nnp.savetxt(KDEfilename, pdf, delimiter=\",\")\n\nax.set_xlim(0,1)\n\nplt.savefig(figfilename, dpi=300)\n#plt.show()\n\t\t\t\n\t\n\t\n","sub_path":"EAAAK/50C/KDEourdata.py","file_name":"KDEourdata.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"253134807","text":"from __future__ import print_function\nimport unittest\nfrom pytraj.base import *\nfrom pytraj import adict\nfrom pytraj import io as mdio\nfrom pytraj.utils.check_and_assert import assert_almost_equal\nfrom pytraj.datasets.DataSet_Coords_CRD import DataSet_Coords_CRD\n\nclass Test(unittest.TestCase):\n def test_0(self):\n print (\"load TrajectoryIterator\")\n coords = DataSet_Coords_CRD()\n traj = mdio.iterload(\"./data/md1_prod.Tc5b.x\", \"./data/Tc5b.top\")\n coords.top = traj.top\n coords.load(traj)\n assert coords.size == traj.size\n\n def test_1(self):\n print (\"load Trajectory\")\n coords = DataSet_Coords_CRD()\n traj = mdio.iterload(\"./data/md1_prod.Tc5b.x\", \"./data/Tc5b.top\")\n farray = traj[:]\n coords.top = traj.top\n coords.load(farray)\n assert coords.size == traj.size\n\n def test_2(self):\n print (\"load string (filenames)\")\n coords = DataSet_Coords_CRD()\n coords.load(\"./data/md1_prod.Tc5b.x\", \"./data/Tc5b.top\")\n traj = mdio.iterload(\"./data/md1_prod.Tc5b.x\", \"./data/Tc5b.top\")\n assert coords.size == traj.size\n\n def test_3(self):\n print (\"load frame_iter\")\n coords = DataSet_Coords_CRD()\n traj = mdio.iterload(\"./data/md1_prod.Tc5b.x\", \"./data/Tc5b.top\")\n coords.load(traj(2, 8, 2), traj.top)\n assert coords.size == 4\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_DataSet_Coords_CRD_load.py","file_name":"test_DataSet_Coords_CRD_load.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"383328991","text":"from pathlib import Path\nfrom cosmic_ray.work_item import WorkResult, WorkItem, TestOutcome, WorkerOutcome\n\n\nclass TestWorkResult:\n def test_repr(self):\n result = WorkResult(WorkerOutcome.NORMAL)\n repr(result) # Just make sure it doesn't throw.\n\n\nclass TestWorkItem:\n def test_repr(self):\n item = WorkItem(\n module_path=Path('.'),\n operator_name='core/NoOp',\n occurrence=0,\n start_pos=(1, 1),\n end_pos=(1, 2),\n job_id='1')\n repr(item) # Just make sure it doesn't throw.\n","sub_path":"tests/test_suite/unittests/test_work_item.py","file_name":"test_work_item.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"240762102","text":"import cherrypy\nimport MySQLdb\nfrom md5 import md5\nimport os\n\nclass RootServer:\n @cherrypy.expose\n def index(self):\n return \"\"\"This is a public page!\"\"\"\n\nclass SecureServer:\n @cherrypy.expose\n def index(self):\n return \"This is a secure section\"\n\ndef get_users():\n db = MySQLdb.connect(host='localhost', \n user='root',\n passwd='aaaa1111', \n db='users')\n curs = db.cursor()\n curs.execute('select user_name,salted_pw from users')\n return 
dict(curs.fetchall())\n\ndef encrypt_pw(pw):\n return md5(pw).hexdigest()\n\nif __name__ == '__main__':\n users = get_users()\n PATH = os.path.abspath(os.path.dirname(__file__))\n\n\n conf = {'/': \n { \n 'tools.basic_auth.on': True,\n 'tools.basic_auth.realm': 'Some site2',\n 'tools.basic_auth.users': users,\n 'tools.basic_auth.encrypt': encrypt_pw,\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': PATH,\n 'tools.staticdir.index': 'index.html',\n }\n }\n root = RootServer()\n root.secure = SecureServer()\n cherrypy.quickstart(root, '/', config=conf)","sub_path":"web_interface/test_login.py","file_name":"test_login.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"103767207","text":"from .ReadHelper import signed8, read_int_32le\r\n\r\n\r\ndef read_zhs_stitches(f, out):\r\n count = 0\r\n while True:\r\n count += 1\r\n b = bytearray(f.read(3))\r\n if len(b) != 3:\r\n break\r\n ctrl = b[0]\r\n if ctrl == 0x10:\r\n pass\r\n x = signed8(b[1])\r\n y = signed8(b[2])\r\n if ctrl == 0x02:\r\n out.stitch(x, y)\r\n continue\r\n if ctrl == 0x01:\r\n out.move(x, y)\r\n continue\r\n if ctrl == 0x04:\r\n out.color_change()\r\n continue\r\n if ctrl == 0x80:\r\n break\r\n out.end()\r\n\r\n\r\ndef read(f, out, settings=None):\r\n f.seek(0x0F, 0)\r\n stitch_start_position = read_int_32le(f)\r\n f.seek(stitch_start_position, 0)\r\n read_zhs_stitches(f, out)\r\n","sub_path":"pyembroidery/ZhsReader.py","file_name":"ZhsReader.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"620646867","text":"# -*- coding: utf-8 -*-\n\nimport asyncio\nimport asyncssh\nimport re\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass AlistenerException(Exception):\n pass\n\n\nclass Alistener(object):\n def __init__(self, hostname):\n self.hostname = hostname\n self.files = []\n\n async def run_client(self, filepath, expression, timeout):\n async with await asyncssh.connect(self.hostname, username=\"root\") as conn:\n logger.debug(\"Connected to %s\", self.hostname)\n stdin, stdout, stderr = await conn.open_session(\"tail -F %s\" % filepath)\n\n async def match():\n while True:\n output = await stdout.readline()\n m = re.search(expression, output)\n if m and m.group(0):\n logger.debug(\"Found occurrence: %s\", output)\n stdin.write(\"\\x03\")\n return True\n\n try:\n return await asyncio.wait_for(match(), timeout)\n except asyncio.TimeoutError:\n logger.debug(\"Found no results\")\n return False\n\n async def tail(self, filepath, expression, timeout=3):\n if not filepath.lower() in self.files:\n self.files.append(filepath.lower())\n result = await self.run_client(filepath, expression, timeout)\n return result\n","sub_path":"disruption_generator/listener/alistener.py","file_name":"alistener.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"226117512","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 1 11:53:16 2018\n\n@author: 北海若\n\"\"\"\n\nimport game_utils\nimport model\nimport time\nimport numpy\nimport keras\nimport copy\n\n\nm = model.get_model()\nmemory = []\ngamma = 0.9\nepsilon = 1\nepsilon_decay = .995\nepsilon_min = 0.1\nlearning_rate = 0.0001\nm.compile(loss='mse',\n optimizer=keras.optimizers.RMSprop(lr=learning_rate))\n\n\ndef remember(state, action, reward, next_state, done):\n 
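# store one (state, action, reward, next_state, done) transition for experience replay\n 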
memory.append((state, action, reward, next_state, done))\n\n\ndef train():\n for i in range(30):\n img, hp1, hp2 = game_utils.fetch_screen()\n state = numpy.reshape(numpy.asarray(img.resize((80, 80))), [1, 80, 80, 3])\n for time_t in range(600):\n # turn this on if you want to render\n # env.render()\n # choose an action\n action = act(state)\n time.sleep(0.017)\n # apply the action to the environment to advance the game\n img, hp1, hp2 = game_utils.fetch_screen()\n next_state = numpy.reshape(numpy.asarray(img.resize((80, 80))), [1, 80, 80, 3])\n reward = hp1 - hp2\n # remember the previous state, action, reward and next state\n remember(state, action, reward, next_state, False)\n # make the next state the current state for the next frame\n state = copy.deepcopy(next_state)\n # train the model on past experience\n replay(32)\n\n\ndef act(state):\n # epsilon-greedy: explore with a random action, otherwise exploit the model\n if numpy.random.rand() <= epsilon:\n action = int(6 * numpy.random.rand())\n else:\n act_values = m.predict(state)\n action = numpy.argmax(act_values[0])\n game_utils.act(action)\n return action\n\n\ndef replay(batch_size):\n global epsilon\n batches = min(batch_size, len(memory))\n batches = numpy.random.choice(len(memory), batches)\n for i in batches:\n state, action, reward, next_state, done = memory[i]\n target = reward\n if not done:\n target = reward + gamma * numpy.amax(m.predict(next_state)[0])\n target_f = m.predict(state)\n target_f[0][action] = target\n m.fit(state, target_f, epochs=1, verbose=0)\n if epsilon > epsilon_min:\n epsilon *= epsilon_decay\n","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
{"seq_id":"151820563","text":"import unittest\n\nfrom etl.Transformers.Events.EventsStudentsTransformer import EventsStudentsTransformer\n\n\nclass TestEventsTransformer(unittest.TestCase):\n\n def test_transform(self):\n test_data = [{'events.id': '21152',\n 'attendee_users_on_events.username': 'student1',\n 'attendees_on_events.checked_in': 'Yes',\n 'attendees_on_events.registered': 'No'},\n {'events.id': '424211',\n 'attendee_users_on_events.username': 'student2',\n 'attendees_on_events.checked_in': 'No',\n 'attendees_on_events.registered': 'Yes'}\n ]\n\n expected = [{'event_id': 21152,\n 'username': 'student1',\n 'checked_in': 1,\n 'pre_registered': 0,\n 'is_handshake_data': 1},\n {'event_id': 424211,\n 'username': 'student2',\n 'checked_in': 0,\n 'pre_registered': 1,\n 'is_handshake_data': 1}\n ]\n t = EventsStudentsTransformer()\n self.assertEqual(expected, t.transform(test_data))\n","sub_path":"tests/etl_tests/transformer_tests/events/test_EventsStudentsTransformer.py","file_name":"test_EventsStudentsTransformer.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
{"seq_id":"546467968","text":"import requests\nimport time\n\napi_link = 'https://l2ig-alert-line-bot.herokuapp.com/'\n\ndef keep_alive():\n while True:\n r = requests.get(api_link)\n if r.status_code != 200 or r.text.find('error') != -1:\n print('error = ' + str(r.status_code) + ' \\n ' + str(r.text.find('error')))\n return -1\n\n time.sleep(300)\n\n\nif __name__ == '__main__':\n keep_alive()\n","sub_path":"keep_alive_server_script.py","file_name":"keep_alive_server_script.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
{"seq_id":"170952768","text":"import logging\nimport codecs\nimport sys\nimport os\nimport re\nimport time\nfrom datetime import datetime, date, timedelta\nfrom nltk import word_tokenize\n
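\n# NOTE: Python 2 idiom - reload()/setdefaultencoding below force UTF-8 as the default string encoding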
\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n#import MeCab\n\nsys.path.insert(0, '/data00/duanjinbao/repos/i18n')\n\nfrom recommend_mining.article_analysis.article_quality.data.group_profile_util import get_profiles\nfrom push.pps_util.dal_util import init_web_article_dal\nfrom recommend_mining.article_analysis.article_quality.util import *\n\n\ndef participle_text(text):\n # JaTokenClient is expected to come from one of the project imports above\n ja_tokenizer = JaTokenClient()\n\n res_text = ja_tokenizer.tokenize(text)\n\n return res_text\n\n\nimport MeCab\nfrom xml.dom.minidom import parse\nfrom math import log\n\n\ndef getNoun(words):\n noun = []\n node = participle_text(words.encode('utf-8'))\n\n for word in node:\n if word != \"\" and word != \" \":\n noun.append(word)\n\n return noun\n\n\ndef getTopKeywords(TF, n):\n ranked = sorted(TF.items(), key=lambda x: x[1], reverse=True)\n return ranked[0:n]\n\n\ndef calcTFIDF(N, TF, DF):\n # classic TF-IDF: term frequency times log of inverse document frequency\n tfidf = TF * log(N / DF)\n return tfidf\n\n\nif __name__ == \"__main__\":\n N = 1675757\n tf = {}\n df = {}\n\n dom = parse(\"jawiki-latest-pages-articles.xml\")\n text = dom.getElementsByTagName(\"text\")\n print(text)\n for i, text in enumerate(text):\n df_list = []\n noun = getNoun(text.childNodes[0].data)\n for word in noun:\n try:\n print(word)\n tf[word] = tf[word] + 1\n except KeyError:\n tf[word] = 1\n for word in noun:\n # count each word at most once per document for the document frequency\n if word in df_list:\n continue\n df_list.append(word)\n try:\n df[word] = df[word] + 1\n except KeyError:\n df[word] = 1\n\n tfidf = {}\n for k, v in getTopKeywords(tf, 100):\n tfidf[k] = calcTFIDF(N, tf[k], df[k])\n\n for k, v in getTopKeywords(tfidf, 100):\n print(k, v)","sub_path":"cralwer/parse_japanese_wiki.py","file_name":"parse_japanese_wiki.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
{"seq_id":"57986855","text":"\"\"\"Author: Brandon Trabucco, Copyright 2019, MIT License\"\"\"\n\n\nfrom multiarchy.launch import launch_local\nfrom multiarchy.baselines.hierarchy_sac import hierarchy_sac, hierarchy_sac_variant\nfrom gym.envs.mujoco.half_cheetah import HalfCheetahEnv\n\n\nif __name__ == \"__main__\":\n\n # parameters for the learning experiment\n variant = dict(\n max_num_steps=1000000,\n logging_dir=\"half_cheetah/hierarchy_sac/\",\n num_hierarchy_levels=2,\n time_skip=10,\n hidden_size=256,\n num_hidden_layers=2,\n reward_scale=1.0,\n discount=0.99,\n initial_alpha=1.0,\n policy_learning_rate=0.0003,\n qf_learning_rate=0.0003,\n tau=0.005,\n batch_size=256,\n max_path_length=1000,\n num_workers=10,\n num_warm_up_steps=100000,\n num_steps_per_epoch=1000,\n num_steps_per_eval=10000,\n num_epochs_per_eval=10,\n num_epochs=10000)\n\n # make sure that all the right parameters are here\n assert all([x in variant.keys() for x in hierarchy_sac_variant.keys()])\n\n # launch the experiment using ray\n launch_local(\n hierarchy_sac,\n variant,\n HalfCheetahEnv,\n num_seeds=3)\n","sub_path":"examples/half_cheetah_hsac.py","file_name":"half_cheetah_hsac.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
{"seq_id":"357170581","text":"\"\"\" Convolutional Neural Network.\n\nBuild and train a convolutional neural network with TensorFlow.\nThis example is using the MNIST database of handwritten digits\n(http://yann.lecun.com/exdb/mnist/)\n\nThis example is using TensorFlow layers API, see 'convolutional_network_raw'\nexample for a raw implementation with variables.\n\nAuthor: Aymeric Damien\nProject: 
https://github.com/aymericdamien/TensorFlow-Examples/\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\nfrom tensorflow.python.tools import freeze_graph\nimport numpy as np\n# Import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=False)\n\nimport tensorflow as tf\n\n# Training Parameters\nlearning_rate = 0.001\nnum_steps = 2000\nbatch_size = 128\n\n# Network Parameters\nnum_input = 784 # MNIST data input (img shape: 28*28)\nnum_classes = 10 # MNIST total classes (0-9 digits)\ndropout = 0.75 # Dropout, probability to keep units\n\n\n# Create the neural network\ndef conv_net(x_dict, n_classes, dropout, reuse, is_training):\n # Define a scope for reusing the variables\n # TF Estimator input is a dict, in case of multiple inputs\n x = x_dict['images']\n\n # MNIST data input is a 1-D vector of 784 features (28*28 pixels)\n # Reshape to match picture format [Height x Width x Channel]\n # Tensor input become 4-D: [Batch Size, Height, Width, Channel]\n x = tf.reshape(x, shape=[-1, 28, 28, 1])\n\n # Convolution Layer with 32 filters and a kernel size of 5\n conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)\n # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n conv1 = tf.layers.max_pooling2d(conv1, 2, 2)\n\n # Convolution Layer with 64 filters and a kernel size of 3\n conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)\n # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n conv2 = tf.layers.max_pooling2d(conv2, 2, 2)\n\n # Flatten the data to a 1-D vector for the fully connected layer\n fc1 = tf.contrib.layers.flatten(conv2)\n\n # Fully connected layer (in tf contrib folder for now)\n fc1 = tf.layers.dense(fc1, 1024)\n # Apply Dropout (if is_training is False, dropout is not applied)\n fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)\n\n # Output layer, class prediction\n out = tf.layers.dense(fc1, n_classes)\n\n return out\n\n\nfrom tensorflow.python.estimator.export import export\n\n\nwith tf.Session() as sess:\n # Build the Estimator\n feature_spec = {'images': tf.constant(mnist.train.images)}\n serving_input_fn = export.build_raw_serving_input_receiver_fn(feature_spec)\n # Train the Model\n # Evaluate the Model\n # Define the input function for evaluating\n input_fn = tf.estimator.inputs.numpy_input_fn(\n x={'images': mnist.test.images}, y=mnist.test.labels,\n batch_size=batch_size, shuffle=False)\n\n # Define a scope for reusing the variables\n # TF Estimator input is a dict, in case of multiple inputs\n x = mnist.test.images\n is_training = False\n n_classes = 10\n\n # MNIST data input is a 1-D vector of 784 features (28*28 pixels)\n # Reshape to match picture format [Height x Width x Channel]\n # Tensor input become 4-D: [Batch Size, Height, Width, Channel]\n x = tf.reshape(x, shape=[-1, 28, 28, 1])\n\n # Convolution Layer with 32 filters and a kernel size of 5\n conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)\n # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n conv1 = tf.layers.max_pooling2d(conv1, 2, 2)\n\n # Convolution Layer with 64 filters and a kernel size of 3\n conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)\n # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n conv2 = tf.layers.max_pooling2d(conv2, 2, 2)\n\n # Flatten the data to a 1-D vector for the fully connected layer\n fc1 = tf.contrib.layers.flatten(conv2)\n\n # Fully connected layer (in 
tf contrib folder for now)\n fc1 = tf.layers.dense(fc1, 1024)\n # Apply Dropout (if is_training is False, dropout is not applied)\n fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)\n # Use the Estimator 'evaluate' method\n # Output layer, class prediction\n out = tf.layers.dense(fc1, n_classes,name='output')\n\n sess.run(tf.global_variables_initializer())\n\n saver = tf.train.Saver(tf.global_variables())\n tf.train.write_graph(sess.graph_def, 'lenet_dir', 'lenet.pbtxt',as_text=True)\n saver.save(sess, 'lenet_dir/test3.ckpt',write_meta_graph=False)\n\n","sub_path":"src/main/resources/tf_graphs/lenet_tf.py","file_name":"lenet_tf.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"497768505","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: b:\\forskning\\papers\\paneltime\\paneltime\\paneltime\\gui\\gui.py\n# Compiled at: 2020-04-20 04:44:00\n# Size of source mod 2**32: 8655 bytes\nimport tkinter as tk\nfrom tkinter import ttk\nfrom multiprocessing import pool\nimport sys\nfrom gui import gui_charts\nfrom gui import gui_functions as guif\nimport os\nfrom gui import gui_buttons\nimport functions as fu\nfrom gui import gui_right_tabs\nfrom gui import gui_scrolltext\nfrom gui import gui_main_tabs\nimport tempstore, numpy as np, traceback\nFONT_SIZE = 10\nFONT_WIDTH = FONT_SIZE * 0.35\nLINE_HEIGHT = 1.54\nGRAPH_IMG_WIDTH = 0.35\nGRAPH_IMG_HEIGHT = 0.85\n\nclass window(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n self.title('Paneltime')\n self.define_styles()\n self.mc = None\n self.geometry('%sx%s+%s+%s' % (self.winfo_screenwidth(), self.winfo_screenheight() - 75, -5, 0))\n if 'nt' == os.name:\n iconpath = os.path.join(fu.currentdir(), 'paneltime.ico')\n self.iconbitmap(iconpath)\n self.iconpath = iconpath\n else:\n self.iconpath = None\n self.add_panes()\n self.main_pane.rowconfigure(0, weight=1)\n self.main_pane.columnconfigure(0, weight=80, uniform='fred')\n self.main_pane.columnconfigure(1)\n self.main_pane.columnconfigure(2, weight=20, uniform='fred')\n self.add_main_frames()\n self.add_delimiters()\n self.output = gui_scrolltext.ScrollText((self.frm_left), format_text=False)\n self.output.grid(row=2, column=0, sticky=(tk.NSEW))\n self.data = datastore(self)\n self.main_tabs = gui_main_tabs.main_tabs(self)\n self.right_tabs = gui_right_tabs.right_tab_widget(self, self.main_tabs)\n self.main_tabs.recreate_tabs()\n self.locals = dict()\n self.globals = {'window':self, 'data':self.right_tabs.data_tree.datasets}\n sys.stdout = stdout_redir(self.output)\n sys.stderr = stdout_redir(self.output)\n self.protocol('WM_DELETE_WINDOW', self.on_closing)\n\n def exec(self, source):\n try:\n exec(source, self.globals, self.locals)\n except Exception as e:\n try:\n traceback.print_exc()\n finally:\n e = None\n del e\n\n def define_styles(self):\n style = ttk.Style()\n style.configure('TFrame', background='white')\n style.configure('TNotebook', background='white')\n\n def add_panes(self):\n self.rowconfigure(0)\n self.rowconfigure(1, weight=1, uniform='fred')\n self.rowconfigure(2)\n self.columnconfigure(0, weight=1)\n self.button_pane = tk.Frame(self, height=10, background='white')\n self.main_pane = tk.Canvas(self, background='pink')\n self.bottom_bar = tk.Frame(self, background='white', height=25)\n self.about = tk.Label((self.bottom_bar), text='This sofware is a 
scientific work and should be cited if used for scientific purposes. Cite Sirnes (2020)', background='white')\n self.about.grid(row=0, column=0, sticky=(tk.W))\n self.button_pane.grid(row=0, column=0, sticky=(tk.EW))\n self.main_pane.grid(row=1, column=0, sticky=(tk.NSEW))\n self.bottom_bar.grid(row=2, column=0, sticky=(tk.EW))\n\n def do_nothing(self, event):\n pass\n\n def add_delimiters(self):\n self.delimiter_v = tk.Frame((self.frm_left), background='dark grey', height=5, cursor='sb_v_double_arrow')\n self.delimiter_v.grid(row=1, column=0, sticky=(tk.EW))\n self.delimiter_v.bind('<ButtonRelease-1>', self.vertical_resize)\n self.delimiter_h = tk.Frame((self.main_pane), background='dark grey', width=5, cursor='sb_h_double_arrow')\n self.delimiter_h.grid(row=0, column=1, sticky=(tk.NS))\n self.delimiter_h.bind('<ButtonRelease-1>', self.horizontal_resize)\n\n def add_main_frames(self):\n left_weight = 80\n self.frm_left = tk.Frame((self.main_pane), background='green')\n self.frm_left.rowconfigure(0, weight=left_weight, uniform='fred')\n self.frm_left.rowconfigure(1)\n self.frm_left.rowconfigure(2, weight=(100 - left_weight), uniform='fred')\n self.frm_left.columnconfigure(0, weight=1)\n self.frm_left.grid(row=0, column=0, sticky=(tk.NSEW))\n self.frm_right = tk.Frame((self.main_pane), background='white')\n self.frm_right.rowconfigure(0)\n self.frm_right.rowconfigure(1, weight=1)\n self.frm_right.columnconfigure(0, weight=1)\n self.frm_right.grid(row=0, column=2, sticky=(tk.NSEW))\n self.main_frames_weight = left_weight\n\n def vertical_resize(self, event):\n self.pack_propagate(0)\n new_y = self.output.winfo_y() + event.y\n y = int(100 * new_y / self.winfo_height())\n self.frm_left.rowconfigure(0, weight=y, uniform='fred')\n self.frm_left.rowconfigure(2, weight=(100 - y), uniform='fred')\n\n def horizontal_resize(self, event):\n self.pack_propagate(0)\n new_x = self.frm_right.winfo_x() + event.x\n x = int(100 * new_x / self.winfo_width())\n self.main_pane.columnconfigure(0, weight=(max((x, 0))))\n self.main_pane.columnconfigure(2, weight=(max((100 - x, 0))))\n\n def add_menu(self):\n menubar = tk.Menu(self)\n filemenu = tk.Menu(menubar, tearoff=0)\n menubar.add_cascade(label='File', menu=filemenu)\n openmenu = tk.Menu(menubar, tearoff=0)\n filemenu.add_cascade(label='Open data', menu=openmenu)\n openmenu.add_command(label='Data text file', command=(self.donothing))\n openmenu.add_command(label='Sql connection', command=(self.donothing))\n filemenu.add_command(label='Open project', command=(self.donothing))\n filemenu.add_separator()\n filemenu.add_command(label='Save project', command=(self.donothing))\n filemenu.add_command(label='Save project as', command=(self.donothing))\n filemenu.add_separator()\n filemenu.add_command(label='Settings', command=(self.donothing))\n filemenu.add_separator()\n filemenu.add_command(label='Quit', command=(self.donothing))\n self.settingsmenu = tk.Menu(menubar, tearoff=0)\n menubar.add_cascade(label='Regression', menu=(self.settingsmenu))\n self.settingsmenu.add_command(label='Run Ctrl+R', command=(self.donothing))\n self.settingsmenu.add_command(label='Abort Ctrl+A', command=(self.abort))\n filemenu.add_separator()\n self.settingsmenu.add_command(label='Settings', command=(self.donothing))\n filemenu.add_separator()\n self.settingsmenu.add_command(label='Scatter plots raw', command=(self.show_scatter), state='disabled')\n self.settingsmenu.add_command(label='Scatter plots normalized', command=(self.show_scatter_norm), state='disabled')\n 
self.config(menu=menubar)\n\n def donothing(self):\n pass\n\n def abort(self):\n if self.btn_abort.cget('relief') == tk.RAISED:\n self.btn_abort.config(relief=(tk.SUNKEN))\n else:\n self.btn_abort.config(relief=(tk.RAISED))\n\n def done(self, x):\n sys.stdout = sys.__stdout__\n self.pool.terminate()\n self.destroy()\n self.quit()\n\n def on_closing(self):\n self.data.save()\n if self.right_tabs.preferences.options.save_datasets.value:\n d = self.right_tabs.data_tree.datasets\n for i in list(d.keys()):\n for j in list(d[i].keys()):\n if not type(d[i][j]) == np.ndarray:\n d[i].pop(j)\n\n tempstore.save_obj(tempstore.fname_datasets, self.right_tabs.data_tree.datasets)\n exit()\n\n def show_scatter(self):\n if not hasattr(self, 'panel'):\n return\n self.schatter_charts = gui_charts.scatter_charts(self, self.panel, self.panel.input.X, self.panel.input.Y, self.iconpath, 700, 1000)\n\n def show_scatter_norm(self):\n if not (hasattr(self, 'panel') and hasattr(self, 'll')):\n return None\n self.ll.standardize()\n X = self.ll.X_st[self.panel.included[:, :, 0]]\n Y = self.ll.Y_st[self.panel.included[:, :, 0]]\n self.schatter_charts = gui_charts.scatter_charts(self, self.panel, X, Y, self.iconpath, 700, 1000)\n\n def get(self):\n return self.process.get()\n\n\nclass stdout_redir:\n\n def __init__(self, textbox):\n self.textbox = textbox\n\n def write(self, string):\n self.textbox.insert('end', string)\n self.textbox.see('end')\n\n\nclass datastore(dict):\n\n def __init__(self, window):\n d = tempstore.load_obj(tempstore.fname_window)\n if d is None or type(d) != dict:\n dict.__init__(self)\n else:\n dict.__init__(self, d)\n self.dict_default = dict()\n self.dict_default['sql_str'] = 'SELECT * FROM TABLE <table>'\n self.dict_default['conn_str'] = def_conn_str\n self.win = window\n self['current path'] = os.getcwd()\n self['current json path'] = os.getcwd()\n\n def get(self, key, default=None):\n try:\n v = self[key]\n if v is None:\n self[key] = default\n return self[key]\n except:\n self[key] = default\n\n return self[key]\n\n def __getitem__(self, key):\n try:\n return dict.__getitem__(self, key)\n except:\n self[key] = self.dict_default[key]\n return dict.__getitem__(self, key)\n\n def save(self):\n self.win.main_tabs._tabs.save_all_in_temp()\n tempstore.save_obj(tempstore.fname_window, dict(self))\n\n\ndef_conn_str = \"conn = pymysql.connect(host='<hostname>', \\n\\n\\t\\t\\tuser='<username>', \\n\\t\\t\\tpasswd='<password>', \\n\\t\\t\\tdb='<dbname>')\\t\"","sub_path":"pycfiles/paneltime-1.1.12-cp37-cp37m-win_amd64/gui.cpython-37.py","file_name":"gui.cpython-37.py","file_ext":"py","file_size_in_byte":9852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"126892262","text":"\"\"\"This module contains methods needed to subdivide a polygon into N subpolygons of equal area.\"\"\"\n\nimport numpy as np\nfrom matplotlib import path \n\nclass Polygon(object):\n \"\"\"A representation for a polygon and its divisions.\n\n Attributes:\n area (float): The area of the polygon.\n vertices (list of float): The vertices of the polygon. \n perimiter (float): The perimiter of the polygon. 
\n sub_polgons (list of list): The list of the vertices of the subpolygons\n \"\"\"\n\n def __init__(self,vertices,eps_=None):\n \"\"\"Initializes the polygon.\n\n Args:\n vertices (list of float): The verticies of the polgon listed in \n counterclockwise order.\n eps (optional float): Floating point accuracy, default 1E-7.\n \"\"\"\n\n self.vertices = vertices\n self._segments = self._find_segments()\n self.area = self._find_area()\n self.perimiter = self._find_permimter()\n self._path = path.Path(vertices)\n if eps_ is None:\n self._eps = 1E-7\n else:\n self._eps = eps_\n\n\n def subdivide(self,N):\n \"\"\"Subdivides the polygon into segments of equal area with minimal perimiters.\n\n Args:\n N (int): The number of subdivisions desired.\n \n Returns:\n sub_polys (list): A list of the vertices of the subpolygons found.\n\n Raises:\n TypeError: If N is not an integer.\n \"\"\"\n from copy import deepcopy\n sub_polys = []\n if isinstance(N,int):\n subarea = self.area/N\n remaining_verts = deepcopy(self.vertices)\n while len(sub_polys) < N-1:\n sub_poly, remaining_verts = self._find_subdivision(remaining_verts,subarea)\n print(\"sub_paly\",sub_poly)\n sub_polys.append(sub_poly)\n sub_polys.append(remaining_verts)\n else:\n raise TypeError(\"Please specify an integer number of subdivisions of the \"\n \"polygon.\")\n\n return sub_polys\n\n def _find_subdivision(self,verts,area):\n \"\"\"Finds a subdivision of the desired area for the polygon formed by the vertices\n\n Args:\n verts (list): The vertices of the target polygon.\n area (float): The area for the new sub polygon.\n \n Returns:\n sub_poly (list): The vertices of the polygon with the desired area.\n remaining_poly (list): The vertices definining the remaining polygon.\n\n Raises:\n RuntimeError: if the code finds more than 3 sub-areas which should \n be impossible.\n RuntimeError: if the code cannot reconstruct the remaining polygon \n after removing the sub_polygon's vertices.\n \"\"\"\n from copy import deepcopy\n print(\"START************************************************\")\n print(\"verts\",verts)\n segments = self._find_segments(verts=verts)\n\n total_area = self._find_area(segments=segments)\n n_segs = len(segments)\n sub_poly_per = None\n \n for i in range(n_segs):\n for j in range(n_segs):\n if i != j:\n new_segments, bisector, self._new_points = self._create_new_segments(segments[i],segments[j])\n divisions = self._create_new_areas(new_segments,segments)\n test_poly = None\n\n if len(divisions) == 1 and self._find_area(segments=divisions[0]) > area:\n if len(divisions[0]) == 3:\n test_poly = self._triangle_cut(divisions[0],area,None)\n elif len(divisions[0]) == 4:\n test_poly = self._trapezoid_cut(divisions[0],area,bisector,None)\n elif len(divisions) == 2:\n if self._find_area(segments=divisions[0]) > area:\n if len(divisions[0]) == 3:\n test_poly = self._triangle_cut(divisions[0],area,None)\n elif len(divisions[0]) == 4:\n test_poly = self._trapezoid_cut(divisions[0],area,bisector,None)\n if self._find_area(segments=divisions[1]) > area and test_poly is None:\n if len(divisions[1]) == 3:\n test_poly = self._triangle_cut(divisions[1],area,None)\n elif len(divisions[1]) == 4:\n test_poly = self._trapezoid_cut(divisions[1],area,bisector,None)\n elif len(divisions) == 3:\n area1 = self._find_area(segments=divisions[0])\n area2 = self._find_area(segments=divisions[1])\n area3 = self._find_area(segments=divisions[2])\n if area1 > area:\n if len(divisions[0]) == 3:\n test_poly = self._triangle_cut(divisions[0],area,None)\n 
elif len(divisions[0]) == 4:\n test_poly = self._trapezoid_cut(divisions[0],area,bisector,None)\n elif (area1 + area2) > area:\n if len(divisions[1]) == 3:\n test_poly = self._triangle_cut(divisions[1],area,divisions[0])\n elif len(divisions[1]) == 4:\n test_poly = self._trapezoid_cut(divisions[1],area,bisector,divisions[0])\n if area3 > area and test_poly is None:\n if len(divisions[2]) == 3:\n test_poly = self._triangle_cut(divisions[2],area,None)\n elif len(divisions[2]) == 4:\n test_poly = self._trapezoid_cut(divisions[2],area,bisector,None)\n else: #pragma: no cover\n raise RuntimeError(\"The code found more than 3 divisions of \"\n \"the polygon for a given pair of line \"\n \"segments. This should not be possible.\")\n if test_poly is not None:\n test_segs = self._find_segments(verts=test_poly)\n if sub_poly_per is None:\n if abs(self._find_area(segments=test_segs) - area) < self._eps:\n sub_poly = test_poly\n sub_poly_per = self._find_permimter(segments=test_segs)\n else:\n if abs(self._find_area(segments=test_segs) - area) < self._eps and self._find_permimter(segments=test_segs) < sub_poly_per:\n sub_poly = test_poly\n sub_poly_per = self._find_permimter(segments=test_segs)\n\n # Add the vertices for the newly found polygon to the list so\n # that we can determine what the remaining larger polygon looks like.\n temp_verts = deepcopy(verts)\n for vert in sub_poly:\n if vert not in temp_verts:\n loc = 0\n new_loc = None\n while new_loc is None:\n if loc == len(temp_verts)-1:\n next_loc = 0\n else:\n next_loc = loc+1\n\n if self._is_between(temp_verts[loc],temp_verts[next_loc],vert):\n new_loc = next_loc\n else:\n loc += 1\n\n t_size = 0\n orig_list = 0\n temp = []\n while len(temp) < len(temp_verts)+1:\n if new_loc == t_size:\n temp.append(vert)\n t_size += 1\n else:\n temp.append(temp_verts[orig_list])\n t_size += 1\n orig_list += 1\n temp_verts = temp\n\n # Now we need to figure out which vertices form the remaining volume.\n remaining_poly = []\n for vert in temp_verts:\n if vert in sub_poly and vert not in verts:\n # this is a new vertex that splits the regions.\n remaining_poly.append(vert)\n elif vert in verts and vert not in sub_poly:\n # This is a vertex in the original polygon only.\n remaining_poly.append(vert)\n\n # Check to make sure we didn't miss any vertices. This will\n # only happen when the sub_poly and the remaining polygon share\n # a vertex of the original polygon.\n test_area = self._find_area(segments=self._find_segments(verts=remaining_poly))\n if not np.allclose(test_area+area,total_area):\n area_check = False\n for vert in sub_poly:\n if vert not in remaining_poly:\n test_case = remaining_poly + [vert]\n temp_area = self._find_area(segments=self._find_segments(verts=test_case))\n if np.allclose(temp_area+area,total_area):\n remaining_poly.append(vert)\n area_check = True\n break\n\n else:\n area_check = True\n\n if not area_check: #pragma: no cover\n raise RuntimeError(\"Couldn't find the vertices of the remaining polygon.\")\n\n return self._counter_clockwise_sort(sub_poly), self._counter_clockwise_sort(remaining_poly)\n\n def _create_new_segments(self,line1,line2):\n \"\"\"Finds the projection of the endpoints of the lines across the angle\n bisector of the lines. 
Then uses those points to construct the\n new possible segments of the input lines.\n\n Args:\n line1 (list): The endpoints of the first line.\n line2 (list): The endpoints of the second line.\n\n Returns:\n new_segments (list): The new segments formed by the new points.\n bisector (list): The starting and ending points of the bisector.\n new_verts (list): A list of the new vertices.\n \"\"\"\n\n bisector = self._angle_bisection(line1,line2)\n new_points = self._projections(line1,line2,bisector)\n \n new_segments = []\n\n if len(new_points) == 2 and ((np.allclose(new_points[0][0],new_points[1][1]) and np.allclose(new_points[0][1],new_points[1][0])) or (np.allclose(new_points[0][0],new_points[1][0]) and np.allclose(new_points[0][1],new_points[1][1]))):\n new_points = [new_points[0]]\n\n for point in new_points:\n option1 = (point[1][0]-point[0][0])*(point[1][1]+point[0][1])\n if option1 < 0:\n new_segments.append((point[0],point[1]))\n else:\n new_segments.append((point[1],point[0]))\n\n # if the point isn't an end point then we need to find out\n # which line it's in to create new subsegments of the line.\n if not (np.allclose(point[0],line1[0]) or np.allclose(point[0],line1[1])) and not (np.allclose(point[0],line2[0]) or np.allclose(point[0],line2[1])):\n if self._is_between(line1[0],line1[1],point[0]):\n new_segments.append((line1[0],point[0]))\n new_segments.append((point[0],line1[1]))\n elif self._is_between(line2[0],line2[1],point[0]):\n new_segments.append((line2[0],point[0]))\n new_segments.append((point[0],line2[1]))\n\n return new_segments, bisector, new_points\n \n def _create_new_areas(self,new_segments,old_segments):\n \"\"\"Uses the newly created segments to diffine subvolumes of the polygon.\n\n Args:\n new_segments (list): The list of segments created by \n self._create_new_segments.\n old_segments (list): A list containing the other segments \n needed to form the polygon.\n\n Returns:\n new_areas (list): A list of lists of segments where each list defines a \n new polgon of smaller area.\n \"\"\"\n\n new_segments_local = []\n for seg in new_segments:\n if seg not in new_segments_local:\n new_segments_local.append(seg)\n if seg[::-1] not in new_segments_local:\n new_segments_local.append(seg[::-1])\n\n all_segments = new_segments_local+old_segments\n\n new_areas = []\n verts_lists = []\n for seg in new_segments_local:\n new_path =[seg]\n new_verts = [seg[0]]\n cur_seg = seg\n for test_seg in all_segments:\n if not (np.allclose(test_seg[0],cur_seg[0]) and np.allclose(test_seg[1],cur_seg[1])) and not (np.allclose(test_seg[0],cur_seg[1]) and np.allclose(test_seg[1],cur_seg[0])) and np.allclose(test_seg[0],cur_seg[1]):\n \n between = False\n for v in new_verts:\n if self._is_between(test_seg[0],test_seg[1],v) and v not in test_seg:\n between = True\n break\n \n if not between:\n new_path.append(test_seg)\n new_verts.append(test_seg[0])\n cur_seg = test_seg\n\n if np.allclose(cur_seg[1],seg[0]):\n break\n\n for test_seg_2 in new_segments_local:\n if not (np.allclose(test_seg_2[0],cur_seg[0]) and np.allclose(test_seg_2[1],cur_seg[1])) and not (np.allclose(test_seg_2[0],cur_seg[1]) and np.allclose(test_seg_2[1],cur_seg[0])) and np.allclose(test_seg_2[0],cur_seg[1]):\n\n between = False\n for v in new_verts:\n if self._is_between(test_seg_2[0],test_seg_2[1],v) and v not in test_seg_2:\n between = True\n break\n \n if not between:\n new_path.append(test_seg_2)\n new_verts.append(test_seg_2[0])\n cur_seg = test_seg_2\n if np.allclose(cur_seg[1],seg[0]):\n break\n\n if 
np.allclose(cur_seg[1],seg[0]):\n break\n \n if not np.allclose(cur_seg[1],seg[0]):\n for test_seg in all_segments:\n if not (np.allclose(test_seg[0],cur_seg[0]) and np.allclose(test_seg[1],cur_seg[1])) and not (np.allclose(test_seg[0],cur_seg[1]) and np.allclose(test_seg[1],cur_seg[0])) and np.allclose(test_seg[0],cur_seg[1]):\n between = False\n\n for v in new_verts:\n if self._is_between(test_seg[0],test_seg[1],v) and v not in test_seg:\n between = True\n break\n \n if not between:\n new_path.append(test_seg)\n new_verts.append(test_seg[0])\n cur_seg = test_seg\n if np.allclose(cur_seg[1],seg[0]):\n break\n \n for test_seg_2 in new_segments_local:\n if not (np.allclose(test_seg_2[0],cur_seg[0]) and np.allclose(test_seg_2[1],cur_seg[1])) and not (np.allclose(test_seg_2[0],cur_seg[1]) and np.allclose(test_seg_2[1],cur_seg[0])) and np.allclose(test_seg_2[0],cur_seg[1]):\n between = False\n \n for v in new_verts:\n if self._is_between(test_seg_2[0],test_seg_2[1],v) and v not in test_seg_2:\n between = True\n break\n \n if not between:\n new_path.append(test_seg_2)\n new_verts.append(test_seg_2[0])\n cur_seg = test_seg_2\n if np.allclose(cur_seg[1],seg[0]):\n break\n\n if np.allclose(cur_seg[1],seg[0]):\n break\n \n cur_verts = [x for (x,y) in new_path]\n cur_verts = self._counter_clockwise_sort(cur_verts)\n # new_path = self._find_segments(verts=cur_verts)\n if not new_path in new_areas and len(new_path) > 2 and len(new_path) <= len(self._segments) and self._find_permimter(segments=new_path) <= self.perimiter and np.allclose(new_path[0][0],new_path[-1][1]) and cur_verts not in verts_lists:\n new_areas.append(new_path)\n verts_lists.append(cur_verts)\n\n return new_areas\n\n def _triangle_cut(self,segments,total_area,rest_of_poly):\n \"\"\"Finds the desired cut inside a triangle to get the correct area.\n \n Args:\n segments (list): The line segments that form the triangel.\n total_area (float): The area desired after the cut.\n rest_of_poly (list): The line segments containing the rest of the\n polygon whose area will contribute.\n\n Returns:\n poly (list): The vertices that form the polygon with the desired area.\n\n Raises:\n RunTimeError: A RunTimeError is raised if the corroct area cannot be found.\n \"\"\"\n\n if rest_of_poly is None:\n target = total_area\n else:\n target = total_area - self._find_area(rest_of_poly)\n\n a = None\n for point in self._new_points:\n if segments[0] in self._segments:\n a = segments[0][0]\n b = segments[1][1]\n c = segments[0][1]\n elif segments[1] in self._segments:\n a = segments[1][0]\n b = segments[2][1]\n c = segments[1][1]\n elif segments[2] in self._segments:\n a = segments[2][0]\n b = segments[0][1]\n c = segments[2][1]\n\n if a is not None:\n break\n \n cut_point = list(np.array(c) + target/self._find_area(segments=segments)*(np.array(b)-np.array(c)))\n\n if rest_of_poly is None:\n poly = [a,cut_point,c]\n else:\n poly = []\n for seg in rest_of_poly:\n if seg[0] == a:\n poly.append(a)\n poly.append(cut_point)\n poly.append(c)\n elif seg[0] == c:\n poly.append(c)\n poly.append(b)\n poly.append(a)\n else:\n poly.append(seg[0])\n \n if abs(self._find_area(segments=self._find_segments(verts=poly))-total_area) > self._eps: #pragma: no cover\n raise RuntimeError(\"Failed to find a cut line for the target area in \"\n \"triange_cut.\")\n\n return poly\n\n def _trapezoid_cut(self,segments,total_area,bisector,rest_of_poly):\n \"\"\"Finds the desired cut inside a trapezoid to get the correct area.\n \n Args:\n segments (list): The line segments that form 
the trapezoid.\n total_area (float): The area desired after the cut.\n bisector (list): The endpoints of the bisector.\n rest_of_poly (list): The line segments containing the rest of the\n polygon whose area will contribute.\n\n Returns:\n poly (list): The vertices that form the polygon with the desired area.\n\n Raises:\n RunTimeError: A RunTimeError is raised if the corroct area cannot be found.\n \"\"\"\n print(\"segments\",segments)\n \n if rest_of_poly is None:\n target = total_area\n else:\n target = total_area - self._find_area(rest_of_poly)\n\n at = segments[0][0]\n bt = segments[1][0]\n ct = segments[2][0]\n dt = segments[3][0]\n\n a = None\n for point in self._new_points:\n if at == point[1]:\n a = at\n b = bt\n c = ct\n d = dt\n elif bt == point[1]:\n a = bt\n b = ct\n c = dt\n d = at\n elif ct == point[1]:\n a = ct\n b = dt\n c = at\n d = bt\n elif dt == point[1]:\n a = dt\n b = at\n c = bt\n d = ct\n\n if a is not None:\n break\n\n ad = np.array(d)-np.array(a)\n bc = np.array(c)-np.array(b)\n bi_v = self._unit_vec(np.array(bisector[1]),np.array(bisector[0]))\n h_o = abs(np.dot(ad,bi_v)/np.linalg.norm(bi_v))\n\n # If the vector ad is perpendicular to the bisector then\n if np.allclose(h_o,0.0):\n h_o = abs(np.linalg.norm(np.array(b)-np.array(a)))\n\n # Here we'll use a bisection approach to find the correct value\n # for h. Technically there is a closed form solution but it is\n # really ugly and this might honestly be faster.\n correct_h = False\n h_test = h_o/2.\n prev = h_o/2.\n \n def project(a,b):\n \"\"\"Projects a onto b.\n \n Args:\n a (numpy array): The first vector.\n b (numpy array): The second vector.\n\n Returns:\n proj (numpy array): The projection.\n \"\"\"\n\n return b * np.dot(a,b)/np.linalg.norm(b)\n\n count = 0\n while not correct_h and count < 100:\n test_v = bi_v*h_test\n new_d = a + project(test_v,ad)\n if np.allclose(new_d,a):\n new_d = a + ad*h_test\n\n new_c = b + project(test_v,bc)\n if np.allclose(new_c,b):\n new_c = b + bc*h_test\n # print(\"a\",a,\"b\",b,\"c\",new_c,\"d\",new_d)\n # print(\"conv?\",abs(self._find_area(segments=self._find_segments(verts=[a,b,new_c,new_d]))-target))\n if abs(self._find_area(segments=self._find_segments(verts=[a,b,new_c,new_d]))-target) < self._eps:\n correct_h = True\n\n elif self._find_area(segments=self._find_segments(verts=[a,b,new_c,new_d])) > target:\n h_test -= prev/2.\n prev = prev/2.\n else:\n h_test +=prev/2.\n prev = prev/2.\n count += 1\n\n if count == 100: #pragma: no cover\n raise RuntimeError(\"Could not find correct cut line for trapezoid in 100 iterations.\")\n\n if rest_of_poly is None:\n poly = [a,b,list(new_c),list(new_d)]\n else:\n poly = []\n for seg in rest_of_poly:\n if seg[0] == a:\n poly.append(a)\n poly.append(b)\n poly.append(list(new_c))\n poly.append(list(new_d))\n elif seg[0] == b:\n poly.append(b)\n poly.append(list(new_c))\n poly.append(list(new_d))\n poly.append(a)\n else:\n poly.append(seg[0])\n\n if abs(self._find_area(segments=self._find_segments(verts=poly))-total_area) > self._eps: #pragma: no cover\n raise RuntimeError(\"Failed to find a cut line for the target area in \"\n \"trapeziod_cut.\")\n\n return poly \n \n def _is_between(self,a,b,c):\n \"\"\"Determines if point c is between points a and b.\n\n Args:\n a (numpy array): An endpoint of the line.\n b (numpy array): The other endpoint of the line.\n c (numpy array): A point in space.\n \n Returns:\n between (bool): True if c is on the line between a and b.\n \"\"\"\n\n if not isinstance(a,np.ndarray):\n a = np.array(a)\n if 
not isinstance(b,np.ndarray):\n b = np.array(b)\n if not isinstance(c,np.ndarray):\n c = np.array(c)\n\n cross = np.cross(b-a,c-a)\n if abs(cross) > self._eps:\n return False\n\n dot = np.dot(b-a,c-a)\n if dot < 0:\n return False\n\n sqrlen = np.dot((b-a),(b-a))\n if dot > sqrlen:\n return False\n\n return True\n \n @staticmethod\n def _line_intersection(line1,line2):\n \"\"\"Finds the intersection of two lines. Code contributed by Paul Draper:\n https://stackoverflow.com/questions/20677795/how-do-i-compute-the-intersection-point-of-two-lines-in-python\n \n Args:\n line1 (list): The end points of the first line.\n line2 (list): The end points of the second line.\n\n Returns:\n intersection (list): The intersection of the two lines.\n \"\"\"\n \n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1]) \n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n if div == 0:\n x = (line1[0][0] + line2[1][0])/2.\n y = (line1[0][1] + line2[1][1])/2.\n else:\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n \n return [x, y]\n\n def _projections(self,line1, line2, bisection):\n \"\"\"Finds the projections of the vertices of line1 onto line2 and vice\n versa such that the line between the vertex and the projection\n is perpendicular to the bisection line.\n\n Args:\n line1 (numpy array): The vertices of the first line.\n line2 (numpy array): The vertices of the second line.\n bisection (numpy array): The vertices of the bisection found by _angle_bisection\n\n Returns:\n projections (list): The projected point paired with the point it was projecting.\n \"\"\"\n\n projections = []\n\n lines = [line1,line2]\n bisec = self._unit_vec(bisection[1],bisection[0])\n\n for i in range(2):\n for j in range(2):\n v = lines[i][j] - bisection[0]\n proj = bisec * np.dot(v,bisec)/np.linalg.norm(bisec)\n if i == 0:\n tt = 1\n else:\n tt = 0\n\n if not np.allclose(proj+bisection[0],lines[i][j]):\n test_proj = self._line_intersection(lines[tt],[lines[i][j],list(proj+bisection[0])])\n for vert in self.vertices:\n if np.allclose(test_proj,vert):\n test_proj = vert\n break\n if np.allclose(0,test_proj[0]):\n test_proj[0] = 0\n if np.allclose(0,test_proj[1]):\n test_proj[1] = 0\n if self._is_between(lines[tt][0],lines[tt][1],test_proj):\n projections.append([test_proj,lines[i][j]])\n\n return projections\n \n def _angle_bisection(self,line1,line2):\n\n \"\"\"Finds the points that bisect the edges diffined by line1 and line2.\n\n Args:\n line1 (list): The endpoints of the first line.\n line2 (list): The endpoints of the second line.\n\n Returns:\n bisector (list): The endpoints of the bisector.\n \"\"\"\n\n B = np.array(self._line_intersection(line1,line2))\n if not np.allclose(B,line1[0]):\n A = np.array(line1[0])\n else:\n A = np.array(line1[1])\n if not np.allclose(B,line2[1]):\n C = np.array(line2[1])\n else:\n C = np.array(line2[0])\n\n if self._is_between(A,C,B):\n if not np.allclose(B,line1[1]):\n A = np.array(line1[1])\n else:\n A = np.array(line1[0])\n if not np.allclose(B,line2[0]):\n C = np.array(line2[0])\n else:\n C = np.array(line2[1])\n \n if self._is_between(A,C,B): #pragma: no cover\n raise RuntimeError(\"Could not find a valid bisection of the line segmentns selected.\")\n \n BA = self._unit_vec(B,A)\n BC = self._unit_vec(B,C)\n # The vector that bisects the angle between BA and BC is (BA+BC)/2\n bis_vec = (BA+BC)/2\n\n # we return B since it is the vertex of the bisection 
and\n # bis_vec+B since it is a second point on the vector starting at B.\n return [B,bis_vec+B]\n \n @staticmethod\n def _unit_vec(A,B):\n \"\"\"Finds the unit vector that points from A to B.\n\n Args:\n A (numpy array): The starting point.\n B (numpy array): The ending point.\n\n Returns:\n uV (numpy array): The unit vector tha points from A to B.\n \"\"\"\n\n A = np.array(A)\n B = np.array(B)\n dist = np.linalg.norm(B-A)\n return (B-A) / dist\n \n def _find_area(self, segments=None):\n \"\"\"Finds the area of the polygon from the shoelace algorithm. Code\n contributed by Darius Bacon:\n https://stackoverflow.com/questions/451426/how-do-i-calculate-the-area-of-a-2d-polygon.\n\n Args:\n segments (list optional): A list of the line segments for the area. If None then \n self._segments is used.\n \n Returns:\n area (float): The area of the polygon.\n \"\"\"\n if segments is None:\n return 0.5 * abs(sum(x0*y1 - x1*y0\n for ((x0, y0), (x1, y1)) in self._segments))\n else:\n return 0.5 * abs(sum(x0*y1 - x1*y0\n for ((x0, y0), (x1, y1)) in segments))\n\n def _find_segments(self, verts=None):\n \"\"\"Finds the segments of the polygon from the vertices.\n \n Args:\n verts (list, optional): A list of vertices, if none are supplied then the \n self.vertices is used.\n\n Returns:\n segments (list): The paired segments of the vertices.\n \"\"\"\n if verts is None:\n return list(zip(self.vertices, self.vertices[1:] + [self.vertices[0]]))\n else:\n return list(zip(verts, verts[1:] + [verts[0]]))\n\n def _find_permimter(self,segments=None):\n \"\"\"Finds the perimiter of the polygon.\n\n Args:\n segments (list optional): A list of the line segments for the area. If None then \n self._segments is used.\n\n Returns:\n perimiter (float): The perimiter.\n \"\"\"\n if segments is None:\n return sum(np.sqrt((x1-x0)**2 + (y1-y0)**2) for ((x0,y0),(x1,y1)) in self._segments)\n else:\n return sum(np.sqrt((x1-x0)**2 + (y1-y0)**2) for ((x0,y0),(x1,y1)) in segments)\n\n @staticmethod\n def _find_centroid(points):\n \"\"\"Finds the centroid of the given list of points. 
\n\n Args:\n points (list): A list of [x, y] pairs.\n\n Returns:\n centroid (list): The [x, y] pair of the centroid.\n \"\"\"\n\n x = [p[0] for p in points]\n y = [p[1] for p in points]\n n = len(points)\n \n centroid = [sum(x)/float(n),sum(y)/float(n)]\n\n return centroid\n\n def _counter_clockwise_sort(self,points):\n \"\"\"Sorts the points to be in counter clockwise oreder.\n\n Args:\n points (list): A list of [x, y] pairs:\n\n Returns:\n cc_points (list): A list of the [x, y] pairs sorted to be \n in counter clockwise order.\n \"\"\"\n\n self._center = self._find_centroid(points)\n\n def clockwiseangle_and_distance(point):\n \"\"\"Returns the angle and distance from the centeroid of the point.\n This code is modified from code contributed by MSeifert at:\n https://stackoverflow.com/questions/41855695/sorting-list-of-two-dimensional-coordinates-by-clockwise-angle-using-python\n\n Args:\n point (list): The [x,y] pair to be sorted.\n \n Returns:\n angle, distance (float, float): The angle and the distance from the centroid \n of the polygon.\n \"\"\"\n import math\n\n refvec = [0,1]\n # Vector between point and the origin: v = p - \n vector = [point[0]-self._center[0], point[1]-self._center[1]]\n # Length of vector: ||v||\n lenvector = math.hypot(vector[0], vector[1])\n # If length is zero there is no angle\n if lenvector == 0:\n return -math.pi, 0\n # Normalize vector: v/||v||\n normalized = [vector[0]/lenvector, vector[1]/lenvector]\n dotprod = normalized[0]*refvec[0] + normalized[1]*refvec[1] # x1*x2 + y1*y2\n diffprod = refvec[1]*normalized[0] - refvec[0]*normalized[1] # x1*y2 - y1*x2\n angle = math.atan2(diffprod, dotprod)\n # Negative angles represent counter-clockwise angles so we need to subtract them \n # from 2*pi (360 degrees)\n if angle < 0:\n return 2*math.pi+angle, lenvector\n # I return first the angle because that's the primary sorting criterium\n # but if two vectors have the same angle then the shorter distance should come first.\n return angle, lenvector\n\n cc_points = sorted(points,key=clockwiseangle_and_distance)\n\n return cc_points[::-1]\n","sub_path":"subdivide/polygon.py","file_name":"polygon.py","file_ext":"py","file_size_in_byte":34389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"396907100","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2020 Kumagai group.\nimport argparse\nimport sys\nfrom itertools import groupby\nfrom pathlib import Path\nfrom typing import List, Optional, Union\n\nfrom monty.serialization import loadfn\nfrom pydefect.chem_pot_diag.chem_pot_diag import ChemPotDiag, CompositionEnergy, \\\n CpdPlotInfo\nfrom pydefect.chem_pot_diag.cpd_plotter import ChemPotDiagPlotly3DMplPlotter, \\\n ChemPotDiagPlotly2DMplPlotter\nfrom pydefect.util.mp_tools import MpQuery\nfrom pymatgen import Composition, Element\nfrom vise.atom_energies.atom_energy import mp_energies\nfrom vise.util.logger import get_logger\n\nparent = Path(__file__).parent\nlogger = get_logger(__name__)\n\n\ndef make_chem_pot_diag_from_mp(target: Union[Composition, str],\n additional_elements: List[str] = None,\n vertex_elements: List[str] = None,\n atom_energy_yaml: Optional[str] = None):\n \"\"\"Obtain the energies from Materials Project.\n\n When the atom_energy_yaml is provided, the total energies are aligned\n via atom energies.\n\n \"\"\"\n properties = [\"task_id\", \"full_formula\", \"final_energy\"]\n target = target if isinstance(target, Composition) else Composition(target)\n elements = 
target.chemical_system.split(\"-\")\n vertex_elements = vertex_elements or elements\n vertex_elements = [Element(e) for e in vertex_elements]\n if additional_elements:\n elements.extend(additional_elements)\n query = MpQuery(elements, properties=properties)\n comp_es = []\n if atom_energy_yaml:\n energies = loadfn(atom_energy_yaml)\n diff = {e: energies[e] - mp_energies[e] for e in elements}\n else:\n diff = None\n\n for m in query.materials:\n energy = m[\"final_energy\"]\n if diff:\n for k, v in Composition(m[\"full_formula\"]).as_dict().items():\n energy += diff[k] * v\n comp_es.append(CompositionEnergy(\n Composition(m[\"full_formula\"]), energy, m[\"task_id\"]))\n\n comp_es = remove_higher_energy_comp(comp_es)\n\n return ChemPotDiag(comp_es, target, vertex_elements)\n\n\ndef remove_higher_energy_comp(comp_energies: List[CompositionEnergy]):\n result = []\n for _, grouped_comp_energies in groupby(\n comp_energies, key=lambda x: x.composition.reduced_formula):\n result.append(min(list(grouped_comp_energies),\n key=lambda y: y.abs_energy_per_atom))\n return result\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\"-e\", \"--elements\", type=str, nargs=\"+\")\n parser.add_argument(\"-he\", \"--host_elements\", type=Element, nargs=\"+\")\n parser.add_argument(\"-t\", \"--target\", type=Composition)\n parser.add_argument(\"-f\", \"--functional\", type=str)\n return parser.parse_args(args)\n\n\nif __name__ == \"__main__\":\n x = parse_args(sys.argv[1:])\n # argument order must match the signature: target first, then the\n # additional/vertex elements and the atom-energy yaml\n cpd = make_chem_pot_diag_from_mp(x.target,\n additional_elements=x.elements,\n vertex_elements=x.host_elements,\n atom_energy_yaml=x.functional)\n cpd.to_yaml()\n if cpd.dim == 2:\n plotter = ChemPotDiagPlotly2DMplPlotter(CpdPlotInfo(cpd))\n elif cpd.dim == 3:\n plotter = ChemPotDiagPlotly3DMplPlotter(CpdPlotInfo(cpd))\n plotter.figure.show()\n","sub_path":"pydefect/chem_pot_diag/make_chem_pot_diag.py","file_name":"make_chem_pot_diag.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"408434044","text":"# specall\nfrom threading import Thread\nimport time\nimport dr.globals as glbl\nimport classRant as classes\nimport devRantSimple as dRS\n\ndef getNotifsToGlbl():\n\tif glbl.isLoggedIn:\n\t\tuid = glbl.credentials[\"user_id\"]\n\t\ttoken = glbl.credentials[\"token_id\"]\n\t\tkey = glbl.credentials[\"token_key\"]\n\t\tresponse = dRS.getNotifs(uid, token, key)\n\t\titems = response[\"data\"][\"items\"]\n\t\ti = 0\n\t\twhile i < len(items):\n\t\t\tif not bool(items[i][\"read\"]):\n\t\t\t\tglbl.notifs.append(classes.Notif(items[i]))\n\t\t\t\t# print(i)\n\t\t\ti+=1\n\ndef getCommentsToGlbl():\n\tif glbl.currentViewedRant != \"\":\n\t\tcomments = glbl.currentViewedRant.comments\n\t\tglbl.currentViewedRant.comments = []\n\t\ti = 0\n\t\tglbl.commentLen = len(comments)\n\t\twhile i < len(comments):\n\t\t\t# print(i)\n\t\t\tglbl.currentViewedRant.comments.append(classes.Comment(comments[i]))\n\t\t\t# print(glbl.currentViewedRant.comments)\n\t\t\ti+=1\n\nclass getNotifs(Thread):\n\tdef __init__(self):\n\t\tThread.__init__(self)\n\t\tself.running = True\n\n\tdef run(self):\n\t\twhile self.running:\n\t\t\tgetNotifsToGlbl()\n\t\t\ti = 0\n\t\t\t# Cheaty way to get around not being able to kill a thread with a timer\n\t\t\twhile i < glbl.notifInterval and self.running:\n\t\t\t\ttime.sleep(1)\n\t\t\t\ti+=1\n\tdef stop(self):\n\t\tself.running = False\n\n\nclass getComments(Thread):\n\tdef __init__(self):\n\t\tThread.__init__(self)\n\t\tself.running = True\n\n\tdef 
run(self):\n\t\tgetCommentsToGlbl()\n\tdef stop(self):\n\t\tself.running = False\n","sub_path":"dr-arch/specall.py","file_name":"specall.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"471582618","text":"from adminsortable.models import SortableMixin\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils import timezone\n\n\nclass TopNews(SortableMixin):\n\n\n id = models.AutoField(primary_key=True)\n title = models.CharField(max_length=50)\n description = models.CharField(max_length=1000)\n link = models.CharField(max_length=1000, default=\"#\")\n image = models.ImageField(default=\"/img/news_default.png\")\n order = models.PositiveIntegerField(editable=False, db_index=True, default=0)\n class Meta:\n ordering = ['order']\n verbose_name = 'TopNews'\n\n def __str__(self):\n return self.title\n\nclass Deals(SortableMixin):\n id = models.AutoField(primary_key=True)\n title = models.CharField(max_length=50)\n description = models.CharField(max_length=1000)\n link = models.CharField(max_length=1000, default=\"#\")\n data_created = models.DateTimeField(default=timezone.now, editable=False)\n created_user = models.ForeignKey(User, related_name=\"post_created\", editable=False)\n image = models.ImageField(default=\"/img/top_deals_default.png\")\n order = models.PositiveIntegerField(editable=False, db_index=True, default=0)\n class Meta:\n ordering = ['order']\n verbose_name = \"Deals\"\n\n def __str__(self):\n return self.title\n\nclass SponsorsAndAds(models.Model):\n id = models.AutoField(primary_key=True)\n title = models.CharField(max_length=50)\n image = models.ImageField(default=\"/img/ads_default.png\")","sub_path":"news_app/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"183564052","text":"import numpy as np\nimport cv2\n\nimg_back=cv2.imread(\"1.jpg\")\ndef get_rio(ll_line,img_back,rio,interval,dir_file,rio_1,interval_1,dir_file_1):\n b_channel, g_channel, r_channel = cv2.split(img_back)\n alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype)*255\n img_back = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))\n\n rio_x, rio_y = rio[0],rio[1]\n for x in range(0,img_back.shape[0]-rio_x,interval):\n for y in range(0, img_back.shape[1] - rio_y,interval):\n rio_size = img_back[x:x+rio_x,y:y+rio_y,0:img_back.shape[2]]\n # cv2.imwrite(\"{0}{1}|{2}.png\".format(dir_file,x,y),rio_size)\n get_rio_1(ll_line,rio_size, rio_1, interval_1, dir_file_1,\"{0}|{1}\".format(x,y))\n\ndef get_rio_1(ll_line,img_back,rio,interval,dir_file,nums):\n rio_x, rio_y = rio[0],rio[1]\n for x in range(0,img_back.shape[0]-rio_x,interval):\n for y in range(0, img_back.shape[1] - rio_y,interval):\n image_best = img_back.copy()\n # rio_size = img_back[x:x+rio_x,y:y+rio_y,0:img_back.shape[2]]\n\n for x_x in range(ll_line.shape[0]):\n for y_y in range(ll_line.shape[1]):\n if ll_line[x_x,y_y,3] != 0:\n image_best[x_x+x, y_y+y,0:3] = ll_line[x_x,y_y,0:3]\n\n # image_best[x:x + rio_x, y:y + rio_y, 0:img_back.shape[2]] = line_back_merge()\n\n # cv2.imwrite(\"{0}{1}--{2}|{3}.png\".format(dir_file,nums,x,y),rio_size)\n cv2.imwrite(\"{0}{1}--{2}|{3}.jpg\".format(dir_file,nums,x,y),image_best[:,:,0:3])\n\n\n# def line_back_merge():\n# line = cv2.imread(\"/home/db/PycharmProjects/picture_tools/images/0.png\", -1)\n# # print(line.shape)\n# # 
cv2.imshow(\"line\",line)\n# # cv2.waitKey(0)\n# return line\n\n\n\n# get_rio(img_back,(400,400),500,\"/home/db/PycharmProjects/picture_tools/img/\",(164,164),100,\"/home/db/PycharmProjects/picture_tools/imgs/\")\n# print(\"finished!\")\n\n\n\n\n\n\n# cv2.waitKey(0)","sub_path":"表盘识别v3/圆形指针表盘/深度学习识别表盘指针/生成训练数据/rio_test.py","file_name":"rio_test.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"568899347","text":"import pandas as pd\r\nimport re\r\n\r\ndef preprocess_line(line):\r\n line = line.replace('[', '_').replace(']', '')\r\n line = line.replace('deposit<=>', 'deposit_0<=>', 1).replace('rent<=>', 'rent_0<=>', 1)\r\n return line\r\n\r\n\r\ndef get_feature_names_from_line(line):\r\n line = preprocess_line(line)\r\n feature_names = set(re.findall(r''' # matches feature names that occur between </br> and <=>\r\n # the pattern generally is: <br>feature_name<=>\r\n (?:^|br\\>) # matches line head or <br> marker\r\n (.*?) # matches the string between\r\n \\<\\= # matches the <=> marker\r\n ''', line, re.VERBOSE))\r\n return feature_names\r\n\r\n\r\ndef get_feature_values(line, feature):\r\n line = preprocess_line(line)\r\n line = re.findall('<br>'+\r\n feature_name +\r\n ''' # matches feature values that occur between <=> and <br>\r\n # the pattern is like: <br>feature_name<=>value<->value2<br>\r\n \\<\\=\\> # matches <=> ('<br>feature_name' is above)\r\n (.*?) # matches values\r\n (?=\\<br\\>|$) # matches <br> and endline\r\n ''', line, re.VERBOSE)\r\n if line: # If no match than put nothing (it will be replaced by NULL in DBMS)\r\n return line[0]\r\n else:\r\n return ''\r\n\r\nads_data = pd.read_csv('data/data_ads.csv', header=None, sep=';')\r\ncats_data = pd.read_csv('data/data_categories.csv', header=None, sep=';')\r\n\r\n\r\n# Get feature names ------------------------------------------------------\r\nfeature_names = set() # Here I will store the names of features.\r\ncats_features_f = open('cat_features.csv', 'w') # I wll also store feature names for each category\r\n\r\nfor cat_id in cats_data[0]: # For every category....\r\n text = ads_data[ads_data[3] == cat_id][4].head(1).values # ... 
get value of the last cell ('params')\r\n if text.size > 0: # if given category does exist:\r\n features = get_feature_names_from_line(text[0]) # get features\r\n print(\"{}: {}\".format(cat_id, features), file=cats_features_f) # insert them into a file\r\n feature_names = feature_names.union(features) # and add to set.\r\n\r\ncats_features_f.close()\r\n\r\n\r\n# Create columns and fill them by values using prepared function -----------------\r\nfor feature_name in feature_names:\r\n ads_data[feature_name] = ads_data[4].apply(lambda x: get_feature_values(x, feature_name))\r\n\r\nads_data.rename(columns={0:'date', 1:'user_id', 2:'ad_id', 3:'category_id'}, inplace=True)\r\n#ads_data.drop(4, axis=1, inplace=True) # Drop useless column\r\n\r\n\r\n# Set columns order - should be the same as in db ------------------------------------------------------------\r\nads_data = ads_data[['date', 'user_id', 'ad_id', 'category_id',\r\n 'price_currency', 'price', 'rent_currency', 'rent', 'deposit_currency', 'deposit',\r\n 'rent_to_students', 'm', 'price_per_m', 'rooms_num', 'floor_no', 'building_floors_num',\r\n 'floors_num', 'build_year', 'free_from', 'market', 'type', 'access_types', 'roof_type',\r\n 'location', 'terrain_area', 'fence_types', 'fence', 'height', 'ramp', 'building_type',\r\n 'parking', 'building_material', 'heating_types', 'heating', 'building_ownership',\r\n 'vicinity_types', 'flooring', 'media_types', 'office_space', 'recreational',\r\n 'use_types', 'is_bungalow', 'dimensions', 'localization','structure', 'lighting',\r\n 'windows_type', 'extras_types', 'construction_status','security_types', 'equipment_types',\r\n 'roofing', 'garret_type', 'social_facilities']]\r\n\r\nads_data.to_csv('data/data_ads_preprocessed.csv', header=None, sep=';', index=None)","sub_path":"parse_ads.py","file_name":"parse_ads.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"394450551","text":"#!/usr/bin/python\r\n#-*-coding:gbk -*-\r\n'''\r\nModule for getting paths.\r\n'''\r\nPATH = ''.join(['%s\\\\'%i for i in __file__.split('\\\\')][:-1])\r\nMAYAPLUGPATH = '%smayaPlug\\\\'%PATH\r\nINIPATH = '%siniFile\\\\'%PATH\r\nTOOLPATH = '%stool\\\\'%PATH\r\nDLLPATH = '%sDLL\\\\'%PATH\r\n","sub_path":"CPMel(6)/GetPath.py","file_name":"GetPath.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"531968177","text":"#!/usr/bin/python3\n\"\"\" Task 7 doc \"\"\"\nfrom models import storage\nfrom models.state import State\nfrom flask import Flask, render_template\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef teardown(self):\n \"\"\" Docstring for teardown \"\"\"\n storage.close()\n\n\n@app.route('/states_list', strict_slashes=False)\ndef states_list():\n \"\"\" display a HTML page: (inside the tag BODY) \"\"\"\n states = storage.all('State').values()\n\n return render_template('7-states_list.html', states=states)\n\nif __name__ == '__main__':\n app.run(\n host='0.0.0.0',\n port=5000,\n debug=True\n )\n","sub_path":"web_flask/7-states_list.py","file_name":"7-states_list.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"455885946","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport logging\nimport os\n\nfrom copy import deepcopy\nfrom toscaparser.common.exception import ExceptionCollector\nfrom toscaparser.common.exception import InvalidTemplateVersion\nfrom toscaparser.common.exception import MissingRequiredFieldError\nfrom toscaparser.common.exception import UnknownFieldError\nfrom toscaparser.common.exception import ValidationError\nfrom toscaparser.elements.entity_type import update_definitions, EntityType\nfrom toscaparser.extensions.exttools import ExtTools\nimport toscaparser.imports\nfrom toscaparser.prereq.csar import CSAR\nfrom toscaparser.repositories import Repository\nfrom toscaparser.topology_template import TopologyTemplate\nfrom toscaparser.substitution_mappings import SubstitutionMappings\nfrom toscaparser.utils.gettextutils import _\nimport toscaparser.utils.yamlparser\n\n# TOSCA template key names\nSECTIONS = (DEFINITION_VERSION, DEFAULT_NAMESPACE, TEMPLATE_NAME,\n TOPOLOGY_TEMPLATE, TEMPLATE_AUTHOR, TEMPLATE_VERSION,\n DESCRIPTION, IMPORTS, DSL_DEFINITIONS, TYPES, NODE_TYPES,\n RELATIONSHIP_TYPES, RELATIONSHIP_TEMPLATES,\n CAPABILITY_TYPES, ARTIFACT_TYPES, DATA_TYPES, INTERFACE_TYPES,\n POLICY_TYPES, GROUP_TYPES, REPOSITORIES) = \\\n ('tosca_definitions_version', 'tosca_default_namespace',\n 'template_name', 'topology_template', 'template_author',\n 'template_version', 'description', 'imports', 'dsl_definitions',\n 'types', 'node_types', 'relationship_types', 'relationship_templates',\n 'capability_types', 'artifact_types', 'data_types',\n 'interface_types', 'policy_types', 'group_types', 'repositories')\n# Sections that are specific to individual template versions\nSPECIAL_SECTIONS = (METADATA, DECORATORS) = ('metadata', 'decorators')\n\nlog = logging.getLogger(\"tosca.model\")\n\nYAML_LOADER = toscaparser.utils.yamlparser.load_yaml\n\n\nclass ToscaTemplate(object):\n exttools = ExtTools()\n strict = False\n\n MAIN_TEMPLATE_VERSIONS = ['tosca_simple_yaml_1_0',\n 'tosca_simple_yaml_1_2',\n 'tosca_simple_yaml_1_3']\n\n VALID_TEMPLATE_VERSIONS = MAIN_TEMPLATE_VERSIONS + exttools.get_versions()\n\n ADDITIONAL_SECTIONS = {'tosca_simple_yaml_1_0': SPECIAL_SECTIONS,\n 'tosca_simple_yaml_1_2': SPECIAL_SECTIONS,\n 'tosca_simple_yaml_1_3': SPECIAL_SECTIONS}\n\n ADDITIONAL_SECTIONS.update(exttools.get_sections())\n\n '''Load the template data.'''\n def __init__(self, path=None, parsed_params=None, a_file=True,\n yaml_dict_tpl=None, import_resolver=None, verify=True, fragment=\"\"):\n\n ExceptionCollector.start()\n self.a_file = a_file\n self.input_path = None\n self.path = None\n self.fragment = fragment\n self.tpl = None\n self.import_resolver = import_resolver\n self.nested_tosca_tpls = {}\n self.nested_topologies = {}\n if path:\n self.input_path = path\n # don't validate or load if yaml_dict_tpl was set\n if yaml_dict_tpl:\n self.path = path\n else:\n self.path = self._get_path(path)\n self.tpl = YAML_LOADER(self.path, self.a_file)\n\n if yaml_dict_tpl:\n self.tpl = yaml_dict_tpl\n\n if not path and not yaml_dict_tpl:\n ExceptionCollector.appendException(\n ValueError(_('No path or yaml_dict_tpl was provided. 
'\n 'There is nothing to parse.')))\n\n self.topology_template = None\n if self.tpl:\n self.parsed_params = parsed_params\n self._validate_field()\n self.version = self._tpl_version()\n EntityType.reset_caches()\n self.description = self._tpl_description()\n custom_defs = self._get_all_custom_defs()\n self.topology_template = self._topology_template(custom_defs)\n self._repositories = None\n if self.topology_template.tpl:\n self.inputs = self._inputs()\n self.relationship_templates = self._relationship_templates()\n self.outputs = self._outputs()\n self.policies = self._policies()\n self._handle_nested_tosca_templates_with_topology(custom_defs)\n # now that all the node templates have been loaded we can validated the relationships between them\n self.topology_template.validate_relationships(self.strict)\n for nested in self.nested_topologies.values():\n nested.validate_relationships(self.strict)\n\n ExceptionCollector.stop()\n if verify:\n self.verify_template()\n\n def _topology_template(self, custom_defs):\n return TopologyTemplate(self._tpl_topology_template(),\n custom_defs,\n self.parsed_params,\n self)\n\n def _inputs(self):\n return self.topology_template.inputs\n\n @property\n def nodetemplates(self):\n return self.topology_template.nodetemplates\n\n def _relationship_templates(self):\n return self.topology_template.relationship_templates\n\n def _outputs(self):\n return self.topology_template.outputs\n\n def _tpl_version(self):\n return self.tpl.get(DEFINITION_VERSION)\n\n def _tpl_description(self):\n desc = self.tpl.get(DESCRIPTION)\n if desc:\n return desc.rstrip()\n\n def _tpl_imports(self):\n return self.tpl.get(IMPORTS)\n\n @property\n def repositories(self):\n if self._repositories is None:\n repositories = {}\n assert self.topology_template # sets self.nested_tosca_tpls\n for filename, tosca_tpl in self.nested_tosca_tpls.items():\n repositories.update(tosca_tpl.get(REPOSITORIES) or {})\n repositories.update(self.tpl.get(REPOSITORIES) or {})\n # we need to update the template because it is passed directly to the import loader\n self.tpl[REPOSITORIES] = repositories\n if self.import_resolver:\n get_repository = self.import_resolver.get_repository\n else:\n get_repository = Repository\n self._repositories = {name:get_repository(name, val) for name, val in repositories.items()}\n return self._repositories\n\n def _tpl_relationship_templates(self):\n topology_template = self._tpl_topology_template()\n return topology_template.get(RELATIONSHIP_TEMPLATES)\n\n def _tpl_topology_template(self):\n return self.tpl.get(TOPOLOGY_TEMPLATE)\n\n def _policies(self):\n return self.topology_template.policies\n\n def get_type_sections(self):\n return [TYPES, NODE_TYPES, CAPABILITY_TYPES, RELATIONSHIP_TYPES,\n DATA_TYPES, ARTIFACT_TYPES, INTERFACE_TYPES, POLICY_TYPES, GROUP_TYPES]\n\n def _get_all_custom_defs(self):\n custom_defs, nested_tosca_tpls = self._get_custom_defs(self.tpl, self.path, self.path)\n self.nested_tosca_tpls = nested_tosca_tpls\n # Handle custom types defined in current template file\n for type_def in self.get_type_sections():\n inner_custom_types = self.tpl.get(type_def)\n if inner_custom_types:\n custom_defs.update(inner_custom_types)\n return custom_defs\n\n def _get_custom_defs(self, tpl, path, root_path):\n custom_defs_final = {}\n tosca_tpls = {}\n custom_defs, nested_imports = self.load_imports(path, tpl, root_path)\n for filename, (import_tpl, root_path, prefix) in nested_imports.items():\n tosca_tpls[filename] = import_tpl\n import_defs, nested_tosca_tpls = 
self._get_custom_defs(import_tpl, filename, root_path)\n custom_defs_final.update(import_defs)\n tosca_tpls.update(nested_tosca_tpls)\n if custom_defs:\n custom_defs_final.update(custom_defs)\n return custom_defs_final, tosca_tpls\n\n def load_imports(self, path, tpl, root_path):\n \"\"\"Handle custom types defined in imported template files\n\n This method loads the custom type definitions referenced in \"imports\"\n section of the TOSCA YAML template.\n \"\"\"\n imports = tpl.get(\"imports\")\n if not imports:\n return {}, {}\n\n type_sections = self.get_type_sections()\n imports_loader = toscaparser.imports.ImportsLoader(\n None, path, type_sections, self.tpl.get(\"repositories\"),\n self.import_resolver, root_path\n )\n imports_loader.resolver.load_imports(imports_loader, imports)\n # nested_tosca_tpls is Dict[file_path, (tpl, repository_name, prefix)] of the imported templates\n nested_tosca_tpls = imports_loader.get_nested_tosca_tpls()\n # custom defs are merged together (with possibly namespace prefix)\n custom_defs = imports_loader.get_custom_defs()\n return custom_defs, nested_tosca_tpls\n\n def _handle_nested_tosca_templates_with_topology(self, custom_types):\n ExceptionCollector.near = \"\"\n for filename, tosca_tpl in self.nested_tosca_tpls.items():\n topology_tpl = tosca_tpl.get(TOPOLOGY_TEMPLATE)\n if topology_tpl:\n custom_types = custom_types.copy()\n custom_types.update(tosca_tpl.get('node_types', {})) # XXX isn't this redundant?\n self.nested_topologies[filename] = TopologyTemplate(\n topology_tpl, custom_types, None, self)\n substitutable_topologies = [t for t in self.nested_topologies.values() if t.substitution_mappings]\n self.topology_template._do_substitutions(substitutable_topologies)\n if self.topology_template.substitution_mappings and not self.topology_template.substitution_mappings.node:\n # create a node template for the root topology's substitution mapping\n self.topology_template.substitution_mappings.substitute(None, None)\n\n def _validate_field(self):\n version = self._tpl_version()\n if not version:\n ExceptionCollector.appendException(\n MissingRequiredFieldError(what='Template',\n required=DEFINITION_VERSION))\n else:\n self._validate_version(version)\n self.version = version\n\n for name in self.tpl:\n if (name not in SECTIONS and\n name not in self.ADDITIONAL_SECTIONS.get(version, ())):\n ExceptionCollector.appendException(\n UnknownFieldError(what='Template', field=name))\n\n def _validate_version(self, version):\n if version not in self.VALID_TEMPLATE_VERSIONS:\n ExceptionCollector.appendException(\n InvalidTemplateVersion(\n what=version,\n valid_versions='\", \"'. 
join(self.VALID_TEMPLATE_VERSIONS)))\n else:\n if version not in self.MAIN_TEMPLATE_VERSIONS:\n update_definitions(self.exttools, version, YAML_LOADER)\n\n def _get_path(self, path):\n if path.lower().endswith('.yaml') or path.lower().endswith('.yml'):\n return path\n elif path.lower().endswith(('.zip', '.csar')):\n # a CSAR archive\n csar = CSAR(path, self.a_file)\n if csar.validate():\n csar.decompress()\n self.a_file = True # the file has been decompressed locally\n return os.path.join(csar.temp_dir, csar.get_main_template())\n else:\n ExceptionCollector.appendException(\n ValueError(_('\"%(path)s\" is not a valid file.')\n % {'path': path}))\n\n def verify_template(self):\n if ExceptionCollector.exceptionsCaught():\n if self.input_path:\n raise ValidationError(\n message=(_('\\nThe input \"%(path)s\" failed validation with '\n 'the following error(s): \\n\\n\\t')\n % {'path': self.input_path}) +\n '\\n\\t'.join(ExceptionCollector.getExceptionsReport()))\n else:\n raise ValidationError(\n message=_('\\nThe pre-parsed input failed validation with '\n 'the following error(s): \\n\\n\\t') +\n '\\n\\t'.join(ExceptionCollector.getExceptionsReport()))\n else:\n if self.input_path:\n msg = (_('The input \"%(path)s\" successfully passed '\n 'validation.') % {'path': self.input_path})\n else:\n msg = _('The pre-parsed input successfully passed validation.')\n\n log.info(msg)\n","sub_path":"toscaparser/tosca_template.py","file_name":"tosca_template.py","file_ext":"py","file_size_in_byte":13397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"150623646","text":"from django.shortcuts import render,redirect\nfrom .models import Mentor,Mentee,Blog\nfrom .forms import inputMentee,inputMentor,inputBlog\n# Create your views here.\ndef ata_view_main(request):\n return render(request,'index.html',{})\ndef author_view(request):\n return render(request,'author.html',{})\ndef mentor_view(request):\n list_mentor = Mentor.objects.all()\n return render(request,'mentor.html',{'list_mentor':list_mentor})\ndef mentee_view(request):\n list_mentee = Mentee.objects.all()\n return render(request, 'mentee.html',{'list_mentee':list_mentee})\ndef blog_view(request):\n list_blog = Blog.objects.all()\n return render(request,'blog.html',{'list_blog':list_blog})\ndef input_mentee(request):\n if request.method == \"POST\":\n form = inputMentee(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect('http://127.0.0.1:8001/input_mentee')\n else:\n form = inputMentee()\n return render(request,'input_mentee.html',{'form':form})\ndef input_mentor(request):\n if request.method == \"POST\":\n form = inputMentor(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect('http://127.0.0.1:8001/input_mentor')\n else:\n form = inputMentor()\n return render(request,'input_mentor.html',{'form':form})\ndef input_blog(request):\n if request.method == \"POST\":\n form = inputBlog(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect('http://127.0.0.1:8001/input_blog')\n else:\n form = inputBlog()\n return render(request,'input_blog.html',{'form':form})","sub_path":"ata_view/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"253603847","text":"from datascientist.model.regression.skl.ensemble_model.gradient_boosting_regression 
import _gradient_boosting_regression\n\nimport numpy as np\n\n\ndef test_gradient_boosting():\n x_train = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])\n y_train = np.dot(x_train, np.array([1, 2])) + 3\n\n x_test = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])\n y_test = np.dot(x_test, np.array([1, 2])) + 3\n\n metrics = 'mae'\n answer = _gradient_boosting_regression(train=(x_train, y_train), test=(x_test, y_test), metrics=metrics)\n assert answer[0] == 'GradientBoostingRegressor'\n assert round(answer[1] * 10**4, 2) == 0.4\n assert answer[2] is None\n\n metrics = 'mse'\n answer = _gradient_boosting_regression(train=(x_train, y_train), test=(x_test, y_test), metrics=metrics)\n assert answer[0] == 'GradientBoostingRegressor'\n assert round(answer[1] * 10**8, 2) == 0.23\n assert answer[2] is None\n\n metrics = 'rmse'\n answer = _gradient_boosting_regression(train=(x_train, y_train), test=(x_test, y_test), metrics=metrics)\n assert answer[0] == 'GradientBoostingRegressor'\n assert round(answer[1] * 10 ** 4, 2) == 0.48\n assert answer[2] is None\n\n answer = _gradient_boosting_regression(train=(x_train, y_train), test=(x_test, y_test), metrics=metrics,\n x_predict=x_test)\n arr = np.array([6.00043558, 8.00005297, 8.99991706, 10.99959438])\n for i in range(len(answer[2])):\n assert round(answer[2][i], 2) == round(arr[i], 2)\n","sub_path":"datascientist/model/regression/skl/ensemble_model/tests/test_gradient_boosting.py","file_name":"test_gradient_boosting.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"129673026","text":"n = int(input('Inf. a quantidade de números primos: '))\np = 2\nwhile n > 0:\n ePrimo = True\n for x in range(2,(p//2)+1):\n if p%x==0:\n ePrimo = False\n break\n if ePrimo:\t\n n-=1\n print(p,end=\", \")\n p+=1\n ","sub_path":"alped/n-primos.py","file_name":"n-primos.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"354098952","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*\n#\n# Copyright (C) 2009-2012 Dalton Barreto - daltonmatos@gmail.com\n# All rights reserved.\n#\n# This software is licensed as described in the file COPYING, which\n# you should have received as part of this distribution.\n\nfrom setuptools import setup\n\nPACKAGE = 'GroupBasedRedirection'\n\nsetup(name = PACKAGE,\n description = 'Redirects users based on group permissions',\n keywords = 'request redirection permission',\n version = '0.2',\n url = 'http://trac-hacks.org/wiki/GroupBasedRedirectionPlugin',\n author = 'Dalton Barreto',\n author_email = 'daltonmatos@gmail.com',\n license = '3-clause BSD',\n packages = [PACKAGE],\n entry_points = {'trac.plugins': '%s = %s.%s' % (PACKAGE, PACKAGE, PACKAGE)}\n )\n","sub_path":"GroupBasedRedirectionPlugin/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"400852547","text":"import tensorflow as tf\nimport tensorflow_datasets as tfds\n\nBUFFER_SIZE = 10000\nBATCH_SIZE = 64\n\ndef input_fn(mode):\n\n datasets, info = tfds.load(name='mnist',\n with_info=True,\n as_supervised=True)\n\n mnist_dataset = (datasets['train'] if mode == tf.estimator.ModeKeys.TRAIN else datasets['test'])\n \n def scale(image, label):\n image = tf.cast(image, tf.float32)\n image /= 255\n # feature columns look features up by name, so return a dict keyed by column name\n return {'image': image}, label\n\n return mnist_dataset.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n\n\ntest = input_fn('test')\ntrain = input_fn(tf.estimator.ModeKeys.TRAIN)\nprint(test)\nprint(train)\n\n# my_feature_columns was undefined in the original; MNIST images are 28x28x1\nmy_feature_columns = [tf.feature_column.numeric_column('image', shape=[28, 28, 1])]\n\nclassifier = tf.estimator.DNNClassifier(\n feature_columns=my_feature_columns,\n hidden_units=[10, 10],\n n_classes=10 # MNIST has 10 classes\n)\n\n# train_and_evaluate takes eval_spec (not test_spec) and returns evaluation metrics, not a model;\n# input_fn expects a mode argument, so wrap the calls in lambdas\nresult = tf.estimator.train_and_evaluate(\n classifier,\n train_spec=tf.estimator.TrainSpec(input_fn=lambda: input_fn(tf.estimator.ModeKeys.TRAIN)),\n eval_spec=tf.estimator.EvalSpec(input_fn=lambda: input_fn('eval'))\n)\n","sub_path":"tensorflow_api_tutorials/exploring/est_1.py","file_name":"est_1.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"9225930","text":"import setuptools, os\n\nbasepath = os.path.dirname(__file__)\nbasepath += '/' if basepath != '' else ''\n\nwith open(basepath + \"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nrequirements = []\nwith open(basepath + 'requirements.txt', 'r') as fh:\n for line in fh:\n requirements.append(line)\n\nsetuptools.setup(\n name='pygments-contemply',\n version='1.0.0',\n packages=setuptools.find_packages(basepath + 'src'),\n package_dir={'': basepath + 'src'},\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\"\n ],\n url='https://github.com/smertiens/pygments-contemply',\n project_urls={\n\n },\n keywords='pygments contemply lexer',\n license='AGPL-3.0',\n author='Sean Mertiens',\n author_email='sean@contemply.org',\n description='Pygments lexer for Contemply templates',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n\n install_requires=requirements,\n python_requires='>=3.4',\n\n package_data={\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"102623573","text":"import tornado.gen\nimport logging\nimport datetime\nimport calendar\n\nimport tornado_mysql\n\nfrom mickey.mysqlcon import get_mysqlcon\n\n_insert_sql = \"\"\"\n INSERT INTO feedback(feedDetail, feedType, createAt, userId, osVer, appVer, userIp, opInfo, netInfo, deviceType, expireAt, deviceId)\n VALUES ('%s', %s, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');\n\"\"\"\n\n_logger = logging.getLogger(__name__)\n\nclass DtsMgr(object):\n\n @classmethod\n @tornado.gen.coroutine\n def createDts(cls, creater, dtsbody):\n dts_id = \"\"\n if not creater:\n _logger.error(\"create dts creater is null\")\n return (403, dts_id)\n\n feed_desc = dtsbody.get(\"desc\", \"\")\n os_ver = dtsbody.get(\"osver\", \"\")\n app_ver = dtsbody.get(\"appver\", \"\")\n userip = dtsbody.get(\"userip\", \"\")\n operinfo = dtsbody.get(\"operinfo\", \"\")\n netinfo = dtsbody.get(\"netinfo\", \"\")\n devicetype = dtsbody.get(\"devicetype\", \"\")\n deviceid = dtsbody.get(\"deviceid\", \"\")\n\n if not feed_desc:\n _logger.error(\"create dts desc is null\")\n return (403, dts_id)\n\n #get create time\n current = calendar.timegm((datetime.datetime.utcnow()).utctimetuple())\n d_current = datetime.datetime.utcfromtimestamp(float(current))\n s_current = d_current.strftime('%Y-%m-%d %H:%M:%S')\n\n expire_t = datetime.datetime.utcfromtimestamp(float(current + 259200))\n expire_strt = expire_t.strftime('%Y-%m-%d %H:%M:%S')\n\n #get mysql connect\n conn = yield get_mysqlcon()\n if not conn:\n _logger.error(\"connect to mysql failed\")\n return (500, dts_id)\n\n try:\n cur = 
conn.cursor()\n format_sql = _insert_sql % (feed_desc, '0', s_current, creater, os_ver, app_ver, userip, operinfo, netinfo, devicetype, expire_strt, deviceid)\n yield cur.execute(format_sql)\n dts_id = cur.lastrowid\n cur.close()\n\n yield conn.commit()\n except Exception as e:\n _logger.error(\"db oper failed {0}\".format(e))\n finally:\n conn.close()\n\n if not dts_id:\n return (500, dts_id)\n\n return (200, str(dts_id))\n\n","sub_path":"libs/mickey/dts/dtsmgr.py","file_name":"dtsmgr.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"83211967","text":"import networkx\nimport plotly\n\nfrom math import sqrt, cos, sin\nfrom random import random\nfrom itertools import combinations\nfrom functools import reduce\n\n\nHEAD_ANGLE = 0.5\n\n\ngraph_width = 800\ngraph_height = 450\n\nnode_size = 20\nnode_color = (255, 255, 255)\n\nedge_width = 2\nedge_color = (0, 0, 0)\n\nnode_label_position = 'middle center'\nedge_label_distance = 10\n\n\ndef _normalize_positions(g):\n xs = []\n ys = []\n for n in g.nodes:\n pos = g.nodes[n]['pos']\n xs.append(pos[0])\n ys.append(pos[1])\n\n xmin = min(xs)\n ymin = min(ys)\n xmax = max(xs) - xmin\n ymax = max(ys) - ymin\n\n for n in g.nodes:\n pos = g.nodes[n]['pos']\n x = (pos[0] - xmin) / xmax\n y = (pos[1] - ymin) / ymax\n g.nodes[n]['pos'] = (x, y)\n\n\ndef _set_layout(g, layout):\n for n, value in layout.items():\n g.nodes[n]['pos'] = (value[0], value[1])\n\n _normalize_positions(g)\n\n\ndef _scale(dx, dy, width, height, size):\n s2 = size**2\n\n x2 = (dx * width)**2\n y2 = (dy * height)**2\n\n return sqrt(s2 / (x2 + y2))\n\n\ndef _rotate(dx, dy, width, height, counter):\n dx *= width\n dy *= height\n\n if counter:\n a = HEAD_ANGLE\n else:\n a = -HEAD_ANGLE\n\n rx = dx * cos(a) - dy * sin(a)\n ry = dx * sin(a) + dy * cos(a)\n\n return rx / width, ry / height\n\n\ndef _convert(color):\n return 'rgb({}, {}, {})'.format(color[0], color[1], color[2])\n\n\ndef _build_node_trace(color):\n if node_label_position == 'hover':\n hoverinfo = 'text'\n mode = 'markers'\n else:\n hoverinfo = 'none'\n mode = 'markers+text'\n\n fontcolor = (0, 0, 0)\n\n if node_label_position == 'middle center':\n if 0.2126 * color[0] + 0.7152 * color[1] + 0.0722 * color[2] < 128:\n fontcolor = (255, 255, 255)\n\n return {\n 'x': [],\n 'y': [],\n 'text': [],\n 'textposition': 'middle center' if node_label_position == 'hover' else node_label_position,\n 'hoverinfo': hoverinfo,\n 'mode': mode,\n 'marker': {\n 'size': node_size,\n 'color': _convert(color),\n 'line': {\n 'width': edge_width,\n 'color': 'rgb(0, 0, 0)',\n },\n },\n 'textfont': {\n 'color': _convert(fontcolor),\n },\n }\n\n\ndef _build_edge_trace(color):\n return {\n 'x': [],\n 'y': [],\n 'hoverinfo': 'none',\n 'mode': 'lines',\n 'line': {\n 'width': edge_width,\n 'color': _convert(color),\n },\n }\n\n\ndef _build_label_trace():\n return {\n 'x': [],\n 'y': [],\n 'text': [],\n 'textposition': 'middle center',\n 'hoverinfo': 'none',\n 'mode': 'text',\n 'textfont': {\n 'color': 'rgb(0, 0, 0)',\n },\n }\n\n\ndef _build_layout(width, height):\n return {\n 'showlegend': False,\n 'width': width,\n 'height': height,\n 'margin': {\n 'b': 0,\n 'l': 0,\n 'r': 0,\n 't': 0,\n },\n 'xaxis': {\n 'showgrid': False,\n 'zeroline': False,\n 'showticklabels': False,\n },\n 'yaxis': {\n 'showgrid': False,\n 'zeroline': False,\n 'showticklabels': False,\n },\n }\n\n\ndef _add_node(g, n, node_trace):\n x, y = g.nodes[n]['pos']\n\n 
node_trace['x'].append(x)\n node_trace['y'].append(y)\n\n\ndef _add_edge(g, e, edge_trace, label_trace):\n x0, y0 = g.nodes[e[0]]['pos']\n x1, y1 = g.nodes[e[1]]['pos']\n\n dx = y0 - y1\n dy = x1 - x0\n\n # parameters estimated from screenshots\n width = 0.9 * graph_width - 24\n height = 0.9 * graph_height - 24\n\n if isinstance(g, networkx.DiGraph) and g.has_edge(e[1], e[0]):\n scale = _scale(dx, dy, width, height, edge_width)\n x0 += dx * scale\n y0 += dy * scale\n x1 += dx * scale\n y1 += dy * scale\n\n edge_trace['x'].extend([x0, x1, None])\n edge_trace['y'].extend([y0, y1, None])\n\n if label_trace is not None:\n scale = _scale(dx, dy, width, height, edge_label_distance)\n label_trace['x'].append((x0 + x1) / 2 + dx * scale)\n label_trace['y'].append((y0 + y1) / 2 + dy * scale)\n label_trace['text'].append(g.edges[e[0], e[1]]['label'])\n\n if isinstance(g, networkx.DiGraph):\n dx = x0 - x1\n dy = y0 - y1\n\n radius = node_size / 2\n\n scale = _scale(dx, dy, width, height, radius)\n x0 = x1 + dx * scale\n y0 = y1 + dy * scale\n\n if not g.has_edge(e[1], e[0]):\n rx, ry = _rotate(dx, dy, width, height, True)\n scale = _scale(rx, ry, width, height, radius)\n x1 = x0 + rx * scale\n y1 = y0 + ry * scale\n edge_trace['x'].extend([x0, x1, None])\n edge_trace['y'].extend([y0, y1, None])\n\n rx, ry = _rotate(dx, dy, width, height, False)\n scale = _scale(rx, ry, width, height, radius)\n x1 = x0 + rx * scale\n y1 = y0 + ry * scale\n edge_trace['x'].extend([x0, x1, None])\n edge_trace['y'].extend([y0, y1, None])\n\n\ndef reset_node_colors(g):\n for n in g.nodes:\n g.nodes[n]['color'] = node_color\n\n\ndef reset_edge_colors(g):\n for e in g.edges:\n g.edges[e[0], e[1]]['color'] = edge_color\n\n\ndef set_circle_positions(g):\n layout = networkx.circular_layout(g)\n\n _set_layout(g, layout)\n\n\ndef set_spring_positions(g):\n layout = networkx.spring_layout(g)\n\n _set_layout(g, layout)\n\n\ndef reset_positions(g):\n layout = networkx.spring_layout(g)\n\n _set_layout(g, layout)\n\n\ndef load_graph(path, has_pos=False):\n g = networkx.read_gml(path, label='id')\n\n reset_node_colors(g)\n reset_edge_colors(g)\n\n if has_pos:\n for n in g.nodes:\n g.nodes[n]['pos'] = (g.nodes[n]['x'], g.nodes[n]['y'])\n del g.nodes[n]['x']\n del g.nodes[n]['y']\n\n _normalize_positions(g)\n else:\n reset_positions(g)\n\n return g\n\n\ndef show_graph(g, nlab=False, elab=False):\n node_traces = {}\n\n for n in g.nodes:\n color = g.nodes[n]['color']\n if color not in node_traces:\n node_traces[color] = _build_node_trace(color)\n _add_node(g, n, node_traces[color])\n if nlab:\n node_traces[color]['text'].append(g.nodes[n]['label'])\n\n edge_traces = {}\n\n if elab:\n label_trace = _build_label_trace()\n else:\n label_trace = None\n\n for e in g.edges:\n color = g.edges[e[0], e[1]]['color']\n if color not in edge_traces:\n edge_traces[color] = _build_edge_trace(color)\n _add_edge(g, e, edge_traces[color], label_trace)\n\n data = list(edge_traces.values()) + list(node_traces.values())\n if elab:\n data.append(label_trace)\n\n figure = {\n 'data': data,\n 'layout': _build_layout(graph_width, graph_height),\n }\n\n plotly.offline.iplot(figure, config={'displayModeBar': False}, show_link=False)\n\n\ndef generate_frame(g, nlab=False, elab=False):\n node_traces = []\n\n for n in g.nodes:\n trace = _build_node_trace(g.nodes[n]['color'])\n node_traces.append(trace)\n _add_node(g, n, trace)\n if nlab:\n trace['text'].append(g.nodes[n]['label'])\n\n edge_traces = []\n\n if elab:\n label_trace = _build_label_trace()\n else:\n 
label_trace = None\n\n for e in g.edges:\n trace = _build_edge_trace(g.edges[e[0], e[1]]['color'])\n edge_traces.append(trace)\n _add_edge(g, e, trace, label_trace)\n\n data = edge_traces + node_traces\n if elab:\n data.append(label_trace)\n\n return {\n 'data': data,\n }\n\n\ndef show_animation(frames):\n steps = []\n\n for index, frame in enumerate(frames):\n frame['name'] = index\n steps.append({\n 'args': [[index], {'frame': {'redraw': False}, 'mode': 'immediate'}],\n 'label': '',\n 'method': 'animate',\n })\n\n # parameters estimated from screenshots\n width = 1.05 * graph_width + 72\n height = 1.00 * graph_height + 76\n\n layout = _build_layout(width, height)\n\n layout.update({\n 'updatemenus': [\n {\n 'buttons': [\n {\n 'args': [None, {'frame': {'redraw': False}, 'fromcurrent': True}],\n 'label': 'Play',\n 'method': 'animate',\n },\n {\n 'args': [[None], {'frame': {'redraw': False}, 'mode': 'immediate'}],\n 'label': 'Pause',\n 'method': 'animate',\n },\n ],\n 'showactive': True,\n 'type': 'buttons',\n },\n ],\n 'sliders': [\n {\n 'currentvalue': {'visible': False},\n 'steps': steps,\n },\n ],\n })\n\n figure = {\n 'data': frames[0]['data'],\n 'layout': layout,\n 'frames': frames,\n }\n\n plotly.offline.iplot(figure, config={'displayModeBar': False}, show_link=False)\n\n\ndef build_matrix(g):\n return networkx.to_numpy_matrix(g)\n\n\ndef build_closeness(g):\n closeness = networkx.closeness_centrality(g)\n\n for n in closeness:\n g.nodes[n]['theoretical_closeness'] = closeness[n]\n\n\ndef build_betweenness(g):\n betweenness = networkx.betweenness_centrality(g)\n\n for n in betweenness:\n g.nodes[n]['theoretical_betweenness'] = betweenness[n]\n\n\ndef build_shortest_paths(g, s, t):\n for n in g.nodes:\n g.nodes[n]['shortest_neighbors'] = set()\n\n for path in networkx.all_shortest_paths(g, s, t):\n for i in range(len(path) - 1):\n g.nodes[path[i]]['shortest_neighbors'].add(path[i + 1])\n\n for n in g.nodes:\n g.nodes[n]['shortest_neighbors'] = list(g.nodes[n]['shortest_neighbors'])\n g.nodes[n]['shortest_neighbors'].sort()\n\n\ndef randomize_positions(g):\n for n in g.nodes:\n x = random()\n y = random()\n g.nodes[n]['pos'] = (x, y)\n\n\ndef generate_empty_graph(num_nodes):\n g = networkx.empty_graph(num_nodes)\n\n reset_node_colors(g)\n\n randomize_positions(g)\n\n return g\n\n\ndef generate_complete_graph(num_nodes):\n g = networkx.complete_graph(num_nodes)\n\n reset_node_colors(g)\n reset_edge_colors(g)\n\n randomize_positions(g)\n\n return g\n\n\ndef update_positions(g, weight=None):\n pos = {n: g.nodes[n]['pos'] for n in g.nodes}\n\n layout = networkx.spring_layout(g, pos=pos, iterations=1, weight=weight)\n\n _set_layout(g, layout)\n\n\ndef largest_component(g):\n return max(networkx.algorithms.components.connected_component_subgraphs(g), key=len)\n\n\ndef average_distance(g):\n return networkx.algorithms.shortest_paths.generic.average_shortest_path_length(g)\n\n\ndef average_clustering_coefficient(g):\n return networkx.algorithms.cluster.average_clustering(g)\n\n\ndef global_clustering_coefficient(g):\n num_nodes = g.number_of_nodes()\n\n nodes = list(g.nodes)\n\n has_cache = [\n [\n int(g.has_edge(nodes[i], nodes[j])) if i < j else None\n for j in range(num_nodes)\n ]\n for i in range(num_nodes)\n ]\n\n num_cache = [(0, 0), (0, 0), (0, 1), (3, 3)]\n\n def _mapping(x):\n return num_cache[has_cache[x[0]][x[1]] + has_cache[x[0]][x[2]] + has_cache[x[1]][x[2]]]\n\n def _reduction(x, y):\n return (x[0] + y[0], x[1] + y[1])\n\n num_closed, num_connected = reduce(_reduction, 
map(_mapping, combinations(range(num_nodes), 3)))\n\n return num_closed / num_connected\n\n\nplotly.offline.init_notebook_mode(connected=True)\n","sub_path":"socnet.py","file_name":"socnet.py","file_ext":"py","file_size_in_byte":11640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"346339966","text":"# Library of routines for generating Boolean expression evaluations scripts\n# Assume constants one & zero are named \"one\" and \"zero\"\n\nimport sys\n\n\n# Class to define use of ZDDs\nclass Z:\n none, vars, convert, avars = range(4)\n names = [\"none\", \"vars\", \"convert\", \"avars\"]\n suffixes = [\"b\", \"v\", \"z\", \"a\"]\n\n def name(self, id):\n return self.names[id]\n\n def suffix(self, id):\n return self.suffixes[id]\n\n# For generating unique names\nclass Uniq:\n nextId = 0\n root = \"n_t\"\n \n def __init(self, root = \"n_t\"):\n self.root = root\n self.nextId = 0\n\n def new(self):\n result = \"%s%d\" % (self.root, self.nextId)\n self.nextId += 1\n return result\n\n# Representation of single nodes\nclass Node:\n name = \"\"\n refCnt = 0\n\n def __init__(self, name):\n self.name = str(name)\n self.refCnt = 1\n\n def addRef(self):\n self.refCnt += 1\n\n def decRef(self):\n self.refCnt -= 1\n\n def dead(self):\n return self.refCnt <= 0\n\n def __str__(self):\n return self.name\n\n\n# Represent signal as vector of names.\n# Should keep these immutable\nclass Vec:\n # Represent as array of strings.\n nodes = []\n refCnt = 0\n \n def __init__(self, nodeList = []):\n self.nodes = [str(n) for n in nodeList]\n self.refCnt = 1\n \n\n def addRef(self):\n self.refCnt += 1\n\n def decRef(self):\n self.refCnt -= 1\n\n def dead(self):\n return self.refCnt <= 0\n\n def __str__(self):\n return \" \".join(self.nodes)\n\n def __len__(self):\n return len(self.nodes)\n\n def __getitem__(self, key):\n return self.nodes[key]\n\n def __setitem__(self, key, value):\n self.nodes[key] = value\n\n # Various ways of constructing new nodes\n\n # Create list of names of form root.i\n # Use little-Endian ordering\n def nameVec(self, root, n):\n ls = [root + '.' 
+ str(i) for i in range(n)]\n self.nodes = ls\n\n def reverse(self):\n n = len(self.nodes)\n return Vec([self.nodes[n-i-1] for i in range(n)])\n\n def dup(self):\n return Vec([nd for nd in self.nodes])\n\n def concatenate(self, other):\n ls = [nd for nd in self.nodes]\n ls.extend(other.nodes)\n return Vec(ls)\n\n # Extend vector with repeated element\n # If necessary, truncate so that final size is n\n def extend(self, n, ele = \"zero\"):\n if n < len(self.nodes):\n ls = [self.nodes[i] for i in range(n)]\n else:\n ls = [nd for nd in self.nodes]\n other = [ele for i in range(n-len(ls))]\n ls.extend(other)\n return Vec(ls)\n\n def shiftLeft(self, n):\n ls = [\"zero\" for i in range(n)]\n ls.extend(self.nodes)\n return Vec(ls)\n\n # Create interleaved vector\n def interleave(self, other):\n if len(self) < len(other):\n short, long = self.nodes, other.nodes\n nshort, nlong = len(self), len(other)\n else:\n short, long = other.nodes, self.nodes\n nshort, nlong = len(other), len(self)\n step = nlong / nshort\n ls = []\n for ele in short:\n ls.append(ele)\n ls.extend(long[:step])\n long = long[step:]\n ls.extend(long)\n return Vec(ls)\n \n# Framework for creating expressions\nclass Circuit:\n outfile = None\n # Set of (nonvector) nodes\n nodes = None\n # Set of vectors\n vecs = None\n uniq = None\n zero = None\n one = None\n \n def __init__(self, outfile = sys.stdout):\n self.outfile = outfile\n self.nodes = set()\n self.vecs = set()\n self.uniq = Uniq()\n self.zero = Node('zero')\n self.one = Node('one')\n\n def addVec(self, v):\n self.vecs.add(v)\n return v\n\n def addNode(self, nd):\n self.nodes.add(nd)\n return nd\n\n def flush(self):\n rnodes = []\n for nd in self.nodes:\n if nd.dead():\n self.delete(nd)\n rnodes.append(nd)\n for nd in rnodes:\n self.nodes.remove(nd)\n rvecs = []\n for v in self.vecs:\n if v.dead():\n self.delete(v)\n rvecs.append(v)\n for v in rvecs:\n self.vecs.remove(v)\n\n def node(self, name):\n nd = Node(name)\n self.addNode(nd)\n return nd\n\n def tmpNode(self):\n name = self.uniq.new()\n return self.node(name)\n\n def nameVec(self, root, n):\n v = Vec()\n v.nameVec(root, n)\n self.addVec(v)\n return v\n\n def tmpVec(self, n):\n root = self.uniq.new()\n return self.nameVec(root, n)\n\n # Decrement, and possibly delete, all nodes & vectors in ls\n def decRefs(self, ls):\n for ele in ls:\n ele.decRef()\n self.flush()\n\n # Write to file. 
Adds EOL\n def write(self, line):\n self.outfile.write(line + \"\\n\")\n\n def comment(self, line):\n self.write(\"# \" + line)\n\n # Generate single line command\n # Obj can be single node or vector, or list of nodes/vectors\n def cmdLine(self, cmd, obj):\n if type(obj) in [type([]), type(())]:\n ls = map(str, obj)\n s = \" \".join(ls)\n else:\n s = str(obj)\n self.write(cmd + \" \" + s)\n\n def declare(self, varv):\n self.cmdLine(\"var\", varv)\n\n def delete(self, obj):\n self.cmdLine(\"delete\", obj)\n\n def information(self, fv):\n self.cmdLine(\"info\", fv)\n\n def collect(self):\n self.write(\"collect\")\n\n def status(self):\n self.write(\"status\")\n\n def count(self, fv):\n self.cmdLine(\"count\", fv)\n\n def satisfy(self, fv):\n self.cmdLine(\"satisfy\", fv)\n\n # Generate sequence of commands\n # argList should be list of vectors\n def cmdSequence(self, cmd, argList):\n n = max([len(v) for v in argList])\n nargList = [v.extend(n) for v in argList]\n lists = [v.nodes for v in nargList]\n for i in range(n):\n args = [ele[i] for ele in lists]\n self.cmdLine(cmd, args)\n \n def andN(self, dest, argList):\n ls = [dest]\n ls.extend(argList)\n self.cmdLine(\"and\", ls)\n\n def orN(self, dest, argList):\n ls = [dest]\n ls.extend(argList)\n self.cmdLine(\"or\", ls)\n\n def norN(self, dest, argList):\n nargs = [(s[1:] if s[0] == \"!\" else \"!\"+s) for s in argList]\n self.andN(dest, nargs)\n\n def xorN(self, dest, argList):\n ls = [dest]\n ls.extend(argList)\n self.cmdLine(\"xor\", ls)\n\n def iteN(self, dest, argList):\n ls = [dest]\n ls.extend(argList)\n self.cmdLine(\"ite\", ls)\n\n \n def assignConstant(self, dest, val):\n if val == 1:\n self.andN(dest, [])\n elif val == 0:\n self.orN(dest, [])\n else:\n self.comment(\"Couldn't assign value %s to node %s\" % (val, dest)) \n\n def checkConstant(self, dest, val):\n cnode = None\n if val == 1:\n cnode = self.one\n elif val == 0:\n cnode = self.zero\n if cnode is None:\n self.comment(\"Couldn't check that node %s = %d\" % (dest, val))\n return\n self.cmdLine(\"equal\", [dest, cnode])\n\n\n def notV(self, dest, v):\n self.cmdSequence(\"not\", [dest, v])\n\n def andV(self, dest, argList):\n ls = [dest]\n ls.extend(argList)\n self.cmdSequence(\"and\", ls)\n\n def orV(self, dest, argList):\n ls = [dest]\n ls.extend(argList)\n self.cmdSequence(\"or\", ls)\n\n def xorV(self, dest, argList):\n ls = [dest]\n ls.extend(argList)\n self.cmdSequence(\"xor\", ls)\n\n def zc(self, dest, n):\n self.cmdLine(\"zconvert\", [dest, n])\n\n def ac(self, dest, n):\n self.cmdLine(\"aconvert\", [dest, n])\n\n def zcV(self, dest, v):\n self.cmdSequence(\"zconvert\", [dest, v])\n\n def acV(self, dest, v):\n self.cmdSequence(\"aconvert\", [dest, v])\n\n def maj3(self, dest, n1, n2, n3):\n p12 = self.tmpNode()\n p23 = self.tmpNode()\n p13 = self.tmpNode()\n self.cmdLine(\"and\", [p12, n1, n2])\n self.cmdLine(\"and\", [p23, n2, n3])\n self.cmdLine(\"and\", [p13, n1, n3])\n self.cmdLine(\"or\", [dest, p12, p23, p13])\n self.decRefs([p12, p23, p13])\n\n def majorityV3(self, dest, a1, a2, a3):\n n = len(dest)\n p12 = self.tmpVec(n)\n p23 = self.tmpVec(n)\n p13 = self.tmpVec(n)\n self.andV(p12, [a1, a2])\n self.andV(p23, [a2, a3])\n self.andV(p13, [a1, a3])\n self.orV(dest, [p12, p23, p13])\n self.decRefs([p12, p23, p13])\n\n def addV2(self, dest, a1, a2):\n n = len(dest)\n cv = self.tmpVec(n-1)\n carry = cv.shiftLeft(1)\n lsd = dest.nodes\n ls1 = a1.nodes\n ls2 = a2.nodes\n lsc = carry.nodes\n args = [ls1, ls2, lsc]\n for i in range(n):\n ls = [ele[i] for ele in 
args]\n self.xorN(lsd[i], ls) \n if i < n-1:\n self.maj3(lsc[i+1], ls[0], ls[1], ls[2])\n self.decRefs([cv])\n\n def addV(self, dest, argList):\n n = len(dest)\n isTmp = [False for arg in argList]\n while len(argList) > 2:\n tmpV = self.tmpVec(n)\n self.addV2(tmpV, argList[0], argList[1])\n if isTmp[0]:\n self.decRefs([argList[0]])\n ls = [tmpV]\n ls.extend(argList[2:])\n argList = ls\n ls = [True]\n ls.extend(isTmp[2:])\n isTmp = ls\n self.addV2(dest, argList[0], argList[1])\n if isTmp[0]:\n self.decRefs([argList[0]])\n \n def multV2(self, dest, p1, p2):\n n = len(dest)\n np1 = p1.extend(n)\n np2 = p2.extend(n)\n pp = []\n tlist = []\n for i in range(n):\n a = Vec().extend(n, np2.nodes[i])\n t = self.tmpVec(n)\n tlist.append(t)\n self.andV(t, [a, np1])\n pp.append(t.shiftLeft(i).extend(n))\n self.addV(dest, pp)\n self.decRefs(tlist)\n\n def multV(self, dest, argList):\n n = len(dest)\n isTmp = [False for arg in argList]\n while len(argList) > 2:\n tmpV = self.tmpVec(n)\n self.multV2(tmpV, argList[0], argList[1])\n if isTmp[0]:\n self.decRefs([argList[0]])\n ls = [tmpV]\n ls.extend(argList[2:])\n argList = ls\n ls = [True]\n ls.extend(isTmp[2:])\n isTmp = ls\n self.multV2(dest, argList[0], argList[1])\n if isTmp[0]:\n self.decRefs([argList[0]])\n\n # Generate signals indicating whether have 0 or 1 elements of vector set.\n # nv is negation of v\n def count01(self, c0, c1, v, nv):\n n = len(v.nodes)\n self.assignConstant(c0, 1)\n self.assignConstant(c1, 0)\n for i in range(n):\n name = v.nodes[i]\n nname = nv.nodes[i]\n self.iteN(c1, [name, c0, c1])\n self.andN(c0, [c0, nname])\n\n # Is at most one signal in vector equal to 1?\n def exactly1(self, dest, v, nv):\n c0 = self.tmpNode()\n self.count01(c0, dest, v, nv)\n self.decRefs([c0])\n\n # Is at most one signal in vector equal to 1?\n def atMost1(self, dest, v, nv):\n c0 = self.tmpNode()\n c1 = self.tmpNode()\n self.count01(c0, c1, v, nv)\n self.orN(dest, [c0, c1])\n self.decRefs([c0, c1])\n\n\n # Create counting network for values from 0 up to k\n # Generates destV[l] encodes case where count = l\n # where k = len(destV)-1\n def countGenerator(self, destV, v, nv):\n k = len(destV) - 1\n n = len(v.nodes)\n t = self.tmpVec(k+1)\n self.assignConstant(t[0], 1)\n for l in range(1,k+1):\n self.assignConstant(t[l], 0)\n for i in range(n-1):\n name = v.nodes[i]\n nname = nv.nodes[i] \n for l in range(k+1,0,-1):\n self.iteN(t[l], [name, t[l-1], t[l]])\n self.andN(t[0], [t[0], nname])\n name = v.nodes[n-1]\n nname = nv.nodes[n-1]\n for l in range(k+1,0,-1):\n self.iteN(destV[l], [name, t[l-1], t[l]])\n self.andN(destV[0], [t[0], nname])\n self.decRefs([t])\n\n def atMostK(self, dest, v, nv, k):\n t = self.tmpVec(k+1)\n self.countGenerator(t, v, nv)\n self.orN(dest, t)\n self.decRefs([t])\n\n def getBit(self, v, i):\n return ((v>>i) &1)\n\n # Create product term to match specified value\n # nvec is negation of vec\n def matchVal(self, v, vec, out):\n names = [((\"!%s\" if self.getBit(v, i) == 0 else \"%s\") % vec.nodes[i]) for i in range(len(vec.nodes))]\n lits = Vec(names)\n self.andN(out, lits.nodes)\n\n \n # Components to build up C6288-style multipler\n # Single partial product: a_{i,j}\n def pprod(self, dest, avec, bvec, i, j):\n self.andN(dest, [avec.nodes[j], bvec.nodes[i]])\n\n # Vector of partial products for single level j\n def pprodV(self, dv, avec, bvec, j):\n m = len(dv)\n for i in range(m):\n self.pprod(dv.nodes[i], avec, bvec, i, j)\n\n # Half adder\n def hadd(self, carry, sum, x, z):\n self.andN(carry, [x, z])\n self.xorN(sum, [x, 
z])\n\n # Vector of half adders\n def haddV(self, cv, sv, xv, yv):\n for i in range(len(cv)):\n self.hadd(cv.nodes[i], sv.nodes[i], xv.nodes[i], yv.nodes[i])\n\n # Full adder\n def fadd(self, carry, sum, x, y, z):\n self.xorN(sum, [x, y, z])\n self.maj3(carry, x, y, z)\n\n # Vector of full adders\n def faddV(self, cv, sv, xv, yv, zv):\n for i in range(len(cv)):\n self.fadd(cv.nodes[i], sv.nodes[i], xv.nodes[i], yv.nodes[i], zv.nodes[i])\n \n # Top layer of multipler\n # out: Generates bits 0 and 1 of product\n # cvec: carry outputs (m-1)\n # svec: sum outputs (m-2)\n def tlayer(self, out, cvec, svec, avec, bvec):\n self.comment(\"Building top level of multiplier\")\n m = len(avec)\n n = len(bvec)\n v0 = self.tmpVec(m-1)\n pv0 = Vec([out.nodes[0]] + v0.nodes)\n self.pprodV(pv0, avec, bvec, 0)\n v1 = self.tmpVec(m-1)\n self.pprodV(v1, avec, bvec, 1)\n sveclong = Vec([out.nodes[1]] + svec.nodes)\n self.haddV(cvec, sveclong, v0, v1)\n self.decRefs([v0, v1])\n\n # Middle layers of multiplier\n # out: Generates bit j of product\n # cvec: carry outputs (m-1)\n # svec: sum outputs (m-2)\n # cinvec: input carries (m-1)\n # sinvec: input sums (m-2)\n def mlayer(self, out, cvec, svec, cinvec, sinvec, avec, bvec, j):\n self.comment(\"Building level %d of multiplier\" % j)\n m = len(avec)\n n = len(bvec)\n v = self.tmpVec(m-1)\n self.pprodV(v, avec, bvec, j)\n pp = self.tmpNode()\n self.pprod(pp, avec, bvec, m-1, j-1)\n sinveclong = Vec(sinvec.nodes + [pp])\n sveclong = Vec([out.nodes[j]] + svec.nodes)\n self.faddV(cvec, sveclong, sinveclong, cinvec, v)\n self.decRefs([v, pp])\n\n # Bottom level of multiplier\n # out: Generates bits n .. m+n-1 of product\n # cinvec: input carries (m-1)\n # sinvec: input sums (m-2)\n def blayer(self, out, cinvec, sinvec, avec, bvec):\n self.comment(\"Building bottom level of multiplier\")\n m = len(avec)\n n = len(bvec)\n pp = self.tmpNode()\n self.pprod(pp, avec, bvec, m-1, n-1)\n cv = self.tmpVec(m-2)\n self.hadd(cv.nodes[0], out.nodes[n], sinvec.nodes[0], cinvec.nodes[0])\n for i in range(m-3):\n self.fadd(cv.nodes[i+1], out.nodes[n+1+i], sinvec.nodes[i+1], cinvec.nodes[i+1], cv.nodes[i])\n self.fadd(out.nodes[n+m-1], out.nodes[n+m-2], pp, cinvec.nodes[m-2], cv.nodes[m-3])\n self.decRefs([cv, pp])\n\n # m X n bit multiplier\n def multiplier(self, out, avec, bvec, verbose=False):\n # Require m >= 3, n >= 1\n m = len(avec)\n n = len(bvec)\n svec = self.tmpVec(m-2)\n cvec = self.tmpVec(m-1)\n self.tlayer(out, cvec, svec, avec, bvec)\n if verbose:\n self.information(svec.nodes + cvec.nodes)\n for j in range(2,n):\n sinvec = svec\n cinvec = cvec\n svec = self.tmpVec(m-2)\n cvec = self.tmpVec(m-1)\n self.mlayer(out, cvec, svec, cinvec, sinvec, avec, bvec, j)\n if verbose:\n self.information(svec.nodes + cvec.nodes)\n self.decRefs([sinvec, cinvec])\n self.blayer(out, cvec, svec, avec, bvec)\n self.decRefs([cvec, svec])\n \n # Construct function based on enumeration of function\n # User provides function mapping list of integers to integer\n def blast(self, out, inlist, ifun):\n # Initialize nodes to 0\n self.orV(out, [])\n xval = [0 for input in inlist]\n allnodes = []\n for input in inlist:\n allnodes = allnodes + input.nodes\n allinputs = Vec(allnodes)\n for idx in range(1 << len(allinputs)):\n v = idx\n for i in range(len(inlist)):\n xval[i] = 0\n for j in range(len(inlist[i])):\n xval[i] += ((v & 0x1) << j)\n v = v >> 1\n y = ifun(xval)\n select = None\n for j in range(len(out)):\n if y & (1 << j) != 0:\n if select == None:\n select = self.tmpNode()\n self.matchVal(idx, 
allinputs, select)\n self.orN(out[j], [out[j], select])\n if select != None:\n self.decRefs([select])\n \n def multfun(self, xlist):\n v = 1\n for x in xlist:\n v *= x\n return v\n\n def multblast(self, out, avec, bvec):\n self.blast(out, [avec, bvec], self.multfun)\n \n\n## Some benchmarks:\n# Show that addition is associative\ndef addAssociative(n, f = sys.stdout):\n ckt = Circuit(f)\n # Generate vectors\n a, b, c, s, t, x = [ckt.nameVec(r, n) for r in [\"a\", \"b\", \"c\", \"s\", \"t\", \"x\"]]\n e = ckt.node(\"e\")\n da, db, dc = [ckt.nameVec(r, n) for r in [\"a\", \"b\", \"c\"]]\n vars = da.interleave(db).interleave(dc).reverse()\n ckt.write(\"time\")\n ckt.declare(vars)\n\n # Add them together\n ckt.addV(s, [a, b, c])\n ckt.addV(t, [c, b, a])\n ckt.information(t)\n ckt.decRefs([a, b, c])\n\n # Generate comparator\n ckt.xorV(x, [s, t])\n ckt.decRefs([s, t])\n ckt.orN(e, [ckt.node(name) for name in x.nodes])\n ckt.decRefs([x])\n ckt.write(\"equal zero e\")\n ckt.write(\"time\")\n ckt.status()\n\n# Show that multiplication is associative\ndef multAssociative(n, f = sys.stdout):\n ckt = Circuit(f)\n # Generate vectors\n a, b, c, s, t, x = [ckt.nameVec(r, n) for r in [\"a\", \"b\", \"c\", \"s\", \"t\", \"x\"]]\n e = ckt.node(\"e\")\n da, db, dc = [ckt.nameVec(r, n) for r in [\"a\", \"b\", \"c\"]]\n vars = da.interleave(db).interleave(dc).reverse()\n ckt.write(\"time\")\n ckt.declare(vars)\n\n # Mult them together\n ckt.multV(s, [a, b, c])\n\n ckt.multV(t, [c, b, a])\n ckt.information(t)\n ckt.decRefs([a, b, c])\n\n # Generate comparator\n ckt.xorV(x, [s, t])\n ckt.decRefs([s, t])\n ckt.orN(e, [ckt.node(name) for name in x.nodes])\n ckt.decRefs([x])\n ckt.write(\"equal zero e\")\n ckt.write(\"time\")\n ckt.status()\n\ndef Multiplier(n, f = sys.stdout, zdd = Z.none, reverseA = False, reverseB = False, interleave = False, check = False):\n ckt = Circuit(f)\n ckt.comment(\"Construction of %d x %d multiplier\" % (n, n))\n ckt.comment(\"ZDD mode = %s\" % Z().name(zdd))\n avec = ckt.nameVec(\"A\", n)\n bvec = ckt.nameVec(\"B\", n)\n if zdd == Z.none or zdd == Z.convert:\n davec = avec\n dbvec = bvec\n else:\n davec = ckt.nameVec(\"bA\", n)\n dbvec = ckt.nameVec(\"bB\", n)\n if reverseA:\n davec = davec.reverse()\n if reverseB:\n dbvec = dbvec.reverse()\n dvec = davec.interleave(dbvec) if interleave else Vec(davec.nodes + dbvec.nodes)\n ckt.declare(dvec)\n if zdd == Z.vars:\n ckt.zcV(avec, davec)\n ckt.zcV(bvec, dbvec)\n elif zdd == Z.avars:\n ckt.acV(avec, davec)\n ckt.acV(bvec, dbvec)\n outvec = ckt.nameVec(\"out\", n+n)\n outcvec = ckt.nameVec(\"outc\", n+n) if zdd == Z.convert else outvec\n ckt.multiplier(outcvec, avec, bvec)\n if zdd == Z.convert:\n ckt.zcV(outvec, outcvec)\n ckt.decRefs([outcvec])\n ckt.comment(\"%s generation completed\" % (\"ADD\" if zdd == Z.avars else \"BDD\" if zdd == Z.none else \"ZDD\"))\n ckt.write(\"time\")\n ckt.information(outvec.nodes)\n if check:\n checkvec = ckt.nameVec(\"cout\", n+n)\n ckt.multblast(checkvec, avec, bvec)\n ckt.cmdSequence(\"equal\", [outvec, checkvec])\n ckt.status()\n ckt.comment(\"Flush state\")\n ckt.write(\"flush\")\n ckt.comment(\"Exit\")\n ckt.write(\"quit\")\n\n\n \n","sub_path":"scripts/circuit.py","file_name":"circuit.py","file_ext":"py","file_size_in_byte":21010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"225322285","text":"cases = input().split()\nn = int(cases[0])\nm = int(cases[1])\nw = int(cases[2])\nweights = input().split()\nweights = [int(x) for x in 
weights]\nbeauty = input().split()\nbeauty = [int(x) for x in beauty]\n\nn_list = [0]*n\nhackme = []\nfor i in range(m):\n\tpair = input().split()\n\tpair1 = int(pair[0])\n\tpair2 = int(pair[1])\n\tadded = False\n\tfor i in hackme:\n\t\tif pair1 in i or pair2 in i:\n\t\t\ti.add(pair1)\n\t\t\ti.add(pair2)\n\t\t\tn_list[pair1-1]=1\n\t\t\tn_list[pair2-1]=1\n\t\t\tadded = True\n\tif not added:\n\t\tn_list[pair1-1]=1\n\t\tn_list[pair2-1]=1\n\t\thackme.append(set([pair1,pair2]))\nfor i in range(n):\n\tif not n[i]:\n\t\thackme.append(i+1)\nbest = [(0,0)]*1000\nfor i in range(1, n+1):\n\tmaxium = -1\n\tfor j in hackme:\n\t\tif len(j) == i:\n\t\t\tif j>maxium:\n\t\t\t\tmaxium = j\n\tfor num in range(i):\n\t\tonum = i-1\n","sub_path":"Codeforces/383d2D.py","file_name":"383d2D.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"362626512","text":"def Max(list):\n if len(list) == 1:\n return list[0]\n else:\n maximum = Max(list[1:])\n return maximum if maximum > list[0] else list[0]\n\ndef main():\n try:\n list = eval(input(\"Enter a list of numbers : \"))\n print(\"The largest number in the list : \", Max(list))\n except SyntaxError:\n print(\"Please enter comma seperated numbers\")\n except:\n print(\"Enter only numbers\")\n\nmain()","sub_path":"part-2/question-1/recursive.py","file_name":"recursive.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"493201015","text":"from concurrent.futures import ThreadPoolExecutor\nimport datetime\nimport typing as t\nfrom unittest.mock import patch, MagicMock\nimport uuid\nimport wsgiref.simple_server\n\nimport aiohttp\nfrom apd.sensors.base import Sensor\nfrom apd.sensors.sensors import PythonVersion, ACStatus\nfrom apd.sensors.wsgi import set_up_config\nimport flask\nimport pytest\n\nfrom apd.sensors.wsgi import v21\n\nfrom apd.aggregation import collect\nfrom apd.aggregation.database import Deployment\n\npytestmark = [pytest.mark.functional]\n\n\n@pytest.fixture\ndef sensors() -> t.Iterator[t.List[Sensor[t.Any]]]:\n \"\"\" Patch the get_sensors method to return a known pair of sensors only \"\"\"\n data: t.List[Sensor[t.Any]] = [PythonVersion(), ACStatus()]\n with patch(\"apd.sensors.cli.get_sensors\") as get_sensors:\n get_sensors.return_value = data\n yield data\n\n\ndef get_independent_flask_app(name: str) -> flask.Flask:\n \"\"\" Create a new flask app with the v20 API blueprint loaded, so multiple copies\n of the app can be run in parallel without conflicting configuration \"\"\"\n app = flask.Flask(name)\n app.register_blueprint(v21.version, url_prefix=\"/v/2.1\")\n return app\n\n\ndef run_server_in_thread(\n name: str, config: t.Dict[str, t.Any], port: int\n) -> t.Iterator[str]:\n # Create a new flask app and load in required code, to prevent config conflicts\n app = get_independent_flask_app(name)\n flask_app = set_up_config(config, app)\n server = wsgiref.simple_server.make_server(\"localhost\", port, flask_app)\n\n with ThreadPoolExecutor() as pool:\n pool.submit(server.serve_forever)\n yield f\"http://localhost:{port}/\"\n server.shutdown()\n\n\n@pytest.fixture(scope=\"module\")\ndef http_server():\n yield from run_server_in_thread(\n \"standard\",\n {\n \"APD_SENSORS_API_KEY\": \"testing\",\n \"APD_SENSORS_DEPLOYMENT_ID\": \"a46b1d1207fd4cdcad39bbdf706dfe29\",\n },\n 12081,\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef bad_api_key_http_server():\n yield 
from run_server_in_thread(\n \"alternate\",\n {\n \"APD_SENSORS_API_KEY\": \"penny\",\n \"APD_SENSORS_DEPLOYMENT_ID\": \"38cf2bae9adb445fad946c82e290487a\",\n },\n 12082,\n )\n\n\nclass TestGetDataPoints:\n @pytest.fixture\n def mut(self):\n return collect.get_data_points\n\n @pytest.mark.asyncio\n async def test_get_data_points(\n self, sensors: t.List[Sensor[t.Any]], mut, http_server: str\n ) -> None:\n # Get the data from the server, storing the time before and after\n # as bounds for the collected_at value\n async with aiohttp.ClientSession() as http:\n collect.http_session_var.set(http)\n time_before = datetime.datetime.now()\n results = await mut(http_server, \"testing\")\n time_after = datetime.datetime.now()\n\n assert len(results) == len(sensors) == 2\n\n for (sensor, result) in zip(sensors, results):\n assert sensor.from_json_compatible(result.data) == sensor.value()\n assert result.sensor_name == sensor.name\n assert time_before <= result.collected_at <= time_after\n\n @pytest.mark.asyncio\n async def test_get_data_points_fails_with_bad_api_key(\n self, sensors: t.List[Sensor[t.Any]], mut, http_server: str\n ) -> None:\n with pytest.raises(\n ValueError,\n match=f\"Error loading data from {http_server}: Supply API key in X-API-Key header\",\n ):\n async with aiohttp.ClientSession() as http:\n collect.http_session_var.set(http)\n await mut(http_server, \"incorrect\")\n\n\nclass TestAddDataFromSensors:\n @pytest.fixture\n def mut(self):\n return collect.add_data_from_sensors\n\n @pytest.fixture\n def mock_db_session(self):\n return MagicMock()\n\n @pytest.mark.asyncio\n async def test_get_get_data_from_sensors(\n self, mock_db_session, sensors: t.List[Sensor[t.Any]], mut, http_server: str\n ) -> None:\n results = await mut(\n mock_db_session,\n [\n Deployment(\n id=None, colour=None, name=None, uri=http_server, api_key=\"testing\"\n )\n ],\n )\n assert mock_db_session.execute.call_count == len(sensors)\n assert len(results) == len(sensors)\n\n @pytest.mark.asyncio\n async def test_get_get_data_from_sensors_with_multiple_servers(\n self, mock_db_session, sensors: t.List[Sensor[t.Any]], mut, http_server: str\n ) -> None:\n results = await mut(\n mock_db_session,\n [\n Deployment(\n id=None, colour=None, name=None, uri=http_server, api_key=\"testing\"\n ),\n Deployment(\n id=None, colour=None, name=None, uri=http_server, api_key=\"testing\"\n ),\n ],\n )\n assert mock_db_session.execute.call_count == len(sensors) * 2\n assert len(results) == len(sensors) * 2\n\n @pytest.mark.asyncio\n async def test_data_points_added_if_only_partial_success(\n self,\n mock_db_session,\n sensors: t.List[Sensor[t.Any]],\n mut,\n http_server: str,\n bad_api_key_http_server: str,\n caplog,\n ) -> None:\n await mut(\n mock_db_session,\n [\n Deployment(\n id=None, colour=None, name=None, uri=http_server, api_key=\"testing\",\n ),\n Deployment(\n id=None,\n colour=None,\n name=None,\n uri=bad_api_key_http_server,\n api_key=\"testing\",\n ),\n ],\n )\n # We expect Python Version and AC status for one endpoint\n assert mock_db_session.execute.call_count == 2\n insertion_calls = mock_db_session.execute.call_args_list\n params = [call[0][0].parameters for call in insertion_calls]\n assert {insertion[\"sensor_name\"] for insertion in params} == {\n \"PythonVersion\",\n \"ACStatus\",\n }\n assert {insertion[\"deployment_id\"] for insertion in params} == {\n uuid.UUID(\"a46b1d1207fd4cdcad39bbdf706dfe29\"),\n }\n\n # We should also have a log message showing details of the failing server and the failure\n assert 
len(caplog.records) == 1\n assert caplog.records[0].message == \"Data retrieval failed\"\n assert bad_api_key_http_server in caplog.records[0].exc_text\n assert \"Supply API key in X-API-Key header\" in caplog.records[0].exc_text\n","sub_path":"tests/test_http_get.py","file_name":"test_http_get.py","file_ext":"py","file_size_in_byte":6642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"492828010","text":"import cv2\nimport numpy as np\nfrom pylie import SO3, SE3\nfrom dataset import read_dataset\nfrom sfm_map import SfmMap, Keyframe, MapPoint, MatchedFrame\nimport open3d as o3d\nfrom optimize import BatchBundleAdjustment, IncrementalBundleAdjustment\n\n\"\"\"Assumes undistorted pixel points\"\"\"\n\n\ndef interactive_isfm():\n matched_frames, _ = read_dataset(shortest_track_length=3)\n\n # Choose optimization method, BatchBundleAdjustment or IncrementalBundleAdjustment.\n optimizer = BatchBundleAdjustment()\n\n # Choose the two first frames for initialization\n frame_0 = matched_frames[0]\n frame_1 = matched_frames[1]\n\n # Initialize map from two-view geometry.\n sfm_map = initialize_map(frame_0, frame_1)\n\n # You can here choose which images to add to the map in add_new_frame().\n next_frames = matched_frames[2::]\n\n # Callback for optimizing the map (press 'O')\n def optimize(vis):\n # Apply BA.\n optimizer.full_bundle_adjustment_update(sfm_map)\n\n vis.clear_geometries()\n for geom in get_geometry():\n vis.add_geometry(geom, reset_bounding_box=False)\n\n # Callback for adding new frame to the map (press 'A')\n def add_new_frame(vis):\n if not next_frames:\n return\n\n # Get next frame\n frame_new = next_frames.pop(0)\n print(\"Adding frame \" + str(frame_new.id()))\n\n # Find 2d-3d correspondences with map and compute initial pose with respect to the map.\n frame_map_corr, pose_w_new = track_map(sfm_map, frame_new)\n\n # Insert frame as keyframe into the map\n kf_new = add_as_keyframe_to_map(sfm_map, frame_new, pose_w_new, frame_map_corr)\n\n # Find new correspondences, triangulate and add as map points.\n find_and_add_new_map_points(sfm_map, kf_new)\n\n vis.clear_geometries()\n for geom in get_geometry():\n vis.add_geometry(geom, reset_bounding_box=False)\n\n # Helper function for extracting the visualization elements from the map.\n def get_geometry():\n poses = sfm_map.get_keyframe_poses()\n p, c = sfm_map.get_pointcloud()\n\n axes = []\n for pose in poses:\n axes.append(o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0).transform(pose.to_matrix()))\n\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(p.T)\n pcd.colors = o3d.utility.Vector3dVector(c.T / 255)\n\n return [pcd] + axes\n\n # Create visualizer.\n key_to_callback = {}\n key_to_callback[ord(\"O\")] = optimize\n key_to_callback[ord(\"A\")] = add_new_frame\n o3d.visualization.draw_geometries_with_key_callbacks(get_geometry(), key_to_callback)\n\n\ndef initialize_map(frame_0, frame_1):\n # Compute relative pose from two-view geometry.\n kp_0, id_0, kp_1, id_1 = frame_0.extract_correspondences_with_frame(frame_1)\n pose_0_1 = estimate_two_view_relative_pose(frame_0, kp_0, frame_1, kp_1)\n\n # Triangulate points.\n P_0 = frame_0.camera_model().projection_matrix(SE3())\n P_1 = frame_1.camera_model().projection_matrix(pose_0_1.inverse())\n points_0 = triangulate_points_from_two_views(P_0, kp_0, P_1, kp_1)\n\n # Add first keyframe as reference frame.\n sfm_map = SfmMap()\n kf_0 = Keyframe(frame_0, SE3())\n sfm_map.add_keyframe(kf_0)\n\n # 
Add second keyframe from relative pose.\n kf_1 = Keyframe(frame_1, pose_0_1)\n sfm_map.add_keyframe(kf_1)\n\n # Add triangulated points as map points relative to reference frame.\n num_matches = len(id_0)\n for i in range(num_matches):\n map_point = MapPoint(i, points_0[:, [i]])\n map_point.add_observation(kf_0, id_0[i])\n map_point.add_observation(kf_1, id_1[i])\n sfm_map.add_map_point(map_point)\n\n return sfm_map\n\n\ndef estimate_two_view_relative_pose(frame0: MatchedFrame, kp_0: np.ndarray,\n frame1: MatchedFrame, kp_1: np.ndarray):\n num_matches = kp_0.shape[1]\n if num_matches < 8:\n return None\n\n # Compute fundamental matrix from matches.\n F_0_1, _ = cv2.findFundamentalMat(kp_1.T, kp_0.T, cv2.FM_8POINT)\n\n # Extract the calibration matrices.\n K_0 = frame0.camera_model().calibration_matrix()\n K_1 = frame1.camera_model().calibration_matrix()\n\n # Compute the essential matrix from the fundamental matrix.\n E_0_1 = K_0.T @ F_0_1 @ K_1\n\n # Compute the relative pose.\n # Transform detections to normalized image plane (since cv2.recoverPose() only supports common K)\n kp_n_0 = frame0.camera_model().pixel_to_normalised(kp_0)\n kp_n_1 = frame1.camera_model().pixel_to_normalised(kp_1)\n K_n = np.identity(3)\n _, R_0_1, t_0_1, _ = cv2.recoverPose(E_0_1, kp_n_1.T, kp_n_0.T, K_n)\n\n return SE3((SO3(R_0_1), t_0_1))\n\n\ndef triangulate_points_from_two_views(P_0: MatchedFrame, kp_0: np.ndarray,\n P_1: MatchedFrame, kp_1: np.ndarray):\n # Triangulate wrt frame 0.\n points_hom = cv2.triangulatePoints(P_0, P_1, kp_0, kp_1)\n return points_hom[:-1, :] / points_hom[-1, :]\n\n\ndef track_map(sfm_map, frame_new):\n # Find correspondences with map.\n kp, points_0, frame_map_corr = sfm_map.extract_2d_3d_correspondences(frame_new)\n\n # Estimate initial pose wrt map with PnP.\n pose_w_new = estimate_pose_from_map_correspondences(frame_new, kp, points_0)\n return frame_map_corr, pose_w_new\n\n\ndef estimate_pose_from_map_correspondences(frame: MatchedFrame, kp: np.ndarray, points_w: np.ndarray):\n # Estimate initial pose with a (new) PnP-method.\n K = frame.camera_model().calibration_matrix()\n _, theta_vec, t = cv2.solvePnP(points_w.T, kp.T, K, None, flags=cv2.SOLVEPNP_SQPNP)\n pose_c_w = SE3((SO3.Exp(theta_vec), t.reshape(3, 1)))\n\n return pose_c_w.inverse()\n\n\ndef add_as_keyframe_to_map(sfm_map, frame_new, pose_w_new, frame_map_corr):\n # Add new keyframe\n kf_new = Keyframe(frame_new, pose_w_new)\n sfm_map.add_keyframe(kf_new)\n\n # Add map point observations to new keyframe.\n for kp_id, map_point in frame_map_corr.items():\n map_point.add_observation(kf_new, kp_id)\n return kf_new\n\n\ndef find_and_add_new_map_points(sfm_map, kf_new):\n # Find new correspondences with the keyframes that are not map points.\n corr_for_keyframes = sfm_map.extract_correspondences_for_new_map_points(kf_new)\n\n # Triangulate new points and add new map points\n keyframe_ids = sfm_map.get_keyframe_ids()\n for kf_old, (kp_old, kp_new, tracks) in corr_for_keyframes.items():\n P_old = kf_old.camera_model().projection_matrix(kf_old.pose_w_c().inverse())\n P_new = kf_new.camera_model().projection_matrix(kf_new.pose_w_c().inverse())\n points_w = triangulate_points_from_two_views(P_old, kp_old, P_new, kp_new)\n\n # TODO: Find a better solution for assigning ids to MapPoints!\n start_ind = len(sfm_map.get_map_points())\n for i in range(len(tracks)):\n map_point = MapPoint(start_ind + i, points_w[:, [i]])\n\n for frame, kp_id in tracks[i].get_observations().items():\n if frame.id() in keyframe_ids:\n 
map_point.add_observation(sfm_map.get_keyframe(frame.id()), kp_id)\n\n sfm_map.add_map_point(map_point)\n\n\nif __name__ == '__main__':\n interactive_isfm()\n","sub_path":"incremental_sfm.py","file_name":"incremental_sfm.py","file_ext":"py","file_size_in_byte":7248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"278064484","text":"# Module for augementing image dataset as a preprocessing step\nimport math\nimport numpy as np\nimport cv2\nfrom scipy import ndimage\n\n\"\"\"\nAugmentations to implement:\n- normalization\n- rotation\n- translation\n- pixelation\n- mirroring\n? change backgrounds\n? add random noise\n\n\nLook into:\n- AutoAugment\n\"\"\"\n\ndef unflatten(image):\n dim = image.shape[0]\n new_dim = int(math.sqrt(dim))\n image = np.reshape(image, (new_dim, new_dim))\n\n return image\n\ndef make_rotations(dataset, labels, angles):\n \"\"\"\n Augment dataset with rotations of source images\n\n Args\n dataset: source dataset\n angles: list of positive angles (in degrees) for mirroring. Function will use negatives of each angle as well.\n\n Returns\n A tuple of augmented images and their corresponding labels\n \"\"\"\n was_flattened = (len(dataset[0].shape) == 1)\n augmented_dataset = []\n augmented_labels = []\n \n for image, label in zip(dataset, labels):\n if was_flattened:\n image = unflatten(image)\n\n for angle in angles:\n rotated_pos = ndimage.rotate(image, angle)\n rotated_neg = ndimage.rotate(image, -angle)\n\n if was_flattened:\n rotated_pos = rotated_pos.flatten()\n rotated_neg = rotated_neg.flatten()\n\n augmented_dataset.append(rotated_pos)\n augmented_dataset.append(rotated_neg)\n augmented_labels.append(label)\n augmented_labels.append(label)\n \n return (augmented_dataset, augmented_labels)\n\n\ndef make_translations(dataset, labels):\n \"\"\"\n Augment dataset with translations of source images. 
Shift image around by 10 pixels\n\n Args\n dataset: source dataset\n\n Returns\n A tuple of augmented images and their corresponding labels\n \"\"\"\n offset = 10\n translations = [\n (0, offset),\n (0, -offset),\n (offset, 0),\n (-offset, 0),\n (-offset, -offset),\n (-offset, offset),\n (offset, -offset),\n (offset, offset)\n ]\n\n was_flattened = (len(dataset[0].shape) == 1)\n augmented_dataset = []\n augmented_labels = []\n \n for image, label in zip(dataset, labels):\n if was_flattened:\n image = unflatten(image)\n \n height = image.shape[0]\n width = image.shape[1]\n \n for t_x, t_y in translations:\n new_image = np.zeros(image.shape)\n t_mat = np.array([[1,0,t_x],[0,1,t_y],[0,0,1]])\n\n for x in range(0, width):\n for y in range(0, height):\n old_coords = np.array([[x],[y],[1]])\n new_coords = t_mat.dot(old_coords) # translation here\n\n if new_coords[0] > 0 and new_coords[0] < width and new_coords[1] > 0 and new_coords[1] < height:\n new_image[new_coords[1], new_coords[0]] = image[y, x]\n \n if was_flattened:\n new_image = new_image.flatten() # flatten() returns a copy, so assign it\n augmented_dataset.append(new_image)\n augmented_labels.append(label)\n\n return (augmented_dataset, augmented_labels)\n\n\ndef make_blurry(dataset, labels, filter_size):\n \"\"\"\n Augment dataset by pixelating image (make it blurry)\n\n Args\n dataset: source dataset\n filter_size: size of kernel to convolve\n\n Returns\n A tuple of augmented images and their corresponding labels\n \"\"\"\n kernel = np.ones((filter_size, filter_size))\n k_width = filter_size\n k_height = filter_size\n border_size = int(filter_size / 2)\n\n was_flattened = (len(dataset[0].shape) == 1)\n augmented_dataset = []\n augmented_labels = []\n\n for image, label in zip(dataset, labels):\n if was_flattened:\n image = unflatten(image)\n\n blurry_image = np.zeros_like(image)\n # pad image\n image = cv2.copyMakeBorder(image, border_size, border_size + 1, border_size, border_size + 1, cv2.BORDER_REPLICATE)\n i_height = image.shape[0]\n i_width = image.shape[1]\n\n for y in range(0, i_height - k_height):\n for x in range(0, i_width - k_width):\n # Extract the sub_matrix at current position\n sub_matrix = image[y:y+k_height,x:x+k_width]\n\n # element-wise multiplication with kernel\n sum_matrix = sub_matrix * kernel\n # sum the matrix and set values of img_out\n asum = np.sum(sum_matrix) / (k_width * k_height)\n blurry_image[y,x] = asum\n\n if was_flattened:\n blurry_image = blurry_image.flatten() # flatten() returns a copy, so assign it\n\n augmented_dataset.append(blurry_image)\n augmented_labels.append(label)\n\n return (augmented_dataset, augmented_labels)\n\n \ndef make_mirrored(dataset, labels, fliplist):\n \"\"\"\n Augment dataset by mirroring source images\n\n Args\n dataset: source dataset\n fliplist: list of desired flips. 
\n 0: flips around x-axis\n 1: flips around y-axis\n -1: flips both\n\n Returns\n A tuple of augmented images and their corresponding labels\n \"\"\"\n was_flattened = (len(dataset[0].shape) == 1) \n augmented_dataset = []\n augmented_labels = []\n \n for image, label in zip(dataset, labels):\n if was_flattened:\n image = unflatten(image)\n\n for flip in fliplist:\n altered_image = cv2.flip(image, flip)\n\n if was_flattened:\n altered_image = altered_image.flatten()\n\n augmented_dataset.append(altered_image)\n augmented_labels.append(label)\n\n return (augmented_dataset, augmented_labels)\n","sub_path":"code/utils/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"180490132","text":"# IlluminaKeyWordSearcher.py\n\nimport csv # Used to read and write csv files\nimport PySimpleGUI as sg # Needed to create simple interactive GUIs\nimport os # Helps when working with folder and file paths\nfrom datetime import datetime, timezone, timedelta # Immensely useful for anything regarding dates and time\nimport re # Simplifies some string operations by utilizing patterns\nfrom typing import Tuple, List # Personal preference, helps with function syntax\n\n\"\"\"\nTODO LIST: (MINOR ORDER OF PRIORITY)\n1. Notify user if any folders do not contain the keyword\n a. Use a checkbox to allow log files that do not contain the keywords to appear in the output file.\n b. Output to a separate text file the log files that do not contain the keywords.\n2. Display drop down list with text of all the selected files.\n3. PROGRAM ALWAYS ASSUME LAST OCCURRENCE DATE OCCURS AFTER FIRST OCCURRENCE DATE. OPPOSITE CASE IS POSSIBLE.\n4. Implement try-except function calls and appropriate error responses.\n\"\"\"\n\ndef findFirstFindLastDates(filePath: str, firstKeyWord: str, lastKeyWord: str) -> Tuple[str, str]:\n \"\"\"Finds the first occurrence of firstKeyWord and last occurrence of lastKeyWord.\n\n filePath - (string) - the path of the file.\n firstKeyWord - (string) - the first key word we are interested in.\n lastKeyWord - (string) - the last key word we are interested in.\n\n Given a file path to a tab delimited csv type file and two key words, search for the first \\\n occurrence of the first keyword and the last occurrence of the last keyword. Return a 2-tuple \\\n of strings which are the dates of the first, and last occurrence in ISO 8601 format.\n\n [1] If newline='' is not specified, newlines embedded inside quoted fields will not be interpreted \\\n correctly, and on platforms that use \\r\\n line endings on write an extra \\r will be added. 
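# Hedged sketch: findFirstFindLastDates below walks the csv.reader twice,
# with the second loop resuming where the first stopped. A single-pass
# variant over the same row layout (date at index 0, message at index 3)
# could look like this; `find_first_last` is my name, not this script's.
def find_first_last(rows, first_kw, last_kw):
    first_date = last_date = None
    for row in rows:
        if first_date is None and first_kw in row[3]:
            first_date = row[0]
        if first_date is not None and last_kw in row[3]:
            last_date = row[0]  # keep overwriting so the final match wins
    return (first_date, last_date)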
It should \\\n always be safe to specify newline='', since the csv module does its own (universal) newline handling.\n \"\"\"\n\n firstDate = None # Will use to check that the first occurrence and last occurrence \\\n lastDate = None # actually exist.\n # mode='r' reads the file with UTF-8 BOM encoding\n with open(filePath, mode='r', encoding='utf-8-sig', newline='') as csvDataFile: # [1]\n csvReader = csv.reader(csvDataFile, delimiter='\\t') # csv.reader of tab delimited file\n for line in csvReader: # Read each line\n if firstKeyWord in line[3]: # Index 3 is the message contents\n firstDate = line[0] # Index 0 is the date\n break; # Only care about first occurrence\n for line in csvReader:\n if lastKeyWord in line[3]:\n lastDate = line[0] # Care about the last occurrence\n\n return (firstDate, lastDate)\n\n\ndef userInputWindowLayOut() -> sg.Window:\n \"\"\"Creates an interactive window and returns the Window object.\n\n Contains the data for the layout of a PySimpleGUI window. Creates the window using the layout \\\n information and returns it as a PySimpleGUI.Window object.\n \"\"\"\n\n layout = [[sg.Text(\"Select a folder containing all of the log reports\")],\n [sg.Text(\"File(s)\", size=(10, 1)), sg.Input(visible=False, key='-FILES-'),\n sg.FilesBrowse(button_text='Browse',enable_events=True)],\n [sg.Text(\"Input the first keyword to search for the first occurrence\")],\n [sg.Text(\"Key Word 1\", size=(10, 1)), sg.Input(default_text=\"ix_lgm_power_set_blocking\",\n key='-FIRSTKEYWORD-')],\n [sg.Text(\"Input the last keyword to search for the last occurrence\")],\n [sg.Text(\"Key Word 2\", size=(10, 1)), sg.Input(default_text=\"ix_lgm_power_rsp\",\n key='-LASTKEYWORD-')],\n [sg.Submit(button_text=\"Search\", tooltip=\"Runs a search with the given parameters\"),\n sg.Exit(tooltip=\"Closes and exits the program\")]]\n\n # 'ix_lgm_power_set_blocking' and 'ix_lgm_power_rsp' are set as default values for\n # key word 1 and key word 2. EDIT LATER\n\n return sg.Window('Key Words Search', layout) # Window is titled, and has the given layout\n\n\ndef scanCompleteWindow() -> sg.Window:\n\n layout = [[sg.Text(\"Program has finished scanning and outputted a text file\")],\n [sg.Exit(tooltip=\"Closes the window\")]]\n\n return sg.Window('Scan Complete', layout)\n\n\ndef formatTime() -> str:\n \"\"\"Finds the system's local time and returns it adjusted against UTC+00.\n\n Create an 'aware,' meaning it contains timezone information to locate itself relative to other \\\n 'aware' objects, datetime object with respect to system's local time. 
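# For reference, a hedged example of the string this builds (the sample
# datetime is mine; the UTC offset in the output follows the host's zone):
#   datetime(2021, 6, 1, 12, 0, 5, tzinfo=timezone.utc).isoformat(timespec='seconds')
#   -> '2021-06-01T12:00:05+00:00', and .replace(':', '') then gives
#   '2021-06-01T120005+0000'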
Returns the date and time \\\n as a string in ISO 8601 format: YYYY-MM-DDTHHMMSSZ.\n \"\"\"\n\n localTime = datetime.now(timezone.utc).astimezone() # Local time with respect to UTC+00\n\n # Replaced ':' with '' as we are later using this information to name and date the Time Stamp file.\n return localTime.isoformat(timespec='seconds').replace(':', '')\n\n\ndef readLogs(listOfLogStringPaths: list, firstKeyWord: str, lastKeyWord: str) -> List[Tuple[str, str, str]]:\n \"\"\"Given a list of log file string paths and two keywords, creates and returns a list of \\\n all of the occurrences of the two keywords (first occurrence and last occurrence respectively).\n\n listOfLogStringPaths - (list) - list of string paths to selected log files.\n firstKeyWord - (string) - the first key word we are interested in.\n lastKeyWord - (string) -the last key word we are interested in.\n\n Iterate through the log files string paths in the given list calling findFirstFindLastDates() on each log. \\\n Store the name of the file and each set of occurrences then return a list of 3-tuples of the first and \\\n last occurrences.\n \"\"\"\n\n listOfOccurrences = []\n for logStringPath in listOfLogStringPaths:\n if os.path.isfile(logStringPath) and logStringPath.endswith('.log'): # ensures path is to a .log file\n fileName = os.path.basename(logStringPath)\n occurrence = (fileName,) + findFirstFindLastDates(logStringPath, firstKeyWord, lastKeyWord)\n if None not in occurrence: # checks that both occurrences exist\n listOfOccurrences.append(occurrence)\n\n return listOfOccurrences # returns a list of 3-tuples containing the string log path names and dates \\\n # of the occurrences\n\n\ndef findTimeDifference(occurrence: Tuple[str, str]) -> timedelta:\n \"\"\"Given 2 string dates convert and return a timedelta object.\n\n occurrence - (Tuple[str, str]) - 2-tuple of the dates of both occurrences in string ISO 8601 format.\n\n Given the 2-tuple of string dates, convert them into a list of ints in date and time format: \\\n [year, month, day, military hour, minutes, seconds, milliseconds] \\\n Must adjust values due to year inputs being the last 2 digits of the current year (+2000) and \\\n given milliseconds (*1000). 
Creates two datetime objects and returns the difference between the \\\n two as a timedelta object.\n \"\"\"\n\n # slice the string [:-4] to remove the UTC offset which is format: ' +00'\n # split the string into a list with delimiters: '-', ':', '.', ' '\n # newly split string should be in the date and time format:\n # [year, month, day, military hour, minutes, seconds, milliseconds]\n stringListOfFirst = re.split('-|:|\\.| ', occurrence[0][:-4]) # list of strings in date and time format\n stringListOfLast = re.split('-|:|\\.| ', occurrence[1][:-4])\n # re.split creates a list of strings, must convert into a list of ints\n intListOfFirst = [int(timeValue) for timeValue in stringListOfFirst] # convert values to ints\n intListOfSecond = [int(timeValue) for timeValue in stringListOfLast]\n intListOfFirst[0] += 2000 # Given input is the last 2 digits of the current year\n intListOfSecond[0] += 2000 # Convert by adding 2000\n intListOfFirst[6] *= 1000 # Given input is milli (10^-3), but requested units is micro (10^-6)\n intListOfSecond[6] *= 1000 # Convert by multiplying by 10^3\n firstDateTime = datetime(*intListOfFirst) # unpack list and create datetime class object\n lastDateTime = datetime(*intListOfSecond)\n # !!!\n # ASSUMES LAST DATE ALWAYS OCCURS AFTER FIRST DATE\n # !!!\n return lastDateTime - firstDateTime # Difference is timedelta object\n\n\ndef writeToFile(listOfOccurrences: List[Tuple[str, str, str]]) -> None:\n \"\"\"Given a list of occurrences write the differences in time to a text file.\n\n listOfOccurrences - (List[Tuple[str, str, str]]) - list of 3-tuples containing the log string path and \\\n dates of the occurrences.\n\n Creates an output text file in the current working directory named \"Time Stamps YYYY-MM-DDTHHMMSSZ.txt\" \\\n with UTF BOM encoding. Iterates over each occurrence, calls findTimeDifference() to create a timedelta \\\n object and typecasts the timedelta object to a string, which returns a difference in time in ISO 8601 \\\n format. Finally, it writes the name of the log file and its associated difference in time to the output \\\n text file.\n\n [1] If newline='' is not specified, newlines embedded inside quoted fields will not be interpreted \\\n correctly, and on platforms that use \\r\\n line endings on write an extra \\r will be added. 
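# Hedged aside on the two steps above (the sample stamps are mine; the code
# assumes raw stamps shaped like 'YY-MM-DD HH:MM:SS.mmm +00'): strptime can
# do the same parse in one call, and str() on the resulting timedelta yields
# 'H:MM:SS[.ffffff]', which is close to but not strict ISO 8601 duration syntax.
# from datetime import datetime
# t0 = datetime.strptime('19-03-05 13:45:07.123', '%y-%m-%d %H:%M:%S.%f')
# t1 = datetime.strptime('19-03-05 14:15:07.373', '%y-%m-%d %H:%M:%S.%f')
# str(t1 - t0)  # -> '0:30:00.250000'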
It should \\\n    always be safe to specify newline='', since the csv module does its own (universal) newline handling.\n    \"\"\"\n\n    outputTextFile = \"Time Stamps \" + formatTime() + \".txt\"  # Generates the name of the text file\n    # mode='x' creates a new file and prepares to write into it with UTF-8 BOM encoding\n    with open(outputTextFile, mode='x', encoding='utf-8-sig', newline='') as csvOutputFile:  # [1]\n        csvWriter = csv.writer(csvOutputFile, dialect=csv.excel_tab)\n        for occurrence in listOfOccurrences:  # Iterates over the occurrences in each file\n            # Writes the time difference to the file and separates each one with newline character '\\n'\n            # slice [1:] to only get the 2-tuple of the first and last occurrence\n            csvWriter.writerow([occurrence[0], occurrence[1], occurrence[2],\n                                str(findTimeDifference(occurrence[1:]))])\n\n\ndef runGUIWindow() -> None:\n    \"\"\"Runs the GUI event loop: reads user input, runs the search, and writes the output file until the window closes.\"\"\"\n\n    window = userInputWindowLayOut()\n    while True:\n        event, values = window.read()\n        # print(event, values)  # used for debugging purposes\n        if event == sg.WIN_CLOSED or event == 'Exit':\n            break\n        if event == 'Search':\n            selectedFiles = values['-FILES-'].split(';')\n            firstKeyWord = values['-FIRSTKEYWORD-']\n            lastKeyWord = values['-LASTKEYWORD-']\n            listOfOccurrences = readLogs(selectedFiles, firstKeyWord, lastKeyWord)\n            writeToFile(listOfOccurrences)\n            scanCompleteWindow().read(close=True)  # Shows a window pop up confirming the scan is complete\n\n\nif __name__ == \"__main__\":\n    runGUIWindow()","sub_path":"IlluminaKeyWordSearcher.py","file_name":"IlluminaKeyWordSearcher.py","file_ext":"py","file_size_in_byte":11593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"358387793","text":"import requests, json\nfrom geopy.geocoders import Nominatim\n\ntoken = \"606129537:AAHaYWThtrIRErPrCB_TOSC_TI0N3v-KDvo\"\n\nURL = \"https://api.telegram.org/bot{}/\".format(token)\ngeolocator = Nominatim()\nlocation = geolocator.geocode(\"Lviv\")\nprint((location.latitude, location.longitude))\nprint(URL)\na = \"f2001d674134d1ef061e7733468c6970\"\nurl = \"https://api.darksky.net/forecast/f2001d674134d1ef061e7733468c6970/{},{}\".format(49.85, 24.0166666667)\ns = requests.get(url)\ninf = s.json()\ninf = inf[\"daily\"]\nwith open(\"file.json\", \"w\") as f:\n    json.dump(inf, f)\n\n\nclass Main():\n    def __init__(self):\n        self.old = 0\n        self.first = True\n\n    def send(self, id, text=\"None\"):\n        text = text.split(\",\")\n        url = \"https://api.darksky.net/forecast/f2001d674134d1ef061e7733468c6970/{},{}\".format(text[0], text[1])\n        s = requests.get(url)\n        inf = s.json()\n        sum = inf[\"currently\"][\"summary\"]\n        temp = inf[\"currently\"][\"temperature\"]\n        wind = inf[\"currently\"][\"windSpeed\"]\n        apptemp = inf[\"currently\"][\"apparentTemperature\"]\n        print(sum, temp, wind, apptemp)\n        ans = {\"id\": id, \"text\": \"{}, {}°C WindSpeed, {} m/s, Feels as {}°C\".format(sum,round((float(temp) - 32) * (5 / 9)), round(float((wind * 0.44704))), round((float(apptemp) - 32) * (5 / 9)))}\n        # ans = {\"id\": id, \"text\": \"{} and temp={}\".format(sum, round((float(temp) - 32) * (5 / 9)))}\n        k = \"sendMessage?chat_id={}&text={}\".format(ans[\"id\"], ans[\"text\"])\n        url = URL + k\n        print(url)\n        return requests.get(url)\n#webhook\n    def get_update(self):\n        url = URL + \"getUpdates\"\n        inf = requests.get(url).json()\n        with open(\"file.json\", \"w\") as f:\n            json.dump(inf, f)\n        return inf\n\n    def mess(self, data):\n\n        chat_id = data[\"result\"][-1][\"message\"][\"chat\"][\"id\"]\n        text = 
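# Hedged aside: the manual update_id bookkeeping in mess() here can be
# replaced by the Bot API's own acknowledgement mechanism -- getUpdates
# accepts an `offset` parameter plus a long-poll `timeout`; `last_id` is my
# shorthand for the newest update_id already handled:
# requests.get(URL + "getUpdates", params={"offset": last_id + 1, "timeout": 30})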
data[\"result\"][-1][\"message\"][\"text\"]\n\n this_id = data[\"result\"][-1][\"update_id\"]\n if self.first:\n self.first = False\n self.old = this_id\n return\n if self.old != data[\"result\"][-1][\"update_id\"]:\n self.old = this_id\n if text:\n print(text, \"TEXT\")\n geolocator = Nominatim()\n location = geolocator.geocode(text)\n print(location.longitude)\n try:\n return self.send(chat_id, \"{},{}\".\n format(location.latitude, location.longitude))\n except AttributeError:\n pass\n elif text == None:\n print(\"sd\")\n\n def main(self):\n data = self.get_update()\n self.mess(data)\n while True:\n self.mess(self.get_update())\n\n\nif __name__ == '__main__':\n run = Main()\n run.main()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"45590417","text":"#!/usr/bin/env python\n\nimport argparse\nimport os\nimport pusherclient\nimport subprocess\nimport sys\nimport time\n\nparser = argparse.ArgumentParser()\nparser.add_argument('appkey', help='Pusher app key')\nparser.add_argument('--countries',\n help='List of countries separated by commas, like: france,spain'\n ' (default: all countries)')\nargs = parser.parse_args()\n\n# Add a logging handler so we can see the raw communication data\nimport logging\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nch = logging.StreamHandler(sys.stdout)\nlogger.addHandler(ch)\n\nglobal pusher\n\n# Courtesy of http://www.lengua.com/anthems/\nanthems = {\n 'france': None,\n 'germany': None,\n 'greatbritain': None,\n 'italy': None,\n 'spain': None,\n 'usa': None,\n}\n\ndef play(data):\n logger.info(\"Play Callback: %s\" % data)\n path = os.path.abspath(os.path.dirname(sys.argv[0]))\n anthems[data] = subprocess.Popen(['mpg123', '-q',\n '%s/../media/%s.mp3' % (path, data)])\n\ndef stop(data):\n logger.info(\"Stop Callback: %s\" % data)\n anthems[data].terminate()\n os.wait()\n\ndef connect_handler(data):\n channels = {}\n countries = args.countries.split(',') if args.countries \\\n else anthems.keys()\n for country in countries:\n channels[country] = pusher.subscribe(country)\n channels[country].bind('play', play)\n channels[country].bind('stop', stop)\n\nif __name__ == '__main__':\n\n pusher = pusherclient.Pusher(args.appkey, cluster='eu')\n\n pusher.connection.bind('pusher:connection_established', connect_handler)\n pusher.connect()\n\n while True:\n time.sleep(1)\n","sub_path":"anthemplayer/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"468154356","text":"import requests, urllib3, yaml, os\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\n\r\n\r\ndir = os.path.dirname(__file__)\r\nCONFIG_FILE_PATH = os.path.abspath( os.path.join(dir, '..', 'configfile', 'user_config.yml') )\r\nwith open(CONFIG_FILE_PATH, 'r') as f:\r\n config = yaml.safe_load(f)\r\n\r\n \r\nCLIENT_ID = config[\"CLIENT_ID\"]\r\nCLIENT_SECRET = config[\"CLIENT_SECRET\"]\r\nREFRESH_TOKEN = config[\"REFRESH_TOKEN\"]\r\n\r\n\r\ndef get_New_Access_Tokens():\r\n auth_url = \"https://www.strava.com/oauth/token\"\r\n\r\n payload = {\r\n 'client_id': CLIENT_ID,\r\n 'client_secret': CLIENT_SECRET,\r\n 'refresh_token': REFRESH_TOKEN,\r\n 'grant_type': \"refresh_token\",\r\n 'f': 'json'\r\n }\r\n\r\n print()\r\n print(\"Requesting new Access Token...\\n\")\r\n res = requests.post(auth_url, 
data=payload, verify=False)\r\n\r\n access_token = res.json()['access_token']\r\n refresh_token = res.json()['refresh_token']\r\n\r\n #print(\"Access Token = {}\".format(access_token))\r\n #print(\"Refresh Token = {}\\n\".format(refresh_token))\r\n\r\n return access_token","sub_path":"src/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"586613315","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nInitialization module for tpRigToolkit-dccs-maya\n\"\"\"\n\nimport os\nimport inspect\nimport traceback\nimport logging.config\n\nfrom tpDcc.managers import resources\nfrom tpDcc.libs.python import modules\n\nfrom tpDcc.dccs.maya.meta import metanode\nfrom tpDcc.dccs.maya.managers import metadatamanager\n\n\ndef init(dev=False):\n \"\"\"\n Initializes module\n \"\"\"\n\n create_logger(dev=dev)\n\n register_resources()\n register_metarig_classes()\n\n\ndef create_logger(dev=False):\n \"\"\"\n Returns logger of current module\n \"\"\"\n\n logger_directory = os.path.normpath(os.path.join(os.path.expanduser('~'), 'tpRigToolkit-dccs-maya', 'logs'))\n if not os.path.isdir(logger_directory):\n os.makedirs(logger_directory)\n\n logging_config = os.path.normpath(os.path.join(os.path.dirname(__file__), '__logging__.ini'))\n\n logging.config.fileConfig(logging_config, disable_existing_loggers=False)\n logger = logging.getLogger('tpRigToolkit-dccs-maya')\n dev = os.getenv('TPRIGTOOLKIT_DEV', dev)\n if dev:\n logger.setLevel(logging.DEBUG)\n for handler in logger.handlers:\n handler.setLevel(logging.DEBUG)\n\n return logger\n\n\ndef register_resources():\n \"\"\"\n Registers tpRigToolkit-dccs-maya resources path\n \"\"\"\n\n resources_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources')\n resources.register_resource(resources_path, key='tpRigToolkit-core')\n\n\ndef register_metarig_classes():\n\n logger = logging.getLogger('tpRigToolkit-dccs-maya')\n\n # Register tpRigToolkit MetaNodes\n meta_nodes_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'metarig')\n for sub_module in modules.iterate_modules(meta_nodes_path):\n file_name = os.path.splitext(os.path.basename(sub_module))[0]\n if file_name.startswith('__') or sub_module.endswith('.pyc'):\n continue\n module_path = modules.convert_to_dotted_path(os.path.normpath(sub_module))\n try:\n sub_module_obj = modules.import_module(module_path, skip_errors=True)\n except Exception:\n logger.error('Error while importing module: {} | {}'.format(module_path, traceback.format_exc()))\n continue\n if not sub_module_obj:\n continue\n for member in modules.iterate_module_members(sub_module_obj, predicate=inspect.isclass):\n if not issubclass(member[1], metanode.MetaNode):\n continue\n metadatamanager.register_meta_class(member[1])\n\n\ncreate_logger()\n","sub_path":"tpRigToolkit/dccs/maya/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"375717830","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# stdlib imports\n\nimport json\nimport datetime\n\n\nclass Hypocenter:\n\n \"\"\"\n a conversion class used to create, parse, and validate hypocenter\n data as part of processing data.\n \"\"\"\n\n # JSON Keys\n LATITUDE_KEY = \"Latitude\" # Required\n LONGITUDE_KEY = \"Longitude\" # Required\n DEPTH_KEY = \"Depth\" # Required\n TIME_KEY = 
\"Time\" # Required\n LATITUDE_ERROR_KEY = \"LatitudeError\" # Optional\n LONGITUDE_ERROR_KEY = \"LongitudeError\" # Optional\n DEPTH_ERROR_KEY = \"DepthError\" # Optional\n TIME_ERROR_KEY = \"TimeError\" # Optional\n\n def __init__(\n self,\n newLatitude=None,\n newLongitude=None,\n newDepth=None,\n newTime=None,\n newLatitudeError=None,\n newLongitudeError=None,\n newDepthError=None,\n newTimeError=None,\n ):\n \"\"\"Initializing the object. Constructs an empty object if all arguments are None\n\n newLatitude: a double containing the latitude in degrees\n newLongitude: a double containing the longitude in degrees\n newDepth: a double containing the depth in kilometers\n newTime: a datetime containing the origin time\n newLatitudeError: a double containing the latitude error\n newLongitudeError: a double containing the longitude error\n newDepthError: a double containing the depth error\n newTimeError: a double containing the time error\n \"\"\"\n\n # required keys\n if newLatitude is not None:\n self.latitude = newLatitude\n\n if newLongitude is not None:\n self.longitude = newLongitude\n\n if newDepth is not None:\n self.depth = newDepth\n\n if newTime is not None:\n self.time = newTime\n\n # optional keys\n if newLatitudeError is not None:\n self.latitudeError = newLatitudeError\n\n if newLongitudeError is not None:\n self.longitudeError = newLongitudeError\n\n if newDepthError is not None:\n self.depthError = newDepthError\n\n if newTimeError is not None:\n self.timeError = newTimeError\n\n def fromJSONString(self, JSONString):\n \"\"\"Populates object from a JSONString\n\n JSONString: a required string containing the JSON formatted text\n \"\"\"\n JSONObject = json.loads(JSONString)\n self.fromDict(JSONObject)\n\n def fromDict(self, aDict):\n \"\"\"Populates object from a dictionary\n\n aDict: required dictionary\n \"\"\"\n\n # required keys\n try:\n self.latitude = aDict[self.LATITUDE_KEY]\n self.longitude = aDict[self.LONGITUDE_KEY]\n self.depth = aDict[self.DEPTH_KEY]\n timeString = aDict[self.TIME_KEY][:-1] + \"000Z\"\n self.time = datetime.datetime.strptime(timeString, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n except (ValueError, KeyError, TypeError) as e:\n print(\"Dictionary format error, missing required keys: %s\" % e)\n\n # optional keys\n if self.LATITUDE_ERROR_KEY in aDict:\n self.latitudeError = aDict[self.LATITUDE_ERROR_KEY]\n\n if self.LONGITUDE_ERROR_KEY in aDict:\n self.longitudeError = aDict[self.LONGITUDE_ERROR_KEY]\n\n if self.DEPTH_ERROR_KEY in aDict:\n self.depthError = aDict[self.DEPTH_ERROR_KEY]\n\n if self.TIME_ERROR_KEY in aDict:\n self.timeError = aDict[self.TIME_ERROR_KEY]\n\n def toJSONString(self):\n \"\"\"Converts object to JSON formatted string\n\n Returns: The JSON formatted message as a string\n \"\"\"\n JSONObject = self.toDict()\n\n return json.dumps(JSONObject, ensure_ascii=False)\n\n def toDict(self):\n \"\"\"Converts object to a dictionary\n\n Returns: The Dictionary\n \"\"\"\n\n aDict = {}\n\n # required keys\n try:\n aDict[self.LATITUDE_KEY] = self.latitude\n aDict[self.LONGITUDE_KEY] = self.longitude\n aDict[self.DEPTH_KEY] = self.depth\n timeString = self.time.isoformat(timespec=\"milliseconds\") + \"Z\"\n aDict[self.TIME_KEY] = timeString\n\n except (NameError, AttributeError) as e:\n print(\"Missing required data error: %s\" % e)\n\n # optional keys\n if hasattr(self, \"latitudeError\"):\n aDict[self.LATITUDE_ERROR_KEY] = self.latitudeError\n\n if hasattr(self, \"longitudeError\"):\n aDict[self.LONGITUDE_ERROR_KEY] = self.longitudeError\n\n if 
hasattr(self, \"depthError\"):\n aDict[self.DEPTH_ERROR_KEY] = self.depthError\n\n if hasattr(self, \"timeError\"):\n aDict[self.TIME_ERROR_KEY] = self.timeError\n\n return aDict\n\n def isValid(self):\n \"\"\"Checks to see if object is valid\n\n Returns: True if object if valid, False otherwise\n \"\"\"\n errorList = self.getErrors()\n\n return not errorList\n\n def getErrors(self):\n \"\"\"Gets list of object validation errors\n\n Returns: a list of strings containing the validation error messages\n \"\"\"\n errorList = []\n\n # required keys:\n # latitude\n try:\n if self.latitude < -90 or self.latitude > 90:\n errorList.append(\n \"Latitude in Hypo Class not in the range of -90 to 90.\"\n )\n except (NameError, AttributeError):\n errorList.append(\"No Latitude in Hypo Class.\")\n\n # longitude\n try:\n if self.longitude < -180 or self.longitude > 180:\n errorList.append(\n \"Longitude in Hypo Class not in the range of -180 to 180.\"\n )\n except (NameError, AttributeError):\n errorList.append(\"No Longitude in Hypo Class.\")\n\n # depth\n try:\n if self.depth < -100 or self.depth > 1500:\n errorList.append(\n \"Depth in Hypo Class not in the range of -100 to 1500.\"\n )\n except (NameError, AttributeError):\n errorList.append(\"No Depth in Hypo Class.\")\n\n return errorList\n","sub_path":"python/processingformats/hypocenter.py","file_name":"hypocenter.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"288481488","text":"\"\"\"\nИзучение туториала с\nhttps://towardsdatascience.com/object-detection-with-neural-networks-a4e2c46b4491\n\nНельзя так просто созранить сеть...\nhttps://cv-tricks.com/how-to/freeze-tensorflow-models\n\nРузультат\nКороч, чувак видимо очень сильно верил в то что его тема работает\nПотеря двух дней работы...\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nimport logging\nimport os\nimport shutil\nimport json\nimport pathlib\nimport pandas as pd\nimport doctest\nimport pprint\n\nlogging.basicConfig(format='%(levelname)s %(name)s %(asctime)s %(message)s ')\nLOGGER = logging.getLogger('rect_turot_log')\nLOGGER.setLevel(logging.DEBUG)\n\n\ndef generate_sample(rect_color=0, back_color=255, sample_shape=(300, 300)):\n sample = np.full(fill_value=back_color, shape=sample_shape, dtype=np.uint8)\n x, y, w, h = 0, 0, 0, 0\n x = np.random.randint(low=0, high=sample_shape[1] - 1)\n y = np.random.randint(low=0, high=sample_shape[0] - 1)\n max_w = sample_shape[1] - x\n max_h = sample_shape[0] - y\n\n w = np.random.randint(low=1, high=max_w)\n h = np.random.randint(low=1, high=max_h)\n sample[y:y+h, x:x+w] = rect_color\n sample_data = json.dumps({\n 'x': int(x),\n 'y': int(y),\n 'w': int(w),\n 'h': int(h)\n })\n return sample, sample_data\n\n\ndef create_dataset_dir_tree(path):\n \"\"\"\n Создает структуру директории датасета\n path/\n images/\n labels/\n\n если path существует то удалит его и пересоздасто\n :param path:\n :return:\n \"\"\"\n path_images = path + os.sep + 'images'\n path_labels = path + os.sep + 'labels'\n if os.path.exists(path):\n shutil.rmtree(path)\n os.mkdir(path)\n os.mkdir(path_labels)\n os.mkdir(path_images)\n\n return path_images, path_labels\n\n\ndef generate_dataset_simple():\n \"\"\"\n генерирует 50 000 картинок для начальных тестов\n Параметры картинки\n * разрешение 300x300\n * цвет фона белый\n * объект: черный прямоегольник\n * формат cv2.IMWRITE_GRAYSCALE\n\n сохранит в 
папку ./simplest_dataset\n :return:\n \"\"\"\n\n path = './simplest_dataset'\n sample_shape = (8, 8)\n datasen_size = 50000\n extention = 'png'\n rect_color = 0\n background_color = 255\n\n path_images, path_labels = create_dataset_dir_tree(path)\n\n for number in range(datasen_size):\n sample, label_str = generate_sample(sample_shape=sample_shape,\n rect_color=rect_color,\n back_color=background_color)\n sample_file_name = str(number) + '.' + extention\n label_file_name = str(number) + '.txt'\n cv2.imwrite(path_images + os.sep + sample_file_name, sample)\n label_file = open(path_labels + os.sep + label_file_name, 'w')\n label_file.write(label_str)\n label_file.close()\n\n file_summary_path = path + os.sep + 'summary.txt'\n file_summary = open(file_summary_path, 'w')\n summary = {\n 'size': datasen_size,\n 'sample_shape': sample_shape,\n 'extention': extention\n }\n\n file_summary.write(json.dumps(summary))\n file_summary.close()\n\n # plt.imshow(sample, cmap='gray')\n # plt.show()\n\n\ndef load_dataset_simple(path: pathlib.Path)->dict:\n \"\"\"\n ЗАгрузит датасет сгененированный generate_dataset_simple\n :return:\n \"\"\"\n summary_path = str(path.absolute()) + os.sep + 'summary.txt'\n images_path = str(path.absolute()) + os.sep + 'images'\n labels_path = str(path.absolute()) + os.sep + 'labels'\n\n summary_file = open(summary_path, 'r')\n summary = json.loads(summary_file.read())\n\n images = np.ndarray(shape=(summary['size'], *summary['sample_shape']))\n labels = np.zeros(shape=(summary['size'], 4))\n\n for sample_number in range(summary['size']):\n image_path = images_path + os.sep + str(sample_number) + '.' + summary['extention']\n label_path = labels_path + os.sep + str(sample_number) + '.txt'\n\n image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\n label = json.loads(open(label_path, 'r').read())\n images[sample_number] = image\n labels[sample_number][0] = label['x']\n labels[sample_number][1] = label['y']\n labels[sample_number][2] = label['w']\n labels[sample_number][3] = label['h']\n\n dataset = {\n 'images': images,\n 'labels': labels,\n 'summary': summary\n }\n\n return dataset\n\n\ndef show_several_image(images: np.ndarray):\n\n for i in range(1, 9):\n plt.subplot(3, 3, i)\n plt.imshow(images[i])\n\n plt.show()\n\n\ndef full_connected(tensor, input_size, out_size, activation=None, name_scope='layer'):\n \"\"\"\n Генерирует полносвязный слой\n :param tensor:\n :param input_size:\n :param out_size:\n :param activation tf.nn one of activation\n :param name_scope:\n :return:\n \"\"\"\n\n with tf.name_scope(name_scope):\n w = tf.Variable(tf.random_normal(shape=[input_size, out_size]), name='W')\n b = tf.Variable(tf.random_normal(shape=[out_size]), name='B')\n if not activation:\n return tf.add(tf.matmul(tensor, w), b, name='LOGIT')\n return activation(tf.add(tf.matmul(tensor, w), b), name='SIGMA')\n\n\ndef IOU_error(input_data: np.ndarray) -> float:\n \"\"\"\n Intersection Over Union\n Intersection - Значение размера площади пересевения bound rect найденного сетью и реальной рамки прямоегольника\n Union - Значение объединения площадей рамки найденной сетью и рамки реальной,\n при этом пересечение считается один раз\n\n\n Intersection(I) Over Union(U) = I/U as result\n then minimize: result\n\n :param input_data: [desired, net_out]\n :param desired: [x,y,w,h] - left most corner column, row, and width and height from it (cols and rows).\n :param net_out: equal to desired\n :return:\n >>> sess = tf.Session()\n >>> bboxA = np.array([0,0,150,150],dtype=np.float32)\n >>> bboxB = 
np.array([3, 3, 150, 150], dtype=np.float32)\n >>> a = sess.run(IOU_error([bboxA, bboxB]))\n >>> 0.89 < a < 0.95\n True\n \"\"\"\n\n sess = tf.Session()\n desired = input_data[0]\n net_out = tf.abs(input_data[1])\n\n x_max_i = tf.maximum(desired[0], net_out[0])\n y_max_i = tf.maximum(desired[1], net_out[1])\n x_min_i = tf.minimum(desired[0] + desired[2], net_out[0] + net_out[2])\n y_min_i = tf.minimum(desired[1] + desired[3], net_out[1] + net_out[3])\n\n # one was added to exclude zero area size\n\n\n\n zero = tf.constant(0, dtype=tf.float32)\n one = tf.constant(1, dtype=tf.float32)\n\n # print(sess.run([x_max_i, y_max_i, x_min_i, y_min_i]))\n\n # ones added to exclude zero square area\n intersect_area = tf.maximum(zero, x_min_i - x_max_i + 1) * tf.maximum(zero, y_min_i - y_max_i + 1)\n union_area = (desired[2] * desired[3] + net_out[2] * net_out[3]) - intersect_area\n\n iou = intersect_area / union_area\n return iou\n\n\n\ndef work_one():\n \"\"\"\n Производит регрессию на картинку с изображдением одного прямоегольника\n Пытается предсказать параметры прямоегольника\n\n делаю нечто вроде\n\n model = Sequential([\n Dense(200, input_dim=64),\n Activation('relu'),\n Dropout(0.2),\n Dense(4)\n ])\n model.compile('adadelta', 'mse')\n\n :return:\n \"\"\"\n\n dataset = load_dataset_simple(pathlib.Path('./simplest_dataset'))\n input_shape = dataset['summary']['sample_shape']\n input_sample = tf.placeholder(shape=[None, *input_shape, 1], dtype=tf.float32, name='Input')\n desired_result = tf.placeholder(shape=[None, 4], dtype=tf.float32, name='DesiredBBox')\n\n conv1 = tf.layers.conv2d(\n inputs=input_sample,\n filters=32,\n kernel_size=[3, 3],\n padding=\"same\",\n activation=tf.nn.relu)\n\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n flatten = tf.layers.flatten(pool1)\n\n h1 = tf.layers.dense(inputs=flatten,\n units=200,\n activation=tf.nn.relu,\n name='h1')\n\n do = tf.nn.dropout(x=h1, keep_prob=0.2)\n out = tf.layers.dense(inputs=do,\n units=4,\n activation=tf.nn.relu,\n name='OUT')\n\n\n\n with tf.Session() as sess:\n # stacked_error_data = tf.stack([desired_result, out])\n # IOU_mapped = tf.map_fn(IOU_error, stacked_error_data)\n # ones = tf.ones(shape=tf.shape(IOU_mapped), dtype=tf.float32)\n # loss = tf.losses.mean_squared_error(labels=ones, predictions=IOU_mapped)\n #\n RMSE = tf.sqrt(tf.losses.mean_squared_error(desired_result, out))\n optimizer = tf.train.AdadeltaOptimizer(0.1).minimize(RMSE)\n #\n batch_size = 5000\n epoch = 500\n old_error = 15\n best_vals = []\n\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n\n for cur_epoch in range(epoch):\n samples_indexes = np.random.randint(low=0,\n high=dataset['summary']['size'],\n size=batch_size)\n\n images = dataset['images'][samples_indexes]\n labels = dataset['labels'][samples_indexes]\n # show_several_image(images)\n feed_dict = {\n input_sample: np.reshape(images, (-1, 8, 8, 1)),\n desired_result: labels\n }\n\n _, error, result = sess.run([optimizer, RMSE, out], feed_dict=feed_dict)\n # res_frame = pd.DataFrame(result, columns=list('xywh'))\n # LOGGER.info('RESULT\\n' + str(res_frame))\n LOGGER.info('epoch: {epoch} error:{error}'.format(epoch=cur_epoch,error=error))\n if error < old_error and error >=0.01:\n old_error = error\n saver.save(sess, './models/great.ckpt')\n image = np.reshape(dataset['images'][1], (-1, 8,8,1))\n label = dataset['labels'][1]\n\n feed_dict = {\n input_sample: image\n }\n\n result = sess.run([out], feed_dict=feed_dict)\n\n LOGGER.info('des{des} 
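# A hedged plain-Python counterpart to the TensorFlow IOU above, handy for
# quick sanity checks outside a session; `iou_xywh` is my name, and unlike
# IOU_error it does not pad the intersection size by one pixel.
def iou_xywh(a, b):
    """IoU of two [x, y, w, h] boxes."""
    iw = max(0.0, min(a[0] + a[2], b[0] + b[2]) - max(a[0], b[0]))
    ih = max(0.0, min(a[1] + a[3], b[1] + b[3]) - max(a[1], b[1]))
    inter = iw * ih
    union = a[2] * a[3] + b[2] * b[3] - inter
    return inter / union if union else 0.0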
res{res}'.format(des=label, res=result))\n best_vals = [label, result]\n # LOGGER.info('des{des} res{res}'.format(des=labels, res=result))\n\n # test\n LOGGER.info('min erro ' + str(old_error) + str(best_vals))\n\n\n tf.summary.merge_all()\n\n sw = tf.summary.FileWriter('./log', tf.get_default_graph())\n sw.close()\n\ndef test_existedmodel():\n with tf.Session() as sess:\n saver = tf.train.import_meta_graph('./models/great.ckpt.meta')\n saver.restore(sess, './models/great.ckpt')\n pprint.pprint(sess.graph.get_operations())\n\n dataset = load_dataset_simple(pathlib.Path('./simplest_dataset'))\n input_shape = dataset['summary']['sample_shape']\n input_sample = tf.placeholder(shape=[None, *input_shape], dtype=tf.float32, name='Input')\n\n feed_dict = {\n input_sample: [dataset['images'][0]]\n }\n\n desired = dataset['labels'][0]\n\n # with sess.graph.as_default():\n # tf.get_ope\n # sess.run(tf.initialize_all_variables())\n # predict = tf.get_default_graph().get_operation_by_name('OUT')\n # print(predict.eval(feed_dict=feed_dict, session=sess))\n # result = sess.run('OUT', feed_dict=feed_dict)\n # LOGGER.info('{desired}, {result}'.format(desired=desired, result=result))\n\n\n\nif __name__ == '__main__':\n LOGGER.info('start')\n # generate_dataset_simple()\n # dataset = load_dataset_simple(pathlib.Path('./simplest_dataset'))\n # show_several_image(dataset['images'])\n work_one()\n # test_existedmodel()\n\n # sess = tf.Session()\n # bboxA = np.array([0,0,150,150],dtype=np.float32)\n # bboxB = np.array([3, 3, 150, 150], dtype=np.float32)\n # a = sess.run(IOU_error([bboxA, bboxB]))\n # print(a)\n\n\n\n # doctest.testmod()\n\n LOGGER.info('end')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"TensorFlowTests/RectangelDetection.py","file_name":"RectangelDetection.py","file_ext":"py","file_size_in_byte":12732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"156372858","text":"import pygame\nfrom pygame.locals import *\n\nfrom utils.mask import Mask\n\nmouse_mask = pygame.Mask((2, 2))\nmouse_mask.fill()\n\n\nclass Event:\n def __init__(self, data):\n if \"name\" not in data:\n raise KeyError(\"Event must have a 'name'.\")\n for key, value in data.items():\n self.__setattr__(key, value)\n self.handled = False\n\n def set_attr(self, name, value):\n self.__setattr__(name, value)\n\n\ndef post_event(data_dict):\n event = pygame.event.Event(USEREVENT, data_dict)\n pygame.event.post(event)\n\n\ndef get_mouse_point_mask(mouse_pos):\n mouse_rect = Rect(mouse_pos[0], mouse_pos[1], 2, 2)\n return Mask(mouse_rect, mouse_mask)\n\n\ndef event_filter():\n mouse_pos = pygame.mouse.get_pos()\n mouse_point_mask = get_mouse_point_mask(mouse_pos)\n\n event_queue = [Event({\"name\": \"mouse_over\", \"pos\": mouse_pos, \"mouse_point_mask\": mouse_point_mask})] # 预置一个mouse_over事件\n\n for event in pygame.event.get():\n event_attributes = event.__dict__\n if event.type in my_events:\n temp = my_events[event.type]\n if isinstance(temp, dict): # 如果有二级列表\n if event_attributes[\"button\"] not in temp:\n continue\n event_attributes[\"name\"] = temp[event_attributes[\"button\"]]\n event_attributes[\"mouse_point_mask\"] = mouse_point_mask\n else:\n event_attributes[\"name\"] = temp\n event_queue.append(Event(event_attributes))\n elif event.type == USEREVENT:\n event_queue.append(Event(event_attributes))\n return event_queue\n\n\nmy_events = {\n QUIT: \"quit\",\n MOUSEBUTTONDOWN: {\n 1: \"mouse_left_down\", # MOUSE BUTTON DOWN = 
5\n 2: \"mouse_mid_down\",\n 3: \"mouse_right_down\",\n 4: \"mouse_wheel_forward_down\",\n 5: \"mouse_wheel_backward_down\"\n },\n MOUSEBUTTONUP: {\n 1: \"mouse_left_up\", # MOUSE BUTTON UP = 6\n 2: \"mouse_mid_up\",\n 3: \"mouse_right_up\",\n },\n MOUSEMOTION: \"mouse_motion\",\n KEYDOWN: \"key_down\",\n KEYUP: \"key_up\"\n}","sub_path":"core/event/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"126861015","text":"import os\nimport time\nimport signal\nimport signals_common\n\n\"\"\" Signal Trapping Python Script \"\"\"\n\n\"\"\" Objective: Write a python script that redirects the signal you send it to a specified process via GNU/Linux. \"\"\"\n\n\nprint('My PID is ', os.getpid())\n\n# Print Signal Received\ndef printsignal(signum, stack):\n print('Received a signal called ', signum)\n targetpid = input('Specify the PID of your deepest enemy.')\n try:\n val = int(targetpid)\n except ValueError:\n print(\"That's not an int!\")\n os.kill(targetpid, signum)\n\n# Look back to beginning\nwhile True:\n signals_common.cycle()\n print('Waiting...')\n time.sleep(3)\n","sub_path":"signals/redirect_signals.py","file_name":"redirect_signals.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"576439743","text":"import numpy as np\nimport cv2\nfrom heuristic_faces import HeuristicFaceClassifier\nimport pickle\nimport pandas as pd\nimport sys\n\n\ncap = cv2.VideoCapture(0)\nclf = HeuristicFaceClassifier()\n\nhorizontal_model = pickle.load(open(\"horizontal_gaze.pkcls\", \"rb\"))\nvertical_model = pickle.load(open(\"vertical_gaze.pkcls\", \"rb\"))\n\nif len(sys.argv) >= 2:\n frame = cv2.imread(sys.argv[1])\nelse:\n frame = cv2.imread(\"test.jpg\")\n# Our operations on the frame come here\ngray = frame\n\nfaces = clf.detect_faces(frame)\n# Display the resulting frame\nfor face in faces:\n (x, y, w, h) = face[\"face\"]\n cv2.rectangle(gray, (x, y), (x + w, y + h), (255, 255, 0), 2)\n for eye in face[\"eyes\"]:\n (ex, ey, ew, eh) = eye[\"eye\"]\n ex, ey = x + ex, y + ey\n cv2.rectangle(gray, (ex, ey), (ex + ew, ey + eh), (255, 255, 0), 2)\n pupil = eye[\"pupil\"]\n cv2.circle(gray, (ex + pupil[0], ey + pupil[1]), 2, (255, 0, 0), 1)\n\n face_size = face['face'][2]\n dataframe = pd.DataFrame({\n 'r_eye_px': face['eyes'][1]['pupil'][0] / face_size,\n 'l_eye_px': face['eyes'][0]['pupil'][0] / face_size,\n 'r_eye_s': face['eyes'][1]['eye'][2] / face_size,\n 'l_eye_s': face['eyes'][0]['eye'][2] / face_size,\n 'r_eye_x': face['eyes'][1]['eye'][0] / face_size,\n 'l_eye_x': face['eyes'][0]['eye'][0] / face_size,\n 'r_eye_y': face['eyes'][1]['eye'][1]/face_size,\n 'l_eye_y': face['eyes'][0]['eye'][1]/face_size,\n 'r_eye_py': face['eyes'][1]['pupil'][1]/face_size,\n 'l_eye_py': face['eyes'][0]['pupil'][1]/face_size}, index=[0])\n\n horizontal_prediction = round(horizontal_model.predict(dataframe)[0], 1)\n vertical_prediction = round(vertical_model.predict(dataframe)[0], 1)\n label = \"H: \" + str(horizontal_prediction) \\\n + \" V: \" + str(vertical_prediction)\n cv2.putText(gray, label, (x, y), thickness=2, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(255, 255, 255))\n\ncv2.imshow('frame',gray)\n# cv2.imwrite(\"sample-class-2.jpg\", gray)\ncv2.waitKey()\n\n# When everything done, release the 
capture\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"image_test.py","file_name":"image_test.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"489996172","text":"# -*- coding:utf-8 -*- \n'''\n__author__:liubin \n\n'''\nimport logging\nimport logging.config\nimport os\ndef get_logger():\n\n\n filepath = os.path.join(os.path.dirname(__file__), 'logging.conf')\n\n logging.config.fileConfig(filepath)\n return logging.getLogger()\n\n\ndef call_me():\n\n logger = get_logger()\n # filepath = os.path.join(os.path.dirname(__file__), 'logging.conf')\n # print(filepath)\n\n logger.info(\"hi\")\ncall_me()\n\n\"D:/dev/pagedemo/testcase\"\n\n\n\n\n","sub_path":"common/getlogger.py","file_name":"getlogger.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"503246245","text":"import os\r\nimport webapp2\r\nfrom google.appengine.ext.webapp import template\r\n\r\nfrom datetime import datetime\r\nfrom pytz.gae import pytz\r\n\r\nBASE_URL='http://yourgoogleappname.appspot.com'\r\n#BASE_URL='' # For testing via localhost\r\nHOME_NUMBER='415-555-1212'\r\nCELL_1='415-555-1313'\r\nNAME_1='Jim'\r\nCELL_2='415-555-1414'\r\nNAME_2='Sally'\r\nNINE_URL=BASE_URL+'/9.wav'\r\nFAMILY_NAME='smith'\r\nPARTY_DAY=6\r\nPASSWORD='1234567'\r\n\r\nclass BasePage(webapp2.RequestHandler):\r\n def get(self):\r\n self.post()\r\n\r\n def _error(self, msg, redirecturl=None):\r\n templatevalues = {\r\n 'msg': msg,\r\n 'redirecturl': redirecturl\r\n }\r\n xml_response(self, 'error.xml', templatevalues)\r\n\r\nclass AnswerPage(BasePage):\r\n def post(self):\r\n try:\r\n params = {}\r\n if(pst_weekday() == PARTY_DAY):\r\n params = { 'timeout' : 4, 'preface' : ('If you are here for the party, press 1 then type in your code or wait to ring the ' + FAMILY_NAME + '.') }\r\n else:\r\n params = { 'timeout' : 2, 'preface' : ('Please wait while I connect you.') }\r\n xml_response( self, 'answer.xml', params )\r\n except:\r\n self._error(\"Error parsing answer page\")\r\n\r\nclass MainPage(BasePage):\r\n def post(self):\r\n key = self.request.get('Digits')\r\n try:\r\n if (key == '1') or (key == '4'):\r\n xml_response( self, 'code.xml' )\r\n else:\r\n xml_response( self, 'forward.xml', { 'number' : HOME_NUMBER, 'action' : '/app/dialhomeresult' } )\r\n except:\r\n self._error(\"Error parsing main page\")\r\n\r\nclass DialHomeResultPage(BasePage):\r\n def post(self):\r\n status = self.request.get('DialCallStatus')\r\n #sid = self.request.get('DialCallSid')\r\n #duration = self.request.get('DialCallDuration')\r\n try:\r\n if status == 'completed':\r\n xml_response( self, 'end.xml' )\r\n else:\r\n xml_response( self, 'nothomemenu.xml' )\r\n except:\r\n self._error(\"Error parsing dial home result page\")\r\n \r\nclass CodePage(BasePage):\r\n def post(self):\r\n key = self.request.get('Digits')\r\n try:\r\n if key == PASSWORD:\r\n xml_response( self, 'buzz.xml' )\r\n else:\r\n xml_response( self, 'codeerror.xml' )\r\n except:\r\n self._error(\"Error parsing code page\")\r\n\r\nclass NotHomeResultPage(BasePage):\r\n def post(self):\r\n key = self.request.get('Digits')\r\n try:\r\n if key == '1':\r\n xml_response( self, 'forward.xml', { 'preface' : ('Dialing '+NAME_1), 'number' : CELL_1 } )\r\n elif key == '2':\r\n xml_response( self, 'forward.xml', { 'preface' : ('Dialing '+NAME_2), 'number' : CELL_2 } )\r\n elif (key == '3') or (key == '4'):\r\n 
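# Hedged refactor note for the digit handling in this handler: the elif
# chain could be table-driven. MENU is my illustration; NAME_*/CELL_* and
# the template names come from this file.
# MENU = {
#     '1': ('forward.xml', {'preface': 'Dialing ' + NAME_1, 'number': CELL_1}),
#     '2': ('forward.xml', {'preface': 'Dialing ' + NAME_2, 'number': CELL_2}),
#     '3': ('code.xml', {}),
#     '4': ('code.xml', {}),
#     '*': ('end.xml', {}),
# }
# page, params = MENU.get(key, ('nothomemenu.xml', {'preface': 'I did not understand.'}))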
xml_response( self, 'code.xml' )\r\n            elif key == '*':\r\n                xml_response( self, 'end.xml' )\r\n            else:\r\n                xml_response( self, 'nothomemenu.xml', { 'preface' : 'I did not understand.' } )\r\n        except:\r\n            self._error(\"Error parsing not home page.\")\r\n\r\nclass SimpleForwardPage(BasePage):\r\n    def post(self):\r\n        try:\r\n            xml_response( self, 'forward.xml', { 'number' : HOME_NUMBER } )\r\n        except:\r\n            self._error(\"Error parsing forward page\")\r\n\r\nutc = pytz.timezone('UTC')\r\npst = pytz.timezone('US/Pacific')\r\n\r\ndef pst_weekday():\r\n    return datetime.utcnow().replace(tzinfo=utc).astimezone(pst).weekday()\r\n\r\ndef xml_response(handler, page, templatevalues={}):\r\n    \"\"\"\r\n    Renders an XML response using a provided template page and values\r\n    \"\"\"\r\n    templatevalues[ 'baseurl' ] = BASE_URL\r\n    templatevalues[ 'family' ] = FAMILY_NAME\r\n    templatevalues[ 'nine' ] = NINE_URL\r\n    templatevalues[ 'name1' ] = NAME_1\r\n    templatevalues[ 'name2' ] = NAME_2\r\n    path = os.path.join(os.path.dirname(__file__), page)\r\n    handler.response.headers[\"Content-Type\"] = \"text/xml\"\r\n    handler.response.out.write(template.render(path, templatevalues))\r\n\r\n\r\napp = webapp2.WSGIApplication([('/app/answer', AnswerPage),\r\n                               ('/app/code', CodePage),\r\n                               ('/app/forward', SimpleForwardPage),\r\n                               ('/app/main', MainPage),\r\n                               ('/app/dialhomeresult', DialHomeResultPage),\r\n                               ('/app/nothomeresult', NotHomeResultPage)],\r\n                              debug=True)\r\n","sub_path":"home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"584123778","text":"from django.db import models\nimport datetime\n\nclass TimestampedModel(models.Model):\n    \"\"\"Used to attach *created at* and *updated at* timestamps for any model class\n    that mixes this in.\"\"\"\n    created_at = models.DateTimeField(auto_now_add=True,\n                                      default=datetime.datetime.now)\n    updated_at = models.DateTimeField(auto_now=True,\n                                      default=datetime.datetime.now)\n    class Meta:\n        abstract = True\n\n\nclass LocatableModel(models.Model):\n    \"\"\"Give *latitude* and *longitude* properties to any model that mixes\n    this in.\"\"\"\n    latitude = models.DecimalField(max_digits=8, decimal_places=3, null=True,\n                                   blank=True)\n    longitude = models.DecimalField(max_digits=8, decimal_places=3, null=True,\n                                    blank=True)\n    def _get_point(self):\n        if self.latitude and self.longitude:\n            return Point(self.longitude, self.latitude)\n        else:\n            return Point(0, 0)\n    def _set_point(self, point):\n        self.latitude = point.y\n        self.longitude = point.x\n        self.save()\n    point = property(_get_point, _set_point)\n    class Meta:\n        abstract = True\n\n\n","sub_path":"whwn/util/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"120065925","text":"import random\r\nimport sys\r\n\r\nsys.setrecursionlimit( 1000000 ) # long type,32bit OS 4B,64bit OS 8B(1bit for sign)\r\n\r\n\"\"\"Extended Euclidean algorithm; returns (g, x, y) with a*x + b*y = gcd(a, b), recursive solution\"\"\"\r\ndef egcd(a, b):\r\n\r\n    if a == 0:\r\n        return (b, 0, 1)\r\n    else:\r\n        g, x, y = egcd( b % a, a )\r\n        return (g, y - (b // a) * x, x)\r\n\r\n\r\n\"\"\"checks whether the modular inverse exists; if not, raises an error, since such p and q cannot be used\"\"\"\r\ndef modinv(a, m):\r\n    g, x, y = egcd(a, m)\r\n    if g != 1:\r\n        raise Exception('modular inverse does not exist')\r\n    else:\r\n        return x % m\r\n\r\n\"\"\"returns the multiplicative inverse of b modulo n\"\"\"\r\ndef mulinv(b, n):\r\n    g, x, _ = egcd( b, n )\r\n    if g == 1:\r\n        return x % n\r\n\r\n\r\n\"\"\"Miller-Rabin test; n is the number to test, k is the number of rounds to run\"\"\"\r\ndef miller_rabin(n, k):\r\n    if n == 2:\r\n        return True\r\n\r\n    if n % 2 == 0:\r\n        return False\r\n\r\n    r, s = 0, n - 1\r\n    while s % 2 == 0:\r\n        r += 1\r\n        s //= 2\r\n    for _ in range( k ):\r\n        a = random.randrange( 2, n - 1 )\r\n        x = power( a, s, n )\r\n        if x == 1 or x == n - 1:\r\n            continue\r\n        for _ in range( r - 1 ):\r\n            x = power( x, 2, n )\r\n            if x == n - 1:\r\n                break\r\n        else:\r\n            return False\r\n    return True\r\n\r\n\"\"\"square-and-multiply alternative to the built-in modular pow\"\"\"\r\ndef power(a, n, p):\r\n    res = 1\r\n    a = a % p\r\n    while (n > 0):\r\n        if ((n & 1) == 1):\r\n            res = (res * a) % p\r\n        n = n >> 1\r\n        a = (a * a) % p\r\n    return res\r\n\r\n\r\n\"\"\"here we set how many bits the generated prime should have and how many Miller-Rabin rounds to run\"\"\"\r\ndef primtest():\r\n    primtest = 0\r\n    while primtest == 0:\r\n        prim = random.getrandbits( 1024 )\r\n        primtest = miller_rabin( prim, 1 )\r\n\r\n    return prim\r\n\r\n\r\n\"\"\"key generation\"\"\"\r\ndef kulcsgen():\r\n    p = primtest()\r\n    q = primtest()\r\n    n = p * q\r\n    fn = (p - 1) * (q - 1)\r\n    e_gcd = 0\r\n    while e_gcd != 1:\r\n        e = random.randint( 1, 101 )\r\n        e_gcd = mulinv( fn, e )\r\n\r\n    k = 1\r\n    d_test = False\r\n    while d_test == False:\r\n        if (k * fn + 1) % e == 0:\r\n            d = (k * fn + 1) // e\r\n            d_test = True\r\n        k += 1\r\n    return (p, q, n, fn, e, d)\r\n\r\n\r\np, q, n, fn, e, d = kulcsgen()\r\n\r\ndef titkositas(m, n, e):\r\n    return power( m, e, n )\r\n\r\n\"\"\"CRT decryption; p and q play the roles of the moduli m1, m2\"\"\"\r\ndef kinaimaradek(p, q, n, d, c):\r\n    d = int( d )\r\n\r\n    x1 = c % p\r\n    x2 = c % q\r\n\r\n    fP = d % (p - 1)\r\n    fQ = d % (q - 1)\r\n\r\n    x1 = power( x1, fP, p )\r\n    x2 = power( x2, fQ, q )\r\n\r\n    M1 = modinv( p, q )\r\n    M2 = modinv( q, p )\r\n\r\n    x = (x1 * q * M2 + x2 * p * M1) % n\r\n\r\n    return x\r\n\r\nprint( \"p:\", p )\r\n\r\nprint( \"q:\", q )\r\n\r\nprint( \"n:\", n )\r\n\r\nprint( \"f(n):\", fn )\r\n\r\nprint( \"e:\", e )\r\n\r\nprint( \"d:\", d )\r\n\r\nm = 1024\r\nprint( \"m:\", m )\r\nc = titkositas( m, n, e )\r\nprint( \"value of the encoded message c:\" )\r\nprint( c )\r\nprint( \"value of c after decryption:\" )\r\nprint( kinaimaradek( p, q, n, d, c ) )\r\nprint( \"PK: (e:\", e, \" n:\", n, \")\" )\r\nprint( \"SK: (d:\", d, \" n:\", n, \")\" )\r\n","sub_path":"rsaboiiii.py","file_name":"rsaboiiii.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"79447687","text":"import mysql.connector\nfrom datetime import datetime\nimport plotly.plotly as py\nimport plotly\nimport plotly.graph_objs as go\n\n\ndef graph():\n    number = 0\n    inUserLoop = True\n    while inUserLoop:\n        number = input('Number of Era: ')\n        if number.isdigit():\n            number = int(number)\n            inUserLoop = False\n        else:\n            print(\"Invalid Input\")\n\n\n    plotly.tools.set_credentials_file(username='edwardsainsbury', api_key='23d2jFb3YDBkl9sWe1Vx')\n\n    cnx = mysql.connector.connect(user='root', password='mysql', host='127.0.0.1', database='market4')\n    cursor = cnx.cursor()\n    cursor.execute(\"SELECT MAX(era_no) FROM matches\")\n    latestEra = cursor.fetchone()[0]\n    cursor.execute(\"SELECT MAX(player_one_id) FROM matches WHERE era_no = %s\" % str(latestEra))\n    highest_id = 
cursor.fetchone()[0]\n\n startEra = 0\n if number != 0:\n startEra = latestEra - number\n cursor.execute(\"SELECT MIN(player_one_id) FROM matches WHERE era_no = %s\" % str(startEra+1))\n lowest_id = cursor.fetchone()[0]\n cursor.execute(\"SELECT MAX(score) FROM matches\")\n highest_score = cursor.fetchone()[0]\n cursor.execute(\"SELECT MIN(score) FROM matches WHERE score > 50\")\n lowest_score = cursor.fetchone()[0]\n era_data = []\n counter = 0\n era_incorrect = []\n highest_scores = []\n for x in range(startEra, latestEra, 1):\n cursor.execute(\"SELECT player_one_id, score FROM matches WHERE era_no = %s\" % str(x+1))\n data = cursor.fetchall()\n id_array = {}\n print(str(((x+1 - startEra) / (latestEra - startEra)) * 100) + \"%\")\n\n for array in data:\n\n id_array.update({array[0]: 0}) if array[0] not in id_array else id_array\n id_array[array[0]] = array[1]\n era_data.append(id_array)\n\n cursor.execute(\"SELECT MAX(score) FROM matches WHERE era_no = %s\" % str(x+1))\n highest_score_era = cursor.fetchone()[0]\n if highest_score_era is None:\n continue\n highest_scores.append(267-highest_score_era/2)\n cursor.execute(\"SELECT incorrect FROM matches WHERE era_no = %s and score = %s\" % (str(x+1), highest_score_era))\n incorrect = cursor.fetchall()[0][0]\n incorrect = eval(incorrect)\n era_incorrect.append(incorrect)\n\n # Create traces\n trace_array = []\n trace2_array = []\n TypeTraceArray = []\n for x in range(lowest_id, highest_id+1):\n values = []\n era = []\n id_lines = []\n\n for y in range(len(era_data)):\n\n if x in era_data[y]:\n values.append(era_data[y][x])\n id_lines.append(x)\n era.append(y)\n\n\n trace_array.append(go.Scatter(x=era, y=values, mode='lines', name=str(x)))\n trace2_array.append(go.Scatter(x=era, y=id_lines, mode='lines', name=str(x)))\n era = []\n for i in range(2):\n types = []\n era = []\n for x in range(len(era_incorrect)):\n types.append(era_incorrect[x][i])\n era.append(x)\n TypeTraceArray.append(go.Scatter(x=era, y=types, mode='lines', name=str(i)))\n\n TypeTraceArray.append(go.Scatter(x=era, y=highest_scores, mode='lines+markers', name='score'))\n\n eraNumber = []\n for i in range(startEra, latestEra):\n eraNumber.append(i)\n\n values = []\n trace3_array = []\n for x in range(lowest_score, highest_score+1):\n values = []\n print(str(((x+1-lowest_score) / (highest_score+1-lowest_score)) * 100) + \"%\")\n counter += 1\n for eras in era_data[:len(era_data)-1]:\n eraArray = []\n for y in eras:\n eraArray.append(eras[y])\n\n values.append(eraArray.count(x))\n if all(v == 0 for v in values):\n continue\n trace3_array.append(go.Scatter(x=eraNumber, y=values, mode='lines', name=str(x)))\n\n\n plotly.offline.plot(trace_array, filename='EraScores.html', auto_open=False)\n plotly.offline.plot(trace2_array, filename='PlayerLongevity.html', auto_open=False)\n #plotly.offline.plot(trace3_array, filename='Success')\n plotly.offline.plot(trace3_array, filename='generation_graph.html', auto_open=False)\n plotly.offline.plot(TypeTraceArray, filename='inccrrect.html', auto_open=False)\n\n\n\n # cursor.execute(\"SELECT timestamp FROM matches\")\n # times = cursor.fetchall()\n # diff_array = []\n # match_array = []\n # trace4_array = []\n # runningAverage = []\n # sum = 0\n # for x in range(len(times)-1):\n # diff_array.append((times[x+1][0]-times[x][0]).total_seconds()/60)\n # sum += (times[x+1][0]-times[x][0]).total_seconds()/60\n # runningAverage.append(sum/(x+1))\n # match_array.append(x)\n # trace4_array.append(go.Scatter(x=match_array, y=diff_array, 
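# Hedged sketch: the running average in this commented-out block can be
# vectorized; `times_list` stands in for the fetched datetime column, and
# numpy is assumed imported (this module itself does not import it).
# diffs = np.diff([t.timestamp() for t in times_list]) / 60.0   # gaps in minutes
# running_avg = np.cumsum(diffs) / np.arange(1, len(diffs) + 1)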
mode='lines+markers', name='Time'))\n # trace4_array.append(go.Scatter(x=match_array, y=runningAverage, mode='lines+markers', name='Average'))\n # py.offline.plot(trace4_array, filename='MatchTime.html')\n # cnx.close()\n\ngraph()\n","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"188059491","text":"try:\n from PyQt4.QtCore import *\n from PyQt4.QtGui import *\nexcept:\n from PyQt5.QtCore import *\n from PyQt5.QtGui import *\n from PyQt5.QtWidgets import *\nfrom copy import copy,deepcopy\nfrom decimal import Decimal\nimport run_parameters_parser as yaml_parser\nimport sys, os, math, epics, scipy.constants, numpy\nimport json, requests, datetime, time\nimport collections\nimport numpy as np\n\nclass GenericThread(QThread):\n signal = pyqtSignal()\n\n def __init__(self, function, *args, **kwargs):\n QThread.__init__(self)\n self._stopped = False\n self.existent = 'existent file'\n self.function = function\n self.args = args\n self.kwargs = kwargs\n\n def __del__(self):\n self.wait()\n\n def stop(self):\n self._stopped = True\n\n def run(self):\n self.signal.emit()\n if not self._stopped:\n self.object = self.function(*self.args, **self.kwargs)\n\nclass signalling_monitor(QObject):\n\n valueChanged = pyqtSignal(int)\n\n def __init__(self, ref, parameter, interval=200):\n super(signalling_monitor, self).__init__()\n self.timer = QTimer(self)\n self.timer.setInterval(interval)\n self.timer.timeout.connect(self.emitValue)\n self.ref = ref\n self.parameter = parameter\n\n def stop(self):\n self.timer.stop()\n\n def start(self, interval=None):\n self.setInterval(interval)\n self.timer.start()\n\n def setInterval(self, interval):\n if interval is not None:\n self.timer.setInterval(interval)\n\n def emitValue(self):\n self.valueChanged.emit(getattr(self.ref, self.parameter))\n\nclass RunParameterController(QObject):\n\n def __init__(self, app, view, model):\n super(RunParameterController, self).__init__()\n self.my_name = 'controller'\n self.app = app\n self.model = model\n self.view = view\n self.runParameterLayouts = [\n self.view.s02_parameter_groupbox,\n self.view.c2v_parameter_groupbox,\n self.view.vela_parameter_groupbox,\n self.view.ba1_parameter_groupbox,\n self.view.injector_parameter_groupbox,\n self.view.simulation_parameter_groupbox,\n self.view.scan_groupBox,\n self.view.directory_groupBox\n ]\n self.formLayoutList = [formLayout for layout in self.runParameterLayouts for\n formLayout in layout.findChildren((QFormLayout,QGridLayout))]\n self.accessibleNames = {}\n for layout in self.formLayoutList:\n childCount = layout.count()\n for child in range(0,childCount):\n widget = layout.itemAt(child).widget()\n if widget is not None and widget.accessibleName() is not None:\n self.accessibleNames[widget.accessibleName()] = widget\n else:\n pass\n self.update_macro_particle_combo()\n self.initialize_run_parameter_data()\n self.model.data.scannableParametersDict = self.get_scannable_parameters_dict()\n self.populate_scan_combo_box()\n self.view.parameter_scan.stateChanged.connect(lambda: self.toggle_scan_parameters_state(self.view.parameter_scan))\n self.view.bsol_track_checkBox.stateChanged.connect(self.toggle_BSOL_tracking)\n self.view.runButton.clicked.connect(self.run_astra)\n self.view.runButton.clicked.connect(lambda: self.export_parameter_values_to_yaml_file(auto=True))\n 
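# A hedged usage sketch for the signalling_monitor class defined above
# (`watched` and `on_change` are my placeholder names):
# monitor = signalling_monitor(watched, 'value', interval=500)
# monitor.valueChanged.connect(on_change)  # receives getattr(watched, 'value')
# monitor.start()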
self.view.loadSettingsButton.clicked.connect(self.load_settings_from_directory)\n self.view.directory.textChanged[str].connect(self.check_load_settings_button)\n self.view.directory.textChanged[str].emit(self.view.directory.text())\n self.abort_scan = False\n\n def connect_auto_load_settings(self, state):\n if state:\n self.view.directory.textChanged[str].connect(self.load_settings_from_directory)\n else:\n try:\n self.view.directory.textChanged[str].disconnect(self.load_settings_from_directory)\n except:\n pass\n\n def check_load_settings_button(self, text):\n if os.path.isfile(text + '/settings.yaml'):\n self.view.loadSettingsButton.setEnabled(True)\n else:\n self.view.loadSettingsButton.setEnabled(False)\n\n def load_settings_from_directory(self):\n if os.path.isfile(self.model.data.parameterDict['simulation']['directory'] + '/settings.yaml'):\n self.import_parameter_values_from_yaml_file(self.model.data.parameterDict['simulation']['directory'] + '/settings.yaml')\n\n def toggle_BSOL_tracking(self):\n widget = self.view.bsol_track_checkBox\n if widget.isChecked():\n self.view.buckingsol_strength.setEnabled(False)\n self.view.sol_strength.valueChanged.connect(self.set_BSOL_tracked_value)\n self.view.sol_strength.valueChanged.emit(self.view.sol_strength.value())\n else:\n self.view.buckingsol_strength.setEnabled(True)\n try:\n self.view.sol_strength.valueChanged.disconnect(self.set_BSOL_tracked_value)\n except:\n pass\n\n def set_BSOL_tracked_value(self, value):\n self.view.buckingsol_strength.setValue(float(-0.9*value))\n\n def update_macro_particle_combo(self):\n combo = self.view.macro_particle\n for i in range(2,7):\n combo.addItem(str(2**(3*i)), i)\n combo.setCurrentIndex(1)\n\n def split_accessible_name(self, aname):\n if len((aname.split(':'))) == 3:\n dictname, pv, param = map(str, aname.split(':'))\n else:\n param = None\n dictname, pv = map(str, aname.split(':'))\n return dictname, pv, param\n\n def get_widget_value(self, widget):\n if type(widget) is QLineEdit:\n try:\n value = float(widget.text())\n except:\n value = str(widget.text())\n elif type(widget) is QDoubleSpinBox:\n value = round(float(widget.value()), widget.decimals())\n elif type(widget) is QSpinBox:\n value = int(widget.value())\n elif type(widget) is QCheckBox:\n value = True if widget.isChecked() else False\n elif type(widget) is QComboBox:\n value = widget.itemData(widget.currentIndex())\n if value == '' or value is None:\n value = str(widget.currentText())\n if isinstance(value, QVariant):\n value = value.toString()\n value = str(value)\n else:\n print('Widget Error! 
Type = ', type(widget))\n value = None\n return value\n\n @pyqtSlot()\n def update_value_in_dict(self):\n widget = self.sender()\n dictname, pv, param = self.split_accessible_name(widget.accessibleName())\n value = self.get_widget_value(widget)\n if param is None:\n self.model.data.parameterDict[dictname].update({pv: value})\n else:\n try:\n self.model.data.parameterDict[dictname][pv].update({param: value})\n except:\n print('Error ', dictname, pv, param, value)\n\n def analyse_children(self, layout):\n for k, v in self.accessibleNames.items():\n widget = v\n if type(widget) is QLineEdit:\n widget.textChanged.connect(self.update_value_in_dict)\n widget.textChanged.emit(widget.placeholderText())\n elif type(widget) is QDoubleSpinBox or type(widget) is QSpinBox:\n widget.valueChanged.connect(self.update_value_in_dict)\n widget.valueChanged.emit(widget.value())\n self.scannableParameters.append(str(widget.accessibleName()))\n elif type(widget) is QCheckBox:\n value = True if widget.isChecked() else False\n widget.stateChanged.connect(self.update_value_in_dict)\n widget.stateChanged.emit(widget.isChecked())\n elif type(widget) is QComboBox:\n widget.currentIndexChanged.connect(self.update_value_in_dict)\n widget.currentIndexChanged.emit(widget.currentIndex())\n\n def initialize_run_parameter_data(self):\n self.scannableParameters = []\n for layout in self.formLayoutList:\n self.analyse_children(layout)\n # return self.model.data.latticeDict\n\n def get_scannable_parameters_dict(self):\n scannableParameterDict = collections.OrderedDict()\n unscannableParameters = ['macro_particle', 'injector_space_charge',\n 'rest_of_line_space_charge', 'end_of_line']\n for key in self.scannableParameters:\n if key not in unscannableParameters:\n if ':' in key:\n scannableParameterDict[' - '.join(list(key.split(':'))[1:])] = key\n return scannableParameterDict\n\n def populate_scan_combo_box(self):\n scanParameterComboBox = self.view.parameter\n for (parameterDisplayStr, parameter) in self.model.data.scannableParametersDict.items():\n scanParameterComboBox.addItem(parameterDisplayStr, parameter)\n\n def update_widget_from_dict(self, aname):\n widget = self.get_object_by_accessible_name(aname)\n dictname, pv, param = self.split_accessible_name(aname)\n if param is None:\n value = self.model.data.parameterDict[dictname][pv]\n else:\n value = self.model.data.parameterDict[dictname][pv][param]\n self.update_widgets_with_values(aname, value)\n\n def get_object_by_accessible_name(self, aname):\n if aname in self.accessibleNames:\n return self.accessibleNames[aname]\n else:\n return None\n\n def update_widgets_with_values(self, aname, value):\n if isinstance(value, (dict)):\n for k,v in value.items():\n self.update_widgets_with_values(aname.replace('Dict','')+':'+k,v)\n else:\n widget = self.get_object_by_accessible_name(aname)\n if widget is not None:\n if type(widget) is QLineEdit:\n widget.setText(str(value))\n if type(widget) is QDoubleSpinBox:\n widget.setValue(float(value))\n if type(widget) is QSpinBox:\n widget.setValue(int(value))\n if type(widget) is QCheckBox:\n if value is True:\n widget.setChecked(True)\n else:\n widget.setChecked(False)\n if type(widget) is QComboBox:\n index = widget.findText(value)\n if index == -1:\n index = widget.findData(value)\n widget.setCurrentIndex(index)\n\n ## Need to port this to the unified controller\n @pyqtSlot()\n def import_parameter_values_from_yaml_file(self, filename=None):\n if filename is None:\n dialog = QFileDialog()\n filename = 
QFileDialog.getOpenFileName(dialog, caption='Open file',\n                                               directory=self.model.data.parameterDict['simulation']['directory'],\n                                               filter=\"YAML files (*.YAML *.YML *.yaml *.yml)\")\n            filename = filename[0] if isinstance(filename,tuple) else filename\n            filename = str(filename)\n        if not filename == '' and not filename is None and (filename[-4:].lower() == '.yml' or filename[-5:].lower() == '.yaml'):\n            loaded_parameter_dict = yaml_parser.parse_parameter_input_file(filename)\n            for (parameter, value) in loaded_parameter_dict.items():\n                self.update_widgets_with_values(parameter, value)\n        else:\n            print('Failed to import, please provide a filename')\n\n    def convert_data_types(self, export_dict={}, data_dict={}, keyname=None):\n        if keyname is not None:\n            export_dict[keyname] = dict()\n            edict = export_dict[keyname]\n        else:\n            edict = export_dict\n        for key, value in data_dict.items():\n            if isinstance(value, (dict, collections.OrderedDict)) and not key == 'sub_elements':\n                subdict = self.convert_data_types({}, value)\n                edict.update({key:subdict})\n            else:\n                if not key == 'sub_elements':\n                    # value = self.model.data.Framework.convert_numpy_types(value)\n                    edict.update({key:value})\n        return export_dict\n\n    def create_subdirectory(self, dir):\n        if not os.path.exists(dir):\n            os.makedirs(dir, exist_ok=True)\n\n    def export_parameter_values_to_yaml_file(self, auto=False):\n        export_dict = dict()\n        data_dicts = ['generator', 'INJ', 'S02', 'C2V', 'EBT', 'BA1', 'simulation']\n        if self.model.data.scanDict['parameter_scan']:\n            if not auto:\n                dialog = QFileDialog()\n                directory = QFileDialog.getExistingDirectory(dialog,\"Select Directory\")\n                filename = '/scan_settings.yaml'\n            else:\n                directory = self.model.data.parameterDict['simulation']['directory']\n                filename = '/scan_settings.yaml'\n            data_dicts.append('scan')\n        else:\n            if not auto:\n                dialog = QFileDialog()\n                filename, _filter = QFileDialog.getSaveFileNameAndFilter(dialog, caption='Save File', directory='c:\\\\',\n                                                   filter=\"YAML Files (*.YAML *.YML *.yaml *.yml)\")\n                filename = filename[0] if isinstance(filename,tuple) else filename\n                directory, filename = os.path.split(filename)\n            else:\n                directory = self.model.data.parameterDict['simulation']['directory']\n                filename = 'settings.yaml'\n        if not filename == \"\":\n            print('directory = ', directory, ' filename = ', filename, '\\njoin = ', str(os.path.relpath(directory + '/' + filename)))\n            self.create_subdirectory(directory)\n            for n in data_dicts:\n                export_dict = self.convert_data_types(export_dict, self.model.data.parameterDict[n], n)\n            yaml_parser.write_parameter_output_file(str(os.path.relpath(directory + '/' + filename)), export_dict)\n        else:\n            print( 'Failed to export, please provide a filename.')\n\n    def toggle_scan_parameters_state(self, object):\n        performScanCheckbox = object\n        if performScanCheckbox.isChecked():\n            self.view.parameter.setEnabled(True)\n            self.view.parameter_scan_from_value.setEnabled(True)\n            self.view.parameter_scan_to_value.setEnabled(True)\n            self.view.parameter_scan_step_size.setEnabled(True)\n            self.update_widget_from_dict('scan:parameter')\n            self.update_widget_from_dict('scan:parameter_scan_from_value')\n            self.update_widget_from_dict('scan:parameter_scan_to_value')\n            self.update_widget_from_dict('scan:parameter_scan_step_size')\n        else:\n            self.view.parameter.setEnabled(False)\n            self.view.parameter_scan_from_value.setEnabled(False)\n            self.view.parameter_scan_to_value.setEnabled(False)\n            self.view.parameter_scan_step_size.setEnabled(False)\n\n    def disable_run_button(self, scan=False):\n        if not 
scan:\n self.view.runButton.setEnabled(False)\n else:\n self.view.runButton.clicked.disconnect()\n self.view.runButton.setText('Abort')\n self.view.runButton.clicked.connect(self.abort_ongoing_scan)\n\n\n def read_from_epics(self, time_from=None, time_to=None):\n for l in self.model.data.lattices:\n self.model.data.read_values_from_epics(self.model.data.parameterDict[l], lattice=True)\n for key, value in self.model.data.parameterDict[l].items():\n if value['type'] == \"quadrupole\":\n self.update_widgets_with_values(l + ':' + key + ':k1l', value['k1l'])\n if value['type'] == \"solenoid\":\n self.update_widgets_with_values(l + ':' + key + ':field_amplitude', value['field_amplitude'])\n if value['type'] == \"cavity\":\n self.update_widgets_with_values(l + ':' + key + ':phase', value['phase'])\n self.update_widgets_with_values(l + ':' + key + ':field_amplitude', value['field_amplitude'])\n self.model.data.read_values_from_epics(self.model.data.parameterDict['generator'], lattice=False)\n for key, value in self.model.data.parameterDict['generator'].items():\n if key == \"charge\":\n self.update_widgets_with_values('generator:' + key + ':value', value['value'])\n # self.update_widget_from_dict(key)\n\n return\n\n def abort_ongoing_scan(self):\n self.abort_scan = True\n\n def enable_run_button(self, scan=False):\n self.view.runButton.setText('Track')\n self.view.runButton.clicked.disconnect()\n if not scan:\n self.view.runButton.setEnabled(True)\n else:\n pass\n self.view.runButton.clicked.connect(self.run_astra)\n self.view.runButton.clicked.connect(lambda: self.export_parameter_values_to_yaml_file(auto=True))\n return\n\n def toggle_finished_tracking(self):\n if self.finished_tracking:\n self.finished_tracking = False\n else:\n self.finished_tracking = True\n\n def setup_scan(self):\n self.export_parameter_values_to_yaml_file(auto=True)\n try:\n scan_start = float(self.model.data.scanDict['parameter_scan_from_value'])\n scan_end = float(self.model.data.scanDict['parameter_scan_to_value'])\n scan_step_size = float(self.model.data.scanDict['parameter_scan_step_size'])\n scan_range = np.arange(scan_start, scan_end + scan_step_size, scan_step_size)\n self.view.progressBar.setRange(0,len(scan_range))\n except ValueError:\n print(\"Enter a numerical value to conduct a scan\")\n self.scan_parameter = self.model.data.scanDict['parameter']\n self.scan_basedir = str(self.model.data.parameterDict['simulation']['directory'])\n self.scan_basevalue = self.get_widget_value(self.get_object_by_accessible_name(self.scan_parameter))\n dictname, pv, param = self.split_accessible_name(self.scan_parameter)\n self.scan_range = np.arange(scan_start, scan_end + scan_step_size, scan_step_size)\n self.scan_no = 0\n self.continue_scan()\n\n def do_scan(self):\n self.model.run_script()\n\n def continue_scan(self):\n if not self.abort_scan and self.scan_no < len(self.scan_range):\n self.view.progressBar.setValue(self.scan_no+1)\n self.scan_progress = self.scan_no+1\n current_scan_value = round(self.scan_range[self.scan_no], 5)\n print('Scanning['+str(self.scan_no)+']: Setting ', self.scan_parameter, ' to ', current_scan_value)\n self.update_widgets_with_values(self.scan_parameter, current_scan_value)\n dictname, pv, param = self.split_accessible_name(self.scan_parameter)\n subdir = (self.scan_basedir + '/' + pv + '_' + str(current_scan_value)).replace('//','/')\n self.update_widgets_with_values('simulation:directory', subdir)\n self.thread = GenericThread(self.do_scan)\n self.thread.finished.connect(self.continue_scan)\n 
self.thread.start()\n self.scan_no += 1\n else:\n self.abort_scan = False\n self.enable_run_button(scan=self.model.data.scanDict['parameter_scan'])\n self.reset_progress_bar_timer()\n self.update_widgets_with_values(self.scan_parameter, self.scan_basevalue)\n self.update_widgets_with_values('simulation:directory', self.scan_basedir)\n\n def app_sequence(self):\n if self.model.data.scanDict['parameter_scan']:\n self.setup_scan()\n else:\n self.thread = GenericThread(self.do_scan)\n self.thread.finished.connect(self.enable_run_button)\n self.thread.start()\n return\n\n def reset_progress_bar_timer(self):\n self.timer = QTimer()\n self.timer.setInterval(1000)\n self.timer.setSingleShot(True)\n self.timer.timeout.connect(self.view.progressBar.reset)\n self.timer.start()\n\n def run_astra(self):\n self.disable_run_button(scan=self.model.data.scanDict['parameter_scan'])\n self.app_sequence()\n","sub_path":"controller/run_parameter_controller.py","file_name":"run_parameter_controller.py","file_ext":"py","file_size_in_byte":20832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"85523198","text":"# -*- coding: utf-8 -*-\r\n\r\n# 암호화 키를 입력 받는다.\r\nraw = input(\"암호화 키: \")\r\nif raw.isdigit():\r\n key=int(raw)\r\n \r\n # 평문을 입력 받는다.\r\n plaintext = input(\"평문: \")\r\n\r\n # 알파벳을 나열한 텍스트가 자주 쓰이기 때문에 미리 지정해둔다.\r\n alphabets = \"abcdefghijklmnopqrstuvwxyz\"\r\n cyphertext = \"\"\r\n for char in plaintext:\r\n \r\n # find 함수로 알파벳을 0~25의 수로 변환해 key를 더하고 26으로 나눈 나머지를 구한다.\r\n number = ( alphabets.find(char) + key ) % 26\r\n \r\n # 0~25의 수를 알파벳으로 변환한다.\r\n letter = alphabets[number] \r\n cyphertext += letter\r\n \r\n print(\"카이사르 암호문:\", cyphertext)\r\n\r\nelse:\r\n print(\"숫자를 입력하세요.\")","sub_path":"caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"154995080","text":"# Пробуем написать \"Сапер\"\nn, m, k = [int(i) for i in input().split()] # строки, столбцы, кол-во мин\na = [[0 for j in range(m)] for i in range(n)] # пустая таблица из 0\nfor i in range(k): # перебираем кол-во мин\n row, col = (int(i) - 1 for i in input().split()) # записываем строку и столбец одной мины при каждом проходе\n a[row][col] = -1 # записываем мину по координатам столбца и колонны\n# дальше нам нужно заглянуть в каждую пустую ячейку, и находясь в ячейке пробежаться еще вокруг и поискать мины\nfor i in range(n): # перебираем строки\n for j in range(m): # перебираем столбцы\n if a[i][j] == 0: # ячейка без мины\n for di in range(-1, 2): # перебираем соседние строки (просто цифры -1 0 1)\n for dj in range(-1, 2): # перебираем соседние столбцы (просто цифры -1 0 1)\n ai = i + di # координата по строке\n aj = j + dj # координата по столбцу\n if 0 <= ai < n and 0 <= aj < m and a[ai][aj] == -1: # проверка вхождения в диапазон и мины по соседству\n a[i][j] += 1\n# в поле 5х4 цикл пройдется 5 * 4 = 20 раз по ячейкам и, находясь в ячейке еще по 9(3*3) проходов вокруг. Всего будет 20*9=180 раз (или n*m*9 раз)\n# так же можно искать не пустые ячейки, а ячейки с миной (if a[i][j] == -1) и вокруг прибавлять единицу\n# код почти такой же, меняется последнее условие:\n'''\nif 0 <= ai < n and 0 <= aj < m:\n if a[ai][aj] != -1: # проверка, чтобы не увеличивать на 1 мину\n a[ai][aj] += 1\n'''\n# дальше просто заменяем -1 на \"*\" и 0 на \".\"\nfor i in range(n):\n for j in range(m):\n if a[i][j] == -1:\n print(' * ', end='')\n elif a[i][j] == 0:\n print(' . 
', end='')\n        else:\n            print(a[i][j], end='')\n    print()\n","sub_path":"2.6_4.py","file_name":"2.6_4.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"170497274","text":"import logging\nimport json\nfrom optparse import make_option\n\nfrom django.core.management.base import BaseCommand\n\nfrom vimeors.models import Video, User, VideoCategory, VideoTag, Like\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('vimeors')\n\n\nclass Command(BaseCommand):\n    help = 'Prepare structured dataset from raw database data'\n    option_list = BaseCommand.option_list + (\n        make_option('-d', '--dataset', dest='dataset'),)\n\n    def handle(self, *args, **options):\n        if options['dataset'] == 'userlikes':\n            self._prepare_user_likes()\n        elif options['dataset'] == 'videocategories':\n            self._prepare_video_categories()\n        elif options['dataset'] == 'videotags':\n            self._prepare_video_tags()\n        elif options['dataset'] == 'usertags':\n            self._prepare_user_tags()\n        logger.info('Done preparing data.')\n\n    def _prepare_user_likes(self):\n        logger.info('Preparing user likes dataset.')\n        dataset = {}\n        likes = Like.objects.all()\n        for like in likes:\n            user = '(%s) %s' % (like.user.uri[7:], like.user.name)\n            video = '(%s) %s' % (like.video.uri[8:], like.video.name)\n            dataset.setdefault(user, {})\n            dataset[user][video] = 1\n            logger.info('\"%s\" likes \"%s\"' % (user, video))\n        outputpath = 'vimeors/datasets/userlikes.json'\n        open(outputpath, 'w').write(json.dumps(dataset, indent=4))\n        logger.info('Dataset generated at: %s', outputpath)\n\n    def _prepare_video_categories(self):\n        logger.info('Preparing video categories dataset.')\n        dataset = {}\n        video_categories = VideoCategory.objects.all()\n        for vc in video_categories:\n            video = '(%s) %s' % (vc.video.uri[8:], vc.video.name)\n            category = '(%s) %s' % (vc.category.uri[12:], vc.category.name)\n            dataset.setdefault(video, {})\n            dataset[video][category] = 1\n            logger.info('\"%s\" categorized as \"%s\"' % (video, category))\n        outputpath = 'vimeors/datasets/videocategories.json'\n        open(outputpath, 'w').write(json.dumps(dataset, indent=4))\n        logger.info('Dataset generated at: %s', outputpath)\n\n    def _prepare_video_tags(self):\n        logger.info('Preparing video tags dataset.')\n        dataset = {}\n        video_tags = VideoTag.objects.all()\n        for vt in video_tags:\n            video = '(%s) %s' % (vt.video.uri[8:], vt.video.name)\n            try:\n                taguri = vt.tag.uri[6:]\n            except:\n                taguri = 'x'\n            tag = '(%s) %s' % (taguri, vt.tag.name)\n            dataset.setdefault(video, {})\n            dataset[video][tag] = 1\n            logger.info('\"%s\" tagged as \"%s\"' % (video, tag))\n        outputpath = 'vimeors/datasets/videotags.json'\n        open(outputpath, 'w').write(json.dumps(dataset, indent=4))\n        logger.info('Dataset generated at: %s', outputpath)\n\n    def _prepare_user_tags(self):\n        logger.info('Preparing user tags dataset.')\n        dataset = {}\n        likes = Like.objects.all()\n        for like in likes:\n            user = '(%s) %s' % (like.user.uri[7:], like.user.name)\n            dataset.setdefault(user, {})\n            tags = like.video.tags.all()\n            for tag in tags:\n                try:\n                    taguri = tag.tag.uri[6:]\n                except:\n                    taguri = 'x'\n                tag = '(%s) %s' % (taguri, tag.tag.name)\n                dataset[user].setdefault(tag, 0)\n                dataset[user][tag] += 1\n                logger.info('\"%s\" likes an item tagged \"%s\"' % (user, tag))\n        outputpath = 'vimeors/datasets/usertags.json'\n        open(outputpath, 'w').write(json.dumps(dataset, indent=4))\n        logger.info('Dataset generated at: %s' % 
outputpath)\n","sub_path":"vimeors/management/commands/_preparedata.py","file_name":"_preparedata.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"3370368","text":"import cv2\nimport numpy as np\nimport imutils\nfrom datetime import datetime\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport time\nimport datetime\nimport RPi.GPIO as gpio\nimport os\nimport math\nimport serial\nimport emailIntegration as EMAIL\n\n# Identify serial communication\nser = serial.Serial('/dev/ttyUSB0', 9600)\n\nforwardCount = 0\n##### Init the pins\ndef init():\n    gpio.setmode(gpio.BOARD)\n    gpio.setup(31, gpio.OUT)\n    gpio.setup(33, gpio.OUT)\n    gpio.setup(35, gpio.OUT)\n    gpio.setup(37, gpio.OUT)\n    gpio.setup(36, gpio.OUT)\n    gpio.output(36, False)\n    gpio.setup(7, gpio.IN, pull_up_down = gpio.PUD_UP)\n    gpio.setup(12, gpio.IN, pull_up_down = gpio.PUD_UP)\n    \ndef gameover():\n    gpio.output(31, False)\n    gpio.output(33, False)\n    gpio.output(35, False)\n    gpio.output(37, False)\n    gpio.cleanup()\n    \ndef closeGripper():\n    gpio.setmode(gpio.BOARD)\n    gpio.setup(36, gpio.OUT) #Gripper\n    \n    pwm = gpio.PWM(36, 50)\n    pwm.start(5)\n    \n    rate = 0.15\n    duty = float(3.5)\n    while True:\n        duty += rate\n        pwm.ChangeDutyCycle(duty)\n        time.sleep(0.2)\n        if duty >= 6.5:#6.25:\n            break\n    pwm.stop()\n    #clear the output pins\n    gpio.output(36, False)\n    gpio.cleanup()\n\ndef openGripper():\n    gpio.setmode(gpio.BOARD)\n    gpio.setup(36, gpio.OUT) #Gripper\n    \n    pwm = gpio.PWM(36, 50)\n    pwm.start(5)\n    \n    rate = 0.15\n    duty = float(6)\n    while True:\n        pwm.ChangeDutyCycle(duty)\n        duty -= rate \n        time.sleep(0.2)\n        if duty <= 3.5:\n            break\n    pwm.stop()\n    #clear the output pins\n    gpio.output(36, False)\n    gpio.cleanup()\n    \ndef forward(maxTicks):\n    init()\n    counterBR = np.uint64(0)\n    counterFL = np.uint64(0)\n\n    buttonBR = int(0)\n    buttonFL = int(0)\n\n    # Initialize pwm signal to control motor\n    pwm1 = gpio.PWM(37, 50) #Right side\n    pwm2 = gpio.PWM(31, 50) #Left side\n    val = 40\n    pwm1.start(val)\n    pwm2.start(val)\n    time.sleep(0.1)\n\n\n    while True: \n        if int(gpio.input(12)) != int(buttonBR):\n            buttonBR = int(gpio.input(12))\n            counterBR += 1\n        \n        if int(gpio.input(7)) != int(buttonFL):\n            buttonFL = int(gpio.input(7))\n            counterFL += 1\n        \n        if counterBR >= maxTicks:\n            pwm1.stop()\n        \n        if counterFL >= maxTicks:\n            pwm2.stop()\n        \n        if counterFL >= maxTicks and counterBR >= maxTicks:\n            pwm1.stop()\n            pwm2.stop()\n            gameover()\n            #Read serial stream\n            #line = ser.readline() #print(line)\n            #line = line.rstrip().lstrip()\n            #line = str(line)\n            #line = line.strip(\"'\")\n            #line = line.strip(\"b'\")\n            #print(line)\n            \n            #Return float\n            #currAngle = float(line)\n            #file.write(str(currAngle)+'\\n')\n            #file.write(str(currAngle)+','+str(maxTicks/98)+'\\n')\n            break\n    \ndef reverse(maxTicks):\n    init()\n    counterBR = np.uint64(0)\n    counterFL = np.uint64(0)\n\n    buttonBR = int(0)\n    buttonFL = int(0)\n\n    # Initialize pwm signal to control motor\n    pwm1 = gpio.PWM(33, 50) #Right side\n    pwm2 = gpio.PWM(35, 50) #Left side\n    val = 40\n    pwm1.start(val)\n    pwm2.start(val)\n    time.sleep(0.1)\n\n\n    while True:\n        if int(gpio.input(12)) != int(buttonBR):\n            buttonBR = int(gpio.input(12))\n            counterBR += 1\n        \n        if int(gpio.input(7)) != int(buttonFL):\n            buttonFL = int(gpio.input(7))\n            counterFL += 1\n        \n        if counterBR >= maxTicks:\n            pwm2.stop()\n        \n        if counterFL >= maxTicks:\n            pwm1.stop()\n        \n        if counterFL >= maxTicks and counterBR >= maxTicks:\n            pwm1.stop()\n            pwm2.stop()\n            
gameover()\n #Read serial stream\n #line = ser.readline() #print(line)\n #line = line.rstrip().lstrip()\n #line = str(line)\n #line = line.strip(\"'\")\n #line = line.strip(\"b'\")\n #print(line)\n \n #Return float\n #currAngle = float(line)\n #file.write(str(currAngle)+'\\n')\n #file.write(str(currAngle)+','+str(maxTicks/98)+'\\n')\n break\n\n \ndef pivotright(angle):\n init()\n offset = 1 #degrees\n counterBR = np.uint64(0)\n counterFL = np.uint64(0)\n\n buttonBR = int(0)\n buttonFL = int(0)\n\n # Initialize pwm signal to control motor\n pwm1 = gpio.PWM(31, 50) #Right side\n pwm2 = gpio.PWM(35, 50) #Left side\n val = 35\n pwm1.start(val)\n pwm2.start(val)\n time.sleep(0.1)\n \n if ser.in_waiting > 0:\n line = ser.readline() #print(line)\n line = line.rstrip().lstrip()\n line = str(line)\n line = line.strip(\"'\")\n line = line.strip(\"b'\")\n goalAngle = (float(line) + angle)%360\n\n\n while True:\n #Read serial stream\n line = ser.readline() #print(line)\n line = line.rstrip().lstrip()\n line = str(line)\n line = line.strip(\"'\")\n line = line.strip(\"b'\")\n currAngle = float(line)\n \n if int(gpio.input(12)) != int(buttonBR):\n buttonBR = int(gpio.input(12))\n counterBR += 1\n \n if int(gpio.input(7)) != int(buttonFL):\n buttonFL = int(gpio.input(7))\n counterFL += 1\n\n if currAngle+offset >= goalAngle and currAngle-offset <= goalAngle:\n pwm1.stop()\n pwm2.stop()\n gameover()\n break\n\n\ndef pivotleft(angle):\n init()\n offset = 1 #degrees\n counterBR = np.uint64(0)\n counterFL = np.uint64(0)\n\n buttonBR = int(0)\n buttonFL = int(0)\n\n # Initialize pwm signal to control motor\n pwm1 = gpio.PWM(33, 50) #Right side\n pwm2 = gpio.PWM(37, 50) #Left side\n val = 35\n pwm1.start(val)\n pwm2.start(val)\n time.sleep(0.1)\n \n if ser.in_waiting > 0:\n line = ser.readline() \n line = line.rstrip().lstrip()\n line = str(line)\n line = line.strip(\"'\")\n line = line.strip(\"b'\")\n goalAngle = (float(line) - angle)%360\n\n while True:\n line = ser.readline() \n line = line.rstrip().lstrip()\n line = str(line)\n line = line.strip(\"'\")\n line = line.strip(\"b'\")\n currAngle = float(line)\n \n if int(gpio.input(12)) != int(buttonBR):\n buttonBR = int(gpio.input(12))\n counterBR += 1\n \n if int(gpio.input(7)) != int(buttonFL):\n buttonFL = int(gpio.input(7))\n counterFL += 1\n\n if currAngle+offset >= goalAngle and currAngle-offset <= goalAngle:\n pwm1.stop()\n pwm2.stop()\n gameover()\n break\n \n\ndef detectOBI(image):\n global forwardCount\n height = (image.shape[0])\n width = (image.shape[1])\n \n #getting the HSV\n hsvImage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n \n #threshold = cv2.inRange(hsvImage, (65, 60, 30), (85, 255, 255)) #Green\n threshold = cv2.inRange(hsvImage, (150, 70, 50), (180, 255, 255)) #Red\n \n contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n #print(contours)\n \n centerX = int(640/2)\n centerY = int(480/2)\n \n #to print the center of the frame\n image = cv2.line(image, (centerX-20,centerY), (centerX+20,centerY), (0, 0, 0), 2)\n image = cv2.line(image, (centerX,centerY-20), (centerX,centerY+20), (0, 0, 0), 2)\n \n if len(contours) == 0:\n print('No block found')\n else:\n c = max(contours, key=cv2.contourArea)\n ((X,Y), radius) = cv2.minEnclosingCircle(c)\n image = cv2.circle(image, (int(X),int(Y)), int(radius), (0, 0, 255), 2)\n image = cv2.circle(image, (int(X),int(Y)), 2, (0, 0, 255), 2)\n \n cv2.putText(image, '('+str(X)+','+str(Y)+')', (20, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 2)\n degrees = 0\n if(X > 315 
and X < 325):\n if radius*2 > 400: #the object is close to the gripper\n closeGripper() #\n pic_time = 'pickedObject'#datetime.now().strftime('%Y%m%d%H%M%S')\n cv2.imwrite(pic_time+'.jpg', image)\n EMAIL.sendEmail(pic_time)\n reverse(forwardCount)\n forwardCount = 0\n else:\n forward(5)\n forwardCount += 5\n # openGripper()\n return image #within the zone\n \n if(X < centerX):\n #rotate left\n degrees = (320 - X)*0.061\n pivotleft(degrees)\n else:\n #rotate right\n degrees = (640 - X)*0.061\n pivotright(degrees)\n \n return image\n \n#cv2.drawContours(image, contours\n# initialize the Raspberry Pi camera\ncamera = PiCamera()\ncamera.resolution = (640, 480)\ncamera.framerate = 25\nrawCapture = PiRGBArray(camera, size=(640,480))\n\n# allow the camera to warmup\ntime.sleep(0.1)\n\n# define the codec and create VideoWriter object\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('trackblockandretrive.avi', fourcc, 10, (640, 480))\n# write frame to video file\n\n# keep looping\nfor frame in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=False):\n # grab the current frame\n start = datetime.datetime.now()\n \n image = frame.array\n image = cv2.rotate(image, cv2.ROTATE_180)\n \n processedImage = detectOBI(image)\n out.write(processedImage)\n \n # show the frame to our screen\n cv2.imshow(\"Frame\", processedImage)\n \n key = cv2.waitKey(1) & 0xFF\n # clear the stream in preparation for the next frame\n rawCapture.truncate(0)\n # press the 'q' key to stop the video stream\n if key == ord(\"q\"):\n gameover()\n break\n \n#cv2.waitKey(0)\n#cv2.destroyAllWindows()\n\n\n","sub_path":"grand_challenge/autonomous_tracking_green_block/works.py","file_name":"works.py","file_ext":"py","file_size_in_byte":10011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"396182726","text":"from flask import Flask, render_template, request, redirect, url_for, flash, jsonify\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Restaurant, MenuItem\napp = Flask(__name__)\n\n\nengine = create_engine('sqlite:///../restaurantmenu.db')\n# Bind the engine to the metadata of the Base class so that the\n# declaratives can be accessed through a DBSession instance\nBase.metadata.bind = engine\n \nDBSession = sessionmaker(bind=engine)\n# A DBSession() instance establishes all conversations with the database\n# and represents a \"staging zone\" for all the objects loaded into the\n# database session object. Any change made against the objects in the\n# session won't be persisted into the database until you call\n# session.commit(). 
If you're not happy about the changes, you can\n# revert all of them back to the last commit by calling\n# session.rollback()\nsession = DBSession()\n\n@app.route('/')\n@app.route('/restaurants')\ndef showRestaurants():\n\trestaurants = session.query(Restaurant).all()\n\treturn render_template('restaurants.html', restaurants=restaurants)\n\n@app.route('/restaurant/new', methods=['GET', 'POST'])\ndef newRestaurant():\n\tif request.method == 'POST':\n\t\trestaurant = Restaurant(name=request.form['name'])\n\t\tsession.add(restaurant)\n\t\tsession.commit()\n\t\tflash(\"New restaurant created!\")\n\t\treturn redirect(url_for('showRestaurants'))\n\telse:\n\t\treturn render_template('newRestaurant.html')\n\n@app.route('/restaurant/<int:restaurant_id>/edit')\ndef editRestaurant(restaurant_id):\n\trestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n\treturn render_template('editRestaurant.html', restaurant=restaurant)\n\n@app.route('/restaurant/<int:restaurant_id>/delete', methods=['GET', 'POST'])\ndef deleteRestaurant(restaurant_id):\n\trestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n\tif request.method == 'POST':\n\t\tsession.delete(restaurant)\n\t\tsession.commit()\n\t\treturn redirect(url_for('showRestaurants'))\n\telse:\n\t\treturn render_template('deleteRestaurant.html', restaurant=restaurant)\n\n@app.route('/restaurant/<int:restaurant_id>')\n@app.route('/restaurant/<int:restaurant_id>/menu')\ndef showMenu(restaurant_id):\n\trestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n\titems = session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()\n\treturn render_template('menu.html', restaurant=restaurant, items=items)\n\n@app.route('/restaurant/<int:restaurant_id>/menu/new')\ndef newMenuItem(restaurant_id):\n\trestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n\treturn render_template('newMenuItem.html', restaurant=restaurant)\n\n@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit')\ndef editMenuItem(restaurant_id, menu_id):\n\trestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n\titem = session.query(MenuItem).filter_by(id=menu_id).one()\n\treturn render_template('editMenuItem.html', restaurant=restaurant, item=item)\n\n@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete')\ndef deleteMenuItem(restaurant_id, menu_id):\n\trestaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n\titem = session.query(MenuItem).filter_by(id=menu_id).one()\n\treturn render_template('deleteMenuItem.html', restaurant=restaurant, item=item)\n\nif __name__ == '__main__':\n\tapp.secret_key = 'super_secret_key'\n\tapp.debug = True\n\tapp.run(host='0.0.0.0', port=5000)","sub_path":"vagrant/restaurantmenu/final_project/finalProject.py","file_name":"finalProject.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"333737690","text":"\"\"\"Add column tree to areas table\n\nRevision ID: a6e61a0d7623\nRevises: e28918424b7f\nCreate Date: 2020-05-20 00:58:26.994583\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a6e61a0d7623'\ndown_revision = 'e28918424b7f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('areas', sa.Column('tree', sa.String(), nullable=True))\n op.drop_column('areas', 'back_name')\n op.drop_column('areas', 'front_name')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('areas', sa.Column('front_name', sa.VARCHAR(), autoincrement=False, nullable=True))\n op.add_column('areas', sa.Column('back_name', sa.VARCHAR(), autoincrement=False, nullable=True))\n op.drop_column('areas', 'tree')\n # ### end Alembic commands ###\n","sub_path":"backend/app/alembic/versions/a6e61a0d7623_add_column_tree_to_areas_table.py","file_name":"a6e61a0d7623_add_column_tree_to_areas_table.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"177697599","text":"import unittest\n\n\ndef products_of_others(values):\n if len(values) < 2:\n raise IndexError\n\n # product of predecessors\n predecessors = [1 for i in range(len(values))]\n # product of successors\n successors = [1 for i in range(len(values))]\n\n for index, value in enumerate(values):\n if index == 0:\n continue\n predecessors[index] = predecessors[index-1] * values[index-1]\n\n i = len(values) - 2\n while i >= 0:\n successors[i] = successors[i+1] * values[i+1]\n i -= 1\n\n products = []\n for i in range(len(values)):\n products.append(predecessors[i] * successors[i])\n return products\n\n\nclass ProductsOfOthersTestCase(unittest.TestCase):\n def test_example(self):\n data = [1, 7, 3, 4]\n self.assertEqual(products_of_others(data), [84, 12, 28, 21])\n\n def test_two(self):\n data = [1, 2, 6, 5, 9]\n self.assertEqual(products_of_others(data), [540, 270, 90, 108, 60])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"interview_cake/products_of_others.py","file_name":"products_of_others.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"266918332","text":"import os\nimport unittest\nfrom pathlib import Path\nimport requests\nfrom flaskrun import FlaskRunner\n\nserver_file_py = Path(__file__).parent / 'server.py'\ncommand = [str(server_file_py)]\n\n\nclass TestFlaskBg(unittest.TestCase):\n\n def assert_not_running(self):\n with self.assertRaises(requests.exceptions.ConnectionError):\n requests.get('http://127.0.0.1:5000/say-hi')\n\n def assert_running(self):\n self.assertEqual(requests.get('http://127.0.0.1:5000/say-hi').text,\n 'privet')\n\n def test_first_arg_python3(self):\n self.assert_not_running()\n with FlaskRunner(['python3', str(server_file_py)]):\n self.assert_running()\n self.assert_not_running()\n\n def test_first_arg_none(self):\n self.assert_not_running()\n with FlaskRunner([None, str(server_file_py)]):\n self.assert_running()\n self.assert_not_running()\n\n def test_start_stop(self):\n # test the server is not running (yet)\n with self.assertRaises(requests.exceptions.ConnectionError):\n requests.get('http://127.0.0.1:5000/say-hi')\n\n # run server and get two different responses\n with FlaskRunner(command):\n self.assertEqual(requests.get('http://127.0.0.1:5000/say-hi').text,\n 'privet')\n self.assertEqual(requests.get('http://127.0.0.1:5000/say-bye').text,\n 'poka')\n\n # test the server is stopped\n with self.assertRaises(requests.exceptions.ConnectionError):\n requests.get('http://127.0.0.1:5000/say-hi')\n\n def test_env(self):\n assert os.environ.get('my_test_x_variable') is None\n\n with FlaskRunner(command):\n 
self.assertEqual(requests.get('http://127.0.0.1:5000/get-x').text,\n '')\n\n with FlaskRunner(command, add_env={'my_test_x_variable': '42'}):\n self.assertEqual(requests.get('http://127.0.0.1:5000/get-x').text,\n '42')\n\n # we did not change the environment: the variable was passed to particular Flask instance,\n # so if we start again, the variable is not defined\n with FlaskRunner(command):\n self.assertEqual(requests.get('http://127.0.0.1:5000/get-x').text,\n '')\n\n\nif __name__ == \"__main__\":\n TestFlaskBg().test_env()\n","sub_path":"tests/tst_bg.py","file_name":"tst_bg.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"376843725","text":"from django.urls import path, include\nfrom .views import (\n UserListApi,\n UserRegisterApi,\n MyProfileDetailApi,\n)\n\napp_name = \"admin_users\"\n\nprofile_urlpatterns = [\n path('', MyProfileDetailApi.as_view(), name='detail'),\n]\n\nuser_patterns = [\n path('register/', UserRegisterApi.as_view(), name='register'),\n path('myprofile/', include((profile_urlpatterns, 'myprofile'))),\n path('', UserListApi.as_view(), name='list'),\n]\n\nurlpatterns = [\n path('', include((user_patterns, 'users')))\n]\n","sub_path":"aibomed_test-master/src/apps/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"524608538","text":"''' Example tests showcasing the potential savings in computation time offered by AMR. The waveguide used in this test is a 3 port lantern. '''\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport optics\nfrom mesh import RectMesh3D\nimport LPmodes\nfrom misc import norm_nonu, normalize, overlap_nonu\nfrom prop import Prop3D\n\ndef compute_port_power(ds,AMR=False,ref_val=2e-4,max_iters=5,remesh_every=50):\n ''' Compute the output port powers for a 3 port lantern, given some simulation parameters. 
'''\n # wavelength\n wl = 1.0 #um\n\n # mesh \n xw = 64 #um\n yw= 64 #um\n zw = 10000 #um\n num_PML = int(4/ds) # number of cells\n dz = 1\n\n mesh = RectMesh3D(xw,yw,zw,ds,dz,num_PML)\n mesh.xy.max_iters = max_iters\n\n xg,yg = mesh.xg[num_PML:-num_PML,num_PML:-num_PML] , mesh.yg[num_PML:-num_PML,num_PML:-num_PML]\n\n # optic (3 port lantern)\n taper_factor = 4\n rcore = 2.2/taper_factor # INITIAL core radius\n rclad = 4\n nclad = 1.4504\n ncore = nclad + 0.0088\n njack = nclad - 5.5e-3\n\n lant = optics.lant3big(rcore,rclad,ncore,nclad,njack,rclad/2,zw,final_scale=taper_factor)\n\n def launch_field(x,y):\n return normalize(np.exp(10.j*x*wl/xw)*LPmodes.lpfield(x,y,0,1,rclad,wl,ncore,nclad))\n\n # propagation\n\n prop = Prop3D(wl,mesh,lant,nclad)\n\n if AMR:\n u , u0 = prop.prop2end(launch_field,ref_val=ref_val,remesh_every=remesh_every)\n else:\n u = u0 = prop.prop2end_uniform(launch_field(xg,yg))\n\n xg,yg = np.meshgrid(mesh.xy.xa,mesh.xy.ya,indexing='ij')\n\n w = mesh.xy.get_weights()\n\n # get the output port powers\n\n output_powers = []\n for pos in lant.final_core_locs:\n _m = norm_nonu(LPmodes.lpfield(xg-pos[0],yg-pos[1],0,1,rcore*taper_factor,wl,ncore,nclad),w)\n output_powers.append(np.power(overlap_nonu(_m,u,w),2))\n\n return np.array(output_powers)\n\n# example of how to use\noutput = compute_port_power(1/64,False)\nprint(output)\n","sub_path":"convergence.py","file_name":"convergence.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"130851829","text":"import pyautogui\nimport time\nimport datetime\nimport NiconClicker\nimport Nheart\nimport os\nimport shutil\nimport sys\nimport webbrowser\nfrom sys import exit\nfrom tkinter import *\nimport tkinter.messagebox\nimport tkinter.ttk as ttk\npyautogui.FAILSAFE = False\nfrom checkBR import brOK, browser\nfrom openpyxl import load_workbook\n\ndef mult(num1, num2, num3,num4): \n\n\n #nPages = num1 #창 몇개\n heartMax = num2 #최대하트수\n pgMax = num3 #최대 서치다운\n #load_wb = load_workbook(\"blog_list.xlsm\", data_only=True)\n load_wb = load_workbook(\"nList.xlsx\", data_only=True)\n load_ws = load_wb['list'] #시트 이름으로 불러오기\n last_row = load_ws.max_row \n \n if num4 < 5:\n cell1st = 5\n elif num4> int(last_row):\n cell1st = int(last_row)\n else:\n cell1st = num4 #시작 셀\n \n if cell1st+num1 > int(last_row)+1:\n nPages = int(last_row) - cell1st + 1\n else:\n nPages = num1\n \n\n aWork = 10 #한번에 작업할 양\n nWork = nPages//aWork\n extraWork = nPages%aWork\n \n cell = cell1st #해당 워크세튼 회차의 첫작업 셀번호\n \n for i in range(nWork):\n for k in range(aWork): \n url = load_ws.cell(cell1st+i*aWork+k,2).value\n webbrowser.open(url) \n time.sleep(aWork//2) #웹페이지 1개당 0.5초 \n work(aWork, heartMax, pgMax,cell) \n cell += aWork #해당 회차의 첫작업 셀번호\n for i in range(extraWork):\n url = load_ws.cell(cell1st+nWork*aWork+i,2).value\n webbrowser.open(url)\n time.sleep(extraWork) #웹페이지 1개당 1초 \n work(extraWork, heartMax, pgMax,cell)\n time.sleep(3) \n\n \ndef work(nPages, heartMax, pgMax,cell1):\n history = {}\n cellend = cell1 + nPages -1 #마지막 작업 셀\n for i in range(nPages): \n id=NiconClicker.icon() \n time.sleep(1) \n history[id] = Nheart.heart(heartMax,pgMax)\n\n pyautogui.click((10,200))\n pyautogui.hotkey('ctrl','w') \n lst = list(history.keys())\n lst.reverse()\n\n now = datetime.datetime.today() \n\n fname = now.strftime('%Y-%m-%d-%H%M%S_'+str(cellend) + \".txt\" )\n f=open(\"nHgiven/\"+fname, 'w',encoding=\"UTF8\")\n for i in range(len(lst)):\n line = str(lst[i]) + 
\"=\"+str(history[lst[i]])\n f.write(line + \"\\n\")\n f.close()\n \ndef okClick():\n num1 = int(combx.get())\n num2 = int(combxH.get())\n num3 = int(combxP.get())\n num4 = int(combxS.get())\n \n work1 = num1 // 1000 #1000개 단위로 실행\n extraWork = num1 % 1000\n \n url = (\"https://m.blog.naver.com/folkslife\")\n webbrowser.open(url) \n time.sleep(0.5)\n browser()\n brOK()\n\n for i in range(work1):\n mult(1000, num2, num3,num4 + 1000*i)\n \n mult(extraWork, num2, num3,num4 + 1000*work1)\n time.sleep(3)\n exit()\n\n\ndef okClickImage1(): #집컴 home\n pathHome = os.path.realpath('images/home') \n path = os.path.realpath('images/USE')\n files = os.listdir(pathHome)\n for file in files:\n shutil.copyfile(pathHome+'/'+file, path+'/'+file)\n Msgbox1()\n\ndef okClickImage2(): #원장실 one\n pathOne = os.path.realpath('images/one') \n path = os.path.realpath('images/USE')\n files = os.listdir(pathOne)\n for file in files:\n shutil.copyfile(pathOne+'/'+file, path+'/'+file)\n Msgbox1()\n\ndef okClickImage3(): #원장실sub\n pathOneSub = os.path.realpath('images/oneSub') \n path = os.path.realpath('images/USE')\n files = os.listdir(pathOneSub)\n for file in files:\n shutil.copyfile(pathOneSub+'/'+file, path+'/'+file)\n Msgbox1()\n\ndef Msgbox1():\n tkinter.messagebox.showinfo(\"Inform\",\"Image files've been copied.\")\n\nwin = Tk()\nwin.geometry(\"500x500+1300+100\")\nwin.resizable(True,True)\nwin.title(\"execute\")\n\nlabel1=Label(win, text=\"*Chrome 가장왼쪽모니터 전체화면\")\nlabel1.pack()\nlabel1_1=Label(win, text=\"***창열기전 네이버로그인필수***\")\nlabel1_1.pack()\n\nbtn3 = Button(win, text = \"***이미지수정***집pc\", overrelief=\"solid\", width=30, command=okClickImage1)\nbtn3.pack()\nbtn4 = Button(win, text = \"***이미지수정***원장실\", overrelief=\"solid\", width=30, command=okClickImage2)\nbtn4.pack()\nbtn5 = Button(win, text = \"***이미지수정***원장실sub\", overrelief=\"solid\", width=30, command=okClickImage3)\nbtn5.pack()\n\n\nlabel55=Label(win, text=\"시작 셀 넘버\")\nlabel55.pack()\n\nvalS = [str(i) for i in (5, 1425,2845)]\ncombxS = ttk.Combobox(win, height=5, values=valS)\ncombxS.set(5)\ncombxS.pack()\n\nlabel2=Label(win, text=\"작업대상 웹페이지 수 선택\")\nlabel2.pack()\n\nval = [str(i) for i in (1,5,10,30, 50, 100, 200, 500)]\ncombx = ttk.Combobox(win, height=5, values=val)\ncombx.set(5000)\ncombx.pack()\n\n\n\nlabel3=Label(win, text=\"좋아요 최대 클릭수\")\nlabel3.pack()\n\nvalH = [str(i) for i in (1,3,5, 10)]\ncombxH = ttk.Combobox(win, height=5, values=valH)\ncombxH.set(3)\ncombxH.pack()\n\nlabel4=Label(win, text=\"탐색 페이지 수\")\nlabel4.pack()\n\nvalP = [str(i) for i in (1,3,5, 10,30)]\ncombxP = ttk.Combobox(win, height=5, values=valP)\ncombxP.set(2)\ncombxP.pack()\n\nbtn1 = Button(win, text = \"실행\", background=\"cornflowerblue\",overrelief=\"solid\", width=15, command=okClick)\nbtn1.pack()\n\n\nwin.mainloop()\n\n\n\n","sub_path":"nListClicker_V1_2.py","file_name":"nListClicker_V1_2.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"361173658","text":"# Finding vertices topological order by applying in directed graph using Depth First Search\r\nfrom collections import defaultdict\r\n\r\n\r\nclass Graph:\r\n def __init__(self, n):\r\n self.graph = defaultdict(list)\r\n self.stack = [0] * 2 * n\r\n self.order = 0\r\n\r\n def addedge(self, u, v):\r\n self.graph[u].append(v)\r\n\r\n # The function to do DFS traversal. 
It uses recursive explore\r\n def DFS(self):\r\n # Mark all the vertices as not visited\r\n visited = {}\r\n for v in self.graph:\r\n visited[v] = False\r\n for u in self.graph[v]:\r\n visited[u] = False\r\n # Explore every node\r\n for v in self.graph.copy():\r\n if not visited[v]:\r\n self.explore(v, visited)\r\n print(self.stack)\r\n\r\n def explore(self, v, visited):\r\n # Mark the current node as visited and print it\r\n visited[v] = True\r\n # Recur for all the vertices adjacent to this vertex\r\n self.order += 1\r\n for i in self.graph[v]:\r\n if not visited[i]:\r\n self.explore(i, visited)\r\n self.stack[self.order] = v\r\n self.order += 1\r\n\r\n\r\ndef main():\r\n g = Graph(8)\r\n g.addedge('B', 'A')\r\n g.addedge('B', 'C')\r\n g.addedge('F', 'B')\r\n g.addedge('B', 'E')\r\n g.addedge('A', 'D')\r\n g.addedge('D', 'E')\r\n g.addedge('D', 'G')\r\n g.addedge('E', 'G')\r\n g.addedge('H', 'G')\r\n g.addedge('F', 'H')\r\n\r\n g.DFS()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"graph_algorithms/finding_scc.py","file_name":"finding_scc.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"592337967","text":"import time # import time module\n\n# This script can be used as a module\n\n\n# Define a function that takes no arguments, prints the system time, and delays 0.5 seconds\ndef showtimefunction():\n print(time.time())\n delay(0.5)\n\n\n# Define a function that delays t seconds\ndef delay(t):\n time.sleep(t)\n\n\n# If you'd like to run a module or script directly, you can put an example of it's use at the bottom\n# The following condition is True when a script is executed directly (not imported or used externally)\nif __name__ == \"__main__\":\n\n # Set 'start' to the current system time\n start = time.time()\n\n # Run the functions from above\n showtimefunction()\n delay(1.0)\n showtimefunction()\n delay(0.5)\n showtimefunction()\n\n # Set 'now' to the current system time\n now = time.time()\n\n # Get the elapsed time 'dt'\n dt = now - start\n\n # Print 'dt'\n print('\\nElapsed: ' + str(dt))\n\n","sub_path":"Python/tutorial_b_sub.py","file_name":"tutorial_b_sub.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"111246757","text":"#!/usr/env/ python\n\n############ THIS IS A CUSTOM SCRIPT ##################################\n# Purpose:\n# \tWith input files of a pyprophet output tsv with respect to its \n#\tsqMass file, calculate number of non-zero points across the \n#\tdetected peptide/precursor/transition. 
Output a tsv file.\n#\tThis script is to be used as a commandline tool.\n\n# Required arguments:\n#\tInput tsv: pyprophet file\n#\tInput filename: filename of the mzml file\n#\tInput sqMass: the corresponding sqMass file containing the XICs\n#\tOutput path: output path for tsv file\n\n#######################################################################\n\n\nimport sqlite3\nimport numpy as np\nimport pandas as pd\nimport matplotlib as plt\nimport msproteomicstoolslib.data_structures as db\nimport SqlDataAccess\nimport os\nimport sys\nimport time\n\nfilename=sys.argv[1]\npyprophet=sys.argv[2]\nsqMass=sys.argv[3]\noutput=sys.argv[4]\n\n# Subset the data\nstats = pd.read_csv(pyprophet, sep = \"\\t\")\nstats = stats.loc[:, ['transition_group_id', 'filename', 'RT', 'mz', 'Sequence', 'decoy','id','FullPeptideName', 'Charge', 'm_score', 'leftWidth', 'rightWidth', 'peak_group_rank', 'd_score']]\n\nstats = stats[stats.decoy == 0] # remove decoys\nstats = stats[stats.m_score <= 0.01] # filter by 1% FDR\nstats = stats[stats.filename == filename]\n# Rank by m_score (fdr)\nstats = stats.loc[stats.groupby([\"FullPeptideName\", \"Charge\"])['m_score'].idxmin()]\nstats = stats[stats.peak_group_rank == 1]\n# Remove duplicates\nstats = stats.drop_duplicates(subset=['FullPeptideName', 'Charge'])\n\n\n# Modified Functions from DIAPASEF scripts:\ndef get_PeakWidth(df):\n '''\n Given the dataset, calculate the peakwidth for each peptide.\n Input: pyprophet pandas dataframe\n Output: dataframe of peakwidth per peptide.\n '''\n pw = df.loc[:, ['Sequence', 'FullPeptideName', 'Charge', 'leftWidth', 'rightWidth']]\n width = df.rightWidth - df.leftWidth\n pw['PeakWidth'] = width\n return(pw)\n\ndef readSQL(sql):\n conn = sqlite3.connect(sql)\n return(conn)\n\ndef getChromData(sql):\n chrom = SqlDataAccess.SqlDataAccess(sql)\n return(chrom)\n\ndef getChromValues(peptide, charge, chrom, conn):\n start = time.time()\n query = '''SELECT * FROM CHROMATOGRAM \\\n INNER JOIN DATA ON DATA.CHROMATOGRAM_ID = CHROMATOGRAM.ID \\\n INNER JOIN PRECURSOR ON PRECURSOR.CHROMATOGRAM_ID = CHROMATOGRAM.ID \\\n where PEPTIDE_SEQUENCE=\"{0}\" AND CHARGE=\"{1}\"'''\n q = conn.execute(query.format(peptide, charge))\n tmp = q.fetchall()\n idx = []\n nativeid = []\n for i in range(0, len(tmp)):\n idx.append(tmp[i][0])\n nativeid.append(tmp[i][2])\n idx = list(set(idx))\n nativeid = list(set(nativeid))\n rt = []\n intensity = []\n selected = chrom.getDataForChromatograms(idx)\n # selected: nested list\n\t# list:\n\t#\t- chrom id\n\t#\t\t- rt\n\t#\t\t- int\n # for t in nativeid:\n # selected = chrom.getDataForChromatogramFromTransitionNativeId(t)\n # rt.append(selected[0])\n # intensity.append(selected[1])\n end = time.time()\n print(\"Time elapsed: \", end-start)\n return(idx,nativeid, peptide, selected)\n\t\ndef getPointsPerChrom(pw, peptide, chrom):\n p = peptide\n tp = pw.loc[pw['Sequence'] == p, ['leftWidth', 'rightWidth']].values.tolist()[0]\n charge = pw.loc[pw['Sequence'] == p, \"Charge\"].values.tolist()[0]\n out = pd.DataFrame(columns = ('Sequence','lw','rw', 'ChromID', 'Points', \"Charge\"))\n chromID = chrom[0]\n chromINFO = chrom[3]\n for chrID in range(0, len(chrom[3])):\n tmp_rt = np.array(chromINFO[chrID][0])\n tmp_int = np.array(chromINFO[chrID][1])\n if (max(tmp_rt) > tp[1]) | (min(tmp_rt) < tp[0]): # tp[0] is the lw, tp[1] is rw\n idx = np.where((tmp_rt > tp[0]) & (tmp_rt < tp[1]))\n n_int = tmp_int[idx]\n n_point = sum(n_int > 0)\n out.loc[chrID] = [p, tp[0], tp[1], chromID[chrID], n_point, charge]\n return(out) \n# for i 
in range(0, len(chrom[0])):\n# tmp_rt = np.array(chrom[3][i])\n# #print(\"length of rt: %d\" % len(tmp_rt))\n# tmp_int = np.array(chrom[4][i])\n# #print(\"length of int: %d\" %len(tmp_int))\n# if (max(tmp_rt) > tp[1]) & (min(tmp_rt) < tp[0]):\n# idx = np.where((tmp_rt < tp[0]) | (tmp_rt > tp[1]))\n# # print(\"length of idx: %d\" %len(idx))\n# n_int = tmp_int[idx]\n# midx = np.where((tmp_rt > tp[0]) & (tmp_rt < tp[1]))\n# #print(\"length of max idx: %d\" %len(midx))\n# n_points = sum(n_int > 0)\n# out.loc[i] = [p, chrom[0][i], chrom[1][i], n_points, tp[0], tp[1]]\n# return(out)\n\n# Use functions to calculate for number of peaks per peptide:\nchrom = getChromData(sqMass)\nconn = readSQL(sqMass)\n\nPeptide_peakWidths = get_PeakWidth(stats)\n\npeptides = stats.Sequence.tolist()\n\n#Peptide_chroms = [getChromValues(pep, chrom, conn) for pep in peptides]\n\nPointsPerPeak = pd.DataFrame()\n\nfor p in range(0, len(peptides)):\n pep = Peptide_peakWidths.Sequence.tolist()[p]\n charge = Peptide_peakWidths.Charge.tolist()[p]\n print(pep, charge)\n chromValue = getChromValues(pep, charge, chrom, conn)\n n_Points = getPointsPerChrom(Peptide_peakWidths, pep, chromValue)\n PointsPerPeak = PointsPerPeak.append(n_Points, ignore_index=True)\n \n\n\nPointsPerPeak.to_csv(output, sep = \"\\t\", index=False)\n\n\n","sub_path":"calculatePointsPerPeak.py","file_name":"calculatePointsPerPeak.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"233929466","text":"import zipfile\nfrom lxml import etree\nimport json\nfrom searchFiles import find\n\n\n# .docx files are XML underneath. zipfile gets at the directory which contains the document.xml\ndef get_word_xml(docx_filename):\n docx = zipfile.ZipFile(docx_filename)\n return docx.read('word/document.xml')\n\n\n\n# create an ElementTree from a string\ndef get_xml_tree(xml_string):\n return etree.ElementTree(etree.fromstring(xml_string))\n\n\n# Namespace for WordML\nnamespaces = {'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'}\n\n# Open and parse the document\nSAP = get_xml_tree(get_word_xml('/Users/david/PycharmProjects/SAPTableExtractor/Statistical Analysis Plan - TDE-PH-310_20151223 clean.docx'))\n# uncomment and print to have a look\n# print etree.tostring(SAP, pretty_print=True)\n\n# XPath for text elements corresponding to Table Title, Listing Number and Figure Number. 
Returns an list of elements.\n# XPath says :find Table Title, then back up the tree to the <w:tbl> element, which is the parent of all tables in the document,\n# then traverses back down the tree to the <w:tc> (table column element)\ntables = SAP.xpath('//w:tbl/w:tr/w:tc/w:p/w:r/w:t[text()=\"Table Title\"]/../../../../../w:tr/w:tc', namespaces=namespaces)\nlistings = SAP.xpath('//w:tbl/w:tr/w:tc/w:p/w:r/w:t[text()=\"Listing Number\"]/../../../../../w:tr/w:tc', namespaces=namespaces)\nfigures = SAP.xpath('//w:tbl/w:tr/w:tc/w:p/w:r/w:t[text()=\"Figure Number\"]/../../../../../w:tr/w:tc', namespaces=namespaces)\n\n# Initialize Lists\nT = []\nL = []\nF = []\n# get all text, making sure to perform a join on <w:t> (text) element which have been split across <w:r> (runs)\nfor tr in tables:\n newList =[]\n [newList.append(t.text) for t in tr.findall('./w:p/w:r/w:t', namespaces=namespaces)]\n cat = ''.join(newList[0:])\n T.append(cat)\n\nfor tr in listings:\n newList =[]\n [newList.append(t.text) for t in tr.findall('./w:p/w:r/w:t', namespaces=namespaces)]\n cat = ''.join(newList[0:])\n L.append(cat)\n\nfor tr in figures:\n newList =[]\n [newList.append(t.text) for t in tr.findall('./w:p/w:r/w:t', namespaces=namespaces)]\n cat = ''.join(newList[0:])\n F.append(cat)\n\n# put lists into dictionaries to further create JSON output (and text file)\nTdict = dict(T[i:i+2] for i in list(range(0, len(T), 2)))\nprint(Tdict)\n# find('*.pdf', '/Users/David/projects/XML/output', Tdict)\nprint((json.dumps(Tdict, sort_keys=True, indent=4, separators=(',', ': '))))\nLdict = dict(L[i:i+2] for i in list(range(2, len(L), 2)))\nprint(Ldict)\nprint((json.dumps(Ldict, sort_keys=True, indent=4, separators=(',', ': '))))\nFdict = dict(F[i:i+2] for i in list(range(2, len(F), 2)))\nprint(Fdict)\nprint((json.dumps(Fdict, sort_keys=True, indent=4, separators=(',', ': '))))\n\n# create tab separated output files, ensuring that encoding is used for expected results\noutfile = open('Tables', 'w' )\nfor key, value in sorted(Tdict.items()):\n outfile.write(str(key) + '\\t' + str(value.encode('utf-8')) + '\\n' )\n\noutfile = open('Listings', 'w' )\nfor key, value in sorted(Ldict.items()):\n outfile.write(str(key) + '\\t' + str(value.encode('utf-8')) + '\\n' )\n\noutfile = open('Figures', 'w' )\nfor key, value in sorted(Fdict.items()):\n outfile.write(str(key) + '\\t' + str(value.encode('utf-8')) + '\\n' )\n\n\n","sub_path":"readFile.py","file_name":"readFile.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"132908075","text":"# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport re\nimport subprocess\nimport uuid\n\nimport backoff\nimport pytest\nimport requests\n\n\n@backoff.on_exception(backoff.expo, Exception, max_tries=5)\ndef gcloud_cli(command):\n \"\"\"\n Runs the gcloud CLI with given options, parses the json formatted output\n and returns the resulting Python object.\n\n Usage: 
gcloud_cli(options)\n options: command line options\n\n Example:\n result = gcloud_cli(\"app deploy --no-promote\")\n print(f\"Deployed version {result['versions'][0]['id']}\")\n\n Raises Exception with the stderr output of the last attempt on failure.\n \"\"\"\n full_command = f\"gcloud {command} --quiet --format=json\"\n print(\"Running command:\", full_command)\n\n output = subprocess.run(\n full_command,\n capture_output=True,\n shell=True,\n check=True,\n )\n try:\n entries = json.loads(output.stdout)\n return entries\n except Exception:\n print(\"Failed to read log\")\n print(f\"gcloud stderr was {output.stderr}\")\n\n raise Exception(output.stderr)\n\n\n# Wait for app to initialize\n@backoff.on_exception(backoff.expo, requests.exceptions.HTTPError, max_time=300)\ndef wait_for_app(url):\n r = requests.get(url)\n r.raise_for_status()\n return True\n\n\n@pytest.fixture\ndef version():\n \"\"\"Launch a new version of the app for testing, and yield the\n project and version number so tests can invoke it, then delete it.\n \"\"\"\n\n result = gcloud_cli(f\"app deploy --no-promote --version={uuid.uuid4().hex}\")\n version_id = result[\"versions\"][0][\"id\"]\n project_id = result[\"versions\"][0][\"project\"]\n version_hostname = f\"{version_id}-dot-{project_id}.appspot.com\"\n\n try:\n wait_for_app(f\"https://{version_hostname}/\")\n yield project_id, version_id\n finally:\n gcloud_cli(f\"app versions delete {version_id}\")\n\n\ndef test_upload_and_view(version):\n project_id, version_id = version\n version_hostname = f\"{version_id}-dot-{project_id}.appspot.com\"\n\n # Check that version is serving form in home page\n response = requests.get(f\"https://{version_hostname}/\")\n assert '<form action=\"' in response.text\n assert response.status_code == 200\n\n matches = re.search(r'action=\"(.*?)\"', response.text)\n assert matches is not None\n upload_url = matches.group(1)\n\n with open(\"./main.py\", \"rb\") as f:\n response = requests.post(upload_url, files={\"file\": f})\n\n assert b\"from google.appengine.api\" in response.content\n assert response.status_code == 200\n","sub_path":"appengine/standard_python3/bundled-services/blobstore/wsgi/main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"197571122","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nGraph algorhitms (depth-first and limited depth-first search) visualisation.\n\nNew vertice form (also used for editing vertice)\n\nBy K.Ivenkov <haevlock@gmail.com>\n03.2012\n'''\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nfrom new_vertice_ui import Ui_newVertForm\n\nclass NewVertice(Ui_newVertForm, QWidget):\n '''\n New vertice (or vertice edit) form class \n '''\n \n def __init__(self, parent, vertice_edit = None):\n super(NewVertice, self).__init__(None, Qt.WindowCloseButtonHint)\n self.setupUi(self)\n self.parent = parent\n names = sorted(\n [v.name for v in self.parent.graph.vertices if len(v.name) == 1]\n )\n self.do_edit = bool(vertice_edit)\n \n if self.do_edit:\n self.setWindowTitle('Edit vertice...')\n self.btnCreate.setText('&Save')\n self.label_3.clear()\n \n self.vertice = vertice_edit\n self.leName.setText(self.vertice.name)\n self.leLevel.setValue(self.vertice.level) \n else:\n self.leName.setText(chr(ord(names.pop()) + 1) if names else 'A')\n self.leLevel.setValue(self.parent.lastused_level)\n \n def keyPressEvent(self, e): \n '''\n Main button behaviour 
override\n '''\n if e.key() == Qt.Key_Escape:\n self.close()\n if e.key() == Qt.Key_Return:\n self.btnCreate.click()\n \n @pyqtSlot()\n def on_btnCancel_clicked(self):\n '''\n Cancel button click\n '''\n self.close()\n\n @pyqtSlot()\n def on_btnCreate_clicked(self):\n '''\n Create button click\n '''\n name = str(self.leName.text())\n level = str(self.leLevel.text())\n \n try:\n if not 0 < len(name) < 5:\n raise Exception('Invalid vertice name! Enter 1-4 letters.')\n if not 0 < len(level) < 5:\n raise Exception('Invalid level!')\n try:\n level = int(level)\n except:\n raise Exception('Invalid level!')\n \n if self.do_edit:\n old_level = self.vertice.level\n old_level_count = len(\n [v for v in self.parent.graph.vertices \\\n if v.level == old_level]\n ) if old_level != level else 0\n \n self.parent.graph.edit_vertice(self.vertice.name, name, level)\n \n # if a level was changed and previously this vertice \n # was the only one in this level\n if old_level_count == 1:\n self.parent.levels.remove(old_level) \n else:\n self.parent.graph.add_vertice(name, level)\n self.parent.lastused_level = level # only on a new vertice act.\n except Exception as inst:\n QMessageBox.warning(self, 'Error', str(inst), QMessageBox.Ok)\n else:\n if level not in self.parent.levels:\n self.parent.levels.append(level)\n self.close()\n self.parent.repaint()\n self.parent._issaved = False\n \n \n ","sub_path":"new_vertice.py","file_name":"new_vertice.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"264674860","text":"import tensorflow as tf\nfrom dgm4nlp.tf.encoder import ffnn\nfrom dgm4nlp.tf.kl import kl_from_q_to_standard_normal\nfrom dgm4nlp.tf.kl import kl_diagonal_gaussians\nfrom dgm4nlp.tf.logit import logit_layer_for_text\nfrom dgm4nlp.tf.logit import logit_layer_for_bitext\nfrom dgm4nlp.tf.encoder import SequenceEncoder\nfrom dgm4nlp.tf.encoder import FeedForwardEncoder\nimport logging\n\n\nclass VariationalApproximationSpecs:\n \"\"\"\n Configure an inference network.\n \"\"\"\n\n def __init__(self, dz: int, ds=None, hierarchical=False):\n \"\"\"\n\n :param dz: dimensionality of latent word embedding\n :param ds: dimensionality of latent sentence embedding\n :param mean_field: whether S and Z_1^m are independent\n - if True, q(s, z_1^m) = q(s) \\prod_i q(z_i)\n - otherwise q(s, z_1^m) = q(s) \\prod_i q(z_i|s)\n \"\"\"\n self.dz = dz\n self.ds = ds\n self.hierarchical = hierarchical if ds is not None else False\n\n # Default configuration for z\n self.z_view = 0\n self.z_mean_hidden_layers = []\n self.z_var_hidden_layers = []\n\n # Default configuration for s\n self.s_view = 0\n self.s_mean_hidden_layers = []\n self.s_var_hidden_layers = []\n\n self._encoders = [None]\n\n def add_encoder(self, encoder: SequenceEncoder):\n self._encoders.append(encoder)\n return len(self._encoders) - 1\n\n def get_encoder(self, view) -> SequenceEncoder:\n return self._encoders[view]\n\n def config_z(self, view: int, mean_hidden_layers=[], var_hidden_layers=[]):\n if not (0 <= view < len(self._encoders)):\n raise ValueError('You do not have enough views')\n self.z_view = view\n self.z_mean_hidden_layers = mean_hidden_layers\n self.z_var_hidden_layers = var_hidden_layers\n\n def config_s(self, view: int, mean_hidden_layers=[], var_hidden_layers=[]):\n if self.ds is None:\n raise ValueError('You are not modelling sentence embeddings')\n if not (0 <= view < len(self._encoders)):\n raise ValueError('You do not have enough views')\n 
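# Record which encoder view feeds q(S); the *_hidden_layers lists are handed to the FeedForwardEncoder MLPs that predict the mean and log-variance of S in _build_model (mirroring config_z above).\n 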
self.s_view = view\n self.s_mean_hidden_layers = mean_hidden_layers\n self.s_var_hidden_layers = var_hidden_layers\n\n\ndef _predict(layers, inputs, name):\n \"\"\"\n Predict by applying a multilayer transformation to inputs.\n\n :param layers: list of nonlinear layers specified by pairs (output_units, activation_fn)\n :param inputs: input activations [B * T, di]\n :param name:\n :return: [B * T, do]\n \"\"\"\n with tf.variable_scope(name):\n # [B * M, do]\n predictions = ffnn(\n inputs, # [B * M, di]\n layers\n )\n return predictions\n\n\nclass EmbedAlignModel:\n \"\"\"\n Model specification for embed-align based onIBM1\n \"\"\"\n\n def __init__(self,\n # architecture\n vx, vy,\n dx=32,\n dh=64,\n nb_softmax_samples=[0, 0],\n softmax_approximation=['botev-batch', 'botev-batch'],\n q=VariationalApproximationSpecs(dz=64),\n attention_z=False,\n improved_features_for_py=False,\n mc_kl=False,\n # tf\n session=None):\n \"\"\"\n\n :param vx: size of vocabulary of x-language\n :param vy: size of vocabulary of y-language\n :param dx: x-embedding dimensionality\n :param dh: hidden units (for MLPs and BiRNNs)\n :param dz: dimensionality of latent encoding (and also of class embeddings)\n :param nb_softmax_samples: number or negative samples for P(X|z) and P(Y|x_a,z_a) (use zero for exact softmax)\n :param softmax_approximation: specify type of sampled logit implementation for P(X|z) and P(Y|x_a, z_a)\n - 'botev' \n - 'jean' (not available for P(Y|x_a, z_a))\n - 'botev-batch'\n :param birnn: bidirectional encodding in the approximate posterior\n :param birnn_merge_strategy: strategy for merging RNN states: sum/concat/mlp\n :param shared_birnn_encoders: whether we use the same BiRNN for predicting mean and variance\n - if True, then mu_layers and sigma_layers can always be used to specialise the shared bidirectional\n encodings before predicting mean and variance\n - if False, you can probably set mu_layers=[] and sigma_layers=[]\n :param mu_layers: specify non-linear hidden layers for predicting mean from x's encodings\n - we will always add to it a layer with num_outputs=dz and activation_fn=None\n :param sigma_layers: specify non-linear hidden layers for predicting log variance from x's encodings\n - we will always add to it a layer with num_outputs=dz and activation_fn=None\n :param session:\n \"\"\"\n\n self.dx = dx\n self.dh = dh\n self.vx = vx\n self.vy = vy\n self.q = q\n self.mc_kl = mc_kl\n\n self.nb_softmax_samples = nb_softmax_samples\n self.softmax_approximation = softmax_approximation\n\n self._attention_z = attention_z\n self._improved_features_for_py = improved_features_for_py\n\n self._create_placeholders()\n self._create_weights()\n self._build_model()\n\n self.saver = tf.train.Saver()\n self.session = session\n\n def _create_placeholders(self):\n \"\"\"These are placeholders to feed data to TensorFlow.\"\"\"\n\n self.x = tf.placeholder(tf.int64, shape=[None, None], name='X') # (B, M)\n self.y = tf.placeholder(tf.int64, shape=[None, None], name='Y') # (B, N)\n\n self.training_phase = tf.placeholder(tf.bool, name='training_phase') # []\n self.alpha_s = tf.placeholder(tf.float32) # [] scalar that weights the KL contribution for prior on S\n self.alpha_z = tf.placeholder(tf.float32) # [] scalar that weights the KL contribution for prior on Z\n\n # here we create placeholders for a batch-wise shared sampled support for X\n if self.softmax_approximation[0] == 'botev-batch':\n self.support_x = tf.placeholder(tf.int64, shape=[None], name='support_x') # [S]\n self.importance_x = 
tf.placeholder(tf.float32, shape=[None], name='importance_x') # [S]\n else:\n self.support_x = None\n self.importance_x = None\n\n # here we create placeholders for a batch-wise shared sampled support for Y|x,a\n if self.softmax_approximation[1] == 'botev-batch':\n self.support_y = tf.placeholder(tf.int64, shape=[None], name='support_y') # [S]\n self.importance_y = tf.placeholder(tf.float32, shape=[None], name='importance_y') # [S]\n else:\n self.support_y = None\n self.importance_y = None\n\n def _create_weights(self):\n \"\"\"Create weights for the model.\"\"\"\n pass\n\n def save(self, session, path=\"model.ckpt\", step=None):\n \"\"\"Saves the model.\"\"\"\n return self.saver.save(session, path, global_step=step)\n\n def _build_model(self):\n \"\"\"Builds the computational graph for our model.\"\"\"\n\n # Embedding for source words\n # (Vx, dx)\n # add glorot init of params but there is not too much diff with uniform\n x_embeddings = tf.get_variable(\n name=\"x_embeddings\", initializer=tf.contrib.layers.xavier_initializer(), #tf.random_uniform_initializer(),\n shape=[self.vx, self.dx])\n\n # (B, M, dx)\n x_embedded = tf.nn.embedding_lookup(x_embeddings, # (Vx, dx)\n self.x) # (B, M)\n\n # these quantities are only known when the batch is provided\n batch_size = tf.shape(self.x)[0] # B\n longest_x = tf.shape(self.x)[1] # M\n longest_y = tf.shape(self.y)[1] # N\n\n # (B, M)\n x_mask = tf.cast(tf.sign(self.x), tf.float32)\n # (B, N)\n y_mask = tf.cast(tf.sign(self.y), tf.float32)\n # (B,)\n x_len = tf.reduce_sum(tf.sign(self.x), axis=1)\n\n # 2a. Here I define the alignment component P(A|M=m) = U(1/m)\n # (B, 1)\n lengths = tf.expand_dims(x_len, -1)\n # (B, M)\n pa_x = tf.div(x_mask, tf.cast(lengths, tf.float32))\n # (B, 1, M)\n pa_x = tf.expand_dims(pa_x, 1)\n # (B, N, M)\n pa_x = tf.tile(pa_x, [1, longest_y, 1], name='pa_x')\n\n # Compute encodings that will be available to variational approximations\n\n # Encode x into [B, M, d?] 
tensors\n if self.q.z_view == 0:\n h_for_z = x_embedded\n else:\n h_for_z = self.q.get_encoder(self.q.z_view)(inputs=x_embedded, lengths=x_len)\n if self.q.ds is not None:\n if self.q.s_view == 0:\n h_for_s = x_embedded\n elif self.q.s_view == self.q.z_view:\n h_for_s = h_for_z\n else:\n h_for_s = self.q.get_encoder(self.q.s_view)(inputs=x_embedded, lengths=x_len)\n else:\n h_for_s = None # we are not predicting a distribution over sentence embeddings\n\n if self.q.ds is not None: # Predict parameters of q(S)\n logging.info('Using latent sentence representation s')\n\n # i) Mask invalid positions\n # [B, M, 1]\n weighted_mask = tf.expand_dims(\n # get a float mask [B, M] and normalise it by length\n tf.sequence_mask(x_len, maxlen=longest_x, dtype=tf.float32) / tf.expand_dims(tf.cast(x_len, tf.float32), 1),\n 2\n )\n h_for_s *= weighted_mask\n # [B, 1, dh]\n h_for_s = tf.reduce_sum(h_for_s, axis=1, keep_dims=True)\n\n # ii) Predict mean\n # [B, 1, ds]\n s_mean = FeedForwardEncoder(\n num_units=self.q.ds,\n hidden_layers=self.q.s_mean_hidden_layers,\n name='FF-S-Mean'\n )(h_for_s)\n # [B, ds]\n s_mean = tf.reshape(s_mean, [batch_size, self.q.ds], name='s-mean')\n\n # iii) Predict log-variance\n # [B, 1, ds]\n s_log_var = FeedForwardEncoder(\n num_units=self.q.ds,\n hidden_layers=self.q.s_var_hidden_layers,\n name='FF-S-LogVar'\n )(h_for_s)\n # [B, ds]\n s_log_var = tf.reshape(s_log_var, [batch_size, self.q.ds], name='s-var')\n\n # iv) Sample s\n # [B, ds]\n s_epsilon = tf.random_normal(tf.shape(s_mean), name='s-epsilon')\n # [B, ds]\n s = s_mean + tf.exp(s_log_var / 2.) * s_epsilon\n\n # Here we use the predicted mean for decoding at prediction (validation/test) time\n # but stick with sampled encodings for training\n # [B, ds]\n s = tf.cond(self.training_phase, true_fn=lambda: s, false_fn=lambda: s_mean, name='S')\n\n # Replicate the sample per x-token\n # [B, 1, ds]\n s = tf.expand_dims(s, 1)\n # [B, M, ds]\n s = tf.tile(s, [1, longest_x, 1])\n\n if self.q.hierarchical: # enrich features for Z prediction with s sample\n logging.info('Using hierarchical approximation q(s, z_1^m) = q(s) \\prod_i q(z_i|s)')\n # [B, M, dh + ds]\n h_for_z = tf.concat([h_for_z, s], axis=-1)\n\n logging.info('Using latent word representation z')\n # Predict parameters of q(Z_1^m)\n # [B, M, dz]\n z_mean = FeedForwardEncoder(\n num_units=self.q.dz,\n hidden_layers=self.q.z_mean_hidden_layers,\n name='FF-Z-Mean'\n )(h_for_z)\n # [B * M, dz]\n z_mean = tf.reshape(z_mean, [-1, self.q.dz], name='z-mean')\n # [B, M, dz]\n z_log_var = FeedForwardEncoder(\n num_units=self.q.dz,\n hidden_layers=self.q.z_var_hidden_layers,\n name='FF-Z-LogVar'\n )(h_for_z)\n # [B * M, dz]\n z_log_var = tf.reshape(z_log_var, [-1, self.q.dz], name='z-var')\n\n # Get a sample by using the transformation\n # z = \\mu(x) + \\epsilon \\sigma(x)\n # where \\epsilon ~ N(0, I)\n # and \\sigma(x) = \\exp( 0.5 * log_var)\n # [B * M, dz]\n epsilon = tf.random_normal(tf.shape(z_log_var), name='z-epsilon')\n # [B * M, dz]\n z = z_mean + tf.exp(z_log_var / 2.) 
* epsilon\n\n # Here we use the predicted mean for decoding at prediction (validation/test) time\n # but stick with sampled encodings for training\n # [B * M, dz]\n z = tf.cond(self.training_phase, true_fn=lambda: z, false_fn=lambda: z_mean)\n\n # [B, M, dz]\n z = tf.reshape(z, [batch_size, longest_x, self.q.dz], name='Z')\n\n # Decide on the latent encoding for predicting x tokens\n if not self._attention_z:\n latent_dim_px = self.q.dz\n # [B, M, dl=dz]\n h_for_px = z\n else: # here we use a self attention mechanism\n logging.info('Self-attention mechanism over z_1^m')\n # [B, M, dz]\n z_keys = FeedForwardEncoder(\n num_units=self.q.dz,\n hidden_layers=[],\n name='FF-Z-Key'\n )(z)\n z_values = z\n # [B, M, M]\n scores = tf.matmul(\n z_keys, # [B, M, dz]\n z_keys, # [B, M, dz]\n transpose_b=True\n )\n # mask invalid logits\n scores = tf.where(\n # make the boolean mask [B, M, M]\n condition=tf.tile(\n # make the boolean mask [B, 1, M]\n tf.expand_dims(\n # get a boolean mask [B, M]\n tf.sequence_mask(x_len, maxlen=longest_x),\n 1\n ),\n [1, longest_x, 1]\n ),\n x=scores,\n y=tf.ones(shape=[batch_size, longest_x, longest_x]) * float('-inf')\n )\n # mask diagonal\n scores += tf.diag(tf.fill([tf.shape(scores)[-1]], float('-inf')))\n # Normalise attention\n # [B, M, M]\n attention = tf.nn.softmax(scores)\n # [B, M, dz]\n c = tf.matmul(\n attention, # [B, M, M]\n z_values # [B, M, dz]\n )\n latent_dim_px = self.q.dz * 2\n # [B, M, dl=2*dz]\n h_for_px = tf.concat([c, z], axis=-1)\n\n if self.q.ds is not None:\n logging.info('X conditions on z and s: P(X|z,s)')\n latent_dim_px += self.q.ds\n # [B, M, dl]\n h_for_px = tf.concat([s, h_for_px], axis=-1)\n\n # Now we implement the generative components P_\\theta(X_1^m|Z_1^m=z_1^m) and P_\\theta(Y_1^n|X=x_1^m, Z=z_1^m)\n\n # X decoder (given s and z_1^m)\n with tf.variable_scope('logit-x'):\n # [B * M, Vx|S], [B * M]\n logits_x, targets_x = logit_layer_for_text(\n nb_classes=self.vx,\n inputs=h_for_px, # [B, M, dl]\n labels=self.x, # [B, M]\n dim=latent_dim_px,\n nb_softmax_samples=self.nb_softmax_samples[0],\n is_training=self.training_phase,\n approximation=self.softmax_approximation[0],\n support=self.support_x,\n importance=self.importance_x,\n name='P(X|z)' if self.q.ds is None else 'P(X|z,s)'\n )\n\n # Apply a softmax to obtain distributions that can be used for sampling outside this class\n # (B, M, Vx)\n px_z = tf.reshape(tf.nn.softmax(logits_x), [batch_size, longest_x, -1])\n\n if self._improved_features_for_py:\n logging.info('Y conditions on everything available: P(Y_j|z_1^m, s, a_j)')\n latent_dim_py = latent_dim_px\n h_for_py = h_for_px\n else:\n logging.info('Y conditions on z_a alone: P(Y_j|z_aj)')\n latent_dim_py = self.q.dz\n h_for_py = z\n\n\n # Y decoder (given z_1^m)\n with tf.variable_scope('logit-y'):\n # [B * M, Vy|S], [B * N]\n logits_y, targets_y = logit_layer_for_bitext(\n nb_classes=self.vy,\n inputs=h_for_py, # [B, M, dl|dz]\n outputs=self.y, # [B, N]\n dim=latent_dim_py, # dl|dz\n nb_softmax_samples=self.nb_softmax_samples[1],\n is_training=self.training_phase,\n approximation=self.softmax_approximation[1],\n support=self.support_y,\n importance=self.importance_y,\n name='P(Y|z_a)'\n )\n\n # Apply a softmax to obtain distributions that can be used for marginalisation\n # [B, M, Vy|S]\n py_xza = tf.reshape(tf.nn.softmax(logits_y), [batch_size, longest_x, -1])\n\n # 2.c Marginalise alignments\n\n # P(y|x_1^m,z_1^m) = \\sum_a P(A=a|M=m) P(Y=y|X=x_a,Z=z_a)\n # [B, N, Vy]\n py_zx = tf.matmul(\n pa_x, # [B, N, M]\n py_xza, # [B, 
M, Vy]\n )\n\n # 3. Compute loss\n\n # 3a. Negative log-likelihood P(X=x|Z=z)\n # [B * M]\n ce_x = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=targets_x, # [B * M]\n logits=logits_x # [B * M, Vx|S]\n )\n\n # Mask invalid positions\n # [B, M]\n ce_x = tf.reshape(ce_x, [batch_size, longest_x])\n # Sum along time dimension\n # [B]\n ce_x = tf.reduce_sum(ce_x * x_mask, axis=1)\n # and average along the sample dimension\n # []\n ce_x = tf.reduce_mean(ce_x, axis=0)\n\n # 3b. Compute negative log-likelihood \\sum_j P(Y_j=y_j|Z_1^m=z_1^m,X_1^m=x_1^m)\n # [B * N]\n ce_y = tf.nn.sparse_softmax_cross_entropy_with_logits(\n # [B * N]\n labels=targets_y,\n # [B * N, Vy|S]\n logits=tf.log( # tf expects logits of the marginal\n tf.reshape(py_zx, [batch_size * longest_y, -1]) # we collapse sample (B) and time (N) dimensions\n )\n )\n # [B, N]\n ce_y = tf.reshape(ce_y, [batch_size, longest_y])\n # Sum along time dimension\n # [B]\n ce_y = tf.reduce_sum(ce_y * y_mask, axis=1)\n # Average along sample dimension\n # []\n ce_y = tf.reduce_mean(ce_y)\n\n # 3c. Compute KL terms\n\n if self.q.ds is None: # no sentence embedding\n logging.info('Gaussian prior P(Z) = N(0,I)')\n # then we have a standard prior for P(Z)\n # and compute KL(q(Z|x) || N(0,I))\n # [B * M]\n kl_z = kl_from_q_to_standard_normal(z_mean, z_log_var)\n kl_s = tf.zeros(shape=[])\n\n else: # with sentence embedding\n logging.info('Gaussian prior P(S) = N(0,I)')\n # We have a standard prior for P(S)\n # and compute KL(q(S)||N(0,I))\n # [B]\n kl_s = kl_from_q_to_standard_normal(s_mean, s_log_var)\n # []\n kl_s = tf.reduce_mean(kl_s)\n\n # We have a simple prior for P(Z|s) = N(Ws+b, kappa I)\n logging.info('Gaussian prior P(Z|s) = N(linear(s), sigma^2 I)')\n\n # TODO: consider options for prior P(Z|s)\n # 1. mean: zero, *linear_fn(s), nonlinear_fn(s)\n # 2. 
var: one, *kappa, linear_fn(s), nonlinear_fn(s)\n # [B * M, dz]\n prior_z_mean = ffnn(\n inputs=tf.reshape(s, [batch_size * longest_x, self.q.ds]), # [B * M, ds]\n layers=[[self.q.dz, None]] # TODO: prior_z_mean_layers\n )\n # [B * M, dz]\n prior_z_log_var = ffnn(\n # TODO: prior var is function of s or function of ones?\n inputs=tf.reshape(s, [batch_size * longest_x, self.q.ds]), # [B * M, ds]\n layers=[[self.q.dz, None]] # TODO: prior_z_var_layers\n )\n\n # Here we condition on the MC sample (s) to estimate KL(q(Z|s)||P(Z|s))\n # then we compute \\sum_{i=1}^m KL(q(Z_i|s)||P(Z_i|s))\n # [B * M]\n kl_z = kl_diagonal_gaussians(z_mean, z_log_var, prior_z_mean, prior_z_log_var)\n\n if False: # TODO: for q(S)q(Z_1^m) with a linear prior, perhaps we can condition on q(S)'s mean\n\n # [B, dz]\n prior_z_mean = ffnn(\n inputs=s_mean, # [B, ds]\n layers=[[self.q.dz, None]] # a single linear layer\n )\n\n # [B, dz]\n prior_z_log_var = ffnn(\n inputs=tf.ones_like(s_mean), # [B, ds]\n layers=[[self.q.dz, None]] # a single linear layer\n )\n\n # [B, 1, dz]\n prior_z_mean = tf.expand_dims(prior_z_mean, 1)\n # [B, M, dz]\n prior_z_mean = tf.tile(prior_z_mean, [1, longest_x, 1])\n # [B * M, dz]\n prior_z_mean = tf.reshape(prior_z_mean, [batch_size * longest_x, -1])\n\n # [B, 1, dz]\n prior_z_log_var = tf.expand_dims(prior_z_log_var, 1)\n # [B, M, dz]\n prior_z_log_var = tf.tile(prior_z_log_var, [1, longest_x, 1])\n # [B * M, dz]\n prior_z_log_var = tf.reshape(prior_z_log_var, [batch_size * longest_x, -1])\n\n # TODO: double check that this KL is correct\n # then we compute \\sum_{i=1}^m KL(q(Z_i|s)||P(Z_i|s))\n # [B * M]\n kl_z = kl_diagonal_gaussians(z_mean, z_log_var, prior_z_mean, prior_z_log_var)\n\n # Sum along time dimension (masking invalid steps)\n # [B]\n kl_z = tf.reduce_sum(\n # [B, M]\n tf.reshape(kl_z, [batch_size, -1]) * x_mask,\n axis=-1\n )\n # Average along sample dimension\n # []\n kl_z = tf.reduce_mean(kl_z)\n\n # 3d. Aggregate everything:\n # Our loss is the negative ELBO\n # ELBO = expected log likelihood - KL\n # LOSS = -ELBO = - expected log likelihood + KL\n # = CE + KL\n # because CE = - expected log likelihood\n loss = ce_y + ce_x + self.alpha_z * kl_z + self.alpha_s * kl_s\n\n # 4. Calculate accuracy of predictions\n\n # 4a. with respect to x_1^m\n targets_x = tf.reshape(targets_x, [batch_size, longest_x])\n predictions_x = tf.argmax(px_z, axis=2)\n acc_x = tf.equal(predictions_x, targets_x)\n acc_x = tf.cast(acc_x, tf.float32) * x_mask\n # []\n acc_x_correct = tf.reduce_sum(acc_x)\n acc_x_total = tf.reduce_sum(x_mask)\n acc_x = acc_x_correct / acc_x_total\n\n # 4b. 
with respect to y_1^n\n # [B, N]\n targets_y = tf.reshape(targets_y, [batch_size, longest_y])\n predictions_y = tf.argmax(py_zx, axis=2)\n acc_y = tf.equal(predictions_y, targets_y)\n acc_y = tf.cast(acc_y, tf.float32) * y_mask\n # []\n acc_y_correct = tf.reduce_sum(acc_y)\n acc_y_total = tf.reduce_sum(y_mask)\n acc_y = acc_y_correct / acc_y_total\n\n # These quantities are useful for optimisation, decoding, logging.\n # [B, M]\n self.pa_x = tf.identity(pa_x, name='pa_x')\n # [B, M, Vx]\n self.px_z = tf.identity(px_z, name='px_z')\n # [B, M, Vy]\n self.py_xza = tf.identity(py_xza, name='py_xza')\n # []\n self.loss = tf.identity(loss, name='loss')\n self.ce_x = tf.identity(ce_x, name='ce_x')\n self.ce_y = tf.identity(ce_y, name='ce_y')\n self.kl_z = tf.identity(kl_z, name='kl_z')\n self.kl_s = tf.identity(kl_s, name='kl_s')\n self.kl = tf.identity(kl_s + kl_z, name='kl')\n self.predictions_x = tf.identity(predictions_x, name='predictions_x')\n self.accuracy_x = tf.identity(acc_x, name='acc_x')\n self.accuracy_x_correct = tf.identity(tf.cast(acc_x_correct, tf.int64), name='acc_x_correct')\n self.accuracy_x_total = tf.identity(tf.cast(acc_x_total, tf.int64), name='acc_x_total')\n self.predictions_y = tf.identity(predictions_y, 'predictions_y')\n self.accuracy_y = tf.identity(acc_y, name='acc_y')\n self.accuracy_y_correct = tf.identity(tf.cast(acc_y_correct, tf.int64), name='acc_y_correct')\n self.accuracy_y_total = tf.identity(tf.cast(acc_y_total, tf.int64), name='acc_y_total')\n\n\n","sub_path":"embedalign/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":24493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"302643903","text":"\n\nfrom xai.brain.wordbase.nouns._numeral import _NUMERAL\n\n#calss header\nclass _NUMERALS(_NUMERAL, ):\n\tdef __init__(self,): \n\t\t_NUMERAL.__init__(self)\n\t\tself.name = \"NUMERALS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"numeral\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_numerals.py","file_name":"_numerals.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"434279314","text":"a, b, n = 1, 1, 0\ninputSuccess = False\nwhile not inputSuccess:\n inputSuccess = True\n try:\n n = int(input(\"Please enter n:\"))\n except ValueError:\n inputSuccess = False\n print('Please enter a valid number!')\nwhile b < n:\n c = a + b\n a = b\n b = c\nif(b == n):\n print(n, \" is a Fibonacci number.\")\nelse:\n print(n, \" is not a Fibonacci number.\")\n\n","sub_path":"PY/Week 3/HW/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"459389082","text":"# MIT License\n#\n# Copyright (c) 2018-2019 Red Hat, Inc.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF 
ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport json\nimport logging\nimport os\nimport re\nimport shlex\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Tuple, Any, Optional, Dict, Union, List\nfrom urllib.parse import urlparse\n\nimport git\nfrom pkg_resources import get_distribution, DistributionNotFound\n\nfrom packit.exceptions import PackitException, PackitCommandFailedError\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_rev_list_kwargs(opt_list):\n \"\"\"\n Converts the list of 'key=value' options to dict.\n Options without a value get True as their value.\n \"\"\"\n result = {}\n for opt in opt_list:\n opt_split = opt.split(sep=\"=\", maxsplit=1)\n if len(opt_split) == 1:\n result[opt] = True\n else:\n key, raw_val = opt_split\n try:\n val = json.loads(raw_val.lower())\n result[key] = val\n except json.JSONDecodeError:\n result[key] = raw_val\n return result\n\n\nclass StreamLogger(threading.Thread):\n def __init__(self, stream, log_level=logging.DEBUG):\n super().__init__(daemon=True)\n self.stream = stream\n self.output = []\n self.log_level = log_level\n\n def run(self):\n for line in self.stream:\n # not doing strip here on purpose so we get real output\n # and we are saving bytes b/c the output can contain chars which can't be decoded\n self.output.append(line)\n logger.log(self.log_level, line.rstrip(b\"\\n\"))\n\n def get_output(self):\n return b\"\".join(self.output)\n\n\ndef run_command(\n cmd: Union[List[str], str],\n error_message: str = None,\n cwd: str = None,\n fail: bool = True,\n output: bool = False,\n env: Optional[Dict] = None,\n decode=True,\n):\n \"\"\"\n run the provided command in a new subprocess\n\n :param cmd: command to run, either as a list of arguments or as a shell-style string\n :param error_message: if the command fails, output this error message\n :param cwd: directory to run the command in\n :param fail: raise an exception when the command fails\n :param output: if True, return command's stdout & stderr\n :param env: set these env vars in the subprocess\n :param decode: decode stdout from utf8 to string\n \"\"\"\n if not isinstance(cmd, list):\n cmd = shlex.split(cmd)\n\n logger.debug(\"cmd = '%s'\", \" \".join(cmd))\n\n cwd = cwd or str(Path.cwd())\n error_message = error_message or f\"Command {cmd} failed.\"\n\n # we need to pass complete env to Popen, otherwise we lose everything from os.environ;\n # copy it so that updating it with the caller's env does not mutate os.environ itself\n cmd_env = os.environ.copy()\n if env:\n cmd_env.update(env)\n\n # we can't use universal newlines here b/c the output from the command can be encoded\n # in something alien and we would get \"can't decode this using utf-8\" errors\n # https://github.com/packit-service/systemd-rhel8-flock/pull/9#issuecomment-550184016\n shell = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=False,\n cwd=cwd,\n env=cmd_env,\n )\n\n stdout = StreamLogger(shell.stdout, log_level=logging.DEBUG)\n stderr = StreamLogger(shell.stderr, log_level=logging.INFO)\n\n stdout.start()\n stderr.start()\n shell.wait()\n stdout.join()\n stderr.join()\n\n if shell.returncode != 0:\n logger.error(\"Command %s failed\", shell.args)\n logger.error(\"%s", 
error_message)\n if fail:\n raise PackitCommandFailedError(\n f\"Command {shell.args!r} failed: {error_message}\",\n stdout_output=stdout.get_output(),\n stderr_output=stderr.get_output(),\n )\n success = False\n else:\n success = True\n\n if not output:\n return success\n\n o = stdout.get_output()\n if decode:\n return o.decode(sys.getdefaultencoding())\n else:\n return o\n\n\ndef run_command_remote(\n cmd,\n error_message=None,\n cwd=None,\n fail=True,\n output=False,\n env: Optional[Dict] = None,\n):\n \"\"\"\n wrapper for run_command method\n Indicating that this command run some action without local effect,\n or the effect is not important.\n\n eg.\n submit something to some server, and check how server reply\n call kinit of some ticket\n \"\"\"\n return run_command(cmd, error_message, cwd, fail, output, env)\n\n\nclass PackitFormatter(logging.Formatter):\n def format(self, record):\n if record.levelno == logging.INFO:\n self._style._fmt = \"%(message)s\"\n elif record.levelno > logging.INFO:\n self._style._fmt = \"%(levelname)-8s %(message)s\"\n else: # debug\n self._style._fmt = (\n \"%(asctime)s.%(msecs).03d %(filename)-17s %(levelname)-6s %(message)s\"\n )\n return logging.Formatter.format(self, record)\n\n\ndef set_logging(\n logger_name=\"packit\",\n level=logging.INFO,\n handler_class=logging.StreamHandler,\n handler_kwargs=None,\n date_format=\"%H:%M:%S\",\n):\n \"\"\"\n Set personal logger for this library.\n\n :param logger_name: str, name of the logger\n :param level: int, see logging.{DEBUG,INFO,ERROR,...}: level of logger and handler\n :param handler_class: logging.Handler instance, default is StreamHandler (/dev/stderr)\n :param handler_kwargs: dict, keyword arguments to handler's constructor\n :param date_format: str, date style in the logs\n \"\"\"\n if level != logging.NOTSET:\n logger = logging.getLogger(logger_name)\n logger.setLevel(level)\n logger.debug(f\"logging set to {logging.getLevelName(level)}\")\n\n # do not readd handlers if they are already present\n if not [x for x in logger.handlers if isinstance(x, handler_class)]:\n handler_kwargs = handler_kwargs or {}\n handler = handler_class(**handler_kwargs)\n handler.setLevel(level)\n\n formatter = PackitFormatter(None, date_format)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n\ndef commits_to_nice_str(commits):\n return \"\\n\".join(\n f\"{commit.summary}\\n\"\n f\"Author: {commit.author.name} <{commit.author.email}>\\n\"\n f\"{commit.hexsha}\\n\"\n for commit in commits\n )\n\n\ndef is_git_repo(directory: str) -> bool:\n \"\"\"\n Test, if the directory is a git repo.\n (Has .git subdirectory?)\n \"\"\"\n return Path(directory).joinpath(\".git\").is_dir()\n\n\ndef get_repo(url: str, directory: str = None) -> git.Repo:\n \"\"\"\n Use directory as a git repo or clone repo to the tempdir.\n \"\"\"\n if not directory:\n tempdir = tempfile.mkdtemp()\n directory = tempdir\n\n # TODO: optimize cloning: single branch and last n commits?\n if is_git_repo(directory=directory):\n logger.debug(f\"Repo already exists in {directory}\")\n repo = git.repo.Repo(directory)\n else:\n logger.debug(f\"Cloning repo: {url} -> {directory}\")\n repo = git.repo.Repo.clone_from(url=url, to_path=directory, tags=True)\n\n return repo\n\n\ndef get_namespace_and_repo_name(url: str) -> Tuple[Optional[str], str]:\n if Path(url).exists():\n return None, Path(url).name\n url = url.strip(\"/\")\n try:\n if url.endswith(\".git\"):\n url = url[:-4]\n if url.startswith(\"http\"):\n # if git_url is in format 
http{s}://github.com/org/repo_name\n _, namespace, repo_name = url.rsplit(\"/\", 2)\n else:\n # If git_url is in format git@github.com:org/repo_name\n org_repo = url.split(\":\", 2)[1]\n namespace, repo_name = org_repo.split(\"/\", 2)\n except (IndexError, ValueError) as ex:\n raise PackitException(\n f\"Invalid URL format, can't obtain namespace and repository name: {url}: {ex!r}\"\n )\n return namespace, repo_name\n\n\ndef assert_existence(obj):\n \"\"\"\n Force the lazy object to be evaluated.\n \"\"\"\n if obj is None:\n raise PackitException(\"Object needs to have a value.\")\n\n\ndef nested_get(d: dict, *keys, default=None) -> Any:\n \"\"\"\n recursively obtain value from nested dict\n\n :param d: dict\n :param keys: path within the structure\n :param default: a value to return by default\n\n :return: value or None\n \"\"\"\n response = d\n for k in keys:\n try:\n response = response[k]\n except (KeyError, AttributeError, TypeError):\n # logger.debug(\"can't obtain %s: %s\", k, ex)\n return default\n return response\n\n\ndef is_a_git_ref(repo: git.Repo, ref: str) -> bool:\n try:\n commit = repo.commit(ref)\n return bool(commit)\n except git.BadName:\n return False\n\n\n@contextmanager\ndef cwd(target: Union[str, Path]):\n \"\"\"\n Manage cwd in a pushd/popd fashion.\n\n Usage:\n\n with cwd(tmpdir):\n do something in tmpdir\n \"\"\"\n curdir = os.getcwd()\n os.chdir(target)\n try:\n yield\n finally:\n os.chdir(curdir)\n\n\n# TODO: merge this function into parse_git_repo in ogr\n# https://github.com/packit-service/packit/pull/555#discussion_r332871418\ndef git_remote_url_to_https_url(inp: str) -> str:\n \"\"\"\n turn the provided git remote URL into an https URL;\n returns empty string if the input can't be processed\n \"\"\"\n if not inp:\n return \"\"\n parsed = urlparse(inp)\n if parsed.scheme and parsed.scheme in [\"http\", \"https\"]:\n logger.debug(f\"Provided input {inp} is a url.\")\n return inp\n elif \"@\" in inp:\n url_str = inp.replace(\"ssh://\", \"\")\n # now we can sub the colon (:) with slash (/)\n url_str = url_str.replace(\":\", \"/\")\n # and finally, get rid of the git@ junk\n url_str = re.sub(r\"\\w+@\", \"https://\", url_str)\n # let's verify it's good\n try:\n urlparse(url_str)\n except Exception:\n logger.error(f\"unable to process {inp}\")\n raise PackitException(f\"Unable to process {inp}.\")\n else:\n logger.debug(f\"SSH style URL {inp} turned into HTTPS {url_str}\")\n return url_str\n logger.warning(f\"{inp} is not a URL we recognize\")\n return \"\"\n\n\ndef get_packit_version() -> str:\n try:\n return get_distribution(\"packitos\").version\n except DistributionNotFound:\n return \"NOT_INSTALLED\"\n","sub_path":"packit/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"579296593","text":"import requests, json, random, time\nfrom .bingSpeech.textToSpeech import playSelfieAudioResponse, tweetAck\nfrom azure.storage.blob import BlockBlobService\nimport pygame, sys\nimport pygame.camera\nfrom random_words import RandomWords\nfrom pprint import pprint\nfrom pygame.locals import *\nfrom azure.storage.blob import ContentSettings\n\ndef captureSelfie():\n width = 1280\n height = 720\n dimensions = (width, height)\n pygame.init()\n print(\"Initializing PyGame\")\n pygame.camera.init()\n flag = True\n count = 0\n while(flag):\n try:\n cam = pygame.camera.Camera(\"/dev/video0\",dimensions)\n cam.start()\n print(\"Camera started\")\n 
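# grab one frame, write it to disk as a JPEG, then release the camera\n 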
imageName = 'selfie.jpg'\n image = cam.get_image()\n pygame.image.save(image,imageName)\n print(\"Image saved\")\n cam.stop()\n pygame.quit()\n flag = False\n playSelfieAudioResponse()\n return\n except:\n print(\"Camera busy. Stand by.\")\n time.sleep(1)\n count = count + 1\n if(count>9):\n print(\"You might need to reconnect the camera. My apologies.\")\n pygame.quit()\n flag = False\n return\n\ndef getStorageCredentials():\n credentials = json.load(open('../credentials.json'))\n storageCreds = credentials[\"storageAccount\"]\n return(storageCreds)\n\ndef connectToAzure():\n storageCreds = getStorageCredentials()\n pprint(storageCreds)\n block_blob_service = BlockBlobService(account_name=storageCreds['account_name'], account_key=storageCreds['account_key'])\n return(block_blob_service)\n\ndef randomName():\n num = random.randrange(0, 1000)\n rw = RandomWords()\n return(rw.random_word() + str(num) + \".jpg\") \n \ndef sendToBlob():\n block_blob_service = connectToAzure()\n block_blob_service.create_blob_from_path(\n 'mitcsyimages',\n randomName(),\n 'selfie.jpg',\n content_settings=ContentSettings(content_type='image/jpg')\n )\n return(True)\n\ndef storeImage():\n print(\"Storing image\")\n# captureSelfie()\n print(\"Sending to blob\")\n sendToBlob()\n print(\"Sent!\")\n \ndef sendToAzure():\n captureSelfie()\n storeImage()","sub_path":"staging/snowboy/lib/azureblob/azureblob.py","file_name":"azureblob.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"151534686","text":"class Day:\n def __init__(self, day = 1, month = 1, year = 1):\n self.day = day\n self.month = month\n self.year = year\n\nclass Node:\n def __init__(self, data = None):\n self.data = data\n self.next = None\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def insertTail(self, data):\n p = Node(data)\n if self.head is None:\n self.head = self.tail = p\n else:\n self.tail.next = p\n self.tail = p\n \n def newestDay(self):\n newest = Day()\n if self.head is None:\n return\n cur = self.head\n while cur:\n if newest.year < cur.data.year:\n newest = cur.data\n elif newest.year == cur.data.year and newest.month < cur.data.month:\n newest = cur.data\n elif newest.year == cur.data.year and newest.month == cur.data.month and newest.day < cur.data.day:\n newest = cur.data\n cur = cur.next\n return newest\n\nl = LinkedList()\nn = int(input())\n\nfor i in range(n):\n x = list(map(int,input().split()))\n x = Day(x[0],x[1],x[2])\n l.insertTail(x)\n\nnewest = l.newestDay()\nprint(newest.day,newest.month,newest.year)","sub_path":"BigO/Green/Lecture12_LinkedList/newestDay.py","file_name":"newestDay.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"635004023","text":"from math import prod\nimport networkx as nx\nfrom bag_rules import sample, sample_2, nested_sample, rules\n\n\ndef parse_row(row):\n \"\"\"For a single rule (row), create acceptable input for nx.Graph.add_edges_from()\"\"\"\n parent, children = row.split(\" bags contain \")\n parsed_children = []\n if children != \"no other bags.\":\n children = children.replace(\"bags\", \"\").split(\", \")\n for child in children: \n num, adj, color = child.split(\" \")[:3]\n name = adj + \" \" + color\n parsed_children.append((parent, name, {'max_count': int(num)}))\n return {parent: parsed_children}\n \n\ndef parse_all(text_blob):\n \"\"\"Iterate over all given 
rules (text_blob) and collect the edges \n in a format that is acceptable for: \n G.add_edges_from(edges)\n \"\"\"\n parsed_rules = {}\n for row in text_blob.split(\"\\n\"):\n parsed_rules.update(parse_row(row))\n edges = []\n for grp in parsed_rules.values():\n edges += grp\n return edges\n\n\ndef build_graph(edges):\n \"\"\"Create a directed graph between all bag colors\n also contains 'max_count' data\n \"\"\"\n G = nx.DiGraph()\n G.add_edges_from(edges)\n return G \n\n\ndef count_total_bags_contained_in(bag, G):\n sink_nodes = [node for node, n_out in G.out_degree(G.nodes()) if n_out == 0]\n all_paths = [p for sink in sink_nodes \n for p in nx.all_simple_paths(G, source=bag, target=sink)\n ]\n total = 0\n already_seen = set()\n for path in all_paths:\n for parent, child in zip(path[:-1], path[1:]):\n partial_path = tuple(path[:path.index(child)+1])\n \n if partial_path not in already_seen:\n par_chld_pairs = zip(partial_path[:-1], partial_path[1:])\n multipliers = [\n G.get_edge_data(par, ch)['max_count'] for par, ch in par_chld_pairs\n ]\n total += prod(multipliers)\n already_seen.add(partial_path) \n return total\n \n \n# { answer : text_blob }; for P1 the key is str(answer), for P2 it is just a normal number\n# WARNING: Cannot have multiple different text_blobs with the same numerical answer\ntests = {'4': sample, 32: sample, 126: sample_2, 35653: nested_sample}\n\nfor i, (ans, text_blob) in enumerate(tests.items()):\n G = build_graph(parse_all(text_blob)) \n # Part 1 : How many ultimate parent bags can eventually hold 'shiny gold'?\n if isinstance(ans, str): \n pred = len(nx.algorithms.ancestors(G, 'shiny gold'))\n assert pred == int(ans), (\n f\"Test #{i} Failed\\n\\t\\tTrue => {ans} != {pred} <= Predicted\"\n )\n # Part 2: How many bags can a 'shiny gold' bag hold (with all filled to capacity)?\n else: \n pred = count_total_bags_contained_in('shiny gold', G)\n assert pred == ans, (\n f\"#{i} Failed\\n\\t\\tTrue => {ans} != {pred} <= Predicted\"\n )\n \n \nif __name__ == \"__main__\":\n G = build_graph(parse_all(rules))\n # Part 1\n print(len(nx.algorithms.ancestors(G, 'shiny gold')))\n # Part 2\n print(count_total_bags_contained_in('shiny gold', G))\n\n","sub_path":"day07/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"568762219","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis file is covered by the LICENSING file in the root of this project.\n\"\"\"\n\nimport json\nimport os\nfrom time import strftime\nfrom uuid import uuid1\n\nfrom werkzeug.datastructures import FileStorage\n\nfrom hackathon import RequiredFeature\nfrom hackathon.constants import FILE_TYPE, HEALTH_STATUS, HEALTH\nfrom hackathon.storage.storage import Storage\n\n__all__ = [\"AzureStorage\"]\n\n\nclass AzureStorage(Storage):\n \"\"\"Hackathon file storage that saves all templates on Azure storage\n\n template files will be saved at \"http://hackathon.blob.core.chinacloudapi.cn/templates/<blob_name>\"\n uploaded images will be saved at \"http://hackathon.blob.core.chinacloudapi.cn/images/<blob_name>\"\n \"\"\"\n\n def save(self, context):\n \"\"\"Save a file to Azure storage\n\n :type context: Context\n :param context: must have {\"content\":\"***\", \"file_type\":\"***\"} keys\n\n :rtype: Context\n :return: the updated context which should include the full path of the saved file\n\n :note: in config file, there must exist \"storage.azure.image_container\",\n 
\"storage.azure.template_container\"\n and \"storage.azure.blob_service_host_base\" configuration\n \"\"\"\n container_name = self.__get_container_by_file_type(context.file_type)\n hackathon_name = context.get(\"hackathon_name\")\n blob_name = self.__generate_file_name(hackathon_name, context.file_type, context.file_name)\n\n if context.get('content'):\n file_content = context.content\n if isinstance(file_content, file) or isinstance(file_content, FileStorage):\n result = self.azure_blob_service.upload_file_to_azure(container_name, blob_name, file_content)\n elif isinstance(file_content, dict):\n text = json.dumps(file_content)\n result = self.azure_blob_service.upload_file_to_azure_from_text(container_name, blob_name, text)\n else:\n text = str(file_content)\n result = self.azure_blob_service.upload_file_to_azure_from_text(container_name, blob_name, text)\n else:\n assert context.get('file_path')\n file_path = context.file_path\n result = self.azure_blob_service.upload_file_to_azure_from_path(container_name, blob_name, file_path)\n\n context[\"url\"] = result\n self.log.debug(\"File saved at:\" + result)\n return context\n\n def delete(self, url):\n \"\"\"Delete file from Azure storage\n\n :type url: str|unicode\n :param url: the url of file to be deleted which are created in 'save'\n\n :rtype: bool\n :return: True if successfully deleted, otherwise False\n \"\"\"\n try:\n url_arr = url.split('/')\n blob_name = url_arr[-1]\n container_name = url_arr[-2]\n\n self.azure_blob_service.delete_file_from_azure(container_name, blob_name)\n return True\n except Exception as e:\n self.log.error(e)\n return False\n\n def report_health(self):\n \"\"\"Report the status of Azure storage\"\"\"\n try:\n if self.azure_blob_service.create_container_in_storage('images', 'container'):\n return {\n HEALTH.STATUS: HEALTH_STATUS.OK,\n HEALTH.DESCRIPTION: \"You can use Azure resources now.\",\n \"type\": \"AzureStorage\"\n }\n else:\n return {\n HEALTH.STATUS: HEALTH_STATUS.ERROR\n }\n except Exception as e:\n self.log.error(e)\n return {\n HEALTH.STATUS: HEALTH_STATUS.ERROR\n }\n\n def __init__(self):\n self.__containers = {\n FILE_TYPE.TEMPLATE: self.util.safe_get_config(\"storage.azure.template_container\", \"templates\"),\n FILE_TYPE.HACK_IMAGE: self.util.safe_get_config(\"storage.azure.image_container\", \"images\"),\n FILE_TYPE.AZURE_CERT: self.util.safe_get_config(\"storage.azure.certificate_container\", \"certificate\"),\n FILE_TYPE.USER_FILE: self.util.safe_get_config(\"storage.azure.user_file_container\", \"userfile\"),\n FILE_TYPE.TEAM_FILE: self.util.safe_get_config(\"storage.azure.team_file_container\", \"teamfile\"),\n FILE_TYPE.HACK_FILE: self.util.safe_get_config(\"storage.azure.hack_file_container\", \"hackfile\"),\n }\n self.azure_blob_service = RequiredFeature(\"azure_blob_service\")\n\n def __get_container_by_file_type(self, file_type):\n \"\"\"Get container name of azure by file type\n\n :type file_type: str| unicode\n :param file_type: type of file defined at FILE_TYPE in constants.py\n \"\"\"\n if file_type in self.__containers:\n return self.__containers[file_type]\n return \"default\"\n\n @staticmethod\n def __generate_file_name(hackathon_name, file_type, file_name):\n \"\"\"refresh file_name = hack_name + uuid(10) + time + suffix\n\n Only image name will be replaced since it may contain Chinese characters\n \"\"\"\n if file_type == FILE_TYPE.HACK_IMAGE:\n suffix = file_name.split('.')[-1]\n hackathon_name = \"\" if hackathon_name is None else hackathon_name + \"/\"\n real_name = 
hackathon_name + str(uuid1())[0:9] + strftime(\"%Y%m%d%H%M%S\") + \".\" + suffix\n return real_name\n else:\n return strftime(\"%Y%m%d/\") + file_name\n","sub_path":"open-hackathon-server/src/hackathon/storage/azure_storage.py","file_name":"azure_storage.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"377072218","text":"from django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth.models import User\r\nfrom django.core.servers.basehttp import FileWrapper\r\nfrom django.core.urlresolvers import reverse\r\nfrom django.forms.util import ErrorList\r\nfrom django.http import HttpResponse, HttpResponseRedirect\r\nfrom django.shortcuts import get_list_or_404, get_object_or_404, render_to_response\r\nfrom django.template import RequestContext\r\nfrom epic.comments.forms import PostCommentForm\r\nfrom epic.core.util import active_user_required\r\nfrom epic.core.models import Item\r\nfrom epic.core.util.view_utils import *\r\nfrom epic.datasets.models import DataSet, DataSetDownload\r\nfrom epic.projects.forms import ProjectDataSetFormSet, ProjectForm\r\nfrom epic.projects.models import Project, ProjectDownload\r\nimport epic.core.util.view_utils\r\nimport re\r\nimport tempfile\r\nimport zipfile\r\n\r\n\r\n@login_required\r\n@active_user_required\r\ndef create_project(request):\r\n if request.method != 'POST':\r\n new_project_form = ProjectForm()\r\n project_datasets = ProjectDataSetFormSet(prefix='project_datasets')\r\n else:\r\n project_datasets = ProjectDataSetFormSet(request.POST, prefix='project_datasets')\r\n new_project_form = ProjectForm(request.POST)\r\n \r\n if new_project_form.is_valid() and project_datasets.is_valid():\r\n name = new_project_form.cleaned_data['name']\r\n categories = new_project_form.cleaned_data['category']\r\n description = new_project_form.cleaned_data['description']\r\n new_project = Project.objects.create(\r\n creator=request.user,\r\n name=name,\r\n description=description,\r\n is_active=True)\r\n \r\n# We need to save the categories as step 2 because it is a m2m relationship.\r\n new_project.categories = categories\r\n new_project.save()\r\n \r\n for dataset_form in project_datasets.forms:\r\n if dataset_form.is_valid() and 'dataset' in dataset_form.cleaned_data:\r\n dataset = dataset_form.cleaned_data['dataset']\r\n new_project.datasets.add(dataset)\r\n \r\n view_project_url = get_item_url(new_project, 'epic.projects.views.view_project')\r\n \r\n return HttpResponseRedirect(view_project_url)\r\n \r\n return render_to_response(\r\n 'projects/create_project.html',\r\n {'new_project_form': new_project_form, 'project_datasets': project_datasets},\r\n context_instance=RequestContext(request))\r\n\r\n@login_required\r\n@active_user_required\r\ndef confirm_delete_project(request, item_id, slug):\r\n project = get_object_or_404(Project, pk=item_id)\r\n user = request.user\r\n view_project_url = get_item_url(project, 'epic.projects.views.view_project')\r\n \r\n if not user_is_item_creator(user, project):\r\n return HttpResponseRedirect(view_project_url)\r\n \r\n return render_to_response(\r\n 'projects/confirm_delete_project.html',\r\n {'project': project,},\r\n context_instance=RequestContext(request))\r\n\r\n@login_required\r\n@active_user_required\r\ndef delete_project(request, item_id, slug):\r\n project = get_object_or_404(Project, pk=item_id)\r\n user = request.user\r\n \r\n if not user_is_item_creator(user, project):\r\n view_project_url = 
get_item_url(project, 'epic.projects.views.view_project')\r\n\r\n return HttpResponseRedirect(view_project_url)\r\n \r\n project.is_active = False\r\n project.save()\r\n \r\n view_profile_url = reverse('epic.core.views.view_profile')\r\n \r\n return HttpResponseRedirect(view_profile_url)\r\n\r\n@login_required\r\n@active_user_required\r\ndef edit_project(request, item_id, slug):\r\n project = get_object_or_404(Project, pk=item_id)\r\n user = request.user\r\n \r\n view_project_url = get_item_url(project, 'epic.projects.views.view_project')\r\n \r\n if not user_is_item_creator(user, project):\r\n return HttpResponseRedirect(view_project_url)\r\n \r\n if request.method != \"POST\":\r\n initial_edit_project_data = {'name': project.name, 'description': project.description,}\r\n \r\n initial_edit_project_data['category'] = project.categories.values_list('id', flat=True) \r\n \r\n edit_form = ProjectForm(initial=initial_edit_project_data)\r\n initial_project_datasets = []\r\n\r\n for dataset in project.datasets.all():\r\n initial_project_datasets.append(\r\n {'dataset_url': dataset.get_absolute_url()})\r\n \r\n project_datasets = ProjectDataSetFormSet(\r\n prefix='project_datasets', initial=initial_project_datasets)\r\n else:\r\n edit_form = ProjectForm(request.POST)\r\n project_datasets = ProjectDataSetFormSet(\r\n request.POST, prefix='project_datasets')\r\n \r\n if edit_form.is_valid() and project_datasets.is_valid():\r\n project.name = edit_form.cleaned_data['name']\r\n project.categories = edit_form.cleaned_data['category']\r\n project.description = edit_form.cleaned_data['description']\r\n project.save()\r\n\r\n for dataset in project.datasets.all():\r\n project.datasets.remove(dataset)\r\n\r\n for dataset_form in project_datasets.forms:\r\n if dataset_form.is_valid():\r\n if 'dataset' in dataset_form.cleaned_data:\r\n dataset = dataset_form.cleaned_data['dataset']\r\n project.datasets.add(dataset)\r\n\r\n view_project_url = get_item_url(project, 'epic.projects.views.view_project')\r\n\r\n return HttpResponseRedirect(view_project_url)\r\n \r\n render_to_response_data = {\r\n 'project': project,\r\n 'edit_project_form': edit_form,\r\n 'project_datasets': project_datasets,\r\n 'datasets': project.datasets.all(),\r\n }\r\n \r\n return render_to_response(\r\n 'projects/edit_project.html',\r\n render_to_response_data,\r\n context_instance=RequestContext(request))\r\n\r\nPER_PAGE = epic.core.util.view_utils.DEFAULT_OBJECTS_PER_PAGE\r\n\r\ndef view_projects(request):\r\n projects = Project.objects.active().order_by('-created_at')\r\n projects_page = paginate(projects, request.GET, PER_PAGE)\r\n\r\n return render_to_response(\r\n 'projects/view_projects.html',\r\n {'projects_page': projects_page},\r\n context_instance=RequestContext(request))\r\n\r\ndef view_project(request, item_id, slug):\r\n project = get_object_or_404(Project, pk=item_id)\r\n form = PostCommentForm()\r\n user = request.user\r\n\r\n return render_to_response(\r\n 'projects/view_project.html',\r\n {'project': project, 'form': form},\r\n context_instance=RequestContext(request))\r\n\r\ndef view_user_project_list(request, user_id):\r\n requested_user = get_object_or_404(User, pk=user_id)\r\n projects = Project.objects.active().filter(creator=requested_user).order_by('-created_at')\r\n \r\n return render_to_response(\r\n 'projects/view_user_project_list.html',\r\n {'projects': projects, 'requested_user': requested_user},\r\n context_instance=RequestContext(request))\r\n\r\n@login_required\r\n@active_user_required\r\ndef 
download_all(request, item_id, slug):\r\n project = get_object_or_404(Project, pk=item_id)\r\n user = request.user\r\n \r\n if project.is_active:\r\n datasets = project.datasets.all()\r\n temp = tempfile.TemporaryFile()\r\n archive = zipfile.ZipFile(temp, 'w')\r\n \r\n project_download = ProjectDownload(parent_project=project,\r\n downloader=user)\r\n project_download.save()\r\n \r\n for dataset in datasets:\r\n \r\n dataset_zip_file_name = dataset.slug + \"-all.zip\"\r\n \r\n dataset_download = DataSetDownload(parent_dataset=dataset,\r\n downloader=user,\r\n file_name=dataset_zip_file_name,\r\n is_readme=True, \r\n is_download_all=True,\r\n parent_project=project_download)\r\n dataset_download.save()\r\n \r\n for file in dataset.files.all():\r\n file.file_contents.open('r')\r\n archive.writestr(\r\n (dataset.name + '/' + file.get_short_name()), file.file_contents.read())\r\n file.file_contents.close()\r\n\r\n archive.close()\r\n \r\n wrapper = FileWrapper(temp)\r\n \r\n response = HttpResponse(wrapper, content_type='application/zip')\r\n response['Content-Disposition'] = 'attachment; filename=%s.zip' % project.name\r\n response['Content-Length'] = temp.tell()\r\n \r\n temp.seek(0)\r\n\r\n return response\r\n else:\r\n view_project_url = get_item_url(project, 'epic.projects.views.view_project')\r\n\r\n return HttpResponseRedirect(view_project_url)\r\n","sub_path":"trunk/epic/projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"227089151","text":"li = {}\r\nfor i in range(int(input('the number of things:'))):\r\n x, y = input().split()\r\n li[int(x)] = y\r\nprint(li)\r\nn = int(input())\r\nfor i in range(1, n + 1):\r\n count = 0\r\n for j in li.keys():\r\n if i % j == 0:\r\n print(li[j], end=' ')\r\n count = 1\r\n if count == 0:\r\n print(i)\r\n else:\r\n print()\r\n","sub_path":"mysolution/7up.py","file_name":"7up.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"342681103","text":"import numpy as np\nimport pandas as pd\nfrom copy import deepcopy\nfrom classes.crout import mat_solve\nfrom classes.sparse import Sparse\nfrom numpy import sin, cos, angle, imag, real\n\nclass PowerSystem:\n\tdef __init__(self, filename, sparse=False):\n\t\tself.sparse = sparse\n\t\tself.busNumber = 0\n\t\tself.busArea = 2\n\t\tself.busZone = 3\n\t\tself.busType = 4\n\t\tself.busFinalVoltage = 5\n\t\tself.busFinalAngle = 6\n\t\tself.busLoadMW = 7\n\t\tself.busLoadMVAR = 8\n\t\tself.busGenMW = 9\n\t\tself.busGenMVAR = 10\n\t\tself.busBaseKV = 11\n\t\tself.busDesiredVolts = 12\n\t\tself.busMaxMVAR = 13\n\t\tself.busMinMVAR = 14\n\t\tself.busShuntG = 15\n\t\tself.busShuntB = 16\n\t\tself.busRemoteControlledBusNumber = 17\n\n\t\tself.branchFromBus = 0\n\t\tself.branchToBus = 1\n\t\tself.branchR = 6\n\t\tself.branchX = 7\n\t\tself.branchB = 8\n\t\tself.branchTurnsRatio = 14\n\t\tself.branchPhaseShift = 15\n\n\t\tself.bus_data, self.branch_data, self.p_base = self.read_case(filename)\n\n\t\t# Make the Y-bus matrix\n\t\tself.y_bus = self.makeybus()\n\n\t\t# Get bus types\n\t\ttypes = self.bus_data[:, self.busType]\n\t\tself.slack = np.where(types == 3)[0]\n\t\tself.v_slack = self.bus_data[self.slack, self.busDesiredVolts]\n\t\tself.pv = np.where(types == 2)[0] # list of PV bus indices\n\t\tself.pq = np.where(types < 2)[0] # list of PQ bus indices\n\t\tself.pvpq = 
np.sort(np.concatenate((self.pv, self.pq))) # list of indices of non-slack buses\n\t\tself.gen_buses = np.where(types == 2)[0] # list of generator bus indices\n\n\t\t# Calculate scheduled P and Q for each bus\n\t\tself.mw_gen = self.bus_data[self.pvpq, self.busGenMW]\n\t\tself.mw_load = self.bus_data[self.pvpq, self.busLoadMW]\n\t\tself.mvar_load = self.bus_data[self.pq, self.busLoadMVAR]\n\t\tself.mw_gen_full = self.bus_data[:, self.busGenMW]\n\t\tself.mw_load_full = self.bus_data[:, self.busLoadMW]\n\t\tself.p_gen_full = self.mw_gen_full / self.p_base\n\t\tself.mvar_load_full = self.bus_data[:, self.busLoadMVAR]\n\t\tself.p_load_full = self.mw_load_full / self.p_base\n\t\tself.q_load_full = self.mvar_load_full / self.p_base\n\t\tself.psched = np.array(self.mw_gen - self.mw_load) / self.p_base\n\t\tself.qsched = np.array(- self.mvar_load) / self.p_base\n\t\tself.qsched_full = np.array(- self.mvar_load_full) / self.p_base\n\t\tself.psched_full = np.array(self.mw_gen_full - self.mw_load_full) / self.p_base\n\t\tself.q_lim = np.c_[\n\t\t\tself.bus_data[:, self.busMaxMVAR] / self.p_base,\n\t\t\tself.bus_data[:, self.busMinMVAR] / self.p_base]\n\t\tself.q_min_bus = np.array([]).astype(int)\n\t\tself.q_max_bus = np.array([]).astype(int)\n\n\t@staticmethod\n\tdef read_case(file_name):\n\t\tmva_base = 1\n\t\twith open(file_name) as f:\n\t\t\tfor line in f:\n\t\t\t\tmva_base = float(line[31:37])\n\t\t\t\tbreak\n\n\t\t# count rows of bus data\n\t\ti = 0\n\t\tbus_rows = 0\n\t\tbus_col = 18\n\t\twith open(file_name) as f:\n\t\t\tfor line in f:\n\t\t\t\t# Bus data\n\t\t\t\tif i >= 2:\n\t\t\t\t\tif line[0] == '-':\n\t\t\t\t\t\tbus_rows = i - 2\n\t\t\t\t\t\tbreak\n\t\t\t\ti = i + 1\n\t\t# Build bus data array\n\t\tbus_data = np.zeros((bus_rows, bus_col))\n\t\ti = 0\n\t\tj = 0\n\t\twith open(file_name) as f:\n\t\t\tfor line in f:\n\t\t\t\tif i >= 2 and j < bus_rows:\n\t\t\t\t\tif line[0] == '-':\n\t\t\t\t\t\tbreak\n\t\t\t\t\tbus_data[j, 0] = int(line[0:4])\n\t\t\t\t\tbus_data[j, 1] = int(line[0:4])\n\t\t\t\t\tbus_data[j, 2] = int(line[18:20])\n\t\t\t\t\tbus_data[j, 3] = int(line[20:23])\n\t\t\t\t\tbus_data[j, 4] = int(line[24:26])\n\t\t\t\t\tbus_data[j, 5] = float(line[27:33])\n\t\t\t\t\tbus_data[j, 6] = float(line[33:40])\n\t\t\t\t\tbus_data[j, 7] = float(line[40:49])\n\t\t\t\t\tbus_data[j, 8] = float(line[49:59])\n\t\t\t\t\tbus_data[j, 9] = float(line[59:67])\n\t\t\t\t\tbus_data[j, 10] = float(line[67:75])\n\t\t\t\t\tbus_data[j, 11] = float(line[76:83])\n\t\t\t\t\tbus_data[j, 12] = float(line[84:90])\n\t\t\t\t\tbus_data[j, 13] = float(line[90:98])\n\t\t\t\t\tbus_data[j, 14] = float(line[98:106])\n\t\t\t\t\tbus_data[j, 15] = float(line[106:114])\n\t\t\t\t\tbus_data[j, 16] = float(line[114:122])\n\t\t\t\t\tbus_data[j, 17] = int(line[123:127])\n\n\t\t\t\t\tj = j + 1\n\t\t\t\ti = i + 1\n\n\t\tbranchDataStart = bus_rows + 4\n\t\ti = 0\n\t\tbranch_rows = 0\n\t\tbranch_col = 21\n\t\twith open(file_name) as f:\n\t\t\tfor line in f:\n\t\t\t\t# Bus data\n\t\t\t\tif i >= branchDataStart:\n\t\t\t\t\tif line[0] == '-':\n\t\t\t\t\t\tbranch_rows = i - branchDataStart\n\t\t\t\t\t\tbreak\n\t\t\t\ti = i + 1\n\t\tbranch_data = np.zeros((branch_rows, branch_col))\n\t\ti = 0\n\t\tj = 0\n\t\twith open(file_name) as f:\n\t\t\tfor line in f:\n\t\t\t\tif i >= branchDataStart and j < branch_rows:\n\t\t\t\t\tif line[0] == '-':\n\t\t\t\t\t\tbreak\n\t\t\t\t\tbranch_data[j, 0] = int(line[0:4]) # Columns 1- 4 Tap bus number (I) *\n\t\t\t\t\tbranch_data[j, 1] = int(line[5:9]) # Columns 6- 9 Z bus number (I) 
*\n\t\t\t\t\tbranch_data[j, 2] = int(line[10:12]) # Columns 11-12 Load flow area (I)\n\t\t\t\t\tbranch_data[j, 3] = int(line[12:15]) # Columns 13-14 Loss zone (I)\n\t\t\t\t\tbranch_data[j, 4] = int(line[16:17]) # Column 17 Circuit (I) * (Use 1 for single lines)\n\t\t\t\t\tbranch_data[j, 5] = int(line[18:19]) # Column 19 Type (I) *\n\t\t\t\t\tbranch_data[j, 6] = float(line[19:29]) # Columns 20-29 Branch resistance R, per unit (F) *\n\t\t\t\t\tbranch_data[j, 7] = float(line[29:40]) # Columns 30-40 Branch reactance X, per unit (F) *\n\t\t\t\t\tbranch_data[j, 8] = float(line[40:50]) # Columns 41-50 Line charging B, per unit (F) *\n\t\t\t\t\tbranch_data[j, 9] = int(line[50:55]) # Columns 51-55 Line MVA rating No 1 (I) Left justify!\n\t\t\t\t\tbranch_data[j, 10] = int(line[56:61]) # Columns 57-61 Line MVA rating No 2 (I) Left justify!\n\t\t\t\t\tbranch_data[j, 11] = int(line[62:67]) # Columns 63-67 Line MVA rating No 3 (I) Left justify!\n\t\t\t\t\tbranch_data[j, 12] = int(line[68:72]) # Columns 69-72 Control bus number\n\t\t\t\t\tbranch_data[j, 13] = int(line[73:74]) # Column 74 Side (I)\n\t\t\t\t\tbranch_data[j, 14] = float(line[75:82]) # Columns 77-82 Transformer final turns ratio (F)\n\t\t\t\t\tbranch_data[j, 15] = float(\n\t\t\t\t\t\tline[83:90]) # Columns 84-90 Transformer (phase shifter) final angle (F)\n\t\t\t\t\tbranch_data[j, 16] = float(line[90:97]) # Columns 91-97 Minimum tap or phase shift (F)\n\t\t\t\t\tbranch_data[j, 17] = float(line[97:104]) # Columns 98-104 Maximum tap or phase shift (F)\n\t\t\t\t\tbranch_data[j, 18] = float(line[105:111]) # Columns 106-111 Step size (F)\n\t\t\t\t\tbranch_data[j, 19] = float(line[112:118]) # Columns 113-119 Minimum voltage, MVAR or MW limit (F)\n\t\t\t\t\tbranch_data[j, 20] = float(line[119:126]) # Columns 120-126 Maximum voltage, MVAR or MW limit (F)\n\n\t\t\t\t\tj = j + 1\n\t\t\t\ti = i + 1\n\t\treturn bus_data, branch_data, mva_base\n\n\tdef makeybus(self, make_bpp=False, make_bp=False, override=None):\n\t\t# Produces the Y bus matrix of a power system.\n\t\t# Written by Nathan Gray\n\t\tif isinstance(override, tuple):\n\t\t\tbus_data = override[0]\n\t\t\tbranch_data = override[1]\n\t\telse:\n\t\t\tbus_data = self.bus_data\n\t\t\tbranch_data = self.branch_data\n\t\tbusShuntG = 15\n\t\tbusShuntB = 16\n\n\t\tbranchR = 6\n\t\tbranchX = 7\n\t\tbranchB = 8\n\t\tbranchTurnsRatio = 14\n\t\tbranchPhaseShift = 15\n\n\t\tnl = branch_data.shape[0] # number of lines\n\t\tn = bus_data.shape[0] # number of buses\n\t\t# Prepare data for algorithm\n\t\tif make_bp:\n\t\t\tz = branch_data[:, branchX] * 1j\n\t\telse:\n\t\t\tz = branch_data[:, branchR] + branch_data[:, branchX] * 1j\n\t\ty = z ** -1\n\t\tb_line = branch_data[:, branchB]\n\t\tif make_bp:\n\t\t\tratio = np.ones(nl)\n\t\telse:\n\t\t\tratio = np.where(branch_data[:, branchTurnsRatio] == 0.0, 1, branch_data[:, branchTurnsRatio])\n\t\tif make_bpp:\n\t\t\tshift = np.zeros(nl)\n\t\telse:\n\t\t\tshift = np.radians(branch_data[:, branchPhaseShift])\n\t\tt = ratio * np.cos(shift) + 1j * ratio * np.sin(shift)\n\t\t# Shunt admittances for each bus.\n\t\ty_shunt = bus_data[:, busShuntG] + 1j * bus_data[:, busShuntB]\n\t\tfrombus = branch_data[:, 0]\n\t\ttobus = branch_data[:, 1]\n\n\t\tif self.sparse:\n\t\t\ty_bus = Sparse.zeros((n, n), dtype=complex) # initialize Y Bus Matrix\n\t\telse:\n\t\t\ty_bus = np.zeros((n, n), dtype=complex) # initialize Y Bus Matrix\n\t\t# The following algorithm takes the arguments: y, b_line, t, y_shunt\n\t\t# Create the four entries of a Y-Bus matrix for each line.\n\t\t#\n\t\t# 
i|-}|{--~~--|j\n\t\t# \t t:1 y\n\t\t#\n\t\t# [y/|t|^2 -y/t*]\n\t\t# [-y/t y ]\n\n\t\tyjj = y + 1j * b_line / 2\n\t\tyii = yjj / (abs(t) ** 2)\n\t\tyij = -y / np.conj(t)\n\t\tyji = -y / t\n\n\t\tfor k in range(nl):\n\t\t\ti = int(frombus[k]) - 1\n\t\t\tj = int(tobus[k]) - 1\n\t\t\ty_bus[i, j] = yij[k]\n\t\t\ty_bus[j, i] = yji[k]\n\t\t\ty_bus[i, i] += yii[k]\n\t\t\ty_bus[j, j] += yjj[k]\n\t\tif not make_bp:\n\t\t\tfor i in range(n):\n\t\t\t\ty_bus[i, i] += y_shunt[i]\n\n\t\treturn y_bus\n\n\t# ~~~~~ Power Flows ~~~~~\n\tdef flat_start(self):\n\t\t# Initialize with flat start\n\t\tv_flat = np.array(\n\t\t\tnp.where(self.bus_data[:, self.busDesiredVolts] == 0.0, 1, self.bus_data[:, self.busDesiredVolts]))\n\t\td_flat = np.zeros(v_flat.shape)\n\t\treturn v_flat, d_flat\n\n\t@staticmethod\n\tdef pf_dc(d_start, y, pvpq, psched, lam=None):\n\t\td = deepcopy(d_start)\n\t\tbdc = -y.imag[pvpq, :][:, pvpq]\n\t\tif lam is not None:\n\t\t\td[pvpq] = mat_solve(bdc, lam * psched)\n\t\telse:\n\t\t\td[pvpq] = mat_solve(bdc, psched)\n\t\treturn d\n\n\tdef pf_newtonraphson(self, v_start, d_start, prec=2, maxit=4, qlim=True, qlim_prec=2, lam=None, verbose=True, debug_file=None):\n\t\t# Uses Newton-Raphson method to solve the power-flow of a power system.\n\t\t# Written by Nathan Gray\n\t\t# Arguments:\n\t\t# v_start: list of voltage magnitudes in system\n\t\t# d_start: list of voltage phase angles in system\n\t\t# prec: program finishes when all mismatches < 10^-abs(prec)\n\t\t# maxit: maximum number of iterations\n\t\tif verbose:\n\t\t\tprint(\"\\n~~~~~~~~~~ Start Newton-Raphson Method ~~~~~~~~~~\\n\")\n\t\tpsched = deepcopy(self.psched)\n\t\tqsched = deepcopy(self.qsched)\n\t\tif lam is not None:\n\t\t\tpsched = lam * psched\n\t\t\tqsched = lam * qsched\n\t\tv = deepcopy(v_start)\n\t\td = deepcopy(d_start)\n\t\ty = self.y_bus\n\t\tpvpq = self.pvpq\n\t\tpq = deepcopy(self.pq)\n\t\tpv = deepcopy(self.pv)\n\t\tpq_last = deepcopy(pq)\n\t\tn = np.shape(y)[0]\n\n\t\tif debug_file is not None:\n\t\t\tresults = []\n\t\t\tdf_space = pd.DataFrame(data={\"\": [\"\"]})\n\n\t\ti = 0\n\t\t# Newton Raphson\n\t\tfor i in range(maxit + 1):\n\t\t\t# Calculate Mismatches\n\t\t\tmis, p_calc, q_calc = self.mismatch(v, d, y, pq, pvpq, psched, qsched)\n\t\t\tif debug_file is not None:\n\t\t\t\tresults.append(self.results2df(v, d))\n\t\t\t\tresults.append(pd.DataFrame(data={\"It: {}, E: {:.2E}\".format(i, max(abs(mis))): [\"\"]}))\n\t\t\t\tresults.append(df_space)\n\t\t\tif verbose:\n\t\t\t\tprint(\"error: \", max(abs(mis)))\n\t\t\tpq_last = deepcopy(pq)\n\t\t\tif qlim and max(abs(mis)) < 10 ** -abs(qlim_prec):\n\t\t\t\t# Check Limits\n\t\t\t\tpv, pq, qsched = self.check_limits(v, d, y, pv, pq)\n\t\t\t\t# Calculate Mismatches\n\t\t\t\tmis, p_calc, q_calc = self.mismatch(v, d, y, pq, pvpq, psched, qsched)\n\t\t\t# Check error\n\t\t\tif max(abs(mis)) < 10 ** -abs(prec) and np.array_equiv(pq_last, pq):\n\t\t\t\tif verbose:\n\t\t\t\t\tprint(\"Newton Raphson completed in \", i, \" iterations.\")\n\t\t\t\t# pv, pq, qsched = self.check_limits(v, d, y, pv, pq)\n\t\t\t\tif debug_file is not None:\n\t\t\t\t\tpd.concat(results, axis=1, sort=False).to_csv(debug_file, float_format='%.3f')\n\t\t\t\treturn v, d, i\n\t\t\t# Calculate Jacobian\n\t\t\tj = self.pf_jacobian(v, d, pq)\n\t\t\t# Calculate update values\n\t\t\tdx = mat_solve(j, mis)\n\t\t\t# Update angles: d_(n+1) = d_n + dd\n\t\t\td[pvpq] = d[pvpq] + dx[:n - 1]\n\t\t\t# Update Voltages: V_(n+1) = V_n(1+dV/V_n)\n\t\t\tv[pq] = v[pq] * (1 + dx[n - 1:n + pq.size - 1])\n\n\n\t\tif debug_file 
is not None:\n\t\t\tpd.concat(results, axis=1, sort=False).to_csv(debug_file, float_format='%.3f')\n\t\t# print(v, d)\n\t\tif verbose:\n\t\t\tprint(\"Max iterations reached, \", i, \".\")\n\t\treturn v, d, i\n\n\tdef pf_fast_decoupled(self, v_start, d_start, prec=2, maxit=100, qlim=True, qlim_prec=2, debug_file=None):\n\t\t# Uses Fast Decoupled method to solve the power-flow of a power system.\n\t\t# Written by Nathan Gray\n\t\t# Arguments:\n\t\t# v_start: list of voltage magnitudes in system\n\t\t# d_start: list of voltage phase angles in system\n\t\t# prec: program finishes when all mismatches < 10^-abs(prec)\n\t\t# maxit: maximum number of iterations\n\t\tprint(\"\\n~~~~~~~~~~ Start Fast Decoupled Method ~~~~~~~~~~\\n\")\n\t\tpsched = self.psched\n\t\tqsched = deepcopy(self.qsched)\n\t\ty = self.y_bus\n\t\tbp = self.makeybus(make_bp=True)\n\t\tbpp = self.makeybus(make_bpp=True)\n\t\tv = deepcopy(v_start)\n\t\td = deepcopy(d_start)\n\t\tpvpq = self.pvpq\n\t\tpq = self.pq\n\t\tpv = self.pv\n\t\tpq_last = deepcopy(pq)\n\t\t# Decoupled Power Flow\n\t\tbd = -bp.imag[pvpq, :][:, pvpq]\n\t\tbv = -bpp.imag[pq, :][:, pq] # TODO: Trouble here for certain combinations of PV buses on Kundur system\n\t\t# bd = self.pf_jacobian(v, d, pq, decoupled=True)[0]\n\t\t# bv = self.pf_jacobian(v, d, pq, decoupled=True)[1]\n\n\t\tif debug_file is not None:\n\t\t\tresults = []\n\t\t\tdf_space = pd.DataFrame(data={\"\": [\"\"]})\n\t\t\tpd.DataFrame(data=y.real).to_csv('yreal_'+debug_file, float_format='%.3f')\n\t\t\tpd.DataFrame(data=y.imag).to_csv('yimag_'+debug_file, float_format='%.3f')\n\t\t\tpd.DataFrame(data=bd).to_csv('bd_'+debug_file, float_format='%.3f')\n\t\t\tpd.DataFrame(data=bv).to_csv('bv_'+debug_file, float_format='%.3f')\n\n\t\ti = 0\n\t\tfor i in range(maxit + 1):\n\t\t\t# Calculate Mismatches\n\t\t\tmis, p_calc, q_calc = self.mismatch(v, d, y, pq, pvpq, psched, qsched)\n\t\t\tif debug_file is not None:\n\t\t\t\tresults.append(self.results2df(v, d))\n\t\t\t\tresults.append(pd.DataFrame(data={\"It: {}, E: {:.2E}\".format(i, max(abs(mis))): [\"\"]}))\n\t\t\t\tresults.append(df_space)\n\t\t\tprint(\"error: \", max(abs(mis)))\n\t\t\tpq_last = deepcopy(pq)\n\t\t\tif qlim and max(abs(mis)) < 10 ** -abs(qlim_prec): # Do q-limit check\n\t\t\t\tpv, pq, qsched = self.check_limits(v, d, y, pv, pq)\n\t\t\t\t# Calculate Mismatches\n\t\t\t\tmis, p_calc, q_calc = self.mismatch(v, d, y, pq, pvpq, psched, qsched)\n\t\t\t# Only update bv matrix size if pq changes\n\t\t\tif not np.array_equiv(pq_last, pq):\n\t\t\t\tbv = -bpp.imag[pq, :][:, pq]\n\t\t\t# Check error\n\t\t\tif max(abs(mis)) < 10 ** -abs(prec) and np.array_equiv(pq_last, pq):\n\t\t\t\tprint(\"Decoupled Power Flow completed in \", i, \" iterations.\")\n\t\t\t\tif debug_file is not None:\n\t\t\t\t\tpd.concat(results, axis=1, sort=False).to_csv(debug_file, float_format='%.3f')\n\t\t\t\treturn v, d, i\n\t\t\td[pvpq] = d[pvpq] + mat_solve(bd, mis[0:len(pvpq)] / v[pvpq])\n\t\t\t# mis, p_calc, q_calc = self.mismatch(v, d, y, pq, pvpq, psched, qsched)\n\t\t\tv[pq] = v[pq] + mat_solve(bv, mis[len(pvpq):] / v[pq])\n\n\n\t\tif debug_file is not None:\n\t\t\tpd.concat(results, axis=1, sort=False).to_csv(debug_file, float_format='%.3f')\n\n\t\tprint(\"Max iterations reached, \", i, \".\")\n\t\treturn v, d, i\n\n\tdef results2df(self, v, d):\n\t\ts = (v * np.exp(1j * d)) * np.conj(self.y_bus.dot(v * np.exp(1j * d)))\n\t\tsol_dic = {\n\t\t\t'Bus': self.bus_data[:, 0],\n\t\t\t\"Type\": self.bus_data[:, 4],\n\t\t\t\"V Result\": v,\n\t\t\t\"Angle Result\": d * 180 / 
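\n\t\t\t# convert radians to degrees, then add back the slack bus angle from the case file (bus_data[0, 6]) as the reference\n\t\t\t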
np.pi + self.bus_data[0, 6],\n\t\t\t\"MW Injected\": s.real * self.p_base,\n\t\t\t\"MVAR Injected\": s.imag * self.p_base}\n\t\tdf = pd.DataFrame(data=sol_dic)\n\t\treturn df\n\n\tdef check_limits(self, v, d, y, pv, pq):\n\t\tq_lim = self.q_lim\n\t\t# S = V*conj(I) and I = Y*V => S = V*conj(Y*V)\n\n\t\t# Find buses that are no longer limited and convert them back to PV buses\n\t\tif len(self.q_max_bus) > 0 or len(self.q_min_bus) > 0:\n\t\t\tif len(self.q_max_bus) > 0:\n\t\t\t\tfor bus in self.q_max_bus:\n\t\t\t\t\tif v[bus] > self.bus_data[bus, self.busDesiredVolts]:\n\t\t\t\t\t\t# Bus is no longer limited, make PV bus again.\n\t\t\t\t\t\tpq = np.setdiff1d(pq, [bus])\n\t\t\t\t\t\tpv = np.unique(np.concatenate((pv, [bus])))\n\t\t\t\t\t\tself.q_max_bus = np.delete(self.q_max_bus, np.where(self.q_max_bus == bus))\n\t\t\t\t\t\tprint(\"Not Q Limited: \", bus, \" because \", v[bus], \" > \",\n\t\t\t\t\t\t\t self.bus_data[bus, self.busDesiredVolts])\n\t\t\tif len(self.q_min_bus) > 0:\n\t\t\t\tfor bus in self.q_min_bus:\n\t\t\t\t\tif v[bus] < self.bus_data[bus, self.busDesiredVolts]:\n\t\t\t\t\t\t# Bus is no longer limited, make PV bus again.\n\t\t\t\t\t\tpq = np.setdiff1d(pq, [bus])\n\t\t\t\t\t\tpv = np.sort(np.concatenate((pv, [bus])))\n\t\t\t\t\t\tself.q_min_bus = np.delete(self.q_min_bus, np.where(self.q_min_bus == bus))\n\t\t\t\t\t\tprint(\"Not Q Limited: \", bus, \" because \", v[bus], \" < \",\n\t\t\t\t\t\t\t self.bus_data[bus, self.busDesiredVolts])\n\n\t\t# Find buses that need to be limited.\n\t\ts_full = (v * np.exp(1j * d)) * np.conj(y.dot(v * np.exp(1j * d)))\n\t\tq_calc_full = s_full.imag\n\t\tq_generated_full = q_calc_full + self.q_load_full\n\t\tq_min_limits = np.array([min(lim) for lim in q_lim])\n\t\tq_max_limits = np.array([max(lim) for lim in q_lim])\n\t\t# Keep record of all buses that are limited or have been limited.\n\t\tmax_index_for_pv_buses = \\\n\t\t\tnp.where(np.array([max(lim) <= q_generated_full[i] for i, lim in enumerate(q_lim)])[pv])[0]\n\t\tmin_index_for_pv_buses = \\\n\t\t\tnp.where(np.array([min(lim) >= q_generated_full[i] for i, lim in enumerate(q_lim)])[pv])[0]\n\t\tnew_q_max_buses = np.array(pv[max_index_for_pv_buses])\n\t\tnew_q_min_buses = np.array(pv[min_index_for_pv_buses])\n\t\tself.q_max_bus = np.unique(np.r_[self.q_max_bus, new_q_max_buses])\n\t\tself.q_min_bus = np.unique(np.r_[self.q_min_bus, new_q_min_buses])\n\n\t\t# membership must be tested element-wise; .any() reduces the array to a single bool first\n\t\tif np.any(np.isin(self.q_min_bus, pv)) or np.any(np.isin(self.q_max_bus, pv)):\n\t\t\tif np.any(np.isin(self.q_min_bus, pv)): # Remove from pv list, add to pq list.\n\t\t\t\tpv = np.setdiff1d(pv, self.q_min_bus)\n\t\t\t\tpq = np.unique(np.concatenate((pq, self.q_min_bus)))\n\t\t\t\tself.qsched_full[self.q_min_bus] = \\\n\t\t\t\t\tnp.array(q_min_limits[self.q_min_bus] - self.q_load_full[self.q_min_bus])\n\t\t\tif np.any(np.isin(self.q_max_bus, pv)): # Remove from pv list, add to pq list.\n\t\t\t\tpv = np.setdiff1d(pv, self.q_max_bus)\n\t\t\t\tpq = np.unique(np.concatenate((pq, self.q_max_bus)))\n\t\t\t\tself.qsched_full[self.q_max_bus] = \\\n\t\t\t\t\tnp.array(q_max_limits[self.q_max_bus] - self.q_load_full[self.q_max_bus])\n\t\t\tq_limited = np.sort(np.concatenate((self.q_max_bus, self.q_min_bus)))\n\t\t\tprint(\"Q Limited: \", q_limited)\n\n\t\t# Calculate scheduled Q for each bus\n\t\tqsched = self.qsched_full[pq]\n\t\tv[pv] = np.array(self.bus_data[pv, self.busDesiredVolts])\n\n\t\treturn pv, pq, qsched\n\n\tdef pf_jacobian(self, v, d, pq, decoupled=False, v_mul=True):\n\t\t# This function was written by Nathan Gray using formulas from chapter 9 of\n\t\t# \"Power 
Systems Analysis\" J. Grainger et al.\n\t\t# Calculates the Jacobian Matrix for use in the Newton-Raphson Method.\n\t\t# Arguments:\n\t\t# v: Voltage magnitudes\n\t\t# d: Voltage phase angles\n\t\t# y: Ybus matrix\n\t\t# pq: List of PQ buses\n\t\ty = self.y_bus\n\t\tn = y.shape[0]\n\t\t# S = V*conj(I) and I = Y*V => S = V*conj(Y*V)\n\t\ts = (v*np.exp(1j*d))*np.conj(y.dot(v*np.exp(1j*d)))\n\t\tp = s.real\n\t\tq = s.imag\n\n\t\tif self.sparse:\n\t\t\ttmp = Sparse\n\t\telse:\n\t\t\ttmp = np\n\n\t\t# Find indices of non-zero ybus entries\n\t\trow, col = tmp.where(y)\n\n\t\tj11 = tmp.zeros((n - 1, n - 1))\n\t\tj12 = tmp.zeros((n - 1, pq.size))\n\t\tj21 = tmp.zeros((pq.size, n - 1))\n\t\tj22 = tmp.zeros((pq.size, pq.size))\n\n\t\tfor a in range(row.shape[0]):\n\t\t\ti = row[a]\n\t\t\tj = col[a]\n\t\t\t# J11\n\t\t\tif i != 0 and j != 0:\n\t\t\t\tth_ij = np.angle(y[i, j])\n\t\t\t\ts_ij = np.sin(th_ij + d[j] - d[i])\n\t\t\t\tc_ij = np.cos(th_ij + d[j] - d[i])\n\t\t\t\ty_ij = abs(y[i, j])\n\t\t\t\tif i == j: # Diagonals of J11\n\t\t\t\t\tj11[i - 1, j - 1] = - q[i] - v[i]**2*y[i, i].imag\n\t\t\t\telse: # Off-diagonals of J11\n\t\t\t\t\tj11[i - 1, j - 1] = -v[i]*v[j]*y_ij*s_ij\n\t\t\t\t# J21\n\t\t\t\tif i in pq:\n\t\t\t\t\tk: int = np.ravel(np.where(pq == i))[0] # map bus index to jacobian index\n\t\t\t\t\tif i == j: # Diagonals of J21\n\t\t\t\t\t\tj21[k, j - 1] = p[i] - abs(v[i])**2*y[i, j].real\n\t\t\t\t\telse: # Off-diagonals of J21\n\t\t\t\t\t\tj21[k, j - 1] = -v[i]*v[j]*y_ij*c_ij\n\t\t\t\t# J12\n\t\t\t\tif j in pq:\n\t\t\t\t\tl: int = np.ravel(np.where(pq == j))[0] # map bus index to jacobian index\n\t\t\t\t\tif i == j: # Diagonals of J12\n\t\t\t\t\t\tj12[i - 1, l] = p[i] + v[i]**2*y[i, j].real\n\t\t\t\t\telse: # Off-diagonals of J12\n\t\t\t\t\t\tj12[i - 1, l] = v[i]*v[j]*y_ij*c_ij\n\t\t\t\t\tif not v_mul:\n\t\t\t\t\t\tj12[i - 1, l] /= v[j]\n\t\t\t\t# J22\n\t\t\t\tif i in pq and j in pq:\n\t\t\t\t\tk: int = np.ravel(np.where(pq == i))[0] # map bus index to jacobian index\n\t\t\t\t\tl: int = np.ravel(np.where(pq == j))[0] # map bus index to jacobian index\n\t\t\t\t\tif i == j: # Diagonal of J22\n\t\t\t\t\t\tj22[k, l] = -j11[i - 1, j - 1] - 2*v[i]**2*y[i, j].imag\n\t\t\t\t\telse: # Off-diagonals of J22\n\t\t\t\t\t\tj22[k, l] = j11[i - 1, j - 1]\n\t\t\t\t\tif not v_mul:\n\t\t\t\t\t\tj22[k, l] /= v[j]\n\t\t# Assemble jacobian\n\t\tjtop = tmp.concatenate((j11, j12), axis=1)\n\t\tjbottom = tmp.concatenate((j21, j22), axis=1)\n\t\tjacobian = tmp.concatenate((jtop, jbottom), axis=0)\n\t\tif decoupled:\n\t\t\treturn j11, j22\n\t\telse:\n\t\t\treturn jacobian\n\n\tdef pf(self, initial=None, prec=5, maxit=10, qlim=False, qlim_prec=2, verbose=True, debug_file=None):\n\t\tif initial is None:\n\t\t\tv0, d0 = self.flat_start()\n\t\telse:\n\t\t\tv0, d0 = initial\n\t\t# d0 = self.pf_dc(d0, self.y_bus, self.pvpq, self.psched)\n\t\tv, d, it = self.pf_newtonraphson(v0, d0, prec=prec, maxit=maxit, qlim=qlim, qlim_prec=qlim_prec, verbose=verbose, debug_file=debug_file)\n\t\treturn v, d\n\n\tdef dslack_dx(self, v, d, pq):\n\n\t\ty = self.y_bus\n\t\tn = y.shape[0]\n\t\tif self.sparse:\n\t\t\ttmp = Sparse\n\t\t\ty = y.full[0, :]\n\t\telse:\n\t\t\ttmp = np\n\t\t\ty = y[0, :]\n\t\t# Find indices of non-zero ybus entries\n\t\tcol = np.where(y)[0]\n\n\t\tj11 = np.zeros((1, n - 1))\n\t\tj12 = np.zeros((1, pq.size))\n\n\t\tfor a in range(len(col)):\n\t\t\ti = 0\n\t\t\tj: int = col[a]\n\t\t\tif j != 0:\n\t\t\t\tth_ij = np.angle(y[j])\n\t\t\t\ts_ij = np.sin(th_ij + d[j])\n\t\t\t\tc_ij = np.cos(th_ij + d[j])\n\t\t\t\ty_ij = 
abs(y[j])\n\n\t\t\t\tj11[0, j - 1] = -v[i]*v[j]*y_ij*s_ij\n\t\t\t\tif j in pq:\n\t\t\t\t\tl: int = np.ravel(np.where(pq == j))[0] # map bus index to jacobian index\n\t\t\t\t\tj12[0, l] = v[i]*v[j]*y_ij*c_ij/v[j]\n\t\t# Assemble jacobian\n\t\tdp1dx = np.concatenate((j11, j12), axis=1).T\n\t\treturn dp1dx\n\n\t# ~~~~~ State Estimation ~~~~~\n\tdef se_h_matrix(self, v, d):\n\t\ty = self.y_bus\n\t\tn = y.shape[0]\n\t\tnb = len(self.branch_data[:, 0])\n\t\t# S = V*conj(I) and I = Y*V => S = V*conj(Y*V)\n\t\ts = (v * np.exp(1j * d)) * np.conj(y.dot(v * np.exp(1j * d)))\n\t\tp = s.real\n\t\tq = s.imag\n\n\t\t# Find indices of non-zero ybus entries\n\t\tif self.sparse:\n\t\t\trow = y.rows\n\t\t\tcol = y.cols\n\t\t\ttmp = Sparse\n\t\telse:\n\t\t\trow, col = np.where(y)\n\t\t\ttmp = np\n\n\t\tj01 = tmp.zeros((n, n - 1))\n\t\tj02 = tmp.zeros((n, n))\n\t\tj11 = tmp.zeros((n, n - 1))\n\t\tj12 = tmp.zeros((n, n))\n\t\tj21 = tmp.zeros((n, n - 1))\n\t\tj22 = tmp.zeros((n, n))\n\t\tj31 = tmp.zeros((nb, n - 1))\n\t\tj32 = tmp.zeros((nb, n))\n\t\tj41 = tmp.zeros((nb, n - 1))\n\t\tj42 = tmp.zeros((nb, n))\n\t\tj51 = tmp.zeros((nb, n - 1))\n\t\tj52 = tmp.zeros((nb, n))\n\t\tj61 = tmp.zeros((nb, n - 1))\n\t\tj62 = tmp.zeros((nb, n))\n\n\t\tfor i in range(n):\n\t\t\tj02[i, i] = 1\n\t\tfor a in range(row.shape[0]):\n\t\t\ti = row[a]\n\t\t\tj = col[a]\n\t\t\tth_ij = np.angle(y[i, j])\n\t\t\t# J11\n\t\t\tif j != 0:\n\t\t\t\tif i == j: # Diagonals of J11 dPi/ddi\n\t\t\t\t\tj11[i, j - 1] = - q[i] - v[i] ** 2 * y[i, i].imag\n\t\t\t\telse: # Off-diagonals of J11 dPi/ddj\n\t\t\t\t\tj11[i, j - 1] = -abs(v[i] * v[j] * y[i, j]) * np.sin(th_ij + d[j] - d[i])\n\t\t\t\t# J21\n\t\t\t\tif i == j: # Diagonals of J21 dQi/ddi\n\t\t\t\t\tj21[i, j - 1] = p[i] - v[i] ** 2 * y[i, j].real\n\t\t\t\telse: # Off-diagonals of J21 dQi/ddj\n\t\t\t\t\tj21[i, j - 1] = -abs(v[i] * v[j] * y[i, j]) * np.cos(th_ij + d[j] - d[i])\n\t\t\t# J12\n\t\t\tif i == j: # Diagonals of J12 dPi/dvi\n\t\t\t\tj12[i, j] = (p[i] + abs(v[i] ** 2 * y[i, j].real)) / v[i]\n\t\t\telse: # Off-diagonals of J12 dPi/dvj\n\t\t\t\tj12[i, j] = (abs(v[j] * v[i] * y[i, j]) * np.cos(th_ij + d[j] - d[i])) / v[j]\n\t\t\t# J22\n\t\t\tif i == j: # Diagonal of J22 dQi/dvi\n\t\t\t\tj22[i, j] = (q[i] + v[i] ** 2 * y[i, i].imag - 2 * abs(v[i]) ** 2 * y[i, j].imag) / v[i]\n\t\t\telse: # Off-diagonals of J22 dQi/dvj\n\t\t\t\tj22[i, j] = (-abs(v[i] * v[j] * y[i, j]) * np.sin(th_ij + d[j] - d[i])) / v[j]\n\n\t\tfor b, _ in enumerate(self.branch_data[:, 0]):\n\t\t\tfrom_bus = self.branch_data[b, 0]\n\t\t\tto_bus = self.branch_data[b, 1]\n\t\t\ti = int(from_bus - 1)\n\t\t\tj = int(to_bus - 1)\n\t\t\tb_chrg = self.branch_data[b, self.branchB]\n\t\t\tycosij = abs(y[i, j]) * cos(angle(y[i, j]) + d[j] - d[i])\n\t\t\tycosji = abs(y[j, i]) * cos(angle(y[j, i]) + d[i] - d[j])\n\t\t\tysinij = abs(y[i, j]) * sin(angle(y[i, j]) + d[j] - d[i])\n\t\t\tysinji = abs(y[j, i]) * sin(angle(y[j, i]) + d[i] - d[j])\n\n\t\t\tif i != 0: # Do not include derivatives w.r.t. d[0]\n\t\t\t\t# J31 dPij/dd[i]\n\t\t\t\tj31[b, i - 1] = v[i] * v[j] * ysinij\n\t\t\t\t# J41 dQij/dd[i]\n\t\t\t\tj41[b, i - 1] = v[i] * v[j] * ycosij\n\t\t\t\t# J51 dPji/dd[i]\n\t\t\t\tj51[b, i - 1] = -v[j] * v[i] * ysinji\n\t\t\t\t# J61 dQji/dd[i]\n\t\t\t\tj61[b, i - 1] = -v[j] * v[i] * ycosji\n\n\t\t\tif j != 0: # Do not include derivatives w.r.t. 
d[0]\n\t\t\t\t# J31 dPij/dd[j]\n\t\t\t\tj31[b, j - 1] = -v[i] * v[j] * ysinij\n\t\t\t\t# J41 dQij/dd[j]\n\t\t\t\tj41[b, j - 1] = -v[i] * v[j] * ycosij\n\t\t\t\t# J51 dPji/dd[j]\n\t\t\t\tj51[b, j - 1] = v[j] * v[i] * ysinji\n\t\t\t\t# J61 dQji/dd[j]\n\t\t\t\tj61[b, j - 1] = v[j] * v[i] * ycosji\n\n\t\t\t# J32 dPij/dV\n\t\t\tj32[b, i] = -2 * v[i] * real(y[i, j]) + v[j] * ycosij\n\t\t\tj32[b, j] = v[i] * ycosij\n\t\t\t# J42 dQij/dV\n\t\t\tj42[b, i] = -2 * v[i] * (b_chrg / 2 - imag(y[i, j])) - v[j] * ysinij\n\t\t\tj42[b, j] = -v[i] * ysinij\n\t\t\t# J52 dPji/dV\n\t\t\tj52[b, j] = -2 * v[j] * real(y[j, i]) + v[i] * ycosji\n\t\t\tj52[b, i] = v[j] * ycosji\n\t\t\t# J62 dQji/dV\n\t\t\tj62[b, j] = -2 * v[j] * (b_chrg / 2 - imag(y[j, i])) - v[i] * ysinji\n\t\t\tj62[b, i] = -v[j] * ysinji\n\n\t\t# Assemble jacobian\n\t\tif self.sparse:\n\t\t\tj0 = Sparse.concatenate((j01, j02), axis=1)\n\t\t\tj1 = Sparse.concatenate((j11, j12), axis=1)\n\t\t\tj2 = Sparse.concatenate((j21, j22), axis=1)\n\t\t\tj3 = Sparse.concatenate((j31, j32), axis=1)\n\t\t\tj4 = Sparse.concatenate((j41, j42), axis=1)\n\t\t\tj5 = Sparse.concatenate((j51, j52), axis=1)\n\t\t\tj6 = Sparse.concatenate((j61, j62), axis=1)\n\t\t\tj0_1 = Sparse.concatenate((j0, j1), axis=0)\n\t\t\tj2_3 = Sparse.concatenate((j2, j3), axis=0)\n\t\t\tj4_5 = Sparse.concatenate((j4, j5), axis=0)\n\t\t\tj0_3 = Sparse.concatenate((j0_1, j2_3), axis=0)\n\t\t\tj4_6 = Sparse.concatenate((j4_5, j6), axis=0)\n\t\t\tjacobian = Sparse.concatenate((j0_3, j4_6), axis=0)\n\t\telse:\n\t\t\tj0 = np.concatenate((j01, j02), axis=1)\n\t\t\tj1 = np.concatenate((j11, j12), axis=1)\n\t\t\tj2 = np.concatenate((j21, j22), axis=1)\n\t\t\tj3 = np.concatenate((j31, j32), axis=1)\n\t\t\tj4 = np.concatenate((j41, j42), axis=1)\n\t\t\tj5 = np.concatenate((j51, j52), axis=1)\n\t\t\tj6 = np.concatenate((j61, j62), axis=1)\n\t\t\tjacobian = np.concatenate((j0, j1, j2, j3, j4, j5, j6), axis=0)\n\n\t\treturn jacobian\n\n\tdef h_calc(self, v, d):\n\t\tv = np.transpose(v)[0]\n\t\td = np.transpose(d)[0]\n\t\ts = (v * np.exp(1j * d)) * np.conj(self.y_bus.dot(v * np.exp(1j * d)))\n\t\tp = np.real(s)\n\t\tq = np.imag(s)\n\t\tpij, qij, pji, qji = self.branch_flows(v, d)\n\t\treturn np.r_[\n\t\t\tnp.transpose([v]),\n\t\t\tnp.transpose([p]),\n\t\t\tnp.transpose([q]),\n\t\t\tnp.transpose([pij]),\n\t\t\tnp.transpose([qij]),\n\t\t\tnp.transpose([pji]),\n\t\t\tnp.transpose([qji])\n\t\t]\n\n\t@staticmethod\n\tdef mismatch(v, d, y, pq, pvpq, psched, qsched):\n\t\t# This function was written by Nathan Gray\n\t\t# This function calculates mismatches between the real and reactive power\n\t\t# injections in a system vs. 
the scheduled injections.\n\t\t# Arguments:\n\t\t# v: list of voltage magnitudes in system\n\t\t# d: list of voltage phase angles in system\n\t\t# y: Ybus matrix for system\n\t\t# pq: list of PQ buses\n\t\t# pvpq: list of PV and pq buses\n\t\t# psched, qsched: list of real, reactive power injections\n\n\t\t# S = V*conj(I) and I = Y*V => S = V*conj(Y*V)\n\t\ts = (v * np.exp(1j * d)) * np.conj(y.dot(v * np.exp(1j * d)))\n\t\t# S = P + jQ\n\t\tpcalc = s[pvpq].real\n\t\tqcalc = s[pq].imag\n\t\tdp = psched - pcalc\n\t\tdq = qsched - qcalc\n\t\tmis = np.concatenate((dp, dq))\n\t\treturn mis, pcalc, qcalc\n\n\tdef complex_injections(self, v, d):\n\t\t# S = V*conj(I) and I = Y*V => S = V*conj(Y*V)\n\t\ts = (v * np.exp(1j * d)) * np.conj(self.y_bus.dot(v * np.exp(1j * d)))\n\t\treturn s\n\n\tdef pij_flow(self, d, v, i, j):\n\t\tyij = np.abs(self.y_bus[i, j])\n\t\tgij = np.real(self.y_bus[i, j])\n\t\tth_ij = np.angle(self.y_bus[i, j])\n\t\tp_ij = -v[i] ** 2 * gij + v[i] * v[j] * yij * np.cos(th_ij + d[j] - d[i])\n\t\treturn p_ij\n\n\tdef qij_flow(self, d, v, i, j, b):\n\t\tb_charging = self.branch_data[b, self.branchB]\n\t\tyij = np.abs(self.y_bus[i, j])\n\t\tbij = np.imag(self.y_bus[i, j])\n\t\tth_ij = np.angle(self.y_bus[i, j])\n\t\tq_ij = -v[i] ** 2 * (b_charging / 2 - bij) - v[i] * v[j] * yij * np.sin(th_ij + d[j] - d[i])\n\t\treturn q_ij\n\n\tdef branch_flows(self, v, d):\n\t\tp_ij = np.zeros(self.branch_data[:, 0].shape)\n\t\tp_ji = np.zeros(self.branch_data[:, 0].shape)\n\t\tq_ij = np.zeros(self.branch_data[:, 0].shape)\n\t\tq_ji = np.zeros(self.branch_data[:, 0].shape)\n\t\tb_charging = self.branch_data[:, self.branchB]\n\t\tfor b, _ in enumerate(self.branch_data[:, 0]):\n\t\t\tfrom_bus = self.branch_data[b, 0]\n\t\t\tto_bus = self.branch_data[b, 1]\n\t\t\ti = int(from_bus - 1)\n\t\t\tj = int(to_bus - 1)\n\t\t\tp_ij[b] = self.pij_flow(d, v, i, j)\n\t\t\tq_ij[b] = self.qij_flow(d, v, i, j, b)\n\n\t\t\tp_ji[b] = self.pij_flow(d, v, j, i)\n\t\t\tq_ji[b] = self.qij_flow(d, v, j, i, b)\n\t\treturn p_ij, q_ij, p_ji, q_ji\n\n\t# ~~~~~ Continuation Power flow aka Voltage Stability Analysis ~~~~~\n\tdef cpf_jacobian(self, v, d, pq, kpq, kt, sign):\n\t\t# Build parameterized jacobian for continuation power flow.\n\t\ty = self.y_bus\n\t\tn = y.shape[0]\n\t\t# S = V*conj(I) and I = Y*V => S = V*conj(Y*V)\n\t\ts = (v * np.exp(1j * d)) * np.conj(y.dot(v * np.exp(1j * d)))\n\t\tp = s.real\n\t\tq = s.imag\n\t\tif self.sparse:\n\t\t\ttmp = Sparse\n\t\telse:\n\t\t\ttmp = np\n\t\t# Find indices of non-zero ybus entries\n\t\trow, col = tmp.where(y)\n\t\tj11 = tmp.zeros((n - 1, n - 1))\n\t\tj12 = tmp.zeros((n - 1, pq.size))\n\t\tj21 = tmp.zeros((pq.size, n - 1))\n\t\tj22 = tmp.zeros((pq.size, pq.size))\n\t\tfor a in range(row.shape[0]):\n\t\t\ti = row[a]\n\t\t\tj = col[a]\n\t\t\t# J11\n\t\t\tif i != 0 and j != 0:\n\t\t\t\tth_ij = np.angle(y[i, j])\n\t\t\t\ts_ij = np.sin(th_ij + d[j] - d[i])\n\t\t\t\tc_ij = np.cos(th_ij + d[j] - d[i])\n\t\t\t\ty_ij = abs(y[i, j])\n\t\t\t\tif i == j: # Diagonals of J11\n\t\t\t\t\tj11[i - 1, j - 1] = -q[i] - v[i]**2*y[i, i].imag\n\t\t\t\telse: # Off-diagonals of J11\n\t\t\t\t\tj11[i - 1, j - 1] = -v[i]*v[j]*y_ij*s_ij\n\t\t\t\t# J21\n\t\t\t\tif i in pq:\n\t\t\t\t\tk: int = np.ravel(np.where(pq == i))[0] # map bus index to jacobian index\n\t\t\t\t\tif i == j: # Diagonals of J21\n\t\t\t\t\t\tj21[k, j - 1] = p[i] - v[i]**2*y[i, j].real\n\t\t\t\t\telse: # Off-diagonals of J21\n\t\t\t\t\t\tj21[k, j - 1] = -v[i]*v[j]*y_ij*c_ij\n\t\t\t\t# J12\n\t\t\t\tif j in 
pq:\n\t\t\t\t\tl: int = np.ravel(np.where(pq == j))[0] # map bus index to jacobian index\n\t\t\t\t\tif i == j: # Diagonals of J12\n\t\t\t\t\t\tj12[i - 1, l] = (p[i] + v[i]**2*y[i, j].real) / v[j]\n\t\t\t\t\telse: # Off-diagonals of J12\n\t\t\t\t\t\tj12[i - 1, l] = (v[i]*v[j]*y_ij*c_ij) / v[j]\n\t\t\t\t# J22\n\t\t\t\tif i in pq and j in pq:\n\t\t\t\t\tk: int = np.ravel(np.where(pq == i))[0] # map bus index to jacobian index\n\t\t\t\t\tl: int = np.ravel(np.where(pq == j))[0] # map bus index to jacobian index\n\t\t\t\t\tif i == j: # Diagonal of J22\n\t\t\t\t\t\tj22[k, l] = (-j11[i - 1, j - 1] - 2*v[i]**2*y[i, j].imag) / v[j]\n\t\t\t\t\telse: # Off-diagonals of J22\n\t\t\t\t\t\tj22[k, l] = j11[i - 1, j - 1] / v[j]\n\t\t# Assemble jacobian\n\t\tjtop = tmp.concatenate((j11, j12), axis=1)\n\t\tjbottom = tmp.concatenate((j21, j22), axis=1)\n\t\tjacobian = tmp.concatenate((jtop, jbottom), axis=0)\n\n\t\tjac = jacobian\n\t\t# add row for ek and col for psched and qsched\n\t\tnrows = jac.shape[0]\n\t\tncols = jac.shape[1]\n\t\tif self.sparse:\n\t\t\tfor row in range(nrows):\n\t\t\t\tjac[row, ncols] = -kpq[row]\n\t\t\tjac[nrows, kt] = sign\n\t\telse:\n\t\t\tek = np.zeros((1, ncols + 1)) # spans the appended lambda column as well\n\t\t\tek[0, kt] = sign\n\t\t\tjac = np.c_[jac, -kpq]\n\t\t\tjac = np.r_[jac, ek]\n\t\treturn jac\n\n\tdef pf_continuation(self, watch_bus):\n\t\tprint(\"\\n~~~~~~~~~~ Start Voltage Stability Analysis ~~~~~~~~~~\\n\")\n\t\tσ = 0.1\n\t\tλ = 1\n\t\tpsched = deepcopy(self.psched)\n\t\tqsched = deepcopy(self.qsched)\n\t\tkpq = np.r_[psched, qsched]\n\t\ty = self.y_bus\n\t\tn = np.shape(y)[0]\n\t\tpvpq = self.pvpq\n\t\tpq = deepcopy(self.pq)\n\t\t# ~~~~~~~ Run Conventional Power Flow on Base Case ~~~~~~~~~~\n\t\tv, d = self.flat_start()\n\t\td = self.pf_dc(d, y, pvpq, psched, lam=λ)\n\t\tv, d, it = self.pf_newtonraphson(v, d, prec=3, maxit=10, qlim=False, lam=λ)\n\t\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\t\t# ~~~~~ Set watched bus and associated indexes ~~~~~\n\t\t# watch_bus = 4\n\t\twatch_index = watch_bus - 1\n\t\twatch_pq_index = watch_index # initialize\n\t\tfor i, bus_type in enumerate(self.bus_data[:, self.busType]):\n\t\t\tif watch_index <= i:\n\t\t\t\tbreak\n\t\t\tif bus_type > 0 and watch_index > i:\n\t\t\t\twatch_pq_index -= 1\n\t\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\t\tresults = [[σ, v[watch_index], d[watch_index], λ, λ*self.psched[watch_index - 1], λ*self.qsched[watch_pq_index ]]]\n\t\tphase = 1 # phase 1 -> increasing load, phase 2 -> decreasing V, phase 3 -> decreasing load\n\n\t\t# Continuation Power Flow or Voltage Stability Analysis\n\t\twhile True:\n\n\t\t\t# kpq_jon = np.zeros(kpq.shape)\n\t\t\t# kpq_jon[watch_index-1] = -1\n\t\t\t# Calculate Jacobian\n\t\t\tif phase == 1:\n\t\t\t\tkt = len(pvpq) + len(pq)\n\t\t\t\ttk = 1\n\t\t\t\tjac = self.cpf_jacobian(v, d, pq, kpq, kt, tk)\n\t\t\tif phase == 2:\n\t\t\t\tkt = len(pvpq) + watch_pq_index\n\t\t\t\ttk = -1\n\t\t\t\tjac = self.cpf_jacobian(v, d, pq, kpq, kt, tk)\n\t\t\tif phase == 3:\n\t\t\t\tkt = len(pvpq) + len(pq)\n\t\t\t\ttk = -1\n\t\t\t\tjac = self.cpf_jacobian(v, d, pq, kpq, kt, tk)\n\n\t\t\t# Calculate update values\n\t\t\t# ~~~~~~~~~~ Calculated Tangent Vector ~~~~~~~~~~\n\t\t\tt = mat_solve(jac, np.r_[np.zeros(jac.shape[0] - 1), 1])\n\t\t\t# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t\t\t# Update angles: d_(n+1) = d_n + dd\n\t\t\td_pred = deepcopy(d)\n\t\t\td_pred[pvpq] = d[pvpq] + σ * t[:n - 1]\n\t\t\t# Update Voltages: V_(n+1) = V_n(1+dV/V_n)\n\t\t\tv_pred = deepcopy(v)\n\t\t\tv_pred[pq] = v[pq] 
+ σ * t[n - 1:-1]\n\t\t\t# Update Lambda\n\t\t\tλ_pred = λ + σ * t[-1]\n\t\t\t# ~~~~~~~~~~ Corrector ~~~~~~~~~~\n\n\t\t\td_cor = deepcopy(d_pred)\n\t\t\tv_cor = deepcopy(v_pred)\n\t\t\tλ_cor = deepcopy(λ_pred)\n\t\t\tit = 0\n\t\t\tmaxit = 7\n\t\t\twhile it < maxit:\n\t\t\t\tmis, p_calc, q_calc = self.mismatch(v_cor, d_cor, y, pq, pvpq, λ_cor*psched, λ_cor*qsched)\n\t\t\t\tif phase == 1 or phase == 3:\n\t\t\t\t\tmis = np.r_[mis, λ_pred - λ_cor]\n\t\t\t\tif phase == 2:\n\t\t\t\t\tmis = np.r_[mis, v_pred[watch_index] - v_cor[watch_index]]\n\t\t\t\t# Check error\n\t\t\t\tif max(abs(mis)) < 10 ** -3:\n\t\t\t\t\tbreak # return v, d, it\n\t\t\t\tjac = self.cpf_jacobian(v_cor, d_cor, pq, kpq, kt, tk)\n\t\t\t\t# Calculate update values\n\t\t\t\tdx = mat_solve(jac, mis)\n\t\t\t\t# Update angles: d_(n+1) = d_n + dd\n\t\t\t\td_cor[pvpq] = d_cor[pvpq] + dx[:n - 1]\n\t\t\t\t# Update Voltages: V_(n+1) = V_n(1+dV/V_n)\n\t\t\t\tv_cor[pq] = v_cor[pq] + dx[n - 1:n + pq.size - 1]\n\t\t\t\t# Update Lambda\n\t\t\t\tλ_cor = λ_cor + dx[-1]\n\t\t\t\tit += 1\n\n\t\t\tif phase == 1:\n\t\t\t\tif it >= maxit:\n\t\t\t\t\tphase = 2\n\t\t\t\t\tσ = 0.025\n\t\t\t\t\tprint('phase 2')\n\t\t\t\telse:\n\t\t\t\t\tv = deepcopy(v_cor)\n\t\t\t\t\td = deepcopy(d_cor)\n\t\t\t\t\tλ = deepcopy(λ_cor)\n\t\t\t\t\tprint(round(λ, 8), v[watch_index])\n\t\t\t\t\tresults = np.r_[results, [[σ, v[watch_index], d[watch_index], λ, λ*self.psched[watch_index - 1], λ*self.qsched[watch_pq_index ]]]]\n\n\t\t\telif phase == 2:\n\t\t\t\tif it >= maxit:\n\t\t\t\t\tprint(\"phase 2 not converged\")\n\t\t\t\t\t#break\n\t\t\t\t\tphase = 3\n\t\t\t\t\tσ = 0.1\n\t\t\t\t\tprint('phase 3')\n\t\t\t\telif results[-2, 3] - results[-1, 3] > 0.2:\n\t\t\t\t\tphase = 3\n\t\t\t\t\tσ = 0.1\n\t\t\t\t\tprint('phase 3')\n\t\t\t\telse:\n\t\t\t\t\tv = deepcopy(v_cor)\n\t\t\t\t\td = deepcopy(d_cor)\n\t\t\t\t\tλ = deepcopy(λ_cor)\n\t\t\t\t\tprint(round(λ, 8), v[watch_index])\n\t\t\t\t\tresults = np.r_[results, [[σ, v[watch_index], d[watch_index], λ, λ*self.psched[watch_index - 1], λ*self.qsched[watch_pq_index ]]]]\n\n\t\t\tif phase == 3:\n\t\t\t\t# break\n\t\t\t\tif λ < 1:\n\t\t\t\t\tbreak\n\n\t\t\t\tv = deepcopy(v_cor)\n\t\t\t\td = deepcopy(d_cor)\n\t\t\t\tλ = deepcopy(λ_cor)\n\t\t\t\tprint(round(λ, 8), v[watch_index])\n\t\t\t\tresults = np.r_[results, [[σ, v[watch_index], d[watch_index], λ, λ*self.psched[watch_index - 1], λ*self.qsched[watch_pq_index ]]]]\n\n\t\treturn results\n\n\tdef dgdx(self, x):\n\t\tlenx = len(x)\n\t\td = np.r_[0, x[0:lenx//2]]\n\t\tv = np.r_[self.bus_data[0, self.busDesiredVolts], x[lenx//2:]]\n\t\ty = self.y_bus\n\t\tn = y.shape[0]\n\t\tnb = len(self.branch_data[:, 0])\n\t\t# S = V*conj(I) and I = Y*V => S = V*conj(Y*V)\n\t\ts = (v * np.exp(1j * d)) * np.conj(y.dot(v * np.exp(1j * d)))\n\t\tp = s.real\n\t\tq = s.imag\n\n\t\t# Find indices of non-zero ybus entries\n\t\tif self.sparse:\n\t\t\trow = y.rows\n\t\t\tcol = y.cols\n\t\t\ttmp = Sparse\n\t\telse:\n\t\t\trow, col = np.where(y)\n\t\t\ttmp = np\n\n\t\tj11 = tmp.zeros((n - 1, n - 1))\n\t\tj12 = tmp.zeros((n - 1, n - 1))\n\t\tj21 = tmp.zeros((n - 1, n - 1))\n\t\tj22 = tmp.zeros((n - 1, n - 1))\n\n\t\tfor a in range(row.shape[0]):\n\t\t\ti = row[a]\n\t\t\tj = col[a]\n\t\t\tth_ij = np.angle(y[i, j])\n\n\t\t\tif i != 0 and j != 0: # J11\n\t\t\t\tif i == j: # Diagonals of J11 dPi/ddi\n\t\t\t\t\tj11[i - 1, j - 1] = - q[i] - v[i] ** 2 * y[i, i].imag\n\t\t\t\telse: # Off-diagonals of J11 dPi/ddj\n\t\t\t\t\tj11[i - 1, j - 1] = -abs(v[i] * v[j] * y[i, j]) * np.sin(th_ij + d[j] - d[i])\n\t\t\t# if j != 0: # 
J21\n\t\t\t\tif i == j: # Diagonals of J21 dQi/ddi\n\t\t\t\t\tj21[i - 1, j - 1] = p[i] - v[i] ** 2 * y[i, j].real\n\t\t\t\telse: # Off-diagonals of J21 dQi/ddj\n\t\t\t\t\tj21[i - 1, j - 1] = -abs(v[i] * v[j] * y[i, j]) * np.cos(th_ij + d[j] - d[i])\n\t\t\t# if i != 0: # J12\n\t\t\t\tif i == j: # Diagonals of J12\n\t\t\t\t\tj12[i - 1, j - 1] = (p[i] + abs(v[i] ** 2 * y[i, j].real)) / v[i]\n\t\t\t\telse: # Off-diagonals of J12\n\t\t\t\t\tj12[i - 1, j - 1] = (abs(v[j] * v[i] * y[i, j]) * np.cos(th_ij + d[j] - d[i])) / v[j]\n\t\t\t# J22\n\t\t\t\tif i == j: # Diagonal of J22\n\t\t\t\t\tj22[i - 1, j - 1] = (q[i] + v[i] ** 2 * y[i, i].imag - 2 * abs(v[i]) ** 2 * y[i, j].imag) / v[i]\n\t\t\t\telse: # Off-diagonals of J22\n\t\t\t\t\tj22[i - 1, j - 1] = (-abs(v[i] * v[j] * y[i, j]) * np.sin(th_ij + d[j] - d[i])) / v[j]\n\n\t\t# Assemble jacobian\n\t\tj1 = tmp.concatenate((j11, j12), axis=1)\n\t\tj2 = tmp.concatenate((j21, j22), axis=1)\n\t\tjacobian = tmp.concatenate((j1, j2), axis=0)\n\n\t\treturn jacobian\n\n\tdef g(self, x):\n\t\tlenx = len(x)\n\t\td = np.r_[0, x[0:lenx//2]]\n\t\tv = np.r_[self.bus_data[0, self.busDesiredVolts], x[lenx//2:]]\n\t\ts = self.complex_injections(v, d)\n\t\tp = s.real\n\t\tq = s.imag\n\t\treturn np.r_[self.psched_full[1:] - p[1:], self.qsched_full[1:] - q[1:]]\n\n\tdef nr(self, func, x0, fprime, maxit=10, prec=3, verbose=True):\n\t\tx = deepcopy(x0)\n\t\tfor i in range(maxit + 1):\n\t\t\t# Calculate Mismatches\n\t\t\tmis = func(x)\n\t\t\tif verbose:\n\t\t\t\tprint(\"error: \", max(abs(mis)))\n\t\t\t# Check error\n\t\t\tif max(abs(mis)) < 10 ** -abs(prec):\n\t\t\t\tif verbose:\n\t\t\t\t\tprint(\"Newton Raphson completed in \", i, \" iterations.\")\n\t\t\t\treturn x, i\n\t\t\t# Calculate Jacobian\n\t\t\tj = fprime(x)\n\t\t\t# Calculate update values\n\t\t\tdx = mat_solve(j, mis)\n\t\t\t# Update angles: d_(n+1) = d_n + dd\n\t\t\tx = x - dx\n\n\tdef diff(self, func, x_eq):\n\t\tmat = np.zeros((len(x_eq), len(x_eq)))\n\t\tdx = np.zeros(len(x_eq))\n\t\th = 1e-8\n\t\tfor i in range(len(x_eq)):\n\t\t\tfor j in range(len(x_eq)):\n\t\t\t\tdx[j] = h\n\t\t\t\tmat[i, j] = (func(x_eq + dx / 2)[i] - func(x_eq - dx / 2)[i]) / h\n\t\t\t\tdx[j] = 0\n\t\treturn mat\n\nif __name__ == \"__main__\":\n\timport matplotlib.pyplot as plt\n\n\t# case_name = \"IEEE14BUS.txt\"\n\tcase_name = \"IEEE14BUS_handout.txt\"\n\t# case_name = \"2BUS.txt\"\n\tps = PowerSystem(case_name, sparse=True)\n\t# v0, d0 = ps.flat_start()\n\t# v_nr, d_nr, it = ps.pf_newtonraphson(v0, d0, prec=2, maxit=10, qlim=False, lam=4)\n\twatch_bus = 14\n\tresults = ps.pf_continuation(watch_bus)\n\tnose_point_index = np.argmax(results[:, 3])\n\tnose_point = results[nose_point_index, :]\n\tprint(nose_point)\n\tplt.plot(results[:, 3], results[:, 1], '-o')\n\tplt.title('PV Curve for Modified IEEE 14-Bus System at Bus {}'.format(watch_bus))\n\tplt.xlabel('Lambda (schedule multiplication factor)')\n\tplt.ylabel('Bus Voltage (p.u.)')\n\tplt.show()\n","sub_path":"classes/power_system.py","file_name":"power_system.py","file_ext":"py","file_size_in_byte":38236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"107968263","text":"import collections\n\nfrom strictdoc.backend.dsl.models.document import Document\nfrom strictdoc.backend.dsl.models.requirement import (\n CompositeRequirement,\n Requirement,\n)\nfrom strictdoc.backend.dsl.models.section import FreeText, Section\nfrom strictdoc.core.level_counter import LevelCounter\n\n\nclass DocumentCachingIterator:\n def __init__(self, 
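\n                 # the strictdoc Document whose section tree this iterator caches and walks\n                 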
document):\n assert isinstance(document, Document)\n\n self.document = document\n self.nodes_cache = []\n self.toc_nodes_cache = []\n\n def table_of_contents(self):\n if len(self.toc_nodes_cache) > 0:\n yield from self.toc_nodes_cache\n return\n\n for node in self.all_content():\n if isinstance(node, FreeText):\n continue\n\n self.toc_nodes_cache.append(node)\n yield node\n\n def all_content(self):\n if len(self.nodes_cache) > 0:\n yield from self.nodes_cache\n return\n\n document = self.document\n level_counter = LevelCounter()\n\n task_list = collections.deque(document.section_contents)\n\n while True:\n if not task_list:\n break\n\n current = task_list.popleft()\n\n if isinstance(\n current, (Section, Requirement, CompositeRequirement)\n ):\n assert current.ng_level, f\"Node has no ng_level: {current}\"\n level_counter.adjust(current.ng_level)\n\n current.context.title_number_string = level_counter.get_string()\n\n self.nodes_cache.append(current)\n yield current\n\n if isinstance(current, Section):\n task_list.extendleft(reversed(current.section_contents))\n\n if isinstance(current, CompositeRequirement):\n task_list.extendleft(reversed(current.requirements))\n","sub_path":"strictdoc/core/document_iterator.py","file_name":"document_iterator.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"46123172","text":"import numpy as np\nimport pandas as pd\nimport scipy.sparse\nimport datetime\n\nby_month_directory = './data/reddit/_by_month/'\n\nruns = {\n\t'2010': ['11','12'],\n\t'2011': ['01','02','03','04','05','06','07','08','09','10','11','12'],\n\t'2012': ['01','02','03','04','05','06','07','08','09','10','11','12'],\n\t'2013': ['01','02','03','04','05','06','07','08','09','10','11','12'],\n\t'2014': ['01','02','03','04','05','06','07','08','09','10','11','12'],\n\t'2015': ['01','02','03','04','05','06','07','08','09','10','11','12'],\n\t'2016': ['01','02','03','04','05','06','07','08','09','10','11','12'],\n\t'2017': ['01','02','03','04','05','06','07','08','09','10','11','12'],\n\t'2018': ['01','02']\n}\n\ndef get_authors_lookup(key):\n\tn = by_month_directory + key + '/' + key + '_nodes.csv'\n\tauthors = pd.read_csv(n, header=0)['author'].values.tolist()\n\tauthors_length = len(authors)\n\n\t# build easy index lookup ...\n\tauthors_lookup = {}\n\tfor index in range(0, authors_length):\n\t\tname = authors[index]\n\t\tauthors_lookup[name] = index\n\t\t\n\treturn authors, authors_lookup, authors_length\n\nfor year, months in runs.items():\n\tfor month in months:\n\n\t\tkey = year + '-' + month\n\n\t\tprint(key)\n\t\tprint(\"Started at\", str(datetime.datetime.now()))\n\n\t\tauthors, authors_lookup, authors_length = get_authors_lookup(key)\n\t\t\n\t\te = by_month_directory + key + '/' + key + '_wt_lt_adj.npz'\n\t\tmatrix = scipy.sparse.load_npz(e).tolil()\n\t\tmatrix = matrix + matrix.transpose()\n\t\t\n\t\tones = scipy.sparse.lil_matrix((matrix.shape[0], 1), dtype=np.int8)\n\t\tones[:,0] = 1\n\n\t\tdegrees = (matrix * ones).todense().tolist()\n\t\t\n\t\tn = authors_length\n\t\tdegree_centrality = {}\n\t\tfor author,i in authors_lookup.items():\n\t\t\tdegree_centrality[author] = degrees[i][0] / (n-1)\n\t\t\n\t\tdf = pd.DataFrame({'author':list(degree_centrality.keys()), 'scores': list(degree_centrality.values()) })\n\t\tdf.to_csv(by_month_directory + key + '/' + key + '_degree_centrality.csv', index=False, header=True)\n\n\t\tprint(\"Completed at\", 
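\n\t\t\t  # evaluated after the per-month centrality CSV is written; pairs with the Started at line above for rough timing\n\t\t\t  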
str(datetime.datetime.now()))\n","sub_path":"Thesis/Centrality/degree_centrality.py","file_name":"degree_centrality.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"566133865","text":"# TM - Lifting data is joined with other data in Tableau\r\nimport pandas as pd\r\nimport os\r\nimport datetime\r\nimport shared_functions as sf\r\n\r\ndef get_equipment_group_rad(p_row): \r\n if p_row['Dry_Live_Rad'] == \"RAD\":\r\n return \"RAD\"\r\n elif p_row['Dry_Live_Rad'] == \"LIVE\":\r\n return \"REEFER LIVE\" \r\n else:\r\n return p_row['Equipment_Group']\r\n \r\ndef exclude_small_customer(p_lift_df): \r\n # Group on Cust_Grp_Name\r\n tmp_lift_df = p_lift_df\r\n tmp_lift_df = p_lift_df[['Cust_Grp_Name', 'Teu']]\r\n tmp_lift_df[['Teu']] = lift_df[['Teu']].astype(float)\r\n \r\n key = ['Cust_Grp_Name']\r\n tmp_lift_df = tmp_lift_df.groupby(by=key, as_index=False, sort=True).agg({'Teu': 'sum'})\r\n tmp_lift_df = tmp_lift_df[(tmp_lift_df['Teu'] < 100)]\r\n #tmp_lift_df = tmp_lift_df [['Cust_Grp_Name']]\r\n #print(type(tmp_lift_df))\r\n #print(tmp_lift_df)\r\n tmp_lift_df.to_csv(\"small_customer.csv\", index=False, encoding=\"utf-8\")\r\n p_lift_df = p_lift_df[~p_lift_df.Cust_Grp_Name.isin(tmp_lift_df.Cust_Grp_Name)]\r\n return p_lift_df\r\n\r\n\r\nstart_date = datetime.date(2016, 9, 1) # YMD - 01-Sep-2016\r\n\r\n# r: Use a raw string, to make sure that Python doesn't try to interpret anything following a \\ as an escape sequence.\r\nworking_dir = r\"D:\\BI\\dashboards\\Customer Ranking\" \r\nos.chdir(working_dir)\r\n\r\n# Merging the lifting files\r\nprint('before reading csv')\r\ndf0 = pd.read_csv(\"NPS DATA_20162H.csv\", dtype='unicode')\r\ndf1 = pd.read_csv(\"NPS DATA_20171H.csv\", dtype='unicode')\r\ndf2 = pd.read_csv(\"NPS DATA_20172H.csv\", dtype='unicode')\r\n\r\n#df0 = pd.read_csv(\"NPS DATA_20162H-000.csv\", dtype='unicode')\r\n#df1 = pd.read_csv(\"NPS DATA_20171H-000.csv\", dtype='unicode')\r\n#df2 = pd.read_csv(\"NPS DATA_20172H-000.csv\", dtype='unicode')\r\nlift_df = pd.concat([df0,df1,df2])\r\n#print('after reading csv')\r\n\r\n# replace space in column name with underscore\r\n#print(sorted(lift_df.columns))\r\nsf.df_format_col_name(lift_df)\r\nlift_df = lift_df.rename(columns={'Item1': 'Teu'})\r\n#print(sorted(lift_df.columns))\r\n\r\n\r\n# filter data older than 12 months\r\nlift_df['Onboard_Dt'] = pd.to_datetime(lift_df['Onboard_Dt'])\r\nlift_df = lift_df[lift_df['Onboard_Dt'] >= start_date]\r\n\r\n \r\nlift_df[['Total_Wt','Teu','Total_Npi']] = lift_df[['Total_Wt','Teu','Total_Npi']].astype(float)\r\nlift_df[['Main_Cust_Code','Trade_Group']] = lift_df[['Main_Cust_Code','Trade_Group']].astype(str)\r\n\r\n# remove rows with TEU less than 1 or cust.grp.name is blank\r\n\r\nlift_df = lift_df[~lift_df.Cust_Grp_Name.isin(['.', '..', '...', '-', 'n/a', 'tba'])]\r\nlift_df = lift_df[(pd.notnull(lift_df['Cust_Grp_Name']))]\r\nprint(len(lift_df))\r\nlift_df = exclude_small_customer(lift_df)\r\nprint(len(lift_df))\r\n\r\n# Remove all customers group name that is numeric\r\nlift_df = lift_df[~lift_df.Cust_Grp_Name.str.isnumeric()]\r\n \r\n# Create Equipment Group column\r\nlift_df['Equipment_Group'] = lift_df['Equipment_Size'].apply(sf.get_equipment_group)\r\n#print(lift_df['Equipment_Group'].unique())\r\nprint(lift_df.Equipment_Group.value_counts())\r\n\r\n\r\nlift_df['Equipment_Group_Rad'] = lift_df.apply(get_equipment_group_rad, 
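\r\n    # axis=1 passes whole rows to get_equipment_group_rad, which reads both Dry_Live_Rad and Equipment_Group\r\n    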
axis=1)\r\nprint(lift_df.Equipment_Group_Rad.value_counts())\r\n\r\n\r\n# change all customer name to upper case\r\nlift_df['Cust_Grp_Name'] = lift_df['Cust_Grp_Name'].str.upper()\r\n\r\nlift_df.to_csv(\"cus_evl_lifting_trade_mgt.csv\", index=False, encoding=\"utf-8\")\r\nprint(\"Done\")\r\n\r\n#lift_df['wt_per_teu'] = (lift_df.total_wt/1000)/lift_df.Item1\r\n#print('after wt_per_teu')\r\n#wt_per_teu = lift_df['wt_per_teu'] \r\n#df['newcolumn'] = df.apply(fab, axis=1)\r\n#lift_df['wt_score'] = lift_df['wt_per_teu'].apply(get_wt_score)\r\n\r\n#lift_df['npi_per_teu'] = lift_df.total_npi/lift_df.Item1\r\n#lift_df['npi_per_day'] = lift_df.total_npi/lift_df['Transit.Time']\r\n# print(lift_df[['npi_per_teu', 'npi_per_day']])\r\n#print('after Transit.Time')\r\n#lift_df['npi_per_teu_percentile'] = [stats.percentileofscore(lift_df['npi_per_teu'], i) for i in lift_df['npi_per_teu']]\r\n\r\n#lift_df['npi_per_teu_percentile'] = stats.rankdata(lift_df['npi_per_teu'], \"max\")/len(lift_df['npi_per_teu'])\r\n#lift_df['npi_per_teu_percentile'] = lift_df['npi_per_teu_percentile']*100\r\n#print('after percentile 1')\r\n#lift_df['npi_per_day_percentile'] = stats.rankdata(lift_df['npi_per_day'], \"max\")/len(lift_df['npi_per_day'])\r\n#lift_df['npi_per_day_percentile'] = lift_df['npi_per_day_percentile']*100\r\n#lift_df['npi_per_day_percentile'] = [stats.rankdata(lift_df['npi_per_day'], i) for i in lift_df['npi_per_day']]\r\n#print('after percentile 2')\r\n\r\n\r\n# convert percentile to score\r\n#lift_df['npi_per_teu_score'] = lift_df['npi_per_teu_percentile'].apply(get_percentile_score)\r\n#lift_df['npi_per_day_score'] = lift_df['npi_per_day_percentile'].apply(get_percentile_score)\r\n\r\n#print(lift_df['npi_per_teu'].describe())\r\n#print(lift_df['npi_per_day'].describe())\r\n\r\n#lift_df['dso_score'] = randint(1,5)\r\n#lift_df['dso_score'] = lift_df.apply (lambda row: randint(1,5), axis=1) \r\n#lift_df['cancellation_score'] = lift_df.apply (lambda row: randint(1,5), axis=1) \r\n#lift_df['volume_score'] = lift_df.apply (lambda row: randint(1,5), axis=1) \r\n\r\n#df['C'] = df.apply(lambda row: randint(1,5), axis=1)\r\n\r\n\r\n'''\r\nFactors\tRatio\r\nNPI/TEU - Bottom%\t30%\r\nNPI/Day - Bottom%\t30%\r\nVolume \t20%\r\nStay Day\t15%\r\nCancellation\t15%\r\nDSO\t10%\r\nCargo Weight\t10%\r\n'''\r\n# applying weighted average\r\n'''\r\nlift_df['npi_per_teu_score_wa'] = lift_df['npi_per_teu_score'] * 0.3\r\nlift_df['npi_per_day_score_wa'] = lift_df['npi_per_day_score'] * 0.3\r\nlift_df['volume_score_wa'] = lift_df['volume_score'] * 0.2\r\nlift_df['stay_score_wa'] = lift_df['Stay_Score'] * 0.15\r\nlift_df['cancellation_score_wa'] = lift_df['cancellation_score'] * 0.15\r\nlift_df['dso_score_wa'] = lift_df['dso_score'] * 0.1\r\nlift_df['wt_score_wa'] = lift_df['wt_score'] * 0.1\r\n\r\n\r\nlift_df['stay_score_wa'].fillna(0, inplace=True)\r\n'''\r\n#print('stay_score-')\r\n\r\n#print(nvl(lift_df['stay_score_wa']))\r\n\r\n#total_score = lift_df['volume_score_wa'] + lift_df['stay_score_wa'] + lift_df['cancellation_score_wa'] + lift_df['dso_score_wa'] + lift_df['wt_score_wa']\r\n#print(total_score)\r\n\r\n#lift_df['total_score_by_npi_per_teu'] = total_score + nvl(lift_df['npi_per_teu_score_wa']) \r\n#lift_df['total_score_by_npi_per_day']= total_score + nvl(lift_df['npi_per_day_score_wa'])\r\n\r\n#print(lift_df[['npi_per_teu', 'npi_per_teu_percentile', 'npi_per_teu_score']])\r\n#print(lift_df[['npi_per_day', 'npi_per_day_percentile', 
'npi_per_day_score']])\r\n\r\n","sub_path":"cus_evl_gen_lifting_TM.py","file_name":"cus_evl_gen_lifting_TM.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"38439910","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom serial import Serial\nfrom serial import SerialException\nfrom time import time\nfrom time import sleep\nfrom time import strftime\nfrom time import localtime\nimport sys\n\n\ndef last_line(s):\n if s.endswith('\\r') or (s.endswith('\\n') and not s.endswith('\\r\\n')):\n return s.splitlines(False)[-1]\n else:\n return ''\n\n\nclass SerEx(Serial):\n def __init__(self, port=None,\n baudrate=115200, print_expect=True, outputstream=None, timeout=2.0):\n start_time = time()\n while True:\n try:\n if port:\n super(SerEx, self).__init__(port,\n baudrate=baudrate, timeout=timeout)\n else:\n super(SerEx, self).__init__(\n baudrate=baudrate, timeout=timeout)\n break # successfully initialized, so leave the while loop\n except SerialException: # Could not initialize the serial port\n if (time() - start_time) > 30: # after 30 seconds\n raise # give up and raise the last exception\n sleep(1) # before retying have some sleep\n self.print_expect = print_expect\n self.outputstream = outputstream\n\n def log(self, msg):\n lines = msg.splitlines(False)\n for line in lines:\n if len(line) > 0: # empty line is never logged\n outstr = strftime('%Y%m%d_%H%M%S ',\n localtime(time())) + line + '\\n'\n sys.stderr.write(outstr)\n if self.outputstream:\n self.outputstream.write(outstr)\n if self.outputstream:\n self.outputstream.flush()\n\n def endswith(self, data):\n if type(data) is str:\n if self.received.endswith(data):\n self.ended = data\n self.preceded = self.received[0:len(self.received) - len(data)]\n return True\n else:\n return False\n elif type(data) is list:\n for d in data:\n if self.received.endswith(d):\n self.ended = d\n self.preceded = \\\n self.received[0:len(self.received) - len(d)]\n return True\n return False\n else:\n raise Exception('Unexpected data type: %s' % str(type(data)))\n\n def ex(self, data, ex_timeout=5.0, ch_timeout=None):\n self.received = ''\n time_start = time()\n time_pre = time_start\n self.timeout = ch_timeout if ch_timeout else ex_timeout\n while not self.endswith(data):\n ch = self.read(size=1)\n if ch:\n self.received += ch\n time_now = time()\n elapsed = time_now - time_start\n if not self.endswith(data):\n if self.received.endswith('\\n') or \\\n self.received.endswith('\\r'):\n time_dif = time_now - time_pre\n time_pre = time_now\n if self.print_expect:\n s = last_line(self.received)\n if len(s) > 0:\n timo = int(time_dif + 1.0)\n if timo > 5:\n self.log(\n '# self.ex(\\'%s\\', ex_timeout=%d)\\n'\n % (s, timo))\n else:\n self.log(\n '# self.ex(\\'%s\\')\\n' % s)\n if elapsed > ex_timeout:\n self.log('ex_timeout')\n if len(self.received) > 0:\n self.log('received:\\n')\n self.log(self.received)\n else:\n self.log('nothing is received\\n')\n raise Exception('ex_timeout expecting \"%s\"' % str(data))\n else:\n time_dif = time_now - time_pre\n time_pre = time_now\n if elapsed < ex_timeout and self.print_expect:\n timo = int(time_dif + 1.0)\n if timo > 5:\n self.log(\n '## self.ex(\\'%s\\', ex_timeout=%d)\\n'\n % (self.received, timo))\n else:\n self.log('ch_timeout')\n if len(self.received) > 0:\n self.log('received:\\n')\n self.log(self.received)\n else:\n self.log('nothing is received\\n')\n raise Exception('ch_timeout expecting \"%s\"' % str(data))\n 
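# Illustrative usage sketch (hypothetical port name and prompts; not part of this module):\n        #     ser = SerEx('/dev/ttyUSB0', baudrate=115200)\n        #     ser.write('version\r\n')\n        #     banner = ser.ex(['# ', '$ '], ex_timeout=10)\n        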
#if data != '\r\n':\n            #self.log(data + '\n')\n        return self.preceded\n\n\n#if __name__ == \"__main__\":\n    #a = 'abcd'\n    #b = ['abc', 'def']\n    #print str(a), type(a), len(a)\n    #print str(b), type(b), len(b)\n\n    #if type(a) is list:\n        #print 'a is list'\n    #if type(a) is str:\n        #print 'a is str'\n    #if type(b) is list:\n        #print 'b is list'\n    #if type(b) is str:\n        #print 'b is str'\n","sub_path":"serex.py","file_name":"serex.py","file_ext":"py","file_size_in_byte":5282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"202240120","text":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"AddSign optimizer\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom tensorflow.python.ops import math_ops, state_ops\nfrom tensorflow.python.training import optimizer\n\nclass AddSignOptimizer(optimizer.Optimizer):\n    def __init__(self, alpha=1.0, decay=0.9, lr=1e-4,\n                 use_locking=False, name=\"AddSign\"):\n        \"\"\"Construct a new AddSignOptimizer.\n\n        https://arxiv.org/abs/1709.07417\n\n        Args:\n            alpha: Float. The offset added to the sign agreement in the AddSign update.\n            decay: Float. Decay to use to maintain the moving averages\n                of trained variables.\n            lr: Float. 
Learning rate.\n use_locking: If True use locks for update operations.\n name: Optional name for the operations created when applying gradients.\n Defaults to \"AddSign\".\n \"\"\"\n super(AddSignOptimizer, self).__init__(use_locking, name)\n # self._ema = moving_averages.ExponentialMovingAverage(decay, num_updates=num_updates)\n self._variable_map = None\n self._alpha = alpha\n self._lr = lr\n self._decay = decay\n self._name = \"AddSign\"\n\n def _create_slots(self, var_list):\n for v in var_list:\n self._zeros_slot(v, \"m\", self._name)\n\n def _apply_dense(self, grad, var):\n return self._apply_add_sign(grad, var)\n\n def _resource_apply_dense(self, grad, var):\n return self._apply_add_sign(grad.handle, var.handle)\n\n def _apply_sparse(self, grad, var):\n return self._apply_add_sign(grad, var)\n\n def _apply_add_sign(self, grad, var):\n m = self.get_slot(var, \"m\")\n lr = math_ops.cast(self._lr, var.dtype.base_dtype)\n decay = math_ops.cast(self._decay, var.dtype.base_dtype)\n \n m = (grad - m) * decay + m \n same_sign = math_ops.sign(m) * math_ops.sign(grad)\n\n alpha = math_ops.cast(self._alpha, var.dtype.base_dtype)\n delta = (alpha + same_sign) * grad\n var = state_ops.assign_sub(var, delta * lr, use_locking=True)\n\n return var\n\n","sub_path":"tensorflow/contrib/opt/python/training/add_sign_optimizer.py","file_name":"add_sign_optimizer.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"149441046","text":"#vjezba7_zd05\n#jure glavan\n#7 December 2017\n\n#the program asks the user to enter a sequence of characters\n#then we create a list of all the characters\n#we sort the list\n#we remove all \".\" and \",\" characters\n#we print the list\n\na=input(\"enter a sequence of characters: \")\n\na=list(a)\na.sort()\n\nwhile (\".\" in a):\n a.remove(\".\")\n \nwhile (\",\" in a):\n a.remove(\",\")\n\nprint(a)","sub_path":"PYTHON/IT/Vježba_7_Jure_Glavan/vjezbe7_zd05.py","file_name":"vjezbe7_zd05.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"6973812","text":"import sys\nimport pickle\nimport numpy as np\n\nfrom mlsploit import Job\nfrom random import randint\n\n#X1 = [3.3 8.9]\n#X2 = [0.0 7.9]\n# python3 iris_simple_query.py 3.5 3.3 2\n\nJob.initialize()\n\ninput_json = Job.input_json # this is a dict for input.json\nfunction = Job.function # this is the function name\noptions = Job.options # this is a dict of the arguments for the function\ninput_files = Job.input_files # this is a list of InputFile objects\n\nclass Rclf:\n def __init__(self, estimators):\n self.estimators = estimators\n self.num_estimators = len(self.estimators)\n self.result = None\n\n def predict(self, X, rannum):\n out = []\n for x in range(len(X)):\n randit = randint(0,self.num_estimators-1)\n clf = self.estimators[randit]\n\n if rannum == -1:\n out.append(clf.predict([X[x]]))\n else:\n if randit == rannum:\n out.append(clf.predict([X[x]]))\n else:\n out.append([-1]) \n\n return np.asarray(out)\n\n def score(self, X, y):\n out = []\n for x in range(len(X)):\n randit = randint(0,self.num_estimators-1)\n\n clf = self.estimators[randit]\n result = clf.predict([X[x]])\n if result == y[x]:\n out.append(1)\n else:\n out.append(0)\n\n return float(sum(out)/len(out))\n\n#Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) \n\nX1 = float(options[\"X1\"])\nX2 = float(options[\"X2\"])\n\nrandomnum = -1\n\"\"\"\nif len(sys.argv) > 3:\n randomnum = 
int(sys.argv[3])\n\"\"\"\nif \"random_k\" in options.keys():\n randomnum = int(options[\"random_k\"])\n\n#print (X1, X2, randomnum)\n\ndumplist = pickle.load(open(\"all.pkl\", \"rb\"))\nclf1 = dumplist[0]\nclf2 = dumplist[1]\nclf3 = dumplist[2]\nclf4 = dumplist[3]\nclf5 = dumplist[4]\neclf = dumplist[5]\nrclf = dumplist[6]\n\nresult = rclf.predict(np.c_[X1, X2], randomnum)\n#print (result[0][0])#\n\noutput_filepath = Job.make_output_filepath('out.txt')\nopen(output_filepath, 'w').writelines(str(result[0][0]))\nprint(\"class:\" + str(result[0][0]))\n\ntags = {'class': int(result[0][0])}\n\nJob.add_output_file(output_filepath, tags=tags, is_extra=True)\nJob.commit_output()\n","sub_path":"iris_simple_query.py","file_name":"iris_simple_query.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"83197174","text":"#!/usr/bin/env python\n##\n# Elipse module\n##\nfrom coordenadas import Coordinates\n\n##\n# Elipse class\n##\nclass Elipse():\n \n CURRENT_COLOR = \"black\"\n TAG = 'circulo'\n i = 0\n \n ##\n # Init function\n # @param canvas : canvas where to draw the elipse\n # \n def __init__(self, canvas):\n self.canvas = canvas\n self.coordinates = Coordinates(self.canvas)\n \n ##\n # Binds the mouse buttons to their handler functions\n # \n def addEvents(self):\n self.canvas.bind(\"<1>\", self.recomecaLinha)\n self.canvas.bind('<B1-Motion>',self.desenhaCirculo)\n \n ##\n # Start a new elipse\n # @param event : Tk event\n def recomecaLinha(self,event):\n self.ultimo_P=self.coordinates.coordenadas()\n self.i = self.i+1\n self.TAG = 'circulo' + str(self.i)\n \n ##\n # Draw the elipse on the canvas\n # @param event : Tk event\n def desenhaCirculo(self,event):\n (x,y) = self.coordinates.coordenadas()\n \n try:\n P = (x,y)\n self.canvas.delete(self.TAG) \n self.canvas.create_oval(self.ultimo_P,P,outline=self.CURRENT_COLOR,fill='white',tag=self.TAG)\n except:\n return\n ","sub_path":"Paint_edit/src/paint_edit/elipse.py","file_name":"elipse.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"227188654","text":"from __future__ import division, print_function, absolute_import\n\nimport tensorflow as tf\nimport numpy as np\n# import matplotlib.pyplot as plt\n#from read_from_db import read_csi_from_db\nimport time\nfrom get_v3 import get_csi\nimport os\nimport random\nimport psutil\nimport shutil\n\n\n#num_of_SPs=[6,10,13,16,19]\nnum_of_SPs=13\n\nfor sp in [num_of_SPs]:\n total_elapsed_time=[]\n total_memory_used=[]\n total_memory_perc=[]\n random_list=random.sample(range(19), sp)\n if os.path.isdir(os.getcwd()+r'\\Models'):\n shutil.rmtree(os.getcwd()+r'\\Models')\n os.mkdir(os.getcwd()+r'\\Models')\n\n for test_i in [random_list.index(random_list[-1])]: # we must create a session for each of loc --> model all other sps with one net!!\n csi = get_csi(random_list[test_i],random_list)\n csi = np.squeeze(csi)\n st = time.time()\n points=[]\n for t in range(sp):\n points.append(csi[t*20:(t+1)*20])\n\n total=points\n\n # Parameters\n learning_rate = 0.01\n training_epochs = 1000\n display_step = 50\n n_labels = sp-1\n #\n\n # Network Parameters\n n_hidden_1 = 45 # 1st layer num features\n n_hidden_2 = 20 # 2nd layer num features\n n_hidden_3=10\n n_hidden_4=5\n n_input = 90\n\n # tf Graph input (only pictures)\n X = tf.placeholder(\"float\", [None, n_input])\n y = tf.placeholder(\"float\", [None, n_labels],name='Labels')\n\n weights = 
{\n 'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1]),name='w1'),\n 'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]),name='w2'),\n 'encoder_h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3]),name='w3'),\n 'encoder_h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4]),name='w4'),\n\n 'decoder_h1': tf.Variable(tf.random_normal([n_hidden_4+n_labels, n_hidden_3]),name='w5'),\n 'decoder_h2': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_2]),name='w6'),\n 'decoder_h3': tf.Variable(tf.random_normal([n_hidden_2,n_hidden_1]),name='w7'),\n 'decoder_h4': tf.Variable(tf.random_normal([n_hidden_1, n_input]),name='w8'),\n }\n\n\n biases = {\n 'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1]),name='b1'),\n 'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2]),name='b2'),\n 'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3]),name='b3'),\n 'encoder_b4': tf.Variable(tf.random_normal([n_hidden_4]),name='b4'),\n\n 'decoder_b1': tf.Variable(tf.random_normal([n_hidden_3]),name='b5'),\n 'decoder_b2': tf.Variable(tf.random_normal([n_hidden_2]),name='b6'),\n 'decoder_b3': tf.Variable(tf.random_normal([n_hidden_1]),name='b7'),\n 'decoder_b4': tf.Variable(tf.random_normal([n_input]),name='b8'),\n }\n\n\n\n # Building the encoder\n def encoder(x):\n # Encoder Hidden layer with sigmoid activation #1\n layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),\n biases['encoder_b1']))\n # Decoder Hidden layer with sigmoid activation #2\n layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),\n biases['encoder_b2']))\n\n layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']),\n biases['encoder_b3']))\n\n layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['encoder_h4']),\n biases['encoder_b4']))\n return layer_4\n\n\n # Building the decoder\n def decoder(x):\n # Encoder Hidden layer with sigmoid activation #1\n layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),\n biases['decoder_b1']))\n # Decoder Hidden layer with sigmoid activation #2\n layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),\n biases['decoder_b2']))\n\n layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']),\n biases['decoder_b3']))\n\n layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['decoder_h4']),\n biases['decoder_b4']))\n return layer_4\n\n # Construct model\n encoder_op = encoder(X)\n decoder_input = tf.concat([encoder_op,y],1,name=\"op_to_restore\")\n\n decoder_op = decoder(decoder_input)\n\n # Prediction\n y_pred = decoder_op\n\n # Targets (Labels) are the input data.\n y_true = X\n\n # Define loss and optimizer, minimize the squared error\n cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))\n optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)\n\n\n # Initializing the variables\n init = tf.global_variables_initializer()\n\n\n saver = tf.train.Saver()\n\n\n # Launch the graph\n with tf.Session() as sess:\n sess.run(init)\n # Training cycle\n for epoch in range(training_epochs):\n # Loop over all batches\n x=csi\n rows=x.shape[0] # == sp-1 * num_of_pcakets(20)\n labels=np.array(([0]*(sp-1))*rows)\n labels=labels.reshape(rows,n_labels) # sp-1*20 , sp-1\n for t in range(sp-1):\n labels[t*20:(t+1)*20,t]=1\n\n\n label=labels\n # Run optimization op (backprop) and cost op (to get loss value)\n _, c = sess.run([optimizer, cost], feed_dict={X: x,y: label})\n tf.add_to_collection(\"predict\", y_pred)\n # Display logs per epoch step\n if epoch % 
display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch+1),\"cost=\", \"{:.9f}\".format(c))\n # print(\"Epoch:\", '%04d' % (epoch+1),\"accuracy\", \"{:.9f}\".format(accuracy))\n\n print(\"Optimization Finished for point \" + str(test_i+1) + \"! \")\n os.mkdir(os.getcwd()+'/Models/Test'+str(test_i+1))\n saver.save(sess,os.path.join(os.getcwd()+r'\\Models\\Test'+str(test_i+1), 'trained_variables'+'.ckpt'))\n tf.reset_default_graph()\n total_elapsed_time.append( float(\"{0:.2f}\".format(time.time()-st)))\n total_memory_used.append(float(\"{0:.2f}\".format(list(psutil.virtual_memory())[3]/1e9)))\n total_memory_perc.append(list(psutil.virtual_memory())[2])\n\nprint(float(\"{0:.2f}\".format(np.mean(total_elapsed_time))),float(\"{0:.2f}\".format(np.sum(total_elapsed_time))))\nprint(float(\"{0:.2f}\".format(np.mean(total_memory_used))))\nprint(float(\"{0:.2f}\".format(np.mean(total_memory_perc))))\n\n","sub_path":"Overhead-Test/DeepPos/OffLine_Training_v3_super.py","file_name":"OffLine_Training_v3_super.py","file_ext":"py","file_size_in_byte":6934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"185763521","text":"import discord\nfrom discord.ext import commands\n\nfrom database import models, database\n\n\nclass Library(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.db = None\n\n @commands.command(\n help='Looks up by name in the RevivalStory Library',\n description='Looks up by name in the RevivalStory Library',\n )\n async def lookup(self, ctx, *, name: str):\n item = await self.db.search(name)\n if isinstance(item, list):\n return await ctx.send(f'Did you mean... `{\"`, `\".join(item)}`?')\n elif isinstance(item, (models.Monster, models.Equip, models.Item)):\n embed = discord.Embed.from_dict(item.to_embed_dict())\n embed.set_thumbnail(url=item.get_image_url())\n return await ctx.send(embed=embed)\n\n @commands.command(\n help='Searches all items in the given category (equips, items, monsters) with the given condition',\n description='Searches all items in the given category (equips, items, monsters) with the given condition\\n'\n 'Conditions are formatted `<field>` `<operator>` `<value>`\\n'\n 'Operators include any combination of `<`, `>`, and `=`\\n'\n 'Value is an integer.'\n )\n async def search(self, ctx, category: str, *, condition: str):\n values = await self.db.where(category, condition)\n if not values:\n return\n\n try:\n return await ctx.send(f'Returned value(s): `{\"`, `\".join(values)}`')\n except discord.HTTPException:\n return await ctx.send('Please narrow your search.')\n\n async def cog_before_invoke(self, ctx):\n if self.db is None:\n self.db = database.Database(self.bot.pool)\n\n\ndef setup(bot):\n bot.add_cog(Library(bot))\n","sub_path":"cogs/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"358920287","text":"\nimport tensorflow as tf\nimport numpy as np\nimport agent\nimport cloudpickle as pickle\nimport gym\nimport matplotlib.pyplot as plt\n\nlearning_rate = 1e-3\ngamma = 0.99 # discount factor for reward\nD = 80\nresume = False\nsave_url = './models/pong_model_4_lr3.ckpt'\n\n# np.set_printoptions(threshold='nan')\n\n\ndef prepro(I):\n \"\"\" prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector \"\"\"\n I = I[35:195] # crop\n I = I[::2, ::2, 0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase 
background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()\n\n\ndef resetGradBuffer(gradBuffer):\n for ix, grad in enumerate(gradBuffer):\n gradBuffer[ix] = grad * 0\n return gradBuffer\n\ndef discount_rewards(r):\n \"\"\" take 1D float array of rewards and compute discounted reward \"\"\"\n\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r\n\ndef prepro(I, c):\n \"\"\" prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector \"\"\"\n # if c % 10 == 0:\n # plt.imshow(I)\n # plt.show()\n I = I[35:195] # crop\n I = I[::2, ::2, 0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n # if c % 10 == 0:\n # plt.imshow(I)\n # plt.show()\n return I.astype(np.float)\n\ndef discount_epr(rs_n):\n discounted_epr = discount_rewards(rs_n)\n discounted_epr -= np.mean(discounted_epr)\n discounted_epr /= np.std(discounted_epr)\n return discounted_epr\n\nreward_sum = 0\nrunning_reward = None\nprev_x = None\nenv = gym.make('Pong-v0')\ntf.reset_default_graph()\nagent = agent.policy(200, D, learning_rate)\nepisode_number = 0\nreward_sum = 0\nrunning_total = 0\nxs, ys, dlogps, rs = [], [], [], []\nsaver = tf.train.Saver()\ncount = 0\nrunning = True\n\n\n\nwith tf.Session() as sess:\n if resume:\n print('resuming')\n saver.restore(sess, './models/pong_model_3_lr3.ckpt')\n else:\n sess.run(tf.global_variables_initializer())\n\n observation = env.reset()\n gradBuffer = sess.run(agent.tvars)\n gradBuffer = resetGradBuffer(gradBuffer)\n agent.setSession(sess)\n\n\n while running:\n # keep running episodes\n cur_x = prepro(observation, count)\n x = cur_x - prev_x if prev_x is not None else np.zeros((D,D))\n prev_x = cur_x\n\n # if count % 10 == 0:\n # print('count is 10')\n # reshaped = x.reshape(80,80)\n # plt.imshow(reshaped)\n # plt.show()\n\n x = np.reshape(x, [1, D, D, 1])\n\n\n xs.append(x)\n probability = agent.evaluatePolicy(x)\n action = 2 if np.random.uniform() < probability else 3 # fake label, what does this mean???\n y = 1 if action == 2 else 0\n # this is a regularisation gradient that pushes slighty for the thing that happened to happen if it was likely, and strongly for it to happen again if it was unlikely\n observation, reward, done, info = env.step(action)\n reward_sum += reward\n rs.append(reward)\n ys.append(y)\n# calculate next action from observation (get loss at this point?)\n# step environment\n# add observations, action, reward to arrays\n\n count += 1\n # print('episode %d reward_sum %f' % (episode_number, reward))\n if done:\n episode_number += 1\n # numpyify these 2 arrays\n x_n = np.vstack(xs)\n y_n = np.vstack(ys)\n rs_n = np.vstack(rs)\n print(reward)\n\n if reward != -1:\n xpos_n = np.vstack(xs)\n ypos_n = np.vstack(ys)\n rposs_n = np.vstack(rs)\n\n\n xs, ys, dlogps, rs = [], [], [], [] # reset array memory for next point (in the game of pong)\n # then normalise, so take the mean and divide by standard deviation\n # do dlogps*rs now\n # if episode_number % 5 ==0:\n # print('rs_n')\n # print(rs_n)\n discounted_epr = discount_epr(rs_n)\n # if episode_number % 5 == 0:\n # print('discounted_epr')\n # print(discounted_epr)\n\n 
grads = discounted_epr\n # calculate the relevant gradients for the policy network\n tGrad = agent.calculatePolicyGradients(x_n, y_n, grads)\n\n if np.sum(tGrad[0] == tGrad[0]) == 0:\n break\n # aggregate the gradients into the buffer (can just sum them as 2 variables, saves a dimension)\n for ix, grad in enumerate(tGrad):\n gradBuffer[ix] += grad\n # then train the policy network in a batch!\n if episode_number % 20 == 0:\n agent.trainPolicyNetwork(gradBuffer[0], gradBuffer[1])\n resetGradBuffer(gradBuffer)\n\n # boring book-keeping\n running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01\n print('resetting env. episode reward total was %f. running mean: %f' % (reward_sum, running_reward))\n\n # if reward_sum != -21:\n # point = {'x': x_n, 'y': y_n, 'r': rs_n}\n # # running = False\n # pickle.dump(point, open('winning_point.p', 'wb'))\n\n if episode_number % 100 == 0:\n model = {'W1': agent.getW1(), 'W2': agent.getW2()}\n pickle.dump(agent.writeWeights(), open('nn_p_save2.p', 'wb'))\n saver.save(sess, save_url)\n\n reward_sum = 0\n observation = env.reset() # reset env\n prev_x = None\n\n if reward != 0: # Pong has either +1 or -1 reward exactly when game ends.\n print(\n ('ep %d: game finished, reward: %f' % (episode_number, reward)) + ('' if reward == -1 else ' !!!!!!!!'))\n","sub_path":"pong_conv.py","file_name":"pong_conv.py","file_ext":"py","file_size_in_byte":6261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"271242362","text":"# # -*- coding: utf-8 -*-\nimport requests\nimport json\nimport os\nimport time\nimport datetime\nimport pymysql\nimport urllib.request\nimport random\nimport string\n#db = pymysql.connect(host= \"rm-bp1d3nze222r06y54.mysql.rds.aliyuncs.com\",port=3306,user=\"newsflow\",passwd=\"3MvO9da9Wn\",db=\"newsflow\", charset=\"utf8\")\ndb = pymysql.connect(host= \"192.168.1.168\",port=3306,user=\"admin\",passwd=\"123\",db=\"newsflow\", charset=\"utf8\")\ncur = db.cursor()\nfor i in range(1,226):\n i=str(i)\n url = 'http://ad.chntid.com/api/v1/reptile/duanzi?page='+i\n print(url)\n response = requests.get(url)\n data=json.loads(response.text)\n data=data['data']['data']\n address='G:\\duanzi'\n # address='/mountimgserver/newsflow/'\n today = time.strftime('%Y%m%d', time.localtime(time.time()))\n if os.path.isdir(address+today):\n pass\n else:\n os.mkdir(address+today)\n imgPath = '' + today + \"/\"\n for i in data:\n id=i['id']\n title=i['title']\n sql_cmd = '''select * from t_crosstalk where id = '%s' ''' % id\n cur.execute(sql_cmd)\n res = cur.fetchall()\n suc = True\n if (len(res) == 0):\n type=i['type']\n content=i['content']\n likes=i['likes']\n share=i['share']\n status=i['status']\n create_time=i['create_time']\n update_time=i['create_time']\n icon=\"\"\n # try:\n # imgName = ''.join(random.sample(string.ascii_letters + string.digits, 32))\n # imgurl = i['file']\n # bytes = urllib.request.urlopen(imgurl)\n # icon = address + imgPath + imgName + \".jpg\"\n # f = open(icon, 'wb');\n # f.write(bytes.read());\n # f.flush(); # flush the buffered data to disk immediately and clear the buffer\n # f.close(); # close the file\n # print(\"has image\")\n # except:\n # print(\"no image\")\n insert_content = (\n \"INSERT INTO t_crosstalk(id,title,type,content,likes,share,status,create_time,update_time,icon)\" \"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\")\n data_list=(id,title,type,content,likes,share,status,create_time,update_time,icon)\n cur.execute(insert_content, data_list)\n db.commit()\n print(\"add \" + title)\n else:\n print(\"already exists: 
已存在\" + title)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"206538432","text":"import dateparser\nimport datetime\nimport logging\nimport string\n\n_logger = logging.getLogger(__name__)\n\n\ndef parse_and_format_date(datestr, **kwargs):\n try:\n dt = dateparser.parse(datestr, **kwargs)\n fmtdate = dt.strftime(\"%Y-%m-%d\")\n _logger.info(\"Parsing %s result %s\", datestr, fmtdate)\n return fmtdate\n except:\n _logger.exception(\"Could not parse %s as date\", datestr)\n return \"\"\n\n\ndef parse_and_format_month_year(datestr, **kwargs):\n try:\n dt = dateparser.parse(datestr, **kwargs)\n fmtdate = dt.strftime(\"%Y-%m\")\n _logger.info(\"Parsing %s result %s\", datestr, fmtdate)\n return fmtdate\n except:\n _logger.exception(\"Could not parse %s as date\", datestr)\n return \"\"\n\n\ndef select_highest_amount(matches):\n maxval = 0.00\n for m in matches:\n val = float(m.replace(\",\", \".\"))\n if val > maxval:\n maxval = val\n _logger.info(\"selected %s out of %s\", maxval, len(matches))\n return str(maxval)\n\n\ndef strip_spaces(matches):\n return matches.replace(\" \", \"\")\n\n\ndef strip_empty_matches(matches):\n return filter(None, matches)\n\n\ndef format_property_address(matches):\n return string.capwords(list(strip_empty_matches(matches))[0]);\n\n\ndef info_dump(matches):\n _logger.error(\":::: In Info Dump ::::\")\n _logger.error(\"Processing matches which is of type %s\", type(matches))\n _logger.error(\"Content of matches is %s\", matches)\n for match in matches:\n _logger.error(\"match type: %s -> match value: %s\", type(match), match)\n _logger.error(\":::: Leaving Info Dump ::::\")\n return matches\n\ndef selector_dump(matches):\n _logger.error(\":::: In Selector Dump ::::\")\n _logger.error(\"Processing matches which is of type %s\", type(matches))\n _logger.error(\"Content of matches is %s\", matches)\n for match in matches:\n _logger.error(\"match type: %s -> match value: %s\", type(match), match)\n _logger.error(\":::: Leaving Selector Dump ::::\")\n return matches\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"15949663","text":"#!/usr/bin/python\n#coding:utf-8\n\nimport math\nfrom itertools import count\n\ndef prime(number):\n\tif number <= 1:\n\t\treturn False\n\tfor i in range(2,int(math.sqrt(number))+1):\n\t\tif number % i == 0:\n\t\t\treturn False\n\treturn True\ndef prime2(number):\n\tif number <=1:\n\t\treturn False\n\ti = 2\n\twhile i*i < number:\n\t\tif number % i == 0:\n\t\t\treturn False\n\t\ti+=1\n\treturn True\ndef prime3(number):\n\tif number<=1:\n\t\treturn False\n\tfor i in count(2):\n\t\tif i*i > number:\n\t\t\treturn True\n\t\tif number % i == 0:\n\t\t\treturn False\ndef prime4(number):\n\tif number<=1:\n\t\treturn False\n\tif number == 2:\n\t\treturn True\n\tif number % 2 == 0:\n\t\treturn False\n\ti = 3\n\twhile i*i < number:\n\t\tif number % i == 0:\n\t\t\treturn False\n\t\ti+=2\n\treturn True\n\nif __name__ == \"__main__\":\n\tp = []\n\tfor i in range(0,100):\n\t\tif prime2(i):\n\t\t\tp.append(i)\n\tprint(p)\n","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"559579587","text":"import sys\n\ndef isCyclicPerm_KMP(A,B):\n 
STRING = A+A\n PATTERN = B\n \n lps = [0 for i in range(len(B))]\n i = 0\n j = 1\n while j < len(lps): # O(M)\n if PATTERN[i] != PATTERN[j]:\n j += 1\n if i > 0:\n i = lps[i-1]\n else:\n lps[j] = i+1\n i += 1\n j += 1\n\n s = 0\n p = 0\n while s < len(STRING):\n if STRING[s] != PATTERN[p] and p > 0:\n p = lps[p-1]\n else:\n s += 1\n p += 1\n \n if p == len(lps):\n return True\n \n return False\n\n\nTCs = int(input())\nfor t in range(1,TCs+1):\n N,K = map(int,input().split())\n A = list(map(int,input().split()))\n B = list(map(int,input().split()))\n \n CP = isCyclicPerm_KMP(A,B)\n \n doable = True\n if K == 0 and A != B:\n doable = False\n elif len(set(A)) == 1:\n doable = True\n elif K == 1 and A == B: #May no longer be true in A2.\n if (N % 2 == 0 and A[:N//2] == A[N//2:]):\n doable = True\n else:\n doable = False\n elif N == 2 and CP:\n if A == B:\n doable = (K % 2) == 0\n else:\n doable = (K % 2) != 0\n else:\n doable = CP\n\n print(f\"Case #{t}: {'YES' if doable else 'NO'}\")\n sys.stdout.flush()\n","sub_path":"Facebook/2022/Round1/A2.py","file_name":"A2.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"256832414","text":"#!/usr/bin/python\n__author__ = 'madsens'\n#Runs the Museum Skeleton.\n\nimport time, sys, os\nsys.path.append(\"/home/pi/PiClasses\")\nimport Logging\nimport GPIOLib\nfrom termios import tcflush, TCIOFLUSH\n\n#Instantiate Logging and GPIO Classes\ndbConn = Logging.Logging()\ngpio = GPIOLib.GPIOLib(\"BOARD\", \"LOW\", [11,13])\n\nwhile True: # Runs until break is encountered. We want to set it to break on a particular ID.\n n = raw_input(\"Scanned ID: \")\n currentScan = time.time()\n if n == \"STOP\":\n break # stops the loop\n else :\n dbConn.logAccess(n)\n\n #Trigger \"up\" GPIO Pin.\n gpio.on([11])\n time.sleep(1)\n gpio.off([11])\n\n time.sleep (10)\n\n # Trigger \"down\" GPIO Pin.\n gpio.on([13])\n time.sleep(1)\n gpio.off([13])\n\n #flush keyboard buffer\n sys.stdout.flush();\n tcflush(sys.stdin, TCIOFLUSH)","sub_path":"OwlHat.py","file_name":"OwlHat.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"58522770","text":"__all__ = ['DawidSkene']\n\nimport attr\nimport numpy as np\nimport pandas as pd\n\nfrom . import annotations\nfrom .annotations import manage_docstring, Annotation\nfrom .base_aggregator import BaseAggregator\nfrom .majority_vote import MajorityVote\nfrom .utils import get_most_probable_labels\n\n_EPS = np.float_power(10, -10)\n\n\n@attr.s\n@manage_docstring\nclass DawidSkene(BaseAggregator):\n \"\"\"\n Dawid-Skene aggregation model\n A. Philip Dawid and Allan M. Skene. 1979.\n Maximum Likelihood Estimation of Observer Error-Rates Using the EM Algorithm.\n Journal of the Royal Statistical Society. Series C (Applied Statistics), Vol. 
28, 1 (1979), 20–28.\n\n https://doi.org/10.2307/2346806\n \"\"\"\n\n n_iter: int = attr.ib()\n\n probas_: annotations.OPTIONAL_PROBAS = attr.ib(init=False)\n priors_: annotations.OPTIONAL_PRIORS = attr.ib(init=False)\n labels_: annotations.OPTIONAL_LABELS = attr.ib(init=False)\n errors_: annotations.OPTIONAL_ERRORS = attr.ib(init=False)\n\n @staticmethod\n @manage_docstring\n def _m_step(data: annotations.LABELED_DATA, probas: annotations.TASKS_LABEL_PROBAS) -> annotations.ERRORS:\n \"\"\"Perform M-step of Dawid-Skene algorithm.\n\n Given performers' answers and estimates of the tasks' true label probabilities,\n estimates each performer's error probability matrix.\n \"\"\"\n joined = data.join(probas, on='task')\n joined.drop(columns=['task'], inplace=True)\n\n errors = joined.groupby(['performer', 'label'], sort=False).sum()\n errors.clip(lower=_EPS, inplace=True)\n errors /= errors.groupby('performer', sort=False).sum()\n\n return errors\n\n @staticmethod\n @manage_docstring\n def _e_step(data: annotations.LABELED_DATA, priors: annotations.LABEL_PRIORS, errors: annotations.ERRORS) -> annotations.TASKS_LABEL_PROBAS:\n \"\"\"\n Perform E-step of Dawid-Skene algorithm.\n\n Given performers' answers, the labels' prior probabilities and each performer's\n error probability matrix, estimates the tasks' true label probabilities.\n \"\"\"\n\n # We have to multiply lots of probabilities and such products are known to converge\n # to zero exponentially fast. To avoid floating-point precision problems we work with\n # logs of original values\n joined = data.join(np.log2(errors), on=['performer', 'label'])\n joined.drop(columns=['performer', 'label'], inplace=True)\n log_likelihoods = np.log2(priors) + joined.groupby('task', sort=False).sum()\n\n # Exponentiating log_likelihoods 'as is' may still get us beyond our precision.\n # So we shift every row of log_likelihoods by a constant (which is equivalent to\n # multiplying likelihoods rows by a constant) so that max log_likelihood in each\n # row is equal to 0. 
This trick ensures proper scaling after exponentiating and\n # does not affect the result of E-step\n scaled_likelihoods = np.exp2(log_likelihoods.sub(log_likelihoods.max(axis=1), axis=0))\n return scaled_likelihoods.div(scaled_likelihoods.sum(axis=1), axis=0)\n\n @manage_docstring\n def fit(self, data: annotations.LABELED_DATA) -> Annotation(type='DawidSkene', title='self'):\n\n data = data[['task', 'performer', 'label']]\n\n # Early exit\n if not data.size:\n self.probas_ = pd.DataFrame()\n self.priors_ = pd.Series()\n self.errors_ = pd.DataFrame()\n self.labels_ = pd.Series()\n return self\n\n # Initialization\n probas = MajorityVote().fit_predict_proba(data)\n priors = probas.mean()\n errors = self._m_step(data, probas)\n\n # Updating proba and errors n_iter times\n for _ in range(self.n_iter):\n probas = self._e_step(data, priors, errors)\n priors = probas.mean()\n errors = self._m_step(data, probas)\n\n # Saving results\n self.probas_ = probas\n self.priors_ = priors\n self.errors_ = errors\n self.labels_ = get_most_probable_labels(probas)\n\n return self\n\n @manage_docstring\n def fit_predict_proba(self, data: annotations.LABELED_DATA) -> annotations.TASKS_LABEL_PROBAS:\n return self.fit(data).probas_\n\n @manage_docstring\n def fit_predict(self, data: annotations.LABELED_DATA) -> annotations.TASKS_LABELS:\n return self.fit(data).labels_\n","sub_path":"src/aggregation/dawid_skene.py","file_name":"dawid_skene.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"404935755","text":"from settings import INSTALLED_APPS\r\nfrom functools import reduce\r\n\r\n\r\ndef server_actions():\r\n modules = reduce(lambda value, item: value + [__import__(f'{item}.actions')],\r\n INSTALLED_APPS,\r\n [])\r\n actions = reduce(lambda value, item: value + [getattr(item, 'actions', [])],\r\n modules,\r\n [])\r\n return reduce(lambda value, item: value + getattr(item, 'action_names', []),\r\n actions,\r\n [])\r\n\r\n\r\ndef resolve(action_name, actions=None):\r\n if actions:\r\n action_list = actions\r\n else:\r\n action_list = server_actions()\r\n map_actions = {action.get('action'): action.get('controller') for action in action_list}\r\n\r\n return map_actions.get(action_name)\r\n\r\n","sub_path":"messenger/server/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"598639035","text":"from datetime import date, timedelta\nfrom utils import ta_calculations as ta_calcs\nfrom utils import functions as fnc\nfrom utils import ohlc_values as ohlc\nfrom utils import dollar_price as dllp\nfrom flask import Flask, request, jsonify\nfrom flask_cors import cross_origin\n\n\napp = Flask(__name__)\n\n\n@app.route('/api/technical-analysis-between', methods=['GET'])\ndef get_ta():\n ticker = request.args.get('ticker')\n indicator = request.args.get('indicator')\n start_date = request.args.get('start_date')\n end_date = request.args.get('end_date')\n print(f\"indicator: {indicator}, ticker: {ticker}\")\n\n indicator_values, str_dates = ta_calcs.get_indicator_values(ticker, indicator, start_date, end_date)\n\n ta = fnc.ta_json_format(ticker, indicator, indicator_values, str_dates)\n\n return jsonify(ta)\n\n\n@app.route('/api/simple-technical-analysis', methods=['GET'])\ndef get_simple_ta():\n ticker = request.args.get('ticker')\n simple_ta = ta_calcs.get_simple_ta(ticker)\n\n return jsonify(simple_ta)\n\n\n@app.route('/api/price-between', methods=['GET'])\ndef get_price_between():\n ticker = request.args.get('ticker')\n 
start_date = request.args.get('start_date')\n end_date = request.args.get('end_date')\n data = ohlc.get_ohlc_between(ticker, start_date, end_date)\n\n return_json = {}\n return_json['ticker'] = ticker\n return_json['name'] = fnc.get_name(ticker)\n return_json['data'] = data\n\n return jsonify(return_json)\n\n\n@app.route('/api/year-today-price', methods=['GET'])\ndef get_year_today_prices():\n ticker = request.args.get('ticker')\n\n data = ohlc.get_ohlc_year_today(ticker)\n\n return_json = {}\n return_json['ticker'] = ticker\n return_json['name'] = fnc.get_name(ticker)\n return_json['data'] = data\n\n return jsonify(return_json)\n\n\n@app.route('/ccl-cedear-dollar', methods=['GET'])\ndef get_ccl_vs_cedear_dollar():\n ticker = request.args.get('ticker')\n \n data = dllp.calculate_difference(ticker)\n print(data)\n\n response = jsonify(data)\n return response\n\n\n@app.after_request\ndef after_request(response):\n header = response.headers\n header['Access-Control-Allow-Origin'] = '*'\n header['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'\n header['Access-Control-Allow-Methods'] = 'GET'\n return response\n\n\nif __name__ == '__main__':\n app.run(debug=True, threaded=True, port=5000)\n","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"201662442","text":"\"\"\"\nauthor : @akash kumar\ninstitute : Moradabad Institute Of Technology\n\"\"\"\n\nfor t in range(1,int(input())+1):\n def subString(s,n):\n ss=[]\n for i in range(n):\n for len in range(i+1,n+1):\n ss.append(s[i:len])\n return ss\n\n nn=int(input())\n S=str(input())\n \"\"\"\n SS=0\n for i in S:\n if i==\"F\":\n SS+=1\n print(SS)\n \"\"\"\n ss=subString(S,nn)\n #print(ss)\n ans_list=[]\n sss=[]\n for i in ss:\n if len(i)!=1 or len(i)!=2:\n sss.append(i)\n for s in sss:\n n=len(s)\n\n if n==1 or s==\"XF\" or s==\"XX\" or s==\"FF\" or s==\"FX\" or s==\"OF\" or s==\"OO\" or s==\"FO\":\n continue\n else:\n ans=[]\n f=-1\n for i in range(n):\n if s[i]==\"O\":\n f=0\n ans.append(f)\n #print()\n if s[i]==\"X\":\n #print()\n f=1\n ans.append(f)\n final_ans=0\n for i in range(len(ans)-1):\n if abs(ans[i]-ans[i+1])==1:\n final_ans+=1\n \n ans_list.append(final_ans)\n print(\"Case #\"+str(t)+\": \",sum(ans_list)%1000000007)\n \n","sub_path":"FacebookHackerCup/2021/Round1/problem1_week_typing_chapter2.py","file_name":"problem1_week_typing_chapter2.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"101787084","text":"infile = open(\"input\", \"r\")\noutfile = open(\"output\", \"w\")\n\ncases = infile.readline()\n\nfor case in range(int(cases)):\n\tN = infile.readline()\n\tappears = [0]*2501\n\tfor i in range(2*int(N)-1):\n\t\tline = infile.readline()\n\t\tfor j in line.split(' '):\n\t\t\tnumber = int(j)\n\t\t\tappears[number] += 1\n\toutfile.write(\"Case #\" + str(int(case+1)) + \":\"),\n\tfor k in range(2501):\t\n\t\tif appears[k]%2 != 0:\n\t\t\toutfile.write(\" \" + str(k)),\n\t\n\toutfile.write(\"\\n\"),\n","sub_path":"solutions_5630113748090880_1/Python/proxy/prob2.py","file_name":"prob2.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"377771726","text":"# Fibonacci\n\ndef fibonacci(n):\n '''Fibonacci - return the nth value in the fibonacci series (starting with zero index) - call with 
series.fibonacci(n)'''\n if (n<1):\n \treturn (0)\n if (n == 1):\n \treturn (1)\n else:\n \treturn( fibonacci(n-2) + fibonacci(n-1) )\n\nprint (fibonacci.__doc__)\n\ndef lucas(n):\n '''Lucas - return the nth value in the lucas series - call with series.lucas(n)'''\n if (n<1):\n \treturn (2)\n if (n == 1):\n \treturn (1)\n else:\n \treturn( lucas(n-2) + lucas(n-1) )\n\nprint (lucas.__doc__)\n\n\ndef sum_series(n, x=0, y=1):\n \"\"\"sum_series - return the nth value in the sum series - call with series.sum_series(n,x,y), \n \tx and y are optional starting values (0 and 1 if not specified)\n\n \t\"\"\"\n\n if (n<1):\n \treturn (x)\n if (n == 1):\n \treturn (y)\n else:\n \treturn( sum_series(n-2, x, y) + sum_series(n-1, x, y) )\n\nprint (sum_series.__doc__)\n\nif __name__ == \"__main__\":\n # this will only print if run as a script\n print(\"running tests\")\n assert fibonacci(6) == sum_series(6), \"fib and sum_series at n=6 should be the same\"\n\n assert lucas(6) == sum_series(6,2,1), \"lucas and sum_series at n=6 should be the same\"\n\n assert sum_series(5,3,2) == 19, \"sum_series with (5,3,2) should be 12\"\n\n print(\"the tests pass\")\n\n","sub_path":"students/matiasli/lesson02/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"611675056","text":"#!/usr/bin/env python3\n\nfrom mdcClasses import *\n\n#File containing routines to convert from tokens to\n#markdown classes\n\n#returns list of markdown objects\n#tokens is the list of tokens from tokenize\ndef tokensToObjects(tokens):\n\tobjects = []\t\n\ti = 0\n\twhile i < len(tokens):\n\t\tif tokens[i].type == \"Header\":\n\t\t\tobjects.append(header(tokens[i]))\n\t\telif tokens[i].type == \"Image\":\n\t\t\tobjects.append(image(tokens[i]))\n\t\telif tokens[i].type == \"Text\":\n\t\t\ti = paragraph(i, tokens, objects)\n\t\t\tcontinue\n\t\telif tokens[i].type == \"List\":\n\t\t\ti = list(i, tokens, objects)\n\t\t\tcontinue\n\t\telif tokens[i].type == \"Table\":\n\t\t\ti = table(i, tokens, objects)\n\t\t\tcontinue\n\t\telif tokens[i].type == \"Code Block\":\n\t\t\ti = codeblock(i, tokens, objects)\n\t\t\tcontinue\n\t\ti = i + 1\t\t\n\treturn objects\t\n\n#returns a header object\n#tok is the header token to be converted\ndef header(tok):\n\tcount = 0\n\tfor c in tok.line:\n\t\tif c == \"#\":\n\t\t\tcount = count + 1\n\t\telse:\n\t\t\tbreak\n\treturn Header(count, tok.line[count+1:])\n\n#returns an image object\n#tok is the image token to be converted\ndef image(tok):\n\taltStart = 2\n\taltEnd = tok.line.find(\"](\")\n\taltText = tok.line[altStart:altEnd]\n\n\turlStart = altEnd + 2\n\tif tok.line[-2] == \"%\":\n\t\turlEnd = tok.line.rfind(\" \")\n\t\turl = tok.line[urlStart:urlEnd]\n\n\t\tscaleStart = urlEnd + 1\n\t\tscaleEnd = -1\n\t\tscale = tok.line[scaleStart:scaleEnd]\n\t\tobj = Image(altText, url, scale=scale)\n\telse:\n\t\turlEnd = -1\n\t\turl = tok.line[urlStart:urlEnd]\n\n\t\tobj = Image(altText, url)\n\t\t\n\treturn obj\n\n#returns a link object\n#tok is the link token to be converted\ndef link(tok):\n\ttextStart = 1\n\ttextEnd = tok.line.find(\"](\")\n\ttext = tok.line[textStart:textEnd]\n\n\turlStart = textEnd + 2\n\turlEnd = -1\n\turl = tok.line[urlStart:urlEnd]\n\n\treturn Link(text, url)\n\n#Creates Paragraph object and appends to objects list\n#returns updated token list index\n#i is the current token list index\n#tokens is the token list\n#objects is the object list\ndef paragraph(i, tokens, 
objects):\n\tpar = Paragraph()\n\tbold = False\n\tit = False\n\tcode = False\n\tignore = False\n\tskip = False\n\n\tprev = \"\"\n\ttext = \"\"\n\n\twhile tokens[i].type == \"Text\":\n\t\tfor x, c in enumerate(tokens[i].line):\n\t\t\tif c == '\\\\' and not ignore:\n\t\t\t\tignore = True\n\t\t\telif skip:\n\t\t\t\tskip = False\n\t\t\t\tcontinue\n\t\t\telif c == '*' and not ignore and not code:\n\t\t\t\t#Append existing text if it's not empty\n\t\t\t\tif text != \"\":\n\t\t\t\t\tpar.append(Text(text, bold=bold, it=it, code=code))\n\t\t\t\t\ttext = \"\"\n\t\t\t\t#Toggle bold or it depending on stars\n\t\t\t\tif x+1 < len(tokens[i].line):\n\t\t\t\t\tif tokens[i].line[x+1] == \"*\":\n\t\t\t\t\t\tbold = not bold\n\t\t\t\t\t\tskip = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tit = not it\n\t\t\t\telse:\n\t\t\t\t\tit = not it\n\t\t\telif c == '_' and not ignore and not code:\n\t\t\t\tif text != \"\":\n\t\t\t\t\tpar.append(Text(text, bold=bold, it=it, code=code))\n\t\t\t\t\ttext = \"\"\n\t\t\t\tif x+1 < len(tokens[i].line):\n\t\t\t\t\tif tokens[i].line[x+1] == \"_\":\n\t\t\t\t\t\tbold = not bold\n\t\t\t\t\t\tskip = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tit = not it\n\t\t\t\telse:\n\t\t\t\t\tit = not it\n\t\t\telif c == '`' and not ignore:\n\t\t\t\tif text != \"\":\n\t\t\t\t\tpar.append(Text(text, bold=bold, it=it, code=code))\n\t\t\t\t\ttext = \"\"\n\t\t\t\tcode = not code\n\t\t\telif c == \"[\" and not ignore and not code:\n\t\t\t\tif text != \"\":\n\t\t\t\t\tpar.append(Text(text, bold=bold, it=it, code=code))\n\t\t\t\t\ttext = \"\"\n\t\t\telif c == \"]\" and not ignore and not code:\n\t\t\t\tprev = text\n\t\t\t\ttext = \"\"\n\t\t\telif c == \"(\" and not ignore and not code:\n\t\t\t\tcontinue\n\t\t\telif c == \")\" and not ignore and not code:\n\t\t\t\tpar.append(Link(prev, text))\n\t\t\t\ttext = \"\"\n\t\t\t\tprev = \"\"\n\t\t\telse:\n\t\t\t\ttext = text + c\n\t\t\t\tignore = False\n\t\tif text != \"\":\n\t\t\tpar.append(Text(text, bold=bold, it=it, code=code))\n\t\t\ttext = \"\"\n\t\tif i+1 == len(tokens):\n\t\t\ti = i + 1\n\t\t\tbreak\n\t\ti = i + 1\t\n\tobjects.append(par)\n\treturn i \n\n#returns new paragraph made from string\n#line is the string to convert\ndef strToPar(line):\n\tpar = Paragraph()\n\tbold = False\n\tit = False\n\tcode = False\n\tignore = False\n\tskip = False\n\n\tprev = \"\"\n\ttext = \"\"\n\n\tfor x, c in enumerate(line):\n\t\tif c == '\\\\' and not ignore:\n\t\t\tignore = True\n\t\telif skip:\n\t\t\tskip = False\n\t\t\tcontinue\n\t\telif c == '*' and not ignore and not code:\n\t\t\t#Append existing text if it's not empty\n\t\t\tif text != \"\":\n\t\t\t\tpar.append(Text(text, bold=bold, it=it, code=code))\n\t\t\t\ttext = \"\"\n\t\t\t#Toggle bold or it depending on stars\n\t\t\tif x+1 < len(line):\n\t\t\t\tif line[x+1] == \"*\":\n\t\t\t\t\tbold = not bold\n\t\t\t\t\tskip = True\n\t\t\t\telse:\n\t\t\t\t\tit = not it\n\t\t\telse:\n\t\t\t\tit = not it\n\t\telif c == '_' and not ignore and not code:\n\t\t\tif text != \"\":\n\t\t\t\tpar.append(Text(text, bold=bold, it=it, code=code))\n\t\t\t\ttext = \"\"\n\t\t\tif x+1 < len(tokens[i].line):\n\t\t\t\tif line[x+1] == \"_\":\n\t\t\t\t\tbold = not bold\n\t\t\t\t\tskip = True\n\t\t\t\telse:\n\t\t\t\t\tit = not it\n\t\t\telse:\n\t\t\t\tit = not it\n\t\telif c == '`' and not ignore:\n\t\t\tif text != \"\":\n\t\t\t\tpar.append(Text(text, bold=bold, it=it, code=code))\n\t\t\t\ttext = \"\"\n\t\t\tcode = not code\n\t\telif c == \"[\" and not ignore and not code:\n\t\t\tif text != \"\":\n\t\t\t\tpar.append(Text(text, bold=bold, it=it, 
code=code))\n\t\t\t\ttext = \"\"\n\t\telif c == \"]\" and not ignore and not code:\n\t\t\tprev = text\n\t\t\ttext = \"\"\n\t\telif c == \"(\" and not ignore and not code:\n\t\t\tcontinue\n\t\telif c == \")\" and not ignore and not code:\n\t\t\tpar.append(Link(prev, text))\n\t\t\ttext = \"\"\n\t\t\tprev = \"\"\n\t\telse:\n\t\t\ttext = text + c\n\t\t\tignore = False\n\tif text != \"\":\n\t\tpar.append(Text(text, bold=bold, it=it, code=code))\n\t\ttext = \"\"\n\treturn par\n\n#creates List object and appends to objects list\n#returns updated token list index\n#i is the current token list index\n#tokens is the token list\n#objects is the object list\ndef list(i, tokens, objects):\n\tif tokens[i].line[0] == '*':\n\t\tli = List(False)\n\telse:\n\t\tli = List(True)\n\n\tcurPar = li\n\tcurTabs = 0\n\twhile tokens[i].type == \"List\":\n\t\ttabs = tabCount(tokens[i].line)\n\t\tif tabs == curTabs:\n\t\t\tsp = tokens[i].line.find(\" \")\n\t\t\ttext = strToPar(tokens[i].line[sp+1:])\n\t\t\tcurPar.append(Item(text, curPar))\n\t\t\tif i+1 == len(tokens):\n\t\t\t\tbreak\n\t\t\ti = i + 1\n\t\telif tabs > curTabs:\n\t\t\tcurTabs = tabs\n\t\t\tcurPar = curPar.items[-1]\n\t\telif tabs < curTabs:\n\t\t\tdif = curTabs - tabs\n\t\t\tcurTabs = tabs\n\t\t\tfor j in range(0, dif):\n\t\t\t\tcurPar = curPar.parent\n\tobjects.append(li)\n\treturn i \n\n#returns the number of tabs in line\n#line the line to search\ndef tabCount(line):\n\tcount = 0\n\tfor c in line:\n\t\tif c == \"\\t\":\n\t\t\tcount = count + 1\n\t\telse:\n\t\t\tbreak\n\treturn count\n\n#creates Table object and appends to objects list\n#returns updated token list index\n#i is the current token list index\n#tokens is the token list\n#objects is the object list\ndef table(i, tokens, objects):\n\ttab = Table()\n\n\tcols = tokens[i].line.count(\" | \") + 1\n\theadCells = tokens[i].line.split(\" | \")\n\talignCells = tokens[i+1].line.split(\" | \")\n\t\n\tfor x in range(0, cols):\n\t\thead = headCells[x].strip()\n\t\talign = getAlign(alignCells[x].strip())\n\t\ttab.addColumn(align, head)\n\n\ti = i + 2\n\twhile tokens[i].type == \"Table\":\n\t\tcells = tokens[i].line.split(\" | \")\n\t\tfor x in range(0, cols):\n\t\t\ttab.addCell(cells[x].strip(), x)\n\t\tif i+1 == len(tokens):\n\t\t\tbreak\n\t\ti = i + 1\n\tobjects.append(tab)\n\treturn i \n\n#returns the text alignment of a column based on the given cell\n#cell is the alignment cell from a markdown table column\ndef getAlign(cell):\n\tif cell.count(\":\") == 2:\n\t\treturn \"center\"\n\telif cell[0] == \":\":\n\t\treturn \"left\"\n\telse:\n\t\treturn \"right\"\n\n#creates CodeBlock object and appends to objects list\n#returns updated token list index\n#i is the current token list index\n#tokens is the token list\n#objects is the object list\ndef codeblock(i, tokens, objects):\n\tlang = tokens[i].line[3:]\n\tcb = CodeBlock(lang)\n\ti = i + 1\n\twhile tokens[i].type != \"Code Block\":\n\t\tline = str.replace(tokens[i].line, \"\\`\", \"`\")\n\t\tcb.addLine(line)\n\t\ti = i + 1\n\tobjects.append(cb)\n\treturn i + 1\n","sub_path":"mdcConverter.py","file_name":"mdcConverter.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"388298593","text":"from django.contrib.syndication.views import Feed\nfrom django.utils import feedgenerator\nfrom django.utils.html import strip_tags\n\nfrom .utils import absurl\n\n\nclass PodcastFeed(feedgenerator.Rss201rev2Feed):\n def rss_attributes(self):\n attrs = 
super().rss_attributes()\n attrs.update(\n {\n \"xmlns:itunes\": \"http://www.itunes.com/dtds/podcast-1.0.dtd\",\n \"xmlns:content\": \"http://purl.org/rss/1.0/modules/content/\",\n }\n )\n return attrs\n\n def add_root_elements(self, handler):\n super().add_root_elements(handler)\n handler.addQuickElement(\"content:encoded\", self.feed[\"description\"])\n # Image for general RSS\n if self.feed[\"image\"]:\n handler.startElement(\"image\", {})\n # See RSS specification for max image dimensions\n handler.addQuickElement(\n \"url\", absurl(self.feed[\"image\"].image.get_rendition(\"max-144x400\").url)\n )\n handler.addQuickElement(\"title\", self.feed[\"image\"].image.title)\n handler.addQuickElement(\"link\", self.feed[\"link\"])\n handler.endElement(\"image\")\n\n # Image for iTunes\n handler.addQuickElement(\n \"itunes:image\",\n \"\",\n {\n \"href\": absurl(\n self.feed[\"image\"].image.get_rendition(\"max-3000x3000\").url\n )\n },\n )\n\n handler.addQuickElement(\"itunes:type\", self.feed[\"type\"])\n handler.addQuickElement(\"itunes:subtitle\", self.feed[\"subtitle\"])\n handler.addQuickElement(\"itunes:author\", self.feed[\"author\"][\"name\"])\n\n handler.startElement(\"itunes:owner\", {})\n handler.addQuickElement(\"itunes:name\", self.feed[\"owner\"][\"name\"])\n handler.addQuickElement(\"itunes:email\", self.feed[\"owner\"][\"email\"])\n handler.endElement(\"itunes:owner\")\n\n # TODO - iTunes has a specific list of categories that we need to use,\n # and it also supports nested categories.\n for cat in self.feed[\"categories\"]:\n handler.addQuickElement(\"itunes:category\", \"\", {\"text\": str(cat)})\n\n handler.addQuickElement(\n \"itunes:explicit\", \"true\" if self.feed[\"explicit\"] else \"false\"\n )\n handler.addQuickElement(\"itunes:block\", \"Yes\" if self.feed[\"block\"] else \"No\")\n handler.addQuickElement(\n \"itunes:complete\", \"Yes\" if self.feed[\"complete\"] else \"No\"\n )\n\n def add_item_elements(self, handler, item):\n super().add_item_elements(handler, item)\n if item[\"duration\"]:\n # Convert to MM:SS\n h, m = divmod(item[\"duration\"], 60)\n handler.addQuickElement(\"itunes:duration\", \"%02d:%02d\" % (h, m))\n\n if item[\"image\"]:\n handler.addQuickElement(\n \"itunes:image\",\n \"\",\n {\n \"href\": absurl(\n item[\"image\"].image.get_rendition(\"max-3000x3000\").url\n )\n },\n )\n\n handler.addQuickElement(\n \"itunes:explicit\", \"true\" if item[\"explicit\"] else \"false\"\n )\n handler.addQuickElement(\"itunes:episodeType\", item[\"episode_type\"])\n\n if item[\"season_number\"]:\n handler.addQuickElement(\"itunes:season\", item[\"season_number\"])\n\n\nclass ShowFeed(Feed):\n\n feed_type = PodcastFeed\n\n def __call__(self, request, *args, **kwargs):\n self.request = request\n return super().__call__(request, *args, **kwargs)\n\n def get_object(self, request, show_id):\n from .models import Show # Avoid circular import\n\n return Show.objects.get(pk=show_id)\n\n def title(self, obj):\n return str(obj)\n\n def description(self, obj):\n return strip_tags(obj.description)\n\n def language(self, obj):\n return strip_tags(obj.language)\n\n def categories(self, obj):\n return obj.tags.all()\n\n def subtitle(self, obj):\n return obj.subtitle\n\n def link(self, obj):\n return obj.full_url\n\n def items(self, obj):\n return obj.get_children().live().specific()[:10]\n\n def item_title(self, item):\n return item.title\n\n def item_description(self, item):\n return strip_tags(item.description)\n\n def item_link(self, item):\n return item.full_url\n\n def 
item_enclosures(self, item):\n enclosures = []\n for enc in item.enclosures.all():\n if enc.media.file.url.startswith(\"http\"):\n url = enc.media.file.url\n else:\n url = \"{}{}\".format(\n self.request.build_absolute_uri(\"/\").rstrip(\"/\"), enc.media.file.url\n )\n enclosure = feedgenerator.Enclosure(\n url=url, length=str(enc.media.file.size), mime_type=enc.media.mime_type\n )\n enclosures.append(enclosure)\n return enclosures\n\n def item_categories(self, item):\n return item.tags.all()\n\n def item_pubdate(self, item):\n return item.date_created\n\n def item_updateddate(self, item):\n return item.date_updated\n\n def feed_extra_kwargs(self, obj):\n return {\n \"image\": obj.images.first(),\n \"description_html\": obj.description,\n \"type\": \"serial\"\n if obj.podcast_type == obj.SHOW_TYPE_SERLIALIZED\n else \"episodic\",\n # TODO these have specific meanings - owner is the adminstrator of the podcast\n \"author\": {\n \"name\": obj.owner.get_full_name() if obj.owner else \"\",\n \"email\": obj.owner.email if obj.owner else \"\",\n },\n \"owner\": {\n \"name\": obj.owner.get_full_name() if obj.owner else \"\",\n \"email\": obj.owner.email if obj.owner else \"\",\n },\n \"explicit\": obj.is_explicit,\n \"block\": False,\n \"complete\": False,\n }\n\n def item_extra_kwargs(self, item):\n default_enclosure = item.enclosures.first()\n return {\n \"duration\": default_enclosure.media.duration if default_enclosure else None,\n \"image\": item.images.first(),\n \"explicit\": item.is_explicit,\n \"season_number\": item.season_number,\n \"episode_type\": item.episode_type,\n }\n","sub_path":"npr_poc/podcasts/feeds.py","file_name":"feeds.py","file_ext":"py","file_size_in_byte":6398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"270108134","text":"# Writing a file, \\n will create a new line.\nwith open(\"test.txt\", \"w\") as write_file:\n lines = [\"first line.\\n\", \"second line.\\n\", \"third line.\\n\"]\n write_file.writelines(lines)\n\n# Reading a file\nwith open(\"test.txt\", \"r\") as read_file:\n lines = read_file.readlines()\n for line in lines:\n print(line)\n\nimport glob\nimport os\n\n\"\"\" Loops through all the files, checks if its a file and then check if its a\n csv and then print the absolute path.\"\"\"\nfor filep in glob.glob(\"**/*\", recursive=True):\n if os.path.isfile(filep):\n if os.path.splitext(filep)[1] == \".csv\":\n print(os.path.join(os.getcwd(), filep))\n","sub_path":"files/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"184708380","text":"#----------------------\n# switchVCF.py switches the bases in the vcf file to their complement to\n# return the mutation on the other DNA strand.\n#----------------------\n\n\nimport re\nimport statistics\ndef main():\n inputfile=open('231_SNV_COSMIC.vcf').read().split('\\n')\n header = inputfile[:6] #save header\n inputfile = inputfile[6:] #remove first five lines\n\n for line in header:\n print(line)\n for lines in inputfile:\n line = lines.split('\\t')\n if line[3] == 'A':\n line[3] = 'T'\n elif line[3] == 'T':\n line[3] = 'A'\n elif line[3] == 'C':\n line[3] = 'G'\n elif line[3] == 'G':\n line[3] = 'C'\n if line[4] == 'A':\n line[4] = 'T'\n elif line[4] == 'T':\n line[4] = 'A'\n elif line[4] == 'C':\n line[4] = 'G'\n elif line[4] == 'G':\n line[4] = 'C'\n print(line) 
\nmain()\n","sub_path":"_projects/project2/Mutations/switchVCF.py","file_name":"switchVCF.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"392916113","text":"def merge(A, B):\r\n len_A, len_B = len(A), len(B)\r\n C = []\r\n i, j = 0, 0\r\n while i < len_A and j < len_B:\r\n if A[i] < B[j]:\r\n C.append(A[i])\r\n i += 1\r\n else:\r\n C.append(B[j])\r\n j += 1\r\n if i < len_A:\r\n C.extend(A[i:])\r\n else:\r\n C.extend(B[j:])\r\n return (print(*C))\r\n\r\n\r\nA = list(map(int, input().split()))\r\nB = list(map(int, input().split()))\r\nmerge(A, B)\r\n","sub_path":"Week 6: Sorting/6 (01).py","file_name":"6 (01).py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"440297828","text":"from rest_framework import serializers\n\nfrom repair.apps.conclusions.models import Conclusion, ConsensusLevel, Section\n\n\nclass ConclusionSerializer(serializers.ModelSerializer):\n step_name = serializers.CharField(source='get_step_display', read_only=True)\n\n parent_lookup_kwargs = {\n 'casestudy_pk': 'keyflow__casestudy__id',\n 'keyflow_pk': 'keyflow__id'\n }\n\n class Meta:\n model = Conclusion\n fields = ('id', 'step', 'step_name', 'text', 'link', 'image',\n 'consensus_level', 'section')\n extra_kwargs = {\n 'text': {'required': False, 'allow_null': True,\n 'allow_blank': True},\n 'link': {'required': False, 'allow_null': True,\n 'allow_blank': True},\n 'image': {'required': False, 'allow_null': True},\n }\n\n\nclass ConsensusSerializer(serializers.ModelSerializer):\n parent_lookup_kwargs = {'casestudy_pk': 'casestudy__id'}\n\n class Meta:\n model = ConsensusLevel\n fields = ('id', 'name', 'priority')\n\n\nclass SectionSerializer(serializers.ModelSerializer):\n parent_lookup_kwargs = {'casestudy_pk': 'casestudy__id'}\n\n class Meta:\n model = Section\n fields = ('id', 'name', 'priority')\n","sub_path":"repair/apps/conclusions/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"110635064","text":"# -*- coding: utf-8 -*-\n# Author: Florian Mayer <florian.mayer@bitsrc.org>\n\n#pylint: disable=W0613\n\nfrom __future__ import absolute_import\n\nimport pytest\n\nimport os\nimport tempfile\n\nfrom functools import partial\n\nfrom sunpy.net.download import Downloader, default_name\nimport threading\n\ndef wait_for(n, callback): #pylint: disable=W0613\n items = []\n def _fun(handler):\n items.append(handler)\n if len(items) == n:\n callback(items)\n return _fun\n\n\ndef path_fun(*args, **kwargs):\n raise ValueError\n\n\ndef test_path_exception():\n x = threading.Event()\n dw = Downloader(1, 2)\n dw.download(\n \"http://google.at\", path_fun, errback=wait_for(1, lambda a: x.set())\n )\n th = threading.Thread(target=dw.wait)\n th.daemon = True\n th.start()\n x.wait(10)\n assert x.isSet()\n dw.stop()\n\n\ndef test_download_http():\n items = []\n\n def wait_for(n, callback): # pylint: disable=W0613\n def _fun(handler):\n items.append(handler)\n if len(items) == n:\n callback(items)\n return _fun\n\n tmp = tempfile.mkdtemp()\n path_fun = partial(default_name, tmp)\n\n dw = Downloader(1, 1)\n\n on_finish = wait_for(2, lambda _: dw.stop())\n dw.download('http://ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min.js', path_fun, on_finish)\n 
dw.download('http://ajax.googleapis.com/ajax/libs/webfont/1.4.2/webfont.js', path_fun, on_finish)\n # dw.download('ftp://speedtest.inode.at/speedtest-100mb', path_fun, on_finish)\n\n dw.wait()\n\n for item in items:\n assert os.path.exists(item ['path'])\n","sub_path":"sunpy/tests/net/test_download.py","file_name":"test_download.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"621879265","text":"import random\nfrom collections import deque\nimport numpy as np\nfrom checkers import game\nfrom utils.gameplay import Gameplay\nfrom v2.DNN import DNN\nimport matplotlib.pyplot as plt\nimport utils.config as config\nfrom utils.learning_utils import LearningUtils\n\nBOARD_SIZE = 8\nMEM_SIZE = 40000\nWEIGHTS_DIR = './v2/weights/'\n\n\nclass DDQAgent(object):\n def __init__(self, name, with_eps):\n self.name = name\n self.memory = deque([], maxlen=MEM_SIZE)\n self.state_size = BOARD_SIZE*BOARD_SIZE\n self.replay_batch_size = 254\n self.ddqn = DNN(self.state_size)\n self.ddqn_target = DNN(self.state_size)\n self.minibatch_count = 0\n self.gamma = 0.995\n self.epsilon = 1.0\n self.epsilon_min = 0.1\n self.epsilon_decay_rate = 0.9995\n self.target_update_threshold = 4*self.replay_batch_size\n self.replay_count = 0\n self.with_eps = with_eps\n self.loss_mean = 0\n self.q_search_depth = 0\n\n def remember(self, board_state, draw_counter, board_state_action, reward, next_board_state, next_draw_counter, next_possible_board_states, done):\n self.memory.append((board_state, draw_counter, board_state_action, reward, next_board_state, next_draw_counter, next_possible_board_states, done))\n\n def replay_memory(self):\n print('Replay memory!')\n samples = random.sample(self.memory, self.replay_batch_size)\n\n done_not_draw = [m for m in self.memory if LearningUtils.is_done_not_draw(m[3], m[7])]\n if (len(done_not_draw) > config.DONE_NOT_DRAW_LIM):\n done_not_draw = random.sample(done_not_draw, config.DONE_NOT_DRAW_LIM)\n\n samples = np.concatenate((samples, done_not_draw), axis=0)\n\n avg_loss = 0\n minibatch_X = []\n minibatch_y = []\n for board_state, draw_counter, board_state_action, reward, next_board_state, next_draw_counter, next_possible_board_states, done in samples:\n if(self.replay_count == self.target_update_threshold):\n self.update_target_weights()\n self.replay_count = 0\n\n draw_counter_norm = draw_counter / 40\n\n if done or next_possible_board_states is None or not next_possible_board_states.size > self.state_size:\n q_value = reward\n else:\n next_draw_counter_norm = next_draw_counter / 40\n targets = []\n for possible_board_state in next_possible_board_states:\n targets.append(self.ddqn.predict_Q(next_board_state, next_draw_counter_norm, possible_board_state))\n\n targets = np.array(targets)\n next_best_board = next_possible_board_states[np.argmax(targets)]\n q_value_t = self.ddqn_target.predict_Q(next_board_state, next_draw_counter_norm, next_best_board)\n if (self.replay_count % 13) == 0:\n print('---------------------')\n Gameplay.show_board(board_state)\n print('---')\n Gameplay.show_board(board_state_action)\n print('draw counter: {}'.format(draw_counter))\n print('reward: {}'.format(reward))\n print('q_value: {}'.format(self.ddqn.predict_Q(board_state, draw_counter_norm, board_state_action)))\n\n q_value = reward + self.gamma * q_value_t\n\n board_state_reshaped = board_state.reshape(self.state_size)\n board_state_action_reshaped = board_state_action.reshape(self.state_size)\n 
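# Each training row is the flattened board state, the flattened board-after-action, and the normalized draw counter; the fitted target is the bootstrapped q_value computed above.\n            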
minibatch_X.append(np.hstack((board_state_reshaped, board_state_action_reshaped, draw_counter_norm)))\n minibatch_y.append(q_value)\n self.replay_count += 1\n\n self.minibatch_count += 1\n #print(minibatch_y)\n hist = self.ddqn.train(np.array(minibatch_X), np.array(minibatch_y))\n avg_loss += np.mean(hist.history['loss'])\n\n self.loss_mean = (self.loss_mean*self.minibatch_count + avg_loss)/(self.minibatch_count+1)\n\n print('{} minibatch average Q_value: {}'.format(self.name, np.mean(minibatch_y)))\n print('{} minibatch average loss: {}'.format(self.name, avg_loss))\n print('{} overall average loss: {}'.format(self.name, self.loss_mean))\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay_rate\n print('{} epsilon: {}'.format(self.name, self.epsilon))\n\n def auto_play(self, n_episodes):\n plt.ion()\n plt.xlabel('Episodes')\n plt.ylabel('{} mean error'.format(self.name))\n x, y = [], []\n line, = plt.plot(x, y)\n plt.xlim(0, n_episodes)\n plt.ylim(0, config.PLOT_Y_LIM)\n\n for i in range(n_episodes):\n print(\"Episode {}\".format(i))\n turns_hist = {\n 1: [],\n 2: []\n }\n gm = game.Game()\n boardState = Gameplay.board_state_from_board(gm.board)\n\n while (not gm.is_over()):\n player = gm.whose_turn()\n\n possible_board_states = Gameplay.board_states_from_possible_moves(gm.board)\n move_idx, q_val = Gameplay.get_QAgent_move_pp(self, gm)\n\n draw_counter = gm.moves_since_last_capture\n\n if (player == 2):\n boardState = Gameplay.invert_board(boardState)\n possible_board_states = np.array(\n list(map(lambda x: Gameplay.invert_board(x), possible_board_states)))\n\n # Updating previous history\n if len(turns_hist[player]) > 0:\n turns_hist[player][-1]['next_board_state'] = boardState\n turns_hist[player][-1]['next_draw_counter'] = draw_counter\n turns_hist[player][-1]['next_possible_board_states'] = possible_board_states\n\n move = gm.get_possible_moves()[move_idx]\n\n reward = 0\n if (move in gm.board.get_possible_capture_moves()):\n reward += config.CAPTURE_REWARD\n\n piece_was_king = gm.board.searcher.get_piece_by_position(move[0]).king\n new_boardState = Gameplay.make_move(gm, move)\n\n if (not piece_was_king) and gm.board.searcher.get_piece_by_position(move[1]).king:\n reward += config.KING_REWARD\n\n if len(turns_hist[Gameplay.get_other_player(player)]) > 0:\n turns_hist[Gameplay.get_other_player(player)][-1]['reward'] -= reward\n\n # New history\n turns_hist[player].append({\n 'board_state': boardState,\n 'draw_counter': draw_counter,\n 'board_state_action': new_boardState,\n 'reward': reward,\n 'next_board_state': None,\n 'next_draw_counter': None,\n 'next_possible_board_states': None,\n 'done': False\n })\n if (player == 2):\n turns_hist[player][-1]['board_state_action'] = Gameplay.invert_board(new_boardState)\n\n boardState = new_boardState\n\n print(\"Game Over! 
\")\n if gm.move_limit_reached():\n print(\"It's a tie!!\")\n for j in range(2):\n turns_hist[j + 1][-1]['reward'] += config.DRAW_REWARD\n turns_hist[j + 1][-1]['done'] = True\n else:\n print(\"Winner is: {}\".format(gm.get_winner()))\n turns_hist[gm.get_winner()][-1]['reward'] += config.WIN_REWARD\n turns_hist[gm.get_winner()][-1]['done'] = True\n turns_hist[Gameplay.get_other_player(gm.get_winner())][-1]['reward'] -= config.WIN_REWARD\n turns_hist[Gameplay.get_other_player(gm.get_winner())][-1]['done'] = True\n\n for k, v in turns_hist.items():\n print(\"Reward sum for {}: {}\".format(k, sum(list(map(lambda x: x['reward'], v)))))\n\n for k, v in turns_hist.items():\n for turn_hist in v:\n self.remember(turn_hist['board_state'], turn_hist['draw_counter'],\n turn_hist['board_state_action'], turn_hist['reward'],\n turn_hist['next_board_state'], turn_hist['next_draw_counter'],\n turn_hist['next_possible_board_states'], turn_hist['done'])\n\n if (len(self.memory) > self.replay_batch_size):\n self.replay_memory()\n y.append(self.loss_mean)\n x.append(i)\n line.set_data(x, y)\n plt.draw()\n plt.pause(0.000000001)\n\n return self\n\n def choose_action_pp(self, gme):\n best_q = 0\n board = gme.board\n if self.with_eps and np.random.rand() <= self.epsilon:\n action_index = np.random.randint(0, len(board.get_possible_moves()))\n else:\n if(len(board.get_possible_moves()) < 2):\n action_index = 0\n else:\n action_index, best_q = self.deep_q_search(board, gme.moves_since_last_capture, self.q_search_depth)\n return action_index, best_q\n\n def deep_q_search(self, board, draw_counter, depth=0):\n curr_player = board.player_turn\n board_state = Gameplay.board_state_from_board(board)\n possible_board_states = Gameplay.board_states_from_possible_moves(board)\n invert = (curr_player == 2)\n\n if invert:\n board_state = Gameplay.invert_board(board_state)\n possible_board_states = np.array(list(map(lambda x: Gameplay.invert_board(x), possible_board_states)))\n\n if(len(possible_board_states) == 0):\n return None, 0\n\n q_values = []\n if (depth == 0):\n q_values = self.get_moves_Q_values(board_state, draw_counter, possible_board_states)\n else:\n moves = board.get_possible_moves()\n for move in moves:\n new_board = board.create_new_board_from_move(move)\n new_board_state = Gameplay.board_state_from_board(new_board)\n if invert:\n new_board_state = Gameplay.invert_board(new_board_state)\n move_q_value = self.ddqn.predict_Q(board_state, draw_counter/40, new_board_state)\n\n draw_counter = 0 if new_board.previous_move_was_capture else draw_counter + 1\n _, q_val = self.deep_q_search(new_board, draw_counter, depth - 1)\n\n if curr_player != new_board.player_turn:\n q_val = -q_val\n q_values.append(move_q_value + q_val)\n\n best_q = np.max(q_values)\n best_move_idx = np.argmax(q_values)\n\n return best_move_idx, best_q\n\n def get_moves_Q_values(self, board_state, draw_counter, possible_board_states):\n q_values = []\n for possible_bd_state in possible_board_states:\n q_values.append(self.ddqn.predict_Q(board_state, draw_counter/40, possible_bd_state))\n return q_values\n\n def update_target_weights(self):\n print('Update target weights!')\n self.ddqn_target.model.set_weights(self.ddqn.model.get_weights())\n\n def save_weights(self, filename):\n self.ddqn.model.save_weights(WEIGHTS_DIR+filename, overwrite=True)\n\n def load_weights(self, filename):\n self.ddqn.model.load_weights(WEIGHTS_DIR+filename)\n 
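# Mirror the freshly loaded weights into the target network so both nets start in sync.\n        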
self.update_target_weights()\n","sub_path":"python_applications/Q_checkers/v2/DDQAgent.py","file_name":"DDQAgent.py","file_ext":"py","file_size_in_byte":11513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"394065614","text":"import re\n\nfrom kraken.core.maths import Vec3\nfrom kraken.core.maths.xfo import Xfo, xfoFromDirAndUpV, aimAt\nfrom kraken.core.maths.rotation_order import RotationOrder\nfrom kraken.core.maths.constants import *\n\n\nfrom kraken.core.objects.components.base_example_component import BaseExampleComponent\n\nfrom kraken.core.objects.attributes.attribute_group import AttributeGroup\nfrom kraken.core.objects.attributes.scalar_attribute import ScalarAttribute\nfrom kraken.core.objects.attributes.bool_attribute import BoolAttribute\nfrom kraken.core.objects.attributes.string_attribute import StringAttribute\nfrom kraken.core.objects.attributes.integer_attribute import IntegerAttribute\n\n\nfrom kraken.core.objects.constraints.pose_constraint import PoseConstraint\n\nfrom kraken.core.objects.component_group import ComponentGroup\nfrom kraken.core.objects.hierarchy_group import HierarchyGroup\nfrom kraken.core.objects.transform import Transform\nfrom kraken.core.objects.joint import Joint\nfrom kraken.core.objects.space import Space\nfrom kraken.core.objects.control import Control\n\nfrom kraken.core.objects.operators.kl_operator import KLOperator\n\nfrom kraken.core.profiler import Profiler\nfrom kraken.helpers.utility_methods import logHierarchy\n\nfrom OSS.OSS_control import *\nfrom OSS.OSS_component import OSS_Component\n\n\nCOMPONENT_NAME = \"hand\"\n\nclass OSSHandComponent(OSS_Component):\n \"\"\"Hand Component\"\"\"\n\n def __init__(self, name=COMPONENT_NAME, parent=None):\n\n super(OSSHandComponent, self).__init__(name, parent)\n\n # ===========\n # Declare IO\n # ===========\n # Declare Inputs Xfos\n\n # Declare Output Xfos\n self.hand_cmpOut = self.createOutput('hand', dataType='Xfo', parent=self.outputHrcGrp).getTarget()\n self.palm_cmpOut = self.createOutput('palm', dataType='Xfo', parent=self.outputHrcGrp).getTarget()\n self.ikgoal_cmpOut = self.createOutput('ikgoal', dataType='Xfo', parent=self.outputHrcGrp).getTarget()\n\n # Declare Input Attrs\n\n # Declare Output Attrs\n self.drawDebugOutputAttr = self.createOutput('drawDebug', dataType='Boolean', value=False, parent=self.cmpOutputAttrGrp).getTarget()\n self.ikBlend_cmpOutAttr = self.createOutput('ikBlend', dataType='Float', value=0.0, parent=self.cmpOutputAttrGrp).getTarget()\n #self.limbMocap_cmpOutAttr = self.createOutput('limbMocap', dataType='Float', value=0.0, parent=self.cmpOutputAttrGrp).getTarget()\n self.softIK_cmpOutAttr = self.createOutput('softIK', dataType='Float', value=0.0, parent=self.cmpOutputAttrGrp).getTarget()\n self.stretch_cmpOutAttr = self.createOutput('stretch', dataType='Float', value=0.0, parent=self.cmpOutputAttrGrp).getTarget()\n\n\n\nclass OSSHandComponentGuide(OSSHandComponent):\n \"\"\"Hand Component Guide\"\"\"\n\n def __init__(self, name=COMPONENT_NAME, parent=None):\n\n Profiler.getInstance().push(\"Construct Hand Guide Component:\" + name)\n super(OSSHandComponentGuide, self).__init__(name, parent)\n\n\n # =========\n # Controls\n # ========\n\n # Guide Settings\n self.addPartialJoints = BoolAttribute('addPartialJoints', value=False, parent=self.guideSettingsAttrGrp)\n self.ikHandleSizeInputAttr = ScalarAttribute('ikHandleSize', value=1, minValue=0.0, maxValue=50.0, parent=self.guideSettingsAttrGrp)\n 
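# The digit name settings below each rebuild a chain of guide controls through their value-change callbacks.\n        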
#self.numDigits = IntegerAttribute('numDigits', value=5, minValue=1, maxValue=20, parent=self.guideSettingsAttrGrp)\n        self.digit3SegmentNames = StringAttribute('Digit3SegmentNames', value=\"index middle ring pinky\", parent=self.guideSettingsAttrGrp)\n        self.digit2SegmentNames = StringAttribute('Digit2SegmentNames', value=\"thumb\", parent=self.guideSettingsAttrGrp)\n        self.digit1SegmentNames = StringAttribute('Digit1SegmentNames', value=\"\", parent=self.guideSettingsAttrGrp)\n\n\n        self.digit3SegmentNames.setValueChangeCallback(self.updateDigit3SegmentControls)\n        self.digit2SegmentNames.setValueChangeCallback(self.updateDigit2SegmentControls)\n        self.digit1SegmentNames.setValueChangeCallback(self.updateDigit1SegmentControls)\n\n        # Guide Controls\n        self.handCtrl = Control('hand', parent=self.ctrlCmpGrp, shape=\"sphere\")\n        self.palmCtrl = Control('palm', parent=self.ctrlCmpGrp, shape=\"sphere\")\n        self.palmCtrl.setShapeVisibility(False) # For now, until this is an option\n        self.palmCtrl.lockRotation(True, True, True)\n        self.palmCtrl.lockScale(True, True, True)\n        # self.palmCtrl.lockTranslation(True, True, True)\n        self.palmTipCtrl = Control('palmTip', parent=self.ctrlCmpGrp, shape=\"sphere\")\n        self.handleCtrl = Control('handle', parent=self.ctrlCmpGrp, shape=\"jack\")\n\n        self.digit3SegmentCtrls = []\n        self.digit2SegmentCtrls = []\n        self.digit1SegmentCtrls = []\n\n        data = {\n                \"name\": name,\n                \"location\": \"L\",\n                \"handXfo\": Xfo(Vec3(1.85, 1.2, -1.2)),\n                \"palmXfo\": Xfo(Vec3(1.85, 0.4, 0.25)),\n                \"palmTipXfo\": Xfo(Vec3(1.85, 0.4, 1.5)),\n                \"handleXfo\" : Xfo(Vec3(1.85, 0.0, -1.6)),\n               }\n\n        self.loadData(data)\n\n        Profiler.getInstance().pop()\n\n\n    # ==========\n    # Callbacks\n    # ==========\n    def updateNumDigitsControls(self, numSegments, controlsList, digitNames):\n        \"\"\"Rebuilds the digit guide control chains when a digit name setting changes.\n\n        Arguments:\n        numSegments -- int, number of segments per digit\n        controlsList -- list, existing guide controls to delete and rebuild\n        digitNames -- string, separated digit names\n\n        Return:\n        True if successful.\n\n        \"\"\"\n\n        self.controlXforms = []\n\n        # Store current values if guide controls already exist\n        current = 0\n        for i, ctrl in enumerate(controlsList):\n\n            if ctrl.getParent() is self.palmCtrl:\n                self.controlXforms.append([ctrl.xfo])\n                current = len(self.controlXforms) -1\n            else:\n                self.controlXforms[current].append(ctrl.xfo)\n\n\n        # Delete current controls\n        for ctrl in reversed(controlsList):\n            ctrl.getParent().removeChild(ctrl)\n        del controlsList[:]\n\n        # Let's build all new digits\n        digitNameList = getDigitNameList(digitNames)\n\n        if not digitNameList: # Nothing to build\n            return True\n\n        segments = [\"palm\", \"base\", \"mid\", \"tip\", \"end\"]\n        if numSegments == 2:\n            segments.remove(\"mid\")\n        elif numSegments == 1:\n            segments.remove(\"mid\")\n            segments.remove(\"tip\")\n\n        offset = 0.0\n        for i, digitName in enumerate(digitNameList):\n            parent = self.palmCtrl\n            for j, segment in enumerate(segments):\n                newCtrl = Control(digitName+\"_\"+segment, parent=parent, shape=\"sphere\")\n                #newCtrl.scalePoints(Vec3(0.25, 0.25, 0.25))\n                if j == 0:\n                    newCtrl.xfo = parent.xfo.multiply(Xfo(Vec3(10, 0.0, -offset)))\n                    offset += 10.0\n                else:\n                    newCtrl.xfo = parent.xfo.multiply(Xfo(Vec3(10.0, 0.0, 0.0)))\n\n                controlsList.append(newCtrl)\n                parent = newCtrl\n\n                if i < len(self.controlXforms):\n                    if j < len(self.controlXforms[i]):\n                        newCtrl.xfo = self.controlXforms[i][j]\n        return True\n\n\n    def updateDigit3SegmentControls(self, digitNames):\n        self.updateNumDigitsControls(3, self.digit3SegmentCtrls, digitNames)\n\n    def updateDigit2SegmentControls(self, digitNames):\n        
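# Two-segment digits (the thumb by default) omit the \"mid\" control from the chain.\n        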
self.updateNumDigitsControls(2, self.digit2SegmentCtrls, digitNames)\n\n def updateDigit1SegmentControls(self, digitNames):\n self.updateNumDigitsControls(1, self.digit1SegmentCtrls, digitNames)\n\n\n # =============\n # Data Methods\n # =============\n def saveData(self):\n \"\"\"Save the data for the component to be persisted.\n\n\n Return:\n The JSON data object\n\n \"\"\"\n\n data = super(OSSHandComponentGuide, self).saveData()\n\n data['handXfo'] = self.handCtrl.xfo\n data['palmXfo'] = self.palmCtrl.xfo\n data['palmTipXfo'] = self.palmTipCtrl.xfo\n data['handleXfo'] = self.handleCtrl.xfo\n\n\n for ctrlListName in [\"digit3SegmentCtrls\", \"digit2SegmentCtrls\", \"digit1SegmentCtrls\"]:\n ctrls = getattr(self, ctrlListName)\n xfos = []\n for i in xrange(len(ctrls)):\n xfos.append(ctrls[i].xfo)\n data[ctrlListName+\"Xfos\"] = xfos\n\n\n\n return data\n\n\n def loadData(self, data):\n \"\"\"Load a saved guide representation from persisted data.\n\n Arguments:\n data -- object, The JSON data object.\n\n Return:\n True if successful.\n\n \"\"\"\n\n #Grab the guide settings in case we want to use them here (and are not stored in data arg)\n existing_data = self.saveData()\n existing_data.update(data)\n data = existing_data\n\n super(OSSHandComponentGuide, self).loadData( data )\n\n\n #Reset all shapes, but really we should just recreate all controls from loadData instead of init\n for ctrl in self.getHierarchyNodes(classType=\"Control\"):\n ctrl.setShape(ctrl.getShape())\n\n # TODO: make this a property of the component\n self.boneAxisStr = \"POSX\"\n if self.getLocation() == 'R':\n self.boneAxisStr = \"NEGX\"\n self.boneAxis = AXIS_NAME_TO_TUPLE_MAP[self.boneAxisStr]\n\n self.upAxisStr = \"POSZ\"\n if self.getLocation() == 'R':\n self.upAxisStr = \"NEGZ\"\n self.upAxis = AXIS_NAME_TO_TUPLE_MAP[self.upAxisStr]\n\n if \"handXfo\" in data.keys():\n self.handCtrl.xfo = data['handXfo']\n if \"palmXfo\" in data.keys():\n self.palmCtrl.xfo = data['palmXfo']\n if \"palmTipXfo\" in data.keys():\n self.palmTipCtrl.xfo = data['palmTipXfo']\n if \"handleXfo\" in data.keys():\n self.handleCtrl.xfo = data['handleXfo']\n\n\n globalScale = self.globalComponentCtrlSizeInputAttr.getValue()\n self.globalScaleVec =Vec3(globalScale, globalScale, globalScale)\n\n self.handCtrl.scalePoints(self.globalScaleVec)\n self.palmCtrl.scalePoints(self.globalScaleVec)\n self.palmTipCtrl.scalePoints(self.globalScaleVec)\n self.handleCtrl.scalePoints(self.globalScaleVec)\n\n self.handleCtrl.scalePoints(Vec3(data[\"ikHandleSize\"], data[\"ikHandleSize\"], data[\"ikHandleSize\"]))\n\n for ctrlListName in [\"digit3SegmentCtrls\", \"digit2SegmentCtrls\", \"digit1SegmentCtrls\"]:\n ctrls = getattr(self, ctrlListName)\n if ctrlListName+\"Xfos\" in data.keys():\n for i in xrange(len(data[ctrlListName+\"Xfos\"])):\n if i < len(ctrls):\n ctrls[i].xfo = data[ctrlListName+\"Xfos\"][i]\n ctrls[i].scalePoints(self.globalScaleVec)\n return True\n\n\n\n def getRigBuildData(self):\n \"\"\"Returns the Guide data used by the Rig Component to define the layout of the final rig..\n\n Return:\n The JSON rig data object.\n\n \"\"\"\n\n data = super(OSSHandComponentGuide, self).getRigBuildData()\n\n\n\n # Values\n\n HandPosition = self.handCtrl.xfo.tr\n palmPosition = self.palmCtrl.xfo.tr\n palmTipPosition = self.palmTipCtrl.xfo.tr\n\n\n # Get lengths\n handLen = HandPosition.subtract(palmPosition).length()\n palmLen = palmPosition.subtract(palmTipPosition).length()\n\n handXfo = Xfo()\n handXfo.tr = HandPosition\n\n # Calculate Hand Xfo\n 
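# NOTE: the direction vector below is computed but not referenced again; the aimAt calls further down orient handXfo and palmXfo directly.\n        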
HandTopalm = palmPosition.subtract(HandPosition).unit()\n\n palmXfo = Xfo(self.palmCtrl.xfo)\n\n\n # In the complete guide system, have live constraint for palm upvec, this assumes Hand is higher than palm\n #upvec hard-coded for now. Really, we should have an upVector in the guide setup\n aimAt(palmXfo, aimPos=palmTipPosition, upVector=handXfo.ori.getYaxis(), aimAxis=self.boneAxis, upAxis=self.upAxis)\n # Same here\n aimAt(handXfo, aimPos=palmXfo.tr, upVector=handXfo.ori.getYaxis(), aimAxis=self.boneAxis, upAxis=self.upAxis)\n\n handleXfo = self.handleCtrl.xfo\n # Not great. This assumes that guide ctrl has been mirrored from left side\n # Another case where the guide system should feed in correct values\n #if self.getLocation() == 'R':\n # handleXfo.ori = handleXfo.ori.mirror(0)\n\n data['handXfo'] = handXfo\n data['palmXfo'] = palmXfo\n data['handLen'] = handLen\n data['palmLen'] = palmLen\n data['handleXfo'] = handleXfo\n\n\n for ctrlListName in [\"digit3SegmentCtrls\", \"digit2SegmentCtrls\", \"digit1SegmentCtrls\"]:\n ctrls = getattr(self, ctrlListName)\n xfos = []\n for i in xrange(len(ctrls)):\n xfos.append(ctrls[i].xfo)\n data[ctrlListName+\"Xfos\"] = xfos\n\n return data\n\n # ==============\n # Class Methods\n # ==============\n @classmethod\n def getComponentType(cls):\n \"\"\"Enables introspection of the class prior to construction to determine if it is a guide component.\n\n Return:\n The true if this component is a guide component.\n\n \"\"\"\n\n return 'Guide'\n\n @classmethod\n def getRigComponentClass(cls):\n \"\"\"Returns the corresponding rig component class for this guide component class\n\n Return:\n The rig component class.\n\n \"\"\"\n\n return OSSHandComponentRig\n\n\nclass OSSHandComponentRig(OSSHandComponent):\n \"\"\"Hand Component\"\"\"\n\n def __init__(self, name=COMPONENT_NAME, parent=None):\n\n Profiler.getInstance().push(\"Construct Leg Rig Component:\" + name)\n super(OSSHandComponentRig, self).__init__(name, parent)\n\n self.mocap = False\n\n # =========\n # Controls\n # =========\n\n # IK Handle\n self.handleCtrl = IKControl(self.getName(), parent=self.ctrlCmpGrp, shape=\"jack\")\n self.handleCtrl.ro = RotationOrder(ROT_ORDER_STR_TO_INT_MAP[\"ZXY\"]) #Set with component settings later careful when combining with foot!\n self.handleSpace = self.handleCtrl.insertSpace(name=\"hand_ik\") # To avoid clashes\n self.handleIKSpace = Space('hand_orient_in_ik', parent=self.handleCtrl) # Not a good way to use ik and space suffixes Hmmm...\n\n # FK Hand\n self.handCtrl = FKControl(self.getName(), parent=self.ctrlCmpGrp, shape=\"cube\")\n self.handCtrl.ro = RotationOrder(ROT_ORDER_STR_TO_INT_MAP[\"ZYX\"]) #Set with component settings later\n self.handCtrl.alignOnXAxis()\n self.handSpace = self.handCtrl.insertSpace(name=\"hand_fk\")\n\n # FK palm\n self.palmCtrl = FKControl('palm', parent=self.handCtrl, shape=\"cube\")\n self.palmCtrl.ro = RotationOrder(ROT_ORDER_STR_TO_INT_MAP[\"ZYX\"]) #Set with component settings later\n self.palmCtrl.alignOnXAxis()\n self.palmSpace = self.palmCtrl.insertSpace()\n\n # IK palm\n self.palmIKSpace = Space('palmIK', parent=self.handleCtrl)\n\n\n # Rig Ref objects\n\n # Add Component Params to IK control\n self.handleCtrlAttrGrp = AttributeGroup(\"DisplayInfo_HandSettings\", parent=self.handleCtrl)\n #ballBreakInputAttr = ScalarAttribute('ballBreak', value=45.0, minValue=0, maxValue=90.0, parent=self.handleCtrlAttrGrp)\n #HandTiltInputAttr = ScalarAttribute('handTilt', value=0.0, minValue=-180, maxValue=180.0, 
parent=self.handleCtrlAttrGrp)\n\n self.ikBlendAttr = ScalarAttribute('ikBlend', value=0.0, minValue=0.0, maxValue=1.0, parent=self.handleCtrlAttrGrp)\n self.ikBlend_cmpOutAttr.connect(self.ikBlendAttr)\n self.handIKInputAttr = ScalarAttribute('handIK', value=0.0, minValue=0.0, maxValue=1.0, parent=self.handleCtrlAttrGrp)\n\n # Need a more elegant way to drive attrs on another component, especially this one where we don't even know if the limb has mocap\n #self.limbMocapAttr = ScalarAttribute('limbMocap', value=0.0, minValue=0.0, maxValue=1.0, parent=self.handleCtrlAttrGrp)\n #self.limbMocap_cmpOutAttr.connect(self.limbMocapAttr)\n self.softIKAttr = ScalarAttribute('softIK', value=0.0, minValue=0.0, parent=self.handleCtrlAttrGrp)\n self.softIK_cmpOutAttr.connect(self.softIKAttr)\n self.stretchAttr = ScalarAttribute('stretch', value=0.0, minValue=0.0, maxValue=1.0, parent=self.handleCtrlAttrGrp)\n self.stretch_cmpOutAttr.connect(self.stretchAttr)\n\n self.ikGoalRefTransform = Transform(self.getName()+'_ikGoalRef', parent=self.handleCtrl)\n\n handDrawDebugInputAttr = BoolAttribute('drawDebug', value=False, parent=self.handleCtrlAttrGrp)\n self.drawDebugInputAttr.connect(handDrawDebugInputAttr)\n # ==========\n # Deformers\n # ==========\n\n self.handDef = Joint(self.getName(), parent=self.deformersParent)\n self.handDef.setComponent(self)\n self.handDef.constrainTo(self.hand_cmpOut, maintainOffset=False)\n self.hand_cmpOut.parentJoint = self.handDef\n\n self.palmDef = Joint('palm', parent=self.handDef)\n self.palmDef.setComponent(self)\n self.palmDef.constrainTo(self.palmCtrl)\n self.palm_cmpOut.parentJoint = self.palmDef\n\n self.parentSpaceInputTgt.childJoints = [self.handDef]\n\n # ==============\n # Constrain I/O\n # ==============\n # Constraint inputs\n\n self.handSpaceConstraint = self.handSpace.constrainTo(self.parentSpaceInputTgt, maintainOffset=True)\n self.handleSpaceConstraint = self.handleSpace.constrainTo(self.globalSRTInputTgt, maintainOffset=True)\n # Constraint outputs\n self.ikgoal_cmpOutConstraint = self.ikgoal_cmpOut.constrainTo(self.handleCtrl, maintainOffset=False)\n\n\n\n # Create IK joints (until footrocker system is integrated)\n self.ikHandTransform = Transform('ikHand', parent=self.handSpace)\n self.ikPalmTransform = Transform('ikPalm', parent=self.ikHandTransform)\n\n\n # ===============\n # Add KL Ops\n # ===============\n\n # Wait, can this be a hier blend op?\n # Add Hand Blend KL Op\n self.IKHandBlendKLOp = KLOperator(self.getName(), 'OSS_IKFootBlendSolver', 'OSS_Kraken')\n self.addOperator(self.IKHandBlendKLOp)\n # Add Att Inputs\n self.IKHandBlendKLOp.setInput('drawDebug', self.drawDebugInputAttr)\n self.IKHandBlendKLOp.setInput('rigScale', self.rigScaleInputAttr)\n self.IKHandBlendKLOp.setInput('blend', self.handIKInputAttr)\n # Add Xfo Inputs)\n self.IKHandBlendKLOp.setInput('ikFoot', self.handleIKSpace)\n self.IKHandBlendKLOp.setInput('fkFoot', self.handCtrl)\n self.IKHandBlendKLOp.setInput('ikBall', self.palmIKSpace)\n self.IKHandBlendKLOp.setInput('fkBall', self.palmCtrl)\n # Add Xfo Outputs\n self.IKHandBlendKLOpHand_out = Transform('IKHandBlendKLOpHand_out', parent=self.outputHrcGrp)\n self.IKHandBlendKLOpPalm_out = Transform('IKHandBlendKLOpPalm_out', parent=self.outputHrcGrp)\n self.IKHandBlendKLOp.setOutput('foot', self.IKHandBlendKLOpHand_out)\n self.IKHandBlendKLOp.setOutput('ball', self.IKHandBlendKLOpPalm_out)\n\n\n Profiler.getInstance().pop()\n\n\n def createControls(self, numSegments, digitNames, data):\n\n digitNameList = 
getDigitNameList(digitNames)\n\n segments = [\"palm\", \"base\", \"mid\", \"tip\", \"end\"]\n if numSegments == 2:\n segments.remove(\"mid\")\n elif numSegments == 1:\n segments.remove(\"mid\")\n segments.remove(\"tip\")\n\n globalScale = Vec3(data['globalComponentCtrlSize'], data['globalComponentCtrlSize'], data['globalComponentCtrlSize'])\n\n self.handCtrl.xfo.ori.getZaxis()\n\n\n\n\n for i, digitName in enumerate(digitNameList):\n parent = self.palm_cmpOut\n defParent = self.handDef\n digiSegCtrls = []\n digiSegDefs = []\n for j, segment in enumerate(segments):\n #Eventually, we need outputs and ports for this component for each digit segment\n #spineOutput = ComponentOutput(digitName+\"_\"+segment, parent=self.outputHrcGrp)\n\n if segment == \"end\":\n continue # don't create control for end (but we need it to loop through control positions correctly)\n digiSegCtrl = FKControl(digitName+\"_\"+segment, parent=parent, shape=\"square\")\n digiSegCtrl.ro = RotationOrder(ROT_ORDER_STR_TO_INT_MAP[\"XZY\"]) #Set with component settings later\n digiSegCtrl.rotatePoints(0,0,90)\n digiSegCtrl.scalePoints(globalScale)\n digiSegCtrls.append(digiSegCtrl)\n\n digiSegDef = Joint(digitName+\"_\"+segment, parent=defParent)\n digiSegDef.setComponent(self)\n digiSegDefs.append(digiSegDef)\n\n defParent = digiSegDef\n\n parent = digiSegCtrl\n ctrlListName = \"digit\"+str(numSegments)+\"SegmentCtrls\"\n\n if (ctrlListName+\"Xfos\") in data.keys():\n\n index = i*len(segments) + j\n\n if (i*numSegments + j) < len(data[ctrlListName+\"Xfos\"]):\n digiSegCtrl.xfo = data[ctrlListName+\"Xfos\"][index]\n\n #Aim Control at ch`ild\n for j in range(len(digiSegCtrls)):\n if j == 0:\n upVectorAxisStr = self.upAxisStr[-1]\n upVectorFunction = getattr(digiSegCtrls[j].xfo.ori, \"get\"+upVectorAxisStr+\"axis\")\n upVector = upVectorFunction()\n if self.upAxisStr.startswith(\"NEG\"):\n upVector = upVector.negate()\n\n # if j == len(digiSegCtrls) - 1:\n # digiSegCtrls[j].xfo.ori = digiSegCtrls[j-1].xfo.ori\n # else:\n # aimAt(digiSegCtrls[j].xfo, aimPos=digiSegCtrls[j+1].xfo.tr, upVector=upVector, aimAxis=self.boneAxis, upAxis=self.upAxis)\n\n digiSegCtrls[j].insertSpace()\n digiSegDefs[j].constrainTo(digiSegCtrls[j]).evaluate()\n\n if self.addPartialJoints:\n twistXfo = self.createOutput(digiSegDefs[j].getName()+\"_partial\", dataType='Xfo', parent=self.outputHrcGrp).getTarget()\n twistXfo.xfo = digiSegDefs[j].xfo\n twistXfo.constrainTo(digiSegDefs[j].getParent(), maintainOffset=True)\n self.createPartialJoint(digiSegDefs[j], baseTranslate=twistXfo, baseRotate=twistXfo, parent=digiSegDefs[j].getParent())\n\n\n\n return True\n\n\n\n\n # =============\n # Data Methods\n # =============\n def loadData(self, data=None):\n \"\"\"Load a saved guide representation from persisted data.\n\n Arguments:\n data -- object, The JSON data object.\n\n Return:\n True if successful.\n\n \"\"\"\n\n super(OSSHandComponentRig, self).loadData( data )\n\n self.mocap = bool(data[\"mocap\"])\n\n self.addPartialJoints = bool(data['addPartialJoints']) #This should be a simple method instead\n\n # TODO: make this a property of the component\n self.boneAxisStr = \"POSX\"\n if self.getLocation() == 'R':\n self.boneAxisStr = \"NEGX\"\n self.boneAxis = AXIS_NAME_TO_TUPLE_MAP[self.boneAxisStr]\n\n self.upAxisStr = \"POSZ\"\n if self.getLocation() == 'R':\n self.upAxisStr = \"NEGZ\"\n self.upAxis = AXIS_NAME_TO_TUPLE_MAP[self.upAxisStr]\n\n\n self.handleSpace.xfo = data['handleXfo']\n #aimAt(self.handleSpace.xfo., aimVector=Vec3(0, 1, 0), 
upPos=self.palmCtrl.xfo.tr, aimAxis=(0, 1, 0), upAxis=(0, 0, 1))\n self.handleCtrl.xfo = self.handleSpace.xfo\n\n self.handSpace.xfo = data['handXfo']\n self.handCtrl.xfo = data['handXfo']\n self.handCtrl.scalePointsOnAxis(data['handLen'], self.boneAxisStr)\n\n self.palmSpace.xfo = data['palmXfo']\n self.palmCtrl.xfo = data['palmXfo']\n self.palmCtrl.scalePointsOnAxis(data['palmLen'] / 5.0, self.boneAxisStr)\n\n self.handleIKSpace.xfo = self.handCtrl.xfo\n self.palmIKSpace.xfo = self.palmCtrl.xfo\n\n self.ikHandTransform = data['handXfo']\n self.ikPalmTransform = data['palmXfo']\n\n\n if self.getLocation() == \"R\":\n pass\n #self.legIKCtrl.rotatePoints(0, 90, 0)\n #self.legIKCtrl.translatePoints(Vec3(-1.0, 0.0, 0.0))\n else:\n pass\n #self.legIKCtrl.rotatePoints(0, -90, 0)\n #self.legIKCtrl.translatePoints(Vec3(1.0, 0.0, 0.0))\n\n\n self.createControls(3, data[\"Digit3SegmentNames\"], data)\n self.createControls(2, data[\"Digit2SegmentNames\"], data)\n self.createControls(1, data[\"Digit1SegmentNames\"], data)\n\n self.connectReverse(self.handIKInputAttr, self.handCtrl.getVisibilityAttr())\n\n self.hand_cmpOutConstraint = self.hand_cmpOut.constrainTo(self.IKHandBlendKLOpHand_out)\n self.palm_cmpOutConstraint = self.palm_cmpOut.constrainTo(self.IKHandBlendKLOpPalm_out)\n\n\n\n # ====================\n # Evaluate Fabric Ops\n # ====================\n # Eval Operators # Order is important\n self.evalOperators()\n # ====================\n # Evaluate Output Constraints (needed for building input/output connection constraints in next pass)\n # ====================\n # Evaluate the *output* constraints to ensure the outputs are now in the correct location.\n # Don't eval *input* constraints because they should all have maintainOffset on and get evaluated at the end during build()\n self.ikgoal_cmpOutConstraint.evaluate()\n self.hand_cmpOutConstraint.evaluate()\n self.palm_cmpOutConstraint.evaluate()\n\n\n #JSON data at this point is generated by guide rig and passed to this rig, should include all defaults+loaded info\n globalScale = Vec3(data['globalComponentCtrlSize'], data['globalComponentCtrlSize'], data['globalComponentCtrlSize'])\n\n self.handCtrl.scalePoints(Vec3(1.0, data['globalComponentCtrlSize']*2, data['globalComponentCtrlSize']))\n self.palmCtrl.scalePoints(Vec3(1.0, data['globalComponentCtrlSize']*2, data['globalComponentCtrlSize']))\n self.handleCtrl.scalePoints(globalScale)\n self.handleCtrl.scalePoints(Vec3(data[\"ikHandleSize\"], data[\"ikHandleSize\"], data[\"ikHandleSize\"]))\n\n # If we remove this, need to update footBlendSolver's translate blend to take leg ikBlend into account\n self.handCtrl.lockTranslation(True, True, True)\n\n \"\"\"\n HandPlane = Control(\"TMP\", shape=\"square\")\n HandPlane.alignOnZAxis()\n HandPlane.scalePoints(Vec3(data['globalComponentCtrlSize'], data['globalComponentCtrlSize'], 1.0))\n # Damn, can't get the Hand length because it is on another component\n # Can we do this with just inputs? 
We'd have to guarantee that everything was in the correct pose first\n        #HandPlane.scalePointsOnAxis(self.handleCtrl.xfo.tr.subtract(self.palmTipPivotTransform.xfo.tr).length(), \"POSZ\")\n        self.handleCtrl.appendCurveData(HandPlane.getCurveData())\n        \"\"\"\n        self.tagAllComponentJoints([self.getDecoratedName()] + self.tagNames)\n\n\ndef getDigitNameList(digitNames):\n    \"\"\" tokenizes string argument, returns a list\"\"\"\n\n    digitNameList = re.split(r'[ ,:;]+', digitNames)\n\n    # These checks should actually prevent the component_inspector from closing maybe?\n    for name in digitNameList:\n        if name and not re.match(r'^[\\w_]+$', name):\n            # Eventually raise a component-specific exception that can display the component name, etc.\n            raise ValueError(\"digitNames \\\"\"+name+\"\\\" contains non-alphanumeric characters\")\n\n    digitNameList = [x for x in digitNameList if x != \"\"]\n\n    if not digitNameList:\n        return []\n\n    if len(digitNameList) > len(set(digitNameList)):\n        raise ValueError(\"Duplicate names in digitNames\")\n\n    return digitNameList\n\nfrom kraken.core.kraken_system import KrakenSystem\nks = KrakenSystem.getInstance()\nks.registerComponent(OSSHandComponentGuide)\nks.registerComponent(OSSHandComponentRig)\n","sub_path":"Python/OSS/OSS_hand_component.py","file_name":"OSS_hand_component.py","file_ext":"py","file_size_in_byte":28212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"464388731","text":"from django.shortcuts import render\nfrom django.template import RequestContext, loader\n# Create your views here.\nfrom django.http import HttpResponse\nfrom django.http import *\nfrom django.shortcuts import render_to_response,redirect\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.core.urlresolvers import reverse\nfrom ReviewProcess.models import ReportingManagerProfile\nfrom django import forms\nfrom .forms import ContactForm\nfrom ReviewProcess.models import ReviewQuestion\n\ndef index(request):\n    import pdb;pdb.set_trace()\n    template = loader.get_template('ReviewProcess/index.html')\n    context = RequestContext(request, {\n        'incorrect_login': False,\n    })\n    logout(request)\n    username = password = ''\n    if request.POST:\n        username = request.POST['uid']\n        password = request.POST['upass']\n        user = authenticate(username=username, password=password)\n        if user is not None:\n            if user.is_active:\n                login(request, user)\n                return HttpResponseRedirect(reverse('home'))\n        else:\n            context = RequestContext(request, {\n                'incorrect_login': True,\n            })\n    return HttpResponse(template.render(context))\n\n@login_required(login_url='/index/')\ndef home(request):\n    template = loader.get_template('ReviewProcess/home.html')\n    context = RequestContext(request, {\n        'name': 'Aman',\n    })\n    return HttpResponse(template.render(context))\n@login_required\ndef user_logout(request):\n    context = RequestContext(request,\n                              {\"logout\":\"True\"})\n    logout(request)\n    # Redirect back to index page.\n    template = loader.get_template('ReviewProcess/index.html')\n    return HttpResponse(template.render(context))\n\n@login_required\ndef user_profile(request):\n    context = RequestContext(request,\n                              {\"user_id\":request.user.id})\n    # Redirect back to index page.\n    template = loader.get_template('ReviewProcess/profile.html')\n    return 
HttpResponse(template.render(context))\n@login_required\ndef Test(request):\n context = RequestContext(request)\n # Redirect back to index page.\n template = loader.get_template('ReviewProcess/DropDown.html')\n return HttpResponse(template.render(context))\n@login_required\ndef createtask(request):\n # Redirect back to index page.\n context = {}\n if ReportingManagerProfile.objects.filter(id=request.user.id).count():\n reporter = ReportingManagerProfile.objects.get(id=request.user.id)\n reportees = reporter.reportees.all()\n context = {'reportees': reportees}\n #import pdb;pdb.set_trace()\n return render_to_response(\n 'ReviewProcess/createtask.html',context,\n context_instance=RequestContext(request)\n )\n\n@login_required\ndef editprofinfo(request):\n # Redirect back to index page.\n context = {}\n import pdb;pdb.set_trace()\n import pdb;pdb.set_trace()\n return render_to_response(\n 'ReviewProcess/profile.html',context,\n context_instance=RequestContext(request)\n )\n\n@login_required\ndef getreviewquestion(request):\n # Redirect back to index page.\n context = {}\n allobj = ReviewQuestion.objects.all()\n context['reviewquestions'] = allobj\n questiondict = {}\n questionset = []\n import pdb;pdb.set_trace()\n return render_to_response(\n 'ReviewProcess/myfrom.html',context,\n context_instance=RequestContext(request)\n )","sub_path":"ReviewProcess/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"128035103","text":"# accept command line args\nimport argparse\n\n# database connection layers\nimport psycopg2\nimport pymysql\n\n# datetime information\nimport datetime\n\n# aws connection\nimport boto3\n\n# log start\nprint(\"Starting Process: \" + str(datetime.datetime.now()))\n\ndatetimestring = str(datetime.datetime.now()).replace(\"-\",\"\").replace(\":\",\"\").replace(\".\",\"\").replace(\" \",\"\")[:-6]\n\n# Get ETL ProcessName\n# processname = input(\"Enter ETL Master Process Name: \")\nparser = argparse.ArgumentParser(description='Process Master ETL Process Name')\nparser.add_argument('etlname', help=\"Enter the Master Process ETL Name\")\nargs = parser.parse_args()\n\nprocessname = args.etlname\n\n# mysql connection for config values\nmysqlconn = pymysql.connect()\nconfigcursor = mysqlconn.cursor()\n\nconfigcursor.execute(\n \"\"\"SELECT cp.ChildProcessName,\n ds.DataSourceId,\n ds.DataSourceTypeId,\n dd.DataSourceId as DataDestinationId,\n dd.DataSourceTypeId as DataDestinationTypeId,\n ds.ConnectionString as DataSourceConnectionString,\n dd.ConnectionString as DataDestinationConnectionString,\n cp.ETLCode,\n cp.ETLCodeTypeId\n FROM MasterProcess mp\n INNER JOIN ChildProcess cp on mp.MasterProcessId = cp.MasterProcessId\n INNER JOIN DataSource ds ON cp.DataSourceId = ds.DataSourceId\n INNER JOIN DataSource dd ON cp.DataDestinationId = dd.DataSourceId\n\n WHERE mp.ProcessName = '\"\"\"+processname+\"\"\"'\n AND ChildProcessTypeId = 3\n ORDER BY Priority, Sequence;\"\"\"\n )\n\nconfigvalues = configcursor.fetchall()\n\nmysqlconn.close()\n\n# assign values, copy data into Redshift, and archive files\nfor row in configvalues:\n childprocessname = row[0]\n datasourceid = row[1]\n datasourcetypeid = row[2]\n datadestinationid = row[3]\n datadestinationtypeid = row[4]\n datasourceconnectionstring = row[5]\n datadestinationconnectionstring = row[6]\n etlcode = row[7]\n etlcodetypeid = row[8]\n\n session = 
boto3.Session(aws_access_key_id='',aws_secret_access_key='',region_name='')\n s3 = session.resource('s3')\n bucket = s3.Bucket(datasourceconnectionstring)\n\n # loop through bucket to find files. Need to replace if statements with object.filter()\n for files in bucket.objects.all():\n if etlcode in files.key:\n \n etlcodeexec = files.get()['Body'].read()\n\n # connect to redshift, run copy commands\n redshiftconn = psycopg2.connect(datadestinationconnectionstring)\n importcursor = redshiftconn.cursor()\n\n importcursor.execute(etlcodeexec)\n redshiftconn.commit()\n redshiftconn.close()\n\n# log completion time\nprint(\"Process Completed: \" + str(datetime.datetime.now()))","sub_path":"datawarehouse/dwloadpublic.py","file_name":"dwloadpublic.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"194141660","text":"#!usr/bin/env python\n#coding:utf-8\n\"\"\"\n@author: Haidong Zhang\n@contact: haidong_zhang14@yahoo.com\n@time: 2020/12/10 18:21\n@project: MalariaDetection\n@description: \n\"\"\"\nfrom keras.layers import Flatten, Dense, BatchNormalization, UpSampling2D\nfrom keras.models import Model\nfrom keras.optimizers import adam\n\nfrom utils import module\n\n\npad = 1\nkernel = 3\nfilter_size = 64\npool_size = 2\n\n\ndef decode_segnet(img_input, f5):\n\n model = module.dep_conv2(512, f5)\n model = UpSampling2D((pool_size, pool_size))(model)\n\n model = module.dep_conv2(256, model)\n model = UpSampling2D((pool_size, pool_size))(model)\n\n model = module.dep_conv2(128, model)\n model = UpSampling2D((pool_size, pool_size))(model)\n\n model = module.dep_conv2(64, model)\n model = UpSampling2D((pool_size, pool_size))(model)\n\n model = Flatten()(model)\n model = Dense(512)(model)\n print(f\"segnet: {model}\")\n\n o = Dense(1, activation=\"sigmoid\")(model)\n\n model = Model(img_input, o)\n model.compile(optimizer=adam(lr=0.00001), loss='binary_crossentropy',\n metrics=['accuracy']) # define optimizer and loss functions as well as required metrics\n return model\n\n\ndef segnet():\n img_input, [f1,f2, f3, f4, f5] = module.base_encode()\n model = decode_segnet(img_input, f5)\n return model\n\n\ndef vgg_segnet():\n img_input, [f1,f2, f3, f4, f5] = module.vgg_encode()\n model = decode_segnet(img_input, f5)\n return model\n\n\ndef res_segnet():\n img_input, [f1,f2, f3, f4, f5] = module.res50_encode()\n model = decode_segnet(img_input, f5)\n return model","sub_path":"Step 3/model/SEGNet.py","file_name":"SEGNet.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"495554190","text":"'''\nCreate a Planet class that models attributes and methods of\na planet object.\n\nUse the appropriate dunder method to get informative output with print()\n\n'''\n\nclass Planet():\n\n def __init__(self, name, color, system):\n self.name = name\n self.color = color\n self.system = system\n\n def __str__(self):\n return f\"Planet {self.name} is a {self.color} planet in the {self.system}.\"\n\n def bears_life(self):\n if self.color.lower() == 'blue':\n return True\n else:\n return False\n\n\nmars = Planet('Mars', 'red', 'Solar System')\nearth = Planet('Earth', 'blue', 'Solar System')\nnaboo = Planet('Naboo', 'blue', 'Naboo 
System')\n\nprint(mars.bears_life())\nprint(earth.bears_life())\nprint(naboo)\n","sub_path":"week_04/labs/11_classes_objects_methods/Exercise_planets.py","file_name":"Exercise_planets.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"247587787","text":"import deck\n\nclass Hand:\n\n    def __init__(self):\n        self.cards = []\n\n    def receiveCard(self, card):\n        self.cards.append(card)\n\n    def size(self):\n        return len(self.cards)\n\n    def score(self):\n        nonAceCards = [c for c in self.cards if c.value != 'A']\n        aces = [c for c in self.cards if c.value == 'A']\n        \n        scores = 0\n        for c in nonAceCards:\n            if isinstance(c.value, basestring):\n                scores += 10 # J, Q, K each count as 10; aces are scored below\n            else:\n                scores += c.value \n        for c in aces:\n            if scores + 11 <= 21:\n                scores += 11\n            else:\n                scores += 1\n\n        return scores\n\n    def __repr__(self):\n        cardString = ''\n        for card in self.cards:\n            cardString += str(card)\n            cardString += ' '\n        return cardString\n","sub_path":"Session 2.1/blackjack/hand.py","file_name":"hand.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"112969633","text":"from tiny_db_mod.tiny_db import DB_connect, ToDB, Table, IntegerColumn\n\n#class WisentTable(Table):\n#    ptt = IntegerColumn()\n#    def something(self):\n#        pass\n#\n#class AuthTable(Table):\n#    first_field = 10\n#    second_field = 20\n#    third_field = Column()\n#    def some(self):\n#        pass\n\nif __name__ == '__main__':\n#    db = ToDB()\n#    tt = ['roto', 'foto']\n#    db.create_table(tt)\n#    print(db.db_cursor())\n    t = 3\n    print(type(t).__name__)\n","sub_path":"tiny_db_mod/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"456902772","text":"import numpy as np\n\n# Represents a motion planning problem to be solved using A*\nclass AStar(object):\n\n    def __init__(self, statespace_lo, statespace_hi, x_init, x_goal, occupancy, resolution):\n        self.statespace_lo = statespace_lo # state space lower bound (e.g., (-5, -5))\n        self.statespace_hi = statespace_hi # state space upper bound (e.g., (5, 5))\n        self.occupancy = occupancy # occupancy grid\n        self.resolution = resolution # resolution of the discretization of state space (cell/m)\n        self.x_init = self.snap_to_grid(x_init) # initial state\n        self.x_goal = self.snap_to_grid(x_goal) # goal state\n\n        self.closed_set = [] # the set containing the states that have been visited\n        self.open_set = [] # the set containing the states that are candidates for future expansion\n\n        self.f_score = {} # dictionary of the f score (estimated cost from start to goal passing through state)\n        self.g_score = {} # dictionary of the g score (cost-to-come from start to state)\n        self.came_from = {} # dictionary keeping track of each state's parent to reconstruct the path\n\n        self.open_set.append(x_init)\n        self.g_score[x_init] = 0\n        self.f_score[x_init] = self.distance(x_init,x_goal)\n\n        self.path = None # the final path as a list of states\n\n        self.dist_from_wall = 1.0 # distance from wall to keep\n\n    # Checks if a given state is free, meaning it is inside the bounds of the map and\n    # is not inside any obstacle\n    # INPUT: (x)\n    # x - tuple state\n    # OUTPUT: Boolean True/False\n    def is_free(self, x):\n        if x==self.x_init or x==self.x_goal:\n            return True\n        for dim in range(len(x)):\n            if x[dim] < self.statespace_lo[dim]:\n                return False\n            if x[dim] >= 
self.statespace_hi[dim]:\n return False\n if not self.occupancy.is_free(x):\n return False\n return True\n\n # computes the euclidean distance between two states\n # INPUT: (x1, x2)\n # x1 - first state tuple\n # x2 - second state tuple\n # OUTPUT: Float euclidean distance\n def distance(self, x1, x2):\n return np.linalg.norm(np.array(x1)-np.array(x2))\n\n # returns the closest point on a discrete state grid\n # INPUT: (x)\n # x - tuple state\n # OUTPUT: A tuple that represents the closest point to x on the discrete state grid\n def snap_to_grid(self, x):\n return (self.resolution*round(x[0]/self.resolution), self.resolution*round(x[1]/self.resolution))\n\n # gets the FREE neighbor states of a given state. Assumes a motion model\n # where we can move up, down, left, right, or along the diagonals by an\n # amount equal to self.resolution.\n # Use self.is_free in order to check if any given state is indeed free.\n # Use self.snap_to_grid (see above) to ensure that the neighbors you compute\n # are actually on the discrete grid, i.e., if you were to compute neighbors by\n # simply adding/subtracting self.resolution from x, numerical error could\n # creep in over the course of many additions and cause grid point equality\n # checks to fail. To remedy this, you should make sure that every neighbor is\n # snapped to the grid as it is computed.\n # INPUT: (x)\n # x - tuple state\n # OUTPUT: List of neighbors that are free, as a list of TUPLES\n def get_neighbors(self, x):\n (xpos, ypos) = x\n moves = [[xpos-self.resolution, ypos],\n [xpos+self.resolution, ypos],\n [xpos, ypos+self.resolution],\n [xpos, ypos-self.resolution],\n [xpos-self.resolution*np.sqrt(2), ypos+self.resolution*np.sqrt(2)],\n [xpos-self.resolution*np.sqrt(2), ypos-self.resolution*np.sqrt(2)],\n [xpos+self.resolution*np.sqrt(2), ypos+self.resolution*np.sqrt(2)],\n [xpos+self.resolution*np.sqrt(2), ypos-self.resolution*np.sqrt(2)]];\n\n neighbors = [tuple(self.snap_to_grid(el)) for el in moves if self.is_free(self.snap_to_grid(el))]\n\n return neighbors\n\n # Gets the state in open_set that has the lowest f_score\n # INPUT: None\n # OUTPUT: A tuple, the state found in open_set that has the lowest f_score\n def find_best_f_score(self):\n return min(self.open_set, key=lambda x: self.f_score[x])\n\n # Use the came_from map to reconstruct a path from the initial location\n # to the goal location\n # INPUT: None\n # OUTPUT: A list of tuples, which is a list of the states that go from start to goal\n def reconstruct_path(self):\n path = [self.x_goal]\n current = path[-1]\n while current != self.x_init:\n path.append(self.came_from[current])\n current = path[-1]\n return list(reversed(path))\n\n\n # def fix_animal_position(self, eps):\n\n # goal = self.x_goal\n\n # # find the wall closest to the goal position\n # closest_line = None\n # min_dist = 1000\n\n # for obs in self.occupancy.obstacles:\n\n # x1, y1 = obs[0]\n # x2, y2 = obs[1]\n\n # x0, y0 = goal\n # dist = abs((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)/np.sqrt((y2-y1)**2 + (x2-x1)**2)\n\n # if dist < min_dist:\n # min_dist = dist\n # closest_line = obs\n\n # # Find slope of wall and a line perpendicular to the wall\n # x1, y1 = obs[0]\n # x2, y2 = obs[1]\n\n # slope_wall = (y2-y1)/(x2-x1)\n # slope_line = (1.,-1./slope_wall)\n\n # # normalize slope_line, and add the line scaled by\n # # (min_dist + eps), which gives new goal\n # norm_line = np.norm(slope_line)\n\n\n # new_goal = goal + slope_line/norm_line*(min_dist + eps)\n # return new_goal\n\n # Solves the planning problem 
using the A* search algorithm. It places\n    # the solution as a list of tuples (each representing a state) that go\n    # from self.x_init to self.x_goal inside the variable self.path\n    # INPUT: None\n    # OUTPUT: Boolean, True if a solution from x_init to x_goal was found\n    def solve(self):\n\n        # animal position is most likely beyond the wall\n        # if not self.is_free(self.x_goal):\n        #     self.x_goal = self.fix_animal_position(self.dist_from_wall)\n\n        while len(self.open_set)>0:\n            x_curr = self.find_best_f_score()\n\n            if x_curr == self.x_goal:\n                self.path = self.reconstruct_path()\n                return True\n\n            self.open_set.remove(x_curr)\n            self.closed_set.append(x_curr)\n\n            for x_neigh in self.get_neighbors(x_curr):\n                if x_neigh in self.closed_set:\n                    continue\n\n                tentative_g_score = self.g_score[x_curr] + self.distance(x_curr, x_neigh)\n\n                if x_neigh not in self.open_set:\n                    self.open_set.append(x_neigh)\n                elif (tentative_g_score > self.g_score[x_neigh]):\n                    continue\n\n                self.came_from[x_neigh] = x_curr\n                self.g_score[x_neigh] = tentative_g_score\n                self.f_score[x_neigh] = tentative_g_score + self.distance(x_neigh, self.x_goal)\n\n        return False\n\n# A 2D state space grid with a set of rectangular obstacles. The grid is fully deterministic\nclass DetOccupancyGrid2D(object):\n    def __init__(self, width, height, obstacles):\n        self.width = width\n        self.height = height\n        self.obstacles = obstacles\n\n    def is_free(self, x):\n        for obs in self.obstacles:\n            inside = True\n            for dim in range(len(x)):\n                if x[dim] < obs[0][dim] or x[dim] > obs[1][dim]:\n                    inside = False\n                    break\n            if inside:\n                return False\n        return True\n","sub_path":"scripts/astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":7938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"629877844","text":"import os\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nimport numpy as np\r\n# import random\r\nfrom numpy.random import seed\r\nfrom keras.optimizers import Adam\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Conv1D, MaxPooling1D, Dense, Flatten, Dropout # , GlobalMaxPooling1D\r\nfrom keras.callbacks import ModelCheckpoint\r\n# from keras.utils import plot_model\r\nimport keras.backend as K\r\n# import process_data\r\nimport time\r\nfrom HAPT import split_HAPT\r\n\r\n# import sys\r\n# import pyRAPL\r\n\r\npca_dims = 10\r\n\r\n# ...\r\n# Instructions to be evaluated.\r\n# ...\r\n\r\n# Select sample rate moderator. Default sample rate is 50Hz. 
Moderated sample rate would be default//rate\r\n# sampling_rate = [10, 5, 2.5, 2, 1.25, 1]\r\n# for rate in sampling_rate:\r\nrate = 1\r\nmoderated = str(int(50 // rate))\r\nprint(moderated)\r\nX_train, X_test, y_train, y_test = split_HAPT.main(pca_dims)\r\nstart_time = time.time()\r\n# Load all train and test data (* dynamic and static data are mixed.)\r\n\r\nprint(y_test.shape)\r\n# Convert (1, 2, 3) labels to (0, 1, 2)\r\ny_train = y_train - 1\r\nprint(y_train.shape)\r\n\r\nprint((\"test_dynamic shape: \", X_test.shape))\r\n\r\nprint(X_train.shape)\r\nprint(y_test.shape)\r\n\r\nn_classes = 12\r\n\r\n# Convert to one hot encoding vector\r\ny_train_dynamic_oh = np.eye(n_classes)[y_train]\r\n# y_train_dynamic_oh = np.delete(y_train_dynamic_oh, 0, 1)\r\n\r\nprint(y_train_dynamic_oh.shape)\r\nprint(y_train.shape)\r\n\r\nprint(y_train)\r\nprint(y_train_dynamic_oh)\r\n\r\n# Fit 1d CNN for dynamic HAR\r\n\r\nseed(2020)\r\nmodel = Sequential()\r\nmodel.add(Conv1D(100, 12, input_shape=(pca_dims, 1), activation='relu'))\r\nmodel.add(MaxPooling1D(8))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(12, activation='softmax'))\r\nmodel.add(Dropout(0.5))\r\n#\r\nadam = Adam(lr=0.0004, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\r\nmodel.compile(loss='mean_squared_error', optimizer=adam, metrics=['accuracy'])\r\n\r\n# model = Sequential()\r\n# model.add(Conv1D(100, 3, input_shape=(30, 1), activation='relu', padding='same'))\r\n# # model.add(MaxPooling1D(12, padding='same'))\r\n# model.add(Conv1D(64, 3, activation='relu', padding='same'))\r\n# # model.add(MaxPooling1D(12, padding='same'))\r\n# model.add(Dropout(0.50))\r\n# model.add(GlobalMaxPooling1D())\r\n# # model.add(MaxPooling1D(12, padding='same'))\r\n# # model.add(Conv1D(50, 12, activation='relu', padding='same'))\r\n# # model.add(MaxPooling1D(12, padding='same'))\r\n# model.add(Dropout(0.25))\r\n# # model.add(Conv1D(100, 12, activation='relu', padding='same'))\r\n# # model.add(Flatten())\r\n# model.add(Dense(100, activation='softmax'))\r\n# model.add(Dense(12, activation='softmax'))\r\n# # model.add(Dropout(0.50))\r\n\r\n# adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\r\n# model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])\r\n\r\n# Summarize layers\r\nprint((model.summary()))\r\n\r\n# Save model image\r\n# if not os.path.exists('fig_har_dyna.png'):\r\n# model_file = 'fig_har_dyna.png'\r\n# plot_model(model, to_file=model_file)\r\n\r\nnew_dir = 'model/' + moderated + 'Hz/weights/'\r\nif not os.path.exists(new_dir):\r\n os.makedirs(new_dir)\r\nfpath = new_dir + moderated + 'Hz' + '_pca' + str(pca_dims) + '_weights.{epoch:02d}-{val_accuracy:.2f}.hdf5'\r\n\r\ncp_cb = ModelCheckpoint(fpath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto', period=1)\r\n\r\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\n# To disable learning, the below code - two lines - is commented.\r\n# To enable learning uncomment the below two lines of code.\r\n\r\nmodel.fit(np.expand_dims(X_train, axis=2), y_train_dynamic_oh,\r\n batch_size=32, epochs=50, verbose=2, validation_split=0.2, callbacks=[cp_cb])\r\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\nmodel.save('model/' + moderated + 'Hz/' + 'pca' + str(pca_dims) + '.hdf5')\r\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\ndel model\r\nK.clear_session()\r\n# report.data.head()\r\n#\r\n#\r\n# '''\r\n#\r\n# /usr/bin/python2.7 
/home/hcilab/Documents/OSS/sensors2018cnnhar/har/har_dyna_learn_model.py\r\n# /home/hcilab/.local/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n# from ._conv import register_converters as _register_converters\r\n# Using TensorFlow backend.\r\n#\r\n# +++ DATA STATISTICS +++\r\n#\r\n# train_dynamic shape: (3285, 561)\r\n# test_dynamic shape: (1387, 561)\r\n# _________________________________________________________________\r\n# Layer (type) Output Shape Param #\r\n# =================================================================\r\n# conv1d_1 (Conv1D) (None, 559, 100) 400\r\n# _________________________________________________________________\r\n# max_pooling1d_1 (MaxPooling1 (None, 186, 100) 0\r\n# _________________________________________________________________\r\n# flatten_1 (Flatten) (None, 18600) 0\r\n# _________________________________________________________________\r\n# dense_1 (Dense) (None, 3) 55803\r\n# _________________________________________________________________\r\n# dropout_1 (Dropout) (None, 3) 0\r\n# =================================================================\r\n# Total params: 56,203\r\n# Trainable params: 56,203\r\n# Non-trainable params: 0\r\n# _________________________________________________________________\r\n# None\r\n#\r\n#\r\n# Process finished with exit code 0\r\n#\r\n# '''\r\n","sub_path":"HAPT/testing4.py","file_name":"testing4.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"453113009","text":"from django.db.models import QuerySet\nfrom django.shortcuts import render\nfrom django.views import generic\n\n# Create your views here.\nimport json\nfrom django.http import HttpResponse\nfrom .models import User, Brand, Nutrition, NutriScore, Category, FRC, Food\nfrom django.views.decorators.csrf import csrf_exempt\nimport datetime\nimport random\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\n@csrf_exempt\ndef login(request):\n dic = {}\n if request.method == 'GET':\n dic['status'] = \"Failed\"\n dic['message'] = \"Wrong Method\"\n return HttpResponse(json.dumps(dic))\n\n try:\n post_content = json.loads(request.body)\n username = post_content['username']\n password = post_content['password']\n user = User.objects.get(username=username)\n except (KeyError, json.decoder.JSONDecodeError):\n dic['status'] = \"Failed\"\n dic['message'] = \"No Input\"\n return HttpResponse(json.dumps(dic))\n except User.DoesNotExist:\n dic['status'] = \"Failed\"\n dic['message'] = \"Wrong Username\"\n return HttpResponse(json.dumps(dic))\n if user.password != password:\n dic['message'] = \"Wrong Password\"\n dic['status'] = \"Failed\"\n return HttpResponse(json.dumps(dic))\n else:\n dic['status'] = \"Success\"\n dic['user_id'] = user.uid\n dic['user_role'] = user.role\n return HttpResponse(json.dumps(dic))\n\n\n@csrf_exempt\ndef register(request):\n dic = {}\n if request.method == 'GET':\n dic['status'] = \"Failed\"\n dic['message'] = \"Wrong Method\"\n return HttpResponse(json.dumps(dic))\n try:\n post_content = json.loads(request.body)\n username = post_content['username']\n password = post_content['password']\n role = post_content['role']\n user = User.objects.get(username=username)\n except (KeyError, json.decoder.JSONDecodeError):\n dic['status'] = \"Failed\"\n 
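# sample (illustrative values only, mirroring the keys read above):
# {
#     "username": "user_sample_1",
#     "password": "pass_sample_1",
#     "role": 0
# }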
dic['message'] = \"No Input\"\n return HttpResponse(json.dumps(dic))\n except User.DoesNotExist:\n dic['status'] = \"Success\"\n now = datetime.datetime.now()\n newUser = User(username=username, password=password, role=role, register_time=now)\n newUser.save()\n return HttpResponse(json.dumps(dic))\n if user is not None:\n dic['status'] = \"Failed\"\n dic['message'] = \"User exist\"\n return HttpResponse(json.dumps(dic))\n\n\n# sample\n# {\n# \"uid\": \"1\",\n# \"food_name\": \"food_sample_5\",\n# \"carbohydrate\": 1.,\n# \"fat\": 1.,\n# \"sugar\": 1.,\n# \"energy\": 1.,\n# \"protein\": 1.,\n# \"brand\": \"brand_sample_5\",\n# \"categories\": [\"category_sample_1\", \"category_sample_2\", \"category_sample_3\", \"category_sample_4\"]\n# \"score\": 3\n# }\n@csrf_exempt\ndef create(request):\n dic = {}\n if request.method == 'GET':\n dic['status'] = \"Failed\"\n dic['message'] = \"Wrong Method\"\n return HttpResponse(json.dumps(dic))\n try:\n post_content = json.loads(request.body)\n creator = post_content['uid']\n food_name = post_content['food_name']\n nutrition_carbohydrate = post_content['carbohydrate']\n nutrition_fat = post_content['fat']\n nutrition_sugar = post_content['sugar']\n nutrition_energy = post_content['energy']\n nutrition_protein = post_content['protein']\n brand = post_content['brand']\n categories = post_content['categories']\n score_id = post_content['score']\n\n isBrand = True\n # get brand id\n try:\n br = Brand.objects.get(name=brand)\n except Brand.DoesNotExist:\n isBrand = False\n if isBrand is False:\n newBrand = Brand(None, brand, 0)\n newBrand.save()\n br = Brand.objects.get(name=brand)\n brand_id = br.id\n\n # create nutrition\n newNutrition = Nutrition(None,\n nutrition_carbohydrate,\n nutrition_sugar,\n nutrition_protein,\n nutrition_fat,\n nutrition_energy)\n newNutrition.save()\n\n # get nutrition id\n nutri = Nutrition.objects.last()\n nutrition_id = nutri.id\n\n # create food\n newFood = Food(None, food_name, nutrition_id, brand_id, score_id, creator)\n newFood.save()\n\n # get food\n fo = Food.objects.last()\n\n # create relation\n for category in categories:\n isCat = True\n try:\n cat = Category.objects.get(name=category)\n except Category.DoesNotExist:\n isCat = False\n if isCat is False:\n newCat = Category(name=category)\n newCat.save()\n cat = Category.objects.get(name=category)\n newFRC = FRC(food=fo, category=cat)\n newFRC.save()\n\n # brand product count +1\n br.product_count = br.product_count + 1\n br.save()\n\n dic['status'] = \"Success\"\n\n except (KeyError, json.decoder.JSONDecodeError):\n dic['status'] = \"Failed\"\n dic['message'] = \"No Input\"\n\n return HttpResponse(json.dumps(dic))\n\n\n# sample\n# {\n# \"uid\": \"1\",\n# \"food_id\": \"105111\"\n# }\n@csrf_exempt\ndef delete(request):\n dic = {}\n if request.method == 'GET':\n dic['status'] = \"Failed\"\n dic['message'] = \"Wrong Method\"\n return HttpResponse(json.dumps(dic))\n try:\n post_content = json.loads(request.body)\n uid = post_content['uid']\n food_id = post_content['food_id']\n food = Food.objects.get(id=food_id)\n user = User.objects.get(uid=uid)\n\n if user.role != 1:\n dic['status'] = \"Failed\"\n dic['message'] = \"No Permission\"\n return HttpResponse(json.dumps(dic))\n\n # delete all relations\n FRC.objects.filter(food=food).delete()\n\n # brand product count -1\n br = food.brand\n br.product_count = br.product_count - 1\n br.save()\n\n # get nutrition id\n nutri_id = food.nutri.id\n\n # delete food\n Food.objects.get(id=food_id).delete()\n\n # delete nutrition\n 
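# (Deletion order matters here: relations first, then the food, then its
# nutrition row -- Food references Nutrition, so nutri_id is captured above
# and the nutrition record is only removed once nothing points at it.)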
Nutrition.objects.get(id=nutri_id).delete()\n dic['status'] = \"Success\"\n except (KeyError, json.decoder.JSONDecodeError):\n dic['status'] = \"Failed\"\n dic['message'] = \"No Input\"\n except Food.DoesNotExist:\n dic['status'] = \"Failed\"\n dic['message'] = \"No Food\"\n except User.DoesNotExist:\n dic['status'] = \"Failed\"\n dic['message'] = \"No User\"\n except FRC.DoesNotExist:\n dic['status'] = \"Failed\"\n dic['message'] = \"Unknown Error\"\n\n return HttpResponse(json.dumps(dic))\n\n\n# sample\n# {\n# \"uid\": 1,\n# \"food_id\": 105106,\n# \"carbohydrate\": 1.2,\n# \"fat\": 1.2,\n# \"sugar\": 1.2,\n# \"energy\": 1.2,\n# \"protein\": 1.2,\n# \"score\": 4,\n# }\n@csrf_exempt\ndef update(request):\n dic = {}\n if request.method == 'GET':\n dic['status'] = \"Failed\"\n dic['message'] = \"Wrong Method\"\n return HttpResponse(json.dumps(dic))\n try:\n post_content = json.loads(request.body)\n uid = post_content['uid']\n food_id = post_content['food_id']\n nutrition_carbohydrate = post_content['carbohydrate']\n nutrition_fat = post_content['fat']\n nutrition_sugar = post_content['sugar']\n nutrition_energy = post_content['energy']\n nutrition_protein = post_content['protein']\n score_id = post_content['score']\n\n food = Food.objects.get(id=food_id)\n uid = int(uid)\n user = User.objects.get(uid=uid)\n if user.role != 1:\n dic['status'] = \"Failed\"\n dic['message'] = \"No Permission\"\n return HttpResponse(json.dumps(dic))\n # update score\n new_score = NutriScore.objects.get(id=score_id)\n food.score = new_score\n food.save()\n\n # update nutrition\n nutri = food.nutri\n nutri.carbohydrate = nutrition_carbohydrate\n nutri.fat = nutrition_fat\n nutri.sugar = nutrition_sugar\n nutri.energy_kcal = nutrition_energy\n nutri.protein = nutrition_protein\n nutri.save()\n dic['status'] = \"Success\"\n\n except (KeyError, json.decoder.JSONDecodeError):\n dic['status'] = \"Failed\"\n dic['message'] = \"No Input\"\n except Food.DoesNotExist:\n dic['status'] = \"Failed\"\n dic['message'] = \"No Food\"\n except FRC.DoesNotExist:\n dic['status'] = \"Failed\"\n dic['message'] = \"Unknown Error\"\n return HttpResponse(json.dumps(dic))\n\n\n@csrf_exempt\ndef query(request, food_start):\n dic = {'food_id': [], 'food_name': [], 'food_brand': [], 'food_categories': [], 'food_score': []}\n try:\n food_list = Food.objects.all()[food_start: food_start + 60]\n for food_li in food_list:\n _Id = food_li.id\n\n _Name = food_li.name\n\n br = food_li.brand\n _Brand = br.name\n\n relations = FRC.objects.filter(food=_Id)\n _Cat = []\n for relation in relations:\n cat = relation.category\n _Cat.append(cat.name)\n\n sco = food_li.score\n _Score = sco.r\n\n dic['food_id'].append(_Id)\n dic['food_name'].append(_Name)\n dic['food_brand'].append(_Brand)\n dic['food_categories'].append(_Cat)\n dic['food_score'].append(_Score)\n dic['status'] = \"Success\"\n except Food.DoesNotExist:\n dic['status'] = \"Failed\"\n\n return HttpResponse(json.dumps(dic))\n\n\n@csrf_exempt\ndef product(request, food_id):\n dic = {}\n try:\n food = Food.objects.get(id=food_id)\n dic['food_id'] = food.id\n dic['food_name'] = food.name\n dic['food_brand'] = food.brand.name\n dic['food_brand_id'] = food.brand.id\n dic['food_brand_product_count'] = food.brand.product_count\n dic['food_score'] = food.score.r\n dic['food_score_desc'] = food.score.des\n dic['food_nutrition_carbohydrate'] = food.nutri.carbohydrate\n dic['food_nutrition_sugar'] = food.nutri.sugar\n dic['food_nutrition_protein'] = food.nutri.protein\n dic['food_nutrition_fat'] = 
food.nutri.fat\n dic['food_nutrition_energy_kcal'] = food.nutri.energy_kcal\n dic['food_creator'] = food.creator.uid\n dic['categories_id'] = []\n dic['categories_name'] = []\n\n relations = FRC.objects.filter(food=food)\n for relation in relations:\n dic['categories_id'].append(relation.category.id)\n dic['categories_name'].append(relation.category.name)\n dic['status'] = \"Success\"\n except (Food.DoesNotExist, FRC.DoesNotExist):\n dic['status'] = \"Failed\"\n\n return HttpResponse(json.dumps(dic))\n\n\n@csrf_exempt\ndef search(request):\n dic = {}\n if request.method == 'GET':\n dic['status'] = \"Failed\"\n dic['message'] = \"Wrong Method\"\n return HttpResponse(json.dumps(dic))\n try:\n post_content = json.loads(request.body)\n food_name = post_content[\"food_name\"]\n foods = Food.objects.filter(name__contains=food_name)[:5]\n dic['food_ids'] = []\n for food in foods:\n dic['food_ids'].append(food.id)\n dic['status'] = \"Success\"\n except (KeyError, json.decoder.JSONDecodeError):\n dic['status'] = \"Failed\"\n dic['message'] = \"No Input\"\n except (Food.DoesNotExist, FRC.DoesNotExist):\n dic['status'] = \"Failed\"\n\n return HttpResponse(json.dumps(dic))\n\n\n@csrf_exempt\ndef ran(request):\n dic = {}\n length = len(Food.objects.all())\n num = random.randrange(0, length)\n food = Food.objects.all()[num]\n dic[\"food_id\"] = food.id\n dic[\"food_name\"] = food.name\n dic[\"status\"] = \"Success\"\n\n return HttpResponse(json.dumps(dic))","sub_path":"django_server/server/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"226312604","text":"#!/usr/bin/env python\nimport os\n\nCOV = None\nif os.environ.get('FLASK_COVERAGE'):\n import coverage\n COV = coverage.coverage(branch=True, include='app/*')\n COV.start()\n\nimport flask_whooshalchemy as wa\nfrom app import create_app, db\nfrom app.models import User, Post, Comment, Widget\nfrom flask_script import Manager, Shell\nfrom flask_migrate import Migrate, MigrateCommand\n\napp = create_app(os.getenv('FLASK_CONFIG') or 'default')\nmanager = Manager(app)\nmigrate = Migrate(app, db)\nwa.whoosh_index(app, Post)\n\n\ndef make_shell_context():\n return {\n 'app': app,\n 'db': db,\n 'User': User,\n 'Post': Post,\n 'Comment': Comment,\n 'Widget': Widget\n }\nmanager.add_command(\"shell\", Shell(make_context=make_shell_context))\nmanager.add_command('db', MigrateCommand)\n\n\n@manager.command\ndef bootstrap():\n \"\"\" Create database with fake data \"\"\"\n db.create_all()\n Post._bootstrap()\n Comment._bootstrap()\n Widget._bootstrap()\n os.environ['ADMIN_EMAIL'] = 'osadchuk.m.01@gmail.com'\n os.environ['ADMIN_PASSWORD'] = '1111'\n db.session.add(User(email='osadchuk.m.01@gmail.com', name='Maxim', password='1111'))\n db.session.commit()\n\n\n@manager.command\ndef test(coverage=False):\n \"\"\"Run the unit tests.\"\"\"\n if coverage and not os.environ.get('FLASK_COVERAGE'):\n import sys\n os.environ['FLASK_COVERAGE'] = '1'\n os.execvp(sys.executable, [sys.executable] + sys.argv)\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n if COV:\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()\n\n\nif __name__ == '__main__':\n 
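    # Expected invocations -- a sketch assuming Flask-Script's usual mapping of
    # @manager.command functions to sub-commands (a boolean argument such as
    # `coverage` becomes a flag):
    #   python manage.py bootstrap
    #   python manage.py test --coverage
    #   python manage.py shell
    #   python manage.py db migrate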
manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"81645857","text":"# Copyright 2017 Brandon T. Gorman\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# BUILT USING PYTHON 3.6.0\n\nimport numpy as np\nimport subprocess\nimport sys\n\nnumber_of_sims = 501\n\npower_load_lb = 0.3388\npower_load_ub = 1.0\n\nplf_array = np.linspace(power_load_lb, power_load_ub, number_of_sims)\n\nfor i in range(0, len(plf_array)):\n\tif (i+1)%25 == 0:\n\t\tprint(i+1)\n\n\tpid = subprocess.call('python analysis_power_n1.py {}'.format(plf_array[i]), shell=True)\n","sub_path":"analysis_power_n1_run.py","file_name":"analysis_power_n1_run.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"121926170","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: oliver\n\"\"\"\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import classification_report,accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nimport pydot\nfrom io import StringIO\nfrom sklearn.tree import export_graphviz\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nimport matplotlib.pyplot as plt\nfrom sklearn.neural_network import MLPClassifier\nfrom imblearn.over_sampling import RandomOverSampler\nfrom collections import Counter\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.feature_selection import RFECV\nfrom sklearn.feature_selection import SelectFromModel\n\n\n\n\n\n#import data into dataframe\n\ndf = pd.read_csv('CaseStudyData.csv')\nrs = 10\nros = RandomOverSampler(random_state=0)\nrus = RandomUnderSampler(random_state=0)\nsmote = SMOTE(random_state = 0)\n\n#-----------------------------------------------------------------------------\n#Task 1.1 KICK proportion\n#-----------------------------------------------------------------------------\ndef Kick_proportion(df):\n ###count bad buy 0 = No 1 = Yes\n proportion = df['IsBadBuy'].value_counts()\n\n kick_proportion = proportion.iloc[1] / len(df)\n \n return kick_proportion\nBADBUY = Kick_proportion(df)\n#-----------------------------------------------------------------------------\n#1.2 Fix Data quality problems\n#-----------------------------------------------------------------------------\n\n#replace ? 
to NAN\ndf = df.replace(['?','#VALUE!'], np.nan) \n\n#nominal colums replace with mode\nnominal_cols = ['Auction','Make','Color','Transmission',\n 'WheelTypeID','WheelType','Nationality','Size',\n 'TopThreeAmericanName','PRIMEUNIT','AUCGUART',\n 'VNST','IsOnlineSale','ForSale']\n\n\n#numeric replace with median\nnum_cols = ['VehYear','VehOdo','MMRAcquisitionAuctionAveragePrice','MMRAcquisitionAuctionCleanPrice',\n 'MMRAcquisitionRetailAveragePrice','MMRAcquisitonRetailCleanPrice',\n 'MMRCurrentAuctionAveragePrice','MMRCurrentAuctionCleanPrice',\n 'MMRCurrentRetailAveragePrice' , 'MMRCurrentRetailCleanPrice',\n 'VehBCost','WarrantyCost']\n#-----------------------------------------------------------------------------\n\ndef preprocess_data(df):\n \n ##Convert Manual to MANUAL\n df['Transmission'] = df['Transmission'].replace('Manual', 'MANUAL')\n\n # Replace USA to American\n df['Nationality'] = df['Nationality'].replace('USA', 'AMERICAN')\n \n ## ForSale\n # Drop 0 and convert to lower case\n df = df.drop(df[df.IsOnlineSale == '0'].index)\n df['ForSale']=df['ForSale'].str.lower()\n \n ## Convert 0.0 to 0 \n ###### -1 drop\n ## drop others\n df['IsOnlineSale'] = df['IsOnlineSale'].astype(str).replace('0.0','0')\n df['IsOnlineSale'] = df['IsOnlineSale'].replace(['1.0'], '1')\n df = df.drop(df[df.IsOnlineSale == '4.0'].index)\n df = df.drop(df[df.IsOnlineSale == '2.0'].index)\n df = df.drop(df[df.IsOnlineSale == '-1.0'].index)\n \n # MMRAcquisitionAuctionAveragePrice , 501 '0' PRICE\n # MMRAcquisitionAuctionCleanPrice, 414 '0' PRICE\n # MMRAcquisitionRetailAveragePrice, 501 '0' PRICE\n # MMRAcquisitonRetailCleanPrice , 500 '0' PRICE\n\n # MMRCurrentAuctionAveragePrice 287 '0' Price\n # MMRCurrentAuctionCleanPrice 206\n # MMRCurrentRetailAveragePrice 287\n # MMRCurrentRetailCleanPrice 287\n # convert str to float to calucate RATIO\n \n price_col = ['MMRAcquisitionAuctionAveragePrice','MMRAcquisitionAuctionCleanPrice',\n 'MMRAcquisitionRetailAveragePrice','MMRAcquisitonRetailCleanPrice',\n 'MMRCurrentAuctionAveragePrice','MMRCurrentAuctionCleanPrice',\n 'MMRCurrentRetailAveragePrice' , 'MMRCurrentRetailCleanPrice']\n # mask for drop prices 0 , 1 \n for i in price_col:\n df[i] = df[i].astype(float)\n mask = df[i] < 100\n df.loc[mask,i] = np.nan\n \n for i in num_cols:\n df[i] = df[i].astype(float)\n \n\n return df\n\n#------------------------------------------------------------------------------\n# using mode for nominal columns and median for numerical columns\n# drop columns\ndef missing_values(df):\n for i in num_cols:\n df[i] = df[i].fillna(df[i].median())\n \n for i in nominal_cols:\n mode = df[i].mode()[0]\n df[i] = df[i].fillna(mode)\n \n #calucate ratio\n df['MMRCurrentRetailRatio'] = df['MMRCurrentRetailAveragePrice'] / df['MMRCurrentRetailCleanPrice']\n df['MMRCurrentRetailRatio'] = df['MMRCurrentRetailRatio'].round(4)\n \n #DROP DATA\n #BAD DATA\n df = df.drop(['PRIMEUNIT','AUCGUART'], axis = 1)\n\n #Same Data\n df = df.drop(['ForSale','IsOnlineSale',], axis = 1)\n \n \n #Irrelevant\n df = df.drop(['PurchaseID','PurchaseTimestamp','Color','WheelType'],axis = 1)\n\n # Convert To Date only\n\n df['PurchaseYear'] = pd.to_datetime(df['PurchaseDate']).dt.strftime('%Y')\n df['PurchaseMonth'] = pd.to_datetime(df['PurchaseDate']).dt.strftime('%m')\n \n col_time = ['PurchaseYear','PurchaseMonth']\n for i in col_time:\n df[i] = df[i].astype(int)\n \n df = df.drop('PurchaseDate', axis = 1)\n\n\n df = pd.get_dummies(df)\n \n return df\n\ndef split_data_for_dt(df):\n y = df['IsBadBuy']\n X = 
df.drop(['IsBadBuy'], axis = 1)\n X_mat = X.as_matrix()\n X_train, X_test, y_train, y_test = train_test_split(X_mat, y,test_size = 0.3, stratify = y, random_state = rs)\n \n #oversample \n X_train, y_train = smote.fit_resample(X_train, y_train)\n \n return X_train, X_test, y_train, y_test\n\ndef calculate_num_leaves(dt):\n n_nodes = dt.tree_.node_count\n ll = dt.tree_.children_left\n rl = dt.tree_.children_right\n count = 0\n for i in range(0,n_nodes):\n if (ll[i] & rl[i]) == -1:\n count = count + 1\n return count\n\n\n# grab feature importances from the model and feature name from the original X\ndef analyse_feature_importance(dm_model, feature_names, n_to_display=20):\n # grab feature importances from the model\n importances = dm_model.feature_importances_\n \n # sort them out in descending order\n indices = np.argsort(importances)\n indices = np.flip(indices, axis=0)\n\n # limit to 20 features, you can leave this out to print out everything\n indices = indices[:n_to_display]\n\n for i in indices:\n print(feature_names[i], ':', importances[i])\n \ndef split_data_for_RF_CNN(df_ready, num_cols):\n df_log = df_ready.copy()\n\n for col in num_cols:\n df_log[col] = df_log[col].apply(lambda x: x+1)\n df_log[col] = df_log[col].apply(np.log)\n\n y_log = df_log['IsBadBuy']\n X_log = df_log.drop(['IsBadBuy'], axis=1)\n X_mat_log = X_log.as_matrix()\n X_train_log, X_test_log, y_train_log, y_test_log = train_test_split(X_mat_log, y_log, test_size=0.3, stratify=y_log, \n random_state=rs)\n X_train_log, y_train_log = ros.fit_resample(X_train_log, y_train_log)\n\n # standardise them again\n scaler_log = StandardScaler()\n X_train_log = scaler_log.fit_transform(X_train_log, y_train_log)\n X_test_log = scaler_log.transform(X_test_log)\n \n return X_train_log, X_test_log,y_train_log, y_test_log\n\n\nprocessed_data = preprocess_data(df)\n\ndf_ready = missing_values(processed_data)\n\n#------------------------------------------------------------------------------\n# DT\n#------------------------------------------------------------------------------\n# change to the dummy\n\n\nX_train, X_test, y_train, y_test = split_data_for_dt(df_ready)\n\n#------------------------------------------------------------------------------\n# Oversamping\n#print('Original dataset shape %s' % Counter(y))\n#print('Resampled dataset shape %s' % Counter(y_res))\n\n#training\ndt = DecisionTreeClassifier(criterion = 'gini', max_depth = 9, min_samples_leaf = 2,random_state=rs)\n\n\ndt.fit(X_train,y_train)\ny_pred2 = dt.predict(X_test)\nprint(\"DT Default Train accuracy:\", dt.score(X_train, y_train))\nprint(\"DT Default Test accuracy:\", dt.score(X_test, y_test))\nprint(classification_report(y_test,y_pred2))\n\n#size of nude\nprint(print(\"Number of nodes: \",dt.tree_.node_count))\n\n# print leaves\nprint(\"The number of leaves is \",calculate_num_leaves(dt))\n##------------------------------------------------------------------------------\n# GIRDSEARCH\n#------------------------------------------------------------------------------\nparams = {'criterion': ['gini', 'entropy'],\n 'max_depth': range(7,10),\n 'min_samples_leaf': range(2,3)}\n\ncv_dt = GridSearchCV(param_grid= params, estimator=DecisionTreeClassifier(random_state=rs), cv=10)\ncv_dt.fit(X_train, y_train)\nprint('strating')\nprint(\"DT GridSearch Train accuracy:\", cv_dt.score(X_train, y_train))\nprint(\"DT GridSearch test accuracy:\", cv_dt.score(X_test, y_test))\n\n # test the best model\ny_pred = cv_dt.predict(X_test)\nprint(classification_report(y_test, y_pred))\n\n# 
print parameters of the best model\nprint(cv_dt.best_params_)\n\n#------------------------------------------------------------------------------\n# Feature Importance and visualising\n#------------------------------------------------------------------------------\nanalyse_feature_importance(dt, X.columns,5)\n\n# visualize\ndotfile = StringIO()\nexport_graphviz(dt, out_file=dotfile, feature_names=X.columns)\ngraph = pydot.graph_from_dot_data(dotfile.getvalue())\ngraph[0].write_png(\"dt_search.png\") # saved in the following file - will return True if successful\n\n#------------------------------------------------------------------------------\n# Visualising relationship between hyperparameters and model performance\n#------------------------------------------------------------------------------\ntest_score = []\ntrain_score = []\n\n# check the model performance for max depth from 2-20\nfor max_depth in range (2, 300):\n model = DecisionTreeClassifier(min_samples_leaf = max_depth, random_state=rs)\n model.fit(X_train, y_train)\n \n test_score.append(model.score(X_test, y_test))\n train_score.append(model.score(X_train, y_train)) \n\n# plot max depth hyperparameter values vs training and test accuracy score\nplt.plot(range(2, 300), train_score, 'b', range(2,300), test_score, 'r')\nplt.xlabel('min_samples_leaf\\nBlue = training acc. Red = test acc.')\nplt.ylabel('accuracy')\nplt.show() \n#import seaborn as sns \n#import matplotlib.pyplot as plt\n#categoryCol = ['PurchaseDate','PurchaseTimestamp'] \n \n#for i in categoryCol:\n# sns.countplot(data=df,x=i,hue=\"IsBadBuy\")\n# plt.show()\n\n#------------------------------------------------------------------------------\n# LR\n#------------------------------------------------------------------------------\n\nX_train_log, X_test_log, y_train_log, y_test_log = split_data_for_RF_CNN(df_ready,num_cols)\nmodel_rfe= LogisticRegression(C = 10, random_state=rs)\n\n# fit it to training data\nmodel_rfe.fit(X_train_log, y_train_log)\nprint(\"LR Default Train accuracy:\", model_rfe.score(X_train_log, y_train_log))\nprint(\"LR Default Test accuracy:\", model_rfe.score(X_test_log, y_test_log))\n\ny_pred = model_rfe.predict(X_test_log)\nprint(classification_report(y_test_log, y_pred))\n#------------------------------------------------------------------------------\n#importantce\n# grab feature importances from the model and feature name from the original X\ncoef = cv_lr.best_estimator_.coef_[0]\nfeature_names = X.columns\n\n# sort them out in descending order\nindices = np.argsort(np.absolute(coef))\nindices = np.flip(indices, axis=0)\n\n# limit to 20 features, you can leave this out to print out everything\nindices = indices[:100]\n\nfor i in indices:\n print(feature_names[i], ':', coef[i])\n\n#------------------------------------------------------------------------------\n# LR GridSearch\n#------------------------------------------------------------------------------\nparams = {'C': [pow(10, x) for x in range(-8, 0)]}\n\n# use all cores to tune logistic regression with C parameter\ncv_lr = GridSearchCV(param_grid=params, estimator=LogisticRegression(random_state=rs), cv=10, n_jobs=-1)\ncv_lr.fit(X_train_log, y_train_log)\n\n# test the best model\nprint(\"LR GridSearch Train accuracy:\", cv_lr.score(X_train_log, y_train_log))\nprint(\"LR GridSearch Test accuracy:\", cv_lr.score(X_test_log, y_test_log))\n\ny_pred = cv_lr.predict(X_test_log)\nprint(classification_report(y_test_log, y_pred))\n\n# print parameters of the best 
model\nprint(cv_lr.best_params_)\n\n##------------------------------------------------------------------------------\n# RFE feature selection\nrfe = RFECV(estimator = LogisticRegression(random_state=rs), cv=10)\nrfe.fit(X_train_log, y_train_log)\n\nprint(\"Original feature set\", X_train.shape[1])\nprint(\"Number of features after elimination\", rfe.n_features_)\n\nX_train_sel = rfe.transform(X_train_log)\nX_test_sel = rfe.transform(X_test_log)\n\nparams = {'C': [pow(10, x) for x in range(-4, 4)]}\n\ncv_rfe = GridSearchCV(param_grid=params, estimator=LogisticRegression(random_state=rs), cv=10, n_jobs=-1)\ncv_rfe.fit(X_train_sel, y_train_log)\n\n# test the best model\nprint(\"LR_RFE Train accuracy:\", cv_rfe.score(X_train_sel, y_train_log))\nprint(\"LR_RFE Test accuracy:\", cv_rfe.score(X_test_sel, y_test_log))\n\ny_pred = cv_rfe.predict(X_test_sel)\nprint(classification_report(y_test, y_pred))\n\n# print parameters of the best model\nprint(cv_rfe.best_params_)\n##------------------------------------------------------------------------------\n# DT feature selection\nparams = {'criterion': ['gini', 'entropy'],\n 'max_depth': range(8,12),\n 'min_samples_leaf': range(2,3)}\n\ndt_sel = GridSearchCV(param_grid= params, estimator=DecisionTreeClassifier(random_state=rs), cv=10)\ndt_sel.fit(X_train_log, y_train_log)\n\nselectmodel = SelectFromModel(dt_sel.best_estimator_, prefit=True)\nX_train_sel_model = selectmodel.transform(X_train_log)\nX_test_sel_model = selectmodel.transform(X_test_log)\n\nprint(X_train_sel_model.shape)\n\nparams = {'C': [pow(10, x) for x in range(-6, 4)]}\n\ncv_lr_dt = GridSearchCV(param_grid=params, estimator=LogisticRegression(random_state=rs), cv=10, n_jobs=-1)\ncv_lr_dt.fit(X_train_sel_model, y_train_log)\n\nprint(\"LR DT Train accuracy:\", cv_lr_dt.score(X_train_sel_model, y_train_log))\nprint(\"LR DT Test accuracy:\", cv_lr_dt.score(X_test_sel_model, y_test_log))\n\n# test the best model\ny_pred = cv_lr_dt.predict(X_test_sel_model)\nprint(classification_report(y_test_log, y_pred))\n\n# print parameters of the best model\nprint(cv_lr_dt.best_params_)\n\n\n#graph plot\ntest_score = []\ntrain_score = []\nparams = [pow(10, x) for x in range(-10, 10)]\n# check the model performance for max depth from 2-20\nfor c in params:\n model1 = LogisticRegression(C= c, random_state=rs)\n model1.fit(X_train_log, y_train_log)\n \n test_score.append(model1.score(X_test_log, y_test_log))\n train_score.append(model1.score(X_train_log, y_train_log))\nfor i in test_score:\n print(i)\nimport matplotlib.pyplot as plt\n\n# plot max depth hyperparameter values vs training and test accuracy score\nplt.plot(params, train_score,\n 'b', params, test_score, 'r')\nplt.xscale('log')\nplt.xlabel('C\\nBlue = training acc. 
Red = test acc.')\nplt.ylabel('accuracy')\nplt.show()\n\n#------------------------------------------------------------------------------\n# MLP \n#------------------------------------------------------------------------------\n\nX_train_log, X_test_log, y_train_log, y_test_log =split_data_for_RF_CNN(df_ready, num_cols)\n\nmlp = MLPClassifier(random_state=rs)\nmlp.fit(X_train_log, y_train_log)\n\nprint(\"Train accuracy:\", mlp.score(X_train_log, y_train_log))\nprint(\"Test accuracy:\", mlp.score(X_test_log, y_test_log))\n\ny_pred = mlp.predict(X_test_log)\nprint(classification_report(y_test_log, y_pred))\n\nprint(model)\n\n#------------------------------------------------------------------------------\n# MLP GridSearch\n#------------------------------------------------------------------------------\nX_train_log.shape\n# X_train features = neurons\nparams = {'hidden_layer_sizes': (105,) , 'alpha': [0.01,0.001, 0.0001, 0.00001]}\n\ncv_mlp = GridSearchCV(param_grid=params, estimator=MLPClassifier(random_state=rs), cv=10, n_jobs=-1)\ncv_mlp.fit(X_train_log, y_train_log)\n\nprint(\"Train accuracy:\", cv_mlp.score(X_train_log, y_train_log))\nprint(\"Test accuracy:\", cv_mlp.score(X_test_log, y_test_log))\n\ny_pred = cv_mlp.predict(X_test_log)\nprint(classification_report(y_test_log, y_pred))\n\nprint(cv_mlp.best_params_)\n\n##------------------------------------------------------------------------------\n# Feature Selection DT CNN\n\n#--------------------\n\nX_train_log, X_test_log, y_train_log, y_test_log =split_data_for_RF_CNN(df_ready,num_cols)\n\nparams = {'criterion': ['gini', 'entropy'],\n 'max_depth':range(11,12,2),\n 'min_samples_leaf': range(2,3)}\n\ndt_sel = GridSearchCV(param_grid=params, estimator=DecisionTreeClassifier(random_state=rs), cv=10)\ndt_sel.fit(X_train_log, y_train_log)\n\n\n\nselectmodel = SelectFromModel(dt_sel.best_estimator_, prefit=True)\nX_train_sel_model = selectmodel.transform(X_train_log)\nX_test_sel_model = selectmodel.transform(X_test_log)\n\nprint(X_train_sel_model.shape)\n\n\n\n\nparams = {'hidden_layer_sizes': (105,) , 'alpha': [0.01,0.001]}\n\ncv_dt_cnn = GridSearchCV(param_grid=params, estimator=MLPClassifier( random_state=rs), cv=10, n_jobs=-1)\ncv_dt_cnn.fit(X_train_sel_model, y_train_log)\n\nprint(\"Train accuracy:\", cv_dt_cnn.score(X_train_sel_model, y_train))\nprint(\"Test accuracy:\", cv_dt_cnn.score(X_test_sel_model, y_test))\n\ny_pred = cv_dt_cnn.predict(X_test_sel_model)\nprint(classification_report(y_test, y_pred))\n\nprint(cv_dt_cnn.best_params_)\n\n##------------------------------------------------------------------------------\n# Feature Selection RF\n#--------------------\nX_train_sel = rfe.transform(X_train_log)\nX_test_sel = rfe.transform(X_test_log)\n\nparams = {'hidden_layer_sizes': [(90,), (92,), (100,), (105,)], 'alpha': [0.0001, 0.00001]}\n\ncv_cnn_rf= GridSearchCV(param_grid=params, estimator=MLPClassifier(random_state=rs), cv=10, n_jobs=-1)\ncv_cnn_rf.fit(X_train_sel, y_train_log)\n\nprint(\"Train accuracy:\", cv_cnn_rf.score(X_train_sel, y_train_log))\nprint(\"Test accuracy:\", cv_cnn_rf.score(X_test_sel, y_test_log))\n\ny_pred = cv_cnn_rf.predict(X_test_sel)\nprint(classification_report(y_test_log, y_pred))\n\nprint(cv_cnn_rf.best_params_)\n\n\n##------------------------------------------------------------------------------\n# ensemble model\n#--------------------\n\ndt_model = cv_dt.best_estimator_\nprint(dt_model)\n\ncnn_model = cv_dt_cnn.best_estimator_\nprint(cnn_model)\n\ncl_model = 
cv_rfe.best_estimator_\nprint(cl_model)\n\n\n\nfrom sklearn.metrics import roc_auc_score\n\ny_pred_proba_dt = dt_model.predict_proba(X_test)\ny_pred_proba_log_reg = cl_model.predict_proba(X_test_sel)\ny_pred_proba_nn = cnn_model.predict_proba(X_test_sel_model)\n\nroc_index_dt = roc_auc_score(y_test, y_pred_proba_dt[:, 1])\nroc_index_log_reg = roc_auc_score(y_test, y_pred_proba_log_reg[:, 1])\nroc_index_nn = roc_auc_score(y_test, y_pred_proba_nn[:, 1])\n\n\n\nprint(\"ROC index on test for DT:\", roc_index_dt)\nprint(\"ROC index on test for logistic regression:\", roc_index_log_reg)\nprint(\"ROC index on test for NN:\", roc_index_nn)\n\n\n\nfrom sklearn.metrics import roc_curve\n\nfpr_dt, tpr_dt, thresholds_dt = roc_curve(y_test, y_pred_proba_dt[:,1])\nfpr_log_reg, tpr_log_reg, thresholds_log_reg = roc_curve(y_test, y_pred_proba_log_reg[:,1])\nfpr_nn, tpr_nn, thresholds_nn = roc_curve(y_test, y_pred_proba_nn[:,1])\nfpr_en, tpr_en,thresholds_en = roc_curve(y_test,y_pred_proba_ensemble[:,1])\n\nimport matplotlib.pyplot as plt\n\nplt.plot(fpr_dt, tpr_dt, label='ROC Curve for DT {:.3f}'.format(roc_index_dt), color='red', lw=0.5)\nplt.plot(fpr_log_reg, tpr_log_reg, label='ROC Curve for Log reg {:.3f}'.format(roc_index_log_reg), color='green', lw=0.5)\nplt.plot(fpr_nn, tpr_nn, label='ROC Curve for NN {:.3f}'.format(roc_index_nn), color='darkorange', lw=0.5)\nplt.plot(fpr_en, tpr_en, label = 'ROC Curve for ensemble {:.3f}'.format(roc_index_ensemble),color = 'blue',lw = 0.5)\n# plt.plot(fpr[2], tpr[2], color='darkorange',\n# lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])\nplt.plot([0, 1], [0, 1], color='navy', lw=0.5, linestyle='--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.0])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic example')\nplt.legend(loc=\"lower right\")\nplt.show()\n\n##------------------------------------------------------------------------------\n# Ensemble Modeling\n#--------------------\n# import the model\nfrom sklearn.ensemble import VotingClassifier\n\n# initialise the classifier with 3 different estimators\nvoting = VotingClassifier(estimators=[('dt', dt_model), ('lr', cl_model), ('nn', cnn_model)], voting='soft')\n\nvoting.fit(X_train_log, y_train_log)\ny_pred_ensemble = voting.predict(X_test_log)\n# evaluate train and test accuracy\nprint(\"Ensemble train accuracy:\", voting.score(X_train_log, y_train_log))\nprint(\"Ensemble test accuracy:\", voting.score(X_test_log, y_test_log))\n\n# evaluate ROC auc score\ny_pred_proba_ensemble = voting.predict_proba(X_test_log)\nroc_index_ensemble = roc_auc_score(y_test_log, y_pred_proba_ensemble[:, 1])\nprint(\"ROC score of voting classifier:\", roc_index_ensemble)\n \n\nprint(classification_report(y_test, y_pred_proba_ensemble))\n\n\n\nprint(\"\\nReport for Ensemble: \\n\",classification_report(y_test_log, y_pred_ensemble))\n\n","sub_path":"identifying-car-quality/casestudy1.py","file_name":"casestudy1.py","file_ext":"py","file_size_in_byte":21595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"74069505","text":"import time\nimport subprocess\n\ntypes = 3\nvCPUs = [1,2,4]\nmem = [4096,8192,15360]\n\npath='/home/clouduser/benchmarks/linpack.tar'\n\ndef create_master(machine_type):\n\tprint('Creating master docker-machine')\n\tsubprocess.run('docker-machine create --driver virtualbox --virtualbox-cpu-count %s --virtualbox-memory %s master' 
%(vCPUs[machine_type],mem[machine_type]),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\tsubprocess.run('docker-machine env master',shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\tsubprocess.run('eval \\\"$(docker-machine env master)\\\"',shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\tprint('Docker master machine created')\n\ndef initialize():\n\tsubprocess.run('docker-machine scp %s master:/home/docker/' %(path),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\tsubprocess.run('docker-machine ssh master docker image load -i linpack.tar',shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\tprint('Initialization Completed')\n\ndef run_process():\n\tsubprocess.run('docker-machine ssh master docker run -i pathikrit/linpack',shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\tcmd_to_exec = 'docker-machine ssh master docker logs \\'$(docker ps -l -q)\\''\n\tresponse = subprocess.Popen([cmd_to_exec],shell=True, universal_newlines=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\treturn response\n\n# machine_type is passed in explicitly; the previous version read a loop\n# variable `x` that was local to main() and raised a NameError here.\ndef write_result(response, machine_type):\n\twith open(\"result_linpack_%s_%s.txt\" %(vCPUs[machine_type],mem[machine_type]), 'w') as txt_file:\n\t txt_file.write(\"\".join(response.stdout.readlines()))\n\tprint('Speed test done for type --- %s' %machine_type)\n\ndef cleanup_process():\n\tsubprocess.run('docker-machine rm -f master',shell=True)\n\tprint('Master docker-machine deleted')\n\ndef main():\n\tfor x in range(types):\n\t\tcreate_master(x)\n\t\tinitialize()\n\t\tresponse = run_process()\n\t\twrite_result(response, x)\n\t\tcleanup_process()\n\nif __name__ == '__main__':\n main()","sub_path":"Speed/Linpack/LocalDocker/SL_linpack.py","file_name":"SL_linpack.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"548275992","text":"import copy, logging, math, os\r\nfrom pprint import pprint, pformat\r\nimport config, util\r\nimport maya_helpers as mh\r\nfrom dson import DSON\r\n\r\nfrom pymel import core as pm\r\nimport pymel.core.datatypes as dt\r\nimport pymel\r\nfrom maya import mel\r\n\r\nlog = logging.getLogger('DSONImporter')\r\n\r\ndef _create_rotation_rbf(name):\r\n \"\"\"\r\n Create an RBF solver. This approximates the rotation on an input plane, with clean falloff\r\n before we flip at 180 degrees.\r\n #\r\n At 1,0, we're at rest. The vector is in its original position, so the angle is 0.\r\n At 0,1, we've rotated 90 degrees. At 0,-1 we've rotated -90 degrees.\r\n We'll place a number of samples around the unit circle, keying them to the angle.\r\n #\r\n We stop before 180 degrees, since 180 degrees could be either 180 or -180 degrees.\r\n There's no way to figure this out, and if we give duplicate inputs to the solver\r\n we'll end up with an unsolvable key set. 
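As a worked example with the values used below (a -165..165 degree range and
6 intervals per side): step = 330 / 12 = 27.5 degrees, so the solver is keyed
with 13 samples at -165, -137.5, ..., 137.5, 165 degrees, each stored in
radians against its (cos, sin, 0) position on the unit circle.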
We stop at 165 degrees in either direction.\r\n If the rotation goes beyond that it'll flip.\r\n \"\"\"\r\n\r\n mh.load_plugin('zRBF.py')\r\n\r\n rbf_node = pm.createNode('zRBF', n=name)\r\n min_angle = -165\r\n max_angle = 165\r\n intervals = 6\r\n # Use float division; under Python 2 integer division truncated the step to 27\r\n # degrees, so the outermost keys landed at 162 rather than the documented 165.\r\n step = (max_angle - min_angle) / (intervals*2.0)\r\n for idx, interval in enumerate(xrange(-intervals, intervals+1)):\r\n angle = step * interval\r\n angle = angle * math.pi / 180.0\r\n x = math.cos(angle)\r\n y = math.sin(angle)\r\n point = (x, y, 0)\r\n\r\n value_attr = rbf_node.attr('value').elementByLogicalIndex(idx)\r\n pm.setAttr(value_attr.attr('value_Position'), point)\r\n pm.setAttr(value_attr.attr('value_Value'), angle)\r\n return rbf_node\r\n\r\ndef load_plugin(plugin):\r\n # Don't call loadPlugin if the plugin is already loaded. Even though it doesn't do anything,\r\n # it takes about half a second.\r\n if not pm.pluginInfo(plugin, q=True, loaded=True):\r\n pm.loadPlugin(plugin, quiet=True)\r\n\r\n if not pm.pluginInfo(plugin, q=True, registered=True):\r\n raise RuntimeError('Plugin \"%s\" isn\\'t available.' % plugin)\r\n\r\n_twist_rig_map = {\r\n # The key is the roll joint. aim_vector is the vector down the roll joint.\r\n # The up vector is the axis that receives most of the rotation (other than aim_vector).\r\n # For example, the arm joints are down the X axis, and the wrist receives the\r\n # least motion on the Y axis, so we use the Z axis for the wrist's up_vector.\r\n 'lShldrBend': {\r\n 'twist_joint_asset_id': 'lShldrTwist',\r\n 'end_joint_asset_id': 'lForearmBend',\r\n 'aim_vector': (1,0,0),\r\n 'up_vector': (0,1,0),\r\n 'roll_orient_joint': 'xyz',\r\n 'roll_orient_sao': 'yup',\r\n },\r\n 'rShldrBend': {\r\n 'twist_joint_asset_id': 'rShldrTwist',\r\n 'end_joint_asset_id': 'rForearmBend',\r\n 'aim_vector': (1,0,0),\r\n 'up_vector': (0,1,0),\r\n 'roll_orient_joint': 'xyz',\r\n 'roll_orient_sao': 'yup',\r\n },\r\n 'lForearmBend': {\r\n 'twist_joint_asset_id': 'lForearmTwist',\r\n 'end_joint_asset_id': 'lHand',\r\n 'aim_vector': (1,0,0),\r\n 'up_vector': (0,0,1),\r\n 'roll_orient_joint': 'xyz',\r\n 'roll_orient_sao': 'yup',\r\n },\r\n 'rForearmBend': {\r\n 'twist_joint_asset_id': 'rForearmTwist',\r\n 'end_joint_asset_id': 'rHand',\r\n 'aim_vector': (1,0,0),\r\n 'up_vector': (0,0,1),\r\n 'roll_orient_joint': 'xyz',\r\n 'roll_orient_sao': 'yup',\r\n },\r\n 'lThighBend': {\r\n 'twist_joint_asset_id': 'lThighTwist',\r\n 'end_joint_asset_id': 'lShin',\r\n 'aim_vector': (0,1,0),\r\n 'up_vector': (1,0,0),\r\n\r\n # This will change the orientation of the joint. Maya's joint orient command doesn't\r\n # give a way to orient a joint away from the child.\r\n 'roll_orient_joint': 'yzx',\r\n 'roll_orient_sao': 'zup',\r\n },\r\n 'rThighBend': {\r\n 'twist_joint_asset_id': 'rThighTwist',\r\n 'end_joint_asset_id': 'rShin',\r\n 'aim_vector': (0,1,0),\r\n 'up_vector': (1,0,0),\r\n 'roll_orient_joint': 'yxz',\r\n 'roll_orient_sao': 'xup',\r\n },\r\n 'neckLower': {\r\n 'twist_joint_asset_id': 'neckUpper',\r\n 'end_joint_asset_id': 'head',\r\n 'aim_vector': (0,1,0),\r\n 'up_vector': (1,0,0),\r\n 'roll_orient_joint': 'yzx',\r\n 'roll_orient_sao': 'zup',\r\n },\r\n}\r\n\r\ndef create_twist_rigs(env):\r\n if not config.get('create_twist_rigs'):\r\n return\r\n\r\n if pymel.versions.current() < 201650:\r\n # Prior to 2016 ext2, changing preBindMatrix on a skinCluster didn't take effect, which\r\n # leads to this twisting the mesh out of shape.\r\n log.warning('Not creating twist joint rigs. 
Please update to at least Maya 2016 EXT2.')\r\n return\r\n\r\n log.debug('Creating twist joint rigs...')\r\n\r\n for dson_node in env.scene.depth_first():\r\n if dson_node.node_type != 'figure':\r\n continue\r\n\r\n # If these figure is conforming, don't change it. The twist rigs will go\r\n # on the target skeleton.\r\n if 'conform_target' in dson_node:\r\n continue\r\n\r\n for bone_node in dson_node._get_nodes_within_figure():\r\n _create_twist_rig(bone_node)\r\n\r\ndef get_twist_rig_asset_names():\r\n \"\"\"\r\n Return a list of asset names that will have twist rigs applied.\r\n \"\"\"\r\n if not config.get('create_twist_rigs'):\r\n return []\r\n\r\n result = []\r\n for roll_joint_asset_name, part in _twist_rig_map.iteritems():\r\n result.append(roll_joint_asset_name)\r\n result.append(part['twist_joint_asset_id'])\r\n result.append(part['end_joint_asset_id'])\r\n return result\r\n \r\ndef _create_twist_rig(dson_node):\r\n \"\"\"\r\n Some models have an unusual roll and twist joint setup: they put elbow rotation\r\n on the shoulder twist joint, and put elbow rotation control on the shoulder control\r\n as an alias (which we don't import). All twisting is on the twist joint; there's\r\n no weighting to put some of the rotation on any other joints. The deformations this\r\n gives are fine, but it's a bit weird, so clean it up.\r\n\r\n Reparent the elbow directly under the shoulder, so the twist joint is by itself,\r\n and use an aim constraint to make the twist joint follow the elbow. This way,\r\n the elbow joint can be rotated normally, and the twist joint will follow.\r\n\r\n This depends on the structure of the skeleton and should be turned off with skeletons\r\n that use different structures.\r\n \"\"\"\r\n if dson_node.node_type != 'bone':\r\n return\r\n\r\n # We use the asset to figure out which bone is which.\r\n if not dson_node.asset:\r\n return\r\n\r\n twist_rig_info = _twist_rig_map.get(dson_node.asset.get_value('name'))\r\n if not twist_rig_info:\r\n return\r\n\r\n # The roll joint is the joint we're on.\r\n roll_joint = dson_node\r\n roll_joint_maya_node = roll_joint.maya_node\r\n\r\n # Find the twist joint.\r\n twist_joint = roll_joint.find_asset_name(twist_rig_info['twist_joint_asset_id'])\r\n twist_joint_maya_node = twist_joint.maya_node\r\n\r\n # Find the end joint, which is the joint after the twist joint.\r\n end_joint = roll_joint.find_asset_name(twist_rig_info['end_joint_asset_id'])\r\n end_joint_maya_node = end_joint.maya_node\r\n\r\n # Don't apply this if there are incoming connections to nodes we're going to change.\r\n # It's OK for there to be outgoing connections. 
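# (In pymel terms, a minimal sketch of that distinction: incoming plugs are
# pm.listConnections(plug, s=True, d=False), while outgoing plugs are
# pm.listConnections(plug, s=False, d=True); only the former block the rig.)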
For example, we don't want a \"knee\r\n # bend\" constraint that targets the thigh, since we're going to put constraints on\r\n # the thigh, but corrective modifiers that read the position of the thigh are fine.\r\n # These twist joints are intended to make external rigging easier, and if you're\r\n # putting a rig on the figure, you want all controls that take over joints disabled\r\n # anyway.\r\n def has_incoming_connections(node):\r\n attrs_to_check = (\r\n 'translate', 'translateX', 'translateY', 'translateZ',\r\n 'rotate', 'rotateX', 'rotateY', 'rotateZ',\r\n 'scale', 'scaleX', 'scaleY', 'scaleZ')\r\n nodes = [node.attr(attr) for attr in attrs_to_check]\r\n connections = pm.listConnections(nodes, s=True, d=False, p=True)\r\n if connections:\r\n log.warning('Not creating twist rig for %s because it has incoming connections: %s' % (node, connections))\r\n return True\r\n return False\r\n\r\n if has_incoming_connections(roll_joint_maya_node) or has_incoming_connections(twist_joint_maya_node) or has_incoming_connections(end_joint_maya_node):\r\n return\r\n\r\n # The roll joint is oriented towards the twist joint, but we need it oriented towards\r\n # the end joint. We don't want to reorient the joint, since it'll break anything constrained\r\n # to it. Instead, create a new joint to take its place, and hide and parent the skinned\r\n # roll joint to the new joint. That lets us reorient the new joint however we want.\r\n roll_joint_name = roll_joint_maya_node.nodeName()\r\n pm.rename(roll_joint_maya_node, '%s_Skinned' % roll_joint_name)\r\n\r\n # Create the new roll joint, and position it in the same place as the skinned one.\r\n new_roll_joint = pm.duplicate(roll_joint_maya_node, parentOnly=True, n=roll_joint_name)[0]\r\n pm.reorder(new_roll_joint, front=True)\r\n\r\n # Mark the roll joint that we're controlling internal.\r\n mh.config_internal_control(roll_joint_maya_node)\r\n roll_joint_maya_node.attr('visibility').set(0)\r\n\r\n # The roll joint may have rotations on it from straighten_poses in addition to jointOrient.\r\n # Freeze the rotations, or orient joints won't work (\"has non-zero rotations\"). This\r\n # is only freezing this control, not the underlying joint.\r\n pm.makeIdentity(new_roll_joint, apply=True, t=0, r=1, s=0, n=0, pn=1)\r\n\r\n # Create a copy of the end joint to orient towards. We need to freeze rotations on this too,\r\n # or pm.joint will spew warnings about non-zero rotations. (That looks like a bug, since we're\r\n # not telling it to orient that joint.)\r\n temporary_joint = pm.createNode('joint', n='temp')\r\n pm.parent(temporary_joint, end_joint_maya_node, r=True)\r\n pm.parent(temporary_joint, new_roll_joint)\r\n pm.makeIdentity(temporary_joint, apply=True, t=0, r=1, s=0, n=0, pn=1)\r\n\r\n # Orient the roll joint towards the end joint.\r\n pm.joint(new_roll_joint, e=True, orientJoint=twist_rig_info['roll_orient_joint'], secondaryAxisOrient=twist_rig_info['roll_orient_sao'])\r\n pm.delete(temporary_joint)\r\n\r\n def create_rotation_node():\r\n # Don't do this if there aren't any connections to rotation.\r\n attrs_to_check = ('rotate', 'rotateX', 'rotateY', 'rotateZ')\r\n for attr in attrs_to_check:\r\n if pm.listConnections(end_joint_maya_node.attr(attr), s=False, d=True, p=True):\r\n break\r\n else:\r\n return\r\n\r\n # Create a placeholder node. This follows the end joint around and is parented to the twist joint,\r\n # so it represents the rotation of the end joint relative to the twist joint. 
We'll move outgoing\r\n # connections from the end joint's rotation to this. That way, even though we'll be reparenting the\r\n # end joint to under the bend joint, other nodes still see rotation relative to twist, like they\r\n # did before.\r\n rotation_output_node = pm.createNode('joint', n=end_joint_maya_node.nodeName() + '_RelativeRotation', p=end_joint_maya_node)\r\n mh.config_internal_control(rotation_output_node)\r\n pm.parent(rotation_output_node, twist_joint_maya_node)\r\n\r\n for attr in attrs_to_check:\r\n for connected_attr in pm.listConnections(end_joint_maya_node.attr(attr), s=False, d=True, p=True):\r\n pm.connectAttr(rotation_output_node.attr(attr), connected_attr, force=True)\r\n\r\n pm.parentConstraint(end_joint_maya_node, rotation_output_node, mo=False)\r\n\r\n create_rotation_node()\r\n\r\n # Move the end joint out from inside the twist joint into the new roll joint.\r\n pm.parent(end_joint_maya_node, new_roll_joint)\r\n\r\n # Put the children of the old roll joint under the new roll joint.\r\n for child in pm.listRelatives(roll_joint_maya_node, children=True):\r\n pm.parent(child, new_roll_joint)\r\n\r\n # Constrain the old roll joint to the new one. We're keeping this joint around unchanged,\r\n # since there may be other things constrained to it. For example, clothing conforms often\r\n # connect to these joints.\r\n pm.parentConstraint(new_roll_joint, roll_joint_maya_node, maintainOffset=True)\r\n pm.scaleConstraint(new_roll_joint, roll_joint_maya_node, maintainOffset=True)\r\n\r\n # Turn off segmentScaleCompensate on the original roll joint that we've scale constrained.\r\n # We have it on so modifiers work, but modifiers are now pointing at our new joint, and\r\n # scale constraints don't work correctly with segmentScaleCompensate.\r\n roll_joint_maya_node.attr('segmentScaleCompensate').set(0)\r\n\r\n # For some reason, the YZX rotate order on some thigh twist joints causes the aim constraint\r\n # to flip out, and changing it to XYZ fixes it. The rotate order on the twist joint shouldn't\r\n # matter since we should only ever be rotating it on one axis anyway.\r\n pm.setAttr(twist_joint_maya_node.attr('rotateOrder'), 0)\r\n\r\n pm.aimConstraint(end_joint_maya_node, twist_joint_maya_node, mo=True, worldUpType='objectrotation', worldUpObject=end_joint_maya_node,\r\n aimVector=twist_rig_info['aim_vector'],\r\n upVector=twist_rig_info['up_vector'], worldUpVector=twist_rig_info['up_vector'])\r\n\r\n # Bump the twist joint down one in the outliner, so pressing down from the parent joint\r\n # goes to the end joint and not the twist joint.\r\n pm.reorder(end_joint_maya_node, relative=1)\r\n\r\n # Put the twist joint inside two empty groups. This prevents a bone from being\r\n # drawn from the roll joint to the twist joint, since Maya only searches up two\r\n # parenting levels for a parent joint.\r\n group_inner = pm.group(twist_joint_maya_node, name='%s_Grp1' % twist_joint_maya_node.nodeName())\r\n group_outer = pm.group(group_inner, name='%s_Grp' % twist_joint_maya_node.nodeName())\r\n mh.config_internal_control(group_outer)\r\n pm.setAttr(group_outer.attr('visibility'), 0)\r\n\r\n # Point the roll joint node at the new roll joint.\r\n roll_joint.maya_node = new_roll_joint\r\n\r\ndef straighten_poses(env):\r\n \"\"\"\r\n Figures are generally in a relaxed T-pose. 
Move figures to a full T-pose.\r\n Note that this doesn't bring the arms parallel to the X axis.\r\n \"\"\"\r\n\r\n if not config.get('straighten_pose'):\r\n return\r\n\r\n log.debug('Straightening poses')\r\n for dson_node in env.scene.depth_first():\r\n if dson_node.node_type != 'figure':\r\n continue\r\n\r\n # Ignore eg. SkinBindings.\r\n if dson_node.node_source != 'node':\r\n continue\r\n if 'conform_target' in dson_node:\r\n continue\r\n\r\n # The feet in bind pose are usually pointing slightly outwards. Aim them along\r\n # the Z axis, so they're pointing straight ahead. This is important for HIK floor\r\n # contact, since its contact planes assume that feet are aligned when in bind pose.\r\n # The foot joints aren't aligned to the XZ plane, so there's no axis for us to simply\r\n # align to zero. Instead, look at the world space angle going down to the next joint,\r\n # and rotate by the inverse of that. Do the same for the arm joints and hands. We\r\n # want the hands to be square with the world, so floor contact planes are aligned\r\n # with HIK later.\r\n joints = [\r\n # Joint to aim End joints Cross, aim, rotate axis Invert\r\n ('lShldrBend', ('lForearmBend',), (1, 0, 2), False),\r\n ('rShldrBend', ('rForearmBend',), (1, 0, 2), False),\r\n ('lForeArm', ('lHand',), (1, 0, 2), False),\r\n ('rForeArm', ('rHand',), (1, 0, 2), False),\r\n ('lForeArm', ('lHand',), (2, 0, 1), True),\r\n ('rForeArm', ('rHand',), (2, 0, 1), True),\r\n\r\n # Aim the hand towards the average of the middle and ring finger.\r\n #('lHand', ('lMid1', 'lRing1'), (2, 0, 1), True),\r\n #('rHand', ('rMid1', 'rRing1'), (2, 0, 1), True),\r\n ('lHand', ('lRing1', ), (2, 0, 1), True),\r\n ('rHand', ('rRing1', ), (2, 0, 1), True),\r\n ('rFoot', ('rMetatarsals',), (0, 2, 1), False),\r\n ('lFoot', ('lMetatarsals',), (0, 2, 1), False),\r\n ]\r\n\r\n # First, find and check all of the joints. If there are problem with any joints, we\r\n # won't apply any changes.\r\n for aim_joint, end_joints, (cross_axis_idx, aim_axis_idx, rotate_axis_idx), invert in joints:\r\n def make_rotations():\r\n total_angle = 0\r\n try:\r\n j1 = dson_node.find_asset_name(aim_joint)\r\n except KeyError as e:\r\n log.warning('Couldn\\'t straighten %s %s: %s', dson_node.node_id, aim_joint, e.message)\r\n return\r\n\r\n # Average the angle towards each of the target joints.\r\n for end_joint in end_joints:\r\n try:\r\n j2 = dson_node.find_asset_name(end_joint)\r\n except KeyError as e:\r\n log.warning('Couldn\\'t straighten %s %s: %s', dson_node.node_id, aim_joint, e.message)\r\n return\r\n\r\n pos1 = pm.xform(j1.maya_node, q=True, ws=True, t=True)\r\n pos2 = pm.xform(j2.maya_node, q=True, ws=True, t=True)\r\n if pos2[aim_axis_idx] < pos1[aim_axis_idx]:\r\n pos1, pos2 = pos2, pos1\r\n\r\n angle = math.atan2(pos2[cross_axis_idx] - pos1[cross_axis_idx], pos2[aim_axis_idx] - pos1[aim_axis_idx])\r\n angle = angle * 180 / math.pi \r\n\r\n if invert:\r\n angle = -angle\r\n total_angle += angle\r\n\r\n total_angle /= len(end_joints)\r\n\r\n # If the angle is too wide, something is probably wrong. 
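# (Worked example for the rFoot entry above: a metatarsal 2 units outward on X
# and 10 units ahead on Z of the ankle gives atan2(2, 10) ~= 11.3 degrees, well
# inside the +/-45 degree window checked below, so the foot is counter-rotated
# by -11.3 degrees about Y.)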
Stop rather than twisting\r\n # a figure into a weird shape.\r\n if total_angle < -45 or total_angle > 45:\r\n raise RuntimeError('Unexpected angle while orienting joint %s to %s: %f' % (j1.maya_node, j2.maya_node, total_angle))\r\n\r\n rotate = [0,0,0]\r\n rotate[rotate_axis_idx] = -total_angle\r\n pm.xform(j1.maya_node, ws=True, r=True, ro=rotate)\r\n\r\n make_rotations()\r\n\r\n","sub_path":"dsonimport/rigging.py","file_name":"rigging.py","file_ext":"py","file_size_in_byte":19164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"19554657","text":"# Copyright 2018 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Reconstructs activation of a sentence where one token activation is changed.\"\"\"\nimport json\nimport os\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport sys\nsys.path.insert(1, 'helpers')\nimport activation_helper\nimport attention_mask_helper\nimport embeddings_helper\nimport folder_helper\nimport inference_helper\nimport one_hots_helper\nimport optimization_helper\nimport output_helper\nimport setup_helper\nimport tokenization_helper\n\n# Command Line Arguments\nFLAGS = flags.FLAGS\nflags.DEFINE_string('sentence', u'i hate kickshaws',\n 'the sentence to start with')\nflags.DEFINE_string('sentence2', u'', 'an optional second sentence')\nflags.DEFINE_string('output_dir', None,\n 'the output directory where the results will be '\n 'written.')\nflags.DEFINE_string('change_activation_dir', None, 'the file that holds the '\n 'activation that we change a word to')\nflags.DEFINE_string('change_activation_file', None, 'the file that holds the '\n 'activation that we change a word to')\nflags.DEFINE_string('model_config', 'bert-base-uncased', 'the name of the model '\n 'configuration to load')\nflags.DEFINE_string('target', None, 'target of the shifted activation process')\nflags.DEFINE_integer('num_iterations', 10, 'number of optimization steps')\nflags.DEFINE_integer('layer_id', 5, 'layer to optimize activation for')\nflags.DEFINE_integer('word_id', None, 'word to optimize activation for')\nflags.DEFINE_integer('neuron_id', None, 'neuron to optimize activation for')\nflags.DEFINE_integer('change_id', 1, 'token activation that is to be changed')\nflags.DEFINE_integer('dream_start', 1, 'first token that is to be changed in '\n 'the sentence')\nflags.DEFINE_integer('dream_end', 0, 'last token that is to be changed in the '\n 'sentence')\nflags.DEFINE_integer('warmup', 200, 'how long before the temperature of the '\n 'softmax gets adjusted')\nflags.DEFINE_integer('metrics_frequency', 250, 'frequency in which results are '\n 'saved')\nflags.DEFINE_float('start_temp', 2.0, 'start-temperature of the softmax')\nflags.DEFINE_float('end_temp', 0.1, 'end-temperature of the softmax')\nflags.DEFINE_float('anneal', 0.9995, 'annealing factor for the 
temperature')\nflags.DEFINE_float('learning_rate', 0.1, 'learning rate of the optimizer')\nflags.DEFINE_bool('gumbel', False, 'use gumbel noise with the softmax')\nflags.DEFINE_bool('write_top_k', False, 'write top words for each iteration')\nflags.DEFINE_integer('k', 10, 'number of top ranked words to store for each'\n 'iteration')\n\n\ndef change_target_activation(target_activation, device):\n \"\"\"Change the target activation to the desired one.\n\n Args:\n target_activation: The old target activation to be changed.\n device: Device to load variables to.\n\n Returns:\n target_activation: The new, changed target activation to optimize for.\n \"\"\"\n change_path = os.path.join(FLAGS.change_activation_dir, str(FLAGS.layer_id),\n FLAGS.change_activation_file)\n change_file = open(change_path, 'rb')\n change_np = np.load(change_file)\n change_tensor = torch.tensor(change_np)\n change_tensor = change_tensor.to(device)\n target_activation[FLAGS.change_id] = change_tensor\n return target_activation\n\n\ndef deep_dream(data, results, params, device, tokenizer, embedding_map, model):\n \"\"\"Deep dream to a target activation.\n\n Args:\n data: Holds the top-k values.\n results: Holds the results of the run.\n params: Holds the parameters of the run.\n device: Where to place new variables.\n tokenizer: Used to convert between ids and tokens.\n embedding_map: Holding all BERT token embeddings.\n model: The model used for this dream.\n \"\"\"\n # An embedding for the tokens is obtained\n tokens = tokenization_helper.tokenize_input_sentence(\n tokenizer, FLAGS.sentence, FLAGS.sentence2)\n tokens_tensor, segments_tensor = tokenization_helper.tensors_from_tokens(\n tokenizer, tokens, device)\n _, pos_embeddings, sentence_embeddings = embeddings_helper.get_embeddings(\n tokens_tensor, segments_tensor, model)\n # Correct the end of the dream if necessary\n if FLAGS.dream_end == 0:\n FLAGS.dream_end = len(tokens) - 2\n # Write the parameters to a file\n output_helper.get_params(params, FLAGS, tokens)\n # Get the smooth one-hot vector that is to be optimized, split into static and\n # modifiable parts\n before, modify, after = one_hots_helper.get_one_hots(\n tokens_tensor.data.cpu().numpy(), FLAGS.dream_start, FLAGS.dream_end,\n device)\n modify = torch.randn(modify.shape, device=device, requires_grad=True)\n # Obtain the default attention mask to be able to run the model\n att_mask = attention_mask_helper.get_attention_mask(tokens_tensor)\n # The optimizer used to modify the input embedding\n optimizer = torch.optim.Adam([modify], lr=FLAGS.learning_rate)\n # Init temperature for Gumbel\n temperature = torch.tensor(FLAGS.start_temp, device=device,\n requires_grad=False)\n # Obtain the target activation we try to optimize towards.\n target_ids = tokens_tensor.data.cpu().numpy()[0]\n target_activation = activation_helper.get_ids_activation(\n target_ids, pos_embeddings, sentence_embeddings, att_mask,\n FLAGS.dream_start, FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id,\n FLAGS.layer_id, False, embedding_map, model, device)\n target_activation = change_target_activation(target_activation, device)\n target_activation = target_activation.clone().detach().requires_grad_(False)\n # Obtain the properties of the initial embedding\n one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature,\n FLAGS.gumbel)\n max_values, token_ids = one_hots_helper.get_tokens_from_one_hots(\n torch.cat([before, one_hots_sm, after], dim=1))\n numpy_max_values = max_values.data.cpu().numpy()\n ids = 
token_ids.data.cpu().numpy()[0]\n tokens = tokenizer.convert_ids_to_tokens(ids)\n ids_activation = activation_helper.get_ids_activation(\n ids, pos_embeddings, sentence_embeddings, att_mask, FLAGS.dream_start,\n FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id, False,\n embedding_map, model, device)\n # Write the initial stuff for the results file\n output_helper.init_results(results)\n\n # Optimize the embedding for i iterations and update the properties to\n # evaluate the result in each step\n for i in range(FLAGS.num_iterations):\n # Do an optimization step\n max_vals, token_ids, loss = optimization_helper.step_towards_activation(\n optimizer, before, modify, after, pos_embeddings,\n sentence_embeddings, att_mask, temperature, i, FLAGS.gumbel,\n FLAGS.write_top_k, FLAGS.k, data, FLAGS.word_id, FLAGS.neuron_id,\n FLAGS.layer_id, FLAGS.dream_start, FLAGS.dream_end, tokenizer,\n embedding_map, model, target_activation)\n # Write the properties of the last step\n ids_loss = F.mse_loss(ids_activation, target_activation)\n if (i % FLAGS.metrics_frequency) == 0:\n output_helper.get_metrics(\n tokens, i, temperature, numpy_max_values, results,\n loss=loss, ids_loss=ids_loss)\n # Set the numpy max values\n numpy_max_values = max_vals.data.cpu().numpy()\n # Obtain the activation property for the id-array that would result from the\n # optimization\n ids = token_ids.data.cpu().numpy()[0]\n tokens = tokenizer.convert_ids_to_tokens(ids)\n # Calculate the activation using the highest scoring words\n ids_activation = activation_helper.get_ids_activation(\n ids, pos_embeddings, sentence_embeddings, att_mask, FLAGS.dream_start,\n FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id, False,\n embedding_map, model, device)\n # Check if the temperature needs to decrease\n if i > FLAGS.warmup:\n temperature = torch.clamp(temperature * FLAGS.anneal, FLAGS.end_temp)\n\n # Calculate the final activation just as before, but without backprop\n if (FLAGS.num_iterations % FLAGS.metrics_frequency) == 0:\n with torch.no_grad():\n one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature,\n FLAGS.gumbel)\n fused_one_hots = torch.cat([before, one_hots_sm, after], dim=1)\n if FLAGS.write_top_k:\n output_helper.write_top_ks(fused_one_hots, FLAGS.k,\n FLAGS.num_iterations, data,\n FLAGS.dream_start, FLAGS.dream_end,\n tokenizer)\n layers = inference_helper.run_inference(before, one_hots_sm, after,\n pos_embeddings,\n sentence_embeddings, att_mask,\n embedding_map, model)\n activation = activation_helper.get_activations(\n layers, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id)\n loss = F.mse_loss(activation, target_activation)\n ids_loss = F.mse_loss(ids_activation, target_activation)\n output_helper.get_metrics(\n tokens, FLAGS.num_iterations, temperature, numpy_max_values, results,\n loss=loss, ids_loss=ids_loss)\n\n\ndef reconstruct_changed_activation(device, tokenizer, emb_map, model):\n \"\"\"Reconstruct the activation for a given sentence after they have been shifted.\n\n Args:\n device: The device to use for training the model.\n tokenizer: Used to convert between sentences, tokens, and ids.\n emb_map: Map containing all the pretrained embeddings of the model.\n model: BERT model used for the dreaming process.\n \"\"\"\n data = []\n results = {}\n params = {}\n # Create a folder for this experiment\n layer_dir = os.path.join(FLAGS.output_dir, str(FLAGS.layer_id))\n folder_helper.make_folder_if_not_exists(layer_dir)\n # Actually do the optimization\n deep_dream(data, results, 
params, device, tokenizer, emb_map, model)\n # If the top k file is to be written, write it\n if FLAGS.write_top_k:\n for i in range(len(data)):\n top_k_path = os.path.join(layer_dir, 'top_k' + str(i) + '.json')\n top_k_file = open(top_k_path, 'w')\n json.dump(data[i], top_k_file)\n top_k_file.close()\n output_helper.write_results(layer_dir, results, params,\n 'reconstruct_changed')\n\n\ndef main(_):\n tokenizer, model, device, emb_map = setup_helper.setup_uncased(\n FLAGS.model_config)\n # Make a directory for the current run\n folder_helper.make_folder_if_not_exists(FLAGS.output_dir)\n # Start the run\n reconstruct_changed_activation(device, tokenizer, emb_map, model)\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('output_dir')\n flags.mark_flag_as_required('change_activation_dir')\n flags.mark_flag_as_required('change_activation_file')\n flags.mark_flag_as_required('target')\n app.run(main)\n","sub_path":"text-dream/python/dream/reconstruct_changed_activation.py","file_name":"reconstruct_changed_activation.py","file_ext":"py","file_size_in_byte":11710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"464397521","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 21 09:07:18 2021\r\n\r\n@author: fdm\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\nimport glob\r\nimport ntpath\r\nimport os\r\nimport re\r\nfrom PIL import Image\r\nimport time\r\nfrom tool.predictor import Predictor\r\nfrom tool.config import Cfg\r\nconfig_all = Cfg.load_config_from_file('./train_config/seq2seq_2906_pretrain_32_10k.yml')\r\nconfig_all['weights'] = './checkpoint/seq2seq_2906_pretrain_32_10k.pth'\r\nconfig_all['cnn']['pretrained'] = False\r\nconfig_all['device'] = 'cuda:1'\r\n# config_all['device'] = 'cuda:1'\r\nconfig_all['predictor']['beamsearch'] = False\r\n# config_all['vocab'] = '''aAàÀảẢãÃáÁạẠăĂằẰẳẲẵẴắẮặẶâÂầẦẩẨẫẪấẤậẬbBcCdDđĐeEèÈẻẺẽẼéÉẹẸêÊềỀểỂễỄếẾệỆfFgGhHiIìÌỉỈĩĨíÍịỊjJkKlLmMnNoOòÒỏỎõÕóÓọỌôÔồỒổỔỗỖốỐộỘơƠờỜởỞỡỠớỚợỢpPqQrRsStTuUùÙủỦũŨúÚụỤưƯừỪửỬữỮứỨựỰvVwWxXyYỳỲỷỶỹỸýÝỵỴzZ0125456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ '''\r\ndetector_old = Predictor(config_all)\r\n\r\n\r\ndef no_accent_vietnamese(s):\r\n\ts = re.sub(r'[àáạảãâầấậẩẫăằắặẳẵ]', 'a', s)\r\n\ts = re.sub(r'[ÀÁẠẢÃĂẰẮẶẲẴÂẦẤẬẨẪ]', 'A', s)\r\n\ts = re.sub(r'[èéẹẻẽêềếệểễ]', 'e', s)\r\n\ts = re.sub(r'[ÈÉẸẺẼÊỀẾỆỂỄ]', 'E', s)\r\n\ts = re.sub(r'[òóọỏõôồốộổỗơờớợởỡ]', 'o', s)\r\n\ts = re.sub(r'[ÒÓỌỎÕÔỒỐỘỔỖƠỜỚỢỞỠ]', 'O', s)\r\n\ts = re.sub(r'[ìíịỉĩ]', 'i', s)\r\n\ts = re.sub(r'[ÌÍỊỈĨ]', 'I', s)\r\n\ts = re.sub(r'[ùúụủũưừứựửữ]', 'u', s)\r\n\ts = re.sub(r'[ƯỪỨỰỬỮÙÚỤỦŨ]', 'U', s)\r\n\ts = re.sub(r'[ỳýỵỷỹ]', 'y', s)\r\n\ts = re.sub(r'[ỲÝỴỶỸ]', 'Y', s)\r\n\ts = re.sub(r'[Đ]', 'D', s)\r\n\ts = re.sub(r'[đ]', 'd', s)\r\n\treturn s\r\n\r\n\r\ndef util_check_input_img(img_input):\r\n\tif not isinstance(img_input, (np.ndarray)):\r\n\t\t#print('not np array')\r\n\t\treturn False\r\n\t\r\n\tif img_input.shape[0] < 10 or img_input.shape[1] < 10:\r\n\t\t#print('small image %d %d' %(img_input.shape[0], img_input.shape[1]))\r\n\t\treturn False\r\n\treturn True\r\n\r\n\r\ndef run_ocr_cnn(img_line):\r\n\tif not util_check_input_img(img_line):\r\n\t\treturn \"\"\r\n\tis_success, buffer = cv2.imencode('.jpg', img_line)\r\n\r\n\t# print(buffer.tobytes())\r\n\tstr_ocr_val = detector_old.predict_bytes(buffer)\r\n\treturn str_ocr_val\t\r\n\r\n\r\ndef Repalce(s):\r\n last_symbol = s[len(s)-1]\r\n for j in ['/', '_', ':', ';', '-', '.', ',', '?']:\r\n if last_symbol == j:\r\n s = s.replace(j, '')\r\n for l in 
['/', '_', ':', ';', '-', '.', ',', '?']:\r\n s = s.replace(l, '')\r\n s = s.replace(' ', ' ')\r\n return s.strip()\r\n\r\n\r\ndef check_cnn_model(str_img_path, txt_file):\r\n\t### read and run ocr by cnn model\r\n\timg_in = cv2.imread(str_img_path)\r\n\tstr_ocr = run_ocr_cnn(img_in)\r\n\tkq_test = ''\r\n\tif len(str_ocr)==0 or len(txt_file)==0:\r\n\t\treturn [0, 0, 0, \"false\"]\r\n\tif Repalce(str_ocr).strip() == Repalce(txt_file).strip():\r\n\t\ta = 1\r\n\telse:\r\n\t\ta = 0\r\n\t\tkq_test = f\"\\n{str_ocr}\\n{txt_file}\\n-------------------------\"\r\n\tupperocr = no_accent_vietnamese(Repalce(str_ocr).strip())\r\n\tuppertxt = no_accent_vietnamese(Repalce(txt_file).strip())\r\n\tif upperocr == uppertxt:\r\n\t\tb = 1\r\n\telse:\r\n\t\tb = 0\r\n\tif upperocr.upper() == uppertxt.upper():\r\n\t\tc = 1\r\n\telse:\r\n\t\tc = 0\r\n\treturn [a, b, c, kq_test]\r\n\t# print(a)\r\n\t### TO DO: read txt_file\r\n\t### str_true = read from text file\r\n\t### compare in 2 level:\r\n\t### 1. compare str_ocr and str_true\t\r\n\t### 2. No accent vnese\r\n\t## str_non_vnese_ocr = \tno_accent_vietnamese (str_ocr)\r\n\t## str_non_vnese_true = no_accent_vietnamese (str_true)\r\n\t## compare str_non_vnese_ocr and str_non_vnese_true\r\n\t\r\n\r\n# check_cnn_model('/home/longhn/Desktop/Anotation/data/0HD_QuanLyTaiKhoan_COLOMBO.pdf_182021042210545568.jpg','CÔNG TY TNHH ĐẦU TƯ VÀ PHÁT TRIỂN COLOMBO')\r\nafile = open('/home/longhn/Annotation_2906/test.txt')\r\nkq = open('/home/longhn/Annotation_2906/kq.txt', 'w', encoding=\"utf8\")\r\ntrue_11 = 0\r\ntrue_noacc = 0\r\ntrue_upper = 0\r\nstart = time.time()\r\nfor x in afile:\r\n\tdata = x.split(\"\\t\")\r\n\t# print(data[0])\r\n\tlink = '/home/longhn/' + data[0]\r\n\ttext = data[1].strip()\r\n\t# print(link)\r\n\t# run the OCR model once per line instead of four times\r\n\tresult = check_cnn_model(link, text)\r\n\tif result[3] != '':\r\n\t\tkq.write(f\"{link} {result[3]}\")\r\n\ttrue_11 += result[0]\r\n\ttrue_noacc += result[1]\r\n\ttrue_upper += result[2]\r\nstop = time.time()\r\nprint(\"So sanh 1vs1:\",true_11)\r\nprint(\"So sanh ko dau:\", true_noacc)\r\nprint(\"So sanh upper:\", true_upper)\r\nprint(\"Tong thoi gian:\", stop-start)\r\n","sub_path":"run_ocr_cnn.py","file_name":"run_ocr_cnn.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"241058839","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 20 15:22:12 2017\n\n@author: Robert Bauer\n\"\"\"\n# %%\nimport libmushu\nimport wyrm.io\nimport wyrm.types\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# %%\nfname = r\"C:\\projects\\nmes_phase\\confchannel_conf.narray\"\n# %%\n# look for amplifiers connected to the system, and return a list of the\n# respective classes\n# available_amps = libmushu.get_available_amps()\n\n# select the first available amp and decorate it with tcp-marker- and\n# save-to-file-functionality\n# ampname = available_amps[1]\nampname = 'replayamp'\namp = libmushu.get_amp(ampname)\n\n# configure the amplifier\nold_recording = wyrm.io.load_brain_vision_data(r\"C:\\Users\\AGNPT-M-001\\Desktop\\untitled.vhdr\")\nold_recording.data = old_recording.data[0:-1:50,:] #speed up the recording by decimation for faster playback\namp.configure(data=old_recording.data,marker=list(old_recording.markers),channels=old_recording.axes[-1],fs = 
old_recording.fs)\n\namp_fs = amp.get_sampling_frequency()\namp_chan = amp.get_channels()\n\n# start it and collect data until finished\nrbuffer = wyrm.types.RingBuffer(5000)\nbbuffer = wyrm.types.BlockBuffer(10)\n\nplt.ion()\n\n\namp.start()\nburst_duration = 0\nburst_delay = 100\nwhile True:\n\n data, markers = amp.get_data()\n\n cnt = wyrm.io.convert_mushu_data(data,markers,amp_fs,amp_chan)\n bbuffer.append(cnt)\n rbuffer.append(cnt)\n cnt = rbuffer.get()\n # cnt = bbuffer.get()\n plt.cla()\n plt.axis([0, 6000, -500.5, 500.5])\n plt.grid()\n if len(cnt.data)>0:\n cpz = cnt.data[:,np.in1d(amp_chan,'CPz')];\n plt.plot(cpz)\n\n plt.pause(1/amp_fs)\n\n# stop the amplifier\namp.stop()\n","sub_path":"NU/explore_replayamp.py","file_name":"explore_replayamp.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"622747352","text":"from django.conf.urls import patterns, include, url\nfrom django.views.generic import DetailView, ListView\nfrom polls.models import Poll\nfrom crm.models import Applicant, Contact\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\n\n\nadmin.autodiscover()\n\nhandler404 = 'mysite.views.http_404'\nhandler500 = 'mysite.views.http_500'\nhandler505 = 'mysite.views.http_505'\n\n\n#urlpatterns = patterns('polls.views',\n # Examples:\n # url(r'^$', 'mysite.views.home', name='home'),\n # url(r'^mysite/', include('mysite.foo.urls')),\n \n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n \n # Uncomment the next line to enable the admin:\n\n# url(r'^polls/$', 'index'),\n# url(r'^polls/(?P<poll_id>\\d+)/$', 'detail'),\n# url(r'^polls/(?P<poll_id>\\d+)/results/$', 'results'),\n# url(r'^polls/(?P<poll_id>\\d+)/vote/$', 'vote'),\n# )\n\n\n\nurlpatterns = patterns('crm.views', \n url(r'^crm$', 'index'),\n url(r'^crm/(?P<applicant_id>\\d+)$', 'detail'),\n\n url(r'^crm/list$',\n ListView.as_view( queryset = Applicant.objects.order_by('index'),\n context_object_name = 'applicant_list',\n template_name='crm/list.html'),\n name='index' ),\n \n )\n\nurlpatterns += patterns('mysite.views', \n url(r'^listpictures', 'listpictures'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^/$', 'index'),\n url(r'^$', 'index'),\n url(r'^terms[/]$', 'terms'),\n url(r'^privacy[/]$', 'privacy'),\n url(r'^profile$', 'profile'),\n url(r'^about$', 'about'),\n url(r'^contact$', 'contact'),\n url(r'^get-startted[/]$', 'getstartted'),\n url(r'^apps/get/(?P<appname>\\w+)[/]$', 'getApplication'),\n url(r'^get/(?P<resource>.*)/(?P<username>\\w+)/(?P<textid>\\w+)/(?P<index>\\w+)/(?P<options>\\w+)[/]$', 'getresource'),\n url(r'^signup[/]$', 'signup'),\n url(r'^NQ4bch2.html$', 'cert'),\n )\n\nurlpatterns += patterns('',\n url(r'^login[/]$', 'django.contrib.auth.views.login', {'template_name': 'login.html', 'extra_context': {'next': '/profile'}}),\n url(r'^logout[/]$', 'django.contrib.auth.views.logout', {'template_name': 'logout.html', 'extra_context':{'next': '/login'}}), \n )\n\nurlpatterns += patterns('builder.views',\n url(r'^builder$', 'index'),\n\n )\n\nurlpatterns += patterns('school.views',\n url(r'^school[/]$', 'index'),\n )\n\nurlpatterns += patterns('ce.views',\n url(r'^ce[/]$', 'index'),\n url(r'^ce/upload[/]$', 'upload'),\n url(r'^ce/contact[/]$', 'contact'),\n url(r'^ce/thanks[/]$', 'thanks'),\n \n )\n\nurlpatterns += patterns('report.views',\n url(r'^report[/]$', 'index'),\n 
url(r'^report/test[/]$', 'test'),\n url(r'^report/gencsrf[/]$', 'gencsrf'),\n url(r'^report/generate[/]$', 'generate'),\n url(r'^report/getplan/(?P<planid>\\w+)[/]$', 'getPlan'),\n \n )\n\nurlpatterns += patterns('file.views',\n url(r'^file/(?P<userid>\\d+)[/](?P<filename>.*)$', 'getfile'),\n\n )\n\nurlpatterns += patterns('m.views',\n url(r'^m[/]$', 'index')\n )\n\n\n## Adding url patterns for other apps\nurlpatterns += patterns('cloudchoice.views',\n url(r'^cloudchoice/', include('cloudchoice.urls')),\n)\n\nurlpatterns += patterns('vi.views',\n url(r'^vi/', include('vi.urls')),\n)\n\n#urlpatterns += patterns('',\n# url(r'^$',\n# ListView.as_view( queryset = Poll.objects.order_by('-pub_date')[:5],\n# context_object_name = 'latest_poll_list',\n# template_name='polls/index.html'),\n# name='index' ),\n# url(r'^(?P<pk>\\d+)/$',\n# DetailView.as_view( model = Poll,\n# template_name = 'polls/detail.html'),\n# name='detail' ),\n# url(r'^(?P<pk>\\d+)/results/$',\n# DetailView.as_view( model = Poll,\n# template_name = 'polls/results.html'),\n# name = 'results'),\n# url(r'^(?P<poll_id>\\d+)/vote/$', 'polls.views.vote', \n# name='vote'),\n# )\n","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"182355572","text":"import time\r\nimport re\r\nimport ConfigParser\r\nimport csv\r\nimport sessions.linux\r\nimport telnetlib\r\nimport Queue\r\nimport threading\r\nimport urllib2\r\n\r\n\r\n#####################################################################\r\n# setup_shelf(ip)\r\n# Description: This function is used to login to device using telnet\r\n# library and change sysname by using set_tid()\r\n# Inputs : ip - IP address of device to be login\r\n# Returns : con - telnet session handle \r\n######################################################################\r\ndef setup_shelf(ip, additional_logs):\r\n \"\"\"provide the unique network host name to all NE\"\"\"\r\n con = telnetlib.Telnet(ip, 23)\r\n con.write( \"ADTRAN\\r\\n\")\r\n con.read_until( \"#\" , 5)\r\n con.write( \"BOSCO\\r\\n\")\r\n con.read_until( \"#\" , 5)\r\n con.write( \"en\\r\\n\")\r\n con.read_until( \"#\" , 5)\r\n con.write( \"term len 0\\r\\n\")\r\n con.read_until( \"#\" , 5)\r\n set_tid(con, \"ST-{}\".format(ip), additional_logs)\r\n con.close()\r\n\r\n####################################################################\r\n# set_tid(con, name)\r\n# Description: This function is used to set system name on device \r\n# using telnet library\r\n# Inputs : con - telnet session handle\r\n# name - system name of the device\r\n# Returns : Pass\r\n####################################################################\r\ndef set_tid(con, name, additional_logs):\r\n con.write(\"config t\\n\")\r\n con.read_until( \"#\" , 120)\r\n con.write(\"tl1\\n\")\r\n con.read_until( \"#\" , 120)\r\n con.write(\"tid {}\\n\".format(name))\r\n con.read_until( \"#\" , 120)\r\n con.write(\"end\\n\")\r\n con.read_until( \"#\" , 120)\r\n additional_logs.info(\"Successfully set the TID as {}\\n\".format(name))\r\n pass\r\n\r\n\r\n###########################################################################\r\n# validate(ip,web)\r\n# Description: This function is used to validate the multiple discovery NE\r\n# Inputs : ip - IP address of device to be login\r\n# web - provides handle for the web interface\r\n# Returns : Void \r\n###########################################################################\r\ndef 
validate(ip,web):\r\n \"\"\"Validation function for the multiple discovery NE\"\"\"\r\n\r\n retrieve = web.retrieveNE(ip)\r\n assert \"model\" in retrieve\r\n assert \"ipAddress\" in retrieve[\"model\"]\r\n assert retrieve[\"model\"][\"ipAddress\"] == ip\r\n\r\n\r\n###########################################################################\r\n# set_config_params(config)\r\n# Description: This function is used to set parameters for device discovery\r\n# Inputs : config - provides configuration parameters for discovered \r\n# device\r\n# Returns : Void\r\n###########################################################################\r\ndef set_config_params(config, additional_logs):\r\n \"\"\"configuration related data can be defined here for this module test cases\"\"\"\r\n additional_logs.info(\"Setting all the parameters for multiple device discovery\")\r\n config.set('discovery_params','exp_time','1200')\r\n config.set('discovery_params','ro_comu','public')\r\n config.set('discovery_params','rw_comu','private')\r\n\r\n\r\n###############################################################################\r\n# Multiple_devices_discovery(tb,web,logdir,config,get_logfile)\r\n# Description: This function uses the discovery.sh script to discover the devices\r\n# Inputs : tb - testbed object\r\n# web - provides handle for the web interface\r\n# logdir - test module log directory\r\n# config - provides configuration parameters for device discovery\r\n# get_logfile - test module log file fixture function parameter\r\n# Returns : returns True if multiple devices got discovered else False\r\n###############################################################################\r\ndef Multiple_devices_discovery(tb,web,logdir,config,get_logfile, additional_logs):\r\n \"\"\"this discovery uses the discovery.sh script for the discovery purpose \"\"\"\r\n set_config_params(config, additional_logs)\r\n ips = tb.ip_list\r\n \r\n #make sure to have unique Network host name for all NE\r\n try:\r\n\r\n thread_list = []\r\n for ip in ips:\r\n additional_logs.info(\"List of ips to be discovered is {}\\n\".format(ip))\r\n # args must be a tuple; setup_shelf() also expects the logger\r\n t = threading.Thread(target=setup_shelf, args=(ip, additional_logs))\r\n thread_list.append(t)\r\n\r\n # Starts threads\r\n for thread in thread_list:\r\n thread.daemon = True\r\n thread.start()\r\n\r\n # This blocks the calling thread until the thread whose join() method is called is terminated.\r\n for thread in thread_list:\r\n thread.join()\r\n\r\n except KeyboardInterrupt:\r\n additional_logs.error(\"Shutdown: Due to manual Interrupt\\n\")\r\n\r\n #get all desired params from config file here\r\n cal_time = config.getint('discovery_params','exp_time')\r\n ro_comu = config.get('discovery_params','ro_comu')\r\n rw_comu = config.get('discovery_params','rw_comu')\r\n\r\n sftp_session = sessions.linux.Linux( host = tb.do_pc_ip\r\n , port = tb.hostport\r\n , method = 'sftp'\r\n , user = 'ems'\r\n , password = 'password'\r\n , logfile = get_logfile('multiple_discovery_sftp.txt'))\r\n\r\n sftp_session.sendline('lmkdir /tmp/test_dir')\r\n sftp_session.expect('sftp>', timeout=30)\r\n sftp_session.sendline('lcd /tmp/test_dir')\r\n sftp_session.expect('sftp>', timeout=30)\r\n sftp_session.sendline('get /opt/dorado/owareapps/scripts/discover.sh')\r\n sftp_session.expect('sftp>', timeout=30)\r\n\r\n\r\n #change the default timeout of discovery.sh script and replace it \r\n s = open(\"/tmp/test_dir/discover.sh\").read()\r\n s = s.replace('60000', '1800000')\r\n f = open(\"/tmp/test_dir/discover.sh\", 'w')\r\n f.write(s)\r\n f.close()\r\n\r\n #create the csv file and append with the no of IPs to be discovered.\r\n with open('/tmp/test_dir/test.csv','wb') as csvfile:\r\n temp = csv.writer(csvfile, delimiter=',')\r\n data = []\r\n for items in ips:\r\n values = [items,ro_comu,rw_comu]\r\n data.append(values)\r\n temp.writerows(data)\r\n\r\n csvfile.close()\r\n\r\n\r\n #ssh to the running docker \r\n dosession = sessions.linux.Linux( host = tb.do_pc_ip\r\n , port = tb.hostport\r\n , method = 'ssh'\r\n , user = 'ems'\r\n , password = 'password'\r\n , logfile = get_logfile('multiple_discovery_ssh.txt'))\r\n\r\n\r\n #replace the original discover.sh file with the modified one & copy the new csv file\r\n\r\n sftp_session.sendline('cd /opt/dorado/owareapps/scripts')\r\n sftp_session.expect('sftp>', timeout=30)\r\n sftp_session.sendline('put /tmp/test_dir/discover.sh')\r\n sftp_session.expect('sftp>', timeout=30)\r\n sftp_session.sendline('put /tmp/test_dir/test.csv')\r\n sftp_session.expect('sftp>', timeout=30)\r\n\r\n\r\n #start discovery: note down the start time to compare later \r\n start_time = time.time()\r\n\r\n #let's give more time than our expected discovery time as the timeout value\r\n tout = cal_time + 300\r\n\r\n #workaround for default password\r\n\r\n dosession.sendline('cd /opt/dorado/owareapps/scripts')\r\n dosession.expect('.*ems@aoe-docker.*', timeout=30)\r\n dosession.sendline(\"./discover.sh {}:{}/aoe admin password test.csv > temp_log.txt\".format(tb.do_pc_ip,tb.hostport+1))\r\n dosession.expect(\".*ems@aoe-docker.*\", timeout=tout)\r\n\r\n\r\n sftp_session.sendline('lcd /tmp/test_dir')\r\n sftp_session.expect('sftp>', timeout=30)\r\n sftp_session.sendline('get /opt/dorado/owareapps/scripts/temp_log.txt')\r\n sftp_session.expect('sftp>', timeout=30)\r\n\r\n #The only way to find out the discovered devices is by reading the log file generated by discover.sh\r\n\r\n with open(\"/tmp/test_dir/temp_log.txt\", \"r\") as f:\r\n lin = f.readlines()\r\n find_ip = []\r\n for line in lin:\r\n m = re.findall(r'\\[DISCOVERY\\sSUCCESS\\]\\s\\S+\\:\\s\\[\\d+-(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})',line)\r\n for value in m:\r\n find_ip.append(value)\r\n additional_logs.info(\"Discovered IP : {}\\n\".format(find_ip))\r\n count = 1\r\n for item in ips:\r\n if item in find_ip:\r\n additional_logs.info(\"{} ip is discovered\\n\".format(item))\r\n else:\r\n additional_logs.error(\"{} ip is not discovered\\n\".format(item))\r\n count = 0\r\n f.close()\r\n\r\n elapsed_time = time.time() - start_time\r\n assert count,\"Discovery for multiple devices failed: couldn't discover all the devices\"\r\n # Check whether discovery for multiple devices was successful or not.\r\n for items in ips:\r\n additional_logs.info(\"Check the validation for the ip %s\\n\"%(items))\r\n validate(items,web)\r\n additional_logs.info(\"Successfully discovered the ip %s\\n\"%(items))\r\n\r\n assert elapsed_time <= cal_time, \"Multiple discovery failed: time to discover exceeded the defined limit\"\r\n additional_logs.info(\"Device discovery successful for multiple IPs\\n\")\r\n return True\r\n\r\n","sub_path":"kk_ADTRAN/device_discovery.py","file_name":"device_discovery.py","file_ext":"py","file_size_in_byte":9374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"507980319","text":"# -*- coding: utf-8 -*-\nimport time\nfrom openerp.osv import osv\nfrom openerp.report import report_sxw\nfrom common_report_header import common_report_header\n\nclass plc_invoice_print(report_sxw.rml_parse, 
common_report_header):\n\n def __init__(self, cr, uid, name, context=None):\n if context is None:\n context = {}\n super(plc_invoice_print, self).__init__(cr, uid, name, context=context)\n self.localcontext.update({\n 'convert_int2word': self._convert_int2word,\n 'get_paid':self._get_paid,\n 'get_remaining':self._get_remaining,\n 'format_numbervn':self._format_numbervn,\n })\n \n def _get_paid(self,acc_inv_id):\n sql=\"select residual,amount_total from account_invoice where id=%d\"%(acc_inv_id)\n self.cr.execute(sql)\n data=self.cr.dictfetchall()\n result=float(data[0]['amount_total'])-float(data[0]['residual'])\n plc=self._format_numbervn(result)\n return plc\n \n def _get_remaining(self,acc_inv_id):\n sql=\"select residual from account_invoice where id=%d\"%(acc_inv_id)\n self.cr.execute(sql)\n data=self.cr.dictfetchone()['residual']\n result=self._format_numbervn(data)\n return result \n def _format_numbervn(self,abc):\n try:\n xau=str(abc).split('.')\n if xau[1]=='0':\n return '{0:,}'.format(int(xau[0]))\n else:\n return '{0:,}'.format((abc/10)*10)\n except:\n return 0;\n \n def _convert_int2word(self,acc_inv_id):\n sql=\"select residual from account_invoice where id=%d\"%(acc_inv_id)\n self.cr.execute(sql)\n num=self.cr.dictfetchone()['residual']\n \n units = [\" \", \"một \", \"hai \", \"ba \", \"bốn \", \"năm \", \"sáu \", \"bảy \", \"tám \", \"chín \"]\n teens = [\" \", \"mười một \", \"mười hai \", \"mười ba \", \"mười bốn \", \n \"mười năm \", \"mười sáu \", \"mười bảy \", \"mười tám \", \"mười chín \"]\n tens = [\"\", \"mười\", \"hai mươi\", \"ba mươi\", \"bốn mươi \",\n \"năm mươi \", \"sáu mươi \", \"bảy mươi \", \"tám mươi \", \"chín mươi \"]\n thousands = [\" \",\" nghìn \", \"triệu \", \"tỷ \", \"trillion\", \n \"quadrillion\", \"quintillion\", \"sextillion\", \"septillion\", \"octillion\", \n \"nonillion\", \"decillion\", \"undecillion\", \"duodecillion\", \"tredecillion\", \n \"quattuordecillion\", \"sexdecillion\", \"septendecillion\", \"octodecillion\", \n \"novemdecillion\", \"vigintillion \"]\n words = \"\"\n if num == 0:\n words=words+(\"không \")\n else:\n numStr = \"%d\" % num\n numStrLen = len(numStr)\n groups = (numStrLen + 2) / 3\n numStr = numStr.zfill(groups * 3)\n for i in range(0, groups*3, 3):\n h = int(numStr[i])\n t = int(numStr[i+1])\n u = int(numStr[i+2])\n g = groups - (i / 3 + 1)\n \n if h >= 1:\n words=words+(units[h])\n words=words+(\"trăm \")\n \n if t > 1:\n words=words+(tens[t])\n if u >= 1:\n words=words+(units[u])\n elif t == 1:\n if u >= 1:\n words=words+(teens[u])\n else:\n words=words+(tens[t])\n else:\n if u >= 1:\n words=words+(units[u])\n \n if g >= 1 and (h + t + u) > 0:\n words=words+(thousands[g])\n words=words+\" đồng\"\n return words.capitalize()\n\nclass report_agedpartnerbalance(osv.AbstractModel):\n _name = 'report.account.report_invoice'\n _inherit = 'report.abstract_report'\n _template = 'account.report_invoice'\n _wrapped_report_class = plc_invoice_print","sub_path":"openerp/addons/account/report/plc_report_invoice.py","file_name":"plc_report_invoice.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"103459754","text":"# -*- coding: utf-8 -*-\n\n\"\"\" \n MongoDB 연결 core code\n\n\"\"\"\n\nimport pymongo\nimport sys\nimport os\n\n\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))))\nfrom log import log # 로그\n\n\n\nclass MongoDB_DB():\n\n\n\n\n def __init__(self, 
collection_name):\n\n # 로그 생성\n self.logger = log.make_logger(\"MongoDB_DB\")\n self.logger.info(\"CLASS | {} > run\".format(self.__class__.__name__))\n\n # 접속 경로\n self.client = \"mongodb://localhost:27017/\"\n self.database = \"opgg\"\n self.collection = collection_name\n\n\n def connect(self):\n \"\"\"MongoDB 연결\n\n Returns:\n [class]: <class 'pymongo.collection.Collection'>\n \"\"\"\n\n self.logger.info(\"FUC | {} > run\".format(sys._getframe().f_code.co_name))\n\n try:\n self.client = pymongo.MongoClient(self.client)\n self.db = self.client[self.database] # Database\n self.cursor = self.db[self.collection] # Collection\n self.logger.info(\"FUC | {} > success\".format(sys._getframe().f_code.co_name))\n \n\n return self.cursor\n\n except:\n self.logger.error(\"FUC | {} > error\".format(sys._getframe().f_code.co_name))\n\n\n\n","sub_path":"python/DB/Mongo/mongo_config.py","file_name":"mongo_config.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"188633477","text":"from django.urls import path\nfrom rest_framework import routers\nfrom . import views\n\napp_name = \"content\"\n\nrouter = routers.SimpleRouter()\n\nurlpatterns = [\n path(\"about/\", views.about_view),\n path(\"home/\", views.home_view)\n]\n\nurlpatterns += router.urls\n","sub_path":"DomePortfolio/apps/content/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"638662617","text":"\"\"\"Game main module.\n\nContains the entry point used by the run_game.py script.\n\nFeel free to put all your game code here, or in other modules in this \"game\"\npackage.\n\"\"\"\n\nimport pygame\nimport pygame.freetype\nimport os\n\nimport pygame_gui\n\nfrom game import loader\nfrom game import events\nfrom game import popups\nfrom game import Sound\nfrom game.resources import Resources\n\n\nwidth, height = [1280, 720]\nbgcolor = (230, 30, 70)\n\n\ndef main():\n print(\"Hello from your game's main()\")\n print(loader.load(\"sample.txt\").read())\n\n pygame.init()\n pygame.freetype.init()\n\n pygame.display.set_caption(\"Amazing Game 10/10\") # changes name of pygame window\n\n screen = pygame.display.set_mode((width, height))\n clock = pygame.time.Clock()\n\n manager = pygame_gui.UIManager((width, height))\n\n newspaper = popups.Newspaper(\n \"Ant Colony Overruns Granary! City Officials Scramble.\"\n )\n\n food = 50\n population = 50\n territory = 50\n\n # creates the Resources object, which can be accessed from anywhere as Resources.instance\n Resources(manager, food, population, territory)\n\n # example decisions\n theft_decision = events.Decision(\"theft\")\n theft_decision.text = (\n \"An ant was found stealing from the colony's food supply! How do you respond?\"\n )\n theft_decision.options = [\"Banish the ant\", \"Do nothing\"]\n theft_decision.outcomes = [\n \"Angry at your decision, several of the banished ant's friends leave with them\",\n \"Seeing there are no consequences, more ants begin to steal food\",\n ]\n theft_decision.impacts = [[0, -5, 0], [-10, 0, 0]]\n theft_decision.ready() # builds the UI stuff\n\n war_decision = events.Decision(\"war\")\n war_decision.text = \"The beetles have been encroaching on your territory recently. Should we go to war to teach them a lesson?\"\n war_decision.options = [\"Yes, war!\", \"No, peace\"]\n war_decision.outcomes = [\n \"Your soldiers attack the beetles, successfully pushing them back and gaining territory. You do face some losses though\",\n \"The beetles continue to take your land\",\n ]\n war_decision.impacts = [[0, -10, 5], [0, 0, -20]]\n war_decision.ready() # builds the UI stuff\n\n\n #example events\n spoiled_food_event = events.Event(\"spoiled food\")\n spoiled_food_event.text = (\n \"Some food in storage has spoiled!\"\n )\n spoiled_food_event.impacts = [-5, 0, 0]\n spoiled_food_event.ready()\n\n new_land_event = events.Event(\"new land\")\n new_land_event.text = (\n \"Your scouts have found some new uninhabited land!\"\n )\n new_land_event.impacts = [0, 0, 5]\n new_land_event.ready()\n\n event_queue = [war_decision, spoiled_food_event, newspaper, theft_decision, new_land_event]\n\n current_decision = event_queue.pop(0)\n # displayDecision(manager, decision_textbox, decision_buttons, current_decision)\n\n image = pygame.image.load(loader.filepath(\"Queen's room.png\"))\n image = pygame.transform.scale(image,(1280,720))\n\n sounds = Sound.Sound(manager, width, height)\n sounds.displayVolumeButton()\n sounds.playMusic()\n \n while True:\n \n time_delta = clock.tick(60) / 1000\n\n if sounds.volumeButton.check_pressed() and not sounds.slidesDisplayed:\n sounds.displayVolumeSlides()\n if sounds.volumeButton.check_pressed() and sounds.slidesDisplayed:\n sounds.killVolumeSlides()\n sounds.displayVolumeButton()\n if sounds.slidesDisplayed:\n sounds.updateVolume()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n raise SystemExit\n\n if event.type == pygame.USEREVENT:\n pass\n\n manager.process_events(event)\n current_decision.process_events(event, sounds)\n\n manager.update(time_delta)\n\n screen.fill(bgcolor)\n screen.blit(image,(0,0))\n\n if current_decision.display(time_delta):\n if len(event_queue) > 0:\n current_decision = event_queue.pop(0)\n else:\n current_decision = events.NoDecision()\n print(\"no more decisions\")\n\n manager.draw_ui(screen)\n\n pygame.display.flip()\n","sub_path":"game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
+{"seq_id":"539327090","text":"import pygame\nimport numpy as np\nfrom game_lib.SharedClasses import ConfirmButton, BackButton, CircuitButton, TutorialBlock, Knob\nfrom game_lib.parameters import BACKGROUND_COLOR, FPS, IMAGE_PATH\n\nclass Caption:\n width = 800\n height = 200\n\n def __init__(self, pos):\n self.rect = pygame.Rect((0, 0), (self.width, self.height))\n font = pygame.font.SysFont('timesnewroman', 50)\n self.text = font.render('Is Alice...?', True, pygame.Color(\"black\"))\n self.text_rect = self.text.get_rect()\n self.text_rect.center = pos\n\n def draw(self, surface):\n surface.blit(self.text, self.text_rect)\n\nclass Theta():\n\n width = 50\n height = 50\n def __init__(self, pos):\n self.image = pygame.transform.scale(pygame.image.load(f'{IMAGE_PATH}/theta.png'),\n (self.width, self.height))\n\n self.rect = self.image.get_rect()\n self.rect.center = pos\n\n def draw(self, surface):\n surface.blit(self.image, self.rect.topleft)\n\nclass Phi():\n width = 40\n height = 40\n def __init__(self, pos):\n self.image = pygame.transform.scale(pygame.image.load(f'{IMAGE_PATH}/phi.png'),\n (self.width, self.height))\n\n self.rect = self.image.get_rect()\n self.rect.center = pos\n\n def draw(self, surface):\n surface.blit(self.image, self.rect.topleft)\n\nclass Officer():\n width = 400\n height = 450\n\n def __init__(self, pos):\n self.image = pygame.transform.scale(pygame.image.load(f'{IMAGE_PATH}/AliceOfficer.png'),\n (self.width, self.height))\n\n self.rect = self.image.get_rect()\n self.rect.center = pos\n\n def draw(self, surface):\n surface.blit(self.image, self.rect.topleft)\n\n\nclass GBButton():\n # Set the default size of the buttons while assuming the Angle of each is settled as pi/2\n width = 75\n height = 40\n\n # text decides whether to print Good or Bad\n def __init__(self, pos, angle, color, text=''):\n\n self.angle = angle\n # setting the ratio of size\n # based on angle\n # self.goodRatio = 2*(self.angle/180)\n # self.badRatio = 2*(1-self.angle/180)\n\n self.update_ratio(self.angle)\n self.color = color\n self.text = text\n self.pos = pos\n\n #if self.text == 'GOOD!':\n # self.font = pygame.font.SysFont('timesnewroman', int(30))\n # self.text0 = self.font.render(self.text, True, pygame.Color(\"black\"))\n #elif self.text == 'BAD!':\n # self.font = pygame.font.SysFont('timesnewroman', int(30))\n # self.text0 = self.font.render(self.text, True, pygame.Color(\"black\"))\n # else:\n # raise ValueError('text must be \"GOOD!\" or \"BAD!\"')\n\n def update_ratio(self,angle):\n self.angle = angle\n self.ratio = [ 2*(self.angle/np.pi), (2*(1 - self.angle/np.pi))]\n\n def draw(self, surface):\n if self.text == 'GOOD!':\n self.rect = pygame.Rect((0, 0), (self.ratio[0] * self.width, self.ratio[0] * self.height))\n self.font = pygame.font.SysFont('timesnewroman', int(25*self.ratio[0]))\n self.text0 = self.font.render(self.text, True, pygame.Color(\"black\"))\n\n elif self.text == 'BAD!':\n self.rect = pygame.Rect((0, 0), (self.ratio[1] * self.width, self.ratio[1] * self.height))\n self.font = pygame.font.SysFont('timesnewroman', int(30*self.ratio[1]))\n self.text0 = self.font.render(self.text, True, pygame.Color(\"black\"))\n else:\n raise ValueError('text must be \"GOOD!\" or \"BAD!\"')\n\n self.rect.bottomleft = (self.pos[0] - self.width / 2, self.pos[1] + self.height / 2)\n surface.fill(pygame.Color(self.color), self.rect)\n surface.blit(self.text0, (\n self.rect.bottomleft[0], self.rect.bottomleft[1] - self.text0.get_height()))\n\n\nclass BobStage:\n\n def __init__(self, data):\n\n self.data = data\n\n self.screen = pygame.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.clock = pygame.time.Clock()\n self.fps = FPS\n\n self.keys = pygame.key.get_pressed()\n\n self.dragged_k = None\n self.checked_cb = None\n\n cx, cy = self.screen_rect.center\n\n self.Officer = Officer((5*cx / 3, cy - 100))\n self.Phi = Phi((100, cy + 60))\n self.Theta = Theta((100, cy - 60))\n self.caption = Caption((cx, 0.5*cy))\n\n self.Knobs = [Knob((50, cy - 60), np.pi/2), # theta\n Knob((50, cy + 60), 0)] # phi\n\n self.GBButtons = [GBButton((cx + 75, cy + 150), self.Knobs[0].angle, 'red', 'GOOD!'),\n GBButton((cx - 100, cy + 150), self.Knobs[0].angle, 'blue', 'BAD!')]\n\n self.ConfirmButton = ConfirmButton()\n self.BackButton = BackButton()\n self.CircuitButton = CircuitButton()\n\n self.next_stage = False\n self.quit = False\n self.back = False\n self.show_circuit = False\n\n def event_loop(self):\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT or self.keys[pygame.K_ESCAPE]:\n self.quit = True\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n for k in self.Knobs:\n k.check_click(event.pos)\n if k.click:\n self.dragged_k = k\n break\n\n self.ConfirmButton.check_click(event.pos)\n self.BackButton.check_click(event.pos)\n self.CircuitButton.check_click(event.pos)\n\n\n elif event.type == pygame.MOUSEBUTTONUP:\n if event.button == 1:\n for k in self.Knobs:\n if k.click:\n k.click = False\n self.dragged_k = None\n break\n\n elif event.type in (pygame.KEYUP, pygame.KEYDOWN):\n self.keys = pygame.key.get_pressed()\n\n def render(self):\n self.screen.fill(pygame.Color(BACKGROUND_COLOR))\n\n for k in self.Knobs:\n k.draw(self.screen)\n\n for i in self.GBButtons:\n i.draw(self.screen)\n\n self.Officer.draw(self.screen)\n self.Theta.draw(self.screen)\n self.Phi.draw(self.screen)\n self.ConfirmButton.draw(self.screen)\n self.BackButton.draw(self.screen)\n self.CircuitButton.draw(self.screen)\n self.caption.draw(self.screen)\n\n pygame.display.update()\n\n def main_loop(self):\n\n while not (self.quit or self.next_stage or self.back or self.show_circuit):\n self.event_loop()\n\n for k in range(2):\n## print(self.Knobs[k].angle)\n self.Knobs[k].update_drag()\n\n for i in range(2):\n self.GBButtons[i].update_ratio(self.Knobs[0].angle)\n\n\n\n self.phi = (self.Knobs[1].angle) * np.pi / 180\n self.theta = (self.Knobs[0].angle) * np.pi / 180\n\n self.data['UA'] = np.array([[np.exp(self.phi * 1j) * np.cos(self.theta / 2), -(np.sin(self.theta / 2))],\n [np.sin(self.theta / 2), np.exp(-self.phi * 1j) * np.cos(self.theta / 2)]])\n\n if self.ConfirmButton.click:\n self.ConfirmButton.update_click()\n self.next_stage = True\n elif self.BackButton.click:\n self.BackButton.update_click()\n self.back = True\n elif self.CircuitButton.click:\n self.CircuitButton.update_click()\n self.show_circuit = True\n\n self.render()\n self.clock.tick(self.fps)\n\n \n\n \n \n","sub_path":"game_lib/prisoner_dilemma/BobStage.py","file_name":"BobStage.py","file_ext":"py","file_size_in_byte":7910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
+{"seq_id":"118726623","text":"import os\nimport shutil\nimport time\nfrom typing import Any, Optional, List\n\nfrom slugify import slugify\nfrom sqlalchemy import case, cast, Integer\nfrom sqlalchemy.orm import Session, aliased, joinedload\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.sql.elements import Cast\n\nfrom crud.base import CRUDBase\nfrom elococo import main\nfrom datetime import datetime\nfrom models.catalogue import Product, ProductToProduct\n\nelements = aliased(ProductToProduct)\nboxes = aliased(ProductToProduct)\nelement = aliased(Product)\n\n\ndef reduction(box):\n return case(\n [\n (box.reduction_end > datetime.now(), box.reduction),\n ],\n else_=0\n )\n\n\ndef price_exact(box, reduction_=None):\n int_price = box.price\n\n if reduction_ is not None:\n return Cast(\n case(\n [\n (reduction_ > 0, reduction_ * int_price / 100),\n ],\n else_=int_price\n ), Integer\n )\n\n return int_price\n\n\ndef price_exact_ht(box, price_alias):\n return Cast(case(\n [\n (box.is_ttc_price, price_alias * 100 / main.settings.TVA),\n ],\n else_=price_alias\n ), Integer)\n\n\ndef price_exact_ttc(box, price_alias):\n return Cast(\n case(\n [\n (box.is_ttc_price, price_alias),\n ], else_=price_alias * main.settings.TVA / 100\n ), Integer\n )\n\n\ndef product_query(session: Session, box):\n return session.query(\n box\n ).options(\n joinedload(box.categories),\n joinedload(box.images),\n ).join(\n box.elements.of_type(elements), isouter=True\n ).join(\n elements.element.of_type(element), isouter=True\n ).options(\n joinedload(box.boxes.of_type(boxes)).joinedload(boxes.box),\n joinedload(box.elements.of_type(elements)).joinedload(elements.element)\n )\n\n\ndef set_elements(session: Session, product_db, elements: List[schemas.catalogue.Elements]):\n for element_db in product_db.elements:\n session.delete(element_db)\n session.commit()\n\n if elements is not None and len(elements) > 0:\n associations = []\n\n for element in elements:\n if session.query(Product).filter(Product.id == element.product_id).first() is None:\n continue\n\n associations.append(\n ProductToProduct(\n product_box_id=product_db.id,\n product_element_id=element.product_id,\n quantity=element.quantity\n )\n )\n\n session.add_all(associations)\n session.commit()\n return True\n\n return False\n\n\ndef set_categories(session, product_db, categories_id):\n if categories_id is not None and len(categories_id) > 0:\n categories_db = session.query(CategoryCreate).filter(\n CategoryCreate.id.in_(categories_id)\n ).all()\n else:\n categories_db = []\n\n product_db.categories = categories_db\n\n session.commit()\n\n\nclass CRUDProduct(CRUDBase[Product, ProductCreate, ProductUpdate]):\n def set_images(self, session, product_db, images):\n for image in product_db.images:\n path = main.settings.BASE_DIR / \"medias\" / image.path\n if os.path.exists(path):\n os.remove(path)\n session.delete(image)\n session.commit()\n\n models_product_images = []\n for image in images:\n filename = f\"{time.time_ns()}.{image.filename.split('.')[-1]}\"\n try:\n with open(main.settings.BASE_DIR / \"medias\" / filename, \"wb\") as buffer:\n shutil.copyfileobj(image.file, buffer)\n finally:\n image.file.close()\n\n models_product_images.append(\n ProductImage(path=filename, product_id=product_db.id)\n )\n\n product_db.images = models_product_images\n session.commit()\n\n def get(self, db: Session, id_: Any) -> Optional[Product]:\n box = aliased(Product)\n\n effective_stock_alias = case(\n [\n (\n func.count(elements.product_element_id) > 0, func.min(\n cast(\n element.stock / elements.quantity, Integer\n )\n )\n ),\n ], else_=box.stock\n ).label(\"effective_stock\")\n reduction_alias = reduction(box).label(\"effective_reduction\")\n price_exact_label = price_exact(box, reduction_alias).label(\"price_exact\")\n price_ht_alias = price_exact_ht(box, price_exact_label).label(\"price_ht\")\n price_ttc_alias = price_exact_ttc(box, price_exact_label).label(\"price_ttc\")\n\n q = product_query(\n db, box\n ).add_columns(\n effective_stock_alias, reduction_alias, price_ht_alias, price_ttc_alias\n ).filter(\n box.id == id_\n )\n\n q = q.first()\n\n product = q[0]\n product.effective_stock = q[1]\n product.effective_reduction = q[2]\n product.price_ht = q[3]\n product.price_ttc = q[4]\n\n return product\n\n def create(\n self, session: Session,\n obj_in: ProductCreate,\n elements: Optional[List[schemas.catalogue.Elements]] = None,\n categories_id: Optional[List[int]] = None\n ) -> Product:\n obj_in.date = obj_in.dateUpdate = datetime.now().date()\n product_db = Product(slug=slugify(obj_in.name), **obj_in.dict())\n session.add(product_db)\n session.commit()\n session.refresh(product_db)\n product_db = self.get(session, product_db.id)\n\n if elements is not None:\n set_elements(session, product_db, elements)\n\n if categories_id is not None:\n set_categories(session, product_db, categories_id)\n\n return product_db\n\n def update(\n self, db: Session,\n db_obj: Product,\n obj_in: ProductUpdate,\n elements: Optional[List[schemas.catalogue.Elements]] = None,\n categories_id: Optional[List[int]] = None\n ) -> Product:\n obj_in.dateUpdate = datetime.now().date()\n update_data = obj_in.dict(exclude_unset=True)\n\n if obj_in.name is not None:\n db_obj.slug = slugify(obj_in.name)\n\n product_db = super().update(db, db_obj=db_obj, obj_in=update_data)\n\n if elements is not None:\n set_elements(db, product_db, elements)\n\n if categories_id is not None:\n set_categories(db, product_db, categories_id)\n\n return product_db\n\n\nproduct = CRUDProduct(Product)\n","sub_path":"crud/crud_product.py","file_name":"crud_product.py","file_ext":"py","file_size_in_byte":6647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
+{"seq_id":"596585531","text":"import tensorflow as tf\nimport numpy as np\nimport tensor2tensor.models.transformer_with_context as transformer_with_context\nimport tensor2tensor.models.transformer as transformer\n\nhparams = transformer.transformer_base()\nhparams.hidden_size = 3\nhparams.num_heads = 1\nhparams.use_target_space_embedding = False\nmodel = transformer_with_context.TransformerWithContext(hparams)\n\ninputs_context_np = [[[[0.3, 0.2, 0.1]], [[0.3, 0.2, 0.1]], [[0.3, 0.2, 0.1]]], [[[0.3, 0.2, 0.1]], [[0.3, 0.2, 0.1]], [[0.3, 0.2, 0.1]]]]\ninputs_context = tf.convert_to_tensor(inputs_context_np, np.float32)\ninputs_np = [[[[0.3, 0.2, 0.1]], [[0.3, 0.2, 0.1]], [[0.3, 0.2, 0.1]]], [[[0.3, 0.2, 0.1]], [[0.3, 0.2, 0.1]], [[0.3, 0.2, 0.1]]]]\ninputs = tf.convert_to_tensor(inputs_np, np.float32)\ntarget_space_id = 0\ntargets_np = [[[[0.3, 0.2, 0.1]], [[0.3, 0.2, 0.1]], [[0.3, 0.2, 0.1]]], [[[0.3, 0.2, 0.1]], [[0.3, 0.2, 0.1]], [[0.3, 0.2, 0.1]]]]\ntargets = tf.convert_to_tensor(targets_np)\n\nfeatures = dict()\nfeatures[\"inputs_context\"] = inputs_context\nfeatures[\"inputs\"] = inputs\nfeatures[\"target_space_id\"] = target_space_id\nfeatures[\"targets\"] = targets\n\noutput = model.body(features)\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\nx = output.eval(session=sess)\nprint(x)\n","sub_path":"tensor2tensor/my_tests/test_transfomer_with_context.py","file_name":"test_transfomer_with_context.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
+{"seq_id":"423441160","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn import datasets\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.decomposition import PCA\r\nimport matplotlib.pyplot as plt\r\nimport scipy\r\nimport seaborn as sns\r\nimport os\r\nos.chdir('S:/Nutrition/Sacks Lab Data/2018.06 - OMNIHeart HDL Subspecies/DATA/BG')\r\n\r\n#Our file\r\ndf = pd.read_csv('07_8_merged Lipid and HDLSubSpecies.csv')\r\n#List of columns of interest\r\nID = df.iloc[:,1]\r\nprots = df.iloc[:, 4:26]\r\nsex = df.iloc[:, 45]\r\nage = df.iloc[:, 47]\r\nbmi = df.iloc[:, 48]\r\nlipids = df.iloc[:,60:64]\r\ndiet = df.iloc[:,77]\r\nrace = df.iloc[:,49]\r\n\r\nnewdf = pd.concat([ID, diet, prots, sex, age, bmi, lipids], axis = 1)\r\nzofall = scipy.stats.zscore(newdf.iloc[:,2:])\r\n\r\n\r\nprots_z = scipy.stats.zscore(prots)\r\nplt.plot(prots_z, linewidth = 0.5)\r\nplt.plot(np.linspace(0,450,100), [0]*100, c = 'red')\r\nplt.title('Z Scores of All Proteins')\r\n\r\n##################\r\n#corrilation matrix\r\nfig, ax = plt.subplots()\r\nprotdf = pd.DataFrame(prots_z)\r\ncorr = protdf.corr()\r\nmask = np.zeros_like(corr, dtype=np.bool)\r\nmask[np.triu_indices_from(mask)] = True\r\ncmap = sns.diverging_palette(200, 10, 
as_cmap=True)\r\nsns.heatmap(corr, mask=mask, cmap = cmap, linewidth = 1, xticklabels = list(prots), yticklabels = list(prots))\r\nplt.title('Protein Corrilation After Zscore Normalization')\r\nplt.show()\r\n\r\nsns.clustermap(corr,xticklabels = list(prots), yticklabels = list(prots))\r\nplt.title('Clustered Heatmap of Proteins Zscores', loc = 'left')\r\nplt.show()\r\n","sub_path":"Corr_and_Heat.py","file_name":"Corr_and_Heat.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"463574519","text":"import numpy as np\nimport math\nfrom pandas import read_csv\nfrom datetime import datetime\nfrom math import sqrt\nfrom numpy import concatenate\nfrom matplotlib import pyplot\nfrom pandas import DataFrame\nfrom pandas import concat\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import mean_squared_error\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\n\n\ndatos=read_csv('files/datos_aceituna_gilena.csv', usecols=[0,2,5], engine='python')\ndatoss = datos.values\n\n#Normalizar los datos de kilogramos para usarlo en la media ponderada\nkilos = read_csv('files/datos_aceituna_gilena.csv', usecols=[2], engine='python')\nkilos = kilos.values\nminmax = MinMaxScaler(feature_range=(0,1))\nkilos_normalizados = minmax.fit_transform(kilos)\ndatoss[:,1] = kilos_normalizados[:,0]\n\nanyos= ['2015', '2016', '2017', '2018', '2019']\nmeses = ['01','10','11','12']\ndias = ['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30']\nsuma2015 = []\nsuma2016 = []\nsuma2017 = []\nsuma2018 = []\nsumaall = []\n\nfor anyo in anyos:\n for mes in meses:\n for dia in dias:\n suma_dia = 0\n numero_albaran = 0\n #print(str(i)+'/'+mes)\n for d in datoss:\n if (dia+'/'+mes+'/'+anyo) in d[0]:\n #if d[0].__contains__(str(i)+'/'+mes):\n print('Fecha: ' + d[0] + ' Acidez: ' + str(d[2]))\n suma_dia = suma_dia + d[1]*d[2]\n numero_albaran = numero_albaran +1\n sumaall.append(suma_dia)\n if anyo == '2015':\n if numero_albaran==0:\n suma2015.append(suma_dia)\n else:\n suma2015.append(suma_dia/numero_albaran)\n if anyo == '2016':\n if mes in ['10','11','12']:\n if numero_albaran==0:\n suma2016.append(suma_dia)\n else:\n suma2016.append(suma_dia/numero_albaran)\n if mes in ['01']:\n if numero_albaran==0:\n suma2015.append(suma_dia)\n else:\n suma2015.append(suma_dia/numero_albaran)\n if anyo == '2017':\n if mes in ['10','11','12']:\n if numero_albaran==0:\n suma2017.append(suma_dia)\n else:\n suma2017.append(suma_dia/numero_albaran)\n if mes in ['01']:\n if numero_albaran==0:\n suma2016.append(suma_dia)\n else:\n suma2016.append(suma_dia/numero_albaran)\n if anyo == '2018':\n if mes in ['10','11', '12']:\n if numero_albaran==0:\n suma2018.append(suma_dia)\n else:\n suma2018.append(suma_dia/numero_albaran)\n if mes in ['01']:\n if numero_albaran==0:\n suma2017.append(suma_dia)\n else:\n suma2017.append(suma_dia/numero_albaran)\n if anyo == '2019':\n if mes == '01':\n if numero_albaran==0:\n suma2018.append(suma_dia)\n else:\n suma2018.append(suma_dia/numero_albaran)\n\n\n\nprint('Suma All: ')\nprint(sumaall)\nprint('len')\nprint(len(sumaall))\nprint('\\n')\n\nprint('Suma 2015: ')\nprint(suma2015)\nprint('len')\nprint(len(suma2015))\nprint('\\n')\n\nprint('Suma 2016: 
')\nprint(suma2016)\nprint('len')\nprint(len(suma2016))\nprint('\\n')\n\nprint('Suma 2017: ')\nprint(suma2017)\nprint('len')\nprint(len(suma2017))\nprint('\\n')\n\nprint('Suma 2018: ')\nprint(suma2018)\nprint('len')\nprint(len(suma2018))\nprint('\\n')\n\n\nsuma_anyos = [suma2015, suma2016, suma2017, suma2018]\naux = 1\npyplot.figure()\nfor result in suma_anyos:\n pyplot.subplot(4, 1, aux)\n pyplot.plot(result)\n aux = aux+1\n\npyplot.show()\n\n\nsuma2015df = DataFrame(suma2015)\nsuma2015df = suma2015df.loc[~(suma2015df==0).all(axis=1)]\n#suma2015df.to_csv('files/media_acidez_dias_2015.csv')\nsuma2015df = suma2015df.values\nsuma2016df = DataFrame(suma2016)\nsuma2016df = suma2016df.loc[~(suma2016df==0).all(axis=1)]\n#suma2016df.to_csv('files/media_acidez_dias_2016.csv')\nsuma2016df = suma2016df.values\nsuma2017df = DataFrame(suma2017)\nsuma2017df = suma2017df.loc[~(suma2017df==0).all(axis=1)]\n#suma2017df.to_csv('files/media_acidez_dias_2017.csv')\nsuma2017df = suma2017df.values\nsuma2018df = DataFrame(suma2018)\nsuma2018df = suma2018df.loc[~(suma2018df==0).all(axis=1)]\n#suma2018df.to_csv('files/media_acidez_dias_2018.csv')\nsuma2018df = suma2018df.values\n\n\n\nprint('Suma 2015 Drop zeros: ')\nprint(suma2015df)\nprint('len')\nprint(len(suma2015df))\nprint('\\n')\n\nprint('Suma 2016 Drop zeros: ')\nprint(suma2016df)\nprint('len')\nprint(len(suma2016df))\nprint('\\n')\n\nprint('Suma 2017 Drop zeros: ')\nprint(suma2017df)\nprint('len')\nprint(len(suma2017df))\nprint('\\n')\n\nprint('Suma 2018 Drop zeros: ')\nprint(suma2018df)\nprint('len')\nprint(len(suma2018df))\nprint('\\n')\n\n\n\nsuma_anyos_df = [suma2015df, suma2016df, suma2017df, suma2018df]\naux = 1\npyplot.figure()\nfor result in suma_anyos_df:\n pyplot.subplot(4, 1, aux)\n pyplot.plot(result)\n aux = aux+1\n\npyplot.show()\n\ndatos_acidez = np.concatenate((suma2015df, suma2016df, suma2017df, suma2018df))\n\nprint('Datos acidez:')\nprint(datos_acidez)\n\n\ncsv_media_ponderada_acidez = DataFrame(datos_acidez)\ncsv_media_ponderada_acidez.to_csv('files/csv_media_ponderada_acidez.csv')\ndatos = datos_acidez\n#datos = datos.astype('float32')\n\npyplot.plot(datos[:])\npyplot.show()\n\nnp.random.seed(7)\n\n\n\nminmax = MinMaxScaler(feature_range=(0,1))\n#max(conjunto) = 622, min(conjunto)=104. 
112-104/622-104 = 0.015\nconjunto = minmax.fit_transform(datos)\nprint('El conjunto normalizado: \\n' + str(conjunto))\n\n#Como hemos de tener un conjunto de entrenamiento y uno de test, vamos a separar el que tenemos en dos\n#un 67% de los datos los usaremos para entrenar y el resto para el conjunto de prueba\ntamaño_entrenamiento = int(len(conjunto) * 0.67)\ntamaño_test = len(conjunto) - tamaño_entrenamiento\nentrenamiento = conjunto[0:tamaño_entrenamiento]\ntest = conjunto[tamaño_entrenamiento:len(conjunto)]\n\nprint('Tamaño conjunto entrenamiento: ' +\n str(len(entrenamiento)), 'Tamaño conjunto pruebas: ' +\n str(len(test)), 'Tamaño total conjunto: ' +str(len(conjunto)))\nprint('Conjunto entrenamiento: \\n' + str(entrenamiento))\nprint('Conjunto pruebas: \\n' + str(test))\n\n\n#Creamos los conjuntos de dato con el formato adecuado para LSTM\n\ndef datosX (conjunto):\n aux = []\n for i in range (len(conjunto)-2):\n a = [conjunto[i]]\n aux.append(a)\n return np.array(aux)\n\ndef datosY (conjunto):\n aux = []\n for i in range (len(conjunto)-2):\n aux.append(conjunto[i+1,0])\n return np.array(aux)\n\n\nentrenamientoX = datosX(entrenamiento)\nentrenamientoY = datosY(entrenamiento)\n\npruebasX = datosX(test)\npruebasY = datosY(test)\n\n\n\nprint('Conjunto de entrenamiento t mio bien: \\n' + str(entrenamientoX))\nprint('Con shape')\nprint(entrenamientoX.shape)\nprint('Conjunto de entrenamiento t+1 mio: \\n' + str(entrenamientoY))\nprint('Conjunto prueba t: \\n' + str(pruebasX))\nprint('Conjunto prueba t+1: \\n' + str(pruebasY))\n\n\n\n# creamos y entrenamos la red LSTM\npaso_atras = 1 #de cuántas unidades de tiempo son los pasos\nmodel = Sequential()\nmodel.add(LSTM(4, input_shape=(1, paso_atras)))\nmodel.add(Dense(1))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\n\nmodel.fit(entrenamientoX, entrenamientoY, epochs=100, batch_size=1, verbose=2)\n\n\n# Hacemos la predicción\ntrainPredict = model.predict(entrenamientoX)\nprint('La prediccion de la red neuronal para los datos de entrenamiento: \\n' + str(trainPredict))\ntestPredict = model.predict(pruebasX)\nprint('La prediccion de la red neuronal para los datos de test: \\n' + str(testPredict))\n# Invertir el normalizado para obtener los datos de pasajeros en escala unidades de 1000\ntrainPredict = minmax.inverse_transform(trainPredict)\nprint('La prediccion de la red neuronal para los datos de entrenamiento INVERTIDOS: \\n' + str(trainPredict))\ntrainY = minmax.inverse_transform([entrenamientoY])\ntestPredict = minmax.inverse_transform(testPredict)\nprint('La prediccion de la red neuronal para los datos de test INVERTIDOS: \\n' + str(testPredict))\ntestY = minmax.inverse_transform([pruebasY])\n# Calculamos el error cuadrático medio\ntrainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))\n\n# shift train predictions for plotting\ntrainPredictPlot = np.empty_like(conjunto)\ntrainPredictPlot[:, :] = np.nan\ntrainPredictPlot[paso_atras:len(trainPredict)+paso_atras, :] = trainPredict\n# shift test predictions for plotting\ntestPredictPlot = np.empty_like(conjunto)\ntestPredictPlot[:, :] = np.nan\ntestPredictPlot[len(trainPredict)+(paso_atras*2)+1:len(conjunto)-1, :] = testPredict\n# plot baseline and 
predictions\npyplot.plot(minmax.inverse_transform(conjunto))\npyplot.plot(trainPredictPlot)\npyplot.plot(testPredictPlot)\npyplot.show()","sub_path":"old/ACIDEZ_MEDIA_GILENA.py","file_name":"ACIDEZ_MEDIA_GILENA.py","file_ext":"py","file_size_in_byte":9426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"338265630","text":"# Test a sample\n# give a few points\n# based on max x y and z\n# assign respective tags :D\nimport ansystotal.genericoperations.mesh_information_structures as mis\nfrom collections import defaultdict\n\n\n# function taking in the points list as an input\n\ndef find_face_centroids(points_list):\n cube_faces = defaultdict()\n\n max_x = points_list[0].get_x()\n min_x = points_list[0].get_x()\n max_y = points_list[0].get_y()\n min_y = points_list[0].get_y()\n max_z = points_list[0].get_z()\n min_z = points_list[0].get_z()\n\n # max_x_node = '' # Right Face\n # min_x_node = '' # Left Face\n # max_y_node = '' # Upper Face\n # min_y_node = '' # Lower Face\n # max_z_node = '' # Back Face\n # min_z_node = '' # Front Face\n\n for point in points_list:\n if point.get_x() > max_x:\n max_x = point.get_x()\n max_x_node = point.get_label()\n cube_faces['right_face'] = max_x_node\n\n if point.get_x() < min_x:\n min_x = point.get_x()\n min_x_node = point.get_label()\n cube_faces['left_face'] = min_x_node\n\n if point.get_y() > max_y:\n max_y = point.get_y()\n max_y_node = point.get_label()\n cube_faces['upper_face'] = max_y_node\n\n if point.get_y() < min_y:\n min_y = point.get_y()\n min_y_node = point.get_label()\n cube_faces['lower_face'] = min_y_node\n\n if point.get_z() > max_z:\n max_z = point.get_z()\n max_z_node = point.get_label()\n cube_faces['back_face'] = max_z_node\n\n if point.get_z() < min_z:\n min_z = point.get_z()\n min_z_node = point.get_label()\n cube_faces['front_face'] = min_z_node\n\n\n\n # print(\"Right Face {}: {}\".format(max_x, cube_faces['right_face']))\n # print(\"Left Face {}: {}\".format(min_x, cube_faces['left_face']))\n # let the dictionary be cube_faces = {'max_x': 'label'}\n # max = max(x.get_x() for x in points_list)\n # print(max)\n return cube_faces\n\nif __name__ == '__main__':\n p1 = mis.Point(1.4, -4.1, 0.5, '1')\n p2 = mis.Point(6.5, -10, 5, '2')\n p3 = mis.Point(1.5, 0.2, 1.1, '3')\n p4 = mis.Point(-3.2, 1.2, -5.1, '4')\n\n pts_list = [p1, p2, p3, p4]\n\n find_face_centroids(pts_list)\n","sub_path":"test_functions/find_points_cube_sides.py","file_name":"find_points_cube_sides.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"328490208","text":"#!/usr/bin/python\n#\n# Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\n\ndef main(argv):\n pass\n\n\nif __name__ == '__main__':\n main(sys.argv)\nfrom lib import components, utils\n\nclass Command:\n required_keys = [\n {\"key\": \"with\", \"types\": str}\n ]\n\n def __init__(self, config, required_instruction, component_key):\n utils.ConfigValidator.validate_dict(__class__, config)\n self.required_instruction = required_instruction\n self.components = [components.get_component_class(config[\"with\"], component_config) for component_config in config[component_key]]\n\n def get_lines(self):\n pass\n\n def get_tools(self):\n return [component for component in self.components]\n\n def get_tool_names(self):\n return [component.name for component in self.components]\n\n def get_tool_infos(self):\n return [component.get_info() for component in self.components]\n\nclass AptGet(Command):\n required_keys = [\n {\"key\": \"tools\", \"types\": list}\n ]\n\n def __init__(self, config):\n Command.__init__(self, config, \"run\", \"tools\")\n\n def get_lines(self):\n return (line for line in [\n \"apt-get --yes update && apt-get --yes install {}\".format(' '.join(self.get_tool_names())),\n \"rm -rf /var/lib/apt/lists\"\n ])\n\nclass Curl(Command):\n required_keys = [\n {\"key\": \"tools\", \"types\": list}\n ]\n\n def __init__(self, config):\n Command.__init__(self, config, \"run\", \"tools\")\n\n def get_lines(self):\n for tool in self.components:\n if tool.to is not None:\n yield \"curl -sLf {} -o {}\".format(Curl.get_download_location(tool), tool.to)\n else:\n yield \"curl -sLf {} -o /bin/{} && chmod 755 /bin/{}\".format(Curl.get_download_location(tool), tool.name, tool.name)\n\n @staticmethod\n def get_download_location(tool):\n from_url = tool.get_from\n if tool.version is not None:\n from_url = tool.get_from.format(version=tool.version.get())\n return from_url\n\nclass Execute(Command):\n required_keys = [\n {\"key\": \"execute\", \"types\": list}\n ]\n\n def __init__(self, config):\n Command.__init__(self, config, \"run\", \"execute\")\n\n def get_lines(self):\n return (component.command.rstrip() for component in self.components)\n\nclass Copy(Command):\n required_keys = [\n {\"key\": \"tools\", \"types\": list}\n ]\n\n def __init__(self, config,):\n Command.__init__(self, config, \"copy\", \"tools\")\n\n def get_lines(self):\n for component in self.components:\n yield \"{} {}\".format(component.get_from, component.to)\n\nclass Pip(Command):\n required_keys = [\n {\"key\": \"tools\", \"types\": list}\n ]\n\n def __init__(self, config):\n Command.__init__(self, config, \"run\", \"tools\")\n\n def get_lines(self):\n yield \"pip install {}\".format(' '.join(self.get_tool_names()))\n\nclass Export(Command):\n required_keys = [\n {\"key\": \"export\", \"types\": list}\n ]\n\n def __init__(self, config):\n Command.__init__(self, config, \"env\", \"export\")\n\n def get_lines(self):\n yield ' '.join(self.get_tool_names())\n\nclass AddAptGetRepo(Command):\n 
required_keys = [\n {\"key\": \"repos\", \"types\": list}\n ]\n\n def __init__(self, config):\n Command.__init__(self, config, \"run\", \"repos\")\n\n def get_lines(self):\n command = [\n 'apt-get --yes update && apt-get --yes install lsb-release gnupg apt-transport-https',\n ]\n for component in self.components:\n repo_name = component.name\n repo_url = component.url\n download_command = [\n 'REPO=\"{}$(lsb_release -cs)\"'.format(component.release_prefix),\n 'echo \"deb {} $REPO main\" | tee /etc/apt/sources.list.d/{}.list'.format(repo_url, repo_name),\n 'curl -sL {} | apt-key add -'.format(component.key_url),\n ]\n command.extend(download_command)\n\n command.extend([\n \"apt-get --yes --purge remove lsb-release gnupg apt-transport-https\",\n \"rm -rf /var/lib/apt/lists\"\n ])\n return (line for line in command)\n\nclass Git(Command):\n required_keys = [\n {\"key\": \"tools\", \"types\": list}\n ]\n\n def __init__(self, config):\n Command.__init__(self, config, \"run\", \"tools\")\n\n def get_lines(self):\n command_lines = []\n for component in self.components:\n command_lines.append(\"git -c http.sslVerify=false clone {} {}\".format(component.get_from, component.to))\n if component.version is not None:\n command_lines.append('git -C {} checkout {}'.format(component.to, component.version.get()))\n return (line for line in command_lines)\n\n\nsupported_commands = {\n \"apt-get\": AptGet,\n \"curl\": Curl,\n \"bash\": Execute,\n \"copy\": Copy,\n \"pip\": Pip,\n \"env\": Export,\n \"add-apt-repo\": AddAptGetRepo,\n \"git\": Git\n}\n\ndef create_command(config):\n utils.ConfigValidator.validate_dict(Command, config)\n class_name = supported_commands.get(config[\"with\"])\n if class_name is None:\n print(\"{} is not a supported dockerfile config command\".format(config[\"with\"]))\n exit(1)\n instance = class_name(config)\n return instance\n\ndef generate_welcome_message(commands):\n basic_tools = []\n custom_shell_commands = []\n exported_environments = []\n for command in commands:\n if isinstance(command, (AptGet, Copy, Curl, Pip, Git)):\n basic_tools.extend([info for info in command.get_tool_infos() if info is not None])\n elif isinstance(command, (Execute)):\n custom_shell_commands.extend([info for info in command.get_tool_infos() if info is not None])\n elif isinstance(command, Export):\n exported_environments.extend([info for info in command.get_tool_infos() if info is not None])\n return \"\"\"\n{}\\\\n\\\\\n\\\\n\\\\\n{}\\\\n\\\\\nThe following variables have been exported:\\\\n\\\\\n{}\"\"\".format(' '.join(basic_tools), '\\\\n\\\\\\n'.join(custom_shell_commands), ' '.join(exported_environments))\n\n","sub_path":"generator/lib/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"257365979","text":"import os\n\nfrom flask import Blueprint, render_template, request, url_for, session\nfrom werkzeug.utils import redirect\n\n\nfrom src.model import SupermarketForm\nfrom src.utils import get_data, add_data\n\nsupermarkets = Blueprint('supermarkets', __name__, template_folder='templates',\n static_folder='static', static_url_path='/routes/Supermarket/static')\n\nSUPERMARKET_LIST = \"super.json\"\n\n\n@supermarkets.route('/all_supermarkets', methods=['GET'])\ndef get_all_supermarkets():\n return render_template('all_supermarkets.html', data=get_data(SUPERMARKET_LIST))\n\n\n@supermarkets.route('/all_supermarkets', methods=['POST'])\ndef 
get_some_supermarkets():\n return redirect(url_for('all_supermarkets?location=<location>',\n location=request.form.get('location'), data=get_data(SUPERMARKET_LIST)))\n\n\n@supermarkets.route('/supermarket/<s_id>', methods=['GET', 'POST'])\ndef get_supermarket(s_id):\n for i in get_data(SUPERMARKET_LIST):\n if i['id'] == s_id:\n session[i['id']] = True\n return render_template('supermarket.html',\n name=i['name'],\n location=i['location'],\n image=i['img_name'],\n id=i['id'])\n else:\n return render_template('404_page.html')\n\n\n@supermarkets.route('/add_supermarket', methods=['GET', 'POST'])\ndef add_supermarket():\n form = SupermarketForm()\n return render_template('add_supermarket.html', form=form)\n\n\n@supermarkets.route('/save', methods=['POST'])\ndef save_supermarket():\n form = SupermarketForm()\n\n s = {'id': form.id,\n 'name': request.form.get('name'),\n 'location': str(request.form.get('location')).title(),\n 'img_name': upload_file()}\n\n if get_data(SUPERMARKET_LIST):\n data = get_data(SUPERMARKET_LIST)\n data.append(s)\n add_data(data, SUPERMARKET_LIST)\n else:\n add_data(s, SUPERMARKET_LIST)\n return render_template('all_supermarkets.html', data=get_data(SUPERMARKET_LIST))\n\n\n@supermarkets.route('/upload', methods=['GET', 'POST'])\ndef upload_file():\n if request.files['image']:\n f = request.files['image']\n f.save(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static', f.filename))\n return f.filename\n else:\n return 'no_image.png'\n","sub_path":"flask_lesson2/src/routes/Supermarket/supermarkets.py","file_name":"supermarkets.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"453295416","text":"'''19. Leia o valor do raio de uma esfera,\ncalcule e escreva seu volume.\n(v = (4 * p * r 3 ) / 3) (p = 3,14)\n'''\n\n# Entrada\nPI = 3.14\nRAIO_ESFERA = float(input('Insira o raio de uma esfera: '))\n\n# Processamento\n# Calcula o volume da esfera de acordo com a fórmula:\n# volume = 4 * pi * raio³ ) / 3) (pi = 3,14)\nVOLUME_ESFERA = (4 * PI * RAIO_ESFERA**3)/3\n\n# Saída\nprint(\"O volume da esfera é: {}\".format(VOLUME_ESFERA))\n","sub_path":"Atividade_Fabio01/fabio01_19_volume.py","file_name":"fabio01_19_volume.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"553070312","text":"\"\"\" Captcha.Visual.Tests\n\nVisual CAPTCHA tests\n\nSimpleCaptcha Package\nForked from PyCAPTCHA Copyright (C) 2004 Micah Dowty <micah@navi.cx>\n\"\"\"\nfrom simplecaptcha.visual import text, backgrounds, distortions, ImageCaptcha\nfrom simplecaptcha import words\nimport random\n\n__all__ = [\"PseudoGimpy\", \"AngryGimpy\", \"AntiSpam\"]\n\n\nclass PseudoGimpy(ImageCaptcha):\n \"\"\"A relatively easy CAPTCHA that's somewhat easy on the eyes\"\"\"\n\n def get_layers(self):\n word = words.default_word_list.pick()\n self.add_solution(word)\n return [\n random.choice([\n backgrounds.CroppedImage(),\n backgrounds.TiledImage(),\n ]),\n text.TextLayer(word, border_size=1),\n distortions.SineWarp(),\n ]\n\n\nclass AngryGimpy(ImageCaptcha):\n \"\"\"A harder but less visually pleasing CAPTCHA\"\"\"\n def get_layers(self):\n\n word = words.default_word_list.pick()\n self.add_solution(word)\n return [\n backgrounds.TiledImage(),\n backgrounds.RandomDots(),\n text.TextLayer(word, border_size=1),\n distortions.WigglyBlocks(),\n ]\n\n\nclass AntiSpam(ImageCaptcha):\n \"\"\"A fixed-solution CAPTCHA that can 
be used to hide email addresses or\n URLs from bots\"\"\"\n\n font_factory = text.FontFactory(20, \"vera/VeraBd.ttf\")\n default_size = (512,50)\n\n def get_layers(self, solution=\"murray@example.com\"):\n self.add_solution(solution)\n\n text_layer = text.TextLayer(solution,\n border_size=2,\n font_factory=self.font_factory)\n\n return [\n backgrounds.CroppedImage(),\n text_layer,\n distortions.SineWarp(amplitude_range=(2, 4)),\n ]\n","sub_path":"simplecaptcha/visual/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"52813690","text":"from time import sleep\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom behave import given, when, then\nfrom selenium.webdriver.common.by import By\n\nVIDEO_1 = (By.CSS_SELECTOR, \".style-scope ytd-rich-grid-renderer ytd-rich-item-renderer\")\nLIKE = (By.CSS_SELECTOR, \"a.yt-simple-endpoint.style-scope.ytd-toggle-button-renderer\")\nLIKE_VIDEO = (By.XPATH, \"//*[text()='Like this video?']\")\nPREMIUM_POPUP = (By.CSS_SELECTOR, \"ytd-button-renderer#action-button\")\nADS = (By.CSS_SELECTOR, \".video-ads.ytp-ad-module\")\nSKIP = (By.CSS_SELECTOR, \".ytp-ad-skip-button.ytp-button\")\n\n\n@given('Open Youtube page')\ndef open_youtube(context):\n context.driver.get(\"https://www.youtube.com/\")\n\n\n@when(\"Click on the first video\")\ndef choose_video(context):\n context.driver.find_element(*VIDEO_1).click()\n sleep(2)\n\n\n@then(\"Click on Like\")\ndef like_video(context):\n premium_popup = context.driver.find_elements(*PREMIUM_POPUP)\n ads = context.driver.find_elements(*ADS)\n if len(ads) > 0:\n sleep(6)\n if 'Skip' in ads[0].text:\n context.driver.find_element(*SKIP).click()\n print(\"add was skipped\")\n elif len(premium_popup) > 0:\n context.wait.until(EC.element_to_be_clickable, context.driver.find_element(*PREMIUM_POPUP)).click()\n print(\"ad was skipped\")\n else:\n context.driver.find_element(*LIKE).click()\n sleep(3)\n like_video_text = context.driver.find_element(*LIKE_VIDEO).text\n assert 'Like' in like_video_text, f'{like_video_text} displayed instead'\n print('Liked this video')\n\n\n\n","sub_path":"features/steps/youtube_premium.py","file_name":"youtube_premium.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"311630212","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Genera un data.json a partir de un catálogo en excel.\n\"\"\"\n\nimport os\nimport sys\nimport shutil\nfrom io import StringIO\n\nimport traceback\nimport pandas as pd\nimport arrow\nimport logging\nfrom openpyxl import load_workbook\nfrom series_tiempo_ar import TimeSeriesDataJson\nimport pydatajson.readers as readers\nimport pydatajson.writers as writers\nfrom pydatajson.ckan_reader import read_ckan_catalog\n\nfrom helpers import get_logger, ensure_dir_exists, get_catalogs_index\nfrom helpers import print_log_separator, get_general_config, is_http_or_https\nfrom helpers import get_catalog_download_config, download_with_config\nfrom paths import SCHEMAS_DIR, REPORTES_DIR, BACKUP_CATALOG_DIR, CATALOGS_DIR,\\\n CATALOGS_DIR_INPUT\n\nfrom paths import EXTRACTION_MAIL_CONFIG\n\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nNOW = arrow.now().isoformat()\nTODAY = arrow.now().format('YYYY-MM-DD')\n\nlogger = get_logger(os.path.basename(__file__))\n\n# pydatajson.ckan_reader modifica el root logger, agregando outputs no 
deseados\n# a la pantalla. Evitar que los logs se propaguen al root logger.\nlogger.propagate = False\n\n\ndef read_xlsx_catalog(catalog_xlsx_path, logger=None):\n \"\"\"Lee catálogo en excel.\"\"\"\n\n default_values = {\n # \"catalog_modified\": NOW,\n # \"dataset_issued\": NOW,\n # \"distribution_issued\": NOW,\n # \"dataset_modified\": NOW,\n # \"distribution_modified\": NOW\n }\n\n catalogo = readers.read_xlsx_catalog(catalog_xlsx_path, logger)\n catalogo = TimeSeriesDataJson(catalogo, default_values=default_values)\n\n clean_catalog(catalogo)\n\n return catalogo\n\n\ndef clean_catalog(catalog):\n\n for dataset in catalog[\"dataset\"]:\n for distribution in dataset[\"distribution\"]:\n if \"field\" in distribution:\n for field in distribution[\"field\"]:\n if \"title\" in field:\n field[\"title\"] = field[\"title\"].replace(\" \", \"\")\n if \"id\" in field:\n field[\"id\"] = field[\"id\"].replace(\" \", \"\")\n\n\ndef write_json_catalog(catalog_id, catalog, catalog_json_path):\n \"\"\"Escribe catálogo en JSON y guarda una copia con fecha.\"\"\"\n catalog_backup_json_path = os.path.join(BACKUP_CATALOG_DIR, catalog_id,\n \"data-{}.json\".format(TODAY))\n\n # crea los directorios necesarios\n ensure_dir_exists(os.path.dirname(catalog_json_path))\n ensure_dir_exists(os.path.dirname(catalog_backup_json_path))\n\n writers.write_json_catalog(catalog, catalog_json_path)\n writers.write_json_catalog(catalog, catalog_backup_json_path)\n\n\ndef validate_and_filter(catalog_id, catalog, warnings_log):\n \"\"\"Valida y filtra un catálogo en data.json.\"\"\"\n dj = TimeSeriesDataJson(\n catalog, schema_filename=\"catalog.json\", schema_dir=SCHEMAS_DIR)\n\n # valida todo el catálogo para saber si está ok\n is_valid_catalog = dj.is_valid_catalog()\n logger.info(\n \"Metadata a nivel de catálogo es válida? 
{}\".format(is_valid_catalog))\n\n # genera directorio de reportes para el catálogo\n reportes_catalog_dir = os.path.join(REPORTES_DIR, catalog_id)\n ensure_dir_exists(reportes_catalog_dir)\n\n # genera reporte de validación completo\n dj.validate_catalog(\n only_errors=True,\n fmt=\"list\",\n export_path=os.path.join(\n reportes_catalog_dir,\n EXTRACTION_MAIL_CONFIG[\"attachments\"][\"errors_report\"]))\n\n # genera reporte de datasets para federación\n dj.generate_datasets_report(\n catalog,\n harvest='valid',\n export_path=os.path.join(\n reportes_catalog_dir,\n EXTRACTION_MAIL_CONFIG[\"attachments\"][\"datasets_report\"]))\n\n # genera mensaje de reporte\n subject, message = generate_validation_message(\n catalog_id, is_valid_catalog, warnings_log)\n _write_extraction_mail_texts(catalog_id, subject, message)\n\n # genera catálogo filtrado por los datasets que no tienen error\n catalog_filtered = dj.generate_harvestable_catalogs(\n catalog, harvest='valid')[0]\n\n return catalog_filtered\n\n\ndef _write_extraction_mail_texts(catalog_id, subject, message):\n\n # genera directorio de reportes para el catálogo\n reportes_catalog_dir = os.path.join(REPORTES_DIR, catalog_id)\n ensure_dir_exists(reportes_catalog_dir)\n\n with open(\n os.path.join(reportes_catalog_dir,\n EXTRACTION_MAIL_CONFIG[\"subject\"]), \"wb\") as f:\n f.write(subject.encode(\"utf-8\"))\n with open(\n os.path.join(reportes_catalog_dir,\n EXTRACTION_MAIL_CONFIG[\"message\"]), \"wb\") as f:\n f.write(message.encode(\"utf-8\"))\n\n\ndef generate_validation_message(catalog_id, is_valid_catalog, warnings_log):\n \"\"\"Genera asunto y mensaje del mail de reporte a partir de indicadores.\n\n Args:\n catalog_id (str): Identificador del catálogo\n is_valid_catalog (bool): Indica si el catálogo está libre de errores.\n\n Return:\n tuple: (str, str) (asunto, mensaje)\n \"\"\"\n server_environment = get_general_config()[\"environment\"]\n\n # asunto del mail\n subject = \"[{}] Validacion de catalogo '{}': {}\".format(\n server_environment, catalog_id,\n arrow.now().format(\"DD/MM/YYYY HH:mm\"))\n\n # mensaje del mail\n if isinstance(warnings_log, Exception):\n warnings_str = repr(warnings_log).encode(\"utf8\")\n else:\n warnings_str = warnings_log.getvalue()\n if is_valid_catalog and len(warnings_str) == 0:\n message = \"El catálogo '{}' no tiene errores.\".format(catalog_id)\n else:\n message = \"El catálogo '{}' tiene errores.\".format(catalog_id)\n message += \"\\n{}\".format(warnings_str)\n\n return subject, message\n\n\ndef process_catalog(catalog_id,\n catalog_format,\n catalog_url,\n catalogs_dir=CATALOGS_DIR):\n \"\"\"Descarga y procesa el catálogo.\n\n Transforma catálogos en distintos formatos a data.json, valida y actualiza\n algunos campos de metadatos y emite informes a los administradores.\n\n Args:\n catalog_id (str): Identificador del catálogo.\n catalog_format (str): Uno de \"xlsx\", \"json\" o \"ckan\".\n catalog_url (str): Url pública desde donde descargar el catálogo.\n catalogs_dir (str): Directorio local donde se descargan los catálogos.\n \"\"\"\n\n # loggea warnings en un objeto para el mensaje de reporte\n warnings_log = StringIO()\n\n fh = logging.StreamHandler(warnings_log)\n fh.setLevel(logging.WARNING)\n logger.addHandler(fh)\n\n # crea directorio y template de path al catálogo y reportes\n catalog_dir = os.path.join(catalogs_dir, catalog_id)\n ensure_dir_exists(catalog_dir)\n catalog_path_template = os.path.join(catalog_dir, \"{}\")\n\n # crea directorio y template de path para catálogo original\n 
catalog_input_dir = os.path.join(CATALOGS_DIR_INPUT, catalog_id)\n ensure_dir_exists(catalog_input_dir)\n catalog_input_path_template = os.path.join(catalog_input_dir, \"{}\")\n\n # procesa el catálogo dependiendo del formato\n logger.info('=== Catálogo {} ==='.format(catalog_id.upper()))\n try:\n logger.info('Descarga y lectura de catálogo')\n extension = catalog_format.lower()\n\n if extension in ['xlsx', 'json']:\n config = get_catalog_download_config(catalog_id)[\"catalog\"]\n catalog_input_path = catalog_input_path_template.format(\n \"catalog.\" + extension)\n\n if is_http_or_https(catalog_url):\n download_with_config(catalog_url, catalog_input_path, config)\n else:\n shutil.copy(catalog_url, catalog_input_path)\n\n if extension == 'xlsx':\n logger.info('Transformación de XLSX a JSON')\n catalog = TimeSeriesDataJson(\n read_xlsx_catalog(catalog_input_path, logger))\n else:\n catalog = TimeSeriesDataJson(catalog_input_path)\n\n elif extension == 'ckan':\n logger.info('Transformación de CKAN API a JSON')\n catalog = TimeSeriesDataJson(read_ckan_catalog(catalog_url))\n\n else:\n raise ValueError(\n '{} no es una extension valida para un catalogo.'.format(\n file_ext))\n\n # filtra, valida y escribe el catálogo en JSON y XLSX\n if catalog and len(catalog) > 0:\n logger.info(\"Valida y filtra el catálogo\")\n catalog_filtered = validate_and_filter(catalog_id, catalog,\n warnings_log)\n\n logger.info('Escritura de catálogo en JSON')\n write_json_catalog(catalog_id, catalog_filtered,\n catalog_path_template.format(\"data.json\"))\n\n # logger.info('Escritura de catálogo en XLSX')\n # writers.write_xlsx_catalog(\n # catalog_filtered, catalog_path_template.format(\"catalog.xlsx\"))\n else:\n raise Exception(\n \"El catálogo {} no se pudo generar\".format(catalog_id))\n\n # genera reportes del catálogo\n # logger.info('Generación de reportes')\n # catalog_filtered.generate_catalog_readme(\n # catalog_filtered,\n # export_path=catalog_path_template.format('README.md'))\n # catalog_filtered.generate_datasets_summary(\n # catalog_filtered,\n # export_path=catalog_path_template.format('datasets.csv'))\n\n except Exception as e:\n logger.error('Error al procesar el catálogo: {}'.format(catalog_id))\n for line in traceback.format_exc().splitlines():\n logger.error(line)\n subject, message = generate_validation_message(catalog_id, False, e)\n _write_extraction_mail_texts(catalog_id, subject, message)\n finally:\n logger.removeHandler(fh)\n\n\ndef main():\n print_log_separator(logger, \"Extracción de catálogos\")\n\n # cargo los parámetros de los catálogos a extraer\n catalogs_index = get_catalogs_index()\n logger.info(\"HAY {} CATALOGOS\".format(len(catalogs_index)))\n\n # procesa los catálogos\n for catalog_id in catalogs_index:\n process_catalog(catalog_id, catalogs_index[catalog_id][\"formato\"],\n catalogs_index[catalog_id][\"url\"], CATALOGS_DIR)\n\n logger.info('>>> FIN DE LA EXTRACCION DE CATALOGOS <<<')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/extract_catalogs.py","file_name":"extract_catalogs.py","file_ext":"py","file_size_in_byte":10503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"650679778","text":"#!/home/graham/anaconda2/bin/python\nimport os\nimport cv2\nimport sys\nimport argparse\nfrom . import ml_tools as ML\nfrom . import file_tools as FT\n\n# THE PATHNAME TO THE MODELS APPLIED BY THIS MODULE\n# is defined here. 
Alternative pathways based on\n# whether this script is being called directly from\n# its directory or used as a module by predict_aac_scores_2.py\n# from the directory above.\ncurrPath = os.path.dirname(os.path.realpath(__file__))\nmodPath = os.path.join(currPath, '../models_mod2')\n\n#if os.path.basename(os.getcwd())=='modules_mod2': modPath = '../models_mod2'\n#else: modPath = 'models_mod2'\n\n# This module applies three tensorflow models. For each, there is a\n# model weights file (.pb) and a model output labels file (.labels.txt).\n# \npModNameList = [\"aac_bi_l3l4_low\",\"aac_bi_l3l4_high\",\"background_l3l4\"]\npModels,pLabels = {},{}\nfor modName in pModNameList:\n pModels[modName] = os.path.join(modPath,modName+'.pb')\n pLabels[modName] = os.path.join(modPath,modName+'.labels.txt')\npModToAppend = {}\npModToAppend[\"aac_bi_l3l4_low\"] = \"aacLo\"\npModToAppend[\"aac_bi_l3l4_high\"] = \"aacHi\"\npModToAppend[\"background_l3l4\"] = \"back\"\n \n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\",\"--input_dir\",\n help=\"the directory of files to be evaluated\")\n ap.add_argument(\"-o\",\"--output_file\",\n help=\"file for writing the results (.tsv)\")\n args = vars(ap.parse_args())\n runAnalysis(args)\n \ndef runAnalysis(args):\n if len(args[\"output_file\"]) < 5:\n raise ValueError(\"outfile must be .tsv\")\n if not(args[\"output_file\"].split('.')[-1]==\"tsv\"):\n raise ValueError(\"outfile must be .tsv\")\n outBase = '.'.join(args[\"output_file\"].split('.')[:-1])\n\n for pm in pModNameList:\n classMod = ML.TfClassApplyer(pModels[pm],pLabels[pm])\n imgMang = FT.ImageIterator(args[\"input_dir\"])\n outfName = '.'.join([outBase,pModToAppend[pm],'tsv'])\n outf = open(outfName,'w')\n \n imgMang.initiateSort()\n while imgMang.isSorting():\n infName = imgMang.getImgFile()\n imgMang.moveToNext()\n # My full-analysis pipeline will place .tsv files\n # in this directory, so I'll skip those\n if infName.split('.')[-1]!='tsv':\n img = cv2.imread(infName)\n if img is None or img.shape[0]==0 or img.shape[1]==0:\n print(infName)\n if img is None: print(\"\\tis an empty image\")\n else: print(\"\\tis an empty image of shape \"+str(img.shape))\n else:\n res = classMod.getClasses(img)\n fId = os.path.basename(infName).split('.')[0]\n outL = [fId+'.png']\n for c in res.labels():\n outL.append( (c,res.score(c)) )\n outf.write('\\t'.join(list(map(str,outL)))+'\\n')\n if outf!=sys.stdout: outf.close()\n\nif __name__ == \"__main__\": main()\n","sub_path":"model_2/modules_mod2/runApplyClassifiers.py","file_name":"runApplyClassifiers.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"39187877","text":"import pandas as pd\nimport numpy as np\nimport quandl\nimport math\nfrom sklearn import preprocessing, svm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\n\n\nquandl.ApiConfig.api_key = \"CQVM78g1Qisb7QCsmHgk\"\ndf = quandl.get('WIKI/GOOGL', start_date=\"2018-12-31\", end_date=\"2019-03-01\")\ndf['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close'] * 100\ndf['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100\n\ndf = df[['Adj. Close','HL_PCT','PCT_change','Adj. Volume']]\n\nforecast_col = 'Adj. 
Close'\ndf.fillna(-99999, inplace=True)\n\nforecast_out = int(math.ceil(0.0026*len(df)))\n\ndf['label'] = df[forecast_col].shift(-forecast_out)\n\nX = np.array(df.drop(['label'],1))\nX = preprocessing.scale(X)\nX = X[:-forecast_out]\nX_lately = X[-forecast_out:]\n\ndf.dropna(inplace=True)\n\ny = np.array(df['label'])\n\n\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2)\n\nclf = LinearRegression()\nclf.fit(X_train,y_train)\nconfidence = clf.score(X_test,y_test)\nprint(confidence)\n\nforecast_set = clf.predict(X_lately)\nprint(forecast_set)\n\n\n# Visualising Traning set results\n#plt.scatter(X_train, y_train, color = 'red')\n#plt.show()\n\n\n\n\n\n","sub_path":"learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"267796955","text":"import pytest\n\nfrom smartsim import Experiment, constants\n\n\"\"\"\nTest the launch of simple entity types on pre-existing allocations.\n\nAll entities will obtain the allocation from the environment of the\nuser\n\"\"\"\n\n# retrieved from pytest fixtures\nif pytest.test_launcher not in pytest.wlm_options:\n pytestmark = pytest.mark.skip(reason=\"Not testing WLM integrations\")\n\n\ndef test_models(fileutils, wlmutils):\n exp_name = \"test-models-launch\"\n exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())\n test_dir = fileutils.make_test_dir(exp_name)\n\n script = fileutils.get_test_conf_path(\"sleep.py\")\n settings = wlmutils.get_run_settings(\"python\", f\"{script} --time=5\")\n M1 = exp.create_model(\"m1\", path=test_dir, run_settings=settings)\n M2 = exp.create_model(\"m2\", path=test_dir, run_settings=settings)\n\n exp.start(M1, M2, block=True)\n statuses = exp.get_status(M1, M2)\n assert all([stat == constants.STATUS_COMPLETED for stat in statuses])\n\n\ndef test_ensemble(fileutils, wlmutils):\n exp_name = \"test-ensemble-launch\"\n exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())\n test_dir = fileutils.make_test_dir(exp_name)\n\n script = fileutils.get_test_conf_path(\"sleep.py\")\n settings = wlmutils.get_run_settings(\"python\", f\"{script} --time=5\")\n ensemble = exp.create_ensemble(\"e1\", run_settings=settings, replicas=2)\n ensemble.set_path(test_dir)\n\n exp.start(ensemble, block=True)\n statuses = exp.get_status(ensemble)\n assert all([stat == constants.STATUS_COMPLETED for stat in statuses])\n\n\ndef test_summary(fileutils, wlmutils):\n \"\"\"Fairly rudimentary test of the summary dataframe\"\"\"\n\n exp_name = \"test-launch-summary\"\n exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())\n test_dir = fileutils.make_test_dir(exp_name)\n\n sleep = fileutils.get_test_conf_path(\"sleep.py\")\n bad = fileutils.get_test_conf_path(\"bad.py\")\n sleep_settings = wlmutils.get_run_settings(\"python\", f\"{sleep} --time=3\")\n bad_settings = wlmutils.get_run_settings(\"python\", f\"{bad} --time=6\")\n\n sleep = exp.create_model(\"sleep\", path=test_dir, run_settings=sleep_settings)\n bad = exp.create_model(\"bad\", path=test_dir, run_settings=bad_settings)\n\n # start and poll\n exp.start(sleep, bad)\n assert exp.get_status(bad)[0] == constants.STATUS_FAILED\n assert exp.get_status(sleep)[0] == constants.STATUS_COMPLETED\n\n summary_df = exp.summary()\n print(summary_df)\n row = summary_df.loc[0]\n\n assert sleep.name == row[\"Name\"]\n assert sleep.type == row[\"Entity-Type\"]\n assert 0 == int(row[\"RunID\"])\n assert 0 == int(row[\"Returncode\"])\n\n row_1 
= summary_df.loc[1]\n\n assert bad.name == row_1[\"Name\"]\n assert bad.type == row_1[\"Entity-Type\"]\n assert 0 == int(row_1[\"RunID\"])\n assert 0 != int(row_1[\"Returncode\"])\n","sub_path":"tests/on_wlm/test_simple_entity_launch.py","file_name":"test_simple_entity_launch.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"516550728","text":"from django.test import TestCase\nfrom aliss.models import Organisation, ALISSUser, Location\n\nclass LocationTestCase(TestCase):\n def setUp(self):\n t = ALISSUser.objects.create(name=\"Mr Test\", email=\"tester@aliss.org\")\n u = ALISSUser.objects.create(name=\"Mr Updater\", email=\"updater@aliss.org\", is_editor=True)\n c = ALISSUser.objects.create(name=\"Mr Claimant\", email=\"claimant@aliss.org\")\n o = Organisation.objects.create(\n name=\"TestOrg\",\n description=\"A test description\",\n created_by=t,\n updated_by=u,\n claimed_by=c\n )\n l = Location.objects.create(\n name=\"my location\", street_address=\"my street\", locality=\"a locality\", \n postal_code=\"FK1 5XA\", latitude=50.0, longitude=13.0,\n organisation=o, created_by=t, updated_by=u\n )\n\n def test_location_exists(self):\n l = Location.objects.get(name=\"my location\", postal_code=\"FK1 5XA\")\n self.assertTrue(isinstance(l, Location))\n\n def test_user_delete_doesnt_cascade(self):\n ALISSUser.objects.get(email=\"tester@aliss.org\").delete()\n ALISSUser.objects.get(email=\"updater@aliss.org\").delete()\n ALISSUser.objects.get(email=\"claimant@aliss.org\").delete()\n self.test_location_exists()\n\n def test_org_delete_cascades(self):\n Organisation.objects.get(name=\"TestOrg\").delete()\n l = Location.objects.filter(name=\"my location\", postal_code=\"FK1 5XA\").exists()\n self.assertFalse(l)\n\n def test_is_edited_by(self):\n l = Location.objects.get(name=\"my location\", postal_code=\"FK1 5XA\")\n rep = ALISSUser.objects.get(email=\"claimant@aliss.org\")\n editor = ALISSUser.objects.filter(is_editor=True).first()\n punter = ALISSUser.objects.create(name=\"Ms Random\", email=\"random@random.org\")\n staff = ALISSUser.objects.create(name=\"Ms Staff\", email=\"msstaff@aliss.org\", is_staff=True)\n self.assertTrue(l.is_edited_by(l.organisation.created_by))\n self.assertTrue(l.is_edited_by(editor))\n self.assertTrue(l.is_edited_by(rep))\n self.assertTrue(l.is_edited_by(staff))\n self.assertFalse(l.is_edited_by(punter))","sub_path":"aliss/tests/models/test_location.py","file_name":"test_location.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"475199882","text":"\"\"\"Define a bunch of functions that change the request.\n\nThese functions determine the handelability of a request, and run in the order\ngiven here.\n\n\"\"\"\nimport logging\nimport os\nimport urllib\nimport urlparse\nfrom os.path import join, isfile, isdir, dirname, exists\n\nfrom aspen import Response\n\n\nlog = logging.getLogger('aspen.gauntlet')\n\n\ndef intercept_socket(request):\n \"\"\"Given a request object, return a tuple of (str, None) or (str, str).\n\n Intercept socket requests. 
We modify the filesystem path so that your\n application thinks the request was to /foo.sock instead of to\n /foo.sock/blah/blah/blah/.\n\n \"\"\"\n if request.path.raw.endswith('.sock'):\n # request.path.raw does not include querystring.\n raise Response(404)\n parts = request.path.raw.rsplit('.sock/', 1)\n if len(parts) == 1:\n path = parts[0]\n socket = None\n else:\n path = parts[0] + '.sock'\n socket = parts[1]\n request.path.raw, request.socket = path, socket\n #spam -- log.debug('gauntlet.intercept_socket: ' + request.path.raw)\n\ndef translate(request):\n \"\"\"Translate urlpath to fspath, returning urlpath parts.\n\n We specifically avoid removing symlinks in the path so that the filepath\n remains under the website root. Also, we don't want trailing slashes for\n directories in request.fs.\n\n \"\"\"\n parts = [request.root] + request.path.raw.lstrip('/').split('/')\n request.fs = os.sep.join(parts).rstrip(os.sep)\n request._parts = parts # store for use in processing virtual_paths\n #spam -- log.debug('gauntlet.translate: ' + request.fs)\n\ndef check_sanity(request):\n \"\"\"Make sure the request is under our root.\n \"\"\"\n if not request.fs.startswith(request.root):\n raise Response(404)\n\ndef hidden_files(request):\n \"\"\"Protect hidden files.\n \"\"\"\n if '/.' in request.fs[len(request.root):]:\n raise Response(404)\n\ndef virtual_paths(request):\n \"\"\"Support /foo/bar.html => ./%blah/bar.html and /blah.html => ./%flah.html\n\n Parts is a list of fs path parts as returned by translate, above. \n\n Path parts will end up in request.path, a dict subclass. There can only be \n one variable per path part. If a directory has more than one subdirectory\n starting with '%' then only the 'first' is used.\n\n \"\"\"\n if os.sep + '%' in request.fs[len(request.root):]: # disallow direct access\n raise Response(404)\n if not exists(request.fs):\n matched = request.root\n parts = request._parts\n del request._parts\n nparts = len(parts)\n for i in range(1, nparts):\n part = parts[i]\n next = join(matched, part)\n if exists(next): # this URL part names an actual directory\n matched = next\n else: # this part is missing; do we have a %subdir?\n key = None\n for root, dirs, files in os.walk(matched):\n files.sort(key=lambda x: x.lower())\n dirs.sort(key=lambda x: x.lower())\n for name in files + dirs:\n if name.startswith('%'):\n \n # See if we can use this item.\n # ============================\n # We want to allow file matches for the last URL\n # path part, and in that case we strip the file\n # extension. 
For other matches we need them to be\n # directories.\n\n fs = join(matched, name)\n k = name[1:]\n v = part\n if i == (nparts - 1):\n if isfile(fs):\n # Take care with extensions.\n x = k.rsplit('.', 1)\n y = part.rsplit('.', 1)\n nx = len(x) # 1|2\n if nx != len(y):\n continue\n if nx == 2:\n # If there's an extension, match it.\n k, ext1 = x\n v, ext2 = y\n if ext1 != ext2:\n continue\n elif not isdir(fs):\n continue \n\n\n # We found a suitable match at the current level.\n # ===============================================\n\n matched = fs \n key, value = _typecast(k, v)\n request.path[key] = value\n break # Only use the first %match per level.\n break # don't recurse in os.walk\n if key is None:\n matched = request.root\n break # no match, reset\n if matched != request.root:\n request.fs = matched.rstrip(os.sep)\n #spam -- log.debug('gauntlet.virtual_paths: ' + request.fs)\n\ndef _typecast(key, value):\n \"\"\"Given two strings, return a string, and an int or string.\n \"\"\"\n if key.endswith('.int'): # you can typecast to int\n key = key[:-4]\n try:\n value = int(value)\n except ValueError:\n raise Response(404)\n else: # otherwise it's URL-quoted ASCII\n try:\n value = value.decode('ASCII')\n except UnicodeDecodeError:\n raise Response(400)\n value = urllib.unquote(value)\n return key, value\n\ndef trailing_slash(request):\n if isdir(request.fs):\n if not request.path.raw.endswith('/'):\n request.path.raw += '/'\n raise Response(301, headers={'Location': request.rebuild_url()})\n\ndef index(request):\n if isdir(request.fs):\n for filename in request.default_filenames:\n index = join(request.fs, filename)\n if isfile(index):\n request.fs = index\n break\n #spam -- log.debug('gauntlet.index: ' + request.fs)\n\ndef autoindex(request):\n if isdir(request.fs):\n if request.conf.aspen.no('list_directories'):\n request.headers.set('X-Aspen-AutoIndexDir', request.fs)\n request.fs = request.website.ours_or_theirs('autoindex.html')\n assert request.fs is not None # sanity check\n else:\n raise Response(404)\n #spam -- log.debug('gauntlet.autoindex: ' + request.fs)\n\ndef not_found(request):\n if not isfile(request.fs):\n if request.path.raw == '/favicon.ico': # special case\n request.fs = request.website.find_ours('favicon.ico')\n else:\n raise Response(404)\n #spam -- log.debug('gauntlet.not_found: ' + request.fs)\n\n\ngauntlet = [ intercept_socket\n , translate\n , check_sanity\n , hidden_files\n , virtual_paths\n , trailing_slash\n , index\n , autoindex\n , not_found\n ]\n\ndef run(request):\n \"\"\"Given a request, run it through the gauntlet.\n \"\"\"\n #spam -- log.debug('gauntlet.run: ' + request.path.raw)\n for func in gauntlet:\n func(request)\n\ndef run_through(request, last):\n \"\"\"For testing, here's a function that runs part of the gauntlet.\n\n Pass in a request object and a gauntlet function, the last to be run.\n\n \"\"\"\n #spam -- log.debug('gauntlet.run_through: ' + request.path.raw)\n for func in gauntlet:\n func(request)\n if func is last:\n break\n","sub_path":"aspen/gauntlet.py","file_name":"gauntlet.py","file_ext":"py","file_size_in_byte":7771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"547877841","text":"import os\nimport time\nimport re\nfrom slackclient import SlackClient\nimport boto3\n\n## Set some global vaiables ## \n\nslack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))\nhedwig_id = None\nRTM_READ_DELAY = 1\nMENTION_REGEX = \"^<@(|[WU].+?)>(.*)\"\n\n\n\ndef 
parse_bot_commands(slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == hedwig_id:\n return message, event[\"channel\"]\n return None, None\n\n\ndef parse_direct_mention(message_text):\n matches = re.search(MENTION_REGEX, message_text)\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)\n\n\ndef handle_command(command, channel):\n \n if command.startswith(\"check connection\"):\n ec2 = boto3.client('ec2',region_name='eu-west-1') \n FullResponse = ec2.describe_instances()\n sendMessage(channel, \"Attempting to connect to AWS now...\") \n response = ec2.monitor_instances(InstanceIds=['i-0235fc59a713cd32c'])\n print(response['ResponseMetadata']['HTTPStatusCode'])\n sendMessage(channel, \"I received a status code of: \" + str(response['ResponseMetadata']['HTTPStatusCode']) + \" So it all looks good to me!\")\n \n if command.find(\"which\") != -1 and command.find(\"instances\") != -1:\n if command.find(\"running\") != -1 or command.find(\"started\") != -1:\n ec2 = boto3.resource('ec2', region_name='eu-west-1')\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values':['running']}])\n for instance in instances:\n sendMessage(channel, instance.id + \" \" + instance.instance_type)\n if command.find(\"stopped\") != -1:\n ec2 = boto3.resource('ec2', region_name='eu-west-1')\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values':['stopped']}])\n for instance in instances:\n sendMessage(channel, instance.id + \" \" + instance.instance_type)\n\n\n if command.find(\"status\") != -1 and command.find(\" check\") != -1:\n if command.find(\"all\") != -1:\n print(\"Check status for all instances\")\n ec2 = boto3.client('ec2', region_name='eu-west-1')\n getAllInstances = ec2.describe_instances()\n for r in getAllInstances['Reservations']:\n for i in r['Instances']:\n checkStatus = ec2.monitor_instances(InstanceIds=[i['InstanceId']])\n sendMessage(channel, \"Status of instance: \" + str(i['InstanceId']) + \" is: \" + str(checkStatus['ResponseMetadata']['HTTPStatusCode']))\n if command.find(\"i-\") != -1:\n # We found an instance ID\n ec2 = boto3.client('ec2',region_name='eu-west-1')\n instanceID = command.split(\"i-\",)\n instanceID = instanceID[1]\n sendMessage(channel, \"Checking status for instance: i-\" + instanceID[0:17])\n checkInstance = ec2.monitor_instances(InstanceIds=[\"i-\"+instanceID[0:17]])\n sendMessage(channel, \"Status Code: \" + str(checkInstance['ResponseMetadata']['HTTPStatusCode']))\n \n if command.find(\"how many\") != -1 and command.find(\"instances\") != -1:\n command = command.split(\"instances\",)\n if command[1].find(\"total\") != -1:\n instanceCount = int(0)\n ec2 = boto3.client('ec2', region_name='eu-west-1')\n instanceResponse = ec2.describe_instances()\n for reservation in instanceResponse[\"Reservations\"]:\n for instance in reservation[\"Instances\"]:\n instanceCount = instanceCount +1\n sendMessage(channel, \"The total number of AWS instances in: \" + \"eu-west-1\" +\" is: \" + str(instanceCount))\n\ndef sendMessage(channel,response):\n slack_client.api_call(\"chat.postMessage\",channel=channel, text=response)\n\n\nif __name__ == \"__main__\":\n if slack_client.rtm_connect(with_team_state=False):\n print(\"Hedwig connected and running!\")\n hedwig_id = slack_client.api_call(\"auth.test\")[\"user_id\"]\n while True:\n command, channel = parse_bot_commands(slack_client.rtm_read())\n if command:\n handle_command(command, channel)\n 
time.sleep(RTM_READ_DELAY)\n else:\n print(\"Connection failed. Exception traceback printed above.\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"611343751","text":"import csv\n\nfrom bs4 import BeautifulSoup\n# import pymysql.cursors\nimport requests\n\n\nsoup = BeautifulSoup(open('standard_drawings.html'), 'html.parser')\n\nrows = soup.find_all('tr')\n\nfile_data = []\n\nfor row in rows:\n row_data = row.find_all('td')\n name = row_data[1].text\n if name:\n code = row_data[2].text\n try:\n pdf = row_data[0].find('a').get('href')\n except AttributeError:\n pdf = None\n d = {\n 'title': name,\n 'code': code,\n 'pdf': pdf\n }\n if pdf:\n # Fetch the pdf from current site and save\n r = requests.get(pdf)\n open('standard_drawings_pdfs/'+pdf.rsplit('/', 1)[-1], 'wb').write(r.content)\n\n # Get and set just the raw file name with no extension\n pdf_name = pdf.rsplit('/', 1)[-1].replace('.pdf', '')\n d['pdf'] = pdf_name\n file_data.append(d)\n\n# Build csv of data\nkeys = file_data[0].keys()\nwith open('standard_drawings.csv', 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(file_data)\n","sub_path":"standard_drawings/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"225357029","text":"import sys\nimport pickle\nsys.path.append(\"../tools/\")\nfrom feature_format import featureFormat, targetFeatureSplit\nfrom tester import dump_classifier_and_data\nfrom sklearn import preprocessing\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.svm import LinearSVC\nfrom sklearn.decomposition import PCA\nfrom sklearn.cross_validation import StratifiedShuffleSplit\nfrom collections import defaultdict\nimport sklearn.grid_search\nimport sklearn.pipeline\nimport math \nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LogisticRegression\n\n### Task 1: Select what features you'll use.\n### The first feature must be \"poi\".\n\n### Load the dictionary containing the dataset\nwith open(\"final_project_dataset.pkl\", \"r\") as data_file:\n data_dict = pickle.load(data_file)\nmy_dataset = data_dict\n\n### Task 2: Remove outliers\n\ndel my_dataset['TOTAL'] \t\t\t\t\t\t\t#Deleting outlier total\ndel my_dataset['THE TRAVEL AGENCY IN THE PARK'] \t#not corresponding to a person\ndel my_dataset['LOCKHART EUGENE E']\t\t\t\t\t#has all features missing\n\n### Task 3: Create new feature(s)\n\nfinance_features = ['deferral_payments' , 'expenses' , 'deferred_income' , 'restricted_stock_deferred','loan_advances' ,'other' , 'director_fees', 'bonus' ,'restricted_stock' ,'total_stock_value' ,'long_term_incentive','salary','total_payments','exercised_stock_options']\nnormalized = ['bonus' , 'deferred_income']\n\ndef add_new_features(x):\t\n\t\"\"\"\n\tCreated three ratios based on email : to,from, and shared with POIs\n\t\"\"\"\n\tif(x['from_messages']!='NaN' and x['from_this_person_to_poi']!='NaN'):\n\t ratio_from_x_to_poi = x['from_this_person_to_poi'] / float(x['from_messages'])\n\telse:\n\t ratio_from_x_to_poi = 0.\n\n\tif(x['to_messages']!='NaN' or x['from_poi_to_this_person']!='NaN'):\n\t ratio_from_poi_to_x = x['from_poi_to_this_person'] / float(x['to_messages'])\n\telse:\n\t 
ratio_from_poi_to_x = 0. \n\n\tif(x['to_messages']!='NaN' and x['shared_receipt_with_poi']!='NaN'):\n\t ratio_shared_receipt_with_poi = x['shared_receipt_with_poi'] / float(x['to_messages'])\n\telse:\n\t ratio_shared_receipt_with_poi = 0.\n\n\treturn ratio_from_x_to_poi , ratio_from_poi_to_x , ratio_shared_receipt_with_poi\n\n#Initialize feature with 0.0\nfor i in my_dataset:\n my_dataset[i]['ratio_to_poi'] = 0.\n my_dataset[i]['ratio_from_poi'] = 0.\n my_dataset[i]['ratio_shared_receipt'] = 0.\n\n#Set value\nfor i in my_dataset : \n ratio_to_poi , ratio_from_poi , ratio_shared_receipt = add_new_features(my_dataset[i])\n my_dataset[i]['ratio_to_poi'] = ratio_to_poi\n my_dataset[i]['ratio_from_poi'] = ratio_from_poi\n my_dataset[i]['ratio_shared_receipt'] = ratio_shared_receipt\n\ndef log_features(x):\n \"\"\"\t\n Created log of financial features\n \"\"\"\n d = defaultdict(lambda: 0.)\n d.clear()\n for features in x:\n if(features in finance_features): \n if(x[features] != 'NaN'):\n if(x[features]!=0) :\n d['log_' + str(features)] = math.log(abs(x[features]) , 10)\n else:\n d['log_' + str(features)] = 0\n return d\n\nfor i in my_dataset : \n d = log_features(my_dataset[i])\n for features in d:\n my_dataset[i][features] = d[features]\n\n\nfeatures_all = set()\nfor x in my_dataset : \n for features in my_dataset[x]:\n features_all.add(features) \ntry :\n features_all.remove('poi')\nexcept KeyError:\n pass\n\nfeatures_list = ['poi']\n\nfor i in features_all : \t\t#Removing e-mail address field from classification\n if(i != 'email_address'):\n features_list.append(i) \n\n### Extract features and labels from dataset for local testing\n\n\ndata = featureFormat(my_dataset, features_list, sort_keys = True)\n\nlabels, features = targetFeatureSplit(data)\n\n### Task 4: Try a varity of classifiers\n\nclf = LinearSVC()\npca = PCA()\nscaler = preprocessing.MinMaxScaler()\nselect = SelectKBest()\n\nfeatures_train, features_test, labels_train, labels_test = \\\n train_test_split(features, labels, test_size=0.3, random_state=42) \t\t\t#extract features\n\nmin_max_scaler = preprocessing.MinMaxScaler()\n\nfeatures_train = min_max_scaler.fit_transform(features_train)\n### Task 5: Tune your classifier to achieve better than .3 precision and recall \n### using our testing script. Check the tester.py script in the final project\n### folder for details on the evaluation method, especially the test_classifier\n### function. Because of the small size of the dataset, the script uses\n### stratified shuffle split cross validation. For more info: \n### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html\n\n# Example starting point. 
Try investigating other evaluation techniques!\n\nsteps = [('feature_selection' , select) ,('scaler' , scaler) , ('PCA' , pca) , ('classifier' , clf)]\n\npipeline = sklearn.pipeline.Pipeline(steps)\n\nscaler = preprocessing.MinMaxScaler()\n\n\n\nparameters = parameters = dict(feature_selection__k= [35], #18,20,22,25,26,27,29 , 30 , 31 ,33 , 35,\n classifier__loss = ['hinge' , 'squared_hinge'] ,\n classifier__max_iter = [1000,500,2000] ,\n classifier__multi_class = ['ovr' , 'crammer_singer'] ,\n PCA__n_components = [19] ,\n PCA__random_state = [42] ,\n PCA__whiten = [True , False])\n\nsss = StratifiedShuffleSplit(labels, 10, test_size=0.3, random_state=60)\n\ncv = sklearn.grid_search.GridSearchCV(pipeline, param_grid = parameters , scoring = 'f1' ,cv = sss)\n\ncv.fit(features, labels)\n\nclf = cv.best_estimator_\nprint(cv.best_estimator_)\n\n### Task 6: Dump your classifier, dataset, and features_list so anyone can\n### check your results. You do not need to change anything below, but make sure\n### that the version of poi_id.py that you submit can be run on its own and\n### generates the necessary .pkl files for validating your results.\n\ndump_classifier_and_data(clf, my_dataset, features_list)\n\n","sub_path":"final_submission/poi_id.py","file_name":"poi_id.py","file_ext":"py","file_size_in_byte":6061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"258559168","text":"import models.processing_settings.processing_settings as p_s\nimport models.processing_document.processing_document as p_d\nimport numpy as np\nimport re\nfrom operator import attrgetter\nimport models.detect_models.detect_header_processing_model as d_h_p_m\nfrom models.settings.var_container import LOG as log\n\n\n# def detect_acc_pattern(pattern, document):\n# df = document.data_frame.head(10)\n# words = pattern.words\n# count = 0\n# vals = str(df.values).lower()\n#\n# for word in words:\n# try:\n# t = word.lower() in vals\n# if t == True:\n# count += 1\n# except:\n# pass\n# accuracy = count / len(words) * 100\n# model =d_h_p_m.DetectHeaderProcessingModel(pattern,accuracy,count)\n# return model\n#\n# #if (count > 0):\n# # return count / len(words) * 100\n#\n#\n# return -1\n\ndef detect_document_type(document, pattern):\n try:\n iter = 0\n df = document.data_frame.head(5)\n for p in pattern:\n\n for s_pattern in p.words:\n counter = len(df.columns)\n\n for index, row in df.iterrows():\n\n for col in range(0, counter):\n value = str(row[col])\n if value != \"nan\":\n value = value.replace(' ',' ')\n re.sub(\"\\s\\s+\", \" \", value)\n result = re.match(s_pattern, value)\n if (result != None):\n # print(result.group(0))\n return p\n return \"\"\n\n additional_column = df.columns[0]\n pats = []\n for p in pattern:\n for s_pattern in p.words:\n result = re.match(s_pattern, additional_column)\n\n if (result != None):\n # print(result.group(0))\n pats.append(p)\n # return p\n\n if (len(pats)==1):\n return pats[0]\n else:\n max_matches =0\n max_match_index =-1\n _index=0\n for p in pats:\n matches = 0\n\n for s_pattern in p.words:\n counter = len(df.columns)\n\n for index, row in df.iterrows():\n\n for col in range(0, counter):\n value = str(row[col])\n if value != \"nan\":\n value = value.replace(' ', ' ')\n re.sub(\"\\s\\s+\", \" \", value)\n result = re.match(s_pattern, value)\n if (result != None):\n matches+=1\n\n if (matches>max_matches):\n max_matches=matches\n max_match_index =_index\n _index+=1\n if (max_match_index>-1):\n return pats[max_match_index]\n except:\n 
print(\"Error\")\n return None\n pass\n\n\ndef detect_documents_pattern(project):\n try:\n\n docs = project.documents\n patterns = project.patterns\n d_models = []\n\n for doc in docs:\n if (doc.defined==True):\n continue\n maxPattern = {}\n maxAccuracy = -1\n pattern = detect_document_type(doc, patterns)\n\n if (pattern == None):\n print(\"Document \" + doc.file_name + \" not defined\")\n log.add(\"Document \" + doc.file_name + \" not defined\")\n continue\n\n # t = max(d_models, key=attrgetter('accuracy'))\n doc.type = pattern.documentType\n doc.version = pattern.version\n # h = project.\n if (doc.type.value != 0):\n t = [hh for hh in project.headDocuments if hh.documentType == doc.type and hh.version==doc.version]\n # headers =t[0].headers\n for d in t:\n doc.init_detect_headers(d.headers)\n\n else:\n print(\"Файл \" + doc.file_name + \" не определен\")\n\n except Exception as e:\n pass\n","sub_path":"modules/processing/detect_documents.py","file_name":"detect_documents.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"357999913","text":"from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn import cross_validation\nfrom sklearn.externals import joblib\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nclass RndFrst:\n\n def __init__(self):\n self.result = []\n self.prob = []\n self.rfClssf = []\n self.adaClssf = []\n\n\n def runRandomForest(self, trnXs, trnYs, tstXs, tstYs, featDicts):\n for i in range(len(trnXs) - 1):\n rfClssf = RandomForestClassifier(n_estimators=25, criterion='entropy', max_features=None)\n\n bestTrnX = []; bestTrnY = []\n bestCVOvAccu = 0\n while(True):\n cvTrnX, cvValX, cvTrnY, cvValY = cross_validation.train_test_split(trnXs[i], trnYs[i], test_size=0.1)\n rfClssf.fit(cvTrnX, cvTrnY)\n accu, tpAccu, tnAccu, prob = self.evalRFModel(rfClssf, cvValX, cvValY)\n\n curBaseAccu = tpAccu\n if bestCVOvAccu < curBaseAccu:\n bestTrnX = cvTrnX; bestTrnY = cvTrnY; self.prob = prob\n bestCVOvAccu = curBaseAccu\n\n print('Current Best Total Accuracy: ' + str(accu))\n print('Current Best True Positive Accuracy: ' + str(tpAccu))\n print('Current Best True Negative Accuracy: ' + str(tnAccu))\n\n if bestCVOvAccu > 0.9:\n break\n\n print('Best CV Accuracy: ' + str(bestCVOvAccu))\n\n featImp = rfClssf.feature_importances_\n sortedIdx = np.argsort(featImp)\n\n self.displayResults(featDicts, featImp, sortedIdx, i)\n\n a = 2\n\n # scores = cross_validation.cross_val_score(rfClssf, trnXs[i], trnYs[i], cv=10)\n # print('CV Accuracy: %f (+/- %f)' % (scores.mean(), scores.std() * 2))\n\n # rfClssf.fit(bestTrnX, bestTrnY)\n\n # while(True):\n # rfClssf.fit(trnXs[i], trnYs[i])\n # accu, tpAccu, tnAccu = self.evalRFModel(rfClssf, tstXs[i], tstYs[i])\n #\n # if tpAccu > 0.8:\n # break\n #\n #\n # featImp = rfClssf.feature_importances_\n # sortedIdx = np.argsort(featImp)\n #\n # a = 1\n\n\n def runAdaBoost(self, trnXs, trnYs, tstXs, tstYs, featDicts):\n for i in range(len(trnXs) - 1):\n while(True):\n adaClssf = AdaBoostClassifier(n_estimators=1000)\n\n bestTrnX = []\n bestTrnY = []\n bestCVOvAccu = 0\n stopLevelAccu = 0.8\n while (True):\n cvTrnX, cvValX, cvTrnY, cvValY = cross_validation.train_test_split(trnXs[i], trnYs[i], test_size=0.1)\n adaClssf.fit(cvTrnX, cvTrnY)\n accu, tpAccu, tnAccu, prob = self.evalRFModel(adaClssf, cvValX, cvValY)\n\n curBaseAccu = tpAccu\n if bestCVOvAccu < curBaseAccu:\n bestTrnX = cvTrnX;\n 
bestTrnY = cvTrnY;\n self.prob = prob\n bestCVOvAccu = curBaseAccu\n\n print('Current Best Total Accuracy: ' + str(accu))\n print('Current Best True Positive Accuracy: ' + str(tpAccu))\n print('Current Best True Negative Accuracy: ' + str(tnAccu))\n\n bestCVAccu = accu; bestCVTpAccu = tpAccu; bestCVTnAccu = tnAccu;\n\n if bestCVOvAccu > stopLevelAccu:\n break\n\n print('Best CV Accuracy: ' + str(bestCVOvAccu))\n\n featImp = adaClssf.feature_importances_\n sortedIdx = np.argsort(featImp)\n\n self.displayResults(featDicts, featImp, sortedIdx, i)\n\n adaClssf.fit(bestTrnX, bestTrnY)\n accu, tpAccu, tnAccu, prob = self.evalRFModel(adaClssf, tstXs[i], tstYs[i])\n\n print('Test Total Accuracy: ' + str(accu))\n print('Test True Positive Accuracy: ' + str(tpAccu))\n print('Test True Negative Accuracy: ' + str(tnAccu))\n\n if (((accu + tpAccu) / 2) > stopLevelAccu):\n break\n\n featImp = adaClssf.feature_importances_\n sortedIdx = np.argsort(featImp)\n\n self.displayResults(featDicts, featImp, sortedIdx, i)\n\n self.adaClssf = adaClssf\n\n joblib.dump(adaClssf, 'model_data/model.pkl')\n f = open('accuracies.txt', 'w')\n f.write('CV Overall Accu\\t' + str(bestCVOvAccu) + '\\r\\n')\n f.write('CV Accu\\t' + str(bestCVAccu) + '\\r\\n')\n f.write('CV TP Accu\\t' + str(bestCVTpAccu) + '\\r\\n')\n f.write('CV TN Accu\\t' + str(bestCVTnAccu) + '\\r\\n')\n f.write('Test Accu\\t' + str(accu) + '\\r\\n')\n f.write('Test TP Accu\\t' + str(tpAccu) + '\\r\\n')\n f.write('Test TN Accu\\t' + str(tnAccu) + '\\r\\n')\n f.close()\n\n a = 2\n\n\n def displayResults(self, featDicts, featImp, sortedIdx, idx):\n feats = []\n scores = []\n\n for i in range(20):\n feats.append(featDicts[idx][sortedIdx[len(sortedIdx) - (i + 1)]])\n scores.append(featImp[sortedIdx[len(sortedIdx) - (i + 1)]])\n print(str(feats[i]) + ': ' + str(scores[i]))\n\n\n def evalRFModel(self, model, tstX, tstY):\n pred = model.predict(tstX)\n prob = model.predict_proba(tstX)\n\n compare = (tstY == pred)\n tTstY = (tstY == 1)\n fTstY = (tstY == -1)\n tpRecord = compare & tTstY\n tnRecord = compare & fTstY\n\n accu = float(np.sum(compare)) / float(len(tstY))\n tpAccu = float(np.sum(tpRecord)) / float(np.sum(tTstY))\n tnAccu = float(np.sum(tnRecord)) / float(np.sum(fTstY))\n\n return accu, tpAccu, tnAccu, prob","sub_path":"src_ver_0,1/RandForest.py","file_name":"RandForest.py","file_ext":"py","file_size_in_byte":5881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"364498114","text":"'''\n\n\nConsider the fraction, n/d, where n and d are positive integers. 
If n<d and HCF(n,d)=1, it is called a reduced proper fraction.\n\nIf we list the set of reduced proper fractions for d ≤ 8 in ascending order of size, we get:\n\n1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8\n\nIt can be seen that 2/5 is the fraction immediately to the left of 3/7.\n\nBy listing the set of reduced proper fractions for d ≤ 1,000,000 in ascending order of size, find the numerator of the fraction immediately to the left of 3/7.\n'''\n# This would work, but would take forever.\n# define HCF\n# build dictionary of {n/d:[n,d]}\n# list = list(dict.keys())\n# i = list.index(3/7)\n# return list[i-1]\n\n\n# instead of building out all the numbers, just focus on whatever comes between 3/7 and it's current neighbor...\n\ndef hcf(n,d):\n \n while True:\n q = d//n\n r = d-q*n\n if r==0:\n return n\n d = n\n n = r\n\n\npin = 3/7\nbest = 2/5\nupper = 1000000\nbdict = {best:[2,5]}\n\nfor n in range(8,upper+1):\n inc = 1/n\n q = int(pin // inc)\n if hcf(q,n)==1:\n if pin - q/n < pin - best:\n best = q/n\n bdict.update({best:[q,n]})\nprint('For n = {:7,d} the leftmost neighbor is {}/{}'.format(n,bdict[best][0],bdict[best][1]))","sub_path":"problem_071.py","file_name":"problem_071.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"385552527","text":"from django import http\nfrom django.core.urlresolvers import reverse\n\nclass NameRequiredMiddleware:\n \"\"\"\n Middleware that requires a user fill is name.\n \"\"\"\n \n def process_request(self, request):\n user = request.user\n url = reverse('edit_profile')\n if user.is_authenticated() \\\n and not (user.first_name and user.last_name) \\\n and url not in request.get_full_path():\n return http.HttpResponseRedirect(reverse('edit_profile'))","sub_path":"diagnose/apps/diagnose/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"203849555","text":"\"\"\" trainer_adv_examples.py \"\"\"\n# Author: Dashan Gao\n# Email: dgaoaa@connect.ust.hk\n# Date: Jun 4th, 2021\n# Description: Code revised based on L2L-DA paper, to achieve semantic-preserving adversarial training.\n# TMP\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch.nn.parallel\nimport torchvision.utils as tvutils\nfrom torchvision.utils import save_image\nfrom torch.autograd import Variable\n\nimport os\nimport numpy as np\nfrom glob import glob\nfrom tqdm import trange\nfrom itertools import chain\nfrom tensorboardX import SummaryWriter\n\nfrom adv_defence.models import *\nfrom adv_defence.models import AttrAdvNoiseGenerator\nfrom adv_defence.attacks import get_fgsm, get_pgd, get_cw, run_fgsm, run_pgd, run_cw\nfrom adv_defence.sync_batchnorm import DataParallelWithCallback\nfrom adv_defence.utils import loss as msp_loss\n\n\nclass TrainerAdvExample(object):\n \"\"\"\n Generate adversarial example instead of adversarial noise.\n \"\"\"\n\n def __init__(self, config, train_data_loader, test_data_loader):\n self.config = config\n self.train_data_loader = train_data_loader\n self.test_data_loader = test_data_loader\n self.start_step = 0\n self.tensorboard = None\n self._build_model()\n\n if config.num_gpu > 1:\n self.NoiseGenerator = DataParallelWithCallback(self.NoiseGenerator.cuda(),\n 
device_ids=range(config.num_gpu))\n self.Classifier = DataParallelWithCallback(self.Classifier.cuda(),\n device_ids=range(config.num_gpu))\n else:\n self.NoiseGenerator = self.NoiseGenerator.cuda()\n self.Classifier = self.Classifier.cuda()\n\n # # Note: check whether :0 is nessasary or not.\n # self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n if config.load_path:\n self._load_model()\n\n # create the attacker modules\n self.FGSM = get_fgsm(self.config.dataset)\n self.PGD = get_pgd(self.config.dataset)\n self.CW = get_cw(self.config.dataset)\n\n def _build_model(self):\n noise_channel_size = (3 if self.config.is_rgb else 1) * (1 + (1 if self.config.g_method == 3 else 0) +\n (1 if self.config.g_use_grad else 0))\n\n if self.config.g_double_decoder:\n # Generator with two decoders. One for adv-attr-example, one for attr-example.\n self.NoiseGenerator = AttrAdvNoiseGenerator(self.config.g_base_channel_dim,\n noise_channel_size,\n self.config.g_z_dim,\n self.config.g_deeper_layer,\n self.config.num_classes,\n 3 if self.config.is_rgb else 1)\n else:\n # Generator with only one decoder.\n self.NoiseGenerator = NoiseGenerator(self.config.g_base_channel_dim,\n noise_channel_size,\n self.config.g_z_dim,\n self.config.g_deeper_layer,\n self.config.num_classes,\n 3 if self.config.is_rgb else 1)\n\n self.Classifier = Classifier(num_classes=self.config.num_classes,\n classifier_name=self.config.f_classifier_name,\n dataset=self.config.dataset,\n pretrained=self.config.f_pretrain,\n pretrained_dir=self.config.pretrained_dir)\n self.NoiseGenerator.apply(weights_init_normal)\n if not self.config.f_pretrain:\n self.Classifier.apply(weights_init_normal)\n\n def _load_model(self):\n print(\"[*] Load models from {}...\".format(self.config.load_path))\n paths = glob(os.path.join(self.config.load_path, 'Classifier_*.pth'))\n paths.sort()\n\n if len(paths) == 0:\n path = os.path.join(self.config.load_path, 'Classifier.pth')\n if not os.path.exists(path):\n print(\"[!] 
No checkpoint found in {}...\".format(self.config.load_path))\n return\n self.start_step = 0\n else:\n idxes = [int(os.path.basename(path.split('.')[-2].split('_')[-1])) for path in paths]\n self.start_step = max(idxes)\n\n if self.config.num_gpu == 0:\n map_location = lambda storage, loc: storage\n else:\n map_location = None\n\n if self.config.f_update_style != -1:\n bad_classifier_state = torch.load('{}/Classifier_{}.pth'.format(self.config.load_path, self.start_step),\n map_location=map_location)\n starts_with_module = False\n for key in bad_classifier_state.keys():\n if key.startswith('module.'):\n starts_with_module = True\n break\n if starts_with_module and (self.config.num_gpu < 1):\n correct_classifier_state = {k[7:]: v for k, v in\n bad_classifier_state.items()}\n else:\n correct_classifier_state = bad_classifier_state\n self.Classifier.load_state_dict(correct_classifier_state)\n\n if self.config.f_update_style != -1:\n bad_generator_state = torch.load('{}/NoiseGen_{}.pth'.format(self.config.load_path, self.start_step),\n map_location=map_location)\n else:\n bad_generator_state = torch.load('{}/Generator.pth'.format(self.config.load_path),\n map_location=map_location)\n\n starts_with_module = False\n for key in bad_generator_state.keys():\n if key.startswith('module.'):\n starts_with_module = True\n break\n if starts_with_module and (self.config.num_gpu < 1):\n correct_generator_state = {k[7:]: v for k, v in\n bad_generator_state.items()}\n else:\n correct_generator_state = bad_generator_state\n self.NoiseGenerator.load_state_dict(correct_generator_state)\n\n def _save_model(self, step):\n print(\"[*] Save models to {}...\".format(self.config.model_dir))\n torch.save(self.Classifier.state_dict(),\n '{}/Classifier_{}.pth'.format(self.config.model_dir, step))\n torch.save(self.NoiseGenerator.state_dict(),\n '{}/NoiseGen_{}.pth'.format(self.config.model_dir, step))\n\n def _merge_noise(self, sum_noise, cur_noise, eps_step, eps_all):\n # 0. normalize noise output first: Don't need to, since we always take the tanh output\n\n # 1. multiply epsilon (with randomness for the training)\n # result: noise is in -eps_step < noise < eps_step\n cur_noise = cur_noise * eps_step\n\n # 2. 
return mixed output\n return torch.clamp(sum_noise + cur_noise, -1.0 * eps_all, 1.0 * eps_all)\n\n def _cross_entropy_loss(self, noise_class_output, label, pure_batch, adv_mult=1.0):\n log_prob = F.log_softmax(noise_class_output, dim=1)\n weight = torch.ones_like(label).float()\n weight[pure_batch:] *= adv_mult\n output = F.nll_loss(log_prob, label, reduction='none')\n return torch.mean(weight * output)\n\n def _compute_acc(self, logits, labels):\n # logits = logits / torch.norm(logits)\n _max_val, max_idx = torch.max(logits, 1)\n return torch.mean(torch.eq(max_idx, labels).double())\n\n # compute our loss from the output (batch major!)\n def _dsgan_loss(self, noise, output, single_batch, stability=1e-8):\n if noise is None:\n return None\n\n numerator = torch.mean(torch.abs(output[:single_batch] - output[single_batch:]),\n dim=[_ for _ in range(1, len(output.shape))])\n denominator = torch.mean(torch.abs(noise[:single_batch] - noise[single_batch:]),\n dim=[_ for _ in range(1, len(noise.shape))])\n our_term = torch.mean(numerator / (denominator + stability))\n return our_term\n\n def train(self):\n # Optimizer for G\n if self.config.g_optimizer == 'adam':\n g_optimizer = torch.optim.Adam(self.NoiseGenerator.parameters(),\n lr=self.config.g_lr,\n betas=(self.config.g_beta1, self.config.g_beta2),\n weight_decay=self.config.weight_decay)\n elif self.config.g_optimizer == 'sgd':\n g_optimizer = torch.optim.SGD(self.NoiseGenerator.parameters(),\n lr=self.config.g_lr,\n momentum=self.config.g_momentum,\n weight_decay=self.config.weight_decay)\n else:\n raise Exception(\"[!] Optimizer for the generator should be ['adam', 'sgd']\")\n\n # set initial learning rate for the case that it starts training from the middle\n if self.config.f_update_style == 2:\n if self.start_step != 0:\n for group in g_optimizer.param_groups:\n group.setdefault('initial_lr', self.config.g_lr)\n g_scheduler = torch.optim.lr_scheduler.StepLR(g_optimizer,\n step_size=self.config.max_step // 2,\n gamma=self.config.lr_gamma,\n last_epoch=(-1 if self.start_step == 0 else self.start_step))\n else:\n g_scheduler = None\n\n # Optimizer for F\n if self.config.f_optimizer == 'adam':\n f_optimizer = torch.optim.Adam(self.Classifier.parameters(), lr=self.config.f_lr,\n betas=(self.config.f_beta1, self.config.f_beta2),\n weight_decay=self.config.weight_decay)\n elif self.config.f_optimizer == 'sgd':\n f_optimizer = torch.optim.SGD(self.Classifier.parameters(), lr=self.config.f_lr,\n momentum=self.config.f_momentum,\n weight_decay=self.config.weight_decay)\n else:\n raise Exception(\"[!] 
Optimizer for the classifier should be ['adam', 'sgd']\")\n\n        f_scheduler = torch.optim.lr_scheduler.StepLR(f_optimizer,\n                                                      step_size=self.config.max_step // 2,\n                                                      gamma=self.config.lr_gamma,\n                                                      last_epoch=(-1 if self.start_step == 0 else self.start_step))\n        if self.start_step != 0:\n            for group in f_optimizer.param_groups:\n                group.setdefault('initial_lr', self.config.f_lr)\n\n        # now load the train data\n        loader = iter(self.train_data_loader)\n\n        # train mode\n        self.tensorboard = SummaryWriter(self.config.model_dir)\n        self.tensorboard.add_text(tag='argument', text_string=str(self.config.__dict__))\n        for step in trange(self.start_step, self.config.max_step, ncols=80):\n            try:\n                data = loader.next()\n            except StopIteration:\n                loader = iter(self.train_data_loader)\n                data = loader.next()\n\n            # convert uint8 image data to float\n            real_img = self._get_variable(data[0].type(torch.FloatTensor))\n            if (not self.config.is_rgb) and (len(real_img.shape) == 3):\n                real_img = torch.unsqueeze(real_img, 1)  # N W H -> N C W H\n            # TODO: add attribute label for CelebA dataset.\n            attr_label = self._get_variable(data[1].type(torch.FloatTensor))\n            label = self._get_variable(data[2].type(torch.LongTensor))\n\n            single_batch_size = label.size(0)\n\n            # try to reduce the learning rate of f\n            if f_scheduler is not None:\n                f_scheduler.step()\n            if g_scheduler is not None:\n                g_scheduler.step()\n\n            # MNIST w/ lenet case only:\n            # (pretrain 1K steps so the classifier gets trained)\n            # For all the other cases, we've loaded the pretrained hyperparameters\n            # If you have pretrained weights, then you can start from there.\n            if (step < 1000) and (self.config.f_classifier_name == 'lenet'):\n                self.Classifier.train()\n                self.Classifier.zero_grad()\n                class_output = self.Classifier(real_img)\n                cls_loss = self._cross_entropy_loss(class_output, label, single_batch_size)\n                cls_loss.backward()\n                f_optimizer.step()\n                continue\n\n            ######## Phase 1 #######\n            # Classifier inference, to get gradient\n            # Grab gradients from f before training the G\n            # obtain the gradient from the classifier\n            self.Classifier.eval()\n            self.Classifier.zero_grad()\n            grad_input = real_img.detach()\n            grad_input.requires_grad = True\n            class_output = self.Classifier.forward(grad_input)\n\n            # compute loss\n            # f_loss is always averaged when computed, so it doesn't need to be re-scaled.\n            cls_loss = self._cross_entropy_loss(class_output, label,\n                                                single_batch_size)\n            # Add other losses here if you want.\n            grad_loss = cls_loss\n\n            if self.config.g_use_grad:\n                # TODO: Conduct experiments without using gradient as input. Compare the adversarial attack\n                # performance change.\n                # obtain gradients and disable the gradient for the input\n                grad_loss.backward()\n                f_grad = grad_input.grad\n                # normalize the gradient input\n                # Please change it to other normalization if needed\n                if self.config.g_normalize_grad:\n                    f_grad_norm = f_grad + 1e-15  # DO NOT EDIT! 
Need a stabilizer in here!!!\n f_grad = f_grad / f_grad_norm.norm(dim=(2, 3), keepdim=True)\n\n f_grad = f_grad.detach() # for a sanity check purpose\n\n ######## Phase 2 ###########\n # Train the generator, not the discriminator.\n # But, discriminator is still required, in order to compute the gradient\n double_real_img = torch.cat((real_img, real_img), 0).detach() # double batch-size\n double_label = torch.cat((label, label), 0).detach()\n # TODO: attribute_label for CelebA dataset\n double_attr_label = torch.cat((attr_label, attr_label), 0).detach()\n\n if self.config.g_method % 2 == 1: # g_method == 1 or 3\n double_adv_img = double_real_img.detach().clone() # The adversarial sample with noise\n else:\n double_adv_img = None\n\n if self.config.g_use_grad:\n double_adv_grad = torch.cat((f_grad, f_grad), 0)\n else:\n double_adv_grad = None\n\n if self.config.g_z_dim > 0:\n if self.config.num_gpu > 0:\n g_z = torch.cuda.FloatTensor(single_batch_size * 2, self.config.g_z_dim).normal_()\n else:\n g_z = torch.FloatTensor(single_batch_size * 2, self.config.g_z_dim).normal_()\n else:\n g_z = None\n\n self.NoiseGenerator.train()\n self.Classifier.eval()\n self.NoiseGenerator.zero_grad()\n # msp_alpha = real_img.shape[1] * real_img.shape[2] * real_img.shape[3] \\\n # / (1 + self.NoiseGenerator.hidden_size)\n\n dsgan_loss_sum = torch.tensor(0.0).cuda()\n dec1_proxy_loss = torch.tensor(0.0).cuda()\n dec2_proxy_loss = torch.tensor(0.0).cuda()\n msp_loss_1 = torch.tensor(0.0).cuda()\n msp_loss_2 = torch.tensor(0.0).cuda()\n msp_loss_1__ = torch.tensor(0.0).cuda()\n msp_loss_2__ = torch.tensor(0.0).cuda()\n l_vae_2 = torch.tensor(0.0).cuda()\n l_rec_2 = torch.tensor(0.0).cuda()\n l_msp_2 = torch.tensor(0.0).cuda()\n\n attr_adv_update_list = []\n attr_update_list = []\n clamp_noise = torch.zeros_like(double_real_img)\n\n if self.config.g_mini_update_style not in [0, 1, 2, 3]:\n raise Exception(\"[!] g_mini_update_style should be in [0,1,2,3]\")\n\n if self.config.g_attribute_disentangle_style not in [0, 1]:\n raise Exception(\"[!] g_attribute_disentangle should be in [0, 1]. \"\n \"0: msp_loss (last), 1: msp_loss (every)\")\n\n if self.config.attribute_perturb_style not in [0, 1, 2]:\n raise Exception(\"[!] attribute_perturb_style should be in [0, 1, 2]. \"\n \"0: no attribute perturbation, \"\n \"1: random perturb attribute,\"\n \"2: optimize attribute perturbation\")\n\n for g_iter_step_no in range(self.config.train_g_iter):\n if not self.config.use_cross_entropy_for_g:\n print(\"[!] 
We cannot train our generator without cross_entropy\")\n break\n\n # generate the current pair\n if self.config.g_use_grad:\n img_grad_advimg = torch.cat((double_real_img, double_adv_grad), 1)\n else:\n img_grad_advimg = double_real_img\n\n # in case of recursive gen\n if self.config.g_method == 3: # 3: PGD, take noise as input\n img_grad_advimg = torch.cat((img_grad_advimg, double_adv_img), 1)\n\n # Feed it to the generator\n\n # Original: predict noise\n # noise_output_for_g = self.NoiseGenerator(img_grad_advnoise, double_label, g_z)\n\n # V1: Predict adv examples with only adversarial noise.\n # adv_img_for_g = self.NoiseGenerator(img_grad_advimg, double_label, g_z)\n\n # V2: Predict adv examples with both 1) adversarial noise and 2) attribute perturbation.\n adv_noise_for_g, attr_img_for_g, attr_pred, mu, logvar = self.NoiseGenerator.forward(\n img_grad_advimg, double_label, g_z)\n\n # clamping learned noise in epsilon boundary\n if self.config.g_method % 2 == 1:\n # 1: L2L or 3: PGD\n clamp_noise = self._merge_noise(clamp_noise, adv_noise_for_g,\n self.config.epsilon * self.config.g_ministep_size,\n self.config.epsilon)\n else:\n # 2: FGSM\n clamp_noise = self.config.epsilon * adv_noise_for_g\n\n # clamping it once again to image boundary\n attr_adv_img_for_g = torch.clamp(attr_img_for_g + clamp_noise, 0.0, 1.0)\n attr_img_for_g = torch.clamp(attr_img_for_g, 0.0, 1.0)\n\n # adv_noise = torch.sub(attr_adv_img_for_g, attr_img_for_g)\n\n # first obtain the gradient information for the current result\n copy_for_grad = attr_adv_img_for_g.detach()\n copy_for_grad.requires_grad = True\n\n # ************************************************************\n # **************** Adversarial-loss **********************\n # Adversarial noise: compute & accumulate classification gradients\n # 0: ce_loss(every) + DS_loss(last), 1: ce_loss(last) + DS_loss(last),\n # 2: ce_loss(every) + DS_loss(every), 3: ce_loss(last) + DS_loss(every)\n if (self.config.g_mini_update_style % 2 == 0) or (g_iter_step_no + 1 == self.config.train_g_iter):\n self.Classifier.zero_grad()\n attr_adv_noise_class_output_for_g = self.Classifier.forward(attr_adv_img_for_g)\n attr_noise_class_output_for_g = self.Classifier.forward(attr_img_for_g)\n dec1_proxy_loss = 0.0\n dec2_proxy_loss = 0.0\n if self.config.use_cross_entropy_for_g:\n attr_adv_ce_loss = self._cross_entropy_loss(attr_adv_noise_class_output_for_g,\n double_label,\n single_batch_size)\n attr_ce_loss = self._cross_entropy_loss(attr_noise_class_output_for_g,\n double_label,\n single_batch_size)\n dec1_proxy_loss -= attr_adv_ce_loss\n dec2_proxy_loss += attr_ce_loss\n\n # ************************************************************\n # **************** MSP-loss ******************************\n # Each generator epoch update.\n if self.config.g_attribute_disentangle and ((self.config.g_attribute_disentangle_style == 1) or\n (g_iter_step_no + 1 == self.config.train_g_iter)):\n # _msp_loss_1, l_rec, l_vae, l_msp, l_msp_1_, l_msp_2_ = msp_loss(attr_adv_img_for_g, double_real_img,\n # double_attr_label, attr_pred, mu, logvar,\n # self.NoiseGenerator.M)\n\n _msp_loss_2, l_rec_2, l_vae_2, l_msp_2, l_msp_1_, l_msp_2_ = msp_loss(attr_img_for_g, double_real_img,\n double_attr_label, attr_pred, mu, logvar,\n self.NoiseGenerator.M)\n\n # _msp_loss_1 = self.config.g_rec_lambda * l_rec + self.config.g_vae_lambda * l_vae \\\n # + self.config.g_msp_lambda * l_msp\n _msp_loss_2 = self.config.g_rec_lambda * l_rec_2 + self.config.g_vae_lambda * l_vae_2 \\\n + self.config.g_msp_lambda * 
l_msp_2\n\n # msp_loss_1 += self.config.msp_lambda * _msp_loss_1\n msp_loss_2 += self.config.msp_lambda * _msp_loss_2\n msp_loss_1__ += l_msp_1_\n msp_loss_2__ += l_msp_2_\n\n # ************************************************************\n # **************** Diversity-loss ************************\n # Diversity loss: compute & accumulate DSGAN gradient\n if (self.config.g_z_dim > 0) and \\\n ((self.config.g_mini_update_style >= 2) or (g_iter_step_no + 1 == self.config.train_g_iter)):\n dsgan_magnitude = self._dsgan_loss(g_z, clamp_noise, single_batch_size)\n if self.config.dsgan_lambda > 0.0:\n dsgan_loss = -1.0 * self.config.dsgan_lambda * dsgan_magnitude\n else:\n dsgan_loss = 0.0\n dsgan_loss_sum += dsgan_loss\n\n # ************************************************************\n # preparing for the next mini-step\n if g_iter_step_no + 1 != self.config.train_g_iter:\n if self.config.g_use_grad:\n # compute gradient information for the next time step\n self.Classifier.zero_grad()\n grad_output_for_g = self.Classifier.forward(copy_for_grad)\n grad_ce_loss = self._cross_entropy_loss(grad_output_for_g,\n double_label,\n single_batch_size)\n grad_loss = grad_ce_loss\n grad_loss.backward()\n\n # obtain gradients and disable the gradient for the input\n f_grad = copy_for_grad.grad\n # normalized the gradient input.\n # Please change it to other normalization if needed\n if self.config.g_normalize_grad:\n f_grad_norm = f_grad + 1e-15 # DO NOT EDIT! Need a stabilizer in here!!!\n f_grad = f_grad / f_grad_norm.norm(dim=(2, 3), keepdim=True)\n double_adv_grad = f_grad.detach()\n\n if double_adv_img is not None:\n double_adv_img = attr_adv_img_for_g\n\n attr_adv_update_list.append(attr_adv_img_for_g.detach())\n attr_update_list.append(attr_img_for_g.detach())\n\n # if step < 1000:\n # (msp_loss_1 + msp_loss_2).backward()\n # nn.utils.clip_grad_norm_(self.NoiseGenerator.parameters(), 1.0)\n # g_optimizer.step()\n # dec1_loss_sum = torch.tensor(0)\n # dec2_loss_sum = torch.tensor(0)\n # else:\n # Adv_attr example: Update decoder 1 only.\n dec1_loss_sum = self.config.adv_loss_lambda * dec1_proxy_loss + dsgan_loss_sum\n # Attr example: Update all [encoder, decoder 2, M]\n dec2_loss_sum = self.config.dec2_cls_lambda * dec2_proxy_loss + msp_loss_2\n\n for param in self.NoiseGenerator.nonupdate_params:\n param.requires_grad = False\n\n dec1_loss_sum.backward(retain_graph=True)\n\n for param in self.NoiseGenerator.nonupdate_params:\n param.requires_grad = True\n\n dec2_loss_sum.backward(retain_graph=True)\n\n # https://github.com/pytorch/examples/blob/master/word_language_model/main.py\n nn.utils.clip_grad_norm_(self.NoiseGenerator.parameters(), 1.0)\n g_optimizer.step()\n\n\n ######## Phase 3 #########\n # train the Discriminator\n if self.config.f_update_style == 1:\n # merge update\n # + Adversarial training\n # + Attribute generalization\n f_label_list = [torch.cat((label, label, label, label, label), 0)]\n f_update_list = [torch.cat((real_img, attr_update_list[-1], attr_adv_update_list[-1]), 0)]\n\n elif self.config.f_update_style == 2:\n # update false labels first then update the true label\n # + Adversarial training\n # + Attribute generalization\n f_label_list = [double_label, double_label, label]\n f_update_list = [attr_adv_update_list[-1], attr_update_list[-1], real_img]\n\n elif self.config.f_update_style == 3:\n # Update with real image only.\n # - Adversarial training\n # - Attribute generalization\n f_label_list = [label]\n f_update_list = [real_img]\n\n elif self.config.f_update_style == 
4:\n # Update with [real_img, attr_perturbation img]\n # - Adversarial training\n # + Attribute generalization\n f_label_list = [torch.cat((label, label), 0)]\n f_update_list = [torch.cat((attr_update_list[-1][:single_batch_size], real_img), 0)]\n\n elif self.config.f_update_style == -1:\n # finetune our generator only.\n # Attack only: f_update_style = -1\n if (step % self.config.save_step) == (self.config.save_step - 1):\n self._save_model(step)\n # self.defence_regular_eval(iter_step=step)\n continue\n\n else:\n raise Exception(\"[!] f_update_style should be [1: single, 2: twice, -1: no update]\")\n\n self.Classifier.train()\n noise_class_output_for_debugging = None\n noise_class_loss_for_debugging = None\n real_pred_sum = 0.0\n fake_pred_sum = 0.0\n for image_for_f, label_for_f in zip(f_update_list, f_label_list):\n # batch-size = 1\n\n self.Classifier.zero_grad()\n\n noise_class_output = self.Classifier(image_for_f)\n if noise_class_output_for_debugging is None:\n noise_class_output_for_debugging = noise_class_output\n\n cls_loss = self._cross_entropy_loss(noise_class_output,\n label_for_f,\n single_batch_size)\n if noise_class_loss_for_debugging is None:\n noise_class_loss_for_debugging = cls_loss\n\n f_loss = cls_loss\n # update the classifier and the generator\n f_loss.backward()\n # https://github.com/pytorch/examples/blob/master/word_language_model/main.py\n nn.utils.clip_grad_norm_(self.Classifier.parameters(), 1.0)\n f_optimizer.step()\n\n ######## Logging ##########\n # All images\n acc_real_img = self._compute_acc(class_output[-single_batch_size:], label).data\n loss_real_img = self._cross_entropy_loss(class_output[-single_batch_size:],\n label, single_batch_size)\n\n if self.config.dsgan_lambda > 0.0:\n self.tensorboard.add_scalar('train/lambda', self.config.dsgan_lambda, step)\n\n self.Classifier.eval()\n eval_image_for_f = torch.cat((attr_update_list[-1], attr_adv_update_list[-1]), 0)\n eval_output_for_f = self.Classifier(eval_image_for_f)\n\n # Attr images\n acc_attr_img = self._compute_acc(\n eval_output_for_f[: single_batch_size * 2], double_label).data\n loss_attr_img_cls = self._cross_entropy_loss(\n eval_output_for_f[: single_batch_size * 2], double_label, single_batch_size)\n # Adv_attr images\n acc_adv_attr_img = self._compute_acc(eval_output_for_f[single_batch_size * 2:], double_label).data\n loss_adv_attr_img = self._cross_entropy_loss(eval_output_for_f[single_batch_size * 2:],\n double_label, single_batch_size)\n\n self.tensorboard.add_scalar('train/loss_real_img_cls', loss_real_img.data, step)\n self.tensorboard.add_scalar('train/acc_real_img', acc_real_img, step)\n self.tensorboard.add_scalar('train/loss_adv_attr_img_cls', loss_adv_attr_img.data, step)\n self.tensorboard.add_scalar('train/acc_adv_attr_img', acc_adv_attr_img, step)\n self.tensorboard.add_scalar('train/attr_img_cls_loss', loss_attr_img_cls.data, step)\n self.tensorboard.add_scalar('train/attr_img_acc', acc_attr_img, step)\n\n if step % self.config.log_step == 0:\n print(\"\")\n print(\"[{}/{}] Acc_F_real_img: {:.4f} Loss_F_real_img: {:.4f} Acc_F_adat_img: {:.4f} \"\n \"Loss_F_adat_img: {:.4f}\".\n format(step, self.config.max_step, acc_real_img, loss_real_img.data,\n acc_adv_attr_img, loss_adv_attr_img.data))\n\n if self.config.train_g_iter > 0:\n self.tensorboard.add_scalar('train/l_rec', l_rec_2 * self.config.g_rec_lambda, step)\n self.tensorboard.add_scalar('train/l_vae', l_vae_2 * self.config.g_vae_lambda, step)\n self.tensorboard.add_scalar('train/l_msp', l_msp_2 * self.config.g_msp_lambda, 
step)\n if (self.config.g_z_dim > 0) and (dsgan_magnitude is not None):\n if self.config.dsgan_lambda > 0.0:\n self.tensorboard.add_scalar('train/dsgan_loss_sum', dsgan_loss_sum.data, step)\n self.tensorboard.add_scalar('train/dsgan_loss_last', dsgan_magnitude.data, step)\n if step % self.config.log_step == 0:\n print(\"[{}/{}] our_loss: {:.4f} dec1 adv_loss: {:.4f} dec2 adv_loss: {:.4f}\".\n format(step, self.config.max_step, dsgan_magnitude.data, dec1_proxy_loss,\n dec2_proxy_loss))\n self.tensorboard.add_scalar('train/dec1_loss_sum', dec1_loss_sum.data, step)\n self.tensorboard.add_scalar('train/dec2_loss_sum', dec2_loss_sum.data, step)\n # self.tensorboard.add_scalar('train/dec1_msp_loss', msp_loss_1.data, step)\n self.tensorboard.add_scalar('train/dec2_msp_loss', msp_loss_2.data, step)\n self.tensorboard.add_scalar('train/l_msp_1_', msp_loss_1__.data, step)\n self.tensorboard.add_scalar('train/l_msp_2_', msp_loss_2__.data, step)\n\n self.tensorboard.add_scalar('train/dec1_adv_loss', dec1_proxy_loss.data, step)\n self.tensorboard.add_scalar('train/dec2_adv_loss', dec2_proxy_loss.data, step)\n\n # save checkpoints and noise image\n if step % self.config.save_step == 0: # self.config.save_step - 1:\n # attr_list = img_grad_advimg.shape[0] * [[0, 1.0]]\n # attr_adv_img_for_g_max, attr_img_for_g_max = self.NoiseGenerator.predict(\n # img_grad_advimg, double_label, g_z, attr_list)\n # attr_adv_img_for_g_max = torch.clamp(attr_adv_img_for_g_max, 0.0, 1.0)\n # attr_img_for_g_max = torch.clamp(attr_img_for_g_max, 0.0, 1.0)\n #\n # attr_list = img_grad_advimg.shape[0] * [[0, 0.0]]\n # attr_adv_img_for_g_min, attr_img_for_g_min = self.NoiseGenerator.predict(\n # img_grad_advimg, double_label, g_z, attr_list)\n # attr_adv_img_for_g_min = torch.clamp(attr_adv_img_for_g_min, 0.0, 1.0)\n # attr_img_for_g_min = torch.clamp(attr_img_for_g_min, 0.0, 1.0)\n #\n # attr_adv_img_for_g__, attr_img_for_g__, _, _, _ = self.NoiseGenerator.forward(\n # img_grad_advimg, double_label, g_z)\n # attr_adv_img_for_g__ = torch.clamp(attr_adv_img_for_g__, 0.0, 1.0)\n # attr_img_for_g__ = torch.clamp(attr_img_for_g__, 0.0, 1.0)\n\n attr_list = img_grad_advimg.shape[0] * [[0, 1.0]]\n adv_noise_for_g_max, attr_img_for_g_max = self.NoiseGenerator.predict(\n img_grad_advimg, double_label, g_z, attr_list)\n adv_noise_for_g_max = self.config.epsilon * adv_noise_for_g_max\n attr_adv_img_for_g_max = torch.clamp(attr_img_for_g_max + adv_noise_for_g_max, 0.0, 1.0)\n attr_img_for_g_max = torch.clamp(attr_img_for_g_max, 0.0, 1.0)\n\n attr_list = img_grad_advimg.shape[0] * [[0, 0.0]]\n adv_noise_for_g_min, attr_img_for_g_min = self.NoiseGenerator.predict(\n img_grad_advimg, double_label, g_z, attr_list)\n adv_noise_for_g_min = self.config.epsilon * adv_noise_for_g_min\n attr_adv_img_for_g_min = torch.clamp(attr_img_for_g_min + adv_noise_for_g_min, 0.0, 1.0)\n attr_img_for_g_min = torch.clamp(attr_img_for_g_min, 0.0, 1.0)\n\n adv_noise_for_g__, attr_img_for_g__, _, _, _ = self.NoiseGenerator.forward(\n img_grad_advimg, double_label, g_z)\n adv_noise_for_g__ = self.config.epsilon * adv_noise_for_g__\n attr_adv_img_for_g__ = torch.clamp(attr_img_for_g__ + adv_noise_for_g__, 0.0, 1.0)\n attr_img_for_g__ = torch.clamp(attr_img_for_g__, 0.0, 1.0)\n\n self.tensorboard.add_image('train/%s_attr_rot' % self.config.comment, tvutils.make_grid(\n torch.cat((attr_adv_img_for_g_min[:15], attr_adv_img_for_g__[:15], attr_adv_img_for_g_max[:15],\n attr_img_for_g_min[:15], attr_img_for_g__[:15], attr_img_for_g_max[:15]\n ), dim=0), nrow=15), step)\n\n 
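# Note: the grid logged above stacks six rows of 15 samples: adversarial images\n            # with the controlled attribute forced to 0.0, left at the model's own\n            # prediction, and forced to 1.0 (rows 1-3), followed by the corresponding\n            # attribute-only reconstructions (rows 4-6). The commented-out calls below\n            # log the same content as three separate per-setting grids.\n            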
# self.tensorboard.add_image('train/attr_maxrot', tvutils.make_grid(\n # torch.cat((attr_adv_img_for_g_max[:15], attr_img_for_g_max[:15]), dim=0), nrow=15), step)\n #\n # self.tensorboard.add_image('train/attr_minrot', tvutils.make_grid(\n # torch.cat((attr_adv_img_for_g_min[:15], attr_img_for_g_min[:15]), dim=0), nrow=15), step)\n #\n # self.tensorboard.add_image('train/attr_noattr_pert', tvutils.make_grid(\n # torch.cat((attr_adv_img_for_g__[:15], attr_img_for_g__[:15]), dim=0), nrow=15), step)\n\n if self.config.g_use_grad:\n # adv images\n slice1 = attr_adv_update_list[-1][:single_batch_size]\n slice2 = attr_update_list[-1][:single_batch_size]\n\n # noise = clamp_noise.detach()[:single_batch_size]\n # noise_abs = torch.abs(noise)\n # noise_min = torch.min(noise)\n # noise_max = torch.max(noise)\n # noise = noise_abs - noise_min\n # noise /= noise_max\n\n noise = torch.abs(slice1 - slice2)\n\n grad_abs = torch.abs(double_adv_grad)\n grad_min = torch.min(grad_abs)\n grad_rescale = grad_abs - grad_min\n grad_max = torch.max(grad_rescale)\n grad_rescale /= grad_max\n grad_slice1 = grad_rescale[:single_batch_size]\n grad_slice2 = grad_rescale[single_batch_size:]\n self.tensorboard.add_image('train/%s_Real_Attadv_Att_Noise_GradAttAdv_GradAttr' % self.config.comment,\n tvutils.make_grid(torch.cat((real_img[:15], slice1[:15],\n slice2[:15], noise[:15],\n grad_slice1[:15],\n grad_slice2[:15]), 0), nrow=15),\n step)\n\n self._save_model(step)\n # self.defence_regular_eval(iter_step=step)\n\n def _test_classifier(self, image_tensor, label_tensor, iter_step=0, method_name='PGD'):\n total_acc_f = []\n num_items = len(label_tensor)\n self.Classifier.eval()\n for index in range(0, num_items, self.config.single_batch_size):\n # first slice into batch\n adv_img = image_tensor[index:min(index + self.config.single_batch_size, num_items)]\n label = label_tensor[index:min(index + self.config.single_batch_size, num_items)]\n\n # run classifier\n logits = self.Classifier.forward(adv_img)\n\n # get accuracy\n acc_f = self._compute_acc(logits, label)\n total_acc_f.append(acc_f.data)\n\n # aggregate the performance\n performance = sum(total_acc_f) / len(total_acc_f)\n print(\"[{} / {}] Acc: {:.4f}\".format(method_name, iter_step, performance))\n\n if self.tensorboard is not None:\n self.tensorboard.add_scalar('test/{}_acc'.format(method_name), performance, iter_step)\n\n def get_sample_pdf_of_checkpoint(self, default_z_iter=10):\n loader = iter(self.test_data_loader)\n\n test_dir = os.path.join(self.config.model_dir, 'test')\n if not os.path.exists(test_dir):\n os.makedirs(test_dir)\n\n self.Classifier.eval()\n self.NoiseGenerator.eval()\n total_acc_f = []\n total_acc_g = []\n real_img_arr = []\n real_label_arr = []\n adv_img_arr = []\n adv_att_arr = []\n\n for step in trange(len(self.test_data_loader), ncols=80):\n try:\n data = loader.next()\n except StopIteration:\n print(\"[!] Test sample generation finished. 
Samples are in {}\".format(test_dir))\n break\n\n real_img = self._get_variable(data[0].type(torch.FloatTensor))\n if (not self.config.is_rgb) and (len(real_img.shape) == 3):\n real_img = torch.unsqueeze(real_img, 1)\n\n label = self._get_variable(data[1].type(torch.LongTensor))\n single_batch_size = label.size(0)\n\n ######## Phase 1 #######\n # Grab gradient from f before training the G\n self.Classifier.zero_grad()\n grad_input = real_img.detach()\n grad_input.requires_grad = True\n class_output = self.Classifier.forward(grad_input)\n\n # compute loss\n f_loss = self._cross_entropy_loss(class_output, label, single_batch_size)\n f_loss.backward()\n\n if self.config.g_use_grad:\n f_grad = grad_input.grad\n if self.config.g_normalize_grad:\n f_grad_norm = f_grad + 1e-15 # DO NOT EDIT! Need a stabilizer in here!!!\n f_grad = f_grad / f_grad_norm.norm(dim=(2, 3), keepdim=True)\n\n # Phase 2 #\n num_iter_z = default_z_iter if self.config.g_z_dim > 0 else 1\n adv_img_inner_arr = []\n adv_att_inner_arr = []\n for _ in range(num_iter_z):\n adv_grad = f_grad.detach()\n\n if self.config.g_method % 2 == 1:\n adv_sum = torch.zeros_like(real_img)\n else:\n adv_sum = None\n\n if self.config.g_z_dim > 0:\n if self.config.num_gpu > 0:\n g_z = torch.cuda.FloatTensor(single_batch_size, self.config.g_z_dim).normal_()\n else:\n g_z = torch.FloatTensor(single_batch_size, self.config.g_z_dim).normal_()\n else:\n g_z = None\n\n self.NoiseGenerator.zero_grad()\n for g_iter_step_no in range(self.config.train_g_iter):\n img_grad_advimg = real_img\n if self.config.g_use_grad:\n img_grad_advimg = torch.cat((img_grad_advimg, adv_grad), 1)\n\n if self.config.g_method == 3:\n img_grad_advimg = torch.cat((img_grad_advimg, adv_sum), 1)\n\n # feed it to the generator\n noise_output = self.NoiseGenerator.forward(img_grad_advimg, label, g_z)\n\n # generate learned noise\n if self.config.g_method % 2 == 1:\n clamp_noise = self._merge_noise(adv_sum, noise_output,\n self.config.epsilon * self.config.g_ministep_size,\n self.config.epsilon)\n else:\n clamp_noise = self.config.epsilon * noise_output\n adv_img_for_g = torch.clamp(real_img.detach() + clamp_noise,\n 0.0, 1.0)\n copy_for_grad = adv_img_for_g.detach()\n copy_for_grad.requires_grad = True\n\n # preparing for the next mini-step\n # Note: we are not updating the Generator.\n if g_iter_step_no + 1 != self.config.train_g_iter:\n if self.config.g_use_grad:\n self.Classifier.zero_grad()\n grad_output_for_g = self.Classifier.forward(copy_for_grad)\n grad_ce_loss = self._cross_entropy_loss(grad_output_for_g,\n label,\n single_batch_size)\n grad_loss = grad_ce_loss\n grad_loss.backward()\n\n # obtain gradients and disable the gradient for the input\n f_inner_grad = copy_for_grad.grad\n if self.config.g_normalize_grad:\n f_inner_grad_norm = f_inner_grad + 1e-15 # DO NOT EDIT! 
Need a stabilizer in here!!!\n f_inner_grad = f_inner_grad / f_inner_grad_norm.norm(dim=(2, 3), keepdim=True)\n adv_grad = f_inner_grad.detach()\n\n if adv_sum is not None:\n adv_sum = clamp_noise\n\n # generate learned noise\n target_image = adv_img_for_g.detach()\n target_attack = clamp_noise.detach()\n\n adv_img_inner_arr.append(target_image.detach().data)\n adv_att_inner_arr.append(target_attack.detach().data)\n\n self.Classifier.zero_grad()\n class_output = self.Classifier.forward(real_img)\n noise_class_output = self.Classifier.forward(target_image)\n acc_f = self._compute_acc(class_output, label)\n acc_g = self._compute_acc(noise_class_output, label)\n total_acc_f.append(acc_f.data)\n total_acc_g.append(acc_g.data)\n\n real_img_arr.append(real_img.unsqueeze(1).detach().data)\n real_label_arr.append(label.data)\n adv_img_arr.append(torch.transpose(torch.stack(adv_img_inner_arr), 0, 1))\n adv_att_arr.append(torch.transpose(torch.stack(adv_att_inner_arr), 0, 1))\n\n print(\"[{}] Acc_F: {:.4f}, Acc_FG: {}\".format(test_dir,\n sum(total_acc_f) / len(total_acc_f),\n sum(total_acc_g) / len(total_acc_g)))\n\n print(\"Converting the results into numpy format.\")\n real_img_arr = torch.cat(real_img_arr, 0)\n orig_data_cpu = real_img_arr.mul(255).clamp(0, 255).byte().permute(0, 1, 3, 4, 2).cpu().numpy()\n\n real_label_arr = torch.cat(real_label_arr, 0)\n orig_label_cpu = real_label_arr.to(dtype=torch.int16).cpu().numpy()\n\n adv_img_arr = torch.cat(adv_img_arr, 0)\n adv_img_cpu = adv_img_arr.mul(255).clamp(0, 255).byte().permute(0, 1, 3, 4, 2).cpu().numpy()\n\n adv_att_arr = torch.clamp((1.0 + torch.cat(adv_att_arr, 0) / self.config.epsilon) / 2.0, 0.0, 1.0)\n adv_att_cpu = adv_att_arr.mul(255).clamp(0, 255).byte().permute(0, 1, 3, 4, 2).cpu().numpy()\n\n print(\"start generating a pdf file\")\n item_dict_for_pdf = {}\n for real, label, img, att in zip(orig_data_cpu, orig_label_cpu, adv_img_cpu, adv_att_cpu):\n current_std = np.reshape(att[4:], [6, -1])\n current_std = np.expand_dims(current_std, 1) - np.expand_dims(current_std, 0)\n current_std = np.mean(np.sum(current_std * current_std, axis=-1) * (1 - np.eye(6)))\n\n temp_arr = np.concatenate([img[4:], att[4:]], axis=0)\n temp_arr = np.transpose(temp_arr, (1, 0, 2, 3))\n shape = temp_arr.shape\n\n if (label not in item_dict_for_pdf) or (item_dict_for_pdf[label][0] < current_std):\n if shape[3] == 1:\n item_dict_for_pdf[label] = [current_std, np.reshape(temp_arr, (shape[0], shape[1] * shape[2]))]\n else:\n item_dict_for_pdf[label] = [current_std,\n np.reshape(temp_arr, (shape[0], shape[1] * shape[2], shape[3]))]\n\n sorted_list = [item_dict_for_pdf[_][1] for _ in range(self.config.num_classes)]\n output = np.concatenate(sorted_list, axis=0)\n\n print(\"start saving it in {} as vis_{}.pdf\".format(self.config.log_dir, self.config.model_name))\n import scipy.misc\n scipy.misc.imsave(os.path.join(self.config.log_dir, 'vis_{}.pdf'.format(self.config.model_name)), output)\n\n def _run_single_attack(self, iter_step=0, method_name='PGD'):\n # set test dir to save\n test_dir = os.path.join(self.config.model_dir, 'test')\n if not os.path.exists(test_dir):\n os.makedirs(test_dir)\n\n # set a new data_loader\n loader = iter(self.test_data_loader)\n steps_required_per_epoch = len(loader)\n if method_name.endswith('_slow'):\n steps_required_per_epoch = 5\n\n print(steps_required_per_epoch)\n print('[Info] Start running {} for step {}'.format(method_name, iter_step))\n\n # run attack mechanism\n output_list = []\n target_list = []\n 
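# Note: each test batch below is perturbed with the selected attack method and\n        # accumulated; accuracy is computed once over the concatenated tensors in\n        # _test_classifier after the loop.\n        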
self.Classifier.eval()\n        for step in range(steps_required_per_epoch):\n            try:\n                data = loader.next()\n            except StopIteration:\n                loader = iter(self.test_data_loader)\n                data = loader.next()\n\n            # convert uint8 image data to float\n            input_img = self._get_variable(data[0].type(torch.FloatTensor))\n            if (not self.config.is_rgb) and (len(input_img.shape) == 3):\n                input_img = torch.unsqueeze(input_img, 1)\n            target_label = self._get_variable(data[1].type(torch.LongTensor))\n            single_batch_size = target_label.size(0)\n\n            if method_name == 'FGSM':\n                adv_result = run_fgsm(self.FGSM, self.Classifier, input_img, target_label,\n                                      self.config.epsilon)\n            elif method_name == 'PGD':\n                adv_result = run_pgd(self.PGD, self.Classifier, input_img, target_label,\n                                     self.config.epsilon, self.config.test_iter_steps)\n            elif method_name == 'CW':\n                adv_result = run_cw(self.CW, self.Classifier, input_img, target_label)\n            elif method_name == 'ORIGINAL':\n                adv_result = input_img\n\n            output_list.append(adv_result)\n            target_list.append(target_label)\n\n        output_tensor = torch.cat(output_list, dim=0)\n        label_tensor = torch.cat(target_list, dim=0)\n\n        if self.config.test_save_adv:\n            np.save('{}/attack_{}_step{}_img.npy'.format(test_dir, method_name, iter_step),\n                    output_tensor.permute(0, 2, 3, 1).cpu().numpy())\n            np.save('{}/attack_{}_step{}_label.npy'.format(test_dir, method_name, iter_step),\n                    label_tensor.cpu().numpy())  # labels are a 1-D tensor; no permute needed\n\n        self._test_classifier(output_tensor, label_tensor, iter_step, method_name)\n\n    def defence_regular_eval(self, iter_step=0):\n        # set classifier to be in evaluation mode\n        self.Classifier.eval()\n\n        self._run_single_attack(iter_step, 'FGSM')\n        self._run_single_attack(iter_step, 'PGD')\n        # self._run_single_attack(iter_step, 'CW')\n        self._run_single_attack(iter_step, 'ORIGINAL')\n\n        # switch back to train mode\n        self.Classifier.train()\n        return\n\n    def defence_over_cnw(self, iter_step=0):\n        # assume model is loaded properly\n        self.Classifier.eval()\n\n        self._run_single_attack(iter_step, 'CW')\n\n        self.Classifier.train()\n        return\n\n    def _get_variable(self, inputs):\n        if self.config.num_gpu > 0:\n            out = Variable(inputs.cuda())\n        else:\n            out = Variable(inputs)\n        return out\n","sub_path":"adv_defence/trainer_adv_example_2.py","file_name":"trainer_adv_example_2.py","file_ext":"py","file_size_in_byte":52210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"271611100","text":"import os\nimport uuid\nimport docker.errors\nimport traceback\nimport json\nimport re\n\nLANGUAGES = {\n    'ShellScript': '00',\n    'Python': '01'\n}\n\nSERVICES = {\n    'System': '00',\n    'Docker': '01'\n}\n\nEXCEPTION_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'exceptions.json')\n\nEXCEPTIONS_UUID_LENGTH = 7\n\nif os.path.exists(EXCEPTION_PATH):\n    try:\n        EXCEPTIONS_MAP = json.load(open(EXCEPTION_PATH))\n    except:\n        # The json may be corrupted\n        EXCEPTIONS_MAP = dict()\nelse:\n    EXCEPTIONS_MAP = dict()\n\n\ndef get_exception_codes_from_docker_trace():\n    container_code = EXCEPTIONS_MAP[docker.errors.ContainerError.__name__]\n\n    # Get last line of the docker traceback which contains the traceback inside the container\n    docker_traceback = traceback.format_exc().splitlines()[-1].encode('utf_8').decode('unicode_escape')\n    docker_traceback = re.split(':| |\\n', docker_traceback)\n\n    exception_codes = [code for exception, code in EXCEPTIONS_MAP.items()\n                       if exception in docker_traceback and code != container_code]\n\n    return exception_codes\n\n\ndef 
get_exception_code(exception_type):\n\n service_code = SERVICES['System']\n exception_code = EXCEPTIONS_MAP.get(exception_type.__name__, '0000') # '0000' is default exception code\n\n # Exception inside a docker container\n if docker.errors.ContainerError.__name__ in EXCEPTIONS_MAP and \\\n exception_code == EXCEPTIONS_MAP[docker.errors.ContainerError.__name__]:\n\n exception_codes = get_exception_codes_from_docker_trace()\n\n if len(exception_codes) > 0:\n # Take the first code in the list (may have more if multiple exceptions are raised)\n service_code = SERVICES['Docker']\n exception_code = exception_codes.pop()\n\n return exception_code, service_code\n\n\ndef compute_error_code(exception):\n exception_uuid = str(uuid.uuid4())[:EXCEPTIONS_UUID_LENGTH]\n exception_code, service_code = get_exception_code(exception.__class__)\n error_code = '[%s-%s-%s-%s]' % (service_code, LANGUAGES['Python'], exception_code, exception_uuid)\n return error_code\n","sub_path":"substrabac/substrapp/exception_handler.py","file_name":"exception_handler.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"301704929","text":"import os\nfrom pyspark.sql import SparkSession\nimport pandas as pd\nfrom sparktasks.utils.DBUtils import DButils\nfrom sparktasks.utils.utils import UdfUtils\nfrom sparktasks.utils.config import Config\nfrom pyspark.sql.functions import udf\nfrom pyspark.sql.types import StringType\nimport logging\nimport time\n\n\nclass Extract:\n logger = logging.getLogger('sparktasks.housing.Extract')\n\n def __init__(self):\n self.DButils = DButils()\n self.spark = SparkSession.builder.appName('HousingExtract').getOrCreate()\n self.config = Config()\n self.spark.conf.set(\"spark.sql.shuffle.partitions\", 20)\n self.metadata_df = self.DButils.load_from_db(self.spark, self.config.metadata)\n self.metadata_df.createGlobalTempView(self.config.metadata)\n\n # Extracts csv files from zillow.com\n def extract_from_source(self): # ,**kwargs):\n # ti = kwargs['ti']\n # metadata_dictionary = ti.xcom_pull(task_ids='extract_metadata',key=\"dag_last_run_details\")\n housing_price = self.config.housing_price\n print(housing_price)\n self.extract(self.config.housing_price)\n housing_inventory = self.config.housing_inventory\n self.extract(self.config.housing_inventory)\n\n # saves in local folder.Gets previous run from metadata and discards\n # old data.\n def extract(self, source_map):\n try:\n housing_dict = dict(source_map)\n for key, value in housing_dict.items():\n logging.info(\"Data Extract in progress from %s\", value)\n housing_data = pd.read_csv(value)\n data_dir = self.config.data_dir\n full_path = os.path.join(data_dir, key + \".csv\")\n housing_data.to_csv(full_path, index=False)\n logging.info(\"Data Extracted to %s\", full_path)\n except Exception as ex:\n logging.error(\"Error extracting data %s\", ex)\n raise ex\n\n # Stores CSV in landing database as raw tables\n def store_raw_in_db(self):\n self.write_raw(self.config.housing_price)\n split_udf = udf(lambda d: UdfUtils.split_str(d), StringType())\n self.write_raw(self.config.housing_inventory, split_udf)\n\n def write_raw(self, type_config, split_udf=None):\n housing_dict = dict(type_config)\n data_dir = self.config.data_dir\n for name, value in housing_dict.items():\n try:\n start_time = time.time()\n housing_path = os.path.join(data_dir, name + \".csv\")\n self.logger.info(\"Started to create Raw table from %s\", name)\n file_name = 
os.path.basename(housing_path)\n self.spark.sparkContext.addFile(housing_path)\n housing_us_df = self.spark.read.csv('file://' + housing_path, header=True, inferSchema=True)\n housing_us_df.filter(housing_us_df.StateName.isNull())\n if split_udf is not None:\n housing_us_df = housing_us_df.withColumn(\"RegionName\", split_udf(housing_us_df.RegionName))\n print(housing_us_df.columns)\n housing_us_df = housing_us_df.fillna(0)\n if housing_us_df.count() == 0:\n return\n\n table_name = self.config.get_config('RAW', name)\n self.DButils.save_to_db(housing_us_df, table_name, mode='overwrite')\n self.logger.info(\"Created Raw table name %s\", table_name)\n end_time = time.time()\n self.logger.info(\"It took this long to run write_raw: {}\".format(end_time - start_time))\n except Exception as ex:\n self.logger.error(\"Error in store_raw_in_db %s\", ex)\n raise ex\n\n\nif __name__ == \"__main__\":\n extract = Extract()\n extract.extract_from_source()\n extract.store_raw_in_db()\n","sub_path":"spark/app/sparktasks/housing/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"196306848","text":"from collections import defaultdict\nfrom typing import List\n\nfrom django.core.cache import cache\nfrom graphene.types.scalars import Int, String\n\nfrom chat.tasks import update_dialog\n\n\"\"\"\"\nuser -> dict\nuser = {\n total_msgs : int\n dialogs: {\n dialog_id : int\n }\n}\n\"\"\"\n\n\nclass MissedMsgs:\n @classmethod\n def new_msg(cls, users_ids: List, dialog_id: Int, msg: String):\n users_ids.remove(msg.sender_id)\n cls.__cache_dialog_last_msg(dialog_id=dialog_id, msg=msg)\n cls.__incr_users_msgs(users_ids, dialog_id)\n update_dialog.apply_async(args=(dialog_id,))\n\n def __incr_users_msgs(users_ids, dialog_id):\n qs = cache.get_many(users_ids)\n for user_id in users_ids:\n if qs.get(user_id) == None:\n qs[user_id] = dict()\n qs[user_id][\"total_msgs\"] = 1\n qs[user_id][\"dialogs\"] = defaultdict(int)\n else:\n qs[user_id][\"total_msgs\"] += 1\n\n qs[user_id][\"dialogs\"][dialog_id] += 1\n cache.set_many(qs)\n\n def __cache_dialog_last_msg(dialog_id, msg) -> None:\n cache.set(f\"{dialog_id}_lstmsg\", msg)\n","sub_path":"chat/missedmsgs/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"352780256","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport json\nimport tensorflow as tf\n\nfrom tf_slim import tfexample_decoder\nfrom protos import reader_pb2\n\nPAD = '[PAD]'\nNUM_CHOICES = 4\n\n\nclass TFExampleFields(object):\n \"\"\"Fields in the tf.train.Example.\"\"\"\n img_id = 'img_id'\n annot_id = 'annot_id'\n answer_label = 'answer_label'\n\n img_format = 'image/format'\n img_encoded = 'image/encoded'\n img_bbox_label = \"image/object/bbox/label\"\n img_bbox_score = \"image/object/bbox/score\"\n img_bbox_feature = \"image/object/bbox/feature\"\n img_bbox_scope = \"image/object/bbox/\"\n img_bbox_field_keys = ['ymin', 'xmin', 'ymax', 'xmax']\n\n cls_bert = 'cls_bert'\n\n question = 'question'\n question_tag = 'question_tag'\n question_bert = 'question_bert'\n\n answer_choice = 'answer_choice'\n answer_choice_tag = 'answer_choice_tag'\n answer_choice_bert = 'answer_choice_bert'\n\n\nclass InputFields(object):\n \"\"\"Names of the input tensors.\"\"\"\n # Meta 
information.\n img_id = 'img_id'\n annot_id = 'annot_id'\n answer_label = 'answer_label'\n\n # Image data.\n img_data = 'img_data'\n img_height = 'img_height'\n img_width = 'img_width'\n\n # Objects.\n num_objects = 'num_objects'\n object_bboxes = 'object_bboxes'\n object_labels = 'object_labels'\n object_scores = 'object_scores'\n object_features = 'object_features'\n\n # Question and answer choices.\n cls_bert = 'cls_bert'\n question = 'question'\n question_tag = 'question_tag'\n question_len = 'question_len'\n question_bert = 'question_bert'\n answer_choices = 'answer_choices'\n answer_choices_tag = 'answer_choices_tag'\n answer_choices_len = 'answer_choices_len'\n answer_choices_bert = 'answer_choices_bert'\n answer_choices_with_question = 'answer_choices_with_question'\n answer_choices_with_question_tag = 'answer_choices_with_question_tag'\n answer_choices_with_question_len = 'answer_choices_with_question_len'\n\n # Bert features of both question and answer choices.\n question_bert = 'question_bert'\n\n\ndef _pad_sequences(sequences, pad=PAD):\n \"\"\"Pads sequences to the max-length.\n\n Args:\n sequences: A list of 1-D tensor of size num_sequences, each elem in\n the 1-D tensor denotes a sequence.\n\n Returns:\n padded_sequences: A [num_sequences, max_sequence_len] tensor.\n lengths: A [num_sequences] int tensor.\n \"\"\"\n lengths = [tf.shape(x)[0] for x in sequences]\n padded_size = tf.reduce_max(lengths)\n padded_sequences = tf.stack([\n tf.pad(x,\n paddings=[[0, padded_size - lengths[i]]],\n mode='CONSTANT',\n constant_values=pad) for i, x in enumerate(sequences)\n ])\n return padded_sequences, lengths\n\n\ndef _pad_feature_sequences(sequences, pad=PAD, feature_dims=768):\n \"\"\"Pads sequences to the max-length.\n\n Args:\n sequences: A list of 1-D tensor of size num_sequences, each elem in\n the 1-D tensor denotes a sequence.\n\n Returns:\n padded_sequences: A [num_sequences, max_sequence_len] tensor.\n lengths: A [num_sequences] int tensor.\n \"\"\"\n lengths = [tf.shape(x)[0] for x in sequences]\n padded_size = tf.reduce_max(lengths)\n padded_sequences = tf.stack([\n tf.pad(x,\n paddings=[[0, padded_size - lengths[i]], [0, 0]],\n mode='CONSTANT',\n constant_values=pad) for i, x in enumerate(sequences)\n ])\n return padded_sequences, lengths\n\n\ndef _update_decoded_example(decoded_example, options):\n \"\"\"Updates the decoded example, add size to the varlen feature.\n\n Args:\n decoded_example: A tensor dictionary keyed by name.\n options: An instance of reader_pb2.Reader.\n\n Returns:\n decoded_example: The same instance with content modified.\n \"\"\"\n # Number of objects.\n object_bboxes = decoded_example[InputFields.object_bboxes]\n num_objects = tf.shape(object_bboxes)[0]\n\n # Object Fast-RCNN features.\n object_features = decoded_example.pop(TFExampleFields.img_bbox_feature)\n object_features = tf.reshape(object_features,\n [-1, options.frcnn_feature_dims])\n\n # Question length.\n question = decoded_example[InputFields.question]\n question_len = tf.shape(question)[0]\n question_tag = decoded_example[InputFields.question_tag]\n\n # Answer choices and lengths.\n answer_choices_list = [\n decoded_example.pop(TFExampleFields.answer_choice + '_%i' % i)\n for i in range(1, 1 + NUM_CHOICES)\n ]\n answer_choices_with_question_list = [\n tf.concat([['[CLS]'], question, ['[SEP]'], x, ['[SEP]']], 0)\n for x in answer_choices_list\n ]\n (answer_choices, answer_choices_len) = _pad_sequences(answer_choices_list)\n (answer_choices_with_question, answer_choices_with_question_len\n 
) = _pad_sequences(answer_choices_with_question_list)\n\n # Answer tags.\n answer_choices_tag_list = [\n decoded_example.pop(TFExampleFields.answer_choice_tag + '_%i' % i)\n for i in range(1, 1 + NUM_CHOICES)\n ]\n answer_choices_with_question_tag_list = [\n tf.concat([[-1], question_tag, [-1], x, [-1]], 0)\n for x in answer_choices_tag_list\n ]\n answer_choices_tag, _ = _pad_sequences(answer_choices_tag_list, -1)\n answer_choices_with_question_tag, _ = _pad_sequences(\n answer_choices_with_question_tag_list, -1)\n\n # Question bert.\n question_bert_list = [\n tf.reshape(decoded_example.pop(TFExampleFields.question_bert + '_%i' % i),\n [-1, options.bert_feature_dims])\n for i in range(1, 1 + NUM_CHOICES)\n ]\n answer_choice_bert_list = [\n tf.reshape(\n decoded_example.pop(TFExampleFields.answer_choice_bert + '_%i' % i),\n [-1, options.bert_feature_dims]) for i in range(1, 1 + NUM_CHOICES)\n ]\n question_bert, _ = _pad_feature_sequences(question_bert_list, 0,\n options.bert_feature_dims)\n answer_choices_bert, _ = _pad_feature_sequences(answer_choice_bert_list, 0,\n options.bert_feature_dims)\n\n # CLS bert.\n cls_bert_list = [\n decoded_example.pop(TFExampleFields.cls_bert + '_%i' % i)\n for i in range(1, 1 + NUM_CHOICES)\n ]\n cls_bert = tf.stack(cls_bert_list, axis=0)\n\n decoded_example.update({\n InputFields.num_objects:\n num_objects,\n InputFields.object_features:\n object_features,\n InputFields.question_tag:\n question_tag,\n InputFields.question_bert:\n question_bert,\n InputFields.question_len:\n question_len,\n InputFields.answer_choices:\n answer_choices,\n InputFields.answer_choices_tag:\n answer_choices_tag,\n InputFields.answer_choices_bert:\n answer_choices_bert,\n InputFields.answer_choices_len:\n answer_choices_len,\n InputFields.answer_choices_with_question:\n answer_choices_with_question,\n InputFields.answer_choices_with_question_tag:\n answer_choices_with_question_tag,\n InputFields.answer_choices_with_question_len:\n answer_choices_with_question_len,\n InputFields.cls_bert: cls_bert,\n })\n\n # Image shape.\n if InputFields.img_data in decoded_example:\n image = decoded_example[InputFields.img_data]\n # image = _resize_image(image)\n\n image_shape = tf.shape(image)\n height, width = image_shape[0], image_shape[1]\n decoded_example.update({\n InputFields.img_data: image,\n InputFields.img_height: height,\n InputFields.img_width: width,\n })\n\n return decoded_example\n
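\n# Editor's note: the sketch below is illustrative only and not part of the\n# original reader; it shows the intended behaviour of the padding helpers\n# above, assuming eager execution and a string PAD token:\n#   seqs = [tf.constant(['a', 'b', 'c']), tf.constant(['d'])]\n#   padded, lens = _pad_sequences(seqs, pad='[PAD]')\n#   # padded -> [['a', 'b', 'c'], ['d', '[PAD]', '[PAD]']]; lens -> [3, 1]\n# _pad_feature_sequences pads [sequence_len, feature_dims] tensors the same\n# way, but only along the time axis.\n\n\ndef _parse_single_example(example, options):\n \"\"\"Parses a single tf.Example proto.\n\n Args:\n example: An Example proto.\n options: An instance of reader_pb2.Reader.\n\n Returns:\n A dictionary indexed by tensor name.\n \"\"\"\n # Initialize `keys_to_features`.\n keys_to_features = {\n TFExampleFields.img_id: tf.io.FixedLenFeature([], tf.string),\n TFExampleFields.annot_id: tf.io.FixedLenFeature([], tf.string),\n TFExampleFields.answer_label: tf.io.FixedLenFeature([], tf.int64),\n TFExampleFields.img_bbox_label: tf.io.VarLenFeature(tf.string),\n TFExampleFields.img_bbox_score: tf.io.VarLenFeature(tf.float32),\n TFExampleFields.img_bbox_feature: tf.io.VarLenFeature(tf.float32),\n TFExampleFields.question: tf.io.VarLenFeature(tf.string),\n TFExampleFields.question_tag: tf.io.VarLenFeature(tf.int64),\n }\n for bbox_key in TFExampleFields.img_bbox_field_keys:\n bbox_field = os.path.join(TFExampleFields.img_bbox_scope, bbox_key)\n keys_to_features[bbox_field] = tf.io.VarLenFeature(tf.float32)\n for i in range(1, 1 + NUM_CHOICES):\n keys_to_features.update({\n TFExampleFields.cls_bert + '_%i' % i:\n 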
tf.io.VarLenFeature(tf.float32),\n TFExampleFields.question_bert + '_%i' % i:\n tf.io.VarLenFeature(tf.float32),\n TFExampleFields.answer_choice + '_%i' % i:\n tf.io.VarLenFeature(tf.string),\n TFExampleFields.answer_choice_tag + '_%i' % i:\n tf.io.VarLenFeature(tf.int64),\n TFExampleFields.answer_choice_bert + '_%i' % i:\n tf.io.VarLenFeature(tf.float32)\n })\n\n # Initialize `items_to_handlers`.\n items_to_handlers = {\n InputFields.img_id:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.img_id,\n default_value=''),\n InputFields.annot_id:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.annot_id,\n default_value=''),\n InputFields.answer_label:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.answer_label,\n default_value=-1),\n InputFields.object_bboxes:\n tfexample_decoder.BoundingBox(\n keys=TFExampleFields.img_bbox_field_keys,\n prefix=TFExampleFields.img_bbox_scope),\n InputFields.object_labels:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.img_bbox_label,\n default_value=''),\n InputFields.object_scores:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.img_bbox_score,\n default_value=0),\n InputFields.question:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.question,\n default_value=PAD),\n InputFields.question_tag:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.question_tag,\n default_value=-1),\n TFExampleFields.img_bbox_feature:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.img_bbox_feature,\n default_value=0),\n }\n\n for i in range(1, 1 + NUM_CHOICES):\n tensor_key = TFExampleFields.cls_bert + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=0)\n tensor_key = TFExampleFields.question_bert + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=0)\n tensor_key = TFExampleFields.answer_choice + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=PAD)\n tensor_key = TFExampleFields.answer_choice_tag + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=-1)\n tensor_key = TFExampleFields.answer_choice_bert + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=0)\n if options.decode_jpeg:\n keys_to_features.update({\n TFExampleFields.img_encoded: tf.io.FixedLenFeature([], tf.string),\n TFExampleFields.img_format: tf.io.FixedLenFeature([], tf.string),\n })\n items_to_handlers.update({\n InputFields.img_data:\n tfexample_decoder.Image(image_key=TFExampleFields.img_encoded,\n format_key=TFExampleFields.img_format,\n shape=None)\n })\n\n # Decode example.\n example_decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,\n items_to_handlers)\n\n output_keys = example_decoder.list_items()\n output_tensors = example_decoder.decode(example)\n output_tensors = [\n x if x.dtype != tf.int64 else tf.cast(x, tf.int32) for x in output_tensors\n ]\n decoded_example = dict(zip(output_keys, output_tensors))\n return _update_decoded_example(decoded_example, options)\n\n\ndef _create_dataset(options, is_training, input_pipeline_context=None):\n \"\"\"Creates dataset object based on options.\n\n Args:\n options: An instance of reader_pb2.Reader.\n is_training: If true, shuffle the dataset.\n input_pipeline_context: A tf.distribute.InputContext instance.\n\n Returns:\n A tf.data.Dataset object.\n \"\"\"\n dataset = 
tf.data.Dataset.list_files(options.input_pattern[:],\n shuffle=is_training)\n\n batch_size = options.batch_size\n if input_pipeline_context:\n if input_pipeline_context.num_input_pipelines > 1:\n dataset = dataset.shard(input_pipeline_context.num_input_pipelines,\n input_pipeline_context.input_pipeline_id)\n batch_size = input_pipeline_context.get_per_replica_batch_size(\n options.batch_size)\n\n if is_training:\n if options.cache_dataset:\n dataset = dataset.cache()\n dataset = dataset.repeat()\n dataset = dataset.shuffle(options.shuffle_buffer_size)\n dataset = dataset.interleave(tf.data.TFRecordDataset,\n cycle_length=options.interleave_cycle_length)\n\n parse_fn = lambda x: _parse_single_example(x, options)\n dataset = dataset.map(map_func=parse_fn,\n num_parallel_calls=options.num_parallel_calls)\n\n padded_shapes = {\n InputFields.img_id: [],\n InputFields.annot_id: [],\n InputFields.answer_label: [],\n InputFields.num_objects: [],\n InputFields.object_bboxes: [None, 4],\n InputFields.object_labels: [None],\n InputFields.object_scores: [None],\n InputFields.object_features: [None, options.frcnn_feature_dims],\n InputFields.cls_bert: [NUM_CHOICES, options.bert_feature_dims],\n InputFields.question: [None],\n InputFields.question_tag: [None],\n InputFields.question_bert: [NUM_CHOICES, None, options.bert_feature_dims],\n InputFields.question_len: [],\n InputFields.answer_choices: [NUM_CHOICES, None],\n InputFields.answer_choices_tag: [NUM_CHOICES, None],\n InputFields.answer_choices_bert: [\n NUM_CHOICES, None, options.bert_feature_dims\n ],\n InputFields.answer_choices_len: [NUM_CHOICES],\n InputFields.answer_choices_with_question: [NUM_CHOICES, None],\n InputFields.answer_choices_with_question_tag: [NUM_CHOICES, None],\n InputFields.answer_choices_with_question_len: [NUM_CHOICES],\n }\n padding_values = {\n InputFields.img_id: '',\n InputFields.annot_id: '',\n InputFields.answer_label: -1,\n InputFields.num_objects: 0,\n InputFields.object_bboxes: 0.0,\n InputFields.object_labels: '',\n InputFields.object_scores: 0.0,\n InputFields.object_features: 0.0,\n InputFields.cls_bert: 0.0,\n InputFields.question: PAD,\n InputFields.question_tag: -1,\n InputFields.question_bert: 0.0,\n InputFields.question_len: 0,\n InputFields.answer_choices: PAD,\n InputFields.answer_choices_tag: -1,\n InputFields.answer_choices_bert: 0.0,\n InputFields.answer_choices_len: 0,\n InputFields.answer_choices_with_question: PAD,\n InputFields.answer_choices_with_question_tag: -1,\n InputFields.answer_choices_with_question_len: 0,\n }\n if options.decode_jpeg:\n padded_shapes.update({\n InputFields.img_data: [None, None, 3],\n InputFields.img_height: [],\n InputFields.img_width: [],\n })\n padding_values.update({\n InputFields.img_data: tf.constant(0, dtype=tf.uint8),\n InputFields.img_height: 0,\n InputFields.img_width: 0,\n })\n dataset = dataset.padded_batch(batch_size,\n padded_shapes=padded_shapes,\n padding_values=padding_values,\n drop_remainder=True)\n dataset = dataset.prefetch(options.prefetch_buffer_size)\n return dataset\n\n\ndef get_input_fn(options, is_training):\n \"\"\"Returns a function that generate input examples.\n\n Args:\n options: An instance of reader_pb2.Reader.\n is_training: If true, shuffle the dataset.\n\n Returns:\n input_fn: a callable that returns a dataset.\n \"\"\"\n if not isinstance(options, reader_pb2.VCRReader):\n raise ValueError('options has to be an instance of Reader.')\n\n def _input_fn(input_pipeline_context=None):\n \"\"\"Returns a python dictionary.\n\n Returns:\n A 
dataset that can be fed to estimator.\n \"\"\"\n return _create_dataset(options, is_training, input_pipeline_context)\n\n return _input_fn\n","sub_path":"readers/bak/vcr_reader.py","file_name":"vcr_reader.py","file_ext":"py","file_size_in_byte":16914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"259511598","text":"# while => bizga tanish while Python da ham sharti False bo'lib qolgancha ishlaydi.\n\nisEntered = False\nname = None\n\nprint(True + 1) # 2\n\nwhile not(isEntered):\n name = input(\"Enter your name: \")\n\n if name.isalpha():\n isEntered = True\n\nprint(\"Hello, \" + name)\n","sub_path":"py_full_course/10_while_loop.py","file_name":"10_while_loop.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"273378615","text":"from Tkinter import *\nroot = Tk()\nV = StringVar()\nS = Scrollbar(root)\nlb = Listbox(root,yscrollcommand = S.set)\nfor item in range(100):\n\tlb.insert(END,str(item*100))\nV.set(('100','200'))\nprint (V.get())\nlb.pack()\nroot.mainloop()","sub_path":"GUI_practice.py","file_name":"GUI_practice.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"433511252","text":"import likeyoubot_resource as lybrsc\nimport likeyoubot_message\nimport cv2\nimport sys\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pyautogui\nimport operator\nimport random\nimport likeyoubot_game as lybgame\nimport likeyoubot_tera as lybgameTera\nfrom likeyoubot_configure import LYBConstant as lybconstant\nimport likeyoubot_scene\nimport time\n\nclass LYBTeraScene(likeyoubot_scene.LYBScene):\n\tdef __init__(self, scene_name):\n\t\tlikeyoubot_scene.LYBScene.__init__(self, scene_name)\n\n\tdef process(self, window_image, window_pixels):\n\n\t\tsuper(LYBTeraScene, self).process(window_image, window_pixels)\n\n\t\trc = 0\n\t\tif self.scene_name == 'nox_init_screen_scene':\n\t\t\trc = self.nox_init_screen_scene()\n\t\telse:\n\t\t\trc = 0\n\n\t\treturn rc\n\n\tdef nox_init_screen_scene(self):\n\t\tprint('DEBUG 1')\n\t\tself.schedule_list = self.get_game_config('schedule_list')\n\t\tif '게임 시작' in self.schedule_list:\n\t\t\t(loc_x, loc_y) = lybgame.LYBGame.locationOnWindow(self.window_image, self.game_object.resource_manager.pixel_box_dic['tera_icon'])\n\t\t\tself.lyb_mouse_click_location(loc_x, loc_y)\n\t\tprint('DEBUG 2')\n\t\treturn 0\n\n\tdef logo_screen_scene(self):\n\n\t\tself.schedule_list = self.get_game_config('schedule_list')\n\t\tprint('DEBUG77:', self.schedule_list)\n\t\tif not '로그인' in self.schedule_list:\n\t\t\treturn 0\n\n\t\tif time.time() - self.get_checkpoint('wait_finding_account') > 30:\n\t\t\tself.status = 0\n\n\t\tif self.status == 0:\n\t\t\tself.set_checkpoint('wait_finding_account')\n\t\t\tif self.get_window_config('multi_account'):\n\t\t\t\tself.loggingToGUI('구글 계정 변경 시도')\n\t\t\t\t\n\t\t\tself.status += 1\n\t\telif self.status == 1:\n\t\t\tif time.time() - self.get_checkpoint('wait_finding_account') > 100:\n\t\t\t\tself.loggingToGUI('구글 계정 감지 실패')\n\t\t\t\treturn -1\n\n\t\t\tif self.get_window_config('multi_account'):\n\t\t\t\tprint('google multi account On')\n\t\t\t\trate = self.game_object.rateMatchedPixelBox(self.window_pixels, 'account_change_icon', custom_tolerance=50)\n\t\t\t\tprint('change icon:', int(rate*100), '%')\n\t\t\t\tis_there = self.lyb_mouse_click('account_change_icon', custom_tolerance=50, 
custom_threshold=0)\n\t\t\t\tprint('account change is clicked:', is_there)\n\t\t\t\tif is_there:\n\t\t\t\t\tself.status = 2\n\t\t\t\t\tself.set_option('load_complete_flag', False)\n\t\t\telse:\t\n\t\t\t\tself.status = 3\n\t\telif self.status == 2:\n\t\t\tprint('wait for loading account select')\n\t\t\tif self.get_option('load_complete_flag'):\n\t\t\t\tself.set_option('load_complete_flag', False)\n\t\t\t\tself.status = 3\n\t\t\telse:\n\t\t\t\tself.status = 1\n\t\telif self.status == 3:\n\t\t\tself.lyb_mouse_click_location(320, 330)\n\t\t\tself.status +=1\n\t\telif self.status == 4:\n\t\t\tself.status -= 1\n\n\t\treturn self.status\n\n\tdef connect_account_scene(self):\n\t\tif self.status == 0:\n\t\t\tself.set_checkpoint('interval_login')\n\t\t\tself.status += 1\n\t\telif self.status == 1:\n\n\t\t\tprint('select_complete_flag:', self.get_option('select_complete_flag'))\n\t\t\tif (self.get_option('select_complete_flag') == True or \n\t\t\t\ttime.time() - self.get_checkpoint('interval_login') > 100):\n\t\t\t\tself.lyb_mouse_click('connect_account_close_icon')\n\t\t\t\tself.game_object.get_scene('logo_screen_scene').set_option('load_complete_flag', True)\n\t\t\t\tself.set_option('select_complete_flag', False)\n\t\t\t\tself.status = 0\n\t\t\telse:\n\t\t\t\t# 회색\n\t\t\t\tprint('DEBUG 00')\n\t\t\t\tis_logff_status = self.lyb_mouse_click('google_login_letter_0', custom_threshold=0.9)\n\t\t\t\tif not is_logff_status:\n\t\t\t\t\tprint('DEBUG 11')\n\t\t\t\t\t# 파란색\n\t\t\t\t\tself.lyb_mouse_click('google_login_icon')\n\t\t\t\telse:\n\t\t\t\t\tprint('DEBUG 12')\n\t\t\t\t\tself.set_option('select_complete_flag', False)\n\n\t\treturn self.status\n\n\tdef google_play_account_select_scene(self):\n\t\t(top_loc_x, top_loc_y) = lybgame.LYBGame.locationOnWindow(\n\t\t\tself.window_image, \n\t\t\tself.game_object.resource_manager.pixel_box_dic['google_play_letter']\n\t\t\t)\n\t\tself.loggingToGUI('구글 계정 기준점: ' + str((top_loc_x, top_loc_y)))\n\t\tif top_loc_x == -1:\n\t\t\treturn self.status\n\n\t\tif self.status == 0:\n\t\t\t(bottom_loc_x, bottom_loc_y) = lybgame.LYBGame.locationOnWindow(\n\t\t\t\tself.window_image, \n\t\t\t\tself.game_object.resource_manager.pixel_box_dic['google_play_add_account_letter']\n\t\t\t\t)\n\n\t\t\tif bottom_loc_y == -1:\n\t\t\t\t# 계정 5개 이상이라는 의미\n\t\t\t\tself.google_account_number = 1000\n\t\t\telse:\n\t\t\t\tdiff_y = bottom_loc_y - top_loc_y\n\t\t\t\tself.google_account_number = int(diff_y / self.google_account_height)\n\n\t\t\tif self.google_account_number > 5:\n\t\t\t\tself.loggingToGUI('구글 계정 5개 이상 감지됨')\n\t\t\telse:\n\t\t\t\tself.loggingToGUI('구글 계정 '+str(self.google_account_number)+'개 감지됨')\n\n\t\tprint('google account DEBUG:', self.status, self.google_account_number)\n\t\tif self.status >= self.google_account_number:\n\t\t\tself.status = 0\t\t\n\t\t\tself.loggingToGUI(str(self.google_account_number)+' 개의 계정 작업 완료')\t\n\t\t\treturn self.status\n\t\telse:\n\t\t\tself.loggingToGUI(str(self.status + 1)+' 번째 구글 계정 로그인 시도')\n\t\t\tself.game_object.get_scene('connect_account_scene').set_option('select_complete_flag', True)\n\t\t\n\t\tif self.status >= 0 and self.status < 5:\n\t\t\tclick_loc_x = top_loc_x + 10\n\t\t\tclick_loc_y = top_loc_y + self.google_account_height*self.status + self.google_account_height*0.8\n\t\t\tself.lyb_mouse_click_location(click_loc_x, click_loc_y)\n\t\t\t#self.lyb_mouse_move_location(click_loc_x, click_loc_y)\n\t\t\tself.status += 1\n\t\telse:\n\t\t\tif self.just_drag_completed == False:\n\t\t\t\tself.just_drag_completed = True\n\n\t\t\t\tdrag_number = 
self.status - 5\n\n\t\t\t\tfrom_x = top_loc_x + 10\n\t\t\t\tfrom_y = top_loc_y + self.google_account_height * 2 + 5\n\n\t\t\t\tto_x = top_loc_x + 10\n\t\t\t\tto_y = top_loc_y + self.google_account_height * 1\n\n\t\t\t\tprint(from_x, from_y, to_x, to_y)\n\t\t\t\tself.lyb_mouse_drag_location(from_x, from_y, to_x, to_y, delay=1)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tself.just_drag_completed = False\n\t\t\t\t# 맨아래까지 왔나?\n\t\t\t\t(bottom_loc_x, bottom_loc_y) = lybgame.LYBGame.locationOnWindow(\n\t\t\t\t\t\tself.window_image, \n\t\t\t\t\t\tself.game_object.resource_manager.pixel_box_dic['google_play_add_account_letter']\n\t\t\t\t\t)\n\t\t\t\tif bottom_loc_x > 0 and bottom_loc_y > 0:\n\t\t\t\t\tself.lyb_mouse_click_location(top_loc_x - 150, top_loc_y)\n\t\t\t\t\t#self.lyb_mouse_move_location(top_loc_x - 150, top_loc_y)\n\t\t\t\t\tself.loggingToGUI(str(self.status + 1)+' 번째 구글 계정 감지 실패')\n\t\t\t\t\tself.loggingToGUI('총 '+str(self.status)+' 개의 계정 작업 완료')\n\t\t\t\t\tself.status = 0\n\n\t\t\t\telse:\n\t\t\t\t\tclick_loc_x = top_loc_x + 10\n\t\t\t\t\tclick_loc_y = top_loc_y + self.google_account_height*4 + self.google_account_height*0.8\n\t\t\t\t\tself.lyb_mouse_click_location(click_loc_x, click_loc_y)\n\t\t\t\t\t#self.lyb_mouse_move_location(click_loc_x, click_loc_y)\n\t\t\t\t\tself.status += 1\n\n\t\treturn self.status\n\n\n\tdef terms_of_use_scene(self):\n\t\t#print(self.game_object.rateMatchedPixelBox(self.window_pixels, 'terms_of_use_bottom_0'))\n\t\tself.lyb_mouse_click('terms_of_use_bottom_0')\n\n\t\t#print(self.game_object.rateMatchedPixelBox(self.window_pixels, 'terms_of_use_bottom_1'))\n\t\tself.lyb_mouse_click('terms_of_use_bottom_1')\n\t\treturn 0\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\tdef get_work_status(self, work_name):\n\t\tif work_name in lybgameTera.LYBTera.work_list:\n\t\t\treturn (lybgameTera.LYBTera.work_list.index(work_name) + 1) * 1000\n\t\telse: \n\t\t\treturn 99999","sub_path":"sample_scene.py","file_name":"sample_scene.py","file_ext":"py","file_size_in_byte":7107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"93248150","text":"from enum import Enum\n\n\nclass ControlType(Enum):\n \"\"\"Control Type\"\"\"\n ABOVE = 1\n BELOW = 2\n TIME = 3\n CLOCKTIME = 4\n\n\nclass Control():\n \"\"\"Defines simple controls that modify links based on a single condition\"\"\"\n def __init__(self):\n self.link_id = \"\"\t\t# string\n \"\"\"a link ID label\"\"\"\n\n self.status = \"\"\t # string\n \"\"\"OPEN or CLOSED, a pump speed setting, or a control valve setting\"\"\"\n\n self.node_id = \"\"\t\t# string\n \"\"\"a node ID label\"\"\"\n\n self.value = 0.0\t\t# real\n \"\"\"a pressure for a junction or a water level for a tank\"\"\"\n\n self.time = \"\"\t\t\t# string\n \"\"\"a time since the start of the simulation in decimal hours or in hours:minutes format\"\"\"\n\n self.clocktime = \"\" # string\n \"\"\"time of day (hour or hour:minute) AM/PM)\"\"\"\n\n self.control_type = ControlType.ABOVE\n \"\"\"Simple controls are used to change link status or settings based on tank water level, junction pressure,\n time into the simulation or time of day\"\"\"\n\n def __str__(self):\n \"\"\"Override default method to return string representation\"\"\"\n return self.get_text()\n\n def get_text(self):\n \"\"\"format contents of this item for writing to file\"\"\"\n if self.link_id:\n prefix = \" LINK \" + self.link_id + ' ' + self.status\n if self.control_type == ControlType.ABOVE or self.control_type == ControlType.BELOW:\n return prefix + \" IF NODE \" + 
self.node_id + ' ' + self.control_type.name + ' ' + str(self.value)\n elif self.control_type == ControlType.TIME and len(self.time) > 0:\n return prefix + \" AT TIME \" + self.time\n elif self.control_type == ControlType.CLOCKTIME and len(self.clocktime) > 0:\n return prefix + \" AT CLOCKTIME \" + self.clocktime\n return ''\n\n def set_text(self, new_text):\n self.__init__()\n fields = new_text.split()\n self.link_id, self.status = fields[1], fields[2]\n type_str = fields[4].upper()\n if type_str == \"NODE\":\n self.node_id = fields[5]\n self.control_type = ControlType[fields[6].upper()]\n self.value = float(fields[7])\n elif type_str == \"TIME\":\n self.control_type = ControlType.TIME\n self.time = fields[5]\n elif type_str == \"CLOCKTIME\":\n self.control_type = ControlType.CLOCKTIME\n self.clocktime = ' '.join(fields[5:])\n else:\n raise NameError(\"Unable to parse Control: \" + new_text)\n","sub_path":"src/core/epanet/hydraulics/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"345952835","text":"# GUI opened for sale processing\n\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\n\nroot = Tk()\nroot.style = ttk.Style()\nroot.style.theme_use('clam')\nroot.title('Sale Processing')\n\n# Grid setup so elements resize with window\nroot.grid_columnconfigure(0, weight=1)\nroot.grid_columnconfigure(1, weight=1)\nroot.grid_columnconfigure(2, weight=3)\n#root.grid_columnconfigure(3, weight=1)\n\nroot.grid_rowconfigure(0, weight=1)\nroot.grid_rowconfigure(1, weight=1)\nroot.grid_rowconfigure(2, weight=3)\nroot.grid_rowconfigure(3, weight=3)\nroot.grid_rowconfigure(4, weight=3)\n\n# Label Creation\nLabel(root, anchor='center', text='SKU:').grid(row=0, column=0, sticky=EW)\nLabel(root, anchor='center', text='Items').grid(row=0, column=2, sticky=EW)\nLabel(root, anchor='center', text='Quantity:').grid(row=1, column=0, sticky=EW)\nLabel(root, anchor='center', text='Subtotal:').grid(row=4, column=0, sticky=EW)\n\n# Entry Creation\nEntry(root, textvariable='Enter SKU').grid(row=0, column=1, sticky=W)\nEntry(root, textvariable='Enter Qt').grid(row=1, column=1, sticky=W)\n\n# Button Creation\nadd_btn = Button(root, anchor='center', text='Add').grid(row=2, column=0, sticky=NSEW)\ntotal_btn = Button(root, anchor='center', text='Total').grid(row=3, column=0, sticky=NSEW)\nback_btn = Button(root, anchor='center', text='Back').grid(row=2, column=1, sticky=NSEW)\n\nroot.mainloop()\n","sub_path":"PoSGUI/SaleProcessingGUI.py","file_name":"SaleProcessingGUI.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"379060014","text":"import tkinter\n\n# root = tkinter.Tk()\n# root.title('Window Title')\n# ww = tkinter.Label(root, text=\"Hello Tkinter!\")\n# ww.pack()\n# root.mainloop()\n\n\nclass Application(tkinter.Frame):\n def __init__(self, master=None):\n tkinter.Frame.__init__(self, master)\n self.pack()\n self.init_widgets()\n\n def init_widgets(self):\n w = tkinter.Label(self)\n bm = tkinter.PhotoImage(file=r'C:\\Users\\Administrator\\Pictures\\timg.gif')\n
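 # Editor's note (added comment): keep a Python-level reference to the\n # PhotoImage, as the next line does by storing it on the widget; without\n # a live reference the image object can be garbage collected and the\n # label renders blank.\n w.x = bm\n w['image'] = bm\n w.pack()\n ok_button = tkinter.Button(self, text=\"OK\")\n ok_button['background'] = 'yellow'\n ok_button.configure(background='yellow')\n ok_button.pack()\n\n\napp = 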
Application()\nprint(type(app.master))\napp.master.title('Window Title')\napp.mainloop()\n","sub_path":"test/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"361868006","text":"# Copyright © 2019 Province of British Columbia\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module is a wrapper for SFTP Connection object.\"\"\"\nimport os\nimport paramiko\nimport logging\nfrom base64 import decodebytes\nfrom pysftp import Connection, CnOpts\n\n\nclass SFTPService: # pylint: disable=too-few-public-methods\n \"\"\"SFTP Service class.\"\"\"\n\n DEFAULT_CONNECT_SERVER = 'drive.qp.gov.bc.ca'\n\n @staticmethod\n def get_connection(server_name: str = DEFAULT_CONNECT_SERVER) -> Connection:\n # pylint: disable=protected-access\n return SFTPService._connect(server_name)\n\n @staticmethod\n def _connect(server_name: str) -> Connection:\n\n sftp_host = os.getenv('SFTP_HOST', 'localhost')\n sftp_port = os.getenv('SFTP_PORT', 22)\n\n cnopts = CnOpts()\n # only for local development set this to false; default to verifying the host\n if os.getenv('SFTP_VERIFY_HOST', 'true').lower() == 'false':\n cnopts.hostkeys = None\n else:\n ftp_host_key_data = os.getenv('SFTP_HOST_KEY', '').encode()\n key = paramiko.RSAKey(data=decodebytes(ftp_host_key_data))\n cnopts.hostkeys.add(sftp_host, 'ssh-rsa', key)\n\n sftp_priv_key_file = os.getenv('SFTP_ARCHIVE_DIRECTORY', '/opt/app-root/archieve/') + 'sftp_priv_key_file'\n\n # only create key file if it doesn't exist\n if not os.path.isfile(sftp_priv_key_file):\n with open(sftp_priv_key_file, 'w+') as fh:\n sftp_priv_key = os.getenv('BCREG_FTP_PRIVATE_KEY', '')\n fh.write(sftp_priv_key)\n\n sft_credentials = {\n 'username': os.getenv('SFTP_USERNAME', 'foo'),\n # private_key should be the absolute path to where private key file lies since sftp\n 'private_key': sftp_priv_key_file,\n 'private_key_pass': os.getenv('BCREG_FTP_PRIVATE_KEY_PASSPHRASE', '')\n }\n\n # cnopts.hostkeys = None\n # Connection(host=sftp_host, username='TESTPUB', password='742mH273', cnopts=cnopts, port=int(sftp_port))\n sftp_connection = Connection(host=sftp_host, **sft_credentials, cnopts=cnopts, port=int(sftp_port))\n logging.info('sftp_connection successful')\n\n return sftp_connection\n
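\n\n# Editor's sketch (not part of the original module): typical usage, relying\n# on pysftp.Connection implementing the context-manager protocol:\n#   with SFTPService.get_connection() as sftp:\n#       print(sftp.listdir('.'))\n","sub_path":"jobs/sftp-gazette/services/sftp.py","file_name":"sftp.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"53819652","text":"import unittest\nimport urllib2\n\nfrom ddt import ddt, unpack, data\nfrom django.test import override_settings\nfrom mock import patch, MagicMock\nfrom six import StringIO\n\nfrom idm.plugins.providers import MongoProvider, UserInfoProvider, EdxUsernameProvider\n\n\nclass MongoProviderTest(unittest.TestCase):\n @override_settings(\n PROVIDER={'MongoProvider': {\n 'MONGO_HOST': 'localhost:27017', 'MONGO_DATABASE': 'test', 'MONGO_USER': None, 'MONGO_PASS': None\n }}\n )\n def 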
test_load(self):\n provider = MongoProvider()\n provider.load()\n from pymongo import MongoClient\n self.assertIsInstance(provider.client, MongoClient)\n self.assertIsNotNone(provider.db)\n\n\n@ddt\nclass UserInfoProviderTest(unittest.TestCase):\n @patch('idm.plugins.providers.MongoClient')\n @data(\n # empty result from db\n ({'user_id': ['test']},\n {'$or': [{'ubcEduCwlPUID': {'$in': ['test']}}]},\n [],\n []),\n # one result from db\n ({'user_id': ['test', 'test1']},\n {'$or': [{'ubcEduCwlPUID': {'$in': ['test', 'test1']}}]},\n [{'ubcEduCwlPUID': 'test', 'cn': 'Test Joe', 'uid': 'test', 'edx_id': 'test'}],\n [{'first_name': 'Test', 'last_name': 'Joe', 'user_id': 'test', 'employee_number': None,\n 'remote_id': 'test', 'cwl': 'test', 'student_number': None, 'email': None}]),\n # multiple search criteria\n ({'user_id': ['test'], 'email': ['a@a.com']},\n {'$or': [{'ubcEduCwlPUID': {'$in': ['test']}}, {'mail': {'$in': ['a@a.com']}}]},\n [{'ubcEduCwlPUID': 'test', 'cn': 'Test Joe', 'uid': 'test', 'edx_id': 'test'}],\n [{'first_name': 'Test', 'last_name': 'Joe', 'user_id': 'test', 'employee_number': None,\n 'remote_id': 'test', 'cwl': 'test', 'student_number': None, 'email': None}]),\n )\n @unpack\n def test_load(self, load_params, condition, find_return, expect_users, mock_client):\n mock_db = MagicMock(name='mock_db')\n mock_db.users = MagicMock(name='mock_collection')\n mock_db.users.find.return_value = find_return\n mock_client.return_value.__getitem__.side_effect = lambda _: mock_db\n provider = UserInfoProvider()\n users = provider.load(**load_params)\n\n mock_client.assert_called_once_with('localhost:27017')\n mock_db.users.find.assert_called_once_with(condition)\n self.assertListEqual(users, expect_users)\n\n\n@ddt\nclass EdxUsernameProviderTest(unittest.TestCase):\n @override_settings(EDX_ACCESS_TOKEN='token', EDX_SERVER='http://edx.org', EDX_MAPPING_ENDPOINT='/api/user')\n @patch('idm.plugins.providers.urllib2.urlopen')\n @patch('idm.plugins.providers.urllib2.Request')\n @data(\n # empty query\n ({}, None, None, []),\n # empty result\n ({'edx_username': ['test']}, 'username=test', '{\"results\":[]}', []),\n # return a mapped user\n ({'edx_username': ['test']}, 'username=test',\n '{\"results\":[{\"username\": \"test\", \"remote_id\": \"test_rid\"}]}',\n [{'edx_username': 'test', 'remote_id': 'test_rid'}]),\n )\n @unpack\n def test_load(self, load_params, url_params, response_body, expect_users, mock_request, mock_urlopen):\n mock_urlopen.return_value = StringIO(response_body)\n provider = EdxUsernameProvider()\n users = provider.load(**load_params)\n if load_params:\n mock_request.assert_called_once_with(\n 'http://edx.org/api/user?{}'.format(url_params), None, {'Authorization': 'Bearer token'})\n mock_urlopen.assert_called_once_with(mock_request.return_value)\n self.assertListEqual(users, expect_users)\n\n @patch('idm.plugins.providers.urllib2.urlopen')\n @patch('idm.plugins.providers.urllib2.Request')\n def test_load_expection(self, mock_request, mock_urlopen):\n mock_urlopen.side_effect = urllib2.URLError('error')\n provider = EdxUsernameProvider()\n with self.assertRaises(RuntimeError):\n provider.load(edx_username='test')\n\n\n\n\n","sub_path":"idm/plugins/tests/test_providers.py","file_name":"test_providers.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"179099385","text":"'''\n標籤 Label\n'''\n# 載入tkinter\nfrom tkinter import 
*\n\n\n##==============================================================================================\n## Function\n##==============================================================================================\n\n# Create a label\ndef ini_label():\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"ini_label\")\n # Create the label (placed under root); label can be any other name\n label = Label(root, text=\"I like tkinter.\")\n # Pack and position the widget\n label.pack()\n # Run; keep this as the last line\n root.mainloop()\n\n# Label colors\ndef int_label_with_color():\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"int_label_with_color\")\n # Create the label (under root; fg = text color, bg = background color); label can be any other name\n label = Label(root, text=\"I like tkinter.\", fg=\"#ff0000\", bg=\"yellow\")\n # Pack and position the widget\n label.pack()\n # Run; keep this as the last line\n root.mainloop()\n\n# Label colors, label height and label width\ndef ini_label_with_text():\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"ini_label_with_text\")\n # Create the label (under root; fg = text color, bg = background color, height = label height, width = label width); label can be any other name\n label = Label(root, text=\"I like tkinter.\", fg=\"blue\", bg=\"yellow\", height=5, width=15)\n # Pack and position the widget\n label.pack()\n # Run; keep this as the last line\n root.mainloop()\n\n# Text position\ndef ini_label_with_textpos():\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"ini_label_with_textpos\")\n # Create the label (under root; fg = text color, bg = background color, height = label height, width = label width, anchor = text position); label can be any other name\n label = Label(root, text=\"I like tkinter.\", fg=\"blue\", bg=\"yellow\", height=5, width=15, anchor=\"nw\")\n # Pack and position the widget\n label.pack()\n # Run; keep this as the last line\n root.mainloop()\n\n# Text wrapping\ndef ini_label_with_EOL():\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"ini_label_with_EOL\")\n # Create the label (under root; fg = text color, bg = background color, height = label height, width = label width, anchor = text position, wraplength = wrap length in pixels); label can be any other name\n # wraplength=40 wraps automatically at 40 pixels\n label = Label(root, text=\"I like tkinter.\", fg=\"blue\", bg=\"yellow\", height=5, width=15, anchor=\"nw\", wraplength=40)\n # Pack and position the widget\n label.pack()\n # Run; keep this as the last line\n root.mainloop()\n\n# Font\ndef ini_label_with_font():\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"ini_label_with_font\")\n # Create the label (under root; fg = text color, bg = background color, height = label height, width = label width, font = font); label can be any other name\n label = Label(root, text=\"I like tkinter.\", fg=\"blue\", bg=\"yellow\", height=5, width=15,\n font=(\"Helvetica\", 20, \"bold\"))\n # Pack and position the widget\n label.pack()\n # Run; keep this as the last line\n root.mainloop()\n\n# Bitmap\ndef ini_label_with_Bitmap():\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"ini_label_with_Bitmap\")\n # Create the label (under root; bitmap = built-in bitmap name); label can be any other name\n label = Label(root, bitmap=\"hourglass\")\n # Pack and position the widget\n label.pack()\n # Run; keep this as the last line\n root.mainloop()\n\n# Bitmap and text together\ndef ini_label_with_text_Bitmap():\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"ini_label_with_text_Bitmap\")\n # Create the label (under root; bitmap = bitmap, compound = bitmap position); label can be any other name\n # compound only takes effect when text and an image are present together\n label = Label(root, text=\"I like Tkinter.\", bitmap=\"hourglass\", compound=\"left\")\n # Pack and position the widget\n label.pack()\n # Run; keep this as the last line\n root.mainloop()\n\n# Label border; spacing between text and label edge\ndef ini_label_with_relief():\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"ini_label_relief\")\n # Create the label (under root; relief = border effect, padx = x-axis padding, pady = y-axis padding); label can be any other name\n label = Label(root, text=\"I like Tkinter.\", relief=\"raised\", padx=5, pady=20)\n # Pack and position the widget\n label.pack()\n # Run; keep this as the last line\n root.mainloop()\n\n# Display a gif image\ndef ini_label_with_gif():\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"ini_label_with_gif\")\n # gif image\n html_gif = PhotoImage(file=\"../imgfolder/1.gif\")\n # Create the label (under root; image = image); label can be any other name\n label = Label(root, 
image=html_gif)\n # Pack and position the widget\n label.pack()\n # Run; keep this as the last line\n root.mainloop()\n\n# Display png/jpg images\ndef ini_label_with_png_jpg():\n # Load the package for displaying png/jpg images\n from PIL import Image, ImageTk\n\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"ini_label_with_png_jpg\")\n # Window size\n root.geometry(\"680x400\")\n\n # Read the png image\n image = Image.open(\"../imgfolder/link.png\")\n # Put it into link\n link = ImageTk.PhotoImage(image)\n # Create the label (under root; image = image); label can be any other name\n label = Label(root, image=link)\n # Pack and position the widget\n label.pack()\n # Run; keep this as the last line\n root.mainloop()\n\n# png/jpg image and text together\ndef ini_label_text_with_png_jpg():\n # Load the package for displaying png/jpg images\n from PIL import Image, ImageTk\n\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"ini_label_text_with_png_jpg\")\n # Window size\n root.geometry(\"680x400\")\n\n # Read the jpg image\n image = Image.open(\"../imgfolder/box.jpg\")\n # Put it into link\n link = ImageTk.PhotoImage(image)\n # Create the label (under root; image = image); label can be any other name\n label = Label(root, text=\"I like Tkinter.\", image=link, compound=\"right\")\n # Pack and position the widget\n label.pack()\n # Run; keep this as the last line\n root.mainloop()\n\n# Counter\ndef Do_Count():\n # Subroutine: counter\n def run_counter(digit):\n # Subroutine: update the number\n def counting():\n # Tell the interpreter that counter here refers to the counter declared above\n global counter\n counter = counter + 1\n # Update the label text\n digit.config(text=str(counter))\n # Run once every second\n digit.after(1000, counting)\n\n counting()\n\n # Create the window; root can be any other name\n root = Tk()\n # Title\n root.title(\"Counter\")\n\n # Create the label (under root; font = font)\n digit = Label(root, font=(\"Helvetic\", 20, \"bold\"))\n # Pack and position the widget\n digit.pack()\n\n # Run the counter\n run_counter(digit)\n # Run; keep this as the last line\n root.mainloop()\n\n# Separator line\ndef ini_label_with_Separator():\n # Load the separator widget\n from tkinter.ttk import Separator\n\n # Create the window; root can be any other name\n root = Tk()\n # Window title\n root.title(\"Separator\")\n # Window size\n root.geometry(\"680x400\")\n\n # Content title\n myTilte = \"一個人的戲幕\"\n # Content text\n myCntent = \"台下人走過不見舊顏色,台上人唱著心碎離別歌。\"\n\n # Create the label (content title)\n label_myTilte = Label(root, text=myTilte, font=(\"Helvetic\", 20, \"bold\"))\n # Pack and position the widget\n label_myTilte.pack(padx=10, pady=10)\n\n # Create the separator (under root; orient = horizontal/vertical)\n sep = Separator(root, orient=HORIZONTAL)\n # Pack and position the widget (fill = separator fills the X axis; 50-pixel left/right padding)\n sep.pack(fill=X, padx=50)\n\n # Create the label (content text)\n label_myCntent = Label(root, text=myCntent)\n # Pack and position the widget\n label_myCntent.pack(padx=10, pady=10)\n\n # Run; keep this as the last line\n root.mainloop()\n\n##==============================================================================================\n## Main\n##==============================================================================================\n\n# Create a label\n#ini_label()\n\n# Label colors\n#int_label_with_color()\n\n# Label colors, label height and label width\n#ini_label_with_text()\n\n# Text position\n#ini_label_with_textpos()\n\n# Text wrapping\n#ini_label_with_EOL()\n\n# Font\n#ini_label_with_font()\n\n# Bitmap\n#ini_label_with_Bitmap()\n\n# Bitmap and text together\n#ini_label_with_text_Bitmap()\n\n# Label border; spacing between text and label edge\n#ini_label_relief()\n\n# Display a gif image\n#ini_label_with_gif()\n\n# Display png/jpg images\n#ini_label_with_png_jpg()\n\n# png/jpg image and text together\n#ini_label_text_with_png_jpg()\n\n# Counter\n# Current count\n#counter = 0\n#Do_Count()\n\n# Separator line\n#ini_label_with_Separator()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Python_Tkinter/Python_Tkinter_Label.py","file_name":"Python_Tkinter_Label.py","file_ext":"py","file_size_in_byte":9111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"487999374","text":"\"\"\"\nShape inference functions.\n\"\"\"\n
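\n# Editor's note (inferred from the functions below, not original text): each\n# shape function maps (layer_spec, input_shapes) to a list of output shapes,\n# one per output blob, with -1 marking dimensions unknown until runtime.\n\n\ndef _transpose(layer_spec, input_shapes):\n axes = list(layer_spec.transpose.axes)\n input_shape = input_shapes[0]\n output_shape = 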
[None] * len(input_shape)\n\n for j in range(len(input_shape)):\n output_shape[j] = input_shape[axes[j]]\n\n return [output_shape]\n\n\ndef _get_shape(layer_spec, input_shapes):\n rank = len(input_shapes[0])\n return [[rank]]\n\n\ndef _slice_static(layer_spec, input_shapes):\n params = layer_spec.sliceStatic\n input_shape = input_shapes[0]\n rank = len(input_shape)\n output_shape = [-1] * rank\n begin_indices = params.beginIds\n end_indices = params.endIds\n begin_masks = params.beginMasks\n end_masks = params.endMasks\n\n for i in range(rank):\n begin_indices[i] = begin_indices[i] if begin_indices[i] >= 0 else input_shape[i] + begin_indices[i]\n end_indices[i] = end_indices[i] if end_indices[i] >= 0 else input_shape[i] + end_indices[i]\n for idx, dim in enumerate(input_shape):\n if dim > 0: # known\n begin = None if params.beginMasks[idx] else begin_indices[idx]\n end = None if params.endMasks[idx] else end_indices[idx]\n thisslice = slice(begin, end, params.strides[idx])\n thisslicelen = len(list(range(input_shape[idx]))[thisslice])\n output_shape[idx] = thisslicelen\n\n return [output_shape]\n\n\ndef _slice_dynamic(layer_spec, input_shapes):\n input_shape = input_shapes[0]\n rank = len(input_shape)\n output_shape = [-1] * rank\n return [output_shape]\n\n\ndef _squeeze(layer_spec, input_shapes):\n if layer_spec.squeeze.squeezeAll:\n return [[1]]\n axes = list(layer_spec.squeeze.axes)\n input_shape = input_shapes[0]\n rank = len(input_shape)\n\n if axes is None or len(axes) == 0:\n raise NotImplementedError('Unspecified axes not implemented.')\n output_shape = []\n axes = [axis if axis >= 0 else rank + axis for axis in axes]\n for dim in range(rank):\n if dim not in axes:\n output_shape.append(input_shape[dim])\n elif input_shape[dim] > 0 and input_shape[dim] != 1:\n raise ValueError(\n '[Shaper] Cannot squeeze on index %d of shape %s' % (dim, str(input_shape)))\n return [output_shape] if output_shape else [[1]]\n\n\ndef _range_dynamic(layer_spec, input_shapes):\n if len(input_shapes) == 3:\n return [[-1]] # 1 output containing an unknown length of vector\n else:\n raise NotImplementedError('NNSSA converter can only handle 3-input dynamic range at this time.')\n\n\ndef _range_static(layer_spec, input_shapes):\n if len(input_shapes) == 3:\n return [[-1]]\n else:\n params = layer_spec.rangeStatic\n start, end, step = params.startValue, params.endValue, params.stepSizeValue\n return [[int((end - start) / step)]]\n\n\ndef _load_constant(layer_spec, input_shapes):\n shape = list(layer_spec.loadConstant.shape)\n return [shape]\n\n\ndef _load_constant_nd(layer_spec, input_shapes):\n shape = list(layer_spec.loadConstantND.shape)\n return [shape]\n\n\ndef _add(layer_spec, input_shapes):\n if len(input_shapes) == 2:\n r = max(len(input_shapes[0]), len(input_shapes[1]))\n # broadcasting if necessary\n output_shapes = [[1] * (r - len(s)) + s for s in input_shapes]\n output_shapes = [max(output_shapes[0], output_shapes[1])]\n elif len(input_shapes) == 1:\n output_shapes = input_shapes\n else:\n raise ValueError(\"[Shaper] Expects _add layers having either 1 or 2 inputs\")\n return output_shapes\n\n\ndef _broadcastable(layer_spec, input_shapes):\n def broadcast_dim(x, y):\n if x < 0 or y < 0:\n return -1\n if x == 1 or y == 1:\n return max([x, y])\n elif x == y:\n return x\n else:\n return None\n\n max_rank = max([len(s) for s in input_shapes])\n extended_input_shapes = [[1] * (max_rank - len(s)) + list(s) for s in input_shapes]\n output_shape = [1] * max_rank\n for i_dim in range(max_rank):\n for s in 
extended_input_shapes:\n output_shape[i_dim] = broadcast_dim(output_shape[i_dim], s[i_dim])\n if output_shape[i_dim] is None:\n raise ValueError('[Shaper] Cannot broadcast input_shapes %s' % (str(input_shapes)))\n return [output_shape]\n\n\ndef _scatter(layer_spec, input_shapes):\n # inputs: [target, source, indices]\n return [input_shapes[0]]\n\n\ndef _scatter_nd(layer_spec, input_shapes):\n # get the values of the shape input\n return [input_shapes[0]]\n\n\ndef _gather(layer_spec, input_shapes):\n if len(input_shapes) == 2:\n indices_shape = input_shapes[1]\n return [list(indices_shape) + list(input_shapes[0][1:])]\n else:\n raise ValueError(\"[Shaper] Gather layer accepts only 2 inputs\")\n\n\ndef _gather_nd(layer_spec, input_shapes):\n param_shape = input_shapes[0]\n index_shape = input_shapes[1]\n index_rank = len(index_shape)\n output_shape = index_shape[:-1]\n output_shape[index_rank - 1:] = param_shape[index_shape[index_rank - 1]:]\n return [output_shape]\n\n\ndef _concat_nd(layer_spec, input_shapes):\n if layer_spec.WhichOneof('layer') == 'concat':\n axis = -3\n else:\n axis = layer_spec.concatND.axis\n rank = len(input_shapes[0])\n output_shape = list(input_shapes[0][:])\n if axis < 0:\n axis += rank\n\n for shape in input_shapes:\n if len(shape) != rank:\n raise ValueError('[Shaper] Unable to shape concatND: ranks mismatch')\n\n for shape in input_shapes[1:]:\n for idx, dim in enumerate(shape):\n if output_shape[idx] == -1 or dim == -1:\n output_shape[idx] = -1\n continue\n if idx == axis:\n output_shape[idx] += dim\n elif output_shape[idx] != dim:\n raise ValueError('[Shaper] Unable to shape concatND: shapes mismatch')\n return [output_shape]\n\n\ndef _inner_product(layer_spec, input_shapes):\n if len(input_shapes) == 1: # static weight\n input_shape = input_shapes[0]\n in_channels = layer_spec.innerProduct.inputChannels\n out_channels = layer_spec.innerProduct.outputChannels\n if input_shape[-1] != in_channels:\n raise ValueError('[Shaper] Inner Product layer input channels mismatch')\n return [input_shape[0:-1] + [out_channels]]\n elif len(input_shapes) == 2:\n input_shape, mat_shape = input_shapes[0:2]\n in_channels = input_shape[-1]\n if in_channels != -1 and in_channels != mat_shape[-2]:\n raise ValueError('[Shaper] Inner Product layer input channels mismatch')\n out_channels = mat_shape[-1]\n return [input_shape[0:-1] + [out_channels]]\n else:\n raise ValueError('[Shaper] Inner Product needs either 1 or 2 inputs')\n\n\ndef _split_nd(layer_spec, input_shapes):\n if len(input_shapes) != 1:\n raise NotImplementedError('[Shaper] Dynamic split not implemented.')\n axis = layer_spec.splitND.axis\n num_splits = layer_spec.splitND.numSplits\n output_shape = input_shapes[0][:]\n output_shape[axis] //= num_splits\n if output_shape[axis] == 0:\n raise ValueError('[Shaper] Cannot split shape %s on axis %d' % (str(output_shape), axis))\n return [output_shape] * num_splits\n\n\ndef _identity(layer_spec, input_shapes):\n return input_shapes[:]\n\n\ndef _reverse_seq(layer_spec, input_shapes):\n return [input_shapes[0]]\n\n\ndef _expand_dims(layer_spec, input_shapes):\n input_shape = input_shapes[0]\n axes = list(layer_spec.expandDims.axes)\n target_rank = len(input_shape) + len(axes)\n axes = [axis if axis >= 0 else axis + target_rank for axis in axes]\n\n output_shape = input_shape[:]\n for axis in axes:\n output_shape = list(output_shape[0:axis]) + [1] + list(output_shape[axis:])\n return [output_shape]\n
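\n\n# Editor's sketch (illustrative, not part of the original module): these\n# rules mirror numpy-style broadcasting with -1 as the unknown marker, e.g.\n# merging shapes [2, 1, 5] and [3, 1] yields [2, 3, 5], and any dimension\n# paired with -1 stays -1. Likewise _expand_dims on shape [2, 5] with\n# axes=[0] yields [1, 2, 5], matching tf.expand_dims semantics.\n\n\ndef _where_non_zero(layer_spec, input_shapes):\n input_shape = 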
input_shapes[0]\n rank = len(input_shape)\n output_shape = [-1, rank]\n return [output_shape]\n\n\ndef _stack(layer_spec, input_shapes):\n axis = layer_spec.stack.axis\n num_inputs = len(layer_spec.input)\n shape = input_shapes[0]\n for s in input_shapes:\n if s != shape:\n raise ValueError('[Shaper] stack input shapes mismatch')\n output_shape = shape[:axis] + [num_inputs] + shape[axis:]\n return [output_shape]\n\n\ndef _batched_mat_mul(layer_spec, input_shapes):\n if len(input_shapes) == 1:\n a_shape = list(input_shapes[0][:])\n a_shape[-1] = int(layer_spec.batchedMatmul.weightMatrixSecondDimension)\n return [a_shape]\n elif len(input_shapes) == 2:\n a_shape, b_shape = input_shapes\n if len(a_shape) < 2 or len(b_shape) < 2:\n raise ValueError('[Shaper] MatMul with 2 inputs require the ranks of both inputs to be no less than 2')\n tp_a = layer_spec.batchedMatmul.transposeA\n tp_b = layer_spec.batchedMatmul.transposeB\n r_x, c_x = a_shape[-2:]\n r_y, c_y = b_shape[-2:]\n r_o = c_x if tp_a else r_x\n c_o = r_y if tp_b else c_y\n output_shape = list(a_shape[0:-2]) + [r_o, c_o]\n return [output_shape]\n else:\n raise NotImplementedError('[Shaper] Batched MatMul requires either 1 or 2 inputs')\n\n\ndef _embedding_nd(layer_spec, input_shapes):\n input_shape = input_shapes[0]\n if input_shape[-1] != 1:\n raise ValueError('[Shaper] Last dimension of EmbeddingND input must be 1')\n vocab_size = layer_spec.embeddingND.vocabSize\n embedding_size = int(layer_spec.embeddingND.embeddingSize)\n output_shape = input_shapes[0][:]\n output_shape[-1] = embedding_size\n return [output_shape]\n\n\ndef _conv2d(layer_spec, input_shapes):\n raise NotImplementedError('Conv2D: shape logic not implemented')\n\n\ndef _reshape_static(layer_spec, input_shapes):\n target_shape = list(layer_spec.reshapeStatic.targetShape)\n return [target_shape] if target_shape else [[1]]\n\n\ndef _reduce(layer_spec, input_shapes):\n axis_param = layer_spec.reduce.axis\n axis = None\n if axis_param == 2:\n axis = -3\n elif axis_param == 3:\n axis = -2\n elif axis_param == 4:\n axis = -1\n else:\n raise NotImplementedError(\n '[Shaper] Reduce with axis parameter %s is not implemented.' 
% (str(axis_param)))\n output_shape = input_shapes[0][:]\n output_shape[axis] = 1\n return [output_shape]\n\n\ndef _reduce_general(params, input_shapes):\n if params.reduceAll:\n return [[1]]\n\n axes = list(params.axes)\n output_shape = list(input_shapes[0][:])\n if params.keepDims:\n for axis in axes:\n output_shape[axis] = 1\n else:\n for axis in axes:\n output_shape[axis] = None\n output_shape = [dim for dim in output_shape if dim is not None]\n\n return [output_shape] if output_shape else [[1]]\n\n\ndef _reduce_logsumexp(layer_spec, input_shapes):\n return _reduce_general(layer_spec.reduceLogSumExp, input_shapes)\n\n\ndef _reduce_prod(layer_spec, input_shapes):\n return _reduce_general(layer_spec.reduceProd, input_shapes)\n\n\ndef _reduce_mean(layer_spec, input_shapes):\n return _reduce_general(layer_spec.reduceMean, input_shapes)\n\n\ndef _reduce_sum(layer_spec, input_shapes):\n return _reduce_general(layer_spec.reduceSum, input_shapes)\n\n\ndef _reduce_max(layer_spec, input_shapes):\n return _reduce_general(layer_spec.reduceMax, input_shapes)\n\n\ndef _reduce_min(layer_spec, input_shapes):\n return _reduce_general(layer_spec.reduceMin, input_shapes)\n\n\ndef _argmax(layer_spec, input_shapes):\n params = layer_spec.argMax\n axis = params.axis\n keepdims = not params.removeDim\n\n output_shape = input_shapes[0][:]\n if keepdims:\n output_shape[axis] = 1\n else:\n output_shape[axis] = None\n output_shape = [dim for dim in output_shape if dim is not None]\n\n return [output_shape] if output_shape else [[1]]\n\n\ndef _argmin(layer_spec, input_shapes):\n params = layer_spec.argMin\n axis = params.axis\n keepdims = not params.removeDim\n\n output_shape = input_shapes[0][:]\n if keepdims:\n output_shape[axis] = 1\n else:\n output_shape[axis] = None\n output_shape = [dim for dim in output_shape if dim is not None]\n\n return [output_shape] if output_shape else [[1]]\n\n\ndef _tile(layer_spec, input_shapes):\n params = layer_spec.tile\n reps = params.reps\n assert len(reps) == len(input_shapes[0])\n return [[reps[i] * input_shapes[0][i] for i in range(len(reps))]]\n\n\ndef _fill_static(layer_spec, input_shapes):\n params = layer_spec.fillStatic\n output_shape = params.targetShape\n return [output_shape]\n\n\ndef _fill_dynamic(layer_spec, input_shapes):\n assert (len(input_shapes) == 1 and len(input_shapes[0]) == 1)\n rank = int(input_shapes[0][0])\n return [[-1] * rank]\n\n\ndef _broadcast_to_like(layer_spec, input_shapes):\n return [input_shapes[1]]\n\n\ndef _broadcast_to_static(layer_spec, input_shapes):\n params = layer_spec.broadcastToStatic\n output_shape = params.targetShape\n return [output_shape]\n\n\ndef _pad(layer_spec, input_shapes):\n return [[-1] * len(input_shapes[0])]\n\n\ndef _mirror_pad(layer_spec, input_shapes):\n params = layer_spec.padding\n pad_h = params.paddingAmounts.borderAmounts[0]\n pad_w = params.paddingAmounts.borderAmounts[1]\n output_shape = input_shapes[0]\n output_shape[2] += pad_h.startEdgeSize + pad_h.endEdgeSize\n output_shape[3] += pad_w.startEdgeSize + pad_w.endEdgeSize\n return [output_shape]\n\n\ndef _crop(layer_spec, input_shapes):\n return [[-1] * len(input_shapes[0])]\n\n\ndef _topk(layer_spec, input_shapes):\n params = layer_spec.topK\n value_shape = index_shape = input_shapes[0][:-1] + [params.K]\n output_shapes = [value_shape, index_shape]\n return output_shapes\n\n\ndef _unidirectional_lstm(layer_spec, input_shapes):\n shape = input_shapes[0]\n hidden_size = input_shapes[1][2]\n shape[2] = hidden_size\n return [shape] * 3\n\n\ndef 
_reorganize_data(layer_spec, input_shapes):\n block_size = layer_spec.reorganizeData.blockSize\n output_shape = input_shapes[0][:]\n if 'SpaceToDepth' in layer_spec.name or 'SpaceToBatchND' in layer_spec.name:\n output_shape[2] //= block_size\n output_shape[3] //= block_size\n output_shape[1] = output_shape[1] * block_size * block_size\n elif 'DepthToSpace' in layer_spec.name or 'BatchToSpaceND' in layer_spec.name:\n output_shape[2] *= block_size\n output_shape[3] *= block_size\n output_shape[1] = output_shape[1] // (block_size * block_size)\n return [output_shape]\n\n\n# We'll enable them one by one\n_LAYER_REGISTRY = {\n 'transpose': _transpose,\n 'getShape': _get_shape,\n 'sliceStatic': _slice_static,\n 'sliceDynamic': _slice_dynamic,\n 'squeeze': _squeeze,\n 'rangeStatic': _range_static,\n 'rangeDynamic': _range_dynamic,\n 'loadConstant': _load_constant,\n 'loadConstantND': _load_constant_nd,\n 'gather': _gather,\n 'gatherND': _gather_nd,\n 'scatter': _scatter,\n 'scatterND': _scatter_nd,\n 'logicalOr': _broadcastable,\n 'logicalNot': _identity,\n 'lessThan': _broadcastable,\n 'lessEqual': _broadcastable,\n 'greaterThan': _broadcastable,\n 'greaterEqual': _broadcastable,\n 'equal': _broadcastable,\n 'notEqual': _broadcastable,\n 'logicalAnd': _broadcastable,\n 'add': _add,\n 'multiply': _add,\n 'concatND': _concat_nd,\n 'concat': _concat_nd,\n 'innerProduct': _inner_product,\n 'activation': _identity,\n 'reverse': _identity,\n 'reverseSeq': _reverse_seq,\n 'copy': _identity,\n 'expandDims': _expand_dims,\n 'stack': _stack,\n 'whereNonZero': _where_non_zero,\n 'addBroadcastable': _broadcastable,\n 'subtractBroadcastable': _broadcastable,\n 'divideBroadcastable': _broadcastable,\n 'whereBroadcastable': _broadcastable,\n 'maxBroadcastable': _broadcastable,\n 'minBroadcastable': _broadcastable,\n 'modBroadcastable': _broadcastable,\n 'floorDivBroadcastable': _broadcastable,\n 'powBroadcastable': _broadcastable,\n 'conv2d': _conv2d,\n 'multiplyBroadcastable': _broadcastable,\n 'reshapeStatic': _reshape_static,\n # 'convolution': _convolution, # We propagate convolutional shapes by directly assigning from SSA output shape\n 'embeddingND': _embedding_nd,\n 'softmax': _identity,\n 'softmaxND': _identity,\n 'unary': _identity,\n 'bias': _add,\n 'max': _add,\n 'min': _add,\n 'reduce': _reduce,\n 'argMax': _argmax,\n 'argMin': _argmin,\n 'reduceLogSumExp': _reduce_logsumexp,\n 'reduceProd': _reduce_prod,\n 'reduceMean': _reduce_mean,\n 'reduceSum': _reduce_sum,\n 'reduceMax': _reduce_max,\n 'reduceMin': _reduce_min,\n 'splitND': _split_nd,\n 'batchedMatmul': _batched_mat_mul,\n 'sin': _identity,\n 'cos': _identity,\n 'tan': _identity,\n 'tile': _tile,\n 'fillLike': _identity,\n 'fillStatic': _fill_static,\n 'fillDynamic': _fill_dynamic,\n 'uniDirectionalLSTM': _unidirectional_lstm,\n 'broadcastToLike': _broadcast_to_like,\n 'broadcastToStatic': _broadcast_to_static,\n 'constantPad': _pad,\n 'padding': _mirror_pad,\n 'crop': _crop,\n 'sign': _identity,\n 'ceil': _identity,\n 'floor': _identity,\n 'round': _identity,\n 'topK': _topk,\n 'reorganizeData': _reorganize_data,\n 'batchnorm': _identity,\n 'clip': _identity,\n 'lrn': _identity,\n}\n\n\ndef _get_translator_function(layer_type):\n \"\"\"Get the right translator function\n \"\"\"\n if layer_type in _LAYER_REGISTRY:\n return _LAYER_REGISTRY[layer_type]\n else:\n raise TypeError(\n \"Shape computation function missing for layer of type %s.\" % type(layer_type))\n\n\ndef _insert_to_dict(dic, key, val):\n \"\"\" Insert key to dic, where dic[key] 
value is a list of unique elements\n \"\"\"\n if key not in dic:\n dic[key] = []\n if val not in dic[key]:\n dic[key].append(val)\n\n\ndef get_common_shape(x, y):\n \"\"\" Get common shape z from two shapes, x and y.\n If x and y are of different ranks, error out.\n If x and y have the same rank, but x[i] != y[i] for some i, then z[i] = -1, indicating UNKNOWN.\n If x and y are equal, z = x\n \"\"\"\n z = None\n if len(x) == len(y):\n z = list(x)\n for idx in range(len(x)):\n z[idx] = x[idx] if x[idx] == y[idx] else -1\n return z\n\n\ndef is_static_shape(shape):\n return not (False in [x > 0 for x in shape])\n\n\ndef is_a_shape_of(x, y):\n \"\"\"\n True if x is a shape of y.\n y uses -1 to indicate arbitrary number.\n If y is None, then it represent a \"missing\" shape. In this case it will return True.\n \"\"\"\n if y is None:\n return True\n x = (1,) if len(x) == 0 else x # Scalar should be interpreted as an 1-element array\n y = (1,) if len(y) == 0 else y # Scalar should be interpreted as an 1-element array\n if len(x) != len(y):\n return False\n return all([(a[0] == a[1] or a[1] == -1) for a in zip(x, y)])\n\n\ndef _propagate_shapes(nn_spec, blob_names, shapes, srcs, dsts, layer_specs):\n \"\"\"\n Traverse the neural network spec. The spec may not be top level.\n This should be used as the internal recursive call. Use traverse() to do the top level traversal.\n blob_names - a list of blob names\n shapes - a dictionary of {blob_name: shape}\n srcs - a dictionary of {blob_name: layers_writing_to_it}\n dsts - a dictionary of {blob_name: layers_reading_from_it}\n layer_specs - a dictionary of {layer_name: layer_spec} for easy access to parameters.\n\n srcs, dsts, and layer_specs are byproducts that are not necessary for propagating the shapes.\n I made these for debugging purposes.\n \"\"\"\n layers = nn_spec.layers\n for i, layer in enumerate(layers):\n # Register layer\n layer_name = layer.name\n layer_specs[layer_name] = layer\n # Register input blobs\n for j, blob_name in enumerate(layer.input):\n if blob_name not in blob_names:\n raise ValueError(\n '[Shaper] Layer %s input[%d] (%s) has never been seen before.' 
\n\ndef _propagate_shapes(nn_spec, blob_names, shapes, srcs, dsts, layer_specs):\n \"\"\"\n Traverse the neural network spec. The spec may not be top level.\n This should be used as the internal recursive call. Use propagate_shapes() to do the top level traversal.\n blob_names - a list of blob names\n shapes - a dictionary of {blob_name: shape}\n srcs - a dictionary of {blob_name: layers_writing_to_it}\n dsts - a dictionary of {blob_name: layers_reading_from_it}\n layer_specs - a dictionary of {layer_name: layer_spec} for easy access to parameters.\n\n srcs, dsts, and layer_specs are byproducts that are not necessary for propagating the shapes.\n I made these for debugging purposes.\n \"\"\"\n layers = nn_spec.layers\n for i, layer in enumerate(layers):\n # Register layer\n layer_name = layer.name\n layer_specs[layer_name] = layer\n # Register input blobs\n for j, blob_name in enumerate(layer.input):\n if blob_name not in blob_names:\n raise ValueError(\n '[Shaper] Layer %s input[%d] (%s) has never been seen before.' %\n (layer_name, j, blob_name))\n if blob_name not in shapes:\n raise ValueError(\n '[Shaper] The shape of input[%d] (%s) needed for layer \"%s\" cannot be determined.'\n % (j, blob_name, layer_name))\n # Mark the layer as the destination of blob\n _insert_to_dict(dsts, blob_name, layer_name)\n\n layer_type = layer.WhichOneof('layer')\n if layer_type not in _LAYER_REGISTRY:\n raise NotImplementedError(\n '[Shaper] Layer \"{}\" of type \"{}\" not implemented'.format(layer_name, layer_type))\n if layer_type == 'forloop':\n # If a nested network, recursively traverse into it, passing the same bookkeeping dicts\n _propagate_shapes(layer.condition, blob_names, shapes, srcs, dsts, layer_specs)\n _propagate_shapes(layer.bodyNetwork, blob_names, shapes, srcs, dsts, layer_specs)\n elif layer_type == 'branch':\n _propagate_shapes(layer.ifBranch, blob_names, shapes, srcs, dsts, layer_specs)\n _propagate_shapes(layer.elseBranch, blob_names, shapes, srcs, dsts, layer_specs)\n else:\n # If a regular layer, compute output blob shapes.\n layer_translator = _get_translator_function(layer_type)\n input_shapes = [shapes[b] for b in layer.input]\n output_shapes = layer_translator(layer, input_shapes)\n\n # Register output blobs\n for k, blob_name in enumerate(layer.output):\n if blob_name not in blob_names:\n blob_names.append(blob_name)\n _insert_to_dict(srcs, blob_name, layer_name)\n if blob_name not in shapes:\n shapes[blob_name] = output_shapes[k]\n else:\n common_shape = get_common_shape(shapes[blob_name], output_shapes[k])\n if common_shape is None:\n raise ValueError(\n 'Unable to resolve shape for blob %s, with potential shape %s and %s' %\n (blob_name, str(shapes[blob_name]), str(output_shapes[k])))\n\n\ndef _finalize_spec(nn_spec, shapes, overwrite=True):\n \"\"\"\n This is the internal recursive call. Use propagate_shapes() to do the top level traversal.\n nn_spec: spec for the neural network\n shapes: a {str : shape} dictionary tracking the name -> coreml_shape pair\n overwrite: If True, will discard existing tensor shapes in the spec.\n If False, will check for tensor shape existence, write it if spec does not have tensor field,\n otherwise will check for consistency.\n \"\"\"\n layers = nn_spec.layers\n for i, layer in enumerate(layers):\n layer_type = layer.WhichOneof('layer')\n\n if overwrite:\n del layer.inputTensor[:]\n del layer.outputTensor[:]\n\n # input\n if len(layer.inputTensor) == 0:\n for j, blob_name in enumerate(layer.input):\n shape = shapes[blob_name]\n ts = layer.inputTensor.add()\n ts.rank = len(shape)\n ts.dimValue.extend(list(shape))\n else: # This does the check\n for j, blob_name in enumerate(layer.input):\n shape = shapes[blob_name]\n ts = layer.inputTensor[j]\n existing_shape = list(ts.dimValue)\n if not (is_a_shape_of(existing_shape, shape)\n or is_a_shape_of(shape, existing_shape)):\n raise ValueError(\n '[Shaper] For layer %s, Existing shape %s does not match new shape %s' %\n (layer.name, str(existing_shape), str(shape)))\n\n # output\n if len(layer.outputTensor) == 0:\n for j, blob_name in enumerate(layer.output):\n shape = shapes[blob_name]\n ts = layer.outputTensor.add()\n ts.rank = len(shape)\n ts.dimValue.extend(list(shape))\n else: # This does the check\n for j, blob_name in enumerate(layer.output):\n shape = shapes[blob_name]\n ts = layer.outputTensor[j]\n existing_shape = list(ts.dimValue)\n if not (is_a_shape_of(existing_shape, shape)\n or is_a_shape_of(shape, existing_shape)):\n raise ValueError(\n '[Shaper] For layer %s, Existing shape %s does not match new shape %s' %\n (layer.name, str(existing_shape), str(shape)))\n\n # If a nested network, recursively traverse into it with the same shape table\n if layer_type == 'forloop':\n _finalize_spec(layer.condition, shapes, overwrite)\n 
_finalize_spec(layer.bodyNetwork, shapes, overwrite)\n elif layer_type == 'branch':\n _finalize_spec(layer.ifBranch, shapes, overwrite)\n _finalize_spec(layer.elseBranch, shapes, overwrite)\n else:\n pass\n\n\ndef propagate_shapes(mlmodel_spec, overwrite=True):\n \"\"\"\n Propagate input shapes in the spec into every layer\n This changes the mlmodel_spec!!\n mlmodel_spec - the MLModel spec with the model descriptions\n overwrite - if True, will overwrite existing tensor shapes\n \"\"\"\n blob_names = []\n srcs = {}\n dsts = {}\n shapes = {}\n layer_specs = {}\n\n # put the inputs into Shaper\n for feature in mlmodel_spec.description.input:\n name = feature.name\n blob_names.append(name)\n srcs[name] = []\n shapes[name] = list(feature.type.multiArrayType.shape)\n\n top_nn_spec = mlmodel_spec.neuralNetwork\n _propagate_shapes(top_nn_spec, blob_names, shapes, srcs, dsts, layer_specs)\n _finalize_spec(top_nn_spec, shapes, overwrite=overwrite)\n\n output_names = [output.name for output in mlmodel_spec.description.output]\n\n if overwrite:\n del mlmodel_spec.description.output[:]\n\n if len(mlmodel_spec.description.output) == 0:\n for name in output_names:\n output_ = mlmodel_spec.description.output.add()\n output_.name = name\n shape = shapes[name]\n for n in shape:\n output_.type.multiArrayType.shape.append(n)\n else:\n for output_ in mlmodel_spec.description.output:\n existing_shape = list(output_.type.multiArrayType.shape)\n shape = shapes[output_.name]\n\n if not (is_a_shape_of(existing_shape, shape) or is_a_shape_of(shape, existing_shape)):\n raise ValueError(\n '[Shaper] For output %s, Existing shape %s does not match new shape %s' %\n (output_.name, str(existing_shape), str(shape)))\n\n\ndef propagate_single_layer(layer, shapes, output_shapes=None, custom_shape_function=None):\n \"\"\"\n Propagate input shape to output shape for a single layer, which could have nested networks\n layer : a layer spec\n shapes : a dictionary that stores all known shapes\n output_shapes : if None, the output tensors' shapes are computed by its shape propagation function,\n defined by _get_translator_function(layer_type). If not None, will force output_shapes to be\n written as the output spec of the layer.\n custom_shape_function : if None, shape function from _LAYER_REGISTRY will be used to infer shape,\n If not None, provided function will be used to compute output shape.\n \"\"\"\n for j, blob_name in enumerate(layer.input):\n if blob_name not in shapes:\n raise ValueError(\n '[Shaper] The shape of input[%d] (%s) needed for layer \"%s\" cannot be determined.' %\n (j, blob_name, layer.name))\n
\n layer_type = layer.WhichOneof('layer')\n if output_shapes is None:\n if layer_type not in _LAYER_REGISTRY and custom_shape_function is None:\n raise NotImplementedError(\n '[Shaper] Layer \"{}\" of type \"{}\" not implemented'.format(layer.name, layer_type))\n layer_translator = _get_translator_function(layer_type) if layer_type in _LAYER_REGISTRY else custom_shape_function\n input_shapes = [list(shapes[b]) for b in layer.input]\n output_shapes = layer_translator(layer, input_shapes)\n\n # Register output blobs\n for k, blob_name in enumerate(layer.output):\n if blob_name not in shapes:\n shapes[blob_name] = output_shapes[k]\n else:\n common_shape = get_common_shape(shapes[blob_name], output_shapes[k])\n if common_shape is None:\n raise ValueError(\n 'Unable to resolve shape for blob %s, with potential shape %s and %s' %\n (blob_name, str(shapes[blob_name]), str(output_shapes[k])))\n\n # Write into layer spec\n del (layer.inputTensor[:])\n for j, blob_name in enumerate(layer.input):\n shape = shapes[blob_name]\n ts = layer.inputTensor.add()\n ts.rank = len(shape)\n ts.dimValue.extend(list(map(int, shape)))\n\n del (layer.outputTensor[:])\n for j, blob_name in enumerate(layer.output):\n shape = shapes[blob_name]\n ts = layer.outputTensor.add()\n ts.rank = len(shape)\n ts.dimValue.extend(list(map(int, shape)))\n","sub_path":"coremltools/converters/nnssa/coreml/shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":28945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"145805343","text":"\"\"\"\n745. Prefix and Suffix Search (Hard)\n\nGiven many words, words[i] has weight i.\n\nDesign a class WordFilter that supports one function, WordFilter.f(String prefix, String suffix). It will return the word with given prefix and suffix with maximum weight. If no word exists, return -1.\n\nExamples:\n\nInput:\nWordFilter([\"apple\"])\nWordFilter.f(\"a\", \"e\") // returns 0\nWordFilter.f(\"b\", \"\") // returns -1\n \n\nNote:\n\nwords has length in range [1, 15000].\nFor each test case, up to words.length queries WordFilter.f may be made.\nwords[i] has length in range [1, 10].\nprefix, suffix have lengths in range [0, 10].\nwords[i] and prefix, suffix queries consist of lowercase letters only.\n\"\"\"\n
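\n# --- Illustrative aside (not part of the original solution): the same queries\n# can be answered with one dict keyed on 'suffix#prefix' composites, trading\n# O(sum(len(w)^2)) memory for O(1) lookups; the helper name and the '#'\n# separator are assumptions of this sketch.\ndef _word_filter_composite(words):\n table = {}\n for weight, word in enumerate(words):\n for i in range(len(word) + 1):\n for j in range(len(word) + 1):\n table[word[i:] + '#' + word[:j]] = weight # later (heavier) entries win\n return lambda prefix, suffix: table.get(suffix + '#' + prefix, -1)\n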
\nclass TrieNode(object):\n def __init__(self):\n self.is_word = False\n self.memo = [None] * 26\n self.index = set()\n\nclass WordFilter(object):\n\n def __init__(self, words):\n \"\"\"\n :type words: List[str]\n \"\"\"\n self.words = words\n self.root = TrieNode()\n self.root_inv = TrieNode()\n for i, word in enumerate(words):\n n = self.root\n for j, c in enumerate(word):\n n.index.add(i)\n ind = ord(c) - ord('a')\n if n.memo[ind] is None:\n n.memo[ind] = TrieNode()\n n = n.memo[ind]\n if j == len(word) - 1:\n n.is_word = True\n n.index.add(i)\n\n for i, word in enumerate(words):\n n = self.root_inv\n wordinv = word[::-1]\n for j, c in enumerate(wordinv):\n n.index.add(i)\n ind = ord(c) - ord('a')\n if n.memo[ind] is None:\n n.memo[ind] = TrieNode()\n n = n.memo[ind]\n if j == len(word) - 1:\n n.is_word = True \n n.index.add(i)\n\n def f(self, prefix, suffix):\n \"\"\"\n :type prefix: str\n :type suffix: str\n :rtype: int\n \"\"\"\n n = self.root\n for c in prefix:\n n = n.memo[ord(c) - ord('a')]\n if n is None: return -1\n set_pre = n.index\n n = self.root_inv\n for c in suffix[::-1]:\n n = n.memo[ord(c) - ord('a')]\n if n is None: return -1\n set_post = n.index\n set_ = set_pre & set_post\n if set_:\n return max(set_)\n else:\n return -1\n\n# Your WordFilter object will be instantiated and called as such:\n# obj = WordFilter(words)\n# param_1 = obj.f(prefix,suffix)\n\nif __name__ == \"__main__\":\n a = WordFilter([\"pop\"])\n print(a.f(\"\", \"op\"))\n # print(a.f('a', 'e'))\n # print(a.f('b', ''))\n","sub_path":"python/leetcode/745_prefix_suffix_search.py","file_name":"745_prefix_suffix_search.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"77087968","text":"from PIL import Image\nfrom seleniumrequests import PhantomJS\nimport requests\nmyDriver=PhantomJS(r\"D:\\phantomjs-2.1.1-windows\\bin\\phantomjs\")\ncaptchaUrl=r\"http://jwk.lzu.edu.cn/academic/getCaptcha.do?0.6723898265425539\"\nfor i in range(1,201):\n temp=requests.get(captchaUrl)\n myImage=open(\"%d.jpg\"%(i),'wb')\n myImage.write(temp.content)\n myImage.close()\n\nfor i in range(1,201):\n myImage2=Image.open(\"%d.jpg\"%(i))\n assert isinstance(myImage2,Image.Image)\n myImage2=myImage2.convert('L')\n myImage2.save(\"h%d.jpg\"%(i))\n\nmatrix=[]\nx=60\nfor i in range(256):\n if i <x:\n matrix.append(0)\n else:\n matrix.append(1)\nfor i in range(1,201):\n myImage3=Image.open(\"h%d.jpg\"%(i))\n assert isinstance(myImage3,Image.Image)\n myImage3.point(matrix,'1').save(\"er%d.tif\"%(i))","sub_path":"captcha_processing/get_captcha.py","file_name":"get_captcha.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"492707508","text":"def triangle_formatter(file):\r\n text_file = open(file, \"r\")\r\n\r\n lines = text_file.read().split(\"\\n\")\r\n\r\n triangle_sublists = []\r\n count = 0\r\n\r\n for i in lines:\r\n count += 1\r\n\r\n split_lines = i.split(\" \")\r\n\r\n for missio in range(count):\r\n # int() already tolerates leading zeros; lstrip(\"0\") would turn \"00\" into \"\" and crash\r\n split_lines[missio] = int(split_lines[missio])\r\n\r\n triangle_sublists.append(split_lines)\r\n\r\n return triangle_sublists\r\n
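\r\n\r\n# --- Illustrative aside (not part of the original): the same bottom-up\r\n# reduction written as a fold that leaves the input triangle unmodified;\r\n# max_path_sum_fold is a hypothetical name.\r\ndef max_path_sum_fold(tri):\r\n from functools import reduce\r\n return reduce(\r\n lambda below, row: [v + max(below[i], below[i + 1]) for i, v in enumerate(row)],\r\n reversed(tri[:-1]),\r\n tri[-1],\r\n )[0]\r\n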
\r\n\r\ndef max_path_sum(tri):\r\n length = len(tri)\r\n\r\n for row in range(length - 2, -1, -1):\r\n for column in range(0, row + 1):\r\n tri[row][column] += max(tri[row + 1][column], tri[row + 1][column + 1])\r\n return tri[0][0]\r\n\r\n\r\ntriangle = triangle_formatter(\"Additional Files\\p067_triangle.txt\")\r\n\r\nprint(max_path_sum(triangle))\r\n","sub_path":"Problem 067 - Maximum Path Sum II.py","file_name":"Problem 067 - Maximum Path Sum II.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"261742473","text":"# Neural network (classification)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# x and y coordinates\nX = np.arange(-1.0,1.0,0.1)\nY = np.arange(-1.0,1.0,0.1)\n\n# Weights\nw_im = np.array([[1.0,2.0],[2.0,3.0]])\nw_mo = np.array([[-1.0,1.0],[1.0, -1.0]])\n\n# Biases\nb_im = np.array([3.0,-3.0])\nb_mo = np.array([0.4,0.1])\n\n# Middle (hidden) layer\ndef middle_layer(x,w,b):\n u = np.dot(x,w) + b\n # Sigmoid function\n return 1/(1+np.exp(-u))\n\n# Output layer\ndef output_layer(x,w,b):\n u = np.dot(x,w) + b\n # Softmax function\n return np.exp(u)/np.sum(np.exp(u))\n\n# Lists to store the classification results\nx_1 = []\ny_1 = []\nx_2 = []\ny_2 = []\n\n# Run the neural network on each cell of the grid\nfor i in range(20):\n for j in range(20):\n\n # Forward propagation\n # Input layer\n inp = np.array([X[i],Y[j]])\n # Middle layer\n mid = middle_layer(inp, w_im, b_im)\n # Output layer\n out = output_layer(mid, w_mo, b_mo)\n\n # Compare the two probabilities and classify\n if out[0] > out[1]:\n x_1.append(X[i])\n y_1.append(Y[j])\n else:\n x_2.append(X[i])\n y_2.append(Y[j])\n\n# Show the scatter plot\nplt.scatter(x_1, y_1, marker=\"+\")\nplt.scatter(x_2, y_2, marker=\"o\")\nplt.show()\n","sub_path":"NeuralNetworkOfClassification.py","file_name":"NeuralNetworkOfClassification.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"112822037","text":"from collections import Counter\nfrom collections import defaultdict\nimport heapq\n\nclass Solution(object):\n def topKFrequent(self, s, k):\n \"\"\"\n :type s: list\n :rtype: list\n \"\"\"\n return [x for x, _ in Counter(s).most_common(k)]\n\n def topKFrequent2(self, s, k):\n \"\"\"\n :type s: list\n :rtype: list\n \"\"\"\n counts = defaultdict(lambda: 0)\n for c in s:\n counts[c] += 1\n counts = heapq.nlargest(k, counts.items(), key=lambda x: x[1])\n return [x for x, _ in counts]\n\n\nfrom lib.print_profiler_stats import print_profiler_stats\n\ns = Solution()\nmul = 1000000\nk = 5\na = (\n [1] * 2*mul +\n [2] * 4*mul +\n [3] * mul +\n [4] * 3*mul+\n [5] * 6*mul +\n [6] * 5*mul +\n [7] * mul +\n [8] * 2*mul +\n [9] * 9*mul +\n [0] * 8*mul)\n\nprint_profiler_stats(s.topKFrequent, a, k)\nprint_profiler_stats(s.topKFrequent2, a, k)\nprint(s.topKFrequent2(a, k))\n","sub_path":"unconverted/python/top_k_freq.py","file_name":"top_k_freq.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"336321466","text":"import nester\nman = []\nother = []\n# strip: removes unnecessary whitespace\ntry:\n data = open('sketch.txt')\n\n for each_line in data:\n try:\n (role, line_spoken) = each_line.split(':', 1)\n line_spoken = line_spoken.strip()\n if role == 'Man':\n man.append(line_spoken)\n elif role == 'Other Man':\n other.append(line_spoken)\n except ValueError:\n pass\n\n data.close()\nexcept IOError:\n print('The datafile is missing!')\n
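\n# --- Illustrative aside (not from the original): the locals()-existence\n# checks in the finally block below can be avoided with a context manager,\n# which closes the file even when an error occurs; save_lines is a\n# hypothetical helper name.\ndef save_lines(path, lines):\n with open(path, 'w') as out:\n print(lines, file=out)\n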
\n# Save the movie lines, split into Man and Other\ntry:\n man_file = open(\"man_line_spoken.txt\",\"w\")\n other_file = open(\"other_line_spoken.txt\",\"w\")\n print(man,file = man_file)\n print(other,file = other_file)\nexcept IOError as userDefinedError:\n print(\"File does not exist!!!\" + str(userDefinedError))\nfinally:\n # Close each file only if the corresponding object was actually created among the locals\n if \"man_file\" in locals():\n man_file.close()\n if \"other_file\" in locals():\n other_file.close()\n\nnester.print_lol(locals(),True)\n","sub_path":"headFirstPython/chapter4/page143.py","file_name":"page143.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"319517092","text":"# TESTING ONLY\nimport sys\nsys.path.append('..')\n\nfrom taser.proto.http import exec_rawRequest\n\nreq = '''GET / HTTP/1.1\nHost: www.yahoo.com\n'''\n\nresp = exec_rawRequest(req, debug=True)\nprint(\"[+] \", resp.url, \" --> \", resp.status_code)\nfor x in resp.request.headers:\n print(\" \"*8, x, \": \", resp.request.headers[x])","sub_path":"tests/raw_requests.py","file_name":"raw_requests.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"232430","text":"\"\"\"tesis URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^carrera/', include('apps.carrera.urls')),\n url(r'^docente/', include('apps.docente.urls')),\n url(r'^empresa/', include('apps.empresa.urls')),\n url(r'^especialidad/', include('apps.especialidad.urls')),\n url(r'^estudiante/', include('apps.estudiante.urls')),\n url(r'^perfil_empresa/', include('apps.perfil_empresa.urls')),\n url(r'^perfil_estudiante/', include('apps.perfil_estudiante.urls')),\n]\n","sub_path":"tesis/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"621027664","text":"#ESCAPE THE ABYSS\n#LEVEL 1\nimport random\nimport time\nimport os\nimport sys\nimport pickle\nimport termcolor\nfrom termcolor import *\nimport configFiles.gameconfig\nfrom configFiles.gameconfig import *\nimport configFiles.descriptions\nfrom configFiles.descriptions import * \n\n#Cell inputs\ndef cellFunction():\n CMC.level = 1\n CMC.choiceMaker = False\n UserInputFunction()\n CMC.choiceMaker = True\n if CMC.choiceMaker == True:\n if CMC.word == \"gonorth\" and CMC.cellDoor == \"locked\":\n print(colored(cellNorth, 'green'))\n CMC.text = cellNorth\n print(colored(\"---=---\", 'green').center(93, ' '))\n cellFunction()\n\n if CMC.word == 'gonorth' and CMC.cellDoor == \"open\":\n print(colored(cellNorth, 'green'))\n print(colored(\"---=---\", 'green').center(93, ' '))\n cellFunction()\n\n if CMC.word == \"goeast\":\n print(colored(cellEast, 'green'))\n print(colored(\"---=---\", 'green').center(93, ' '))\n cellFunction()\n\n if CMC.word == \"gosouth\":\n print(colored(cellSouth, 'green'))\n print(colored(\"---=---\", 'green').center(93, ' '))\n cellFunction()\n\n if CMC.word == \"gowest\":\n print(colored(cellWest, 'green'))\n print(colored(\"---=---\", 'green').center(93, ' '))\n cellFunction()\n \n if CMC.word == \"readnote\" and CMC.cellBrick == \"true\":\n print(colored(cellNote, 'green'))\n print(colored(\"---=---\", 'green').center(93, ' '))\n cellFunction()\n \n if CMC.word == \"jumpwindow\":\n print(colored(cellWindow, 'green'))\n print(colored(\"---=---\", 'green').center(93, ' '))\n cellFunction()\n \n if CMC.word == \"checkbrick\":\n print(colored(cellBrick, 'green'))\n print(colored(\"---=---\", 'green').center(93, ' '))\n CMC.cellBrick = \"true\"\n cellFunction()\n \n if CMC.word == \"takekey\":\n invItem = \"Cell Key\"\n addingplayerInvDict(invItem)\n print(colored(\"---=---\", 'green').center(93, ' '))\n cellFunction()\n\n if CMC.word == \"usekey\":\n keyItem = \"Cell Key\"\n usingItem(keyItem)\n if CMC.hasItem == \"false\":\n print(colored(\"You don't have the correct key.\"))\n cellFunction()\n\n if CMC.hasItem == \"true\":\n print(colored(cellDoorOpen, 'green'))\n time.sleep(5)\n level1()\n\n else:\n print(colored(\"Now is not the time for that!\", 'green').center(93, \" \"))\n print()\n print(colored(\"---=---\", 'green').center(93, ' '))\n cellFunction()\n \n else:\n print(colored(\"** ERROR **\", 'green'))\n time.sleep(3)\n UserInputFunction()\n\n#Level 1 function\ndef level1Function():\n\n CMC.choiceMaker = False\n UserInputFunction()\n CMC.choiceMaker = True\n \n if CMC.choiceMaker == True:\n None\n\n#Cell description\ndef cell():\n menuStart()\n areaUpdate(cellParagraph)\n CMC.level = 1\n print(colored(cellParagraph, 'green'))\n print()\n print(colored(\"---=---\", 'green').center(93, ' '))\n cellFunction()\n
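\n# --- Illustrative aside (not part of the original game): the\n# print/colored/center banner triple repeats throughout this module and could\n# be factored into a single helper; banner() is a hypothetical name.\ndef banner(text):\n print(colored(text, 'green'))\n print()\n print(colored(\"---=---\", 'green').center(93, ' '))\n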
\n#Level 1 description\ndef level1():\n menuStart()\n print(colored(level1Hallway1, 'green'))\n print()\n print(colored(\"---=---\", 'green').center(93, ' '))\n level1Function()\n\n#Level 1 intro text\ndef level1intro():\n menuStart()\n\n print(colored('\"Just going to lay there?\"', 'green').center(93, \" \"))\n print()\n time.sleep(2)\n print(colored('\"pity, such a pathetic, lazy, disappointment.\"', 'green').center(93, \" \"))\n print()\n print(colored(\"---=---\", 'green').center(93, ' '))\n time.sleep(5)\n menuStart()\n\n print(colored('\"Open your eyes.\"', 'green').center(93, \" \"))\n print()\n time.sleep(2)\n print(colored('\"Show me you are worthy\"', 'green').center(93, \" \"))\n print()\n print(colored(\"---=---\", 'green').center(93, ' '))\n time.sleep(5)\n menuStart()\n\n print(colored('\"Good... good, that\\'s right.\"', 'green').center(93, \" \"))\n print()\n time.sleep(2)\n print(colored('\"RISE!\"', 'green').center(93, \" \"))\n print()\n print(colored(\"---=---\", 'green').center(93, ' '))\n time.sleep(5)\n cell()\n\n\n","sub_path":"level1.py","file_name":"level1.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"424042828","text":"# https://www.hackerrank.com/challenges/html-parser-part-2/problem\n\nfrom html.parser import HTMLParser\n\nclass MyHTMLParser(HTMLParser):\n def handle_comment(self, data):\n s = 'Single-line Comment' if '\\n' not in data else 'Multi-line Comment'\n print('>>>', s)\n print(data)\n def handle_data(self, data):\n if data != '\\n':\n print(\">>> Data\")\n print(data)\n\nhtml = \"\"\nfor i in range(int(input())):\n html += input().rstrip()\n html += '\\n'\n\nparser = MyHTMLParser()\nparser.feed(html)\nparser.close()\n","sub_path":"python/regex_and_parsing/html_parser_part2.py","file_name":"html_parser_part2.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"18007808","text":"from flask import (Flask, url_for, render_template, request, redirect, session,flash, jsonify)\nimport pb_view, queries\nimport MySQLdb\nimport bcrypt\n \napp = Flask(__name__)\n\napp.secret_key = 'secret'\n\nDATABASE = 'pbmod'\n\n@app.route('/',methods=['GET','POST'])\ndef home():\n '''Home page method. Displays welcome message or login page.'''\n if request.method == 'GET':\n #check if logged in\n if 'uid' in session:\n # get user info\n username = session['username']\n uid = session['uid']\n conn = queries.getConn(DATABASE)\n campaigns = queries.getUserCampaigns(conn,uid)\n \n #render template\n return render_template('home.html',username = username, campaigns = campaigns)\n else:\n # render template w/o user info\n username = None\n campaigns = None\n return render_template('home.html',username = username, campaigns = campaigns)\n else: #login attempt\n user = request.form.get('user')\n attempt = request.form.get('password')\n \n # check database for user-pass match\n conn = queries.getConn(DATABASE)\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n \n curs.execute('''select hashed from user where username = %s''',\n [user])\n \n password = curs.fetchone()\n if password is None:\n flash(\"Login unsuccessful. Try again.\")\n return redirect(url_for('home'))\n \n hashed = password['hashed']\n \n if bcrypt.hashpw(attempt.encode('utf-8'),hashed.encode('utf-8')) == hashed:\n session['username'] = user\n \n #get uid\n curs.execute('''select uid from user where username = %s''',\n [user])\n uid = curs.fetchone()['uid']\n session['uid'] = uid\n flash(\"Login successful for \" + user)\n return redirect(url_for('home'))\n \n else:\n flash(\"Login unsuccessful. Try again.\")\n return redirect(url_for('home'))\n
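\n# --- Illustrative aside (not part of the original app): bcrypt >= 3 ships a\n# constant-time checkpw() that replaces the hashpw round-trip used in home()\n# above; _password_matches is a hypothetical helper name.\ndef _password_matches(attempt, hashed):\n return bcrypt.checkpw(attempt.encode('utf-8'), hashed.encode('utf-8'))\n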
\n@app.route('/setCamp/',methods=['POST'])\ndef setCamp():\n if request.method == 'POST':\n session['camp'] = request.form.get('campid')\n return redirect(url_for('campPage'))\n\n@app.route('/register/',methods=['GET','POST'])\ndef register():\n '''User registration route.'''\n if request.method == 'GET':\n if 'uid' in session:\n flash('Already logged into existing account.')\n return redirect(url_for('home'))\n else:\n return render_template(\"register.html\")\n else: #submit registration form\n username = request.form.get('user')\n pass1 = request.form.get('password1')\n pass2 = request.form.get('password2')\n \n if pass1 != pass2:\n flash(\"Passwords don't match\")\n return redirect(url_for('register'))\n \n else: #register user into database\n hashed = bcrypt.hashpw(pass1.encode('utf-8'), bcrypt.gensalt())\n conn = queries.getConn(DATABASE)\n try:\n uid = queries.registerUser(conn,username,hashed)\n except:\n flash(\"There was an error registering user\")\n return redirect(url_for(\"register\"))\n if uid == False:\n flash(\"User already exists\")\n return redirect(url_for(\"register\"))\n else:\n session['uid'] = uid\n session['username'] = username\n flash(\"Registration successful\")\n return redirect(url_for(\"home\"))\n \n@app.route('/logout/',methods = ['GET','POST'])\ndef logout():\n '''Route to log out current user'''\n if 'uid' in session:\n session.pop('uid')\n session.pop('username')\n flash(\"Successfully logged out\")\n return redirect(url_for('home'))\n else:\n flash('You are not logged in.')\n return redirect(url_for('home'))\n \n@app.route('/newCamp/', methods=['GET','POST'])\ndef newCamp():\n if 'uid' not in session: #verify user is logged in\n flash(\"Login to add campaign.\")\n return redirect(url_for('home'))\n \n if request.method == 'GET': #display form\n return render_template('new_camp.html')\n else:\n uid = session['uid']\n campName = request.form.get(\"name\")\n players = request.form.get(\"players\")\n players = players.strip().split() #make into list\n \n conn = queries.getConn(DATABASE)\n \n #insert new campaign into db\n cid = queries.createCampaign(conn,campName)\n \n #insert players into campaign\n for player in players:\n playerID = queries.getUserID(conn,player)\n if playerID == None:\n flash(\"There was an error adding \"+player+\" to campaign.\")\n else:\n try:\n queries.addPlayerToCamp(conn,playerID['uid'],cid)\n except:\n flash(\"There was an error adding \"+player+\" to campaign.\")\n \n #insert current user as dm to campaign (assume only DMs are creating campaigns)\n queries.addPlayerToCamp(conn,uid,cid,dm = 'yes')\n \n session['camp'] = cid\n return redirect(url_for('campPage'))\n \n@app.route('/editPlayers/',methods = ['GET','POST'])\ndef editPlayers():\n if 'uid' in session and 'camp' in session:\n uid = session['uid']\n cid = session['camp']\n \n conn = queries.getConn(DATABASE)\n \n if not queries.userIsDM(conn,uid,cid):\n flash(\"You cannot add or remove players.\")\n return redirect(url_for('campPage'))\n \n if request.method == 'GET':\n name = 
pb_view.camp_name(cid, conn)\n players = map(lambda x: x[0],queries.getCampPlayers(conn,cid)) #get campaign players and clean list\n return render_template('edit_players.html',name = name, players = ' '.join(players))\n \n else:\n playersBefore = map(lambda x: x[0],queries.getCampPlayers(conn,cid))\n players = request.form.get(\"players\")\n players = players.strip().split() #make into list\n \n #Remove players who've been removed\n for player in playersBefore: #this is horrible and i hate it\n if player not in players:\n queries.removePlayer(conn,queries.getUserID(conn,player)['uid'],cid)\n \n #insert players into campaign\n for player in players:\n playerID = queries.getUserID(conn,player)\n if playerID == None:\n flash(\"There was an error adding \"+player+\" to campaign.\")\n else:\n try:\n queries.addPlayerToCamp(conn,playerID['uid'],cid)\n except:\n flash(\"There was an error adding \"+player+\" to campaign.\")\n \n flash(\"Successfully edited players\")\n return redirect(url_for(\"campPage\"))\n \n else:\n flash(\"There was an error accessing requested page.\")\n return redirect(url_for('home'))\n\n@app.route('/campaign/', methods=[\"GET\",\"POST\"])\ndef campPage():\n '''Route to the campaign currently stored in the session'''\n id = session['uid']\n conn = pb_view.getConn('pbmod')\n #get all items that the current user can view in this campaign\n all_items = pb_view.get_all(id, session[\"camp\"], conn)\n #get the campaign name\n name = pb_view.camp_name(session[\"camp\"], conn)\n #load the initial campaign page\n isDM = queries.userIsDM(conn,id,session[\"camp\"])\n if request.method == 'GET':\n return render_template('camp_page.html', campName=name, comps=all_items, isDM=isDM)\n #if the user clicked a submit button\n else: \n #if user clicked the \"give permission\" button\n if request.form[\"submit\"]==\"giveperm\":\n #get the info on the item giving permission for, including changes \n #to permissions\n kind = request.form.get(\"key\")\n cid = request.form.get(\"cid\")\n nm = request.form.get(\"nam\")\n selected_users = request.form.getlist(\"plays\")\n #change list of selected user nums to ints so they can go into the \n #database\n for i in range(len(selected_users)):\n selected_users[i] = int(selected_users[i])\n #get a list of all users in the campaign \n all_users = pb_view.all_users(id, conn)\n no_perm = []\n #if user in campaign was not checked, revoke permission\n for i in all_users:\n if i not in selected_users:\n no_perm.append(i)\n #change permissions\n pb_view.change_perms(cid, selected_users, no_perm, id, kind.lower(), conn)\n #get new item list\n all_items_new = pb_view.get_all(id, session[\"camp\"], conn)\n #flash success and reload page\n #TO BE IMPLEMENTED WITH AJAX FOR FINAL VERSION\n flash(\"The permissions on the \" + kind + \" \" + nm + \" have been updated.\")\n return render_template('camp_page.html', campName=name, comps=all_items_new, isDM=isDM)\n #if user selected to update an item\n elif request.form[\"submit\"]==\"Notes\":\n return redirect(url_for('addItem', key=\"Notes\"))\n elif request.form[\"submit\"]==\"Character\":\n return redirect(url_for('addItem', key=\"Character\"))\n elif request.form[\"submit\"]==\"Towns\":\n return redirect(url_for('addItem', key=\"Towns\"))\n else:\n #get info on the item they are updating\n kind = request.form.get(\"key\")\n cid = request.form.get(\"cid\")\n #redirect to update page\n return redirect(url_for('updateItem', key=kind, cid=cid))\n \n@app.route('/addType/', methods=[\"GET\",\"POST\"])\ndef newType():\n 
'''Route to add new item types to the campaign.'''\n conn = pb_view.getConn('pbmod')\n if request.method == \"GET\":\n return render_template('new_type.html')\n #if submitting form\n else: \n #get campaign id and add the new type to the campaign\n campid = session[\"camp\"]\n new_type = request.form.get(\"kind\")\n add = pb_view.add_type(new_type, campid, conn)\n if add==True:\n flash(\"Type \" + new_type + \" added\")\n return redirect(url_for('campPage'))\n #If it already exists, let user know\n else: \n flash(\"Type already exists. Pick a new name.\")\n return render_template('new_type.html')\n\n@app.route('/update/<key>/<cid>', methods=['POST', 'GET'])\n@app.route('/update/', methods=['POST', 'GET'])\ndef updateItem(key=None, cid=None):\n conn = pb_view.getConn('pbmod')\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n if key == \"Character\":\n return updateCharacter(cid)\n \n if key == \"Towns\":\n return updateTown(cid)\n \n if key == \"Notes\":\n \n return updateNote(cid)\n \n flash('invalid key: '+str(key))\n print('invalid key: '+str(key))\n return redirect(request.referrer)\n \n \n \ndef updateCharacter(cid): # use optional arg \n \n if request.method == \"GET\":\n conn = pb_view.getConn('pbmod')\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n curs.execute('''select * from `character` where cid = %s''', [cid])\n results = curs.fetchone()\n return render_template('characterform.html', character = results)\n else: \n submit = request.form.get('submit')\n conn = pb_view.getConn('pbmod')\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n if submit == 'delete':\n curs.execute('''delete from `character` where cid = %s''', [cid])\n return redirect(url_for('campPage'))\n elif submit=='update':\n name = request.form.get('character-name')\n print(name)\n cclass = request.form.get('cclass')\n race = request.form.get('race')\n alignment = request.form.get('alignment')\n curs.execute('''update `character` set name = %s, class = %s, race = %s, alignment = %s where cid = %s''', [name, cclass, race, alignment, cid])\n return redirect(url_for('updateItem',key = \"Character\", cid=cid))\n \n \n\n\ndef updateTown(tid): # use optional arg \n if request.method == \"GET\":\n conn = pb_view.getConn('pbmod')\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n curs.execute('''select * from towns where tid = %s''', [tid])\n results = curs.fetchone()\n return render_template('townsform.html', towns = results)\n else: \n submit = request.form.get('submit')\n conn = pb_view.getConn('pbmod')\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n if submit == 'delete':\n curs.execute('''delete from towns where tid = %s''', [tid])\n return redirect(url_for('campPage'))\n elif submit=='update':\n name = request.form.get('town-name')\n descrip = request.form.get('towndescr')\n map = request.form.get('townmap')\n curs.execute('''update towns set name = %s, descrip = %s, map = %s where tid = %s''', [name, descrip, map, tid])\n return redirect(url_for('updateItem',key = \"Towns\", cid=tid)) \n
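\n# --- Illustrative aside (not part of the original app): the update views\n# above and below share one shape, so a generic helper could de-duplicate\n# them. Sketch only -- _update_row is a hypothetical name, and the table and\n# column identifiers must come from trusted code because SQL identifiers\n# cannot be bound as parameters.\ndef _update_row(table, id_col, row_id, cols):\n conn = pb_view.getConn('pbmod')\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n names = sorted(cols) # fixed order for both the SET clause and the values\n sets = ', '.join('%s = %%s' % c for c in names)\n curs.execute('update %s set %s where %s = %%s' % (table, sets, id_col),\n [cols[c] for c in names] + [row_id])\n conn.commit()\n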
\n\ndef updateNote(nid): # use optional arg \n if request.method == \"GET\":\n conn = pb_view.getConn('pbmod')\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n curs.execute('''select * from notes where nid = %s''', [nid])\n results = curs.fetchone()\n print (\"We got here\")\n return render_template('notesform.html', notes = results)\n else: \n submit = request.form.get('submit')\n conn = pb_view.getConn('pbmod')\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n if submit == 'delete':\n curs.execute('''delete from notes where nid = %s''', [nid])\n return redirect(url_for('campPage'))\n elif submit=='update':\n name = request.form.get('Note-name')\n body = request.form.get('body') \n \n curs.execute('''update notes set name = %s, body = %s where nid = %s''', [name, body, nid])\n return redirect(url_for('updateItem', key = 'Notes',cid=nid))\n\n\n@app.route('/add/<key>', methods = ['POST', 'GET'])\ndef addItem(key):\n if request.method == 'GET':\n if key == \"Character\":\n return render_template('addcharacter.html')\n if key == \"Towns\":\n return render_template('addtown.html')\n if key == \"Notes\":\n return render_template('addnote.html')\n elif request.method == 'POST':\n if key == \"Character\":\n return addChar(key)\n if key == \"Towns\":\n return addTown(key)\n if key == \"Notes\":\n return addNote(key)\n \ndef addNote(key):\n conn = pb_view.getConn('pbmod')\n \n notenm = request.form.get('notenm')\n noteBody = request.form.get('noteBody')\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n uid = session[\"uid\"]\n campid = session['camp']\n result = \"\"\n if (notenm == \"\" or noteBody == \"\"):\n result= \"error:Missing Input\"\n flash(result)\n else:\n curs.execute('''insert into notes(nid,name, body,campid, uid) values (%s, %s, %s, %s,%s)''', \n [0, notenm, noteBody, campid,uid])\n result = \"Note Added\" \n flash(result)\n conn.commit()\n conn.close()\n return render_template('addnote.html', result = result)\n \ndef addChar(key):\n conn = pb_view.getConn('pbmod')\n characternm = request.form.get('characternm')\n cclass = request.form.get('cclass')\n race = request.form.get('race')\n alignment = request.form.get('alignment')\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n uid = session[\"uid\"]\n campid = session['camp']\n if (characternm == \"\" or cclass == \"\" or race ==\"\" or alignment == \"\"):\n result = \"error:Missing Input\"\n flash(result)\n else:\n curs.execute('''insert into `character`(cid,name,class,race,alignment,campid,uid) values (%s, %s, %s, %s, %s, %s, %s)''', \n [0, characternm, cclass, race, alignment, campid, uid ])\n result = \"Character Added\"\n flash(result)\n conn.commit()\n conn.close()\n return render_template('addcharacter.html', result = result)\ndef addTown(key):\n conn = pb_view.getConn('pbmod')\n townnm = request.form.get('townnm')\n towndescr = request.form.get('towndescr')\n townmap = request.form.get('townmap')\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n uid = session[\"uid\"]\n campid = session['camp']\n result = \"\"\n if (townnm == \"\" or towndescr == \"\" or townmap == \"\"):\n result= \"error:Missing Input\"\n else:\n curs.execute('''insert into towns(tid,name,descrip,map,campid,uid) values (%s,%s, %s, %s, %s,%s)''', \n [0, townnm, towndescr,townmap, campid, uid])\n result = \"Town Added\"\n conn.commit()\n conn.close()\n return render_template('addtown.html', result = result) \n\n\n\n \nif __name__ == '__main__':\n app.debug = True\n app.run('0.0.0.0',8082)\n\n ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"519265336","text":"import os\nimport random\nimport time\n\nimport numpy\nfrom PIL import Image\nimport pytesseract\nimport cv2\n\n\n# Binarize using a fixed threshold\ndef get_assign_binary_image(image, threshold=160):\n # image = Image.open(file_path)\n\n # Convert to a grayscale image\n image.save('new_' + str(int(time.time())) + str(random.randint(10, 99)) + '.png')\n image = image.convert('L')\n # Binarize\n table = []\n for i in range(256):\n if i < threshold:\n table.append(0)\n else:\n table.append(1)\n image = image.point(table, '1')\n return image\n
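\n\n# --- Illustrative aside (not part of the original): the 256-entry lookup\n# table built above is a step function, so it can be produced in one\n# vectorized line; make_threshold_table is a hypothetical name.\ndef make_threshold_table(threshold):\n return (numpy.arange(256) >= threshold).astype(int).tolist()\n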
\n\n# Run OCR on a simple image captcha and return the recognised string\ndef img_dispose_to_str(image):\n \"\"\"\n\n :param image: image object\n :return:\n \"\"\"\n return pytesseract.image_to_string(image,\n config='--tessdata-dir \"C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tessdata\"')\n\n\nclass ImageDispose:\n def __init__(self, im, file_path=None):\n self.file_path = file_path\n # Read the file and convert to grayscale\n im = get_assign_binary_image(im, threshold=200)\n if file_path:\n temp_path = file_path.replace('\\\\', '\\\\temp_')\n im = Image.open(file_path)\n else:\n temp_path = 'temp_' + str(int(time.time())) + str(random.randint(100, 999)) + '.jpg'\n im = get_assign_binary_image(im, threshold=200)\n im.save(temp_path)\n self._im = cv2.cvtColor(cv2.imread(temp_path), cv2.COLOR_BGR2GRAY)\n # os.remove(temp_path)\n # OpenCV matrices are indexed (row, col), so x and y are swapped relative to image coordinates\n self._height, self._width = self._im.shape[:2]\n\n def save(self, output_path, im):\n \"\"\"\n Save the file\n :param output_path: output path\n :param im: image object\n :return:\n \"\"\"\n cv2.imwrite(output_path, im)\n\n def get_adaptive_binary_image(self):\n \"\"\"\n Binarize with an adaptive threshold\n :return:\n \"\"\"\n self._im = cv2.adaptiveThreshold(self._im, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, 1)\n\n def clear_border(self, border_width=2):\n \"\"\"\n Remove the image border, default pixel width border_width=2\n :return:\n \"\"\"\n for y in range(self._width):\n for x in range(self._height):\n if y < border_width or y > self._width - border_width:\n self._im[x, y] = 255\n if x < border_width or x > self._height - border_width:\n self._im[x, y] = 255\n\n def clear_interference_line1(self, threshold=245):\n \"\"\"\n Interference-line denoising; colour threshold=245. Criterion: the 8 neighbours around the target pixel\n :return:\n \"\"\"\n for y in range(1, self._width - 1):\n for x in range(1, self._height - 1):\n count = 0\n\n if self._im[x, y - 1] > threshold:\n count += 1\n if self._im[x - 1, y - 1] > threshold:\n count += 1\n if self._im[x - 1, y + 1] > threshold:\n count += 1\n if self._im[x + 1, y + 1] > threshold:\n count += 1\n if self._im[x + 1, y - 1] > threshold:\n count += 1\n if self._im[x, y + 1] > threshold:\n count += 1\n if self._im[x - 1, y] > threshold:\n count += 1\n if self._im[x + 1, y] > threshold:\n count += 1\n # If more than 5 of the 8 neighbours are white, turn the target pixel white\n if count > 5:\n self._im[x, y] = 255\n\n def clear_interference_line2(self, threshold=245):\n \"\"\"\n Interference-line denoising; colour threshold=245. Criterion: the 5x5 neighbourhood around the target pixel\n :return:\n \"\"\"\n for y in range(2, self._width - 2):\n for x in range(2, self._height - 2):\n count = 0\n # Handle the surrounding 5x5 (25-cell) area\n # inner ring: 8 cells\n\n if self._im[x, y - 1] > threshold:\n count += 1\n if self._im[x - 1, y - 1] > threshold:\n count += 1\n if self._im[x - 1, y + 1] > threshold:\n count += 1\n if self._im[x + 1, y + 1] > threshold:\n count += 1\n if self._im[x + 1, y - 1] > threshold:\n count += 1\n if self._im[x, y + 1] > threshold:\n count += 1\n if self._im[x - 1, y] > threshold:\n count += 1\n if self._im[x + 1, y] > threshold:\n count += 1\n # outer ring: 16 cells\n if self._im[x - 2, y - 2] > threshold:\n count += 1\n if self._im[x - 1, y - 2] > threshold:\n count += 1\n if self._im[x, y - 2] > threshold:\n count += 1\n if self._im[x + 1, y - 2] > threshold:\n count += 1\n if self._im[x + 2, y - 2] > threshold:\n count += 1\n if self._im[x - 2, y - 1] > threshold:\n count += 1\n if self._im[x - 2, y] > threshold:\n count += 1\n if self._im[x - 2, y + 1] > threshold:\n count += 1\n if self._im[x + 2, y - 1] > threshold:\n count += 1\n if self._im[x + 2, y] > threshold:\n count += 1\n if self._im[x + 2, y + 1] > threshold:\n count += 1\n if self._im[x + 2, y + 2] > threshold:\n count += 1\n if self._im[x + 1, y + 2] > threshold:\n count += 1\n if self._im[x, y + 2] > threshold:\n count += 1\n if self._im[x - 1, y + 2] > threshold:\n count += 1\n if self._im[x - 2, y + 2] > threshold:\n count += 1\n if count > 14:\n self._im[x, y] = 255\n
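\n # --- Illustrative aside (not in the original): the per-pixel neighbour\n # counting above can be vectorized with an unnormalized box filter; the\n # method name and min_white parameter are assumptions, and borders get\n # OpenCV's default padding instead of being skipped.\n def clear_interference_fast(self, threshold=245, min_white=15):\n white = (self._im > threshold).astype(numpy.uint8)\n neighbours = cv2.boxFilter(white, -1, (5, 5), normalize=False)\n self._im[neighbours > min_white] = 255\n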
\n def clear_interference_line3(self, threshold=245):\n for y in range(1, self._width - 3):\n for x in range(1, self._height - 3):\n if self._im[x, y] > threshold:\n continue\n if self._im[x + 2, y] > threshold and self._im[x - 1, y] > threshold or \\\n self._im[x, y + 2] > threshold and self._im[x, y - 1] > threshold:\n self._im[x, y] = 255\n\n def clear_interference_point(self, threshold=245):\n \"\"\"\n Interference-point denoising, using 3-, 5- and 8-neighbour checks around the target pixel\n :param threshold: colour threshold; 255 is the maximum (pure white)\n :return:\n \"\"\"\n for y in range(self._width - 1):\n for x in range(self._height - 1):\n if self._im[x, y] > threshold:\n continue\n # top row\n if y == 0:\n # top-left corner, 4-cell neighbourhood\n if x == 0:\n if int(self._im[x, y]) + int(self._im[x + 1, y]) + int(self._im[x + 1, y + 1]) + int(\n self._im[x, y + 1]) > 2 * threshold:\n self._im[x, y] = 255\n # top-right corner, 4-cell neighbourhood\n elif x == self._height - 1:\n if int(self._im[x, y]) + int(self._im[x - 1, y]) + int(self._im[x - 1, y + 1]) + int(\n self._im[x, y + 1]) > 2 * threshold:\n self._im[x, y] = 255\n # top edge (non-corner), 6-cell neighbourhood\n else:\n if int(self._im[x, y]) + int(self._im[x - 1, y]) + int(self._im[x - 1, y + 1]) + int(\n self._im[x, y + 1]) + int(self._im[x + 1, y + 1]) + int(\n self._im[x + 1, y]) > 3 * threshold:\n self._im[x, y] = 255\n # bottom row\n elif y == self._width - 1:\n # bottom-left corner, 4-cell neighbourhood\n if x == 0:\n if int(self._im[x, y]) + int(self._im[x + 1, y]) + int(self._im[x + 1, y - 1]) + int(\n self._im[x, y - 1]) > 2 * threshold:\n self._im[x, y] = 255\n # bottom-right corner, 4-cell neighbourhood\n elif x == self._height - 1:\n if int(self._im[x, y]) + int(self._im[x - 1, y]) + int(self._im[x - 1, y - 1]) + int(\n self._im[x, y - 1]) > 2 * threshold:\n self._im[x, y] = 255\n # bottom edge (non-corner), 6-cell neighbourhood\n else:\n if int(self._im[x, y]) + int(self._im[x - 1, y]) + int(self._im[x - 1, y - 1]) + int(\n self._im[x, y - 1]) + int(self._im[x + 1, y - 1]) + int(\n self._im[x + 1, y]) > 3 * threshold:\n self._im[x, y] = 255\n # otherwise\n else:\n # left edge (non-corner), 6-cell neighbourhood\n if x == 0:\n if int(self._im[x, y]) + int(self._im[x, y - 1]) + int(self._im[x + 1, y - 1]) + int(\n self._im[x + 1, y]) + int(self._im[x + 1, y + 1]) + int(\n self._im[x, y + 1]) > 3 * threshold:\n self._im[x, y] = 255\n # right edge (non-corner), 6-cell neighbourhood\n elif x == self._height - 1:\n if int(self._im[x, y]) + int(self._im[x, y - 1]) + int(self._im[x - 1, y - 1]) + int(\n self._im[x - 1, y]) + int(self._im[x - 1, y + 1]) + int(\n self._im[x, y + 1]) > 3 * threshold:\n self._im[x, y] = 255\n # interior pixel, 9-cell neighbourhood\n else:\n if int(self._im[x, y]) + int(self._im[x - 1, y - 1]) + int(self._im[x, y - 1]) + int(\n self._im[x + 1, y - 1]) + int(self._im[x - 1, y]) + int(self._im[x + 1, y]) + int(\n self._im[x - 1, y + 1]) + int(self._im[x, y + 1]) + int(\n self._im[x + 1, y + 1]) > 5 * threshold:\n self._im[x, y] = 255\n\n def dispose(self, border=False, line=True, point=True):\n if border:\n self.clear_border()\n if line:\n # self.clear_interference_line2()\n\n # cv2.imshow('Image',self._im)\n self.clear_interference_line3()\n self.clear_interference_line1()\n # cv2.imshow('Image', self._im)\n cv2.imwrite('result_' + str(int(time.time())) + str(random.randint(100, 999)) + '.jpeg', self._im)\n if point:\n self.clear_interference_point()\n # cv2.imshow('Image', self._im)\n\n cv2.imwrite('dispose_ima.jpg', self._im)\n 
tessdata_dir_config = '--tessdata-dir \"C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tessdata\"'\n result = ''\n try:\n result = pytesseract.image_to_string('dispose_ima.jpg', config=tessdata_dir_config).replace(' ',\n '').replace(\n '“', '').replace('(', \"\").replace(')', '').replace(',', \"\").replace('\\'', '').replace('.', '').replace(\n '’', '').replace('‘', '').replace('”', '').replace(':', '').replace('\\\"', '')\n os.remove('dispose_ima.jpg')\n except Exception as e:\n print('image_to_string:', e)\n print(result)\n return result\n\n\nif __name__ == '__main__':\n image = Image.open('ddd.png')\n","sub_path":"icp_project/verifyCode_images/test_dispose.py","file_name":"test_dispose.py","file_ext":"py","file_size_in_byte":11833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"267214949","text":"#\n# what is it ? an interactive opencv c++ (and java !) compiler.\n#\n# this script heavily depends on github.com.berak.sugarcoatedchili/bin/compile\n# base assumptions:\n# local (static) opencv installs for 2.4(ocv) and 3.0(ocv30) were extracted\n# ant (for java) was downloaded and extracted\n#\n\nimport sys, socket, threading, time, datetime, os, random\nimport subprocess, urllib, urllib2\nfrom cgi import parse_qs, escape\nfrom wsgiref.simple_server import make_server\ntry:\n from io import BytesIO\nexcept ImportError:\n from StringIO import StringIO as BytesIO\n\n\ncode_java_static=\"\"\"\nclass SimpleSample {\n static{ System.loadLibrary(Core.NATIVE_LIBRARY_NAME); }\n public static void cout(Object ... s){ for(Object z:s)System.out.println(z); }\n public static void cerr(Object ... s){ for(Object z:s)System.err.println(z); }\n public static void help(){ cerr(\"help(classname,item);\\\\n 'classname' should be canonical, like org.opencv.core.Mat\\\\n 'item' can be: CONSTRUCTOR, FIELD, METHOD, CLASS, ALL\"); }\n public static void help(String cls){ ClassSpy.reflect(cls,\"CLASS\"); }\n public static void help(String cls,String item){ ClassSpy.reflect(cls,item); }\n public static void main(String[] args) {\n\"\"\"\n\ncode_java_pre_shared = \"\"\"\nimport java.util.*;\nimport java.awt.*;\nimport java.awt.image.*;\nimport java.io.File;\nimport java.io.IOException;\nimport javax.imageio.*;\nimport org.opencv.core.*;\nimport org.opencv.calib3d.*;\nimport org.opencv.features2d.*;\nimport org.opencv.imgproc.*;\nimport org.opencv.objdetect.*;\nimport org.opencv.photo.*;\nimport org.opencv.utils.*;\nimport org.opencv.video.*;\n\"\"\"\n\ncode_java_pre_24=\"\"\"\nimport org.opencv.highgui.*;\n\"\"\" + code_java_static + \"\"\"\n Mat ocv = Highgui.imread(\"input.img\",-1);\n if ( ocv.empty() )\n ocv = new Mat(8,8,CvType.CV_8UC3,new Scalar(40,40,40));\n\"\"\"\n\ncode_java_pre_30=\"\"\"\n//import org.opencv.face.*;\nimport org.opencv.bioinspired.*;\nimport org.opencv.imgcodecs.*;\nimport org.opencv.imgproc.*;\nimport org.opencv.videoio.*;\n\"\"\" + code_java_static + \"\"\"\n Mat ocv = Imgcodecs.imread(\"input.img\",-1);\n if ( ocv.empty() )\n ocv = new Mat(8,8,CvType.CV_8UC3,new Scalar(40,40,40));\n\"\"\"\n\ncode_java_post_24=\"\"\"\n ;;\n Highgui.imwrite(\"output.png\", ocv);\n System.exit(0); // to break out of the ant shell.\n }\n}\n\"\"\"\n\ncode_java_post_30=\"\"\"\n ;;\n Imgcodecs.imwrite(\"output.png\", ocv);\n System.exit(0); // to break out of the ant shell.\n }\n}\n\"\"\"\n\ncode_cpp_pre_static=\"\"\"\nusing namespace cv;\n#include <algorithm>\n#include <iostream>\n#include <numeric>\n#include <vector>\n#include <bitset>\n#include <map>\n#include <set>\nusing namespace std;\nvoid download(const char * url, const 
char * localthing) {\n system(format(\"curl -s -o %s '%s'\",localthing,url).c_str());\n}\nMat urlimg(const char * url) {\n download(url,\"local.img\");\n Mat im = imread(\"local.img\", -1);\n //system(\"rm local.img\");\n return im;\n}\nint main()\n{\n Mat ocv = imread(\"input.img\",-1);\n if ( ocv.empty() )\n ocv = Mat(8,8,CV_8UC3,Scalar(40,40,40));\n\"\"\"\n\ncode_cpp_pre_24=\"\"\"\n#include \"opencv2/contrib/contrib.hpp\"\n#include \"opencv2/calib3d/calib3d.hpp\"\n#include \"opencv2/core/core.hpp\"\n#include \"opencv2/nonfree/nonfree.hpp\"\n#include \"opencv2/nonfree/features2d.hpp\"\n#include \"opencv2/features2d/features2d.hpp\"\n#include \"opencv2/objdetect/objdetect.hpp\"\n#include \"opencv2/highgui/highgui.hpp\"\n#include \"opencv2/imgproc/imgproc.hpp\"\n#include \"opencv2/legacy/legacy.hpp\"\n#include \"opencv2/ml/ml.hpp\"\n#include \"opencv2/photo/photo.hpp\"\n#include \"opencv2/video/video.hpp\"\n#include \"opencv2/video/tracking.hpp\"\n#include \"opencv2/video/background_segm.hpp\"\n\"\"\" \n\ncode_cpp_pre_30=\"\"\"\n#include \"opencv2/bioinspired.hpp\"\n#include \"opencv2/core.hpp\"\n#include \"opencv2/calib3d.hpp\"\n#include \"opencv2/ccalib.hpp\"\n#include \"opencv2/imgcodecs.hpp\"\n#include \"opencv2/features2d.hpp\"\n#include \"opencv2/xfeatures2d.hpp\"\n#include \"opencv2/xfeatures2d/nonfree.hpp\"\n#include \"opencv2/objdetect.hpp\"\n#include \"opencv2/xobjdetect.hpp\"\n#include \"opencv2/imgproc.hpp\"\n#include \"opencv2/ximgproc.hpp\"\n#include \"opencv2/face.hpp\"\n#include \"opencv2/bgsegm.hpp\"\n#include \"opencv2/optflow.hpp\"\n#include \"opencv2/shape.hpp\"\n#include \"opencv2/saliency.hpp\"\n#include \"opencv2/stitching.hpp\"\n#include \"opencv2/superres.hpp\"\n#include \"opencv2/tracking.hpp\"\n#include \"opencv2/text.hpp\"\n#include \"opencv2/ml.hpp\"\n#include \"opencv2/photo.hpp\"\n#include \"opencv2/xphoto.hpp\"\n#include \"opencv2/video.hpp\"\n\"\"\" \n\ncode_cpp_post=\"\"\"\n ;;\n imwrite(\"output.png\", ocv);\n return 0;\n}\n\"\"\"\n\nstyle=\"\"\"\n<style>\n body,iframe,textarea,table,input,button,select,option,scrollbar,div{\n font-family: Arial, \"MS Trebuchet\", sans-serif; font-size: 12;\n background-color: #333; color:#aaa;\n border-color:#777; border-style:solid; border-width:2;\n margin: 5 5 5 5;\n }\n a{ text-decoration: none; color:#888; }\n a:hover{ color:#ddd; }\n body{ margin: 15 15 15 15; border: 0; }\n textarea,pre{ font-family: Lucida Console; }\n</style>\n\"\"\"\n\n\nz_js = \"\"\"\n var canvas = document.getElementById('input_url');\n canvas.onmousemove = function (evt) {\n canvas.title = '(' + (evt.clientX - canvas.x) + ',' + (evt.clientY - canvas.y) + ')'\n } \n\"\"\"\n\n\ndef write_faq():\n faq = [\n [\"what is it ?\", \"an online opencv c++ / java compiler,<br> meant as an interactive pastebin,<br> or a quick tryout without installing anything locally.<br> basically, your code is running inside some shim, like int main(){/*your code*/}\"],\n [\"what can i do ?\", \"e.g. load an image into ocv, manipulate it, show the result.\"],\n [\"any additional help ?\", \"<a href=answers.opencv.org>answers.opencv.org</a>, <a href=docs.opencv.org>docs.opencv.org</a>, #opencv on freenode\"],\n [\"opencv version ?\", \"2.4.9 / 3.0.0.\"],\n [\"does it support python, too?\", \"no, unfortunately.<br> it's already quite a 'byo' party here on heroku, getting cv2 to run here would mean building python/numpy from scratch.<br> ... 
maybe later...\"],\n [\"do i need opencv installed ?\", \"no, it's all in the cloud.<br>minimal knowledge of the opencv c++/java api is sure helpful.\"],\n [\"no video ?\", \"no, unfortunately. you can download / manipulate exactly 1 image only (the one named 'ocv')\"],\n [\"is there gpu support of any kind, like ocl or cuda ?\", \"none of it atm. <br>(heroku even seems to support ocl, but i'm too lazy to try that now.)\"],\n [\"does it do c++11 ?\", \"it supports -std=c++0x only.<br>we're running on g++ (Ubuntu 4.4.3-4ubuntu5.1) 4.4.3.\"],\n [\"where are the cascades ?\", \"in './ocv/share/OpenCV/haarcascades', './ocv/share/OpenCV/lbpcascades'\"],\n [\"examples ?\", \"Mat m = Mat::ones(4,4,5);\\\\r\\\\ncerr << m << endl;\"],\n [\"i want to program in c.\",\"oh, no.(but try java ;)\"],\n #[\"src code ?\",\"https://github.com/berak/opencv_smallfry/tree/master/chili\"],\n ]\n data = '<html><head>\\\\n'\n data += '<title>faq</title>'\n data += style\n data += '</head><body>\\\\n'\n for f in faq: \n k = f[0]\n data += \"<li><a href='#%s'>• %s</a></li>\\\\n\" % (k,k)\n data += \"<br><hr>\\\\n\"\n for f in faq: \n k = f[0]\n v = f[1]\n data += \"<a name='%s'><b>• %s</b></a><br>\\\\n\" % (k,k)\n data += v + \"<br><br>\\\\n\"\n data += \"</body>\\\\n\"\n data += '</html>\\\\n'\n return data\n\n#\n# download an image url, save it, and return the local filename\n#\ndef url_image(u):\n try:\n c = urllib2.urlopen(u)\n img = c.read() \n except: return ''\n fn=\"input.img\"\n f=open(fn,\"wb\")\n f.write(img)\n f.close()\n return fn\n\ndef get_file(fn):\n try:\n f = open(fn,\"rb\")\n except: return \"\"\n it = f.read()\n f.close()\n return it\n\n#\n# .layout\n#\n# url | images\n# code | prog stdout/stderr\n# form buttons | compile results\n# | help link\n#\ndef write_page( code, result, link='',img='',input_url='' ):\n data = '<html><head>\\\\n'\n data += style\n data += '</head><body><form method=post action=/run>\\\\n'\n data += '<input id=input_url name=url size=80 value=\"%s\">\\\\n' % input_url\n data += '<br><textarea name=txt rows=24 cols=100>%s</textarea>\\\\n' % code\n data += '<br><input type=submit value=\"run\">\\\\n'\n if link: data +='&nbsp;&nbsp;<a href=\"%s\">%s</a>\\\\n' % (link,link)\n data += '</form>\\\\n'\n data += \"<a href=/faq>?</a><br>\\\\n\"\n if input_url: data += \"<img src='\" + input_url + \"'> \"\n data += img\n data += result\n data += '</body>\\\\n'\n data += '</html>\\\\n'\n return data\n\n\ndef _remove(x): \n try: \n os.remove(x)\n except: pass\n\n\n#\n# execute bot_cmd, return piped stdout/stderr\n# \ndef run_prog( bot_command ):\n try:\n bot = subprocess.Popen(\n bot_command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True)\n except: pass\n\n def collect(out):\n d=''\n while 1:\n z = out.readline()\n if not z : return d\n d += z \n\n data = collect(bot.stdout)\n data += collect(bot.stderr)\n data = data.replace(\"<\",\"&lt;\").replace(\">\",\"&gt;\")\n bot.wait()\n return '<pre>' + data + '</pre>'\n\n\n\ndef run_cpp( code, v30 ):\n # save code\n f = open(\"cv.cpp\",\"wb\")\n if v30:\n f.write(code_cpp_pre_30)\n else:\n f.write(code_cpp_pre_24)\n f.write(code_cpp_pre_static)\n f.write(code)\n f.write(code_cpp_post)\n f.close()\n\n # start bot\n script = \"bash build.cv.sh\"\n if v30: script = \"bash build.cv.30.sh\"\n data = run_prog( script )\n data += \"<hr>\"\n data += run_prog( \"./cv\" )\n _remove(\"cv\")\n return data\n\n\ndef run_java( code,v30 ):\n # save code\n f = open(\"src/SimpleSample.java\",\"wb\")\n f.write(code_java_pre_shared)\n if v30:\n f.write(code_java_pre_30)\n f.write(code)\n f.write(code_java_post_30)\n else:\n f.write(code_java_pre_24)\n f.write(code)\n f.write(code_java_post_24)\n f.close()\n _remove(\"output.png\")\n\n # start (ant) bot\n script = \"bash build.java.sh\"\n if v30: script = \"bash build.java.30.sh\"\n return run_prog( script )\n\n\ndef check_code(code):\n if code.find(\"java.\") >=0 : return \"java\"\n if code.find(\"CvType.\") >=0 : return \"java\"\n if code.find(\"System.\") >=0 : return \"java\"\n if code.find(\"Core.\") >=0 : return \"java\"\n if code.find(\"Highgui.\")>=0 : return \"java\"\n if code.find(\"Imgproc.\")>=0 : return \"java\"\n if code.find(\"org.opencv.\")>=0 : return \"java\"\n return \"cpp\"\n
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import osv\nfrom openerp.osv import fields\nfrom openerp.tools.translate import _\nimport time\n#import pooler\n\nclass product_product(osv.osv):\n _inherit = 'product.template'\n \n _columns = {\n 'nivel_producto' : fields.selection(\n\t\t\t[('principal','Producto principal'),\n\t\t\t('adicional','Producto adicional'),],'Nivel del producto'),\n\t\t'maximo_consultas': fields.boolean('Tiene maximo de consultas?', help=\"Selecciona si el producto tiene un maximo de consultas.\"),\n\t\t'maximo_consultas_mensual' : fields.float('Maximo de consultas mensuales para el producto', digits=(8, 5)),\n\t\t'maximo_subcuentas': fields.boolean('Tiene maximo de subcuentas?', help=\"Selecciona si el producto tiene un maximo de subcuentas.\"),\n\t\t'maximo_subcuentas_total' : fields.float('Maximo de subcuentas permitidas para el producto', digits=(8, 5)),\n\t\t'maximo_juicios_registrados': fields.boolean('Tiene maximo de juicios registrados?', help=\"Selecciona si el producto tiene un maximo de juicios registrados.\"),\n\t\t'maximo_juicios_registrados_mensual' : fields.float('Maximo de juicios registrados para el producto', digits=(8, 5)),\n }\n\nproduct_product()\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n\n","sub_path":"optimit_justiti_general/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"46266082","text":"from core.agility.v3_0.agilitymodel.base.AssetProperty import AssetPropertyBase\n\nclass ForeignAssetPropertyBase(AssetPropertyBase):\n '''\n classdocs\n '''\n def __init__(self, propertydefinitionreference=None):\n AssetPropertyBase.__init__(self)\n self._attrSpecs = getattr(self, '_attrSpecs', {})\n self._attrSpecs.update({'propertyDefinitionReference': {'type': 'Link', 'name': 'propertydefinitionreference', 'minOccurs': '0', 'native': False}})\n self.propertydefinitionreference = propertydefinitionreference \n","sub_path":"core/agility/v3_0/agilitymodel/base/ForeignAssetProperty.py","file_name":"ForeignAssetProperty.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"592645585","text":"# -*- coding: utf-8 -*-\n# Author: D.Gray\n'''\n梦想的度假圣地:编写一个程序,调查用户梦想的度假圣地。使用类似于\"If you could visit one place in the\nworld,where would you go?\"的提示,并编写一个打印调查结果的代码块\n'''\nresponses = {}\npolling_active = True\nwhile polling_active:\n name = input(\"\\nWhat's your name? 
\")\n response = input(\"If you could visit one place in the world,where would you go?\")\n\n if (name == '' or response == '') or (name == ' ' or response == ' '):\n print('User information cannot be empty ')\n else:\n responses[name] = response # responses{name:response}将用户输入\n # 的name和response存储到responses字典中\n repeat = input('Would you like to let another person respond?(yes/no)')#询问用户是否继续参与调查\n if repeat == 'no':\n break\n print('\\n---Poll Results---')\n for name,response in responses.items(): #循环遍历responses字典中的键值对参数\n print(name.title(),'would like to go',response,'.')","sub_path":"Python_Study/第一模块学习/Day1/while循环_练习3.py","file_name":"while循环_练习3.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"612262154","text":"\"\"\"\nUnit Test này yêu cầu mô hình Slick có gắn thêm Motors & Sensors\n\nOn Robot Mesh Studio: https://www.robotmesh.com/studio/5ffe5199ce773b05bc962da4\n\"\"\"\n\n\n# NHẬP CÁC ĐỐI TƯỢNG TỪ THƯ VIỆN\n# ==============================\n\nfrom vex import (\n Brain, Bumper,\n Ports, SECONDS\n)\nfrom random import randint\n\n\n# KHỞI TẠO CÁC BỘ PHẬN ROBOT\n# ==========================\n\n# khởi tạo brain\nbrain = Brain()\n\n# khởi tạo các bumper switch\nbumper_9 = Bumper(Ports.PORT9)\nbumper_10 = Bumper(Ports.PORT10)\n\n\n# CHƯƠNG TRÌNH CHÍNH\n# ==================\n\nwhile True:\n # mỗi khi một trong những bumper switches được nhấn\n # thì brain phát âm thanh ngẫu nhiên\n if bumper_9.pressing() or bumper_10.pressing():\n brain.sound.play(\n randint(1, 7), # note: một trong 7 note C, D, E, F, G, A, B\n randint(1, 7), # octave: một số từ 1 đến 7\n 0.5, SECONDS # 0.5 giây\n )\n","sub_path":"VEX-IQ/STEAM-for-VietNam/2021-VEX-IQ-Intro-Course/Lesson-1/Slick-Test-Bumper-Switches.py","file_name":"Slick-Test-Bumper-Switches.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"195208174","text":"\"\"\"Define tests for the client object.\"\"\"\n# pylint: disable=protected-access\nimport asyncio\n\nimport aiohttp\nimport pytest\n\nfrom pyopenuv import Client\nfrom pyopenuv.errors import InvalidApiKeyError, RequestError\n\nfrom tests.async_mock import patch\nfrom tests.common import (\n TEST_ALTITUDE,\n TEST_API_KEY,\n TEST_LATITUDE,\n TEST_LONGITUDE,\n load_fixture,\n)\n\n\n@pytest.mark.asyncio\nasync def test_bad_api_key(aresponses):\n \"\"\"Test the that the proper exception is raised with a bad API key.\"\"\"\n aresponses.add(\n \"api.openuv.io\",\n \"/api/v1/protection\",\n \"get\",\n aresponses.Response(text=\"\", status=403),\n )\n\n with pytest.raises(InvalidApiKeyError):\n async with aiohttp.ClientSession() as session:\n client = Client(\n TEST_API_KEY,\n TEST_LATITUDE,\n TEST_LONGITUDE,\n altitude=TEST_ALTITUDE,\n session=session,\n )\n await client.uv_protection_window()\n\n\n@pytest.mark.asyncio\nasync def test_bad_request(aresponses):\n \"\"\"Test that the proper exception is raised during a bad request.\"\"\"\n aresponses.add(\n \"api.openuv.io\",\n \"/api/v1/bad_endpoint\",\n \"get\",\n aresponses.Response(text=\"\", status=500),\n )\n\n with pytest.raises(RequestError):\n async with aiohttp.ClientSession() as session:\n client = Client(\n TEST_API_KEY,\n TEST_LATITUDE,\n TEST_LONGITUDE,\n altitude=TEST_ALTITUDE,\n session=session,\n )\n await client.async_request(\"get\", \"bad_endpoint\")\n\n\n@pytest.mark.asyncio\nasync def test_protection_window(aresponses):\n \"\"\"Test 
successfully retrieving the protection window.\"\"\"\n aresponses.add(\n \"api.openuv.io\",\n \"/api/v1/protection\",\n \"get\",\n aresponses.Response(\n text=load_fixture(\"protection_window_response.json\"),\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n ),\n )\n\n async with aiohttp.ClientSession() as session:\n client = Client(\n TEST_API_KEY,\n TEST_LATITUDE,\n TEST_LONGITUDE,\n altitude=TEST_ALTITUDE,\n session=session,\n )\n data = await client.uv_protection_window()\n assert data[\"result\"][\"from_uv\"] == 3.2509\n\n\n@pytest.mark.asyncio\nasync def test_timeout():\n \"\"\"Test that a timeout raises an exception.\"\"\"\n async with aiohttp.ClientSession() as session:\n client = Client(\n TEST_API_KEY,\n TEST_LATITUDE,\n TEST_LONGITUDE,\n altitude=TEST_ALTITUDE,\n session=session,\n )\n\n with patch(\"aiohttp.ClientSession.request\", side_effect=asyncio.TimeoutError):\n with pytest.raises(RequestError):\n await client.uv_forecast()\n\n\n@pytest.mark.asyncio\nasync def test_session_from_scratch(aresponses):\n \"\"\"Test that an aiohttp ClientSession is created on the fly if needed.\"\"\"\n aresponses.add(\n \"api.openuv.io\",\n \"/api/v1/forecast\",\n \"get\",\n aresponses.Response(\n text=load_fixture(\"uv_forecast_response.json\"),\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n ),\n )\n\n client = Client(TEST_API_KEY, TEST_LATITUDE, TEST_LONGITUDE, altitude=TEST_ALTITUDE)\n data = await client.uv_forecast()\n assert len(data[\"result\"]) == 2\n\n\n@pytest.mark.asyncio\nasync def test_uv_forecast(aresponses):\n \"\"\"Test successfully retrieving UV forecast info.\"\"\"\n aresponses.add(\n \"api.openuv.io\",\n \"/api/v1/forecast\",\n \"get\",\n aresponses.Response(\n text=load_fixture(\"uv_forecast_response.json\"),\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n ),\n )\n\n async with aiohttp.ClientSession() as session:\n client = Client(\n TEST_API_KEY,\n TEST_LATITUDE,\n TEST_LONGITUDE,\n altitude=TEST_ALTITUDE,\n session=session,\n )\n data = await client.uv_forecast()\n assert len(data[\"result\"]) == 2\n\n\n@pytest.mark.asyncio\nasync def test_uv_index_async(aresponses):\n \"\"\"Test successfully retrieving UV index info (async).\"\"\"\n aresponses.add(\n \"api.openuv.io\",\n \"/api/v1/uv\",\n \"get\",\n aresponses.Response(\n text=load_fixture(\"uv_index_response.json\"),\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n ),\n )\n\n async with aiohttp.ClientSession() as session:\n client = Client(\n TEST_API_KEY,\n TEST_LATITUDE,\n TEST_LONGITUDE,\n altitude=TEST_ALTITUDE,\n session=session,\n )\n data = await client.uv_index()\n assert data[\"result\"][\"uv\"] == 8.2342\n","sub_path":"tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"556905181","text":"#!/usr/bin/python\n\n#\n# This python script illustrates fetching information from a CGI program\n# that typically gets its data via an HTML form using a POST method.\n#\n# Copyright (c) 2018, Carnegie Mellon University. 
All Rights Reserved.\n#\n\nimport requests\nimport glob\n\n# ===> FILL IN YOUR PARAMETERS <===\n\nuserId = 'xxx@andrew.cmu.edu'\npassword = 'xxx'\n\ndef ndeval(fileIn):\n# Form parameters - these must match form parameters in the web page\n\n url = 'https://boston.lti.cs.cmu.edu/classes/11-642/HW/HTS/nes.cgi'\n values = { 'qrel' : 'cw09a.diversity.101-200.qrel',\t# cgi parameter\n 'hwid' : 'HW5'\t\t\t\t# cgi parameter\n }\n\n # Make the request\n\n files = {'infile' : open(fileIn, 'rb') }\t# cgi parameter\n result = requests.post (url, data=values, files=files, auth=(userId, password))\n\n # Replace the
<br> with \\n for clarity\n\n text = result.text.replace('<br>
    ', '\\n').split(\"\\n\")\n\n dict = {}\n sum1 = 0.0\n sum2 = 0.0\n sum3 = 0.0\n num = 0\n for line in text:\n if line.startswith(\"runid\"):\n cols = line.split(\",\")\n for i in range(len(cols)):\n if cols[i] == \"P-IA@10\":\n dict[\"P-IA@10\"] = i\n elif cols[i] == \"P-IA@20\":\n dict[\"P-IA@20\"] = i\n elif cols[i] == \"alpha-nDCG@20\":\n dict[\"alpha-nDCG@20\"] = i\n if line.startswith(\"run-1\"):\n cols = line.split(\",\")\n sum1 += float(cols[dict[\"P-IA@10\"]])\n sum2 += float(cols[dict[\"P-IA@20\"]])\n sum3 += float(cols[dict[\"alpha-nDCG@20\"]])\n num += 1\n\n print('%.4f' %(sum1/num))\n print('%.4f' %(sum2/num))\n print('%.4f' %(sum3/num))\n\nfor filename in glob.glob(\"xxx/*.teIn\"):\n tokens = filename.split(\"/\")\n params = tokens[len(tokens)-1].split(\".\")[0].split(\"-\")\n\n if params[1] == \"Exp2\" and params[2] == \"bm25\" and params[3] == \"pm2\" and params[4] == \"1\":\n print(params)\n ndeval(filename)\n","sub_path":"QryEval/ndeval.py","file_name":"ndeval.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"420936816","text":"import csv\nimport json\n\n\nbooks_dict = {}\nusers_dict = {}\noutput_dict = {\"Users\": []}\n\nwith open(\"../../data_input/input_csv_file_hw3.csv\") as input_csv:\n reader = csv.DictReader(input_csv)\n for count, row in enumerate(reader, 1):\n books_dict[count] = {\n \"title\": row[\"Title\"],\n \"author\": row[\"Author\"],\n \"height\": row[\"Height\"]}\n\nwith open(\"../../data_input/input_json_file_hw3.json\") as input_json:\n users = json.loads(input_json.read())\n for count, user in enumerate(users, 1):\n users_dict[count] = {\n \"name\": user[\"name\"],\n \"gender\": user['gender'],\n \"address\": user['address']\n }\n\nfor number, user in users_dict.items():\n temp_user = user.copy()\n temp_user[\"books\"] = []\n if books_dict:\n temp_user[\"books\"].append(books_dict[number])\n books_dict.pop(number)\n output_dict[\"Users\"].append(temp_user)\n\nwith open(\"../../data_output/output_json_file_hw3.json\", \"w\") as output_json:\n s = json.dumps(output_dict, indent=4)\n output_json.write(s)\n","sub_path":"homework/03_tests_data/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"127296002","text":"from io import BytesIO\nfrom xlwt import Font, Alignment, XFStyle\nfrom odoo import fields, models, api, _\nfrom odoo.tools.translate import _\nfrom datetime import datetime, timedelta\nfrom odoo.exceptions import Warning, ValidationError\nimport base64\n\nfrom odoo import netsvc\n\n\nclass class_attendance(models.Model):\n _name = \"event.class.attendance\"\n\n student_ids = fields.Many2many(\"res.partner\", 'event_class_students_attendance', 'class_id', 'sudent_id',\n \"Students\")\n class_name = fields.Char(\"Class Name\", size=256)\n date = fields.Datetime(\"Date and time\")\n event_id = fields.Many2one(\"event.event\", \"Event\")\n status = fields.Selection([('present', 'Present'), ('absent', 'Absent')], \"Attendance\")\n comment = fields.Text(\"Comments\")\n\n\nclass event_subject(models.Model):\n _name = \"event.subject\"\n\n name = fields.Char('Name')\n has_pdf = fields.Boolean('Includes Study Notes PDF?')\n\n\nclass event_event(models.Model):\n _inherit = 'event.event'\n _order = \"date_begin asc\"\n\n pc_exam = fields.Boolean('PC Exam', help=\"Tic this to view PC Exam types\")\n type_pc_exam = 
fields.Many2one('pc.exam.type', \"Type of PC Exam\")\n event_course_code = fields.Char(string='Course Code')\n date_tz = fields.Selection('_tz_get', string='Timezone', required=True,\n default='Africa/Johannesburg')\n # seats_available = fields.Integer('Seats Availability')\n month = fields.Integer('Month')\n day = fields.Integer('day')\n product_ids = fields.Many2many('product.product', 'event_product_rel', 'event_id', 'product_id', \"Products\")\n class_attendance_ids = fields.One2many(\"event.class.attendance\", 'event_id', \"Class Attendance\")\n subject = fields.Many2one(\"event.subject\", \"Subject\")\n event_course_id = fields.Many2one('event.course', string=\"Course\")\n event_not_combo_discount = fields.Boolean(string=\"Not COMBO Discount%\")\n\n @api.model\n def create(self, vals):\n \"\"\" To write the Number of seats available \"\"\"\n if 'date_begin' in vals.keys() and not vals.get('month', False):\n date_begin = datetime.strptime(vals['date_begin'], \"%Y-%m-%d %H:%M:%S\")\n vals['month'] = date_begin.month\n vals['day'] = date_begin.day\n if 'seats_max' in vals.keys():\n vals['seats_available'] = vals['seats_max']\n return super(event_event, self).create(vals)\n\n @api.multi\n def onchange_start_date(self):\n res = {'value': {}}\n if self.date_end:\n return res\n if self.date_begin and isinstance(self.date_begin, str):\n date_begin = datetime.strptime(self.date_begin, \"%Y-%m-%d %H:%M:%S\")\n date_end = date_begin + timedelta(hours=1)\n month = str(date_begin).split('-')[1]\n day = str(date_begin).split('-')[2].split(' ')[0]\n res['value'] = {'date_end': date_end.strftime(\"%Y-%m-%d %H:%M:%S\"), 'month': int(month), 'day': int(day)}\n\n return res\n\n # @api.multi\n # def get_data_event(self):\n # val = self.read(cr, uid, id,[],context)\n # context={'lang': 'en_US', 'calendar_default_user_id': 1, 'tz': 'Africa/Johannesburg', 'uid': 1}\n # for i in val:\n # obj= self.browse(cr,uid,i['id'],context)\n # i['date_begin']=obj.date_begin\n # i['date_end']=obj.date_end\n # return val\n\n @api.multi\n def get_attendance_report(self):\n try:\n import xlwt\n except:\n raise Warning(_('User Error'), _('Please Install xlwt Library.!'))\n filename = 'Attendance Register.xls'\n fp = BytesIO()\n wb = xlwt.Workbook(encoding='utf-8')\n\n worksheet = wb.add_sheet('PC_EXAM_ATTENDANCE_REGISTER')\n current_obj = self\n\n # -----------------------\n # Excel Font Style & Sizes\n # ----------------------\n fnt1 = Font()\n fnt1.name = 'TimesNewRoman'\n fnt1.bold = True\n fnt1.height = 16 * 0x14\n\n fnt2 = Font()\n fnt2.name = 'verdana'\n fnt2.bold = True\n fnt2.height = 12 * 0x14\n\n fnt3 = Font()\n fnt3.name = 'TimesNewRoman'\n fnt3.bold = True\n fnt3.height = 13 * 0x12\n\n fnt4 = Font()\n fnt4.name = 'TimesNewRoman'\n fnt4.height = 16 * 0x14\n\n # -----------------\n # Excel Alignment\n # ----------------\n al3 = Alignment()\n al3.horz = Alignment.HORZ_CENTER\n al3.vert = Alignment.VERT_CENTER\n\n al4 = Alignment()\n al4.horz = Alignment.HORZ_LEFT\n al4.vert = Alignment.VERT_CENTER\n\n al5 = Alignment()\n al5.horz = Alignment.HORZ_LEFT\n al5.vert = Alignment.VERT_CENTER\n # -----------------------\n # Excel Style\n # ----------------------\n\n style1 = XFStyle()\n style1.alignment = al3\n style1.font = fnt1\n\n style2 = XFStyle()\n style2.font = fnt2\n style2.alignment = al4\n\n style3 = XFStyle()\n style3.alignment = al3\n style3.font = fnt3\n\n style4 = XFStyle()\n style4.alignment = al5\n style4.font = fnt4\n\n # -------------------------------------------------------------------------\n # HEADER 
In Excel\n # -------------------------------------------------------------------------\n\n lst = []\n\n fields = ['CIMA STUDENT NUMBER', 'SURNAME', 'NAME', 'SIGNATURE']\n for i in [1, 2, 3, 4]:\n first_col = worksheet.col(i)\n first_col.width = 500 * 20\n\n date_begin = str(datetime.strptime(current_obj.date_begin, \"%Y-%m-%d %H:%M:%S\") + timedelta(hours=2))\n date_end = str(datetime.strptime(current_obj.date_end, \"%Y-%m-%d %H:%M:%S\") + timedelta(hours=2))\n\n worksheet.merge(0, 16, 1, 4)\n try:\n worksheet.insert_bitmap('/opt/custom_modules/event_price_kt/images/charterquest.bmp', 0, 1, 9, 3)\n except:\n pass\n worksheet.write_merge(17, 18, 1, 1, 'SUBJECT:', style1)\n worksheet.write_merge(17, 18, 2, 4, current_obj.name, style1)\n k = 19\n pc_exam = True\n if current_obj.pc_exam == False:\n pc_exam = False\n worksheet.write_merge(k, k + 1, 1, 1, 'LECTURER:', style1)\n # if current_obj.main_speaker_id:\n # worksheet.write_merge(k,k+1,2, 4,current_obj.main_speaker_id.name , style1)\n # else:\n # worksheet.write_merge(k,k+1,2, 4,\"\", style1)\n k = k + 2\n worksheet.write_merge(k, k + 1, 1, 1, 'DATE:', style1)\n worksheet.write_merge(k, k + 1, 2, 4, (date_begin).split(' ')[0], style1)\n worksheet.write_merge(k + 2, k + 3, 1, 1, 'TIME:', style1)\n worksheet.write_merge(k + 2, k + 3, 2, 4, (date_begin)[11:16] + '-' + (date_end)[11:16], style1)\n if not pc_exam:\n worksheet.write_merge(k + 4, k + 5, 1, 1, 'COURSE STUDY OPTION:', style1)\n # worksheet.write_merge(k+4,k+5,2, 4,current_obj.study.name, style1)\n k = k + 4\n else:\n k = k + 2\n worksheet.write_merge(k + 2, k + 3, 1, 1, 'CAMPUS:', style1)\n worksheet.write_merge(k + 2, k + 3, 2, 4, current_obj.address_id.name, style1)\n worksheet.write_merge(k + 4, k + 5, 1, 4, \"PLEASE SIGN ATTENDANCE REGISTER & ENTER CIMA STUDENT NUMBER\", style1)\n i = 1\n for field in fields:\n worksheet.write_merge(k + 6, k + 7, i, i, field, style1)\n i += 1\n i = k + 8\n\n for obj in current_obj.registration_ids:\n partner_name = obj.partner_id.name\n name = partner_name.split(' ')\n if len(name) == 1:\n name = partner_name.split(' ')\n if obj.partner_id.prof_body_id:\n worksheet.write_merge(i, i + 1, 1, 1, obj.partner_id.prof_body_id, style4)\n else:\n worksheet.write_merge(i, i + 1, 1, 1, \"\", style4)\n try:\n worksheet.write_merge(i, i + 1, 2, 2, name[1], style4)\n except:\n worksheet.write_merge(i, i + 1, 2, 2, '', style4)\n worksheet.write_merge(i, i + 1, 3, 3, name[0], style4)\n worksheet.write_merge(i, i + 1, 4, 4, '', style4)\n i += 2\n\n wb.save(fp)\n out = base64.encodestring(fp.getvalue())\n final_arr_data = {}\n final_arr_data['file_stream'] = out\n final_arr_data['name'] = filename\n\n pl_report_id = self.env['attendance.sheet.report'].create(final_arr_data)\n vals = {\n 'name': 'Attendance Sheet Report',\n 'datas': out,\n 'datas_fname': filename\n }\n return {\n 'res_id': pl_report_id.id,\n 'name': filename,\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'attendance.sheet.report',\n 'type': 'ir.actions.act_window',\n }\n\n\nclass attendance_sheet_report(models.Model):\n _name = \"attendance.sheet.report\"\n _description = \"To generate attendance Sheet\"\n\n file_stream = fields.Binary('File Stream')\n name = fields.Char('File Name', size=255)\n\n\nclass event_type(models.Model):\n _inherit = \"event.type\"\n\n discount = fields.Float(string='Discount (%)')\n professional_body_code = fields.Char('Professional Body Code', size=64)\n order = fields.Integer(string='Order')\n 
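# NOTE: 'order' is expected to be unique across event types; the\n # check_order() constraint defined below raises a ValidationError\n # when a duplicate order value is saved.\n 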
campus_ids=fields.Many2many(\"res.partner\",domain=[('is_campus','=',True)],string=\"Campus\")\n semester_ids=fields.Many2many('cfo.semester.information',string=\"Semester\")\n qualification_ids = fields.Many2many(\"event.qual\", string=\"Qualification\")\n course_option_ids = fields.Many2many(\"cfo.course.option\", string=\"Course Options\")\n\n @api.constrains('order')\n def check_order(self):\n for record in self:\n obj = self.search([('order', '=', record.order), ('id', '!=', record.id)])\n if obj:\n raise ValidationError(\"Order must be unique\")\n\n @api.model\n def get_event_data(self,id):\n res=self.browse(int(id))\n campus_ids=self.env['res.partner'].search_read([('is_campus','=',True),('id','in',res.campus_ids.ids)],['id','name'])\n qua_ids=self.env['event.qual'].search_read([('id','in',res.qualification_ids.ids)],['id','name'])\n sem_ids=self.env['cfo.semester.information'].search_read([('id','in',res.semester_ids.ids)],['id','name'])\n option_ids = self.env['cfo.course.option'].search_read([('id', 'in', res.course_option_ids.ids)], ['id', 'name'])\n return {'campus':campus_ids,'qua':qua_ids,'sem':sem_ids,'options':option_ids}\n\n @api.model\n def get_event_category(self):\n event_categories=self.env['event.type'].search_read([],['id','name']);\n return event_categories\n\n\nclass event_registration(models.Model):\n _inherit = 'event.registration'\n\n nb_register = fields.Integer(string='Number of Participants', required=True, default=1,\n readonly=True, states={'draft': [('readonly', False)]})\n pc_exam = fields.Boolean(string=\"PC Exam\")\n reg_prof_body = fields.Selection([('yes', 'YES'), ('no', 'NO')],\n string='Registered with Professional Body',\n help=\"Student is Registered with Professional Body?\")\n pc_exam_marks = fields.Integer(string='PC Exam Marks')\n pc_exam_result = fields.Selection([('passed', 'Passed'),\n ('failed', 'Failed')],\n string='PC Exam Result')\n start_d = fields.Datetime(string=\"start date\")\n end_d = fields.Datetime(\"end date\")\n encoded_reg_link = fields.Char(\"Encoded Registration Link\")\n\n ######################Reschedule PC Exam Process\n\n # @api.multi\n # def event_reg_url(self):\n # \"\"\" Reschedule Url\"\"\"\n # server_url = self.env['ir.config_parameter'].get_param('web.base.url')\n # server_url = \"http://pcexams.openerponline.co.za\"\n # import hashlib\n # event_reg_id = ids[0]\n # encoded_link = hashlib.sha224(str(event_reg_id)).hexdigest()\n # self.write(cr, uid, event_reg_id, {'encoded_reg_link':encoded_link})\n # url_pattern = server_url +'/reschedule/'+ encoded_link\n # return url_pattern\n\n # @api.multi\n # def registration_open(self, event_id):\n # if not len(event_id) > 1:\n # if event_id.need_rescheduling and not event_id.rescheduling_done:\n # template = self.browse(event_id.id).event_id.email_registration_id\n # subject = 'Reschedule CharterQuest PC Exams Confirmation'+' '+self.browse(event_id.id).event_id.name\n # template.write({'subject': subject})\n # if template:\n # mail_message = template.send_mail(event_id)\n # event_id.write({'need_rescheduling': False, 'rescheduling_done': True})\n #\n # if context==None:\n # context = {}\n # event_obj = self.pool.get('event.event')\n # for register in self.browse(cr, uid, ids, context=context):\n # event_id = register.event_id.id\n # no_of_registration = register.nb_register\n # #event_obj.check_registration_limits_before(cr, uid, [event_id], no_of_registration, context=context)\n # seats_available = register.event_id.seats_available-1\n # registrations = 
len(self.pool.get('event.registration').search(cr,uid,[('event_id','=',event_id),('state','=','open')]))\n # seats_available = register.event_id.seats_max-registrations-1\n # self.pool.get('event.event').write(cr,uid,[event_id], {'seats_available': seats_available})\n # #iTo change date time zone to SA\n # dic={}\n # dic['start_d']=str(datetime.strptime(register.event_id.date_begin, \"%Y-%m-%d %H:%M:%S\")+timedelta(hours=2))\n # dic['end_d']=str(datetime.strptime(register.event_id.date_end, \"%Y-%m-%d %H:%M:%S\")+timedelta(hours=2))\n # self.write(cr,uid,ids,dic)\n #\n # if not len(ids) > 1:\n # if self.browse(cr,uid,ids).need_rescheduling == False and self.browse(cr,uid,ids).need_rescheduling == False:\n # res = self.confirm_registration(cr, uid, ids, context=context)\n # self.mail_user(cr, uid, ids, context=context)\n\n\nclass pc_exam_type(models.Model):\n _name = 'pc.exam.type'\n\n name = fields.Char('Name', size=64, required=True)\n discount_amount = fields.Float(\"Discount Amount\")\n type_event_id = fields.Many2one('event.type', 'Professional Body')\n\n\nclass sale_order(models.Model):\n _inherit = 'sale.order'\n _order = 'id desc'\n\n current_quote = fields.Boolean(\"Current Quote\")\n pc_exam_type = fields.Many2one('pc.exam.type', \"PC exam type\")\n pc_exam = fields.Boolean('PC Exam')\n provisional_booking = fields.Boolean(\"Provisional Booking\")\n link_portal = fields.Char(\"Link\")\n # Fot payment page capture\n diposit_selected = fields.Integer(\"Selected Deposit %\")\n due_amount = fields.Float('Total Due')\n outstanding_amount = fields.Float(string=\"Outstanding Amount\")\n interest_amount = fields.Float(string=\"Interest Amount\")\n months = fields.Integer(\"Months required to pay\")\n out_standing_balance_incl_vat = fields.Float(\"Outstanding Balance (inclusive of VAT & Interest)\")\n monthly_amount = fields.Float(\"Monthly Amount\")\n no_of_days = fields.Char(type='char', string='Days From requested date')\n no_of_reminder_emails_sent = fields.Integer(\"No.of Reminder Emails Sent\", default=0)\n freequote_opt_out = fields.Boolean(\"FreeQuote Opt-out\")\n debit_link = fields.Char(string=\"url\")\n\n @api.model\n def send_freequote_remainder_email(self):\n # \"\"\"Email to send remainder about free quote\"\"\"\n sale_order = self.env['sale.order']\n year = datetime.now().year\n\n start_date = str(year) + \"-01-01\"\n end_date = str(year) + \"-12-31\"\n\n draft_domain1 = [('state', '=', 'draft'), ('date_order', '>=', start_date),\n ('date_order', '<=', end_date)]\n draft_domain = [('state', '=', 'draft'), ('date_order', '>=', start_date),\n ('date_order', '<=', end_date)]\n order_domain = [('state', '!=', 'draft'), ('date_order', '>=', start_date),\n ('date_order', '<=', end_date)]\n\n sem1_ids = sale_order.search(draft_domain1 + [('semester_id.semester', '=', '1')])\n sem2_ids = sale_order.search(draft_domain1 + [('semester_id.semester', '=', '2')])\n sem1_filtered_ids = []\n\n for sale in sem1_ids:\n partner = sale.partner_id.id\n try:\n max_id = max(sale_order.search(draft_domain + [('semester_id.semester', '=', '1'),\n ('partner_id', '=', partner)]))\n if max_id.id not in sem1_filtered_ids:\n sem1_filtered_ids.append(max_id.id)\n except:\n pass\n\n sem2_filtered_ids = []\n\n for sale in sem2_ids:\n partner = sale.partner_id.id\n if sale_order.search(order_domain + [('semester_id.semester', '=', '2'),\n ('partner_id', '=', partner)]):\n continue\n else:\n try:\n max_id = max(sale_order.search(\n draft_domain + [('semester_id.semester', '=', '2'), ('partner_id', '=', 
partner)]))\n if max_id.id not in sem2_filtered_ids:\n sem2_filtered_ids.append(max_id.id)\n except:\n pass\n print(\"\\n\\n\\n\\n\\n======sem1_filtered_ids====\", sem1_filtered_ids)\n print(\"\\n\\n\\n\\n\\n======sem2_filtered_ids====\", sem2_filtered_ids)\n ids = list(set(sem1_filtered_ids + sem2_filtered_ids))\n print(\"\\n\\n\\n\\n\\n======self.browse====\",ids,self.browse(ids))\n for sale_order_id in self.browse(ids):\n print(\"\\n\\n\\n\\n\\n=====sale_order_id.freequote_opt_out======\",sale_order_id.freequote_opt_out)\n if not sale_order_id.freequote_opt_out:\n print(\"\\n\\n\\n\\n\\n=====sale_order_id.no_of_reminder_emails_sent======\", sale_order_id.no_of_reminder_emails_sent)\n if sale_order_id.no_of_reminder_emails_sent == 4:\n continue\n print(\"\\n\\n\\n\\n\\n=====sale_order_id.no_of_days======\",\n sale_order_id.no_of_days)\n if sale_order_id.no_of_days in ['7', '14 days', '21 days', '28 days']:\n no_of_reminder_emails_sent = sale_order_id.no_of_reminder_emails_sent + 1\n sale_order_id.sudo().write({'no_of_reminder_emails_sent': no_of_reminder_emails_sent})\n template_id = self.env['mail.template'].search([('name', '=', \"Freequote Reminder Email\")])\n if template_id:\n template_id.send_mail(sale_order_id.id)\n return True\n\n\nclass sale_order_line(models.Model):\n _inherit = \"sale.order.line\"\n\n pcexam_voucher_id = fields.Many2one('pcexams.voucher', 'PCExams Voucher')\n\n @api.multi\n def _prepare_invoice_line(self, qty):\n res = super(sale_order_line, self)._prepare_invoice_line(qty)\n res['pcexam_voucher_id'] = self.pcexam_voucher_id and self.pcexam_voucher_id.id\n return res\n\n\nclass res_partner(models.Model):\n _inherit = 'res.partner'\n\n examwritten = fields.Selection([('1', 'Yes'), ('2', 'No')], \"Ever Written PC Exam Before?\")\n dob = fields.Date('Date of Birth')\n prof_body_id = fields.Char(\"Prof Body Student ID\", size=56)\n student_company = fields.Char(\"Students Company\", size=256)\n prof_password = fields.Char(\"Prof Body Login Password\", size=56)\n\n\nclass cancel_reason(models.Model):\n _name = \"cancel.reason\"\n\n reason = fields.Selection([('non_payment', 'Non Payment'),\n ('not_reg_prof_body', 'Not Registered on Professional Body Exam Register'),\n ('not_attend', 'Non Attendance')], string=\"Reason for Cancellation\", required=True)\n\n @api.one\n def registration_cancel(self):\n current_record = self.env['event.registration'].browse(self._context.get('active_ids', []))\n current_record.state = 'cancel'\n return {'type': 'ir.actions.act_window_close'}\n\n\nclass account_invoice(models.Model):\n _inherit = \"account.invoice\"\n\n payu_reference = fields.Char('PayU Reference', size=256)\n payu_transaction_id = fields.Char('PayU Transaction Number', size=256)\n\n # @api.multi\n # def confirm_paid(self):\n # \n # if context is None:\n # context = {}\n # self.write(cr, uid, ids, {'state':'paid'}, context=context)\n # for invoice in self.browse(cr, uid, ids, context=context):\n # if invoice.sale_order_id and invoice.sale_order_id.pc_exam:\n # self.send_confirmation_email(cr,uid,ids,context)\n # event_reg_id = self.pool.get('event.registration').search(cr,uid,[('origin','=',invoice.sale_order_id.name)])\n # self.pool.get('event.registration').registration_open(cr,uid,event_reg_id,context)\n # return True\n # \n # @api.multi\n # def send_confirmation_email(self):\n # for invoice in self.browse(cr, uid, ids, context=context):\n # template_id = self.pool.get('email.template').search(cr,uid,[('name','=',\"Invoice - Confirmation\")])\n # if template_id:\n # 
mail_message = self.pool.get('email.template').send_mail(cr,uid,template_id[0],invoice.id)\n # return True\n\n\nclass account_invoice_line(models.Model):\n _inherit = \"account.invoice.line\"\n\n event_id = fields.Many2one('event.event', \"Event\")\n pcexam_voucher_id = fields.Many2one('pcexams.voucher', 'PCExams Voucher')\n\n\nclass EventCourses(models.Model):\n _name = 'event.course'\n\n name = fields.Char(string=\"Course Name\")\n\n\nclass ProductProduct(models.Model):\n _inherit = 'product.product'\n\n course_material_id = fields.Many2one('course.material', string=\"Course Material\")\n\n\nclass CourseMaterial(models.Model):\n _name = 'course.material'\n\n name = fields.Char(string='Course Material Name')\n event_id = fields.Many2one('event.event', string=\"Event\")\n study_option_id = fields.Many2one('product.product', string=\"Study Option\", domain=[('event_ok', '=', True)])\n material_ids = fields.One2many('material.product', 'course_material_id', string=\"Material\")\n\n @api.multi\n @api.constrains('event_id', 'study_option_id')\n def check_order(self):\n for record in self:\n obj = self.sudo().search([('event_id', '=', record.event_id.id),\n ('study_option_id', '=', record.study_option_id.id),\n ('id', '!=', self.id)])\n if obj:\n raise ValidationError(\"Combination must be unique.\")\n\n\nclass MaterialProduct(models.Model):\n _name = 'material.product'\n\n material_product_id = fields.Many2one('product.product', 'Materials')\n course_material_id = fields.Many2one('course.material', string=\"Course Material\")\n","sub_path":"event_price_kt/models/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":23394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"513627534","text":"# 编写一个高效的算法来判断 m x n 矩阵中,是否存在一个目标值。该矩阵具有如下特性: \n# \n# \n# 每行中的整数从左到右按升序排列。 \n# 每行的第一个整数大于前一行的最后一个整数。 \n# \n# \n# 示例 1: \n# \n# 输入:\n# matrix = [\n# [1, 3, 5, 7],\n# [10, 11, 16, 20],\n# [23, 30, 34, 50]\n# ]\n# target = 3\n# 输出: true\n# \n# \n# 示例 2: \n# \n# 输入:\n# matrix = [\n# [1, 3, 5, 7],\n# [10, 11, 16, 20],\n# [23, 30, 34, 50]\n# ]\n# target = 13\n# 输出: false \n# Related Topics 数组 二分查找 \n# 👍 254 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution(object):\n def searchMatrix(self, matrix, target):\n \"\"\"\n :type matrix: List[List[int]]\n :type target: int\n :rtype: bool\n \"\"\"\n\n \"\"\"\n 拉直后二分查找\n \"\"\"\n\n if not matrix:\n return False\n row = len(matrix)\n col = len(matrix[0])\n\n length = row * col\n\n left = 0\n right = length - 1\n\n # if left == right:\n # return matrix[left][right] == target\n\n while left < right:\n mid = (left + right)/2\n row_mid = mid / col\n col_mid = mid % col\n # print(\"row_mid\",row_mid,\"col_mid\",col_mid)\n if matrix[row_mid][col_mid] < target:\n left = mid + 1\n elif matrix[row_mid][col_mid] > target:\n right = mid\n else:\n return True\n # print(\"left\", left, \"right\", right)\n return left == right and matrix[left/col][left%col] == target\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"week04/[74]搜索二维矩阵.py","file_name":"[74]搜索二维矩阵.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"219573611","text":"import re\n\nfrom datetime import datetime\n\ndate_re = re.compile('^(?P[0-9]{4})-(?P(0[0-9]|1[0-2]))-(?P([0-2][0-9]|3[01]))-(?P([01][0-9]|2[0-4]))h(?P([0-5][0-9]|60))')\n\n\nclass Snapshot(object):\n def __init__(self, 
module, dataset, interval, label, size=None):\n \"\"\"using ZFS cannonical name for snapshot\"\"\"\n\n self.module = module\n self.dataset = dataset\n self.interval = interval\n self.label = label\n self.prefix = \"%s-auto-snap\" % self.module.name\n\n match = date_re.match(self.label)\n if match:\n self.is_auto_snap = True\n date = {key: int(value) for key, value in match.groupdict().items()}\n self.datetime = datetime(date['year'], date['month'], date['day'], date['hour'], date['minute'])\n else:\n self.is_auto_snap = False\n self.datetime = None\n\n if size is not None:\n self.size = size\n else:\n self.size = self.module.get_snapshot_size(self)\n\n def __repr__(self):\n return \"Snapshot(%s, %s, %s)\" % (self.dataset, self.interval, self.label)\n\n def __str__(self):\n return self.name\n\n @property\n def name(self):\n return self.module.print_snapshot(self.dataset, self.interval, self.label)\n\n def destroy(self):\n self.module.destroy_snapshot(self.name)\n","sub_path":"tardis/snapshot.py","file_name":"snapshot.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"523261222","text":"import torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.nn import functional as F\nimport torch.optim as optim\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\nclass My_BasicBlock(nn.Module):\n expansion = 1\n med_out = None\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(My_BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, width, stride, groups, dilation)\n self.bn1 = norm_layer(width)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n My_BasicBlock.med_out = out\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n @classmethod\n def get_med_out(cls):\n return cls.med_out\n\n\n","sub_path":"features/my_block.py","file_name":"my_block.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"56329548","text":"#!/usr/bin/env python\n\"\"\"\nmsodde.py\n\nmsodde is a script to parse MS Office documents\n(e.g. 
Word, Excel), to detect and extract DDE links.\n\nSupported formats:\n- Word 2007+ (.docx, .dotx, .docm, .dotm)\n\nAuthor: Philippe Lagadec - http://www.decalage.info\nLicense: BSD, see source code or documentation\n\nmsodde is part of the python-oletools package:\nhttp://www.decalage.info/python/oletools\n\"\"\"\n\n# === LICENSE ==================================================================\n\n# msodde is copyright (c) 2017 Philippe Lagadec (http://www.decalage.info)\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import print_function\n\n#------------------------------------------------------------------------------\n# CHANGELOG:\n# 2017-10-18 v0.52 PL: - first version\n# 2017-10-20 PL: - fixed issue #202 (handling empty xml tags)\n\n__version__ = '0.52dev2'\n\n#------------------------------------------------------------------------------\n# TODO: detect beginning/end of fields, to separate each field\n# TODO: test if DDE links can also appear in headers, footers and other places\n# TODO: add xlsx support\n\n#------------------------------------------------------------------------------\n# REFERENCES:\n\n\n#--- IMPORTS ------------------------------------------------------------------\n\n# import lxml or ElementTree for XML parsing:\ntry:\n # lxml: best performance for XML processing\n import lxml.etree as ET\nexcept ImportError:\n import xml.etree.cElementTree as ET\n\nimport argparse\nimport zipfile\nimport os\nimport sys\n\n\n# === CONSTANTS ==============================================================\n\n\nNS_WORD = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'\n\n# XML tag for 'w:instrText'\nTAG_W_INSTRTEXT = '{%s}instrText' % NS_WORD\nTAG_W_FLDSIMPLE = '{%s}fldSimple' % NS_WORD\nTAG_W_INSTRATTR= '{%s}instr' % NS_WORD\n\n# === FUNCTIONS ==============================================================\n\ndef process_args():\n parser = argparse.ArgumentParser(description='A python tool to detect and extract DDE links in MS Office files')\n parser.add_argument(\"filepath\", help=\"path of the file to be analyzed\")\n\n args = parser.parse_args()\n\n if not os.path.exists(args.filepath):\n print('File {} does not exist.'.format(args.filepath))\n sys.exit(1)\n\n return args\n\n\n\ndef 
process_file(filepath):\n z = zipfile.ZipFile(filepath)\n data = z.read('word/document.xml')\n z.close()\n # parse the XML data:\n root = ET.fromstring(data)\n text = u''\n # find all the tags 'w:instrText':\n # (each is a chunk of a DDE link)\n for elem in root.iter(TAG_W_INSTRTEXT):\n # concatenate the text of the field, if present:\n if elem.text is not None:\n text += elem.text\n\n for elem in root.iter(TAG_W_FLDSIMPLE):\n # concatenate the attribute of the field, if present:\n if elem.attrib is not None:\n text += elem.attrib[TAG_W_INSTRATTR]\n \n\n return text\n\n\n#=== MAIN =================================================================\n\ndef main():\n # print banner with version\n print ('msodde %s - http://decalage.info/python/oletools' % __version__)\n print ('THIS IS WORK IN PROGRESS - Check updates regularly!')\n print ('Please report any issue at https://github.com/decalage2/oletools/issues')\n print ('')\n\n args = process_args()\n print('Opening file: %s' % args.filepath)\n text = process_file(args.filepath)\n print ('DDE Links:')\n print(text)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"oletools/msodde.py","file_name":"msodde.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"388694521","text":"import numpy as np\nimport warnings\nimport re\nfrom math import pi\nimport os, sys\nimport xml.etree.cElementTree as ET\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\nclass _mat():\n def __init__(self, filename, grps, tr_scatt=False):\n \"\"\" Constructor of a single material, reads from a provided\n filename and parses the required data from the xml file\n\n Material properties are as follows:\n self.gen: any tags in , i.e. name, id\n self.prop: any tags in material/prop, i.e. nu\n self.gconst: any non-xsec tags in grp_struct, i.e. chi\n self.xsec: any tags in xsec, i.e. sig_t, sig_a, sig_f\n self.derived: derived quantities:\n - inv_sig_t: inverse of if present\n - diff_coef: diffusion coef, if present\n - chi_nu_sig_f: chi*nu*sig_f if and present\n \"\"\"\n\n # Verify file exists\n assert os.path.exists(filename), \"Material file: \" + filename\\\n + \" does not exist\"\n\n self.n_grps = grps\n self.gen = {} # General properties\n self.prop = {} # Physical properties\n self.gconst = {} # Group non cross-section data\n self.xsec = {} # Group cross-section data\n self.derived = {} # Derived quantities\n\n # All dictionaries that hold material parameters\n self.all_dict= [self.gen, self.prop, self.gconst,\n self.xsec, self.derived]\n\n self.__parse_XML__(filename, grps) # Parse input XML file\n self.__validate__(filename) # Validate material data\n\n # If needed, transpose scattering matrix\n if 'sig_s' in self.xsec and tr_scatt: \n self.xsec['sig_s'] = np.transpose(self.xsec['sig_s'])\n\n if self.xsec:\n self.__derive_xsec__() # Calc other xsecs\n \n if 'sig_t' in self.xsec:\n self.__derive_sig_t__() # Calc sig_t derv. prop\n\n if 'sig_s' in self.xsec and 'g_thermal' in self.gconst:\n self.__derive_thermal__() # Calc thermal eigenvalues\n\n if self.isSource:\n self.__derive_fiss__() # Calc fission derv. 
prop\n\n if 'sig_t' in self.xsec and 'ksi_ua' in self.derived and\\\n 'g_thermal' in self.gconst:\n self.__derive_acceleration__() # Calc acceleration\n\n # PUBLIC FUNCTIONS\n\n def get_props(self):\n # Returns an array of all the properties that the material has\n return [d.keys() for d in self.all_dict]\n\n def get(self, prop):\n # Returns the value of a given property prop\n try:\n return next(d[prop] for d in self.all_dict if prop in d)\n except StopIteration:\n raise RuntimeError(\"Invalid material property for \"\n + self.gen['id'] + \": \"+ prop)\n\n # INITIALIZATION FUNCTIONS ========================================\n\n def __derive_acceleration__(self):\n # Calculate acceleration properties\n i = int(self.gconst['g_thermal'])\n\n sig_t_ua = np.dot(self.derived['ksi_ua'],\n self.xsec['sig_t'][i:])\n\n diff_coef_ua = np.dot(self.derived['ksi_ua'],\n self.derived['diff_coef'][i:])\n\n try:\n sig_r_ua = sig_t_ua - \\\n np.sum(np.multiply(self.derived['ksi_ua'],\n self.xsec['sig_s'][i:,i:]))\n self.derived.update({'sig_r_ua': sig_r_ua})\n except KeyError:\n pass\n\n self.derived.update({'sig_t_ua': sig_t_ua})\n self.derived.update({'diff_coef_ua': diff_coef_ua})\n\n\n def __derive_sig_t__(self):\n # Calculate derived quantities based on sig_t\n\n # Find non-zero entries of Sig_t\n non_zero = self.xsec['sig_t']!=0\n # Inverse Sig_t\n inv_sig_t = np.power(self.xsec['sig_t'], -1,\n where=non_zero)\n # Diffusion Coeff\n diff_coef = np.power(3. * self.xsec['sig_t'], -1,\n where=non_zero)\n\n self.derived.update({'diff_coef': diff_coef})\n self.derived.update({'inv_sig_t': inv_sig_t})\n\n def __derive_fiss__(self):\n # Calculate derived quantities based on fission properties\n\n if 'chi' in self.gconst and 'sig_f' in self.xsec:\n vec_2 = self.prop['nu']*self.xsec['sig_f']\n elif 'nu_sig_f' in self.xsec:\n vec_2 = self.xsec['nu_sig_f']\n elif 'nu_sig_f' in self.gconst:\n vec_2 = self.gconst['nu_sig_f']\n else:\n vec_2 = np.array([0,0])\n \n chi_nu_sig_f = np.outer(self.gconst['chi'], vec_2)\n \n self.derived.update({'chi_nu_sig_f': chi_nu_sig_f})\n\n def __derive_thermal__(self):\n # Calculate quantities based on thermal scattering\n i = int(self.gconst['g_thermal'])\n\n # Slice scattering matrix based on g_thermal\n thermal = self.xsec['sig_s'][i:, i:]\n th_d_i = np.tril(thermal)\n th_u = np.triu(thermal, 1)\n \n total = np.diag(self.xsec['sig_t'][i:])\n try:\n M = np.matmul(np.linalg.inv(total - th_d_i), th_u)\n w,v = np.linalg.eig(M)\n ksi_ua = v[:, np.argmax(np.absolute(w))]\n ksi_ua = ksi_ua/np.sum(ksi_ua)\n except np.linalg.LinAlgError:\n warnings.warn(\"Matrix for thermal eigenvalue is singular,\" +\n \"setting value of ksi_ua to 0\")\n ksi_ua = np.zeros(self.n_grps - i)\n \n self.derived.update({'ksi_ua': ksi_ua})\n\n def __derive_xsec__(self):\n if 'sig_t' in self.xsec and 'sig_s' in self.xsec:\n sig_r = self.xsec['sig_t'] - np.diag(self.xsec['sig_s'])\n self.xsec.update({'sig_r': sig_r })\n \n def __parse_XML__(self, filename, grps):\n # Parse the XML file\n\n # These are the tags that identify the different sections of\n # the XML file. They are here to make it easy to change it in\n # the future.\n tag_prop = \"prop\" # Phys. 
Properties\n tag_grp_structures = \"grp_structures\" # Group structures\n tag_xsec = \"xsec\" # Cross-sections\n\n # Get data and root\n root = ET.parse(filename).getroot()\n\n # Parse top level tags, get roots for physical properties and\n # group structures\n for el in root.findall('./material/'):\n if el.tag == tag_prop:\n prop_root = el\n elif el.tag == tag_grp_structures:\n grp_structs = el\n else:\n self.__dict_add__(self.gen, el)\n\n # Parse physical properties\n for el in prop_root:\n self.__dict_add__(self.prop, el)\n\n # Find correct group structure, or throw error\n try:\n grp_root = grp_structs.findall(\".*[@n='\" +\n str(self.n_grps) + \"']\")[0]\n except IndexError:\n raise KeyError(filename + \": group structure for n=\" +\n str(self.n_grps) + \" not found\")\n\n # Parse non cross-section constant data\n for el in grp_root:\n if el.tag == tag_xsec:\n xsec_root = el\n else:\n self.__dict_add__(self.gconst, el)\n\n # Parse cross-sections\n for el in xsec_root.findall('*'):\n self.__dict_add__(self.xsec, el)\n\n if 'nu' in self.prop and 'sig_f' in self.xsec:\n self.isSource = True\n elif 'nu_sig_f' in self.xsec or 'nu_sig_f' in self.gconst:\n self.isSource = True\n else:\n self.isSource = False\n\n def __validate__(self, filename):\n # Perform validation checks on data\n\n # Verify it has a material ID\n assert 'id' in self.gen,\\\n filename + \": has no valid material id\"\n\n # Verify that all cross-sections have the same number of groups\n # by checking that the dimensions are all identical\n\n for key, a in self.xsec.iteritems():\n if np.shape(a) != (self.n_grps,) and\\\n np.shape(a) != (self.n_grps, self.n_grps):\n raise RuntimeError(filename +\n \"\"\": Cross-sections must have the\n same dimensions, error with: \"\"\" + key)\n\n # Verify that all cross-sections are positive\n if not all([np.all(m) for m in map(lambda x: x>=0,\n self.xsec.values())]):\n raise RuntimeError(filename +\n ': contains negative cross-section.')\n\n # Verify that g_thermal is not higher than the number of groups\n if 'g_thermal' in self.gconst:\n if self.gconst['g_thermal'] + 1 > self.n_grps:\n raise RuntimeError(filename +\n ': g_thermal > n_groups')\n if not self.gconst['g_thermal'].is_integer():\n raise RuntimeError(filename +\n ': g_thermal must be an integer')\n\n ## UTILITY FUNCTIONS =============================================\n\n def __dict_add__(self, dict, el):\n try:\n # Try to convert to float\n val = float(el.text)\n except ValueError:\n try:\n # Try to convert if a comma separated list of floats\n val = np.array(map(float, el.text.split(',')))\n except ValueError:\n try:\n # Try to convert to a matrix\n val = np.array([map(float, s.split(',')) for s in\n el.text.split(';')])\n # Just store the string\n except ValueError:\n val = el.text\n dict.update({el.tag: val})\n\n\nclass mat_lib():\n def __init__(self, n_grps, files=[], tr_scatt=False):\n '''Material Library class, holds multiple _mat objects provided\n at initialization or added later.\n\n files: list of filenames to material xml files\n '''\n self.mats = [] # Holds all materials\n self._n_grps = n_grps # Energy groups\n\n for f in files:\n self.add(f, tr_scatt)\n\n def add(self, filename, tr_scatt=False):\n \"\"\" Adds the material stored in filename to the library, if it\n is not already in there. 
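Raises a RuntimeError if a material with the same id is already\n in the library.\n 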
\"\"\"\n\n new_mat = _mat(filename, grps = self._n_grps,\n tr_scatt=tr_scatt)\n\n if new_mat.gen['id'] not in self.ids():\n self.mats.append(new_mat)\n else:\n raise RuntimeError(\"Cannot add file \" + filename +\n \", mat_id already exists in material library\")\n\n def n_grps(self):\n return self._n_grps\n\n def ids(self):\n \"\"\" Returns the id's of stored materials \"\"\"\n return [mat.gen['id'] for mat in self.mats]\n\n\n def get(self, prop, mat_id=''):\n \"\"\" Returns a dictionary with material ids as keys and the\n specified property as values\"\"\"\n\n if prop == 'n_grps':\n return self._n_grps\n \n data = self.__mat_data__(prop)\n\n if mat_id:\n try:\n return data[mat_id]\n except KeyError:\n raise KeyError(\"Bad material id\")\n else:\n return data\n\n def get_per_str(self, *args, **kwargs):\n try:\n return np.divide(self.get(*args, **kwargs), 4.0*np.pi)\n except TypeError:\n return {k: np.divide(v, 4.0*np.pi) for k, v\n in self.get(*args, **kwargs).iteritems()}\n\n def props(self, mat_id=None):\n data = {}\n for mat in self.mats:\n data.update({mat.gen['id']: mat.get_props()})\n if mat_id:\n return data[mat_id]\n else:\n return data\n\n def __mat_data__(self, prop):\n data = {}\n\n for mat in self.mats:\n data.update({mat.get('id') : mat.get(prop)})\n\n return data\n\nclass mat_map():\n def __init__(self, lib, layout, layout_dict, x_max, n, x_min=0,\n y_min=0, y_max=None):\n \"\"\" mat map will create a material map based on a string input\n map and problem parameters \"\"\"\n x = [x_min, x_max]\n y = [y_min, y_max] if y_max else [y_min, x_max]\n\n self.mat_dict = layout_dict\n self.mat_lib = lib\n\n try:\n self.x = map(float, x)\n self.y = map(float, y)\n except ValueError:\n raise ValueError(\"x and y domain limits must be numbers\")\n\n assert n > 0, \"Total cells must be an integer greater than 0\"\n \n self.dx = x[1]/float(n)\n self.dy = y[1]/float(n)\n self.n = int(n)\n\n #Generate layout\n # Split into words\n split_layout = re.sub(\"[^\\w]\", \" \", layout).split()\n\n # Verify a square number have been given\n n_dim = np.sqrt(len(split_layout))\n\n assert n_dim.is_integer(),\\\n \"Layout must have a square number of entries\"\n\n assert n >= n_dim, \"Total cells n must be greater than \" +\\\n \"or equal to the size of the provided layout\"\n\n assert (n % n_dim) == 0, \"n (mesh cells) must be a multiple of the size\" +\\\n \" of the provided layout\"\n\n n_dim = int(n_dim)\n\n self.layout = [split_layout[i:i + n_dim] for i in\n range(0, len(split_layout), n_dim)]\n\n self.array = self.__build_array__()\n\n def plot(self): # pragma: no cover\n n = int(np.sqrt(len(self.array)))\n mat_set = list(set(self.array))\n layout = [self.array[x:x+n] \n for x in range(0,len(self.array), n)]\n for j, s in enumerate(mat_set):\n for i, row in enumerate(layout):\n layout[i] = [r.replace(s, str(j*10)) for r in row]\n fl_array = np.flipud(np.array([map(float,row) for row in layout]))\n\n plt.figure(figsize=(6,6))\n values = np.unique(fl_array)\n im = plt.imshow(fl_array, interpolation='none', extent=[0,n,0,n])\n colors = [ im.cmap(im.norm(value)) for value in values]\n # create a patch (proxy artist) for every color \n patches = [ mpatches.Patch(color=colors[i], \n label=\"{l}\".format(l=mat_set[int(values[i]/10.0)])) for i in range(len(values)) ]\n # put those patched as legend-handles into the legend\n plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. 
)\n\n plt.grid(True)\n plt.show()\n \n \n def get(self, prop, loc):\n # Get property from material at given location, loc is either\n # the index of the location k or a tuple of x and y\n if isinstance(loc, tuple):\n k = int(loc[0]/self.dx) + int(loc[1]/self.dy)*self.n\n else:\n k = loc\n\n return self.mat_lib.get(prop=prop, mat_id=self.array[k])\n\n def __build_array__(self):\n # Builds the array\n try:\n array = []\n for row in reversed(self.layout):\n to_add = []\n for col in row:\n to_add += int(1.0/len(row)*self.n)*[self.mat_dict[col]]\n to_add = int(1.0/len(row)*self.n)*to_add\n array += to_add\n return array\n except KeyError:\n raise KeyError(\"Bad material id in mat_dictionary\")\n","sub_path":"material.py","file_name":"material.py","file_ext":"py","file_size_in_byte":15552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"220969860","text":"\"\"\"\n pyexcel_io.database.sql\n ~~~~~~~~~~~~~~~~~~~\n\n The lower level handler for database import and export\n\n :copyright: (c) 2014-2017 by Onni Software Ltd.\n :license: New BSD License, see LICENSE for more details\n\"\"\"\nfrom pyexcel_io.book import BookReader, BookWriter\nfrom pyexcel_io.sheet import SheetWriter\nfrom pyexcel_io.utils import is_empty_array, swap_empty_string_for_none\nimport pyexcel_io.constants as constants\nfrom pyexcel_io.database.querysets import QuerysetsReader\nfrom ._common import TableExportAdapter, TableExporter\nfrom ._common import TableImporter, TableImportAdapter\n\n\nclass PyexcelSQLSkipRowException(Exception):\n \"\"\"\n Raised this exception to skipping a row\n while data import\n \"\"\"\n pass\n\n\nclass SQLTableReader(QuerysetsReader):\n \"\"\"Read a table\n \"\"\"\n def __init__(self, session, table, export_columns=None, **keywords):\n everything = session.query(table).all()\n column_names = None\n if export_columns:\n column_names = export_columns\n else:\n if len(everything) > 0:\n column_names = sorted([\n column for column in everything[0].__dict__\n if column != '_sa_instance_state'])\n QuerysetsReader.__init__(self, everything, column_names, **keywords)\n\n\nclass SQLTableWriter(SheetWriter):\n \"\"\"Write to a table\n \"\"\"\n def __init__(self, session, table_params, auto_commit=True, **keywords):\n self.__session = session\n self.__table = None\n self.__initializer = None\n self.__mapdict = None\n self.__column_names = None\n self.__auto_commit = auto_commit\n self._keywords = keywords\n if len(table_params) == 4:\n (self.__table, self.__column_names,\n self.__mapdict, self.__initializer) = table_params\n else:\n raise ValueError(constants.MESSAGE_INVALID_PARAMETERS)\n\n if isinstance(self.__mapdict, list):\n self.__column_names = self.__mapdict\n self.__mapdict = None\n\n def write_row(self, array):\n if is_empty_array(array):\n print(constants.MESSAGE_EMPTY_ARRAY)\n else:\n new_array = swap_empty_string_for_none(array)\n try:\n self._write_row(new_array)\n except PyexcelSQLSkipRowException:\n print(constants.MESSAGE_IGNORE_ROW)\n print(new_array)\n\n def _write_row(self, array):\n row = dict(zip(self.__column_names, array))\n obj = None\n if self.__initializer:\n # allow initinalizer to return None\n # if skipping is needed\n obj = self.__initializer(row)\n if obj is None:\n obj = self.__table()\n for name in self.__column_names:\n if self.__mapdict is not None:\n key = self.__mapdict[name]\n else:\n key = name\n setattr(obj, key, row[name])\n self.__session.add(obj)\n\n def close(self):\n if self.__auto_commit:\n 
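# write_row() only stages rows with session.add(); committing is deferred\n # to close() so a whole import shares one transaction. A caller passing\n # auto_commit=False must finish the transaction itself, e.g.\n # (hypothetical usage, not part of this module):\n # writer = SQLTableWriter(session, table_params, auto_commit=False)\n # writer.write_row(row); session.commit() # or session.rollback()\n 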
self.__session.commit()\n\n\nclass SQLTableExportAdapter(TableExportAdapter):\n def __init__(self, model, export_columns=None):\n TableExportAdapter.__init__(self, model, export_columns)\n self.table = model\n\n def get_name(self):\n return getattr(self.table, '__tablename__', None)\n\n\nclass SQLTableExporter(TableExporter):\n def __init__(self, session):\n TableExporter.__init__(self)\n self.session = session\n\n\nclass SQLBookReader(BookReader):\n def open(self, file_name, **keywords):\n raise NotImplementedError()\n\n def open_stream(self, file_stream, **keywords):\n raise NotImplementedError()\n\n def open_content(self, file_content, **keywords):\n self.__exporter = file_content\n self._load_from_tables()\n\n def read_sheet(self, native_sheet):\n reader = SQLTableReader(\n self.__exporter.session,\n native_sheet.table,\n native_sheet.export_columns)\n return reader.to_array()\n\n def _load_from_tables(self):\n self._native_book = self.__exporter.adapters\n\n\nclass SQLTableImportAdapter(TableImportAdapter):\n def __init__(self, model):\n TableImportAdapter.__init__(self, model)\n self.table = model\n\n def get_name(self):\n return getattr(self.table, '__tablename__', None)\n\n\nclass SQLTableImporter(TableImporter):\n def __init__(self, session):\n TableImporter.__init__(self)\n self.session = session\n\n\nclass SQLBookWriter(BookWriter):\n def open_content(self, file_content, auto_commit=True, **keywords):\n self.__importer = file_content\n self.__auto_commit = auto_commit\n\n def create_sheet(self, sheet_name):\n sheet_writer = None\n adapter = self.__importer.get(sheet_name)\n if adapter:\n sheet_writer = SQLTableWriter(\n self.__importer.session,\n (adapter.table, adapter.column_names,\n adapter.column_name_mapping_dict,\n adapter.row_initializer),\n auto_commit=self.__auto_commit\n )\n return sheet_writer\n\n\n_registry = {\n \"file_type\": constants.DB_SQL,\n \"reader\": SQLBookReader,\n \"writer\": SQLBookWriter,\n \"stream_type\": \"special\",\n \"library\": \"built-in\"\n}\n\nexports = (_registry,)\n","sub_path":"pyexcel_io/database/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"156533161","text":"from django.db import models\n\nfrom provarme_dashboard.core.models import AbstractBaseModel\n\n\nclass Store(AbstractBaseModel):\n\n STATUS = (\n (True, 'Ativo'),\n (False, 'Inativo'),\n )\n\n name = models.CharField('Nome da Loja', max_length=100)\n url = models.URLField('URL da Loja', max_length=100)\n client_id_mp = models.CharField('CLIENT_ID (Mercado Pago)', max_length=255)\n client_secret_mp = models.CharField('CLIENT_SECRET (Mercado Pago)', max_length=255)\n status = models.BooleanField(verbose_name='Situação', default=True, choices=STATUS)\n\n class Meta:\n verbose_name = 'Loja'\n verbose_name_plural = 'Lojas'\n\n def __str__(self):\n return self.name\n","sub_path":"provarme_dashboard/store/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"646890348","text":"import timing\nimport time\nimport os\n\n'''\nThis timer is easy to use.\nUsing a custom made timing script, markers can be timed with ease.\nAll you need to do is input marker names, and press enter when you complete them.\n'''\n\nos.system(\"clear\")\nmarker = \"blank\"\nmarkers = []\nwhile marker != \"\":\n\tmarker = input(\"Marker name: \")\n\tif 
marker:\n\t\tmarkers.append(marker)\n\t\t\nos.system(\"clear\")\n\nfor i in range(10):\n\tprint(str(10 - i) + \" seconds remaining to start...\")\n\ttime.sleep(1)\n\tos.system(\"clear\")\n\nos.system(\"clear\")\n\nfor marker in markers:\n\ttiming.start(marker)\n\tinput(\"Timing for marker \\\"\" + marker + \"\\\"\")\n\ttiming.stop(marker)\n\tos.system(\"clear\")\n\nos.system(\"clear\")\n\ntiming.sumtimes(markers)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"198246501","text":"from base import BaseAPITestCase\n\nfrom contentcuration.models import Task\nfrom contentcuration.tasks import create_async_task, non_async_test_task\n# from celery.contrib.testing.worker import start_worker\n# from contentcuration.celery import app\n\n\nclass AsyncTaskTestCase(BaseAPITestCase):\n    \"\"\"\n    These tests check that creating and updating Celery tasks using the create_async_task function result in\n    an up-to-date Task object with the latest status and information about the task.\n    \"\"\"\n    task_url = '/api/task'\n\n    def test_asynctask_reports_success(self):\n        \"\"\"\n        Tests that when an async task is created and completed, the Task object has a status of 'SUCCESS' and\n        contains the return value of the task.\n        \"\"\"\n        metadata = {'test': True}\n        task_options = {\n            'user_id': self.user.pk,\n            'task_type': 'asynctask',\n            'metadata': metadata\n        }\n        task, task_info = create_async_task('test', task_options)\n        self.assertTrue(Task.objects.filter(metadata__test=True).count()==1)\n        self.assertEqual(task_info.user, self.user)\n        self.assertEqual(task_info.task_type, 'test')\n        self.assertEqual(task_info.is_progress_tracking, False)\n        result = task.get()\n        self.assertEqual(Task.objects.get(task_id=task.id).metadata['result'], 42)\n        self.assertEqual(Task.objects.get(task_id=task.id).status, 'SUCCESS')\n\n    def test_asynctask_reports_progress(self):\n        \"\"\"\n        Test that we can retrieve task progress via the Task API.\n        \"\"\"\n        metadata = {'test': True}\n        task_options = {\n            'user_id': self.user.pk,\n            'task_type': 'asynctask',\n            'metadata': metadata\n        }\n        task, task_info = create_async_task('progress-test', task_options)\n        self.assertTrue(Task.objects.filter(metadata__test=True).count()==1)\n        result = task.get()\n        self.assertEqual(result, 42)\n        self.assertEqual(Task.objects.get(task_id=task.id).status, 'SUCCESS')\n\n        # progress is retrieved dynamically upon calls to get the task info, so\n        # use an API call rather than checking the db directly for progress.\n        url = '{}/{}'.format(self.task_url, task_info.id)\n        response = self.get(url)\n        self.assertEqual(response.data['status'], 'SUCCESS')\n        self.assertEqual(response.data['task_type'], 'progress-test')\n        self.assertEqual(response.data['metadata']['progress'], 100)\n        self.assertEqual(response.data['metadata']['result'], 42)\n\n    def test_asynctask_reports_error(self):\n        \"\"\"\n        Tests that if a task fails with an error, that the error information is stored in the Task object for later\n        retrieval and analysis.\n        \"\"\"\n        metadata = {'test': True}\n        task_options = {\n            'user_id': self.user.pk,\n            'task_type': 'asynctask',\n            'metadata': metadata\n        }\n        task, task_info = create_async_task('error-test', task_options)\n\n        task = Task.objects.get(task_id=task.id)\n        self.assertEqual(task.status, 'FAILURE')\n        self.assertTrue('error' in task.metadata)\n\n        error = task.metadata['error']\n        self.assertCountEqual(list(error.keys()), ['task_args', 
'task_kwargs', 'traceback'])\n        self.assertEqual(len(error['task_args']), 0)\n        self.assertEqual(len(error['task_kwargs']), 0)\n        traceback_string = '\\n'.join(error['traceback'])\n        self.assertTrue(\"Exception\" in traceback_string)\n        self.assertTrue(\"I'm sorry Dave, I'm afraid I can't do that.\" in traceback_string)\n\n    def test_only_create_async_task_creates_task_entry(self):\n        \"\"\"\n        Test that we don't add a Task entry when we create a new Celery task outside of the create_async_task API.\n        \"\"\"\n\n        task = non_async_test_task.apply_async()\n\n        result = task.get()\n        self.assertEqual(result, 42)\n        self.assertEqual(Task.objects.filter(task_id=task.id).count(), 0)\n","sub_path":"contentcuration/contentcuration/tests/test_asynctask.py","file_name":"test_asynctask.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"334273568","text":"# Copyright 2013, Sandia Corporation. Under the terms of Contract\n# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain\n# rights in this software.\n\n# Computes a coordinate representation using Multidimensional Scaling\n# on an alpha-sum of distance matrices.\n#\n# S. Martin\n# 1/6/2015\n\n# Now set up to handle landmarks (3/1/2021). To preserve compatibility with older \n# models, landmarks should be set to the entire dataset in the case when full \n# (square) distance matrices are available. In this case the exact behavior is\n# preserved. If landmarks are a subset of the full dataset, there is a slightly\n# different behavior when analyzing subsets (namely a subset does not trigger\n# a full re-calculation of the coordinates, since that would not be possible\n# without full distance matrices).\n\n\"\"\"Computes a coordinate representation using Multidimensional Scaling\non an alpha-sum of distance matrices. The distance matrices are stored\nin a list of 2d numpy arrays and the alpha values are stored in a 1d numpy\narray.\"\"\"\n\nimport numpy as np\nimport scipy.linalg\nimport scipy.optimize\n\nfrom scipy import spatial\nfrom sklearn.decomposition import PCA\n\n# better version of natural sort\n# from natsort import natsorted\n\n# for natural sort function\nimport re\n\nimport cherrypy\n\n# cmdscale translation from Matlab by Francis Song \ndef cmdscale(D, full=False):\n    \"\"\" \n    Classical multidimensional scaling (MDS) \n \n    Parameters \n    ---------- \n    D : (n, n) array \n        Symmetric distance matrix.\n    full: Boolean\n        Use to compute all eigenvalues/vectors \n \n    Returns \n    ------- \n    Y : (n, 2) array\n        Configuration matrix. Each column represents a dimension. Only the \n        p dimensions corresponding to positive eigenvalues of B are returned. \n        Note that each dimension is only determined up to an overall sign, \n        corresponding to a reflection. 
Only returns 2d coordinates.\n Yinv : multiply (dx - d_mean) times Yinv to get 2d projected coordinates,\n where dx is a row vector of distances to points in Y and d_mean\n is the average of the distance in Y.\n \"\"\"\n \n # number of points\n n = len(D)\n\n # for multiple points, solve eigenvalue problem\n if n > 1:\n\n # Centering matrix\n H = np.eye(n) - np.ones((n, n)) / n\n\n # YY^T\n B = -H.dot(D ** 2).dot(H) / 2\n\n # diagonalize\n if full: \n\n # keep all eigenvalues/vectors \n evals, evecs = np.linalg.eigh(B)\n\n else:\n # keep only largest two eigenvalues/vectors\n evals, evecs = scipy.linalg.eigh(B, eigvals=(n-2,n-1))\n\n # Sort by eigenvalue in descending order\n idx = np.argsort(evals)[::-1]\n evals = evals[idx]\n evecs = evecs[:,idx]\n\n # Compute the coordinates using positive-eigenvalued components only\n w, = np.where(evals > 0)\n L = np.diag(np.sqrt(evals[w]))\n V = evecs[:,w]\n Y = V.dot(L)\n\n # compute inverse for projection\n Linv = np.diag(np.reciprocal(np.sqrt(evals[w])))\n Yinv = -V.dot(Linv) / 2.0\n\n # if no coordinates then use two columns of zeros for Y and Yinv\n if len(w) == 0:\n Y = np.zeros((n,2))\n Yinv = np.zeros((n,2))\n\n # if only one coordinate then add one column of zeros to Y and Yinv\n if len(w) == 1:\n Y = np.append(np.reshape(Y, (Y.shape[0],1)),\n np.zeros((Y.shape[0],1)), axis=1)\n Yinv = np.append(np.reshape(Yinv, (Yinv.shape[0], 1)),\n np.zeros((Yinv.shape[0], 1)), axis=1)\n\n # for one point set coordinates to center of screen\n else:\n Y = np.array([0, 0])\n Yinv = np.array([0, 0])\n\n return Y, Yinv\n\n\n# this is the legacy, non-landmark behavior, preserved \n# in case of models with full pairwise distance matrices\ndef compute_coords_subset (dist_mats, alpha_values, old_coords, subset, proj=None):\n \"\"\"\n Computes sum alpha_i^2 dist_mat_i.^2 then calls cmdscale to compute\n classical multidimensional scaling.\n \n INPUTS: - dist_mats is a list of numpy arrays containing square\n matrices (n,n) representing distances,\n - alpha_values is a numpy array containing a vector of \n alpha values between 0 and 1.\n - subset is a vector of length n with 1 = in subset, 0 = not in subset.\n - proj is an optional vector similar to subset, defaults to vector\n of all 1.\n \n OUTPUTS: Y is a numpy array of coordinates (n,2) and\n \"\"\"\n\n # set projection default (vector of all ones -- everything in projection)\n num_tests = dist_mats[0].shape[0]\n if proj is None:\n proj = np.ones(num_tests)\n\n # make sure projection is an array\n else:\n proj = np.asarray(proj)\n\n # get sizes of projection, subset\n num_proj = int(np.sum(proj))\n num_subset = int(np.sum(subset))\n\n # use subset if not full dataset, otherwise get projection\n cmd_subset = subset\n compute_proj = False\n if num_subset == num_tests:\n cmd_subset = proj\n compute_proj = True\n\n # init distance matrix to size of working subset\n num_cmd_subset = int(np.sum(cmd_subset))\n full_dist_mat = np.zeros((num_cmd_subset,num_cmd_subset))\n\n # compute alpha-sum of distance matrices on subset\n subset_inds = np.where(cmd_subset)[0]\n for i in range(len(dist_mats)):\n full_dist_mat = full_dist_mat + alpha_values[i]**2 * \\\n dist_mats[i][subset_inds[:,None], subset_inds]**2\n\n # compute mds coordinates on subset\n mds_subset_coords, proj_inv = cmdscale(np.sqrt(full_dist_mat))\n\n # if not in subset, assign coordinates of [0,0]\n mds_coords = old_coords\n mds_coords[subset_inds,:] = mds_subset_coords\n\n # compute projection, if no subset and projection not full dataset\n if compute_proj 
and (num_proj < num_tests):\n\n # get points to project\n proj_inds = np.where(cmd_subset==0)[0]\n num_proj_inds = len(proj_inds)\n\n # compute mean distance squared for points in projection\n mean_dist = np.mean(full_dist_mat, axis=1)\n\n # compute distance squared for each point to be projected\n proj_dist_mat = np.zeros((num_proj_inds, num_proj))\n for i in range(len(dist_mats)):\n proj_dist_mat = proj_dist_mat + alpha_values[i] ** 2 * \\\n dist_mats[i][proj_inds[:, None], subset_inds] ** 2\n\n # compute projected coords\n proj_coords = (proj_dist_mat - mean_dist).dot(proj_inv)\n\n # put projected coords into mds coords\n mds_coords[proj_inds,:] = proj_coords\n\n return mds_coords\n\n\n# this is the newer, landmark behavior\ndef compute_coords_landmark (dist_mats, alpha_values, old_coords, subset, \n proj=None, landmarks=None):\n \"\"\"\n Computes sum alpha_i^2 dist_mat_i.^2 then calls cmdscale to compute\n classical multidimensional scaling.\n \n INPUTS: -- dist_mats is a list of numpy arrays containing square\n matrices (n,n) representing distances,\n -- alpha_values is a numpy array containing a vector of \n alpha values between 0 and 1.\n -- old_coords is a numpy array containing the previous coordinates.\n -- subset is a vector of length n with 1 = in subset, 0 = not in subset.\n -- proj is an optional vector similar to subset, defaults to vector\n of all 1.\n -- landmarks is an optional vector which specifies landmark indices to use\n in the MDS calculation using mask. if the dist_mats are not square it \n is required and the matrices are assumed to be size (n,k), where k is \n the number of landmarks\n \n OUTPUTS: Y is a numpy array of coordinates (n,2) and\n \"\"\"\n\n # set landmark default, vector of all ones, indicating that\n # everything is a landmark and distance matrices are square\n num_tests = dist_mats[0].shape[0]\n if landmarks is None:\n landmarks = np.ones(num_tests)\n \n # make sure landmarks is an array\n else:\n landmarks = np.asarray(landmarks)\n\n # use legacy behavior if we have full pairwise distance matrices\n num_landmarks = int(np.sum(landmarks))\n if num_landmarks == num_tests:\n return compute_coords_subset (dist_mats, alpha_values, \n old_coords, subset, proj=proj)\n\n # set projection default (vector of all ones -- everything in base calculation)\n if proj is None:\n proj = np.ones(num_tests)\n\n # make sure projection is an array\n else:\n proj = np.asarray(proj)\n\n # remove projected points from landmarks\n landmarks = np.multiply(landmarks, proj)\n\n # get size of subset\n num_subset = int(np.sum(subset))\n\n # always use landmarks to compute basic coordinates\n num_landmarks = int(np.sum(landmarks))\n full_dist_mat = np.zeros((num_landmarks,num_landmarks))\n\n # compute alpha-sum of distance matrices on landmarks\n landmark_rows = np.where(landmarks)[0]\n landmark_cols = np.arange(num_landmarks)\n for i in range(len(dist_mats)):\n full_dist_mat = full_dist_mat + alpha_values[i]**2 * \\\n dist_mats[i][landmark_rows[:,None], landmark_cols]**2\n\n # compute mds coordinates on landmarks\n mds_landmark_coords, proj_inv = cmdscale(np.sqrt(full_dist_mat))\n\n # if not in landmarks, assign old coordinates\n mds_coords = old_coords\n mds_coords[landmark_rows,:] = mds_landmark_coords\n\n # now project onto landmarks\n if num_landmarks < num_tests:\n\n # get points to project (subset or proj except landmarks)\n if num_subset == num_tests:\n proj_inds = np.where(landmarks==0)[0]\n else:\n proj_inds = np.where(np.logical_and(landmarks==0, subset))[0]\n \n # 
compute mean distance squared for points in projection\n mean_dist = np.mean(full_dist_mat, axis=1)\n\n # compute distance squared for each point to be projected\n num_proj_inds = len(proj_inds)\n proj_dist_mat = np.zeros((num_proj_inds, num_landmarks))\n for i in range(len(dist_mats)):\n proj_dist_mat = proj_dist_mat + alpha_values[i] ** 2 * \\\n dist_mats[i][proj_inds[:, None], landmark_cols] ** 2\n\n # compute projected coords\n proj_coords = (proj_dist_mat - mean_dist).dot(proj_inv)\n\n # put projected coords into mds coords\n mds_coords[proj_inds,:] = proj_coords\n \n return mds_coords\n\n\n# this is the newest, coordinate only behavior\ndef compute_coords (dist_mats, alpha_values, old_coords, subset, \n proj=None, landmarks=None, use_coordinates=False):\n \"\"\"\n Computes sum alpha_i^2 dist_mat_i.^2 then calls cmdscale to compute\n classical multidimensional scaling.\n \n INPUTS: -- dist_mats is a list of numpy arrays containing square\n matrices (n,n) representing distances,\n -- alpha_values is a numpy array containing a vector of \n alpha values between 0 and 1.\n -- old_coords is a numpy array containing the previous coordinates.\n -- subset is a vector of length n with 1 = in subset, 0 = not in subset.\n -- proj is an optional vector similar to subset, defaults to vector\n of all 1.\n -- landmarks is an optional vector which specifies landmark indices to use\n in the MDS calculation using mask. if the dist_mats are not square it \n is required and the matrices are assumed to be size (n,k), where k is \n the number of landmarks\n -- use_coordinates is an optional Boolean flag indicating distance matrices\n are in fact just coordinates\n \n OUTPUTS: Y is a numpy array of coordinates (n,2) and\n \"\"\"\n\n # if distance matrices are actual distances, use previous behavior\n if use_coordinates == False:\n return compute_coords_landmark(dist_mats, alpha_values, old_coords, subset,\n proj, landmarks)\n\n # number of points/components\n [num_tests, num_comps] = dist_mats[0].shape\n\n # number of landmarks (all points)\n landmarks = np.ones(num_tests)\n\n # set projection default (vector of all ones -- everything in base calculation)\n if proj is None:\n proj = np.ones(num_tests)\n\n # make sure projection is an array\n else:\n proj = np.asarray(proj)\n\n # remove projected points from landmarks\n landmarks = np.multiply(landmarks, proj)\n\n # get size of subset\n num_subset = int(np.sum(subset))\n\n # always use landmarks to compute basic coordinates\n num_landmarks = int(np.sum(landmarks))\n full_dist_mat = np.zeros((num_landmarks, num_comps))\n\n # compute alpha-sum of distance matrices on landmarks\n landmark_rows = np.where(landmarks)[0]\n landmark_cols = np.arange(num_comps)\n for i in range(len(dist_mats)):\n full_dist_mat = full_dist_mat + alpha_values[i] * \\\n dist_mats[i][landmark_rows[:,None], landmark_cols]\n\n # compute 2D coordinates using PCA\n pca = PCA(n_components=2)\n mds_landmark_coords = pca.fit_transform(full_dist_mat)\n\n # if not in landmarks, assign old coordinates\n mds_coords = old_coords\n mds_coords[landmark_rows,:] = mds_landmark_coords\n\n # now project onto landmarks\n if num_landmarks < num_tests:\n\n # get points to project (subset or proj except landmarks)\n if num_subset == num_tests:\n proj_inds = np.where(landmarks==0)[0]\n else:\n proj_inds = np.where(np.logical_and(landmarks==0, subset))[0]\n \n # compute distance squared for each point to be projected\n num_proj_inds = len(proj_inds)\n proj_dist_mat = np.zeros((num_proj_inds, num_comps))\n for 
i in range(len(dist_mats)):\n proj_dist_mat = proj_dist_mat + alpha_values[i] * \\\n dist_mats[i][proj_inds[:, None], landmark_cols]\n\n proj_coords = pca.transform(proj_dist_mat)\n\n # put projected coords into mds coords\n mds_coords[proj_inds,:] = proj_coords\n\n return mds_coords\n\n\ndef scale_coords (coords, full_coords, subset, center):\n \"\"\"\n Adjusts coords (with 2 columns) so that the orientation is\n correlated with the full_coords and scaled by the scalar\n scale to fit in a box [0,1]^2.\n \n INPUTS: -- coords is numpy matrix (n,2) of coords to scale,\n -- full_coords is numpy matrix (n,2) to align\n as a reference for the coords vector.\n -- subset is a vector of length n with 1 = in subset, 0 = not in subset.\n -- center is the subset center\n \n OUTPUTS: a numpy matrix (n,2) of adjusted coordinates.\n \"\"\"\n\n # get subset for scaling\n subset_inds = np.where(subset)[0]\n subset_coords = coords[subset_inds,:]\n\n # mean subtract subset coords\n subset_coords_mean = np.mean(subset_coords, axis=0)\n subset_coords_ms = subset_coords - subset_coords_mean\n\n # mean subtract full subset\n full_subset_coords = full_coords[subset_inds,:]\n full_subset_mean = np.mean(full_subset_coords, axis=0)\n full_subset_coords_ms = full_subset_coords - full_subset_mean\n\n # use Kabsch algorithm to rotate coords in line with full_coords\n corr_mat = np.dot(subset_coords_ms.transpose(),full_subset_coords_ms)\n u,s,v = np.linalg.svd(corr_mat)\n rot_mat = np.dot(v, u.transpose())\n\n # rotate to get new coords\n rot_coords = np.dot(subset_coords_ms, rot_mat.transpose())\n \n # get max absolute value for x,y independently\n coords_scale_x = np.amax(np.absolute(rot_coords[:,0]))\n coords_scale_y = np.amax(np.absolute(rot_coords[:,1]))\n\n # make sure we do not divide by 0\n if coords_scale_x < np.finfo(float).eps:\n coords_scale_x = 1.0\n if coords_scale_y < np.finfo(float).eps:\n coords_scale_y = 1.0\n\n # scale to [0,1]^2 independently for x,y\n scaled_coords = rot_coords\n scaled_coords[:,0] = scaled_coords[:,0] / (2.0 * coords_scale_x) + 0.5\n scaled_coords[:,1] = scaled_coords[:,1] / (2.0 * coords_scale_y) + 0.5\n\n # set coords of anything not in subset\n num_coords = coords.shape[0]\n new_coords = np.zeros((num_coords,2))\n for i in range(num_coords):\n if subset[i] == 0:\n\n # compute direction to move non-subset coord\n move_dir = coords[i,:] - center\n norm_move_dir = np.linalg.norm(move_dir)\n if norm_move_dir == 0:\n move_dir = np.array([-1,-1])\n else:\n move_dir = center + 2.0 * move_dir/norm_move_dir\n\n # put somewhere beyond [0,1], but in the same direction from center\n new_coords[i,:] = move_dir\n\n # set newly computed subset coordinates\n new_coords[subset_inds,:] = scaled_coords\n\n return new_coords\n\n\ndef init_coords (var_dist, proj=None, landmarks=None, use_coordinates=False):\n \"\"\"\n Computes initial MDS coordinates assuming alpha values are all 1.0\n\n INPUTS: - var_dist is a list of distance matrices \n - proj is a vector mask of projected points (optional)\n - landmarks is a vector of indices of landmark points (optional)\n - use_coordinates indicates that var_dist actually contains PCA intermediates\n\n OUTPUTS: mds_coords are the initial scaled MDS coordinates\n full_mds_coords are the unscaled version of the same coordinates\n \"\"\"\n\n num_vars = len(var_dist)\n\n # assume initial alpha values are all one\n alpha_values = np.ones(num_vars)\n\n # scale distance matrices (or coordinates) by maximum, unless maximum is zero\n for i in range(0, num_vars):\n\n 
coords_scale = np.amax(np.absolute(var_dist[i]))\n if coords_scale < np.finfo(float).eps:\n coords_scale = 1.0\n\n var_dist[i] = var_dist[i] / coords_scale\n\n # compute MDS coordinates assuming alpha = 1 for scaling, full subset, full view\n subset_mask = np.ones(var_dist[0].shape[0])\n old_coords = np.zeros((var_dist[0].shape[0], 2))\n full_mds_coords = compute_coords(var_dist, alpha_values, old_coords, subset_mask, \n proj=proj, landmarks=landmarks, use_coordinates=use_coordinates)\n\n # scale using full coordinates\n subset_center = np.array([.5,.5])\n mds_coords = scale_coords(full_mds_coords, full_mds_coords, subset_mask, subset_center)\n\n return mds_coords, full_mds_coords\n\n\ndef compute_alpha_clusters_PCA (var_dist, meta_columns, meta_column_types):\n \"\"\"\n Computes the alpha cluster values using PCA\n\n INPUTS: -- var_dist is a list of distance matrices\n -- meta_columns is a list of meta data arrays\n -- meta_column_types is a list of the meta data array types\n\n\n OUTPUTS: alpha_cluster_mat is a matrix containing all the alpha\n values for clustering each meta data array\n \"\"\"\n\n # landmarks should always be None for this calculation\n\n # get size of data\n num_tests = var_dist[0].shape[0]\n num_vars = len(var_dist)\n\n # form a matrix using only first PCA components\n X = np.asarray([list(var_dist[i][:,0]) for i in range(num_vars)]).transpose()\n\n # for each quantitative meta variable, compute scaled property vector\n num_meta_cols = len(meta_column_types)\n prop_vecs = []\n for i in range(num_meta_cols):\n \n # populate property vector data\n if meta_column_types[i] == \"float64\":\n \n prop_vec = np.asarray(meta_columns[i])\n\n elif meta_column_types[i] == \"string\":\n \n # compute property i values\n # using strings (sorted alphabetically and assigned\n # values starting at 0)\n\n # sort potential values in string metadata\n uniq_sorted_columns = natsorted(set(meta_columns[i]))\n\n # use alphabetical order to make a vector of numbers\n meta_column_num = np.asarray([uniq_sorted_columns.index(str_meta) \n for str_meta in meta_columns[i]])\n\n prop_vec = meta_column_num\n\n # do nothing\n else:\n prop_vec = 0\n\n # scale property vector data\n prop_scale = np.nanmax(np.absolute(prop_vec))\n if prop_scale < np.finfo(float).eps:\n prop_scale = 1.0\n prop_vec = prop_vec / prop_scale\n\n # save property vector\n prop_vecs.append(prop_vec)\n\n # compute NNLS cluster button alpha values, if more than one data point\n alpha_cluster_mat = np.zeros((num_meta_cols, num_vars))\n if num_tests > 1:\n for i in range(num_meta_cols):\n if (meta_column_types[i] == \"float64\") or \\\n (meta_column_types[i] == \"string\"):\n\n # remove NaNs\n nan_mask = np.logical_not(np.isnan(prop_vecs[i]))\n nan_prop = prop_vecs[i][nan_mask]\n nan_X = X[nan_mask, :]\n\n # if nothing left then return zero\n if nan_X.shape[0] > 1:\n\n beta_i = scipy.optimize.nnls(nan_X, nan_prop)\n alpha_i = np.sqrt(beta_i[0])\n\n # again don't divide by zero\n alpha_max_i = np.amax(alpha_i)\n if alpha_max_i <= np.finfo(float).eps:\n alpha_max_i = 1\n alpha_cluster_mat[i, :] = alpha_i / alpha_max_i\n\n return alpha_cluster_mat\n\n\ndef compute_alpha_clusters (var_dist, meta_columns, meta_column_types, \n landmarks=None, use_coordinates=False):\n \"\"\"\n Computes the alpha cluster values.\n\n INPUTS: -- var_dist is a list of distance matrices\n -- meta_columns is a list of meta data arrays\n -- meta_column_types is a list of the meta data array types\n -- landmarks is a mask indicating landmarks\n -- 
use_coordinates to treat distance matrices as coordinates\n\n    OUTPUTS: alpha_cluster_mat is a matrix containing all the alpha\n    values for clustering each meta data array\n    \"\"\"\n\n    # if distance matrices are PCA components, use new behavior\n    if use_coordinates == True:\n        return compute_alpha_clusters_PCA (var_dist, meta_columns, meta_column_types)\n\n    # if landmarks are not given, assume everything is a landmark\n    num_tests = var_dist[0].shape[0]\n    if landmarks is None:\n        landmarks = np.ones(num_tests)\n    \n    # make sure landmarks is an array\n    else:\n        landmarks = np.asarray(landmarks)\n\n    # get landmarks locations in distance matrices\n    num_landmarks = int(np.sum(landmarks))\n    landmark_rows = np.where(landmarks)[0]\n    landmark_cols = np.arange(num_landmarks)\n\n    # compute alpha cluster values using landmarks only\n    num_vars = len(var_dist)\n    num_time_series = num_landmarks\n\n    # form a matrix with each distance matrix as a column (this is U matrix)\n    all_dist_mat = np.zeros((num_time_series * num_time_series, num_vars))\n    for i in range(num_vars):\n        all_dist_mat[:, i] = np.squeeze(np.reshape(var_dist[i][landmark_rows[:,None], landmark_cols],\n                                        (num_time_series * num_time_series, 1)))\n\n    # for each quantitative meta variable, compute distances as columns (V matrices)\n    prop_dist_mats = []  # store as a list of numpy columns\n    num_meta_cols = len(meta_column_types)\n    for i in range(num_meta_cols):\n        if meta_column_types[i] == \"float64\":\n\n            # compute pairwise distance matrix vector for property i\n            landmark_data_i = np.asarray(meta_columns[i])[landmark_rows]\n            prop_dist_mats.append(compute_prop_dist_vec(landmark_data_i, num_time_series))\n\n        elif meta_column_types[i] == \"string\":\n\n            # compute pairwise distance matrix for property i\n            # using strings (sorted alphabetically and assigned\n            # values starting at 0)\n\n            # sort potential values in string metadata\n            uniq_sorted_columns = natsorted(set(meta_columns[i]))\n\n            # use alphabetical order to make a vector of numbers\n            meta_column_num = np.asarray([uniq_sorted_columns.index(str_meta) \n                for str_meta in meta_columns[i]])[landmark_rows]\n\n            prop_dist_mats.append(compute_prop_dist_vec(meta_column_num, num_time_series))\n\n        else:\n\n            # do nothing\n            prop_dist_mats.append(0)\n\n    # compute NNLS cluster button alpha values, if more than one data point\n    alpha_cluster_mat = np.zeros((num_meta_cols, num_vars))\n    if num_time_series > 1:\n        for i in range(num_meta_cols):\n            if (meta_column_types[i] == \"float64\") or \\\n               (meta_column_types[i] == \"string\"):\n\n                # remove NaNs\n                nan_mask = np.logical_not(np.isnan(prop_dist_mats[i]))\n                nan_prop = prop_dist_mats[i][nan_mask]\n                nan_dist_mat = all_dist_mat[nan_mask, :]\n\n                # if nothing left then return zero\n                if nan_dist_mat.shape[0] > 1:\n\n                    # fit on the NaN-masked data (matches the PCA variant above)\n                    beta_i = scipy.optimize.nnls(nan_dist_mat, nan_prop)\n                    alpha_i = np.sqrt(beta_i[0])\n\n                    # again don't divide by zero\n                    alpha_max_i = np.amax(alpha_i)\n                    if alpha_max_i <= np.finfo(float).eps:\n                        alpha_max_i = 1\n                    alpha_cluster_mat[i, :] = alpha_i / alpha_max_i\n\n    return alpha_cluster_mat\n\n\n# subroutine for compute_alpha_clusters which computes the pairwise\n# distance matrix for the alpha slider optimization\ndef compute_prop_dist_vec(prop_vec, vec_length):\n\n    # compute pairwise distance matrix for property\n    prop_dist_mat = np.absolute(\n        np.transpose(np.tile(prop_vec, (vec_length, 1))) - np.tile(prop_vec, (vec_length, 1)))\n    prop_dist_vec = np.squeeze(np.reshape(prop_dist_mat, (vec_length * vec_length, 1)))\n\n    # make sure we don't divide by 0\n    
prop_dist_vec_max = np.nanmax(prop_dist_vec)\n if prop_dist_vec_max <= np.finfo(float).eps:\n prop_dist_vec_max = 1.0\n\n return prop_dist_vec / prop_dist_vec_max\n\n\n# helper function for compute alpha to do natural sort\n# taken from Ned Batchelder's blog\ndef natsorted(l):\n\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n\n return sorted(l, key=alphanum_key)\n\n\n# use max-min algorithm to choose landmarks\ndef select_landmarks(num_points, num_landmarks, variable):\n\n num_vars = len(variable)\n\n # first landmark is first point in dataset\n landmarks = [0]\n\n # next landmarks are chosen by max-min\n min_combined_dist = np.full((num_points, 1), np.inf)\n for i in range(1, num_landmarks):\n\n # compute combined distance from each point to previous landmark\n combined_dist = np.zeros((num_points, 1))\n for j in range(num_vars):\n combined_dist += spatial.distance.cdist(variable[j], variable[j][[landmarks[i-1]],:])\n \n # compute minimum distance from each point to set of landmarks\n min_combined_dist = np.minimum(min_combined_dist, combined_dist)\n\n # next landmark is maximum of minimum distances\n landmarks.append(np.argmax(min_combined_dist))\n\n # make landmarks 1-based numpy array\n landmarks = np.asarray(landmarks) + 1\n\n return landmarks\n\n","sub_path":"web-server/plugins/slycat-dac/py/dac_compute_coords.py","file_name":"dac_compute_coords.py","file_ext":"py","file_size_in_byte":28341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"390838463","text":"from typing import List\n\nclass Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n M = len(matrix)\n N = len(matrix[0]) if M else 0\n \n dp = [[0 for _ in range(N)] for _ in range(M)]\n maxL = 0\n \n for i in range(M):\n for j in range(N):\n if i == 0:\n dp[0][j] = 1 if matrix[0][j] == \"1\" else 0\n elif j == 0:\n dp[i][0] = 1 if matrix[i][0] == \"1\" else 0\n else:\n if matrix[i][j] == \"0\":\n dp[i][j] = 0\n else:\n dp[i][j] = min(dp[i - 1][j - 1], dp[i - 1][j], dp[i][j - 1]) + 1\n \n maxL = max(maxL, dp[i][j])\n return maxL * maxL","sub_path":"leetcode/221-Maximal-Square.py","file_name":"221-Maximal-Square.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"247540213","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('course', '0008_gendertop_topten'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='coursenumber',\n name='all_time',\n field=models.TextField(default='', verbose_name='\\u6240\\u6709\\u4e0a\\u8bfe\\u65f6\\u95f4'),\n ),\n migrations.AlterField(\n model_name='usercourse',\n name='course',\n field=models.ForeignKey(verbose_name='\\u8bfe\\u5934\\u53f7', to='course.CourseNumber'),\n ),\n ]\n","sub_path":"zq_taoke_new/course/migrations/0009_auto_20151015_0911.py","file_name":"0009_auto_20151015_0911.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"548273041","text":"from zoya.res import zoyacontrol as zcr\nfrom zzeppelin.util import inputsheet\n\nclass Generator( inputsheet.InputGenerator ):\n\n# Common keys\n kright = zcr.ZK_F\n kup = zcr.ZK_E\n kleft = zcr.ZK_S\n kdown = zcr.ZK_D\n kact = zcr.ZK_J\n kunact = 
zcr.ZK_W\n\n def _Direction( self, state, recs=None ):\n Single = self._Single\n if recs is None :\n recs = list()\n Single( self.kright, state, 1, recs )\n Single( self.kup, state, 2, recs )\n Single( self.kleft, state, 4, recs )\n Single( self.kdown, state, 8, recs )\n return recs\n\n def _DirAndAct( self, dirstate, recs=None ):\n Single = self._Single\n if recs is None :\n recs = list()\n self._Direction( dirstate, recs )\n Single( self.kact, zcr.KS_STRIKEDOWN, 16, recs )\n Single( self.kunact, zcr.KS_STRIKEDOWN, 32, recs )\n return recs\n\n def Generate( self ):\n Single = self._Single\n Direction = self._Direction\n DirAndAct = self._DirAndAct\n CreateEntry = self._CreateInputSheet\n kact = self.kact\n kunact = self.kunact\n# Control modules\n # 999 Debug/testing\n self._DebugSheet( Single, CreateEntry )\n # 1 Unit\n recs = Single( kact, zcr.KS_STRIKEDOWN, 1 )\n # Aux\n Single( zcr.ZK_I, zcr.KS_STRIKEDOWN, 2, recs )\n Single( zcr.ZK_MOUSERIGHT, zcr.KS_STRIKEDOWN, 4, recs )\n Single( zcr.ZK_U, zcr.KS_STRIKEDOWN, 8, recs )\n Single( zcr.ZK_T, zcr.KS_STRIKEDOWN, 16, recs )\n Single( zcr.ZK_O, zcr.KS_STRIKEDOWN, 32, recs )\n CreateEntry( 1, recs )\n # 2 MobileUnit move\n # Direction\n recs = Direction( zcr.KS_DOWN )\n # Jump start/stop\n Single( zcr.ZK_SPACE, zcr.KS_STRIKEDOWN, 16, recs )\n Single( zcr.ZK_SPACE, zcr.KS_STRIKEUP, 32, recs )\n # Aux (ship movement)\n Single( zcr.ZK_L, zcr.KS_DOWN, 64, recs )\n Single( zcr.ZK_SEMICOLON, zcr.KS_STRIKEDOWN, 128, recs )\n Single( zcr.ZK_SEMICOLON, zcr.KS_STRIKEUP, 256, recs )\n Single( zcr.ZK_A, zcr.KS_DOWN, 512, recs )\n CreateEntry( 2, recs )\n # 3 Follower (AI)\n # 4 MenuRoot\n # System Menu\n recs = Single( zcr.ZK_ESC, zcr.KS_STRIKEDOWN, 1 )\n # Play Menu\n Single( zcr.ZK_Q, zcr.KS_STRIKEDOWN, 2, recs )\n CreateEntry( 4, recs )\n # 5 MenuDisplay\n recs = DirAndAct( zcr.KS_REPEAT )\n # alt acts\n # Single( zcr.ZK_A, zcr.KS_STRIKEDOWN, 64, recs )\n # Single( zcr.ZK_Q, zcr.KS_STRIKEDOWN, 128, recs )\n # Single( zcr.ZK_W, zcr.KS_STRIKEDOWN, 256, recs )\n # Single( zcr.ZK_R, zcr.KS_STRIKEDOWN, 512, recs )\n CreateEntry( 5, recs )\n # 8 Dialogue\n recs = Single( kact, zcr.KS_STRIKEDOWN, 1 )\n CreateEntry( 8, recs )\n\n def _DebugSheet( self, Single, CreateEntry ):\n recs = Single( zcr.ZK_1, zcr.KS_STRIKEDOWN, 2 )\n super()._DebugSheet( Single, CreateEntry, recs )\n\n\n# ------------------------------------------------\n\n\ndef Generate( gen ):\n return inputsheet.Generate( gen, Generator )\n\n","sub_path":"util/inputsheet.py","file_name":"inputsheet.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"468116166","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom gaussComparator import gaussComparator\n\n\ndef f_target(x):\n x += 0.8\n width = 1.2\n x0 = 5.8\n d = np.abs(x-x0)\n return np.cos(1.2*x) - 0.3*x*np.exp(-d**2/(2*width**2)) - 2 + 0.7*np.sin(4*x)\n\nX = np.linspace(0,10,1000)\ny_target = f_target(X)\n\nimport matplotlib as mpl\nmpl.rcParams['axes.linewidth'] = 1.0\nfs = 16\n\nplt.figure(figsize=(7,4))\nplt.xticks(np.arange(0, 12, step=2), fontsize=fs)\nplt.yticks(np.arange(-4, 1, step=1), fontsize=fs)\nplt.xlabel('x', fontsize=fs)\nplt.ylabel('Energy', fontsize=fs)\nplt.xlim([0,9.6])\nplt.ylim([-4.5,-1.3])\nplt.plot(X, y_target, color='darkgreen', lw=2.5)\nplt.axis('off')\nplt.savefig('energyLandscape.pdf', 
transparent=True)\nplt.show()\n","sub_path":"krrThomas/copenPresentation/MLSearchVSnormal.py","file_name":"MLSearchVSnormal.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"268068129","text":"# author: Roderick DeValcourt\r\n# project: Assignment: Build A Chatbot\r\n\r\n# main menu design\r\n\r\n# Main Menu\r\n\r\n# 1. Provide Context (google)\r\n# 2. Provide Context (wikipedia)\r\n# 3. Save Json Context\r\n# 4. Train NLTK/NLP\r\n# 5. Chat\r\n# 6. Quit\r\n\r\n\r\nimport re\r\nimport string\r\nfrom datetime import datetime\r\n\r\nimport googlesearch\r\nimport requests\r\nimport wikipedia\r\nfrom bs4 import BeautifulSoup\r\nfrom lxml import html\r\nfrom textblob import TextBlob\r\n\r\nfrom chatbotwindow import ChatBotWindow\r\nfrom convert_text_to_json import ConvertTextToJson\r\nfrom trainingchatbot import TrainChatBot\r\n\r\n\r\ndef get_input(title) -> int:\r\n try:\r\n result = int(input(title))\r\n except ValueError:\r\n result = 0\r\n return result\r\n\r\n\r\ndef get_new_blob(title) -> TextBlob:\r\n inputted_text = input(title)\r\n result = TextBlob(inputted_text)\r\n return result\r\n\r\n\r\nclass Information:\r\n def __init__(self):\r\n self.result = ''\r\n self.url = ''\r\n self.text_input = ''\r\n\r\n def set_result(self, r):\r\n self.result = r\r\n\r\n def set_url(self, u):\r\n self.url = u\r\n\r\n def set_text_input(self, ti):\r\n self.text_input = ti\r\n\r\n def __str__(self):\r\n message_text = '{},{},{}\\n'.format(self.text_input, self.url, self.result)\r\n return message_text\r\n\r\n\r\nclass queryUrl:\r\n def __init__(self):\r\n self.fallback = 'Sorry, I cannot think of a reply for that.'\r\n self.list_information = []\r\n\r\n def chatbot_query_wikipedia(self, query):\r\n\r\n try:\r\n\r\n res = wikipedia.search(query, results=10)\r\n for r in res:\r\n\r\n try:\r\n\r\n r2 = wikipedia.page(r)\r\n # print(r2.url)\r\n # print(r2.content)\r\n # tree = html.fromstring(r2.content)\r\n\r\n soup = BeautifulSoup(r2.content, features=\"lxml\")\r\n\r\n article_text = ''\r\n article = soup.findAll('p')\r\n for element in article:\r\n article_text += '\\n' + ''.join(element.findAll(text=True))\r\n\r\n scrubbed = article_text.encode(\"ascii\", \"ignore\")\r\n article_text = scrubbed.decode()\r\n\r\n article_text = re.sub(' +', ' ', article_text)\r\n\r\n article_text = article_text.replace('/\\\\ /g', '\\\\\\\\')\r\n\r\n article_text = article_text.replace('\\r', ' ')\r\n article_text = article_text.replace('\\n', ' ')\r\n article_text = article_text.replace('\\t', '')\r\n article_text = article_text.replace('\"', '\\\\\"')\r\n\r\n article_text = article_text.strip()\r\n\r\n first_sentence = article_text.split('.')\r\n first_sentence = first_sentence[0].split('?')[0]\r\n\r\n chars_without_whitespace = first_sentence.translate(\r\n {ord(c): None for c in string.whitespace}\r\n )\r\n\r\n if len(chars_without_whitespace) > 0:\r\n info = Information()\r\n info.url = r2.url\r\n info.set_result(first_sentence)\r\n info.set_text_input(query)\r\n\r\n self.list_information.append(info)\r\n except Exception:\r\n continue\r\n\r\n except wikipedia.exceptions.DisambiguationError as e:\r\n for s in e.options:\r\n result = self.fallback\r\n print(s)\r\n\r\n def chatbot_query_google(self, query):\r\n\r\n try:\r\n search_result_list = googlesearch.search(query, num_results=10, lang='en')\r\n\r\n howmany = len(search_result_list)\r\n\r\n for index in range(howmany):\r\n\r\n try:\r\n\r\n page = 
requests.get(search_result_list[index])\r\n\r\n url = search_result_list[index]\r\n\r\n # url = requests.Request.url\r\n\r\n tree = html.fromstring(page.content)\r\n\r\n soup = BeautifulSoup(page.content, features=\"lxml\")\r\n\r\n article_text = ''\r\n article = soup.findAll('p')\r\n for element in article:\r\n article_text += '\\n' + ''.join(element.findAll(text=True))\r\n\r\n scrubbed = article_text.encode(\"ascii\", \"ignore\")\r\n article_text = scrubbed.decode()\r\n\r\n article_text = re.sub(' +', ' ', article_text)\r\n\r\n article_text = article_text.replace('/\\\\ /g', '\\\\\\\\')\r\n\r\n article_text = article_text.replace('\\r', ' ')\r\n article_text = article_text.replace('\\n', ' ')\r\n article_text = article_text.replace('\\t', '')\r\n article_text = article_text.replace('\"', '\\\\\"')\r\n\r\n article_text = article_text.strip()\r\n\r\n first_sentence = article_text.split('.')\r\n first_sentence = first_sentence[0].split('?')[0]\r\n\r\n chars_without_whitespace = first_sentence.translate(\r\n {ord(c): None for c in string.whitespace}\r\n )\r\n\r\n if len(chars_without_whitespace) > 0:\r\n info = Information()\r\n info.url = url\r\n info.set_result(first_sentence)\r\n info.set_text_input(query)\r\n self.list_information.append(info)\r\n\r\n except:\r\n continue\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n\r\nclass Menu:\r\n\r\n def __init__(self):\r\n self.options = dict()\r\n self.valid_selection = False\r\n self.selection = 0\r\n self.text_input = ''\r\n self.keep_going = True\r\n self.ok = False\r\n self.when = datetime.now()\r\n self.display_output_list = []\r\n self.file_name = self.when.strftime(\"roderick_devalcourt_ms548_build_a_chatbot_log_%m_%d_%Y.txt\")\r\n self.url = ''\r\n\r\n def __del__(self):\r\n if not self.ok:\r\n self.append_to_file()\r\n\r\n def __str__(self):\r\n result = ''\r\n for key in self.options:\r\n if key == 0:\r\n result += '{}\\n'.format(self.options[0])\r\n else:\r\n result += '{}. 
{}\\n'.format(key, self.options[key])\r\n return result\r\n\r\n def print_log(self, text_info):\r\n dt_string = self.when.strftime(\"%m/%d/%Y %I:%M:%S %p\")\r\n formatted_output = '{}\\n{}\\n-------------\\n'.format(dt_string, text_info)\r\n print(formatted_output)\r\n self.save_print_out(formatted_output)\r\n\r\n def save_print_out(self, text_info):\r\n self.display_output_list.append(text_info)\r\n\r\n def append_to_file(self):\r\n try:\r\n where_at = 0\r\n how_many = len(self.display_output_list)\r\n if how_many > 0:\r\n output_file = open(self.file_name, 'a')\r\n while where_at < how_many:\r\n output = self.display_output_list[where_at]\r\n output_file.write(output)\r\n where_at += 1\r\n output_file.close()\r\n if not self.ok:\r\n self.ok = True\r\n except IOError:\r\n print('Error writing to file {}'.format(self.file_name))\r\n\r\n def set_option(self, opt, title):\r\n if opt not in self.options:\r\n self.options[opt] = title\r\n else:\r\n if opt in self.options:\r\n print('{} {}\\n'.format(self.options[opt], 'already exists in menu'))\r\n\r\n def set_selection(self, sel):\r\n self.selection = sel\r\n\r\n def get_selection(self):\r\n sel = self.selection\r\n return sel\r\n\r\n def process(self):\r\n print('Menu.process() -- override it!')\r\n\r\n\r\nclass Main(Menu):\r\n\r\n def __init__(self):\r\n Menu.__init__(self)\r\n\r\n self.set_option(0, 'Main Menu')\r\n self.set_option(1, 'Provide Context (google)')\r\n self.set_option(2, 'Provide Context (wikipedia)')\r\n self.set_option(3, 'Save Json Context')\r\n self.set_option(4, 'Train NLTK/NLP')\r\n self.set_option(5, 'Chat')\r\n self.set_option(6, 'Quit')\r\n self.output = 'c:\\\\pythonProject1\\\\data\\\\result.txt'\r\n self.output_json = 'c:\\\\pythonProject1\\\\data\\\\result.json'\r\n self.sentence_list = []\r\n self.article_words = []\r\n self.url = ''\r\n self.result = []\r\n\r\n def process(self):\r\n while self.keep_going:\r\n self.print_log(self)\r\n opt = get_input('Please Choose: ')\r\n\r\n inputted_information = 'Please Choose: {}'.format(opt)\r\n self.print_log(inputted_information)\r\n\r\n self.set_selection(opt)\r\n self.process_selection()\r\n\r\n def process_selection(self):\r\n\r\n sel = self.get_selection()\r\n\r\n if sel == 0:\r\n self.valid_selection = False\r\n self.print_log('input a valid selection!')\r\n self.keep_going = True\r\n\r\n if sel in self.options:\r\n self.valid_selection = True\r\n else:\r\n self.valid_selection = False\r\n self.print_log('input a valid selection!')\r\n self.keep_going = True\r\n\r\n # 1. Provide Context (google)\r\n # 2. Provide Context (wikipedia)\r\n # 3. Save Json Context\r\n # 4. Train NLTK/NLP\r\n # 5. Chat\r\n # 6. Quit\r\n\r\n if self.valid_selection:\r\n if sel == 1:\r\n # 1. Provide Context (google)\r\n\r\n obj = get_new_blob('question/input >>')\r\n self.text_input = obj.__str__()\r\n\r\n if len(self.text_input) > 0:\r\n self.print_log(self.text_input)\r\n\r\n q = queryUrl()\r\n q.chatbot_query_google(self.text_input)\r\n\r\n of = open(self.output, \"a\", encoding='utf8')\r\n for i in q.list_information:\r\n self.result.append(i)\r\n of.write(i.__str__())\r\n\r\n of.close()\r\n\r\n if sel == 2:\r\n # 2. 
Provide Context (wikipedia)\r\n\r\n obj = get_new_blob('question/input >>')\r\n self.text_input = obj.__str__()\r\n\r\n if len(self.text_input) > 0:\r\n self.print_log(self.text_input)\r\n\r\n q = queryUrl()\r\n\r\n q.chatbot_query_wikipedia(self.text_input)\r\n\r\n of = open(self.output, \"a\", encoding='utf8')\r\n for i in q.list_information:\r\n self.result.append(i)\r\n of.write(i.__str__())\r\n\r\n of.close()\r\n\r\n if sel == 3:\r\n # 3. Save Json Context\r\n\r\n try:\r\n how_many = len(self.result)\r\n count = 0\r\n of = open(self.output_json, \"a\", encoding='utf8')\r\n of.write('{\\\"intents\\\": [\\n')\r\n while count < how_many:\r\n\r\n # for i in self.result:\r\n\r\n i = self.result[count]\r\n\r\n # jsonStr = json.dumps(i.__dict__)\r\n # of.write(jsonStr)\r\n\r\n cttj = ConvertTextToJson()\r\n\r\n jsonString = cttj.go(i.text_input)\r\n\r\n of.write(' {\\\"tag\\\": \\\"query\\\",\\n')\r\n of.write(' \\\"patterns\\\": ')\r\n of.write(jsonString)\r\n of.write(',\\n')\r\n of.write(' \\\"responses\\\": [\\\"')\r\n of.write(i.result)\r\n of.write('\\\"],\\n')\r\n of.write(' \\\"context\\\": [\\\"')\r\n of.write(i.url)\r\n of.write('\\\"]\\n')\r\n if count < how_many - 1:\r\n of.write(' },\\n')\r\n else:\r\n of.write(' }]\\n')\r\n\r\n count += 1\r\n\r\n of.write('}\\n')\r\n of.close()\r\n\r\n # {\"intents\": [\r\n # {\"tag\": \"query\",\r\n # \"patterns\": [\"sentences go here\"],\r\n # \"responses\": [\"response goes here\"],\r\n # \"context\": [\"\"]\r\n # },\r\n # {\"tag\": \"query\",\r\n # \"patterns\": [\"sentences go here\"],\r\n # \"responses\": [\"response goes here\"],\r\n # \"context\": [\"\"]\r\n # }\r\n # }\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n if sel == 4:\r\n # 4. Train NLTK/NLP\r\n\r\n # file_name = 'intents.json'\r\n # file_name = 'results.json'\r\n # self.output_json = 'c:\\\\pythonProject1\\\\data\\\\result.json'\r\n\r\n file_name = 'c:\\\\pythonProject1\\\\data\\\\result.json'\r\n\r\n tcb = TrainChatBot()\r\n tcb.go(file_name)\r\n\r\n if sel == 5:\r\n # 5. Chat\r\n\r\n cbw = ChatBotWindow()\r\n cbw.go()\r\n\r\n if sel == 6:\r\n # 6. 
Quit\r\n self.keep_going = False\r\n\r\n\r\ndef run():\r\n m = Main()\r\n m.process()\r\n\r\n\r\nrun()\r\n","sub_path":"ms548_roderick_devalcourt_build_a_chatbot.py","file_name":"ms548_roderick_devalcourt_build_a_chatbot.py","file_ext":"py","file_size_in_byte":13508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"638862343","text":"import pdb\nimport time\nimport numpy as np\nimport cv2\nimport glob\n\nimport cv2.aruco as aruco\n\ncap = cv2.VideoCapture(0)\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\ndef read_node_matrix( reader, name ):\n node = reader.getNode( name )\n return node.mat()\n\ndef draw_board_axis( board, board_corners, board_ids, frame, camera_matrix, dist_coeffs, axis_length ):\n corners = np.asarray(board_corners)\n ids = np.asarray(board_ids)\n retval,rvec,tvec = aruco.estimatePoseBoard(corners, ids, board, camera_matrix, dist_coeffs)\n frame = aruco.drawAxis( frame, camera_matrix, dist_coeffs, rvec, tvec, axis_length_inches)\n axis_origin = np.asarray( [[0,0,0]],dtype=np.float )\n imgpts, jac = cv2.projectPoints( axis_origin, rvec, tvec, camera_matrix, dist_coeffs )\n text_base = (int(imgpts[0][0][0]),int(imgpts[0][0][1])-100)\n #print(text_base)\n board_range = np.linalg.norm(tvec)\n board_range_str = \"Range: {:.2f} meters\".format( board_range )\n cv2.putText(frame, board_range_str, text_base, font, 1, (255,255,255),2,cv2.LINE_AA)\n return board_range\n\n# LOAD DICTIONARY\naruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)\n\n# length from the generated markers \n# TODO maker a configuration file\naxis_length_inches = 0.0508\nhexagon_corners = [\nnp.array([[-0.023,0.048,0.044], [0.023,0.048,0.044], [0.023,0.003,0.044], [-0.023,0.003,0.044]] ,dtype=np.float32),\nnp.array([[0.027,0.048,0.042], [0.050,0.048,0.002], [0.050,0.003,0.002], [0.027,0.003,0.042]] ,dtype=np.float32),\nnp.array([[0.050,0.048,-0.002], [0.027,0.048,-0.042], [0.028,0.003,-0.042], [0.050,0.003,-0.002]] ,dtype=np.float32),\nnp.array([[0.023,0.048,-0.044], [-0.023,0.048,-0.044],[-0.023,0.003,-0.044],[0.023,0.003,-0.044]] ,dtype=np.float32),\nnp.array([[-0.027,0.048,-0.042],[-0.050,0.048,-0.002],[-0.050,0.003,-0.002],[-0.027,0.003,-0.042]],dtype=np.float32),\nnp.array([[-0.049,0.048,0.002], [-0.027,0.048,0.042], [-0.028,0.003,0.042], [-0.049,0.003,0.002]] ,dtype=np.float32)]\n\nport_board_ids = np.array( [[21],[22],[23],[24],[25],[26]], dtype=np.int32)\nport_board = aruco.Board_create( hexagon_corners, \n aruco.getPredefinedDictionary(aruco.DICT_6X6_250),\n port_board_ids )\n\nstar_board_ids = np.array( [[27],[28],[29],[30],[31],[32]], dtype=np.int32)\nstar_board = aruco.Board_create( hexagon_corners, \n aruco.getPredefinedDictionary(aruco.DICT_6X6_250),\n star_board_ids )\n\n# read the cameraParameters.xml file generated by\n# opencv_interactive-calibration\ncamera_reader = cv2.FileStorage()\ncamera_reader.open(\"cameraParameters.xml\",cv2.FileStorage_READ)\n\n# camera configurations\ncamera_matrix = read_node_matrix( camera_reader, \"cameraMatrix\" )\ndist_coeffs = read_node_matrix( camera_reader, \"dist_coeffs\" )\n\nwhile(True):\n time.sleep( 0.1 )\n # Read frame from Camera\n # convert frame to grayscale\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # identify markers and \n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict)\n frame = aruco.drawDetectedMarkers(frame, corners, ids)\n\n detected_port_ids = list()\n detected_port_corners = list()\n\n detected_starboard_ids = list()\n 
detected_starboard_corners = list()\n\n #print(\"ids = \", ids)\n #print(\"corners = \", corners)\n\n if( ids is not None ):\n for i in range(len(ids)):\n detected_id = ids[i]\n detected_corner = corners[i]\n #print( detected_id, detected_corner )\n if( detected_id in port_board_ids ):\n detected_port_ids.append( detected_id )\n detected_port_corners.append( detected_corner )\n elif( detected_id in star_board_ids ):\n detected_starboard_ids.append(detected_id)\n detected_starboard_corners.append(detected_corner)\n\n port_range_m = -1\n starboard_range_m = -1\n if( detected_port_ids ):\n port_range_m = draw_board_axis( port_board, detected_port_corners, detected_port_ids,\n frame, camera_matrix, dist_coeffs, axis_length_inches )\n if( detected_starboard_ids ): \n starboard_range_m = draw_board_axis( star_board, detected_starboard_corners, detected_starboard_ids,\n frame, camera_matrix, dist_coeffs, axis_length_inches )\n\n print( \"range( port ) = {:.2f}, range( starboard ) = {:.2f}\".format( port_range_m, starboard_range_m ) )\n # imshow and waitKey are required for the window\n # to open on a mac.\n cv2.imshow('frame', frame)\n\n if( cv2.waitKey(1) & 0xFF == ord('q') ):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n\n","sub_path":"multiple-tagboards/aruco_boards_pose_test.py","file_name":"aruco_boards_pose_test.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"542947209","text":"# Authon :wang\nimport requests\nimport unittest\nimport time\nfrom SamplesInterface.common.logger import Log\nfrom SamplesInterface.config.getCookies import CookiesUtil\n\ncookies = CookiesUtil()\nCookie = CookiesUtil.cookies\nqa_ip = CookiesUtil.qa_ip\nqa_port = CookiesUtil.qa_port\nqa_http = CookiesUtil.qa_http\n\n\nclass Test(unittest.TestCase):\n log = Log()\n\n def setUp(self):\n self.headers = {\n \"Accept\": \"image/webp,image/apng,image/*,*/*;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Cookie\": Cookie\n }\n\n def tearDown(self):\n # print(self.result)\n pass\n\n def test_1_v1_home_alarmVehicles(self):\n ''' 首页-告警车辆定位 '''\n self.log.info(\"------ 首页-告警车辆定位统计:start!---------\")\n self.v1_home_alarmVehicles_url = '' + qa_http + '://' + qa_ip + ':' + qa_port + '/v1/home/alarmVehicles'\n self.log.info(\"------ 首页-告警车辆定位统计:url:%s\" % self.v1_home_alarmVehicles_url)\n r = requests.get(self.v1_home_alarmVehicles_url, headers=self.headers, verify=False)\n self.result = r.json()\n self.log.info(\"------ 首页-告警车辆定位统计:响应结果:%s\" % self.result)\n self.status_code = r.status_code\n self.assertEqual(self.status_code, 200)\n self.assertEqual(self.result['success'], True)\n self.assertTrue(self.result['success'])\n print(self.result)\n self.log.info(\"------ 首页-告警车辆定位统计:end!---------\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"SamplesInterface/case/case01_home/test02.py","file_name":"test02.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"262258032","text":"import traceback\n\nfrom .interceptors import entity_dead_exception\nfrom .scroll import EntityDeadException\nfrom app.log import root\n\n\ninterceptors = {\n EntityDeadException: [entity_dead_exception],\n}\n\n\ndef sort_out_exception(handlers, 
instance):\n    try:\n        return any((fn(instance) for fn in handlers))\n\n    except Exception as exc:\n        # log the failure, then re-raise it (raising the logger's None return was a bug)\n        root.error(exc, exc_info=True)\n        raise\n\n\ndef handle_python_exception(exc_type, exc_val, exc_tb):\n    root.critical(f'{exc_type} {exc_val}')\n    traceback.print_tb(exc_tb)\n\n\ndef intercept_exception(instance, exc_type, exc_val, exc_tb):\n    handlers = interceptors.get(exc_type, [])\n    if handlers:\n        return sort_out_exception(handlers, instance)\n\n    handle_python_exception(exc_type, exc_val, exc_tb)\n","sub_path":"app/entities/exceptions/references.py","file_name":"references.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"295072010","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nfrom numpy import (maximum as max_, logical_not as not_, absolute as abs_, minimum as min_, select)\n\nfrom openfisca_france.model.base import *  # noqa analysis:ignore\n\n# Paris forfait familles\nclass paris_forfait_famille_elig(Variable):\n    column = BoolCol\n    label = u\"Eligibilité à Paris Forfait Famille\"\n    entity = Famille\n    definition_period = MONTH\n\n    def function(famille, period):\n        parisien = famille('parisien', period)\n        statut_occupation_logement = famille.demandeur.menage('statut_occupation_logement', period)\n        charge_logement = (\n            (statut_occupation_logement == 1) +\n            (statut_occupation_logement == 2) +\n            (statut_occupation_logement == 3) +\n            (statut_occupation_logement == 4) +\n            (statut_occupation_logement == 5) +\n            (statut_occupation_logement == 7)\n        )\n\n        result = parisien * charge_logement\n\n        return result\n\nclass paris_forfait_famille(Variable):\n    column = FloatCol\n    label = u\"Famille qui est eligible à l'aide paris forfait famille \"\n    entity = Famille\n    definition_period = MONTH\n\n    def function(famille, period, legislation):\n        last_month = period.last_month\n\n        premier_plafond = legislation(period).paris.paris_forfait_famille.premier_plafond\n        deuxieme_plafond = legislation(period).paris.paris_forfait_famille.deuxieme_plafond\n        aide_1er_plafond = legislation(period).paris.paris_forfait_famille.aide_1er_plafond\n        aide_2eme_plafond = legislation(period).paris.paris_forfait_famille.aide_2eme_plafond\n\n        nb_enfants = famille('paris_nb_enfants', period)\n        elig = famille('paris_forfait_famille_elig', period)\n        ressources_mensuelles_famille = famille('paris_base_ressources_commun', last_month)\n        montant_aide = select([(ressources_mensuelles_famille <= premier_plafond),\n            (ressources_mensuelles_famille <= deuxieme_plafond)], [aide_1er_plafond, aide_2eme_plafond])\n        result = (select([(nb_enfants >= 3), (nb_enfants < 3)], [montant_aide, 0])) * elig\n        return result\n","sub_path":"openfisca_paris/paris_forfait_famille.py","file_name":"paris_forfait_famille.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"444162710","text":"from datetime import datetime\nimport hashlib\nimport io\nimport pickledb\nimport time\n\nfrom .cache import Cache\nfrom .errors import BucketNotEmpty, NoSuchBucket, NoSuchKey, HttpError\nfrom .models import Bucket, BucketQuery, S3Item\nfrom .sia import Sia\n\n\nclass SiaStore(object):\n    def __init__(self, base_dir, host='localhost', port=9980, password='', cache_dir='.'):\n        self.sia = Sia(host=host, port=port, password=password)\n        self.base_dir = base_dir\n        self.buckets = self.get_all_buckets()\n        self.md5_cache = pickledb.load(f'{cache_dir}/md5-cache.db', False)\n        
self.file_cache = Cache(cache_dir=f'{cache_dir}/file_cache')\n\n def _pre_exit(self):\n self.md5_cache.dump()\n\n def _md5(self, bucket_name, key, retrieve_on_miss=True):\n \"\"\"Get md5 from cache, otherwise retrieve file and recalculate.\"\"\"\n md5 = self.md5_cache.get(f'{bucket_name}/{key}')\n\n if not md5 and retrieve_on_miss:\n md5 = self.get_item(bucket_name, key).md5\n\n return md5\n\n def get_all_buckets(self):\n buckets = []\n\n for directory in self.sia.list(self.base_dir)['directories']:\n\n # Skip parent directory\n if directory['siapath'] == self.base_dir:\n continue\n\n # Use modified time since created isn't available\n create_date = datetime.strptime(directory['mostrecentmodtime'][:-4], '%Y-%m-%dT%H:%M:%S.%f')\n path = directory['siapath'].lstrip(f'{self.base_dir}/')\n buckets.append(Bucket(path, create_date))\n\n return buckets\n\n def get_bucket(self, bucket_name):\n for bucket in self.buckets:\n if bucket.name == bucket_name:\n return bucket\n\n def create_bucket(self, bucket_name):\n if bucket_name not in [bucket.name for bucket in self.buckets]:\n self.sia.create_folder(f'{self.base_dir}/{bucket_name}')\n self.buckets = self.get_all_buckets()\n\n return self.get_bucket(bucket_name)\n\n def delete_bucket(self, bucket_name):\n bucket = self.get_bucket(bucket_name)\n if not bucket:\n raise NoSuchBucket()\n\n try:\n self.sia.delete_folder(f'{self.base_dir}/{bucket_name}')\n except:\n raise BucketNotEmpty()\n\n def _block_until_uploaded(self, bucket_name, item_name, timeout_seconds=60):\n uploaded = False\n attempts = 0\n key = f'{self.base_dir}/{bucket_name}/{item_name}'\n while not uploaded:\n uploaded = self.sia.get_file_status(key)['available']\n time.sleep(1)\n attempts += 1\n if attempts > timeout_seconds:\n raise Exception(\"File failed to fully upload\")\n\n def store_data(self, bucket, item_name, headers, data):\n print(f'starting store for {item_name}')\n m = hashlib.md5()\n m.update(data)\n key = f'{self.base_dir}/{bucket.name}/{item_name}'\n\n md5 = m.hexdigest()\n self.md5_cache.set(f'{bucket.name}/{item_name}', md5)\n\n self.sia.upload_file(key, data)\n self.file_cache.put(md5, data)\n return S3Item(item_name, md5=md5)\n\n def store_item(self, bucket, item_name, handler):\n size = int(handler.headers['content-length'])\n data = handler.rfile.read(size)\n return self.store_data(bucket, item_name, {}, data)\n\n def get_item(self, bucket_name, item_name, content=True):\n key = f'{bucket_name}/{item_name}'\n data = b''\n try:\n details = self.sia.get_file_status(f'{self.base_dir}/{key}')\n if content:\n md5 = self._md5(bucket_name, item_name, retrieve_on_miss=False)\n if md5:\n data = self.file_cache.get(md5)\n if not data:\n data = self.sia.get_file(f'{self.base_dir}/{key}')\n except HttpError as e:\n if e.status_code == 400:\n raise NoSuchKey()\n raise\n\n if not details['available']:\n raise NoSuchKey()\n\n m = hashlib.md5()\n m.update(data)\n md5 = m.hexdigest()\n self.md5_cache.set(key, md5)\n\n item = S3Item(\n key,\n md5=md5,\n size=details['filesize'],\n modified_date=details['modtime'].rsplit('.')[0] + '.000Z',\n # Make up a content_type - this may break some clients\n content_type='unknown',\n )\n item.io = io.BytesIO(data)\n self.file_cache.put(md5, data)\n\n return item\n\n def delete_item(self, bucket_name, item_name):\n # s3 doesn't differentiate between files and folders, but sia does. If\n # file deletion fails, assume it was a folder, and delete that. 
Side\n # note: If you create files and folders with the same name within Sia,\n # this can cause weird situations in s3.\n path = f'{self.base_dir}/{bucket_name}/{item_name}'\n try:\n self.sia.delete_file(path)\n self.md5_cache.rem(f'{bucket_name}/{item_name}')\n except Exception:\n self.sia.delete_folder(path)\n\n def get_all_keys(self, bucket, **kwargs):\n max_keys = int(kwargs['max_keys'])\n prefix = kwargs.get('prefix')\n delimiter = kwargs.get('delimiter', '')\n if delimiter not in set(['/', '']):\n raise Exception('Delimiter only supports / or `` currently')\n\n is_truncated = False\n matches = []\n common_prefixes = []\n directories_to_walk = [f'{prefix}']\n walked_directories = set(directories_to_walk)\n \n while len(directories_to_walk) > 0:\n path = directories_to_walk.pop(-1)\n\n try:\n results = self.sia.list(f'{self.base_dir}/{bucket.name}/{path}')\n except Exception:\n raise NoSuchKey()\n\n for file_details in results['files']:\n key = file_details['siapath'].lstrip(f'{self.base_dir}/{bucket.name}')\n\n matches.append(S3Item(\n key,\n md5=self._md5(bucket.name, key),\n modified_date=file_details['modtime'][:-10] + '000Z',\n size=file_details['filesize'],\n ))\n\n for dir_details in results['directories']:\n directory = dir_details['siapath']\n path = directory.lstrip(f'{self.base_dir}/{bucket.name}') + '/'\n\n if path in walked_directories or path == '/':\n continue\n\n # Check for common prefixes\n if delimiter == '/':\n common_prefixes.append(path)\n elif path not in walked_directories:\n directories_to_walk.append(path)\n walked_directories.add(path)\n\n if len(matches) >= max_keys:\n is_truncated = True\n break\n\n return BucketQuery(bucket, matches, is_truncated, common_prefixes, **kwargs)\n","sub_path":"s3_proxy/sia_store.py","file_name":"sia_store.py","file_ext":"py","file_size_in_byte":7001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"362586267","text":"import datetime\nfrom logger_tpl_diff.util import run\nfrom flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n if request.form.get('time') == datetime.datetime.now().strftime('%Y-%m-%d'):\n r = run(request.form.get('arg'))\n else:\n r = run(request.form.get('arg'), request.form.get('time'))\n return render_template('res.html', data=sorted(r, key=lambda x: x['time'], reverse=True))\n current_time = datetime.datetime.now()\n min_time = current_time - datetime.timedelta(days=7)\n return render_template('index.html', current_time=current_time.strftime('%Y-%m-%d'),\n max_time=current_time.strftime('%Y-%m-%d'), min_time=min_time.strftime('%Y-%m-%d'))\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', 9999)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"158697361","text":"'''\r\nCreated on 2012-6-1\r\n\r\n@author: Sky\r\n'''\r\nfrom Entities.Entity import Entity, HasRoom, HasRegion, HasTemplateId\r\nfrom Entities.DataEntity import DataEntity\r\nfrom Entities.LogicEntity import LogicEntity\r\nfrom Entities.Attributes import Databank\r\n\r\nclass ItemTemplate(Entity, DataEntity):\r\n def __init__(self):\r\n Entity.__init__(self)\r\n DataEntity.__init__(self)\r\n self.m_isquantity = False\r\n self.m_quantity = 1\r\n self.m_logics = []\r\n self.m_room = \"0\"\r\n self.m_region = \"0\"\r\n \r\n def 
IsQuantity(self):\r\n return self.m_isquantity\r\n \r\n def GetQuantity(self):\r\n return self.m_quantity\r\n \r\n def Load(self, sr, prefix):\r\n self.m_name = sr.get(prefix + \":NAME\")\r\n self.m_description = sr.get(prefix + \":DESCRIPTION\")\r\n self.m_isquantity = sr.get(prefix + \":ISQUANTITY\")\r\n if self.m_isquantity == \"False\":\r\n self.m_isquantity = False\r\n else:\r\n self.m_isquantity = True\r\n self.m_quantity = int(sr.get(prefix + \":QUANTITY\"))\r\n \r\n self.m_attributes.Load(sr, prefix)\r\n \r\n logics = sr.get(prefix + \":LOGICS\").split(\" \")\r\n self.m_logics = []\r\n for i in logics:\r\n self.m_logics.append(i)\r\n \r\nclass Item(LogicEntity, DataEntity, HasRoom, HasRegion, HasTemplateId):\r\n def __init__(self):\r\n LogicEntity.__init__(self)\r\n DataEntity.__init__(self)\r\n HasRoom.__init__(self)\r\n HasRegion.__init__(self)\r\n HasTemplateId.__init__(self)\r\n \r\n self.m_isquantity = False\r\n self.m_quantity = 1\r\n \r\n def GetName(self):\r\n if self.m_isquantity:\r\n return self.m_name.replace(\"<#>\", str(self.m_quantity))\r\n else:\r\n return self.m_name \r\n \r\n def IsQuantity(self):\r\n return self.m_isquantity\r\n \r\n def GetQuantity(self):\r\n return self.m_quantity\r\n \r\n def SetQuantity(self, p_quantity):\r\n self.m_quantity = p_quantity\r\n \r\n def LoadTemplate(self, p_template):\r\n self.m_templateid = p_template.GetId()\r\n self.m_name = p_template.GetName()\r\n self.m_description = p_template.GetDescription()\r\n self.m_isquantity = p_template.m_isquantity\r\n self.m_quantity = p_template.m_quantity\r\n self.m_attributes = Databank()\r\n for i in p_template.m_attributes.m_bank.keys():\r\n self.m_attributes.Add(i, p_template.m_attributes.m_bank[i]) \r\n \r\n for i in p_template.m_logics:\r\n self.AddLogic(i)\r\n \r\n def Load(self, sr, prefix):\r\n #self.Remove()\r\n \r\n self.m_name = sr.get(prefix + \":NAME\")\r\n self.m_description = sr.get(prefix + \":DESCRIPTION\")\r\n self.m_room = sr.get(prefix + \":ROOM\")\r\n self.m_region = sr.get(prefix + \":REGION\")\r\n self.m_isquantity = sr.get(prefix + \":ISQUANTITY\")\r\n if self.m_isquantity == \"False\":\r\n self.m_isquantity = False\r\n else:\r\n self.m_isquantity = True\r\n self.m_quantity = int(sr.get(prefix + \":QUANTITY\"))\r\n \r\n self.m_templateid = sr.get(prefix + \":TEMPLATEID\")\r\n \r\n self.m_attributes.Load(sr, prefix)\r\n \r\n self.m_logic.Load(sr, prefix, self.m_id)\r\n \r\n #self.Add()\r\n \r\n def Save(self, sr, prefix):\r\n sr.set(prefix + \":NAME\", self.m_name)\r\n sr.set(prefix + \":DESCRIPTION\", self.m_description)\r\n sr.set(prefix + \":ROOM\", self.m_room)\r\n sr.set(prefix + \":REGION\", self.m_region)\r\n sr.set(prefix + \":ISQUANTITY\", str(self.m_isquantity))\r\n sr.set(prefix + \":QUANTITY\", str(self.m_quantity))\r\n sr.set(prefix + \":TEMPLATEID\", self.m_templateid)\r\n \r\n self.m_attributes.Save(sr, prefix)\r\n \r\n self.m_logic.Save(sr, prefix)\r\n \r\n def Add(self, character, region, room):\r\n if self.m_region == \"0\":\r\n # when regions are 0, that means the item is on a Item.character\r\n c = character(self.m_room)\r\n c.AddItem(self.m_id)\r\n else:\r\n reg = region(self.m_region)\r\n reg.AddItem(self.m_id)\r\n \r\n r = room(self.m_room)\r\n r.AddItem(self.m_id)\r\n \r\n def Remove(self, character, region, room):\r\n if self.m_room == \"0\":\r\n return\r\n \r\n # when regions are 0, that means the item is on a Item.character\r\n if self.m_region == \"0\":\r\n c = character(self.m_room)\r\n c.DelItem(self.m_id)\r\n else:\r\n reg = 
region(self.m_region)\r\n reg.DelItem(self.m_id)\r\n \r\n r = room(self.m_room)\r\n r.DelItem(self.m_id)\r\n","sub_path":"src/Entities/Item.py","file_name":"Item.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"476620952","text":"import os\nimport itertools\nfrom jinja2 import Template\nfrom SyntaxAnalyzerHLSL import SyntaxNode\nfrom SyntaxAnalyzerHLSL import SyntaxAnalyzer\nfrom itertools import groupby\nfrom SyntaxAnalyzerHLSL import complete_system_types\n\nsrvs_types = [\n\t'Buffer',\t# Buffer\n\t'Texture1D',\n\t'Texture1DArray',\n\t'Texture2D',\n\t'Texture2DArray',\n\t'Texture3D',\n\t'TextureCube',\n\t'TextureCubeArray',\n\n\t'Texture2DMS',\n\t'Texture2DMSArray',\n\n 'StructuredBuffer'\n\t]\n\nuavs_types = [\n\t'RWBuffer',\n\t'RWByteAddressBuffer',\n\t'RWStructuredBuffer',\n 'AppendStructuredBuffer',\n\t'RWTexture1D',\n\t'RWTexture1DArray',\n\t'RWTexture2D',\n\t'RWTexture2DArray',\n\t'RWTexture3D'\n\t]\n\ndef class_name_from_filename(filename):\n\tbasename = os.path.basename(filename)\t\t# SomeShader.vs.hlsl\n\t(root, ext) = os.path.splitext(basename)\t# root = SomeShader.vs, ext = .hlsl\n\t(sroot, sext) = os.path.splitext(root)\t\t# sroot = SomeShader, sext = .vs\n\treturn sroot+sext[1:].upper()\t\t\t\t# SomeShaderVS\n\ndef pipeline_name_from_filename(filename):\n\tbasename = os.path.basename(filename)\t\t# SomeShader.vs.hlsl\n\t(root, ext) = os.path.splitext(basename)\t# root = SomeShader.vs, ext = .hlsl\n\t(sroot, sext) = os.path.splitext(root)\t\t# sroot = SomeShader, sext = .vs\n\treturn sroot\t\t\t\t\t\t\t\t# SomeShaderVS\n\ndef parsePermutations(filename):\n\tpermutations = {'enums': [], 'options': []}\n\twith open(filename, 'r') as file:\n\t\tdata = file.readlines()\n\t\tfor line in data:\n\t\t\tif str(line).startswith('#if') and str(line).find('ENUM_') != -1:\n\t\t\t\ts = str(str(line).rstrip()).split(' ')\n\t\t\t\tfor part in s:\n\t\t\t\t\tif str(part).startswith('ENUM'):\n\t\t\t\t\t\tenum_parts = str(part).split('_')\n\t\t\t\t\t\tenum_typename = str(enum_parts[1]).lower().title()\n\t\t\t\t\t\tenum_value = str(enum_parts[2]).lower().title()\n\t\t\t\t\t\tfor value_part in enum_parts[3:]:\n\t\t\t\t\t\t\tenum_value += str(value_part).lower().title()\n\t\t\t\t\t\t#enum_value = str(str.join(enum_parts[2:], \" \").title()).split(' ').join()\n\t\t\t\t\t\t#enum_value = str(enum_value[0]).upper() + str(enum_value[1:])\n\t\t\t\t\t\tfound = False\n\t\t\t\t\t\tfor x in range(len(permutations['enums'])):\n\t\t\t\t\t\t\tif enum_typename in permutations['enums'][x]:\n\t\t\t\t\t\t\t\tenum_value_found = False\n\t\t\t\t\t\t\t\tfor key, value in permutations['enums'][x].iteritems():\n\t\t\t\t\t\t\t\t\tfor evalue in value:\n\t\t\t\t\t\t\t\t\t\tif evalue['value'] == enum_value:\n\t\t\t\t\t\t\t\t\t\t\tenum_value_found = True\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\tif not enum_value_found:\n\t\t\t\t\t\t\t\t\tpermutations['enums'][x][enum_typename].append({'value': enum_value, 'flag': str(part)})\n\t\t\t\t\t\t\t\tfound = True\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif not found:\n\t\t\t\t\t\t\tpermutations['enums'].append({enum_typename : [{'value': enum_value, 'flag': str(part)}]})\n\t\t\tif str(line).startswith('#if') and str(line).find('OPTION_') != -1:\n\t\t\t\ts = str(str(line).rstrip()).split(' ')\n\t\t\t\tfor part in s:\n\t\t\t\t\tif str(part).startswith('OPTION'):\n\t\t\t\t\t\toption_parts = str(part).split('_')\n\t\t\t\t\t\toption_value = str(option_parts[1]).lower()\n\t\t\t\t\t\tfor 
value_part in option_parts[2:]:\n\t\t\t\t\t\t\toption_value += str(value_part).lower().title()\n\n\t\t\t\t\t\tfound = False\n\t\t\t\t\t\tfor option_value_test in permutations['options']:\n\t\t\t\t\t\t\tfor okey, ovalue in option_value_test.iteritems():\n\t\t\t\t\t\t\t\tif okey == 'value' and ovalue == option_value:\n\t\t\t\t\t\t\t\t\tfound = True\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tif found:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif not found:\n\t\t\t\t\t\t\tif option_value not in permutations['options']:\n\t\t\t\t\t\t\t\tpermutations['options'].append({'value': option_value, 'flag': str(part)})\n\treturn permutations\n\ndef optionPermute(options):\n\tresult = []\n\tcount = 1 << len(options)\n\tcurrentValue = 0\n\tfor i in range(count):\n\t\tres = []\n\t\tfor a in range(len(options)):\n\t\t\tif currentValue & (1 << a) == (1 << a):\n\t\t\t\tres.append({'type': 'option', 'variable_name': options[a]['value'], 'value': 'true', 'flag': options[a]['flag']})\n\t\t\telse:\n\t\t\t\tres.append({'type': 'option', 'variable_name': options[a]['value'], 'value': 'false', 'flag': options[a]['flag']})\n\t\tresult.append(res)\n\t\tcurrentValue += 1\n\treturn result\n\ndef enumPermute(enums):\n\tlists = []\n\tfor dic in enums:\n\t\ttemp = []\n\t\tfor key, value in dic.iteritems():\n\t\t\tfor enum_value in value:\n\t\t\t\ttemp.append({'type': 'enum', 'variable_name': str(key).lower(), 'value': str(key).title()+'::'+str(enum_value['value']), 'flag': enum_value['flag']})\n\t\tlists.append(temp)\n\treturn list(itertools.product(*lists))\n\ndef permute(permutations):\n\tresult = []\n\n\top = optionPermute(permutations['options'])\n\ten = enumPermute(permutations['enums'])\n\t\n\tpermutation_id = 0\n\tfor o in op:\n\t\tfor e in en:\n\t\t\ttempdict = {}\n\t\t\ttemp = []\n\t\t\ttemp.extend(o)\n\t\t\ttemp.extend(e)\n\t\t\tif len(temp) > 0:\n\t\t\t\ttempdict['list'] = temp\n\t\t\t\ttempdict['id'] = '%03d' % permutation_id\n\n\t\t\t\tflags = []\n\t\t\t\tfor perm in tempdict['list']:\n\t\t\t\t\tif perm['type'] == 'option' and perm['value'] == 'true':\n\t\t\t\t\t\tflags.append(perm['flag'])\n\t\t\t\t\tif perm['type'] == 'enum':\n\t\t\t\t\t\tflags.append(perm['flag'])\n\n\t\t\t\ttempdict['defines'] = flags\n\t\t\t\tresult.append(tempdict)\n\t\t\t\tpermutation_id += 1\n\treturn result\n\nclass CodeGenerator:\n\tdef __init__(self, root_level_definition_tokens, template, output, stage, binary_path, support_path, src_path):\n\t\tself.stage = stage\n\t\tself.binary_path = binary_path\n\t\tself.support_path = support_path\n\t\tfilename, file_extension = os.path.splitext(os.path.basename(output))\n\t\tself.interface_path = filename + '.h'\n\t\tpermutations = parsePermutations(src_path)\n\t\tp = permute(permutations)\n\n\t\t# permutations['enums'] structure is a list of dictionaries\n\t\t# permutations['enums'] = [{'enum_typename', ['enum_value1', 'enum_value2']}, \n\t\t#\t\t\t\t\t\t\t{'enum_typename2', ['enum_value1', 'enum_value2']}]\n\t\t# permutations['enums'][enum number]['Mode'][enum value number] == 'enum_value1'\n\t\t#for perm in permutations['enums']:\n\t\t#\tfor enum_typename, enum_values in perm.iteritems():\n\t\t#\t\tfor enum_value in enum_values:\n\t\t#\t\t\tprint str(enum_typename) + str(enum_value)\n\t\t# permutations['options'] is just a list of boolean type variable names\n\n\t\ttemplate_data = self.produce_template_data(root_level_definition_tokens, output)\n\t\ttemplate_data['permutations'] = p\n\t\ttemplate_data['enums'] = permutations['enums']\n\t\ttemplate_data['options'] = 
permutations['options']\n\n\t\twith open(template, 'r') as file:\n\t\t\ttemplate_file = Template(file.read())\n\t\t\twith open(output, 'w') as output_file:\n\t\t\t\toutput_file.write(template_file.render(template_data))\n\t\t\t\n\tdef type_is_srv(self, type):\n\t\tparts = type.split('<')\n\t\treturn parts[0] in srvs_types\n\n\tdef type_is_uav(self, type):\n\t\tparts = type.split('<')\n\t\treturn parts[0] in uavs_types\n\n\tdef get_cpp_type(self, type):\n\t\tif self.type_is_srv(type):\n\t\t\tif 'Buffer' in type:\n\t\t\t\tif 'Bindless' in type:\n\t\t\t\t\treturn 'BufferBindlessSRV'\n\t\t\t\telse:\n\t\t\t\t\treturn 'BufferSRV'\n\t\t\telif 'Texture' in type:\n\t\t\t\tif 'Bindless' in type:\n\t\t\t\t\treturn 'TextureBindlessSRV'\n\t\t\t\telse:\n\t\t\t\t\treturn 'TextureSRV'\n\t\tif self.type_is_uav(type):\n\t\t\tif 'Buffer' in type:\n\t\t\t\tif 'Bindless' in type:\n\t\t\t\t\treturn 'BufferBindlessUAV'\n\t\t\t\telse:\n\t\t\t\t\treturn 'BufferUAV'\n\t\t\telif 'Texture' in type:\n\t\t\t\tif 'Bindless' in type:\n\t\t\t\t\treturn 'TextureBindlessUAV'\n\t\t\t\telse:\n\t\t\t\t\treturn 'TextureUAV'\n\t\tif type in complete_system_types:\n\t\t\treturn type[:1].upper()+type[1:]\n\n\tdef produce_template_data(self, root_level_tokens, output):\n\t\tclass_name = class_name_from_filename(output)\n\t\tpipeline_configuration_class = pipeline_name_from_filename(output)\n\n\t\tbinpath = self.binary_path.replace('\\\\', '/')\n\t\tbase_directory = os.path.dirname(os.path.normpath(binpath))\n\t\tbase_filename_ext = os.path.basename(os.path.normpath(binpath))\n\t\tbase_filename, base_file_extension = os.path.splitext(base_filename_ext)\n\t\tbase_directory_and_file = os.path.join(base_directory, base_filename)\n\n\t\tresult = {\n\t\t\t'has_constants' : False,\n\t\t\t'has_texture_srvs' : False,\n\t\t\t'has_texture_uavs' : False,\n\t\t\t'has_bindless_texture_srvs' : False,\n\t\t\t'has_bindless_texture_uavs' : False,\n\t\t\t'has_buffer_srvs' : False,\n\t\t\t'has_buffer_uavs' : False,\n\t\t\t'has_bindless_buffer_srvs' : False,\n\t\t\t'has_bindless_buffer_uavs' : False,\n\t\t\t'has_samplers' : False,\n\t\t\t'ShaderClass' : class_name,\n\t\t\t'class_type' : self.stage + 'Shader',\n\t\t\t'ShaderLoadInterfaceHeader' : self.interface_path,\n\t\t\t'shader_pipeline_configuration_class' : pipeline_configuration_class,\n\t\t\t'ShaderBinaryPath': self.binary_path.replace('\\\\', '/'),\n\n\t\t\t'BaseExt': base_file_extension,\n\t\t\t'BasePathAndFile': base_directory_and_file.replace('\\\\', '/'),\n\n\t\t\t'ShaderSupportPath': self.support_path.replace('\\\\', '/'),\n\t\t\t'constant_structures' : [],\n\t\t\t'texture_srvs' : [],\n\t\t\t'texture_uavs' : [],\n\t\t\t'bindless_texture_srvs' : [],\n\t\t\t'bindless_texture_uavs' : [],\n\t\t\t'buffer_srvs' : [],\n\t\t\t'buffer_uavs' : [],\n\t\t\t'bindless_buffer_srvs' : [],\n\t\t\t'bindless_buffer_uavs' : [],\n\t\t\t'samplers' : [],\n\t\t\t'descriptor_count' : 0,\n\n\t\t\t'srvs' : [],\n\t\t\t'uavs' : [],\n\t\t\t'dimensions' : [],\n\n\t\t\t'input_parameters' : []\n\t\t\t}\n\t\tfor token in root_level_tokens:\n\t\t\tif token.syntax_type == 'function' and token.name == 'main':\n\t\t\t\tfor child in token.childs:\n\t\t\t\t\tresult['input_parameters'].append({'name' : child.name, 'semantic' : child.semantic, 'type' : child.type })\n\t\t\tif token.type == 'cbuffer':\n\t\t\t\tresult['has_constants'] = True\n\t\t\t\tidentifiers = []\n\t\t\t\tfor child in token.childs:\n\t\t\t\t\tidentifiers.append({'type': self.get_cpp_type(child.type), 'name': child.name 
})\n\t\t\t\tresult['constant_structures'].append({\n\t\t\t\t\t'name': token.name, \n\t\t\t\t\t'identifier': token.name[:1].lower()+token.name[1:], \n\t\t\t\t\t'identifiers': identifiers })\n\t\t\t\tresult['descriptor_count'] += 1\n\t\t\tif token.type == 'sampler' or token.type == 'SamplerComparisonState':\n\t\t\t\tresult['has_samplers'] = True\n\t\t\t\tresult['samplers'].append({ 'name' : token.name, 'identifier': token.name[:1].lower()+token.name[1:] })\n\t\t\telse:\n\t\t\t\tif self.type_is_srv(token.type):\n\t\t\t\t\tif 'Buffer' in token.type:\n\t\t\t\t\t\tif 'Bindless' in token.type:\n\t\t\t\t\t\t\tresult['bindless_buffer_srvs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['srvs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['has_bindless_buffer_srvs'] = True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresult['buffer_srvs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['srvs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['has_buffer_srvs'] = True\n\t\t\t\t\t\tresult['descriptor_count'] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tif 'Bindless' in token.type:\n\t\t\t\t\t\t\tresult['bindless_texture_srvs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['srvs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['dimensions'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name, 'dimension': token.dimension })\n\t\t\t\t\t\t\tresult['has_bindless_texture_srvs'] = True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresult['texture_srvs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['srvs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['dimensions'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name, 'dimension': token.dimension })\n\t\t\t\t\t\t\tresult['has_texture_srvs'] = True\n\t\t\t\t\t\tresult['descriptor_count'] += 1\n\t\t\t\t\t\t\n\t\t\t\tif self.type_is_uav(token.type):\n\t\t\t\t\tif 'Buffer' in token.type:\n\t\t\t\t\t\tif 'Bindless' in token.type:\n\t\t\t\t\t\t\tresult['bindless_buffer_uavs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['uavs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['has_bindless_buffer_uavs'] = True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresult['buffer_uavs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['uavs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['has_buffer_uavs'] = True\n\t\t\t\t\t\tresult['descriptor_count'] += 1\n\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tif 'Bindless' in token.type:\n\t\t\t\t\t\t\tresult['bindless_texture_uavs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['uavs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['dimensions'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name, 'dimension': token.dimension })\n\t\t\t\t\t\t\tresult['has_bindless_texture_uavs'] = True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresult['texture_uavs'].append({'type' : self.get_cpp_type(token.type), 'identifier': 
token.name })\n\t\t\t\t\t\t\tresult['uavs'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name })\n\t\t\t\t\t\t\tresult['dimensions'].append({'type' : self.get_cpp_type(token.type), 'identifier': token.name, 'dimension': token.dimension })\n\t\t\t\t\t\t\tresult['has_texture_uavs'] = True\n\t\t\t\t\t\tresult['descriptor_count'] += 1\n\t\t\t\t\t\t\n\t\treturn result\n","sub_path":"darkness-engine/tools/codegen/CodeGenerator.py","file_name":"CodeGenerator.py","file_ext":"py","file_size_in_byte":12699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"146676188","text":"import math\r\n\r\nliste = []\r\nfor i in range(3, 1000000):\r\n somme = 0\r\n for nb in str(i):\r\n somme += math.factorial(int(nb))\r\n if somme == i:\r\n print(i)\r\n liste.append(i)\r\nprint(sum(liste))\r\ninput()","sub_path":"35-digit factorial.py","file_name":"35-digit factorial.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"240580380","text":"'''5.\r\nб) Дано текстові файли f1 і f2. Переписати зі збереженням порядку проходження\r\nкожен другий компонент файлу f1 в f2, а кожен другий компонент файлу f2 - в файл f1.\r\nВикористовувати допоміжний файл h.\r\n(Кудрявцев В.С)'''\r\nimport os\r\nuser=0\r\nwhile user!=4:\r\n user=int(input('1 - New Folder\\n2 - Create and/or write in txt file\\n3 - Output result\\n4 - Stop program\\nEnter number:'))\r\n if user == 1:\r\n poth=input('Enter folder path:')\r\n os.mkdir(poth)\r\n os.chdir(poth)\r\n elif user == 2:\r\n f1=open('f1.txt',\"w\")\r\n print('Created,write:')\r\n f1.write(input()) #Запись в файл ф1\r\n f1.close()\r\n elif user == 3:\r\n f2=open('f2.txt','w')\r\n f1=open('f1.txt')\r\n f1r=f1.read()\r\n if f1r=='': #Возвращает к выбору если файл пустой\r\n print('Please write in file by button 2')\r\n continue\r\n for i in range(len(f1r)):\r\n if i%2!=0:\r\n f2.write(f1r[i]) #Заполнение ф2 каждым вторым компонентом файла ф1\r\n f1.close()\r\n f2.close()\r\n f1=open('f1.txt','a') #Режим=\"а\" не стирает предыдущие записи в файле, а добавляет к нему новые\r\n f2=open('f2.txt')\r\n f2r=f2.read()\r\n for i in range(len(f2r)):\r\n if i%2!=0:\r\n f1.write(f2r[i]) #Заполнение ф1 каждым вторым компонентом файла ф2\r\n f1.close()\r\n f2.close()\r\n f1=open('f1.txt')\r\n f1r=f1.read()\r\n print('f1:\\n',f1r)\r\n f2=open('f2.txt')\r\n f2r=f2.read()\r\n print('f2:\\n',f2r)\r\n \r\n \r\n \r\n","sub_path":"5.2.py","file_name":"5.2.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"161687349","text":"def read(filename):\n \"\"\"Return contents of text file as string.\"\"\"\n with open(filename) as file:\n return file.read()\n\n\ntry:\n valid_words = read(\"dictionary.txt\").split()\n print(\"spellchecking ...\")\n while True:\n word = input(\"Enter a word (or hit return to cancel): \")\n if len(word) == 0:\n break;\n if word.lower() in valid_words:\n print(\" '\" + word + \"' looks ok\")\n else:\n print(\" '\" + word + \"' not in this dictionary\")\n print(\"spellchecking ... 
finished\")\nexcept OSError as err:\n print(err)\n print(\"Stopping, unable to read dictionary file.\")\n\nprint()\ninput(\"Press return to continue ...\")\n","sub_path":"COMP1753/week11/L09 Files/04Spellchecker_tryExcept.py","file_name":"04Spellchecker_tryExcept.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"114553525","text":"#! /usr/bin/env python\n# ./gfs_download.py -i 2019-10-22\n\nimport sys\nimport os\nimport urllib\nimport datetime\nimport argparse\nimport subprocess\nimport time\nimport requests\n\nfrom tqdm import tqdm\nfrom datetime import datetime, timedelta\n\nfrom db import MyDb\n\nchunk_size = 1024\n\n\ndef valid_date(value):\n try:\n return datetime.strptime(value, \"%Y-%m-%d\")\n except ValueError:\n msg = \"Fecha en formato no valido: '{0}'.\".format(value)\n raise argparse.ArgumentTypeError(msg)\n\n\ndef download_file(url, filename_out):\n try:\n sess = requests.Session()\n adapter = requests.adapters.HTTPAdapter(max_retries=5)\n sess.mount('http://', adapter)\n sess.mount('https://', adapter)\n\n r = sess.get(url=url, stream=True, timeout=120)\n\n with open(filename_out, 'wb') as f:\n f.write(r.content)\n\n except requests.exceptions.Timeout as e:\n print(e)\n except requests.exceptions.TooManyRedirects as e:\n print(e)\n except requests.exceptions.RequestException as e:\n print(e)\n\n\nepilog = \"\"\"\n\"\"\"\nparsr = argparse.ArgumentParser(epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)\n\nparsr.add_argument('-i', '--dateini', required=False,\n help='Anterior time-range boundary in yyyy-mm-dd', type=valid_date)\n\nopts = parsr.parse_args()\n\nserver_url = r'https://nomads.ncep.noaa.gov/'\nbase_url = r'cgi-bin/filter_gfs_1p00.pl?'\n\ndateini = None\n\nif opts.dateini:\n dateini = datetime.strptime(str(opts.dateini), '%Y-%m-%d %H:%M:%S')\nelse:\n dateini = datetime.today()\n\nif dateini:\n\n db = MyDb()\n\n params = {\n 'file': 'gfs.t00z.pgrb2.1p00.f000',\n 'lev_10_m_above_ground': 'on',\n 'var_UGRD': 'on',\n 'var_VGRD': 'on',\n 'leftlon': '0',\n 'rightlon': '360',\n 'toplat': '90',\n 'bottomlat': '-90',\n 'dir': '/gfs.%s/00' % dateini.strftime(\"%Y%m%d\"),\n }\n\n url = server_url + base_url + urllib.urlencode(params)\n\n base_dir = '/mnt/simar-images/weather/gfs/%s/%s/%s' % (dateini.strftime(\"%Y\"),\n dateini.strftime(\"%m\"), dateini.strftime(\"%d\"))\n\n if not os.path.isdir(base_dir):\n os.makedirs(base_dir)\n\n filename_grib = os.path.join(base_dir, 'gfs.t00z.pgrb2.1p00.f000_%s00.grib2' % dateini.strftime(\"%Y%m%d\"))\n filename_json = os.path.join(base_dir, 'gfs.t00z.pgrb2.1p00.f000_%s00.json' % dateini.strftime(\"%Y%m%d\"))\n\n if not os.path.isfile(filename_grib):\n download_file(url, filename_grib)\n\n if os.path.isfile(filename_grib):\n print(\"\\033[92mPROCESSED\\033[0m: File downloaded [%s]\" % filename_grib)\n\n cmd = ['grib2json/bin/./grib2json -d -n -o', filename_json, filename_grib]\n\n if not os.path.isfile(filename_json):\n os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-7-openjdk-amd64/jre/'\n p = os.system(' '.join(cmd))\n\n if os.path.isfile(filename_json):\n print(\"\\033[92mPROCESSED\\033[0m: File processed [%s]\" % filename_json)\n\n w = db.checar_gfs('wind', dateini.strftime(\"%Y-%m-%d\"), '00')\n if not w:\n db.add_gfs(url, server_url, filename_json, '1dgr', 'wind',\n dateini.strftime(\"%Y-%m-%d\"), '00')\n print(\"\\033[92mADDED\\033[0m: Wind data added.\")\n else:\n print(\"\\033[94mINFO\\033[0m: Wind already data added 
[%s]\" % w['filename'])\n\n\n\n\n","sub_path":"pysimar/gfs_download.py","file_name":"gfs_download.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"26709589","text":"# -*- coding: UTF-8 -*-\r\n# @Date : 2018-01-08 23:56\r\nimport random\r\nclass MySort:\r\n #初始化\r\n def __init__(self,start,end,count):\r\n self.start = start\r\n self.end = end\r\n self.count = count\r\n def mysort(self):\r\n if type(self.start) != int or type(self.count) != int or type(self.end) != int:\r\n raise Exception(\"参数必须为int型!\")\r\n if self.start > self.end:\r\n raise Exception(\"起始数不能大于终止数!\")\r\n #生成数字列表,并排序\r\n testa = []\r\n for i in range(self.count):\r\n testa.append(random.randint(self.start,self.end))\r\n #从大到小排序列表\r\n for i in range(self.count):\r\n for j in range(i):\r\n if testa[j] < testa[j+1]:\r\n temp = testa[j]\r\n testa[j] = testa[j+1]\r\n testa[j+1] = temp\r\n return testa\r\n#使用示例\r\nif __name__ == '__main__':\r\n sorted_Data = MySort(10,1000,100).mysort()\r\n #打印排序后的结果\r\n print(sorted_Data)","sub_path":"第一期/上海-Jason/pyLearn/task01/MySort.py","file_name":"MySort.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"16619650","text":"from __future__ import annotations\n\nimport io\nfrom typing import TYPE_CHECKING, Any, Optional\n\nimport discord\nfrom discord import File, Message\nfrom discord.ext import commands\n\nif TYPE_CHECKING:\n from bot import BeattieBot\n\n\nclass BContext(commands.Context):\n \"\"\"An extension of Context to add a reply method and send long content as a file\"\"\"\n\n bot: BeattieBot\n\n async def reply(\n self,\n content: str = None,\n *,\n mention_author: bool = None,\n **kwargs: Any,\n ) -> discord.Message:\n if mention_author is None:\n mention_author = False\n return await super().reply(content, mention_author=mention_author, **kwargs)\n\n async def send(\n self,\n content: Optional[str] = None,\n *,\n file: Optional[File] = None,\n files: list[File] = None,\n **kwargs: Any,\n ) -> Message:\n if content and len(content) >= 2000:\n fp = io.BytesIO()\n fp.write(content.encode(\"utf8\"))\n fp.seek(0)\n content = None\n new_file = discord.File(fp, filename=f\"{self.message.id}.txt\")\n if files is not None:\n files.append(new_file)\n elif file is not None:\n files = [file, new_file]\n file = None\n else:\n file = new_file\n kwargs[\"file\"] = file\n kwargs[\"files\"] = files\n\n return await super().send(\n content,\n **kwargs,\n )\n","sub_path":"context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"355110928","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of couchapp released under the Apache 2 license.\n# See the NOTICE for more information.\n\n\nclass BackendVendor(object):\n \"\"\" vendor backend interface \"\"\"\n url = \"\",\n license = \"\",\n author = \"\",\n author_email = \"\",\n description = \"\"\n long_description = \"\"\n\n scheme = None\n\n def fetch(url, path, *args, **opts):\n raise NotImplementedError\n","sub_path":"couchapp/vendors/backends/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"342922498","text":"import sys\nimport math\n\ntest_input = \"\"\"5\n10 40 30 50 20\"\"\"\n\ndef main(num_len, 
nums):\n num_len = int(num_len)\n nums = sorted([int(i) for i in nums.split()])\n u = sum(nums) / num_len\n diffs = [(i - u)**2 for i in nums]\n s = math.sqrt(sum(diffs) / num_len)\n print(\"{0:.1f}\".format(s))\n\nif __name__ == \"__main__\":\n num_len = next(sys.stdin)\n nums = next(sys.stdin)\n main(num_len, nums)\nelse:\n # python3 -m xxx.py\n main(*test_input.split(\"\\n\"))","sub_path":"hackerrank/10daystat1c.py","file_name":"10daystat1c.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"432416083","text":"\"\"\"\n Reader\n Description:\n\"\"\"\n\nimport os, pathlib, threading, subprocess\n\nfrom subprocess import Popen, PIPE\n\nclass Reader(object):\n\n HOME = os.environ['HOME']\n PATH_BASE = str(pathlib.Path.cwd())\n \n def __init__(self):\n pass\n #self.tag = mongo.db.tag\n\n def run(self):\n\n args = [\n self.PATH_BASE + \"/app/bin/impinj.jar\",\n \"192.168.0.99\",\n ]\n\n process = subprocess.Popen(\n [\"java\", \"-jar\"] + list(args),\n encoding=\"utf-8\",\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n def writeall(process):\n while True:\n # print(\"read data: \")\n data = process.stdout.readline()\n if data:\n print(data)\n #self.tag.insert(data)\n process.stdout.flush()\n\n writer = threading.Thread(target=writeall, args=(process,))\n writer.start()\n\n return process\n\n def _write(self, process, message):\n process.stdin.write(message + \"\\n\")\n process.stdin.flush()","sub_path":"app/reader/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"632796558","text":"# Various Import Statements\r\nimport pandas as pd\r\nimport dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nimport dash_table as dt\r\nimport plotly.graph_objs as go\r\nimport plotly.figure_factory as ff\r\nimport numpy as np\r\nimport scipy.stats as sp\r\nfrom scipy.optimize import Bounds, minimize\r\nfrom plotly.subplots import make_subplots\r\nfrom dash.dependencies import Input, Output, State\r\nimport statsmodels.api as sm\r\nimport statsmodels.stats.moment_helpers as mh\r\nfrom statsmodels.graphics.gofplots import qqplot\r\nfrom statsmodels.tsa.stattools import adfuller\r\nfrom statsmodels.distributions.empirical_distribution import ECDF\r\nfrom numpy.linalg import inv\r\nimport math\r\nfrom sklearn.linear_model import Lasso\r\nfrom sklearn.linear_model import Ridge\r\nfrom sklearn.linear_model import ElasticNet\r\nfrom sklearn.linear_model import LogisticRegressionCV\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.model_selection import TimeSeriesSplit\r\nfrom sklearn.feature_selection import SelectFromModel\r\nfrom sklearn.metrics import roc_auc_score, roc_curve, auc\r\nimport cvxpy as cp\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nimport config\r\n\r\n\r\ndef get_df_columns(filename):\r\n df = pd.read_csv(filename, na_values=-99.99, index_col=0, parse_dates=[0])\r\n df.dropna(how='all', inplace=True, axis=1)\r\n df.columns = df.columns.str.strip()\r\n return df.columns\r\n\r\n\r\ndef get_df(filename, start_period=None, end_period=None, format='%Y%m', reqd_strategies=None, mode='return',\r\n to_per=False):\r\n \"\"\"\r\n\r\n :param filename:\r\n :param start_period:None if NA\r\n :param end_period:None if NA\r\n 
:param format:\r\n :param reqd_strategies: None if NA\r\n :param mode: return or nos or size\r\n :return:\r\n \"\"\"\r\n df = pd.read_csv(filename, index_col=0, parse_dates=[0], na_values=-99.99)\r\n if mode == 'return':\r\n df = df / 100\r\n df.dropna(how='all', inplace=True, axis=1)\r\n df.columns = df.columns.str.strip()\r\n if reqd_strategies is not None:\r\n df = df[reqd_strategies]\r\n if to_per:\r\n df.index = pd.to_datetime(df.index, format=format).to_period('M')\r\n else:\r\n df.index = pd.to_datetime(df.index, format=format)\r\n # if start_period and end_period is not None:\r\n # return df[start_period:end_period]\r\n # else:\r\n return df[start_period:end_period]\r\n\r\n\r\ndef get_ann_vol(df):\r\n ann_vol = df.std() * np.sqrt(12)\r\n return ann_vol\r\n\r\n\r\ndef get_ann_return(df, periodicity=12, expm1=True):\r\n if isinstance(df, (np.ndarray, np.generic)):\r\n ann_factor = periodicity / df.shape[0]\r\n else:\r\n ann_factor = periodicity / len(df.index)\r\n ann_ret_np = ann_factor * (np.log1p(df).sum()) # using log method for eff computation\r\n if expm1:\r\n ann_ret_np = np.expm1(ann_ret_np)\r\n else:\r\n ann_ret_np = np.exp(ann_ret_np)\r\n return ann_ret_np\r\n\r\n\r\ndef get_sharpe_ratio(ann_ret, ann_vol, rf=0.10):\r\n return (ann_ret - rf) / ann_vol\r\n\r\n\r\ndef get_semi_std(df):\r\n semi_std = df[df < 0].std(ddof=0)\r\n return semi_std\r\n\r\n\r\ndef hist_var(col_series, alpha):\r\n return np.percentile(col_series, alpha * 100)\r\n\r\n\r\ndef para_var(col_series, alpha):\r\n z = sp.norm.ppf(alpha)\r\n return col_series.mean() + z * col_series.std(ddof=0)\r\n\r\n\r\ndef corn_var(col_series, alpha):\r\n z = sp.norm.ppf(alpha)\r\n kurtosis = sp.kurtosis(col_series, fisher=True)\r\n skew = sp.skew(col_series)\r\n z = (z +\r\n (z ** 2 - 1) * skew / 6 +\r\n (z ** 3 - 3 * z) * (kurtosis - 3) / 24 -\r\n (2 * z ** 3 - 5 * z) * (skew ** 2) / 36)\r\n return col_series.mean() + z * col_series.std(ddof=0)\r\n\r\n\r\ndef get_VaR(df: pd.DataFrame, var_method, alpha):\r\n if var_method == 'historic':\r\n return df.aggregate(hist_var, alpha=alpha)\r\n elif var_method == 'parametric':\r\n return df.aggregate(para_var, alpha=alpha)\r\n elif var_method == 'cornish':\r\n return df.aggregate(corn_var, alpha=alpha)\r\n\r\n\r\ndef get_CVaR(df, VaR):\r\n CVaR = pd.Series(\r\n {df.columns[i]: df[df[df.columns[i]] < VaR[i]][df.columns[i]].mean() for i in range(len(df.columns))})\r\n return CVaR\r\n\r\n\r\ndef add_1(ddf):\r\n return ddf + 1\r\n\r\n\r\ndef drawdown(df: pd.DataFrame, retrive_index=False, init_wealth=1000, is1p=True):\r\n if retrive_index:\r\n if is1p:\r\n factor = np.exp(np.cumsum(np.log(df))) # using log instead of cumprod for efficiency\r\n else:\r\n factor = np.exp(np.cumsum(np.log1p(df))) # using log instead of cumprod for efficiency\r\n wealth_index = init_wealth * factor\r\n return wealth_index\r\n factor = np.exp(np.cumsum(np.log1p(df))) # using log instead of cumprod for efficiency\r\n wealth_index = init_wealth * factor\r\n prev_peaks = wealth_index.cummax()\r\n drawdowns = (wealth_index - prev_peaks) / prev_peaks\r\n return [wealth_index, prev_peaks, drawdowns]\r\n\r\n\r\ndef risk_info(df, risk_plot=['ann_ret'], rf=0.03, alpha=0.05, var_method='cornish', only_sharpe=False):\r\n ann_vol = get_ann_vol(df)\r\n ann_ret = get_ann_return(df)\r\n sharpe_ratio = get_sharpe_ratio(ann_ret, ann_vol, rf)\r\n if only_sharpe:\r\n return sharpe_ratio\r\n semi_std = get_semi_std(df)\r\n kurtosis = sp.kurtosis(df, fisher=True)\r\n skew = sp.skew(df)\r\n VaR = get_VaR(df, var_method, 
alpha)\r\n CVaR = get_CVaR(df, VaR)\r\n drawdown_df = drawdown(df)[2]\r\n drawdown_df = drawdown_df.aggregate(lambda col_series: col_series.min())\r\n info = pd.DataFrame({'ann_ret': ann_ret,\r\n 'ann_vol': ann_vol,\r\n 'sharpe_ratio': sharpe_ratio,\r\n 'semi_dev': semi_std,\r\n 'Kurtosis': kurtosis,\r\n 'Skew': skew,\r\n 'VaR': VaR,\r\n 'CVaR': CVaR,\r\n 'Drawdown': drawdown_df})\r\n return info.sort_values(by=risk_plot, ascending=True).transpose()\r\n\r\n\r\ndef terminal_risk_stats(fv, floor_factor, wealth_index, aslst=False, strategy=None):\r\n floor_value = fv * floor_factor\r\n if isinstance(wealth_index, pd.DataFrame):\r\n terminal_wealth = wealth_index.iloc[-1]\r\n else:\r\n terminal_wealth = wealth_index # The terminal row\r\n n_scenarios = terminal_wealth.shape[0]\r\n exp_wealth = np.mean(terminal_wealth)\r\n med_wealth = np.median(terminal_wealth)\r\n vol_wealth = np.std(terminal_wealth)\r\n failure_mask = np.less(terminal_wealth, floor_value)\r\n n_breaches = failure_mask.sum()\r\n p_breaches = n_breaches / n_scenarios\r\n # exp_loss_post_breach = np.mean(terminal_wealth[failure_mask]) if n_breaches > 0 else 0.0\r\n # exp_shortfall1 = floor_value - exp_loss_post_breach if n_breaches > 0 else 0.0\r\n exp_shortfall = np.dot(floor_value - terminal_wealth, failure_mask) / n_breaches if n_breaches > 0 else 0.0\r\n best_case = terminal_wealth.max()\r\n worst_case = terminal_wealth.min()\r\n if aslst:\r\n stats = [strategy, exp_wealth, vol_wealth, med_wealth, n_breaches, p_breaches, exp_shortfall]\r\n return stats\r\n else:\r\n return '''\r\n Mean: ${:.2f}\\n\r\n Median: ${:.2f}\\n\r\n Violations: {} ({:.2%})\\n\r\n Exp Shortfall: ${:.2f}\\n\r\n Diff in worst and best case scenario: {}\\n\r\n Worst Case: {}\r\n '''.format(exp_wealth, med_wealth, n_breaches, p_breaches, exp_shortfall, best_case - worst_case,\r\n worst_case)\r\n\r\n\r\ndef ren_df(df, rev_name, exis_name='index'):\r\n return df.reset_index().rename(columns={exis_name: rev_name})\r\n\r\n\r\ndef get_cov(df, periodicity=12):\r\n return df.cov()*periodicity\r\n\r\n\r\ndef get_pf_ret(wt_array, ret_array):\r\n return wt_array.T @ ret_array\r\n\r\n\r\ndef get_pf_vol(wt_array, cov_mat):\r\n return (wt_array.T @ cov_mat @ wt_array) ** 0.5\r\n\r\n\r\ndef annualize_pf_vol(pf_vol, periodicity):\r\n return pf_vol * np.sqrt(periodicity)\r\n\r\n\r\ndef format_perc(wts):\r\n return '{:.4%}'.format(wts)\r\n\r\n\r\ndef get_hover_info(n_assets, reqd_strategies, wts_list):\r\n hoverinfo = []\r\n pf_alloc_wts_str = [list(map(format_perc, wt_array)) for wt_array in wts_list]\r\n for pf_alloc in pf_alloc_wts_str:\r\n hovertext = ''\r\n for i in range(n_assets):\r\n hovertext += ('{}: {} \\n'.format(reqd_strategies[i], pf_alloc[i]))\r\n hoverinfo.append(hovertext)\r\n return hoverinfo\r\n\r\n\r\ndef optimize_wts(ret_series, cov_mat, n_points):\r\n wts_list = []\r\n n_assets = ret_series.shape[0]\r\n ret_array = ret_series.to_numpy()\r\n init_guess = np.repeat(1 / n_assets, n_assets)\r\n bounds = Bounds(lb=0.0, ub=1.0)\r\n is_tgt_met = {\r\n 'type': 'eq',\r\n 'args': (ret_array,),\r\n 'fun': lambda wt_array, ret_array: get_pf_ret(wt_array, ret_array) - tgt_ret\r\n }\r\n wts_sum_to_1 = {\r\n 'type': 'eq',\r\n 'fun': lambda wt_array: np.sum(wt_array) - 1\r\n }\r\n for tgt_ret in np.linspace(ret_series.min(), ret_series.max(), n_points):\r\n results = minimize(fun=get_pf_vol,\r\n args=(cov_mat,),\r\n method='SLSQP',\r\n bounds=bounds,\r\n constraints=[is_tgt_met, wts_sum_to_1],\r\n x0=init_guess,\r\n options={'disp': False})\r\n 
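# results.x is the SLSQP solution: the minimum-volatility weight vector meeting this target return.\r\n        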
wts_list.append(results.x)\r\n return wts_list\r\n\r\n\r\ndef get_mean_var_pts(ret_series, cov_df, n_points, reqd_strategies):\r\n n_assets = ret_series.shape[0]\r\n ret_array = ret_series.to_numpy()\r\n cov_mat = cov_df.to_numpy()\r\n wts_list = optimize_wts(ret_series, cov_mat, n_points)\r\n pf_ret = [get_pf_ret(wt_array, ret_array) for wt_array in wts_list]\r\n pf_vol = [annualize_pf_vol(get_pf_vol(wt_array, cov_mat), 12) for wt_array in wts_list]\r\n hover_desc = get_hover_info(n_assets, reqd_strategies, wts_list)\r\n mean_var_df = pd.DataFrame({'Returns': pf_ret,\r\n 'Volatility': pf_vol,\r\n 'Hover Description': hover_desc})\r\n return mean_var_df\r\n\r\n\r\ndef get_msr(ret_series, cov_df, rf, reqd_strategies, gmv=False, onlywts=False):\r\n if onlywts: # if called from bt_roll and gmv weighting schemes\r\n n_assets = len(ret_series.columns)\r\n else:\r\n n_assets = ret_series.shape[0]\r\n if gmv:\r\n ret_array = np.repeat(1.0,\r\n n_assets) # for gmv wts to be independent of E(R) and thus minimisation function tries to manipulate volatility to minimioze -ve SR\r\n else:\r\n ret_array = ret_series.to_numpy()\r\n cov_mat = cov_df.to_numpy()\r\n bounds = Bounds(lb=0.0, ub=1.0)\r\n init_guess = np.repeat(1 / n_assets, n_assets)\r\n sum_wts_to_1 = {\r\n 'type': 'eq',\r\n 'fun': lambda wt_array: np.sum(wt_array) - 1\r\n }\r\n\r\n def neg_msr(wt_array, ret_array, cov_mat, rf):\r\n return -(get_pf_ret(wt_array, ret_array) - rf) / get_pf_vol(wt_array, cov_mat)\r\n\r\n results = minimize(fun=neg_msr,\r\n args=(ret_array, cov_mat, rf,),\r\n method='SLSQP',\r\n bounds=bounds,\r\n constraints=[sum_wts_to_1],\r\n options={'disp': False},\r\n x0=init_guess)\r\n msr_wt_array = results.x\r\n if onlywts:\r\n return msr_wt_array\r\n if gmv:\r\n ret_array = ret_series.to_numpy() # ret_series restored for calculating mean_var pts using optimized weights (optimized independent of E(R))\r\n msr_ret = get_pf_ret(msr_wt_array, ret_array)\r\n msr_vol = annualize_pf_vol(get_pf_vol(msr_wt_array, cov_mat), 12)\r\n hover_desc = get_hover_info(n_assets, reqd_strategies, [msr_wt_array])[0]\r\n return [msr_vol, msr_ret, hover_desc, msr_wt_array]\r\n\r\n\r\ndef get_gmv(ret_series, cov_df, rf, reqd_strategies, onlywts=False):\r\n return get_msr(ret_series, cov_df, rf, reqd_strategies, gmv=True, onlywts=onlywts)\r\n\r\n\r\ndef get_eq(ret_series, cov_df, reqd_strategies):\r\n n_assets = ret_series.shape[0]\r\n ret_array = ret_series.to_numpy()\r\n cov_mat = cov_df.to_numpy()\r\n eq_wt_array = np.repeat(1 / n_assets, n_assets)\r\n eq_ret = get_pf_ret(eq_wt_array, ret_array)\r\n eq_vol = annualize_pf_vol(get_pf_vol(eq_wt_array, cov_mat), 12)\r\n hover_desc = get_hover_info(n_assets, reqd_strategies, [eq_wt_array])[0]\r\n return [eq_vol, eq_ret, hover_desc]\r\n\r\n\r\ndef get_corr_mat(df, window):\r\n \"\"\"\r\n\r\n :param df:\r\n :return: -> gives correlation matrix for each block of window period and mean correlations\r\n \"\"\"\r\n corr_mat = df.rolling(window=window).corr().dropna(how='all', axis=0)\r\n corr_mat.index.names = ['Date', 'Sector']\r\n corr_groupings = corr_mat.groupby(level='Date')\r\n corr_series = corr_groupings.apply(lambda corr_mat: corr_mat.values.mean()) # getting mean corr for corr_mat for each date (each date being groupedby)\r\n return [corr_mat, corr_series]\r\n\r\n\r\ndef cipp_algo(risky_ret_df: pd.DataFrame, multiplier, floor: float, reqd_strategies, poi, alpha, var_method, rf=0.03,\r\n max_draw_mode=False, plot=True, s0=1000, gbm=False):\r\n def repl_shape(new_df: pd.DataFrame, tgt_df: 
pd.DataFrame):\r\n return new_df.reindex_like(tgt_df)\r\n\r\n init_wealth = s0\r\n pf_value = init_wealth\r\n prev_peak = init_wealth\r\n floor_value = init_wealth * floor\r\n riskfree_df = repl_shape(pd.DataFrame(), risky_ret_df)\r\n cppi_ret_history = repl_shape(pd.DataFrame(), risky_ret_df)\r\n cppi_value_history = repl_shape(pd.DataFrame(), risky_ret_df)\r\n cppi_risky_wt_history = repl_shape(pd.DataFrame(), risky_ret_df)\r\n cppi_cushion_history = repl_shape(pd.DataFrame(), risky_ret_df)\r\n cppi_floor_history = repl_shape(pd.DataFrame(), risky_ret_df)\r\n riskfree_df[:] = rf / 12\r\n\r\n for dt_index in range(len(risky_ret_df.index)):\r\n if max_draw_mode:\r\n prev_peak = np.maximum(prev_peak, pf_value)\r\n floor_value = prev_peak * floor\r\n cushion = (pf_value - floor_value) / pf_value\r\n risky_wt = multiplier * cushion\r\n risky_wt = np.maximum(risky_wt, 0)\r\n risky_wt = np.minimum(risky_wt, 1)\r\n rf_wt = 1 - risky_wt\r\n cppi_pf_rt = (risky_wt * risky_ret_df.iloc[dt_index]) + (rf_wt * riskfree_df.iloc[dt_index])\r\n pf_value = (cppi_pf_rt + 1) * pf_value\r\n\r\n # create logs\r\n cppi_ret_history.iloc[dt_index] = cppi_pf_rt.transpose()\r\n cppi_value_history.iloc[dt_index] = pf_value.transpose()\r\n cppi_cushion_history.iloc[dt_index] = cushion\r\n cppi_floor_history.iloc[dt_index] = floor_value\r\n cppi_risky_wt_history.iloc[dt_index] = risky_wt\r\n\r\n if gbm:\r\n return cppi_value_history\r\n # plot wealth index, drawdowns cushions and weights\r\n app = dash.Dash()\r\n temp_risky_ret = drawdown(risky_ret_df)\r\n risky_wealth = temp_risky_ret[0]\r\n risky_drawdown = temp_risky_ret[2]\r\n cppi_drawdown = drawdown(cppi_ret_history)[2]\r\n cppi_wealth_plot = go.Scatter(x=cppi_value_history.index,\r\n y=cppi_value_history[poi],\r\n name='cppi_wealth_index',\r\n text=(cppi_risky_wt_history[poi] * 100).round(decimals=2))\r\n cppi_drawdown_plot = go.Scatter(x=cppi_drawdown.index,\r\n y=cppi_drawdown[poi],\r\n name='cppi_drawdown')\r\n cppi_wt_plot = go.Scatter(x=cppi_risky_wt_history.index,\r\n y=cppi_risky_wt_history[poi],\r\n name='cppi-risky-asset-alloc')\r\n risky_wealth_plot = go.Scatter(x=risky_wealth.index,\r\n y=risky_wealth[poi],\r\n name='risky_wealth_index')\r\n risky_drawdown_plot = go.Scatter(x=risky_drawdown.index,\r\n y=risky_drawdown[poi],\r\n name='risky_drawdown')\r\n floor_plot = go.Scatter(x=cppi_floor_history.index,\r\n y=cppi_floor_history[poi],\r\n mode='lines',\r\n line=dict(dash='dashdot',\r\n width=3),\r\n name='Floor')\r\n lowpt_cppi_drawdown = cppi_drawdown[poi].min()\r\n lowpt_cppi_drawdown_year = cppi_drawdown[poi].idxmin()\r\n lowpt_risky_drawdown = risky_drawdown[poi].min()\r\n lowpt_risky_drawdown_year = risky_drawdown[poi].idxmin()\r\n lowpts = [[lowpt_cppi_drawdown, lowpt_cppi_drawdown_year], [lowpt_risky_drawdown, lowpt_risky_drawdown_year]]\r\n fig = make_subplots(rows=3, cols=1, shared_xaxes=True)\r\n fig.add_trace(cppi_wealth_plot, row=1, col=1)\r\n fig.add_trace(risky_wealth_plot, row=1, col=1)\r\n fig.add_trace(floor_plot, row=1, col=1)\r\n fig.add_trace(cppi_drawdown_plot, row=2, col=1)\r\n fig.add_trace(risky_drawdown_plot, row=2, col=1)\r\n fig.add_trace(cppi_wt_plot, row=3, col=1)\r\n fig.update_layout(height=750)\r\n annotations = [dict(x=i[1],\r\n y=i[0],\r\n ax=0,\r\n ay=50,\r\n xref='x',\r\n yref='y2',\r\n arrowhead=7,\r\n showarrow=True,\r\n text='Max DrawDown is {} and occurred at {}'.format(i[0], i[1])) for i in lowpts]\r\n fig.update_layout(annotations=annotations)\r\n app.layout = html.Div([dcc.Graph(id='drawdowns', 
figure=fig)])\r\n if __name__ != '__main__' and plot:\r\n app.run_server()\r\n\r\n # create a result dataframe\r\n backtest_results = {'cppi_wealth': cppi_value_history,\r\n 'cppi_return': cppi_ret_history,\r\n 'cppi_drawdown': cppi_drawdown,\r\n 'cppi_risky_wts': cppi_risky_wt_history,\r\n 'risky_wealth': risky_wealth,\r\n 'floor': floor_value,\r\n 'risky_drawdown': risky_drawdown}\r\n return backtest_results\r\n\r\n\r\ndef plot_eff_frontier(ret_series: pd.Series, cov_df: pd.DataFrame, n_points: int, reqd_strategies, rf, show_msr=True,\r\n show_eq=False, show_gmv=True):\r\n mean_var_df = get_mean_var_pts(ret_series, cov_df, n_points, reqd_strategies)\r\n msr = get_msr(ret_series, cov_df, rf, reqd_strategies)\r\n eq = get_eq(ret_series, cov_df, reqd_strategies)\r\n gmv = get_gmv(ret_series, cov_df, rf, reqd_strategies)\r\n app = dash.Dash()\r\n data = [go.Scatter(x=mean_var_df['Volatility'],\r\n y=mean_var_df['Returns'],\r\n mode='markers+lines',\r\n name='efficient_frontier',\r\n text=mean_var_df['Hover Description'])]\r\n if show_msr:\r\n data.append(go.Scatter(x=[0, msr[0]],\r\n y=[rf, msr[1]],\r\n mode='markers+lines',\r\n name='CML',\r\n text=['RF - 100%', msr[2]]))\r\n if show_eq:\r\n data.append(go.Scatter(x=[eq[0]],\r\n y=[eq[1]],\r\n mode='markers',\r\n name='EQ',\r\n text=[eq[2]]))\r\n if show_gmv:\r\n data.append(go.Scatter(go.Scatter(x=[gmv[0]],\r\n y=[gmv[1]],\r\n mode='markers',\r\n name='GMV',\r\n text=[gmv[2]])))\r\n\r\n app.layout = html.Div([html.Div([dcc.Graph(id='eff_frontier', figure=dict(data=data,\r\n layout=go.Layout(\r\n title='Efficient Frontier',\r\n xaxis=dict(title='Variance'),\r\n yaxis=dict(title='mean'),\r\n hovermode='closest')))]),\r\n html.Div([html.Pre(id='display_info')])])\r\n\r\n @app.callback(Output('display_info', 'children'),\r\n [Input('eff_frontier', 'hoverData')])\r\n def upd_markdown(hover_data):\r\n hover_data = hover_data['points'][0]\r\n wts_data = hover_data['text']\r\n pf_vol_data = hover_data['x']\r\n pf_ret_data = hover_data['y']\r\n disp_data = '''\r\n The weights are \\n{}\r\n PF - Volatility: {:.2%}\r\n PF - Return : {:.2%}\r\n '''.format(wts_data, pf_vol_data, pf_ret_data)\r\n return disp_data\r\n\r\n if __name__ != '__main__':\r\n app.run_server()\r\n\r\n\r\ndef plot_corr_mktret(ind_ret_filename, n_firms_filename, size_filename, start_period, end_period, format,\r\n reqd_strategies=None, window=36, retrieve_mcw=False, to_per=False, retrieve_mkt_cap_wts=False):\r\n app = dash.Dash()\r\n # Populate all reqd dataframes\r\n ind_ret_m_df = get_df(ind_ret_filename, start_period, end_period, format, reqd_strategies, mode='return',\r\n to_per=to_per)\r\n ind_n_firms_df = get_df(n_firms_filename, start_period, end_period, format, reqd_strategies, mode='nos',\r\n to_per=to_per)\r\n ind_size_df = get_df(size_filename, start_period, end_period, format, reqd_strategies, mode='size', to_per=to_per)\r\n\r\n # industry returns --> mkt cap returns for index constructions\r\n mkt_cap_df = ind_n_firms_df * ind_size_df\r\n total_mkt_cap_series = mkt_cap_df.sum(axis=1)\r\n mkt_wts_df = mkt_cap_df.divide(total_mkt_cap_series, axis=0)\r\n if retrieve_mkt_cap_wts:\r\n return mkt_wts_df\r\n mcw = ind_ret_m_df * mkt_wts_df\r\n mcw_ret_df = pd.DataFrame({'mkt_cap_wt_ret_monthly': mcw.sum(axis=1)})\r\n if retrieve_mcw:\r\n return mcw_ret_df\r\n\r\n # index_generation\r\n mcw_index = drawdown(mcw_ret_df)[0]\r\n # mcw_index_36MA = mcw_index.rolling(window=window).mean()\r\n\r\n # rolling returns\r\n mcw_rolling_returns = 
mcw_ret_df.rolling(window=window).aggregate(get_ann_return)\r\n\r\n # corr matrix\r\n corr_results = get_corr_mat(mcw, window=window)\r\n corr_series = corr_results[1]\r\n\r\n # plots\r\n # ret_data = go.Scatter(x=mcw_ret_df.index,\r\n # y=mcw_ret_df['mkt_cap_wt_ret_monthly'],\r\n # mode='lines',\r\n # name='mcw_returns')\r\n roll_ret_data = go.Scatter(x=mcw_rolling_returns.index,\r\n y=mcw_rolling_returns['mkt_cap_wt_ret_monthly'],\r\n mode='lines',\r\n name='roll_returns')\r\n roll_corr_data = go.Scatter(x=corr_series.index,\r\n y=corr_series,\r\n mode='lines',\r\n name='roll_corr',\r\n yaxis='y2')\r\n # index_data = go.Scatter(x=mcw_index.index,\r\n # y=mcw_index['mkt_cap_wt_ret_monthly'],\r\n # mode='lines',\r\n # name='index')\r\n # ma_data = go.Scatter(x=mcw_index_36MA.index,\r\n # y=mcw_index_36MA['mkt_cap_wt_ret_monthly'],\r\n # mode='lines',\r\n # name='ma_index',\r\n # yaxis='y2')\r\n layout = go.Layout(yaxis=dict(title='roll_return'),\r\n yaxis2=dict(side='right',\r\n overlaying='y1',\r\n title='roll_corr'),\r\n hovermode='closest')\r\n app.layout = html.Div([dcc.Graph(id='corr', figure=dict(data=[roll_ret_data, roll_corr_data], layout=layout))])\r\n\r\n if __name__ != '__main__':\r\n app.run_server()\r\n\r\n\r\ndef plot(df, mode, reqd_strategies: list, risk_plot: list, poi, var_method, alpha, rf):\r\n alpha = alpha / 100\r\n app = dash.Dash()\r\n infodf = risk_info(df, risk_plot=risk_plot, rf=rf, alpha=alpha, var_method=var_method)\r\n idx = reqd_strategies.index(poi)\r\n if mode == 'returns' or mode == 'downside':\r\n hist_plot = [df[col] for col in df.columns]\r\n group_labels = df.columns\r\n fig = ff.create_distplot(hist_plot, group_labels, show_hist=False)\r\n if mode == 'downside':\r\n var_annotation_x = infodf.loc['VaR'][idx]\r\n cvar_annotation_x = infodf.loc['CVaR'][idx]\r\n annotations = [dict(x=var_annotation_x,\r\n y=0,\r\n ax=0,\r\n ay=-200,\r\n showarrow=True,\r\n arrowhead=7,\r\n text='Min {} probability for {} % loss'.format(alpha, -(var_annotation_x * 100).round(\r\n decimals=4)),\r\n xref='x',\r\n yref='y'),\r\n dict(x=cvar_annotation_x,\r\n y=0,\r\n ax=0,\r\n ay=-100,\r\n showarrow=True,\r\n arrowhead=7,\r\n text='Expected loss is {} %'.format(-(cvar_annotation_x * 100).round(decimals=4)),\r\n xref='x',\r\n yref='y')\r\n ]\r\n fig.update_layout(annotations=annotations)\r\n app.layout = html.Div([dcc.Graph(id='returns', figure=fig)])\r\n\r\n elif mode == 'risk_stats':\r\n infodf = ren_df(infodf, 'risk_params', 'index')\r\n app.layout = dt.DataTable(id='risk-stats',\r\n columns=[{'name': col,\r\n 'id': col} for col in infodf.columns],\r\n data=infodf.to_dict('records'))\r\n\r\n elif mode == 'risk_plot':\r\n data = [go.Bar(x=infodf.columns,\r\n y=infodf.loc[risk_type],\r\n name=risk_type) for risk_type in risk_plot]\r\n app.layout = html.Div([dcc.Graph(id='risk_plots', figure=dict(data=data))])\r\n\r\n elif mode == 'drawdowns':\r\n all_index = drawdown(df)\r\n ddf = all_index[2][reqd_strategies[idx]]\r\n wdf = all_index[0][reqd_strategies[idx]]\r\n pdf = all_index[1][reqd_strategies[idx]]\r\n wealth_plot = go.Scatter(x=df.index,\r\n y=wdf,\r\n name='wealth_index')\r\n peak_plot = go.Scatter(x=df.index,\r\n y=pdf,\r\n name='peak_index')\r\n draw_plot = go.Scatter(x=df.index,\r\n y=ddf,\r\n name='drawdown')\r\n lowpt_drawdown = ddf.min()\r\n lowpt_drawdown_year = ddf.idxmin()\r\n fig = make_subplots(rows=2, cols=1, shared_xaxes=True)\r\n fig.add_trace(wealth_plot, row=1, col=1)\r\n fig.add_trace(peak_plot, row=1, col=1)\r\n fig.add_trace(draw_plot, row=2, 
col=1)\r\n annotations = [dict(x=lowpt_drawdown_year,\r\n y=lowpt_drawdown,\r\n ax=0,\r\n ay=150,\r\n xref='x',\r\n yref='y2',\r\n arrowhead=7,\r\n showarrow=True,\r\n text='Max DrawDown is {} and occurred at {}'.format(lowpt_drawdown, lowpt_drawdown_year))]\r\n fig.update_layout(annotations=annotations)\r\n app.layout = html.Div([dcc.Graph(id='drawdowns', figure=fig)])\r\n\r\n\r\n else:\r\n app.layout = html.Div([dcc.Markdown(id='test', children='Hi')])\r\n if __name__ != '__main__':\r\n app.run_server()\r\n\r\n\r\ndef gbm_stock(s0, n_scenarios, steps_per_yr, n_years, er, vol, floor, multiplier, rf, cppi, ret_series=False):\r\n floor_value = floor * s0\r\n dt = 1 / steps_per_yr\r\n total_time_steps = int(n_years * steps_per_yr) + 1\r\n\r\n # Using refined method\r\n dz = np.random.normal(loc=(1 + er) ** dt, scale=vol * np.sqrt(dt),\r\n size=(total_time_steps, n_scenarios))\r\n # mu and sigma is annualized.\r\n # The drift and rw terms require mu and sigma for the infinitesimally small time.\r\n # Even better to use continuous comp ret.\r\n # eg dt = 0.25 and mu is 10% per year. So drift term for 1Qtr needs mu for such qtr viz (1.1)**0.25\r\n gbm_df = pd.DataFrame(dz)\r\n gbm_df.loc[0] = 1.0\r\n if cppi:\r\n gbm_df = gbm_df.apply(lambda gbm_rets: gbm_rets - 1)\r\n wealth_index = cipp_algo(gbm_df, multiplier=multiplier, floor=floor, reqd_strategies=[''], poi='', alpha='',\r\n var_method='', rf=rf, s0=s0, gbm=True)\r\n else:\r\n wealth_index = drawdown(gbm_df, retrive_index=True, init_wealth=s0)\r\n if ret_series:\r\n gbm_df.drop(0, inplace=True)\r\n return gbm_df\r\n return wealth_index\r\n\r\n\r\ndef plot_gbm(s0=100):\r\n # plot\r\n app = dash.Dash()\r\n app.layout = html.Div(\r\n [html.Div([html.Label(id='l_sce', children='N-Scenarios: '), dcc.Input(id='i_sce', type='number', value=10)]),\r\n html.Div([html.Label(id='l_st/yr', children='N-Steps per year: '),\r\n dcc.Input(id='i_st/yr', type='number', value=12)]),\r\n html.Div([html.Label(id='l_yr', children='N-Years: '), dcc.Input(id='i_yr', type='number', value=10)]),\r\n html.Div([html.Label(id='l_er', children='Expected Return: '),\r\n dcc.Input(id='i_er', type='number', value=0.07, step=0.005)]),\r\n html.Div([html.Label(id='l_vol', children='Expected Volatility: '),\r\n dcc.Input(id='i_vol', type='number', value=0.15, step=0.005)]),\r\n html.Div([html.Label(id='l_floor', children='Floor: '),\r\n dcc.Input(id='i_floor', type='number', value=0.8, step=0.1)]),\r\n html.Div([html.Label(id='l_multi', children='Multiplier: '), dcc.Input(id='i_multi', type='number', value=3)]),\r\n html.Div([html.Label(id='l_rf', children='Risk Free Rate: '),\r\n dcc.Input(id='i_rf', type='number', value=0.03, step=0.005)]),\r\n html.Div([dcc.RadioItems(id='cppi', options=[{'label': 'CPPI?', 'value': 1}, {'label': 'Risky?', 'value': 0}],\r\n value=1)]),\r\n html.Div([html.Button(id='gen_gbm', children='Generate', n_clicks=0)]),\r\n html.Div([dcc.Graph(id='gbm_plot')]),\r\n html.Div([dcc.Markdown(id='gbm_stats')], style={'fontsize': '40em'})])\r\n\r\n @app.callback(Output('gbm_stats', 'children'),\r\n [Input('gen_gbm', 'n_clicks')],\r\n [State('i_sce', 'value'),\r\n State('i_st/yr', 'value'),\r\n State('i_yr', 'value'),\r\n State('i_er', 'value'),\r\n State('i_vol', 'value'),\r\n State('i_floor', 'value'),\r\n State('i_multi', 'value'),\r\n State('i_rf', 'value'),\r\n State('cppi', 'value')])\r\n def update_gbm(n_clicks, n_scenarios, steps_per_yr, n_years, er, vol, floor, multiplier, rf, cppi):\r\n wealth_index = gbm_stock(s0, n_scenarios, steps_per_yr, 
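gbm_stock above draws gross per-step returns from a normal centred on (1 + er)**dt, per its in-line comments about annualised mu and sigma. A stripped-down single-path version of the same discretisation, with assumed defaults, for readers who want the simulation without the Dash plumbing:

```python
import numpy as np

def gbm_path(s0=100.0, er=0.07, vol=0.15, steps_per_yr=12, n_years=10, seed=0):
    rng = np.random.default_rng(seed)
    dt = 1 / steps_per_yr
    n_steps = int(n_years * steps_per_yr)
    # Gross returns per step: location (1 + er)**dt, spread vol * sqrt(dt).
    gross = rng.normal(loc=(1 + er) ** dt, scale=vol * np.sqrt(dt), size=n_steps)
    return s0 * np.concatenate(([1.0], gross)).cumprod()

# gbm_path()[-1] gives the terminal wealth of one simulated 10-year path
```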
n_years, er, vol, floor, multiplier, rf, cppi)\r\n wealth_index.to_csv('tempfile.csv')\r\n result_stats = terminal_risk_stats(s0, floor, wealth_index)\r\n return result_stats\r\n\r\n @app.callback(Output('gbm_plot', 'figure'),\r\n [Input('gbm_stats', 'children'),\r\n Input('i_floor', 'value')])\r\n def upd_gbm_plot(gbm_stats, floor):\r\n floor_value = floor * s0\r\n fig = make_subplots(rows=1, cols=2, shared_yaxes=True)\r\n wealth_index = pd.read_csv('tempfile.csv', index_col='Unnamed: 0')\r\n gbm_motion = wealth_index.aggregate(lambda scenario: go.Scatter(x=scenario.index, y=scenario))\r\n gbm_motion = gbm_motion.tolist()\r\n hist_plot = []\r\n for scenario in wealth_index.columns:\r\n hist_plot.append(wealth_index[scenario].tolist())\r\n length = len(hist_plot)\r\n for i in range(length - 1):\r\n hist_plot[0].extend(hist_plot[i + 1])\r\n hist_plot = go.Histogram(y=hist_plot[0],\r\n name='gbm_dist')\r\n for gbm_data in gbm_motion:\r\n fig.add_trace(gbm_data, row=1, col=1)\r\n fig.add_trace(hist_plot, row=1, col=2) # Hist bin plot\r\n floor_threshold = [\r\n dict(type='line', xref='paper', yref='y1', x0=0, x1=1, y0=floor_value, y1=floor_value, name='floor',\r\n line=dict(dash='dashdot', width=5))]\r\n fig.update_layout(showlegend=False,\r\n hovermode='y',\r\n height=500,\r\n shapes=floor_threshold)\r\n return fig\r\n\r\n if __name__ != '__main__':\r\n app.run_server()\r\n\r\n\r\ndef get_macaulay_duration(pvf): # Nota annualized. Make sure to annualize\r\n mac_dur = pvf.apply(lambda pvf: np.average(pvf.index + 1, weights=pvf))\r\n return mac_dur\r\n\r\n\r\ndef get_discount_factor(disc_rate: pd.DataFrame, period):\r\n disc_factors_df = disc_rate.apply(lambda r: np.power((1 + r), -period))\r\n return disc_factors_df\r\n\r\n\r\ndef get_present_value(cash_flow: pd.Series, disc_rate: pd.DataFrame):\r\n if not isinstance(disc_rate, pd.DataFrame):\r\n cash_flow.index -= 1 # To correct for cash_flow.index+1 when called from cir()\r\n disc_rate = pd.DataFrame(data=[disc_rate for t in cash_flow.index], index=cash_flow.index)\r\n get_present_value(cash_flow, disc_rate)\r\n if not len(disc_rate.index) == len(cash_flow.index):\r\n dr_steps = disc_rate.shape[0]\r\n cf_steps = cash_flow.shape[0]\r\n shortfall = cf_steps - dr_steps\r\n dr_last = disc_rate.iloc[-1]\r\n append_rate_df = pd.DataFrame(\r\n data=np.asarray(pd.concat([dr_last] * shortfall, axis=0)).reshape(shortfall, disc_rate.shape[1]),\r\n index=range(dr_steps, cf_steps, 1))\r\n disc_rate = disc_rate.append(append_rate_df)\r\n disc_factors = get_discount_factor(disc_rate, cash_flow.index + 1)\r\n present_value_factors = disc_factors.apply(lambda disc_factor: disc_factor * cash_flow)\r\n present_value = present_value_factors.sum()\r\n mac_dur = get_macaulay_duration(present_value_factors)\r\n return np.asarray(present_value), mac_dur\r\n\r\n\r\ndef gen_bond_cash_flows(tenor, steps_per_year, cr, fv):\r\n dt = 1 / steps_per_year\r\n total_time_steps = int(tenor * steps_per_year)\r\n periodicity_adj_cr = cr * dt\r\n coupon_cf = fv * periodicity_adj_cr\r\n bond_cf = pd.Series([coupon_cf for i in range(0, total_time_steps)])\r\n bond_cf.iloc[-1] += fv\r\n return bond_cf\r\n\r\n\r\ndef get_bond_prices(n_years, tenor, steps_per_year, disc_rate, cr=0.03, fv=100):\r\n dt = 1 / steps_per_year\r\n if isinstance(disc_rate, pd.DataFrame):\r\n periodicity_adj_disc_rate = disc_rate * dt\r\n bond_cf = gen_bond_cash_flows(tenor, steps_per_year, cr, fv)\r\n bond_prices, mac_dur = get_present_value(bond_cf, periodicity_adj_disc_rate)\r\n return bond_prices, 
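get_macaulay_duration above weights the period index (pvf.index + 1) by present-value cash flows, so its output is in periods; that is why callers later divide by steps_per_year to annualise. A small worked check of the convention on an assumed 3-period 5% coupon bond discounted at a flat 6%:

```python
import numpy as np
import pandas as pd

cfs = pd.Series([5.0, 5.0, 105.0])   # coupons plus redemption, fv = 100
t = np.arange(1, len(cfs) + 1)       # period numbers, matching index + 1 above
pv = cfs.values / 1.06 ** t          # present value of each cash flow
print(round(np.average(t, weights=pv), 4))   # 2.8573 periods, just under maturity
```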
mac_dur, bond_cf\r\n else:\r\n total_time_steps = int(n_years * steps_per_year)\r\n disc_rate = pd.DataFrame(data=np.repeat(disc_rate, total_time_steps).reshape(total_time_steps, 1))\r\n return get_bond_prices(n_years, tenor, steps_per_year, disc_rate, cr, fv)\r\n\r\n\r\ndef get_funding_ratio(pv_liabilities, pv_assets):\r\n return np.divide(pv_assets, pv_liabilities)\r\n\r\n\r\ndef get_terminal_wealth(rets):\r\n return np.exp(np.log1p(rets).sum())\r\n\r\n\r\ndef cumulate(rets):\r\n return np.expm1(np.log1p(rets).sum())\r\n\r\n\r\ndef get_optimal_wts(md_liab, ldb, sdb, av, disc_rate, dt):\r\n x0 = np.repeat(0.5, 2)\r\n bounds = Bounds(lb=0.00, ub=1.00)\r\n\r\n def core_check_algo(wts, ldb, sdb, av, disc_rate, dt):\r\n wt_l = wts[0]\r\n alloc_long_dur_bond = av * wt_l\r\n alloc_short_dur_bond = av * (1 - wt_l)\r\n n_long_dur_bond_match = alloc_long_dur_bond / ldb[0]\r\n n_short_dur_bond_match = alloc_short_dur_bond / sdb[0]\r\n dur_matched_bond_cf = pd.DataFrame(\r\n data=pd.concat([ldb[2] * n_long_dur_bond_match, sdb[2] * n_short_dur_bond_match]), columns=['cf'])\r\n dur_matched_bond_cf = dur_matched_bond_cf.groupby(dur_matched_bond_cf.index)['cf'].sum()\r\n dur_matched_bond_cf.index += 1\r\n disc_rate = disc_rate * dt\r\n pv_pf, mac_dur_pf = get_present_value(dur_matched_bond_cf, disc_rate)\r\n mac_dur_pf = mac_dur_pf[0]\r\n return mac_dur_pf * dt\r\n\r\n def check_dur_match(wts, md_liab, ldb, sdb, av, disc_rate, dt):\r\n mac_dur_pf = core_check_algo(wts, ldb, sdb, av, disc_rate, dt)\r\n return mac_dur_pf - md_liab\r\n\r\n sum_wts_to_1 = {\r\n 'type': 'eq',\r\n 'fun': lambda wts: np.sum(wts) - 1\r\n }\r\n\r\n is_diff_zero = {\r\n 'type': 'eq',\r\n 'args': (md_liab, ldb, sdb, av, disc_rate, dt),\r\n 'fun': check_dur_match\r\n }\r\n\r\n result = minimize(fun=check_dur_match,\r\n args=(md_liab, ldb, sdb, av, disc_rate, dt),\r\n x0=x0,\r\n method='SLSQP',\r\n bounds=bounds,\r\n constraints=[sum_wts_to_1, is_diff_zero],\r\n options={'disp': False}\r\n )\r\n wts = result.x\r\n return wts\r\n\r\n\r\n# # NEED TO ADAPT FOR BONDS WITH VARYING COUPON PERIODS\r\ndef get_duration_matched_pf(liabilities: pd.Series, n_years: list, steps_per_year: list, disc_rate, cr: list, fv: list,\r\n av, fr_change_sim=False):\r\n pv_liabilities, mac_dur_liabilities = get_present_value(liabilities, disc_rate)\r\n pv_bond_1, mac_dur_bond_1, bond_cf_1 = get_bond_prices(n_years[0], n_years[0], steps_per_year[0], disc_rate, cr[0],\r\n fv[0])\r\n pv_bond_2, mac_dur_bond_2, bond_cf_2 = get_bond_prices(n_years[1], n_years[1], steps_per_year[1], disc_rate, cr[1],\r\n fv[1])\r\n # bond_cf_1.index += 1\r\n # bond_cf_2.index += 1\r\n mac_dur_bond_1 = mac_dur_bond_1.loc[0] / steps_per_year[0]\r\n mac_dur_bond_2 = mac_dur_bond_2.loc[0] / steps_per_year[1]\r\n mac_dur_liabilities = mac_dur_liabilities.loc[0]\r\n pv_bond_1 = pv_bond_1[0]\r\n pv_bond_2 = pv_bond_2[0]\r\n pv_liabilities = pv_liabilities[0]\r\n if mac_dur_bond_1 > mac_dur_bond_2:\r\n long_dur_bond = [pv_bond_1, mac_dur_bond_1, bond_cf_1, steps_per_year[0]]\r\n short_dur_bond = [pv_bond_2, mac_dur_bond_2, bond_cf_2, steps_per_year[1]]\r\n else:\r\n long_dur_bond = [pv_bond_2, mac_dur_bond_2, bond_cf_2, steps_per_year[1]]\r\n short_dur_bond = [pv_bond_1, mac_dur_bond_1, bond_cf_1, steps_per_year[0]]\r\n tts_for_pf = steps_per_year[0] if len(bond_cf_1.index) > len(bond_cf_2.index) else steps_per_year[\r\n 1] # To adj disc_rate periodicity for dur_match pf\r\n dt = 1 / tts_for_pf\r\n\r\n # computes duration match wtss\r\n\r\n wt_array = get_optimal_wts(mac_dur_liabilities, 
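get_optimal_wts, whose call continues just below, finds the duration-matching mix numerically with SLSQP. With only two bonds the same answer has the textbook closed form that also appears commented out further down; a sketch of that identity, since portfolio duration is linear in the weights:

```python
def duration_match_weights(d_liab, d_long, d_short):
    # Solve w_long * d_long + (1 - w_long) * d_short = d_liab for the weights.
    w_short = (d_long - d_liab) / (d_long - d_short)
    return 1.0 - w_short, w_short

# duration_match_weights(7.0, 10.0, 2.0) -> (0.625, 0.375); check: 0.625*10 + 0.375*2 == 7.0
```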
long_dur_bond, short_dur_bond, av, disc_rate, dt)\r\n wt_long_dur_bond = wt_array[0]\r\n wt_short_dur_bond = wt_array[1]\r\n # wt_short_dur_bond = (long_dur_bond[1] - mac_dur_liabilities) / (long_dur_bond[1] - short_dur_bond[1])\r\n # wt_long_dur_bond = 1-wt_short_dur_bond\r\n # wt_long_dur_bond = 1.0\r\n # wt_short_dur_bond = 1-wt_long_dur_bond\r\n alloc_long_dur_bond = av * wt_long_dur_bond\r\n alloc_short_dur_bond = av * wt_short_dur_bond\r\n n_long_dur_bond_match = alloc_long_dur_bond / long_dur_bond[0]\r\n n_short_dur_bond_match = alloc_short_dur_bond / short_dur_bond[0]\r\n n_long_bond_full = av / long_dur_bond[0]\r\n n_short_bond_full = av / short_dur_bond[0]\r\n dur_matched_bond_cf = pd.DataFrame(\r\n data=pd.concat([long_dur_bond[2] * n_long_dur_bond_match, short_dur_bond[2] * n_short_dur_bond_match]),\r\n columns=['cf'])\r\n dur_matched_bond_cf = dur_matched_bond_cf.groupby(dur_matched_bond_cf.index)['cf'].sum()\r\n dur_matched_bond_cf.index += 1\r\n long_bond_cf_full = long_dur_bond[2] * n_long_bond_full\r\n short_bond_cf_full = short_dur_bond[2] * n_short_bond_full\r\n disc_rate = disc_rate * dt\r\n pv_pf, mac_dur_pf = get_present_value(dur_matched_bond_cf, disc_rate)\r\n pv_pf = pv_pf[0]\r\n mac_dur_pf = mac_dur_pf[0] * dt\r\n if fr_change_sim:\r\n disc_rates = np.linspace(0, 0.1, 50)\r\n fr_long = []\r\n fr_short = []\r\n fr_match = []\r\n dr_list = []\r\n for dr in disc_rates:\r\n dr_list.append(dr)\r\n liab, dur_li = get_present_value(liabilities, dr)\r\n l_bond, dur_l = get_present_value(long_bond_cf_full, dr)\r\n s_bond, dur_s = get_present_value(short_bond_cf_full, dr)\r\n m_bond, dur_m = get_present_value(dur_matched_bond_cf, dr)\r\n fr_long.append(get_funding_ratio(liab[0], l_bond[0]))\r\n fr_short.append(get_funding_ratio(liab[0], s_bond[0]))\r\n fr_match.append(get_funding_ratio(liab[0], m_bond[0]))\r\n fr = pd.DataFrame({\r\n 'dr': dr_list,\r\n 'fr_long': fr_long,\r\n 'fr_short': fr_short,\r\n 'fr_match': fr_match,\r\n }).set_index(keys='dr')\r\n app = dash.Dash()\r\n data = [go.Scatter(x=fr.index,\r\n y=fr[col],\r\n mode='lines',\r\n name=col) for col in fr.columns]\r\n app.layout = html.Div([dcc.Graph(id='cfr', figure=dict(data=data))])\r\n app.run_server()\r\n return [wt_long_dur_bond, wt_short_dur_bond, mac_dur_pf, mac_dur_liabilities, long_dur_bond[1], short_dur_bond[1]]\r\n\r\n\r\ndef conv_to_short_rate(r):\r\n \"\"\"\r\n price relative = exp(t*sr) => ln(1+r)/t = sr (assumes t = 1)\r\n :param r: annualised interest rate\r\n :return: short rates\r\n \"\"\"\r\n return np.log1p(r)\r\n\r\n\r\ndef conv_to_annualised_rate(sr):\r\n \"\"\"\r\n exp(t*sr) - 1 = r (assumes t = 1)\r\n :param sr: short rate\r\n :return: annualised rate for a given short rate\r\n \"\"\"\r\n return np.expm1(sr)\r\n\r\n\r\ndef get_rates_gbm(rf, n_years, steps_per_yr, n_scenarios, volatility, a, b):\r\n dt = 1 / steps_per_yr\r\n b = conv_to_short_rate(b) # Since short rates are being modelled\r\n sr = conv_to_short_rate(rf)\r\n total_time_steps = int(n_years * steps_per_yr) + 1\r\n shock = np.random.normal(loc=0, scale=volatility * np.sqrt(dt), size=(total_time_steps, n_scenarios))\r\n rates = np.empty_like(shock)\r\n # For ZCB price generation\r\n # Formula - please refer cir1.png\r\n h = math.sqrt(a ** 2 + 2 * volatility ** 2)\r\n zcb = np.empty_like(shock)\r\n\r\n def price(ttm, rf):\r\n _A = ((2 * h * math.exp((h + a) * ttm / 2)) / (2 * h + (h + a) * (math.exp(h * ttm) - 1))) ** (\r\n 2 * a * b / volatility ** 2)\r\n _B = (2 * (math.exp(h * ttm) - 1)) / (2 * h + (h + a) * (math.exp(h * 
ttm) - 1))\r\n _P = _A * np.exp(-_B * rf)\r\n return _P\r\n\r\n zcb[0] = price(n_years, rf)\r\n\r\n rates[0] = sr\r\n for steps in range(1, total_time_steps):\r\n prev_rate = rates[steps - 1]\r\n drift = a * (b - prev_rate) * dt\r\n shock[steps] = shock[steps] * np.sqrt(prev_rate)\r\n dr = drift + shock[steps]\r\n rates[steps] = abs(prev_rate + dr)\r\n zcb[steps] = price(n_years - steps * dt, rates[steps])\r\n rates_gbm_df = pd.DataFrame(data=conv_to_annualised_rate(rates), index=range(total_time_steps))\r\n zcb_gbm_df = pd.DataFrame(data=zcb, index=range(total_time_steps))\r\n zcb_rets = zcb_gbm_df.pct_change().dropna()\r\n return rates_gbm_df, zcb_gbm_df, zcb_rets\r\n\r\n\r\ndef get_btr(rates_gbm_df, n_years, steps_per_yr, tenor, cr, fv, n_scenarios):\r\n cb_df, mac_dur_df, bond_cf = get_bond_gbm(rates_gbm_df, n_years, steps_per_yr, tenor, cr, fv)\r\n mac_dur_df = mac_dur_df / steps_per_yr\r\n bond_ann_ret = get_bond_tr(cb_df, bond_cf, n_scenarios)\r\n return bond_ann_ret, cb_df, mac_dur_df\r\n\r\n\r\ndef reshape_disc_rate(n_years, steps_per_year, n_scenarios, disc_rate):\r\n rates_df = pd.DataFrame(data=disc_rate, index=range(0, (n_years * steps_per_year + 1)),\r\n columns=range(0, n_scenarios))\r\n return rates_df\r\n\r\n\r\ndef get_bond_gbm(rates_gbm_df: pd.DataFrame, n_years, steps_per_yr, tenor=0, cr=0.05, fv=100):\r\n bond_cf = 0\r\n dt = 1 / steps_per_yr\r\n total_time_steps = int(n_years * steps_per_yr)\r\n n_scenarios = len(rates_gbm_df.columns)\r\n cb = np.repeat(0.0, (total_time_steps) * n_scenarios).reshape(total_time_steps, n_scenarios)\r\n mac_dur = np.empty_like(cb)\r\n # CB prices\r\n for step in range(0, total_time_steps):\r\n ttm = total_time_steps - step\r\n disc_rate = rates_gbm_df.loc[step]\r\n disc_rate = pd.DataFrame(np.asarray(pd.concat([disc_rate] * ttm, axis=0)).reshape(ttm, n_scenarios))\r\n cb[step], mac_dur[step], temp = get_bond_prices(n_years - step * dt, tenor - step * dt, steps_per_yr, disc_rate,\r\n cr, fv)\r\n if step == 0:\r\n bond_cf = temp\r\n cb_df = pd.DataFrame(cb)\r\n mac_dur_df = pd.DataFrame(mac_dur)\r\n cb_df = cb_df.append(cb_df.iloc[-1] * (rates_gbm_df.iloc[-2] * dt + 1), ignore_index=True)\r\n return cb_df, mac_dur_df, bond_cf\r\n\r\n\r\ndef get_bond_tr(cb_df, bond_cf, n_scenarios):\r\n print()\r\n if not len(cb_df.index) - len(bond_cf) == 1:\r\n dr_steps = cb_df.shape[0]\r\n cf_steps = bond_cf.shape[0]\r\n shortfall = cf_steps - dr_steps + 2\r\n bond_cf.drop(bond_cf.tail(shortfall).index, inplace=True)\r\n else:\r\n bond_cf.drop(bond_cf.tail(1).index, inplace=True)\r\n bond_cf.index += 1\r\n concat_cf = pd.concat([bond_cf] * n_scenarios, axis=1)\r\n concat_cf.loc[0] = 0\r\n concat_cf.loc[len(concat_cf.index)] = 0\r\n tcf_df = (cb_df + concat_cf)\r\n tr_df = (np.divide(tcf_df, cb_df.shift()) - 1).dropna()\r\n # bond_ann_ret = get_ann_return(tr_df)\r\n return tr_df\r\n\r\n\r\ndef plot_cir():\r\n app = dash.Dash()\r\n\r\n def upd_label(out_id, inp_id):\r\n @app.callback(Output(out_id, 'children'),\r\n [Input(inp_id, 'value')])\r\n def upd_(value):\r\n return value\r\n\r\n app.layout = html.Div([html.Div([html.Div([html.Label(children='Select Initial asset value: '),\r\n dcc.Slider(id='sl_av', min=0.10, max=1.5, step=0.05, value=0.75),\r\n html.Label(id='out_av')], style={'display': 'inline-block'}),\r\n html.Div([html.Label(children='Select Initial rf annualized: '),\r\n dcc.Slider(id='sl_rf', min=0.01, max=0.10, step=0.005, value=0.03),\r\n html.Label(id='out_rf')], style={'display': 'inline-block'}),\r\n 
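The price closure above is the closed-form Cox-Ingersoll-Ross zero-coupon bond price with h = sqrt(a^2 + 2*sigma^2). Pulled out as a standalone function for reference (argument names follow the surrounding code; the function name itself is just for illustration):

```python
import math

def cir_zcb_price(ttm, r, a, b, sigma):
    # a: mean-reversion speed, b: long-run short rate, sigma: rate volatility.
    h = math.sqrt(a ** 2 + 2 * sigma ** 2)
    A = ((2 * h * math.exp((h + a) * ttm / 2)) /
         (2 * h + (h + a) * (math.exp(h * ttm) - 1))) ** (2 * a * b / sigma ** 2)
    B = (2 * (math.exp(h * ttm) - 1)) / (2 * h + (h + a) * (math.exp(h * ttm) - 1))
    return A * math.exp(-B * r)

# Matches how zcb[0] is seeded above, e.g. cir_zcb_price(10, 0.03, 0.5, 0.0296, 0.15)
```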
html.Div([html.Label(children='Select expected LT RF: '),\r\n dcc.Slider(id='sl_ltrf', min=0.01, max=0.10, step=0.005, value=0.03),\r\n html.Label(id='out_ltrf')], style={'display': 'inline-block'}),\r\n html.Div([html.Label(children='Select speed of MR: '),\r\n dcc.Slider(id='sl_speed', min=0.2, max=1, step=0.05, value=0.5),\r\n html.Label(id='out_speed')], style={'display': 'inline-block'}),\r\n html.Div([html.Label(children='Select volatility: '),\r\n dcc.Slider(id='sl_vola', min=0, max=1, step=0.05, value=0.15),\r\n html.Label(id='out_vola')], style={'display': 'inline-block'}),\r\n html.Button(id='sub_cir', children='SUBMIT', n_clicks=0,\r\n style={'display': 'inline-block'})],\r\n style={'display': 'flex', 'justify-content': 'space-evenly'}),\r\n html.Div([html.Div([html.Label(children='Select N-Periods: '),\r\n dcc.Slider(id='sl_periods', min=1, max=20, step=1, value=10),\r\n html.Label(id='out_periods')], style={'display': 'inline-block'}),\r\n html.Div([html.Label(children='Select steps_per_yr: '),\r\n dcc.Slider(id='sl_stperyr', min=1, max=10000, step=1, value=12),\r\n html.Label(id='out_stperyr')], style={'display': 'inline-block'}),\r\n html.Div([html.Label(children='Select N-Scenarios: '),\r\n dcc.Slider(id='sl_scenarios', min=2, max=250, step=1, value=10),\r\n html.Label(id='out_scenarios')], style={'display': 'inline-block'})\r\n ], style={'display': 'flex', 'justify-content': 'space-evenly',\r\n 'padding-top': '25px'}),\r\n html.Div([dcc.Graph(id='cir')]),\r\n html.Div([dcc.Graph(id='hist_tfr')])])\r\n\r\n upd_label('out_av', 'sl_av')\r\n upd_label('out_rf', 'sl_rf')\r\n upd_label('out_ltrf', 'sl_ltrf')\r\n upd_label('out_speed', 'sl_speed')\r\n upd_label('out_vola', 'sl_vola')\r\n upd_label('out_periods', 'sl_periods')\r\n upd_label('out_stperyr', 'sl_stperyr')\r\n upd_label('out_scenarios', 'sl_scenarios')\r\n\r\n @app.callback([Output('cir', 'figure'),\r\n Output('hist_tfr', 'figure')],\r\n [Input('sub_cir', 'n_clicks')],\r\n [State('sl_rf', 'value'),\r\n State('sl_periods', 'value'),\r\n State('sl_stperyr', 'value'),\r\n State('sl_scenarios', 'value'),\r\n State('sl_vola', 'value'),\r\n State('sl_speed', 'value'),\r\n State('sl_ltrf', 'value'),\r\n State('sl_av', 'value')])\r\n def upd_cir(n_clicks, rf, n_years, steps_per_yr, n_scenarios, volatility, a, b, av):\r\n def get_scatter_points(df: pd.DataFrame):\r\n return df.aggregate(lambda scenario: go.Scatter(x=scenario.index, y=scenario)).tolist()\r\n\r\n tenor = n_years\r\n rates_gbm_df, zcb_gbm_df, zcb_rets = get_rates_gbm(rf, n_years, steps_per_yr, n_scenarios, volatility, a, b)\r\n liabilities = zcb_gbm_df # Assuming same liab as that of ZCB\r\n cb_df, mac_dur_df, bond_cf = get_bond_gbm(rates_gbm_df, n_years=n_years, steps_per_yr=steps_per_yr, tenor=tenor)\r\n\r\n # Investments in ZCB at T0\r\n n_bonds = av / zcb_gbm_df.loc[0, 0]\r\n av_zcb_df = n_bonds * zcb_gbm_df\r\n # fr_zcb = (av_zcb_df/liabilities).round(decimals=6)\r\n fr_zcb = get_funding_ratio(liabilities, av_zcb_df).round(decimals=6)\r\n fr_zcb_df = fr_zcb.pct_change().dropna()\r\n\r\n # Cash investments cumprod\r\n fd_rates = rates_gbm_df.apply(lambda x: x / steps_per_yr)\r\n av_cash_df = drawdown(fd_rates, retrive_index=True, init_wealth=av, is1p=False)\r\n # fr_cash = av_cash_df/liabilities\r\n fr_cash = get_funding_ratio(liabilities, av_cash_df)\r\n fr_cash_df = fr_cash.pct_change().dropna()\r\n\r\n fig = make_subplots(rows=4, cols=2, shared_xaxes=True, specs=[[{}, {}],\r\n [{}, {}],\r\n [{}, {}],\r\n [{}, {}]], subplot_titles=(\r\n \"CIR model of 
Interest rates\", \"ZCB Prices based on CIR\", \"CB Prices based on CIR\",\r\n \"CB Mac Dur\", \"Cash invested in FD with rolling maturity\",\r\n \" {:.4f} ZCB investments at T=0\".format(n_bonds), \"Funding Ratio %ch-Cash\",\r\n \"Funding Ratio %ch-ZCB\"))\r\n rates_gbm = get_scatter_points(rates_gbm_df)\r\n zcb_gbm = get_scatter_points(zcb_gbm_df)\r\n cb_gbm = get_scatter_points(cb_df)\r\n cb_mac_dur_gbm = get_scatter_points(mac_dur_df)\r\n av_zcb_gbm = get_scatter_points(av_zcb_df)\r\n av_cash_gbm = get_scatter_points(av_cash_df)\r\n fr_cash_gbm = get_scatter_points(fr_cash_df)\r\n fr_zcb_gbm = get_scatter_points(fr_zcb_df)\r\n tfr_cash_hist = fr_cash.iloc[-1].tolist()\r\n tfr_zcb_hist = fr_zcb.iloc[-1].loc[0] # since all are same\r\n\r\n for rates_data in rates_gbm:\r\n fig.add_trace(rates_data, row=1, col=1)\r\n for zcb_price in zcb_gbm:\r\n fig.add_trace(zcb_price, row=1, col=2)\r\n for cb_price in cb_gbm:\r\n fig.add_trace(cb_price, row=2, col=1)\r\n for cb_mac_dur in cb_mac_dur_gbm:\r\n fig.add_trace(cb_mac_dur, row=2, col=2)\r\n for av_cash in av_cash_gbm:\r\n fig.add_trace(av_cash, row=3, col=1)\r\n for av_zcb in av_zcb_gbm:\r\n fig.add_trace(av_zcb, row=3, col=2)\r\n for fr_cash in fr_cash_gbm:\r\n fig.add_trace(fr_cash, row=4, col=1)\r\n for fr_zcb in fr_zcb_gbm:\r\n fig.add_trace(fr_zcb, row=4, col=2)\r\n\r\n b = conv_to_annualised_rate(b)\r\n mrl = [dict(type='line', xref='x1', yref='y1', x0=0, x1=n_years * steps_per_yr, y0=b, y1=b,\r\n name='Mean Reverting Level',\r\n line=dict(dash='dashdot', width=5))]\r\n fig.update_xaxes(matches='x')\r\n fig.update_layout(showlegend=False,\r\n height=1000,\r\n hovermode='closest',\r\n shapes=mrl)\r\n tfr_zcb = [\r\n dict(type='line', xref='x1', yref='paper', y0=0, y1=1, x0=tfr_zcb_hist, x1=tfr_zcb_hist, name='tfr-zcb',\r\n line=dict(dash='dashdot', width=5))]\r\n tfr_cash_distplot = ff.create_distplot(hist_data=[tfr_cash_hist], group_labels=[\"tfr_cash\"], show_hist=False)\r\n tfr_cash_distplot.update_layout(shapes=tfr_zcb, hovermode='closest')\r\n return fig, tfr_cash_distplot\r\n\r\n app.run_server()\r\n\r\n\r\ndef bt_mix(r1: pd.DataFrame, r2: pd.DataFrame, allocator, **kwargs):\r\n if not r1.shape == r2.shape:\r\n raise ValueError(\"Returns need to be of same shape\")\r\n wt_r1 = allocator(r1, r2, **kwargs)\r\n if not wt_r1.shape == r1.shape:\r\n raise ValueError(\"Use a compatible allocator\")\r\n return wt_r1 * r1 + (1 - wt_r1) * r2\r\n\r\n\r\ndef fixed_mix_allocator(r1: pd.DataFrame, r2: pd.DataFrame, wt_r1):\r\n return pd.DataFrame(data=wt_r1, index=r1.index, columns=r1.columns)\r\n\r\n\r\ndef glide_path_allocator(r1: pd.DataFrame, r2: pd.DataFrame, wt_start=0.8, wt_end=0.2):\r\n n_points = r1.shape[0]\r\n n_scenarios = r1.shape[1]\r\n wt_r1 = pd.Series(np.linspace(wt_start, wt_end, n_points))\r\n wt_r1 = pd.concat([wt_r1] * n_scenarios, axis=1)\r\n return wt_r1\r\n\r\n\r\ndef floor_allocator(r1: pd.DataFrame, r2: pd.DataFrame, floor, zcb_prices: pd.DataFrame, m=3, max_dd_mode=False):\r\n zcb_prices = zcb_prices.drop(index=0).reindex()\r\n if not r1.shape == r2.shape:\r\n raise ValueError(\"Non-Compatible rets dataframe\")\r\n wt_r1 = pd.DataFrame().reindex_like(r1)\r\n total_time_steps, n_scenarios = r1.shape\r\n pf_value = np.repeat(1, n_scenarios)\r\n floor_value = np.repeat(1, n_scenarios)\r\n peak_value = np.repeat(1, n_scenarios)\r\n for step in range(0, total_time_steps):\r\n if max_dd_mode:\r\n peak_value = np.maximum(peak_value, pf_value)\r\n floor_value = floor * peak_value\r\n else:\r\n floor_value = floor * 
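The floor_allocator logic here is a CPPI rule: the risky weight is the multiplier m times the cushion, clipped to [0, 1]. One update step of its loop, restated with scalar inputs for clarity:

```python
def cppi_step(pf_value, floor_value, r_risky, r_safe, m=3):
    cushion = (pf_value - floor_value) / pf_value   # share of wealth above the floor
    w_risky = min(max(m * cushion, 0.0), 1.0)       # the .clip(0, 1) in floor_allocator
    pf_ret = w_risky * r_risky + (1 - w_risky) * r_safe
    return pf_value * (1 + pf_ret), w_risky

# cppi_step(1.0, 0.8, r_risky=-0.05, r_safe=0.002) -> (about 0.9708, 0.6)
```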
zcb_prices.iloc[step]\r\n cushion = (pf_value - floor_value) / pf_value\r\n wt1 = (cushion * m).clip(0, 1)\r\n pf_ret = wt1 * r1.iloc[step] + (1 - wt1) * r2.iloc[step]\r\n pf_value = (1 + pf_ret) * pf_value\r\n wt_r1.iloc[step] = wt1\r\n return wt_r1\r\n\r\n\r\ndef distplot_terminal_paths(floor_factor, **kwargs):\r\n app = dash.Dash()\r\n terminal_paths = []\r\n pf_type = []\r\n stats = []\r\n for key, value in kwargs.items():\r\n pf_type.append(key)\r\n terminal_paths.append(value.tolist())\r\n stats.append(terminal_risk_stats(fv=1, floor_factor=floor_factor, wealth_index=value, aslst=True, strategy=key))\r\n stats = pd.DataFrame(data=stats, columns=[\"strategy\", 'Exp_wealth', \"Exp_Volatility\", \"Med_Wealth\", \"#_violations\",\r\n \"p_violations\", \"CVaR\"])\r\n floor = [dict(type='line', xref='x1', yref='paper', y0=0, y1=1, x0=floor_factor, x1=floor_factor, name='floor',\r\n line=dict(dash='dashdot', width=5))]\r\n fig = ff.create_distplot(hist_data=terminal_paths, group_labels=pf_type, show_hist=False)\r\n fig.update_layout(shapes=floor)\r\n app.layout = html.Div([html.Div([dcc.Graph(id='terminal_dist_plot', figure=fig)]),\r\n html.Div([dt.DataTable(id='risk-stats',\r\n columns=[{'name': col,\r\n 'id': col} for col in stats.columns],\r\n data=stats.to_dict('records'))])\r\n ])\r\n app.run_server()\r\n\r\n\r\ndef get_options_cv(elasticnet=False):\r\n if elasticnet:\r\n params = {\r\n 'max_lamda': 0.25,\r\n 'n_lamdas': 20,\r\n 'max_l1_ratio': 0.99,\r\n 'n_l1-ratio': 50,\r\n 'k_folds': 10,\r\n 'randomseed': 7777\r\n }\r\n else:\r\n params = {\r\n 'max_lamda': 0.25,\r\n 'n_lamdas': 100,\r\n 'k_folds': 10,\r\n 'randomseed': 7777\r\n }\r\n return params\r\n\r\ndef regress(dependent_var: pd.DataFrame, explanatory_var: pd.DataFrame, start_period=None, end_period=None,\r\n intercept=True, excess_mkt=True, rfcol='RF', method='ols', lamda=0.1, C=0.1, penalty='l1'):\r\n \"\"\"\r\n Runs a linear regression to decompose the dependent variable into the explanatory variables\r\n returns an object of type statsmodel's RegressionResults on which you can call\r\n .summary() to print a full summary\r\n .params for the coefficients\r\n .tvalues and .pvalues for the significance levels\r\n .rsquared_adj and .rsquared for quality of fit\r\n NOTE: SKLearn calles Lambda Alpha. 
Also, it uses a scaled version of LASSO argument, so here I scale when converting lambda to alpha\r\n \"\"\"\r\n if isinstance(dependent_var, pd.Series):\r\n dependent_var = pd.DataFrame(dependent_var)\r\n dependent_var = dependent_var.loc[start_period:end_period]\r\n explanatory_var = explanatory_var.loc[start_period:end_period]\r\n if excess_mkt:\r\n dependent_var = dependent_var - explanatory_var.loc[:, [rfcol]].values\r\n explanatory_var = explanatory_var.drop([rfcol], axis=1)\r\n if method == 'ols':\r\n if intercept:\r\n explanatory_var['Alpha'] = 1\r\n regression_result = sm.OLS(dependent_var, explanatory_var).fit()\r\n return regression_result\r\n elif method == 'lasso':\r\n alpha = lamda / (2*dependent_var.shape[0])\r\n sk_lasso = Lasso(alpha=alpha, fit_intercept=intercept).fit(X=explanatory_var, y=dependent_var)\r\n print_sklearn_results(method=method, intercept=sk_lasso.intercept_, coeff=sk_lasso.coef_, explanatory_df=explanatory_var, dependent_df=dependent_var, alpha=alpha, lamda=lamda)\r\n return sk_lasso\r\n elif method == 'ridge':\r\n alpha = lamda\r\n sk_ridge = Ridge(alpha=alpha, fit_intercept=intercept).fit(X=explanatory_var, y=dependent_var)\r\n print_sklearn_results(method=method, intercept=sk_ridge.intercept_, coeff=sk_ridge.coef_, explanatory_df=explanatory_var, dependent_df=dependent_var, alpha=alpha, lamda=lamda)\r\n return sk_ridge\r\n elif method == 'cv_lasso':\r\n params = get_options_cv()\r\n max_alpha = params['max_lamda'] / (2*dependent_var.shape[0])\r\n alphas = np.linspace(1e-6, max_alpha, params['n_lamdas'])\r\n parameters = {'alpha': alphas}\r\n lasso = Lasso(fit_intercept=True, random_state=params['randomseed'])\r\n cv_lasso = GridSearchCV(lasso, parameters, cv=params['k_folds'], refit=True)\r\n cv_lasso = cv_lasso.fit(X=explanatory_var, y=dependent_var)\r\n lasso_best = cv_lasso.best_estimator_\r\n alpha_best = cv_lasso.best_params_['alpha']\r\n lamda_best = alpha_best * 2 * dependent_var.shape[0]\r\n print('Max_alpha is : {}'.format(max_alpha))\r\n print_sklearn_results(method=method, intercept=lasso_best.intercept_, coeff=lasso_best.coef_,\r\n explanatory_df=explanatory_var, dependent_df=dependent_var, alpha=alpha_best, lamda=lamda_best)\r\n return cv_lasso\r\n elif method == 'cv_elasticnet':\r\n params = get_options_cv(elasticnet=True)\r\n max_alpha = params['max_lamda'] / (2 * dependent_var.shape[0])\r\n alphas = np.linspace(1e-6, max_alpha, params['n_lamdas'])\r\n max_l1_ratio = params['max_l1_ratio']\r\n l1_ratios = np.linspace(1e-6, max_l1_ratio, params['n_l1-ratio'])\r\n parameters = {'alpha': alphas, 'l1_ratio': l1_ratios}\r\n elasticnet = ElasticNet(fit_intercept=True, random_state=params['randomseed'])\r\n cv_elasticnet = GridSearchCV(elasticnet, parameters, cv=params['k_folds'], refit=True)\r\n cv_elasticnet = cv_elasticnet.fit(X=explanatory_var, y=dependent_var)\r\n elastic_best = cv_elasticnet.best_estimator_\r\n alpha_best = cv_elasticnet.best_params_['alpha']\r\n l1_ratio_best = cv_elasticnet.best_params_['l1_ratio']\r\n lasso_lamda_best = alpha_best * 2 * dependent_var.shape[0] * l1_ratio_best\r\n ridge_lambda_best = alpha_best * dependent_var.shape[0] * (1 - l1_ratio_best)\r\n msg = '''\r\n Best L1 ratio is : {}\r\n Best Lasso_Lambda is : {} \r\n Best Ridge_Lambda is : {}\r\n '''.format(l1_ratio_best, lasso_lamda_best, ridge_lambda_best)\r\n print(msg)\r\n print_sklearn_results(method=method, intercept=elastic_best.intercept_, coeff=elastic_best.coef_,\r\n explanatory_df=explanatory_var, dependent_df=dependent_var, alpha=alpha_best,\r\n 
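The alpha = lamda / (2 * n) conversion used in the Lasso branches above follows from sklearn minimising (1/(2n)) * ||y - Xw||^2 + alpha * ||w||_1, whereas the lambda convention here penalises the unscaled squared error. A quick numeric check of the round trip that the cv_lasso branch performs:

```python
n_samples, lamda = 120, 0.25
alpha = lamda / (2 * n_samples)       # forward conversion, as in the 'lasso' branch
print(alpha)                          # 0.25 / 240, about 0.00104
lamda_back = alpha * 2 * n_samples    # inverse, as in the 'cv_lasso' branch
assert abs(lamda_back - lamda) < 1e-12
```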
lamda=lasso_lamda_best)\r\n return cv_elasticnet\r\n elif method == 'cv_log_regression':\r\n dependent_var = dependent_var['Label'].to_numpy()\r\n scoring = \"roc_auc\"\r\n kfolds = TimeSeriesSplit(n_splits=3)\r\n # Create regularization hyperparameter space - lower values strong regularisation\r\n C = np.reciprocal([0.00000001, 0.00000005, 0.0000001, 0.0000005, 0.000001, 0.000005, 0.00001, 0.00005,\r\n 0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100, 500, 1000, 5000])\r\n hyperparameters = dict(C=C)\r\n lr_l1 = LogisticRegression(max_iter=10000, penalty=penalty, solver='saga')\r\n log_regression_l1_best = GridSearchCV(estimator=lr_l1, param_grid=hyperparameters, cv=kfolds, scoring=scoring).fit(X=explanatory_var, y=dependent_var).best_estimator_\r\n return log_regression_l1_best\r\n elif method == 'log_regression':\r\n lr_l1 = LogisticRegression(max_iter=10000, C=C, penalty=penalty, solver='saga').fit(X=explanatory_var, y=dependent_var)\r\n return lr_l1\r\n return None\r\n\r\n\r\ndef print_sklearn_results(method, intercept, coeff, explanatory_df, dependent_df, alpha, lamda):\r\n period = str(explanatory_df.index[0]) + ' - ' + str(explanatory_df.index[-1])\r\n desc = '''\r\n Regression method is {}\r\n Time period is {}\r\n Alpha is {}\r\n Lambda is {}\r\n '''.format(method, period, alpha, lamda)\r\n print(desc)\r\n factor_names = ['intercept'] + list(explanatory_df.columns)\r\n loadings = np.insert(coeff, 0, intercept)\r\n loadings = pd.DataFrame(loadings, index=factor_names, columns=[method]).transpose()\r\n print(loadings)\r\n\r\n\r\ndef tracking_error(act_rets, exp_rets):\r\n act_rets.columns = [0]\r\n err = act_rets - exp_rets\r\n sqderr = (err ** 2).sum()\r\n return np.sqrt(sqderr)\r\n\r\n\r\ndef pf_tracking_error(weights, actual_rets, bm_rets):\r\n exp_rets = pd.DataFrame(data=(weights * bm_rets).sum(axis=1))\r\n return tracking_error(actual_rets, exp_rets)\r\n\r\n\r\ndef style_analyze(dependent_var: pd.DataFrame, explanatory_var: pd.DataFrame, start_period=None, end_period=None,\r\n droprf=False, rfcol='RF'):\r\n if isinstance(dependent_var, pd.Series):\r\n dependent_var = pd.DataFrame(dependent_var)\r\n dependent_var = dependent_var.loc[start_period:end_period]\r\n explanatory_var = explanatory_var.loc[start_period:end_period]\r\n if droprf:\r\n explanatory_var = explanatory_var.drop([rfcol], axis=1)\r\n n_expl_var = explanatory_var.shape[1]\r\n init_guess = np.repeat(1 / n_expl_var, n_expl_var)\r\n bounds = Bounds(lb=0.0, ub=1.0)\r\n wts_sum_to_1 = {\r\n 'type': 'eq',\r\n 'fun': lambda wts: np.sum(wts) - 1\r\n }\r\n result = minimize(fun=pf_tracking_error,\r\n args=(dependent_var, explanatory_var),\r\n bounds=bounds,\r\n constraints=[wts_sum_to_1],\r\n method='SLSQP',\r\n options={'disp': False},\r\n x0=init_guess)\r\n weights = pd.Series(data=result.x, index=explanatory_var.columns)\r\n return weights\r\n\r\n\r\n# WEIGHTING OPTIMIZERS - BACKTEST WEIGHTING SCHEMES\r\n\r\ndef weight_ew(r, cap_wts=None, max_cw_mult=None, microcap_threshold=None, **kwargs):\r\n n_components = len(r.columns)\r\n weights = np.repeat(1 / n_components, n_components)\r\n weights = pd.Series(weights, index=r.columns)\r\n if cap_wts is not None:\r\n cap_weights = cap_wts.loc[r.index[0]] # Cap wts as at begining of a window\r\n if microcap_threshold is not None and microcap_threshold > 0:\r\n drop_microcap_mask = cap_weights < microcap_threshold\r\n weights[drop_microcap_mask] = 0\r\n weights = weights / weights.sum() # Recomputes weights\r\n if max_cw_mult is not None and max_cw_mult > 
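tracking_error and pf_tracking_error above give style_analyze its objective: the root of the summed squared differences between the fund's returns and a weighted benchmark mix. A tiny numeric illustration with made-up return series:

```python
import numpy as np
import pandas as pd

act = pd.Series([0.010, -0.020, 0.015])   # fund returns (invented)
exp = pd.Series([0.008, -0.018, 0.012])   # replicated benchmark returns (invented)
te = np.sqrt(((act - exp) ** 2).sum())
print(round(te, 6))                       # 0.004123
```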
0:\r\n weights = np.minimum(weights, cap_weights * max_cw_mult)\r\n weights = weights / weights.sum() # Recomputes weights\r\n return weights\r\n\r\n\r\ndef weight_custom(r, cust_wts):\r\n return cust_wts\r\n\r\n\r\ndef weight_cw(r, cap_wts, **kwargs):\r\n cap_wts = cap_wts.loc[r.index[0]] # Because for a rolling period, i would create PF using 1st available market weights for such window.\r\n return cap_wts\r\n\r\n\r\ndef sample_cov(r, **kwargs):\r\n return r.cov()\r\n\r\n\r\ndef cc_cov(r, **kwargs):\r\n \"\"\"\r\n Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model\r\n Average of sample correlation is used to find const_cov\r\n \"\"\"\r\n sample_corr = r.corr()\r\n n_assets = len(r.columns)\r\n avg_distinct_rho = (sample_corr.values.sum() - n_assets) / (\r\n n_assets * (n_assets - 1)) # Taking avg of off diagonal corr matrix on one side\r\n const_corr = np.full_like(sample_corr, avg_distinct_rho)\r\n np.fill_diagonal(const_corr, 1.)\r\n sd = r.std()\r\n # Convert to cov using statsmodel\r\n const_cov_sm = mh.corr2cov(const_corr, sd)\r\n # Convert to cov using formula and outer product - alternate way is to use sd @ sd.T instead of np.outer(sd, sd) -> yields matrix(mxm)\r\n const_cov = const_corr * np.outer(sd, sd)\r\n return pd.DataFrame(const_cov, columns=r.columns, index=r.columns)\r\n\r\n\r\ndef stat_shrinkage_cov(r, delta=0.5, **kwargs):\r\n s_cov = sample_cov(r, **kwargs)\r\n c_cov = cc_cov(r, **kwargs)\r\n stat_cov = delta * c_cov + (1 - delta) * s_cov\r\n return stat_cov\r\n\r\n\r\ndef weight_gmv(r, cov_estimator=sample_cov, **kwargs):\r\n cov_df = cov_estimator(r, **kwargs)\r\n wts = get_gmv(ret_series=r, cov_df=cov_df, rf=0.03, reqd_strategies=None, onlywts=True)\r\n return wts\r\n\r\n\r\ndef weight_erc(r, cov_estimator=sample_cov, **kwargs):\r\n cov_df = cov_estimator(r, **kwargs)\r\n wts = equal_risk_contrib(cov_df)\r\n return wts\r\n\r\n\r\ndef bt_roll(r, window, weighting_scheme, **kwargs):\r\n total_periods = len(r.index)\r\n windows = [(start, start + window) for start in range(total_periods - window)]\r\n weights = [weighting_scheme(r.iloc[win[0]:win[1]], **kwargs) for win in windows]\r\n # Convert from list of weights to dataframe with sectors along columns and index begining after first rolling period, so that it aligns with returns df\r\n weights = pd.DataFrame(weights, columns=r.columns, index=r.iloc[window:].index)\r\n returns = (weights * r).sum(axis=1, min_count=1)\r\n return returns\r\n\r\n\r\ndef as_colvec(x):\r\n if np.ndim(x) == 2:\r\n return x\r\n else:\r\n return np.expand_dims(x, axis=1)\r\n\r\n\r\ndef rev_opt_implied_returns(delta, sigma_prior: pd.DataFrame, wts_prior: pd.Series):\r\n \"\"\"\r\n Obtain the implied expected returns by reverse engineering the weights\r\n Inputs:\r\n delta: Risk Aversion Coefficient (scalar)\r\n sigma: Variance-Covariance Matrix (N x N) as DataFrame\r\n w: Portfolio weights (N x 1) as Series\r\n Returns an N x 1 vector of Returns as Series\r\n \"\"\"\r\n pi = (delta * sigma_prior @ wts_prior).squeeze() # @ may be used instead of dot, but some issues arise if a df is not passed\r\n pi.name = 'Implied Returns'\r\n return pi\r\n\r\n\r\ndef omega_proportional_prior(sigma_prior: pd.DataFrame, tau, p: pd.DataFrame):\r\n \"\"\"\r\n As we noted previously, \\cite{he1999intuition} suggest that if the investor does not have a specific way to explicitly\r\n quantify the uncertaintly associated with the view in the Ω matrix, one could make the simplifying assumption\r\n that Ω is proportional to the variance 
of the prior.\r\n\r\n Returns the He-Litterman simplified Omega\r\n Inputs:\r\n sigma: N x N Covariance Matrix as DataFrame\r\n tau: a scalar\r\n p: a K x N DataFrame linking Q and Assets\r\n returns a K x K diagonal DataFrame, a Matrix representing Prior Uncertainties - Omega\r\n \"\"\"\r\n scaled_sigma_prior = (tau * sigma_prior).to_numpy()\r\n helit_omega_matrix_kxk = p.to_numpy() @ scaled_sigma_prior @ p.T.to_numpy()\r\n helit_omega_diag_values = np.diag(helit_omega_matrix_kxk)\r\n # Make a diag matrix from the diag elements of Omega\r\n return pd.DataFrame(np.diag(helit_omega_diag_values), columns=p.index, index=p.index)\r\n\r\n\r\ndef black_litterman(wts_prior: pd.Series, sigma_prior: pd.DataFrame, p: pd.DataFrame, q: pd.Series, omega=None,\r\n delta=2.5, tau=0.02):\r\n \"\"\"\r\n\r\n :param wts_prior: N x 1 col vector\r\n :param sigma_prior: N x N cov matrix\r\n :param p: K x N view portfolio - associating views with assets\r\n :param q: K x 1 col vector representing views\r\n :param omega: Uncertainty around views. If none - omega - proportional prior\r\n :param delta: Risk aversion factor\r\n :param tau: Uncertainty factor scaling sigma_prior\r\n :return: posterior returns and cov based on black litterman formula\r\n \"\"\"\r\n if omega is None:\r\n omega = omega_proportional_prior(sigma_prior, tau, p).to_numpy()\r\n p = p.to_numpy()\r\n q = q.to_numpy()\r\n n_assets = wts_prior.shape[0]\r\n k_views = q.shape[0]\r\n # Get implied pi\r\n pi = rev_opt_implied_returns(delta, sigma_prior, wts_prior).to_numpy()\r\n # Scaled sigma_prior\r\n sigma_prior = sigma_prior.to_numpy()\r\n scaled_sigma_prior = (tau * sigma_prior)\r\n common_factor = scaled_sigma_prior @ p.T @ inv(p @ scaled_sigma_prior @ p.T + omega)\r\n bl_mu = pi + common_factor @ (q - (p @ pi))\r\n bl_sigma = sigma_prior + scaled_sigma_prior - common_factor @ p @ scaled_sigma_prior\r\n return bl_mu, bl_sigma, omega\r\n\r\n\r\ndef get_inverse_df(df: pd.DataFrame):\r\n return pd.DataFrame(inv(df), index=df.columns, columns=df.index)\r\n\r\n\r\ndef w_msr_closed_form(sigma: pd.DataFrame, mu: pd.Series, scale=True):\r\n \"\"\"\r\n\r\n :param sigma: N x N cov mat\r\n :param mu: N x 1 expected return col vector\r\n :param scale: to give % of wt and it assumes all wts are +ve\r\n :return: wts_msr\r\n \"\"\"\r\n wts_msr = inv(sigma) @ mu\r\n if scale:\r\n wts_msr = wts_msr / wts_msr.sum()\r\n return wts_msr\r\n\r\n\r\ndef get_optimal_wts(sigma_prior: pd.DataFrame, bl_sigma: pd.DataFrame, pi: pd.Series, bl_mu: pd.Series, delta, tau, wts_he=True, scale=True):\r\n if wts_he:\r\n prior_wts_equil = ((inv(sigma_prior) @ pi) / delta) / (1 + tau)\r\n posterior_wts_optimal = (inv(bl_sigma) @ bl_mu) / delta\r\n else:\r\n prior_wts_equil = ((inv(sigma_prior) @ pi) / delta)\r\n posterior_wts_optimal = (inv(bl_sigma) @ bl_mu)\r\n if scale:\r\n posterior_wts_optimal = posterior_wts_optimal / posterior_wts_optimal.sum()\r\n wts_diff = posterior_wts_optimal - prior_wts_equil\r\n return prior_wts_equil, posterior_wts_optimal, wts_diff\r\n\r\n\r\ndef get_risk_contrib(weights, cov):\r\n marginal_contrib = cov @ weights\r\n pf_var = get_pf_vol(weights, cov) ** 2\r\n indiv_risk_contrib = (marginal_contrib * weights) / pf_var\r\n return indiv_risk_contrib\r\n\r\n\r\ndef target_risk_contrib(target_risk, cov):\r\n n_assets = cov.shape[0]\r\n init_guess = np.repeat(1/n_assets, n_assets)\r\n bounds = Bounds(lb=0.0, ub=1.0)\r\n wts_sum_to_1 = {\r\n 'type': 'eq',\r\n 'fun': lambda wts: np.sum(wts) - 1\r\n }\r\n\r\n def min_sq_deviation(weights, cov, 
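To make the black_litterman flow above concrete, here is a tiny two-asset example with one absolute view; the numbers are invented. The implied equilibrium returns come out to [0.07, 0.105], and the 5% view on A pulls the posterior to about [0.060, 0.1025]:

```python
import numpy as np
import pandas as pd

sigma = pd.DataFrame([[0.04, 0.01], [0.01, 0.09]], index=['A', 'B'], columns=['A', 'B'])
w_eq = pd.Series([0.6, 0.4], index=['A', 'B'])      # market-cap prior weights
p = pd.DataFrame([[1.0, 0.0]], columns=['A', 'B'])  # one view, on asset A alone
q = pd.Series([0.05])                               # the view: A returns 5%

bl_mu, bl_sigma, omega = black_litterman(w_eq, sigma, p, q, delta=2.5, tau=0.02)
print(np.round(bl_mu, 4))                           # about [0.06, 0.1025]
```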
target_risk):\r\n indiv_risk_contrib = get_risk_contrib(weights, cov)\r\n err = indiv_risk_contrib - target_risk\r\n return err.T @ err\r\n\r\n results = minimize(fun=min_sq_deviation,\r\n args=(cov, target_risk),\r\n x0=init_guess,\r\n bounds=bounds,\r\n method='SLSQP',\r\n options={'disp':False},\r\n constraints=[wts_sum_to_1])\r\n weights = results.x\r\n return weights\r\n\r\n\r\ndef equal_risk_contrib(cov):\r\n n_assets = cov.shape[0]\r\n target_risk = np.repeat(1/n_assets, n_assets)\r\n weights = target_risk_contrib(target_risk, cov)\r\n return weights\r\n\r\n\r\ndef get_wealth_index_risk(btr: dict):\r\n btr_df = pd.DataFrame(btr)\r\n btr_df.dropna(inplace=True)\r\n btr_wealth_index = pd.DataFrame({key+'_wealth_index': drawdown(btr[key], retrive_index=True, is1p=False) for key in btr_df.columns})\r\n wealth_data_plots=[go.Scatter(x=btr_wealth_index.index.to_timestamp(),\r\n y=btr_wealth_index[str],\r\n name=str) for str in btr_wealth_index.columns]\r\n return wealth_data_plots, risk_info(btr_df)\r\n\r\n\r\ndef split_regime_returns(wealth_data, reqd_assets, regime_select):\r\n wealth_data[reqd_assets] = wealth_data[reqd_assets].pct_change()\r\n wealth_data.dropna(how='any', inplace=True, axis=0)\r\n asset_returns = wealth_data[reqd_assets]\r\n growth_regime_mask = wealth_data[regime_select] == 1\r\n crash_regime_mask = wealth_data[regime_select] == -1\r\n asset_returns_growth_regime, asset_returns_crash_regime = wealth_data.loc[growth_regime_mask, reqd_assets], wealth_data.loc[crash_regime_mask, reqd_assets]\r\n return asset_returns, asset_returns_growth_regime, asset_returns_crash_regime\r\n\r\n\r\ndef QQ_plot(rets_data, reqd_assets:list):\r\n qq_scatter_data = [go.Scatter(x=qqplot(rets_data[asset], line='s').gca().lines[0].get_xdata(),\r\n y=qqplot(rets_data[asset], line='s').gca().lines[0].get_ydata(),\r\n mode='markers',\r\n name=asset) for asset in reqd_assets]\r\n qq_scatter_opt_data = [go.Scatter(x=qqplot(rets_data[asset], line='s').gca().lines[1].get_xdata(),\r\n y=qqplot(rets_data[asset], line='s').gca().lines[1].get_ydata(),\r\n mode='lines',\r\n name=asset) for asset in reqd_assets]\r\n qq_scatter_data.extend(qq_scatter_opt_data)\r\n qq_layout = go.Layout(title='QQ-Plots',\r\n xaxis=dict(title='Idealised Quantiles'),\r\n yaxis=dict(title='Actual Quantiles'),\r\n showlegend=True)\r\n fig = go.Figure(data=qq_scatter_data, layout=qq_layout)\r\n # fig.add_trace(data=qq_scatter_opt_data)\r\n return fig\r\n\r\n\r\ndef ecdf_plot(rets_data, reqd_assets:list):\r\n ecdf_data = [go.Scatter(x=ECDF(rets_data[asset]).x,\r\n y=ECDF(rets_data[asset]).y,\r\n mode='lines',\r\n name=asset) for asset in reqd_assets]\r\n ecdf_layout = go.Layout(title='ECDF Plot')\r\n fig = go.Figure(ecdf_data, ecdf_layout)\r\n return fig\r\n\r\n\r\ndef trend_filter(rets_data, lambda_value):\r\n \"\"\"\r\n Strips and returns the drift term for identification of regime changes using ML algo - refer coursera notebook\r\n :param rets_data:\r\n :param lambda_value:\r\n :return:\r\n \"\"\"\r\n #USING CVXPY convex optimiser\r\n n_periods = rets_data.shape[0]\r\n rets = rets_data.to_numpy()\r\n\r\n D_full = np.diag([1]*n_periods) - np.diag([1]*(n_periods-1), 1)\r\n D = D_full[0:n_periods-1,]\r\n beta = cp.Variable(n_periods)\r\n lambd = cp.Parameter(nonneg=True)\r\n lambd.value = lambda_value\r\n\r\n def lasso_min(betas, rets, lambd):\r\n return cp.norm(rets-betas, 2)**2 + lambd*cp.norm(cp.matmul(D, betas), 1)\r\n\r\n problem = cp.Problem(cp.Minimize(lasso_min(beta, rets, lambd)))\r\n problem.solve()\r\n\r\n # NOT 
WORKING\r\n # n_periods = rets_data.shape[0]\r\n # D_full = np.diag([1] * n_periods) - np.diag([1] * (n_periods - 1), 1)\r\n # D = D_full[0:n_periods - 1, ]\r\n # def lasso_min(betas, rets, D, lambda_value):\r\n # return np.linalg.norm(rets-betas)**2 + lambda_value*np.linalg.norm(D@betas,1)\r\n #\r\n # init_guess = np.repeat(1/n_periods, n_periods)\r\n # bounds = Bounds(lb=0.0, ub=1.0)\r\n # results = minimize(fun=lasso_min,\r\n # args=(rets_data, D, lambda_value),\r\n # x0=init_guess,\r\n # bounds=bounds,\r\n # method='SLSQP',\r\n # options={'disp':False})\r\n # betas = pd.Series(results.x, index=rets_data.index)\r\n # return betas\r\n betas = pd.DataFrame(beta.value, index=rets_data.index.to_timestamp(), columns=['drift'])\r\n return betas\r\n\r\n\r\ndef get_regime_switches(betas:pd.DataFrame, threshold_value=1e-5):\r\n betas['crash_regime'] = False\r\n betas['crash_regime'] = betas['drift'] < threshold_value\r\n switches = betas[betas['crash_regime'].diff().fillna(False)]\r\n betas_switch = betas.loc[switches.index]\r\n crash_periods_start = list(betas_switch[betas_switch['crash_regime']].index)\r\n crash_periods_end = list(betas_switch[betas_switch['crash_regime'] == False].index)\r\n coordinate_list = list(zip(crash_periods_start, crash_periods_end))\r\n return coordinate_list\r\n\r\n\r\ndef trend_filter_plot(rets_data, lambda_value, threshold_value):\r\n betas = trend_filter(rets_data, lambda_value)\r\n regime_plot_data = [go.Scatter(x=betas.index,\r\n y=rets_data,\r\n mode='lines',\r\n name='Actual TS data'),\r\n go.Scatter(x=betas.index,\r\n y=betas['drift'],\r\n mode='lines',\r\n name='Drift term')]\r\n fig = go.Figure(regime_plot_data, go.Layout(title='Trend Filter Plot'))\r\n regimes = get_regime_switches(betas, threshold_value)\r\n for coordinates in regimes:\r\n rect_shape = dict(type='rect', y0=0, y1=1, x0=coordinates[0], x1=coordinates[1],\r\n yref='paper', xref='x1', fillcolor='rgba(255,24,86,0.5)')\r\n fig.add_shape(rect_shape)\r\n return fig\r\n\r\n\r\ndef transition_matrix(regime: pd.Series):\r\n n_unique = regime.value_counts()\r\n n_unique.index = ['normal', 'crash']\r\n switches = regime.diff().fillna(0.0)\r\n n_switches = switches.value_counts()\r\n n_switches.index = ['no_switch', 'cr_gr_switch', 'gr_cr_switch']\r\n p_matrix = pd.DataFrame({\r\n 'normal': [(n_unique['normal'] - n_switches['gr_cr_switch']), n_switches['gr_cr_switch']] / n_unique['normal'],\r\n 'crash': [n_switches['cr_gr_switch'], (n_unique['crash'] - n_switches['cr_gr_switch'])] / n_unique['crash']\r\n }, index=['normal', 'crash']).T\r\n return p_matrix\r\n\r\n\r\ndef check_ranks(coeff_matrix, b):\r\n aug_matrix = np.append(coeff_matrix, b.reshape(1, len(b)).T, axis=1)\r\n rank_coeff_matrix = np.linalg.matrix_rank(coeff_matrix)\r\n rank_aug_matrix = np.linalg.matrix_rank(aug_matrix)\r\n if rank_aug_matrix == rank_coeff_matrix:\r\n print('Unique stationary pi exist')\r\n else:\r\n print('Do Markov Simulation')\r\n return None\r\n\r\n\r\ndef get_markov_stationary_distr(p_matrix: pd.DataFrame):\r\n \"\"\"\r\n Please refer https://towardsdatascience.com/markov-chain-analysis-and-simulation-using-python-4507cee0b06e\r\n Also see notes\r\n A.pi = b\r\n So, (AT.A).pi = (AT.b) # Visualise as reverse transforming b vector to land at pi\r\n Needed to implement markov simulation\r\n \"\"\"\r\n n_states = p_matrix.shape[0]\r\n i = np.repeat(1, n_states)\r\n I = np.identity(n_states)\r\n P = p_matrix.to_numpy()\r\n A = np.append((P.T - I), [i], axis=0)\r\n b = np.repeat(0, n_states)\r\n b = np.append(b, 1)\r\n 
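get_markov_stationary_distr, completed just below, stacks P.T - I with a row of ones and solves the normal equations for the stationary distribution. A standalone check with a made-up two-state regime matrix:

```python
import numpy as np

P = np.array([[0.9, 0.1],    # normal -> (normal, crash)
              [0.4, 0.6]])   # crash  -> (normal, crash)
A = np.vstack([P.T - np.identity(2), np.ones(2)])   # stationarity rows plus sum-to-1
b = np.array([0.0, 0.0, 1.0])
pi = np.linalg.solve(A.T @ A, A.T @ b)   # (A.T A) pi = A.T b
print(pi)                                # [0.8, 0.2]: long-run share of time per regime
```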
check_ranks(A, b)\r\n pi = pd.Series(np.linalg.solve(A.T @ A, A.T @ b), index=p_matrix.columns)\r\n return pi\r\n\r\n\r\ndef get_multivariate_sim_scenario_rets(asset_data, n_years, n_scenarios, regime_col='Regime-5'):\r\n \"\"\"\r\n :return: 3 dimensional matrix asset returns simulated for time steps for each scenario scenario wise\r\n \"\"\"\r\n np.random.seed(7)\r\n rets, ret_gr, ret_cr = split_regime_returns(asset_data, config.asset_categories, regime_col) # mNot annualised\r\n regime = asset_data[regime_col]\r\n p_matrix = transition_matrix(regime)\r\n pi = get_markov_stationary_distr(p_matrix)\r\n time_steps = n_years * 12\r\n mvn_distr_gr = np.random.multivariate_normal(get_ann_return(ret_gr, periodicity=1, expm1=False),\r\n get_cov(ret_gr, periodicity=1), (n_scenarios, time_steps))\r\n mvn_distr_cr = np.random.multivariate_normal(get_ann_return(ret_cr, periodicity=1, expm1=False),\r\n get_cov(ret_cr, periodicity=1), (n_scenarios, time_steps))\r\n mvn_mixed = mvn_distr_gr * pi['normal'] + mvn_distr_cr * pi['crash'] # Long term stationary probability distribution representing fraction of time spent in each state\r\n # mvn_mixed = mvn_mixed.reshape(len(rets.columns), time_steps, n_scenarios)\r\n return mvn_mixed\r\n\r\n\r\ndef fix_mix(orig_wts, mvn_asset_rets, spending_rate, rebal_freq=None):\r\n \"\"\"\r\n rebal_freq if none implies that at first time step, wt scheme which was used to form portfolio is allowed to run its course throughout\r\n rebal freq if in month (say 3) means at end of each 3rd month, wt scheme is reset to original wt scheme\r\n \"\"\"\r\n n_scenarios, time_steps, n_assets = mvn_asset_rets.shape\r\n wealth_index = np.zeros((int(time_steps/12), n_scenarios))\r\n for scenario in range(n_scenarios):\r\n asset_rets = mvn_asset_rets[scenario]\r\n cum_pf_rets_component_wise = orig_wts # Initial weight adopted for first time step\r\n if rebal_freq is None:\r\n for period in range(time_steps):\r\n cum_pf_rets_component_wise = cum_pf_rets_component_wise * asset_rets[period]\r\n if period % 12 == 0:\r\n cum_pf_rets_component_wise = cum_pf_rets_component_wise * (1-spending_rate)\r\n wealth_index[int(period/12), scenario] = np.sum(cum_pf_rets_component_wise)\r\n else:\r\n for period in range(time_steps):\r\n cum_pf_rets_component_wise = cum_pf_rets_component_wise * asset_rets[period]\r\n if period % rebal_freq == 0:\r\n cum_pf_rets_component_wise = np.sum(\r\n cum_pf_rets_component_wise) * orig_wts # Rebalnce occurs at the end of the period\r\n if period % 12 == 0:\r\n cum_pf_rets_component_wise = cum_pf_rets_component_wise * (1 - spending_rate)\r\n wealth_index[int(period / 12), scenario] = np.sum(cum_pf_rets_component_wise)\r\n return wealth_index\r\n\r\n\r\ndef build_pf_ret(mvn_asset_rets, orig_wts, allocator=fix_mix, spending_rate=0.03, rebal_freq=None):\r\n return allocator(orig_wts, mvn_asset_rets, spending_rate, rebal_freq)\r\n\r\n\r\ndef retrieve_stationary_time_series(features_df: pd.DataFrame, threshold=0.1):\r\n \"\"\"\r\n #Refer https://machinelearningmastery.com/time-series-data-stationary-python/\r\n # Check stationarity in time series data\r\n # We will perform adfuller test to check unit roots 3 times.\r\n # First time for non-stationary series we will take first order difference\r\n # Second time we will take second order difference\r\n # Third time if there are still remaining non-stationary columns we will drop them from feature set\r\n \"\"\"\r\n def check_ad_fuller(col):\r\n result = adfuller(col)\r\n p_value = result[1]\r\n return p_value\r\n\r\n 
non_stationary_cols = []\r\n\r\n for order in range(3):\r\n for col in features_df.columns:\r\n p_value = check_ad_fuller(features_df[col])\r\n if p_value > threshold: # fail to reject H0: the original series has a unit root and is non-stationary\r\n if order == 2:\r\n non_stationary_cols.append(col)\r\n features_df = features_df.drop(non_stationary_cols, axis=1) # assign back: drop() is not in-place by default\r\n else:\r\n features_df[col] = features_df[col].diff() # take differences and re-test for stationarity\r\n features_df.dropna(axis=0, inplace=True)\r\n return features_df\r\n\r\n\r\ndef standardise_data(stationary_df:pd.DataFrame):\r\n \"\"\"\r\n Standardises each column to zero mean and unit variance\r\n :param stationary_df:\r\n :return: standardised dataframe\r\n \"\"\"\r\n scaler = StandardScaler()\r\n scaler.fit(stationary_df)\r\n standardised_feature_df = pd.DataFrame(scaler.transform(stationary_df), columns=stationary_df.columns, index=stationary_df.index)\r\n return standardised_feature_df\r\n\r\n\r\ndef get_selected_features(dataset_features, model):\r\n \"\"\"\r\n Selects the statistically significant features and transforms the original dataset down to them. Mainly dimensionality reduction of the dataset\r\n \"\"\"\r\n model = SelectFromModel(model, prefit=True)\r\n feature_bool_mask = model.get_support()\r\n selected_features = dataset_features.columns[feature_bool_mask]\r\n transformed_dataset = pd.DataFrame(model.transform(dataset_features), columns=dataset_features.columns[feature_bool_mask], index=dataset_features.index)\r\n return selected_features, transformed_dataset\r\n","sub_path":"sps_finance_toolkit.py","file_name":"sps_finance_toolkit.py","file_ext":"py","file_size_in_byte":87290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
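The demo record that follows exercises lugref, which ships with that repo and is not reproduced in this dump. Based purely on its parameter list, the standard LuGre bristle update it presumably wraps (one Euler step of dz/dt = v - |v| * z / g(v), with F = sigma_0*z + sigma_1*dz + sigma_2*v) would look roughly like this; treat it as an assumption, not the repo's actual code:

```python
import math

def lugre_step(z, v, Fc, Fs, vs, sigma_0, sigma_1, sigma_2, ts):
    # Stribeck curve g(v), normalised by the bristle stiffness sigma_0.
    g = (Fc + (Fs - Fc) * math.exp(-(v / vs) ** 2)) / sigma_0
    dz = v - abs(v) * z / g        # bristle deflection rate
    z = z + dz * ts                # explicit Euler step of size ts
    F = sigma_0 * z + sigma_1 * dz + sigma_2 * v
    return F, z
```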
+{"seq_id":"433009388","text":"#!/usr/bin/env python\nfrom lugref import *\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# See Table I from the paper\nsigma_0 = 1e5\nsigma_1 = math.sqrt(1e5)\nsigma_2 = 0.4\nFc = 1\nFs = 1.5\nvs = 0.001\nts = 1e-6\n\n\"\"\"\nPlot 1\n\"\"\"\ntime_span = 0.1\nt = np.linspace(0, time_span, int(time_span/ts)) # the sample count must be an int\n\n# Velocity range\nv = np.linspace(-0.005,0.005,100)\n\nF = []\nFss = []\n\nfor i in range(0,len(v)):\n z = 0.0\n for j in range(0,len(t)):\n fj, z = lugref(z, v[i], Fc, Fs, vs, sigma_0, sigma_1, sigma_2, ts)\n F.append(fj)\n Fss.append(F[-1])\n\nplt.plot(v, Fss)\nplt.grid()\nplt.xlabel('Velocity (m/s)')\nplt.ylabel('Friction force (N)')\nplt.title('Friction force at steady state condition')\nplt.show()\n\n\"\"\"\nPlot 2\n\"\"\"\n\n# Zoom into a certain velocity to see its transient behaviour\nF = []\nv = 0.002\nz = 0\nfor j in range(0,len(t)):\n fj, z = lugref(z, v, Fc, Fs, vs, sigma_0, sigma_1, sigma_2, ts)\n F.append(fj)\n\nplt.figure()\nplt.plot(t, F)\nplt.grid()\nplt.xlabel('Time (s)')\nplt.ylabel('Friction force (N)')\nplt.title('Friction force for v = 0.002')\nplt.show()\n\n\"\"\"\nPlot 3\n\"\"\"\n\n# Apply sinusoidal velocity and measure the friction force (Fig. 3 of the paper)\nplt.figure()\n# (plt.hold was removed in Matplotlib 3.x; axes hold by default now)\ncolor = ['r','g','b']\n\nF_omega = []\nv = []\nt = np.linspace(0, 10.0, int(10.0/ts)) # the sample count must be an int\nomega = [1,10,25]\nlegend = ['1 rad/s', '10 rad/s', '25 rad/s']\n\nfor i in range(0,len(omega)):\n F_omega.append([])\n z = 0\n v = [0.001 * (math.sin(omega[i]*x)+1.5) for x in t]\n for j in range(0,len(t)):\n fj, z = lugref(z, v[j], Fc, Fs, vs, sigma_0, sigma_1, sigma_2, ts)\n F_omega[i].append(fj)\n\n # Start from t = 3 up to the end\n plt.plot(v[int(3.0/ts):],F_omega[i][int(3.0/ts):], color[i],label=legend[i])\n\nplt.legend()\nplt.grid()\nplt.xlabel('Velocity (m/s)')\nplt.ylabel('Friction force (N)')\nplt.title('Hysteresis in friction with varying velocity')\nplt.show()\n","sub_path":"python/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"463955003","text":"#!/usr/bin/python\n\nclass TranslateChars(object):\n\n def __init__(self, challenge_string):\n self.challenge_string = challenge_string\n self.orig_chars = 'abcdefghijklmnopqrstuvwxyz'\n self.new_chars = 'cdefghijklmnopqrstuvwxyzab'\n\n def translate_chars(self):\n # str.maketrans replaces Python 2's string.maketrans\n trans_chars = str.maketrans(self.orig_chars, self.new_chars)\n newstring = self.challenge_string.translate(trans_chars)\n print(newstring)\n\n\nif __name__ == \"__main__\":\n challenge_1 = TranslateChars(\"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.\")\n challenge_1.translate_chars()\n","sub_path":"1_ascii_translate.py","file_name":"1_ascii_translate.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"209454253","text":"import RPi.GPIO as GPIO\nimport time\n\npin_pwm = 18\nfrequency = 50\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(pin_pwm, GPIO.OUT)\np = GPIO.PWM(pin_pwm, frequency)\np.start(0)\n\ntry:\n dc = 0\n min_dc = 2.2\n max_dc = 11.5\n change_step = 10\n change = float((max_dc-min_dc)/change_step)\n toggle = 1\n delay = 0.4\n dc = min_dc\n\n while True:\n p.ChangeDutyCycle(dc)\n\n #show current angle\n print('angle: %d '%((dc-min_dc)*180/(max_dc-min_dc)-90))#,end='\\r')\n time.sleep(delay)\n\n #make the servo sweep back and forth like a windscreen wiper\n if dc + change <= max_dc and toggle:\n dc += change\n\n elif dc-change >= min_dc:\n dc -= change\n toggle = 0\n else:\n dc += change\n toggle = 1\nexcept:\n GPIO.cleanup()\n","sub_path":"svmotor2.py","file_name":"svmotor2.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"551043000","text":"def scale(strng, k, n):\r\n if not strng:\r\n return ''\r\n ret = []\r\n for line in strng.split('\\n'):\r\n temp = ''\r\n for ch in line:\r\n temp += ch * k # repeat each character k times\r\n for _ in range(n):\r\n ret.append(temp)\r\n return '\\n'.join(ret)","sub_path":"7-kyu/scaling-squared-strings/python/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
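The 1_ascii_translate.py record above hard-codes a Caesar shift of 2 as two alphabet strings. With the Python 3 str.maketrans now used there, the same table generalises to any shift; the helper name below is illustrative:

```python
import string

def caesar_table(shift):
    lower = string.ascii_lowercase
    return str.maketrans(lower, lower[shift:] + lower[:shift])

print('g fmnc wms'.translate(caesar_table(2)))   # -> 'i hope you'
```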
Generator')\nwin.geometry('1440x900')\nwin.config(background = '#323232')\n\ntime_list = []\nsc_list = [] \n\n\n#顯示打亂圖形\nlabelList = []\n\ndef draw_scramble():\n\n stikers = movesticker.move_as_scramble(sc)\n \n for k in range(6):\n for i in range(3):\n for j in range(3):\n xstart = 900\n ystart = 625\n num = 9 * k + 3 * i + j + 1\n var = 'L' + str(num)\n var = tk.Label(text=' ',bg=stikers[k][i*3+j])\n if k == 0:\n var.place(x=(j+3)*26+4+xstart,y=i*25+ystart)\n elif k <= 4:\n var.place(x=(j+3*(k-1))*26+4*(k-1)+xstart,y=(i+3)*25+4+ystart)\n elif k == 5:\n var.place(x=(j+3)*26+4+xstart,y=(i+6)*25+4*2+ystart)\n labelList.append(var)\n \n#隱藏打亂圖形\ndef hide_draw_scramble(): \n print('111')\n print(len(labelList))\n for i in range(0,len(labelList)):\n labelList[i].place_forget()\n\n\nsc = scramble.scramble3() #一打開就產生第一組sc\ndraw_scramble() #一打開就顯示第一次打亂圖形\n\n\n\n#產生新打亂\ndef gen_sc():\n global sc\n sc = scramble.scramble3() #從scramble程式產生sc,sc為一陣列,如['R',\"U'\",'L',...]\n sc_btn.config(text = sc) #將sc_btn的文字設定為sc\n draw_scramble() #更新打亂圖形\n\n\n#設置物件\nsc_btn = tk.Button(text = sc , fg = 'white', bg = '#323232',font = 'Arial 30' ) #顯示打亂,按下則產生新打亂\nsc_btn.pack(anchor='n')\n\nsolve_show = tk.Label(text = 'solve: 0', fg = 'white', bg = '#323232',font = 'Arial 30') #顯示為第幾次復原\nsolve_show.place(x=30, y=200)\n\nmo3_show = tk.Label(text = 'mo3 ----', fg = 'white', bg = '#323232',font = 'Arial 30') #顯示mo3\nmo3_show.place(x=30, y=240)\n\nao5_show = tk.Label(text = 'ao5 ----', fg = 'white', bg = '#323232',font = 'Arial 30') #顯示ao5\nao5_show.place(x=30, y=280)\n\nao12_show = tk.Label(text = 'ao12 ----', fg = 'white', bg = '#323232',font = 'Arial 30') #顯示ao12\nao12_show.place(x=30, y=320)\n\nmean_show = tk.Label(text = 'mean ----', fg = 'white', bg = '#323232',font = 'Arial 30') #顯示mean\nmean_show.place(x=30, y=360)\n\ntime_1_show = tk.Button(fg = 'white', bg = '#323232',font = 'Arial 30') #顯示上一次時間,按下則print出該次sc\ntime_1_show.place(x=30, y=410)\n\ntime_2_show = tk.Button(fg = 'white', bg = '#323232',font = 'Arial 30') #顯示上上次時間,按下則print出該次sc\ntime_2_show.place(x=30, y=490)\n\ntime_3_show = tk.Button(fg = 'white', bg = '#323232',font = 'Arial 30')\ntime_3_show.place(x=30, y=570)\n\ntime_4_show = tk.Button(fg = 'white', bg = '#323232',font = 'Arial 30')\ntime_4_show.place(x=30, y=650) \n\ntime_5_show = tk.Button(fg = 'white', bg = '#323232',font = 'Arial 30')\ntime_5_show.place(x=30, y=730)\n\nplus_two_btn = tk.Button(text = '+2', fg = 'white', bg = '#323232',font = 'Arial 20') #計時完按下則該次時間+2,再按一下取消\nplus_two_btn.place(x=110, y=810)\n\ndnf_btn = tk.Button(text = 'DNF', fg = 'white', bg = '#323232',font = 'Arial 20') #計時完按下則該次為DNF,再按一下取消\ndnf_btn.place(x=30, y=810)\n\ndef hide_object():\n sc_btn.pack_forget() #隱藏sc\n solve_show.place_forget() #隱藏第幾次復原\n mo3_show.place_forget() #隱藏mo3\n ao5_show.place_forget() #隱藏ao5 \n ao12_show.place_forget() #隱藏ao12 \n mean_show.place_forget() #隱藏mean \n time_1_show.place_forget() #隱藏過去五次成績\n time_2_show.place_forget()\n time_3_show.place_forget()\n time_4_show.place_forget() \n time_5_show.place_forget()\n plus_two_btn.place_forget() #隱藏+2按鈕\n dnf_btn.place_forget() #隱藏DNF按鈕\n export.place_forget() #隱藏export按鈕\n hide_draw_scramble() #隱藏打亂圖形\n\ndef show_object():\n sc_btn.pack(anchor='n') #顯示sc\n solve_show.place(x=30, y=200) #顯示第幾次復原\n mo3_show.place(x=30, y=240) #顯示mo3\n ao5_show.place(x=30, y=280) #顯示ao5 \n ao12_show.place(x=30, y=320) #顯示ao12 \n mean_show.place(x=30, y=360) #顯示mean\n time_1_show.place(x=30, y=410) #顯示過去五次成績\n time_2_show.place(x=30, y=490)\n time_3_show.place(x=30, 
y=570)\n time_4_show.place(x=30, y=650) \n time_5_show.place(x=30, y=730)\n plus_two_btn.place(x=110, y=810) #顯示+2按鈕\n dnf_btn.place(x=30, y=810) #顯示DNF按鈕\n export.place(x=165,y=810) #顯示export按鈕\n\n\n\n#碼表 \nt_ms = 0 #運行時間\nt_s = 0\nt_m = 0\n\ntime_show = tk.Label(text = '%02d.%02d' % (t_s, t_ms),fg='white',bg = '#323232',font = 'Arial 120') #一打開就顯示時間\ntime_show.place(x=480,y=350)\n\n#開始計時\nrun = 2 #計算mean時避免/0\nafter_cancel = None\npress_plus2 = 0\npress_dnf = 0\ntime_start = None\n\ndef space_trigger(x):\n global run, t_m,t_ms,t_s ,press_plus2 ,press_dnf,time_start\n press_plus2 = press_dnf = 0\n if run %2 == 0: #計時開始&歸零\n \n t_ms = 0\n t_s = 0\n t_m = 0\n run += 1\n time_start = time.time() #記錄開始時間\n timer_start() #觸發碼表\n hide_object() #隱藏物件\n\n else: #計時結束\n\n time_list.append(t_ms)\n sc_list.append(sc)\n calculate_ao5(int(run/2))\n calculate_mo3(int(run/2))\n calculate_ao12(int(run/2))\n calculate_mean(int(run/2))\n time_list_show(int(run/2))\n solve_show.config(text = 'solve: ' + str(int(run/2)))\n\n gen_sc()\n\n run += 1\n timer_stop()\n\n show_object() #顯示物件\n\ndef timer_start(): #碼表開始運行\n global t_ms , after_cancel \n time_now = time.time() #當下時間\n t_ms = (time_now - time_start)*1000 #開始時間 - 當下時間 = 經過秒數\n time_show.config(text = time_translation(t_ms, t_s, t_m)) #改成用time_translate \n after_cancel = win.after(1,timer_start) #每個1ms呼叫自身\n \ndef timer_stop(): #碼表停止\n global after_cancel\n win.after_cancel(after_cancel)\n after_cancel = None \n\ndef time_translation(ms, s, m): #將ms轉換成m,s,ms\n while ms >= 1000:\n s = s + 1\n ms -= 1000\n while s >= 60:\n m = m + 1\n s -= 60 \n if m > 0 :\n return str(m) + ':' + \"%02d\" % s + '.' + \"%03d\" % ms \n else:\n return str(s) + '.' + \"%03d\" % ms\n\ndef calculate_ao5(n):\n if n >= 5:\n ao5 = int((sum(time_list[n-5:n]) - max(time_list[n-5:n]) - min(time_list[n-5:n])) / 3)\n if ao5 < 0 :\n ao5_show.config(text = 'ao5: DNF' )\n else:\n ao5_show.config(text = 'ao5: ' + str(time_translation(ao5,0,0)) )\n\ndef calculate_mo3(n):\n if n >= 3:\n mo3 = int(sum(time_list[n-3:n]) / 3)\n if mo3 < 0 :\n mo3_show.config(text = 'mo3: DNF' )\n else:\n mo3_show.config(text = 'mo3: ' + str(time_translation(mo3,0,0)) )\n\ndef calculate_ao12(n):\n if n >= 12:\n ao12 = int((sum(time_list[n-12:n]) - max(time_list[n-12:n]) - min(time_list[n-12:n])) / 10)\n if ao12 < 0 :\n ao12_show.config(text = 'ao12: DNF' )\n else:\n ao12_show.config(text = 'ao12: ' + str(time_translation(ao12,0,0)) )\n\n\ndef calculate_mean(n):\n mean_list = []\n for i in range(0 , n):\n if time_list[i] >= 0 :\n mean_list.append(time_list[i])\n if len(mean_list) == 0 :\n mean_show.config(text = 'mean: DNF')\n else: \n mean = int(sum(mean_list[0:i+1]) / len(mean_list))\n mean_show.config(text = 'mean: ' + str(time_translation(mean,0,0)) )\n\ndef time_list_show(n):\n if n >= 5:\n if time_list[n-5] < 0 :\n time_5_show.config(text = 'DNF')\n else: time_5_show.config(text = time_translation(time_list[n-5],0,0))\n if n >= 4:\n if time_list[n-4] < 0 :\n time_4_show.config(text = 'DNF')\n else: time_4_show.config(text = time_translation(time_list[n-4],0,0))\n if n >= 3:\n if time_list[n-3] < 0 :\n time_3_show.config(text = 'DNF')\n else: time_3_show.config(text = time_translation(time_list[n-3],0,0))\n if n >= 2:\n if time_list[n-2] < 0 :\n time_2_show.config(text = 'DNF')\n else: time_2_show.config(text = time_translation(time_list[n-2],0,0))\n if n >= 1:\n if time_list[n-1] < 0 :\n time_1_show.config(text = 'DNF')\n else: time_1_show.config(text = time_translation(time_list[n-1],0,0))\n\n\ndef 
show_sc(x): #按下按鈕顯示打亂步驟 x = 第幾個\n n = int(run/2)-1\n print(time_translation(time_list[n-x],0,0),end=' ')\n for i in range(len(sc_list[n-x])):\n print(sc_list[n-x][i] ,end=' ')\n print()\n\n\ndef plus_two():\n global press_plus2\n press_plus2 += 1\n n = int(run/2)-1\n if press_plus2 % 2 == 1 :\n time_list[n-1] += 2000\n else:\n time_list[n-1] -= 2000\n calculate_ao5(n)\n calculate_ao12(n)\n calculate_mean(n)\n calculate_mo3(n)\n time_list_show(n)\n\ndef dnf():\n global press_dnf\n press_dnf += 1 \n n = int(run/2)-1\n if press_dnf % 2 == 1 :\n time_list[n-1] -= 10000000\n else:\n time_list[n-1] += 10000000\n calculate_ao5(n)\n calculate_ao12(n)\n calculate_mean(n)\n calculate_mo3(n)\n time_list_show(n)\n\n#偵測開始、結束計時\nwin.bind('', space_trigger)\n\n#按鈕執行函式\nsc_btn.config(command = gen_sc)\ntime_1_show.config(command = lambda: show_sc(1)) #預設tkinter Button控制的函數不可有參數\ntime_2_show.config(command = lambda: show_sc(2)) #若需參數要在函數前加 lambda:\ntime_3_show.config(command = lambda: show_sc(3))\ntime_4_show.config(command = lambda: show_sc(4))\ntime_5_show.config(command = lambda: show_sc(5))\nplus_two_btn.config(command = plus_two)\ndnf_btn.config(command = dnf)\n\n#輸出資料\ndef export_csv():\n #開啟輸出的 CSV 檔案\n datetime_dt = datetime.today()\n file_name = str(os.getcwd())+'//Downloads//TimeOutput_' + str(datetime_dt.strftime(\"%Y_%m_%d_%H_%M_%S\")) + '.csv' \n with open(file_name, 'w', newline='') as csvFile:\n # 建立 CSV 檔寫入器\n writer = csv.writer(csvFile)\n #標題\n writer.writerow(['No.','Time','Scramble'])\n \n for i in range(0,len(time_list)):\n t = time_translation(time_list[i],0,0)\n writer.writerow([str(i+1),t,sc_list[i]])\n\nexport = tk.Button(text = 'Export CSV' , fg = 'white', bg = '#323232',font = 'Arial 20')\nexport.config(command = export_csv)\nexport.place(x=165,y=810)\n \n#常駐主視窗\nwin.attributes('-topmost', True)\nwin.mainloop()","sub_path":"Rubiks-Cube-Timer_Windows10.py","file_name":"Rubiks-Cube-Timer_Windows10.py","file_ext":"py","file_size_in_byte":11020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"202695240","text":"from DAO import CommentDAO,MoreDAO,DiaryDealDAO,QunDAO,UserDAO,UserDealDAO,MoreDAO,DiaryDAO\nfrom app1.models import Comment,Diary,More,Userdeal,Diarydeal\nfrom app1.AllBack import PageForId\nfrom app1.util import ResultCode,TimeUtil,fun\nfrom app1.AllBack import Chatuser_back\nimport json\n\n#添加日志处理\n\ndef adddiaryword(request):\n result={}\n if request.method == \"POST\":\n diaryid = request.POST.get(\"diaryid\", None) # 读取post数据,None为默认值\n qunid = request.POST.get(\"qunid\", None) # 读取post数据,None为默认值\n state = request.POST.get(\"state\", None) # 读取post数据,None为默认值\n dealtype = request.POST.get(\"dealtype\", None) # 读取post数据,None为默认值\n userid = request.POST.get(\"userid\", None) # 读取post数据,None为默认值\n parameter1 = request.POST.get(\"parameter1\", None) # 读取post数据,None为默认值\n parameter2 = request.POST.get(\"parameter2\", None) # 读取post数据,None为默认值\n parameter3 = request.POST.get(\"parameter3\", None) # 读取post数据,None为默认值\n parameter4 = request.POST.get(\"parameter4\", None) # 读取post数据,None为默认值\n parameter5 = request.POST.get(\"parameter5\", None) # 读取post数据,None为默认值\n parameter6 = request.POST.get(\"parameter6\", None) # 读取post数据,None为默认值\n parameter7 = request.POST.get(\"parameter7\", None) # 读取post数据,None为默认值\n parameter8 = request.POST.get(\"parameter8\", None) # 读取post数据,None为默认值\n parameter9 = request.POST.get(\"parameter9\", None) # 读取post数据,None为默认值\n parameter10 = request.POST.get(\"parameter10\", None) # 
读取post数据,None为默认值\n\n if request.method == \"GET\":\n diaryid = request.GET.get(\"diaryid\", None) # 读取get数据,None为默认值\n qunid = request.GET.get(\"qunid\", None) # 读取get数据,None为默认值\n state = request.GET.get(\"state\", None) # 读取get数据,None为默认值\n dealtype = request.GET.get(\"dealtype\", None) # 读取get数据,None为默认值\n userid = request.GET.get(\"userid\", None) # 读取get数据,None为默认值\n parameter1 = request.GET.get(\"parameter1\", None) # 读取get数据,None为默认值\n parameter2 = request.GET.get(\"parameter2\", None) # 读取get数据,None为默认值\n parameter3 = request.GET.get(\"parameter3\", None) # 读取get数据,None为默认值\n parameter4 = request.GET.get(\"parameter4\", None) # 读取get数据,None为默认值\n parameter5 = request.GET.get(\"parameter5\", None) # 读取get数据,None为默认值\n parameter6 = request.GET.get(\"parameter6\", None) # 读取get数据,None为默认值\n parameter7 = request.GET.get(\"parameter7\", None) # 读取get数据,None为默认值\n parameter8 = request.GET.get(\"parameter8\", None) # 读取get数据,None为默认值\n parameter9 = request.GET.get(\"parameter9\", None) # 读取get数据,None为默认值\n parameter10 = request.GET.get(\"parameter10\", None) # 读取get数据,None为默认值\n\n print(\"增加日志处理接口参数:用户id:\" + userid + \"群id:\" + qunid)\n\n try:\n dealdiary=Diarydeal()\n dealdiary.diaryid=diaryid\n dealdiary.qunid=qunid\n dealdiary.userid=userid\n dealdiary.state=state\n dealdiary.dealtype=dealtype\n dealdiary.date=TimeUtil.getCurrentDate()\n dealdiary.time=TimeUtil.getCurrentTime()\n dealdiary.parameter1=parameter1\n dealdiary.parameter2=parameter2\n dealdiary.parameter3=parameter3\n dealdiary.parameter4=parameter4\n dealdiary.parameter5=parameter5\n dealdiary.parameter6=parameter6\n dealdiary.parameter7=parameter7\n dealdiary.parameter8=parameter8\n dealdiary.parameter9=parameter9\n dealdiary.parameter10=parameter10\n\n DiaryDealDAO.addDealDiary(dealdiary)\n result[\"data\"]=\"0\"\n result[\"respcode\"]= ResultCode.SUCCESS\n result[\"errorcode\"]= \"\"\n result[\"message\"]= \"添加日志处理成功\"\n\n\n except Exception as e:\n print(e)\n result[\"data\"]=\"0\"\n result[\"respcode\"]= ResultCode.FAIL\n result[\"errorcode\"]= ResultCode.FAIL\n result[\"message\"]= \"写日志处理失败\"\n return result\n\n\n\n#添加日志处理\ndef adddsomeiarydeal(request):\n result={}\n if request.method == \"POST\":\n liststr = request.POST.get(\"liststr\", None) # 读取post数据,None为默认值\n qunid = request.POST.get(\"qunid\", None) # 读取post数据,None为默认值\n useridstr = request.POST.get(\"userid\", None) # 读取post数据,None为默认值\n state = request.POST.get(\"state\", None) # 读取post数据,None为默认值\n dealtype = request.POST.get(\"dealtype\", None) # 读取post数据,None为默认值\n parameter1 = request.POST.get(\"parameter1\", None) # 读取post数据,None为默认值\n parameter2 = request.POST.get(\"parameter2\", None) # 读取post数据,None为默认值\n parameter3 = request.POST.get(\"parameter3\", None) # 读取post数据,None为默认值\n parameter4 = request.POST.get(\"parameter4\", None) # 读取post数据,None为默认值\n parameter5 = request.POST.get(\"parameter5\", None) # 读取post数据,None为默认值\n parameter6 = request.POST.get(\"parameter6\", None) # 读取post数据,None为默认值\n parameter7 = request.POST.get(\"parameter7\", None) # 读取post数据,None为默认值\n parameter8 = request.POST.get(\"parameter8\", None) # 读取post数据,None为默认值\n parameter9 = request.POST.get(\"parameter9\", None) # 读取post数据,None为默认值\n parameter10 = request.POST.get(\"parameter10\", None) # 读取post数据,None为默认值\n\n if request.method == \"GET\":\n liststr = request.GET.get(\"liststr\", None) # 读取get数据,None为默认值\n qunid = request.GET.get(\"qunid\", None) # 读取get数据,None为默认值\n useridstr = request.GET.get(\"userid\", None) # 读取get数据,None为默认值\n state = 
request.GET.get(\"state\", None) # 读取get数据,None为默认值\n dealtype = request.GET.get(\"dealtype\", None) # 读取get数据,None为默认值\n parameter1 = request.GET.get(\"parameter1\", None) # 读取get数据,None为默认值\n parameter2 = request.GET.get(\"parameter2\", None) # 读取get数据,None为默认值\n parameter3 = request.GET.get(\"parameter3\", None) # 读取get数据,None为默认值\n parameter4 = request.GET.get(\"parameter4\", None) # 读取get数据,None为默认值\n parameter5 = request.GET.get(\"parameter5\", None) # 读取get数据,None为默认值\n parameter6 = request.GET.get(\"parameter6\", None) # 读取get数据,None为默认值\n parameter7 = request.GET.get(\"parameter7\", None) # 读取get数据,None为默认值\n parameter8 = request.GET.get(\"parameter8\", None) # 读取get数据,None为默认值\n parameter9 = request.GET.get(\"parameter9\", None) # 读取get数据,None为默认值\n parameter10 = request.GET.get(\"parameter10\", None) # 读取get数据,None为默认值\n\n userid=0\n if(useridstr):\n userid = int(useridstr)\n\n\n print(\"增加多个日志处理接口参数:liststr:\" + liststr)\n\n dealdiaryList =[]\n dealdiaryList = json.loads(liststr)\n\n if(not dealdiaryList and len(dealdiaryList)!=0):\n userdeal = Userdeal()\n userdeal.userid=userid\n userdeal.qunid=qunid\n userdeal.state=state\n userdeal.dealtype=dealtype\n userdeal.date=TimeUtil.getCurrentDate()\n userdeal.time=TimeUtil.getCurrentTime()\n userdeal.parameter1=parameter1\n userdeal.parameter2=parameter2\n userdeal.parameter3=parameter3\n userdeal.parameter4=parameter4\n userdeal.parameter5=parameter5\n userdeal.parameter6=parameter6\n userdeal.parameter7=parameter7\n userdeal.parameter8=parameter8\n userdeal.parameter9=parameter9\n userdeal.parameter10=parameter10\n\n\n UserDealDAO.addUserDeal(userdeal)\n userdeal = UserDealDAO.selectUserDealfordeal(userdeal)\n if(userdeal):\n for i in range(len(dealdiaryList)):\n dealdiary = dealdiaryList[i]\n dealdiary.dealid=userdeal.id\n dealdiary.date=userdeal.date\n dealdiary.time=userdeal.time\n DiaryDealDAO.addDealDiary(dealdiary)\n\n\n result[\"data\"]=\"0\"\n result[\"respcode\"]= ResultCode.SUCCESS\n result[\"errorcode\"]=\"\"\n result[\"message\"]= \"添加日志处理成功\"\n\n return result\n\n\n#删除指定id日志处理\ndef deletedealdiary(request):\n result = {}\n if request.method == \"POST\":\n dealdiaryid = request.POST.get(\"dealdiaryid\", None) # 读取post数据,None为默认值\n if request.method == \"GET\":\n dealdiaryid = request.GET.get(\"dealdiaryid\", None) # 读取get数据,None为默认值\n\n print(\"删除日志处理接口参数:处理ID:\"+dealdiaryid)\n\n dealdiary = DiaryDealDAO.selectDealDiary(dealdiaryid)\n\n if(dealdiary.state==ResultCode.DIARYDEAL_NOTSEND or dealdiary.state==ResultCode.DIARYDEAL_NOTRECEIVE or dealdiary.state==ResultCode.DIARYDEAL_NOTRESPONSE):\n try:\n DiaryDealDAO.deleteDealDiary(dealdiaryid)\n result[\"data\"]= ResultCode.SUCCESS\n result[\"respcode\"]= ResultCode.SUCCESS\n result[\"errorcode\"]= ResultCode.SUCCESS\n result[\"message\"]=\"删除日志处理成功!\"\n print(\"删除成功\")\n except Exception as e:\n result[\"data\"]= ResultCode.FAIL\n result[\"respcode\"]= ResultCode.FAIL\n result[\"errorcode\"]=ResultCode.FAIL\n result[\"message\"]=\"删除失败!\"\n print(\"删除失败\")\n\n else:\n result[\"data\"]=ResultCode.FAIL\n result[\"respcode\"]=ResultCode.FAIL\n result[\"errorcode\"]=ResultCode.FAIL\n result[\"message\"]=\"商家已处理,不可删除\"\n return result\n\n\n\n#分页查询指定用户的日志处理\ndef getalldealdiaryforuserid(request):\n returnData={}\n if request.method == \"POST\":\n pageNo = request.POST.get(\"page\", None) # 读取post数据,None为默认值\n userid = request.POST.get(\"userid\", None) # 读取post数据,None为默认值\n if request.method == \"GET\":\n pageNo = request.GET.get(\"page\", None) # 读取get数据,None为默认值\n userid = 
request.GET.get(\"userid\", None) # 读取get数据,None为默认值\n\n print(\"查询指定用户日志处理接口参数:用户ID\" + userid+\"pageNo:\"+pageNo)\n\n\n user = UserDAO.userInfoId(userid)\n\n page = PageForId()\n page.pageNo=pageNo\n page.userId=userid\n page.pageSize=10\n page.start=10*(pageNo-1)\n try:\n alluserdeal_back = []\n userelse = fun.user2else_back(user)\n alluserdeal = UserDealDAO.selectuserdealforuserid(page)\n for i in len(alluserdeal):\n\n userDeal_back = fun.userdeal2back(alluserdeal[i])\n userDeal_back.user(userelse)\n userDeal_back.qun(QunDAO.getqunInfoqunid(alluserdeal[i].qunid))\n alldealdiary_back = []\n alldealdiary = DiaryDealDAO.selectdealdiaryfordealid(userDeal_back.id)\n for t in range(len(alldealdiary)):\n dealDiary_Back = fun.dealdiary2back(alldealdiary[t])\n diary=DiaryDAO.selectDiary(alldealdiary[t].diaryid)\n diaryback=fun.diary2back(diary)\n dealDiary_Back.siary(diaryback)\n alldealdiary_back.add(dealDiary_Back)\n\n userDeal_back.alldealdiary(alldealdiary_back)\n alluserdeal_back.add(userDeal_back)\n\n returnData[\"respcode\"]=ResultCode.SUCCESS\n returnData[\"message\"]=\"查询所有日志处理成功!\"\n returnData[\"data\"]= alluserdeal_back\n returnData[\"errorcode\"]=ResultCode.SUCCESS\n print(\"查询成功数目\"+len(alluserdeal_back))\n\n except Exception as e:\n returnData[\"respcode\"]=ResultCode.FAIL\n returnData[\"message\"]= \"查询所有日志处理失败!\"\n returnData[\"errorcode\"]= ResultCode.FAIL\n returnData[\"data\"]=\"\"\n print(\"查询失败\")\n return returnData\n\n\n#分页查询指定群组的日志处理\n\ndef getalldealdiaryforqunid(request):\n returnData={}\n if request.method == \"POST\":\n pageNo = request.POST.get(\"page\", None) # 读取post数据,None为默认值\n qunid = request.POST.get(\"qunid\", None) # 读取post数据,None为默认值\n if request.method == \"GET\":\n pageNo = request.GET.get(\"page\", None) # 读取get数据,None为默认值\n qunid = request.GET.get(\"qunid\", None) # 读取get数据,None为默认值\n\n print(\"查询指定群日志处理接口参数:群ID\" + qunid+\"pageNo:\"+pageNo)\n\n qun = QunDAO.getqunInfoqunid(qunid)\n\n page = PageForId()\n page.pageNo(pageNo)\n page.qunid(qunid)\n page.pageSize(10)\n page.start(10*(pageNo-1))\n try:\n alluserdeal_back = []\n alluserdeal = UserDealDAO.selectuserdealforqunid(page)\n print(\"查询到数目\"+len(alluserdeal))\n for i in range(len(alluserdeal)):\n userDeal_back = fun.userdeal2back(alluserdeal[i])\n userelse = fun.user2else_back(UserDAO.getUserInfoId(alluserdeal[i].userid))\n userDeal_back.user=userelse\n userDeal_back.qun=qun\n\n alldealdiary_back =[]\n alldealdiary = DiaryDealDAO.selectdealdiaryfordealid(userDeal_back.id)\n for t in range(len(alldealdiary)):\n\n dealDiary_Back = fun.dealdiary2back(alldealdiary[t])\n diary=DiaryDAO.selectDiary(alldealdiary[t].id)\n diaryback=fun.diary2back(diary)\n dealDiary_Back.diary(diaryback)\n alldealdiary_back.add(dealDiary_Back)\n\n userDeal_back.alldealdiary(alldealdiary_back)\n alluserdeal_back.add(userDeal_back)\n\n returnData[\"respcode\"]=ResultCode.SUCCESS\n returnData[\"message\"]=\"查询所有日志处理成功!\"\n returnData[\"data\"]=alluserdeal_back\n returnData[\"errorcode\"]=ResultCode.SUCCESS\n print(\"查询成功数目\"+len(alluserdeal_back))\n\n except Exception as e:\n returnData[\"respcode\"]=ResultCode.FAIL\n returnData[\"message\"]=\"查询所有日志处理失败!\"\n returnData[\"errorcode\"]=ResultCode.FAIL\n returnData[\"data\"]=\"\"\n print(\"查询失败\")\n\n return returnData\n\n\n#修改用户处理窗台\ndef updatedealdiarystate(request):\n result={}\n userdeal=Userdeal()\n if request.method == \"POST\":\n dealid = request.POST.get(\"dealid\", None) # 读取post数据,None为默认值\n state = request.POST.get(\"state\", None) # 读取post数据,None为默认值\n if 
request.method == \"GET\":\n dealid = request.GET.get(\"dealid\", None) # 读取get数据,None为默认值\n state = request.GET.get(\"state\", None) # 读取get数据,None为默认值\n\n print(\"修改用户处理状态接口参数:用户处理id:\"+dealid)\n try:\n userdeal = UserDealDAO.selectUserDealforid(dealid)\n userdeal.state=state\n UserDealDAO.updateUserDeal(userdeal)\n result[\"data\"]=\"\"\n result[\"respcode\"]=ResultCode.SUCCESS\n result[\"errorcode\"]=\"\"\n result[\"message\"]=\"修改用户处理成功\"\n\n except Exception as e:\n print(e)\n result[\"data\"]=\"\"\n result[\"respcode\"]=ResultCode.FAIL\n result[\"errorcode\"]= ResultCode.FAIL\n result[\"message\"]= \"修改日志处理失败\"\n\n return result\n\n\n\n\n\n\n\n\n#查询用户对置顶日志或群组或用户的deal\ndef getdealdiary(userid,diaryid,deal):\n more = More()\n more.userid_source=userid\n more.deal=deal\n more.diaryid_destination=diaryid\n more = MoreDAO.selectmoreInfomore(more)\n return more\n\n#查询用户对置顶日志或群组或用户的deal\ndef getuserdeal(userid,userid_destination,deal):\n more = More()\n more.userid_source=userid\n more.deal=deal\n more.userid_destination=userid_destination\n more = MoreDAO.selectmoreInfomore(more)\n return more\n\n#查询用户对置顶日志或群组或用户的deal\ndef getqundeal(userid,qunid,deal):\n more = More()\n more.userid_source=userid\n more.deal=deal\n more.qunid_destination=qunid\n more = MoreDAO.selectmoreInfomore(more)\n return more\n\n\n\n\n","sub_path":"CDLLP/app1/Control/DiaryDealController.py","file_name":"DiaryDealController.py","file_ext":"py","file_size_in_byte":16688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"225425949","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tinnakorn Group\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\n# from filename import classname # in my folder\n# from ubuntu_library import classname or filename # in ubuntu\n# from module import filename or classname #in odoo\n\nfrom openerp import models, fields, api #import file \"model.py\", \"fields.py\", \"api.py\" from folder openerp\nfrom openerp.exceptions import ValidationError\nfrom datetime import date,datetime,timedelta\nfrom openerp.exceptions import except_orm, Warning, RedirectWarning\n\nDatetime_FORMAT = '%Y-%m-%d'\n\nclass PurchaseRequest(models.Model):\n _inherit = 'purchase.request'\n\n # def _get_fullpack(self):\n # self.full_pack = True\n\n date_request = fields.Datetime('Submit Request date',\n help=\"วันที่กดปุ่ม request\",\n track_visibility='onchange')\n date_approve = fields.Datetime('Approve date',\n help=\"วันที่กดปุ่น approve\",\n track_visibility='onchange')\n\n @api.multi\n def _get_fullpack(self):\n self.full_pack = True\n for line in self.line_ids:\n if line.product_id.nperpack and line.product_id.nperpack != 0:\n if round(line.product_qty/line.product_id.nperpack,4) != float(int(line.product_qty/line.product_id.nperpack)):\n self.full_pack=self.full_pack and False\n\n full_pack = fields.Boolean(compute='_get_fullpack', string='Full Pack')\n\n # Rewrite the button_to_approve (copy form the original)\n # @api.multi\n # def button_to_approve(self):\n # if self.full_pack == False:\n # raise ValidationError(\"สินค้าสำหรับขาย ซื้อเต็ม Packเท่านั้น \")\n # self.state = 'to_approve'\n # return True\n\n @api.multi\n def button_to_approve(self):\n if self.full_pack == False:\n raise ValidationError(\"สินค้าสำหรับขาย ซื้อเต็ม Packเท่านั้น \")\n\n self.name = self.env['ir.sequence'].get('purchase.request')\n\n if self.description==False:\n str_description = ''\n else:\n str_description = self.description.encode('utf-8')\n # Add text in message to send by mail#\n str_requested_by = self.requested_by.name.encode('utf-8')\n\n datetime_date_start = datetime.strptime(self.date_start, \"%Y-%m-%d\")\n date_from7 = datetime_date_start + timedelta(hours=7)\n\n str_product_all = ''\n for line in self.line_ids:\n str_product_all = str_product_all+line.product_id.name.encode('utf-8')+' '+str(line.product_qty)+' '+line.product_uom_id.name.encode('utf-8')+r' วันที่ต้องการ '+str(line.date_required)+'
    '\n\n a = str_product_all +r'หมายเหตุ : '+str_description\n b = r'ขอสั่งซื้อจาก '+ str_requested_by + r' วันที่ขอสั่งซื้อ: ' + str(date_from7)\n self.message_post(body=a, subject=b, subtype='mt_comment')\n self.date_request = datetime.now()\n\n for line in self.line_ids:\n if (line.product_id.type == 'product') and (not(line.product_id.product_tmpl_id.weight_net) or line.product_id.product_tmpl_id.weight_net==0):\n print ('net_weight net_weight net_weight net_weight net_weight')\n raise ValidationError(u\"สินค้า \" + line.product_id.name + u\" ยังไม่ได้ใส่ net_weight\")\n\n return self.write({'state': 'to_approve'})\n\n @api.multi\n def button_approved(self):\n self.ensure_one()\n str_requested_by = self.requested_by.name.encode('utf-8')\n str_product_all = ''\n for line in self.line_ids:\n str_product_all = str_product_all+line.product_id.name.encode('utf-8')+' '+str(line.product_qty)+' '+line.product_uom_id.name.encode('utf-8')+r' วันที่ต้องการ '+str(line.date_required)+'
    '\n if self.description==False:\n str_description = ''\n else:\n str_description = self.description.encode('utf-8')\n\n if self.requested_by:\n self.message_follower_ids = [(4, self.requested_by.partner_id.id)]\n # Add people in group \"PR mail approve\" in message follower\n pr_mail_group_id = self.env.ref('purchase_request.group_purchase_request_mail_approved').id\n user_mail_pr=self.env['res.groups'].search([('id','=',pr_mail_group_id)]).users\n for user in user_mail_pr:\n self.message_follower_ids = [(4, user.partner_id.id)]\n\n a = str_product_all +r'หมายเหตุ : '+str_description\n b = r'อนุมัติการสั่งซื้อแล้ว จาก '+ str_requested_by\n self.message_post(body=a, subject=b, subtype='mt_comment')\n\n self.date_approve = datetime.now()\n self.state = 'approved'\n return True\n\n @api.multi\n @api.onchange('line_ids')\n def check_last_record(self):\n show_warning = False\n str_var = 'ปริมาณสินค้าครั้งล่าสุดของ \\n'\n str1 = 'ปริมาณของสินค้าได้เปลี่ยนแปลงเกิน 30% ของครั้งล่าสุด \\n'\n str2 = r'product quantity change more than 30% of the most recent record '+'\\n'\n str3 = \"\\n แต่ไม่เป็นไรหรอก แค่เตือนเฉยๆ\"\n\n for rec in self.line_ids:\n if type(rec.id) != int:\n if len(self.env['purchase.request.line'].search([('name','=',rec.name)])) != 0:\n last_qty = self.env['purchase.request.line'].search([('name','=',rec.name)])[-1].product_qty\n\n var = self.env['purchase.request.line'].search([('name','=',rec.name)])[-1].product_qty *30/100\n\n\n if (rec.product_qty > (last_qty+var)) or (rec.product_qty < (last_qty-var)):\n show_warning = True\n str_name = rec.name.encode('utf-8')\n else:\n if len(self.env['purchase.request.line'].search([('name','=',rec.name),('id','!=',rec.id)])) != 0:\n last_qty = self.env['purchase.request.line'].search([('name','=',rec.name),('id','!=',rec.id)])[-1].product_qty\n var = self.env['purchase.request.line'].search([('name','=',rec.name),('id','!=',rec.id)])[-1].product_qty *30/100\n\n if (rec.product_qty > (last_qty+var)) or (rec.product_qty < (last_qty-var)):\n show_warning = True\n #str_name = str(rec.name)\n str_qty = str(last_qty)\n str_name = rec.name.encode('utf-8')\n str_var += str_name+ r' คือ '+str_qty+'\\n'\n\n if show_warning == True:\n warning = {\n 'title': 'Warning!',\n 'message' : str1+str2+'\\n'+str_var+str3}\n return {'warning': warning}\n\n\n\n # @api.model\n # def create(self, vals):\n # full_pack = vals.get('full_pack', False)\n # if full_pack == 0:\n # raise ValidationError(\"สินค้าสำหรับขาย ซื้อเต็ม Packเท่านั้น \")\n # return super(PurchaseRequest, self).create(vals)\n #\n # @api.multi\n # def write(self, vals):\n # #If there is no change in 'full_pack', use the current value\n # full_pack = vals.get('full_pack', self.full_pack)\n # if full_pack == 0:\n # raise ValidationError(\"สินค้าสำหรับขาย ซื้อเต็ม Packเท่านั้น \")\n # return super(PurchaseRequest, self).write(vals)\n\nclass PurchaseRequestLine(models.Model):\n _inherit = 'purchase.request.line'\n\n date_request = fields.Datetime(related='request_id.date_request',\n string='Submit Request Date', readonly=True,\n store=True)\n date_approve = fields.Datetime(related='request_id.date_approve',\n string='Approve Date', readonly=True,\n store=True)\n\n @api.multi\n def button_cancel(self):\n self.ensure_one()\n uid = self.env.uid\n current_user_name = self.env['res.users'].browse(uid).partner_id.name\n str_product = self.product_id.name.encode('utf-8')+' '+str(self.product_qty)+' '+self.product_uom_id.name.encode('utf-8')+r' วันที่ต้องการ '+str(self.date_required)\n if 
self.requested_by:\n self.message_follower_ids = [(4, self.requested_by.partner_id.id)]\n a = str_product\n b = r'แจ้งการกด Cancel Purchase Request Line จาก '+ current_user_name.encode('utf-8')\n self.message_post(body=a, subject=b, subtype='mt_comment')\n self.request_state = 'rejected'\n return True\n\n @api.model\n def create(self, vals):\n if 'date_required' in vals:\n year_invalid = str(datetime.now().year + 543)\n if vals['date_required'][:4] == year_invalid:\n raise ValidationError(\"Requested date: Year Invalid. ต้องใส่ปี ค.ศ. เท่านั้น\")\n return super(PurchaseRequestLine, self).create(vals)\n\n\n @api.onchange('product_id', 'product_uom_id')\n def onchange_product_id(self):\n if self.product_id:\n name = self.product_id.name\n spec = False\n if self.product_id.code:\n name = '[%s] %s' % (name, self.product_id.code)\n #if self.product_id.description_purchase:\n # name += '\\n' + self.product_id.description_purchase\n # เปลี่ยนให้ส่งข้อมูล ไปที่ specifications แทนที่จะส่งไปที่ name\n if self.product_id.description_purchase:\n spec = self.product_id.description_purchase\n self.product_uom_id = self.product_id.uom_id.id\n self.product_qty = 1\n self.name = name\n self.specifications = spec\n\n\n\nclass PurchaseRequestLineMakePurchaseOrder(models.TransientModel):\n _inherit = \"purchase.request.line.make.purchase.order\"\n\n @api.model\n def _prepare_purchase_order_line(self, po, item):\n po_line_obj = self.env['purchase.order.line']\n product_uom = self.env['product.uom']\n product = item.product_id\n default_uom_po_id = product.uom_po_id.id\n qty = product_uom._compute_qty(item.product_uom_id.id,\n item.product_qty,\n default_uom_po_id)\n supplier_pricelist = \\\n po.partner_id.property_product_pricelist_purchase \\\n and po.partner_id.property_product_pricelist_purchase.id or False\n vals = po_line_obj.onchange_product_id(\n supplier_pricelist, product.id, qty, default_uom_po_id,\n po.partner_id.id, date_order=False,\n fiscal_position_id=po.partner_id.property_account_position.id,\n date_planned=item.line_id.date_required,\n name=False, price_unit=False, state='draft')['value']\n vals.update({\n 'order_id': po.id,\n 'product_id': product.id,\n 'account_analytic_id': item.line_id.analytic_account_id.id,\n 'taxes_id': [(6, 0, vals.get('taxes_id', []))],\n 'purchase_request_lines': [(4, item.line_id.id)],\n 'date_planned':\n vals.get('date_planned', False) or item.line_id.date_required,\n\n })\n # Winyoo Add the below line\n if item.line_id.specifications:\n vals['name'] = item.line_id.specifications\n if item.line_id.procurement_id:\n vals['procurement_ids'] = [(4, item.line_id.procurement_id.id)]\n\n return vals","sub_path":"winyoo_purchase_request/purchase_request.py","file_name":"purchase_request.py","file_ext":"py","file_size_in_byte":13034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"441078071","text":"from django.views.generic import View\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, Http404\n\nfrom employees.models import Employee\n\n\nclass ProfileView(View):\n \"\"\"\n View will fetch and display the current\n users profile data.\n \"\"\"\n token = None\n user_name = None\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"\n The dispatch method is responsible for checking\n if the user is authenticated and setting up\n initial values .\n \"\"\"\n if 'token' not in request.session:\n return HttpResponseRedirect('/login/')\n self.token = request.session['token']\n 
self.user_name = request.session['user_name']\n return super(ProfileView, self).dispatch(\n request, *args, **kwargs\n )\n\n def get(self, request):\n \"\"\"\n Will only respond to ALL GET requests and\n return a list of all employees.\n \"\"\"\n\n try:\n # attempt to create a new user by authenticating against the API\n employee = Employee(token=self.token)\n profile = employee.objects.me()\n return render(\n request,\n 'profile.html',\n {\n 'profile': profile,\n 'user_name': self.user_name,\n 'page_title': 'My Profile'\n }\n )\n except Exception as ex:\n return HttpResponseRedirect('/dashboard')\n\n return HttpResponseRedirect('/dashboard')\n","sub_path":"employees/views/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"343276075","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Created on 2015-07-29 15:11:25\n# Project: Acfun\n\nimport re\nimport json\nimport datetime\n\nimport pymysql.cursors\n\nfrom pyspider.libs.base_handler import *\n\nclass Handler(BaseHandler):\n crawl_config = {\n 'headers': {\n 'Host': 'www.acfun.tv',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36'\n }\n }\n\n api_get_comment = 'http://www.acfun.tv/comment_list_json.aspx?contentId='\n api_get_info = ''\n\n #每隔三分钟刷新一次\n @every(minutes=3)\n def on_start(self):\n \"\"\"\n 入口函数\n \"\"\"\n self.crawl('http://www.acfun.tv/v/list110/index.htm', callback=self.index_page, force_update=True)\n self.crawl('http://www.acfun.tv/v/list73/index.htm', callback=self.index_page, force_update=True)\n self.crawl('http://www.acfun.tv/v/list74/index.htm', callback=self.index_page, force_update=True)\n self.crawl('http://www.acfun.tv/v/list75/index.htm', callback=self.index_page, force_update=True)\n\n\n def index_page(self, response):\n \"\"\"\n 解析主页\n \"\"\"\n for each in response.doc('a[href^=\"http\"]').items():\n #reg_result = re.match(r\"http://www.acfun.tv/[av]/a[bc](\\d+)\", each.attr.href)\n #目前只抓取文章和视频\n reg_result = re.match(r\"http://www.acfun.tv/a/ac(\\d+)\", each.attr.href)\n if reg_result:\n self.crawl(each.attr.href, callback=self.parse_page, age=60,\n save={'contentId':reg_result.group(1)})\n\n def parse_page(self, response):\n \"\"\"\n 解析内页\n 爬第1页评论\n \"\"\"\n ac_id = response.save['contentId']\n ac_type = response.doc('#area-title-view>div.l>p>a').eq(1).text()\n ac_title = response.doc('#txt-title-view').text()\n ac_up = response.doc('#area-title-view>div.l>p>a').eq(2).text()\n ac_post_time = response.doc('#area-title-view>div.l>p>span').eq(0).text()\n ac_url = response.url\n\n #没问题\n accommentsinfo = Accommentsinfo(ac_id, ac_type, ac_title, ac_up, ac_post_time, ac_url)\n #存一下\n accommentsinfo.save()\n\n url = 'http://www.acfun.tv/comment_list_json.aspx?contentId='+ac_id+'¤tPage=1'\n self.crawl(url, callback=self.parse_first_comment, age=60, priority=2,\n save={'info':accommentsinfo.get_info()})\n\n def parse_first_comment(self, response):\n \"\"\"\n 解析评论第一页\n 分发其他页评论\n \"\"\"\n info = response.save['info']\n\n json_data = json.loads(response.text)\n total_page = json_data['totalPage']\n comments = json_data['commentContentArr']\n\n #首先分发其他页评论\n for page in range(2, total_page+1):\n url = 'http://www.acfun.tv/comment_list_json.aspx?contentId=' + \\\n str(info['id']) + '¤tPage=' + str(page)\n self.crawl(url, callback=self.parge_comment, age=30*60,\n save={'info':info})\n\n #然后解析第一页评论\n return 
self.analyze_comment(info, comments)\n\n\n def parge_comment(self, response):\n \"\"\"\n 解析评论页面\n \"\"\"\n info = response.save['info']\n\n \"\"\"\n 检查是否删除\n 否-->更新数据库\n 是-->检查数据库\n \"\"\"\n json_data = json.loads(response.text)\n comments = json_data['commentContentArr']\n\n return self.analyze_comment(info, comments)\n\n\n def analyze_comment(self, info, comments):\n \"\"\"\n 分析评论\n \"\"\"\n for _, comment in comments.items():\n new_comment = Accomments(comment['cid'], info['id'])\n\n ac_user_id = comment['userID']\n if ac_user_id != 4:\n new_comment.set_content(comment['content'])\n new_comment.set_user_name(comment['userName'])\n new_comment.set_layer(comment['count'])\n self.check_siji(new_comment)\n new_comment.save()\n else:\n return self.update_delete(comment['cid'], info['url'])\n\n def update_delete(self, cid, url):\n \"\"\"\n 更新delete\n \"\"\"\n connection = pymysql.connect(host='localhost',\n user='deleteso',\n passwd='deletepassso',\n db='deleteso',\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n try:\n with connection.cursor() as cursor:\n # Read a single record\n sql = \"SELECT * FROM `accomments` WHERE `cid`=%s AND isDelete != 1 \"\n cursor.execute(sql, (cid))\n result = cursor.fetchone()\n if result != None:\n sql = \"UPDATE `accomments` SET isDelete=1, checkTime=%s WHERE cid=%s\"\n cursor.execute(sql, (str(datetime.datetime.now()), cid))\n connection.commit()\n\n finally:\n connection.close()\n\n if result != None:\n result['checkTime'] = result['checkTime'].strftime(\"%Y-%m-%d %H:%M:%S\")\n result['url'] = url\n result.pop('isDelete', None)\n return result\n\n def check_siji(self, comment):\n \"\"\"\n 检查是否老司机\n \"\"\"\n if comment.get_content().find(u\"佛曰:\") > -1 \\\n or comment.get_content().find(u\"如是我闻:\") > -1 \\\n or comment.get_content().find(u\"*:\") > -1:\n comment.set_siji(1)\n elif comment.get_content().find(u\"ed2k://\") > -1:\n #linkUrl = \"ed2k:\" + comment.get_content()[comment.get_content().find(u\"ed2k://\"):]\n #encodedContent = comment.get_content().replace(self.encodeFoyu(linkUrl),linkUrl,1)\n #comment.set_content(encodedContent)\n comment.set_siji(1)\n elif comment.get_content().find(u\"magnet:?\") > -1:\n #linkUrl = \"magnet:?\" + comment.get_content()[comment.get_content().find(u\"magnet:?\"):]\n #encodedContent = comment.get_content().replace(self.encodeFoyu(linkUrl),linkUrl,1)\n #comment.set_content(encodedContent)\n comment.set_siji(1)\n else:\n comment.set_siji(0)\n\nclass Accommentsinfo(object):\n \"\"\"\n 文章/视频/番剧信息\n \"\"\"\n info = {}\n\n def __init__(self, ac_id, ac_type, ac_title, ac_up, ac_post_time, ac_url):\n self.info['id'] = int(ac_id)\n self.info['type'] = ac_type\n self.info['title'] = ac_title\n self.info['up'] = ac_up\n self.info['postTime'] = ac_post_time\n self.info['url'] = ac_url\n\n def get_info(self):\n return self.info\n\n def set_id(self, ac_id):\n self.info['id'] = int(ac_id)\n\n def set_type(self, ac_type):\n self.info['type'] = ac_type\n\n def set_title(self, ac_title):\n self.info['title'] = ac_title\n\n def set_up(self, ac_up):\n self.info['up'] = ac_up\n\n def set_postTime(self, ac_post_time):\n self.info['postTime'] = ac_post_time\n\n def set_url(self, ac_url):\n self.info['url'] = ac_url\n\n def save(self):\n connection = pymysql.connect(host='localhost',\n user='deleteso',\n passwd='deletepassso',\n db='deleteso',\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n try:\n with connection.cursor() as cursor:\n # Create a new record\n sql = \"INSERT INTO `accommentsinfo`(`id`, `type`, 
`title`, `up`, `postTime`, `url`) VALUES (%s, %s, %s, %s, %s, %s) \\\n ON DUPLICATE KEY UPDATE type=type, title=title, up=up, postTime=postTime, url=url\"\n cursor.execute(sql, (self.info['id'], self.info['type'], self.info['title'], self.info['up'], self.info['postTime'], self.info['url']))\n\n # connection is not autocommit by default. So you must commit to save\n # your changes.\n connection.commit()\n\n finally:\n connection.close()\n\nclass Accomments(object):\n \"\"\"\n 评论信息\n \"\"\"\n\n info = {}\n\n def __init__(self, ac_cid, ac_acid):\n self.info['cid'] = int(ac_cid)\n self.info['acid'] = int(ac_acid)\n self.info['checkTime'] = str(datetime.datetime.now())\n self.info['isDelete'] = 0\n\n def get_info(self):\n return self.info\n\n def get_content(self):\n return self.info['content']\n\n def set_content(self, ac_content):\n self.info['content'] = ac_content\n\n def set_user_name(self, ac_user_name):\n self.info['userName'] = ac_user_name\n\n def set_layer(self, ac_layer):\n self.info['layer'] = int(ac_layer)\n\n def set_siji(self, ac_siji):\n self.info['siji'] = int(ac_siji)\n\n def save(self):\n connection = pymysql.connect(host='localhost',\n user='deleteso',\n passwd='deletepassso',\n db='deleteso',\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n try:\n with connection.cursor() as cursor:\n # Create a new record\n sql = \"INSERT INTO `accomments`(`cid`, `content`, `userName`, `layer`, `acid`, `isDelete`, `siji`, `checkTime`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) \\\n ON DUPLICATE KEY UPDATE content=content, userName=userName, layer=layer, acid=acid, isDelete=isDelete, siji=siji, checkTime=VALUES(checkTime) \"\n cursor.execute(sql, (self.info['cid'], self.info['content'], self.info['userName'], self.info['layer'], self.info['acid'], self.info['isDelete'], self.info['siji'], self.info['checkTime']))\n\n # connection is not autocommit by default. So you must commit to save\n # your changes.\n connection.commit()\n\n finally:\n connection.close()\n","sub_path":"sweet-spider/acfun.py","file_name":"acfun.py","file_ext":"py","file_size_in_byte":10343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"468059794","text":"import socket\r\nimport os\r\nimport shutil\r\nfrom datetime import datetime\r\n\"\"\"\r\npwd - показывает название рабочей директории\r\nls - показывает содержимое текущей директории\r\ncat - отправляет содержимое файла\r\nmkdir - создаёт папку\r\nrmdir - удаляет папку\r\nrm - удалить файл\r\nrename - переименовать файл\r\ncopycs \r\ncopysc \r\nexit (отключение) реализовано на клиенте\r\nlogin реализовано на клиенте\r\n\"\"\"\r\n\r\ndef change_port(port,server_socket):\r\n \"\"\"Проверка, занят ли порт с этим сокетом. 
If the port is busy, the port number is incremented until a free one is found.\"\"\"\r\n    while True:\r\n        try:\r\n            server_socket.bind(('', port))\r\n        except socket.error:\r\n            port += 1\r\n        else:\r\n            break\r\n    return port\r\n\r\ndef check_access(path):\r\n    \"\"\"Checks whether the user has access to this directory.\"\"\"\r\n    global userdir\r\n    if os.path.isabs(path):\r\n        fullpath = path\r\n    else:\r\n        fullpath = os.path.join(userdir, path)\r\n    if userdir in fullpath:\r\n        return True\r\n    else:\r\n        print(\"Access denied!\")\r\n        return False\r\n\r\ndef pwd():\r\n    \"\"\"Returns the user's working directory.\"\"\"\r\n    global userdir\r\n    return \"User directory \" + userdir\r\n\r\ndef ls(path):\r\n    \"\"\"Returns the contents of a directory.\"\"\"\r\n    return \"Contents of directory \" + path + \":\\n\" + \"; \".join(os.listdir(path))\r\n\r\ndef cat(filename):\r\n    \"\"\"Returns the contents of a file.\"\"\"\r\n    global userdir\r\n    content = \"\"\r\n    if os.path.isabs(filename):\r\n        fullpath = filename\r\n    else:\r\n        fullpath = os.path.join(userdir, filename)\r\n    if os.path.exists(fullpath):\r\n        with open(fullpath, \"r\") as f:\r\n            for line in f:\r\n                content += line\r\n    return \"Contents of file \" + fullpath + \":\\n\" + content\r\n\r\ndef mkdir(path):\r\n    \"\"\"Creates a directory.\"\"\"\r\n    global userdir\r\n    if check_access(path):\r\n        fullpath = os.path.join(userdir, path)\r\n        if not os.path.exists(fullpath):\r\n            os.mkdir(fullpath)\r\n            return \"Created directory \" + fullpath\r\n    return \"Access error for directory \" + path\r\n\r\ndef rmdir(path):\r\n    \"\"\"Removes a directory.\"\"\"\r\n    global userdir\r\n    fullpath = path\r\n    if not os.path.isabs(path):\r\n        fullpath = os.path.join(userdir, path)\r\n    if check_access(fullpath) and os.path.exists(fullpath):\r\n        shutil.rmtree(fullpath)\r\n        return \"Removed directory \" + fullpath\r\n    return \"Access error for directory \" + path\r\n\r\ndef rm(filename):\r\n    \"\"\"Removes a file.\"\"\"\r\n    global userdir\r\n    fullpath = filename\r\n    if not os.path.isabs(filename):\r\n        fullpath = os.path.join(userdir, filename)\r\n    if check_access(fullpath) and os.path.exists(fullpath):\r\n        os.remove(fullpath)\r\n        return \"Removed file \" + fullpath\r\n    return \"Access error for directory \" + fullpath\r\n\r\ndef copycs(filename_from, filename_to):\r\n    \"\"\"Copies a file from the client to the server.\"\"\"\r\n    global userdir\r\n    if not os.path.isabs(filename_from):\r\n        filename_from = os.path.join(userdir, filename_from)\r\n    if not os.path.isabs(filename_to):\r\n        filename_to = os.path.join(os.getcwd(), filename_to)\r\n    shutil.copyfile(filename_from, filename_to)\r\n    return filename_from + \" copied to \" + filename_to\r\n\r\ndef copysc(filename_from, filename_to):\r\n    \"\"\"Copies a file from the server to the client.\"\"\"\r\n    global userdir\r\n    if not os.path.isabs(filename_from):\r\n        filename_from = os.path.join(os.getcwd(), filename_from)\r\n    if not os.path.isabs(filename_to):\r\n        filename_to = os.path.join(userdir, filename_to)\r\n    shutil.copyfile(filename_from, filename_to)\r\n    return filename_from + \" copied to \" + filename_to\r\n\r\ndef process(req):\r\n    \"\"\"Dispatches an incoming command.\"\"\"\r\n    global userdir, current_user\r\n    res = \"\"\r\n    if req == \"pwd\":\r\n        res = pwd()\r\n    elif req == \"ls\":\r\n        res = ls(userdir)\r\n    elif req.split()[0] == \"cat\":\r\n        res = cat(req.split()[1])\r\n    elif req.split()[0] == \"mkdir\":\r\n        res = mkdir(req.split()[1])\r\n    elif req.split()[0] == \"rmdir\":\r\n        res = rmdir(req.split()[1])\r\n    elif req.split()[0] == \"rm\":\r\n        res = rm(req.split()[1])\r\n    elif 
req.split()[0]==\"copycs\":\r\n res = copycs(req.split()[1],req.split()[2])\r\n elif req.split()[0]==\"copysc\":\r\n res = copysc(req.split()[1],req.split()[2])\r\n elif req.split()[0]==\"login\":\r\n userdir=os.path.join(os.getcwd(), req.split()[1])\r\n current_user=req.split()[1]\r\n res = \"Пользователь {} вошёл в систему \".format(req.split()[1])\r\n elif req==\"exit\":\r\n return \"Выход\"\r\n else:\r\n res=\"bad request\"\r\n return res\r\n\r\ndef log(message,file):\r\n \"\"\"Записывает лог в файл \"\"\"\r\n if os.path.exists(os.path.join(os.getcwd(),file)):\r\n mod=\"a\"\r\n else:\r\n mod=\"w+\"\r\n with open(file,mod) as f:\r\n f.write(str(datetime.now()) + \": \"+ message+\"\\n\")\r\n\r\nuserdir = os.path.join(os.getcwd(), \"docs\") #директория по умолчанию\r\ncurrent_user=\"\"\r\n\r\nsock = socket.socket()\r\nport = change_port(int(input(\"Введите номер порта \")),sock)\r\nsock.listen()\r\nprint(\"Прослушиваем порт \", port)\r\n\r\nwhile True:\r\n conn, addr = sock.accept()\r\n \r\n request = conn.recv(1024).decode()\r\n print(request)\r\n \r\n response = process(request)\r\n conn.send(response.encode())\r\n log(response,\"log.txt\")\r\n\r\nconn.close()\r\n","sub_path":"ftp-server.py","file_name":"ftp-server.py","file_ext":"py","file_size_in_byte":6262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"554837449","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# def my_abs(x):\n# # 参数类型检查\n# if not isinstance(x, (int, float)):\n# raise TypeError('bad operand type')\n# if x >= 0:\n# return x\n# else:\n# return -x\n#\n# print(my_abs(-99))\n#\n# def nop():\n# pass\n\n# import math\n#\n# def move(x, y, step, angle=0):\n# nx = x + step * math.cos(angle)\n# ny = y - step * math.sin(angle)\n# return nx, ny\n#\n# x, y = move(100, 100, 60, math.pi / 6)\n# print(x, y)\n#\n# r = move(100, 100, 60, math.pi / 6)\n# print(r) # tuple\n\nimport math\n\ndef quadratic(a, b, c):\n if not (isinstance(a, (int, float)) and isinstance(b, (int, float)) and isinstance(c, (int, float))):\n raise TypeError('bad operand type')\n r = b*b - 4*a*c\n if r < 0:\n return\n elif r == 0:\n x = -1 * b / 2 * a\n return x\n else:\n x1 = (-1 * b + math.sqrt(r)) / (2 * a)\n x2 = (-1 * b - math.sqrt(r)) / (2 * a)\n return x1, x2\n\nprint('quadratic(2, 3, 1) =', quadratic(2, 3, 1))\nprint('quadratic(1, 3, -4) =', quadratic(1, 3, -4))\nprint('quadratic(2, 4, 2) =', quadratic(2, 4, 2))\n\nif quadratic(2, 3, 1) != (-0.5, -1.0):\n print('测试失败')\nelif quadratic(1, 3, -4) != (1.0, -4.0):\n print('测试失败')\nelif quadratic(2, 4, 2) != (-4.0):\n print('测试失败')\nelse:\n print('测试成功')","sub_path":"function/def_func.py","file_name":"def_func.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"116261151","text":"import os\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\n\n\ndef map_dir(map_function, dirpath, cores=1, **kwargs):\n filelist = []\n for root, _, files in os.walk(dirpath):\n for name in files:\n filelist.append(os.path.join(root, name))\n\n results = []\n if cores == 1:\n for filename in tqdm(filelist):\n try:\n results.append(map_function(filename))\n except:\n print('error')\n else:\n pool = Pool(cores)\n results = pool.map(map_function, filelist)\n\n if kwargs.get('return_list', False):\n return results, filelist\n else:\n return 
results\n","sub_path":"DeepSymphony/utils/BatchProcessing.py","file_name":"BatchProcessing.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
{"seq_id":"397683988","text":"'''\nSorts each dataset into a training set and a test set.\ndataset---train/test---group_0---pic_0---address+mat\n'''\nimport glob  # for listing file names\nimport os\n\nimport h5py\nimport numpy as np\nimport PIL\nimport scipy.spatial\nfrom matplotlib import pyplot as plt\nfrom scipy import io as scio\nfrom scipy import ndimage as scnd\n\nDATASET_TYPE = ['mall', 'expo2010']\n\n\ndef return_final_dir(path):\n    return path.split('\\\\')[-1]\n\n\ndef get_expo2010(root):\n    result = list()\n    for set_index, set_type in enumerate(['train', 'test']):\n        result.append([])\n        frame_path = os.path.join(root, '%s_frame' % set_type)\n        label_path = os.path.join(root, '%s_label' % set_type)\n        video_name = list(map(return_final_dir, glob.glob(os.path.join(label_path, '*'))))\n        for video_index in range(len(video_name)):\n            video_path = os.path.join(label_path, video_name[video_index])\n            mat_list = glob.glob(os.path.join(video_path, '*.mat'))\n            for mat_index in range(0, len(mat_list)-1):\n                pic_index = mat_index\n                mat_path = mat_list[mat_index]\n                pic_name = mat_path.split('\\\\')[-1].replace('.mat', '.jpg')\n                pic_path = os.path.join(frame_path, pic_name)\n                single_pic = PIL.Image.open(pic_path)\n                single_pic = np.array(single_pic)\n                try:\n                    gt_file = scio.loadmat(mat_path)\n                    single_gt = gt_file['point_position']\n                except Exception:\n                    feature = h5py.File(mat_path, 'r')\n                    gt_file = feature['point_position']\n                    if len(gt_file.shape) == 1:\n                        single_gt = np.empty((1, 2))\n                    else:\n                        flatten_gt = np.ravel(gt_file.value, order='F')\n                        single_gt = np.reshape(flatten_gt, (gt_file.shape[::-1]))\n                single_gt = swap_axis(single_gt)\n                single_dens = gaussian_process(single_gt, single_pic.shape[:2])\n                file_name = 'video_%s_pic_%s_.h5' % (str(video_index).zfill(5), str(pic_index).zfill(5))\n                h5_path = os.path.join(file_name)  # saved in the current working directory\n                with h5py.File(h5_path, 'w') as h5_file:\n                    h5_file['pic'] = single_pic\n                    h5_file['gt'] = single_gt\n                    h5_file['dens'] = single_dens\n\n\ndef gaussian_process(p_gt_list: np.array, p_shape):  # adaptive (geometry-dependent) Gaussian kernels\n    pos_list = p_gt_list.copy()\n    corner_pos = np.array([[0, 0], [0, p_shape[1]], [p_shape[0], 0], p_shape])  # pad with the image corners so the KD-tree query works even with very few people\n    dens_array = np.zeros(p_shape)\n    if len(pos_list):\n        pos_extend = np.concatenate((pos_list, corner_pos), axis=0)\n        kd_tree = scipy.spatial.KDTree(pos_extend, leafsize=2048)\n        kd_dis, kd_locat = kd_tree.query(pos_list, k=4)\n        for index, pos in enumerate(pos_list):\n            if pos[0] < p_shape[0] and pos[1] < p_shape[1]:  # guard against out-of-bounds coordinates\n                temp_filter = np.zeros(p_shape)\n                temp_filter[pos[0], pos[1]] = 1.0\n                sigma = (kd_dis[index][1]+kd_dis[index][2]+kd_dis[index][3])*0.1\n                dens_array += scnd.filters.gaussian_filter(temp_filter, sigma, mode='constant')\n    return dens_array\n\n\ndef swap_axis(gt_list):  # flip each gt point and cast to int, because image (x, y) and array (row, col) coordinates are swapped\n    gt_list = gt_list.tolist()\n    result = []\n    for pos in gt_list:\n        result.append(pos[::-1])\n    result = np.array(result).astype(int)\n    return result\n\n\ndef get_dataset_mall(root_path):\n    '''\n    Returns all data of the mall dataset, split into a training set and a test set,\n    each divided by video clip; every clip consists of a list of frames and a list of\n    ground-truth points. All data are processed under a uniform naming scheme and\n    stored in the processed folder as video_<index>_pic_<index>_.h5 (e.g. video0_pic0.h5);\n    each file stores the original image, the gt list and the density map.\n    '''\n    frame_path = os.path.join(root_path, 'frames')\n    processed_path = os.path.join(root_path, 'train_processed')\n    pic_list = glob.glob(os.path.join(frame_path, '*.jpg'))\n    gt_path = os.path.join(root_path, 'mall_gt.mat')\n    gt_file = 
scio.loadmat(gt_path) # gt_list = gt_file['frame'][0][pic_index][0, 0][0]\n video_index = 0\n for pic_index in range(len(pic_list)):\n single_pic = PIL.Image.open(pic_list[pic_index])\n single_pic = np.array(single_pic)\n single_gt = gt_file['frame'][0][pic_index][0, 0][0]\n single_gt = swap_axis(single_gt)\n single_dens = gaussian_process(single_gt, single_pic.shape[:2])\n file_name = 'video_%s_pic_%s_.h5' % (str(video_index).zfill(5), str(pic_index).zfill(5))\n h5_path = os.path.join(processed_path, file_name)\n with h5py.File(h5_path, 'w') as h5_file:\n h5_file['pic'] = single_pic\n h5_file['gt'] = single_gt\n h5_file['dens'] = single_dens\n\n\ndef get_dataset_expo2010(root_path):\n '''\n 返回expo2010数据集的所有数据,分为训练数据集和测试数据集,各自按照视频片段分割\n 每个视频片段包括图片列表和gt列表\n 按照统一的命名规则将所有数据处理,存放在指定的文件夹procesed_data中\n 训练集/测试集_视频片段编号_图片编号.h5\n 例如video0_pic0.h5\n 文件中保存原图片,gt列表,密度图\n '''\n for set_type in ['test', 'train']:\n processed_path = os.path.join(root_path, '%s_processed' % set_type)\n frame_path = os.path.join(root_path, '%s_frame' % set_type)\n label_path = os.path.join(root_path, '%s_label' % set_type)\n label_type = glob.glob(os.path.join(label_path, '*'))\n for video_index in range(0, len(label_type)):\n video_path = label_type[video_index]\n mat_list = glob.glob(os.path.join(video_path, '*.mat'))\n for mat_index in range(0, len(mat_list)-1):\n pic_index = mat_index\n mat_path = mat_list[mat_index]\n pic_name = mat_path.split('\\\\')[-1].replace('.mat', '.jpg')\n pic_path = os.path.join(frame_path, pic_name)\n single_pic = PIL.Image.open(pic_path)\n single_pic = np.array(single_pic)\n try:\n gt_file = scio.loadmat(mat_path)\n single_gt = gt_file['point_position']\n except Exception:\n featrue = h5py.File(mat_path, 'r')\n gt_file = featrue['point_position']\n if len(gt_file.shape) == 1:\n single_gt = np.empty((1, 2))\n else:\n flatten_gt = np.ravel(gt_file.value, order='F')\n single_gt = np.reshape(flatten_gt, (gt_file.shape[::-1]))\n single_gt = swap_axis(single_gt)\n single_dens = gaussian_process(single_gt, single_pic.shape[:2])\n file_name = 'video_%s_pic_%s_.h5' % (str(video_index).zfill(5), str(pic_index).zfill(5))\n h5_path = os.path.join(processed_path, file_name)\n with h5py.File(h5_path, 'w') as h5_file:\n h5_file['pic'] = single_pic\n h5_file['gt'] = single_gt\n h5_file['dens'] = single_dens\n\n\ndef check_file(file_path):\n h5_file = h5py.File(file_path, 'r')\n pic = h5_file['pic']\n dens = h5_file['dens']\n plt.imshow(dens)\n plt.show()\n plt.imshow(pic)\n plt.show()\n\n\nif __name__ == '__main__':\n get_expo2010('Datasets\\\\expo2010')\n # get_dataset_mall(os.path.join('Datasets', 'mall'))\n # get_dataset_expo2010(os.path.join('Datasets', 'expo2010'))\n","sub_path":"Detection/Ccounting/mymodel/split_dataset.py","file_name":"split_dataset.py","file_ext":"py","file_size_in_byte":7730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"647564438","text":"from ..grammar import Word,WordType,NOUN\nfrom .. import abstract\n\n'''\nThis module simply contains a list of nouns we'll likely need.\n'''\n\n@WordType(NOUN)\nclass Class (Word):\n '''\n The noun \"class\".\n '''\n# def __init__(self):\n# super(self.__class__,self).__init__(NOUN)\n \n def create(self,**options):\n '''\n Creates a abstract.Class object, calls create on it.\n '''\n class_obj = abstract.Class(self[\"name\"],options.get(\"in_location\"))\n class_obj.create()\n\n@WordType(NOUN)\nclass Function (Word):\n \n def create(self,**options):\n '''\n Creates a function. 
Requires that a class location be specified\n '''\n if(options.get(\"in\") == None):\n raise Exception(\n \"You need to specify where the function is defined\")\n print(\"Created function %s in class \\\"%s\\\"\" %\n (self[\"name\"], options.get(\"in\")))\n\n def define(self,**options):\n '''\n Synonym for create.\n '''\n self.create(**options)\n\nif \"__main__\" == __name__:\n a = Class();\n print(a.word_type())\n b = Function()\n print(b.word_type())\n print(b._word_type)\n","sub_path":"wat/dictionary/nouns.py","file_name":"nouns.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"272630615","text":"import logging\r\n\r\nimport azure.functions as func\r\nfrom pymongo import MongoClient\r\nfrom bson.json_util import dumps\r\n\r\n\r\ndef main(req: func.HttpRequest) -> func.HttpResponse:\r\n logging.info('Python HTTP trigger function processed a request.')\r\n uri = \"mongodb://caper:ElT8nHTJe3p1iilLllrqE6Do4nvqrMgFlU3bDQhUrBJTUhzSEr3CD5do0RqrzO0BJ2wXPOX9PXrY69hAhe0l5w==@caper.mongo.cosmos.azure.com:10255/?ssl=true&replicaSet=globaldb&maxIdleTimeMS=120000&appName=@caper@\"\r\n client = MongoClient(uri)\r\n db = client['db-one']\r\n collection = db['collection-two']\r\n name = req.params.get('name')\r\n if not name:\r\n try:\r\n req_body = req.get_json()\r\n except ValueError:\r\n pass\r\n else:\r\n name = req_body.get('name')\r\n\r\n if name:\r\n return func.HttpResponse(f\"Hello, {name}. This HTTP triggered function executed successfully.\")\r\n else:\r\n return func.HttpResponse(\r\n # \"This HTTP triggered function executed successfully. Pass a name in the query string or in the request body for a personalized response.\",\r\n # status_code=200\r\n dumps(list(collection.find())),\r\n )\r\n","sub_path":"azure/getProducts/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"647152073","text":"import feedparser\nfrom celery.utils.log import get_task_logger\nfrom config.celery import app\nfrom django.conf import settings\n\nfrom feeds.models import Feed, Entry\n\nlogger = get_task_logger(__name__)\n\n\n@app.task\ndef update_feed(feed_id):\n \"\"\"Updates a single feed\n\n Parameters:\n -----------\n feed_id : int\n The id of the feed to be updated.\n \"\"\"\n feed = Feed.objects.get(id=feed_id)\n d = feedparser.parse(feed.url)\n\n if d[\"status\"] == 410:\n feed.gone = True\n feed.save()\n return\n\n if d[\"status\"] > 400 or d[\"bozo\"] == 1:\n feed.failed_tries += 1\n feed.save()\n return\n\n feed.failed_tries = 0\n feed.save()\n for entry in d[\"entries\"]:\n Entry.objects.update_or_create(\n link=entry[\"link\"],\n defaults={\n \"feed\": feed,\n \"title\": entry[\"title\"],\n \"summary\": entry[\"summary\"],\n },\n )\n\n\n@app.task\ndef update_all_feeds():\n \"\"\"Update all feeds in the system\"\"\"\n logger.info('Updating all existing feeds!')\n for feed in Feed.objects.filter(\n gone=False, failed_tries__lte=settings.MAX_FAILED_TRIES\n ):\n update_feed(feed.id)\n","sub_path":"feeds/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"384307109","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 21 14:49:00 2019\r\n\r\n@author: aimee\r\n\"\"\"\r\n\r\n#%%\r\nimport pygame\r\nimport os\r\npath = r'C:\\Users\\aimee\\binus 
tingz\\program design methods\\pygem'\r\nos.chdir(path)\r\n\r\nclass ship():\r\n def __init__(self, ai_settings, screen):\r\n self.screen = screen\r\n self.ai_settings = ai_settings\r\n self.image = pygame.image.load('deadpool.bmp')\r\n self.rect = self.image.get_rect()\r\n self.screen_rect = screen.get_rect()\r\n self.rect.centerx = self.screen_rect.centerx\r\n self.rect.bottom = self.screen_rect.bottom\r\n self.center = float(self.rect.centerx)\r\n \r\n self.moving_right = False\r\n self.moving_left = False\r\n \r\n def update(self):\r\n if self.moving_right and self.rect.right < self.screen_rect.right:\r\n self.center += self.ai_settings.dp_speed_factor \r\n if self.moving_left and self.rect.left > 0:\r\n self.center -= self.ai_settings.dp_speed_factor\r\n self.rect.centerx = self.center\r\n \r\n def blitme(self):\r\n self.screen.blit(self.image, self.rect)\r\n \r\n def center_ship(self):\r\n self.center = self.screen_rect.centerx","sub_path":"alien invasion/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"602401070","text":"import requests\nimport json\nimport os\n\nROUTER_URL = os.getenv(\"ROUTER_URL\")\n\n\ndef request_router(path):\n r = requests.get(ROUTER_URL + path).json()\n return json.dumps(r)\n\n\ndef json_to_excel(ws, data, row=0):\n for item in data:\n col = 0\n for key, value in item.items():\n if row == 0:\n ws.write(row, col, key)\n else:\n if type(value) != str:\n value = str(value)\n ws.write(row, col, value)\n col += 1\n row += 1\n","sub_path":"athena_ui/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"479994366","text":"from layers import *\nfrom rnn_layers import *\nfrom models import Model\n\ndef SentimentNet(word_to_idx):\n \"\"\"Construct a RNN model for sentiment analysis\n\n # Arguments:\n word_to_idx: A dictionary giving the vocabulary. 
It contains V entries,\n        and maps each string to a unique integer in the range [0, V).\n    # Returns\n        model: the constructed model\n    \"\"\"\n    vocab_size = len(word_to_idx)\n\n    model = Model()\n    model.add(FCLayer(vocab_size, 200, name='embedding', initializer=Guassian(std=0.01)))\n    model.add(BidirectionalRNN(RNNCell(in_features=200, units=50, initializer=Guassian(std=0.01))))\n    model.add(FCLayer(100, 32, name='fclayer1', initializer=Guassian(std=0.01)))\n    model.add(TemporalPooling()) # defined in layers.py\n    model.add(FCLayer(32, 2, name='fclayer2', initializer=Guassian(std=0.01)))\n    \n    return model","sub_path":"rnn/applications.py","file_name":"applications.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"349749976","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 26 15:13:24 2021\n\n@author: Gilles.DELBECQ\n\"\"\"\n\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt \nimport numpy as np\nimport os\nimport math\nfrom scipy.signal import find_peaks\nimport seaborn as sns\n\nfrom mpl_toolkits import mplot3d\n\ndef get_marker_list(file):\n    '''\n    Gets marker list from MOCAP file \n    '''\n    import pandas as pd\n    \n    data = pd.read_csv(file,sep=',',header=2,delimiter=None,na_values='')\n\n    #Get markers list\n    markers = [x for x in data.columns if 'Unnamed' not in x]\n    \n    #Assert all markers are string formatted\n    for marker in markers : \n        assert type(marker) == str, 'Markers are not strings'\n    \n    return markers\n\ndef new_file_index(file):\n    '''\n    Creates new index for optimized dataframe, including \"Marker1:X\",\"Marker1:Y\"...format\n    '''\n    \n    pre_format = ['Frame','SubFrame']\n    \n    positions = ['X','Y','Z']\n    \n    markers = get_marker_list(file)\n    \n    marker_index = []\n    for marker in markers :\n        for position in positions : \n            marker_index.append('{}:{}'.format(marker,position))\n    \n    new_file_index = pre_format + marker_index\n    \n    return new_file_index\n\ndef dataframe(file,header=4):\n    \n    '''\n    Returns an optimized dataframe based on the architecture of the raw file\n    '''\n    \n    import pandas as pd \n    \n    data = pd.read_csv(file,sep=',',header=header,delimiter=None,na_values='')\n    \n    opt_dataframe = pd.DataFrame(data.values,columns=new_file_index(file))\n    \n    return opt_dataframe\n\n\ndef coord(file,marker,fstart=1,fstop=-1,projection=None,step=1):\n    '''\n    Returns array with XYZ coordinates for a single marker\n    '''\n    \n    data = dataframe(file)\n    \n    if fstop == -1:\n        stop = data.shape[0]-1\n    else:\n        stop = fstop\n    \n    xs = data.iloc[fstart:stop,data.columns.get_loc('{}:X'.format(marker))].values\n    ys = data.iloc[fstart:stop,data.columns.get_loc('{}:Y'.format(marker))].values\n    zs = data.iloc[fstart:stop,data.columns.get_loc('{}:Z'.format(marker))].values\n    \n\n    if projection == 'X':\n        proj = np.arange(0,len(xs),step)\n        xs = np.asarray([x+w for x,w in zip(xs,proj)]).ravel()\n    \n    if projection == 'Y':\n        proj = np.arange(0,len(ys),step)\n        ys = np.asarray([y+w for y,w in zip(ys,proj)]).ravel()\n\n    return xs,ys,zs\n\ndef normalized(file, ref_marker, target_marker):\n    \"\"\"\n    Calculate the normalized coordinates of target_marker with ref_marker as the reference\n    \"\"\"\n    ref_x,ref_y,ref_z = coord(file,ref_marker)\n    target_x, target_y,target_z = coord(file,target_marker)\n    norm_x=target_x-ref_x\n    norm_y=target_y-ref_y\n    norm_z=target_z-ref_z\n    \n    return norm_x,norm_y,norm_z\n\ndef calculate_angle(file,markerA,markerB,markerC):\n    \"\"\"\n    Calculate angle formed in markerB\n    
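Per frame, with ba = A - B and bc = C - B, the angle at B is computed as\n    degrees(arccos(dot(ba, bc) / (|ba| * |bc|))), exactly as implemented in the loop below. Reference:\n    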
https://stackoverflow.com/questions/35176451/python-code-to-calculate-angle-between-three-point-using-their-3d-coordinates\n \"\"\"\n A,B,C=np.asarray(coord(file,markerA)),np.asarray(coord(file,markerB)),np.asarray(coord(file,markerC))\n ba = A - B\n bc = C - B\n \n cosine_angle=[]\n for i in range(len(ba[0])):\n cosine_angle.append(np.dot(ba[:,i], bc[:,i]) / (np.linalg.norm(ba[:,i]) * np.linalg.norm(bc[:,i])))\n \n angle = np.degrees(np.arccos(cosine_angle))\n return angle\n\ndef detect_stance(file,marker):\n trajectory=coord(file,marker)[2]\n acceleration =np.gradient(np.gradient(trajectory))\n peaks, _ = find_peaks(acceleration,prominence=1)\n \n return trajectory,acceleration,peaks\n\n\nroot_dir = r'C:\\Users\\Gilles.DELBECQ\\Desktop\\CSV_MOCAP_05_11\\toast'\n\nobstacle_csv =r\"C:/Users/Gilles.DELBECQ/Desktop/CSV_MOCAP_05_11/Octobre 2021/Param Mocap Nov 2021/obstacles.xlsx\"\ndata_csv = r'C:/Users/Gilles.DELBECQ/Desktop/CSV_MOCAP_05_11/Octobre 2021/Param Mocap Nov 2021/Data.xlsx'\n\ndf_data = pd.read_excel(data_csv)\n\nDIST,STIM,SPOT=[],[],[]\n\n\n\n#First Loop : loop on all csv files to list them in the list \"Files\"\nFiles = []\nfor r, d, f in os.walk(root_dir):\n# r=root, d=directories, f = files\n for filename in f:\n if '.csv' in filename:\n Files.append(os.path.join(r, filename))\n \nprint('Files to analyze : {}'.format(len(Files)))\n\nfor file in Files:\n df_raw = pd.read_csv(file,sep=',',header=2,delimiter=None,na_values='')\n \n marker_list = get_marker_list(file)\n subject=marker_list[0].split(':')[0]\n session = file.split('\\\\')[-1].split('_')[1]\n trial = file.split('\\\\')[-1].split('_')[2].split('.')[0]\n \n\n df_data_file = df_data.loc[((df_data['Animal'] == int(subject)) & (df_data['Session'] == int(session))& (df_data['Trial'] == int(trial)))]\n freq = df_data_file['Freq'].tolist()[0]\n power = df_data_file['Power'].tolist()[0]\n tracking_quality = df_data_file['Tracking quality'].tolist()[0]\n obstacle_idx = df_data_file['Obstacle'].tolist()[0]\n\n right_foot=coord(file,\"{}:Foot_R2\".format(subject))\n left_foot=coord(file,\"{}:Foot_L2\".format(subject))\n base_tail=coord(file,\"{}:Back1\".format(subject))\n hip_r=coord(file,\"{}:Pelvis_R2\".format(subject))\n hip_l=coord(file,\"{}:Pelvis_L2\".format(subject))\n \n angles=[]\n vectors = np.array(hip_l)-np.array(hip_r)\n origin = np.array((-1,0,0))\n \n \n \n for i in range(len(vectors[0])):\n num = vectors[0][i]*1\n a = math.sqrt(vectors[0][i]*vectors[0][i]+vectors[1][i]*vectors[1][i]+vectors[2][i]*vectors[2][i])\n b=math.sqrt(1*1)\n \n \n angle = np.degrees(math.acos(num/(a*b)))\n angles.append(angle)\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n \n # ax.plot3D(right_foot[0],right_foot[1],right_foot[2])\n # ax.plot3D(left_foot[0],left_foot[1],left_foot[2])\n # ax.plot3D(base_tail[0],base_tail[1],base_tail[2])\n \n ax.plot3D(hip_r[0],hip_r[1],hip_r[2],color='red')\n ax.plot3D(hip_l[0],hip_l[1],hip_l[2],color='blue')\n \n \n for i in range(len(hip_r[1])):\n ax.plot3D([hip_r[0][i],hip_l[0][i]],[hip_r[1][i],hip_l[1][i]],[hip_r[2][i],hip_l[2][i]],color='blue',alpha=0.5)","sub_path":"old/MOCAP Analysis/2022/MOCAP Hips Balance.py","file_name":"MOCAP Hips Balance.py","file_ext":"py","file_size_in_byte":6184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"605244231","text":"from unittest.mock import NonCallableMock, patch, sentinel\n\nfrom preacher.presentation.listener import 
LoggingReportingListener\nfrom preacher.presentation.logging import LoggingReporter\n\nPKG = 'preacher.presentation.listener.logging'\n\n\ndef test_on_scenario():\n    reporter = NonCallableMock(LoggingReporter)\n    listener = LoggingReportingListener(reporter)\n    listener.on_scenario(sentinel.result)\n    listener.on_end(sentinel.status)\n\n    reporter.show_scenario_result.assert_called_once_with(sentinel.result)\n    reporter.show_status.assert_called_once_with(sentinel.status)\n\n\n@patch(f'{PKG}.LoggingReportingListener', return_value=sentinel.listener)\n@patch(f'{PKG}.LoggingReporter', return_value=sentinel.logger)\ndef test_from_logger(logger_ctor, listener_ctor):\n    listener = LoggingReportingListener.from_logger(sentinel.py_logger)\n    assert listener is sentinel.listener\n\n    logger_ctor.assert_called_once_with(sentinel.py_logger)\n    listener_ctor.assert_called_once_with(sentinel.logger)\n","sub_path":"tests/presentation/listener/test_logging_reporting_listener.py","file_name":"test_logging_reporting_listener.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"15192991","text":"\"\"\"Methods for ensembling pixel-wise classifications to the crown scale\n\"\"\"\nimport numpy as _np\n\n\n# a function to average probabilities by crown id\ndef average(predictions, id_labels, sp_labels):\n    \"\"\"Averages prediction probabilities by id (e.g., by crown) and by species.\n    \n    Args: \n        predictions - the pixel-wise prediction probabilities, ordered by species\n        id_labels - the labels (usually, crown labels) that probabilities are aggregated to\n        sp_labels - the species labels\n    \n    Returns:\n        output_pr - the averaged prediction probabilities\n    \"\"\"\n    # create the output array to store the results\n    id_unique = _np.unique(id_labels)\n    sp_unique = _np.unique(sp_labels)\n    n_id = len(id_unique)\n    n_sp = len(sp_unique)\n    output_pr = _np.zeros(n_id * n_sp)\n\n    # loop through each crown, calculate the average probability per crown, and write it to the array\n    for i in range(n_id):\n        id_index = id_labels == id_unique[i]\n        output_pr[i * n_sp:(i + 1) * n_sp] = predictions[id_index].mean(axis=0)\n\n    return output_pr\n\n\n# a function to reconcile the crown and species labels for csv output\ndef get_csv_labels(id_labels, sp_labels):\n    \"\"\"Sets the output labels for prediction probabilities by id (e.g., by crown) and by species.\n    \n    Args: \n        id_labels - the labels (usually, crown labels) that probabilities are aggregated to\n        sp_labels - the species labels\n    \n    Returns:\n        id_rows, sp_rows - the csv ordered id and species labels\n    \"\"\"\n    # get the unique id and species labels\n    id_unique = _np.unique(id_labels)\n    sp_unique = _np.unique(sp_labels)\n    n_id = len(id_unique)\n    n_sp = len(sp_unique)\n\n    id_rows = _np.repeat(id_unique, n_sp)\n    sp_rows = _np.repeat(sp_unique, n_id).reshape(n_sp, n_id).flatten(order='F')\n\n    return id_rows, sp_rows\n","sub_path":"ccbid/crown_ensemble.py","file_name":"crown_ensemble.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"1250142","text":"import numpy as np\r\nfrom scipy.stats import norm\r\n\r\nimport autofit as af\r\nfrom autoarray.inversion import pixelizations as pix, regularization as reg\r\nfrom autofit.exc import PriorException\r\nfrom autogalaxy.galaxy import galaxy as g\r\nfrom autogalaxy.profiles import light_profiles as lp\r\nfrom autogalaxy.profiles import mass_profiles as mp\r\n\r\n\r\ndef isprior(obj):\r\n    if isinstance(obj, 
af.Model):\r\n return True\r\n return False\r\n\r\n\r\ndef isinstance_or_prior(obj, cls):\r\n if isinstance(obj, cls):\r\n return True\r\n if isinstance(obj, af.Model) and obj.cls == cls:\r\n return True\r\n return False\r\n\r\n\r\ndef pixelization_from(model: af.Collection) -> pix.Pixelization:\r\n \"\"\"\r\n For a model containing one or more galaxies, inspect its attributes and return the `pixelization` of a galaxy\r\n provided one galaxy has a pixelization, otherwise it returns none. There cannot be more than one `Pixelization` in\r\n a model.\r\n \r\n This function expects that the input model is a `Collection` where the first model-component has the\r\n name `galaxies`, and is itself a `Collection` of `Galaxy` instances. This is the\r\n standard API for creating a model in PyAutoGalaxy.\r\n\r\n The result of `pixelization_from_model` is used by the preloading to determine whether certain parts of a\r\n calculation can be cached before the non-linear search begins for efficiency.\r\n\r\n Parameters\r\n ----------\r\n model : af.Collection\r\n Contains the `galaxies` in the model that will be fitted via the non-linear search.\r\n\r\n Returns\r\n -------\r\n pix.Pixelization or None:\r\n The `Pixelization` of a galaxy, provided one galaxy has a `Pixelization`.\r\n \"\"\"\r\n\r\n for galaxy in model.galaxies:\r\n if hasattr(galaxy, \"pixelization\"):\r\n if galaxy.pixelization is not None:\r\n if isinstance(galaxy.pixelization, af.Model):\r\n return galaxy.pixelization.cls\r\n else:\r\n return galaxy.pixelization\r\n\r\n\r\ndef has_pixelization_from_model(model: af.Collection):\r\n \"\"\"\r\n For a model containing one or more galaxies, inspect its attributes and return `True` if a galaxy has a\r\n `Pixelization` otherwise return `False`.\r\n\r\n This function expects that the input model is a `Collection` where the first model-component has the\r\n name `galaxies`, and is itself a `Collection` of `Galaxy` instances. This is the\r\n standard API for creating a model in PyAutoGalaxy.\r\n\r\n The result of `has_pixelization_from_model` is used by the preloading to determine whether certain parts of a\r\n calculation can be cached before the non-linear search begins for efficiency.\r\n\r\n Parameters\r\n ----------\r\n model : af.Collection\r\n Contains the `galaxies` in the model that will be fitted via the non-linear search.\r\n\r\n Returns\r\n -------\r\n pix.Pixelization or None:\r\n The `Pixelization` of a galaxy, provided one galaxy has a `Pixelization`.\r\n \"\"\"\r\n pixelization = pixelization_from(model=model)\r\n\r\n return pixelization is not None\r\n\r\n\r\ndef pixelization_is_model_from(model: af.Collection):\r\n \"\"\"\r\n For a model containing one or more galaxies, inspect its attributes and return `True` if a galaxy has a\r\n `Pixelization` which is a model-component with free parameters, otherwise return `False`. Therefore, a `False`\r\n may be returned if a galaxy has a `Pixelization` but it is an `instance` where no parameters are free parameters\r\n in the non-linear search.\r\n\r\n This function expects that the input model is a `Collection` where the first model-component has the\r\n name `galaxies`, and is itself a `Collection` of `Galaxy` instances. 
This is the\r\n standard API for creating a model in PyAutoGalaxy.\r\n\r\n The result of `pixelization_is_model_from_model` is used by the preloading to determine whether certain parts of a\r\n calculation can be cached before the non-linear search begins for efficiency.\r\n\r\n Parameters\r\n ----------\r\n model : af.Collection\r\n Contains the `galaxies` in the model that will be fitted via the non-linear search.\r\n\r\n Returns\r\n -------\r\n pix.Pixelization or None:\r\n The `Pixelization` of a galaxy, provided one galaxy has a `Pixelization`.\r\n \"\"\"\r\n if model.galaxies:\r\n for galaxy in model.galaxies:\r\n if isprior(galaxy.pixelization):\r\n return True\r\n return False\r\n\r\n\r\ndef hyper_model_from(\r\n setup_hyper, result: af.Result, include_hyper_image_sky: bool = False\r\n) -> af.Collection:\r\n \"\"\"\r\n Make a hyper model from the `Result` of a model-fit, where the hyper-model is the maximum log likelihood instance\r\n of the inferred model but turns the following hyper components of the model to free parameters:\r\n\r\n 1) The `Pixelization` of any `Galaxy` in the model.\r\n 2) The `Regularization` of any `Galaxy` in the model.\r\n 3) Hyper data components like a `HyperImageSky` or `HyperBackgroundNoise` if input into the function.\r\n 4) `HyperGalaxy` components of the `Galaxy`'s in the model, which are used to scale the noise in regions of the\r\n data which are fit poorly.\r\n\r\n The hyper model is typically used in pipelines to refine and improve an `Inversion` after model-fits that fit the\r\n `Galaxy` light and mass components.\r\n\r\n Parameters\r\n ----------\r\n setup_hyper : SetupHyper\r\n The setup of the hyper analysis if used (e.g. hyper-galaxy noise scaling).\r\n result : af.Result\r\n The result of a previous `Analysis` search whose maximum log likelihood model forms the basis of the hyper model.\r\n include_hyper_image_sky : hd.HyperImageSky\r\n This must be true to include the hyper-image sky in the model, even if it is turned on in `setup_hyper`.\r\n\r\n Returns\r\n -------\r\n af.Collection\r\n The hyper model, which has an instance of the input results maximum log likelihood model with certain hyper\r\n model components now free parameters.\r\n \"\"\"\r\n\r\n model = result.instance.as_model((pix.Pixelization, reg.Regularization))\r\n\r\n if setup_hyper is None:\r\n return None\r\n\r\n if setup_hyper.hyper_galaxy_names is None:\r\n if not has_pixelization_from_model(model=model):\r\n if setup_hyper.hypers_all_off:\r\n return None\r\n if setup_hyper.hypers_all_except_image_sky_off:\r\n if not include_hyper_image_sky:\r\n return None\r\n\r\n model.hyper_image_sky = setup_hyper.hyper_image_sky\r\n model.hyper_background_noise = setup_hyper.hyper_background_noise\r\n\r\n if setup_hyper.hyper_galaxy_names is not None:\r\n\r\n for path_galaxy, galaxy in result.path_galaxy_tuples:\r\n if path_galaxy[-1] in setup_hyper.hyper_galaxy_names:\r\n if not np.all(result.hyper_galaxy_image_path_dict[path_galaxy] == 0):\r\n\r\n galaxy = getattr(model.galaxies, path_galaxy[-1])\r\n\r\n setattr(galaxy, \"hyper_galaxy\", af.Model(g.HyperGalaxy))\r\n\r\n return model\r\n\r\n\r\ndef hyper_fit(hyper_model: af.Collection, setup_hyper, result: af.Result, analysis):\r\n \"\"\"\r\n Perform a hyper-fit, which extends a model-fit with an additional fit which fixes the non-hyper components of the\r\n model (e.g., `LightProfile`'s, `MassProfile`) to the `Result`'s maximum likelihood fit. 
The hyper-fit then treats\r\n only the hyper-model components as free parameters, which are any of the following model components:\r\n\r\n 1) The `Pixelization` of any `Galaxy` in the model.\r\n 2) The `Regularization` of any `Galaxy` in the model.\r\n 3) Hyper data components like a `HyperImageSky` or `HyperBackgroundNoise` if input into the function.\r\n 4) `HyperGalaxy` components of the `Galaxy`'s in the model, which are used to scale the noise in regions of the\r\n data which are fit poorly.\r\n\r\n The hyper model is typically used in pipelines to refine and improve an `Inversion` after model-fits that fit the\r\n `Galaxy` light and mass components.\r\n\r\n Parameters\r\n ----------\r\n hyper_model : Collection\r\n The hyper model used by the hyper-fit, which models hyper-components like a `Pixelization` or `HyperGalaxy`'s.\r\n setup_hyper : SetupHyper\r\n The setup of the hyper analysis if used (e.g. hyper-galaxy noise scaling).\r\n result : af.Result\r\n The result of a previous `Analysis` search whose maximum log likelihood model forms the basis of the hyper model.\r\n analysis : Analysis\r\n An analysis class used to fit imaging or interferometer data with a model.\r\n\r\n Returns\r\n -------\r\n af.Result\r\n The result of the hyper model-fit, which has a new attribute `result.hyper` that contains updated parameter\r\n values for the hyper-model components for passing to later model-fits.\r\n \"\"\"\r\n\r\n if hyper_model is None:\r\n return result\r\n\r\n setup_hyper.search.paths.path_prefix = result.search.paths.path_prefix\r\n setup_hyper.search.paths.name = f\"{result.search.paths.name}__hyper\"\r\n setup_hyper.search.paths.unique_tag = result.search.paths.unique_tag\r\n\r\n analysis.set_hyper_dataset(result=result)\r\n\r\n hyper_result = setup_hyper.search.fit(model=hyper_model, analysis=analysis)\r\n\r\n setattr(result, \"hyper\", hyper_result)\r\n\r\n return result\r\n\r\n\r\ndef stochastic_model_from(\r\n result,\r\n include_lens_light=False,\r\n include_pixelization=False,\r\n include_regularization=False,\r\n subhalo_centre_width=None,\r\n subhalo_mass_at_200_log_uniform=True,\r\n):\r\n \"\"\"\r\n Make a stochastic model from the `Result` of a model-fit, where the stochastic model uses the same model\r\n components as the original model but may switch certain components (e.g. 
the lens light, source pixelization)\r\n    to free parameters.\r\n\r\n    The stochastic model is used to perform a stochastic model-fit, which refits a model but introduces a log\r\n    likelihood cap whereby all model-samples with a likelihood above this cap are rounded down to the value of the cap.\r\n\r\n    This `log_likelihood_cap` is determined by sampling ~250 log likelihood values from the original model's maximum\r\n    log likelihood model, but where each model evaluation uses a different KMeans seed of the pixelization to derive\r\n    a unique pixelization with which to reconstruct the source galaxy (therefore a pixelization which uses the KMeans\r\n    method, like the `VoronoiBrightnessImage`, must be used to perform a stochastic fit).\r\n\r\n    The cap is computed as the mean of these ~250 values and it is introduced to avoid underestimated errors due\r\n    to artificial likelihood boosts.\r\n\r\n    Parameters\r\n    ----------\r\n    result : af.Result\r\n        The result of a previous `Analysis` search whose maximum log likelihood model forms the basis of the hyper model.\r\n    include_lens_light : bool\r\n        If `True` and the model includes any `LightProfile`'s, these are fitted for in the model.\r\n    include_pixelization : bool\r\n        If `True` the `VoronoiBrightnessImage` pixelization in the model is fitted for.\r\n    include_regularization : bool\r\n        If `True` the regularization in the model is fitted for.\r\n    subhalo_centre_width : float\r\n        The `sigma` value of the `GaussianPrior` on the centre of the subhalo, if it is included in the lens model.\r\n    subhalo_mass_at_200_log_uniform : bool\r\n        if `True`, the subhalo mass (if included) does not assume a `GaussianPrior` from the previous fit, but instead\r\n        retains the default `LogUniformPrior`.\r\n\r\n    Returns\r\n    -------\r\n    af.Collection\r\n        The stochastic model, which is the same model as the input model but may fit for or fix additional parameters.\r\n    \"\"\"\r\n    if not hasattr(result.model.galaxies, \"lens\"):\r\n        raise PriorException(\r\n            \"Cannot extend a search with a stochastic search if the lens galaxy `Model` \"\r\n            \"is not named `lens`. \"\r\n        )\r\n\r\n    model_classes = [mp.MassProfile]\r\n\r\n    if include_lens_light:\r\n        model_classes.append(lp.LightProfile)\r\n\r\n    if include_pixelization:\r\n        model_classes.append(pix.Pixelization)\r\n\r\n    if include_regularization:\r\n        model_classes.append(reg.Regularization)\r\n\r\n    model = result.instance.as_model(model_classes)\r\n\r\n    model.galaxies.lens.take_attributes(source=result.model.galaxies.lens)\r\n\r\n    if hasattr(model.galaxies, \"subhalo\"):\r\n        model.galaxies.subhalo.take_attributes(source=result.model.galaxies.subhalo)\r\n\r\n        if subhalo_centre_width is not None:\r\n            model.galaxies.subhalo.mass.centre = result.model_absolute(\r\n                a=subhalo_centre_width\r\n            ).galaxies.subhalo.mass.centre\r\n\r\n        if subhalo_mass_at_200_log_uniform:\r\n            model.galaxies.subhalo.mass.mass_at_200 = af.LogUniformPrior(\r\n                lower_limit=1e6, upper_limit=1e11\r\n            )\r\n\r\n    return model\r\n\r\n\r\ndef stochastic_fit(stochastic_model, result, analysis):\r\n    \"\"\"\r\n    Perform a stochastic model-fit, which refits a model but introduces a log likelihood cap whereby all model-samples\r\n    with a likelihood above this cap are rounded down to the value of the cap.\r\n\r\n    This `log_likelihood_cap` is determined by sampling ~250 log likelihood values from the original model's maximum\r\n    log likelihood model. 
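\r\n\r\n    Schematically (a sketch mirroring the function body below; `norm.fit` returns the fitted mean and standard\r\n    deviation of the sampled values):\r\n\r\n        mean, sigma = norm.fit(result.stochastic_log_evidences)\r\n        log_likelihood_cap = mean\r\n\r\n    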
However, the pixelization used to reconstruct the source of each model evaluation uses a\r\n different KMeans seed, such that each reconstruction uses a unique pixel-grid. The model must therefore use a\r\n pixelization which uses the KMeans method to construct the pixel-grid, for example the `VoronoiBrightnessImage`.\r\n\r\n The cap is computed as the mean of these ~250 values and it is introduced to avoid underestimated errors due\r\n to artificial likelihood boosts.\r\n\r\n Parameters\r\n ----------\r\n setup_hyper : SetupHyper\r\n The setup of the hyper analysis if used (e.g. hyper-galaxy noise scaling).\r\n result : af.Result\r\n The result of a previous `Analysis` search whose maximum log likelihood model forms the basis of the hyper model.\r\n include_hyper_image_sky : hd.HyperImageSky\r\n This must be true to include the hyper-image sky in the model, even if it is turned on in `setup_hyper`.\r\n\r\n Returns\r\n -------\r\n af.Collection\r\n The hyper model, which has an instance of the input results maximum log likelihood model with certain hyper\r\n model components now free parameters.\r\n \"\"\"\r\n\r\n mean, sigma = norm.fit(result.stochastic_log_evidences)\r\n log_likelihood_cap = mean\r\n\r\n search = result.search\r\n search.paths.name = (\r\n f\"{result.search.paths.name}__stochastic_likelihood_cap_\"\r\n + \"{0:.1f}\".format(log_likelihood_cap)\r\n )\r\n search.paths.unique_tag = result.search.paths.unique_tag\r\n\r\n search.paths.save_object(\r\n \"stochastic_log_evidences\", result.stochastic_log_evidences\r\n )\r\n\r\n stochastic_result = search.fit(\r\n model=stochastic_model, analysis=analysis, log_likelihood_cap=log_likelihood_cap\r\n )\r\n\r\n setattr(result, \"stochastic\", stochastic_result)\r\n\r\n return result\r\n","sub_path":"autogalaxy/analysis/model_util.py","file_name":"model_util.py","file_ext":"py","file_size_in_byte":15081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"644896635","text":"#! 
/usr/bin/env python3\n# -*- coding: ASCII -*-\nimport subprocess\nimport dpkt\nimport socket\nimport sys\n# import datetime\nfrom operator import itemgetter\n\nf1 = open('/home/mailru/scripts/data/print_toptlk.txt', 'w+')\n\nsubprocess.check_call(\n    ['RESULT=$(ip r l | grep default | cut -d \" \" -f 5)&& tcpdump -tnn -c 30 -w \\\n    packets.pcap -i $RESULT'], shell=True)\n\nf = open('packets.pcap', 'rb') # dpkt's pcap Reader needs the file opened in binary mode\npcap = dpkt.pcap.Reader(f)\n\npackets = []\ntotal_buf = 0\nfor ts, buf in pcap:\n    total_buf += len(buf)\n    eth = dpkt.ethernet.Ethernet(buf)\n    ip = eth.data\n    packet = {'IP_Src': socket.inet_ntoa(ip.src), 'IP_Dst': socket.inet_ntoa(\n        ip.dst), 'Proto': type(ip.data), 'Size': len(buf)}\n    packets.append(packet)\nproto_packets = []\nfor packet in packets:\n    if len(proto_packets) != 0:\n        flag = 0\n        for proto_packet in proto_packets:\n            if packet['Proto'] == proto_packet['Proto']:\n                flag = 1\n                proto_packet['Amount'] += 1\n                proto_packet['Size'] += packet['Size']\n                break\n        if flag == 0:\n            buffer = {'Proto': packet['Proto'],\n                      'Amount': 1, 'Size': packet['Size']}\n            proto_packets.append(buffer)\n    else:\n        buffer = {'Proto': packet['Proto'],\n                  'Amount': 1, 'Size': packet['Size']}\n        proto_packets.append(buffer)\nip_packets = []\nfor packet in packets:\n    if len(ip_packets) != 0:\n        flag = 0\n        for ip_packet in ip_packets:\n            if packet['IP_Src'] == ip_packet['IP_Src']:\n                flag = 1\n                ip_packet['Size'] += packet['Size']\n                ip_packet['Amount'] += 1\n                break\n        if flag == 0:\n            buffer = {'IP_Src': packet['IP_Src'],\n                      'Size': packet['Size'], 'Amount': 1}\n            ip_packets.append(buffer)\n    else:\n        buffer = {'IP_Src': packet['IP_Src'],\n                  'Size': packet['Size'], 'Amount': 1}\n        ip_packets.append(buffer)\nnumber_sort = sorted(ip_packets, key=itemgetter('Amount'), reverse=True)\nsize_sort = sorted(ip_packets, key=itemgetter('Size'), reverse=True)\nproto_sort = sorted(proto_packets, key=itemgetter('Size'), reverse=True)\nprint(\"PROCESSING: top talking IP addresses sorted by amount of packets\")\n# -----------------------------------table No1--------------------\nf1.write('')\nf1.write('')\nf1.write(\"\")\nf1.write('')\nfor packet in number_sort:\n    f1.write(\"\")\n    # print (packet['IP_Src'], \" \", packet['Amount'])\n    f1.write(\"\\n\")\n    f1.write(\"\")\n    f1.write(\"\t\t\")\n    f1.write(\"\")\n    f1.write(\"\")\n\n# -------------------------------table No2------------------------\nprint(\"PROCESSING: top talking IP addresses sorted by total bytes size \")\nf1.write('')\nf1.write(\"\")\nf1.write(\"\")\nfor packet in size_sort:\n    f1.write(\"\")\n    # print (packet['IP_Src'], \" \", packet['Size'])\n    f1.write(\"\\n\")\n    f1.write(\"\")\n    f1.write(\"\t\")\n    f1.write(\"\")\n    f1.write(\"\")\n\n# -------------------------------table No3------------------------------\nprint(\"PROCESSING: top used Protocols sorted by percentage of traffic\")\nf1.write(\"\")\nf1.write(\"\")\nf1.write(\"\")\nfor packet in proto_sort:\n    f1.write(\"\")\n    # print (packet['Proto'], \" \",\n    # int((float(packet['Size']) / total_buf)* 100), '%')\n    f1.write(\"\\n\")\n    f1.write(\"\")\n    f1.write(\"\t\")\n    f1.write(\"\")\n    f1.write(\"\")\n\n# What is the average packet rate? 
(packets/second)\n# The last time stamp\n# print \"The packets/second %f \" % (packets/(last-first))\n\n\n# what is the protocol distribution?\n# use dictionary\n\nf1.close()\nf.close()\nsys.exit(0)\n","sub_path":"dpkt_test.py","file_name":"dpkt_test.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"546309848","text":"import sys\n\ndef main():\n\twith open(sys.argv[1]) as f:\n\t\tfor line in f:\n\t\t\tprint(get_response(int(line)))\n\ndef get_response(number):\n\tif 0 <= number <= 2:\n\t\treturn \"Still in Mama's arms\"\n\telif 3 <= number <= 4:\n\t\treturn \"Preschool Maniac\"\n\telif 5 <= number <= 11:\n\t\treturn \"Elementary school\"\n\telif 12 <= number <= 14:\n\t\treturn \"Middle school\"\n\telif 15 <= number <= 18:\n\t\treturn \"High school\"\n\telif 19 <= number <= 22:\n\t\treturn \"College\"\n\telif 23 <= number <= 65:\n\t\treturn \"Working for the man\"\n\telif 66 <= number <= 100:\n\t\treturn \"The Golden Years\"\n\telse:\n\t\treturn \"This program is for humans\"\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"Codeeval/Age Distribution/P.py","file_name":"P.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"86715312","text":"# coding=utf8\nimport ply.lex as lex\nfrom ply.lex import TOKEN\nimport re\n\nstates = (\n    ('name', 'exclusive'),\n    ('tail', 'exclusive'),\n)\n\ntokens = (\n    'FUNCTYPE', 'ANY', 'FUNCNAME', 'PARAMETRS',\n)\n\nt_ANY = r'.'\n\n\ndef t_FUNCTYPE(t):\n    r'(?m)^(int|long|short)\\s+'\n    if t.lexer.current_state() == 'name':\n        t.lexer.begin('tail') # move on to parsing the tail\n    else:\n        t.lexer.begin('name') # start parsing the name\n    return t\n\n\ndef t_name_FUNCNAME(t):\n    r'[a-zA-Z][a-zA-Z0-9]{0,15}'\n    if t.lexer.current_state() == 'tail':\n        t.lexer.begin('INITIAL') # go back to the initial state\n    else:\n        t.lexer.begin('tail') # move on to parsing the tail\n    return t\n\n\ndef t_name_ANY(t):\n    r'.'\n    t.lexer.begin('INITIAL') # go back to the initial state\n\n\ndef t_tail_PARAMETRS(t):\n    r'\\s*\\((\\s*(int|long|short)\\s+[a-zA-Z][a-zA-Z0-9]{0,15}\\s*,?)*\\)\\s*;'\n    t.lexer.begin('INITIAL') # go back to the initial state\n    return t\n\n\ndef t_tail_ANY(t):\n    r'.'\n    t.lexer.begin('INITIAL') # go back to the initial state\n\n\n# declare the ignored characters\nt_name_ignore = ' ' # this is a mandatory variable, by the way: a new state cannot be created without it\nt_tail_ignore = ' ' # this is a mandatory variable, by the way: a new state cannot be created without it\nt_ignore = ' \\r\\t\\f'\n\n\n# and of course we cannot do without error handling\ndef t_name_error(t):\n    print(\"Illegal character in NAME '%s'\" % t.value[0])\n    t.lexer.skip(1)\n    t.lexer.begin('INITIAL')\n\n\n# and here we handle errors. By the way, note the naming format of the function\ndef t_error(t):\n    print(\"Illegal character '%s'\" % t.value[0])\n    t.lexer.skip(1)\n    t.lexer.begin('INITIAL')\n\n\n# and here we handle errors. 
By the way, note the naming format of the function\ndef t_tail_error(t):\n    print(\"Illegal character in TAIL'%s'\" % t.value[0])\n    t.lexer.skip(1)\n    t.lexer.begin('INITIAL')\n\n\ndef t_newline(t):\n    r'\\n'\n    t.lexer.lineno += len(t.value)\n    t.lexer.begin('INITIAL')\n\n\nlexer = lex.lex()\n\nif __name__ == \"__main__\":\n\n    f = open(\"../Functions/sample.txt\")\n    nf = f.read()\n    f.close()\n    lexer.input(nf)\n\n    while True:\n        tok = lexer.token() # read the next token\n        if not tok:\n            break\n        if tok.type == \"FUNCTYPE\":\n            print('\\n')\n\n        print(tok.type, tok.value)\n","sub_path":"Bin/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"467292549","text":"import os\nfrom flask import Flask, request, abort, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\n\nfrom models import setup_db, Client, Artist, Project\nfrom auth import AuthError, requires_auth\n\n\napp = Flask(__name__)\nsetup_db(app)\nCORS(app)\n\n'''\nGET '/projects'\n    - require permission 'get:projects'\n    - return status code 200 and json {'success': True, 'projects': projects}\n        projects: a list of open projects' names\n'''\n@app.route('/projects')\n@requires_auth(permission='get:projects')\ndef get_projects(jwt):\n    projects = Project.query.all()\n    projects = [p.name for p in projects]\n    return jsonify({'success': True, 'projects': projects})\n\n\n'''\nGET '/artists'\n    - require permission 'get:artists'\n    - return status code 200 and json {'success': true, 'artists': artists}\n        artists: a paginated list of artists\n'''\n@app.route('/artists')\n@requires_auth(permission='get:artists')\ndef get_artists(jwt):\n    artists = Artist.query.all()\n    artists = [a.name for a in artists]\n    return jsonify({'success': True, 'artists': artists})\n\n\n'''\nGET '/projects/'\n    - require permission 'get:projects'\n    - return status code 200 and json {'success': true, 'project': project}\n        project: the project with id requested\n    - return status code 404 if  is not found\n'''\n@app.route('/projects/')\n@requires_auth(permission='get:projects')\ndef get_project_detail(jwt, id):\n    project = Project.query.filter_by(id=id).one_or_none()\n    if (project is None):\n        abort(404)\n    return jsonify({'success': True, 'project': project.format()})\n\n\n'''\nPOST '/clients'\n    - require permission 'post:clients'\n    - return status code 200 and json {'success': true, 'client': client_id}\n        client: the client created\n    - return status code 422 if request is unprocessable\n'''\n@app.route('/clients', methods=['POST'])\n@requires_auth(permission='post:clients')\ndef post_client(jwt):\n    body = request.get_json()\n    if (body is None):\n        abort(422)\n    name = body.get('name')\n    description = body.get('description')\n    if (name is None or description is None):\n        abort(422)\n\n    client = Client(name=name, description=description)\n\n    try:\n        client.insert()\n    except Exception as e:\n        abort(422)\n    client = Client.query.filter_by(name=name).one()\n    return jsonify({'success': True, 'client': client.name})\n\n\n'''\nPOST '/projects/'\n    - require permission 'post:projects'\n    - return status code 200 and json {'success': true, 'project': project}\n        project: the project created\n    - return status code 422 if request is unprocessable\n'''\n@app.route('/projects', methods=['POST'])\n@requires_auth(permission='post:projects')\ndef post_project(jwt):\n    body = request.get_json()\n    if (body is None):\n        abort(422)\n    name = body.get('name')\n    client_id = 
body.get('client_id')\n description = body.get('description')\n if (name is None or client_id is None or description is None):\n abort(422)\n project = Project(name=name, client_id=client_id, description=description)\n\n try:\n project.insert()\n except Exception:\n abort(422)\n return jsonify({'success': True, 'project': project.format()})\n\n\n'''\nPOST '/artists/'\n - require permission 'post:artists'\n - return status code 200 and json {'success': true, 'artist': artist}\n artists: the artists just added\n - return status code 422 if request is unprocessable\n'''\n@app.route('/artists', methods=['POST'])\n@requires_auth(permission='post:artists')\ndef post_artists(jwt):\n body = request.get_json()\n if (body is None):\n abort(422)\n name = body.get('name')\n portfolio_link = body.get('portfolio_link')\n if (name is None or portfolio_link is None):\n abort(422)\n artist = Artist(name=name, portfolio_link=portfolio_link)\n \n try:\n artist.insert()\n except Exception:\n abort(422)\n return jsonify({'success': True, 'artist': artist.format()})\n\n\n'''\nPATCH '/projects/'\n - require permission 'patch:projects'\n - return status code 200 and json {'success': true, 'project': project}\n project: the project created\n - return status code 404 if is not found\n - return status code 422 if request is unprocessable\n'''\n@app.route('/projects/', methods=['PATCH'])\n@requires_auth(permission='patch:projects')\ndef patch_project(jwt, id):\n project = Project.query.filter_by(id=id).one_or_none()\n if (project is None):\n abort(404)\n body = request.get_json()\n if (body is None):\n abort(422)\n name = body.get('name')\n client_id = body.get('client_id')\n description = body.get('description')\n if (name is not None):\n project.name = name\n if (client_id is not None):\n project.client_id = client_id\n if (description is not None):\n project.description = description\n try:\n project.update()\n except Exception as e:\n abort(422)\n return jsonify({'success': True, 'project': project.format()})\n\n\n'''\nDELETE '/projects/'\n - require permission 'delete:projects'\n - return status code 200 and json {'success': true, 'deleted': name}\n'''\n@app.route('/projects/', methods=['DELETE'])\n@requires_auth(permission='delete:projects')\ndef delete_project(jwt, id):\n project = Project.query.filter_by(id=id).one_or_none()\n if (project is None):\n abort(404)\n name = project.name\n project.delete()\n return jsonify({'success': True, 'project': name})\n\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n\n# Or specify port manually:\n'''\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n'''\n\n# Error Handling\n'''\nExample error handling for unprocessable entity\n'''\n@app.errorhandler(422)\ndef unprocessable(error):\n return jsonify({\n \"success\": False,\n \"error\": 422,\n \"message\": \"unprocessable\"\n }), 422\n\n\n'''\nImplement error handler for 404\n error handler should conform to general task above\n'''\n@app.errorhandler(404)\ndef unprocessable(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"resource not found\"\n }), 404\n\n\n'''\nImplement error handler for AuthError\n error handler should conform to general task above\n'''\n@app.errorhandler(AuthError)\ndef autherror(error):\n return jsonify({\n \"success\": False,\n \"code\": error.status_code,\n \"error\": error.error[\"code\"],\n \"description\": error.error[\"description\"]\n }), 
error.status_code\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"110603123","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# TOMUSS: The Online Multi User Simple Spreadsheet)\n# Copyright (C) 2008-2012 Thierry EXCOFFIER, Universite Claude Bernard\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n# Contact: Thierry.EXCOFFIER@bat710.univ-lyon1.fr\n\nimport os\nimport time\nfrom .. import plugin\nfrom .. import document\nfrom .. import utilities\nfrom .. import configuration\nfrom .. import column\nfrom .. import cell\n\nclass Stat:\n sum = 0\n nr = 0\n min = 1e9\n max = 0\n sorted = False\n values = None\n \n def __init__(self):\n self.values = []\n\n def add(self, duration):\n self.values.append(duration)\n self.sum += duration\n self.nr += 1\n if duration < self.min:\n self.min = duration\n if duration > self.max:\n self.max = duration\n self.sorted = False\n\n def avg(self):\n if self.nr:\n return self.sum / self.nr\n return 1\n \n def mediane(self):\n if not self.sorted:\n self.values.sort()\n self.sorted = True\n if self.nr:\n return self.values[len(self.values)//2]\n else:\n return 1\n\n def problems(self):\n if self.nr:\n avg = self.avg()\n return len(list([i\n for i in self.values\n if i > 4*avg])\n ) / float(self.nr)\n else:\n return 0\n \n def __str__(self):\n mediane = self.mediane()\n avg = self.avg()\n return 'Avg:%9.4f Avg/Med:%5.1f Min:%8.4f Max:%10.6f Sum:%11.6f [%5d] %2d%%>4*Avg' % (\n avg, avg/mediane, self.min, self.max, self.sum, self.nr,\n int(100*self.problems()))\n\ndef run(service_name, lines):\n try:\n f = open(os.path.join('LOGS', service_name), 'r', encoding = \"utf-8\")\n except IOError:\n utilities.warn(service_name + ' unreadable')\n return\n service_name = service_name.split('.')[0]\n\n d = {}\n\n begin_time = None\n start_time = 0\n five_minute = time.time() - 5*60\n one_hour = time.time() - 60*60\n one_day = time.time() - 60*60*24\n one_week = time.time() - 60*60*24*7\n one_month = time.time() - 60*60*24*30\n for line in f:\n try:\n start_time, duration, name = line.strip().split(' ')\n if begin_time == None:\n begin_time = float(start_time)\n duration = float(duration)\n start_time = float(start_time)\n except ValueError:\n continue\n if name not in d:\n d[name] = (Stat(), Stat(), Stat(), Stat(), Stat(), Stat())\n d[name][0].add(duration)\n if start_time > five_minute:\n d[name][1].add(duration)\n if start_time > one_hour:\n d[name][2].add(duration)\n if start_time > one_day:\n d[name][3].add(duration)\n if start_time > one_week:\n d[name][4].add(duration)\n if start_time > one_month:\n d[name][5].add(duration)\n \n keys = list(d.keys())\n keys.sort(key = lambda x: d[x][0].sum/(d[x][0].nr+1))\n keys.reverse()\n for k in keys:\n launch_thread = ' '\n for 
p in plugin.plugins:\n if p.name == k:\n if p.launch_thread:\n launch_thread = '*' # XXX Miss many\n break\n\n for when, dd in zip(('All', '5 minutes', 'hour',\n 'day', 'week', 'month'), d[k]):\n if dd.nr == 0:\n continue\n lines.append(cell.Line((\n cell.CellValue(service_name),\n cell.CellValue(k),\n cell.CellValue(dd.mediane()*1000),\n cell.CellValue(dd.avg()/dd.mediane()),\n cell.CellValue(dd.min*1000),\n cell.CellValue(dd.max*1000),\n cell.CellValue(dd.sum),\n cell.CellValue(dd.nr),\n cell.CellValue(launch_thread == '*'\n and configuration.yes or ''),\n cell.CellValue(dd.problems()\n and dd.problems()*100 or ''),\n cell.CellValue(when),\n )))\n\n\n@utilities.add_a_lock\ndef profiling(server):\n \"\"\"Display the statistics on the plugin usage, number of call and times.\"\"\"\n\n columns = (\n column.Column('0', '', freezed='F', width=2, type='Text',\n title=server._('COL_TITLE_service')),\n column.Column('1', '', freezed='F', width=2, type='Text',\n title=server._('COL_TITLE_plugin')),\n column.Column('2', '', width=2, type='Note', minmax='[0;1000]',\n title=server._('COL_TITLE_med'),\n comment=server._('COL_COMMENT_med'),\n ),\n column.Column('3', '', width=2, type='Note', minmax='[0;10]',\n title=server._('COL_TITLE_avg/med'),\n comment=server._('COL_COMMENT_avg/med'),\n ),\n column.Column('4', '', width=2, type='Note', minmax='[0;1000]',\n title=server._('COL_TITLE_min'),\n comment=server._('COL_COMMENT_min'),\n ),\n column.Column('5', '', width=2, type='Note', minmax='[0;1000]',\n title=server._('COL_TITLE_max'),\n comment=server._('COL_COMMENT_max'),\n ),\n column.Column('6', '', width=2, type='Note', minmax='[0;NaN]',\n title=server._('COL_TITLE_total'),\n comment=server._('COL_COMMENT_total'),\n ),\n column.Column('7', '', width=2, type='Note', minmax='[0;NaN]',\n title=server._('COL_TITLE_call'),\n comment=server._('COL_COMMENT_call'),\n ),\n column.Column('8', '', width=2, type='Text',\n title=server._('COL_TITLE_batch'),\n comment=server._('COL_COMMENT_batch'),\n ),\n column.Column('9', '', width=2, type='Note', minmax='[0;5]',\n title=server._('COL_TITLE_slow'),\n comment=server._('COL_COMMENT_slow'),\n ),\n column.Column('10', '', width=2, type='Text',\n title=server._('COL_TITLE_when'),\n comment=server._('COL_COMMENT_when'),\n ),\n )\n lines = []\n run(os.path.join('TOMUSS', str(server.the_year) + '.times'), lines)\n\n for url, port, year, semester, host in configuration.suivi.servers():\n server.the_file.write('\\n')\n run(os.path.join('SUIVI%d' % port, str(server.the_year) + '.times'\n ), lines)\n\n document.virtual_table(server, columns, lines,\n {\n 'comment': server._(\"TABLE_COMMENT_profiling\"),\n 'default_nr_columns': 11,\n })\n\nplugin.Plugin('profiling', '/profiling/{Y}',\n function=profiling,\n group='roots',\n launch_thread = True,\n link=plugin.Link(html_class=\"verysafe\", where='debug',\n url=\"javascript:go_year_after('profiling')\"\n ),\n )\n\n","sub_path":"PLUGINS/profiling.py","file_name":"profiling.py","file_ext":"py","file_size_in_byte":8150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"577880954","text":"from distutils.core import setup\r\nimport py2exe\r\nimport sys\r\n\r\nsys.argv.append('py2exe')\r\n\r\npy2_options = {\r\n \"includes\": [\"sip\"],\r\n \"dll_excludes\": [\"MSVCP90.dll\"],\r\n \"ascii\": 0,\r\n \"compressed\":1,\r\n \"optimize\":2,\r\n}\r\n\r\nsetup(\r\n name='PyQt Demo',\r\n version='1.0',\r\n windows=[{\"script\": \"main.py\"}],\r\n zipfile=None,\r\n options={'py2exe': 
py2_options}\r\n)","sub_path":"p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"516126590","text":"#!/bin/env python\n\n# Load csv data\nfile = open('pubchem_smiles-CDs.csv','r')\ntext = file.readlines()\nfile.close()\n\n# Import depiction stuff\nfrom openeye.oechem import *\nfrom openeye.oeiupac import *\nfrom openeye.oedepict import *\n\n#Process file and depict structures\nfor line in text:\n #Skip lines starting w comment\n if line[0]=='#': continue\n\n #Otherwise depict and save\n tmp = line.split(',')\n cid = tmp[0]\n smiles = tmp[1].strip()\n mol = OEMol()\n OEParseSmiles(mol, smiles)\n OEPrepareDepiction(mol)\n OERenderMolecule(cid+'.pdf', mol)\n\n","sub_path":"paper/figure_source_files/2D_structures/make_2D_figs-CDs.py","file_name":"make_2D_figs-CDs.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"425053708","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis program converts an infix expression into a postfix expression using a syntax\ndirected translation scheme (i.e. with semantic actions). Currently no\nbrackets.\n\nThe input is considered to be a space separated expression like (of single digits)\n9 + 4 - 1\n\nGrammar is:\nexpr ==> expr + term | expr - term | term\nterm ==> 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | ( expr )\n\nLeft factoring:\nexpr ==> term rest\nrest ==> + term rest | - term rest | epsilon\nterm ==> 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | ( expr )\n\nInserting Semantic Actions:\nexpr ==> term rest | epsilon\nrest ==> + term {print ('+')} rest | - term {print ('-')} rest | epsilon\nterm ==> 0 {print ('0')} | 1 {print ('1')} | and so on... 
| ( expr )\n\"\"\" \nimport evalpostfix as ep\n\ntoken = \"\"\ntokgen = None\npostfixlist = []\ndigits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\ndef tokengenerator (toklist):\n for tok in toklist:\n yield tok\n\ndef match (t):\n global token, tokgen\n if token == t:\n token = next (tokgen)\n else:\n print (\"Parsing error with token '{}'\".format (token))\n\ndef expr ():\n global token\n if token in digits:\n term (); rest ();\n #else epsilon production implicit\n\ndef rest () :\n global token\n if token == '+':\n match ('+'); term (); postfixlist.append ('+'); rest ();\n elif token == '-':\n match ('-'); term (); postfixlist.append ('-'); rest ();\n #else epsilon production\n\ndef term ():\n global token\n if token in digits:\n postfixlist.append (token); match (token); \n elif token == '(':\n match ('('); expr (); match (')');\n\ndef formatproper (line):\n words = line.split ()\n line = \"\".join (words)\n return line\n\n\ndef compile (line):\n line = formatproper (line)\n\n toklist = list (line + '$')\n global tokgen\n tokgen = tokengenerator (toklist)\n\n global token\n token = next (tokgen)\n expr ()\n\ndef main ():\n global token, postfixlist, tokgen\n while True:\n token = tokgen = None\n postfixlist = []\n\n line = input ().strip()\n if (not line or line == 'quit' or\n line == 'exit'):\n break\n compile (line)\n print (\"Postfix expr = \", postfixlist)\n result = ep.evalpfix (postfixlist)\n print (result)\n\nif __name__ == '__main__':\n main ()\n\n\n","sub_path":"calculator/infix2postfix.py","file_name":"infix2postfix.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"317054098","text":"class AppendWordCount:\n def __init__(self, string):\n self.string = string\n\n def append_count(self):\n words = self.string.split(\" \")\n\n res = \"\"\n for word in words:\n if word != \"\":\n res += word + str(len(word)) + \" \"\n else:\n res += \" \"\n\n return res\n\nif __name__ == \"__main__\":\n string = input(\"Enter a sentence \\n\")\n a1 = AppendWordCount(string)\n print(a1.append_count())\n","sub_path":"phase2/appendWordCountToWord.py","file_name":"appendWordCountToWord.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"421750869","text":"from django.contrib.postgres.fields import ArrayField\nfrom django.db import models\nfrom django.db.models import Q, F\n\nfrom eventology.common.models import TimestampedModel\nfrom users.models import User\n\n# Create your models here.\nclass EventCategory(TimestampedModel):\n name = models.CharField(max_length=255)\n\n class Meta:\n ordering = [\"name\"]\n\n def __str__(self):\n return f\"{self.name}\"\n\n\nclass Event(TimestampedModel):\n title = models.CharField(max_length=255)\n creator = models.ForeignKey(User, on_delete=models.CASCADE)\n category = models.ForeignKey(\n EventCategory, on_delete=models.SET_NULL, blank=True, null=True\n )\n image_urls = ArrayField(models.URLField(), default=list, blank=True)\n image_ids = ArrayField(models.CharField(max_length=100), default=list, blank=True)\n start_date_time = models.DateTimeField()\n end_date_time = models.DateTimeField()\n venue = models.CharField(max_length=255)\n description = models.TextField()\n\n class Meta:\n constraints = [\n models.CheckConstraint(\n check=Q(image_urls__len=F(\"image_ids__len\")),\n name=\"num_image_urls_eq_num_image_ids\",\n ),\n models.CheckConstraint(\n 
check=Q(start_date_time__lte=F(\"end_date_time\")),\n                name=\"event_start_date_time_lte_end_date_time\",\n            ),\n        ]\n        ordering = [\"-created_at\"]\n\n    def __str__(self):\n        return f\"{self.title} | {self.creator}\"\n\n\nclass EventSignUp(TimestampedModel):\n    event = models.ForeignKey(Event, on_delete=models.CASCADE)\n    user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n    class Meta:\n        constraints = [\n            models.UniqueConstraint(\n                fields=[\"user_id\", \"event_id\"], name=\"unique_user_event_signup\"\n            )\n        ]\n        ordering = [\"-created_at\"]\n\n    def __str__(self):\n        return f\"{self.user.name} | {self.event}\"\n\n\nclass EventLike(TimestampedModel):\n    event = models.ForeignKey(Event, on_delete=models.CASCADE)\n    user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n    class Meta:\n        constraints = [\n            models.UniqueConstraint(\n                fields=[\"user_id\", \"event_id\"], name=\"unique_user_event_like\"\n            )\n        ]\n        ordering = [\"-created_at\"]\n\n    def __str__(self):\n        return f\"{self.user.name} | {self.event}\"\n\n\nclass EventComment(TimestampedModel):\n    event = models.ForeignKey(Event, on_delete=models.CASCADE)\n    user = models.ForeignKey(User, on_delete=models.CASCADE)\n    content = models.TextField()\n\n    class Meta:\n        ordering = [\"created_at\"]\n\n    def __str__(self):\n        return f\"{self.user.name} | {self.event}\"\n","sub_path":"backend/eventology/events/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"605242436","text":"import numpy as np\nfrom GBS_list import GBList\n\ndef main(train_data, train_label, purity = 1.0):\n    \"\"\"\n    Function: for a given purity threshold, compute the granular-ball partition and the sampled points under that threshold\n    Input: training set samples, training set labels, purity threshold\n    Output: samples after granular-ball sampling, labels of the samples after granular-ball sampling\n    \"\"\"\n    numberSample, numberFeature = train_data.shape\n    train = np.hstack((train_data, train_label.reshape(numberSample, 1))) # Compose a new two dimensional array\n    index = np.array(range(0, numberSample)).reshape(numberSample, 1) # Index column, into two-dimensional array format\n    train = np.hstack((train, index)) # Add index column\n\n    granular_balls = GBList.GBList(train, train)\n    granular_balls.init_granular_balls(purity=purity, min_sample=numberFeature * 2) # initialization\n    init_l = granular_balls.granular_balls\n\n    DataAll = np.empty(shape=[0, numberFeature])\n    DataAllLabel = []\n    for granular_ball in init_l:\n        data = granular_ball.boundaryData\n        DataAll = np.vstack((DataAll, data[:, : numberFeature]))\n        DataAllLabel.extend(data[:, numberFeature])\n    return DataAll, DataAllLabel","sub_path":"GBS.py","file_name":"GBS.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"617706530","text":"import torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\nclass WaveGlowLoss(torch.nn.Module):\n\tdef __init__(self, sigma=1.0):\n\t\tsuper(WaveGlowLoss, self).__init__()\n\t\tself.sigma = sigma\n\n\tdef forward(self, model_output):\n\t\tz, log_s_list, log_det_W_list = model_output\n\t\tfor i, log_s in enumerate(log_s_list):\n\t\t\tif i == 0:\n\t\t\t\tlog_s_total = torch.sum(log_s)\n\t\t\t\tlog_det_W_total = log_det_W_list[i]\n\t\t\telse:\n\t\t\t\tlog_s_total = log_s_total + torch.sum(log_s)\n\t\t\t\tlog_det_W_total += log_det_W_list[i]\n\n\t\tloss = 
torch.sum(z*z)/(2*self.sigma*self.sigma) - log_s_total - log_det_W_total\n\t\treturn loss/(z.size(0)*z.size(1)*z.size(2))\n\n\nclass InvertibleConv(torch.nn.Module):\n\tdef __init__(self, channels):\n\t\tsuper(InvertibleConv, self).__init__()\n\t\tprint(\"Channels: \", channels)\n\t\tself.conv = torch.nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=False)\n\t\t\n\t\tW = torch.qr(torch.FloatTensor(channels, channels).normal_())[0]\n\t\t\n\t\tif torch.det(W) < 0:\n\t\t\tW[:, 0] = -1*W[:,0]\n\t\tW = W.view(channels, channels, 1)\n\t\tself.conv.weight.data = W\n\t\t\n\tdef forward(self, z, reverse=False):\n\t\tbatch_size, group_size, n_of_groups = z.size()\n\t\t# Here, group size refers to the channel dimension of the data, and each matrix of channel dimension (i.e. [:, i, :]) is\n\t\t# going to be multiplied by the W matrix. The n_of_groups is number of groups, and that's *how many* matrices there are\n\t\t# in the channel dimension, and each one will be multiplied by W\n\t\t# The larger the group_size value is, the more thorough the \"mixing\" of the variables is before going back to the AC layer\n\t\t# In the extreme case of group_size=1, the variables are never permuted before going back to AC, and the flow would work\n\t\t# terribly because nothing would change order, so the same values would go into the WN each step of flow. In the other\n\t\t# extreme, where n_of_groups=1, mixing is maximized, but we run the risk of our W matrix being too big and possibly getting\n\t\t# numerical instability when we try to invert it, since we're not using very high precision to represent it (float32).\n\t\t\n\t\tW = self.conv.weight.squeeze()\n\t\t\n\t\tif reverse:\n\t\t\tif not hasattr(self, 'W_inv'):\n\t\t\t\tW_inv = W.float().inverse()\n\t\t\t\tW_inv = Variable(W_inv[..., None])\n\t\t\t\tself.W_inv = W_inv\n\t\t\tz = F.conv1d(z, self.W_inv, bias=None, stride=1, padding=0)\n\t\t\treturn z\n\t\telse:\n\t\t\tlog_det_w = batch_size * n_of_groups * torch.logdet(W)\n\t\t\tz = self.conv(z)\n\t\t\treturn z, log_det_w\n\t\t\nclass AffineCoupling(torch.nn.Module):\n\tdef __init__(self, n_in_channels, n_context_channels, n_layers, dilation_list, n_channels, kernel_size):\n\t\tsuper(AffineCoupling, self).__init__()\n\t\tself.n_in_channels = n_in_channels\n\t\tself.n_context_channels = n_context_channels\n\t\tself.n_layers = n_layers\n\t\tself.dilation_list = dilation_list\n\t\tself.n_channels = n_channels\n\t\tself.kernel_size = kernel_size\n\t\tself.WN = WN(n_in_channels, n_context_channels, n_layers, n_channels, kernel_size, dilation_list)\n\n\tdef forward(self, forecast, context, reverse=False):\n\t\t\"\"\"\n\t\tcontext: batch x ? 
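# Illustrative check: InvertibleConv above initialises its 1x1 kernel from the Q
# factor of a QR decomposition, so the weight starts orthogonal; after the column
# sign flip, det(W) = +1 and log|det W| = 0 at initialisation. A numpy mirror of
# that initialisation:
import numpy as np

rng = np.random.default_rng(0)
W, _ = np.linalg.qr(rng.normal(size=(8, 8)))
if np.linalg.det(W) < 0:
    W[:, 0] = -W[:, 0]                        # same sign fix as the record
print(np.allclose(W @ W.T, np.eye(8)))        # True: orthogonal
print(np.isclose(np.linalg.det(W), 1.0))      # True: log-det starts at zero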
x ?\n\t\tforecast: batch x time\n\t\t\"\"\"\n\t\tif reverse:\n\t\t\tn_half = int(forecast.size(1)/2)\n\t\t\tforecast_0 = forecast[:, :n_half, :]\n\t\t\tforecast_1 = forecast[:, n_half:, :]\n\n\t\t\toutput = self.WN(forecast_0, context)\n\n\t\t\ts = output[:, n_half:, :]\n\t\t\tb = output[:, :n_half, :]\n\t\t\tforecast_1 = (forecast_1 - b)/torch.exp(s)\n\t\t\tforecast = torch.cat([forecast_0, forecast_1], 1)\n\n\t\t\treturn forecast\n\t\telse:\n\t\t\tn_half = int(forecast.size(1)/2)\n\t\t\tforecast_0 = forecast[:, :n_half, :]\n\t\t\tforecast_1 = forecast[:, n_half:, :]\n\n\t\t\toutput = self.WN(forecast_0, context)\n\t\t\tlog_s = output[:, n_half:, :]\n\t\t\tb = output[:, :n_half, :]\n\t\t\tforecast_1 = torch.exp(log_s)*forecast_1 + b # Might want to use sigmoid or clip the input to the exp for stability\n\n\t\t\tforecast = torch.cat([forecast_0, forecast_1], 1)\n\n\t\t\treturn forecast, log_s \n\t\t\nclass WaveGlow(torch.nn.Module):\n\tdef __init__(self, n_context_channels, n_flows, n_group, n_early_every, n_early_size, n_layers, dilation_list, n_channels, kernel_size, use_cuda=True):\n\t\tsuper(WaveGlow, self).__init__()\n\n\t\tassert(n_layers == len(dilation_list))\n\t\tself.n_flows = n_flows # Number of steps of flow\n\t\tself.n_group = n_group # \n\t\tself.n_early_every = n_early_every\n\t\tself.n_early_size = n_early_size\n\t\tself.n_channels = n_channels\n\t\tself.IC = torch.nn.ModuleList()\n\t\tself.AC = torch.nn.ModuleList()\n\t\tself.use_cuda = use_cuda\n\t\t\n\t\tn_half = int(n_group/2)\n\t\t\n\t\tn_remaining_channels = n_group\n\n\t\tfor k in range(n_flows):\n\t\t\tif k % self.n_early_every == 0 and k > 0:\n\t\t\t\tn_half = n_half - int(self.n_early_size/2)\n\t\t\t\tn_remaining_channels = n_remaining_channels - self.n_early_size\n\t\t\tself.IC.append(InvertibleConv(n_remaining_channels))\n\t\t\t# In original code, things used to instantiate the WN here (since they don't use AC class):\n\t\t\t# WN(n_half, n_mel_channels*n_group, **WN_config)\n\t\t\tself.AC.append(AffineCoupling(n_half, n_context_channels, n_layers, dilation_list, n_channels, kernel_size))\n\n\t\tself.n_remaining_channels = n_remaining_channels # Apparently will be useful at inference, according to authors\n\t\t\n\t\t\t\n\tdef forward(self, forecast, context):\n\t\t'''\n\t\tTransform a forecast with a given context into the latent space (so a spherical gaussian sample)\n\t\t\n\t\tforecast: torch FloatTensor of shape [b, N], where b is batch dimension and N is length of forecast (usually 96)\n\t\tcontext: torch FloatTensor of shape [b, M], where b is batch dimension and M is num features (currently probably just =N,\n\t\t\tuse past 24 hours of data to predict next 24 hours of data)\n\n\t\t'''\n\t\t \n\t\t# context = context.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)\n\t\t# context = context.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)\n\t\t\n\t\t# Not sure why we do this, applying it blindly from original code\n\t\t# unfold(dimension, size, step): returns tensor which contains all slices of size \"size\" from the tensor\n\t\t# in the dimension \"dimension\". 
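# Illustrative check: the coupling layer above is invertible by construction. The
# forward pass computes y1 = exp(log_s) * x1 + b, and the reverse pass recovers
# x1 = (y1 - b) / exp(log_s) exactly, because log_s and b depend only on the
# untouched half of the channels:
import numpy as np

rng = np.random.default_rng(1)
x1, log_s, b = rng.normal(size=(3, 5))
y1 = np.exp(log_s) * x1 + b                   # forward (training) direction
x1_rec = (y1 - b) / np.exp(log_s)             # reverse (sampling) direction
print(np.allclose(x1, x1_rec))                # True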
Step between two slices is given by step.\n\t\t# Ex: [1,2,3,4].unfold(0, 2, 1) = [1,2], [2,3], [3,4]\n\t\t# In effect, THI is the reshape operation which moves things from the spatial dimension into the channel dimension\n\t\tforecast = forecast.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)\n\t\t\n\t\toutput_forecast = []\n\t\tlog_s_list = []\n\t\tlog_det_W_list = []\n\t\t\n\t\tfor k in range(self.n_flows):\n\t\t\tif k % self.n_early_every == 0 and k > 0:\n\t\t\t\toutput_forecast.append(forecast[:, :self.n_early_size, :])\n\t\t\t\tforecast = forecast[:, self.n_early_size:, :]\n\t\t\t\t\n\t\t\tforecast, log_det_W = self.IC[k](forecast)\n\t\t\tlog_det_W_list.append(log_det_W)\n\t\t\t\n\t\t\tforecast, log_s = self.AC[k](forecast, context)\n\t\t\tlog_s_list.append(log_s)\n\n\t\t\t# print(\"Shape of forecast in forward: \", forecast.shape)\n\t\t\t\n\t\toutput_forecast.append(forecast)\n\t\t# This keeps track of what shapes were assigned early, so that if we want to re-use these generated\n\t\t# latent samples in generate, we can properly assign the shapes\n\t\tearly_assignment_shapes = [] \n\t\tfor early_output in output_forecast:\n\t\t\tearly_assignment_shapes.append(early_output.shape)\n\t\treturn torch.cat(output_forecast, 1), log_s_list, log_det_W_list, early_assignment_shapes\n\n\tdef generate(self, context, sigma=1.0, latent_z=None, early_assignment_shapes=None):\n\t\tif latent_z is not None:\n\t\t\tassert early_assignment_shapes is not None, \"If using latent_z, must also give early_assignment_shapes\"\n\t\tif early_assignment_shapes is not None:\n\t\t\tassert latent_z is not None, \"If giving early_assignment_shapes, specify latent_z as well\"\n\n\t\t# if we don't give specifc points in the latent space to transform, sample random ones. Otherwise, use the specified points\n\t\tif latent_z is None:\n\t\t\tif self.use_cuda:\n\t\t\t\tforecast = torch.cuda.FloatTensor(context.size(0), self.n_remaining_channels, int(self.n_channels / self.n_group)).normal_()\n\t\t\telse:\n\t\t\t\tforecast = torch.FloatTensor(context.size(0), self.n_remaining_channels, int(self.n_channels / self.n_group)).normal_()\n\t\t\t# forecast = torch.autograd.Variable(sigma*forecast) # why does this have autograd in original paper? Never trains in this dir\n\t\t\tforecast = sigma*forecast # why does this have autograd in original paper? 
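# Illustrative sketch of the unfold/permute reshape used in forward() above: it
# moves non-overlapping groups of time steps into the channel dimension, e.g.
# with n_group=4 a length-8 signal becomes a (batch, 4, 2) tensor:
import torch

x = torch.arange(8.0).unsqueeze(0)            # shape (1, 8)
g = x.unfold(1, 4, 4).permute(0, 2, 1)        # shape (1, 4, 2)
print(g.shape)                                # torch.Size([1, 4, 2])
print(g[0, :, 0])                             # tensor([0., 1., 2., 3.])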
Never trains in this dir\n\t\telse:\n\t\t\tlatent_z_parts = []\n\t\t\tfor i in range(len(early_assignment_shapes)):\n\t\t\t\tif i==0:\n\t\t\t\t\tlatent_z_parts.append(latent_z[:, :early_assignment_shapes[i][1], :])\n\t\t\t\telse:\n\t\t\t\t\tstart = early_assignment_shapes[i-1][1]\n\t\t\t\t\tfinish = start + early_assignment_shapes[i][1]\n\t\t\t\t\tlatent_z_parts.append(latent_z[:, start:finish, :])\n\n\n\t\t\tlatent_z_parts = latent_z_parts[::-1]\n\t\t\tforecast = latent_z_parts[0]\n\n\n\t\t# To keep track of how many early_every pieces we've appended, when we're using a given latent_z\n\t\tnum_early_every_inserted = 1 \n\t\tfor k in reversed(range(self.n_flows)):\n\t\t\tforecast = self.AC[k](forecast, context, reverse=True)\n\t\t\tforecast = self.IC[k](forecast, reverse=True)\n\t\t\t\n\t\t\tif k % self.n_early_every == 0 and k > 0:\n\t\t\t\tif latent_z is None:\n\t\t\t\t\tif self.use_cuda:\n\t\t\t\t\t\tz = torch.cuda.FloatTensor(context.size(0), self.n_early_size, int(self.n_channels / self.n_group)).normal_()\n\t\t\t\t\telse:\n\t\t\t\t\t\tz = torch.FloatTensor(context.size(0), self.n_early_size, int(self.n_channels / self.n_group)).normal_()\n\t\t\t\t\tforecast = torch.cat((sigma*z, forecast), 1)\n\t\t\t\telse:\n\t\t\t\t\tz = latent_z_parts[num_early_every_inserted]\n\t\t\t\t\tforecast = torch.cat((z, forecast), 1)\n\t\t\t\t\tnum_early_every_inserted += 1\n\t\t\n\t\t# check dimensions and shit\n\t\tforecast = forecast.permute(0, 2, 1).contiguous().view(forecast.size(0), -1).data\n\t\treturn forecast\n\t\n# Copied directly from waveglow github\n# Work this out on paper, it's related to the process used in the Table on pg 4 of glow paper\n# I believe that separated it out into its own function so they could use jit on it for a speed up\n\n@torch.jit.script\ndef fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):\n\tn_channels_int = n_channels[0]\n\tin_act = input_a+input_b\n\tt_act = torch.tanh(in_act[:, :n_channels_int, :])\n\ts_act = torch.sigmoid(in_act[:, n_channels_int:, :])\n\tacts = t_act * s_act\n\treturn acts\n\n\n\n\nclass WN(torch.nn.Module):\n\t\"\"\"\n\tThis is the WaveNet like layer for the affine coupling. The primary difference\n\tfrom WaveNet is the convolutions need not be causal. There is also no dilation\n\tsize reset. The dilation only doubles on each layer\n\t\"\"\"\n\tdef __init__(self, n_in_channels, n_context_channels, n_layers, n_channels,\n\t\t\t\t kernel_size, dilation_list):\n\t\tsuper(WN, self).__init__()\n\t\tassert(kernel_size % 2 == 1)\n\t\tassert(n_channels % 2 == 0)\n\t\tassert(len(dilation_list) == n_layers)\n\t\t# number of layers in the neural network\n\t\tself.n_layers = n_layers\n\t\t# Number of channels in the data (not sure why doesn't match n_in_channels)\n\t\tself.n_channels = n_channels \n\t\tself.in_layers = torch.nn.ModuleList()\n\t\tself.res_skip_layers = torch.nn.ModuleList()\n\t\tself.dilation_list = dilation_list\n\n\t\tstart = torch.nn.Conv1d(n_in_channels, n_channels, 1)\n\t\tstart = torch.nn.utils.weight_norm(start, name='weight')\n\t\tself.start = start\n\n\t\t# Initializing last layer to 0 makes the affine coupling layers\n\t\t# do nothing at first. 
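# Illustrative sketch: fused_add_tanh_sigmoid_multiply above is WaveNet's gated
# activation. The first half of the channels passes through tanh (content), the
# second half through sigmoid (gate), and the two are multiplied elementwise:
import torch

n = 4
a = torch.randn(1, 2 * n, 10)                 # in_layer output for one layer
b = torch.randn(1, 2 * n, 10)                 # conditioning slice for that layer
s = a + b
acts = torch.tanh(s[:, :n, :]) * torch.sigmoid(s[:, n:, :])
print(acts.shape)                             # torch.Size([1, 4, 10])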
This helps with training stability\n\t\tend = torch.nn.Conv1d(n_channels, 2*n_in_channels, 1)\n\t\tend.weight.data.zero_()\n\t\tend.bias.data.zero_()\n\t\tself.end = end\n\t\t# Self.end is the final layer, which outputs something with 2*n_channels\n\t\t# We need this to have double the channels because it is computing both\n\t\t# the shift and scale factor, and each one of those is n_channels big\n\n\t\t# This is the conditioning layer, the layer which accepts our context\n\t\t# data as input, and outputs the data which gets mixed into the network\n\t\t# as it processes the samples. Specifically, the mixing happens in the\n\t\t# fused_add_tanh_sigmoid_multiply function, where output from this\n\t\t# layer is added to the sample being transformed by the network before\n\t\t# doing the tanh and sigmoid activations.\n\t\t\n\t\t# Note the output size of this layer, 2*n_channels*nlayers\n\t\t# This is enough output that, at each layer of this NN (and therefore at each\n\t\t# call to the fused_add_tanh_sigmoid_multiply function), there is a unique\n\t\t# bit of output from this layer that is mixed with the sample in that layer\n\t\t# of the neural network. If we need to regularize the model more, we could\n\t\t# consider not having it be unique for every layer\n\t\tcond_layer = torch.nn.Conv1d(n_context_channels, 2*n_channels*n_layers, 1)\n\t\tself.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')\n\n\t\tfor i in range(n_layers):\n\t# dilation = 2 ** i # gonna want to change this since our data is much lower dimensional\n\t\t\tdilation = self.dilation_list[i]\n\t\t\tpadding = int((kernel_size*dilation - dilation)/2)\n\t\t\tin_layer = torch.nn.Conv1d(n_channels, 2*n_channels, kernel_size,\n\t\t\t\t\t\t\t\t\t dilation=dilation, padding=padding)\n\t\t\tin_layer = torch.nn.utils.weight_norm(in_layer, name='weight')\n\t\t\tself.in_layers.append(in_layer)\n\n\n\t\t\t# last one is not necessary\n\t\t\tif i < n_layers - 1:\n\t\t\t\tres_skip_channels = 2*n_channels\n\t\t\telse:\n\t\t\t\tres_skip_channels = n_channels\n\t\t\tres_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)\n\t\t\tres_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')\n\t\t\tself.res_skip_layers.append(res_skip_layer)\n\n\tdef forward(self, forecast, context):\n\t\tforecast = self.start(forecast)\n\t\toutput = torch.zeros_like(forecast)\n\t\tn_channels_tensor = torch.IntTensor([self.n_channels])\n\n\t\tcontext = self.cond_layer(context)\n\n\t\tfor i in range(self.n_layers):\n\t\t\tcontext_offset = i*2*self.n_channels\n\t\t\tacts = fused_add_tanh_sigmoid_multiply(\n\t\t\t\tself.in_layers[i](forecast),\n\t\t\t\tcontext[:,context_offset:context_offset+2*self.n_channels,:],\n\t\t\t\tn_channels_tensor)\n\n\t\t\tres_skip_acts = self.res_skip_layers[i](acts)\n\t\t\tif i < self.n_layers - 1:\n\t\t\t\tforecast = forecast + res_skip_acts[:,:self.n_channels,:]\n\t\t\t\toutput = output + res_skip_acts[:,self.n_channels:,:]\n\t\t\telse:\n\t\t\t\toutput = output + res_skip_acts\n\n\t\treturn self.end(output)\n\t\n\t","sub_path":"training/waveglow_model.py","file_name":"waveglow_model.py","file_ext":"py","file_size_in_byte":13972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"50242620","text":"\n\n#calss header\nclass _CONQUER():\n\tdef __init__(self,): \n\t\tself.name = \"CONQUER\"\n\t\tself.definitions = [u'to take control or possession of foreign land, or a group of people, by force: ', u'to deal with or successfully fight against a problem 
or an unreasonable fear: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_conquer.py","file_name":"_conquer.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"358550173","text":"import sys\nimport json\n\ncount =0;\nfor line in sys.stdin: \n fields = (line.rstrip('\\n').split('\\t'))[1]\n dict = json.loads(fields)\n output = []\n for key in dict:\n value = dict[key]\n output.append(value)\n if (len(output) == 21):\n print('\\t'.join([x for x in output]))\n \n ","sub_path":"assignments/assignment3/convert_tsv.py","file_name":"convert_tsv.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"413964534","text":"import logging\nfrom typing import List\nfrom openfizzbuzz.core.letter import Letter, LetterFactory\n\nlogger = logging.getLogger('word')\n\n\nclass Word:\n letters: List[Letter] = None\n letter_factory = None\n\n def __init__(self, letters, letter_factory=LetterFactory):\n self.letters = letters\n self.letter_factory = letter_factory\n\n def __repr__(self):\n return ''.join([letter.__repr__() for letter in self.letters])\n\n def __add__(self, other):\n return WordFactory.create_word(self.letters + other.letters, self.letter_factory)\n\n\nclass WordFactory:\n\n @staticmethod\n def create_word(chars, letter_factory=LetterFactory):\n if not chars:\n raise ValueError('Attempted to create an empty word')\n\n if isinstance(chars[0], str):\n return Word(LetterFactory.create_letters(chars), letter_factory)\n elif isinstance(chars[0], Letter):\n return Word(chars, letter_factory)\n","sub_path":"openfizzbuzz/core/word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"278917295","text":"from __future__ import print_function\n\nimport os\nimport numpy as np\nfrom keras.layers import Input, Reshape, Dense, Dropout, Activation, Flatten, PReLU, merge\nfrom keras.layers.convolutional import Convolution2D, UpSampling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import *\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nimport matplotlib.pyplot as plt\nfrom keras.models import Model\nfrom keras import backend as K\nfrom tqdm import tqdm\nfrom PIL import Image\n\nimg_rows, img_cols = 28, 28\n\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nX_train = np.expand_dims(X_train, axis=3)\nX_test = np.expand_dims(X_test, axis=3)\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\n\nK.set_image_dim_ordering('tf')\n\n# generator\nnch = 256\ng_input = Input(shape=(100, ))\ng_input_cond = Input(shape=(10, ))\ng = Dense(nch * 7 * 7, init='glorot_normal')(merge([g_input, g_input_cond], mode='concat', concat_axis=1))\ng = BatchNormalization(mode=2)(g)\ng = PReLU()(g)\ng = Reshape([7, 7, nch])(g)\ng = Convolution2D(nch, 3, 3, border_mode='same', init='glorot_uniform')(g)\ng = BatchNormalization(axis=3, mode=2)(g)\ng = PReLU()(g)\ng = UpSampling2D(size=(2, 2))(g)\ng = Convolution2D(nch / 2, 3, 3, border_mode='same', init='glorot_uniform')(g)\ng = BatchNormalization(axis=3, mode=2)(g)\ng = PReLU()(g)\ng 
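# Usage notes for the Word record above (comment-only, since Letter and
# LetterFactory live in openfizzbuzz.core.letter, which is not in this dump).
# Assuming LetterFactory.create_letters('ab') returns [Letter('a'), Letter('b')]:
#
#     fizz = WordFactory.create_word('fizz')
#     buzz = WordFactory.create_word('buzz')
#     print(fizz + buzz)        # expected to repr as 'fizzbuzz'
#
# Note that create_word returns None when chars[0] is neither a str nor a
# Letter; the record relies on callers passing valid input.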
= UpSampling2D(size=(2, 2))(g)\ng = Convolution2D(nch / 4, 3, 3, border_mode='same', init='glorot_uniform')(g)\ng = BatchNormalization(axis=3, mode=2)(g)\ng = PReLU()(g)\ng = Convolution2D(1, 1, 1, border_mode='same', init='glorot_uniform')(g)\ng_output = Activation('sigmoid')(g)\ngenerator = Model([g_input, g_input_cond], g_output)\ng_opt = Adam(lr=1e-4)\ngenerator.compile(loss='binary_crossentropy', optimizer=g_opt)\n\n# discriminator\ng_output_shape = X_train.shape[1:]\nd_input = Input(shape=g_output_shape)\nd_input_cond = Input(shape=(10, ))\nd = Convolution2D(32, 3, 3, border_mode='same', subsample=(2, 2))(d_input)\nd = PReLU()(d)\nd = Dropout(0.2)(d)\nd = Convolution2D(64, 3, 3, border_mode='same', subsample=(2, 2))(d)\nd = PReLU()(d)\nd = Dropout(0.2)(d)\nd = Convolution2D(128, 3, 3, border_mode='same', subsample=(2, 2))(d)\nd = PReLU()(d)\nd = Flatten()(d)\nd = Dropout(0.2)(d)\nd = Dense(128)(merge([d, d_input_cond], mode='concat', concat_axis=1))\nd = PReLU()(d)\nd = Dropout(0.5)(d)\nd_output = Dense(2, activation='softmax')(d)\ndiscriminator = Model([d_input, d_input_cond], d_output)\nd_opt = Adam(lr=1e-3)\ndiscriminator.compile(loss='categorical_crossentropy', optimizer=d_opt)\n\n\n# zamrzavanje/odmrzavanje slojeva\ndef make_trainable(net, val):\n net.trainable = val\n for l in net.layers:\n l.trainable = val\n\n# ceo GAN model\ngan_input = Input(shape=(100, ))\ngan_input_cond = Input(shape=(10, ))\ngen_output = generator([gan_input, gan_input_cond])\ngan_output = discriminator([gen_output, gan_input_cond])\ngan = Model([gan_input, gan_input_cond], gan_output)\ngan.compile(loss='categorical_crossentropy', optimizer=g_opt)\n\n\ndef plot_loss(losses):\n plt.figure(figsize=(10, 8))\n plt.plot(losses['d'], label='discriminitive loss')\n plt.plot(losses['g'], label='generative loss')\n plt.legend()\n plt.show()\n\n\ndef combine_images(generated_images):\n num = generated_images.shape[0]\n width = int(np.sqrt(num))\n height = int(np.ceil(float(num)/width))\n shape = generated_images.shape[1:3]\n image = np.zeros((height*shape[0], width*shape[1]), dtype=generated_images.dtype)\n for index, img in enumerate(generated_images):\n i = int(index/width)\n j = index % width\n image[i*shape[0]:(i+1)*shape[0], j*shape[1]:(j+1)*shape[1]] = img[:, :, 0]\n return image\n\n\n# recnik za cuvanje vrednosti funkcije greske\nlosses = {'d': [], 'g': []}\n\nplt_noise = np.random.uniform(0, 1, size=[100, 100])\nplt_cond = np.tile(np.arange(0, 10), 10).flatten()\nplt_cond = np_utils.to_categorical(plt_cond, 10)\n\noutput_imgs_dir = 'imgs/mnist_cnn_gan_cond'\nif not os.path.exists(output_imgs_dir):\n os.mkdir(output_imgs_dir)\n\n\n# glavna petlja za obucavanje\ndef train_for_n(nb_epoch=5000, start_at=0, batch_size=32):\n for e in tqdm(range(start_at, start_at+nb_epoch)):\n # generator izgenerise slike\n take_idx = np.random.randint(0, X_train.shape[0], size=batch_size)\n image_batch = X_train[take_idx, :, :, :]\n cond_batch = y_train[take_idx]\n cond_batch = np_utils.to_categorical(cond_batch, 10)\n noise_gen = np.random.uniform(0, 1, size=(batch_size, 100))\n generated_images = generator.predict([noise_gen, cond_batch])\n\n if e % 100 == 0:\n # plotovati rezultate generatora na svakih 100 epoha\n plt_generated_images = generator.predict([plt_noise, plt_cond])\n image = combine_images(plt_generated_images)\n image *= 255.0\n Image.fromarray(image.astype(np.uint8)).save(output_imgs_dir + '/epoch_{}.jpg'.format(e))\n\n # --- obucavanje diskriminatora ---\n # spajanje realnih i izgenerisanih primera\n X = 
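# Illustrative check of the combine_images grid maths above: N generated
# (h, w, 1) images are tiled width = floor(sqrt(N)) across and
# height = ceil(N / width) down, so a 10-image batch of 28x28 digits lands on a
# 112 x 84 canvas (rows x cols), with the last row partially filled:
import numpy as np

num = 10
width = int(np.sqrt(num))                      # 3 tiles across
height = int(np.ceil(float(num) / width))      # 4 tiles down
print(width, height, height * 28, width * 28)  # 3 4 112 84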
np.concatenate((image_batch, generated_images))\n y = np.zeros([2 * batch_size, 2])\n # prva polovina primera su realni, druga polovina su izgenerisani\n y[0:batch_size, 1] = 1\n y[batch_size:, 0] = 1\n\n # omoguciti obucavanje diskriminatora\n make_trainable(discriminator, True)\n d_loss = discriminator.train_on_batch([X, np.concatenate((cond_batch, cond_batch))], y)\n losses['d'].append(d_loss)\n\n # --- obucavanje generatora ---\n # generisanje uniformnog suma\n noise_tr = np.random.uniform(0, 1, size=[batch_size, 100])\n y2 = np.zeros([batch_size, 2])\n # zelimo da izlaz iz generatora bude klasifikovan kao da je iz realnog skupa podataka (varamo diskriminator)\n y2[:, 1] = 1\n\n # onemoguciti obucavanje diskriminatora\n make_trainable(discriminator, False)\n g_loss = gan.train_on_batch([noise_tr, cond_batch], y2)\n losses['g'].append(g_loss)\n\n\n# obucavanje 10k epoha sa originalnim LR\ntrain_for_n(nb_epoch=10000, start_at=0, batch_size=32)\n\n# obucavanje 5k epoha sa smanjenim LR\nK.set_value(g_opt.lr, 1e-5)\nK.set_value(d_opt.lr, 1e-4)\ntrain_for_n(nb_epoch=5000, start_at=10000, batch_size=32)\n\n# obucavanje 5k epoha sa jos smanjenim LR\nK.set_value(g_opt.lr, 1e-6)\nK.set_value(d_opt.lr, 1e-5)\ntrain_for_n(nb_epoch=5000, start_at=15000, batch_size=32)\n\n# plotovanje kriva obucavanja\nplot_loss(losses)\n","sub_path":"05-generative/mnist_cnn_gan_cond.py","file_name":"mnist_cnn_gan_cond.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"71862368","text":"import os\nimport shutil\n\n# Removing Folders\nfiles=[]\nfor i in os.listdir():\n if os.path.isfile(i)==True:\n files.append(i)\n\n# Removing main file name\nfiles.remove('File_organizer.py')\n\n# Getting Extensions and files\nfn_ext=[i.split(\".\") for i in files]\n\n# Creating folders and moving\nfor i in range(len(fn_ext)):\n if os.path.exists(fn_ext[i][1]+\"__files\")==False:\n os.mkdir(fn_ext[i][1]+\"__files\")\n shutil.move(files[i],fn_ext[i][1]+\"__files\")\n else:\n shutil.move(files[i],fn_ext[i][1]+\"__files\")\n\nprint(\"DONE\")\n","sub_path":"FileOrganizer/File_organizer.py","file_name":"File_organizer.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"47032597","text":"import pickle\r\nimport os\r\nimport pathlib\r\n\r\n\r\nclass Account:\r\n\tdef __init__(self):\r\n\t\tself.accNo: 0\r\n\t\tself.name:''\r\n\t\tself.deposit:0\r\n\t\tself.type=''\r\n\r\n\tdef createAccount(self):\r\n\t\tself.accNo=int(input('enter the account number: '))\r\n\t\tself.name = input('enter account holder name : ')\r\n\t\tself.type = input('enter the type of the account : ')\r\n\t\tself.deposit = int(input('enter the initial amount >=5000 for current'))\r\n\t\tprint('\\n\\n\\nAccount Created')\r\n\r\n\t# def showAccount(self):\r\n\t# \tprint('Account number:',self.accNo)\r\n\t# \tprint('Account holder name: ',self.name)\r\n\t# \tprint('Type of Account: ',self.type)\r\n\t# \tprint('Balance: ',self.deposit)\r\n\r\n\t# def modifyAccount(self):\r\n\t# \tprint('Account number : ',self.accNo)\r\n\t# \tself.name = input('modify account holder name : ')\r\n\t# \tself.type = input('modify the type of the account : ')\r\n\t# \tself.deposit = int(input('modify Balance'))\r\n\r\n\t# def depositAmount(self,amount):\r\n\t# \tself.deposit+=amount\r\n\r\n\t# def withdrawAmount(self,amount):\r\n\t# \tself.deposit-=amount\r\n\r\n\t# def report(self):\r\n\t# 
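# Illustrative sketch: the organizer record above indexes i.split(".")[1], which
# raises IndexError for extensionless files and picks "tar" for archive.tar.gz.
# A safer variant of the same idea with os.path.splitext (the handling of
# extensionless files is my assumption, not the record's):
import os
import shutil

def organize(directory="."):
    for name in os.listdir(directory):
        path = os.path.join(directory, name)
        if not os.path.isfile(path) or name == "File_organizer.py":
            continue
        ext = os.path.splitext(name)[1].lstrip(".") or "no_extension"
        target = os.path.join(directory, ext + "__files")
        os.makedirs(target, exist_ok=True)    # replaces the exists()/mkdir pair
        shutil.move(path, target)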
\tprint(self.accNo,' ',self.name,' ',self.type,' ',self.deposit)\r\n\r\n\t# def getAccountno(self):\r\n\t# \treturn self.accNo\r\n\r\n\t# def getAccountholderName(self):\r\n\t# \treturn self.name\r\n\r\n\t# def getAccountType(self):\r\n\t# \treturn self.type\r\n\r\n\t# def getDeposit(self):\r\n\t# \treturn self.deposit\r\n\r\n\r\ndef intro():\r\n\tprint('**************')\r\n\tprint('**** BANK MANAGEMENT ****')\r\n\tprint('**************')\r\n\r\ndef writeAccount():\r\n\taccount = Account()\r\n\taccount.createAccount()\r\n\twriteAccountsFile(account)\r\n\r\ndef displayAll():\r\n\tfile = pathlib.Path('accounts.data')\r\n\r\n\tif file.exists():\r\n\t\tinfile = open('accounts.data','rb')\r\n\t\tmylist = pickle.load(infile)\r\n\r\n\t\tfor item in mylist:\r\n\t\t\tprint(item.accNo,' ',item.name,' ',item.type,' ',item.deposit)\r\n\t\tinfile.close()\r\n\telse:\r\n\t print('no records to display')\r\n\r\n\r\ndef displaySp(num):\r\n\tfile = pathlib.Path('accounts.data')\r\n\r\n\tif file.exists():\r\n\t\t\tinfile = open('accounts.data','rb')\r\n\t\t\tmylist = pickle.load(infile)\r\n\t\t\tinfile.close()\r\n\t\t\tfound = False\r\n\r\n\t\t\tfor item in mylist:\r\n\t\t\t\tif item.accNo == num:\r\n\t\t\t\t\tprint('your account balance is = ',item.deposit)\r\n\r\n\t\t\t\t\tfound =True\r\n\telse:\r\n\t\t\tprint('no record found')\r\n\r\n\tif not found:\r\n\t\t\tprint('no existing records with this number')\r\n\r\n\r\ndef depostAndwithdraw(num1,num2):\r\n\t\tfile = pathlib.Path('accounts.data')\r\n\r\n\t\tif file.exists():\r\n\t\t\tinfile = open('accounts.data','rb')\r\n\t\t\tmylist = pickle.load(infile)\r\n\t\t\tinfile.close()\r\n\r\n\t\t\tos.remove('accounts.data')\r\n\r\n\t\t\tfor item in mylist:\r\n\t\t\t\tif item.accNo == num1:\r\n\t\t\t\t\tif num2 == 1:\r\n\t\t\t\t\t\tamount = int(input('enter the amount to deposit : '))\r\n\t\t\t\t\t\titem.deposit += amount\r\n\t\t\t\t\t\tprint('your account is updated')\r\n\r\n\t\t\t\t\tif num2 ==2:\r\n\t\t\t\t\t\tamount = int(input('enter the amount to withdarw'))\r\n\t\t\t\t\t\tif amount<=item.deposit:\r\n\t\t\t\t\t\t\t\titem.deposit -= amount\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tprint('you cannot withdarw larger ammount')\r\n\t\telse:\r\n\t\t print('no records to search')\r\n\r\n\t\toutfile = open('newaccounts.data','wb')\r\n\t\tpickle.dump(mylist,outfile)\r\n\t\toutfile.close()\r\n\r\n\t\tos.rename('newaccounts.data','accounts.data')\r\n\r\n\r\ndef deleteAccount(num):\r\n\t\tfile = pathlib.Path('accounts.data')\r\n\r\n\t\tif file.exists():\r\n\t\t\tinfile = open('accounts.data','rb')\r\n\t\t\toldlist = pickle.load(infile)\r\n\t\t\tinfile.close()\r\n\r\n\t\t\tnewlist =[]\r\n\t\t\tfor item in oldlist:\r\n\t\t\t\tif item.accNo!= num:\r\n\t\t\t\t\tnewlist.apppend(item)\r\n\t\t\tos.remove('accounts.data')\r\n\t\t\toutfile = open('newaccounts.data','wb')\r\n\t\t\tpickle.dump(newlist,outfile)\r\n\t\t\toutfile.close()\r\n\t\t\tos.rename('newaccounts.data','accounts.data')\r\n\r\n\r\ndef modifyAccount(num):\r\n\t\tfile = pathlib.Path('accounts.data')\r\n\r\n\t\tif file.exists():\r\n\t\t\tinfile = open('accounts.data','rb')\r\n\t\t\toldlist = pickle.load(infile)\r\n\t\t\tinfile.close()\r\n\r\n\t\t\tos.remove('accounts.data')\r\n\r\n\t\t\tfor item in oldlist:\r\n\t\t\t\tif item.accNo == num:\r\n\t\t\t\t\titem.name = input('enter the Account holder name: ')\r\n\t\t\t\t\titem.type = input('enter the account type: ')\r\n\t\t\t\t\titem.deposit = int(input('enter the amount: '))\r\n\r\n\t\t\toutfile = 
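# Two hazards in the banking record above: displaySp only assigns `found` inside
# the file.exists() branch, so the trailing `if not found` raises NameError when
# the data file is missing, and deleteAccount calls the misspelled
# newlist.apppend(item). A corrected sketch of deleteAccount:
import os
import pathlib
import pickle

def delete_account(num):
    if not pathlib.Path('accounts.data').exists():
        return
    with open('accounts.data', 'rb') as infile:
        oldlist = pickle.load(infile)
    newlist = [item for item in oldlist if item.accNo != num]   # append -> filter
    os.remove('accounts.data')
    with open('newaccounts.data', 'wb') as outfile:
        pickle.dump(newlist, outfile)
    os.rename('newaccounts.data', 'accounts.data')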
open('newaccounts.data','wb')\r\n\t\t\tpickle.dump(oldlist,outfile)\r\n\t\t\toutfile.close()\r\n\t\t\tos.rename('newaccounts.data','accounts.data')\r\n\r\n\r\ndef writeAccountsFile(account):\r\n\t\tfile = pathlib.Path('accounts.data')\r\n\r\n\t\tif file.exists():\r\n\t\t\tinfile = open('accounts.data','rb')\r\n\t\t\toldlist = pickle.load(infile)\r\n\t\t\toldlist.append(account)\r\n\t\t\tinfile.close()\r\n\t\t\tos.remove('accounts.data')\r\n\r\n\t\telse:\r\n\t\t\toldlist = [account]\r\n\r\n\t\toutfile = open('newaccounts.data','wb')\r\n\t\tpickle.dump(oldlist,outfile)\r\n\t\toutfile.close()\r\n\t\tos.rename('newaccounts.data','accounts.data')\r\n\r\n\r\nch = ''\r\nnum = 0\r\nintro()\r\nwhile ch!=8:\r\n\r\n\t\tprint('\\tMAIN MENU')\r\n\t\tprint('\\t1. NEW ACCOUNT')\r\n\t\tprint('\\t2. DEPOSIT AMOUNT')\r\n\t\tprint('\\t3. WITHDRAW AMOUNT')\r\n\t\tprint('\\t4. BALANCE ENQUIRY')\r\n\t\tprint('\\t5. ALL ACCOUNT HOLDER LIST')\r\n\t\tprint('\\t6. CLOSE AN ACCOUNT')\r\n\t\tprint('\\t7. MODIFY AN ACCOUNT')\r\n\t\tprint('\\t8. EXIT')\r\n\t\tprint('\\tSelect your option (1-8)')\r\n\t\tch = input()\r\n\r\n\t\t\r\n\r\n\t\tif ch ==\"1\":\r\n\t\t\twriteAccount()\r\n\t\telif ch ==\"2\":\r\n\t\t\tnum = int(input('\\t Enter your account no: '))\r\n\t\t\tdepostAndwithdraw(num,1)\r\n\t\telif ch ==\"3\":\r\n\t\t\tnum = int(input('\\t Enter your account no: '))\r\n\t\t\tdepostAndwithdraw(num,2)\r\n\t\telif ch =='4':\r\n\t\t\tnum = int(input('\\t Enter your account no: '))\r\n\t\t\tdisplaySp(num)\r\n\t\telif ch == '5':\r\n\t\t\tdisplayAll()\r\n\t\telif ch == '6':\r\n\t\t\tnum = int(input('\\t Enter your account no: '))\r\n\t\t\tdeleteAccount(num)\r\n\t\telif ch == '7':\r\n\t\t\tnum = int(input('\\t Enter your account no: '))\r\n\t\t\tmodifyAccount(num)\r\n\t\telif ch == '8':\r\n\t\t\tprint('thanks for using bank MANAGEMENT system')\t \r\n\t\t\tbreak \r\n\t\telse:\r\n\t\t\tprint('invalid choice')\r\n\r\n\t\tch = input('enter your choice: ')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"banking.py","file_name":"banking.py","file_ext":"py","file_size_in_byte":5715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"633151954","text":"import re\n\nimport pandas as pd\nfrom pandas.io.sql import DatabaseError\nimport psycopg2\n\nfrom sqlstate import *\n\n\nclass AlarmSql(object):\n def __init__(self, dbname, logdbname, host, user, root):\n self.dbname = dbname\n self.host = host\n self.dbuser = user\n self.root = root\n self.logdbname = logdbname\n\n self.conn_alm = None\n self.conn_log = None\n\n self.pvlist = None\n\n def connect(self):\n self.conn_alm = psycopg2.connect(dbname=self.dbname,\n host=self.host,\n user=self.dbuser)\n self.conn_alm.autocommit = True\n\n self.conn_log = psycopg2.connect(dbname=self.logdbname,\n host=self.host,\n user=self.dbuser)\n self.conn_log.autocommit = True\n\n def close(self):\n if self.conn_alm:\n self.conn_alm.close()\n\n if self.conn_log:\n self.conn_log.close()\n\n print(\"close\")\n\n def current_alarm_all(self):\n sql_str = SQL_CURRENT_ALARM_ALL.format(self.root)\n try:\n data = pd.read_sql(sql=sql_str, con=self.conn_alm)\n except (ValueError, DatabaseError):\n raise\n return data\n\n def current_alarm_msg(self, msg):\n sql_str = SQL_CURRENT_ALARM_MSG.format(self.root, msg)\n try:\n data = pd.read_sql(sql=sql_str, con=self.conn_alm)\n except (ValueError, DatabaseError):\n raise\n return data\n\n def history_alarm_all(self, message, starttime, endtime):\n # with message filter\n if 
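# The menu loop above guards on `while ch != 8` although input() always returns
# a string, and it calls input() both at the top and at the bottom of the loop,
# so every other entry is consumed as a stray choice. A minimal corrected
# skeleton (the handlers dict is hypothetical):
def menu_loop(handlers):
    while True:
        ch = input('Select your option (1-8): ').strip()
        if ch == '8':
            print('thanks for using bank MANAGEMENT system')
            break
        action = handlers.get(ch)
        if action is None:
            print('invalid choice')
        else:
            action()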
message and message != \".*\":\n try:\n mask = self.pvlist[\"message\"].str.contains(message)\n pvlist = self.pvlist[mask]\n except re.error:\n return []\n # id, datum, record_name, severity, eventtime, status\n\n sql_str = SQL_HISTORY_GROUP.format(self.root)\n params = (starttime, endtime, pvlist[\"record_name\"].tolist())\n\n try:\n data = pd.read_sql(sql=sql_str, con=self.conn_log,\n params=params)\n except (ValueError, DatabaseError):\n raise\n\n ret = data.merge(self.pvlist)\n\n return ret.sort_values(by=\"id\", ascending=False)\n\n # without message filter\n # id, datum, record_name, severity, eventtime, status\n sql_str = SQL_HISTORY_ALL.format(self.root)\n params = (starttime, endtime)\n\n try:\n data = pd.read_sql(sql=sql_str, con=self.conn_log,\n params=params)\n except (ValueError, DatabaseError):\n raise\n\n ret = data.merge(self.pvlist, how=\"left\")\n ret[\"group\"] = ret[\"group\"].fillna(\"Unknown\")\n ret[\"message\"] = ret[\"message\"].fillna(\"Unknown\")\n\n return ret.sort_values(by=\"id\", ascending=False)\n\n def history_alarm_group(self, group, message, starttime, endtime):\n try:\n mask = self.pvlist[\"group\"].str.match(group, na=False)\n pvlist = self.pvlist[mask]\n pvlist = pvlist[pvlist[\"message\"].str.contains(message)]\n except re.error:\n return []\n\n # id, datum, record_name, severity, eventtime, status\n sql_str = SQL_HISTORY_GROUP.format(self.root)\n params = (starttime, endtime, pvlist[\"record_name\"].tolist())\n\n try:\n data = pd.read_sql(sql=sql_str, con=self.conn_log, params=params)\n except (ValueError, DatabaseError):\n raise\n\n ret = data.merge(self.pvlist)\n\n return ret.sort_values(by=\"id\", ascending=False)\n\n def update_pvlist(self):\n # record_name, message, group, sub_group, sub_sub_group\n sql_str = SQL_PV_LIST.format(self.root)\n try:\n df = pd.read_sql(sql=sql_str, con=self.conn_alm)\n except (ValueError, DatabaseError):\n df = self.pvlist\n\n df[\"group\"] = (df[\"group\"] + df[\"sub_group\"].apply(self._sgstr)\n + df[\"sub_sub_group\"].apply(self._sgstr))\n\n self.pvlist = df.drop([\"sub_group\", \"sub_sub_group\"], axis=1)\n\n def _sgstr(self, sg_str):\n return \" / \" + sg_str if sg_str else \"\"\n","sub_path":"sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"474626987","text":"import os\nimport json\nimport copy\nimport random\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport gym\nfrom gym import spaces\n\nclass SimpleShapesEnv(gym.Env):\n def __init__(\n self,\n data_dir='/private/home/theop123/workspace/multi-agent-communication/gym_shapes/data/shapes/shapes_06_05_08:34',\n split='train.large', # train.med, train.small, train.tiny, val, test\n n_agents=4,\n obs_height=5,\n obs_width=5,\n step_size=2,\n max_steps=30,\n normalize=True,\n obs_coordinates=False,\n obs_time_step=True):\n \n self.n_agents = n_agents\n self.obs_height = obs_height\n self.obs_width = obs_width\n self.step_size = step_size\n self.max_steps = max_steps\n self.normalize = normalize\n self.obs_coordinates = obs_coordinates\n self.obs_time_step = obs_time_step\n \n # Load data\n self.data_root = os.path.join(data_dir, split)\n self.data = np.load(self.data_root + '.input.npy')[:, :, :, ::-1]\n \n self.n_imgs = self.data.shape[0]\n self.img_shape = self.data[0].shape\n self.img_obs_shape = (self.obs_height, self.obs_width, self.img_shape[2])\n \n self.max_row = self.img_shape[0] - self.obs_height\n self.max_col = 
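# Illustrative sketch: history_alarm_all above treats the message filter as a
# regular expression, so an invalid pattern raises re.error inside str.contains,
# which the record catches and maps to an empty result. Standalone version:
import re
import pandas as pd

df = pd.DataFrame({"message": ["pump trip", "valve open", "pump fault"]})
pattern = "pump.*"                            # imagine user-supplied input
try:
    hits = df[df["message"].str.contains(pattern)]
except re.error:
    hits = df.iloc[0:0]                       # empty frame on a bad pattern
print(len(hits))                              # 2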
self.img_shape[1] - self.obs_width \n \n # Gym parameters\n self._action_space = spaces.Discrete(5) \n \n len_obs = self.img_obs_shape[0] * self.img_obs_shape[1] * self.img_obs_shape[2]\n if self.obs_coordinates:\n len_obs += 2\n if self.obs_time_step:\n len_obs += 1\n self._observation_space = spaces.Box(low=0.0, high=1.0, shape=(len_obs,), dtype=np.float32)\n \n def reset(self, agent_positions=None, goal_position=None, img_idx=9):\n self.n_steps = 0\n \n self.image = copy.deepcopy(self.data[img_idx]) \n \n if goal_position is not None:\n self.goal = Agent(*goal_position)\n else:\n self.goal = Agent(random.randint(0, self.max_row), \n random.randint(0, self.max_col))\n \n self.image[self.goal.row:self.goal.row + self.obs_height,\n self.goal.col:self.goal.col + self.obs_width, :] = 255\n \n if agent_positions is not None:\n self.agents = [Agent(*pos) for pos in agent_positions]\n else:\n self.agents = [Agent(random.randint(0, self.max_row), \n random.randint(0, self.max_col)) \n for _ in range(self.n_agents)] \n \n joint_obs = self._get_joint_obs()\n \n return joint_obs\n\n def step(self, joint_action):\n self.n_steps += 1\n \n for agent, action in zip(self.agents, joint_action):\n self._move(agent, action)\n \n joint_obs = self._get_joint_obs()\n reward = sum([self._on_goal(agent) for agent in self.agents]) / self.n_agents\n done = (self.n_steps == self.max_steps)\n \n return joint_obs, reward, done, {}\n \n def render(self, save_dir=\"\"):\n visions = np.zeros(self.img_shape, dtype=int)\n positions = np.zeros(self.img_shape[:2], dtype=int)\n \n for agent in self.agents:\n r, c = agent.pos\n h, w = self.obs_height, self.obs_width\n visions[r:r + h, c:c + w, :] = self.image[r:r + h, c:c + w, :]\n positions[r:r + h, c:c + w] = 1\n \n fig = plt.figure()\n plt.subplot(1, 3, 1)\n plt.imshow(self.image)\n plt.subplot(1, 3, 2)\n plt.imshow(visions)\n plt.subplot(1, 3, 3)\n plt.imshow(positions)\n if save_dir is not \"\":\n plt.savefig(save_dir + str(self.n_steps))\n plt.show()\n \n def _get_joint_obs(self):\n # Image observations\n img_obs = np.zeros((self.n_agents, *self.img_obs_shape))\n \n for i, agent in enumerate(self.agents):\n img_obs[i] = self._get_img_obs(agent)\n \n joint_obs = img_obs.reshape(self.n_agents, -1)\n\n # Coordinate observations\n if self.obs_coordinates:\n coordinates = np.zeros((self.n_agents, 2))\n \n for i, agent in enumerate(self.agents):\n coordinates[i] = np.array(agent.pos)\n \n if self.normalize:\n coordinates[:, 0] = coordinates[:, 0] / self.max_row\n coordinates[:, 1] = coordinates[:, 1] / self.max_col\n \n joint_obs = np.concatenate((joint_obs, coordinates), axis=1)\n \n # Time step observation\n if self.obs_time_step:\n time_step = np.full((self.n_agents, 1), self.n_steps)\n \n if self.normalize:\n time_step = time_step / self.max_steps\n \n joint_obs = np.concatenate((joint_obs, time_step), axis=1)\n \n return joint_obs\n \n def _get_img_obs(self, agent):\n obs = self.image[agent.row:agent.row + self.obs_height, \n agent.col:agent.col + self.obs_width, :]\n \n if self.normalize:\n obs = obs / 255.0\n \n return obs\n \n def _on_goal(self, agent):\n row_on_goal = agent.row - int(self.obs_height / 2) <= self.goal.row <= agent.row + int(self.obs_height / 2)\n col_on_goal = agent.col - int(self.obs_width / 2) <= self.goal.col <= agent.col + int(self.obs_width / 2)\n return row_on_goal and col_on_goal\n \n def _move(self, agent, action):\n assert action in range(5)\n \n if action == 0: # Move North\n agent.row = max(agent.row - self.step_size, 0) \n elif action == 
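# render() above tests `save_dir is not ""`, an identity comparison that only
# behaves because CPython interns the empty string; Python 3.8+ emits a
# SyntaxWarning for it. Equality is the correct test:
def should_save(save_dir: str) -> bool:
    return save_dir != ""                     # not: save_dir is not ""

print(should_save(""))                        # False
print(should_save("runs/"))                   # True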
1: # Move East\n agent.col = min(agent.col + self.step_size, self.max_col) \n elif action == 2: # Move South\n agent.row = min(agent.row + self.step_size, self.max_row)\n elif action == 3: # Move West\n agent.col = max(agent.col - self.step_size, 0) \n else:\n pass\n \n def seed(self, seed):\n np.random.seed(seed)\n random.seed(seed)\n \n @property\n def observation_space(self):\n return self._observation_space\n\n @property\n def action_space(self):\n return self._action_space\n \nclass Agent:\n def __init__(self, row, col):\n self.row = row\n self.col = col\n \n @property\n def pos(self):\n return self.row, self.col","sub_path":"gym_shapes/simple_shapes_env.py","file_name":"simple_shapes_env.py","file_ext":"py","file_size_in_byte":6599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"428159539","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 1 19:38:03 2018\n\n@author: farismismar\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n\nimport scipy.special\nimport matplotlib.ticker as tick\n\nimport os\nos.chdir('/Users/farismismar/Desktop/E_Projects/UT Austin Ph.D. EE/Papers/Conferences to ASILOMAR/testbed')\n\nqfunc = lambda x: 0.5-0.5*scipy.special.erf(x/np.sqrt(2))\n\n# This has the Delta_gamma component\ndef ber_modified(sinr, delta=0, q=140):\n # sinr is in dB\n error = 1 - (1 - qfunc(np.sqrt(2.*(delta + 10**(sinr/10.))))) ** q # ** q\n return error\n\n\nsinr = np.linspace(0,18,100)\n#per = [ber(x) for x in sinr]\n\n#plt.figure(figsize=(7,5))\n#plt.rc('text', usetex=True)\n#plt.rc('font', family='serif')\n##plot_edge, = plt.semilogy(sinr, ber_modified(sinr, delta=0, q=1), linestyle='-', color='k', label='QPSK (one OFDM symbol)')\n#\n#ax = plt.gca()\n#ax.set_yscale('log')\n#ax.get_xaxis().get_major_formatter().labelOnlyBase = False\n#\n#plot_baseline, = plt.semilogy(sinr, ber_modified(2.*sinr), linestyle='-', color='b', label='Average user $i$ (FPA)')\n#\n## Note the improvement was computed from Fig 11 in the paper.\n##plot_vpc, = plt.semilogy(sinr, ber_modified(2.*sinr, 2/20+3*18/20), linestyle='-', color='r', label='Average user $i$ (Vanilla power control)')\n#plot_dpc, = plt.semilogy(sinr, ber_modified(2.*sinr, 3*18/20), linestyle='-', color='g', label='Average user $i$ (DQN)')\n#\n#plt.grid(True,which=\"both\")#,ls=\"-\", color='0.65')\n#\n#plt.xlabel('Average DL SINR (dB)')\n#plt.xlim(xmin=0,xmax=9)\n#plt.ylabel('$\\Xi$ PER')\n#plt.title('Voice Packet Error Lower Bound Plot vs SINR -- One VoLTE Frame')\n#\n#plt.legend(handles=[plot_baseline, plot_dpc]) #plot_vpc, plot_dpc])\n#plt.savefig('figures/packet_error.pdf', format=\"pdf\")\n#plt.show()\n#plt.close()\n\n\ntau = 20\n#vanilla_tpc = ['start', 0, 'network', 2, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'end']\n#vanilla_tpc = vanilla_tpc[1::2]\n#vanilla_tpc.insert(0, 0) # initial state\n\n\ndeepq_tpc = ['start', 0, 'network', 2, 'network', 3, 'network', 3, 'network', 0, 'network', 0, 'network', 3, 'network', 3, 'network', 4, 'network', 0, 'network', 3, 'network', 2, 'network', 0, 'network', 3, 'network', 3, 'network', 0, 'ABORTED']\n#deepq_tpc = ['start', 0, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 
3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'end'] # TPC 111\n\ndeepq_tpc = deepq_tpc[1::2]\ndeepq_tpc.insert(0, 0)\ndeepq_tpc = np.array(deepq_tpc)\n'''\nQ-learning\nEpisode 1018 finished after 19 timesteps (and epsilon = 0.01).\nAction progress: \n\nSINR progress: \n['start', 4.0, 3.0, 4.0, 2.0, 0.0, 1.0, 2.0, 3.0, 4.0, 2.0, 3.0, 1.0, 2.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 'end']\n\nDeep Q-Learning\nEpisode 1018 finished after 17 timesteps (and epsilon = 0.01).\nAction progress: \n['start', 0, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'network', 3, 'end']\nSINR progress: \n['start', 4.0, 2.0, 3.0, 4.0, 5.0, 3.0, 1.0, 2.0, 0.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 'end']\n'''\n\n# Convert actions to actual SINR changes\ndeepq_tpc[deepq_tpc == 0] = 0\ndeepq_tpc[deepq_tpc == 1] = -3\ndeepq_tpc[deepq_tpc == 2] = -1\ndeepq_tpc[deepq_tpc == 3] = 1\ndeepq_tpc[deepq_tpc == 4] = 3\n\ntime= np.arange(tau)\n\nfig, ax1 = plt.subplots(figsize=(7,5))\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nplt.grid(True)\nfpa = ax1.axhline(y=0, xmin=0, color=\"green\", linewidth=1.5, label='Power commands -- FPA')\n#vanilla, = ax1.step(np.arange(len(vanilla_tpc)), vanilla_tpc, color='b', linewidth=2.5, label='TPC -- Vanilla')#\ndeep, = ax1.step(np.arange(len(deepq_tpc)), deepq_tpc, color='b', label='Power commands -- DQN')#\nax1.set_xlabel('Transmit time interval (TTI)')\nax1.set_ylabel('Number of power commands')\nax1.set_yticks([-1,0,1,2,3])\nax1.xaxis.set_major_formatter(tick.FormatStrFormatter('%0g'))\nax1.xaxis.set_ticks(np.arange(0, tau + 1))\n#ax2 = ax1.twinx()\n#sinr, = plt.plot(time, SINR, linestyle='-', color='b', label='DL SINR')\nplt.title(r'Episode $\\tau = 111$ -- Power Commands ')\nplt.legend(handles=[fpa, deep])# vanilla, deep])\n#ax2.set_ylabel('Average DL SINR $\\gamma_{DL}$(dB)')\n\nplt.xlim(xmin=0,xmax=tau)\n\nfig.tight_layout()\nplt.savefig('figures/tpc.pdf', format=\"pdf\")\nplt.show(fig)\nplt.close(fig)\n\n\n\n#\n## Compute retainability\n#for time in [ 30e3, 60e3]:\n# for ret in [0.3448, .7082, .8389]:\n# tau = 20\n# N = np.ceil(time/tau)\n# print('{}, {:.2f}%, {:.2f}%'.format(time, 100 * ret, 100 * ret ** N))\n# \n\n\n# Plot MOS\ntau = 20 # ms\nT = tau #6 * 1e3 * tau # 120 sec\n\nsinr = np.linspace(-2,14,100)\n\n#def scale(per): \n #per_scaled = [0.005 if i < -2 else (0.5 * (-2 - i) / (-2 - max(per))) for i in per]\n# per_scaled = [0.005 + i for i in per_scaled] # 0.05% is the minimum value\n \n # return per_scaled\n\n\n\n# TODO:\n # Obtain the corrective/improvement factors from the PC plot\ndef payload(T, tau=20, NAF=0.5, Lamr=0, Lsid=61): # T and tau in ms, Lamr/Lsid is in bits\n Lsid = Lsid * tau / 8 # from bits to bytes per sec\n return NAF * Lamr * np.ceil(T/tau) + (1 - NAF) * Lsid * np.ceil(T/(8*tau))\n\nfig = plt.figure(figsize=(7,5))\nfor improvement in np.array([0, -1 * 2/20 +1 * 17/20 ]):#2/20+3*18/20,3*18/20]): \n result = []\n \n for framelength in np.arange(1000): # 1000 taus\n volterate = 23.85 # kbps\n NAF = 0.7\n \n # something is wrong here with ber..\n ber = [ber_modified(x, delta=improvement, q=7*framelength*tau) for x in sinr]\n per = np.round(np.log10(ber), 0) # we actually need the exponent.\n N = len(per)\n fer = sum([1 for x in per if x > -2]) / N #scale(per) # will get a frame error for this bit error 
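# The chained in-place remapping above (0->0, 1->-3, 2->-1, 3->1, 4->3) only
# works because each written value collides solely with keys already processed;
# reordering the assignments would corrupt the data. A lookup table makes the
# action-to-dB-step mapping order-independent (values taken from the record):
import numpy as np

actions = np.array([0, 2, 3, 3, 0, 0, 3, 3, 4, 0])
step_db = np.array([0, -3, -1, 1, 3])         # indexed by action id 0..4
print(step_db[actions])                       # [ 0 -1  1  1  0  0  1  1  3  0]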
rate\n Lamr = volterate * tau / 8 # in bytes per sec\n payld = payload(T=tau, tau=tau, NAF=NAF, Lamr=Lamr) # in bytes\n MOS = 4 - 0.7*np.log(fer) - 0.1 * np.log(tau * framelength * fer * N) # fer ok, second term: duration of lost packets in ms?\n\n MOS = min(4, MOS) #4 if MOS >= 4 else MOS # for i in MOS]\n MOS = max(1, MOS) #1 if MOS <= 1 else MOS# for i in MOS]\n # print(improvement, fer, MOS)\n result.append(MOS)\n \n #print('{} {:.0f}% {:.1f}'.format(volterate, 100 * NAF, payld))\n if (improvement == 0):\n str = 'FPA'\n elif (improvement == -1 * 2/20 +1 * 17/20):\n str = 'Closed-loop'\n plt.plot(result, label='AMR = {} kbps, AF = {}, Power control = {}'.format(volterate, NAF, str))\n \n \nplt.legend()\nplt.title('Experimental mean opinion score vs packet error rate')\nplt.xlabel('Packet error rate')\nplt.ylabel('MOS')\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nplt.xlim(xmin=0,xmax=250)\n\nplt.grid(True)\n\n# Fix the x axis to show packet error rates \nax = plt.gca()\n#ax.set_xticks([0,200,400,600,800,1000])\nax.set_xticks([0,50,100,150,200,250])\n#ax.set_xticklabels([0,0.1,0.2,0.3,0.4,0.5])\nax.set_xticklabels([0,0.05,0.10,0.15,0.2,0.25])\n\n\nplt.savefig('figures/mos.pdf', format='pdf')\nplt.show()\nplt.close(fig)\n\n\n\n\n####################################\n# Plotting the episodes on one graph\n####################################\nSINR_MIN = -3 #dB \nbaseline_SINR_dB = 4.0\nfinal_SINR_dB = baseline_SINR_dB + 2.0 # this is the improvement\nmax_timesteps_per_episode = 20\nmax_episodes_to_run = 725\n\nepisode_index = 724 #379 #825\n\n\n#score_progress_fpa = [4.0,4.0,4.0,4.0,4.0,4.0,4.0,1.0,1.0,1.0,-3,-3.0,-3.0,-3,-3,-3,-3,-0.0,-0.0,-0.0,5.0]\n#score_progress_cl = [4.0,3.0,4.0,0.0,1.0,-3.0,-2.0,-1.0,0.0,1.0,2.0,3.0,4.0,0.0,1.0,-1.0,0.0,4.0,5.0,6.0]\nscore_progress_cl = [4.0,3.0,4.0,5.0,1.0,-1.0,-3.0,-2.0,-1.0,-3.0,-2.0,-1.0,0.0,1.0,2.0,3.0,3.0,1.0,6.0]\nscore_progress_fpa = [4.0,4.0,2.0,-3,-3.0,-3.0,-3.0,-3.0,-3,-3,-3,-3.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,-0.0,-0.0]\n#\n\n# Do some nice plotting here\nfig = plt.figure()\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nplt.xlabel('Transmit Time Intervals (1 ms)')\n\n# Only integers \nax = fig.gca()\nax.xaxis.set_major_formatter(tick.FormatStrFormatter('%0g'))\nax.xaxis.set_ticks(np.arange(0, max_timesteps_per_episode + 1))\n\nax.set_autoscaley_on(False)\n\nplt.plot(score_progress_fpa, marker='o', linestyle=':', color='b', label='FPA')\nplt.plot(score_progress_cl, marker='D', linestyle='-', color='k', label='CL')\n\nplt.xlim(xmin=0, xmax=max_timesteps_per_episode)\n\nplt.axhline(y=SINR_MIN, xmin=0, color=\"red\", linewidth=1.5)\nplt.axhline(y=final_SINR_dB, xmin=0, color=\"green\", linewidth=1.5)\nplt.ylabel('Average DL Received SINR (dB)')\nplt.title('Episode {0} / {1}'.format(episode_index + 1, max_episodes_to_run))\nplt.grid(True)\nplt.ylim(-8,10)\nplt.legend()\n\nplt.savefig('figures/episode_{}_output.pdf'.format(episode_index + 1), format=\"pdf\")\nplt.show(block=True)\nplt.close(fig)\n\n","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":9240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"376127097","text":"from unittest.mock import patch\n\nfrom urllib.parse import urlencode\n\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom realtime.helpers import ProducerHelper\n\n\nclass AnalyticsViewTestCase(TestCase):\n\n def setUp(self):\n self.timestamp = '1543837516'\n self.url = 
reverse('analytics')\n self.params = {'action': 'click', 'timestamp': self.timestamp, 'user': 'test'}\n\n def test_should_return_400_for_missing_post_data(self):\n for key in self.params.keys():\n params = dict(self.params)\n params.pop(key)\n response = self.client.post('%s?%s' % (self.url, urlencode(params)))\n self.assertEqual(response.status_code, 400)\n\n def test_should_work_for_complete_post_data(self):\n params = {'user': 'test', 'action': 'impression', 'timestamp': self.timestamp}\n response = self.client.post('%s?%s' % (self.url, urlencode(params)))\n self.assertEqual(response.status_code, 200)\n\n def test_should_fail_for_wrong_timestamp(self):\n params = dict(self.params)\n params['timestamp'] = self.timestamp * 4\n response = self.client.post('%s?%s' % (self.url, urlencode(params)))\n self.assertEqual(response.status_code, 400)\n\n @patch.object(ProducerHelper, 'get_instance')\n def test_should_write_incoming_post_data_to_kafka(self, mocked_producer):\n \"\"\"\n In a successful scenario, the received data should be passed over to Apache Kafka\n Also producer should be flushed to make sure the data is sent.\n For this simpel scenario we register no success/failure callbacks.\n :param mocked_producer:\n :return:\n \"\"\"\n response = self.client.post('%s?%s' % (self.url, urlencode(self.params)))\n self.assertEqual(response.status_code, 200)\n\n send_called = False\n flush_called = False\n for call in mocked_producer.mock_calls:\n if call[0] == '().send':\n send_called = True\n self.assertTupleEqual(call[1], ('interactions', self.params))\n if call[0] == '().flush':\n flush_called = True\n\n self.assertTrue(send_called)\n self.assertTrue(flush_called)\n","sub_path":"realtime/api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"594752371","text":"import os\n\nimport pandas as pd\n\nhere = os.path.abspath(os.path.dirname(__file__))\nzarr_col_pangeo_cmip6 = 'https://storage.googleapis.com/cmip6/pangeo-cmip6.json'\ncdf_col_sample_cmip6 = os.path.join(here, 'sample-collections/cmip6-netcdf.json')\nmulti_variable_col = os.path.join(here, 'sample-collections/multi-variable-collection.json')\ncdf_col_sample_cmip5 = os.path.join(here, 'sample-collections/cmip5-netcdf.json')\ncdf_col_sample_cesmle = os.path.join(here, 'sample-collections/cesm1-lens-netcdf.json')\ncatalog_dict_records = os.path.join(here, 'sample-collections/catalog-dict-records.json')\nzarr_col_aws_cesm = (\n 'https://raw.githubusercontent.com/NCAR/cesm-lens-aws/master/intake-catalogs/aws-cesm1-le.json'\n)\n\n\nsample_df = pd.DataFrame(\n [\n {\n 'component': 'atm',\n 'frequency': 'daily',\n 'experiment': '20C',\n 'variable': 'FLNS',\n 'path': 's3://ncar-cesm-lens/atm/daily/cesmLE-20C-FLNS.zarr',\n 'format': 'zarr',\n },\n {\n 'component': 'atm',\n 'frequency': 'daily',\n 'experiment': '20C',\n 'variable': 'FLNSC',\n 'path': 's3://ncar-cesm-lens/atm/daily/cesmLE-20C-FLNSC.zarr',\n 'format': 'zarr',\n },\n ]\n)\n\nsample_esmcol_data = {\n 'esmcat_version': '0.1.0',\n 'id': 'aws-cesm1-le',\n 'description': '',\n 'catalog_file': '',\n 'attributes': [],\n 'assets': {'column_name': 'path', 'format': 'zarr'},\n 'aggregation_control': {\n 'variable_column_name': 'variable',\n 'groupby_attrs': ['component', 'experiment', 'frequency'],\n 'aggregations': [\n {'type': 'union', 'attribute_name': 'variable', 'options': {'compat': 'override'}}\n ],\n },\n}\n\nsample_esmcol_data_without_agg = {\n 
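# Illustrative sketch of the mock-inspection pattern in the test above: each
# element of mock_calls unpacks as (name, args, kwargs), which is what the
# call[0]/call[1] indexing relies on:
from unittest.mock import MagicMock

producer = MagicMock()
producer.send('interactions', {'user': 'test'})
producer.flush()
for name, args, kwargs in producer.mock_calls:
    print(name, args)
# send ('interactions', {'user': 'test'})
# flush ()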
'esmcat_version': '0.1.0',\n 'id': 'aws-cesm1-le',\n 'description': '',\n 'catalog_file': '',\n 'attributes': [],\n 'assets': {'column_name': 'path', 'format': 'zarr'},\n}\n","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"537051104","text":"import tensorflow as tf\nimport numpy as np\n\nseq_data = [['word', '단어'], ['word','나무'],\n ['game', '놀이'], ['girl','소녀'],\n ['kiss', '키스'], ['love','사랑']]\n\nchar_arr= [c for c in 'SEPabcdefghijklmnopqrstuvwxyz단어나무놀이소녀키스사랑']\nnum_dic = {n: i for i, n in enumerate(char_arr)}\ndic_len = len(num_dic)\n\ndef make_batch(seq_data):\n input_batch=[]\n output_batch=[]\n target_batch=[]\n\n for seq in seq_data:\n input = [num_dic[n] for n in seq[0]]\n output = [num_dic[n] for n in ('S'+seq[1])]\n target = [num_dic[n] for n in (seq[1] + 'E')]\n input_batch.append(np.eye(dic_len)[input]) # convert index lists to one-hot rows\n output_batch.append(np.eye(dic_len)[output])\n target_batch.append(target)\n # print(input_batch)\n # print(output_batch)\n # print(target_batch)\n return input_batch, output_batch, target_batch\n\nlearning_rate = 0.01\nhidden_size = 128\ninput_dim = dic_len\nn_class = dic_len\n\nenc_input = tf.placeholder(dtype=tf.float32, shape=[None, None, input_dim])\ndec_input = tf.placeholder(dtype=tf.float32, shape=[None, None, input_dim])\ntargets = tf.placeholder(dtype=tf.int64,shape = [None, None])\n\nwith tf.variable_scope('encode'):\n enc_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size)\n enc_cell = tf.nn.rnn_cell.DropoutWrapper(enc_cell, output_keep_prob=0.5)\n\n outputs, enc_states = tf.nn.dynamic_rnn(enc_cell, enc_input, dtype=tf.float32)\n\nmake_batch(seq_data)\n\n","sub_path":"day3/seq2seqEx.py","file_name":"seq2seqEx.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"574282012","text":"import re\nimport math\nfrom collections import Counter\nimport logging\n\nimport psycopg2 as psg\nfrom psycopg2 import sql\nfrom mrcc import CCJob\nfrom bs4 import BeautifulSoup\nfrom mrjob.util import log_to_stream\n\nfrom sql_queries import DOCUMENT_CANONICAL_TYPES\nfrom sql_queries import FILTER_QUERIES\nfrom sql_queries import DOCUMENT_ENTITY_CANONICAL_TYPES\nfrom sql_queries import WORD_IDF\n\nLOG = logging.getLogger('ProcessQuery')\nlog_to_stream(format=\"%(asctime)s %(levelname)s %(name)s: %(message)s\", name='ProcessQuery')\n\n\nclass ProcessQuery(CCJob):\n \"\"\"\n \tProcess batch of queries\n \"\"\"\n def mapper_init(self):\n self.conn = psg.connect('dbname=qa_app \\\n user=akshanshgupta \\\n password=Everest \\\n host=127.0.0.1 \\\n port=5432')\n self.cur = self.conn.cursor()\n from myconfig import WINDOW_SIZE\n self.WINDOW_SIZE = WINDOW_SIZE\n\n def get_doc_canonical_types(self, document_id):\n doc_entity_types_query = self.cur.execute(DOCUMENT_CANONICAL_TYPES.\\\n format(document_id=document_id))\n doc_entity_types = self.cur.fetchall()\n return tuple([result[1] for result in doc_entity_types])\n\n def get_doc_queries(self, doc_canonical_types):\n doc_canonical_types = sql.Literal(doc_canonical_types)\n doc_queries_query = self.cur.execute(sql.SQL(FILTER_QUERIES).\\\n format(types=doc_canonical_types).as_string(self.conn))\n doc_queries = self.cur.fetchall()\n return doc_queries\n\n def get_idf_score(self, words):\n words = sql.Literal(tuple(words))\n idf_scores_query = self.cur.execute(sql.SQL(WORD_IDF).\\\n 
format(words=words).as_string(self.conn))\n idf_scores = self.cur.fetchall()\n return sum([float(result[1]) for result in idf_scores])\n\n def get_text_snippet_query(self, query_id, S, target_type, document_id,\n doc_entity_canonical_types, doc_html_text):\n S = S.split(',')\n for item in doc_entity_canonical_types:\n if item[9] == target_type:\n start_byte = item[3] - self.WINDOW_SIZE\n end_byte = item[4] + self.WINDOW_SIZE\n window_text = doc_html_text[start_byte:end_byte]\n window_text = re.sub('[^A-Za-z0-9 ]+', ' ', window_text)\n window_words = window_text.lower().split()\n matching_words = list(set(window_words) & set(S))\n if not matching_words == []:\n total_score = self.get_idf_score(matching_words)\n yield query_id, (document_id, window_text, total_score)\n\n\n def clean_text(self, html):\n soup = BeautifulSoup(html) # create a new bs4 object from the html data loaded\n for script in soup([\"script\", \"style\"]): # remove all javascript and stylesheet code\n script.extract()\n # get text\n text = soup.get_text()\n # break into lines and remove leading and trailing space on each\n lines = (line.strip() for line in text.splitlines())\n # break multi-headlines into a line each\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n # drop blank lines\n text = '\\n'.join(chunk for chunk in chunks if chunk)\n return text\n\n def process_record(self, record):\n if record['WARC-Type'] != 'response':\n # we're only interested in the HTTP responses\n return\n try:\n document_id = record['WARC-TREC-ID']\n doc_canonical_types = self.get_doc_canonical_types(document_id)\n if not doc_canonical_types:\n return\n relevant_queries = self.get_doc_queries(doc_canonical_types)\n if not relevant_queries:\n return\n doc_entity_canonical_types_query = self.cur.execute(DOCUMENT_ENTITY_CANONICAL_TYPES.\\\n format(document_id=document_id))\n doc_entity_canonical_types = self.cur.fetchall()\n doc_encoding = doc_entity_canonical_types[0][1]\n \n if doc_encoding != 'UTF-8':\n try:\n doc_html_text = record.payload.decode(doc_encoding).encode('utf-8')\n except:\n LOG.info('Could not decode document %s', document_id)\n return \n else:\n doc_html_text = record.payload\n\n for query_id, _, S, target_type in relevant_queries:\n for key, value in self.get_text_snippet_query(query_id, S, target_type, document_id,\n doc_entity_canonical_types, doc_html_text):\n yield key, value\n except:\n \treturn\n\n def combiner_init(self):\n from myconfig import MIN_SCORE\n self.MIN_SCORE = MIN_SCORE\n\n def combiner(self, key, values):\n \"\"\"\n Sums up count for each mapper\n \"\"\"\n for val in values:\n if val[2] > self.MIN_SCORE:\n yield key, val\n \n def reducer(self, key, values):\n \"\"\"\n Ouputs IDF of each word using number of documents\n as a constant\n \"\"\"\n values = sorted(values, key=lambda x: x[2], reverse=True)\n for val in values:\n yield (key, val[0]), (val[1], val[2])\n\n\nif __name__ == '__main__':\n ProcessQuery.run()","sub_path":"process_queries.py","file_name":"process_queries.py","file_ext":"py","file_size_in_byte":5628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"376667193","text":"__author__ = 'ratijha'\n\nimport threading\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import filedialog\nimport logging\n\nimport sys\nfrom tkinter import messagebox\nfrom lib.Vxworks import Vxworks\nfrom lib.telnet_ssh import ssh\nimport time\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\nhandler = 
logging.FileHandler('out.log')\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlog.addHandler(handler)\n\n\nclass ProvideException(object):\n    def __init__(self, func):\n        self._func = func\n\n    def __call__(self, *args):\n\n        try:\n            return self._func(*args)\n\n        except Exception as e:\n            log.debug('Exception was thrown: %s', str(e))\n            messagebox.showerror(\"Error\", \"Application Crashed\")\n            # log.debug(traceback.log.debug_stack())\n            # sys.exit()\n            # Optionally raise your own exceptions, popups etc\n\nclass Vxworks_gui:\n\n    def __init__(self, session, widget):\n        self.session = session\n        self.widget =widget\n        self.index = 0\n        self.flag = 0\n        self.telnet = Vxworks()\n        self.ssh = ssh()\n        self.ip_entry = []\n        self.sec_btn = []\n        self.input = []\n        self.nonsec_btn= []\n        self.debug_btn = []\n        self.secport= []\n        self.nonsecport = []\n        self.debug_port = []\n        self.output=[]\n        self.save_log = []\n        self.cmd_entry = []\n        self.login = []\n        self.Notimes = []\n        self.browse_btn = []\n        self.WT= []\n        self.vxworks_content(self.session, self.widget)\n\n    def vxworks_content(self,tab_label, note_tab):\n        v = tk.StringVar()\n        v.set(1)\n        newvalue = tk.StringVar\n        ttk.Label(tab_label, text=\"IP Address\", style=\"BW.TLabel\", relief=\"groove\" ).grid(row=0, padx = 5, pady = 5)\n        ip_entry = ttk.Entry(tab_label, textvariable=newvalue)\n        ip_entry.grid(column = 1, row = 0,sticky=tk.W, padx = 5, pady = 5)\n        self.ip_entry.append(ip_entry)\n        frm = ttk.Frame(tab_label)\n        frm.grid(column=2, row=0)\n        sec_btn = ttk.Radiobutton(frm,text= \"Sec\", variable=v, value=0)\n        sec_btn.grid(row =0, column =0, padx=5, pady=3)\n        self.sec_btn.append(sec_btn)\n        nonsec_btn = ttk.Radiobutton(frm, text=\"Non-Sec\", variable=v, value =1)\n        debug_btn = ttk.Radiobutton(frm, text=\"Debug\", variable=v, value =2)\n        debug_btn.grid(row=0, column=2,padx=5,pady=3)\n        self.debug_btn.append(debug_btn)\n        disconnect_bttn = ttk.Button(tab_label, text=\"Disconnect\")\n        disconnect_bttn.bind('',lambda event, index=self.index :self.onButtonClick(event, index))\n        disconnect_bttn.grid(column=3, row=1, sticky=tk.W, padx = 5 , pady = 5)\n        nonsec_btn.grid(row=0, column=1,padx=5,pady=3)\n        self.nonsec_btn.append(nonsec_btn)\n        secport = tk.Text(frm, height=1, width=4)\n        secport.grid(row=1,column=0, padx=5, pady=3)\n        secport.insert(\"end\",\"22\")\n        self.secport.append(secport)\n        nonsecport = tk.Text(frm, height=1, width=4)\n        nonsecport.grid(row=1,column=1,padx=5, pady=3)\n        nonsecport.insert(\"end\",\"23\")\n        self.nonsecport.append(nonsecport)\n        debug_port = tk.Text(frm, height=1, width=4)\n        debug_port.grid(row=1,column=2,padx=5, pady=3)\n        self.debug_port.append(debug_port)\n        connect_bttn = ttk.Button(tab_label, text=\"Connect\")\n        connect_bttn.bind('',lambda event, index=self.index :self.onButtonClick(event, index))\n        connect_bttn.grid(column=3, row= 0, sticky=tk.W, padx = 5 , pady = 5)\n        ttk.Label(tab_label, text=\"Command\", relief= tk.GROOVE).grid(row=1, padx = 5, pady =5)\n        cmd_entry = tk.Text(tab_label, height = 5, width=20)\n        cmd_entry.grid(column=1,row=1, sticky=tk.W, padx = 5, pady =5)\n        scroll = tk.Scrollbar(tab_label)\n        scroll.grid(row=1, column=1, sticky=tk.E)\n        cmd_entry.config(yscrollcommand=scroll.set)\n        self.cmd_entry.append(cmd_entry)\n        scroll.config(command=cmd_entry.yview)\n        login = ttk.Checkbutton(tab_label, 
text=\"Login/Logout\")\n login.grid(row=2, column=0)\n self.login.append(login)\n frm = ttk.Frame(tab_label)\n frm.grid(row=2,column=1, columnspan=2, sticky=tk.W)\n ttk.Label(frm,text=\"No. of Times\",relief=tk.GROOVE).grid(row=0, column=0, padx=5, pady=3)\n Notimes = tk.Text(frm, height =1, width=3)\n Notimes.grid(row=0,column=1, padx=5, pady=3)\n Notimes.insert('end','5')\n self.Notimes.append(Notimes)\n ttk.Label(frm,text=\"Wait-Time\", relief=tk.GROOVE).grid(row=0, column=2, padx=5, pady=3)\n WT = tk.Text(frm,height=1, width=3)\n WT.grid(row=0,column=3, padx=5,pady=3)\n WT.insert('end','5')\n self.WT.append(WT)\n cmd_button = ttk.Button(tab_label, text=\"Send\")\n cmd_button.bind('',lambda event, index=self.index : self.onButtonClick(event, index))\n #cmd_button.bind('', lambda event, index=self.index : self.onButtonClick(event, index) )\n cmd_button.grid(row =3,column=1, sticky=tk.E, padx = 5, pady=5)\n ttk.Label(tab_label, text=\"Output\",relief=tk.GROOVE).grid(row=4,column=0, padx = 5, pady=5, sticky = tk.W)\n output = tk.Text(tab_label, width=60, height=40)\n scroll = tk.Scrollbar(tab_label, command=output.yview)\n scroll.grid(row=5,sticky=tk.W)\n output.config(yscrollcommand=scroll.set)\n output.place(x = 0, y=240)\n self.output.append(output)\n frm1=ttk.Frame(tab_label)\n frm1.grid(row=5, column=7, sticky=tk.E)\n ttk.Label(frm1, text=\"Input Commands\", relief=tk.GROOVE).grid(row =0, column=0, padx=5,pady=5, sticky = tk.E)\n input = tk.Text(frm1,height=20,width=30)\n input.grid(row=0,column=1, sticky=tk.E)\n self.input.append(input)\n save_log = ttk.Checkbutton(frm1,text=\"Save Log\")\n save_log.grid(row = 1, column=0,padx=5,pady=5)\n self.save_log.append(save_log)\n browse_btn = ttk.Button(frm1, text=\"Save\")\n browse_btn.bind('', lambda event, index=self.index : self.browse(event, index))\n browse_btn.grid(row=1, column =1, sticky = tk.W, padx=5,pady=5)\n self.browse_btn.append(browse_btn)\n stop_btn = ttk.Button(frm1, text=\"STOP\")\n stop_btn.grid(row=2, column=0)\n stop_btn.bind('',self.quit)\n new_sess = ttk.Frame(note_tab)\n note_tab.add(new_sess, text=\"+\")\n self.index += 1\n\n # @ProvideException\n def onButtonClick(self, event, index):\n if event.widget['text'] == \"Connect\":\n def callback():\n log.debug(index)\n log.debug(self.ip_entry)\n ip = self.ip_entry[index].get()\n log.debug(ip)\n # port = 2361\n if 'selected' in self.sec_btn[index].state():\n port = self.secport[index].get('1.0','end')\n res=self.ssh.connect(ip,int(port))\n elif 'selected' in self.nonsec_btn[index].state():\n port = self.nonsecport[index].get('1.0','end')\n res = self.telnet.connect(ip, int(port))\n elif 'selected' in self.debug_btn[index].state():\n port = self.debug_port[index].get('1.0','end')\n res = self.telnet.connect(ip, int(port))\n log.debug(res)\n if res is None:\n messagebox.showerror(\"Connection Error\" ,\"There is one Existing Connection for ip :{0}\".format(ip))\n else:\n self.output[index].insert('end', res)\n t = threading.Thread(target=callback)\n t.start()\n elif event.widget['text'] == \"Disconnect\":\n def callback():\n ip = self.ip_entry[index].get()\n log.debug(ip)\n # port = 2361\n if 'selected' in self.sec_btn[index].state():\n port = self.secport[index].get('1.0','end')\n res = self.ssh.disconnect()\n elif 'selected' in self.nonsec_btn[index].state():\n port = self.nonsecport[index].get('1.0','end')\n res = self.telnet.disconnect()\n # res = self.tl1.connect(ip, int(port))\n # self.output[index].insert('end', res)\n msg = \"{0} disconnected\".format(ip)\n 
messagebox.showinfo('Disconnected',msg)\n t = threading.Thread(target=callback)\n t.start()\n elif event.widget['text'] == \"Send\":\n def callback():\n cmdlist = []\n cmd = self.cmd_entry[index].get('1.0','end')\n cmd = cmd.rstrip()\n login = self.login[index].state()\n cmdlist = cmd.split(';')\n Notimes = int(self.Notimes[index].get('1.0','end'))\n log.debug(Notimes)\n if Notimes != 0:\n loop = Notimes -1\n\n i = 1\n while True:\n #for i in range(int(self.Notimes[index].get('1.0','end'))):\n\n for cmd in cmdlist:\n ip = self.ip_entry[index].get()\n if cmd != '':\n self.input[index].insert('end',str(i) + \" \" + cmd)\n i += 1\n self.input[index].insert('end','\\n')\n if 'selected' in self.sec_btn[index].state():\n port = self.secport[index].get('1.0','end')\n res = self.ssh.send(cmd, ip=ip)\n elif 'selected' in self.nonsec_btn[index].state():\n res = self.telnet.send(cmd)\n else:\n res = self.telnet.send(cmd)\n self.output[index].insert('end', res)\n if 'selected' in login:\n ip = self.ip_entry[index].get()\n log.debug(self.sec_btn[index].state())\n if 'selected' in self.sec_btn[index].state():\n self.ssh.disconnect()\n port = self.secport[index].get('1.0','end')\n res = self.ssh.connect(ip,int(port))\n elif 'selected' in self.nonsec_btn[index].state():\n self.telnet.disconnect()\n port = self.nonsecport[index].get('1.0','end')\n res = self.telnet.connect(ip, int(port))\n else:\n self.telnet.disconnect()\n port = self.nonsecport[index].get('1.0','end')\n res = self.telnet.connect(ip, int(port))\n self.output[index].insert('end', res)\n # res = self.telnet.send(cmd)\n # self.output[index].insert('end', res)\n # if 'selected' in login:\n # self.telnet.disconnect()\n # ip = self.ip_entry[index].get()\n # #log.debug(self.sec_btn[index].state())\n # # if 'selected' in self.sec_btn[index].state():\n # # port = self.secport[index].get('1.0','end')\n # # elif 'selected' in self.nonsec_btn[index].state() :\n # # port = self.nonsecport[index].get('1.0','end')\n # res = self.telnet.connect(ip)\n # self.output[index].insert('end', res)\n if self.WT[index].get('1.0','end') != \"\":\n time.sleep(int(self.WT[index].get('1.0','end')))\n if self.flag == 1:\n break\n if Notimes != 0:\n log.debug(\"Inside IF\")\n loop = loop - 1\n if loop < 0:\n break\n #try:\n send_thread = threading.Thread(target=callback)\n send_thread.start()\n #except KeyboardInterrupt as e:\n # log.debug(e.reason)\n # self.flag = 1\n # @ProvideException\n def browse(self, event, index):\n if event.widget['text'] == \"Save\":\n def callback():\n if 'selected' in self.save_log[index].state():\n file = filedialog.askopenfilename(filetypes=[(\"Text files\",\"*.txt\")])\n #file1 = filedialog.Open(file)\n if file:\n with open(file,\"w\") as f:\n f.write(self.output[index].get('1.0','end'))\n #file = filedialog.asksaveasfilename(filetypes=[(\"Text files\",\"*.txt\")])\n #filename = filedialog.askopenfile()\n #log.debug(filename.read())\n t = threading.Thread(target=callback)\n t.start()\n\n def quit(self, event):\n log.debug(\"you pressed control-c\")\n self.flag = 1","sub_path":"Vxworks_gui.py","file_name":"Vxworks_gui.py","file_ext":"py","file_size_in_byte":13647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"108559852","text":"class Overloaded:\n def __init__(self, val):\n self.value = val\n def add(self, other):\n new_object = Overloaded(self.value + other.value)\n return new_object\n\nx = Overloaded(3)\ny = Overloaded(4)\n\nz = 
x.add(y)\nprint(z.value)\n","sub_path":"oop/overloaded.py","file_name":"overloaded.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"206704721","text":"import socket\nimport sys\nimport select\nimport pickle\nimport pdb\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\nfrom Crypto.Hash import SHA256\nfrom Crypto.Signature import PKCS1_v1_5\nfrom Crypto.PublicKey import RSA\n\ncid = sys.argv[1]\nSERVER = sys.argv[2]\nPORT = sys.argv[3]\nBYZANTINE = sys.argv[4]\n\n\nkey = False\nwith open (\"keys.pem\", \"r\") as myfile:\n    key = RSA.importKey(myfile.read())\n\npublic_key = key.publickey()\nverifier = PKCS1_v1_5.new(public_key)\n\nclient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nintro = \"REGISTER_REQUEST \"+str(cid)\nintro = intro.encode()\n\nclient.sendto(intro, (SERVER, int(PORT)))\n\nresponse, address = client.recvfrom(1024)\nneighbors = pickle.loads(response)\nprint(neighbors)\n\nmessages = []\nsigns = []\n\nSENT = False\n\n#verified = verifier.verify(digest, sig)\n\nwhile 1:\n\t\tinputready, outputready, exceptrdy = select.select([0, client], [],[], 0.5)\n\t\tfor i in inputready:\n\t\t\tdata, address = client.recvfrom(1024)\n\t\t\tport = address[1]\n\t\t\tunpickled = pickle.loads(data)\n\t\t\tprint(messages)\n\t\t\tmessages.append(unpickled[0].decode())\n\t\t\tsigns.append(unpickled[1])\n\t\t\tif len(messages) >= 3:\n\t\t\t\tif len(set(messages)) == 1:\n\t\t\t\t\tprint(\"committed: \", messages[0])\n\t\t\t\telse:\n\t\t\t\t\tsender = []\n\t\t\t\t\tfor i in range(len(signs)):\n\t\t\t\t\t\tdigest = SHA256.new()\n\t\t\t\t\t\tdigest.update(messages[i].encode())\n\t\t\t\t\t\tsender.append(verifier.verify(digest, signs[i]))\n\t\t\t\t\tif all(flag == True for flag in sender) == True:\n\t\t\t\t\t\tprint(\"no commit\")\n\t\t\t\t\telse: \n\t\t\t\t\t\tsigned_messages = [messages[m] for m in range(len(sender)) if sender[m] == True]\n\t\t\t\t\t\tif len(set(signed_messages)) == 1:\n\t\t\t\t\t\t\tprint(\"committed: \", signed_messages[0])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"no commit\")\n\n\t\tif not (inputready or outputready or exceptrdy):\n\t\t\tif BYZANTINE == 'n' and not SENT:\n\t\t\t\tfor i in neighbors:\n\t\t\t\t\tclient.sendto(data, (i[3], i[2]))\n\t\t\t\tSENT = True\n\n\t\t\tif BYZANTINE == 'y' and not SENT:\n\t\t\t\tmatch_message = str(\"11\").encode()\n\t\t\t\tdigest = SHA256.new()\n\t\t\t\tdigest.update(match_message)\n\t\t\t\tdata_string = pickle.dumps((match_message, signs[0]))\n\t\t\t\tfor i in neighbors:\n\t\t\t\t\tclient.sendto(data_string, (i[3], i[2]))\n\t\t\t\tSENT = True\n\t\t\t\t\t\n\n\n\n\n\n\n\n\n\t\t\t# \t\tm = 0\n\t\t\t# \t\twhile m in range(len(messages)):\n\t\t\t# \t\t\tmatch_message = messages[m].encode()\n\t\t\t# \t\t\tlength = 16 - (len(match_message) % 16)\n\t\t\t# \t\t\tmatch_message += bytes([length])*length\n\t\t\t# \t\t\tfor i in neighbors:\n\t\t\t# \t\t\t\tcipher = AES.new(keys[int(cid)][i[0]])\n\t\t\t# \t\t\t\tciphertext = cipher.encrypt(match_message)\n\t\t\t# \t\t\t\tclient.sendto(ciphertext, (i[3], i[2]))\n\t\t\t# \t\t\tmessages.pop(m)\n\t\t\t# \t\t\tm+=1\n\t\t\t\t\n\t\t\t# \tif BYZANTINE == 'y':\n\t\t\t# \t\tm = 0\n\t\t\t# \t\twhile m in range(len(messages)):\n\t\t\t# \t\t\tmatch_message = \"11\".encode()\n\t\t\t# \t\t\tlength = 16 - (len(match_message) % 16)\n\t\t\t# \t\t\tmatch_message += bytes([length])*length\n\t\t\t# \t\t\tfor i in neighbors:\n\t\t\t# \t\t\t\tcipher = AES.new(keys[int(cid)][i[0]])\n\t\t\t# 
\t\t\t\tciphertext = cipher.encrypt(match_message)\n\t\t\t# \t\t\t\tclient.sendto(ciphertext, (i[3], i[2]))\n\t\t\t# \t\t\tmessages.pop(m)\n\t\t\t# \t\t\tm+=1\n\t\t\t# \tSTAGE += 1\n\t\t\t# print(\"DONE:\", messages)\n\n\n\n\n\t\t\t# if STAGE == 'PRE':\n\t\t\t# \tmatch_message = \"PREP10\".encode()\n\t\t\t# \tif BYZANTINE == 'n':\n\t\t\t# \t\tlength = 16 - (len(match_message) % 16)\n\t\t\t# \t\tmatch_message += bytes([length])*length\n\t\t\t# \t\tfor i in neighbors:\n\t\t\t# \t\t\tcipher = AES.new(keys[int(cid)][i[0]])\n\t\t\t# \t\t\tciphertext = cipher.encrypt(match_message)\n\t\t\t# \t\t\tclient.sendto(ciphertext, (i[3], i[2]))\n\n\t\t\t# \t\tcipher = AES.new(keys[int(cid)]['server'])\n\t\t\t# \t\tciphertext = cipher.encrypt(match_message)\n\t\t\t# \t\tclient.sendto(ciphertext, (SERVER, int(PORT)))\n\n\t\t\t# \tif BYZANTINE == 'y':\n\t\t\t# \t\tmatch = \"PREP11\".encode()\n\t\t\t# \t\tlength = 16 - (len(match) % 16)\n\t\t\t# \t\tmatch += bytes([length])*length\n\t\t\t# \t\tfor i in neighbors:\n\t\t\t# \t\t\tcipher = AES.new(keys[int(cid)][i[0]])\n\t\t\t# \t\t\tciphertext = cipher.encrypt(match)\n\t\t\t# \t\t\tclient.sendto(ciphertext, (i[3], i[2]))\n\n\t\t\t# \t\tcipher = AES.new(keys[int(cid)]['server'])\n\t\t\t# \t\tciphertext = cipher.encrypt(match)\n\t\t\t# \t\tclient.sendto(ciphertext, (SERVER, int(PORT)))\n\t\t\t\t\n\t\t\t# \tSTAGE = 'PREP'\n\t\t\t# \tprint(\"Sent prepare\")\n\t\t\t\t\t\t\n\n\t\t\t# elif STAGE == 'PREP':\n\t\t\t# \tmatch_message = \"PREP10\"\n\t\t\t# \tif len([match for match in messages if match == match_message]) >= 1:\n\t\t\t# \t\tmatch_message = \"COMMIT10\".encode()\n\t\t\t# \t\tif BYZANTINE == 'n':\n\t\t\t# \t\t\tlength = 16 - (len(match_message) % 16)\n\t\t\t# \t\t\tmatch_message += bytes([length])*length\n\t\t\t# \t\t\tfor i in neighbors:\n\t\t\t# \t\t\t\tcipher = AES.new(keys[int(cid)][i[0]])\n\t\t\t# \t\t\t\tciphertext = cipher.encrypt(match_message)\n\t\t\t# \t\t\t\tclient.sendto(ciphertext, (i[3], i[2]))\n\n\t\t\t# \t\t\tcipher = AES.new(keys[int(cid)]['server'])\n\t\t\t# \t\t\tciphertext = cipher.encrypt(match_message)\n\t\t\t# \t\t\tclient.sendto(ciphertext, (SERVER, int(PORT)))\n\n\t\t\t# \t\tif BYZANTINE == 'y':\n\t\t\t# \t\t\tmatch = \"COMMIT11\".encode()\n\t\t\t# \t\t\tlength = 16 - (len(match) % 16)\n\t\t\t# \t\t\tmatch += bytes([length])*length\n\t\t\t# \t\t\tfor i in neighbors:\n\t\t\t# \t\t\t\tcipher = AES.new(keys[int(cid)][i[0]])\n\t\t\t# \t\t\t\tciphertext = cipher.encrypt(match)\n\t\t\t# \t\t\t\tclient.sendto(ciphertext, (i[3], i[2]))\n\n\t\t\t# \t\t\tcipher = AES.new(keys[int(cid)]['server'])\n\t\t\t# \t\t\tciphertext = cipher.encrypt(match)\n\t\t\t# \t\t\tclient.sendto(ciphertext, (SERVER, int(PORT)))\n\n\t\t\t# \t\tprint(\"Sent commit\")\n\t\t\t# \t\tSTAGE = 'COMMIT'\n\n\n\t\t\t# elif STAGE == 'COMMIT':\n\t\t\t# \tif BYZANTINE == 'n':\n\t\t\t# \t\tif len([match for match in messages if match == match_message]) >= 2:\n\t\t\t# \t\t\tprint(\"committed \", match_message)\n\t\t\t# \tSTAGE = 'DONE'\n\t\t\t# \tprint(STAGE)","sub_path":"SocketProgramming/DolevStrong/Attacker/test_conn.py","file_name":"test_conn.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"531663071","text":"while True:\n    try:\n        operation = input('Choose an operation + - * / \\nEnter 0 to exit: ')\n        if operation == '0':\n            break\n        elif operation not in ['+', '-', '*', '/']:\n            print('You entered an invalid character')\n            continue\n        else:\n            numbers = input('Enter 2 numbers to calculate, separated by a space: ').split()\n            if operation == '+':\n                s = int(numbers[0]) + int(numbers[1])\n                print(f'Sum = {s}')\n            if operation == '-':\n                d = int(numbers[0]) - int(numbers[1])\n                print(f'Difference = {d}')\n            if operation == '*':\n                c = int(numbers[0]) * int(numbers[1])\n                print(f'Product = {c}')\n            if operation == '/':\n                q = int(numbers[0]) / int(numbers[1])\n                print(f'Quotient = {q}')\n    except ZeroDivisionError:\n        print('Nice try, but dividing by zero is not allowed!')\n","sub_path":"les2_1.py","file_name":"les2_1.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"492143258","text":"from ..inquiry import Inquiry\nfrom ..report import Report\n\n\n@Report.from_validate\ndef validate_inquiry(source, *, parallel=False, **options):\n    \"\"\"Validate inquiry\n\n    API      | Usage\n    -------- | --------\n    Public   | `from frictionless import validate_inquiry`\n\n    Parameters:\n        source (dict|str): an inquiry descriptor\n        parallel? (bool): enable multiprocessing\n\n    Returns:\n        Report: validation report\n\n    \"\"\"\n    native = isinstance(source, Inquiry)\n    inquiry = source.to_copy() if native else Inquiry(source, **options)\n    return inquiry.run(parallel=parallel)\n","sub_path":"frictionless/validate/inquiry.py","file_name":"inquiry.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"434311156","text":"import openpyxl as excel\nimport datetime\n\n# define weekday names\nweekname = [\"月\",\"火\",\"水\",\"木\",\"金\",\"土\", \"日\"]\n\n# create a new workbook\nwb = excel.Workbook()\nws = wb.active\n\n# get September 1 of this year --- (*1)\nnow = datetime.datetime.now()\ntm = datetime.date(now.year, 9, 1)\n# loop over 366 days and write to cells --- (*2)\nfor i in range(1, 367):\n    # write year, month, day and weekday --- (*3)\n    ws.cell(column=1, row=i, value=tm.year)\n    ws.cell(column=2, row=i, value=tm.month)\n    ws.cell(column=3, row=i, value=tm.day)\n    ws.cell(column=4, row=i, value=weekname[tm.weekday()])\n    # get the next day --- (*4)\n    tm = tm + datetime.timedelta(days=1)\n\nwb.save(\"cal.xlsx\")\n","sub_path":"PycharmProjects/Excel_1/excel_study_c.py","file_name":"excel_study_c.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"23391400","text":"# -*- coding: utf-8 -*-\n\n'''\nIs the mountain suitable for climbing?\n\nWrite a function that takes a list of numbers representing\nthe individual heights of a mountain and determines\nwhether such a mountain is suitable for climbing.\n\nWe will consider a mountain climbable when the difference\nbetween adjacent heights does not exceed 5 units.\n\nNote: the list may start with any number and be of any length.\n\nExamples:\n\nis_scalable([1, 2, 4, 6, 7, 8]) True\nis_scalable([40, 45, 50, 45, 47, 52]) True\nis_scalable([2, 9, 11, 10, 18, 21]) False\n'''\n\nfrom typing import List\n\ndef is_scalable(arr: List[int]) -> bool:\n    if arr:\n        # Compare absolute differences of adjacent heights and return the result.\n        return all(abs(arr[pos] - arr[pos + 1]) <= 5\n                   # Iterate over list indices (one less than the length).\n                   for pos in range(len(arr) - 1))\n    return False\n\n\n# Tests.\ntests = (\n    ([1, 2, 4, 6, 7, 8], True),\n    ([40, 45, 50, 45, 47, 52], True),\n    ([2, 9, 11, 10, 18, 21], False),\n    ([40, 45, 50, 45, 47, 52, 70], False),\n    ([5, 9, 11, 10, 12, 11, 15], True),\n    ([40, 45, 51, 46, 47, 52], False),\n    ([], False)\n)\n\nfor index, item in enumerate(tests):\n    res = is_scalable(arr=item[0])\n    assert res == item[1], f'tests:{index:>02} >>> {item[0]} -> {res} != {item[1]}'\n\n","sub_path":"medium/is_scalable.py","file_name":"is_scalable.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"304658032","text":"\"\"\"\nLabotter functionality\n\"\"\"\n\nimport datetime\nfrom typing import Tuple\nimport psycopg2\n\nfrom library.database import Database\n\n\nclass LabotterDatabase(Database):\n    \"\"\"\n    Labotter database operations\n    \"\"\"\n\n    def check_exist_user_name(self, user_name: str) -> bool:\n        \"\"\"\n        Check whether the user exists\n        \"\"\"\n        with self.conn.cursor() as cursor:\n            cursor.execute(\n                \"SELECT COUNT(*) FROM labotter WHERE user_name = %s;\", (user_name,))\n            row = int(cursor.fetchone()[0])\n        return row != 0\n\n    def check_lab_in_flag(self, user_name: str) -> bool:\n        \"\"\"\n        Check whether the user is currently lab-in (checked in)\n        \"\"\"\n        lab_in_flag = False\n        with self.conn.cursor() as cursor:\n            cursor.execute(\n                \"SELECT lab_in_flag FROM labotter WHERE user_name = %s;\", (user_name,))\n            lab_in_flag = cursor.fetchone()[0]\n\n        return bool(lab_in_flag)\n\n    def create_labo_row(self, user_name: str) -> bool:\n        \"\"\"\n        Register a new labotter user\n        \"\"\"\n        c_lab_row_flag = True\n        with self.conn.cursor() as cursor:\n            try:\n                cursor.execute(\n                    \"\"\"\n                    INSERT INTO\n                        labotter(user_name, lab_in_flag, lab_in, lab_rida, min_sum)\n                    VALUES (%s, '0', null, null, '0');\n                    \"\"\", (user_name,))\n                self.conn.commit()\n            except psycopg2.Error:\n                c_lab_row_flag = False\n                print('Can not execute sql(add).')\n\n        return c_lab_row_flag\n\n    def registory_labo_in(self, user_name: str, start_time: str) -> bool:\n        \"\"\"\n        Lab-in (check in)\n        \"\"\"\n        r_lab_in_flag = True\n        with self.conn.cursor() as cursor:\n            try:\n                cursor.execute(\"UPDATE labotter SET \\\n                    lab_in_flag = '1', \\\n                    lab_in = %s \\\n                    WHERE user_name = %s;\", (start_time, user_name,))\n                self.conn.commit()\n            except psycopg2.Error:\n                r_lab_in_flag = False\n                print('Can not execute sql(labo_in).')\n\n        return r_lab_in_flag\n\n    def registory_labo_rida(self, user_name: str, end_time: str, add_sum: int) -> bool:\n        \"\"\"\n        Lab-rida (check out)\n        \"\"\"\n        r_lab_rida_flag = True\n        with self.conn.cursor() as cursor:\n            try:\n                cursor.execute(\"UPDATE labotter SET \\\n                    lab_in_flag = '0', \\\n                    min_sum = %s, \\\n                    lab_rida = %s \\\n                    WHERE user_name = %s;\", (add_sum, end_time, user_name,))\n                self.conn.commit()\n            except psycopg2.Error:\n                r_lab_rida_flag = False\n                print('Can not execute sql(labo_rida).')\n\n        return r_lab_rida_flag\n\n    def get_labo_in_time_and_sum_time(self, user_name: str) -> Tuple[str, int]:\n        \"\"\"\n        Return the lab-in time and the total time spent in the lab\n        \"\"\"\n        labo_in_time = None\n        with self.conn.cursor() as cursor:\n            cursor.execute(\n                \"SELECT lab_in, min_sum FROM labotter WHERE 
user_name = %s;\", (user_name,))\n            labo_in_time, min_sum = cursor.fetchone()\n        return labo_in_time, min_sum\n\n\ndef labo_in(user_name: str) -> Tuple[bool, str]:\n    \"\"\"Lab-in processing\"\"\"\n\n    success_flag = False  # flag tracking the registration; set to True on success\n    dt_now = datetime.datetime.now()\n    start_time = dt_now.strftime('%Y-%m-%d %H:%M:%S')\n\n    with LabotterDatabase() as lab:\n        # first-time registration handling\n        if not lab.check_exist_user_name(user_name):\n            lab.create_labo_row(user_name)\n        # only proceed if currently checked out (lab-rida)\n        if not lab.check_lab_in_flag(user_name):\n            success_flag = lab.registory_labo_in(user_name, start_time)\n\n    return success_flag, start_time\n\n\ndef labo_rida(user_name: str) -> Tuple[bool, str, int, int]:\n    \"\"\"Lab-rida processing\"\"\"\n\n    success_flag = False  # flag tracking the registration; set to True on success\n    dt_now = datetime.datetime.now()\n    diff_time = 0\n    min_sum = 0\n    end_time = dt_now.strftime('%Y-%m-%d %H:%M:%S')\n\n    with LabotterDatabase() as lab:\n        # first-time registration handling\n        if not lab.check_exist_user_name(user_name):\n            lab.create_labo_row(user_name)\n\n        # only proceed if currently checked in (lab-in)\n        if lab.check_lab_in_flag(user_name):\n            labo_in_time, min_sum = lab.get_labo_in_time_and_sum_time(\n                user_name)\n            start_time = datetime.datetime.strptime(\n                str(labo_in_time), '%Y-%m-%d %H:%M:%S')\n            diff_time = int((dt_now - start_time).total_seconds())\n            min_sum = min_sum + diff_time\n            success_flag = lab.registory_labo_rida(\n                user_name, end_time, min_sum)\n\n    return success_flag, end_time, diff_time, min_sum\n","sub_path":"library/labotter.py","file_name":"labotter.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"203185521","text":"# !/usr/bin/env python\n# _*_ coding:utf-8 _*_\n\n'''\n\n@author: yerik\n\n@contact: xiangzz159@qq.com\n\n@time: 2018/6/20 16:04\n\n@desc:\n\n'''\n\nfrom ccxt.base.exchange import Exchange\n\n# -----------------------------------------------------------------------------\n\ntry:\n    basestring  # Python 3\nexcept NameError:\n    basestring = str  # Python 2\nimport hashlib\nimport math\nimport json\n# base64, hmac and time are required by get_signed()/sign() below\nimport base64\nimport hmac\nimport time\nfrom ccxt.base.errors import ExchangeError\nfrom ccxt.base.errors import InvalidOrder\nfrom ccxt.base.errors import OrderNotFound\n\n\nclass fcoin (Exchange):\n\n    def describe(self):\n        return self.deep_extend(super(fcoin, self).describe(), {\n            'id': 'fcoin',\n            'name': 'FCoin',\n            'countries': 'CN',\n            'rateLimit': 1000,\n            'userAgent': self.userAgents['chrome39'],\n            'version': 'v2',\n            'accounts': None,\n            'accountsById': None,\n            'hostname': 'api.fcoin.com',\n            'has': {\n                'CORS': False,\n                'fetchDepositAddress': False,\n                'fetchOHLCV': True,\n                'fetchOpenOrders': True,\n                'fetchClosedOrders': True,\n                'fetchOrder': True,\n                'fetchOrders': False,\n                'fetchTradingLimits': False,\n                'withdraw': False,\n            },\n            'timeframes': {\n                '1m': 'M1',\n                '3m': 'M3',\n                '5m': 'M5',\n                '15m': 'M15',\n                '30m': 'M30',\n                '1h': 'H1',\n                '4h': 'H4',\n                '6h': 'H6',\n                '1d': 'D1',\n                '1w': 'W1',\n                '1M': '1M',\n            },\n            'urls': {\n                'logo': 'https://www.fcoin.com/static/images/logo_beta.png',\n                'api': 'https://api.fcoin.com',\n                'www': 'https://www.fcoin.com',\n                'doc': 'https://developer.fcoin.com/',\n                'fees': 'https://support.fcoin.com/hc/zh-cn/articles/360003715514-%E4%BA%A4%E6%98%93%E6%89%8B%E7%BB%AD%E8%B4%B9%E5%8F%8A%E8%AE%A2%E5%8D%95%E8%A7%84%E5%88%99%E8%AF%B4%E6%98%8E',\n            },\n            'api': {\n                'market': {\n                    'get': [\n                        'ticker/{symbol}',\n                        'depth/{level}/{symbol}',\n                        'trades/{symbol}',\n                        'candles/{resolution}/{symbol}',\n                    ],\n                },\n                'public': {\n                    'get': [\n                        'server-time',\n                        'currencies',\n                        'symbols',\n                    ]\n                },\n                'private': {\n                    'get': 
[\n 'accounts/balance',\n 'orders',\n 'orders/{orderId}',\n 'orders/{orderId}/match_results'\n ],\n 'post': [\n 'orders',\n 'orders/{orderId}/submit-cancel'\n ],\n },\n },\n 'fees': {\n 'trading': {\n 'tierBased': False,\n 'percentage': True,\n 'maker': 0.001,\n 'taker': 0.001,\n },\n },\n })\n\n def parse_markets(self, markets):\n numMarkets = len(markets)\n if numMarkets < 1:\n raise ExchangeError(self.id + ' publicGetCommonSymbols returned empty response: ' + self.json(markets))\n result = []\n for i in range(0, len(markets)):\n market = markets[i]\n baseId = market['base_currency']\n quoteId = market['quote_currency']\n price_decimal = market['price_decimal']\n amount_decimal = market['amount_decimal']\n base = baseId.upper()\n quote = quoteId.upper()\n id = baseId + quoteId\n base = self.common_currency_code(base)\n quote = self.common_currency_code(quote)\n symbol = base + '/' + quote\n precision = {\n 'amount': amount_decimal,\n 'price': price_decimal,\n }\n lot = math.pow(10, -precision['amount'])\n maker = 0 if (base == 'OMG') else 0.2 / 100\n taker = 0 if (base == 'OMG') else 0.2 / 100\n result.append({\n 'id': id,\n 'symbol': symbol,\n 'base': base,\n 'quote': quote,\n 'lot': lot,\n 'precision': precision,\n 'taker': taker,\n 'maker': maker,\n 'limits': {\n 'amount': {\n 'min': lot,\n 'max': math.pow(10, precision['amount']),\n },\n 'price': {\n 'min': math.pow(10, -precision['price']),\n 'max': None,\n },\n 'cost': {\n 'min': 0,\n 'max': None,\n },\n },\n 'info': market,\n })\n return result\n\n def fetch_markets(self):\n response = self.publicGetSymbols()\n return self.parse_markets(response['data'])\n\n def fetch_ticker(self, symbol, params={}):\n self.load_markets()\n market = self.market(symbol)\n response = self.marketGetTickerSymbol(self.extend({\n 'symbol': market['id'],\n }, params))\n return self.parse_ticker(response['data'], market)\n\n def parse_ticker(self, data, market=None):\n symbol = None\n if market:\n symbol = market['symbol']\n timestamp = self.milliseconds()\n ticker = data['ticker']\n bid = float(ticker[2])\n bidVolume = float(ticker[3])\n ask = float(ticker[4])\n askVolume = float(ticker[5])\n last = float(ticker[0])\n high = float(ticker[7])\n low = float(ticker[8])\n open = None\n close = None\n change = None\n percentage = None\n average = None\n baseVolume = float(ticker[9])\n quoteVolume = float(ticker[10])\n vwap = None\n if baseVolume is not None and quoteVolume is not None and baseVolume > 0:\n vwap = quoteVolume / baseVolume\n return {\n 'symbol': symbol,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'high': high,\n 'low': low,\n 'bid': bid,\n 'bidVolume': bidVolume,\n 'ask': ask,\n 'askVolume': askVolume,\n 'vwap': vwap,\n 'open': open,\n 'close': close,\n 'last': last,\n 'previousClose': None,\n 'change': change,\n 'percentage': percentage,\n 'average': average,\n 'baseVolume': baseVolume,\n 'quoteVolume': quoteVolume,\n 'info': data,\n }\n\n def parse_bids_asks(self, bidasks, price_key=0, amount_key=1):\n result = []\n if len(bidasks):\n for i in range(0, int(len(bidasks) / 2)):\n result.append([bidasks[2 * i + 0], bidasks[2 * i + 1]])\n return result\n\n def fetch_order_book(self, symbol, limit=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n limit = 'L20' if limit is None else 'L' + str(limit)\n response = self.marketGetDepthLevelSymbol(self.extend({\n 'symbol': market['id'],\n 'level': limit,\n }, params))\n order_book = response['data']\n ts = order_book['ts']\n return 
self.parse_order_book(order_book, ts)\n\n def fetch_trades(self, symbol, since=None, limit=20, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n 'limit': limit\n }\n if since is not None:\n request['before'] = since\n response = self.marketGetTradesSymbol(self.extend(request, params))\n trades = response['data']\n result = []\n for trade in trades:\n result.append(self.parse_trade(trade, market))\n result = self.sort_by(result, 'timestamp')\n return self.filter_by_symbol_since_limit(result, symbol, since, limit)\n\n def parse_trade(self, trade, market):\n timestamp = trade['ts']\n return {\n 'info': trade,\n 'id': str(trade['id']),\n 'order': None,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'symbol': market['symbol'],\n 'type': None,\n 'side': trade['side'],\n 'price': trade['price'],\n 'amount': trade['amount'],\n }\n\n def parse_ohlcv(self, ohlcv, market=None, timeframe='5m', since=None, limit=None):\n return [\n ohlcv['id'] * 1000,\n ohlcv['open'],\n ohlcv['high'],\n ohlcv['low'],\n ohlcv['close'],\n ohlcv['quote_vol'],\n ]\n\n def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=20, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n 'resolution': self.timeframes[timeframe],\n 'limit': limit,\n }\n if since is not None:\n request['before'] = since\n if limit is not None:\n request['size'] = limit\n response = self.marketGetCandlesResolutionSymbol(self.extend(request, params))\n return self.parse_ohlcvs(response['data'], market, timeframe, since, limit)\n\n def fetch_balance(self, params={}):\n self.load_markets()\n response = self.privateGetAccountsBalance()\n balances = response['data']\n result = {'info': response}\n for balance in balances:\n uppercase = balance['currency'].upper()\n currency = self.common_currency_code(uppercase)\n account = dict()\n account['free'] = balance['available']\n account['used'] = balance['frozen']\n account['total'] = balance['balance']\n result[currency] = account\n return self.parse_balance(result)\n\n def fetch_orders(self, symbol=None, since=None, limit=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n limit = 20 if limit is None else limit\n request = {\n 'limit': limit\n }\n if symbol is not None:\n request['symbol'] = market['id']\n if since is not None:\n request['after'] = since\n\n response = self.privateGetOrders(self.extend(request, params))\n return self.parse_orders(response['data'], market, since, limit)\n\n def fetch_order(self, id, symbol=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n response = self.privateGetOrderOrdersId(self.extend({\n 'order_id': id,\n }, params))\n return self.parse_order(response['data'], market)\n\n def parse_order(self, order, market=None):\n side = order['side']\n type = order['type']\n status = self.parse_order_status(order['state'])\n symbol = None\n if not market:\n if 'symbol' in order:\n if order['symbol'] in self.markets_by_id:\n marketId = order['symbol']\n market = self.markets_by_id[marketId]\n if market:\n symbol = market['symbol']\n timestamp = order['created_at']\n amount = float(order['amount'])\n filled = float(order['field_amount'])\n remaining = amount - filled\n price = float(order['price'])\n cost = float(order['executed_value'])\n fee = float(order['fill_fees'])\n average = 0\n if filled > 0:\n average = float(cost / filled)\n result = {\n 'info': order,\n 'id': str(order['id']),\n 'timestamp': 
timestamp,\n 'datetime': self.iso8601(timestamp),\n 'symbol': symbol,\n 'type': type,\n 'side': side,\n 'price': price,\n 'average': average,\n 'cost': cost,\n 'amount': amount,\n 'filled': filled,\n 'remaining': remaining,\n 'status': status,\n 'fee': fee,\n }\n return result\n\n def parse_order_status(self, status):\n if status == 'partial_filled':\n return 'open'\n elif status == 'partial_canceled':\n return 'canceled'\n elif status == 'filled':\n return 'closed'\n elif status == 'canceled':\n return 'canceled'\n elif status == 'submitted':\n return 'open'\n elif status == 'pending_cancel':\n return 'open'\n return status\n\n def create_order(self, symbol, type, side, amount, price=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n order = {\n 'amount': self.amount_to_precision(symbol, amount),\n 'symbol': market['id'],\n 'type': type,\n 'side': side,\n }\n if type == 'limit':\n order['price'] = self.price_to_precision(symbol, price)\n response = self.privatePostOrders(self.extend(order, params))\n return {\n 'info': response,\n 'id': response['data'],\n }\n\n def cancel_order(self, id, symbol=None, params={}):\n return self.privatePostOrdersOrderIdSubmitCancel({'order_id': id})\n\n def get_signed(self, sig_str):\n \"\"\"signed params use sha512\"\"\"\n sig_str = base64.b64encode(sig_str)\n signature = base64.b64encode(hmac.new(bytes(self.secret, 'utf-8'), sig_str, digestmod=hashlib.sha1).digest())\n return signature\n\n def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):\n if api == 'private':\n url = '/' + self.version\n else:\n url = '/' + self.version + '/' + api\n\n url += '/' + self.implode_params(path, params)\n query = self.omit(params, self.extract_params(path))\n if api == 'private':\n self.check_required_credentials()\n param = ''\n if params != {}:\n sort_pay = sorted(params.items())\n # sort_pay.sort()\n for k in sort_pay:\n param += '&' + str(k[0]) + '=' + str(k[1])\n param = param.lstrip('&')\n timestamp = str(int(time.time() * 1000))\n full_url = self.urls['api'] + url\n\n if method == 'GET':\n if param:\n full_url = full_url + '?' + param\n sig_str = method + full_url + timestamp\n elif method == 'POST':\n sig_str = method + full_url + timestamp + param\n\n signature = self.get_signed(bytes(sig_str, 'utf-8'))\n\n headers = {\n 'FC-ACCESS-KEY': self.apiKey,\n 'FC-ACCESS-SIGNATURE': signature.decode(),\n 'FC-ACCESS-TIMESTAMP': timestamp\n }\n\n if method == 'POST':\n body = self.json(query)\n headers['Content-Type'] = 'application/json'\n else:\n if params:\n url += '?' + self.urlencode(params)\n url = self.urls['api'] + url\n return {'url': url, 'method': method, 'body': body, 'headers': headers}","sub_path":"python/ccxt/fcoin.py","file_name":"fcoin.py","file_ext":"py","file_size_in_byte":15622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"87655729","text":"import re, os, json, logging\nimport numpy as np\nfrom . 
import expose, directions\nfrom .server.world import Vector\n\nHERE = os.path.dirname(os.path.abspath(__file__))\nTEMPLATES = os.path.join(HERE, 'templates')\nlog = logging.getLogger(__name__)\n\n\n@expose.expose()\nasync def copy(\n name='stamp',\n depth=5,\n width=5,\n height=5,\n position=None,\n direction=None,\n *,\n world=None,\n player=None,\n):\n \"\"\"Read blocks to create a template that can be stamped elsewhere\n\n name -- name that you can use to reference this copy when pasting\n depth, width, height -- dimensions of the cube to copy\n position, direction -- middle of the lowest slice of the cube will be just\n in front of position with cube extending in direction\n\n returns message with name and the content copied\n \"\"\"\n\n name = sanitize(name)\n\n if direction is None:\n direction = player.direction\n direction, cross = directions.forward_and_cross(direction)\n\n if not direction:\n raise RuntimeError(\"Unable to find direction\")\n\n if position is None:\n position = player.tile_position + direction\n\n start = position - (cross * (width // 2))\n stop = start + (cross * width) + (direction * depth) + (Vector(0, 1, 0) * height)\n\n layers = await world.getBlockArray(start, stop)\n\n save_template(player, name, layers)\n return f'Saved template to {name} with {layers}'\n\n@expose.expose()\nasync def show_pastes():\n \"\"\"Show the name of pastes that are available\"\"\"\n return list_templates()\n\n@expose.expose()\nasync def paste(\n name='stamp',\n position=None,\n direction=None,\n *,\n world=None,\n player=None,\n):\n \"\"\"Paste previously copied blocks into the current location (see copy)\n\n See: show_pastes for a list of available templates...\n\n position, direction -- template will be directly in front of this position in direction\n \"\"\"\n\n name = sanitize(name)\n template = load_template(player, name)\n\n if not template:\n return f'No template {name} found'\n\n if direction is None:\n direction = player.direction\n direction, cross = directions.forward_and_cross(direction)\n\n if not direction:\n raise RuntimeError(\"Unable to find direction\")\n\n if position is None:\n position = player.tile_position + direction\n\n up = Vector(0, 1, 0)\n\n height = len(template)\n depth = len(template[0])\n width = len(template[0][0])\n\n log.info(\"Template size: %s,%s,%s\", depth, width, height)\n\n # the template doesn't rotate, so we need to decide our position relative to it\n # rather than its position\n if direction[2] > 0:\n # we are facing north, native format, so start is to our left\n start = position - (cross * (width // 2))\n elif direction[2] < 0:\n # we are facing south, so we need to make start the full depth and then to our right...\n start = position + (cross * (width // 2)) + (direction * (depth - 1))\n elif direction[0] > 0:\n # we are facing east, so we should start from depth//2 to our right\n start = position - (cross * (depth // 2))\n elif direction[0] < 0:\n # we are facing west, so we should start from depth//2 to our left + width\n start = position + (cross * (depth // 2)) + (direction * (depth - 1))\n else:\n return 'Unable to determine start position'\n\n locations, blocks = [], []\n for y, layer in enumerate(template):\n for z, row in enumerate(layer):\n for x, cell in enumerate(row):\n locations.append(start + (x, y, z))\n blocks.append(cell)\n await world.setBlockList(locations, blocks)\n\n\ndef sanitize(text):\n return ''.join(BAD_CHARS.split(text))\n\n\nBAD_CHARS = re.compile(r'[^0-9a-zA-Z_]')\n\n\ndef template_filename(player, 
template_name):\n \"\"\"Get the template filename for the given player\n\n names are restriced to A-Za-z0-9_ for both player and template\n \"\"\"\n playername = sanitize(player.name)\n template_name = sanitize(template_name)\n return os.path.join(TEMPLATES, template_name)\n\n\ndef save_template(player, name, template):\n filename = template_filename(player, name)\n log.info(\"Saving to file: %s\", filename)\n struct = {\n 'author': player.name,\n 'name': name,\n 'blocks': template,\n }\n result = json.dumps(struct)\n with open(filename, 'w') as fh:\n fh.write(result)\n\n\ndef load_template(player, name):\n filename = template_filename(player, name)\n log.info(\"Loading from file: %s\", filename)\n if os.path.exists(filename):\n content = json.loads(open(filename).read())\n return content['blocks']\n log.info(\"No such file: %s\", filename)\n return None\n\ndef list_templates():\n return sorted(os.listdir(TEMPLATES))","sub_path":"pycraft/copypaste.py","file_name":"copypaste.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"410530755","text":"\"\"\"\nSupport for Blue Iris.\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/hpprinter/\n\"\"\"\nfrom datetime import datetime\nimport sys\nimport logging\n\nfrom homeassistant.helpers import device_registry as dr\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.event import async_call_later, async_track_time_interval\nfrom homeassistant.helpers.dispatcher import async_dispatcher_send\n\nfrom custom_components.hpprinter import HPDeviceData\nfrom .const import *\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass HPPrinterHomeAssistant:\n def __init__(self, hass, name, host, config_entry):\n self._scan_interval = SCAN_INTERVAL\n self._hass = hass\n self._name = name\n self._hp_data = HPDeviceData(hass, host, name)\n self._entities = {}\n self._domain_loaded = {}\n self._config_entry = config_entry\n self._components_hash = None\n self._remove_async_track_time = None\n self._unload_domain = []\n self._load_domain = []\n self._should_reload = False\n self._last_update = None\n self._is_first_time_online = True\n\n @property\n def name(self):\n return self._name\n\n def initialize(self):\n if self._hp_data is not None:\n async_call_later(self._hass, 5, self.async_finalize)\n\n async def async_finalize(self, event_time):\n _LOGGER.debug(f\"async_finalize called at {event_time}\")\n\n self._hass.services.async_register(DOMAIN, 'save_debug_data', self.save_debug_data)\n\n self._remove_async_track_time = async_track_time_interval(self._hass, self.async_update, SCAN_INTERVAL)\n\n self._hass.async_create_task(self.async_init_entry())\n\n async def async_remove(self):\n _LOGGER.debug(f\"async_remove called\")\n\n self._hass.services.async_remove(DOMAIN, 'save_debug_data')\n\n if self._remove_async_track_time is not None:\n self._remove_async_track_time()\n\n unload = self._hass.config_entries.async_forward_entry_unload\n\n self._hass.async_create_task(unload(self._config_entry, DOMAIN_BINARY_SENSOR))\n self._hass.async_create_task(unload(self._config_entry, DOMAIN_SENSOR))\n\n async def async_update_entry(self, entry, clear_all):\n _LOGGER.info(f\"async_update_entry: {entry}\")\n\n self._config_entry = entry\n self._last_update = datetime.now()\n\n self._load_domain = []\n self._unload_domain = []\n\n if clear_all:\n device_reg = await dr.async_get_registry(self._hass)\n 
device_reg.async_clear_config_entry(self._config_entry.entry_id)\n\n for domain in [DOMAIN_SENSOR, DOMAIN_BINARY_SENSOR]:\n has_entities = self._domain_loaded.get(domain, False)\n\n if domain not in self._load_domain:\n self._load_domain.append(domain)\n\n if has_entities and domain not in self._unload_domain:\n self._unload_domain.append(domain)\n\n if clear_all:\n await self.async_update(datetime.now())\n\n async def async_init_entry(self):\n _LOGGER.debug(f\"async_init_entry called\")\n\n await self.async_update_entry(self._config_entry, True)\n\n def set_domain_entities_state(self, domain, has_entities):\n self._domain_loaded[domain] = has_entities\n\n def get_entities(self, domain):\n return self._entities.get(domain, {})\n\n def get_entity(self, domain, name):\n entities = self.get_entities(domain)\n entity = {}\n if entities is not None:\n entity = entities.get(name, {})\n\n return entity\n\n def set_entity(self, domain, name, data):\n entities = self._entities.get(domain)\n\n if entities is None:\n self._entities[domain] = {}\n\n entities = self._entities.get(domain)\n\n entities[name] = data\n\n def save_debug_data(self, service_data):\n \"\"\"Call BlueIris to refresh information.\"\"\"\n _LOGGER.debug(f\"Saving debug data {DOMAIN} ({service_data})\")\n\n self._hass.async_create_task(self._hp_data.get_data(self.store_data))\n\n def store_data(self, file, content):\n try:\n path = self._hass.config.path(file)\n\n with open(path, 'w+') as out:\n out.write(content)\n\n except Exception as ex:\n exc_type, exc_obj, tb = sys.exc_info()\n line_number = tb.tb_lineno\n\n _LOGGER.error(f'Failed to log {file} data, Error: {ex}, Line: {line_number}')\n\n async def async_update(self, event_time):\n _LOGGER.info(f\"Updating {event_time}\")\n\n data = await self._hp_data.get_data()\n\n cartridges_data = data.get(HP_DEVICE_CARTRIDGES)\n\n is_online = self.create_status_binary_sensor(data)\n\n self.create_status_sensor(data)\n\n if is_online:\n self.create_printer_sensor(data)\n self.create_scanner_sensor(data)\n\n if cartridges_data is not None:\n for key in cartridges_data:\n cartridge = cartridges_data.get(key)\n\n if cartridge is not None:\n self.create_cartridge_sensor(data, cartridge, key)\n\n if self._is_first_time_online:\n self._is_first_time_online = False\n\n await self.async_update_entry(self._config_entry, False)\n\n await self.discover_all()\n\n async def discover_all(self):\n for domain in [DOMAIN_SENSOR, DOMAIN_BINARY_SENSOR]:\n await self.discover(domain)\n\n async def discover(self, domain):\n signal = SIGNALS.get(domain)\n\n if signal is None:\n _LOGGER.error(f\"Cannot discover domain {domain}\")\n return\n\n unload = self._hass.config_entries.async_forward_entry_unload\n setup = self._hass.config_entries.async_forward_entry_setup\n\n entry = self._config_entry\n\n can_unload = domain in self._unload_domain\n can_load = domain in self._load_domain\n can_notify = not can_load and not can_unload\n\n if can_unload:\n _LOGGER.info(f\"Unloading domain {domain}\")\n\n self._hass.async_create_task(unload(entry, domain))\n self._unload_domain.remove(domain)\n\n if can_load:\n _LOGGER.info(f\"Loading domain {domain}\")\n\n self._hass.async_create_task(setup(entry, domain))\n self._load_domain.remove(domain)\n\n if can_notify:\n async_dispatcher_send(self._hass, signal)\n\n def create_status_sensor(self, data):\n is_online = data.get(HP_DEVICE_IS_ONLINE, False)\n status = data.get(PRINTER_CURRENT_STATUS, \"Off\")\n model = data.get(ENTITY_MODEL)\n\n name = data.get(\"Name\", 
DEFAULT_NAME)\n sensor_name = f\"{name} {HP_DEVICE_STATUS}\"\n\n icon = \"mdi:printer\" if is_online else \"mdi:printer-off\"\n\n attributes = {\n \"friendly_name\": sensor_name,\n \"device_class\": \"connectivity\"\n }\n\n entity = {\n ENTITY_NAME: sensor_name,\n ENTITY_STATE: status,\n ENTITY_ATTRIBUTES: attributes,\n ENTITY_ICON: icon,\n ENTITY_MODEL: model\n }\n\n self.set_entity(DOMAIN_SENSOR, sensor_name, entity)\n\n def create_status_binary_sensor(self, data):\n is_online = data.get(HP_DEVICE_IS_ONLINE, False)\n model = data.get(ENTITY_MODEL)\n\n name = data.get(\"Name\", DEFAULT_NAME)\n sensor_name = f\"{name} {HP_DEVICE_STATUS}\"\n\n icon = \"mdi:printer-off\"\n\n if is_online:\n icon = \"mdi:printer\"\n\n attributes = {\n \"friendly_name\": sensor_name,\n \"device_class\": \"connectivity\"\n }\n\n entity = {\n ENTITY_NAME: sensor_name,\n ENTITY_STATE: is_online,\n ENTITY_ATTRIBUTES: attributes,\n ENTITY_ICON: icon,\n ENTITY_MODEL: model\n }\n\n self.set_entity(DOMAIN_BINARY_SENSOR, sensor_name, entity)\n\n return is_online\n\n def create_printer_sensor(self, data):\n printer_data = data.get(HP_DEVICE_PRINTER)\n model = data.get(ENTITY_MODEL)\n\n if printer_data is not None:\n name = data.get(\"Name\", DEFAULT_NAME)\n sensor_name = f\"{name} {HP_DEVICE_PRINTER}\"\n\n state = printer_data.get(HP_DEVICE_PRINTER_STATE)\n\n attributes = {\n \"unit_of_measurement\": \"Pages\",\n \"friendly_name\": sensor_name\n }\n\n for key in printer_data:\n if key != HP_DEVICE_PRINTER_STATE:\n attributes[key] = printer_data[key]\n\n entity = {\n ENTITY_NAME: sensor_name,\n ENTITY_STATE: state,\n ENTITY_ATTRIBUTES: attributes,\n ENTITY_ICON: PAGES_ICON,\n ENTITY_MODEL: model\n }\n\n self.set_entity(DOMAIN_SENSOR, sensor_name, entity)\n\n def create_scanner_sensor(self, data):\n scanner_data = data.get(HP_DEVICE_SCANNER)\n model = data.get(ENTITY_MODEL)\n\n if scanner_data is not None:\n name = data.get(\"Name\", DEFAULT_NAME)\n sensor_name = f\"{name} {HP_DEVICE_SCANNER}\"\n\n state = scanner_data.get(HP_DEVICE_SCANNER_STATE)\n\n attributes = {\n \"unit_of_measurement\": \"Pages\",\n \"friendly_name\": sensor_name\n }\n\n for key in scanner_data:\n if key != HP_DEVICE_SCANNER_STATE:\n attributes[key] = scanner_data[key]\n\n entity = {\n ENTITY_NAME: sensor_name,\n ENTITY_STATE: state,\n ENTITY_ATTRIBUTES: attributes,\n ENTITY_ICON: SCANNER_ICON,\n ENTITY_MODEL: model\n }\n\n self.set_entity(DOMAIN_SENSOR, sensor_name, entity)\n\n def create_cartridge_sensor(self, data, cartridge, key):\n name = data.get(\"Name\", DEFAULT_NAME)\n model = data.get(ENTITY_MODEL)\n sensor_name = f\"{name} {key}\"\n\n state = cartridge.get(HP_DEVICE_CARTRIDGE_STATE, 0)\n\n attributes = {\n \"unit_of_measurement\": \"%\",\n \"friendly_name\": sensor_name\n }\n\n for key in cartridge:\n if key != HP_DEVICE_CARTRIDGE_STATE:\n attributes[key] = cartridge[key]\n\n entity = {\n ENTITY_NAME: sensor_name,\n ENTITY_STATE: state,\n ENTITY_ATTRIBUTES: attributes,\n ENTITY_ICON: INK_ICON,\n ENTITY_MODEL: model\n }\n\n self.set_entity(DOMAIN_SENSOR, sensor_name, entity)\n\n\ndef _get_printer(hass: HomeAssistant, name) -> HPPrinterHomeAssistant:\n if DATA_HP_PRINTER not in hass.data:\n hass.data[DATA_HP_PRINTER] = {}\n\n printers = hass.data[DATA_HP_PRINTER]\n printer = None\n\n if name in printers:\n printer = printers[name]\n\n return 
printer\n","sub_path":"custom_components/hpprinter/home_assistant.py","file_name":"home_assistant.py","file_ext":"py","file_size_in_byte":10916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"13470487","text":"import sys\nsys.stdin = open(\"input.txt\", \"r\")\n\n\"\"\"\nDerive a recurrence under the constraint that three consecutive stairs cannot all be stepped on.\n1. Stepping on the stair two steps below is always fine.\n2. If the previous stair was stepped on, the one before it must not have been, so add the value from two stairs below the previous stair to the DP.\nThe larger of the two becomes the DP value.\n\"\"\"\n\nN = int(input())\n\nstair = [0]\nfor _ in range(N):\n    stair.append(int(input()))\n\nif N == 1:\n    print(stair[1])\nelse:\n    dp = [0] * (N+1)\n    dp[1] = stair[1]\n    dp[2] = stair[1] + stair[2]\n\n    for i in range(3, N+1):\n        dp[i] = max(dp[i-3]+stair[i-1]+stair[i], dp[i-2]+stair[i])\n\n    print(dp[N])","sub_path":"DP/2579_S3_계단오르기.py","file_name":"2579_S3_계단오르기.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"} +{"seq_id":"514303574","text":"# Copyright 2015 Rackspace\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport xml.etree.ElementTree as ET\n\nfrom cafe.common.reporting.base_report import BaseReport\n\n\nclass XMLReport(BaseReport):\n    def generate_report(self, result, path):\n        root = ET.Element(\"testsuite\")\n        root.attrib['name'] = result.name\n        root.attrib['tests'] = str(result.testsRun)\n        root.attrib['errors'] = str(result.errors)\n        root.attrib['failures'] = str(result.failures)\n        root.attrib['skips'] = str(result.skipped)\n        root.attrib['time'] = str(result.time)\n        for testcase in result.test_logs:\n            testcase_tag = ET.SubElement(root, 'testcase')\n            testcase_tag.attrib['classname'] = testcase.name.split(\"(\")[1][:-1]\n            testcase_tag.attrib['name'] = testcase.name.split()[0]\n            testcase_tag.attrib['time'] = str(testcase.time)\n            err = testcase.err\n            if testcase.status == \"FAIL\":\n                testcase_tag.attrib['result'] = \"FAILED\"\n                error_tag = ET.SubElement(testcase_tag, 'failure')\n                error_tag.attrib['type'] = err.split(\":\")[1].split()[-1]\n                error_tag.attrib['message'] = err.split(\":\")[-1].strip()\n                error_tag.text = err\n            elif testcase.status == \"skipped\":\n                testcase_tag.attrib['result'] = \"SKIPPED\"\n                skipped_tag = ET.SubElement(testcase_tag, 'skipped')\n                skipped_tag.attrib['message'] = err\n            elif testcase.status == \"ERROR\":\n                testcase_tag.attrib['result'] = \"ERROR\"\n                error_tag = ET.SubElement(testcase_tag, 'error')\n                error_tag.attrib['type'] = err.split(\":\")[1].split()[-1]\n                error_tag.attrib['message'] = err.split(\":\")[-1].strip()\n                error_tag.text = err\n            elif testcase.status == \"ok\":\n                testcase_tag.attrib['result'] = \"PASSED\"\n            else:\n                testcase_tag.attrib['result'] = (\n                    testcase.status.upper().replace(\" \", \"_\"))\n\n        if os.path.isdir(path):\n            path += \"/results.xml\"\n\n        with open(path, 'wb') as fp:\n            
ET.ElementTree(root).write(fp)\n","sub_path":"cafe/common/reporting/xml_report.py","file_name":"xml_report.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
{"seq_id":"134465505","text":"class Solution(object):\n\tdef numberofvowels(self, s):\n\t\tcount = 0\n\t\tfor x in s:\n\t\t\tif x in 'aeiou':\n\t\t\t\tcount += 1\n\t\tprint(count)\n\nb = Solution()\nb.numberofvowels(\"azcbobobegghakl\")","sub_path":"problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"36"}
{"seq_id":"207985057","text":"\"\"\"\nProcess pool demo\nThe pool's worker function must be defined before the pool is created.\nWhen the parent process exits, the pool is destroyed immediately.\n\"\"\"\nfrom multiprocessing import Pool\nfrom time import sleep,ctime\n\n# worker function for the pool\ndef worker(msg,sec):\n print(ctime(),\"---\",msg)\n sleep(sec)\n\n# create the pool; its worker processes are spawned immediately\npool = Pool(4)\n\n# submit tasks\nfor i in range(10):\n msg = \"Tedu-%d\"%i\n pool.apply_async(func=worker,args=(msg,2))\n\npool.close() # close the pool; no new tasks can be submitted\n\npool.join() # block until all pooled work is finished\n\n","sub_path":"note-month02/03network/day13-duojinchneg/day13/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"205767039","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\nExercise 3, problem 1\r\n\"\"\"\r\nbasename = \"Station\"\r\n\r\n#create empty list\r\nfilenames = []\r\n\r\n#loop over the station numbers to build the filenames\r\nfor stationnumber in range(21):\r\n station = basename + \"_\" + str(stationnumber) + \".txt\"\r\n filenames.append(station)\r\n \r\nprint(filenames) \r\n \r\n \r\n\r\n \r\n\r\n ","sub_path":"station_name_generator.py","file_name":"station_name_generator.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"45882346","text":"import streamlit as st\nimport datetime\nimport requests\nimport numpy as np\nimport pandas as pd\n\nst.set_page_config(\n page_title=\"TaxiFareModel\", # => Quick reference - Streamlit\n page_icon=\"🚖\",\n layout=\"centered\", # wide\n)\n\n'''\n# TaxiFareModel front\n'''\n\nst.markdown('''\nRemember that there are several ways to output content into your web page...\n\nEither as with the title by just creating a string (or an f-string). Or as with this paragraph using the `st.` functions\n''')\n'''\n## Here we would like to add some controllers in order to ask the user to select the parameters of the ride\n\n1. 
Let's ask for:\n- date and time\n- pickup longitude\n- pickup latitude\n- dropoff longitude\n- dropoff latitude\n- passenger count\n'''\n'''## Please select the parameters of your ride'''\n# Ask for date and time\npickup_date = st.date_input(\n 'Please select a pickup date',\n datetime.date(2020, 8, 27)\n)\nst.write('You selected: ', pickup_date)\n\npickup_time = st.time_input('Select pickup time', datetime.time(8, 45))\nst.write('You selected the following time: ', pickup_time)\n\npickup_datetime = f\"{pickup_date} {pickup_time}\"\n\n# Ask for pickup longitude\npickup_longitude = st.number_input(\n 'Pickup longitude', value=-73.975836205698,\n step=1e-8) #40.75308327937108, -73.975836205698\nst.write('Your pickup longitude is: ', pickup_longitude)\n\n# Ask for pickup latitude (NYC latitudes are positive, ~40.75)\npickup_latitude = st.number_input(\n 'Pickup latitude', step=1e-8,\n value=40.75308327937108)\nst.write('Your pickup latitude is: ', pickup_latitude)\n\n# Ask for dropoff longitude\ndropoff_longitude = st.number_input(\n 'Dropoff longitude', step=1e-8,\n value=-73.97368251324504) # -73.97437988130339\nst.write('Your dropoff longitude is: ', dropoff_longitude)\n\n# Ask for dropoff latitude\ndropoff_latitude = st.number_input(\n 'Dropoff latitude', step=1e-8,\n value=40.78262576554464) # 40.78262576554464\nst.write('Your dropoff latitude is: ', dropoff_latitude)\n\n# Ask for passenger count\npassenger_count = st.number_input('Passenger count', 2)\nst.write('Your passenger count is: ', passenger_count)\n\n\ndata = pd.DataFrame({\n 'lat': [float(pickup_latitude),\n float(dropoff_latitude)],\n 'lon': [float(pickup_longitude),\n float(dropoff_longitude)],\n 'Taxi locations': ['Pickup', 'Dropoff'],\n})\n\nst.map(data)\n\n'''\n\n## Once we have these, let's call our API in order to retrieve a prediction\n\nSee? No need to load a `model.joblib` file in this app, we do not even need to know anything about Data Science in order to retrieve a prediction...\n\n🤔 How could we call our API? Of course... The `requests` package 💡\n'''\n\n\nurl = 'https://modelnew-iehtyxk3za-ew.a.run.app/predict'\n\nif url == 'https://taxifare.lewagon.ai/predict':\n\n st.markdown(\n 'Maybe you want to use your own API for the prediction, not the one provided by Le Wagon...'\n )\n'''\n\n2. Let's build a dictionary containing the parameters for our API...\n\n3. Let's call our API using the `requests` package...\n\n4. 
Let's retrieve the prediction from the **JSON** returned by the API...\n\n## Finally, we can display the prediction to the user\n'''\nkey = \"2013-07-06 17:18:00.000000119\"\n\nparams = {\n 'key': [key],\n 'pickup_datetime': [pickup_datetime],\n 'pickup_longitude': [float(pickup_longitude)],\n 'pickup_latitude': [float(pickup_latitude)],\n 'dropoff_longitude': [float(dropoff_longitude)],\n 'dropoff_latitude': [float(dropoff_latitude)],\n 'passenger_count': [int(passenger_count)]\n}\n\nrequest = requests.get(url, params=params)\n\nfare = request.json()['prediction']\n\nf\"Your predicted fare will be {fare}\"\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"220947408","text":"from tkinter import *\nroot = Tk()\nroot.geometry()\n\ndef print_item(event):\n print(lb.get(lb.curselection()))\n\nvar = StringVar()\nlb = Listbox(root, height=5, selectmode=BROWSE, listvariable=var)\nlb.bind('<<ListboxSelect>>', print_item)\n\nlist_item = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\nfor item in list_item:\n lb.insert(END,item)\n\nscr = Scrollbar(root)\nlb.configure(yscrollcommand=scr.set)\nscr['command'] = lb.yview\n\nscr.pack(side=RIGHT, fill=Y)\nlb.pack(side=LEFT, fill=BOTH)\nroot.mainloop()\n","sub_path":"Test/python/gui/scrollbar.py","file_name":"scrollbar.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"551129406","text":"\nfrom nintendo.common import tls, util, types, xml\nimport urllib.parse\nimport contextlib\nimport datetime\nimport anyio\nimport json\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nSTATUS_NAMES = {\n\t100: \"Continue\",\n\t200: \"OK\",\n\t201: \"Created\",\n\t400: \"Bad Request\",\n\t401: \"Unauthorized\",\n\t403: \"Forbidden\",\n\t404: \"Not Found\",\n\t405: \"Method Not Allowed\",\n\t406: \"Not Acceptable\",\n\t409: \"Conflict\",\n\t412: \"Precondition Failed\",\n\t422: \"Unprocessable Entity\",\n\t500: \"Internal Server Error\",\n\t502: \"Bad Gateway\",\n\t503: \"Service Unavailable\"\n}\n\nSTATUS_SUCCESS = [200, 201]\n\n\nJSON_TYPES = [\n\t\"application/json\",\n\t\"application/problem+json\"\n]\n\nXML_TYPES = [\n\t\"application/xml\",\n\t\"text/xml\"\n]\n\nTEXT_TYPES = [\n\t\"application/x-www-form-urlencoded\",\n\t\"text/plain\",\n\t\"text/html\",\n\t*JSON_TYPES,\n\t*XML_TYPES\n]\n\n\nclass HTTPError(Exception): pass\n\n\ndef format_date():\n\tnow = datetime.datetime.now(datetime.timezone.utc)\n\treturn now.strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n\t\n\t\ndef urlencode(data):\n\treturn urllib.parse.quote(data)\ndef urldecode(data):\n\treturn urllib.parse.unquote(data)\n\t\ndef formencode(data, url=True):\n\tfields = []\n\tfor key, value in data.items():\n\t\tif url:\n\t\t\tkey = urlencode(str(key))\n\t\t\tvalue = urlencode(str(value))\n\t\tfields.append(\"%s=%s\" %(key, value))\n\treturn \"&\".join(fields)\n\ndef formdecode(data, url=True):\n\tif not data: return {}\n\t\t\n\tfields = {}\n\tfor field in data.split(\"&\"):\n\t\tif not \"=\" in field:\n\t\t\traise HTTPError(\"Malformed form parameter\")\n\t\tkey, value = field.split(\"=\", 1)\n\t\tif url:\n\t\t\tkey = urldecode(key)\n\t\t\tvalue = urldecode(value)\n\t\tfields[key] = value\n\treturn fields\n\n\nclass HTTPMessage:\n\tdef __init__(self):\n\t\tself.version = \"HTTP/1.1\"\n\t\n\t\tself.headers = types.CaseInsensitiveDict()\n\t\tself.body = b\"\"\n\t\t\n\t\tself.text = 
None\n\t\t\n\t\tself.files = {}\n\t\tself.form = {}\n\t\tself.plainform = {}\n\t\tself.json = {}\n\t\tself.xml = None\n\t\t\n\t\tself.boundary = \"--------BOUNDARY--------\"\n\t\t\n\tdef check_version(self):\n\t\tif not self.version.startswith(\"HTTP/\"):\n\t\t\traise HTTPError(\"HTTP version must start with HTTP/\")\n\t\tif self.version != \"HTTP/1.1\":\n\t\t\traise HTTPError(\"HTTP version not supported\")\n\t\t\n\tdef transfer_encodings(self):\n\t\tencoding = self.headers.get(\"Transfer-Encoding\", \"identity\")\n\t\treturn [enc.strip() for enc in encoding.split(\",\")]\n\t\t\n\tdef is_chunked(self):\n\t\treturn \"chunked\" in self.transfer_encodings()\n\t\t\n\tdef finish_parsing(self):\n\t\tcontent_type = self.headers.get(\"Content-Type\", \"\")\n\t\tfields = content_type.split(\";\")\n\t\ttype = fields[0].strip()\n\t\t\n\t\tparam = {}\n\t\tfor field in fields[1:]:\n\t\t\tfield = field.strip()\n\t\t\tif not \"=\" in field:\n\t\t\t\traise HTTPError(\"Malformed directive in Content-Type header\")\n\t\t\t\n\t\t\tkey, value = field.split(\"=\", 1)\n\t\t\tparam[key] = value\n\t\t\n\t\tif type in TEXT_TYPES:\n\t\t\ttry:\n\t\t\t\tself.text = self.body.decode(param.get(\"charset\", \"UTF-8\"))\n\t\t\texcept UnicodeDecodeError:\n\t\t\t\traise HTTPError(\"Failed to decode HTTP body\")\n\t\t\n\t\tif type == \"application/x-www-form-urlencoded\":\n\t\t\tself.form = formdecode(self.text)\n\t\t\tself.plainform = formdecode(self.text, False)\n\t\t\n\t\tif type in JSON_TYPES:\n\t\t\ttry:\n\t\t\t\tself.json = json.loads(self.text)\n\t\t\texcept json.JSONDecodeError:\n\t\t\t\traise HTTPError(\"Failed to decode JSON body\")\n\t\t\t\t\n\t\tif type in XML_TYPES:\n\t\t\ttry:\n\t\t\t\tself.xml = xml.parse(self.text)\n\t\t\texcept ValueError as e:\n\t\t\t\traise HTTPError(\"Failed to decode XML body: %s\" %e)\n\t\t\n\tdef encode_body(self):\n\t\ttext = self.text\n\t\tbody = self.body\n\t\t\n\t\tif self.plainform:\n\t\t\tif \"Content-Type\" not in self.headers:\n\t\t\t\tself.headers[\"Content-Type\"] = \"application/x-www-form-urlencoded\"\n\t\t\ttext = formencode(self.plainform, False)\n\t\t\t\n\t\telif self.form:\n\t\t\tif \"Content-Type\" not in self.headers:\n\t\t\t\tself.headers[\"Content-Type\"] = \"application/x-www-form-urlencoded\"\n\t\t\ttext = formencode(self.form)\n\t\t\n\t\telif self.json:\n\t\t\tif \"Content-Type\" not in self.headers:\n\t\t\t\tself.headers[\"Content-Type\"] = \"application/json\"\n\t\t\ttext = json.dumps(self.json)\n\t\t\t\n\t\telif self.xml is not None:\n\t\t\tif \"Content-Type\" not in self.headers:\n\t\t\t\tself.headers[\"Content-Type\"] = \"application/xml\"\n\t\t\ttext = self.xml.encode()\n\t\t\t\n\t\telif self.files:\n\t\t\tif \"Content-Type\" not in self.headers:\n\t\t\t\tself.headers[\"Content-Type\"] = \"multipart/form-data\"\n\t\t\tself.headers[\"Content-Type\"] += \"; boundary=%s\" %self.boundary\n\t\t\t\n\t\t\ttext = None\n\t\t\tbody = b\"\"\n\t\t\tfor name, data in self.files.items():\n\t\t\t\tbody += b\"--%s\\r\\n\" %self.boundary.encode()\n\t\t\t\tbody += b\"Content-Disposition: form-data; name=\\\"%s\\\"\\r\\n\\r\\n\" %name.encode()\n\t\t\t\tbody += data + b\"\\r\\n\"\n\t\t\tbody += b\"--%s--\\r\\n\" %self.boundary.encode()\n\t\t\t\n\t\tif text is not None:\n\t\t\tif \"Content-Type\" not in self.headers:\n\t\t\t\tself.headers[\"Content-Type\"] = \"text/plain\"\n\t\t\tbody = text.encode()\n\t\t\n\t\tif body and \"Content-Type\" not in self.headers:\n\t\t\tself.headers[\"Content-Type\"] = \"application/octet-stream\"\n\t\t\n\t\tif self.is_chunked():\n\t\t\tif not 
body:\n\t\t\t\treturn b\"0\\r\\n\\r\\n\"\n\t\t\treturn b\"%x\\r\\n\" %len(body) + body + b\"\\r\\n0\\r\\n\\r\\n\"\n\t\telse:\n\t\t\tif body:\n\t\t\t\tself.headers[\"Content-Length\"] = len(body)\n\t\t\treturn body\n\t\n\tdef encode_start_line(self): return \"\"\n\t\n\tdef encode_headers(self):\n\t\tself.encode_body()\n\t\t\n\t\tlines = [self.encode_start_line()]\n\t\tfor key, value in self.headers.items():\n\t\t\tlines.append(\"%s: %s\" %(key, value))\n\t\t\n\t\ttext = \"\\r\\n\".join(lines) + \"\\r\\n\\r\\n\"\n\t\treturn text.encode()\n\t\n\tdef encode(self):\n\t\treturn self.encode_headers() + self.encode_body()\n\t\t\n\t@classmethod\n\tdef parse(cls, data):\n\t\tparser = HTTPParser(cls)\n\t\tparser.update(data)\n\t\t\n\t\tif not parser.complete():\n\t\t\traise HTTPError(\"HTTP message is incomplete\")\n\t\tif parser.buffer:\n\t\t\traise HTTPError(\"Got more data than expected\")\n\t\t\n\t\treturn parser.message\n\n\nclass HTTPRequest(HTTPMessage):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.method = \"GET\"\n\t\tself.path = \"/\"\n\t\t\n\t\tself.params = {}\n\t\tself.continue_threshold = 1024\n\t\t\n\t\tself.certificate = None\n\t\t\n\tdef finish_parsing(self):\n\t\tsuper().finish_parsing()\n\t\t\n\t\tif \"?\" in self.path:\n\t\t\tself.path, params = self.path.split(\"?\", 1)\n\t\t\tself.params = formdecode(params)\n\t\t\n\tdef encode_start_line(self):\n\t\tpath = self.path\n\t\tif self.params:\n\t\t\tpath += \"?\" + formencode(self.params)\n\t\treturn \"%s %s %s\" %(self.method, path, self.version)\n\t\t\n\tdef parse_start_line(self, line):\n\t\tfields = line.split(maxsplit=2)\n\t\tif len(fields) != 3:\n\t\t\traise HTTPError(\"Failed to parse HTTP request start line\")\n\t\t\n\t\tself.method = fields[0]\n\t\tself.path = fields[1]\n\t\tself.version = fields[2]\n\t\t\n\t\tself.check_version()\n\t\t\n\tdef encode_body(self):\n\t\tbody = super().encode_body()\n\t\tif self.continue_threshold is not None:\n\t\t\tif len(body) > self.continue_threshold:\n\t\t\t\tself.headers[\"Expect\"] = \"100-continue\"\n\t\treturn body\n\t\n\t@staticmethod\n\tdef build(method, path):\n\t\trequest = HTTPRequest()\n\t\trequest.method = method\n\t\trequest.path = path\n\t\treturn request\n\t\t\n\t@staticmethod\n\tdef get(path):\n\t\treturn HTTPRequest.build(\"GET\", path)\n\t\t\n\t@staticmethod\n\tdef post(path):\n\t\treturn HTTPRequest.build(\"POST\", path)\n\t\t\n\nclass HTTPResponse(HTTPMessage):\n\tdef __init__(self, status_code=500):\n\t\tsuper().__init__()\n\t\tself.status_code = status_code\n\t\tself.status_name = STATUS_NAMES.get(status_code, \"Unknown\")\n\t\t\n\tdef success(self):\n\t\treturn self.status_code in STATUS_SUCCESS\n\t\t\n\tdef error(self):\n\t\treturn not self.success()\n\t\n\tdef raise_if_error(self):\n\t\tif self.error():\n\t\t\traise HTTPError(\"HTTP request failed with status %i\" %self.status_code)\n\t\t\n\tdef encode_start_line(self):\n\t\treturn \"%s %i %s\" %(self.version, self.status_code, self.status_name)\n\t\t\n\tdef parse_start_line(self, line):\n\t\tfields = line.split(maxsplit=2)\n\t\tif len(fields) != 3:\n\t\t\traise HTTPError(\"Failed to parse HTTP response start line\")\n\t\t\n\t\tself.version = fields[0]\n\t\tself.check_version()\n\t\t\n\t\tif not fields[1].isdecimal():\n\t\t\traise HTTPError(\"HTTP response has invalid status code\")\n\t\t\n\t\tself.status_code = int(fields[1])\n\t\tself.status_name = fields[2]\n\t\t\n\nclass HTTPParser:\n\tdef __init__(self, cls):\n\t\tself.message = cls()\n\t\t\n\t\tself.buffer = b\"\"\n\t\tself.state = 
self.state_header\n\t\t\n\tdef complete(self): return self.state is None\n\tdef header_complete(self): return self.state != self.state_header\n\t\n\tdef update(self, data):\n\t\tself.buffer += data\n\t\twhile not self.state():\n\t\t\tpass\n\t\t\t\n\tdef finish(self):\n\t\tself.message.finish_parsing()\n\t\tself.state = None\n\t\treturn self.message\n\t\n\tdef state_header(self):\n\t\tif not b\"\\r\\n\\r\\n\" in self.buffer:\n\t\t\treturn True\n\t\t\n\t\theader, self.buffer = self.buffer.split(b\"\\r\\n\\r\\n\", 1)\n\t\t\n\t\ttry:\n\t\t\tlines = header.decode().splitlines()\n\t\texcept UnicodeDecodeError:\n\t\t\traise HTTPError(\"Failed to decode HTTP header\")\n\t\t\t\n\t\tif len(lines) == 0:\n\t\t\traise HTTPError(\"HTTP message must start with header line\")\n\t\t\n\t\tself.message.parse_start_line(lines[0])\n\t\t\n\t\tfor header in lines[1:]:\n\t\t\tif not \": \" in header:\n\t\t\t\traise HTTPError(\"Invalid line in HTTP headers\")\n\t\t\tkey, value = header.split(\": \", 1)\n\t\t\tself.message.headers[key] = value\n\t\t\n\t\tif self.message.is_chunked():\n\t\t\tself.state = self.state_chunk_header\n\t\t\treturn False\n\t\telif \"Content-Length\" in self.message.headers:\n\t\t\tif not self.message.headers[\"Content-Length\"].isdecimal():\n\t\t\t\traise HTTPError(\"Invalid Content-Length header\")\n\t\t\tself.state = self.state_body\n\t\t\treturn False\n\t\t\n\t\tself.finish()\n\t\treturn True\n\t\t\n\tdef state_chunk_header(self):\n\t\tif not b\"\\r\\n\" in self.buffer:\n\t\t\treturn True\n\t\t\t\n\t\tline, self.buffer = self.buffer.split(b\"\\r\\n\", 1)\n\t\ttry:\n\t\t\tline = line.decode()\n\t\texcept UnicodeDecodeError:\n\t\t\traise HTTPError(\"Failed to decode chunk length\")\n\n\t\tif not util.is_hexadecimal(line):\n\t\t\traise HTTPError(\"Invalid HTTP chunk length\")\n\t\t\n\t\tself.chunk_length = int(line, 16)\n\t\t\n\t\tself.state = self.state_chunk_body\n\t\treturn False\n\t\t\n\tdef state_chunk_body(self):\n\t\tif len(self.buffer) < self.chunk_length + 2:\n\t\t\treturn True\n\t\t\t\n\t\tif self.buffer[self.chunk_length : self.chunk_length + 2] != b\"\\r\\n\":\n\t\t\traise HTTPError(\"HTTP chunk should be terminated with \\\\r\\\\n\")\n\t\t\n\t\tself.message.body += self.buffer[:self.chunk_length]\n\t\t\n\t\tself.buffer = self.buffer[self.chunk_length + 2:]\n\t\t\n\t\tif self.chunk_length == 0:\n\t\t\tself.finish()\n\t\t\treturn True\n\t\t\t\n\t\tself.state = self.state_chunk_header\n\t\treturn False\n\t\t\n\tdef state_body(self):\n\t\tlength = int(self.message.headers[\"Content-Length\"])\n\t\tif len(self.buffer) < length:\n\t\t\treturn True\n\t\t\t\n\t\tself.message.body = self.buffer[:length]\n\t\tself.buffer = self.buffer[length:]\n\t\tself.finish()\n\t\treturn True\n\n\nclass HTTPClient:\n\tdef __init__(self, sock):\n\t\tself.sock = sock\n\t\tself.buffer = b\"\"\n\t\n\tasync def send(self, data):\n\t\tawait self.sock.send(data)\n\t\n\tasync def recv(self):\n\t\tif self.buffer:\n\t\t\tbuffer = self.buffer\n\t\t\tself.buffer = b\"\"\n\t\t\treturn buffer # return the saved bytes; self.buffer was just cleared\n\t\treturn await self.sock.recv()\n\t\t\n\tasync def close(self):\n\t\tawait self.sock.close()\n\t\n\tasync def abort(self):\n\t\tawait self.sock.abort()\n\t\n\tasync def request(self, req):\n\t\tlogger.debug(\"Sending HTTP request headers\")\n\t\tawait self.send(req.encode_headers())\n\t\t\n\t\tif req.headers.get(\"Expect\") == \"100-continue\":\n\t\t\tresponse = await self.receive_response()\n\t\t\tif response.status_code != 100:\n\t\t\t\traise HTTPError(\"Expected 100-continue 
response\")\n\t\t\n\t\tlogger.debug(\"Sending HTTP request body\")\n\t\tawait self.send(req.encode_body())\n\t\tresponse = await self.receive_response()\n\t\t\n\t\treturn response\n\t\t\t\n\tasync def receive_response(self):\n\t\tparser = HTTPParser(HTTPResponse)\n\t\twhile not parser.complete():\n\t\t\tdata = await self.recv()\n\t\t\tparser.update(data)\n\t\tself.buffer += parser.buffer\n\t\treturn parser.message\n\t\n\tdef local_address(self):\n\t\treturn self.sock.local_address()\n\tdef remote_address(self):\n\t\treturn self.sock.remote_address()\n\t\t\n\t\t\nclass HTTPServerClient:\n\tdef __init__(self, handler, client):\n\t\tself.handler = handler\n\t\tself.client = client\n\t\n\tasync def process(self):\n\t\ttry:\n\t\t\tparser = HTTPParser(HTTPRequest)\n\t\t\twhile not parser.header_complete():\n\t\t\t\tdata = await self.client.recv()\n\t\t\t\tparser.update(data)\n\t\t\t\n\t\t\tif parser.message.headers.get(\"Expect\") == \"100-continue\":\n\t\t\t\tawait self.client.send(HTTPResponse(100).encode())\n\t\t\t\n\t\t\twhile not parser.complete():\n\t\t\t\tdata = await self.client.recv()\n\t\t\t\tparser.update(data)\n\t\t\t\n\t\t\trequest = parser.message\n\t\t\trequest.certificate = self.client.remote_certificate()\n\t\t\t\n\t\t\tresponse = await self.handle_request(request)\n\t\t\tawait self.client.send(response.encode())\n\t\texcept Exception:\n\t\t\tlogger.exception(\"Failed to process HTTP request\")\n\t\n\tasync def handle_request(self, request):\n\t\tlogger.info(\"Received HTTP request: %s %s\", request.method, request.path)\n\t\t\n\t\ttry:\n\t\t\tresponse = await self.handler(request)\n\t\t\tif not isinstance(response, HTTPResponse):\n\t\t\t\tlogger.error(\"HTTP handler must return HTTPResponse\")\n\t\t\t\tresponse = HTTPResponse(500)\n\t\texcept Exception:\n\t\t\tlogger.exception(\"HTTP handler raised an exception\")\n\t\t\tresponse = HTTPResponse(500)\n\t\t\n\t\tlogger.info(\"Sending HTTP response (%i)\", response.status_code)\n\t\treturn response\n\n\n@contextlib.asynccontextmanager\nasync def connect(host, port, context=None):\n\tasync with tls.connect(host, port, context) as client:\n\t\tyield HTTPClient(client)\n\nasync def request(req, context = None):\n\tif \"Host\" not in req.headers:\n\t\traise ValueError(\"HTTP request requires Host header\")\n\t\n\tlogger.info(\"Performing HTTP request: %s %s\", req.method, req.path)\n\t\n\thost = req.headers[\"Host\"]\n\tport = 443 if context else 80\n\t\n\tif \":\" in host:\n\t\thost, port = host.split(\":\", 1)\n\t\tif not port.isdecimal():\n\t\t\traise ValueError(\"HTTP request has invalid Host header\")\n\t\tport = int(port)\n\t\t\n\tlogger.info(\"Establishing HTTP connection with %s:%i\", host, port)\n\t\n\tasync with connect(host, port, context) as client:\n\t\tresponse = await client.request(req)\n\t\n\tlogger.info(\"Received HTTP response: %i\", response.status_code)\n\treturn response\n\nasync def get(url, headers={}, context=None):\n\tif \"://\" in url:\n\t\tscheme, url = url.split(\"://\", 1)\n\t\tif scheme == \"http\":\n\t\t\tcontext = None\n\t\telif scheme == \"https\":\n\t\t\tif context is None:\n\t\t\t\tcontext = tls.TLSContext()\n\t\t\t\tcontext.load_default_authorities()\n\t\telse:\n\t\t\traise ValueError(\"Invalid HTTP url scheme: %s\" %scheme)\n\t\n\tif \"/\" in url:\n\t\thost, path = url.split(\"/\", 1)\n\telse:\n\t\thost = url\n\t\tpath = \"/\"\n\t\n\treq = HTTPRequest.get(\"/\" + path)\n\treq.headers = types.CaseInsensitiveDict(headers)\n\treq.headers[\"Host\"] = host\n\treturn await request(req, 
context)\n\n@contextlib.asynccontextmanager\nasync def serve(handler, host=\"\", port=0, context=None):\n\tasync def handle(client):\n\t\thost, port = client.remote_address()\n\t\tlogger.debug(\"New HTTP connection: %s:%i\", host, port)\n\t\t\n\t\tclient = HTTPServerClient(handler, client)\n\t\tawait client.process()\n\t\n\tlogger.info(\"Starting HTTP server at %s:%i\", host, port)\n\tasync with tls.serve(handle, host, port, context):\n\t\tyield\n\tlogger.info(\"HTTP server is closed\")\n","sub_path":"nintendo/common/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":14564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"622892073","text":"# 3rd party\nfrom wagtail.core import blocks\nfrom wagtail.embeds import embeds\nfrom wagtail.embeds.blocks import EmbedBlock as WagtailEmbedBlock\nfrom wagtail.documents.blocks import DocumentChooserBlock\nfrom wagtail.contrib.table_block.blocks import TableBlock as OGTableBlock\nfrom wagtail.images.blocks import ImageChooserBlock\n\nfrom wagtailnhsukfrontend.blocks import ( # NOQA\n ImageBlock, PanelBlock, ExpanderBlock, GreyPanelBlock, InsetTextBlock,\n PanelListBlock, WarningCalloutBlock, FlattenValueContext, ActionLinkBlock\n)\n\n\nclass BasePromoBlock(FlattenValueContext, blocks.StructBlock):\n\n class Meta:\n icon = 'pick'\n template = 'wagtailnhsukfrontend/promo.html'\n\n link_page = blocks.PageChooserBlock(required=False, label=\"Page\")\n url = blocks.URLBlock(label=\"URL\", required=False)\n heading = blocks.CharBlock(required=True)\n description = blocks.CharBlock(required=False)\n content_image = ImageChooserBlock(label=\"Image\", required=False)\n alt_text = blocks.CharBlock(required=False)\n\n def get_context(self, value, parent_context=None):\n context = super().get_context(value, parent_context=parent_context)\n page = value.get('link_page', '')\n if page is not None:\n url = page.url\n else:\n url = value.get('url', '')\n\n context['url'] = url\n return context\n\n\nclass PromoBlock(BasePromoBlock):\n\n class Meta:\n template = 'wagtailnhsukfrontend/promo.html'\n\n size = blocks.ChoiceBlock([\n ('', 'Default'),\n ('small', 'Small'),\n ], required=False)\n\n heading_level = blocks.IntegerBlock(\n min_value=2,\n max_value=4,\n default=3,\n help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=4.'\n )\n\n\nclass PromoGroupBlock(FlattenValueContext, blocks.StructBlock):\n\n class Meta:\n template = 'wagtailnhsukfrontend/promo_group.html'\n\n column = blocks.ChoiceBlock([\n ('one-half', 'One-half'),\n ('one-third', 'One-third'),\n ], default='one-half', required=True)\n\n size = blocks.ChoiceBlock([\n ('', 'Default'),\n ('small', 'Small'),\n ], required=False)\n\n def get_context(self, value, parent_context=None):\n context = super().get_context(value, parent_context)\n context['num_columns'] = {\n 'one-half': 2,\n 'one-third': 3,\n }[value['column']]\n return context\n\n heading_level = blocks.IntegerBlock(\n min_value=2,\n max_value=4,\n default=3,\n help_text='The heading level affects users with screen readers. 
Default=3, Min=2, Max=4.'\n )\n\n promos = blocks.ListBlock(BasePromoBlock)\n\n\nclass TableBlock(OGTableBlock):\n\n class Meta:\n template = 'core/blocks/table.html'\n\n\nclass PanelTableBlock(blocks.StructBlock):\n\n class Meta:\n template = 'core/blocks/panel_table.html'\n\n title = blocks.CharBlock()\n table = TableBlock()\n\n\nclass EmbedBlock(WagtailEmbedBlock):\n\n \"\"\"Overriding the built-in Wagtail embed so that we can have proper\n responsive markup.\n \"\"\"\n\n class Meta:\n template = 'core/blocks/embed.html'\n\n def get_context(self, value, parent_context=None):\n context = super().get_context(value, parent_context=parent_context)\n embed_url = getattr(value, 'url', None)\n if embed_url:\n embed = embeds.get_embed(embed_url)\n context['embed_html'] = embed.html\n context['embed_url'] = embed_url\n context['ratio'] = embed.ratio\n\n return context\n\n\nclass CaptionedEmbedBlock(blocks.StructBlock):\n\n \"\"\"An embed block with an optional title and sub-title caption.\n \"\"\"\n\n class Meta:\n template = 'core/blocks/captioned_embed.html'\n\n embed = EmbedBlock()\n title = blocks.CharBlock(required=False)\n sub_title = blocks.CharBlock(required=False)\n\n\nclass LinkStructBlockMixin(object):\n def get_context(self, value, parent_context=None):\n context = super().get_context(value, parent_context=parent_context)\n\n context['value']['url'] = self.get_url(value['link'])\n return context\n\n def get_url(self, value):\n\n if value['link_page']:\n return value['link_page'].url\n\n elif value['link_document']:\n return value['link_document'].file\n\n elif value['link_external']:\n return value['link_external']\n\n return None\n\n\nclass LinkFields(blocks.StructBlock):\n link_page = blocks.PageChooserBlock(required=False, label=\"Page\")\n link_document = DocumentChooserBlock(required=False, label=\"Document\")\n link_external = blocks.URLBlock(required=False, label=\"URL\")\n\n\n# The mixin must come first so its get_context takes precedence in the MRO.\nclass LinkBlock(LinkStructBlockMixin, blocks.StructBlock):\n label = blocks.CharBlock(required=False)\n link = LinkFields(required=False, label=\"Link to (choose one)\")\n\n\nclass NHSXExpanderBody(ExpanderBlock.BodyStreamBlock):\n table = TableBlock()\n\n\nclass NHSXExpanderBlock(ExpanderBlock):\n body = NHSXExpanderBody(required=True)\n\n\nblog_link_blocks = [\n ('link', blocks.PageChooserBlock(required=True, label=\"Page\", page_type=\"blog_posts.BlogPost\")),\n]\n\n\nnews_link_blocks = [\n ('link', blocks.PageChooserBlock(required=True, label=\"Page\", page_type=\"news.News\")),\n]\n\n\npage_link_blocks = [\n ('link', LinkBlock()),\n]\n\n\ncontent_blocks = [\n ('rich_text', blocks.RichTextBlock(group=\" Content\")),\n ('block_quote', blocks.BlockQuoteBlock(group=\" Content\")),\n ('embed', EmbedBlock(group=\" Content\")),\n ('captioned_embed', CaptionedEmbedBlock(group=\" Content\")),\n]\n\nnhs_blocks = [\n ('image', ImageBlock(group=\" NHS Components\")),\n ('panel', PanelBlock(group=\" NHS Components\")),\n ('promo', PromoBlock(group=\" NHS Components\")),\n ('expander', NHSXExpanderBlock(group=\" NHS Components\")),\n ('grey_panel', GreyPanelBlock(group=\" NHS Components\")),\n ('inset_text', InsetTextBlock(group=\" NHS Components\")),\n ('panel_list', PanelListBlock(group=\" NHS Components\")),\n ('promo_group', PromoGroupBlock(group=\" NHS Components\")),\n ('warning_callout', WarningCalloutBlock(group=\" NHS Components\")),\n ('table', TableBlock(group=\" NHS Components\")),\n ('panel_table', PanelTableBlock(group=\" NHS Components\")),\n ('action_link', 
ActionLinkBlock(group=\" NHS Components\")),\n]\n\nnhsx_blocks = content_blocks + nhs_blocks\n","sub_path":"app/modules/core/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"97350859","text":"import logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nfrom PyQt5 import QtWidgets, QtCore\nfrom ..mixins import ToolWindow\nfrom ....core.instrument.instrument import Instrument\nfrom ....core.services import FileSequence\nfrom .fsnselector_ui import Ui_Form\nfrom sastool.classes2 import Exposure\n\n\nclass FSNSelector(QtWidgets.QWidget, Ui_Form, ToolWindow):\n FSNSelected = QtCore.pyqtSignal(int, 'QString', Exposure)\n\n def __init__(self, *args, **kwargs):\n credo = kwargs.pop('credo')\n self.horizontal = kwargs.pop('horizontal', False)\n QtWidgets.QWidget.__init__(self, *args, **kwargs)\n self.setupToolWindow(credo)\n self._fsconnections = []\n self.setupUi(self)\n\n def setupUi(self, Form):\n Ui_Form.setupUi(self, Form)\n if self.horizontal:\n self.hlayout = QtWidgets.QHBoxLayout()\n self.hlayout.setContentsMargins(0, 0, 0, 0)\n self.hlayout.addWidget(self.label)\n self.hlayout.addWidget(self.prefixComboBox)\n self.hlayout.addWidget(self.label_2)\n self.hlayout.addWidget(self.FSNSpinBox)\n self.hlayout.addWidget(self.buttonContainer)\n self.hlayout.addSpacerItem(\n QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Minimum))\n del self.formLayout\n QtWidgets.QWidget().setLayout(self.layout()) # an ugly trick to get rid of the original layout.\n self.setLayout(self.hlayout)\n self.prefixComboBox.clear()\n assert isinstance(self.credo, Instrument)\n fs = self.credo.services['filesequence']\n assert isinstance(fs, FileSequence)\n self.prefixComboBox.addItems(sorted(fs.get_prefixes()))\n self.prefixComboBox.setCurrentIndex(0)\n self.prefixComboBox.currentIndexChanged.connect(self.onPrefixChanged)\n self._fsconnections = [fs.connect('lastfsn-changed', self.onLastFSNChanged)]\n self.FSNSpinBox.valueChanged.connect(self.onFSNSpinBoxValueChanged)\n self.gotoLastPushButton.clicked.connect(self.onGotoLast)\n self.gotoFirstPushButton.clicked.connect(self.onGotoFirst)\n self.reloadPushButton.clicked.connect(self.onReload)\n self.onPrefixChanged()\n\n def onGotoFirst(self):\n self.FSNSpinBox.setValue(self.FSNSpinBox.minimum())\n self.onReload()\n\n def onGotoLast(self):\n self.FSNSpinBox.setValue(self.FSNSpinBox.maximum())\n self.onReload()\n\n def onReload(self):\n self.onFSNSpinBoxValueChanged()\n\n def setFSN(self, fsn:int):\n if self.FSNSpinBox.value() != fsn:\n self.FSNSpinBox.setValue(fsn)\n self.onReload()\n\n def setPrefix(self, prefix:str):\n if self.prefixComboBox.currentText() != prefix:\n self.prefixComboBox.setCurrentIndex(self.prefixComboBox.findText(prefix))\n\n def onFSNSpinBoxValueChanged(self):\n fs = self.credo.services['filesequence']\n assert isinstance(fs, FileSequence)\n try:\n exposure = fs.load_exposure(self.prefixComboBox.currentText(), self.FSNSpinBox.value())\n self.FSNSelected.emit(self.FSNSpinBox.value(), self.prefixComboBox.currentText(), exposure)\n del exposure\n except FileNotFoundError as fnfe:\n QtWidgets.QMessageBox.critical(self.window(), 'Error while loading exposure', 'Cannot load exposure {} #{}: cannot find file {}'.format(self.prefixComboBox.currentText(), self.FSNSpinBox.value(), fnfe.filename),)\n\n def onLastFSNChanged(self, filesequence, prefix, lastfsn):\n try:\n 
if prefix != self.prefixComboBox.currentText():\n return False\n except RuntimeError:\n self.cleanup()\n self.close()\n self.FSNSpinBox.setMaximum(lastfsn)\n return False\n\n def onPrefixChanged(self):\n self.FSNSpinBox.setMinimum(0)\n self.FSNSpinBox.setMaximum(self.credo.services['filesequence'].get_lastfsn(self.prefixComboBox.currentText()))\n\n def cleanup(self):\n logger.debug('FSNselector cleanup called')\n for c in self._fsconnections:\n self.credo.services['filesequence'].disconnect(c)\n self._fsconnections = []\n super().cleanup()\n\n# class FSNSelectorHorizontal(FSNSelector):\n# horizontal = True\n\n\n# FSNSelectorVertical = FSNSelector\n","sub_path":"cct/qtgui/core/fsnselector/fsnselector.py","file_name":"fsnselector.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"509726882","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n27. Removing internal links\n\nIn addition to the processing from problem 26, remove the MediaWiki internal-link markup from the template values and convert them to plain text (see the markup quick-reference table).\n\n\"\"\"\n\nimport re\nimport fileinput\n\nif __name__ == '__main__':\n\n # p1 = re.compile(r\"\\[\\[([^\\[\\]]+)\\]\\]\")\n p1 = re.compile(r\".*?\\[\\[(.+)\\]\\]\")\n p2 = re.compile(r\".*?\\[\\[[^\\[\\]]+\\|(.+?)\\]\\]\")\n\n for line in fileinput.input(\"-\"):\n\n r = re.match(p1, line)\n if r:\n r1 = re.match(p2, r.group())\n if r1:\n print(r1.group(1))\n else:\n print(r.group(1))\n # r1 = p2.sub(\"\\1\", line.rstrip())\n # r = p1.sub(\"\\1\", r1)\n\n # print(line)\n\n\n\n # result = {}\n # for line in file.split(\"\\n\"):\n # m = re.match(\"(?:.*\\|)?(.+)\\s=\\s(.+)\", line)\n # tmp1 = \"\"\n # tmp2 = \"\"\n # if m:\n # m2 = re.match(r\"[^\\[\\]]*\", m.group(1))\n # if m2:\n # tmp1 = m2.group()\n #\n # print(m.group(2))\n # m2 = re.match(r\"(.[^\\[\\]'])*\", m.group(2))\n # if m2:\n # tmp2 = m2.group()\n # print(m2.group())\n #\n # result.update({tmp1: tmp2})\n #\n # for key, obj in result.items():\n # print(\"{0}, {1}\".format(key, obj))\n\n\n\n","sub_path":"02/27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"218598094","text":"from chatterbot import ChatBot\nfrom gtts import gTTS as gt\nimport os\nfrom googletrans import Translator\nimport speech_recognition as sr\nfrom polyglot.transliteration import Transliterator\nfrom chatterbot.trainers import ListTrainer\nfrom pocketsphinx import LiveSpeech\n\ntr = Translator()\nr = sr.Recognizer()\nr.energy_threshold = 270\ndef speak(audioString, langOp):\n\tprint(\"Speak: \", audioString)\n\ttts = gt(text=audioString, lang=langOp)\n\ttts.save(\"audio_files/audio.mp3\")\n\tos.system(\"mpg321 audio_files/audio.mp3 -q\")\n\ndef botTrainer(name):\n\tbot = ChatBot(name)\n\ttrainers = ListTrainer(bot)\n\n\tfor files in os.listdir(\"chatterbot_corpus/data/hindi\"):\n\t\tdata = open(\"chatterbot_corpus/data/hindi/\" + files, 'r').readlines()\n\t\ttrainers.train(data)\n\treturn bot\n\ndef recordAudio():\n\twith sr.Microphone() as source:\n\t\tr.adjust_for_ambient_noise(source, duration=2)\n\t\t#print(\"main suun rahi hoon!\")\n\n\t\tdata = \"\"\n\t\twhile 1:\n\t\t\ttry:\n\t\t\t\tspeak(translateLang(\"I am listening\", \"hi\"), \"hi\")\n\t\t\t\taudio = r.listen(source)\n\t\t\t\tdata = r.recognize_google(audio)\n\t\t\t\tprint(\"You said: \" + data)\n\t\t\t\treturn data\n\t\t\texcept sr.UnknownValueError:\n\t\t\t\tspeak(\"Main apko samjhi nahi\", \"hi\")\n\t\t\t\tprint(\"Google Speech Recognition 
could not understand audio\")\n\ndef translateLang(audioString, toLang):\n\tif(toLang == \"hi\"):\n\t\t#print(\"\")\n\t\tdata = tr.translate(audioString, dest='hi').text\n\t\tprint(\"translateLang: \",data)\n\t\treturn data\t\n\n\telif(toLang == \"en\"):\n\t\t#print(\"\")\n\t\tdata = tr.translate(audioString, dest='en').text\n\t\tprint(\"translateLang: \",data)\n\t\treturn data\t\n\n\telse:\n\t\tprint(\"Ambiguous Language Translation Required, Exiting!\")\n\t\texit()\n\ndef transliterationLang(textString):\n\ttrl = Transliterator(source_lang=\"en\", target_lang=\"hi\")\n\tdata = trl.transliterate(textString)\n\tprint(\"transliterationLang: \",data)\n\treturn data\n'''\nname = input(\"What will be the name for your bot? \")\nmyBot = botTrainer(name)\ninput1 = translateLang(\"aap kaise hai\", \"hi\")\nprint(\"INPUT1: \", input1)\nreply = myBot.get_response(input1).text\nprint(\"REPLY: \", reply)\nspeak(translateLang(reply, \"hi\"),\"hi\")\n'''\n#speak(translateLang(\"namaskar\", \"en\"), \"en-us\")\nname = input(\"What will be the name for your bot? \")\nmyBot = botTrainer(name)\nwhile 1:\n\taudioString = recordAudio()\n\tinput1 = translateLang(audioString, \"hi\")\n\treply = myBot.get_response(input1).text\n\tspeak(reply, \"hi\")\n\n","sub_path":"HindiChatterBot.py","file_name":"HindiChatterBot.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"45423957","text":"from redis_db import pool\nimport redis\n\nr=redis.Redis(\n connection_pool=pool\n)\ntry:\n r.sadd(\"employee\",8001,8002,8003)\n r.srem(\"employee\",8001)\n result=r.smembers(\"employee\")\n for one in result:\n print(one.decode(\"utf-8\")) # the \"utf-8\" codec name must be quoted\n r.zadd(\"keyword\",{\"kiki\":0,\"lisa\":0,\"apple\":0}) # note: zadd takes a mapping here, unlike the raw Redis command!!\n r.zincrby(\"keyword\",\"10\",\"kiki\")\n result=r.zrevrange(\"keyword\",0,-1)\n for one in result:\n print(one.decode(\"utf-8\")) # the \"utf-8\" codec name must be quoted\nexcept Exception as e:\n print(e)\nfinally:\n del r\n","sub_path":"use_redis/example_set&zset.py","file_name":"example_set&zset.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"388255589","text":"# implementation of card game - Memory\n\nimport simpleguitk as simplegui, random\nfrom math import floor\n\nexposed = []\ncards = [] \n# helper function to initialize globals\ndef new_game():\n global cardNumbers, exposed, state, cards\n # create shuffled list of 2×8 numbers\n state = 0\n l1, l2 = [], []\n for i in range(8):\n l1.append(i)\n l2.append(i)\n cardNumbers = l1+l2 # concatenate the 2 lists\n random.shuffle(cardNumbers) # shuffle the concatenated list\n \n # clear exposed cards\n exposed = []\n\n x1,y1,x2,y2 = 0,0,50,100\n for I in cardNumbers:\n cards.append([[x1, 0], [x2, 0], [x2,100], [x1,100], [x1,0], 2, 'silver', 'green',I])\n x1 += 50\n x2 += 50\n\n \n\ndef mouseclick(pos):\n global state, exposed, card\n click1, click2 = 0,0\n mpos = floor(pos[0] / 50)\n # if click, run the following\n \n for i in range(len(cardNumbers)):\n if mpos == i:\n print('Mouse:', mpos, 'int:', i, 'cardnum:', cardNumbers[i])\n cardNum = cardNumbers[i]\n \n print(card)\n\n # for card in cards: \n # # print(pos[0]/50,math.floor(pos[0]/50))\n\n # if math.floor(pos[0] / 50) == card[6]:\n # print('card',card[6] ,'found')\n # print(card[5])\n # card[5] = ''\n\n if state == 0:\n state = 1\n print('state', state)\n exposed.append(cardNum)\n elif state == 1:\n state = 2\n 
print('state', state)\n exposed.append(cardNum)\n if exposed[0] == exposed[1]:\n print('\\n\\nScore')\n else:\n state = 1\n print('state', state)\n exposed = []\n exposed.append(cardNum)\n\n print('exposed', exposed)\n\n\ndef draw(canvas):\n global card\n num_x = 15\n x1,y1,x2,y2 = 0,0,50,100\n for num in cardNumbers:\n canvas.draw_text(num, [num_x, 70], 24, \"White\")\n num_x += 50\n\n for card in cards:\n # cards.append([[x1, 0], [x2, 0], [x2,100], [x1,100], [x1,0], 2, 'silver', 'green', i])\n # 0 1 2 3 4 5 6 7 8\n\n canvas.draw_polygon([card[0], card[1], card[2], card[3], card[4]], card[5], card[6], card[7])\n\n if card[8] in exposed: # card[8] holds the card's number; exposed stores numbers\n card[7] = ''\n\n\n # canvas.draw_polygon([[x1, 0], [x2, 0], [x2,100], [x1,100], [x1,0]], 2, 'silver', 'green')\n # x1 += 50\n # x2 += 50\n\n# create frame and add a button and labels\nframe = simplegui.create_frame(\"Memory\", 800, 100)\nframe.add_button(\"Reset\", new_game)\nlabel = frame.add_label(\"Turns = 0\")\n\n# register event handlers\nframe.set_mouseclick_handler(mouseclick)\nframe.set_draw_handler(draw)\n\n# get things rolling\nnew_game()\nframe.start()","sub_path":"Applications/Memory/Resources/Memory v2.0.py","file_name":"Memory v2.0.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"26229588","text":"# Purpose: simulate a client that connects to server_host:server_port and requests a recommendation list\n# Launch arguments: two\n# - server_host\n# - server_port\nimport socket\nimport sys\nimport pickle\n\ndef getArgs():\n argv = sys.argv[1:]\n return argv\n\ndef init_socket(host,port):\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((host,port))\n return client_socket\n\ndef send_data(cs, datas):\n byteStream = pickle.dumps(datas)\n length = len(byteStream)\n byteStream = bytes(f\"{length:<16}\", 'utf-8')+byteStream\n cs.sendall(byteStream)\n\ndef recv_data(cs):\n msg = cs.recv(1024)\n length = int(msg[:16])\n full_msg = b''\n full_msg += msg[16:]\n nowsize = len(full_msg)\n while nowsize < length:\n more = cs.recv(length - nowsize)\n full_msg = full_msg + more\n nowsize += len(more)\n return pickle.loads(full_msg)\n\nif __name__==\"__main__\":\n argv = getArgs()\n client_socket = init_socket(argv[0],int(argv[1]))\n while True:\n data = input().strip()\n send_data(client_socket, data)\n response = recv_data(client_socket)\n print(f\"Recommend_List:[userid={data}]\")\n for i, movie_name in enumerate(response):\n print(f\"[Movie-{i+1:<6}: {movie_name}]\")\n \n\n","sub_path":"code/server-client/recommend_client.py","file_name":"recommend_client.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"539954909","text":"from django.http import JsonResponse\nfrom django.http import HttpResponseNotAllowed\nfrom .models import User\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import Q\nfrom rest_framework import viewsets\nfrom .serializers import UserSerializer\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.cache import cache_page\n\n@login_required\ndef profile(request, prof_id):\n if request.method == \"GET\":\n profile = User.objects.values('id', 'username', 'nick')\n profile = get_object_or_404(profile, id=prof_id)\n return JsonResponse({'profile': profile})\n return 
HttpResponseNotAllowed(['GET'])\n\n@cache_page(60)\n@login_required\ndef contacts(request):\n if request.method == \"GET\":\n return JsonResponse({'contacts': 'test'})\n return HttpResponseNotAllowed(['GET'])\n\n@login_required\ndef search_profile(request, nick):\n if request.method == \"GET\":\n users = User.objects.filter(\n Q(nick__icontains=nick)|\n Q(last_name__icontains=nick)|\n Q(first_name__icontains=nick)\n ).values('nick')\n return JsonResponse({'users': list(users)})\n return HttpResponseNotAllowed(['GET'])\n\nclass UserViewSet(viewsets.ModelViewSet):\n\n serializer_class = UserSerializer\n queryset = User.objects.all()\n\n @action(detail=True, methods=['GET'])\n def profile(self, request, pk):\n users = self.get_queryset()\n profile = get_object_or_404(users, id=pk)\n serializer_class = self.get_serializer_class()\n serializer = serializer_class(profile, many=False)\n return Response({'profile': serializer.data})\n\n @cache_page(60)\n @action(detail=False, methods=['GET'])\n def contacts(self, request):\n users = self.get_queryset()\n serializer_class = self.get_serializer_class()\n serializer = serializer_class(users, many=True)\n return Response({'contacts': serializer.data})\n\n @action(detail=True, methods=['GET'])\n def search_profile(self, request, pk):\n users = self.get_queryset()\n profiles = User.objects.filter(\n Q(nick__icontains=pk)|\n Q(last_name__icontains=pk)|\n Q(first_name__icontains=pk)\n )\n serializer_class = self.get_serializer_class()\n serializer = serializer_class(profiles, many=True)\n return Response({'profiles': serializer.data})\n","sub_path":"messenger/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"124241294","text":"import discord\nimport platform\nfrom discord.ext import commands\nfrom configs.config import bot_discord_token\n\n# Load all of the extensions here.\nstartupExtensions = [\n 'gears.Greetings'\n]\nbot = commands.Bot(command_prefix='!', description=\"main bot commands.\")\nfor extension in startupExtensions:\n bot.load_extension(extension)\n\n\n# Main commands go here.\n@bot.event\nasync def on_ready():\n python_version = \"Python \" + platform.python_version()\n activity = discord.Activity(name=python_version, type=discord.ActivityType.playing)\n print('Successfully authenticated. 
Username: {}, User ID: {}'.format(bot.user.name, bot.user.id))\n print('--------------------------------------------')\n await bot.change_presence(status=discord.Status.online, activity=activity)\n\nbot.run(bot_discord_token)\n","sub_path":"mainDriver.py","file_name":"mainDriver.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"487871274","text":"def check(seen):\r\n for i in seen:\r\n if i == 0:\r\n return False\r\n return True\r\n\r\nf = open('A-large.in', 'r')\r\nfo = open('CSlarge.out', 'w')\r\nt = int(f.readline())\r\nfor i in range(t):\r\n seen = listofzeros = [0] * 10\r\n n = int(f.readline())\r\n j=1\r\n if n != 0:\r\n while(not check(seen)):\r\n k = n*j\r\n for l in str(k):\r\n seen[int(l)] = 1\r\n j+=1\r\n fo.write(\"Case #%d: %d\\n\"%((i+1), n*(j-1)))\r\n else:\r\n fo.write(\"Case #%d: INSOMNIA\\n\"%((i+1)))\r\nf.close()\r\nfo.close()\r\n","sub_path":"codes/CodeJamCrawler/16_0_1/JLeow00/CountingSheep.py","file_name":"CountingSheep.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"157506544","text":"from django.shortcuts import render, redirect, HttpResponse\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage\nfrom .form import SaveForm\nfrom .models import Produto\n\ndef lista(request):\n produtos_lista = Produto.objects.all()\n paginacao = Paginator(produtos_lista, 7) \n\n # Show 7 products per page\n # Make sure page request is an int. If not, deliver first page.\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1\n\n # If the page request (e.g. 9999) is out of range, deliver the last page.\n try:\n produtos = paginacao.page(page)\n except (EmptyPage, InvalidPage):\n produtos = paginacao.page(paginacao.num_pages)\n return render(request,\"produto/list.html\",{'produtos':produtos})\n\ndef form(request):\n form = SaveForm(request.POST or None)\n if request.method == 'POST':\n if form.is_valid():\n p = Produto()\n p.descricao = request.POST.get('descricao')\n p.valor = request.POST.get('valor')\n p.save()\n return render(request, \"produto/form.html\", {'form':form})\n\n\ndef save(request):\n form = SaveForm(request.POST)\n if form.is_valid():\n form.save()\n return render(request, \"produto/list.html\" ,{'msg': 'Produto Salvo'}) \n else:\n return render(request, \"produto/list.html\" ,{'msg': 'Formulário Invalido'})\n \n\n\n","sub_path":"produto/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"449669183","text":"import pathlib\nimport collections\n\npath = '.'\n\ndef getFirstLine(f):\n ''' Get the first line from file, for CSV it is the header line ''' \n fileHandler = open (f, \"r\")\n line = fileHandler.readline()\n fileHandler.close()\n return line\n\n\n######## Main #########\n\nCT = collections.Counter()\n\nfor tsv_file in pathlib.Path(path+'/tsv').glob('*.tsv'):\n header = getFirstLine(tsv_file)\n columns = header.split('\\t')\n print('{0} columns in {1}'.format(len (columns),tsv_file) )\n for c in columns: \n CT[c.rstrip('\\n')] += 1\n \nL = list (CT)\n \nprint ('Total columns {0}'.format (len (L)))\nprint (L)\n\n# save header to file \nfileHandler = open (path + '/header.txt', \"w\")\n
fileHandler.write('\\n'.join(L))\nfileHandler.close()","sub_path":"header.py","file_name":"header.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"78121795","text":"\nimport os\nimport pygame\nimport sys\nimport pyglet.clock\nfrom nave import NaveJugador\nfrom nodriza import nodriza\nfrom backgroung import Background\nfrom font import Create_text\n\npygame.init()\n\nSCREEN_ANCHO = 800\nSCREEN_ALTO = 600\nSCREEN = pygame.display.set_mode((SCREEN_ANCHO, SCREEN_ALTO))\nNEGRO = (0, 0, 0)\nBLANCO = (255, 255, 255)\nFPS = 60\nPOSICION = [350, 420]\n\ndef main():\n global life\n life = 3\n BackGround = Background('background.jpg', [0, 0])\n font = Create_text()\n juego = True\n Nave = NaveJugador(POSICION [0], POSICION[1])\n Nodriza = nodriza(12)\n os.environ['SDL_VIDEO_WINDOW_POS'] = (str(POSICION[0]) + \",\" + str(POSICION[1]))\n cambio = 0\n clock = pyglet.clock.Clock()\n clock.set_fps_limit(FPS)\n\n\n while juego:\n\n\n for evento in pygame.event.get():\n if evento.type == pygame.QUIT:\n juego = False\n\n elif evento.type == pygame.KEYDOWN:\n\n if evento.key == pygame.K_SPACE:\n Nave.disparar()\n\n if evento.key == pygame.K_LEFT:\n cambio -= 1\n\n if evento.key == pygame.K_RIGHT:\n cambio += 1\n\n elif evento.type == pygame.KEYUP:\n if evento.key == pygame.K_LEFT:\n cambio = 0\n\n elif evento.key == pygame.K_RIGHT:\n cambio = 0\n\n dt = clock.tick() # seconds elapsed since the last tick\n\n\n font.titleText.draw(SCREEN)\n Nave.mover(cambio, dt)\n listaAliens = Nodriza.getLista()\n listaNave = [Nave]\n Nave.canon.mover_disparo(SCREEN, dt, listaAliens, 'nave')\n\n for alien in listaAliens:\n alien.canon.mover_disparo(SCREEN, dt, listaNave, 'alien')\n\n\n Nave.pintar(SCREEN)\n\n Nodriza.mover_aliens(SCREEN)\n\n if not Nave.vivo:\n life -= 1\n\n font.gameover.draw(SCREEN)\n juego = False\n\n pygame.display.update()\n SCREEN.blit(BackGround.image, BackGround.rect)\n\n\n\n\n\n\nmain()\npygame.quit()\nsys.exit()\n","sub_path":"PyNave.py","file_name":"PyNave.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"324616179","text":"import numpy as np\nimport tensorflow as tf\nfrom a_nice_mc.objectives import Energy\nfrom a_nice_mc.utils.evaluation import effective_sample_size, acceptance_rate\nfrom a_nice_mc.utils.logger import save_ess, create_logger, ensure_directory\n\n\n\n\nclass NN:\n def __init__(self, data, labels, arch, act=tf.nn.tanh, batch_size=None,\n loc=0.0, prec=1.0):\n \"\"\"\n Bayesian Neural Net model (assume Normal prior)\n :param data: data for Logistic Regression task\n :param labels: label for Logistic Regression task\n :param batch_size: batch size for Logistic Regression; setting it to None\n adds flexibility at the cost of speed.\n :param loc: mean of the Normal prior\n :param prec: precision of the Normal prior\n \"\"\"\n self.arch = arch\n self.theta_dim = np.sum([arch[i] * arch[i + 1] for i in range(len(arch) - 
1):\n size = self.arch[i] * self.arch[i + 1]\n w = tf.reshape(theta[:, start:start + size],\n (m, self.arch[i], self.arch[i + 1]))\n weights.append(w)\n start += size\n return weights\n\n def energy_fn(self, theta, x, y):\n \"\"\" theta has shape (num_chains, target_dim)\"\"\"\n h = tf.expand_dims(x, 0)\n h = tf.concat([h, tf.ones((1, h.shape[1], 1))], axis=2)\n h = tf.tile(h, [tf.shape(theta)[0], 1, 1])\n weights = self._unflatten(theta)\n for W in weights[:-1]:\n h = self.act(h @ W)\n mean = h @ weights[-1]\n mahalob = 0.5 * tf.reduce_sum((y - mean) ** 2, axis=2)\n prior = 0.5 * tf.reduce_sum(theta ** 2, axis=1, keepdims=True)\n\n return tf.reduce_sum(mahalob + self.prec_prior * prior, axis=1)\n\n def __call__(self, v):\n return self.energy_fn(v, self.data, self.labels)\n\n","sub_path":"utils/neural_network_regression/NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"345807283","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n for line in fh.readlines():\n if \"Version\" in line:\n version = line.split(\":\")[1].strip().rstrip('\\n')\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nrequirements = [\n 'requests',\n 'urllib3',\n 'cython',\n 'pyyaml',\n 'boto3',\n 'pymysql',\n 'docker',\n 'GPUtil',\n 'psycopg2-binary'\n]\n\nprint(\"Build: ddlworker\")\nprint(\". Version: {}\".format(version))\nprint(\". Requirements: {}\".format(requirements))\n\nsetuptools.setup(\n name=\"ddlworker\",\n version=version,\n author=\"YL & SW\",\n author_email = 'nedlitex0053@gmail.com',\n description=\"cli for distributed deep learning worker\",\n url=\"https://githublu.github.io/DeepLearningCluster/\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(exclude=['*.test', '*.test.*', 'test.*', 'test', 'data_container', ]),\n entry_points={\n 'console_scripts': [\n 'ddlworker=main.worker_main:main',\n ],\n },\n install_requires=requirements,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Operating System :: OS Independent\",\n ],\n)","sub_path":"pypi_install_script/ddlworker-1.2.9.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"438040385","text":"import matplotlib.pyplot as plt\r\n\r\ninput_values = list(range(-1001,1001))\r\nsquares = [x**3 for x in input_values]\r\nplt.scatter(input_values,squares,c=squares,cmap=plt.cm.Blues, edgecolor='none',s=2)\r\n#plt.plot(input_values,squares, linewidth=5)\r\nplt.title(\"Square Numbers\", fontsize=24)\r\nplt.xlabel(\"Value\", fontsize=14)\r\nplt.ylabel(\"Square of Value\", fontsize=14)\r\nplt.tick_params(axis='both',labelsize=14)\r\n\r\nplt.axis([-1001,1001,-1100000000,1100000000])\r\n\r\nplt.show()\r\n","sub_path":"mpl_squares.py","file_name":"mpl_squares.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"170031702","text":"#! 
/usr/bin/env python\n\n# Objective: transfer a file via DCC\n\n\nimport shutil\nimport hashlib\nimport time\nimport threading\nimport random\nimport string\nimport logging\nimport tempfile\nimport os\nimport struct\nimport sys\nimport subprocess\nimport shlex\nimport irc.client\n\n\nVALID_NICK_CHARS = string.ascii_lowercase + string.ascii_uppercase\nSERVER = \"irc.quakenet.org\"\nPORT = 6667\nSENDER_NICK = ''.join(random.choice(VALID_NICK_CHARS) for _ in range(12))\nRECEIVER_NICK = ''.join(random.choice(VALID_NICK_CHARS) for _ in range(12))\nIN_FILEPATH = tempfile.mktemp()\nIN_FILENAME = os.path.split(IN_FILEPATH)[1]\nOUT_DIR = tempfile.mkdtemp()\nOUT_FILEPATH = os.path.join(OUT_DIR, IN_FILENAME)\nLOG_FORMAT = \"%(levelname)s %(name)s %(lineno)d: %(message)s\"\nlogging.basicConfig(format=LOG_FORMAT, level=logging.INFO)\nlogger = logging.getLogger(\"dcctransfer\")\n\n\ndef cleanup():\n with open(IN_FILEPATH, 'rb') as f:\n input_sha256 = hashlib.sha256(f.read()).hexdigest()\n with open(OUT_FILEPATH, 'rb') as f:\n output_sha256 = hashlib.sha256(f.read()).hexdigest()\n assert(input_sha256 == output_sha256)\n logger.info(\"Data transferred successfully!\")\n os.remove(IN_FILEPATH)\n shutil.rmtree(OUT_DIR)\n\n\nclass DCCSend(irc.client.SimpleIRCClient):\n\n def __init__(self, receiver, filename):\n irc.client.SimpleIRCClient.__init__(self)\n self.receiver = receiver\n self.filename = filename\n self.filesize = os.path.getsize(self.filename)\n self.file = open(filename, 'rb')\n self.sent_bytes = 0\n\n def on_welcome(self, connection, event):\n self.dcc = self.dcc_listen(\"raw\")\n msg_parts = map(str, (\n 'SEND',\n os.path.basename(self.filename),\n irc.client.ip_quad_to_numstr(self.dcc.localaddress),\n self.dcc.localport,\n self.filesize,\n ))\n msg = subprocess.list2cmdline(msg_parts)\n self.connection.ctcp(\"DCC\", self.receiver, msg)\n\n def on_dcc_connect(self, connection, event):\n if self.filesize == 0:\n self.dcc.disconnect()\n return\n self.send_chunk()\n\n def on_dcc_disconnect(self, connection, event):\n print(\"Sent file %s (%d bytes).\" % (self.filename, self.filesize))\n self.connection.quit()\n\n def on_dccmsg(self, connection, event):\n acked = struct.unpack(\"!I\", event.arguments[0])[0]\n if acked == self.filesize:\n self.dcc.disconnect()\n self.connection.quit()\n elif acked == self.sent_bytes:\n self.send_chunk()\n\n def on_disconnect(self, connection, event):\n sys.exit(0)\n\n def on_nosuchnick(self, connection, event):\n print(\"No such nickname:\", event.arguments[0])\n self.connection.quit()\n\n def send_chunk(self):\n data = self.file.read(1024)\n self.dcc.send_bytes(data)\n self.sent_bytes += len(data)\n\n\nclass DCCReceive(irc.client.SimpleIRCClient):\n\n def __init__(self):\n irc.client.SimpleIRCClient.__init__(self)\n self.received_bytes = 0\n\n def on_ctcp(self, connection, event):\n payload = event.arguments[1]\n parts = shlex.split(payload)\n command, filename, peer_address, peer_port, size = parts\n assert(filename == IN_FILENAME)\n if command != \"SEND\":\n return\n self.filename = OUT_FILEPATH\n if os.path.exists(self.filename):\n print(\"A file named\", self.filename,\n \"already exists. 
Refusing to save it.\")\n self.connection.quit()\n return\n self.file = open(self.filename, \"wb\")\n peer_address = irc.client.ip_numstr_to_quad(peer_address)\n peer_port = int(peer_port)\n self.dcc = self.dcc_connect(peer_address, peer_port, \"raw\")\n\n def on_dccmsg(self, connection, event):\n data = event.arguments[0]\n self.file.write(data)\n self.received_bytes += len(data)\n self.dcc.send_bytes(struct.pack(\"!I\", self.received_bytes))\n\n def on_dcc_disconnect(self, connection, event):\n self.file.close()\n print(\"Received file %s (%d bytes).\" % (self.filename,\n self.received_bytes))\n self.connection.quit()\n cleanup()\n\n def on_disconnect(self, connection, event):\n sys.exit(0)\n\n\ndef main():\n\n logger.info(\"TEST SETUP\")\n logger.info(\"Server: \" + SERVER)\n logger.info(\"Port: \" + str(PORT))\n logger.info(\"Sender nick: \" + SENDER_NICK)\n logger.info(\"Receiver nick: \" + RECEIVER_NICK)\n logger.info(\"Input file: \" + IN_FILEPATH)\n logger.info(\"Output dir: \" + OUT_DIR)\n\n # create random file\n with open(IN_FILEPATH, 'wb') as fout:\n fout.write(os.urandom(1024)) # 1K random data\n\n # start receiver\n def receiver_main():\n logger.info(\"Starting receiver\")\n client_receiver = DCCReceive()\n try:\n client_receiver.connect(SERVER, PORT, RECEIVER_NICK)\n except irc.client.ServerConnectionError as x:\n print(x)\n sys.exit(1)\n client_receiver.start()\n\n # start sender\n def sender_main():\n logger.info(\"Starting sender\")\n client_sender = DCCSend(RECEIVER_NICK, IN_FILEPATH)\n try:\n client_sender.connect(SERVER, PORT, SENDER_NICK)\n except irc.client.ServerConnectionError as x:\n print(x)\n sys.exit(1)\n client_sender.start()\n\n # start threads\n receiver_thread = threading.Thread(target=receiver_main)\n receiver_thread.start()\n time.sleep(10)\n sender_thread = threading.Thread(target=sender_main)\n sender_thread.start()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"experiments/dcctransfer.py","file_name":"dcctransfer.py","file_ext":"py","file_size_in_byte":5682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"519286589","text":"# Ejercicio Examen 3\n#Tabla de multiplicar\n\"\"\"\nNombre: Mario Enrique Quijano Torres\t\nMateria: Programacion visual\n\"\"\"\nnum = int(input(\"¿Que numero deseas conocer su tabla de multiplicar? (Solo enteros): \"))\nnumv = int(input(\"¿Hasta que numero quieres conocer? 
(Solo enteros): \"))\nnumv2 = numv +1\nfor numv in range(0, numv2):\n\tprint(num, \"x \", numv, \" = \", num*numv)","sub_path":"Examen1parcial/Tabla.py","file_name":"Tabla.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"312253185","text":"# Embeder by tasure\r\ntitle = r\"\"\"\r\n ______ _ _ \r\n | ____| | | | | \r\n | |__ _ __ ___ | |__ ___ __| | ___ _ __ \r\n | __| | '_ ` _ \\| '_ \\ / _ \\/ _` |/ _ \\ '__|\r\n | |____| | | | | | |_) | __/ (_| | __/ | \r\n |______|_| |_| |_|_.__/ \\___|\\__,_|\\___|_| \r\n V 1.0 by tasuren#5161\r\n\r\n\"\"\"\r\n\r\nprint(title)\r\n\r\n\r\nfrom json import load,dump,dumps\r\nfrom discord import Embed\r\nfrom requests import post\r\n\r\n\r\n# お手軽Embed\r\ndef easy_embed(content,color=Embed.Empty):\r\n\tes = \">>\"\r\n\tspl = content.splitlines()\r\n\ttitle = spl[0][len(es):]\r\n\tdesc,fields = [],{}\r\n\tfooter = None if not ';;' in spl[-1] else spl[-1][2:]\r\n\tif footer: spl.pop(-1)\r\n\tspl.pop(0)\r\n\tf = None\r\n\tfor c in spl:\r\n\t\tif c == \"\":continue\r\n\t\tif c[0] == '<':\r\n\t\t\tf = c[1:] if '!'!=c[1] else c[2:]\r\n\t\t\tfields[f] = {'i':True if '!'!=c[1] else False,'c':[]}\r\n\t\t\tcontinue\r\n\t\tif f:\r\n\t\t\tfields[f]['c'].append(c)\r\n\t\t\tcontinue\r\n\t\tdesc.append(c)\r\n\te = Embed(\r\n\t\ttitle=title,\r\n\t\tdescription='\\n'.join(desc),\r\n\t\tcolor=color\r\n\t)\r\n\tfor f in fields.keys():\r\n\t\te.add_field(\r\n\t\t\tname=f,\r\n\t\t\tvalue='\\n'.join(fields[f]['c']),\r\n\t\t\tinline=fields[f]['i']\r\n\t\t)\r\n\tif footer: e.set_footer(text=footer)\r\n\treturn e.to_dict()\r\n\r\ndef write(data):\r\n\twith open(\"data.json\",\"w\") as f:\r\n\t\tdump(data,f,indent=2)\r\n\r\n\r\n# Data load\r\ntry:\r\n\twith open(\"data.json\",\"r\") as f:\r\n\t\tdata = load(f)\r\nexcept:\r\n\twith open(\"data.json\",\"w\") as f:\r\n\t\tf.write('{\\n\t\"first\": false,\\n\t\"tokens\": []\\n}')\r\n\r\nhelp_text = \"\"\"# Embeder HELP\r\n普通ユーザーからEmbedを好きなチャンネルに送信することができます。\r\n**使用は自己責任です!**\r\n\r\nhelp\r\n コマンドリストを表示\r\ntoken set \r\n TOKENリストにTOKENを追加します。\r\ntoken del \r\n TOKENリストからTOKENを削除します。\r\ntokens\r\n TOKENリストを表示します。\r\nsend \r\n Embedを送信します。\r\n に`set`で設定したアカウントのTOKENの登録名を入れてください。\r\nExit\r\n 終了します。\r\n\"\"\"\r\nad = [\"set\",\"del\"]\r\n\r\nget_headers = lambda token:{\r\n 'Content-Type': 'application/json',\r\n 'authorization': token,\r\n}\r\n\r\nprint(\"Embederへようこそ。helpでコマンドリストを確認できます。\\n注意:Discordの利用規約違反なので使用は自己責任です。\")\r\n\r\n\r\nwhile True:\r\n\tcmd = input(\">>>\")\r\n\r\n\tif cmd == \"\":\r\n\t\tcontinue\r\n\telse:\r\n\t\tcmd = cmd.split()\r\n\r\n\t# HELP\r\n\tif cmd[0] == \"help\":\r\n\t\tprint(help_text)\r\n\t# TOKENリスト管理\r\n\tif cmd[0] == \"token\" and len(cmd) > 2:\r\n\t\tif not cmd[1] in ad:continue\r\n\r\n\t\tif cmd[1] == \"set\" and len(cmd) > 3:\r\n\t\t\tdata[\"tokens\"][cmd[2]] = cmd[3]\r\n\t\t\twrite(data)\r\n\t\tif cmd[1] == \"del\":\r\n\t\t\tif not cmd[2] in data[\"tokens\"]:\r\n\t\t\t\tprint(\"その名前でTOKENは保存されていません。\")\r\n\t\t\t\tcontinue\r\n\t\t\tdel data[\"tokens\"][cmd[2]]\r\n\t\t\twrite(data)\r\n\t# TOKENリスト表示\r\n\tif cmd[0] == \"tokens\" and len(data[\"tokens\"]) != 0:\r\n\t\tfor d in data[\"tokens\"].keys():print(f'{d}\\n {data[\"tokens\"][d]}')\r\n\t# 送信\r\n\tif cmd[0] == \"send\" and len(cmd) > 1:\r\n\t\tif not cmd[1] in data[\"tokens\"]:\r\n\t\t\tprint(\"その名前でTOKENは保存されていません。\")\r\n\t\t\tcontinue\r\n\r\n\t\tchannel = input(\"送信先チャンネルIDを入力:\")\r\n\t\ttitle = input(\"タイトルを入力:\")\r\n\t\tdescription = 
input(\"説明を入力:\")\r\n\t\tprint(\"他に登録したい場合はreadmeの例にならって書いてください。\\n終わったら!end!と入力してください。\")\r\n\t\tetc = \"\"\r\n\t\twhile not \"!end!\" in etc:etc += \"\\n\"+input(\":\")\r\n\t\tif \"!end!\" in etc:etc = etc.replace(\"!end!\",\"\")\r\n\t\tprint(\"\")\r\n\r\n\t\tprint(\"作成中...\")\r\n\t\tsend_data = {\r\n\t\t\t\"content\": None,\r\n\t\t\t\"tts\": False,\r\n\t\t\t\"embed\": easy_embed(f\">>{title}\\n{description}{etc}\")\r\n\t\t}\r\n\r\n\t\tprint(\"送信中...\")\r\n\t\tresponse = post(\r\n\t\t\tf'https://discord.com/api/v6/channels/{channel}/messages',\r\n\t\t\theaders=get_headers(data[\"tokens\"][cmd[1]]),\r\n\t\t\tdata=dumps(send_data)\r\n\t\t)\r\n\t\tst = response.status_code\r\n\t\tif st in [200,201,204,304]:print(\"送信に成功しました。\")\r\n\t\telif st == 400:print(\"送信に失敗しました。\\n サーバーまたは、送信したデータにエラーがあります。\")\r\n\t\telif st == 401:print(\"送信に失敗しました。\\n TOKENがあっているか確認をしてください。\")\r\n\t\telif st == 403:print(\"送信に失敗しました。\\n TOKENがあっているか確認をしてください。\\n またチャンネルIDがあっているか確認してください。\")\r\n\t\telif st == 404:print(\"送信に失敗しました。\\n チャンネルIDがあっているか確認してください。\")\r\n\t\telif st == 429:print(\"送信に失敗しました。\\n 送信のしすぎで制限を受けています。\")\r\n\t\telse:print(f\"なんらかの理由で送信に失敗しました。\\n エラーコード:{st}\")\r\n\t# 終了\r\n\tif cmd[0] == \"exit\":break","sub_path":"embeder.py","file_name":"embeder.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"77109883","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nfrom functions import sigmoid, softmax, cross_entropy_error\n\n\n# In[2]:\n\n\nclass TwoLayerNN:\n \"\"\" a neural network with one hidden layer \"\"\"\n def __init__(self, X, Y, hidden_size):\n # parameters\n self.X = X\n self.Y = Y\n self.data_num = X.shape[0] # 총 데이터 크기\n self.feature_size = X.shape[1] # X데이터의 크기를 이용해 feature size 추출\n self.output_size = Y.shape[1] # Y데이터의 크기를 이용해 output size 추출(one-hot encoding 된 라벨 데이터)\n \n \"\"\"initialize parameters W1, b1, W2, b2\"\"\"\n self.params = {}\n # W1, b1 --> hidden layer\n self.params[\"W1\"] = np.random.randn(self.feature_size,hidden_size)\n self.params[\"b1\"] = np.random.randn(hidden_size)\n \n # W2, b2 : hidden layer --> output layer\n self.params[\"W2\"] = np.random.randn(hidden_size, self.output_size)\n self.params[\"b2\"] = np.random.randn(self.output_size)\n \n \n def predict(self,x):\n \"\"\" given input x, calculate output using current parameters : W1,b1,W2,b2 \"\"\"\n # input --> hidden layer\n l1 = np.dot(x,self.params[\"W1\"])+self.params[\"b1\"]\n l1 = sigmoid(l1)\n \n # hidden layer --> output layer\n l2 = np.dot(l1,self.params[\"W2\"])+self.params[\"b2\"]\n self.score = softmax(l2)\n \n return self.score\n \n def loss(self,y,t):\n # loss를 계산\n error = cross_entropy_error(y,t)\n \n return error\n \n def accuracy(self, y, t):\n # 예측값\n predicted = np.argmax(y, axis=1)\n t = np.argmax(t, axis=1)\n \n # 정확도 계산\n return np.sum(predicted==t)/t.shape[0]\n \n def numerical_gradient(self):\n h=1e-4\n # gradient를 담을 dictionary 생성\n self.gradient = {}\n # parameter들의 gradient 계산 및 저장\n for par, item in self.params.items():\n grad = np.zeros(item.shape)\n # flatten 시켜서 개별 원소들이 idx 값을 가지도록 함\n for idx, i in enumerate(item.flatten()):\n # 만약 벡터 1개로 구성된 parameter 이라면\n if item.shape == item.flatten().shape:\n self.params[par][idx] = i+h\n y1 = self.loss(self.predict(self.batch_X),self.batch_Y)\n self.params[par][idx] = i-h\n y2 = self.loss(self.predict(self.batch_X),self.batch_Y)\n grad[idx] = (y1-y2)/(2*h)\n self.params[par][idx] = i\n continue\n \n # 만약 벡터 
여러개로 구성된 parameter 이라면\n # 해당 idx 으로 실제 numpy 값에서 어떤 위치에 있는지 추출\n row,col = idx//item.shape[1], idx%item.shape[1]\n self.params[par][row,col] = i+h\n y1 = self.loss(self.predict(self.batch_X),self.batch_Y)\n self.params[par][row,col] = i-h\n y2 = self.loss(self.predict(self.batch_X),self.batch_Y)\n grad[row,col] = (y1-y2)/(2*h)\n self.params[par][row,col] = i\n \n # 해당 파라미터의 gradient 저장\n self.gradient[par]=grad\n return self.gradient\n \n def learn(self, lr, epoch, batch_size = 40): # batch_size default = 40\n # input 데이터 수와 설정한 batch_size 수를 비교하여 batch_size 결정\n batch_size = min(batch_size, self.data_num)\n # batch size에 따른 한 epoch 안에서의 반복 횟수\n batch_num = self.data_num // batch_size\n # epoch 당 loss를 담을 list 생성\n self.loss_list = []\n # epoch 당 accuracy를 담을 list 생성\n self.accuracy_list = []\n \n # 총 데이터에 대해 epoch 만큼 반복 학습\n for e in range(epoch):\n # 한 epoch 당 loss & accuracy를 계산\n loss = 0\n accuracy = 0\n for b in range(batch_num):\n # batch size만큼의 batch data 생성\n self.batch_X, self.batch_Y = self.X[batch_size*b:batch_size*(b+1),], self.Y[batch_size*b:batch_size*(b+1),]\n\n # 해당 batch_size 만큼의 데이터 input 하여 각 label에 대한 score 계산\n score = self.predict(self.batch_X)\n # loss 계산\n l = self.loss(score, self.batch_Y)\n loss+=l\n # accuracy 계산\n a = self.accuracy(score,self.batch_Y)\n accuracy += a\n \n # W update\n gradient = self.numerical_gradient()\n for par, item in gradient.items():\n self.params[par] = self.params[par]-lr*gradient[par]\n \n # 한 epoch 당 loss 평균\n loss = loss/batch_num\n # 한 epoch 당 accuracy 평균\n accuracy = accuracy/batch_num\n print(\"Epoch : {:5}, Loss : {:.6f}, Accuracy : {:.4%}\".format(e,loss, accuracy))\n self.loss_list.append(loss) \n self.accuracy_list.append(accuracy) \n\n","sub_path":"NeuralNetwork_class.py","file_name":"NeuralNetwork_class.py","file_ext":"py","file_size_in_byte":5302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"201650938","text":"from __future__ import absolute_import, print_function, unicode_literals\n\nimport datetime\nimport io\nimport re\nimport subprocess\nfrom os import path\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.sdist import sdist as base_sdist\nfrom setuptools.command.bdist_egg import bdist_egg as base_bdist_egg\n\nfrom wagtailfontawesome import __version__\n\n\nwith open('README.rst', 'r') as readme:\n long_description = readme.read()\n\n\nclass assets_mixin:\n def compile_assets(self):\n try:\n subprocess.check_call(['npm', 'run', 'build'])\n except (OSError, subprocess.CalledProcessError) as e:\n print('Error compiling assets: ' + str(e))\n raise SystemExit(1)\n\n\nclass sdist(base_sdist, assets_mixin):\n def run(self):\n self.compile_assets()\n base_sdist.run(self)\n\n\nclass bdist_egg(base_bdist_egg, assets_mixin):\n def run(self):\n self.compile_assets()\n base_bdist_egg.run(self)\n\n\nsetup(\n name='wagtailfontawesome',\n version=__version__,\n description='Add FontAwesome icons to StreamField.',\n long_description=long_description,\n url='https://gitlab.com/alexgleason/wagtailfontawesome',\n author='Alex Gleason',\n author_email='alex@alexgleason.me',\n license='MIT',\n classifiers=[\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n 'Topic :: Internet :: WWW/HTTP',\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n ],\n keywords='development',\n 
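# static assets are compiled by the npm-driven sdist/bdist_egg hooks above and shipped via include_package_data below\n 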
packages=find_packages(),\n include_package_data=True,\n install_requires=[\n \"wagtail>=1.4.0\",\n \"Django>=1.7.1\",\n ],\n cmdclass={\n 'sdist': sdist,\n 'bdist_egg': bdist_egg,\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"561470243","text":"from flask import Flask, request, url_for, session, redirect, render_template\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\nimport time\nimport json\nimport os.path\nfrom os import path\n\napp = Flask(__name__)\n\n# assign 'key' variables for Spotify to verify application\napp.secret_key = \"ca46715f11484acdac1036ae59e8b93e\"\napp.config['SESSION_COOKIE_NAME'] = 'Christians Cookie'\nTOKEN_INFO = \"token_info\"\n\n#check to ensure previous login info is removed and delete if necessary to allow for new user login\nif(path.exists(\"/Users/cphackelman/Desktop/Spotify_Project/.cache\")):\n os.remove(\"/Users/cphackelman/Desktop/Spotify_Project/.cache\")\n\n#create login page for user\n@app.route('/')\ndef login():\n sp_oauth = create_spotify_oauth()\n auth_url = sp_oauth.get_authorize_url()\n return redirect(auth_url)\n\n#create redirect page to refresh access tokens from Spotify API and redirect to getTopLists once tokens are verified\n@app.route('/redirect')\ndef redirectPage():\n sp_oauth = create_spotify_oauth()\n session.clear()\n code =request.args.get('code')\n token_info = sp_oauth.get_access_token(code)\n session[TOKEN_INFO] = token_info\n return redirect(url_for('getTopLists', _external=True))\n\n@app.route('/getTopLists')\ndef getTopLists():\n#ensure user session is still valid\n try:\n token_info = get_token()\n except:\n print(\"user not logged in\")\n return redirect(\"/\")\n sp = spotipy.Spotify(auth = token_info['access_token'])\n#specify range of user data to retrieve as well as how much data to retrieve\n#'long_term': spans several years\n#'medium_term': spans last 6 months\n#'short_term': spans last month\n results = sp.current_user_top_tracks(limit=50, offset = 0, time_range='long_term')\n#Retrieve user data into list\n for song in range(50):\n list = []\n list.append(results)\n#Convert list to json file for further tokenization\n with open('top50_data.json', 'w', encoding='utf-8') as f:\n json.dump(list, f, ensure_ascii=False, indent=4)\n#load in json file for tokenization\n with open('top50_data.json') as f:\n data = json.load(f)\n#take json file and store into list, 'results'\n#create lists to separate differing values generated by Spotify API\n list_of_results = data[0][\"items\"]\n list_of_artist_names = []\n list_of_artist_uri = []\n list_of_song_names = []\n list_of_song_uri = []\n list_of_durations_ms = []\n list_of_explicit = []\n list_of_albums = []\n list_of_popularity = []\n#populate each list according to variable\n for result in list_of_results:\n result[\"album\"]\n this_artists_name = result[\"artists\"][0][\"name\"]\n list_of_artist_names.append(this_artists_name)\n this_artists_uri = result[\"artists\"][0][\"uri\"]\n list_of_artist_uri.append(this_artists_uri)\n list_of_songs = result[\"name\"]\n list_of_song_names.append(list_of_songs)\n song_uri = result[\"uri\"]\n list_of_song_uri.append(song_uri)\n list_of_duration = result[\"duration_ms\"]\n list_of_durations_ms.append(list_of_duration)\n song_explicit = result[\"explicit\"]\n list_of_explicit.append(song_explicit)\n this_album = result[\"album\"][\"name\"]\n list_of_albums.append(this_album)\n 
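# \"popularity\" is Spotify's 0-100 popularity score for the track\n 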
song_popularity = result[\"popularity\"]\n list_of_popularity.append(song_popularity)\n#create lists to store songs and artists into respective tier\n s_tier_songs = []\n a_tier_songs = []\n b_tier_songs = []\n c_tier_songs = []\n d_tier_songs = []\n f_tier_songs = []\n\n s_tier_artist_names = []\n a_tier_artist_names = []\n b_tier_artist_names = []\n c_tier_artist_names = []\n d_tier_artist_names = []\n f_tier_artist_names = []\n\n #slice each list into appropriate tiers\n s_tier_artist_names = list_of_artist_names[0:8]\n s_tier_songs = list_of_song_names[0:8]\n a_tier_artist_names = list_of_artist_names[8:16]\n a_tier_songs = list_of_song_names[8:16]\n b_tier_artist_names = list_of_artist_names[16:24]\n b_tier_songs = list_of_song_names[16:24]\n c_tier_artist_names = list_of_artist_names[24:34]\n c_tier_songs = list_of_song_names[24:34]\n d_tier_artist_names = list_of_artist_names[34:42]\n d_tier_songs = list_of_song_names[34:42]\n f_tier_artist_names = list_of_artist_names[42:50]\n f_tier_songs = list_of_song_names[42:50]\n\n #merged_list = [(list1[i], list2[i]) for i in range(0, len(list1))]\n #merge lists into tiers accordingly\n s_tier = [(s_tier_songs[i], s_tier_artist_names[i]) for i in range(0, len(s_tier_songs))]\n a_tier = [(a_tier_songs[i], a_tier_artist_names[i]) for i in range(0, len(a_tier_songs))]\n b_tier = [(b_tier_songs[i], b_tier_artist_names[i]) for i in range(0, len(b_tier_songs))]\n c_tier = [(c_tier_songs[i], c_tier_artist_names[i]) for i in range(0, len(c_tier_songs))]\n d_tier = [(d_tier_songs[i], d_tier_artist_names[i]) for i in range(0, len(d_tier_songs))]\n f_tier = [(f_tier_songs[i], f_tier_artist_names[i]) for i in range(0, len(f_tier_songs))]\n#utilize Jinja2 for variable use in html document\n return render_template('top50.html', \n sTier = s_tier, \n aTier = a_tier,\n bTier = b_tier,\n cTier = c_tier,\n dTier = d_tier,\n fTier = f_tier)\n\n#retrieve access token from Spotify API\ndef get_token():\n token_info = session.get(TOKEN_INFO, None)\n if not token_info:\n raise \"exception\"\n now = int(time.time())\n is_expired = token_info['expires_at'] - now < 60\n if (is_expired):\n sp_oauth = create_spotify_oauth()\n token_info = sp_oauth.refresh_access_token(token_info['refresh_token'])\n return token_info\n\n#define what scope of data we want to retrieve, scope is how SPotify defines various segments of available user data\ndef create_spotify_oauth():\n return SpotifyOAuth(\n client_id = \"9828445b08644a13a067e7959d1a4b6d\",\n client_secret = \"ca46715f11484acdac1036ae59e8b93e\",\n redirect_uri=url_for('redirectPage', _external=True),\n scope=\"user-top-read\")","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"631436876","text":"import logging\n\nfrom asreview.ascii import welcome_message\nfrom asreview.config import DEFAULT_N_PRIOR_EXCLUDED\nfrom asreview.config import DEFAULT_N_PRIOR_INCLUDED\nfrom asreview.entry_points.base import BaseEntryPoint, _base_parser\nfrom asreview.review import review_simulate\n\n\nclass SimulateEntryPoint(BaseEntryPoint):\n description = \"Simulate the performance of ASReview.\"\n\n def execute(self, argv):\n parser = _simulate_parser()\n args = parser.parse_args(argv)\n\n args_dict = vars(args)\n path = args_dict.pop(\"dataset\")\n\n verbose = args_dict.get(\"verbose\", 0)\n if verbose == 0:\n logging.getLogger().setLevel(logging.WARNING)\n elif verbose == 1:\n 
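# verbosity flags: 0 keeps the WARNING level set above, 1 selects INFO, 2 or more selects DEBUG\n 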
logging.getLogger().setLevel(logging.INFO)\n elif verbose >= 2:\n logging.getLogger().setLevel(logging.DEBUG)\n\n print(welcome_message())\n review_simulate(path, **args_dict)\n\n\nDESCRIPTION_SIMULATE = \"\"\"\nAutomated Systematic Review (ASReview) for simulation runs.\n\nThe simulation mode is used to measure the performance of our\nsoftware on existing systematic reviews. The software shows how many\npapers you could have potentially skipped during the systematic\nreview.\"\"\"\n\n\ndef _simulate_parser(prog=\"simulate\", description=DESCRIPTION_SIMULATE):\n parser = _base_parser(prog=prog, description=description)\n # Active learning parameters\n # File path to the data.\n parser.add_argument(\n \"dataset\",\n type=str,\n nargs=\"*\",\n help=\"File path to the dataset or one of the built-in datasets.\"\n )\n # Initial data (prior knowledge)\n parser.add_argument(\n \"--n_prior_included\",\n default=DEFAULT_N_PRIOR_INCLUDED,\n type=int,\n help=\"Sample n prior included papers. \"\n \"Only used when --prior_included is not given. \"\n f\"Default {DEFAULT_N_PRIOR_INCLUDED}\")\n\n parser.add_argument(\n \"--n_prior_excluded\",\n default=DEFAULT_N_PRIOR_EXCLUDED,\n type=int,\n help=\"Sample n prior excluded papers. \"\n \"Only used when --prior_excluded is not given. \"\n f\"Default {DEFAULT_N_PRIOR_EXCLUDED}\")\n\n parser.add_argument(\n \"--prior_idx\",\n default=[],\n nargs=\"*\",\n type=int,\n help=\"Prior indices by id.\"\n )\n parser.add_argument(\n '--init_seed',\n default=None,\n type=int,\n help=\"Seed for setting the prior indices if the --prior_idx option is \"\n \"not used. If the option --prior_idx is used with one or more \"\n \"index, this option is ignored.\"\n )\n parser.add_argument(\n \"--verbose\", \"-v\",\n default=0,\n type=int,\n help=\"Verbosity\"\n )\n\n return parser\n","sub_path":"asreview/entry_points/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"61372990","text":"import logging\n# PyDispatcher provides the signal-style connect() used below; the stdlib asyncore dispatcher does not\nfrom pydispatch import dispatcher\n\nfrom scrapy import signals\n\nfrom sitemap_crawler.ph.elastic_parsers.elastic_parsers import parse_canonical, parse_redirect_error, parse_crawl\nfrom sitemap_crawler.ph.elastic_search_connection.elastic_search_connection import connection\nfrom sitemap_crawler.ph.spiders.sitemap.ph_sitemap_analyzer import PhSitemapSpider\n\nlogger = logging.getLogger(__name__)\n\n\nclass SitemapAnalyzer(PhSitemapSpider):\n name = 'sitemap_analyzer'\n sitemap_urls = []\n allowed_domains = []\n handle_httpstatus_list = [404, 500, 503, 302, 301, 303]\n\n def __init__(self, *a, **kw):\n self.total_urls_for_file = kw['total_urls']\n self.domain = kw['domain']\n self.sitemap_index_url = kw['sitemap_url']\n self.allowed_domains.append(self.domain)\n self.sitemap_urls.append(self.sitemap_index_url)\n\n self.elastic_connection = connection()\n\n dispatcher.connect(self.spider_closed, signals.spider_closed)\n super(SitemapAnalyzer, self).__init__(*a, **kw)\n\n def parse(self, response):\n self.validate_response(response)\n\n def spider_closed(self):\n doc = parse_crawl(self.urls_counter, self.sub_sitemaps_counts, self.sitemap_date_updated,\n self.sitemap_generated)\n self.elastic_connection.index(index='sitemap', doc_type='crawl', body=doc)\n\n def validate_response(self, response):\n self.validate_canonical(response)\n self.validate_redirects(response)\n self.validate_error(response)\n\n def validate_canonical(self, response):\n canonical, 
request_url = self.get_urls(response)\n if canonical != request_url:\n doc = parse_canonical(canonical, request_url, response.status)\n self.elastic_connection.index(index='sitemap', doc_type='canonicals', body=doc)\n\n def validate_redirects(self, response):\n if self.is_redirect(response.status):\n doc = parse_redirect_error(response)\n self.elastic_connection.index(index='sitemap', doc_type='redirects', body=doc)\n\n def validate_error(self, response):\n if self.is_error(response.status):\n doc = parse_redirect_error(response)\n self.elastic_connection.index(index='sitemap', doc_type='error', body=doc)\n\n @staticmethod\n def get_urls(response):\n return response.css('head link[rel=canonical]::attr(href)').extract_first(), response.request.url\n\n @staticmethod\n def is_redirect(status):\n return status == 301 or status == 302 or status == 303\n\n @staticmethod\n def is_error(status):\n return status == 404 or status == 500 or status == 503\n","sub_path":"build/lib.linux-x86_64-2.7/sitemap_crawler/spiders/sitemap_analyzer.py","file_name":"sitemap_analyzer.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"181837714","text":"n=int(input())\r\nl=list(map(int,input().split()))\r\np=[]\r\nf=0\r\nfor i in range(n):\r\n if(l[i]==i):\r\n f=1\r\n p.append(l[i])\r\np=sorted(p)\r\nif(f==1):\r\n for i in p:\r\n print(i,end=\" \")\r\nelse:\r\n print(-1)\r\n \r\n","sub_path":"indexvalue.py","file_name":"indexvalue.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"387583459","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 24 11:41:35 2019\r\n\r\n@author: binxi\r\n\"\"\"\r\n\r\nclass Solution(object):\r\n def combinationSum(self, candidates, target):\r\n \"\"\"\r\n :type candidates: List[int]\r\n :type target: int\r\n :rtype: List[List[int]]\r\n \"\"\"\r\n if len(candidates) == 1:\r\n if target % candidates[0] == 0:\r\n return [candidates*int(target/candidates[0])]\r\n else:\r\n return []\r\n \r\n comb = []\r\n for i in range(0,int(target/candidates[0])+1,1):\r\n rest = self.combinationSum(candidates[1:],target-i*candidates[0])\r\n for j in rest:\r\n comb += [i*[candidates[0]]+j]\r\n \r\n return comb\r\n ","sub_path":"Leetcode/#39 Combination Sum.py","file_name":"#39 Combination Sum.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"553200822","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 21 14:53:41 2019\n\n@author: Simon Zhou\n\"\"\"\nimport time\nimport requests\nfrom .error_codes import _codes\n\nclass YMClient:\n baseURL = \"http://api.fxhyd.cn/UserInterface.aspx\"\n def __init__(self, username, password, token=None, itemid=None, mobile=None):\n self.username = username\n self.password = password\n self.token = token\n if self.token == None:\n self.token = self.get_token()\n if self.token[0] != 'success':\n print(self.token)\n else:\n self.token = self.token[1]\n \n self.itemid = itemid\n self.mobile = mobile\n \n def get_token(self):\n params = {}\n params['action'] = 'login'\n params['username'] = self.username\n params['password'] = self.password\n response = requests.get(self.baseURL, params=params).text.split(\"|\")\n if response[0] == 'success':\n return tuple(response)\n else:\n return '获取TOKEN错误'+response[0]+':'+_codes.get(int(response[0]))\n \n def 
get_account_info(self):\n params = {}\n params['action'] = 'getaccountinfo'\n params['token']=self.token\n response = requests.get(self.baseURL, params).text.split(\"|\")\n if response[0] == 'success':\n info = {k:v for k,v in zip([\"用户名\",\"账户状态\",\"账户等级\",\"账户余额\",\"冻结金额\",\"账户折扣\",\"获取号码最大数量\"],response[1:])}\n return info\n else:\n return '获取账户信息错误'+response[0]+':'+_codes.get(int(response[0]))\n \n def get_mobile(self, itemid, isp=None, province=None, city=None, mobile=None, excludeno=None):\n params = {}\n params['action'] = 'getmobile'\n params['token'] = self.token\n params['itemid'] = itemid\n response = requests.get(self.baseURL, params=params).text.split(\"|\")\n if response[0] == 'success':\n return int(response[1])\n else:\n return '错误'+response[0]+':'+_codes.get(int(response[0]))\n \n def get_sms(self, itemid, mobile, release=None, getsendno=None):\n params = {}\n params['action'] = 'getsms'\n params['token'] = self.token\n params['itemid'] = itemid\n params['mobile'] = mobile\n params['release'] = release\n response = requests.get(self.baseURL, params=params)\n response.encoding = 'utf-8'\n response = response.text.split(\"|\", 1)\n if response[0] == 'success':\n return ('success', response[1])\n else:\n return (int(response[0]), _codes.get(int(response[0])))\n \n def fetch_sms_until_succeed(self, itemid, mobile, release=1, getsendno=None, timeout=90):\n stime = time.time()\n while time.time()-stime <= timeout:\n a = self.get_sms(itemid, mobile, release=release)\n if a[0] == 3001:\n print(a[1]+\",5秒后重试...\")\n time.sleep(5)\n elif a[0] == 'success':\n print(\"获取短信成功,短信内容为:\\n\"+a[1])\n return a[1]\n break\n else:\n raise Exception(a[1])\n if a[0] != 'success':\n raise Exception(\"Timeout\")\n \n def send_sms(self, itemid, mobile, sms, number=None):\n params = {}\n params['action'] = 'sendsms'\n params['token'] = self.token\n \n def get_send_sms_state(self, itemid, mobile):\n params = {}\n params['action'] = 'getsendsmsstate'\n params['token'] = self.token\n \n def release(self, itemid, mobile):\n params = {}\n params['action'] = 'release'\n params['token'] = self.token\n params['itemid'] = itemid\n params['mobile'] = mobile\n response = requests.get(self.baseURL, params).text\n if response == 'success':\n return response\n else:\n return '错误'+response+':'+_codes.get(int(response))\n\n def add_ignore(self, itemid, mobile):\n params = {}\n params['action'] = 'addignore'\n params['token'] = self.token\n params['itemid'] = itemid\n params['mobile'] = mobile\n response = requests.get(self.baseURL, params).text\n if response == 'success':\n return response\n else:\n return '错误'+response+':'+_codes.get(int(response))\n\nif __name__ == '__main__': \n pass\n","sub_path":"build/lib/yima/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"427369328","text":"#!/usr/bin/env python\n\"\"\"\n\nJoshua Dawes - CMS, CERN - The University of Manchester\n\nFile to provide a class, with methods defined, to interact with usage data for the db upload service.\nUltimately, this will be used as a layer to extract data for db upload monitoring tools.\n\nAt the moment, contains code to write to usage database (temporarily sqlite for development), and to create logs for user download\nat the end of the upload session.\n\n\"\"\"\n\nimport sqlalchemy\nfrom datetime import datetime, timedelta\nimport logging\nimport time\nimport os\nfrom flask import g, request\nimport traceback\nfrom 
itsdangerous import Serializer\nimport hashlib\nimport json\n\nfrom errors import *\nimport app.ldapAuth as ldap\nimport CondDBFW\nfrom CondDBFW.models import to_timestamp\nimport database\nimport keeperService\n\nfrom app import app\n\n# get utility functions\nfrom CondDBFW.utils import to_timestamp, to_datetime, friendly_since\n\nclass Usage():\n\t\"\"\"\n\tClass for controlling upload sessions (authentication and tag locks).\n\t\"\"\"\n\n\tdef __init__(self, upload_session_id=None, connection=None):\n\t\t# set up connection to destination Database\n\t\t# this has to be stored so self.end_usage() will be able to close the conddbfw connection\n\t\t# given to this object at instantiation\n\t\tif connection != None:\n\t\t\tself.conddbfw_connection = connection.get_conddbfw_con()\n\t\telse:\n\t\t\t# in this case, there is no existing connection to be passed\n\t\t\t# will be for creating the upload session\n\t\t\tself.conddbfw_connection = database.Connection().get_conddbfw_con()\n\t\tself.connection = self.conddbfw_connection.engine.connect()\n\t\t# will be detected in production instances\n\t\t# will tell us whether we are on dev, point 5 (prod) or possibly tier0\n\t\tself.instance_name = keeperService.getProductionLevel()\n\t\tif upload_session_id != None:\n\t\t\tself.upload_session_id = upload_session_id\n\t\t\tlogging.debug(\"USAGE OBJECT INSTANTIATED\")\n\t\t\t# make sure the tag lock the token belongs to is still in place\n\t\t\ttag_lock_intact = self.tag_lock_intact()\n\t\t\tif tag_lock_intact:\n\t\t\t\tlog_file_name = self.get_log_file_name()\n\t\t\t\tself.log_handle = open(log_file_name, \"a\")\n\t\t\telse:\n\t\t\t\traise Exception(\"Tag lock has expired.\")\n\t\telse:\n\t\t\tself.upload_session_id = None\n\t\t\tself.log_handle = None\n\n\tdef tag_lock_intact(self):\n\t\t# make sure the tag lock is intact in the usage database\n\t\ttag_lock = self.connection.execute(\"select * from upload_session where token = :1 and expiry_time > to_timestamp(:2, 'YYYY-MM-DD HH24:MI:SS.FF')\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t[self.upload_session_id, to_timestamp(datetime.now())]).fetchall()\n\t\t# tag lock is only intact if we found a row with an expiry date > current time\n\t\treturn len(tag_lock) == 1\n\n\tdef get_upload_session_tag_name(self):\n\t\ttag_name = self.connection.execute(\"select tag_name from upload_session where token = :1\", [self.upload_session_id]).fetchall()[0][0]\n\t\treturn tag_name\n\n\tdef tag_lock_expiry_time(self, tag_name):\n\t\tcurrent_tag_lock = self.connection.execute(\"select expiry_time from upload_session where tag_name = :1 order by expiry_time desc\", [tag_name]).fetchall()\n\t\treturn current_tag_lock[0][0]\n\n\tdef new_upload_session(self, destination_tag, username_or_token, password):\n\t\t\"\"\"\n\n\t\tAuthentication with username_or_token and password follows the same idea as the old upload service:\n\n\t\t1) Client sends username, password and tag name - if user authenticates, and tag is not locked, token valid for 5 minutes is returned.\n\t\t2) Client uses token throughout the upload session.\n\t\t3) Since Metadata upload is the last request in the upload process, at the start of processing set the expiry of the tag lock to an extra 30 seconds into the future\n\t\tif the tag lock expires in less than 10 seconds. 
At the end of processing, before returning a response, release the tag lock.\n\n\t\t\"\"\"\n\n\t\t# authenticate on username and password, if we have a username and password\n\t\tif password == None:\n\t\t\tif username_or_token == None:\n\t\t\t\treturn {\"error\" : \"If no password is given, it is assumed that you have provided a password.\"}\n\t\t\telse:\n\n\t\t\t\t# use the token given, check if there is a valid tag lock in place\n\t\t\t\t# and is held for at least another 30 seconds - if not, refresh it\n\t\t\t\ttag_lock = self.connection.execute(\"select expiry_time from upload_session where token = :1\", [username_or_token]).fetchall()\n\t\t\t\t# if the newest tag lock expires during the next 30 seconds, refresh it\n\t\t\t\tnow = datetime.now()\n\t\t\t\tif str(now) <= tag_lock[0][0] < str(now + timedelta(seconds=30)):\n\t\t\t\t\t# no tag lock exists - check that an old tag lock (no longer valid) exists, and update it\n\t\t\t\t\t# if no tag lock exists, there was an error when creating the upload session and this request shouldn't have happened\n\t\t\t\t\ttag_lock = self.connection.execute(\"select * from upload_session where token = :1\", [username_or_token]).fetchall()\n\t\t\t\t\tif len(tag_lock) == 1:\n\t\t\t\t\t\tself.connection.execute(\"update upload_session set expiry_time = to_timestamp(:1, 'YYYY-MM-DD HH24:MI:SS.FF') where token = :2\",\\\n\t\t\t\t\t\t\t\t\t\t\t\t[to_timestamp(datetime.now() + timedelta(seconds=30)), username_or_token])\n\t\t\t\t\t\t# tag lock has been refreshed for 30 seconds\n\t\t\t\t\telse:\n\t\t\t\t\t\traise UploadSessionProblemException()\n\t\t\t\telif tag_lock[0][0] >= str(now + timedelta(seconds=30)):\n\t\t\t\t\t# don't do anything - the tag lock is valid for more than 30 seconds longer\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\t# the tag lock is no longer valid\n\t\t\t\t\tupper_bound_expiry_time = self.tag_lock_expiry_time(destination_tag)\n\t\t\t\t\traise TagLockedException(time=now, tag_name=destination_tag, time_to_retry=upper_bound_expiry_time)\n\n\t\telse:\n\t\t\tlogging.debug(\"AUTHENTICATING WITH USERNAME AND PASSWORD\")\n\t\t\t# authenticate the user based on their username and password\n\t\t\tauthenticated = ldap.authenticate(username_or_token, password)\n\n\t\t\tif authenticated:\n\t\t\t\t# check the tag lock\n\t\t\t\t# first, make sure the given destination_tag is not locked\n\t\t\t\tnow = datetime.now()\n\t\t\t\texisting_tag_locks = self.connection.execute(\"select * from upload_session where tag_name = :1 and expiry_time >= to_timestamp(:2, 'YYYY-MM-DD HH24:MI:SS.FF')\",\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[destination_tag, to_timestamp(datetime.now())]).fetchall()\n\t\t\t\tif len(existing_tag_locks) != 0:\n\t\t\t\t\t# find the time that the tag lock is released\n\t\t\t\t\tupper_bound_expiry_time = self.tag_lock_expiry_time(destination_tag)\n\t\t\t\t\traise TagLockedException(time=now, tag_name=destination_tag, time_to_retry=upper_bound_expiry_time)\n\t\t\t\telse:\n\t\t\t\t\t# create token that expires in 5 minutes\n\t\t\t\t\ttime = datetime.now()\n\t\t\t\t\t# hash dictionary of username and timestamp with sha1 - this will be stored, and matched against for authentication\n\t\t\t\t\ttoken_dictionary = {\"username\" : username_or_token, \"timestamp\" : to_timestamp(datetime.now())}\n\t\t\t\t\ttoken_string_for_hashing = json.dumps(token_dictionary)\n\t\t\t\t\t# don't serialize the token with itsdangerous - hashing is enough for now\n\t\t\t\t\ttoken = hashlib.sha1(token_string_for_hashing).hexdigest()\n\t\t\t\t\texpiry_time = time + 
timedelta(seconds=60)\n\t\t\t\t\tservice_instance = self.instance_name\n\t\t\t\t\tlog_name = \"app/upload_session_logs/log_%s\" % token\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.connection.execute(\"insert into upload_session values(:1, to_timestamp(:2, 'YYYY-MM-DD HH24:MI:SS.FF'), to_timestamp(:3, 'YYYY-MM-DD HH24:MI:SS.FF'), :4, :5, :6)\",\\\n\t\t\t\t\t\t\t\t\t\t\t\t[token, to_timestamp(time), to_timestamp(expiry_time), service_instance, log_name, destination_tag])\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t# can't use usage logging here, because we don't have an upload session\n\t\t\t\t\t\tlogging.error(\"Problem when querying usage database.\")\n\t\t\t\t\t\tlogging.error(traceback.format_exc())\n\t\t\t\t\t\treturn {\"error\" : \"Problem when creating upload session usage database.\", \"traceback\" : traceback.format_exc()}\n\n\t\t\t\t\t# token for session is now used as id - client side stores this in exactly the same way\n\t\t\t\t\tself.upload_session_id = token\n\t\t\t\t\treturn {\"id\":token, \"log_file\":log_name}\n\t\t\telse:\n\t\t\t\t# could mean either the user did not exist, their password was incorrect, or they were not in the cms-cond-dropbox egroup\n\t\t\t\traise InvalidUserException(username_or_token)\n\n\tdef get_log_file_name(self, upload_session_id=None):\n\n\t\tif self.upload_session_id == None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\tupload_sessions = self.connection.execute(\"select log_file from upload_session where token = :1\", [self.upload_session_id]).fetchall()\n\t\texcept Exception as e:\n\t\t\timport traceback\n\t\t\ttraceback.print_exc()\n\t\t\traise ConnectionException(e)\n\t\t# only need to check for data not found, since if it is found we're guaranteed to have a unique result\n\t\tif len(upload_sessions) == 0:\n\t\t\traise DataNotFoundException(class_name=\"upload_session\", id=upload_session_id)\n\n\t\tlog_file_name = upload_sessions[0][0]\n\t\treturn log_file_name\n\n\tdef end_usage(self):\n\t\tself.conddbfw_connection.close_session()\n\t\tself.connection.close()\n\t\tself.close_log_handle()\n\n\tdef close_log_handle(self):\n\t\tif self.log_handle != None:\n\t\t\tlogging.debug(\"CLOSING FILE HANDLE FOR USAGE LOG\")\n\t\t\tself.log_handle.close()\n\n\tdef close_upload_session(self, upload_session_id=None):\n\t\ttry:\n\t\t\t# we can log because, for now, the session is only closed by the final request - when an upload_session_id is set\n\t\t\tg.usage.log(\"Closing upload session.\")\n\t\t\t# close upload session by setting expiry_time to a datetime in the past\n\t\t\tself.connection.execute(\"update upload_session set expiry_time = to_timestamp(:1, 'YYYY-MM-DD HH24:MI:SS.FF') where token = :2\",\\\n\t\t\t\t\t\t\t\t\t[to_timestamp(datetime(2016, 1, 1, 1, 1, 1)), self.upload_session_id])\n\t\t\treturn True\n\t\texcept Exception as e:\n\t\t\timport traceback\n\t\t\tlogging.debug(traceback.format_exc())\n\t\t\traise ConnectionException(e)\n\n\tdef log(self, message, upload_session_id=None):\n\t\tif self.upload_session_id != None:\n\t\t\tlog_file_name = self.get_log_file_name()\n\t\t\tself.log_handle = open(log_file_name, \"a\")\n\t\telse:\n\t\t\traise NoUploadSessionException()\n\n\t\ttime_now = str(datetime.now())\n\t\tself.log_handle.write(\"[%s] %s\\n\" % (time_now, message))\n\t\tself.log_handle.close()\n\n\t\tlogging.info(message)\n\n\tdef get_log_data(self, upload_session_id=None):\n\t\ttry:\n\t\t\tlog_file_name = self.get_log_file_name(self.upload_session_id)\n\t\texcept Exception as e:\n\t\t\t# couldn't find the log file name\n\t\t\tif isinstance(e, 
DataNotFoundException) or isinstance(e, ConnectionException):\n\t\t\t\traise e\n\t\t\telse:\n\t\t\t\treturn \"\"\n\n\t\tif log_file_name == None:\n\t\t\treturn \"\"\n\n\t\thandle = open(log_file_name, \"r\")\n\t\tlog_data = handle.read()\n\t\thandle.close()\n\t\treturn log_data\n\nif __name__ == \"__main__\":\n\tusage = Usage()\n\tprint(usage.new_upload_session())\n","sub_path":"app/usage.py","file_name":"usage.py","file_ext":"py","file_size_in_byte":10452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"552544986","text":"import aiohttp\nimport asyncio\nfrom bs4 import BeautifulSoup\nimport motor.motor_asyncio\n\nimport json\nimport os\nimport time\nimport sys\nimport configparser\nfrom contextlib import contextmanager\n\n\"\"\"\nhttps://www.lagou.com/gongsi/70.html\nhttps://www.lagou.com/gongsi/interviewExperiences.html?companyId=70\n\n
    f1.write("\nThese are the top talking IP addresses sorted by \\\namount of packets:\n")
    # assumption: the three loop headers below use placeholder iterable names (top_packets, top_sizes, top_protocols); the original headers are missing from this fragment
    for packet in top_packets:\n f1.write("\t\n" + str(packet['IP_Src']) + "\t\n" + str(packet['Amount']) + "\n")
    f1.write("\nThese are the top talking IP \\\naddresses sorted by total bytes size :\n")
    for packet in top_sizes:\n f1.write("\t\n" + str(packet['IP_Src']) + "\t\n" + str(packet['Size']) + "\n")
    f1.write("These are the top used Protocols sorted by percentage \\\nof traffic :\t\n")
    for packet in top_protocols:\n # f2.write(str(packet['Proto']))\n # drop the '<' and '>' of the protocol repr so it prints cleanly\n protocol_name = ''\n for char in str(packet['Proto']):\n if (char != '<') and (char != '>'):\n protocol_name += char\n f1.write(protocol_name)\n f1.write("\t\n" + str((int((float(packet['Size']) / total_buf)*100))) + "\\\n %"+"