diff --git "a/1999.jsonl" "b/1999.jsonl" new file mode 100644--- /dev/null +++ "b/1999.jsonl" @@ -0,0 +1,643 @@ +{"seq_id":"543859537","text":"# Copyright (c) The Diem Core Contributors\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"This module defines util functions for encoding and decoding offchain specific JWS messages.\n\nThe `serialize` and `deserialize` functions handle JWS message with the following requirements:\n\n 1. Protected header must be `{\"alg\": \"EdDSA\"}`\n 2. Characters encoding must be `utf-8`\n 3. JWS encoding must be `compact`\n\n\"\"\"\n\nimport base64, json, typing\n\nfrom . import CommandRequestObject, CommandResponseObject, to_json, from_json\n\n\nPROTECTED_HEADER: bytes = base64.urlsafe_b64encode(b'{\"alg\":\"EdDSA\"}')\nENCODING: str = \"UTF-8\"\n\nT = typing.TypeVar(\"T\")\n\n\ndef serialize(\n obj: typing.Union[CommandRequestObject, CommandResponseObject],\n sign: typing.Callable[[bytes], bytes],\n headers: typing.Optional[typing.Dict[str, typing.Any]] = None,\n) -> bytes:\n return serialize_string(to_json(obj), sign, headers=headers)\n\n\ndef deserialize(\n msg: bytes,\n klass: typing.Type[T],\n verify: typing.Callable[[bytes, bytes], None],\n) -> T:\n decoded_body, sig, signing_msg = deserialize_string(msg)\n verify(sig, signing_msg)\n return from_json(decoded_body, klass)\n\n\ndef serialize_string(\n json: str, sign: typing.Callable[[bytes], bytes], headers: typing.Optional[typing.Dict[str, typing.Any]] = None\n) -> bytes:\n header = PROTECTED_HEADER if headers is None else encode_headers(headers)\n payload = base64.urlsafe_b64encode(json.encode(ENCODING))\n sig = sign(signing_message(payload, header=header))\n return b\".\".join([header, payload, base64.urlsafe_b64encode(sig)])\n\n\ndef deserialize_string(msg: bytes) -> typing.Tuple[str, bytes, bytes]:\n parts = msg.split(b\".\")\n if len(parts) != 3:\n raise ValueError(\n \"invalid JWS compact message: %s, expect 3 parts:
..\" % msg.decode(ENCODING)\n )\n\n header, body, sig = parts\n header_text = decode(header).decode(ENCODING)\n try:\n protected_headers = json.loads(header_text)\n except json.decoder.JSONDecodeError as e:\n raise ValueError(f\"invalid JWS message header: {header_text}\") from e\n\n if not isinstance(protected_headers, dict) or protected_headers.get(\"alg\") != \"EdDSA\":\n raise ValueError(f\"invalid JWS message header: {header}, expect alg is EdDSA\")\n\n return (\n decode(body).decode(ENCODING),\n decode(sig),\n signing_message(body, header=header),\n )\n\n\ndef signing_message(payload: bytes, header: bytes) -> bytes:\n return b\".\".join([header, payload])\n\n\ndef encode_headers(headers: typing.Dict[str, typing.Any]) -> bytes:\n return base64.urlsafe_b64encode(json.dumps(headers).encode(ENCODING))\n\n\ndef decode(msg: bytes) -> bytes:\n return base64.urlsafe_b64decode(fix_padding(msg))\n\n\ndef fix_padding(input: bytes) -> bytes:\n return input + b\"=\" * (4 - (len(input) % 4))\n","sub_path":"src/diem/offchain/jws.py","file_name":"jws.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"441518196","text":"#!/usr/bin/env python\n\nimport rospy\nimport cv2\nimport sys\nimport numpy as np\n'''\nlower_yellow = np.array([20,0,0])\nupper_yellow = np.array([45,255,255])\nlower_red_1 = np.array([0,30,25])\nupper_red_1 = np.array([20,255,255])\nlower_red_2 = np.array([140,0,0])\nupper_red_2 = np.array([179,255,255])\n'''\nlower_yellow = np.array([20,0,0])\nupper_yellow = np.array([29,255,255])\nlower_red_1 = np.array([0,20,0])\nupper_red_1 = np.array([9,255,255])\nlower_red_2 = np.array([166,0,0])\nupper_red_2 = np.array([179,255,255])\n\n\nkernal = np.ones((5, 5), np.uint8)\n\ndef findYellowCnts(img):\n mask = cv2.inRange(img, lower_yellow, upper_yellow)\n blur = cv2.GaussianBlur(mask, (11,11),0)\n canny = cv2.Canny(blur, 30, 150)\n closing = cv2.morphologyEx(canny, cv2.MORPH_CLOSE, kernal)\n opening = cv2.morphologyEx(closing, cv2.MORPH_CLOSE, kernal)\n (_,cnts, _) = cv2.findContours(opening, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n #print(\"Count {} Yellow\". format(len(cnts)))\n if(len(cnts) > 0):\n cnts = sorted(cnts, key = cv2.contourArea, reverse = True)\n return cnts[0]\n else:\n return None\n\ndef findRedCnts(img):\n mask_1 = cv2.inRange(img, lower_red_1, upper_red_1)\n mask_2 = cv2.inRange(img, lower_red_2, upper_red_2)\n mask = mask_1 + mask_2\n blur = cv2.GaussianBlur(mask, (11,11),0)\n canny = cv2.Canny(blur, 30, 150)\n closing = cv2.morphologyEx(canny, cv2.MORPH_CLOSE, kernal)\n opening = cv2.morphologyEx(closing, cv2.MORPH_CLOSE, kernal)\n (_,cnts, _) = cv2.findContours(opening, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n #print(\"Count {} Red\". 
format(len(cnts)))\n    if(len(cnts) > 0):\n        cnts = sorted(cnts, key = cv2.contourArea, reverse = True)\n        return cnts[0]\n    else:\n        return None\n    \ndef findCentroid(cnts):\n    M = cv2.moments(cnts)\n    cx = int(M[\"m10\"]/(M[\"m00\"]+1e-6))\n    cy = int(M[\"m01\"]/(M[\"m00\"]+1e-6))\n    return cx, cy\n\n\ndef focusCenter(img, cx_y, cx_r):\n    bar_cx = (float(cx_y) + float(cx_r)) / 2\n    view_cx = img.shape[1]/2\n    delta_lr = (view_cx - bar_cx) / img.shape[1]\n    #print(cx_y,cx_r,bar_cx,view_cx,img.shape[1])\n    \n    '''\n    print(\"delta_lr: {}\".format(delta_lr))\n    if(delta_lr > 20):\n        print(\"GO LEFT\")\n    elif(delta_lr < -20):\n        print(\"GO RIGHT\")\n    else:\n        print(\"GO FORWARD\") \n    '''\n    return delta_lr\n\ndef headTilt(img, cy_y, cy_r):\n    bar_cy = (cy_y + cy_r) / 2\n    view_cy = img.shape[0]/2\n    delta_head = (view_cy - bar_cy) / img.shape[0] \n    '''\n    print(\"delta_head: {}\".format(delta_head))\n\n    if(delta_head > 10):\n        print(\"HEAD UP\")\n    elif(delta_head < -10):\n        print(\"HEAD DOWN\")\n    else:\n        print(\"HEAD KEEP\") \n    '''\n    return delta_head\n\ndef slope(cx_y, cy_y, cx_r, cy_r):\n    \n    bar_slope = (cy_y - cy_r) / (cx_y - cx_r + 1e-6)\n    '''\n    print(\"bar_slope: {}\".format(bar_slope))\n    if(bar_slope > 0.1):\n        print(\"Turn facing right\")\n    elif(bar_slope < -0.1):\n        print(\"Turn facing left\")\n    else:\n        print(\"Keep facing forward\")\n    '''\n    return bar_slope\n\n    \n'''\ncap = cv2.VideoCapture(0)\nwhile not rospy.is_shutdown():\n    ret, frame = cap.read()\n    frame = cv2.resize(frame, (0,0),fx=0.5,fy=0.5, interpolation=cv2.INTER_CUBIC)\n    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n    cnts_yellow = findYellowCnts(hsv_frame)\n    cnts_red = findRedCnts(hsv_frame)\n    cx_y, cy_y = findCentroid(cnts_yellow)\n    cx_r, cy_r = findCentroid(cnts_red)\n\n    \n    focusCenter(hsv_frame, cx_y, cx_r)\n    headTilt(hsv_frame, cy_y, cy_r)\n    \n    slope(cx_y, cy_y, cx_r, cy_r)\n    \n    cv2.drawContours(hsv_frame, cnts_yellow, -1, (255,0,0), 2)\n    cv2.drawContours(hsv_frame, cnts_red, -1, (10,235,290), 2)\n    \n    cv2.circle(hsv_frame, (int((cx_y + cx_r) / 2), int((cy_y + cy_r) / 2)),5,(130, 40, 255), -1)\n    cv2.circle(hsv_frame, (int(frame.shape[1]/2), int(frame.shape[0]/2)),5,(130, 40, 255), -1)\n    \n    cv2.circle(hsv_frame, (cx_y, cy_y),5,(130, 40, 255), -1)\n    cv2.circle(hsv_frame, (cx_r, cy_r),5,(130, 40, 255), -1)\n\n    cv2.imshow('Current view',hsv_frame)\n    \n    cv2.waitKey(33)\n''' \n","sub_path":"src/fira_weightlifting/scripts/vision.py","file_name":"vision.py","file_ext":"py","file_size_in_byte":4131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"298567665","text":"from multiprocessing import Process\nfrom multiprocessing import Queue\nimport os\nimport time\nimport random\n\n# Code executed by the writer process\ndef write(q):\n\tprint('Process to write : %s' % os.getpid())\n\tfor value in ['A', 'B', 'C']:\n\t\tprint('Put %s to queue...' % value)\n\t\tq.put(value)\n\t\ttime.sleep(random.random())\n\t\t\n\t\t\n# Code executed by the reader process\ndef read(q):\n\tprint('Process to read: %s' % os.getpid())\n\twhile(True):\n\t\tvalue = q.get(True)\n\t\tprint(\"Get %s from queue.\" % value)\n\t\t\nif __name__ == '__main__':\n\t\n\t# The parent process creates the queue and starts each child process:\n\tq = Queue()\n\tpw = Process(target = write, args = (q,))\n\tpr = Process(target = read, args = (q,))\n\tpw.start()\n\tpr.start()\n\tpw.join()\n\tpr.terminate()  # force stop\n\t","sub_path":"process/process_queue.py","file_name":"process_queue.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"216352590","text":"import argparse\nimport sys\nimport os\nimport cv2\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\ntry:\n    import tensorflow as tf\n    from tensorflow import keras\n    # NB: Eager execution needs to be disabled before any model loading.\n    tf.compat.v1.disable_eager_execution ()\nexcept:\n    import keras\n\nfrom utils import *\n\ndef deepconcolic(test_object, outs):\n    report_args = { 'save_input_func': test_object.save_input_func,\n                    'inp_ub': test_object.inp_ub,\n                    'outs': outs}\n    if test_object.criterion=='nc': ## neuron cover\n        from nc import setup as nc_setup\n        if test_object.norm=='linf':\n            from pulp_norms import LInfPulp\n            from nc_pulp import NcPulpAnalyzer\n            engine = nc_setup (test_object = test_object,\n                               setup_analyzer = NcPulpAnalyzer,\n                               input_metric = LInfPulp ())\n        elif test_object.norm=='l0':\n            from nc_l0 import NcL0Analyzer\n            engine = nc_setup (test_object = test_object,\n                               setup_analyzer = NcL0Analyzer,\n                               input_shape = test_object.raw_data.data[0].shape,\n                               eval_batch = test_object.eval_batch)\n        else:\n            print('\n not supported norm... {0}\n'.format(test_object.norm))\n            sys.exit(0)\n        engine.run (**report_args)\n    elif test_object.criterion=='ssc':\n        from ssc import SScAttackBasedAnalyzer, setup as ssc_setup\n        engine = ssc_setup (test_object = test_object,\n                            setup_analyzer = SScAttackBasedAnalyzer,\n                            ref_data = test_object.raw_data)\n        engine.run (**report_args)\n    elif test_object.criterion=='ssclp':\n        from pulp_norms import LInfPulp\n        from mcdc_pulp import SScPulpAnalyzer\n        from ssc import setup as ssc_setup\n        engine = ssc_setup (test_object = test_object,\n                            setup_analyzer = SScPulpAnalyzer,\n                            input_metric = LInfPulp ())\n        engine.run (**report_args)\n    elif test_object.criterion=='svc':\n        outs = setup_output_dir (outs)\n        from run_ssc import run_svc\n        print('\n== Starting DeepConcolic tests for {0} =='.format (test_object))\n        run_svc(test_object, outs)\n    else:\n        print('\n not supported coverage criterion... 
{0}\\n'.format(test_object.criterion))\n sys.exit(0)\n\n\ndef main():\n\n parser=argparse.ArgumentParser(description='Concolic testing for neural networks' )\n parser.add_argument(\n '--model', dest='model', default='-1', help='the input neural network model (.h5)')\n parser.add_argument(\"--inputs\", dest=\"inputs\", default=\"-1\",\n help=\"the input test data directory\", metavar=\"DIR\")\n parser.add_argument(\"--outputs\", dest=\"outputs\", default=\"-1\",\n help=\"the outputput test data directory\", metavar=\"DIR\")\n parser.add_argument(\"--training-data\", dest=\"training_data\", default=\"-1\",\n help=\"the extra training dataset\", metavar=\"DIR\")\n parser.add_argument(\"--criterion\", dest=\"criterion\", default=\"nc\",\n help=\"the test criterion\", metavar=\"nc, ssc...\")\n parser.add_argument(\"--labels\", dest=\"labels\", default=\"-1\",\n help=\"the default labels\", metavar=\"FILE\")\n parser.add_argument(\"--mnist-dataset\", dest=\"mnist\", help=\"MNIST dataset\", action=\"store_true\")\n parser.add_argument(\"--cifar10-dataset\", dest=\"cifar10\", help=\"CIFAR-10 dataset\", action=\"store_true\")\n parser.add_argument(\"--vgg16-model\", dest='vgg16', help=\"vgg16 model\", action=\"store_true\")\n parser.add_argument(\"--norm\", dest=\"norm\", default=\"l0\",\n help=\"the norm metric\", metavar=\"linf, l0\")\n parser.add_argument(\"--input-rows\", dest=\"img_rows\", default=\"224\",\n help=\"input rows\", metavar=\"INT\")\n parser.add_argument(\"--input-cols\", dest=\"img_cols\", default=\"224\",\n help=\"input cols\", metavar=\"INT\")\n parser.add_argument(\"--input-channels\", dest=\"img_channels\", default=\"3\",\n help=\"input channels\", metavar=\"INT\")\n parser.add_argument(\"--cond-ratio\", dest=\"cond_ratio\", default=\"0.01\",\n help=\"the condition feature size parameter (0, 1]\", metavar=\"FLOAT\")\n parser.add_argument(\"--top-classes\", dest=\"top_classes\", default=\"1\",\n help=\"check the top-xx classifications\", metavar=\"INT\")\n parser.add_argument(\"--layer-index\", dest=\"layer_indexes\",\n nargs=\"+\", type=int,\n help=\"to test a particular layer\", metavar=\"INT\")\n parser.add_argument(\"--feature-index\", dest=\"feature_index\", default=\"-1\",\n help=\"to test a particular feature map\", metavar=\"INT\")\n\n args=parser.parse_args()\n\n\n criterion=args.criterion\n norm=args.norm\n cond_ratio=float(args.cond_ratio)\n top_classes=int(args.top_classes)\n\n raw_data=None\n img_rows, img_cols, img_channels = int(args.img_rows), int(args.img_cols), int(args.img_channels)\n\n dnn = None\n inp_ub = 1\n save_input = None\n if args.model!='-1':\n dnn = keras.models.load_model (args.model)\n dnn.summary()\n save_input = save_an_image\n elif args.vgg16:\n dnn = keras.applications.VGG16 ()\n inp_ub = 255\n dnn.summary()\n save_input = save_an_image\n else:\n print (' \\n == Please specify the input neural network == \\n')\n sys.exit(0)\n\n if args.inputs!='-1':\n \n xs=[]\n print ('Loading input data... 
', end = '', flush = True)\n for path, subdirs, files in os.walk(args.inputs):\n for name in files:\n fname=(os.path.join(path, name))\n if fname.endswith('.jpg') or fname.endswith('.png'):\n try:\n image = cv2.imread(fname)\n image = cv2.resize(image, (img_rows, img_cols))\n image = image.astype('float')\n xs.append((image))\n except: pass\n x_test = np.asarray(xs)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, img_channels)\n raw_data = raw_datat(x_test, None)\n print (len(xs), 'loaded.')\n elif args.mnist:\n from keras.datasets import mnist\n print ('Loading MNIST data... ', end = '', flush = True)\n img_rows, img_cols, img_channels = 28, 28, 1\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, img_channels)\n x_test = x_test.astype('float32')\n x_test /= 255\n raw_data = raw_datat(x_test, y_test)\n print ('done.')\n elif args.cifar10:\n from keras.datasets import cifar10\n print ('Loading CIFAR10 data... ', end='', flush = True)\n img_rows, img_cols, img_channels = 32, 32, 3\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n x_test=x_test[0:3000]\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, img_channels)\n x_test = x_test.astype('float32')\n x_test /= 255\n raw_data = raw_datat(x_test, y_test)\n print ('done.')\n else:\n print (' \\n == Please input dataset == \\n')\n sys.exit(0)\n\n outs=None\n if args.outputs!='-1':\n outs=args.outputs\n else:\n print (' \\n == Please specify the output directory == \\n')\n sys.exit(0)\n\n\n test_object=test_objectt(dnn, raw_data, criterion, norm)\n test_object.cond_ratio = cond_ratio\n test_object.top_classes = top_classes\n test_object.inp_ub = inp_ub\n test_object.save_input_func = save_input\n if args.layer_indexes is not None:\n try:\n test_object.layer_indices=[]\n for layer_index in tuple(args.layer_indexes):\n layer = dnn.get_layer (index = int (layer_index))\n test_object.layer_indices.append (dnn.layers.index (layer))\n except ValueError as e:\n sys.exit (e)\n if args.feature_index!='-1':\n test_object.feature_indices=[]\n test_object.feature_indices.append(int(args.feature_index))\n print ('feature index specified:', test_object.feature_indices)\n if args.training_data!='-1':\n tdata=[]\n print ('To load the extra training data...')\n for path, subdirs, files in os.walk(args.training_data):\n for name in files:\n fname=(os.path.join(path, name))\n if fname.endswith('.jpg') or fname.endswith('.png'):\n try:\n image = cv2.imread(fname)\n image = cv2.resize(image, (img_rows, img_cols))\n image=image.astype('float')\n tdata.append((image))\n except: pass\n print ('The extra training data loaded: ', len(tdata))\n test_object.training_data=tdata\n\n if args.labels!='-1':\n labels=[]\n lines = [line.rstrip('\\n') for line in open(args.labels)]\n for line in lines:\n for l in line.split():\n labels.append(int(l))\n test_object.labels=labels\n\n test_object.check_layer_indices ()\n deepconcolic(test_object, outs)\n\nif __name__==\"__main__\":\n try:\n main ()\n except KeyboardInterrupt:\n sys.exit('Interrupted.')\n","sub_path":"src/deepconcolic.py","file_name":"deepconcolic.py","file_ext":"py","file_size_in_byte":8764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"28917100","text":"def largeGroupPositions(self, S):\n \"\"\"\n :type S: str\n :rtype: List[List[int]]\n \"\"\"\n slow = 0\n fast = 0\n result = []\n while slow < len(S):\n count = 0\n temp = []\n while fast < len(S) 
and S[fast] == S[slow]:\n            fast += 1\n            count += 1\n        if count >= 3:\n            temp.append(slow)\n            temp.append(fast-1)\n            result.append(temp)\n        slow = fast\n    return result","sub_path":"Leetcode Problem 830.py","file_name":"Leetcode Problem 830.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"210604142","text":"import turtle\nt = turtle.Turtle()\n\ns = turtle.Screen()\nc = 0\nwhile 1:\n\tfor i in range(4):\n\t\tt.forward(250)\n\t\tt.right(90)\n\tt.right(3)\n\tc += 1\n\tif c >= 360/3:\n\t\tbreak\n\nturtle.done()\n\n\n","sub_path":"Square_pattern.py","file_name":"Square_pattern.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"319731088","text":"import torch\nimport functools\n\n__all__ = [\n    \"Downsample2d\",\n]\n\nclass Downsample2d(torch.nn.Module):\n    def __init__(self,\n        scale: float=0.5,\n        mode: str=\"bilinear\"\n    ):\n        super(Downsample2d, self).__init__()\n        self.downsample = functools.partial(torch.nn.functional.interpolate,\n            scale_factor=scale,\n            mode=mode\n        )\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        return self.downsample(x)","sub_path":"moai/nn/sampling/spatial/downsample/interpolate.py","file_name":"interpolate.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"295933732","text":"from flask_restplus import Namespace, Resource, fields\nfrom flask import *\nimport DataLayer.DocumentDb.Queries as queries\nimport DataLayer.Repositories\nimport os\nimport platform\nimport DataLayer.DocumentDb.ConnectionManager\n\napi = Namespace('Alerts', description='Alerts related operations')\n\n\nalert_advanced_data = api.model('advanced_alerts_data', {\n    'refkeynumber': fields.String(required=True),\n    'name': fields.String(required=True),\n})\n\n\ncreate_alert_response_model = api.model('create_alert_response_model', {\n    'alert_id': fields.String(required=True, description='The alert identifier'),\n    'InternalData': fields.List(fields.Nested(alert_advanced_data)),\n    'alert_name': fields.String(required=True, description='The alert name')\n\n})\n\n\n\nget_all_alerts_response_model = api.model('get_all_alerts_response', {\n    'id': fields.String(required=True, description='The alert identifier'),\n    'alert_name': fields.String(required=True, description='The alert name'),\n    'alert_category': fields.String(required=True, description='The alert category')\n})\n\n\nclass xx(object):\n    def __init__(self):\n        self.refkeynumber = \"sdfsdf\"\n        self.name = \"dsafsdf\"\n\n\nclass alertres(object):\n    def __init__(self, alertid, alertname, internaldata):\n        self.alert_id = alertid\n        self.alert_name = alertname\n        self.InternalData = internaldata\n        self.test_avi = \"sdfsdf\"\n        self.data = \"dataaa!\"\n\n\ncreate_alert_request_model = api.model('create_alert_request_model', {\n    'Alert_Name': fields.String(description='Name of the Alert', required=True),\n    'Alert_Id': fields.String(description='Id of the alert', required=True),\n    'InternalData': fields.List(fields.Nested(alert_advanced_data), required=True),\n    'Data': fields.String(description='Id of the alert', required=False),\n})\n\n\nAlerts = [\n    {'id': 'this_is_alert_id', 'name': 'xsg33ghd3w2667e233'},\n]\n\n\n\n# @api.route('/aaa')\n# class Cat(Resource):\n#     @api.doc('list_alerts')\n#     #@api.marshal_list_with(create_alert_response_model)\n#     def get(self):\n#         '''List all 
alerts'''\n#         mydocument = queries.get_alert()\n#         return mydocument\n\n@api.route('/<id>')\n@api.param('id', 'The alert identifier')\n@api.response(404, 'alert not found')\nclass Alert(Resource):\n    @api.doc('Get_Alert_By_Id')\n    @api.marshal_with(create_alert_response_model)\n    def get(self, id):\n        '''Fetch an alert given its identifier'''\n        for cat in Alerts:\n            if cat['id'] == id:\n                return cat\n        api.abort(404)\n\n\n@api.route('/')\n@api.response(201, 'alert created')\nclass AlertList(Resource):\n    @api.doc('create_new_alert')\n    @api.marshal_with(create_alert_response_model)\n    @api.expect(create_alert_request_model)\n    def post(self):\n        '''Creates new alert'''\n        x = xx()\n        xxxx = list()\n        xxxx.append(x)\n        adf = alertres(\"id\", \"name\", xxxx)\n        json = request.get_json(force=True)\n        return adf\n        #return jsonpickle.encode(adf)\n\n    @api.doc('list_alerts')\n    @api.marshal_with(get_all_alerts_response_model)\n    #@api.marshal_list_with(create_alert_response_model)\n    def get(self):\n        '''List all alerts'''\n        a = DataLayer.Repositories.AlertsRepository.get_all_alerts()\n        return a\n        # return mydocument\n\n\n\n@api.route('/health')\n@api.response(200, 'server is healthy')\nclass HealthCheck(Resource):\n    @api.doc('check alert server health')\n    def post(self):\n        '''Checks alert server health state'''\n        xxx = os.path.dirname(os.path.realpath(__file__))\n        yyy = platform.system()\n        return \"im 0kay :). i mean it\"\n\n\n\n\n","sub_path":"Controllers/Alerts.py","file_name":"Alerts.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"594345769","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('225.jpeg', 0)\nret, thresh = cv2.threshold(img, 127, 255, 0)\nimage, contours, hierarchy = cv2.findContours(thresh, 1, 2)\n\ncnt = contours[0]\n\n# Aspect ratio of the bounding rectangle\n\nx, y, w, h = cv2.boundingRect(cnt)\naspect_ratio = float(w) / h\nprint(aspect_ratio)\n\n# Extent: ratio of contour area to bounding-rectangle area\n\narea = cv2.contourArea(cnt)\nx, y, w, h = cv2.boundingRect(cnt)\nrect_area = w * h\nextent = float(area) / rect_area\nprint(extent)\n\n\n# Solidity: ratio of contour area to convex hull area\n\narea = cv2.contourArea(cnt)\nhull = cv2.convexHull(cnt)\nhull_area = cv2.contourArea(hull)\nSolidity = float(area) / hull_area\nprint(Solidity)\n\n# Equivalent Diameter: the diameter of the circle whose area equals the contour area\narea = cv2.contourArea(cnt)\nequi_diameter = np.sqrt(4 * area / np.pi)\nprint(equi_diameter)\n\n\n# Orientation: the direction of the object; also returns the lengths of the major and minor axes\n(x, y), (MA, ma), angle = cv2.fitEllipse(cnt)\nprint((x, y), (MA, ma), angle)\n\n\n# Mask and pixel points\n\nimg = cv2.imread('225.jpeg')\nimgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nmask = np.zeros(imgray.shape, np.uint8)\n\n# The thickness argument must be -1 here, to draw the whole contour filled in\npixelpoints1 = cv2.drawContours(mask, [cnt], 0, 255, -1)\n# np.nonzero returns the indices of the non-zero elements\n'''To group the indices by element rather than by dimension, use:\ntranspose(nonzero(a))\nThe result is always a 2-D array, with a row for each non-zero element.'''\npixelpoints2 = np.transpose(np.nonzero(mask))\n\nprint(pixelpoints1) # OpenCV returns the (x, y) format, while NumPy returns (row, column), i.e. the positions of the non-zero elements\nprint(pixelpoints2)\n# The two are interchanged: row = y, column = x\n\n# Maximum and minimum values and their locations\nmin_val, max_val, min_loc, max_loc = cv2.minMaxLoc(imgray, mask=mask)\nprint(min_val, max_val, min_loc, max_loc)\n\n# Color and mean intensity\n# We can use the same mask to find the mean color or mean intensity of an object\n\nmean_val = cv2.mean(img, mask=mask)\nprint(mean_val)\n\n\n# Extreme points\n# The topmost, bottommost, leftmost and rightmost points of an object\nleftmost = tuple(cnt[cnt[:, :, 0].argmin()][0])\nrightmost = tuple(cnt[cnt[:, :, 0].argmax()][0])\ntopmost = tuple(cnt[cnt[:, :, 
1].argmin()][0])\nbottommost = tuple(cnt[cnt[:, :, 1].argmax()][0])\nprint(leftmost, rightmost, topmost, bottommost)\n","sub_path":"OpenCv/opencv16_3.py","file_name":"opencv16_3.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"649242283","text":"# coding: utf-8\n\nimport os\nimport commands\nfrom dcmp.clouddisks.utils import clouddisk_root_path, default_quota, check_domainuser\n\n\nclass Quota(object):\n\tdef __init__(self, name, limit=default_quota):\n\t\tos.environ['value'] = str(limit)\n\t\tos.environ['username'] = str(name)\n\t\tos.environ['cloudpath'] = str(clouddisk_root_path)\n\t\tself.name = name\n\n\tdef set_quota(self):\n\t\tif not check_domainuser(self.name):\n\t\t\treturn False\n\t\telse:\n\t\t\tstatus, result = commands.getstatusoutput('xfs_quota -x -c \"limit -u bsoft=$value bhard=$value $username\" $cloudpath')\n\t\tif status == 0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False","sub_path":"dcmp/clouddisks/cloudDisk/quota.py","file_name":"quota.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"388350330","text":"# This is my code to train n model and saved them\n\nimport random\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport shutil\nimport tempfile\n\nfrom tensorflow.python import pywrap_tensorflow\n\nfrom baseline_constants import BYTES_WRITTEN_KEY, BYTES_READ_KEY, LOCAL_COMPUTATIONS_KEY\nfrom mh_constants import VARIABLE_PARAMS, MODEL_PARAMS\nfrom mlhead_utilfuncs import input_fn\nfrom kmean_model import KmeanModel\n\nclass Mlhead_Clus_Server:\n \n def __init__(self, client_model, dataset, model, num_clusters):\n self.modeldir = \"/scratch/tmpmodel\"\n if not os.path.exists(self.modeldir):\n os.makedirs(self.modeldir)\n# if num_clusters < 2:\n# raise Exception(\"Sorry, cluster number must be 2 or more\")\n if num_clusters > 10:\n raise Exception(\"Sorry, cluster number must less than 10\") \n if num_clusters != -1:\n self.model = client_model.get_params() # global model of the server.\n self.selected_clients = [] # this variable keeps random, unclustered clients\n self.set_model_path(dataset, model)\n self._x_dimensions = self.get_model_x_dimensions(dataset, model)\n self._variable = self.get_model_variable(dataset, model)\n self._num_clusters = num_clusters\n self._learned = None\n self._shuffledkeys = None\n \"\"\"\n cluster_membership is a list of cluster dictionary,\n each contains {'member':list of clients, \n 'center': a center vector, 'attention': a attention vector,\n 'loss': mean valdation loss of each client of this cluster}\n \"\"\"\n self._cluster_membership = list()\n for _ in range(num_clusters):\n self._cluster_membership.append({\"member\": [], \"center\": None, \"attention\": [], \"loss\": None}) \n\n @property\n def path(self):\n return self._path\n \n @property\n def x_dimensions(self):\n return self._x_dimensions\n \n @property\n def variable(self):\n return self._variable\n\n @property\n def selected(self):\n return [c for c in self.selected_clients]\n \n @property\n def clusters(self):\n return [c for c in self._clusters_membership]\n\n def select_clients(self, my_round, possible_clients):\n \"\"\"Selects num_clients clients randomly from possible_clients.\n \n Note that within function, num_clients is set to\n min(num_clients, len(possible_clients)).\n Args:\n possible_clients: Clients from which the server can select.\n num_clients: 
Number of clients to select; default 20\n        Return:\n            list of (num_train_samples, num_test_samples)\n        \"\"\"\n        num_clients = len(possible_clients)\n        np.random.seed(my_round * 50)\n        self.selected_clients = np.random.choice(possible_clients, num_clients, replace=False)\n\n        return [(c.num_train_samples, c.num_test_samples) for c in self.selected_clients]\n\n    def train_model(self):\n        \"\"\"Trains self.model on given clients.\n\n        \"\"\"\n        clients = self.selected_clients\n\n        tot_clients = len(clients)\n        done_idx = 0\n        for counter, c in enumerate(clients, 1):\n            \"\"\"\n            Note: this is a trick; it is equivalent to clearing the client's session\n            and makes sure the graph has been re-initialized.\n            \"\"\"\n            c_file = self.get_chkpfile( 'write_%s.ckpt' % c.id )\n\n            \"\"\"\n            I think this one will run faster than training,\n            since I have already initialized the values with a normal\n            distribution but done no training in the first round.\n            \"\"\"\n            \n            if os.path.exists(c_file):\n                os.remove(c_file)\n            c.save_model(c_file) \n\n            done_percentil = float(done_idx + 1) * 25\n            if self.get_percentil(counter, tot_clients) >= done_percentil:\n                done_idx += 1\n                print( \"%g%% of clients have finished\" % done_percentil)\n\n    def get_percentil(self, counter, total):\n        fraction = float((counter + 1) / total)\n        return fraction * 100 \n\n\n    def set_model_path(self, dataset, model):\n        self._path = os.path.join('/scratch/leaf/ckpt_runtime', dataset, model)\n        if not os.path.exists(self._path):\n            os.makedirs(self._path)\n    \n    def get_model_x_dimensions(self, dataset, model):\n        key = \"%s.%s\" % (dataset, model)\n        d = MODEL_PARAMS[ key ]\n        return d[0] * d[1]\n    \n    def get_model_variable(self, dataset, model):\n        key = \"%s.%s\" % (dataset, model)\n        v = VARIABLE_PARAMS[ key ] \n        return v\n\n    def get_chkpfile(self, id_ckpt):\n        return os.path.join(self._path, id_ckpt)\n\n    def train_iteation(self, data):\n        train_data = lambda: input_fn(data)\n        self._kmeans.train(train_data)\n        cluster_centers = self._kmeans.cluster_centers()\n        # print(\"cluster centers:\", cluster_centers)\n        score = self._kmeans.score(train_data)\n        return score\n    \n    def get_init_point_data(self):\n        #points = np.random.normal(loc=0.5, scale=0.5, size= (len(self.selected), self._x_dimensions)) \n        points = np.random.uniform(-0.149, 0.149, (len(self.selected), self._x_dimensions))\n        c_dict = {}\n        for x, client in enumerate(self.selected):\n            c_dict[client.id] = points[x]\n        self._shuffledkeys = list(c_dict.keys())\n        return c_dict\n\n    def run_clustering(self, prev_score, data):\n        seed = np.random.randint(5667799881, size=1)[0]\n        features = np.concatenate([np.array(data[x]) for x in self._shuffledkeys])\n        features = np.reshape(features, (len(self._shuffledkeys), self._x_dimensions))\n        model = KmeanModel(features.shape[0], self._x_dimensions, self._num_clusters, seed)\n        self._learned = model.assign_clusters(features)\n        random.shuffle(self._shuffledkeys)\n        return None, len(self._learned)\n    \n    def train_kmeans(self, prev_score, data):\n        \"\"\"We are using pre-made tensorflow estimators to \n        train and predict.\n        \n        Args:\n            prev_score: the sum of the distances between each sample\n                and its nearest center\n            data: list of weights of user models\n        Return:\n            updated score\n        \n        \"\"\"\n\n        seed = np.random.randint(5667799881, size=1)[0]\n        temp_name = next(tempfile._get_candidate_names())\n        temp_modeldir = os.path.join(self.modeldir, temp_name)\n        if not os.path.exists(temp_modeldir): \n            os.makedirs(temp_modeldir)\n        self._kmeans = tf.contrib.factorization.KMeansClustering(\n            model_dir=temp_modeldir,\n            
random_seed=seed,\n num_clusters=self._num_clusters, use_mini_batch=False) \n\n # composed of weights of every client model \n\n score = self.train_iteation(data)\n \n # evaluate the samples to compute the distance between\n # each sample and each center, forming a matrix have each \n # row for each sample, and the column is distance to each\n # center.\n y = self._kmeans.transform(lambda: input_fn(data))\n point_distance = list(y)\n self._learned = np.argmin(point_distance , 1)\n # removing not used files of this model\n #shutil.rmtree(temp_modeldir, ignore_errors=True)\n return None, score\n\n \n def eval_clustermembership(self, num_clusters):\n \"\"\"Transfrom the input data,\n get the min distance of each point to cluster centers, \n then return the index of cluster center whose distance for a sample\n is the mininum\n \n return a list of (num_clients, clients)\n \"\"\"\n # print(str(self._learned))\n\n for _, cluster in enumerate(self._cluster_membership):\n cluster[\"member\"] = list()\n \n sort_clients = list()\n # to sync the sorted clients with shuffle step\n for i in self._shuffledkeys:\n for client in self.selected_clients:\n if client.id == i:\n sort_clients.append(client)\n break\n \n for x in range(len(self._learned)):\n grp_id = self._learned[x] \n self._cluster_membership[grp_id][\"member\"].append(sort_clients[x])\n return [ (len(cluster[\"member\"]), cluster[\"member\"]) for cl_id, cluster in enumerate(self._cluster_membership)]\n ","sub_path":"all_baselines/fed-cluster/models/mlhead_clus_server.py","file_name":"mlhead_clus_server.py","file_ext":"py","file_size_in_byte":8537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"104290533","text":"name = input(\"Enter file:\")\nif len(name) < 1 : name = \"mbox-short.txt\"\nhandle = open(name)\ndict ={}\nfor line in handle:\n if not line.startswith(\"From \"):\n continue\n email = (line.split()[1]).strip()\n dict[email] = dict.get(email,0) + 1\n \nmax = None\nmost_profilic = None \nfor email, count in dict.items():\n if max is None or max< count:\n max = count\n most_profilic = email\n\nprint(most_profilic, max)","sub_path":"Python Data Structures/Assignment7.py","file_name":"Assignment7.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"233433275","text":"#!/usr/bin/env python\n#\n# __init__.py -\n#\n# Author: Paul McCarthy \n#\n\nimport os\nimport gc\nimport time\nimport shutil\nimport logging\nimport tempfile\nimport traceback\nimport contextlib\n\nimport wx\n\nimport matplotlib as mpl\nmpl.use('WxAgg') # noqa\n\nimport fsleyes_props as props\nimport fsl.utils.idle as idle\nimport fsleyes\nimport fsleyes.frame as fslframe\nimport fsleyes.main as fslmain\nimport fsleyes.actions.frameactions as frameactions # noqa\nimport fsleyes.gl as fslgl\nimport fsleyes.colourmaps as colourmaps\nimport fsleyes.displaycontext as dc\nimport fsleyes.overlay as fsloverlay\n\n\nfrom .compare_images import compare_images\n\n\n# Under GTK, a single call to\n# yield just doesn't cut it\ndef realYield(centis=10):\n for i in range(int(centis)):\n wx.YieldIfNeeded()\n time.sleep(0.01)\n\n\n@contextlib.contextmanager\ndef tempdir():\n \"\"\"Returnsa context manager which creates and returns a temporary\n directory, and then deletes it on exit.\n \"\"\"\n\n testdir = tempfile.mkdtemp()\n prevdir = os.getcwd()\n try:\n\n os.chdir(testdir)\n yield testdir\n\n finally:\n os.chdir(prevdir)\n 
shutil.rmtree(testdir)\n\n\ninitialised = [False]\n\ndef run_with_fsleyes(func, *args, **kwargs):\n \"\"\"Create a ``FSLeyesFrame`` and run the given function. \"\"\"\n\n logging.getLogger().setLevel(logging.WARNING)\n\n gc.collect()\n idle.idleReset()\n\n propagateRaise = kwargs.pop('propagateRaise', True)\n startingDelay = kwargs.pop('startingDelay', 500)\n finishingDelay = kwargs.pop('finishingDelay', 5)\n callAfterApp = kwargs.pop('callAfterApp', None)\n\n result = [None]\n raised = [None]\n frame = [None]\n app = [None]\n\n def init():\n fsleyes.initialise()\n props.initGUI()\n colourmaps.init()\n initialised[0] = True\n fslgl.bootstrap((2, 1))\n wx.CallAfter(run)\n\n def finish():\n frame[0].Close(askUnsaved=False, askLayout=False)\n app[0].ExitMainLoop()\n\n def run():\n\n overlayList = fsloverlay.OverlayList()\n displayCtx = dc.DisplayContext(overlayList)\n lockGroup = dc.OverlayGroup(displayCtx, overlayList)\n\n displayCtx.overlayGroups.append(lockGroup)\n\n frame[0] = fslframe.FSLeyesFrame(None,\n overlayList,\n displayCtx)\n\n app[0].SetOverlayListAndDisplayContext(overlayList, displayCtx)\n app[0].SetTopWindow(frame[0])\n\n frame[0].Show()\n\n try:\n if func is not None:\n result[0] = func(frame[0],\n overlayList,\n displayCtx,\n *args,\n **kwargs)\n\n except Exception as e:\n traceback.print_exc()\n raised[0] = e\n\n finally:\n wx.CallLater(finishingDelay, finish)\n\n app[0] = fslmain.FSLeyesApp()\n dummy = wx.Frame(None)\n panel = wx.Panel(dummy)\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n sizer.Add(panel, flag=wx.EXPAND, proportion=1)\n dummy.SetSizer(sizer)\n\n if callAfterApp is not None:\n callAfterApp()\n\n dummy.SetSize((100, 100))\n dummy.Layout()\n dummy.Show()\n\n if not initialised[0]:\n wx.CallLater(startingDelay,\n fslgl.getGLContext,\n parent=panel,\n ready=init)\n else:\n wx.CallLater(startingDelay, run)\n\n app[0].MainLoop()\n dummy.Close()\n\n time.sleep(1)\n\n if raised[0] and propagateRaise:\n raise raised[0]\n\n return result[0]\n\n\ndef run_with_viewpanel(func, vptype, *args, **kwargs):\n def inner(frame, overlayList, displayCtx, *a, **kwa):\n panel = frame.addViewPanel(vptype)\n return func(panel, overlayList, displayCtx, *a, **kwa)\n return run_with_fsleyes(inner, *args, **kwargs)\n\n\ndef run_with_orthopanel(func, *args, **kwargs):\n \"\"\"Create a ``FSLeyesFrame`` with an ``OrthoPanel`` and run the given\n function.\n \"\"\"\n from fsleyes.views.orthopanel import OrthoPanel\n return run_with_viewpanel(func, OrthoPanel, *args, **kwargs)\n\n\ndef run_with_lightboxpanel(func, *args, **kwargs):\n \"\"\"Create a ``FSLeyesFrame`` with a ``LightBoxPanel`` and run the given\n function.\n \"\"\"\n from fsleyes.views.lightboxpanel import LightBoxPanel\n return run_with_viewpanel(func, LightBoxPanel, *args, **kwargs)\n\n\ndef run_with_scene3dpanel(func, *args, **kwargs):\n \"\"\"Create a ``FSLeyesFrame`` with a ``Scene3DPanel`` and run the given\n function.\n \"\"\"\n from fsleyes.views.scene3dpanel import Scene3DPanel\n return run_with_viewpanel(func, Scene3DPanel, *args, **kwargs)\n\n\ndef run_with_timeseriespanel(func, *args, **kwargs):\n \"\"\"Create a ``FSLeyesFrame`` with a ``TimeSeriesPanel`` and run the given\n function.\n \"\"\"\n from fsleyes.views.timeseriespanel import TimeSeriesPanel\n return run_with_viewpanel(func, TimeSeriesPanel, *args, **kwargs)\n\n\ndef run_with_histogrampanel(func, *args, **kwargs):\n \"\"\"Create a ``FSLeyesFrame`` with a ``HistogramPanel`` and run the given\n function.\n \"\"\"\n from fsleyes.views.histogrampanel import 
HistogramPanel\n return run_with_viewpanel(func, HistogramPanel, *args, **kwargs)\n\n\ndef run_with_powerspectrumpanel(func, *args, **kwargs):\n \"\"\"Create a ``FSLeyesFrame`` with a ``PowerSpectrumPanel`` and run the\n given function.\n \"\"\"\n from fsleyes.views.powerspectrumpanel import PowerSpectrumPanel\n return run_with_viewpanel(func, PowerSpectrumPanel, *args, **kwargs)\n","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"612613184","text":"\"\"\"\nMichael S. Emanuel\nMon Apr 30 14:29:09 2018\n\"\"\"\n\nfrom sys import argv\nfrom Database_Connection import getConnection\nfrom Euler.Primes import PrimeTable\nfrom Euler.Timing import Timer\n\n\ndef demo():\n \"\"\"Demonstrate getting a connection and executing a SQL statement.\"\"\"\n conn = getConnection()\n # print(conn)\n curs = conn.cursor()\n query = '''\n SELECT\n p.p\n FROM\n dbo.Prime as p;\n '''\n # print(query)\n curs.execute(query)\n for row in curs:\n print(row)\n conn.close()\n\n\ndef truncatePrimes(curs) -> None:\n \"\"\"Truncate the staging table r.Prime.\"\"\"\n sql = '''TRUNCATE TABLE r.Prime'''\n curs.execute(sql)\n curs.commit()\n\n\ndef insertPrimes(curs, pt: PrimeTable, p0: int, p1: int) -> int:\n \"\"\"Insert primes between p0 and p1 into the staging table r.Prime.\"\"\"\n # Extend primes up to p1 if necessary\n pt.buildPrimesUpTo(p1)\n # Prime index for start and end\n i0: int = pt.primeIndex(p0)\n i1: int = pt.primeIndex(p1)\n # Sql string to insert one prime\n sqlInsert = '''\n INSERT INTO r.Prime (p) VALUES (?)\n '''\n # Iterate over primes index i0 to i1\n for i in range(i0, i1):\n p: int = pt.nthPrime(i+1)\n curs.execute(sqlInsert, p)\n # Commit\n curs.commit()\n # Return last prime inserted\n return p\n\n\ndef insertPrimesByIndex(curs, pt: PrimeTable, i0: int, i1: int) -> int:\n \"\"\"Insert primes indexed i0 to i1 into the staging table r.Prime.\"\"\"\n # Prime index for start and end\n # i0: int = pt.primeIndex(p0)\n # i1: int = pt.primeIndex(p1)\n # Sql string to insert one prime\n sqlInsert = '''\n INSERT INTO r.Prime (p) VALUES (?)\n '''\n # Iterate over primes index i0 to i1\n for i in range(i0, i1):\n p: int = pt.nthPrime(i+1)\n curs.execute(sqlInsert, p)\n # Commit\n curs.commit()\n # Return last prime inserted\n return p\n\n\ndef importPrimes(curs, pMax) -> None:\n \"\"\"Import primes from the staging table r.Prime to dbo.Prime.\"\"\"\n # Sql string to insert one prime\n sqlImport = '''\n EXEC ImportPrimes @pMax=?;\n '''\n curs.execute(sqlImport, pMax)\n # Commit\n curs.commit()\n\n\ndef main():\n # Unpack arguments\n argc = len(argv)-1\n if argc != 2:\n print('Usage: python Database_InsertPrimes.py pMin pMax.')\n print('This will insert all primes from and including pMin, up to but not including pMax.')\n return\n pMin: int = int(argv[1])\n pMax: int = int(argv[2])\n # Get DB connection\n conn = getConnection()\n # Cursor\n curs = conn.cursor()\n # Status update\n tc: Timer = Timer()\n # Instantiate PrimeTable\n pt = PrimeTable(pMin)\n msg: str = f'Prime table loaded up to {pMin}.'\n tc.status(msg)\n\n # Insert primes one page at a time\n pageSize: int = 10**6\n kMin: int = pMin // pageSize\n kMax: int = pMax // pageSize\n for pageNum in range(kMin, kMax):\n # Start and end of the page to be inserted\n p0: int = pageNum * pageSize\n p1: int = (pageNum+1) * pageSize\n # Insert these primes into r.Prime\n p: int = insertPrimes(curs, pt, p0, 
p1)\n # Import them into dbo.Prime with prime index\n importPrimes(curs, p1)\n # Status update\n n: int = (pageNum+1)*pageSize\n primeCount: int = len(pt.prime)\n msg = f'Completed search to n={n}. Have {primeCount} primes through p={p}.'\n tc.status(msg, pageNum-kMin, kMax-kMin)\n # Close cursor\n curs.close()\n # Close connection\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Database_InsertPrimes.py","file_name":"Database_InsertPrimes.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"367289909","text":"import pandas as pd\nimport gc\nimport catboost\n\ndef subsample(data, fraction):\n data_label_1 = data.loc[data.is_click == 1].sample(frac=fraction)\n data_label_0 = data.loc[data.is_click == 0].sample(frac=fraction)\n data = pd.concat([data_label_0, data_label_1])\n data = data.sample(frac=1)\n return data\n\n\ndef train_test_split(data):\n train = data.loc[(data.refresh_day >= 3) & (data.refresh_day <= 10)]\n train = subsample(train, 0.1)\n\n valid = data.loc[data.refresh_day == 11]\n valid = subsample(valid, 0.1)\n\n test = data.loc[data.refresh_day == 12]\n test = subsample(test, 0.1)\n return train, valid, test\n\n\ndef main():\n data1 = pd.read_csv('../data/raw_data.csv', index_col=False)\n data2 = pd.read_csv('../data/raw_data2.csv', index_col=False)\n data3 = pd.read_csv('../data/raw_data3.csv', index_col=False)\n data = pd.concat([data1, data2, data3])\n del data1, data2, data3\n gc.collect()\n data['category'].fillna('other', inplace=True)\n train, valid, test = train_test_split(data)\n del data\n gc.collect()\n train.to_csv('../data/train_raw.csv', index=False)\n valid.to_csv('../data/valid_raw.csv', index=False)\n test.to_csv('../data/test_raw.csv', index=False)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"feature/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"79771651","text":"from setuptools import find_packages, setup\n\nwith open('README.md') as file:\n long_description = file.read()\n\nsetup(\n name=\"le-utils\",\n packages = find_packages(),\n version=\"0.0.9c24\",\n description=\"LE Utils and constants shared across Kolibri, Ricecooker and the Content Curation Server.\",\n long_description=long_description,\n license=\"MIT\",\n url=\"https://github.com/learningequality/le-utils\",\n download_url=\"https://github.com/learningequality/le-utils/tarball/0.0.9c14\",\n keywords=\"le-utils le_utils le utils kolibri ricecooker content curation\",\n package_data={\"le_utils\": [\"resources/*.json\"], },\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Programming Language :: Python\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Utilities\",\n ],\n author=\"Jordan Yoshihara\",\n author_email=\"jordan@learningequality.org\", )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"518035206","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Ana\")\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\n##-------------------- Communicate with the DB 
-----------------------\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\nprocess.GlobalTag.globaltag = 'GR_R_42_V19::All'\n#process.load(\"CondCore.DBCommon.CondDBCommon_cfi\")\n#from CondCore.DBCommon.CondDBSetup_cfi import *\n#process.jec = cms.ESSource(\"PoolDBESSource\",\n# DBParameters = cms.PSet(\n# messageLevel = cms.untracked.int32(0)\n# ),\n# timetype = cms.string('runnumber'),\n# toGet = cms.VPSet(\n# cms.PSet(\n# record = cms.string('JetCorrectionsRecord'),\n# tag = cms.string('JetCorrectorParametersCollection_Jec11V2_AK5PF'),\n# label = cms.untracked.string('AK5PF')\n# ),\n# cms.PSet(\n# record = cms.string('JetCorrectionsRecord'),\n# tag = cms.string('JetCorrectorParametersCollection_Jec11V2_AK7PF'),\n# label = cms.untracked.string('AK7PF')\n# ),\n# cms.PSet(\n# record = cms.string('JetCorrectionsRecord'),\n# tag = cms.string('JetCorrectorParametersCollection_Jec11V2_AK5Calo'),\n# label = cms.untracked.string('AK5Calo')\n# ),\n# cms.PSet(\n# record = cms.string('JetCorrectionsRecord'),\n# tag = cms.string('JetCorrectorParametersCollection_Jec11V2_AK7Calo'),\n# label = cms.untracked.string('AK7Calo')\n# )\n# ), \n# connect = cms.string('sqlite:Jec11V2.db')\n#)\nprocess.load('Configuration.StandardSequences.MagneticField_38T_cff')\nprocess.load('Configuration.StandardSequences.Geometry_cff')\nprocess.load('RecoJets.Configuration.RecoPFJets_cff')\nprocess.load('RecoJets.Configuration.RecoJets_cff')\n##-------------------- Import the JEC services -----------------------\nprocess.load('JetMETCorrections.Configuration.DefaultJEC_cff')\n#process.es_prefer_jec = cms.ESPrefer('PoolDBESSource','jec')\n############# Set the number of events #############\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(100)\n)\n############# Format MessageLogger #################\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1000\n############# Define the source file ###############\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n '/store/data/Run2011A/Jet/AOD/PromptReco-v6/000/173/244/E2AB1AE4-50C7-E011-937E-003048F11CF0.root'\n )\n)\n############# processed tree producer ##################\nprocess.TFileService = cms.Service(\"TFileService\",fileName = cms.string('ProcessedTree_data.root'))\n\nprocess.ak7 = cms.EDAnalyzer('ProcessedTreeProducer',\n ## jet collections ###########################\n pfjets = cms.InputTag('ak7PFJets'),\n calojets = cms.InputTag('ak7CaloJets'),\n ## database entry for the uncertainties ######\n PFPayloadName = cms.string('AK7PF'),\n CaloPayloadName = cms.string('AK7Calo'),\n ## calojet ID and extender for the JTA #######\n calojetID = cms.InputTag('ak7JetID'),\n calojetExtender = cms.InputTag('ak7JetExtender'),\n ## set the conditions for good Vtx counting ##\n offlineVertices = cms.InputTag('offlinePrimaryVertices'),\n goodVtxNdof = cms.double(4), \n goodVtxZ = cms.double(24),\n ## rho #######################################\n srcCaloRho = cms.InputTag('kt6CaloJets','rho'),\n srcPFRho = cms.InputTag('kt6PFJets','rho'),\n ## preselection cuts #########################\n maxY = cms.double(5.0), \n minPFPt = cms.double(20),\n minPFFatPt = cms.double(10),\n maxPFFatEta = cms.double(2.5),\n minCaloPt = cms.double(20),\n minNPFJets = cms.int32(1),\n minNCaloJets = cms.int32(1), \n minJJMass = cms.double(-1),\n ## trigger ###################################\n printTriggerMenu = cms.untracked.bool(True),\n processName = 
cms.string('HLT'),\n triggerName = cms.vstring('HLT_Jet30_v1','HLT_Jet30_v2','HLT_Jet30_v3','HLT_Jet30_v4','HLT_Jet30_v5','HLT_Jet30_v6',\n 'HLT_Jet60_v1','HLT_Jet60_v2','HLT_Jet60_v3','HLT_Jet60_v4','HLT_Jet60_v5','HLT_Jet60_v6',\n 'HLT_Jet80_v1','HLT_Jet80_v2','HLT_Jet80_v3','HLT_Jet80_v4','HLT_Jet80_v5','HLT_Jet80_v6',\n 'HLT_Jet110_v1','HLT_Jet110_v2','HLT_Jet110_v3','HLT_Jet110_v4','HLT_Jet110_v5','HLT_Jet110_v6',\n 'HLT_Jet150_v1','HLT_Jet150_v2','HLT_Jet150_v3','HLT_Jet150_v4','HLT_Jet150_v5','HLT_Jet150_v6',\n 'HLT_Jet190_v1','HLT_Jet190_v2','HLT_Jet190_v3','HLT_Jet190_v4','HLT_Jet190_v5','HLT_Jet190_v6',\n 'HLT_Jet240_v1','HLT_Jet240_v2','HLT_Jet240_v3','HLT_Jet240_v4','HLT_Jet240_v5','HLT_Jet240_v6',\n 'HLT_Jet300_v1', 'HLT_Jet300_v2','HLT_Jet300_v3','HLT_Jet300_v4','HLT_Jet300_v5',\n 'HLT_Jet370_v1','HLT_Jet370_v2','HLT_Jet370_v3','HLT_Jet370_v4','HLT_Jet370_v5','HLT_Jet370_v6'),\n triggerResults = cms.InputTag(\"TriggerResults\",\"\",\"HLT\"),\n triggerEvent = cms.InputTag(\"hltTriggerSummaryAOD\",\"\",\"HLT\"),\n ## jec services ##############################\n pfjecService = cms.string('ak7PFL1FastL2L3Residual'),\n calojecService = cms.string('ak7CaloL1L2L3Residual')\n)\n\nprocess.ak5 = process.ak7.clone(\n pfjets = 'ak5PFJets',\n calojets = 'ak5CaloJets',\n PFPayloadName = 'AK5PF',\n CaloPayloadName = 'AK5Calo',\n calojetID = 'ak5JetID',\n calojetExtender = 'ak5JetExtender',\n pfjecService = 'ak5PFL1FastL2L3Residual',\n calojecService = 'ak5CaloL1L2L3Residual',\n printTriggerMenu = False \n)\n############# turn-on the fastjet area calculation needed for the L1Fastjet ##############\n############# applied only to PFJets because if CaloJets are re-recoed the JetID map will be lost #####\nprocess.kt6PFJets.doRhoFastjet = True\nprocess.kt6PFJets.Rho_EtaMax = cms.double(5.0)\nprocess.kt6CaloJets.doRhoFastjet = True\nprocess.kt6CaloJets.Rho_EtaMax = cms.double(5.0)\nprocess.ak7PFJets.doAreaFastjet = True\nprocess.ak7PFJets.Rho_EtaMax = cms.double(5.0)\nprocess.ak7PFJets.jetPtMin = cms.double(5.0)\nprocess.ak5PFJets.doAreaFastjet = True\nprocess.ak5PFJets.Rho_EtaMax = cms.double(5.0)\nprocess.ak5PFJets.jetPtMin = cms.double(5.0)\n\nprocess.path = cms.Path(process.kt6PFJets * process.kt6CaloJets * process.ak5PFJets * process.ak7PFJets * process.ak5 * process.ak7)\n\n\n","sub_path":"KKousour/QCDAnalysis/test/ProcessedTreeProducer_data_cfg.py","file_name":"ProcessedTreeProducer_data_cfg.py","file_ext":"py","file_size_in_byte":6700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"429173476","text":"import cv2\nimport numpy as np\nimport PIL.Image as Image\n\nfrom carla.image_converter import labels_to_cityscapes_palette\n\nfrom erdos.op import Op\nfrom erdos.utils import setup_logging\n\n\nclass SegmentedVideoOperator(Op):\n def __init__(self, name, log_file_name=None):\n super(SegmentedVideoOperator, self).__init__(name)\n self._logger = setup_logging(self.name, log_file_name)\n\n @staticmethod\n def setup_streams(input_streams, filter_name):\n input_streams.filter_name(filter_name)\\\n .add_callback(SegmentedVideoOperator.display_frame)\n return []\n\n def display_frame(self, msg):\n frame_array = labels_to_cityscapes_palette(msg.data)\n img = Image.fromarray(np.uint8(frame_array)).convert('RGB')\n open_cv_image = np.array(img)\n cv2.imshow(self.name, open_cv_image)\n cv2.waitKey(1)\n\n def execute(self):\n 
self.spin()\n","sub_path":"examples/pylot/segmented_video_operator.py","file_name":"segmented_video_operator.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"474853340","text":"'''\nBaekjoon 1676: Number of zeros in a factorial\nProblem:\nCount the number of zeros at the end of N!, up to the first non-zero digit.\nInput:\nThe first line contains N. (0 ≤ N ≤ 500)\nOutput:\nPrint the number of zeros found on the first line.\n'''\n\nN = int(input())\nvalue = 1\nfor i in range(N, 0, -1):\n    value *= i\n\nvalue = str(value)\ncount = 0\nfor i in range(len(value)-1, -1, -1):\n    if value[i] == '0':\n        count += 1\n    else:\n        break\n\nprint(count)","sub_path":"백준/1676.py","file_name":"1676.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"270684312","text":"# coding: utf-8\nimport zerorpc\nimport logging\nimport sys\nimport lenovotf.util.deployHelper as dhelper\nimport os\nimport re\nimport lenovotf.util.confReader as reader\n\nlogging.basicConfig(level=logging.INFO)\n\nserver = None\ngendpoint = None\nWEB_SERVER_CODE_DIR = \"/data/tfcodes/upload/\"\n\n\nclass CentralRPCServer(object):\n    \"\"\"The RPC server that runs on the central node\"\"\"\n\n    def __init__(self, endpoint):\n        super(CentralRPCServer, self).__init__()\n        self.endpoint = endpoint\n\n    def do_train(self, app_id, train_params):\n        '''Start distributed training, including:\n        1. starting training on every worker/ps/chief node\n        '''\n        logging.info(\"ready to start training for app: %s\" % app_id)\n        # TODO: start the training task on each node according to the parameters; before starting,\n        # sys.path needs to be set up so that the packaged dependencies are added to the app's path\n\n    def do_dis_code(self, app_id, cluster):\n        '''Deprecated: distribute code according to the app_id and cluster info'''\n        # TODO: repackage the source code\n        logging.info(\"start to distribute codes to nodes...\")\n        source_dir = '/data/tfcode/%s' % app_id\n        target_dir = '/data/tfcode/%s' % app_id\n        cmd = \"fab -f ../../util/deployHelper.py deploy_code:%s,%s,%s\" % (app_id, source_dir, cluster)\n        os.system(cmd)\n        # dhelper.deploy_code(source_dir=source_dir, cluster=cluster, app_id=app_id)\n        logging.info(\"end to distribute codes to nodes %s...\" % cluster)\n\n    def do_disCode(self, filename, cluster):\n        '''Distribute code according to the source file and cluster info'''\n        # Repackage the source code; the filename here is the full path of the source file\n        # in the user's environment, so it needs to be processed\n        filename = re.sub(\"\\\\\\\\\", \"/\", filename)\n        # XXX.zip\n        filename = re.split(\"/\", filename)[-1]\n        logging.info(\"repackaging codes for file %s....\" % filename)\n        fullname = reader.get(\"web\", \"upload_store_path\") + filename\n        pkcmd = \"python ./repackage.py %s\" % fullname\n        os.system(pkcmd)\n\n        # {\n        #   'cluster': {'chief': ['192.168.11.39:2222'],\n        #               'ps': ['192.168.11.39:2223', '192.168.11.39:2224'],\n        #               'worker': ['192.168.11.39:2224', '192.168.11.39:2225']},\n        #   'hosts': [{'host': '192.168.11.39:2222', 'task': {'type': 'chief', 'index': 0}},\n        #             {'host': '192.168.11.39:2224', 'task': {'type': 'worker', 'index': 0}},\n        #             {'host': '192.168.11.39:2225', 'task': {'type': 'worker', 'index': 1}}]\n        # }\n        hosts = cluster['hosts']\n        host_list = []\n        for host in hosts:\n            host_list.append(re.split(\":\", host['host'])[0])\n        # self.upload_code(file, cluster)\n        logging.info(\"start to distribute source code:%s to nodes%s\" % (fullname, host_list))\n        re_packed_name = re.split(\"\\.\", fullname)[0] + \"_with_dependences.zip\"\n        cmd = \"fab -f ./deployHelper.py deploy_code:%s,%s\" % (re_packed_name, cluster)\n        os.system(cmd)\n        # dhelper.deploy_code(source_dir=source_dir, cluster=cluster, app_id=app_id)\n        logging.info(\"end to distribute codes %s to nodes %s...\" % (re_packed_name, host_list))\n\n    def deploy_model(self, app_id):\n        logging.info(\"ready to start deploying model for app: %s\" % app_id)\n        # TODO: deploy the model to production\n\n\ndef start(endpoint):\n    global gendpoint\n    global server\n    gendpoint = endpoint\n    logging.info('starting CentralRPCServer on ' + endpoint)\n    server = zerorpc.Server(CentralRPCServer(endpoint))\n    server.bind(endpoint)\n    # self.server.bind('tcp://0.0.0.0:4243')\n    server.run()\n\n\ndef stop():\n    global gendpoint\n    logging.info('stopping CentralRPCServer on %s' % gendpoint)\n    server.close()\n    gendpoint = None\n\n\nif __name__ == '__main__':\n    endpoint = 'tcp://0.0.0.0:14243'\n    if len(sys.argv) > 1:\n        # print(sys.argv[1])\n        start(sys.argv[1])\n    else:\n        start(endpoint)\n","sub_path":"lenovotf/rpc/central/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"384833138","text":"from math import sin, cos, pi\nfrom random import random\n\nimport pyglet\n\nimport stars\n\nclass TestGalaxy(object):\n    def __init__(self, galaxy_map):\n        self.galaxy = galaxy_map\n        self.batch = self.galaxy.batch\n        self.points = []\n        with open('points.txt', 'r') as f:\n            for line in f:\n                self.points.append(float(line[:-1]))\n        self.stars = [stars.Star(self) for x in range(len(self.points))]\n        self.x, self.y = 300, 300\n    \n    def disperse_stars(self):\n        for s in self.stars:\n            ind = self.stars.index(s)\n            s.x = ind\n            s.y = self.points[ind] * 10\n","sub_path":"game/modules/naive_galaxy.py","file_name":"naive_galaxy.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"180047437","text":"from django.contrib import admin\nfrom companys.models import Companys, Job, Welfare\n\n\n# Register your models here.\n\n\nclass CompanysAdmin(admin.ModelAdmin):\n    list_display = ('name', 'type', 'logoImg', 'addr', 'introduce', 'personnel', 'company_start_time')\n\n    search_fields = ('name', 'type')\n\n    list_per_page = 50\n\n\nclass JobAdmin(admin.ModelAdmin):\n    list_display = ('job', 'company', 'min_salary', 'max_salary', 'describe', 'working_years', 'education', 'recruitment')\n\n    search_fields = ('job', 'education')\n\n    list_per_page = 50\n\n\nclass WelfareAdmin(admin.ModelAdmin):\n    list_display = ('name', 'is_delete')\n\n    search_fields = ('name',)\n\n    list_per_page = 50\n\n\nadmin.site.register(Companys, CompanysAdmin)\nadmin.site.register(Job, JobAdmin)\nadmin.site.register(Welfare, WelfareAdmin)\n\nadmin.site.site_header = '仿无忧管理系统'\nadmin.site.site_title = '登录系统后台'\nadmin.site.index_title = '后台管理'\n","sub_path":"apps/companys/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"359326464","text":"import argparse\nimport tensorflow as tf\nfrom config import *\nfrom os.path import join, exists\nfrom os import makedirs\nimport pickle\nimport math\n\nfrom sklearn.model_selection import train_test_split\n\nFLAGS = None\n\ndef standardize(X):\n    return (X - X.mean()) / X.std()\n\n\ndef load_data():\n    \"\"\"\n    load data from pickle\n    :return:\n    \"\"\"\n    with open(join(FLAGS.source_data), 'rb') as f:\n        data_x = pickle.load(f)\n        data_y = pickle.load(f)\n\n    return standardize(data_x), data_y\n\n\ndef train(loss, global_step):\n    MOVING_AVERAGE_DECAY = 0.9999  # The decay to use for the moving average.\n\n    opt = tf.train.RMSPropOptimizer(0.0001)\n    grads = opt.compute_gradients(loss)\n\n    
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n    # Track the moving averages of all trainable variables.\n    variable_averages = tf.train.ExponentialMovingAverage(\n        MOVING_AVERAGE_DECAY, global_step)\n    variables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):\n        train_op = tf.no_op(name='train')\n    return train_op\n\ndef get_data(data_x, data_y):\n    \"\"\"\n    split data from loaded data\n    :param data_x:\n    :param data_y:\n    :return: Arrays\n    \"\"\"\n    print('Data X Length', len(data_x), 'Data Y Length', len(data_y))\n    print('Data X Example', data_x[0])\n    print('Data Y Example', data_y[0])\n    \n    train_x, test_x, train_y, test_y = train_test_split(data_x, data_y, test_size=0.4, random_state=40)\n    dev_x, test_x, dev_y, test_y = train_test_split(test_x, test_y, test_size=0.5, random_state=40)\n    \n    print('Train X Shape', train_x.shape, 'Train Y Shape', train_y.shape)\n    print('Dev X Shape', dev_x.shape, 'Dev Y Shape', dev_y.shape)\n    print('Test X Shape', test_x.shape, 'Test Y Shape', test_y.shape)\n    return train_x, train_y, dev_x, dev_y, test_x, test_y\n\n\ndef main():\n    data_x, data_y = load_data()\n    n_classes = data_y.shape[-1]\n    train_x, train_y, dev_x, dev_y, test_x, test_y = get_data(data_x, data_y)\n    train_steps = math.ceil(train_x.shape[0] / FLAGS.train_batch_size)\n    dev_steps = math.ceil(dev_x.shape[0] / FLAGS.dev_batch_size)\n    test_steps = math.ceil(test_x.shape[0] / FLAGS.test_batch_size)\n    \n    global_step = tf.Variable(-1, trainable=False, name='global_step')\n    \n    # train and dev dataset\n    train_dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y)).shuffle(10000)\n    train_dataset = train_dataset.batch(FLAGS.train_batch_size)\n    \n    dev_dataset = tf.data.Dataset.from_tensor_slices((dev_x, dev_y))\n    dev_dataset = dev_dataset.batch(FLAGS.dev_batch_size)\n    \n    test_dataset = tf.data.Dataset.from_tensor_slices((test_x, test_y))\n    test_dataset = test_dataset.batch(FLAGS.test_batch_size)\n    \n    print(train_dataset.output_types, test_dataset.output_shapes)\n    \n    # a reinitializable iterator\n    iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)\n    \n    train_initializer = iterator.make_initializer(train_dataset)\n    dev_initializer = iterator.make_initializer(dev_dataset)\n    test_initializer = iterator.make_initializer(test_dataset)\n    \n    # input Layer\n    with tf.variable_scope('inputs'):\n        # x.shape = [-1, 60, 160, 3]\n        x, y_label = iterator.get_next()\n    print(x.shape)\n    \n    keep_prob = tf.placeholder(tf.float32, [])\n    # layer1\n\n    def conv_layer(x, layer_num, kernel_num, kernel_size):\n        for _ in range(layer_num):\n            x = tf.layers.conv2d(x, kernel_num, kernel_size=kernel_size, padding=\"same\",\n                                 activation=tf.nn.relu,\n                                 kernel_initializer=tf.contrib.layers.xavier_initializer())\n        x = tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding=\"same\")\n        return x\n\n    x = conv_layer(x, 3, 32, 3)\n    x = conv_layer(x, 3, 64, 3)\n    x = conv_layer(x, 3, 128, 3)\n\n    x = tf.layers.flatten(x)\n    x = tf.layers.dense(x, 1024, activation=tf.nn.relu)\n\n    x = tf.layers.dense(x, 1024, activation=tf.nn.relu)\n    # x = tf.layers.dropout(x, rate=keep_prob)\n    x = tf.layers.dense(x, n_classes)\n\n    # loss\n    cross_entropy = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=x, labels=y_label))\n    max_index_predict = tf.argmax(tf.reshape(x, [-1, CAPTCHA_LENGTH, VOCAB_LENGTH]), axis=2)\n    print('Max Index Predict', max_index_predict)\n    max_index_label = 
tf.argmax(tf.reshape(y_label, [-1, CAPTCHA_LENGTH, VOCAB_LENGTH]), axis=2)\n    print('Max Index Label', max_index_label)\n\n    correct_predict = tf.equal(max_index_predict, max_index_label)\n    print('Correct predict', correct_predict)\n    accuracy = tf.reduce_mean(tf.reshape(tf.cast(correct_predict, tf.float32), [-1]))\n    \n    # Train\n    train_op = tf.train.RMSPropOptimizer(0.0001).minimize(cross_entropy, global_step=global_step)\n    # train_op = train(cross_entropy, global_step)\n    \n    # Saver\n    saver = tf.train.Saver()\n    \n    # Iterator\n    sess = tf.Session()\n    sess.run(tf.global_variables_initializer())\n    \n    # Global step\n    gstep = 0\n    \n    if FLAGS.train:\n\n        for epoch in range(FLAGS.epoch_num):\n            tf.train.global_step(sess, global_step_tensor=global_step)\n            # Train\n            sess.run(train_initializer)\n            for step in range(int(train_steps)):\n                loss, acc, gstep, _ = sess.run([cross_entropy, accuracy, global_step, train_op],\n                                               feed_dict={keep_prob: FLAGS.keep_prob})\n                # Print log\n                if step % FLAGS.steps_per_print == 0:\n                    print('Global Step', gstep, 'Step', step, 'Train Loss', loss, 'Accuracy', acc)\n            \n            if epoch % FLAGS.epochs_per_dev == 0:\n                # Dev\n                sess.run(dev_initializer)\n                for step in range(int(dev_steps)):\n                    if step % FLAGS.steps_per_print == 0:\n                        print('Dev Accuracy', sess.run(accuracy, feed_dict={keep_prob: 1}), 'Step', step)\n            \n            # Save model\n            if epoch % FLAGS.epochs_per_save == 0:\n                saver.save(sess, FLAGS.checkpoint_dir, global_step=gstep)\n    \n    else:\n        # Load model\n        ckpt = tf.train.get_checkpoint_state('ckpt')\n        saver.restore(sess, ckpt.model_checkpoint_path)\n        print('Restore from', ckpt.model_checkpoint_path)\n        sess.run(test_initializer)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Captcha')\n    parser.add_argument('--train_batch_size', help='train batch size', default=128)\n    parser.add_argument('--dev_batch_size', help='dev batch size', default=256)\n    parser.add_argument('--test_batch_size', help='test batch size', default=256)\n    parser.add_argument('--source_data', help='source data path', default='./data/data.pkl')\n    parser.add_argument('--num_layer', help='num of layer', default=2, type=int)\n    parser.add_argument('--num_units', help='num of units', default=64, type=int)\n    parser.add_argument('--time_step', help='time steps', default=32, type=int)\n    parser.add_argument('--embedding_size', help='embedding size', default=64, type=int)\n    parser.add_argument('--category_num', help='category num', default=5, type=int)\n    parser.add_argument('--learning_rate', help='learning rate', default=0.001, type=float)\n    parser.add_argument('--epoch_num', help='num of epoch', default=10000, type=int)\n    parser.add_argument('--epochs_per_test', help='epochs per test', default=100, type=int)\n    parser.add_argument('--epochs_per_dev', help='epochs per dev', default=2, type=int)\n    parser.add_argument('--epochs_per_save', help='epochs per save', default=2, type=int)\n    parser.add_argument('--steps_per_print', help='steps per print', default=2, type=int)\n    parser.add_argument('--steps_per_summary', help='steps per summary', default=100, type=int)\n    parser.add_argument('--keep_prob', help='train keep prob dropout', default=0.5, type=float)\n    parser.add_argument('--checkpoint_dir', help='checkpoint dir', default='ckpt/model.ckpt', type=str)\n    parser.add_argument('--summaries_dir', help='summaries dir', default='summaries/', type=str)\n    parser.add_argument('--train', help='train', default=True, type=bool)\n    \n    FLAGS, args = parser.parse_known_args()\n    \n    
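# NB: argparse's type=bool is truthy for any non-empty string, so a flag like\n    # '--train False' would still enable training. A common workaround (an\n    # assumption on my part, not part of the original script) is a converter:\n    # parser.add_argument('--train', default=True,\n    #                     type=lambda s: str(s).lower() in ('true', '1', 'yes'))\n    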
main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"531375608","text":"def mq_to_arc(number_of_days=30):\n from app.utils import get_stations\n from microquake.core import UTCDateTime\n from microquake.IMS import web_api\n import pandas as pd\n site = get_stations()\n site.stations()[0]\n site.stations()[0].loc\n\n endtime = UTCDateTime.now()\n starttime = endtime - number_of_days * 24 * 3600\n\n base_url = 'http://10.95.74.35:8002/ims-database-server/databases/mgl'\n\n cat = web_api.get_catalogue(base_url, starttime, endtime, site, event=True, blast=False)\n events_xyz = []\n events_mag = []\n events_time =[]\n for event in cat:\n events_xyz.append(event.preferred_origin().loc)\n events_mag.append(event.preferred_magnitude().mag)\n events_time.append(event.preferred_origin().time)\n\n df_xyz = pd.DataFrame(events_xyz)\n df_mag = pd.DataFrame(events_mag)\n df_time =pd.DataFrame(events_time)\n\n df = pd.merge(df_xyz,df_mag,left_index=True,right_index=True)\n df = pd.merge(df, df_time, left_index=True, right_index=True)\n\n df = df.rename(columns={'0_x': 'x',1:'y',2:'z','0_y':'mag', 0: 'time'})\n df = df[df['mag'] > -100]\n df = df[df['z'] < 300]\n\n pseudo_events = 10 ** (df['mag'] * 3 / 2)\n df_pseudo_events = pseudo_events.to_frame('pseudo_events')\n\n df_tarp_mag = 10**df['mag']\n df_tarp_mag = df_tarp_mag.to_frame('df_tarp_mag')\n\n df = pd.merge(df,df_tarp_mag,left_index=True,right_index=True)\n df = pd.merge(df, df_pseudo_events, left_index=True, right_index=True)\n\n return df\n\ndef eom_report(instrument_paths=['/Underground/MPBX/MDT MPBXs',\n '/Underground/MPBX/MDT SmartCables',\n '/Underground/MPBX/YieldPoint'],\n unit_of_time='month', length_of_data=2):\n '''\n :param instrument_paths: list of instrument paths in canary\n :param unit_of_time: used to filter data eg. day, week, month, year\n :param length_of_data: number of unit_of_time to be return from today\n defaults recalls all data from the previous month\n :return:\n '''\n\n import gsct.canary.tools as tools\n import gsct.montoring.report as report\n unread_i = tools.unread_instrument(instrument_paths=instrument_paths,\n length_of_data=length_of_data,\n unit_of_time='year')\n\n max_d = tools.maxdisp_xyz_latest(instrument_paths=instrument_paths,\n unit_of_time=unit_of_time,\n length_of_data=length_of_data,\n dipfrom=-91)\n\n # sd = report.mq_to_arc()\n\n return unread_i, max_d#, sd","sub_path":"gsct/montoring/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"538364948","text":"# 40. 
Combination Sum II\n# https://leetcode.com/problems/combination-sum-ii/description/\n\n# Runtime: 48 ms\n# Memory Usage: 13.2 MB\n# runtime beats 92.78 % of python3 submissions.\n# memory usage beats 66.34 % of python3 submissions.\n\nclass Solution:\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n if not candidates:\n return []\n \n candidates.sort()\n ans = []\n \n def backtrack(comb, idx, target):\n if target == 0:\n ans.append(comb)\n return\n if idx >= len(candidates) or candidates[idx] > target:\n return\n for i in range(idx, len(candidates)):\n if i > idx and candidates[i] == candidates[i-1]:\n continue\n if target >= candidates[i]:\n backtrack(comb + [candidates[i]], i+1, target - candidates[i])\n \n backtrack([], 0, target)\n \n return ans","sub_path":"Algorithm/HW04_Backtracking/leetcode040_Combination_Sum_II.py","file_name":"leetcode040_Combination_Sum_II.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"60595076","text":"# Copyright 2021 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nfrom gpu_tests import path_util\n\nsys.path.insert(0,\n os.path.join(path_util.GetChromiumSrcDir(), 'build', 'fuchsia'))\nfrom common_args import (AddCommonArgs, AddTargetSpecificArgs, ConfigureLogging,\n GetDeploymentTargetForArgs)\nfrom symbolizer import RunSymbolizer\n\n\ndef RunTestOnFuchsiaDevice(script_cmd):\n \"\"\"Preps Fuchsia device with pave and package update, then runs script.\"\"\"\n\n parser = argparse.ArgumentParser()\n AddCommonArgs(parser)\n AddTargetSpecificArgs(parser)\n runner_script_args, test_args = parser.parse_known_args()\n ConfigureLogging(runner_script_args)\n\n # If out_dir is not set, assume the script is being launched\n # from the output directory.\n if not runner_script_args.out_dir:\n runner_script_args.out_dir = os.getcwd()\n\n # Create a temporary log file that Telemetry will look to use to build\n # an artifact when tests fail.\n temp_log_file = False\n if not runner_script_args.system_log_file:\n runner_script_args.system_log_file = os.path.join(tempfile.mkdtemp(),\n 'system-log')\n temp_log_file = True\n\n package_names = ['web_engine_with_webui', 'web_engine_shell']\n web_engine_dir = os.path.join(runner_script_args.out_dir, 'gen', 'fuchsia',\n 'engine')\n\n # Pass all other arguments to the gpu integration tests.\n script_cmd.extend(test_args)\n listener_process = None\n symbolizer_process = None\n try:\n with GetDeploymentTargetForArgs(runner_script_args) as target:\n target.Start()\n fuchsia_device_address, fuchsia_ssh_port = target._GetEndpoint()\n script_cmd.extend(\n ['--chromium-output-directory', runner_script_args.out_dir])\n script_cmd.extend(['--fuchsia-device-address', fuchsia_device_address])\n script_cmd.extend(['--fuchsia-ssh-config', target._GetSshConfigPath()])\n if fuchsia_ssh_port:\n script_cmd.extend(['--fuchsia-ssh-port', str(fuchsia_ssh_port)])\n script_cmd.extend(\n ['--fuchsia-system-log-file', runner_script_args.system_log_file])\n # Add to the script\n if runner_script_args.verbose:\n script_cmd.append('-v')\n\n # Set up logging of WebEngine\n listener_process = target.RunCommandPiped(['log_listener'],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n 
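# Collect each package's ids.txt so the symbolizer can resolve build IDs in the log.\n      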
build_ids_paths = map(\n          lambda package_name: os.path.join(web_engine_dir, package_name,\n                                            'ids.txt'), package_names)\n      symbolizer_process = RunSymbolizer(\n          listener_process.stdout, open(runner_script_args.system_log_file,\n                                        'w'), build_ids_paths)\n\n      # Keep the Amber repository live while the test runs.\n      with target.GetAmberRepo():\n        # Install necessary packages on the device.\n        far_files = map(\n            lambda package_name: os.path.join(web_engine_dir, package_name,\n                                              package_name + '.far'),\n            package_names)\n        target.InstallPackage(far_files)\n        return subprocess.call(script_cmd)\n  finally:\n    if temp_log_file:\n      shutil.rmtree(os.path.dirname(runner_script_args.system_log_file))\n    if listener_process:\n      listener_process.kill()\n    if symbolizer_process:\n      symbolizer_process.kill()\n","sub_path":"content/test/gpu/fuchsia_util.py","file_name":"fuchsia_util.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"137221155","text":"from time import sleep\nfrom dbmodules.dbconnection import *\n#from multiprocessing import Process\nimport os, signal\nimport sys\nfrom commandFile import *\nimport threading\nimport itertools\n\n\noutput_lock = threading.Lock()\nttl = sys.argv[2]\n\ndbClient = MongoConnection()\n\n\nneighbors = dbClient.getNeighbors()\n\nif sys.argv[1] == \"4\":\n\n    pktId = find_peers(output_lock, neighbors, ttl)\n    dbClient.insertPeersPktId(pktId)\n\nelse:\n    searchStr = sys.argv[3]\n    pktId = sys.argv[4]\n    info = find_file(output_lock, neighbors, pktId, searchStr, ttl)\n    dbClient.insertFilePktId(info)\n\nsleep(5)\n\nif sys.argv[2] == \"3\":\n    dbClient.finishSearchFile(sys.argv[1])\nelse:\n    dbClient.finishSearchPeers(sys.argv[1])\n","sub_path":"helpermodules/sleepProcess.py","file_name":"sleepProcess.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"643179832","text":"# syft absolute\nimport syft as sy\nfrom syft.lib.python.complex import Complex\n\n\ndef test_serde() -> None:\n    syft_complex = Complex(\"2+3j\")\n\n    serialized = sy.serialize(syft_complex)\n\n    deserialized = sy.deserialize(serialized)\n\n    assert isinstance(deserialized, Complex)\n    assert deserialized == syft_complex\n\n\ndef test_send(client: sy.VirtualMachineClient) -> None:\n    syft_complex = Complex(\"2+3j\")\n    ptr = syft_complex.send(client)\n    # Check pointer type\n    assert ptr.__class__.__name__ == \"ComplexPointer\"\n\n    # Check that we can get back the object\n    res = ptr.get()\n    assert res == syft_complex\n\n\ndef test_complex_bytes() -> None:\n    # Test that serializing two equal objects results in the same bytes\n    value_1 = Complex(5, 3)\n    value_2 = Complex(5, 3)\n    assert sy.serialize(value_1, to_bytes=True) == sy.serialize(value_2, to_bytes=True)\n","sub_path":"packages/syft/tests/syft/lib/python/complex/complex_serde_test.py","file_name":"complex_serde_test.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"159227304","text":"# polynomial regression\n# multiple variables\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\n# parameters\n# theta0, theta1, theta2, theta3\ntheta0 = 300\ntheta1 = 1\ntheta2 = 0.1\ntheta3 = 0.01\n\ndef gradient_descent(x:list, y:list):\n    cost_functions = []\n    x1 = x\n    x2 = []\n    x3 = []\n    # the number of training examples\n    m = len(y)\n\n    # learning rate 
alpha\n    alpha = 0.0004\n\n    # calculate\n    for i in range(0, m):\n        x2.append(x[i] * x[i])\n        x3.append(x[i] * x[i] * x[i])\n\n    global theta0\n    global theta1 \n    global theta2\n    global theta3\n    \n    # gradient descent loop\n    k = 1\n    while k < 100001:\n        total = 0\n        for i in range(0, m):\n            total = total + pow((theta0 + theta1 * x1[i] + theta2 * x2[i] + theta3 * x3[i] - y[i]), 2)\n        cost_function = total / (2 * m)\n        cost_functions.append(cost_function)\n        print(f\"time:{k},cost:{cost_function}\")\n\n        total_theta0 = 0\n        for i in range(0, m):\n            total_theta0 = total_theta0 + (theta0 + theta1 * x1[i] + theta2 * x2[i] + theta3 * x3[i] - y[i])\n\n        total_theta1 = 0\n        for i in range(0, m):\n            total_theta1 = total_theta1 + (theta0 + theta1 * x1[i] + theta2 * x2[i] + theta3 * x3[i] - y[i]) * x1[i]\n\n        total_theta2 = 0\n        for i in range(0, m):\n            total_theta2 = total_theta2 + (theta0 + theta1 * x1[i] + theta2 * x2[i] + theta3 * x3[i] - y[i]) * x2[i]\n        \n        total_theta3 = 0\n        for i in range(0, m):\n            total_theta3 = total_theta3 + (theta0 + theta1 * x1[i] + theta2 * x2[i] + theta3 * x3[i] - y[i]) * x3[i]\n\n        temp0 = theta0 - alpha * total_theta0 / m\n        temp1 = theta1 - alpha * total_theta1 / m\n        temp2 = theta2 - alpha * total_theta2 / m\n        temp3 = theta3 - alpha * total_theta3 / m\n        theta0 = temp0\n        theta1 = temp1\n        theta2 = temp2\n        theta3 = temp3\n\n        k = k + 1\n    return cost_functions\n\ndef plot_cost_function(cost_functions:list):\n    x = np.arange(1,100001)\n    y = np.array(cost_functions)\n    plt.title(\"Gradient Descent\") \n    plt.xlabel(\"times\") \n    plt.ylabel(\"cost function\") \n    plt.plot(x, y) \n    plt.show()\n\ndef plot_figure(x:list, y:list):\n    # TODO\n    pass\n\nif __name__ == \"__main__\":\n    with open(\"data/distances.txt\", 'r') as f:\n        distances = f.read().split(\"\\n\")\n    with open(\"data/prices.txt\", 'r') as f:\n        prices = f.read().split(\"\\n\")\n    \n    m = len(prices)\n\n    for i in range(0, m):\n        distances[i] = float(distances[i])\n    for i in range(0, m):\n        prices[i] = float(prices[i])\n\n    # mean normalization\n    temp = []\n    for i in range(0, m):\n        mean = sum(distances) / m\n        temp.append(round((distances[i] - mean) / (max(distances) - min(distances)), 2))\n    normalization_distances = temp\n    # result\n    cost_function = gradient_descent(x=normalization_distances, y=prices)\n    plot_cost_function(cost_function)\n    plot_figure(x=distances, y=prices)\n    ","sub_path":"week2/algorithm3.py","file_name":"algorithm3.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"277609992","text":"import os\nimport xmlrunner\nimport unittest\nimport requests\nfrom selenium import webdriver\nfrom pages.main_page import MainPage\n\n\ndata = {}\n\n\n\nclass GoogleTest1(unittest.TestCase):\n    def setUp(self):\n        if os.name == 'nt':\n            self.driver = webdriver.Chrome(executable_path='./chrome_driver/chromedriver.exe')\n        else:\n            self.driver = webdriver.Chrome(executable_path='./chrome_driver/chromedriver')\n        self.driver.implicitly_wait(10)\n\n        self.driver.get('http://google.com.ua')\n\n        self.main_page = MainPage(self.driver)\n        self.result = self.main_page.search('tanks')\n\n    def test_search_results_count(self):\n        count = self.result.search_results_count\n        data['search_results_count'] = count\n        self.assertGreaterEqual(12, count) # or self.assertTrue(12 >= count)\n\n    def test_is_domain_in_results(self):\n        result_domains = self.result.search_results_domain_list\n        self.assertTrue('worldoftanks.ru' in result_domains)\n\n    def tearDown(self):\n        data['last_url'] = 
self.driver.current_url\n self.driver.quit()\n\n\nclass GoogleTest2(unittest.TestCase):\n def setUp(self):\n HEADERS = {\n 'Accept-Language': 'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3',\n 'User-Agent': 'googlebot',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Referer': ''\n }\n self.s = requests.Session()\n self.s.headers.update(HEADERS)\n\n def test_url_available(self):\n try:\n self.s.head(data['last_url'])\n except Exception as e:\n assert False, 'Network connection error'\n\n def test_is_domain_in_html(self):\n try:\n r = self.s.get(data['last_url'])\n except Exception as e:\n assert False, 'Network connection error'\n\n self.assertIn('worldoftanks.ru', r.text)\n\n def test_search_results_count(self):\n try:\n r = self.s.get(data['last_url'])\n except Exception as e:\n assert False, 'Network connection error'\n # results_count = len(findall(r'h3 class=\"r\"', r.text)) # using regexp\n results_count = r.text.count('h3 class=\"r\"')\n self.assertEqual(results_count, data['search_results_count'])\n\n\n@unittest.skip\nclass GoogleMailTest1(unittest.TestCase):\n def setUp(self):\n if os.name == 'nt':\n self.driver = webdriver.Chrome(executable_path='./chrome_driver/chromedriver.exe')\n else:\n self.driver = webdriver.Chrome(executable_path='./chrome_driver/chromedriver')\n self.driver.implicitly_wait(10)\n\n self.driver.get('http://google.com.ua')\n self.main_page = MainPage(self.driver)\n\n def test_login_get_1st_email(self):\n email_page = self.main_page.open_email()\n if not email_page.is_signed_in():\n email_page.login_with('ex3me0@gmail.com', 'crashc0derf0rmat')\n print(email_page.get_first_email().text.strip())\n\n def tearDown(self):\n self.driver.quit()\n\n\nif __name__ == '__main__':\n runner = xmlrunner.XMLTestRunner(output='reports')\n unittest.main(testRunner=runner, warnings='ignore')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"557292206","text":"from django.contrib import messages\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.contenttypes.models import ContentType\nfrom comments.models import Comment\nfrom .models import Post\nfrom .forms import PostForm\nfrom comments.forms import CommentForm\nfrom django.views.generic import DetailView, ListView\n\n\n\n\n@login_required(login_url=\"/login\")\ndef post_create(request): # Create post (PUT)\n if not request.user.is_authenticated:\n raise Http404\n form = PostForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n instance=form.save(commit=False)\n instance.user = request.user\n instance.save()\n form.save_m2m()\n print(form.cleaned_data)\n # Will put message saying success\n messages.success(request, \"Successfully created\")\n return HttpResponseRedirect(instance.get_absolute_url())\n # else:\n # messages.error(request, \"Not successfully created\")\n tags = Post.tag.all()\n lenTags = int(len(tags)/2)\n tags_1st = tags[:lenTags]\n tags_2nd = tags[lenTags:]\n context = {\n 'title': 'Ask A Question',\n \"form\": form,\n 'tags_1st': tags_1st,\n 'tags_2nd': tags_2nd\n }\n return render(request, \"posts/post_form.html\", context)\n\n\n\ndef post_detail(request, id): # 
Retrieve post (GET)\n    instance=get_object_or_404(Post, id=id)\n\n    initial_data = {\n        \"content_type\": instance.get_content_type,\n        \"object_id\": instance.id,\n    }\n\n    form = CommentForm(request.POST or None, request.FILES or None, initial=initial_data)\n    if form.is_valid() and request.user.is_authenticated:\n        print(form.cleaned_data)\n        c_type = form.cleaned_data.get(\"content_type\")\n        content_type = ContentType.objects.get(model=c_type)\n        obj_id = form.cleaned_data.get(\"object_id\")\n        content_data = form.cleaned_data.get(\"content\")\n        model_pic = form.cleaned_data.get(\"image\")\n        new_comment, created = Comment.objects.get_or_create(\n            user = request.user,\n            content_type=content_type,\n            object_id = obj_id,\n            content=content_data,\n            model_pic=model_pic,\n        )\n\n        if created:\n            print(\"yeah\")\n\n\n    content_type=ContentType.objects.get_for_model(Post)\n    obj_id=instance.id\n    comments=Comment.objects.filter(content_type=content_type, object_id=obj_id)\n    tags = Post.tag.all()\n    lenTags = int(len(tags)/2)\n    tags_1st = tags[:lenTags]\n    tags_2nd = tags[lenTags:]\n    context={\n        'title': instance.title,\n        'instance': instance,\n        'comments': comments,\n        'comment_form': form,\n        'tags_1st': tags_1st,\n        'tags_2nd': tags_2nd,\n    }\n    return render(request, \"posts/post_detail.html\", context)\n\n\ndef post_list(request): # Retrieve post (GET)\n    queryset_list = Post.objects.all().order_by(\"-timestamp\")\n    # print(\"queryset_list: \", len(queryset_list))\n    query = request.GET.get(\"q\")\n    if query:\n        queryset_list=queryset_list.filter(\n            Q(title__icontains=query) |\n            Q(content__icontains=query) |\n            Q(user__first_name__icontains=query) |\n            Q(user__last_name__icontains=query)\n        ).distinct()\n\n    paginator = Paginator(queryset_list, 10) # Show 10 posts per page\n    page = request.GET.get('page')\n    queryset = paginator.get_page(page)\n\n\n    tags = Post.tag.all()\n    print(tags)\n    lenTags = int(len(tags)/2)\n    tags_1st = tags[:lenTags]\n    tags_2nd = tags[lenTags:]\n    context={\n        'title': 'List',\n        'object_list': queryset,\n        'queryset_list': len(queryset_list),\n        'tags_1st': tags_1st,\n        'tags_2nd': tags_2nd\n    }\n\n    return render(request, \"posts/index.html\", context)\n\n\n\ndef post_update(request, id=None):\n    instance = get_object_or_404(Post, id=id)\n    form = PostForm(request.POST or None, request.FILES or None, instance=instance)\n    if form.is_valid():\n        instance = form.save(commit=False)\n        instance.save()\n        # Will put message saying success\n        messages.success(request, \"Successfully updated\")\n        return HttpResponseRedirect(instance.get_absolute_url())\n    context = {\n        'title': instance.title,\n        'instance': instance,\n        'form': form,\n    }\n    return render(request, \"posts/post_form.html\", context)\n\n\ndef post_delete(request, id=None):\n    if not request.user.is_authenticated:\n        raise Http404\n\n    instance = get_object_or_404(Post, id=id)\n    instance.delete()\n    messages.success(request, \"Successfully deleted\")\n    return redirect('posts:list')\n\n\ndef TagIndexView(request, slug):\n    print(slug)\n\n    queryset_list = Post.objects.filter(tag__slug=(slug)).order_by(\"-timestamp\")\n    print(queryset_list)\n    tags = Post.tag.all()\n    lenTags = int(len(tags)/2)\n    tags_1st = tags[:lenTags]\n    tags_2nd = tags[lenTags:]\n    context = {\n        'title': 'List',\n        'object_list': queryset_list,\n        'queryset_list': len(queryset_list),\n        'tags_1st': tags_1st,\n        'tags_2nd': tags_2nd\n    }\n\n    return render(request, \"posts/index.html\", 
context)\n\n","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"503985650","text":"from ..plots import *\nfrom pymc import psample, Slice, Metropolis, find_hessian, sample\n\n\ndef test_plots():\n\n # Test single trace\n from pymc.examples import arbitrary_stochastic as asmod\n\n with asmod.model as model:\n\n start = model.test_point\n h = find_hessian(start)\n step = Metropolis(model.vars, h)\n trace = sample(3000, step, start)\n\n forestplot(trace)\n\n autocorrplot(trace)\n\n\ndef test_multichain_plots():\n\n from pymc.examples import disaster_model as dm\n\n with dm.model as model:\n # Run sampler\n step1 = Slice([dm.early_mean, dm.late_mean])\n step2 = Metropolis([dm.switchpoint])\n start = {'early_mean': 2., 'late_mean': 3., 'switchpoint': 50}\n ptrace = psample(1000, [step1, step2], start, threads=2)\n\n forestplot(ptrace, vars=['early_mean', 'late_mean'])\n\n autocorrplot(ptrace, vars=['switchpoint'])\n","sub_path":"pymc/tests/test_plots.py","file_name":"test_plots.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"268840891","text":"import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom bs4 import BeautifulSoup\nfrom nltk.tokenize import WordPunctTokenizer\nfrom sklearn import preprocessing\nfrom sklearn import metrics\nimport re\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom nltk.corpus import stopwords\nfrom autocorrect import Speller\n\nnltkstop = set(stopwords.words('english'))\nstop = ['an', 'and', 'a', 'for', 'was', 'we','they', 'do', 'of', 'am',\n 'who', 'as', 'from', 'had', 'in', 'those']\ncombined_pat = \"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\"\n\ndef tweet_cleaner(text):\n soup = BeautifulSoup(text, 'lxml')\n souped = soup.get_text()\n stripped = re.sub(combined_pat, '', souped)\n try:\n clean = stripped.decode(\"utf-8-sig\").replace(u\"\\ufffd\", \"?\")\n except:\n clean = stripped\n letters_only = re.sub(\"[^a-zA-Z]\", \" \", clean)\n lower_case = letters_only.lower()\n # During the letters_only process two lines above, it has created unnecessay white spaces,\n # Tokenize and join together to remove unneccessary white spaces\n\n words = tok.tokenize(lower_case) \n #words = tok.tokenize(lower_case)\n #words = [spell(w) for w in words2]\n\n #words = [w for w in words if w not in stopwords.words('english')]\n return (\" \".join(words)).lower().strip()\n\ndf = pd.read_csv('./datasets/training80k.csv', names=['label', 'sentence'], sep='\\t', engine='python')\ntok = WordPunctTokenizer()\n\nX = df['sentence'].values\ny_train = df['label'].values\n\ndf_test = pd.read_csv('./datasets/test20k.csv', names=['label', 'sentence'], sep='\\t', engine='python')\n\n\nX_val, X_test, y_val, y_test = train_test_split(df_test['sentence'].values, df_test['label'].values,random_state = 9, test_size=0.5)\n\nspell = Speller(fast=True)\n\nX_train = [tweet_cleaner(t) for t in X]\nX_test = [tweet_cleaner(t) for t in X_test]\nX_val = [tweet_cleaner(t) for t in X_val]\n\ntfidf = TfidfVectorizer(max_df = 0.08, ngram_range = (1,2)) #Can add stopwords here\nX = tfidf.fit_transform(X_train)\nle = 
preprocessing.LabelEncoder()\nle.fit(y_train)\ntarget_labels = le.classes_\ntrain_y = le.transform(y_train)\n\ncls = LogisticRegression(verbose = 1, random_state=0, solver='lbfgs', max_iter=400)\ncls.fit(X, train_y)\nyp = cls.predict(X)\nacc = metrics.accuracy_score(train_y, yp)\nprint(\" Accuracy on %s is: %s\" % ('train', acc))\n\nvalid = tfidf.transform(X_val)\nyp = cls.predict(valid)\nacc = metrics.accuracy_score(y_val, yp)\nprint(\" Accuracy on %s is: %s\" % ('valid', acc))\n\ntest = tfidf.transform(X_test)\nyp = cls.predict(test)\nacc = metrics.accuracy_score(y_test, yp)\nprint(\" Accuracy on %s is: %s\" % ('test', acc))\n\n\n","sub_path":"logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"592965655","text":"from Circle import Circle\n\ndef main():\n    # Create a circle with radius 1\n    circle1 = Circle()\n    print(\"반지름이 \", circle1.radius,\n          \"인 원의 넓이는 \", circle1.getArea(), \"입니다.\")\n\n\n    # Create a circle with radius 25\n    circle2 = Circle(25)\n    print(\"반지름이 \", circle2.radius,\n          \"인 원의 넓이는 \", circle2.getArea(), \"입니다.\")\n\n    # Create a circle with radius 125\n    circle3 = Circle(125)\n    print(\"반지름이 \", circle3.radius,\n          \"인 원의 넓이는 \", circle3.getArea(), \"입니다.\")\n\n    # Modify circle radius\n    circle2.radius = 100\n    print(\"반지름이 \", circle2.radius,\n          \"인 원의 넓이는 \", circle2.getArea(), \"입니다.\")\n\nmain() # Call the main function\n","sub_path":"lec07/TestCircle.py","file_name":"TestCircle.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"630743119","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\n\nfrom django.http import HttpResponse,Http404\nfrom django.template.loader import get_template\nimport random\nimport time\nfrom datetime import datetime\nfrom dell.models import DellServer\nfrom rest_framework import serializers, viewsets\n\ndef index(request):\n    template = get_template('index.html')\n    quotes = [ '今日事,今日毕',\n            '要收获,先付出',\n            '知识就是力量',\n            '无言的爱',\n            '无欲则刚',\n            '喜欢一个人没有道理',\n            '一个人的个性就是他的命']\n    html = template.render({'quote': random.choice(quotes)})\n    return HttpResponse(html)\n    \ndef bootstrap(request):\n    template = get_template('bootstrap.html')\n    quotes = [ '今日事,今日毕',\n            '要收获,先付出',\n            '知识就是力量',\n            '无言的爱',\n            '无欲则刚',\n            '喜欢一个人没有道理',\n            '一个人的个性就是他的命']\n    html = template.render({'quote': random.choice(quotes)})\n    return HttpResponse(html)\n\ndef bank(request):\n    template = get_template('bank.html')\n    quotes = [ '今日事,今日毕',\n            '要收获,先付出',\n            '知识就是力量',\n            '无言的爱',\n            '无欲则刚',\n            '喜欢一个人没有道理',\n            '一个人的个性就是他的命']\n    html = template.render({'quote': random.choice(quotes)})\n    return HttpResponse(html)\n\ndef alldellserver(request):\n    dellserver = DellServer.objects.all()\n    template = get_template('dellserver.html')\n    now = datetime.now()\n    servernum = len(dellserver)\n    html = template.render(locals())\n    return HttpResponse(html)\n\n\ndef server_info(request,stg):\n    try:\n        server = DellServer.objects.get(stg=stg)\n    except DellServer.DoesNotExist:\n        raise Http404('Not found')\n    expiretime2 = server.endTime\n    expiretime = str(server.endTime).split('+')[0]\n\n    template = get_template('serverdisp.html')\n    nowtime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n    expire1 = datetime.strptime(expiretime,\"%Y-%m-%d %H:%M:%S\")\n    nowtime1 = datetime.strptime(nowtime,\"%Y-%m-%d %H:%M:%S\")\n    remaintime = str(expire1 - 
nowtime1)\n    avlTime = remaintime.split('days')[0]\n\n    html = template.render(locals())\n    return HttpResponse(html)\n\ndef search(request):\n    request.encoding = 'utf-8'\n    dellserver = []\n    if 'q' in request.GET:\n        searchstr = request.GET['q'].encode('utf-8')\n        message = '你搜索的内容为: ' + request.GET['q'].encode('utf-8')\n        aproducts = DellServer.objects.all()\n        for each in range(len(aproducts)):\n            if searchstr in aproducts[each].stg or searchstr in aproducts[each].MachineDescription:\n                dellserver.append(aproducts[each])\n        if len(dellserver) > 0:\n            template = get_template('search.html')\n            servernum = len(dellserver)\n            html = template.render(locals())\n            #html = template.render({'dellserver': dellserver})\n            return HttpResponse(html)\n        else:\n            message = '你搜索的内容为空'\n            return HttpResponse(message)\n        \n        #p = Product.objects.get(sku=sku)\n    else:\n        message = '你搜索的内容为空'\n        return HttpResponse(message)\n\nclass DellSerializer(serializers.HyperlinkedModelSerializer):\n    class Meta:\n        model = DellServer\n        #fields = ('pmodel', 'nickname','description', 'year', 'price')\n        fields = ('stg','MachineDescription', 'ServiceLevelDescription', 'startTime', 'endTime')\n\nclass DellviewSet(viewsets.ModelViewSet):\n    queryset = DellServer.objects.all()\n    serializer_class = DellSerializer\n\n","sub_path":"django/idcs/dell/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"541704082","text":"school = [\"locker\", \"classroom\"]\nprint (school)\nprint(\"You are the Class President of your school and you misplaced your bag after many meetings. Your job is to find your bag before the school closes!\")\nchoice = input(\"Your beginning location is the School. You have the choice of looking at your locker or the classroom your meetings were in. Which one? \\n\")\nchoice = choice.lower()\n\nif choice == \"locker\":\n    locker = [\"textbook\", \"gym clothes\"]\n    print(\"These are the items in your \" + str(choice) + \": \" + str(locker))\n    lockerChoice = input(\"You can check under the textbook or under the gym clothes. Which one? \\n\")\n    lockerChoice = lockerChoice.lower()\n    if lockerChoice == \"textbook\":\n        print(\"You looked under your textbook but there was nothing! Try again!\")\n    elif lockerChoice == \"gym clothes\":\n        print(\"Your gym clothes did not cover your bag! Try again!\")\nelif choice == \"classroom\":\n    classroom = [\"desk\", \"window sill\"]\n    print(\"These are the places where you usually place your bag down in the \" + str(choice) + \": \" + str(classroom))\n    classroomChoice = input(\"Where would you like to look? \\n\")\n    classroomChoice = classroomChoice.lower()\n    if classroomChoice == \"desk\":\n        print(\"Sorry, there was nothing on your desk! Try again!\")\n    elif classroomChoice == \"window sill\":\n        print(\"You found your bag! 
Congrats!\")\n \n ","sub_path":"Text Adventure/textadventure.py","file_name":"textadventure.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"317282933","text":"from PyQt5.QtWidgets import QWidget, QApplication, QVBoxLayout, \\\n QLabel, QPushButton, QTableWidget, QTableWidgetItem\nfrom PyQt5.QtCore import Qt\nimport sys\nimport json\nimport requests\n\nclass 표위젯(QWidget): \n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self): \n self.table = QTableWidget(150, 5, self)\n header = ['종목', '전일 종가', '변동가', '변동률', '시가']\n\n bithumbUrl = 'https://api.bithumb.com/public/ticker/ALL_KRW' \n # 빗썸 오픈 API : https://apidocs.bithumb.com/docs/ticker 참고\n data = json.loads(requests.get(bithumbUrl).text)\n\n # print(data)\n for index, coin in enumerate(data['data']): # data에서 data항목만 뽑아서 순서를 정해 줌\n if coin == 'date':\n break\n self.table.setItem(index, 0, QTableWidgetItem(coin)) \n self.table.setItem(index, 1, QTableWidgetItem(data['data']\\\n [coin]['prev_closing_price']+'원')) # 전일종가\n self.table.setItem(index, 2, QTableWidgetItem(data['data']\\\n [coin]['fluctate_24H']+'원')) # 최근24시간 변동가\n self.table.setItem(index, 3, QTableWidgetItem(data['data']\\\n [coin]['fluctate_rate_24H']+\" %\")) # 최근24시간 변동률\n self.table.setItem(index, 4, QTableWidgetItem(data['data']\\\n [coin]['opening_price']+\"원\")) # 시가\n \n self.table.setHorizontalHeaderLabels(header)\n self.table.cellClicked.connect(self.showCellPosition)\n\n self.label = QLabel()\n vbox = QVBoxLayout()\n vbox.addWidget(self.table)\n vbox.addWidget(self.label, alignment=Qt.AlignCenter)\n\n self.setLayout(vbox)\n\n self.setWindowTitle('QTableWidget')\n self.setGeometry(300, 300, 720, 500)\n self.show()\n\n def showCellPosition(self):\n 행 = self.table.currentColumn()\n 열 = self.table.currentRow()\n self.label.setText(f'{행} 행, {열} 열입니다.')\n \napp = QApplication(sys.argv)\n실행인스턴스 = 표위젯()\napp.exec_()\n","sub_path":"419_표위젯(QTableWidget).py","file_name":"419_표위젯(QTableWidget).py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"640960851","text":"__author__ = 'nghia'\n\nfrom pymir import AudioFile\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef read_wav(filePath, windowTime):\n '''\n\n read a music file & return\n\n :param filePath:\n :param windowTime:\n :return: list (windows, wavData)\n '''\n wavData = AudioFile.open(filePath)\n windows = wavData.frames(wavData.sampleRate*windowTime/1000)\n return [windows, wavData]\n\n\ndef spectral_centroid(window):\n return window.spectrum().centroid()\n\n\ndef short_term_energy(window):\n xi = 0 # the short-term energy for the i-th window\n for m in range(0, len(window) - 1):\n hamming = 0.54 + 0.46 * np.cos(2 * np.pi * (len(window) - m - 1) / (len(window) - 1)) # use hamm)ing window\n xi += np.power(window[m], 2) * np.power(hamming, 2)\n return xi\n\n\ndef test():\n windows = read_wav(\"input/example.wav\", 10)\n X = [short_term_energy(window) for window in windows]\n Y = [spectral_centroid(window) for window in windows]\n\n # plot the first 1024 samples\n plt.plot(Y)\n\n # label the axes\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Time (samples)\")\n\n # set the title\n plt.title(\"Flute Sample\")\n # display the plot\n 
plt.show()","sub_path":"MusicRecogProj/main/extract_features.py","file_name":"extract_features.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"369722153","text":"#On a chessboard, positions are marked with a letter between a and h for the column and a number between 1 and 8 for the row.\r\n#Given an input string with a letter and number, print whether it is in a corner, at the border, or in the inside of the chess board.\r\n\r\ninputStr = 'a1'\r\ncorner=['a1','h1','a8','h8']\r\nborder=['b1','c1','d1','e1','f1','g1','a2','a3','a4','a5','a6','a7','b8','c8','d8','e8','f8','g8','h2','h3','h4','h5','h6','h7']\r\nif inputStr in corner:\r\n print(\"corner\")\r\nelif(inputStr in border):\r\n print(\"Border\")\r\nelse:\r\n print(\"Inside\")\r\n","sub_path":"problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"240995566","text":"from lxml import html\nimport requests\nimport json\nfrom validity import valid\n# from flask import Flask\n\nclass get_recipe(object):\n def __init__(self, list, pg):\n self.list = list\n self.pg = pg\n def return_recipe(self):\n api_key='a7117cb7dc7f8b768ec323b949e752dd'\n # for i in range(len(self.list)):\n # ingr = ingr+self.list[i]+','\n # ingr = ingr[:-1]\n response = []\n num = 0\n page = requests.get('http://food2fork.com/api/search?key='+api_key+'&q='+self.list+'&page='+str(self.pg))\n parsed = json.loads(page.text)\n count = parsed['count']\n for i in range(0,count):\n if(valid(parsed['recipes'][i]['publisher'])):\n temp = json.dumps({'title':parsed['recipes'][i]['title'],'id':parsed['recipes'][i]['recipe_id']},separators=(',',':'))\n response.append(temp)\n num=num+1\n return str(response).replace(\"'\",'')\n\nif __name__ == '__main__':\n obj = get_recipe(\"shredded chicken\",1)\n obj.return_recipe()\n","sub_path":"getrecipe.py","file_name":"getrecipe.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"182289908","text":"# -*- coding: utf-8 -*-\n# coding: utf8\n\nimport os\nimport sys\nimport io\nimport codecs\nimport locale\nimport subprocess\nfrom subprocess import *\nimport re\n\n\n\nIP = input(\"Введите IP кассы: \")\nFSRAR_ID_NEW = input(\"Введите Новый FSRAR_ID____БЕЗ НУЛЯ впереди!: \")\nFSRAR_ID_OLD = input(\"Введите Старый FSRAR_ID____БЕЗ НУЛЯ впереди!: \")\nRoadKassa = \"ssh -o\" + \" \" \"StrictHostKeyChecking=no\" + \" \" + \"root@\" + IP\nroadCS = \"ssh root@192.168.0.15\"\nroadPuppet = \"ssh root@192.168.0.12\"\n\n\n\n##Подключение к SSH:\n \nos.chdir(\"C:\\\\OpenSSH\")\n\n##Создание нового VPN сертификата на КС\n\ndef CreateCertVpnOnKS():\n\n Command8 = \"/opt/certificate/gencert.sh\" + \" \" + \"cash-\" + FSRAR_ID_NEW + \"-\" + FSRAR_ID_NEW + \"1\"\n Qwerty8 = roadCS + \" \" + \"\\\"\" + Command8 + \"\\\"\" \n print(\"Создание нового VPN сертификата на КС: \" + Qwerty8 + \"\\n\")\n res8 = Popen(Qwerty8, shell=True, stdout=PIPE)\n out8 = str(res8.communicate()[0].decode(\"utf8\"))\n print (\"Создание нового VPN сертификата на КС:\\n\\n \" + out8) \n\n \n\n##Удаление сертификатов на Puppet \n\ndef DelCertPuppet():\n\n Command6 = \"puppet cert clean\" + \" \" + \"cash-\" + FSRAR_ID_OLD + \"-\" + FSRAR_ID_OLD + \"1\"\n Qwerty6 = roadPuppet + \" \" + \"\\\"\" + Command6 + \"\\\"\"\n print(\"Удаление старых сертификатов на Puppet: 
\" + Qwerty6 + \"\\n\")\n res6 = Popen(Qwerty6, shell=True, stdout=PIPE)\n out6 = str(res6.communicate()[0].decode(\"utf8\"))\n print (\"Удаление старых сертификатов на Puppet:\\n\\n \" + out6)\n\n Command9 = \"puppet cert clean\" + \" \" + \"cash-\" + FSRAR_ID_NEW + \"-\" + FSRAR_ID_NEW + \"1\"\n Qwerty9 = roadPuppet + \" \" + \"\\\"\" + Command9 + \"\\\"\"\n print(\"Удаление новых(если есть) сертификатов на Puppet: \" + Qwerty9 + \"\\n\")\n res9 = Popen(Qwerty9, shell=True, stdout=PIPE)\n out9 = str(res9.communicate()[0].decode(\"utf8\"))\n print (\"Удаление новых(если есть) сертификатов на Puppet:\\n\\n \" + out9)\n\n\n##Переименовываем кассу:\ndef per():\n\n Command1 = \"sed -i 's/[0-9]\\{12\\}/\" + \"0\" + FSRAR_ID_NEW + \"/g'\" + \" \" + \"/linuxcash/cash/data/cash.reg\" + \" && \" + \"sed -i 's/[0-9]\\{11\\}/\" + FSRAR_ID_NEW + \"/g'\" + \" \" + \"/etc/hosts\" + \" && \" + \"sed -i 's/[0-9]\\{11\\}/\" + FSRAR_ID_NEW + \"/g'\" + \" \" + \"/etc/hostname\" + \" && \" + \"sed -i 's/[0-9]\\{11\\}/\" + FSRAR_ID_NEW + \"/g'\" + \" \" + \"/etc/puppet/puppet.conf\" + \" && \" + \"service hostname restart\"\n Qwerty1 = RoadKassa + \" \" + \"\\\"\" + Command1 + \"\\\"\"\n print(\"Переименовываем кассу: \" + Qwerty1 + \"\\n\")\n res1 = Popen(Qwerty1, shell=True, stdout=PIPE)\n out1 = str(res1.communicate()[0].decode(\"utf8\"))\n print (\"Переименовываем кассу:\\n\\n \" + out1)\n\n##Запускаем puppet agent:\n\ndef agent():\n\n Command3 = \"rm -rf /var/lib/puppet/ssl && service puppet restart && sleep 20 && puppet agent -t\"\n Qwerty3 = RoadKassa + \" \" + \"\\\"\" + Command3 + \"\\\"\"\n print(\"Запуск puppet: \" + Qwerty3 + \"\\n\")\n## res3 = Popen(Qwerty3, shell=True, stdout=PIPE)\n## out3 = str(res3.communicate()[0].decode(\"utf8\"))\n print (\"Запуск puppet:\\n\\n \")\n os.system(Qwerty3)\n\n\n##Удаляем старые сертификаты VPN из кассы:\n\ndef vpn():\n\n Command2 = \"rm -f /etc/openvpn/\" + \"*\" + FSRAR_ID_OLD + \"*.*\" + \" \" + \"&&\" + \" \" + \"rm -f /var/run/openvpn/\" + \"*\" + FSRAR_ID_OLD + \"*.*\"\n Qwerty2 = RoadKassa + \" \" + \"\\\"\" + Command2 + \"\\\"\"\n print(\"Удаляем старые сертификаты VPN из кассы: \" + Qwerty2 + \"\\n\")\n res2 = Popen(Qwerty2, shell=True, stdout=PIPE)\n out2 = str(res2.communicate()[0].decode(\"utf8\"))\n print (\"Удаляем старые сертификаты VPN из кассы:\\n\\n \" + out2)\n\n\n##Инициализация данных в БД:\n\ndef BD():\n \n Command4 = \"mysql -uroot dictionaries < /linuxcash/cash/tools/tools_avail/initial_data/dictionaries-full.sql\"\n Qwerty4 = RoadKassa + \" \" + \"\\\"\" + Command4 + \"\\\"\"\n print(\"Инициализация данных в БД: \" + Qwerty4 + \"\\n\")\n res4 = Popen(Qwerty4, shell=True, stdout=PIPE)\n out4 = str(res4.communicate()[0].decode(\"utf8\"))\n print (\"Инициализация данных в БД:\\n\\n \" + out4)\n\n\n##Вывести список IP адресов:\n\ndef ip():\n\n Command5 = \"sudo ifconfig | grep\" + \" \" + \"\\'\" + \"inet addr\" + \"\\'\"\n Qwerty5 = RoadKassa + \" \" + \"\\\"\" + Command5 + \"\\\"\" \n print(\"Вывести список IP адресов: \" + Qwerty5 + \"\\n\")\n res5 = Popen(Qwerty5, shell=True, stdout=PIPE)\n out5 = str(res5.communicate()[0].decode(\"utf8\"))\n print (\"Вывести список IP адресов:\\n\\n \" + out5)\n\n\n##Удаление старого сертификата VPN на КС:\n\ndef DelCertVPN():\n\n Command7 = \"cd /opt/certificate/\" + \" && \" + \"svn del\" + \" \" + \"cash-\" + FSRAR_ID_OLD + \"-\" + FSRAR_ID_OLD + \"1\" + \".crt\" + \" \" + \"cash-\" + FSRAR_ID_OLD + \"-\" + FSRAR_ID_OLD + \"1\" + \".csr\" + \" \" + \"cash-\" + FSRAR_ID_OLD + \"-\" + FSRAR_ID_OLD + 
\"1\" + \".key\" + \" && \" + \"svn commit -m\" + \" \" + \"\\'\" + \"del bag cert\" + \"\\'\"\n Qwerty7 = roadCS + \" \" + \"\\\"\" + Command7 + \"\\\"\" \n print(\"Удаление старого сертификата VPN на КС: \" + Qwerty7 + \"\\n\")\n res7 = Popen(Qwerty7, shell=True, stdout=PIPE)\n out7 = str(res7.communicate()[0].decode(\"utf8\"))\n print (\"Удаление старого сертификата VPN на КС:\\n\\n \" + out7) \n\nCreateCertVpnOnKS() ##Создаем сертификат VPN на КС\nDelCertPuppet() ##Удаляем старый и новый сертификат Puppet\nper() ##Прописываем везде новый FSRAR на кассе\nagent() ##Запуск агента Puppet\nvpn() ##Удалить старые сертификаты vpn\nBD() ##Инициализация данных в БД\nip() ##Вывести список IP адресов\nDelCertVPN() ##Удалить старый сертификат VPN\n\n##Выход из консоли:\ninput('\\nНажмите Enter для выхода\\n')\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"ProjectPython/Django/MyCassAdmin/CommandOnKass/static/commandOnKass/Site/File/ChangeOrgazization.py","file_name":"ChangeOrgazization.py","file_ext":"py","file_size_in_byte":6440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"254802840","text":"import numpy as np\nimport pickle\n\nmask = np.array([[0., -1., 1., 0., 1., 0., 1.]] * 3) # no horizontal rotation\nmask2 = np.array([[0., 0., 0., 0., 0., 1.5, 0.]] * 3) # horizontal hand\n\n\nclass Population():\n\n def __init__(self, size, elite=None, mutation=None):\n if elite is None:\n self.individuals = [self._create_individual() for _ in range(size)]\n else:\n elite = [self._create_individual(e['weights'], e['id']) for e in elite]\n\n self.individuals = elite + self._mutate(elite, mutation)\n while len(self.individuals) < size:\n self.individuals += [self._create_individual()]\n\n def _mutate(self, elite, mutation):\n if mutation == 'crossover':\n if len(elite) % 2 == 1:\n elite = elite[:-1]\n children = [self._mutate_cross_over(elite[i], elite[i + 1]) for i in range(0, len(elite), 2)]\n return list(sum(children, ()))\n elif mutation == 'single_crossover':\n if len(elite) % 2 == 1:\n elite = elite[:-1]\n children = [self._mutate_single_cross_over(elite[i], elite[i + 1]) for i in range(0, len(elite), 2)]\n return list(sum(children, ()))\n elif mutation == 'random':\n return [self._mutate_individual(e) for e in elite]\n else:\n raise ValueError('unknown mutation: {}'.format(mutation))\n\n def _create_individual(self, weights=None, id=None):\n if weights is None:\n return {'weights': self._get_syn_weights(), 'distance': -1., 'id': id}\n else:\n return {'weights': weights, 'distance': -1., 'id': id}\n\n def _get_syn_weights(self):\n weights = (np.random.rand(3, 7) * 5)\n return (weights * mask + mask2).tolist()\n\n def _mutate_individual(self, individual):\n weights = individual['weights']\n weights += np.random.rand(3, 7) - .5\n weights *= mask\n weights += mask2\n return self._create_individual(weights.tolist())\n\n def _mutate_cross_over(self, individual1, individual2):\n weights1 = individual1['weights']\n weights2 = individual2['weights']\n\n mask1 = np.randon.randint(2, size=len(weights1))\n mask2 = np.ones(len(weights2)) - mask1\n\n new1 = self._create_individual((weights1 * mask1 + weights2 * mask2).tolist())\n new2 = self._create_individual((weights1 * mask2 + weights2 * mask1).tolist())\n return new1, new2\n\n def _mutate_single_cross_over(self, individual1, individual2):\n weights1 = individual1['weights']\n weights2 = individual2['weights']\n\n random = np.random.randint(len(weights1))\n\n mask1 = np.concatenate((np.ones(random), 
np.zeros(len(weights1) - random)))\n\n        mask2 = np.ones(len(weights2)) - mask1\n\n        new1 = self._create_individual((weights1 * mask1 + weights2 * mask2).tolist())\n        new2 = self._create_individual((weights1 * mask2 + weights2 * mask1).tolist())\n        return new1, new2\n\n    def get_elite(self, size):\n        size = min(size, len(self.individuals))\n        return sorted(self.individuals, key=lambda i: i['distance'], reverse=True)[:size]\n\n    def get_population_size(self):\n        return len(self.individuals)\n\n    def load(self, filename):\n        with open(filename, 'rb') as f:\n            individuals = pickle.load(f)\n        self.individuals = individuals\n\n\nclass EvolutionaryAlgo():\n\n    def __init__(self, generation_size, population_size, seed=None, mutation='random'):\n        self.seed = seed\n        self.population_size = population_size\n        self.generation_size = generation_size\n        self.generations = [Population(population_size)]\n        self.mutation = mutation\n\n    def set_distance(self, distance, generation, individual):\n        self.generations[generation].individuals[individual]['distance'] = distance\n\n    def set_id(self, generation, individual):\n        if self.get_id(generation, individual) is None:\n            id = (generation, individual)\n            self.generations[generation].individuals[individual]['id'] = id\n\n    def get_id(self, generation, individual):\n        return self.generations[generation].individuals[individual]['id']\n\n    def get_weights(self, generation, individual):\n        return self.generations[generation].individuals[individual]['weights']\n\n    def mutate(self):\n        elite = self.generations[-1].get_elite(self.population_size // 2)\n        self.generations += [Population(self.population_size, elite=elite, mutation=self.mutation)]\n\n    def _get_filename(self, generation):\n        return 'evolution_results/generation_{}.pickle'.format(generation)\n\n    def save(self, generation):\n        filename = self._get_filename(generation)\n        with open(filename, 'wb') as f:\n            pickle.dump(self.generations[generation].individuals, f)\n        return filename\n\n    def load(self, generation):\n        filename = self._get_filename(generation)\n        pop = Population(self.population_size)\n        pop.load(filename)\n        self.population_size = pop.get_population_size()\n        self.generations = [pop]\n\n# evol = EvolutionaryAlgo(3,2)\n#\n# population = evol.generations[0]\n#\n# for ind in range(len(population.individuals)):\n#     evol.set_distance(1000.-ind, 0, ind)\n#\n# print(evol.generations[0].individuals)\n# population = evol.generations[0]\n#\n# print(sorted(population.individuals, key=lambda i: i['distance'], reverse=True))\n#\n# evol.mutate()\n# print(evol.generations[1].individuals)\n","sub_path":"evolution.py","file_name":"evolution.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"100046811","text":"#Time complexity - O(n)\n#Space complexity - O(n)\n#Problem done in Leetcode - Yes\n#Any challenges faced - No\n\n#Algorithm :\n#We use two dictionaries to solve the problem. One dictionary stores the unique replacement characters from s to t, and the other stores the unique replacement characters from t to s. While storing the replacements, if they do not match the ones already present in the dictionaries, we return False; otherwise we return True.\n\n\nclass Solution(object):\n    def isIsomorphic(self, s, t):\n        \"\"\"\n        :type s: str\n        :type t: str\n        :rtype: bool\n        \"\"\"\n        d=dict()\n        d1=dict()\n        for i in range(len(s)):\n            if s[i] not in d:\n                d[s[i]]=t[i]\n            else:\n                if d[s[i]]!=t[i]:\n                    return False\n            if t[i] not in d1:\n                
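# t[i] has no mapping yet, so record the reverse (t -> s) direction as well\n                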
d1[t[i]]=s[i]\n            else:\n                if d1[t[i]]!=s[i]:\n                    return False\n        return True","sub_path":"Isomorphic_strings.py","file_name":"Isomorphic_strings.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"493854620","text":"# ZSS - Various file records\n\nclass ZSSRecord: # Generic file record\n    def __init__(self, qinfo):\n        self.item_name = qinfo.fileName()\n        self.item_path = qinfo.filePath()\n        self.isFile = int(qinfo.isFile())\n        self.isDir = int(qinfo.isDir())\n\n    def getDict(self):\n        return {\n            'item_name': self.item_name,\n            'item_path': self.item_path,\n            'isFile': self.isFile,\n            'isDir': self.isDir,\n        }\n\nclass ZSSRecordDocument(ZSSRecord): # File record - Document\n    def __init__(self, qinfo):\n        super().__init__(qinfo)\n        self.author = qinfo.owner()\n\n    def getDict(self):\n        return {\n            'item_name': self.item_name,\n            'item_path': self.item_path,\n            'isFile': self.isFile,\n            'isDir': self.isDir,\n            'author': self.author\n        }","sub_path":"experimental/zss_record.py","file_name":"zss_record.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"209822785","text":"from account.decorators import super_admin_required\nfrom utils.api import APIView, validate_serializer\n\nfrom faq.models import FAQ\nfrom faq.serializers import (FAQSerializer, CreateFAQSerializer,\n                             EditFAQSerializer)\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass FAQAdminAPI(APIView):\n    @validate_serializer(CreateFAQSerializer)\n    @super_admin_required\n    def post(self, request):\n        \"\"\"\n        publish faq\n        \"\"\"\n        data = request.data\n        faq = FAQ.objects.create(question=data[\"question\"],\n                                 answer=data[\"answer\"],\n                                 created_by=request.user,\n                                 visible=data[\"visible\"])\n        return self.success(FAQSerializer(faq).data)\n\n    @validate_serializer(EditFAQSerializer)\n    @super_admin_required\n    def put(self, request):\n        \"\"\"\n        edit faq\n        \"\"\"\n        data = request.data\n        try:\n            faq = FAQ.objects.get(id=data.pop(\"id\"))\n        except FAQ.DoesNotExist:\n            return self.error(\"FAQ does not exist\")\n\n        for k, v in data.items():\n            setattr(faq, k, v)\n        faq.save()\n\n        return self.success(FAQSerializer(faq).data)\n\n    @super_admin_required\n    def get(self, request):\n        \"\"\"\n        get faq list / get one faq\n        \"\"\"\n        faq_id = request.GET.get(\"id\")\n        if faq_id:\n            try:\n                faq = FAQ.objects.get(id=faq_id)\n                return self.success(FAQSerializer(faq).data)\n            except FAQ.DoesNotExist:\n                return self.error(\"FAQ does not exist\")\n        faq = FAQ.objects.all().order_by(\"-create_time\")\n        if request.GET.get(\"visible\") == \"true\":\n            faq = faq.filter(visible=True)\n        return self.success(self.paginate_data(request, faq, FAQSerializer))\n\n    @super_admin_required\n    def delete(self, request):\n        if request.GET.get(\"id\"):\n            FAQ.objects.filter(id=request.GET[\"id\"]).delete()\n        return self.success()\n","sub_path":"faq/views/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"96505630","text":"'''\nShin-chan's mom buys fruit at the supermarket. She always deletes the fruit she\ndislikes first and then buys the remaining fruit to take home. Design a program\nfor her that deletes each disliked fruit as it is entered and then shows the\nfruit that is left. Entering 'Enter' ends the input.\n'''\n\nfruit_list = ['香蕉', '蘋果', '橘子', '鳳梨', '西瓜']\n\nwhile True:\n    fruit = input('請輸入要刪去的水果品項(輸入Enter刪除結束)')\n    if fruit != 'Enter':\n        try:\n            fruit_list.remove(fruit)\n        except:\n            print(\"The fruit you entered is not in the list, please check again\")\n    else:\n        
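# the literal string 'Enter' acts as the sentinel that ends the input loop\n        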
        break\n\nprint('Fruits to buy:')\nfor fruit in fruit_list:\n    print(fruit)","sub_path":"W6_a3.py","file_name":"W6_a3.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"551544167","text":"import ctypes\r\nimport hashlib\r\nimport base64\r\nimport os\r\nimport sys\r\n\r\nfrom _io import BytesIO\r\nfrom Tea.stream import BaseStream\r\n\r\n\r\ndef int_overflow(val):\r\n    maxint = 2147483647\r\n    if not -maxint - 1 <= val <= maxint:\r\n        val = (val + (maxint + 1)) % (2 * (maxint + 1)) - maxint - 1\r\n    return val\r\n\r\n\r\ndef unsigned_right_shift(n, i):\r\n    if n < 0:\r\n        n = ctypes.c_uint32(n).value\r\n\r\n    if i < 0:\r\n        return -int_overflow(n << abs(i))\r\n\r\n    return int_overflow(n >> i)\r\n\r\n\r\nclass CRC64:\r\n    POLY = 0xc96c5795d7870f42\r\n\r\n    def __init__(self):\r\n        self.value = 0\r\n\r\n    @property\r\n    def table(self):\r\n        return int(''.join(self._table))\r\n\r\n    @property\r\n    def _table(self):\r\n        table = []\r\n        for n in range(256):\r\n            crc = n\r\n            for j in range(8):\r\n                if crc & 1 == 1:\r\n                    crc = unsigned_right_shift(crc, 1) ^ self.POLY\r\n                else:\r\n                    crc = unsigned_right_shift(crc, 1)\r\n            table.append(crc)\r\n\r\n        return table\r\n\r\n    def get_value(self):\r\n        first = unsigned_right_shift(self.value, 1) / 5\r\n        second = self.value - first * 10\r\n        return first + second\r\n\r\n    def update(self, bt):\r\n        for b in bt:\r\n            self.value = ~self.value\r\n            self.value = self._table[(self.value ^ b) & 0xff] ^ unsigned_right_shift(self.value, 8)\r\n            self.value = ~self.value\r\n\r\n\r\nclass VerifyStream(BaseStream):\r\n    def __init__(self, file, res, size=1024):\r\n        super().__init__(size)\r\n        self.file = file\r\n        self.size = size\r\n        self.crc = CRC64()\r\n        self.ref = res\r\n        self.md5 = hashlib.md5()\r\n        if isinstance(file, BytesIO):\r\n            self.file_size = file.getbuffer().nbytes\r\n        else:\r\n            self.file_size = os.path.getsize(file.name)\r\n        self._file_size = self.file_size\r\n\r\n    def __len__(self):\r\n        return self.file_size\r\n\r\n    def __iter__(self):\r\n        return self\r\n\r\n    def __next__(self):\r\n        return self.read(size=self.size, loop=True)\r\n\r\n    def read(self, size=None, loop=False):\r\n        res = self.file.read(size)\r\n        if size is None:\r\n            size = sys.maxsize\r\n\r\n        if isinstance(res, str):\r\n            bres = res.encode('utf-8')\r\n        else:\r\n            bres = res\r\n\r\n        if size <= self._file_size:\r\n            self.crc.update(bres)\r\n            self.md5.update(bres)\r\n        else:\r\n            self.ref['md5'] = base64.b64encode(self.md5.digest()).decode('utf-8')\r\n            self.ref['crc'] = self.crc.get_value()\r\n\r\n        if not res:\r\n            self.refresh()\r\n            if loop:\r\n                raise StopIteration\r\n            else:\r\n                return res\r\n\r\n        if size == sys.maxsize or size >= self.file_size:\r\n            self.crc.update(bres)\r\n            self.md5.update(bres)\r\n            self.ref['md5'] = base64.b64encode(self.md5.digest()).decode('utf-8')\r\n            self.ref['crc'] = self.crc.get_value()\r\n            return res\r\n\r\n        self._file_size -= len(bres)\r\n        return res\r\n\r\n    def refresh(self):\r\n        self.crc = CRC64()\r\n        self.md5 = hashlib.md5()\r\n        self.file.seek(0, 0)\r\n        self._file_size = self.file_size\r\n\r\n    def close(self):\r\n        self.file.close()\r\n","sub_path":"util/python/alibabacloud_oss_util/verify_stream.py","file_name":"verify_stream.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"326410050","text":"import sys\nimport os\nimport time\n\npath = '/Users/poojitha/Desktop/273-Lab1/input/'\n\ndef ext_merge_sort(lis):\n
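    # classic top-down merge sort: split the list, sort each half recursively, then merge\n    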
if len(lis)<2 :\n return lis\n else:\n midval = len(lis)//2\n l_list = ext_merge_sort(lis[:midval])\n r_list = ext_merge_sort(lis[midval:])\n f_list = merge_lists(l_list,r_list)\n return f_list \n\ndef merge_lists(l_list1,r_list1):\n l_iter,r_iter =0,0\n l_list,r_list= [],[]\n l_list.extend(l_list1)\n r_list.extend(r_list1)\n res=[]\n while l_iter < len(l_list) and r_iter < len(r_list):\n if int(r_list[r_iter]) > int(l_list[l_iter]):\n #print(l_list[l_iter])\n res.append(l_list[l_iter])\n l_iter += 1\n else:\n res.append(r_list[r_iter])\n r_iter += 1\n \n res.extend(l_list[l_iter:])\n res.extend(r_list[r_iter:])\n #print (res)\n return res\n\ndef writedata(sorted_data):\n op_file = open('sorted.txt','a')\n for x in sorted_data:\n op_file.write(str(x))\n op_file.write('\\n')\n op_file.close()\n\ndef sortdata(unsorted_data):\n sorted_data = ext_merge_sort(unsorted_data)\n writedata(sorted_data) \n\ndef readdata(): \n file_names = ['unsorted_'+str(i)+'.txt' for i in range(1,11)]\n unsorted_data =[]\n time.sleep(1)\n for fi in file_names:\n fpath = os.path.join(path,fi)\n f_read =open(fpath,'r')\n unsorted_data += [int(x.strip()) for x in f_read.readlines()]\n time.sleep(1)\n f_read.close()\n sortdata(unsorted_data)\n\ndef main():\n readdata() \n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"ext_merge_sort.py","file_name":"ext_merge_sort.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"343860998","text":"# -*- coding: utf-8 -*-\n#\n# A Fabric file for installing, deploying and running Invenio\n#\n# Lars Holm Nielsen \n#\n# Copyright (C) 2012 CERN.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see .\n\n\"\"\"\nFabric tasks for bootstrapping, installing, deploying and running Atlantis.\n\nExamples\n--------\n\nSetup virtual environment and database with Python 2.4 and Invenio 1.0.0:\n fab loc_py24 invenio:v1.0.0 bootstrap\n fab loc_py24 invenio:v1.0.0 invenio_create_demosite\n\nSetup virtual environment and database with Python 2.5 and Invenio master:\n fab loc_py25 invenio:origin/master bootstrap\n\nDump (database and virtual environment), load and drop:\n fab loc_py24 invenio:v1.0.0 dump\n fab loc_py24 invenio:v1.0.0 load\n fab loc_py24 invenio:v1.0.0 drop\n\"\"\"\n\nfrom fabric.api import task, abort\nimport os\n\nfrom inveniofab.env import env_create, env_get\nfrom inveniofab.venv import *\n\n@task\ndef loc(prefix, activate=True, version=None, db_suffix='', **kwargs):\n \"\"\" Local environment (default Python) \"\"\"\n env = env_create('loc', activate=activate, **kwargs)\n\n env.CFG_SRCDIR = os.path.expanduser(\"~/src/\")\n\n env.CFG_INVENIO_SRCDIR = os.path.join(env.CFG_SRCDIR, 'invenio')\n env.CFG_INVENIO_PREFIX = prefix\n env.CFG_INVENIO_PORT_HTTP = \"4000\"\n env.CFG_INVENIO_PORT_HTTPS = \"4000\"\n env.CFG_INVENIO_USER = env.user\n env.CFG_INVENIO_APACHECTL = 'service apache2'\n env.CFG_INVENIO_ADMIN = 'nobody@localhost'\n\n env.CFG_INVENIO_REPOS = [\n ('invenio', {\n 'repository' : 'http://invenio-software.org/repo/invenio/',\n 'ref': 'origin/maint-1.1',\n 'bootstrap_targets': ['all', 'install', 'install-mathjax-plugin', 'install-ckeditor-plugin', 'install-pdfa-helper-files', 'install-jquery-plugins', ],\n 'deploy_targets': ['all', 'check-upgrade', 'install', ],\n }),\n ]\n\n env.CFG_DATABASE_DUMPDIR = prefix\n env.CFG_DATABASE_HOST = 'localhost'\n env.CFG_DATABASE_NAME = _shorten_mysqlname('atlantis%s' % db_suffix)\n env.CFG_DATABASE_USER = _shorten_mysqlname('atlantis%s' % db_suffix)\n env.CFG_DATABASE_PASS = _shorten_mysqlname('atlantis%s' % db_suffix)\n env.CFG_DATABASE_DROP_ALLOWED = True\n\n env.CFG_MISCUTIL_SMTP_HOST = '127.0.0.1'\n env.CFG_MISCUTIL_SMTP_PORT = '1025'\n\n\n@task\ndef loc_py24(prefix=None, version=None, activate=True, **kwargs):\n \"\"\" Local environment (Python 2.4) \"\"\"\n prefix = _get_prefix(prefix, 'atlantis-py24')\n kwargs['python'] = os.path.expanduser('~/.pythonbrew/pythons/Python-2.4.6/bin/python')\n return loc(prefix, activate=activate, version=None, db_suffix='_py24', **kwargs)\n\n\n@task\ndef loc_py25(prefix=None, version=None, activate=True, **kwargs):\n \"\"\" Local environment (Python 2.5) \"\"\"\n prefix = _get_prefix(prefix, 'atlantis-py25')\n kwargs['python'] = os.path.expanduser('~/.pythonbrew/pythons/Python-2.5.4/bin/python')\n return loc(prefix, activate=activate, version=None, db_suffix='_py25', **kwargs)\n\n\n@task\ndef loc_py26(prefix=None, version=None, activate=True, **kwargs):\n \"\"\" Local environment (Python 2.6) \"\"\"\n prefix = _get_prefix(prefix, 'atlantis-py26')\n kwargs['python'] = os.path.expanduser('~/.pythonbrew/pythons/Python-2.6.7/bin/python')\n return loc(prefix, activate=activate, version=None, db_suffix='_py26', **kwargs)\n\n\n@task\ndef loc_py27(prefix=None, version=None, activate=True, **kwargs):\n \"\"\" Local environment (Python 2.7) \"\"\"\n prefix = _get_prefix(prefix, 'atlantis-py27')\n kwargs['python'] = os.path.expanduser('~/.pythonbrew/pythons/Python-2.7.3/bin/python')\n return loc(prefix, activate=activate, version=None, 
db_suffix='_py27', **kwargs)\n\n\n#\n# Helpers\n#\ndef _get_prefix(prefix, name):\n if not prefix:\n prefix = os.getenv('WORKON_HOME', '~/envs')\n prefix = os.path.join(prefix, name)\n return os.path.expandvars(os.path.expanduser(prefix))\n\ndef _shorten_mysqlname(name):\n if len(name) > 16:\n name = name.replace(\"_\", \"\", len(name) - 16)\n return name[:16]\n return name","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"395864306","text":"#!/usr/bin/python\n# encoding: utf-8\n\nfrom __future__ import unicode_literals\n\nimport sys\n\nimport sqlite3\nfrom sqlite3 import Error\nfrom workflow import Workflow3, ICON_INFO, ICON_WARNING, ICON_ERROR\n\n\ndef main(wf):\n\n github_school = \"https://git.txstate.edu\"\n github_personal = \"https://github.com/kevin-funderburg\"\n github_icon = \"icons/Icon-elusive-github@2x.png\"\n database = r\"classdata.db\"\n sql = \"SELECT * FROM fall2020;\"\n\n # create a database connection\n conn = create_connection(database)\n with conn:\n # execute_sql(conn, sql)\n cur = conn.cursor()\n cur.execute(sql)\n rows = cur.fetchall()\n\n for row in rows:\n name = row[0]\n folderPath = row[1]\n bookPath = row[2]\n oneNote = row[3]\n website = row[4]\n zoom = row[5]\n it = wf.add_item(uid=name,\n title=name,\n subtitle=\"open OneNote section\",\n arg=oneNote,\n autocomplete=name,\n valid=True,\n icon=\"icon.png\",\n icontype=\"file\")\n it.add_modifier('cmd',\n subtitle=\"go to class website: \" + website,\n arg=website,\n valid=True)\n it.add_modifier('alt',\n subtitle=\"browse in Alfred\",\n arg=folderPath,\n valid=True)\n it.add_modifier('shift',\n subtitle=bookPath,\n arg=bookPath,\n valid=True)\n it.add_modifier('ctrl',\n subtitle=\"go to zoom meeting\",\n arg=zoom,\n valid=True)\n\n\n it = wf.add_item(uid=github_school,\n title=\"github - texas state\",\n subtitle=github_school,\n arg=github_school,\n valid=True,\n icon=github_icon,\n icontype=\"file\")\n\n it = wf.add_item(uid=github_personal,\n title=\"github - personal\",\n subtitle=github_personal,\n arg=github_personal,\n valid=True,\n icon=github_icon,\n icontype=\"file\")\n\n wf.send_feedback()\n\n\ndef create_connection(db_file):\n \"\"\" create a database connection to the SQLite database\n specified by the db_file\n :param db_file: database file\n :return: Connection object or None\n \"\"\"\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n\n return conn\n\n\nif __name__ == '__main__':\n wf = Workflow3()\n log = wf.logger\n sys.exit(wf.run(main))\n","sub_path":"getClassData.py","file_name":"getClassData.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"262800690","text":"\"\"\"\nFunctions and techniques for in-memory caching of persistent data,\nin particular, for using memcache.\n\"\"\"\nimport logging\n\nfrom google.appengine.ext import db\nfrom google.appengine.api import memcache\n\n\ndef fetch_path(*args, **kwargs):\n cachekey = '-'.join(str(a) for a in args)\n result = memcache.get(cachekey)\n\n if result is None:\n result = dbfetch_path(*args, **kwargs)\n success = memcache.set(cachekey, result)\n if not success:\n logging.error(\"Memcache set failed for key: {0}\".format(cachekey))\n return result\n\n\ndef dbfetch_path(*args, **kwargs):\n d_k = db.Key.from_path(*args, **kwargs)\n result = db.get(d_k)\n return 
result\n","sub_path":"core/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"564669590","text":"import configparser\nfrom datetime import datetime\nimport os\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import TimestampType, StringType\nfrom pyspark.sql.functions import udf, col, monotonically_increasing_id\nfrom pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, dayofweek\n\n\n# config = configparser.ConfigParser()\n# config.read('dl.cfg')\n\n# os.environ['AWS_ACCESS_KEY_ID']=config['AWS_ACCESS_KEY_ID']\n# os.environ['AWS_SECRET_ACCESS_KEY']=config['AWS_SECRET_ACCESS_KEY']\n\n\ndef create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark\n\n\ndef process_song_data(spark, input_data, output_data):\n \"\"\"\n The function that retrieves song data from s3, processes it and uploads it to other s3 bucket.\n \n Parameters: \n spark (object): SparkSession which helps ro process big data.\n input_data (string): path to s3 bucket where input data stored. \n output_data (string): path to s3 bucket where data should be uploaded. \n \n \"\"\"\n # get filepath to song data file\n song_data = input_data + 'song_data/*/*/*/*.json'\n \n # read song data file\n df = spark.read.json(song_data)\n \n df.printSchema()\n# df.show(5)\n\n # extract columns to create songs table\n df.createOrReplaceTempView('songs_table')\n \n songs_table = spark.sql('''\n SELECT song_id, title, artist_id, year, duration \n FROM songs_table\n ORDER BY song_id\n ''')\n songs_table.printSchema()\n songs_table.show(5, truncate = False)\n \n # write songs table to parquet files partitioned by year and artist\n songs_table_output_path = output_data + 'songs_table.parquet'\n \n songs_table.dropDuplicates().write.mode('overwrite').partitionBy('year', 'artist_id').parquet(songs_table_output_path)\n print('Songs table done!!!!')\n\n # extract columns to create artists table\n df.createOrReplaceTempView('artist_table')\n \n artists_table = spark.sql('''\n SELECT artist_id AS artist_id,\n artist_name AS name,\n artist_location AS location,\n artist_latitude AS latitude,\n artist_longitude AS longitude\n FROM artist_table\n ORDER BY artist_id desc\n ''')\n artists_table.printSchema()\n artists_table.show(5, truncate = False)\n \n # write artists table to parquet files\n artists_table_output_path = output_data + 'artists_table.parquet'\n \n artists_table.dropDuplicates().write.mode('overwrite').parquet(artists_table_output_path)\n print('Artists table done!!!!')\n\n\ndef process_log_data(spark, input_data, output_data):\n \"\"\"\n The function that retrieves song data and log data from s3, processes it and uploads it to other s3 bucket.\n \n Parameters: \n spark (object): SparkSession which helps ro process big data.\n input_data (string): path to s3 bucket where input data stored. \n output_data (string): path to s3 bucket where data should be uploaded. 
\n \n \"\"\"\n # get filepath to log data file\n log_data = input_data + \"log_data/*.json\"\n\n # read log data file\n df = spark.read.json(log_data)\n \n # filter by actions for song plays\n df = df.filter(df.page == \"NextSong\")\n\n # extract columns for users table \n df.createOrReplaceTempView('logs_table')\n \n users_table = spark.sql(\"\"\"\n SELECT DISTINCT userId as user_id,\n firstName as first_name,\n lastName as last_name,\n gender, \n level\n FROM logs_table\n ORDER BY user_id\n \"\"\")\n \n # write users table to parquet files\n users_table_output_path = output_data + 'users_table.parquet'\n \n users_table.write.mode('overwrite').parquet(users_table_output_path)\n print('Users table done!!!!')\n\n # create timestamp column from original timestamp column\n get_timestamp = udf(lambda x: datetime.fromtimestamp(x / 1000.0), TimestampType())\n df = df.withColumn(\"timestamp\", get_timestamp(df.ts))\n \n # create datetime column from original timestamp column\n get_datetime = udf(lambda x: datetime.fromtimestamp(x / 1000.0).strftime('%Y-%m-%d %H:%M:%S'), StringType())\n df = df.withColumn(\"datetime\", get_datetime(df.ts))\n \n # extract columns to create time table\n df.createOrReplaceTempView('logs_time_table')\n \n time_table = spark.sql(\"\"\"\n SELECT DISTINCT datetime as start_time,\n hour(timestamp) as hour,\n dayofmonth(timestamp) as day,\n weekofyear(timestamp) as week,\n month(timestamp) as month,\n year(timestamp) as year,\n dayofweek(timestamp) as weekday\n FROM logs_time_table\n ORDER BY start_time\n \"\"\")\n \n time_table.printSchema()\n time_table.show(5, truncate = False)\n \n # write time table to parquet files partitioned by year and month\n time_table_output_path = output_data + 'time_table.parquet'\n \n time_table.write.mode('overwrite').partitionBy('year', 'month').parquet(time_table_output_path)\n print('Time table done!!!!')\n\n # read in song data to use for songplays table\n song_data = input_data + 'song_data/*/*/*/*.json'\n song_df = spark.read.json(song_data)\n\n # extract columns from joined song and log datasets to create songplays table\n songplays_df = df.join(song_df, (song_df.title == df.song) & (song_df.artist_name == df.artist))\n songplays_df = songplays_df.withColumn(\"songplay_id\", monotonically_increasing_id())\n songplays_df.createOrReplaceTempView('songplays_table')\n \n songplays_table = spark.sql(\"\"\"\n SELECT songplay_id,\n datetime as start_time,\n userId as user_id, \n level, \n song_id, \n artist_id, \n sessionId as session_id,\n location,\n userAgent as user_agent,\n month(timestamp) as month,\n year(timestamp) as year\n FROM songplays_table\n \"\"\")\n songplays_table.printSchema()\n songplays_table.show(5, truncate = False)\n\n # write songplays table to parquet files partitioned by year and month\n songplays_table_output_path = output_data + 'songplays_table.parquet'\n \n songplays_table.dropDuplicates().write.mode('overwrite').partitionBy('year', 'month').parquet(songplays_table_output_path)\n print('Songplays table done!!!!')\n\n\ndef main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":6824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"554171750","text":"import os\nimport 
urllib.request\n\ndef url_open(url):\n    request = urllib.request.Request(url)\n    request.add_header('User-Agent',\n                       'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36')\n    response = urllib.request.urlopen(url)\n    html = response.read()\n    return html\n\ndef get_page(url):\n    html = str(url_open(url).decode('utf-8'))\n    a = html.find('current-comment-page')+23\n    b = html.find(']',a)\n    return html[a:b]\n\ndef find_imgs(page_url):\n    html = str(url_open(page_url).decode('utf-8'))\n    img_addrs = []\n    a = html.find('img src=')\n    while a!=-1:\n        b = html.find('.jpg',a,a+255)\n        if b!=-1:\n            img_addrs.append('http:'+html[a+9:b+4])\n        else:\n            b=a+9\n        a = html.find('img src=',b)\n\n    for each in img_addrs:\n        print(each)\n\n    return img_addrs\n\n\ndef save_imgs(folder, img_addrs):\n    for each in img_addrs:\n        filename = str(each).split('/')[-1]\n        with open(filename,'wb') as file:\n            img = url_open(each)\n            file.write(img)\n\n\ndef download_mz(folder='OOXX', pages=2):\n    os.mkdir(folder)\n    os.chdir(folder)\n\n    url = 'http://jandan.net/ooxx/'\n    page_num = int(get_page(url))\n\n    for i in range(pages):\n        page_num -= i\n        page_url = url + 'page-' + str(page_num) + '#comments'\n        img_addrs = find_imgs(page_url)\n        save_imgs(folder, img_addrs)\n\n    print('Download finished')\n\nif __name__ == \"__main__\":\n    download_mz(pages=2)","sub_path":"study/网络爬虫/爬煎蛋网妹子图/download_mz.py","file_name":"download_mz.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"618163777","text":"import env\nimport argparse\nimport pathlib\nfrom time import sleep\n\n\ndef make_sequence(S):\n    ret = []\n    for s in S:\n        ret.append(list(float(q) for q in s[1:-3].split(',')))\n    return ret\n\n\np = argparse.ArgumentParser()\np.add_argument('task')\np.add_argument('seq')\np.add_argument('--fps', type=int, default=5)\np.add_argument('--fallback', action='store_true')\n\nargs = p.parse_args()\nif args.fps <= 0:\n    raise ValueError('FPS too small')\n\nslp = 1/args.fps\nE, end, emp = env.load_task(pathlib.Path(args.task),\n                            render=True,\n                            fallback=args.fallback)\n\nR = E.robot\n\nwith open(args.seq, 'r') as seqfile:\n    S = seqfile.readlines()\n\nQ = make_sequence(S)\n\nfor q in Q:\n    sleep(slp)\n    R.state = q\n\ninput()\n\ndel R\ndel E\n","sub_path":"seq-viewer.py","file_name":"seq-viewer.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"66682564","text":"# -*- coding:utf-8 -*-\n\nimport threading\nimport logging\nimport sys\nimport io\nimport os\n\nfrom time import sleep\nfrom PIL import Image\nimport numpy as np\nfrom cv2 import cvtColor, Canny\nfrom cv2 import COLOR_BGR2GRAY, bilateralFilter\n\nfrom paint import *\nimport config\n\n\nclass ScreenshotThread(threading.Thread):\n    def __init__(self, screenshot_queue, image_processing_queue):\n        threading.Thread.__init__(self)\n        self.screenshot_queue = screenshot_queue\n        self.image_processing_queue = image_processing_queue\n\n    def run(self):\n        while True:\n            with threading.Lock():\n                dahua = self.screenshot_queue.get()\n                if config.snapshots_enabled:\n                    self.make_snapshots(dahua)\n                self.screenshot_queue.task_done()\n\n    def make_snapshots(self, dahua):\n        model = dahua.model\n        channels_count = dahua.channels_count\n        logging.debug(f' Make snapshot from {dahua.ip} (DM: {dahua.model}, channels: {channels_count})')\n        config.trash_cam[dahua.ip] = 0\n        dead_counter = 0\n        capturing = 0\n
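        # dead_counter tracks consecutive unreachable channels so a dead host is skipped early\n        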
total_channels = config.ch_count\n for channel in range(channels_count):\n # Ускорение / Performance\n if dead_counter > 4 or config.trash_cam[dahua.ip] > 2:\n logging.debug(f' {dead_counter} dead channels in a row. Skipping this cam')\n break\n try:\n jpeg = dahua.get_snapshot(channel)\n dead_counter = 0\n capturing += 1\n name = f\"{dahua.ip}_{dahua.port}_{dahua.login}_{dahua.password}_{channel + 1}_{model}.jpg\"\n grabster = channels_count - capturing\n print(fore_green(f\"Brute progress: [{config.state}] Grabbing snapshots for {dahua.ip}.. \\n\") # Left {str(grabster)} channels.. Trash: {str(config.trash_cam[dahua.ip])}\\n\")\n + back_yellow(f\"Writing snapshots.. Total saved {config.snapshots_counts} from {total_channels}\"), end='\\r')\n sleep(0.05)\n self.image_processing_queue.put([name, jpeg], block=False, timeout=20)\n # self.image_processing(jpeg)\n except Exception as e:\n logging.debug(f' Channel {channel + 1} of {dahua.ip} is dead {str(e)}{\" \"*40}')\n dead_counter += 1\n continue\n logging.debug(\"%s exit from make_snapshots()\" % dahua.ip)\n return\n\n\nclass ImageProcessingThread(threading.Thread):\n def __init__(self, image_processing_queue):\n threading.Thread.__init__(self)\n self.image_processing_queue = image_processing_queue\n\n def run(self):\n while True:\n with threading.Lock():\n # print(self.image_processing_queue.get())\n name, image = self.image_processing_queue.get()\n self.processing(name, image)\n self.image_processing_queue.task_done()\n\n def is_dark(self, image):\n x = np.sum(image)/image.size\n if x < 50:\n return True\n else:\n return False\n\n def is_interesting(self, image):\n gray = cvtColor(image, COLOR_BGR2GRAY)\n gray = bilateralFilter(gray, 11, 17, 17)\n edged = Canny(gray, 30, 200)\n if np.sum(edged[:, :]**2) < 2500:\n return False\n else:\n return True\n\n def processing(self, name, image_bytes):\n n_name = name.split(\"_\")\n n_ip = n_name[0]\n try:\n pil_image = Image.open(io.BytesIO(image_bytes))\n image = np.array(pil_image)\n if self.is_dark(image) or not self.is_interesting(image):\n self.save_image(os.path.join('trash', name), image_bytes)\n config.trash_cam[n_ip] += 1\n return False\n else:\n self.save_image(name, image_bytes)\n config.trash_cam[n_ip] = 0\n return True\n except Exception as e:\n config.trash_cam[n_ip] += 1\n #print(\"PIL Issue: \" + str(e))\n logging.debug(f'{fore_red(\"Cannot save screenshot\")} - {name.split(\"_\")[0]} - {back_red(\"CORRUPTED FILE\")}{\" \"*40}')\n pass\n\n def save_image(self, name, image_bytes):\n n_name = name.split(\"_\")\n n_ip = n_name[0]\n try:\n with open(os.path.join(config.snapshots_folder, name), 'wb') as outfile:\n outfile.write(image_bytes)\n config.snapshots_counts += 1\n logging.debug(f' {fore_green(f\"Saved snapshot - {name}\")}{\" \"*40}')\n except Exception as e:\n config.trash_cam[n_ip] += 1\n #print(\" Outfile: \" + e)\n logging.debug(f'{fore_red(\"Cannot save screenshot\")} - {name}{\" \"*40}')\n","sub_path":"snapshot.py","file_name":"snapshot.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"335172263","text":"import datetime\nimport io\nimport logging\n\nfrom requests.adapters import BaseAdapter\nfrom requests.models import Response\nfrom requests.structures import CaseInsensitiveDict\nfrom requests.utils import get_encoding_from_headers\n\ntry:\n from http.client import responses\nexcept ImportError:\n from httplib import responses\n\ntry:\n from urllib.parse import 
urlparse\nexcept ImportError:\n from urlparse import urlparse\n\ntry:\n timedelta_total_seconds = datetime.timedelta.total_seconds\nexcept AttributeError:\n def timedelta_total_seconds(timedelta):\n return (\n timedelta.microseconds + 0.0 +\n (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Content(object):\n\n def __init__(self, content):\n self._len = len(content)\n self._read = 0\n self._bytes = io.BytesIO(content)\n\n def __len__(self):\n return self._len\n\n def read(self, amt=None):\n if amt:\n self._read += amt\n return self._bytes.read(amt)\n\n def readline(self):\n line = self._bytes.readline()\n self._read += len(line)\n return line\n\n def stream(self, amt=None, decode_content=None):\n while self._read < self._len:\n yield self.read(amt)\n\n def release_conn(self):\n pass\n\n\nclass WSGIAdapter(BaseAdapter):\n server_protocol = 'HTTP/1.1'\n wsgi_version = (1, 0)\n\n def __init__(self, app, multiprocess=False, multithread=False, run_once=False, log_function=None):\n self.app = app\n self.multiprocess = multiprocess\n self.multithread = multithread\n self.run_once = run_once\n self._log = log_function or self._log\n self.errors = io.BytesIO()\n\n def send(self, request, *args, **kwargs):\n start = datetime.datetime.utcnow()\n\n urlinfo = urlparse(request.url)\n\n if not request.body:\n data = b''\n # requests>=2.11.0 makes request body a bytes object which no longer needs\n # encoding\n elif isinstance(request.body, bytes):\n data = request.body\n else:\n data = request.body.encode('utf-8')\n\n environ = {\n 'CONTENT_TYPE': request.headers.get('Content-Type', 'text/plain'),\n 'CONTENT_LENGTH': len(data),\n 'PATH_INFO': urlinfo.path,\n 'REQUEST_METHOD': request.method,\n 'SERVER_NAME': urlinfo.hostname,\n 'QUERY_STRING': urlinfo.query,\n 'SERVER_PORT': urlinfo.port or ('443' if urlinfo.scheme == 'https' else '80'),\n 'SERVER_PROTOCOL': self.server_protocol,\n 'wsgi.version': self.wsgi_version,\n 'wsgi.url_scheme': urlinfo.scheme,\n 'wsgi.input': Content(data),\n 'wsgi.errors': self.errors,\n 'wsgi.multiprocess': self.multiprocess,\n 'wsgi.multithread': self.multithread,\n 'wsgi.run_once': self.run_once,\n 'wsgi.url_scheme': urlinfo.scheme,\n }\n\n environ.update(dict(\n ('HTTP_{0}'.format(name).replace('-', '_').upper(), value)\n for name, value in request.headers.items()\n ))\n\n response = Response()\n\n def start_response(status, headers):\n response.status_code = int(status.split(' ')[0])\n response.reason = responses.get(response.status_code, 'Unknown Status Code')\n response.headers = CaseInsensitiveDict(headers)\n response.encoding = get_encoding_from_headers(response.headers)\n response.elapsed = datetime.datetime.utcnow() - start\n self._log(response)\n\n response.request = request\n response.url = request.url\n\n response.raw = Content(b''.join(self.app(environ, start_response)))\n\n return response\n\n def close(self):\n pass\n\n def _log(self, response):\n if response.status_code < 400:\n log = logger.info\n elif response.status_code < 500:\n log = logger.warning\n else:\n log = logger.error\n\n summary = '{status} {method} {url} ({host}) {time}ms'.format(\n status=response.status_code,\n method=response.request.method,\n url=response.request.path_url,\n host=urlparse(response.url).hostname,\n time=round(timedelta_total_seconds(response.elapsed) * 1000, 2),\n )\n\n 
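# route the one-line request summary to the severity level chosen above\n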
        log(summary)\n","sub_path":"wsgiadapter.py","file_name":"wsgiadapter.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"542827232","text":"import io\nimport string\nimport subprocess as sp\n\ndef shell_exec(cmd):\n    p = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE)\n    return p.stdout\n\n\ndef shell_get_strings(file, prefix):\n    cmd = ['strings', file]\n    sio = io.StringIO(shell_exec(cmd).decode())\n    strings = []\n    for line in sio:\n        strings.append(line.rstrip())\n    strings.sort()\n    return strings\n\n\ndef get_strings(data, prefix):\n    s = \"\"\n    printable = set(string.printable)\n    strings = set()\n    i = 0\n    for c in data:\n        if 0 == i % 2**20:\n            T(\"%d\", i >> 20)  # 'T' is an external trace/log helper, not defined in this module\n        if c in printable:\n            s += c\n        else:\n            if \"\" != s:\n                if s.startswith(prefix):\n                    strings.add(s)\n                s = \"\"\n        i += 1\n    strings = list(strings)\n    strings.sort()\n    return strings\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"415590879","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 6 18:29:39 2015\n\n@author: npingel\n\"\"\"\nimport numpy as np\nfrom astropy.io import fits\nimport matplotlib.pyplot as pyplot\nimport matplotlib\n\nmatplotlib.rc('font', family='sans-serif')\nmatplotlib.rc('font', serif='Helvetica Neue')\nmatplotlib.rc('text', usetex='false')\nmatplotlib.rcParams.update({'font.size': 14})\n\nsourceList = ['NGC891','NGC925','NGC4414', 'NGC4565']\ndistList = [9.2,9.1,17.8,10.8]\n\nfor sourceIdx in range(0,len(sourceList)):\n    src = sourceList[sourceIdx]\n    dist = distList[sourceIdx]\n    print(\"Analyzing source: \"+src)\n    infData = '/Users/npingel/Desktop/Research/GBT-HALOGAS/MOM0/'+src+'/'+src+'_WSRT_MOM0_CONTSUB_SM.FITS'\n    sdData = '/Users/npingel/Desktop/Research/GBT-HALOGAS/MOM0/'+src+'/'+src+'_GBT_MOM0_CONTSUB_SM.FITS'\n    intNoiseData = '/Users/npingel/Desktop/Research/GBT-HALOGAS/MOM0/'+src+'/'+src+'_WSRT_HI_NOISE_MAP_SM.FITS'\n    sdNoiseData = '/Users/npingel/Desktop/Research/GBT-HALOGAS/MOM0/'+src+'/'+src+'_GBT_HI_NOISE_MAP_SM.FITS'\n\n    sdHduList = fits.open(sdData)\n    interHduList = fits.open(infData)\n    sdErrHduList = fits.open(sdNoiseData)\n    intErrHduList = fits.open(intNoiseData)\n    if src == 'NGC891':\n        dv = 8.24\n    else:\n        dv = 4.12\n\n    sdImage = np.nan_to_num(sdHduList[0].data)*1.82e18*dv\n    intImage = np.nan_to_num(interHduList[0].data)*1.82e18*dv\n    intNoiseIm = np.nan_to_num(intErrHduList[0].data)\n    sdNoiseIm = np.nan_to_num(sdErrHduList[0].data)\n    interXPixSize = interHduList[0].header['CDELT1']*3600\n    interYPixSize = interHduList[0].header['CDELT2']*3600\n    minHI = np.max(intNoiseIm)*dv*1.82e18*3\n\n\n    pixArea = 16/3600.\n    pixLinScale = 2*dist*np.tan(np.deg2rad(4./3600./2))*1e3\n    pixPhysArea = pixLinScale**2\n\n    sdXSize = sdImage.shape[1]\n    sdYSize = sdImage.shape[0]\n\n    infXSize = intImage.shape[1]\n    infYSize = intImage.shape[0]\n\n    levs = np.logspace(np.log10(minHI),20.0,10,endpoint=False)\n    sdAngLevs = []\n    sdAngLevsErr = []\n    intAngLevs = []\n    intAngLevsErr = []\n\n    for ind in range(len(levs)):\n        print('Determining cumulative Area of level: '+str(levs[ind]))\n        sdCnt = 0.\n        intCnt = 0.\n        for i in range(infXSize):\n            for j in range(infYSize):\n                intVal = intImage[j,i]\n                noiseVal = intNoiseIm[j,i]\n                if intVal >= levs[ind]:\n                    intCnt+=1\n\n\n        for i in range(sdXSize):\n            for j in range(sdYSize):\n                sdVal = sdImage[j,i]\n                if sdVal >= levs[ind] and sdVal > 0:\n
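                    # count single-dish (GBT) pixels at or above this N_HI level\n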
                    sdCnt+=1\n\n        sdAngLevs.append(sdCnt*pixPhysArea)\n        sdAngLevsErr.append(np.sqrt(sdCnt)*pixPhysArea)\n        intAngLevs.append(intCnt*pixPhysArea)\n        intAngLevsErr.append(np.sqrt(intCnt)*pixPhysArea)\n\n\n## print out area Levs\n#print('The GBT area in total sq. arcmins : ', sdMassLevs)\n#print('The WSRT area in total sq. arcmins : ', intMassLevs)\n#print('The ratio of the GBT and WSRT areas ', np.divide(sdMassLevs,intMassLevs))\n\n    sdAreaLevsErr_Log = np.array(sdAngLevsErr)/(10*np.log(10.))\n    infAreaLevsErr_Log = np.array(intAngLevsErr)/(10*np.log(10.))\n\n##Plot\n    pyplot.figure()\n    pyplot.xlabel(r'Log$_{10}$(N$_{HI}$) Level [cm$^{-2}$]')\n    pyplot.ylabel('Cumulative Area [kpc$^2$]')\n    pyplot.title('Cumulative HI Area')\n    pyplot.xlim(17,20.5)\n    pyplot.errorbar(np.log10(levs),sdAngLevs,yerr=sdAngLevsErr,color='black', fmt='s',label='GBT')\n    pyplot.errorbar(np.log10(levs),sdAngLevs,yerr=sdAngLevsErr,color='black')\n    pyplot.errorbar(np.log10(levs),intAngLevs,yerr=intAngLevsErr,color='black', fmt='--o', label='WSRT')\n    pyplot.legend(loc=0,prop={'size':14},fontsize='large')\n    pyplot.errorbar(np.log10(levs),sdAngLevs,yerr=sdAngLevsErr,color='black',fmt='s')\n    pyplot.errorbar(np.log10(levs),sdAngLevs,yerr=sdAngLevsErr,color='black')\n    pyplot.errorbar(np.log10(levs),intAngLevs,yerr=intAngLevsErr,fmt='--o',color='black')\n    pyplot.savefig('/Users/npingel/Desktop/'+src+'Area_NHI.pdf', bbox_inches='tight')\n    pyplot.show()\n\n","sub_path":"PxArea.py","file_name":"PxArea.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"137377933","text":"import os\r\nimport sys\r\nimport datetime\r\nimport time\r\n\r\ndef main():\r\n    f = open('test.txt', 'r')\r\n    o = open('test-fixed.txt', 'w')\r\n    for line in f.readlines():\r\n        line = line.rstrip(\"\\r\\n\")\r\n        line = line.replace('\"', '\\\\\"')\r\n        o.write(line + \"\\n\")\r\n    f.close()\r\n    o.close()\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"Python/rmLine/fixFile.py","file_name":"fixFile.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"412953550","text":"import sqlite3\r\nfrom math import*\r\nimport numpy\r\nimport scipy\r\nfrom numpy import*\r\nfrom scipy import spatial\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#Create connection to the db\r\ndb_name = 'test.db'\r\n# Try to connect\r\n\t\r\nconn = sqlite3.connect(db_name)\r\nconn.text_factory = str\r\nc = conn.cursor()\r\n\r\nclass Data:\r\n\tdef __init__(self,jobs,goals):\r\n\t\tself.jobs = jobs\r\n\t\tself.goals = goals\r\n\t\t\r\nclass Job:\r\n\tdef __init__(self,name,current_weight,recommended_weight,recommended_rank,max_growth,max_security,max_exploration,max_innovation,max_education):\r\n\t\tself.name = name\r\n\t\tself.current_weight = current_weight\r\n\t\tself.recommended_rank = recommended_rank;\r\n\t\tself.recommended_weight = recommended_weight;\r\n\t\tself.goals = [max_growth,max_security,max_exploration,max_innovation,max_education];\r\n\t\tself.max_growth = max_growth\r\n\t\tself.max_security = max_security\r\n\t\tself.max_exploration = max_exploration\r\n\t\tself.max_innovation = max_innovation\r\n\t\tself.max_education = max_education\r\n\t\t\r\nclass Goal:\r\n\tdef __init__(self,name,value):\r\n\t\tself.name = name\r\n\t\tself.value = value\r\n\r\n\r\n#Helper functions---------------------------------------------------------\r\ndef initialize_db(job_goals_values):\r\n\t# Create table\r\n\tc.execute('''CREATE
 TABLE goals\r\n\t\t\t\t (uid real,name text, value text)''')\r\n\t\t\t\t \r\n\tc.execute('''CREATE TABLE skills_recommendation\r\n\t\t\t\t (uid real,skill text)''')\r\n\t\t\t\t \r\n\tc.execute('''CREATE TABLE job_weight\r\n\t\t\t\t (uid real,name text, \r\n\t\t\t\t current_weight real, \r\n\t\t\t\t current_rank real, \r\n\t\t\t\t recommended_weight real, \r\n\t\t\t\t recommended_rank real, \r\n\t\t\t\t max_growth real, \r\n\t\t\t\t max_security real, \r\n\t\t\t\t max_exploration real, \r\n\t\t\t\t max_innovation real, \r\n\t\t\t\t max_education real)''')\r\n\r\n\t# Insert a row of data into goals table\r\n\tc.execute(\"INSERT INTO goals VALUES (0,'Security',1)\")\r\n\tc.execute(\"INSERT INTO goals VALUES (1,'Wealth',3)\")\r\n\tc.execute(\"INSERT INTO goals VALUES (2,'Exploration',4)\")\r\n\tc.execute(\"INSERT INTO goals VALUES (3,'Innovation',1)\")\r\n\tc.execute(\"INSERT INTO goals VALUES (4,'Expansion',1)\")\r\n\r\n\r\n\t# Insert a row of data into job_weight\r\n\tc.execute(\"INSERT INTO job_weight VALUES (0,'Maintenance Worker',0.16,0,0.16,0,'%d','%d','%d','%d','%d')\" %(job_goals_values[0][0],job_goals_values[0][1],job_goals_values[0][2],job_goals_values[0][3],job_goals_values[0][4]))\r\n\tc.execute(\"INSERT INTO job_weight VALUES (1,'Lead Process Planner',0.16,1,0.16,1,'%d','%d','%d','%d','%d')\" %(job_goals_values[1][0],job_goals_values[1][1],job_goals_values[1][2],job_goals_values[1][3],job_goals_values[1][4]))\r\n\tc.execute(\"INSERT INTO job_weight VALUES (2,'Ressource Excavator',0.16,2,0.16,2,'%d','%d','%d','%d','%d')\" %(job_goals_values[2][0],job_goals_values[2][1],job_goals_values[2][2],job_goals_values[2][3],job_goals_values[2][4]))\r\n\tc.execute(\"INSERT INTO job_weight VALUES (3,'Medic',0.16,3,0.16,3,'%d','%d','%d','%d','%d')\" %(job_goals_values[3][0],job_goals_values[3][1],job_goals_values[3][2],job_goals_values[3][3],job_goals_values[3][4]))\r\n\tc.execute(\"INSERT INTO job_weight VALUES (4,'Harvester',0.16,4,0.16,4,'%d','%d','%d','%d','%d')\" %(job_goals_values[4][0],job_goals_values[4][1],job_goals_values[4][2],job_goals_values[4][3],job_goals_values[4][4]))\r\n\tc.execute(\"INSERT INTO job_weight VALUES (5,'Guardian',0.16,5,0.16,5,'%d','%d','%d','%d','%d')\" %(job_goals_values[5][0],job_goals_values[5][1],job_goals_values[5][2],job_goals_values[5][3],job_goals_values[5][4]))\r\n\t\t\r\n\r\n\tc.execute(\"INSERT INTO skills_recommendation VALUES (0,'Structure')\")\r\n\tc.execute(\"INSERT INTO skills_recommendation VALUES (1,'Electricity')\")\r\n\tc.execute(\"INSERT INTO skills_recommendation VALUES (2,'Machine')\")\r\n\tc.execute(\"INSERT INTO skills_recommendation VALUES (3,'Problem Solving')\")\r\n\tc.execute(\"INSERT INTO skills_recommendation VALUES (4,'Programming')\")\r\n\tc.execute(\"INSERT INTO skills_recommendation VALUES (5,'Battle')\")\r\n\r\n\t# Save (commit) the changes\r\n\tconn.commit()\r\n\r\ndef get_goal_value(uid,col):\r\n\tc.execute('SELECT ({coi}) FROM {tn} WHERE uid=\"{cn}\"'.\\\r\n        format(coi=col, tn='goals', cn=uid))\r\n\t\r\n\treturn c.fetchone()[0]\t\r\n\t\r\ndef get_job_value(uid,col):\r\n\tc.execute('SELECT ({coi}) FROM {tn} WHERE uid=\"{cn}\"'.\\\r\n\t\tformat(coi=col, tn='job_weight', cn=uid))\r\n\treturn c.fetchone()[0]\r\n\t\r\ndef populate_jobs(job_count):\r\n\tjobs = []\r\n\tfor i in range(0,job_count):\r\n\t\tname = get_job_value(i,\"name\");\r\n\t\tcurrent_weight = get_job_value(i,\"current_weight\");\r\n\t\tcurrent_rank = get_job_value(i,\"current_rank\");\r\n\t\trecommended_rank = 
get_job_value(i,\"recommended_rank\");\r\n\t\trecommended_weight = get_job_value(i,\"recommended_weight\");\r\n\t\tmax_growth = get_job_value(i,\"max_growth\");\r\n\t\tmax_security = get_job_value(i,\"max_security\");\r\n\t\tmax_exploration = get_job_value(i,\"max_exploration\");\r\n\t\tmax_innovation = get_job_value(i,\"max_innovation\");\r\n\t\tmax_education = get_job_value(i,\"max_education\");\r\n\t\tjob = Job(name,current_weight,recommended_weight,recommended_rank,max_growth,max_security,max_exploration,max_innovation,max_education)\r\n\t\tjobs.append(job)\r\n\treturn jobs\r\n\t\r\ndef populate_goals(goals_count):\r\n\tgoals = []\r\n\tfor i in range(0,goals_count):\r\n\t\tname = get_goal_value(i,\"name\");\r\n\t\tvalue = get_goal_value(i,\"value\");\r\n\t\tgoal = Goal(name,value)\r\n\t\tgoals.append(goal)\r\n\treturn goals\t\r\n\r\ndef populate_goals_value():\r\n\tgoals_array = []\r\n\t#Security, Wealth, Exploration, Innovation and Expansion\r\n\tgoals_array.append([1,2,2,3,2])\r\n\tgoals_array.append([1,1,1,3,4])\r\n\tgoals_array.append([1,4,3,1,1])\r\n\tgoals_array.append([3,3,1,2,1])\r\n\tgoals_array.append([1,3,3,1,2])\r\n\tgoals_array.append([5,2,1,1,1])\r\n\treturn goals_array\r\n\r\n\t\r\ndef populate_skill_array():\r\n\tskills_array = []\r\n\tskills_array.append([1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0])\r\n\tskills_array.append([1,0,1,1,0,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0])\r\n\tskills_array.append([0,1,1,0,1,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0])\r\n\tskills_array.append([0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,1,1])\r\n\tskills_array.append([0,0,0,0,0,0,0,1,0,0,1,0,0,1,1,1,0,0,1,0])\r\n\tskills_array.append([0,0,0,1,0,1,0,0,0,1,0,0,0,0,1,1,0,0,0,1])\r\n\treturn skills_array\r\n\t\r\ndef populate_skill_dict():\r\n\tskill_dict = {}\r\n\tskill_name = ['Structure','Electricity','Machine','Problem Solving','Programming',\r\n\t\t\t\t\t'Battle','Geography','Planning','Project Management','Peace Keeping',\r\n\t\t\t\t\t'Geology','Materials','Research','Biology','Manual Labor','Health',\r\n\t\t\t\t\t'Cleaning','Pharmacy','Nutrition','Psychology']\r\n\tfor i in range(0,20):\r\n\t\tskill_dict[i] = skill_name[i]\r\n\treturn skill_dict\r\n\t\r\ndef update_goals(new_goals):\r\n\tindex = 0\r\n\tfor goals in new_goals:\r\n\t\tc.execute(\"UPDATE goals SET value = '%d' WHERE uid = %d\" %(goals,index));\r\n\t\tindex = index + 1\r\n\tconn.commit();\r\n\t\r\ndef update_recommendation(skill_dict,ranked_skills):\r\n\tfor index in range(0,6):\r\n\t\tc.execute(\"UPDATE skills_recommendation SET skill = '%s' WHERE uid = %d\" %(skill_dict[ranked_skills[index][0]],index));\r\n\tconn.commit();\r\n\t\r\ndef update_job_col(job_ranking):\r\n\tindex = 0\r\n\tfor job_tuple in job_ranking:\r\n\t\tc.execute(\"UPDATE job_weight SET recommended_rank = '%f' WHERE uid= %d\" %(index,job_tuple[0]));\r\n\t\tc.execute(\"UPDATE job_weight SET recommended_weight = '%f' WHERE uid= %d\" %(job_tuple[1],job_tuple[0]));\r\n\t\tindex = index + 1\r\n\tconn.commit();\r\n\t\r\n\r\n#-------------------------------------------------------------------------\r\n\r\n#Job Ranking algorithm----------------------------------------------------\r\ndef calculate_euclidian_distance(target,current):\r\n\tA = numpy.array(target)\r\n\tB = numpy.array(current)\r\n\treturn scipy.spatial.distance.euclidean(A,B)\r\n\t\r\ndef rank_jobs(data):\r\n\tjob_ranking = {}\r\n\tdata_goals = []\r\n\ttotal = 0;\r\n\tfor goal in data.goals:\r\n\t\tdata_goals.append(float(goal.value));\r\n\t\r\n\ti = 0\r\n\tfor job in data.jobs:\r\n\t\tjob_ranking[i] = 
calculate_euclidian_distance(data_goals,job.goals)\r\n\t\ttotal = total + job_ranking[i]\r\n\t\ti += 1\r\n\tjob_ranking = {key: total-value for key, value in job_ranking.iteritems()}\t\r\n\tjob_ranking = sorted(job_ranking.items(), key=lambda t: t[1])\r\n\treturning_job_rank = {}\r\n\t\r\n\tindex = 0\r\n\ttotal = 0;\r\n\tfor job_tuple in job_ranking:\r\n\t\ttotal += job_tuple[1];\r\n\t\r\n\tfor job_tuple in job_ranking:\r\n\t\treturning_job_rank[job_tuple[0]]=(job_tuple[1]/total)\r\n\t\tindex = index + 1\r\n\t\t\r\n\treturning_job_rank = sorted(returning_job_rank.items(), key=lambda t: t[1],reverse=True)\r\n\treturn returning_job_rank\r\n#-------------------------------------------------------------------------\r\n\t\r\n#Skill Ranking Calculation\r\ndef calculate_skill_ranking(aggregated_skills,job_ranking,job_count,skill_count,data):\r\n\tcumulative_skills_score = {}\r\n\tfor i in range(0,skill_count):\r\n\t\tcumulative_skills_score[i] = 0\r\n\t\t\r\n\tfor i in range(0,job_count):\r\n\t\tfor j in range(0,skill_count):\r\n\t\t\tcumulative_skills_score[j] += (job_ranking[i][1] -data.jobs[i].recommended_weight)*(aggregated_skills[i][j])\r\n\t\r\n\tcumulative_skills_score= {key: value for key, value in cumulative_skills_score.iteritems()}\t\r\n\tcumulative_skills_score = sorted(cumulative_skills_score.items(), key=lambda t: t[1],reverse=True)\r\n\treturn cumulative_skills_score\r\n\t\t\r\n\r\n#Initialize the database\r\n#initialize_db(populate_goals_value())\r\n\r\n#Initialize the data and Simple euclidian distance\r\njob_count = 6\r\ngoals_count = 5\r\ndata = Data(populate_jobs(job_count),populate_goals(goals_count))\r\nupdate_goals([5,1,1,1,2])\r\njob_ranking = rank_jobs(data)\r\nindex = 0\r\nprint(\"Goals inputed : Security = 5, Wealth = 1, Exploration = 1 , Innovation = 1, Expension = 2\")\r\n\r\nprint(\"Job ranking :\")\r\nfor job_tuple in job_ranking:\r\n\tprint('Job : %s ranking is %d with new target = %f%%' %(data.jobs[job_tuple[0]].name,index,job_tuple[1]))\r\n\tindex = index + 1\r\nupdate_job_col(job_ranking);\r\n\t\r\n\r\n#Skill selection:\r\nskill_array = populate_skill_array()\r\nskill_count = 20\r\nranked_skills = calculate_skill_ranking(skill_array,job_ranking,job_count,skill_count,data)\r\n\r\nskill_dict = populate_skill_dict()\r\nfor i in range(0,skill_count):\r\n\tprint(\"Skill : %s is ranked %d\" %(skill_dict[ranked_skills[i][0]],i))\r\nupdate_recommendation(skill_dict,ranked_skills)\r\n\r\n\r\n\r\n#Show the plot with matplotlib\r\n#ranked_skills and skill_dict, got the skill list ranked\r\n#job_ranking is where my job are in ranked order\r\n\r\ndata = Data(populate_jobs(job_count),populate_goals(goals_count))\r\nrecommended_weight_plot = []\r\ncurrent_weight_plot = []\r\nfor i in range(0,6):\r\n\trecommended_weight_plot.append(data.jobs[i].recommended_weight);\r\n\tcurrent_weight_plot.append(data.jobs[i].current_weight)\r\nrecommended_weight_plot = numpy.array(recommended_weight_plot)\r\ncurrent_weight_plot = numpy.array(current_weight_plot)\r\n\r\n\r\nlabels = ['test','Maintenance Worker','Lead Process Planner','Ressource Excavator','Medic','Harvester','Guardian']\r\ndata = [current_weight_plot,\r\n recommended_weight_plot]\r\n\r\nf, (ax1, ax2) = plt.subplots(2, 1, sharey=True)\r\nX = numpy.arange(6)\r\nrects_1 = ax1.bar(X -0.125, data[0], color = 'g', width = 0.25,label='Current')\r\nrects_2 = ax1.bar(X + 0.125, data[1], color = 'gold', width = 
0.25,label='Recommended')\r\nax1.set_ylim([min(recommended_weight_plot)-0.002,max(recommended_weight_plot)+0.01])\r\nax1.set_xticklabels(labels)\r\nax1.set_title('Comparison of Actual and Recommended Job Partition')\r\nax1.legend(loc='upper left')\r\n\r\ndef autolabel(rects,data):\r\n\tindex = 0\r\n\tfor rect in rects:\r\n\t\theight = data[index]\r\n\t\tax1.text(rect.get_x() + rect.get_width()/2., 1.01*height,\r\n\t\t\t'%.2f' % (height),\r\n\t\t\tha='center', va='bottom')\r\n\t\tindex += 1\r\n\t\t\t\t\t\r\nautolabel(rects_1,data[0])\r\nautolabel(rects_2,data[1])\r\n\r\n# Pie chart, where the slices will be ordered and plotted counter-clockwise:\r\nlabels = []\r\nlabels.append('test')\r\nfor i in range(0,6):\r\n\tlabels.append(skill_dict[ranked_skills[i][0]])\r\n\t\r\nlabel_value = max(recommended_weight_plot)\r\ndata = [label_value, label_value, label_value, label_value, label_value,label_value]\r\n\r\nax2.bar(X, data[0], color = 'b', width = 0.25)\r\nax2.set_xticklabels(labels)\r\nax2.set_ylim([min(recommended_weight_plot)-0.002,max(recommended_weight_plot)+0.01])\r\nax2.set_title('Proposed Skills to Promote')\r\nmng = plt.get_current_fig_manager()\r\nmng.window.state('zoomed') #works fine on Windows!\r\n\r\nplt.show()\r\n\r\n\r\n\r\nconn.close()\r\n","sub_path":"ai part/repartition_hack.py","file_name":"repartition_hack.py","file_ext":"py","file_size_in_byte":12050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"242591291","text":"\"\"\"\ntcp server\n\"\"\"\n\nfrom socket import *\n# tcp socket\ntcp_socket_s = socket(AF_INET,SOCK_STREAM)\ntcp_socket_s.bind(('0.0.0.0',65535))\n\n# listen(): the backlog sets the length of the pending-connection queue\ntcp_socket_s.listen(20)\n\n# wait to accept a client\nwhile 1:\n    print('waiting for connect...')\n    c_connfd,c_addr = tcp_socket_s.accept()  # pairs with the client's connect()\n    print('ok, connect from', c_addr, 'created client connection socket', c_connfd)\n\n\n    # receive and send back\n    while 1:\n        re_data = c_connfd.recv(5)\n        print('received message from', c_addr, ':', re_data.decode())\n        c_connfd.send(b'ok,thanks')  # may raise BrokenPipeError if the peer has closed\n        # peer closed, either deliberately or abnormally\n        if re_data == b\"##\" :\n            print(c_addr, 'quit')\n            break\n        elif not re_data:\n            print(c_addr, 'disconnected unexpectedly')\n            break\n\n    c_connfd.close()\n\n\n","sub_path":"fancy_month02/day11_tcp/day11_0111_note/tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"308936886","text":"\"\"\"\nLayer Parser\n\nShould define a layer as a combination of rule selectors\nand a sequence of agendas\n\neg:\nΓ::a.layer:\n\t| $pipeline_var |\n\n\t;; *must* only produce a single context?\n\tan.agenda.set.$agenda(::Σ)?\n\ta.rule.selector.$x?\n\tanother.rule.selector.$y?\n\n\tdfs(@x, (::ρ, #a_tag)) -> $z(::[ρ])\n\tleaves(@y, (::ρ)) -> $q(::[ρ])\n\tmerge($z, $q) -> $i\n\tLayerRunRules($i) -> $b\n\tLayerRunAgenda($agenda, $b) -> $c\n\n\tLayerPerform($c)\nend\n\n\"\"\"\nimport logging as root_logger\nimport pyparsing as pp\n\nfrom acab.config import AcabConfig\n\nfrom acab.abstract.parsing import util as PU\nfrom acab.abstract.rule.production_operator import ProductionContainer\nfrom acab.abstract.pipeline.layer import Layer, make_layer\n\nlogging = root_logger.getLogger(__name__)\n\nutil = AcabConfig.Get()\nQUERY_S = util(\"Parsing.Structure\", \"QUERY_S\")\nTRANSFORM_S = util(\"Parsing.Structure\", \"TRANSFORM_S\")\nACTION_S = util(\"Parsing.Structure\", \"ACTION_S\")\n\nHOTLOAD_BASIC_SEN = pp.Forward()\nHOTLOAD_QUERY = pp.Forward()\nHOTLOAD_TRANSFORM = 
pp.Forward()\nHOTLOAD_ACTION = pp.Forward()\n\n# Layers should be a special case of rule\nconditions = PU.N(QUERY_S , HOTLOAD_QUERY + PU.gap)\ntransforms = PU.N(TRANSFORM_S , HOTLOAD_TRANSFORM + PU.gap)\nvar_setting = PU.NG(ACTION_S , HOTLOAD_ACTION + PU.component_gap)\n\nlayer_body = PU.op(conditions) + PU.op(transforms) + PU.op(var_setting)\n\nlayer_stmt = PU.STATEMENT_CONSTRUCTOR(PU.LAYER_HEAD,\n HOTLOAD_BASIC_SEN,\n layer_body)\n\nlayer_body.setParseAction(make_layer)\n\nparse_point = layer_stmt\n# parse_point.setFailAction(lambda s, loc, expr, err: print(\"{}\\n{}\".format(str(err), err.markInputline())))\n\ndef parseString(s):\n return parse_point.parseString(s)\n","sub_path":"acab/modules/structures/layer/LayerParser.py","file_name":"LayerParser.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"235800783","text":"#!/usr/bin/env python3\n\nimport sys\nimport markdown2\nimport requests\nfrom jinja2 import Template\nfrom bleach import linkify\n\nimport siteconf\n\nhtml_context = {'menu': siteconf.menu, 'menu_active': 'Home'}\n\nwith open('index.html', 'w') as idx:\n\n r = requests.get(\n 'https://github.com/alttch/pptop/blob/master/README.md?raw=true')\n if not r.ok:\n raise RuntimeError('http code {}'.format(r.code))\n\n data = markdown2.markdown(r.text)\n\n with open('tpl/index_header.html') as fh:\n template = Template(fh.read())\n idx.write(template.render(html_context))\n\n data = ('

' +\n data[data.find('ppTOP is'):data.find('p.s. Code in ')].replace(\n 'shell', ''))\n\n for d in data.split('\\n'):\n if d.find('\"asciicast\"') != -1:\n asciinema_id = d.split('\"')[1].split('/')[-1]\n d = ((\n '

').format(i=asciinema_id))\n elif d.find(' href ') == -1 and d.find('https://') != -1:\n d = linkify(d).replace(' rel=\"nofollow\"', '')\n d = d.replace('?@[\\\\]^_`{|}~\\''\n\ndef _convert_data(data):\n\tdata = data.lower()\n\tfor char in EN_BLACKLIST:\n\t\tdata = data.replace(char, \"\")\n\treturn data\n\ndef _read_words(filename):\n\twith tf.gfile.GFile(filename, \"r\") as f:\n\t\tdata = _convert_data(f.read())\n\t\treturn data.split()\n\ndef _build_vocab(filename):\n\tdata = _read_words(filename)\n\tcounter = collections.Counter(data)\n\tcount_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n\n\twords, _ = list(zip(*count_pairs))\n\tword_to_id = dict(zip(words, range(len(words))))\n\n\treturn word_to_id\n\ndef _read_line(filename):\n\twith tf.gfile.GFile(filename, \"r\") as f:\n\t\treturn [_convert_data(x.strip()) for x in f]\n\ndef _read_words_line(data):\n\treturn [line.split() for line in data]\n\ndef _file_to_word_ids(filename, word_to_id):\n\tdata = _read_line(filename)\n\tdt = []\n\tfor line in _read_words_line(data):\n\t\tdt.append([word_to_id[word] for word in line])\n\treturn dt\n\ndef ptb_raw_data(data_path=None):\n\n\tFAQ_path = os.path.join(data_path, \"FAQ_data.txt\")\n\n\tword_to_id = _build_vocab(FAQ_path)\n\tFAQ_id = _file_to_word_ids(FAQ_path, word_to_id)\n\tFAQ_string = _read_line(FAQ_path)\n\n\treturn FAQ_id, FAQ_string, word_to_id","sub_path":"ChatbotSimple/src/Reader.py","file_name":"Reader.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"362161199","text":"import pytest\n\nimport pygorpho as pg\nimport numpy as np\n\ndef test_dilate():\n vol = np.zeros((7,7,7))\n vol[3,3,3] = 1\n\n lineSteps = np.array([[1,0,0],[0,1,0],[0,0,1]])\n lineLens = np.array([3, 4, 5])\n\n expected = np.zeros((7,7,7))\n expected[2:5,2:6,1:6] = 1\n\n actual1 = pg.flat.linear_morph(vol, lineSteps, lineLens, pg.DILATE)\n\n np.testing.assert_equal(actual1, expected)\n\n actual2 = pg.flat.linear_dilate(vol, lineSteps, lineLens)\n np.testing.assert_equal(actual2, expected)\n\n\ndef test_erode():\n vol = np.ones((7,7,7))\n vol[3,3,3] = 0\n\n lineSteps = np.array([[1,0,0],[0,1,0],[0,0,1]])\n lineLens = np.array([3, 4, 5])\n\n expected = np.ones((7,7,7))\n expected[2:5,2:6,1:6] = 0\n\n actual1 = pg.flat.linear_morph(vol, lineSteps, lineLens, pg.ERODE)\n np.testing.assert_equal(actual1, expected)\n\n actual2 = pg.flat.linear_erode(vol, lineSteps, lineLens)\n np.testing.assert_equal(actual2, expected)\n\n\ndef test_invalid_op():\n with pytest.raises(AssertionError):\n pg.flat.linear_morph([], [], [], 99)\n\n\ndef test_resize():\n vol = np.ones((3,3))\n line_steps = np.array([1,0,0])\n line_lens = np.ones(1)\n res = pg.flat.linear_dilate(vol, line_steps, line_lens)\n assert res.shape == vol.shape\n\n\ndef test_non_numpy_input():\n vol = [0, 0, 1, 0, 0]\n line_steps = [0, 1, 0]\n line_lens = 3\n expected = [0, 1, 1, 1, 0]\n actual = pg.flat.linear_dilate(vol, line_steps, line_lens)\n np.testing.assert_equal(actual, expected)\n\n\ndef test_valid_dims():\n vol = []\n line_steps = 1\n line_lens = 1\n with pytest.raises(AssertionError):\n pg.flat.linear_dilate(vol, line_steps, line_lens)\n","sub_path":"tests/test_flat_linear.py","file_name":"test_flat_linear.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"418671675","text":"import ROOT\nimport numpy\nimport scipy\nimport 
StatisticalTest\nimport scipy.special  # gammaln lives in the scipy.special submodule, which must be imported explicitly\n\nclass LogLikelihoodTest(StatisticalTest.StatisticalTest) :\n\n    def __init__(self) :\n        StatisticalTest.StatisticalTest.__init__(self)\n\n    def doTest(self, dataHist, bkgHist, firstBinToUse, lastBinToUse) :\n\n        dataCore = dataHist.histogram\n        bkgCore = bkgHist.histogram\n\n        assert dataCore.GetNbinsX() == bkgCore.GetNbinsX()\n\n        # Find first and last bins with data\n        # If reasonable, overwrite with user's choice\n        firstBin = dataHist.firstBinWithData\n        lastBin = dataHist.lastBinWithData\n        if firstBinToUse>0 and firstBinToUse > firstBin and firstBinToUse < lastBin : firstBin = firstBinToUse\n        if lastBinToUse > firstBinToUse and lastBinToUse>0 and lastBinToUse > firstBin and lastBinToUse < lastBin :\n            lastBin = lastBinToUse\n\n        answer = 0.0\n        for bin in range (firstBin, lastBin+1) :\n\n            if self.excludeWindow :\n                if (bin > self.firstBinToExclude - 1 and bin < self.lastBinToExclude+1) : continue\n\n            data = dataCore.GetBinContent(bin)\n            if (data==0) : continue\n            bkg = bkgCore.GetBinContent(bin)\n            deltaB = bkgCore.GetBinError(bin)\n\n            if data < 0.0 or bkg < 0.0 :\n                thisterm = -1E10\n            elif data == 0.0 :\n                thisterm = -1.0*bkg\n            else :\n                thisterm = data * numpy.log(bkg) - bkg - scipy.special.gammaln(data+1)\n            answer = answer + thisterm\n\n        return answer","sub_path":"LogLikelihoodTest.py","file_name":"LogLikelihoodTest.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"527651623","text":"from functools import wraps\n\nfrom flask import current_app, Blueprint, flash, redirect, render_template, url_for, session, request, abort, jsonify\n\nfrom .forms import BookForm, BookFormEdit, AuthorForm, AuthorFormEdit, HiddenForm\nfrom .models import db, Book, Author\n\nlibrary = Blueprint(\"library\", __name__)\n\ndef login_required(F):\n    \"\"\"User login check decorator\"\"\"\n    @wraps(F)\n    def _F(*args, **kargs):\n        if not session.get('logged_in'):\n            abort(401)\n        else:\n            return F(*args, **kargs)\n    return _F\n\n\ndef auth_required(F):\n    \"\"\"User authentication check decorator\"\"\"\n    @wraps(F)\n    def _F(*args, **kargs):\n        if not session.get('auth'):\n            abort(401)\n        else:\n            return F(*args, **kargs)\n    return _F\n\n\n@library.route(\"/\")\ndef view_index():\n    return render_template(\"index.html\")\n\n\n@library.route(\"/books\")\n@auth_required\ndef list_books():\n    query = Book.query.filter(Book.id >= 0)\n    books = query.order_by(Book.title).all()\n    return render_template(\"books_list.html\", title=\"Books List\", books=books)\n\n\n@library.route(\"/authors\")\n@auth_required\ndef list_authors():\n    query = Author.query.filter(Author.id >= 0)\n    authors = query.order_by(Author.name).all()\n    return render_template(\"authors_list.html\", title=\"Authors List\", authors=authors)\n\n\n@library.route(\"/book/<int:book_id>\")\n@auth_required\ndef view_book(book_id=None):\n    book = Book.query.get_or_404(book_id)\n    title = \"Book: \" + book.title\n    form = HiddenForm(record_id=book_id)\n    return render_template(\"book.html\", book=book, title=title, form=form)\n\n\n@library.route(\"/delete_book\", methods=(\"POST\", ))\n@login_required\ndef delete_book():\n    form = HiddenForm()\n    if form.validate_on_submit():\n        book = Book.query.get_or_404(form.record_id.data)\n        db.session.delete(book)\n        db.session.commit()\n        flash(\"The book has been removed\")\n        return redirect(url_for(\"library.list_books\"))\n    return render_template(\"validation_error.html\", form=form)\n\n\n@library.route(\"/update_book/<int:book_id>\")\n@login_required\ndef
update_book_form(book_id):\n    book = Book.query.get_or_404(book_id)\n    book_form = BookFormEdit()\n    book_form.book_id.default = book.id\n    book_form.title.default = book.title\n    book_form.authors.default = [a.id for a in book.authors]\n    book_form.process()\n    return render_template('add_book.html', form=book_form)\n\n\n@library.route(\"/author/<int:author_id>\")\n@auth_required\ndef view_author(author_id=None):\n    author = Author.query.get_or_404(author_id)\n    title = \"Author: \" + author.name\n    form = HiddenForm(record_id=author.id)\n    return render_template(\"author.html\", author=author, title=title, form=form)\n\n\n@library.route(\"/delete_author\", methods=(\"POST\", ))\n@login_required\ndef delete_author():\n    form = HiddenForm()\n    if form.validate_on_submit():\n        author = Author.query.get_or_404(form.record_id.data)\n        db.session.delete(author)\n        db.session.commit()\n        flash(\"The author has been removed\")\n        return redirect(url_for(\"library.list_authors\"))\n    return render_template(\"validation_error.html\", form=form)\n\n\n@library.route(\"/update_author/<int:author_id>\")\n@login_required\ndef update_author_form(author_id):\n    author = Author.query.get_or_404(author_id)\n    author_form = AuthorFormEdit()\n    author_form.author_id.default = author.id\n    author_form.name.default = author.name\n    author_form.books.default = [b.id for b in author.books]\n    author_form.process()\n    return render_template('add_author.html', form=author_form)\n\n\n@library.route(\"/add_book\")\n@login_required\ndef add_book_form():\n    book_form = BookForm()\n    return render_template('add_book.html', form=book_form)\n\n\n@library.route(\"/add_book\", methods=(\"POST\", ))\n@login_required\ndef add_book():\n    form = BookForm()\n    if form.validate_on_submit():\n        book = Book.query.get_or_404(form.book_id.data) if form.book_id.data else Book()\n        form.populate_obj(book)\n        if not book.id:\n            db.session.add(book)\n            flash_msg = \"Added book\"\n        else:\n            flash_msg = \"Updated book\"\n        db.session.commit()\n        flash(flash_msg)\n        return redirect(url_for(\"library.view_book\", book_id=book.id))\n    return render_template(\"validation_error.html\", form=form)\n\n\n@library.route(\"/add_author\")\n@login_required\ndef add_author_form():\n    author_form = AuthorForm()\n    return render_template('add_author.html', form=author_form)\n\n\n@library.route(\"/add_author\", methods=(\"POST\", ))\n@login_required\ndef add_author():\n    form = AuthorForm()\n    if form.validate_on_submit():\n        author = Author.query.get_or_404(form.author_id.data) if form.author_id.data else Author()\n        form.populate_obj(author)\n        if not author.id:\n            db.session.add(author)\n            flash_msg = \"Added author\"\n        else:\n            flash_msg = \"Updated author\"\n        db.session.commit()\n        flash(flash_msg)\n        return redirect(url_for(\"library.view_author\", author_id=author.id))\n    return render_template(\"validation_error.html\", form=form)\n\n\n@library.route('/login', methods=('GET', 'POST'))\ndef login():\n    error = None\n    if request.method == 'POST':\n        if request.form['username'] == current_app.config['USERNAME'] and \\\n                request.form['password'] == current_app.config['PASSWORD']:\n            session['logged_in'] = True\n            session['auth'] = True\n            flash('You were logged in')\n        elif request.form['username']:\n            session['auth'] = True\n            flash('You were authorized')\n        else:\n            error = 'Wrong credentials'\n            return render_template('login.html', error=error)\n        return redirect(url_for('library.view_index'))\n    return render_template('login.html', error=error)\n\n\n@library.route('/logout')\ndef logout():\n    if session.pop('logged_in', None): flash('You 
were logged out')\n if session.pop('auth', None): flash('You were unauthorized')\n return redirect(url_for('library.view_index'))\n\n\n@library.route('/search')\n@auth_required\ndef search_form():\n return render_template('search.html')\n\n\n@library.route('/search', methods=('POST', ))\n@auth_required\ndef search():\n if 'book_title' in request.form and request.form['book_title']:\n list = Book.query.filter(Book.title.contains(request.form['book_title']))\n return render_template(\"books_list.html\", title=\"Found Books\", books=list)\n elif 'author_name' in request.form and request.form['author_name']:\n list = Author.query.filter(Author.name.contains(request.form['author_name']))\n return render_template(\"authors_list.html\", title=\"Found Authors\", authors=list)\n error = \"Need a parameter\"\n return render_template('search.html', error=error)\n\n\n@library.route('/search_book_api')\n@auth_required\ndef search_book_api():\n req = request.args.get('term', 'test', type=str)\n list = Book.query.filter(Book.title.contains(req)).order_by(Book.title).limit(10)\n res_dict = dict([(str(b.id), b.title) for b in list])\n return jsonify(**res_dict)\n\n\n@library.route('/search_author_api')\n@auth_required\ndef search_author_api():\n req = request.args.get('term', 'test', type=str)\n list = Author.query.filter(Author.name.contains(req)).order_by(Author.name).limit(10)\n res_dict = dict([(str(a.id), a.name) for a in list])\n return jsonify(**res_dict)","sub_path":"library/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"495419512","text":"import peewee as pw\n\nfrom . import BaseAction\nfrom ..db.controllers import MoviesController, ServerController, IMDBInfoController\nfrom ..util import get_imdb_info, cleanup_messages\nfrom . 
import logger\n\n\nclass SuggestAction(BaseAction):\n action_name = \"suggest\"\n controller = MoviesController()\n server_controller = ServerController()\n imdb_controller = IMDBInfoController()\n\n async def action(self, msg):\n server_id = msg.guild.id\n server_row = self.server_controller.get_by_id(server_id)\n message_timeout = server_row.message_timeout\n if server_row.block_suggestions:\n server_msg = await msg.channel.send(\n \"Suggestions are currently disabled on the server\"\n )\n if message_timeout > 0:\n await cleanup_messages([msg, server_msg], sec_delay=message_timeout)\n return\n suggestion = self.get_message_data(msg)\n suggestion = suggestion.title()\n\n imdb_row = None\n if server_row.check_movie_names:\n imdb_info = get_imdb_info(suggestion)\n if not imdb_info:\n server_msg = await msg.channel.send(\n \"Could not find the movie title you suggested in IMDb.\"\n )\n if message_timeout > 0:\n await cleanup_messages([msg, server_msg], sec_delay=message_timeout)\n return\n\n imdb_data = {\n \"imdb_id\": imdb_info.movieID,\n \"title\": imdb_info[\"title\"],\n \"canonical_title\": imdb_info[\"canonical title\"],\n \"year\": imdb_info[\"year\"],\n \"thumbnail_poster_url\": imdb_info[\"cover url\"],\n \"full_size_poster_url\": imdb_info[\"full-size cover url\"],\n }\n try:\n imdb_row = self.imdb_controller.create(imdb_data)\n except pw.IntegrityError as e:\n # IMDB entry already added, so ignore error\n logger.debug(\n \"IMDB entry insert error: {}\\n{}\".format(imdb_data, str(e))\n )\n pass\n\n movie_data = {\n \"server\": server_id,\n \"movie_name\": suggestion,\n \"suggested_by\": msg.author.name,\n \"imdb_id\": imdb_row,\n }\n try:\n self.controller.create(movie_data)\n except pw.IntegrityError as e:\n logger.debug(\"Movie insert error: {}\\n{}\".format(movie_data, str(e)))\n server_msg = await msg.channel.send(\n f\"{suggestion} has already been suggested in this server.\"\n )\n if message_timeout > 0:\n await cleanup_messages([msg, server_msg], sec_delay=message_timeout)\n return\n server_msg = await msg.channel.send(\n f\"Your suggestion of {suggestion} has been added to the list.\"\n )\n if message_timeout > 0:\n await cleanup_messages([msg, server_msg], sec_delay=message_timeout)\n\n @property\n def help_text(self):\n return (\n \"Adds the supplied movie to the suggestions list. 
There is a chance this movie will now \"\n            \"show up on future votes\"\n        )\n\n    @property\n    def help_options(self):\n        return [\"[movie name]\"]\n","sub_path":"movienightbot/actions/suggest.py","file_name":"suggest.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"612593579","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 26 11:16:32 2017\n\n\n\"\"\"\n\nimport goslate\n\nUGLY_TEXT = \"Uglytext\"\nFIXED_TEXT = \"FixedText\"\n\n# read text\nfh = open(UGLY_TEXT, \"r\")\nuglyText = fh.read()\nfh.close()\n\n# fix text\nfixText1 = uglyText.replace(\" \",\"\")\nfixText2 = fixText1.replace(\"\\n\",\" \")\n\n# consult google\ngs = goslate.Goslate()\nfixText3 = gs.translate(fixText2, 'es')\n\n# write translation\nfh = open(FIXED_TEXT, \"w\")\nfh.write(fixText2)\nfh.write(\"\\n---------------Traduccion----------------\\n\")\nfh.write(fixText3)\nfh.close()\n","sub_path":"fix_ugly_text.py","file_name":"fix_ugly_text.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"100195822","text":"\nimport os\nimport copy\nimport re\nimport csv\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\n\nimport time\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom datetime import datetime\n\n#url of the page we want to scrape\nurls = []\n\nurl = \"https://www.psacard.com/pop/tcg-cards/2006/pokemon-ex-crystal-guardians/87132\"\nurls.append(url)\n\nurl = \"https://www.psacard.com/pop/tcg-cards/2007/pokemon-pop-series-5/101536\"\nurls.append(url)\n\nurl = 'https://www.psacard.com/pop/tcg-cards/2005/pokemon-japanese-golden-sky-silvery-ocean/100014'\nurls.append(url)\n\nurl = \"https://www.psacard.com/pop/tcg-cards/2006/pokemon-ex-dragon-frontiers/87126\"\nurls.append(url)\n\n# initiating the webdriver. Parameter includes the path of the webdriver.\n\nwith open('gold_stars.csv', \"a\") as fp:\n    wr = csv.writer(fp, dialect='excel')\n    wr.writerow(['name','1','2','3','4','5','6','7','8','9','10','Total_pop','date'])\n\n\ndef get_pop(url):\n    driverpath = os.path.realpath(r'/usr/local/bin/chromedriver')\n    chrome_options = Options()\n\n    driver = webdriver.Chrome(driverpath, options=chrome_options)\n    driver.get(url)\n\n    # this is just to ensure that the page is loaded\n    time.sleep(5)\n\n    html = driver.page_source\n\n    # this renders the JS code and stores all\n    # of the information in static HTML code. 
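\n    # Note: the fixed 5-second sleep above is a crude wait; WebDriverWait with\n    # expected_conditions (imported above) would be a more reliable way to wait for the page.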
\n \n # Now, we could simply apply bs4 to html variable \n soup = BeautifulSoup(html, \"html.parser\") \n\n\n n = 0\n\n text = ''\n\n for k in soup.find_all('td'):\n temp = []\n \n text += k.get_text()\n\n\n text = text.split('\\n')\n\n indeces = [0,12,20,24,28,32,36,40,44,48,52,56]\n\n data = []\n i = 59\n \n \n today = datetime.today()\n\n date = ''\n date += (str(today.year) + '-' + str(today.month)) \n\n set_name = url.split('/')\n set_name = set_name[-2]\n set_name = set_name[8:]\n\n while i < len(text):\n entry = []\n temp = text[i:(i+62)]\n temp[0] = temp[0] + ' ' + temp[1]\n entry = []\n for k in indeces:\n entry.append(temp[k].strip())\n\n entry.append(set_name)\n entry.append(date)\n\n data.append(entry)\n i += 62\n\n\n\n \n\n today = datetime.today()\n\n\n file_name = set_name + '.csv'\n \n\n with open(file_name, \"a\") as fp:\n wr = csv.writer(fp, dialect='excel')\n wr.writerow(['name','1','2','3','4','5','6','7','8','9','10','Total_pop','date'])\n for i in data:\n wr.writerow(i)\n\n\n with open('gold_stars.csv', \"a\") as fp:\n wr = csv.writer(fp, dialect='excel')\n for i in data:\n if \"Gold Star\" in i[0]:\n wr.writerow(i)\n \n\n\n\n\n print(data)\n\n \n \n driver.close()\n\n\n\nfor url in urls:\n get_pop(url)\n\n\n","sub_path":"scrape_goldstars.py","file_name":"scrape_goldstars.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"20186369","text":"from tkinter import *\n\nroot=Tk()\n\nmiframe=Frame(root,width=500,height=400)\nmiframe.pack()\n\nmilabel=Label(miframe,text=\"Hola mundo\", fg=\"red\", font=(\"Comic Sans MS\",18)) #tamaño 18 de color rojo\nmilabel.place(x=100,y=200) #100 pixeles desde el borde izq de la pantalla hasta el texto. 
200 pixeles desde la parte superior hasta el texto.\n\nmiImagen=PhotoImage(file=\"mouse.png\")\nLabel(miframe, image=miImagen).place(x=20,y=10)\n\nroot.mainloop()","sub_path":"FreeWork/Curse/py_gui/Labels&Images.py","file_name":"Labels&Images.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"286734443","text":"from django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render_to_response, render\nfrom django.contrib.auth.models import User\nfrom event.models import Product, Event\nfrom django.db.models import Sum\nfrom decimal import *\nfrom datetime import datetime\nimport simplejson as json\n\ndef index(request):\n    recent = Event.objects.order_by('-timestamp')[:10]\n    return render(request, 'index.html', {'events': recent})\n\ndef track(request, user_id, barcode):\n    b = get_object_or_404(Product, barcode=barcode)\n    u = get_object_or_404(User, username=user_id)\n    e = Event.objects.create(user=u, amount=1, value=1, product=b, action='C')\n    return HttpResponse()\n\ndef deposit(request):\n    data = list()\n    for p in Product.objects.all():\n        sumobject = Event.objects.filter(product=p).aggregate(Sum('amount'))\n        if not sumobject['amount__sum']:\n            sumobject['amount__sum'] = 0\n        data.append({'label': p.name, 'value': sumobject['amount__sum']})\n    return render(request, 'deposit.html', {'products': json.dumps(data).replace('\"', '\\\\\"')})\n\ndef sync(request):\n    drinks = []\n    for p in Product.objects.all():\n        drinks.append({'name': p.name, 'barcode': p.barcode})\n    return HttpResponse(json.dumps(drinks))\n\ndef product(request, product_id):\n    p = Product.objects.get(id=product_id)\n    data = dict()\n    data['name'] = p.name\n    data['img'] = p.url\n    return render(request, 'product.html', {'data': data})\n\ndef user(request, user_id):\n    u = get_object_or_404(User, username=user_id)\n    data = {'name': u.username, 'balance': Decimal(0)}\n    for a in Event.ACTIONS:\n        allEvents = Event.objects.filter(user=u, action=a[0])\n        obj = allEvents.aggregate(Sum('value'), Sum('amount'))\n        value = obj['value__sum']\n        amount = obj['amount__sum']\n        if not value:\n            value = Decimal(0)\n        if not amount:\n            amount = 0\n        data[a[0]] = amount\n        if a[0] == 'C':\n            data['balance'] -= value\n        else:\n            data['balance'] += value\n    allEvents = Event.objects.filter(user=u, action='C')\n    last = allEvents.latest(field_name='timestamp')\n    delta = datetime.utcnow() - last.timestamp.replace(tzinfo=None)\n    if delta.days:\n        text = \"%d days ago\" %delta.days\n    elif delta.seconds < 1:\n        text = \"right now\"\n    elif delta.seconds < 60:\n        text = \"%d seconds ago\" %delta.seconds\n    elif delta.seconds < 3600:\n        text = \"%d minutes ago\" %(delta.seconds / 60)\n    else:\n        text = \"%d hours ago\" %(delta.seconds/60**2)\n    data['last'] = last.product.name + \", \" + text\n    return render(request, 'user.html', {'data': data})\n\ndef productlist(request):\n    products = Product.objects.all().exclude(name='dummy')\n    return render(request, 'productlist.html', {'products': products})\n\ndef userlist(request):\n    users = User.objects.all().exclude(username='admin')\n    return render(request, 'userlist.html', {'users': users})\n","sub_path":"event/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"488799723","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 22 21:13:01 2015\n\n@author: Da Vinci\n\"\"\"\nimport 
os\nimport IndexList\nimport DocVectorList\n\n#print string.punctuation\nil = IndexList.IndexList()\ndcl = DocVectorList.DocVectorList()\nil.setDocVectorList(dcl)\ndcl.setIndexList(il)\nif os.name == 'posix':\n    dataPath = 'Reuters'\nelse:\n    dataPath = \"C:\\\\Users\\\\Da Vinci\\\\Desktop\\\\ir\\\\Reuters\"\n# il.readAllFromDir(\"./Reuters\")\n# il.writeIndexToFile(\"index_record.txt\")\n# dcl.writeDocVectorToFile(\"doc_vector.txt\")\n# il._printkey();\nil.readFromIndexFile('index_record.txt')\ndcl.readDocVectorFromFile('doc_vector.txt')\nprint('index file read done')\n#resset = il.getDocSet('wednesday', False)\n# print resset\n\n# il.readAllFromDir(dataPath)\n# il.writeIndexToFile(\"index_record.txt\")\n# dcl.writeDocVectorToFile(\"doc_vector.txt\")\n# il.writeDocIDToFile(\"docID.txt\")\n#il._printkey();\n","sub_path":"Instance.py","file_name":"Instance.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"528577571","text":"from turtle import *\n\nlength = 10\nangle = 90\n\ndef draw_path(axiom):\n    for symbol in axiom:\n        if symbol == 'F':\n            forward(length)\n        elif symbol == '-':\n            left(angle)\n        elif symbol == '+':\n            right(angle)\n\ndef apply_rule(axiom,n):\n    rule = \"F-F+F+FF-F-F+F\"\n    for i in range(n):\n        axiom=axiom.replace(\"F\",rule)\n    return axiom\n\nspeed(0)\naxiom = \"F-F-F-F\"\ndraw_path(apply_rule(axiom,2))\nexitonclick()\n","sub_path":"Code/L-system.py","file_name":"L-system.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"21282654","text":"\ndef median(A, B):\n\tm, n = len(A), len(B)\n\n\tif m > n: # swap two arrays (we want len(A) < len(B))\n\t\tA, B, m, n = B, A, n, m\n\n\tif n == 0:\n\t\traise ValueError\n\n\timin = 0\n\timax = m\n\thalf_len = (m + n + 1) // 2 # +1 is for lengths\n\n\twhile imin <= imax:\n\t\t# reindex A with i, B with j\n\t\ti = (imin + imax) // 2\n\t\tj = half_len - i\n\t\t# note that we want i + j = half_len\n\n\t\tif i < m and B[j-1] > A[i]:\n\t\t\t# i is too small; must increase\n\t\t\t# bottom left to top right comparison\n\t\t\timin = i+1\n\t\telif i > 0 and A[i-1] > B[j]:\n\t\t\t# i is too large; must decrease\n\t\t\t# top right to bottom left comparison\n\t\t\timax = i-1\n\t\telse:\n\t\t\t# i is perfect\n\n\t\t\tif i == 0:\n\t\t\t\tmax_of_left = B[j-1]\n\t\t\telif j == 0:\n\t\t\t\tmax_of_left = A[i-1]\n\t\t\telse:\n\t\t\t\tmax_of_left = max(A[i-1], B[j-1])\n\n\t\t\tif (m + n) % 2 == 1:\n\t\t\t\treturn max_of_left\n\n\t\t\tif i == m:\n\t\t\t\tmin_of_right = B[j]\n\t\t\telif j == n:\n\t\t\t\tmin_of_right = A[i]\n\t\t\telse:\n\t\t\t\tmin_of_right = min(A[i], B[j])\n\n\t\t\treturn (max_of_left + min_of_right) / 2.0\n","sub_path":"Reference/LeetCode/4 Median of Two Sorted Arrays.py","file_name":"4 Median of Two Sorted Arrays.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"278718739","text":"def is_palindrome(n):\n    return n == int(str(n)[::-1])\n\ndef main(inpath):\n    outfile = open(\"out.txt\", \"w\")\n    lines = open(inpath, \"r\").read().split('\\n')\n    cases = int(lines[0])\n    palindromes = []\n    for i in xrange(1, 3 * 10**7 + 1):\n        if is_palindrome(i) and is_palindrome(i**2):\n            palindromes.append(i**2)\n\n    for i in xrange(1, cases + 1):\n        [x, y] = lines[i].split(' ')\n        x = int(x)\n        y = int(y)\n\n        j = 0\n        k = len(palindromes) - 1\n        while x > palindromes[j]:\n            j += 1\n        while y < palindromes[k]:\n            k -= 1\n        outfile.write(\"Case #\" + str(i) + \": \" + str(k - j + 1) + 
\"\\n\")\n\nmain(\"input.txt\")\n","sub_path":"solutions_2463486_0/Python/handorff/problemb.py","file_name":"problemb.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"471154024","text":"# -*- coding: utf-8 -*-\n# source : http://www.runoob.com/python/python-100-examples.html\n#Python 练习实例1\n# 题目:有四个数字:1、2、3、4,能组成多少个互不相同且无重复数字的三位数?各是多少?\n# 程序分析:可填在百位、十位、个位的数字都是1、2、3、4。组成所有的排列后再去 掉不满足条件的排列。\ndef py_exe01():\n count = 0\n for i in range(1, 5):\n for j in range(1, 5):\n for k in range(1, 5):\n if i != j != k and i != k:\n print(i, j, k)\n count += 1\n continue\n print(\"The total num is {}\".format(count))\n\npy_exe01()\n\n#Python 练习实例2\n# 题目:企业发放的奖金根据利润提成。\n# 利润(I)低于或等于10万元时,奖金可提10%;\n# 利润高于10万元,低于20万元时,低于10万元的部分按10%提成,高于10万元的部分,可提成7.5%;\n# 20万到40万之间时,高于20万元的部分,可提成5%;\n# 40万到60万之间时高于40万元的部分,可提成3%;\n# 60万到100万之间时,高于60万元的部分,可提成1.5%,高于100万元时,\n# 超过100万元的部分按1%提成,从键盘输入当月利润I,求应发放奖金总数?\n\ndicMoney_re= [[100000, 0.1], [200000, 0.075], [400000, 0.05], [600000, 0.03], [1000000, 0.01]]\ndicMoney = [[1000000, 0.01], [600000, 0.015], [400000, 0.03], [200000, 0.05], [100000, 0.075], [0, 0.1]]\n\ndef py_exe02():\n money_inhand = 0\n money = int(input('Please input the money:'))\n for i in range(0, len(dicMoney)):\n if money > dicMoney[i][0]:\n money_inhand += (money - dicMoney[i][0]) * dicMoney[i][1]\n money = dicMoney[i][0]\n print(money_inhand)\n\n#Python 练习实例3\n# 一个整数,它加上100后是一个完全平方数,再加上168又是一个完全平方数,请问该数是多少?\n# x + 100 = m^2\n# x + 100 + 168 = n^2\n# m^2 - n^2 = 168\n# i = (m + n) j = (m - n)\n# i * j = 168\n# i (1, 168 / 2 + 1) j [2, 168)\ndef py_exe03():\n for i in range(1, 85):\n if 168 % i == 0:\n j = 168 / i\n if (i + j) % 2 == 0 and (i - j) % 2 == 0 and i > j:\n m = (i + j) / 2\n n = (i - j) / 2\n x = n * n - 100\n print(int(x))\n\npy_exe03()\n\n#Python 练习实例4\n# 输入某年某月某日,判断这一天是这一年的第几天?\np = [31,28,31,30,31,30,31,31,30,31,30,31] # 平年\nr = [31,29,31,30,31,30,31,31,30,31,30,31] # 闰年\n\ndef py_exe04():\n year = int(input('year: '))\n month = int(input('month: '))\n day = int(input('day: '))\n if year % 4 == 0:\n if year % 100 == 0 and year % 400 != 0:\n y = r\n else:\n y = p\n else:\n y = p\n\n sum = day\n for i in range(0, month - 1):\n sum += y[month - 1]\n\n print(\"The total days : {}\".format(sum))\n\npy_exe04()\n\n#Python 练习实例5\n# 题目:输入三个整数x,y,z,请把这三个数由小到大输出。\ndef py_exe05():\n num = input('input the three numbers:(like 1,3,2)')\n l = num.split(',')\n l.sort()\n print(l)\n\npy_exe05()\n\n#Python 练习实例6\n# 题目:斐波那契数列\n# 1 1 2 3 5 8 13 21\n# 效率最快方式\ndef fib(max):\n n, a, b = 0, 0, 1\n while n < max:\n #print(b)\n yield b\n a, b = b, a+b\n n += 1\n\ndef test_fib():\n f = fib(10)\n for i in f:\n print(i)\ntest_fib()\n\ndef py_exe06(n):\n a , b = 1 , 1\n i = 1\n while i < n:\n a , b = b , a + b\n i += 1\n print(a)\n\npy_exe06(10)\n\n# 递归方式\ndef py_exe06_recu(n):\n if n == 1 or n == 2:\n return 1\n else:\n return py_exe06_recu(n - 2) + py_exe06_recu(n - 1)\n\nprint(py_exe06_recu(10))\n\n#Python 练习实例7\n# 题目:将一个列表的数据复制到另一个列表中。\ndef py_exe07():\n a = [1, 2, 3]\n print(a)\n b = a[:]\n print(b)\n\npy_exe07()\n\n#Python 练习实例8\n# 题目:输出 9*9 乘法口诀表\ndef py_exe08():\n for i in range(1,10):\n for j in range(1, i+1):\n print(\"{} * {} = {}\".format(j, i, i*j, ), end=\"\\t\")\n #print(\"%d * %d = %d\"%(j, i, i*j), end=\"\\t\")\n print()\n\npy_exe08()\n\n#Python 练习实例9\n# 题目:暂停一秒输出\nimport time\ndef py_exe09():\n for i in range(10):\n print(\"{}s\".format(i + 1))\n time.sleep(1)\n\npy_exe09()\n\n#Python 练习实例10\n# 
题目:暂停一秒输出,并格式化当前时间\ndef py_exe10():\n    print(time.strftime(\"%Y:%m:%d %H:%M:%S\", time.localtime()))\n    time.sleep(1)\n    print(time.strftime(\"%a, %b, %d %H:%M:%S %Y\",time.localtime()))\npy_exe10()","sub_path":"python练习/菜鸟教程_python100题/python_1_10.py","file_name":"python_1_10.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"105244174","text":"from ftplib import FTP\r\nimport sys\r\n\r\nglobal ftp_use\r\n\r\nclass FTP_COMMON():\r\n\tdef __init__(self,ftp_config):\r\n\t\tself.addr = ftp_config[\"addr\"]\r\n\t\tself.username = ftp_config[\"username\"]\r\n\t\tself.password = ftp_config[\"password\"]\r\n\t\t\r\n\t\tself.ftp = FTP()\r\n\t\tself.ftp.connect(self.addr)\r\n\t\tself.ftp.login(self.username,self.password)\r\n\t\r\n\tdef __del__(self):\r\n\t\tself.ftp.close()\r\n\t\r\n\tdef file_is_exist_in_ftp(self,remote_path,filename):\r\n\t\tself.ftp.cwd(remote_path)\r\n\t\tslist = self.ftp.nlst()\r\n\t\tif filename in str(slist):\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\r\n\tdef rm_file_in_ftp(self,remote_path,filename):\r\n\t\ttry:\r\n\t\t\tself.ftp.cwd(remote_path)\r\n\t\t\tslist = self.ftp.nlst()\r\n\t\t\tif filename in str(slist):\r\n\t\t\t\tself.ftp.delete(filename)\r\n\t\t\telse:\r\n\t\t\t\tprint(\"%s does not exist!\" % filename)\r\n\t\texcept Exception as e:\r\n\t\t\tprint('Exception : rm_file_in_ftp')\r\n\t\r\n\tdef down_file_from_ftp(self,remote_path,filename,bufsize=10000):\r\n\t\twith open(filename,'wb+') as fp:\r\n\t\t\ttry:\r\n\t\t\t\tself.ftp.retrbinary(\"RETR %s\"%remote_path+\"/\"+filename, fp.write, bufsize)\r\n\t\t\t\tfp.flush() \r\n\t\t\texcept:\r\n\t\t\t\tprint(\"Download %s failed!\" % filename)\r\n\t\t\t\treturn False\r\n\t\treturn True\r\n\r\n\r\n\tdef upload_file_to_ftp(self,remote_path,filename,bufsize=10000):\r\n\t\tif self.file_is_exist_in_ftp(remote_path,filename):\r\n\t\t\t#print(\"%s already exists in FTP! 
Delete it!\" % filename)\r\n\t\t\tself.rm_file_in_ftp(remote_path,filename)\r\n\t\twith open(filename,'rb') as file_object:\r\n\t\t\ttry:\r\n\t\t\t\tself.ftp.storbinary('STOR %s' % remote_path + '/' + filename, file_object, bufsize)\r\n\t\t\t\tself.ftp.set_debuglevel(0)\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tprint(\"Upload %s failed!\" % filename)\r\n\t\t\t\treturn False\r\n\t\treturn True\r\n\r\n\tdef create_remote_dir(self,remote_path,new_dir):\r\n\t\tself.ftp.cwd(remote_path)\r\n\t\tslist = self.ftp.nlst()\r\n\t\tif new_dir not in str(slist):\r\n\t\t\tself.ftp.mkd(new_dir)\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\r\n\tdef rm_dir_in_ftp(self,remote_path):\t\r\n\t\tdir_name = remote_path.split(\"/\")[-1].strip()\r\n\t\ttry:\r\n\t\t\tself.ftp.cwd(remote_path)\r\n\t\t\tslist = self.ftp.nlst()\r\n\t\t\tfor file in slist:\r\n\t\t\t\tself.ftp.delete(file)\r\n\t\t\tself.ftp.cwd(\"..\")\r\n\t\t\tself.ftp.rmd(dir_name)\r\n\t\texcept:\r\n\t\t\tprint('Delete %s failed' % dir_name)\r\n\r\nif __name__ == '__main__':\r\n\tprint(str(sys.argv[0]) + \" enter\")\r\n\t\r\n\tftp_config = {\r\n\t\t\"addr\":\"ftp.asrmicro.com\",\r\n\t\t\"username\":\"otaupdate\",\r\n\t\t\"password\":\"9eLtBmea\"\r\n\t}\r\n\t\r\n\tftp = FTP_COMMON(ftp_config)\r\n\tftp.down_file_from_ftp(\"/Setting\",\"P_user.list\")\r\n\tftp.upload_file_to_ftp(\"/test/xxxxx\",\"P_user.list\",10000)\r\n\t#ftp.create_remote_dir(\"/test\",\"xxxxx\")\r\n\tftp.rm_files_in_ftp(\"/test/xxxxx\")\r\n\t\r\n\t\r\n\t\r\n\t","sub_path":"PythonTools/Common/ftp_common.py","file_name":"ftp_common.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"139777916","text":"from collections import namedtuple\nimport statistics as stat\nfrom math import pi\nimport sys\n\nimport lcg\nfrom distributions import ExponentialDistribution, GammaDistribution, GaussianDistribution, UniformDistribution, TriangularDistribution, SimpsonDistribution\nfrom histogram import draw_histogram\nfrom reader import read_positive_int\n\n\nLcgParameters = namedtuple('LcgParameters', ['initial', 'multiplyer', 'base'])\n\nDISTRIBUTIONS_DESCRIPTION = [\n UniformDistribution(),\n GaussianDistribution(),\n ExponentialDistribution(),\n GammaDistribution(),\n TriangularDistribution(),\n SimpsonDistribution()\n]\n\nDEFAULT_LCG_PARAMS = LcgParameters(\n base=1046527,\n initial=65537,\n multiplyer=32771\n)\n\nRANDOM_VECTOR_LENGTH = 100000\n\ndef print_result(name, actual_result, reference_value_representation='', reference_value=None):\n if actual_result is not None:\n result = '{}: {}'.format(name, actual_result)\n if reference_value:\n result += ' (reference value = {}; delta: {})'.format(\n reference_value_representation + ' = ' + str(reference_value)\n if reference_value_representation\n else\n reference_value\n , abs(reference_value - actual_result)\n )\n else:\n result = 'Can not find {}'.format(name)\n print(result)\n\n\ndef read_lcg_parameters():\n return LcgParameters(\n base=read_positive_int(\"m\"),\n initial=read_positive_int(\"R0\"),\n multiplyer=read_positive_int(\"a\")\n )\n\n\ndef lcg_demo():\n params = read_lcg_parameters()\n result = list(lcg.random_vector(RANDOM_VECTOR_LENGTH, params))\n print_result('mean', stat.mean(result), reference_value=1/2, reference_value_representation='1/2')\n print_result('variance', stat.variance(result), reference_value=1/12, reference_value_representation='1/12')\n print_result('standart deviation', stat.stdev(result))\n 
print_result('2K/N', lcg.uniform_ratio(result), reference_value=pi/4, reference_value_representation='pi/4')\n    period = lcg.period(lambda length: lcg.random_vector(length, params))\n    print_result('period', period)\n    if period:\n        print_result('aperiodic interval', lcg.aperiodic_interval(lambda length: lcg.random_vector(length, params), period))\n    draw_histogram(result)\n\n\ndef print_menu():\n    for i, distribution in enumerate(DISTRIBUTIONS_DESCRIPTION):\n        print('\\t{} : {}'.format(i+1, distribution.name))\n    print('\\t0 : exit')\n\n\ndef read_command():\n    valid = False\n    while not valid:\n        try:\n            command = int(input('>> '))\n            valid = command >= 0 and command <= len(DISTRIBUTIONS_DESCRIPTION)\n        except ValueError:\n            print('Invalid input')\n            print_menu()\n    return command\n\n\ndef distributions_demo():\n    print_menu()\n    command = read_command()\n    while command != 0:\n        distribution = DISTRIBUTIONS_DESCRIPTION[command - 1]\n        result = list(distribution.generate(RANDOM_VECTOR_LENGTH, DEFAULT_LCG_PARAMS))\n        print_result('mean', stat.mean(result))\n        print_result('variance', stat.variance(result))\n        print_result('standard deviation', stat.stdev(result))\n        draw_histogram(result)\n        command = read_command()\n\n\ndef main():\n    modes = {\n        'lcg': lcg_demo,\n        'dist': distributions_demo\n    }\n    if (len(sys.argv) == 2) and (sys.argv[1] in modes):\n        command = modes[sys.argv[1]]\n    else:\n        print('Usage main.py dist|lcg')\n        return\n    command()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"225501943","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# falloutModel.py\r\n# \r\n# Copyright 2015 Jad \r\n# \r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n# \r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\r\n# GNU General Public License for more details.\r\n# \r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software\r\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\r\n# MA 02110-1301, USA.\r\n# \r\n# \r\n__author__ = \"Jad\"\r\n__date__ = \"$15/11/2015 4:03:54 PM$\"\r\n\r\nimport falloutEvent \r\nfrom pygame.locals import *\r\nfrom random import *\r\nfrom time import gmtime, strftime\r\n\r\nclass RunTime():\r\n \"\"\"\r\n runtime details\r\n \"\"\"\r\n STATE_LOADING = 0\r\n STATE_RUNNING = 1\r\n STATE_SHUTDOWN = 2\r\n \r\n def __init__(self,evManager):\r\n self.evManager = evManager\r\n self.evManager.registerListener(self)\r\n self.state = RunTime.STATE_LOADING\r\n print(\"...Pip-Boy Loading...\")\r\n \r\n def run(self):\r\n self.state = RunTime.STATE_RUNNING\r\n ev = falloutEvent.FalloutStartedEvent()\r\n self.evManager.post(ev)\r\n print(\"...Pip-Boy Running...\")\r\n \r\n def getState(self):\r\n return self.state\r\n \r\n def Notify(self, event):\r\n if isinstance(event,falloutEvent.TickEvent):\r\n if self.state == RunTime.STATE_LOADING:\r\n self.run()\r\n\r\nclass FalloutClock():\r\n #Default Clock object for Fallout Screen - drawn by default\r\n def __init__(self,evManager):\r\n self.evManager = evManager\r\n self.evManager.registerListener(self)\r\n self.clockText = \"\"\r\n self.x = 0\r\n self.y = 0\r\n print(\"...Clock Initialised...\")\r\n \r\n def TickClock(self):\r\n self.clockText = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\r\n return self.clockText\r\n \r\n def setPos(self,x,y):\r\n self.x = x\r\n self.y = y\r\n \r\n def getPos(self):\r\n return (self.x,self.y)\r\n \r\n def Notify(self,event):\r\n if isinstance(event,falloutEvent.FalloutStartedEvent):\r\n self.TickClock()\r\n elif isinstance(event,falloutEvent.TickEvent):\r\n self.TickClock()\r\n \r\n \r\n \r\n \r\n \r\n \r\n","sub_path":"falloutModel.py","file_name":"falloutModel.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"216178225","text":"\"\"\"\nProject Toxic Span Detection\nPostprocessing for model LSTM with attention\n@authors: Julia Kłos, Patrycja Cieplicka\n@date: 12.01.2020\n\"\"\"\nimport numpy as np\n\ndef getWordsByAttention(attention, tokenized, word_vect, treshold):\n \"\"\"\n Function returning toxic words, given word vector, attention weights, word_indexes and treshold\n \"\"\"\n tokenized = tokenized[0] > 0\n weights = attention[tokenized]\n weights = weights > treshold\n words = [word for k, word in enumerate(word_vect.split(' '))]\n toxic_words = [b for a, b in zip(weights, words) if a]\n return toxic_words\n\ndef wordAttentionWeights(sequenceSentence,weights):\n \"\"\"\n The same function as the AttentionLayer class - calculate the weights of attention layer\n \"\"\"\n uit = np.dot(sequenceSentence, weights[0]) + weights[1]\n uit = np.tanh(uit)\n\n ait = np.dot(uit, weights[2])\n ait = np.squeeze(ait)\n ait = np.exp(ait)\n ait /= np.sum(ait)\n \n return ait","sub_path":"src/attention_exp.py","file_name":"attention_exp.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"496326173","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport pprint\nimport argparse\nimport functools\nimport numpy as np\nimport tensorflow as tf\nfrom data_processing import datasets\nfrom model_code import models, utils\nimport 
tensorflow_addons as tfa\nimport matplotlib.pyplot as plt\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nmodel_index = {\n 'baseline': functools.partial(\n models.Baseline, \n input_shape=datasets.IMAGE_SHAPE),\n 'CNN' : models.CNN,\n}\n\n\ndef get_compiled_model(model_type, model_params=None, compile_params=None):\n \"\"\"\n Returns a compiled model with given model/compile parameters\n \"\"\"\n if model_params:\n model = model_type(**model_params)\n else:\n model = model_type()\n\n model.compile(**compile_params)\n\n return model\n\n\n\n# -----------------------------------------------------------------------------\n# Subcommand functions\n# -----------------------------------------------------------------------------\ndef predict(args):\n if args.use_h5:\n model = tf.keras.models.load_model(args.model_path)\n else:\n model = model_index[args.model_type]()\n model.load_weights(filepath=args.model_path).expect_partial()\n\n if args.image.endswith('.txt'): # list of images\n # with open(args.image, 'r') as file:\n # for image in file:\n # pred = utils.display_prediction(image.rstrip(), model,\n # augmentation=args.augmentation)\n # print(os.path.splitext(os.path.basename(image))[0], \n # *pred, sep='\\t')\n dataset = datasets.DataWriter.get_basic_dataset(args.image, args.num_processes)\n dataset = dataset.batch(args.batch_size, drop_remainder=False) \\\n .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\n for filenames, images in dataset:\n predictions = model(images)\n for filename, prediction in zip(filenames, predictions):\n f = os.path.splitext(os.path.basename(filename.numpy()))[0]\n f = f.decode().split('_')\n print(*f[:3], sep='\\t', end='\\t')\n print(*prediction.numpy(), sep='\\t')\n else:\n pred = utils.display_prediction(args.image, model,\n augmentation=args.augmentation)\n print(os.path.splitext(os.path.basename(args.image))[0], \n *pred, sep='\\t')\n\ndef evaluate(args):\n if args.use_h5:\n model = tf.keras.models.load_model(args.model_path)\n else:\n model = model_index[args.model_type]()\n model.load_weights(filepath=args.model_path).expect_partial()\n utils.evaluate_model(model, data_dir= args.data_dir, \n batch_size=args.batch_size)\n\ndef train(args):\n pprint.pprint(vars(args))\n\n # load data\n training_set, n_train = datasets.DataReader(\n augmentation=True,\n num_processes=args.processes,\n batch_size=args.batch_size,\n data_list=args.train_list,\n tfrec_list=args.train_tfrec_list).get_dataset()\n\n val_set, n_val = datasets.DataReader(\n augmentation=False,\n num_processes=args.processes,\n batch_size=args.batch_size,\n data_list=args.val_list,\n tfrec_list=args.val_tfrec_list).get_dataset()\n\n print(f\"Train on {n_train} examples. Validate on {n_val} examples.\")\n\n # setup training\n callbacks = [\n tf.keras.callbacks.ModelCheckpoint(\n filepath=args.save_to,\n monitor='val_loss',\n verbose=1,\n save_best_only=True),\n ]\n\n lr_schedule = tf.keras.experimental.CosineDecayRestarts(\n initial_learning_rate=args.lr,\n alpha=0.0001,\n t_mul=2.0,\n m_mul=1.25,\n first_decay_steps=np.ceil(n_train/(args.batch_size/4))) # ie. 
two epochs\n\n\n # TODO eventually I'd like to optionally be able to load these from a JSON\n model_params = {'num_classes': args.num_classes}\n loss = tf.keras.losses.CategoricalCrossentropy\n metrics=['CategoricalAccuracy']\n compile_params = dict(\n loss=loss(\n label_smoothing=args.label_smoothing,\n ),\n optimizer=#tfa.optimizers.Lookahead(\n tfa.optimizers.SGDW(\n weight_decay=args.weight_decay,\n learning_rate=lr_schedule,\n momentum=args.momentum,\n nesterov=True,\n ),\n #),\n metrics=metrics)\n model = get_compiled_model(\n model_index[args.model_type], \n model_params, \n compile_params)\n\n model.fit(\n training_set,\n steps_per_epoch=np.ceil(n_train/args.batch_size),\n validation_data=val_set,\n validation_steps=np.ceil(n_val/args.batch_size),\n epochs=args.epochs,\n callbacks=callbacks,\n verbose=args.verbose\n )\n\n\n# -----------------------------------------------------------------------------\n# Get arguments\n# -----------------------------------------------------------------------------\nparser = argparse.ArgumentParser()\nsubparsers = parser.add_subparsers(title='Subcommands')\n\n# prediction subcommand -------------------------------------------------------\npredict_parser = subparsers.add_parser(\n 'predict', help='Use a trained model to classify an image.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\npredict_parser.add_argument(\n '--model-path', '-mp', dest='model_path', type=str, required=True,\n help='Path of trained model')\npredict_parser.add_argument(\n '--model-type', '-mt', dest='model_type', type=str, required=False,\n choices=model_index.keys(), default='CNN', help='Type of model to load.')\npredict_parser.add_argument(\n '--use-h5', '-h5', dest='use_h5', action='store_true')\npredict_parser.add_argument(\n '--augmentation', '-a', dest='augmentation', action='store_true',\n help='Use test time augmentation.')\npredict_parser.add_argument(\n '--image', '-i', dest='image', type=str, required=True,\n help='Path of image')\npredict_parser.add_argument(\n '--num_processes', '-n', dest='num_processes', type=int, default=1,\n help='number of processes to use when loading images')\npredict_parser.add_argument(\n '--batch_size', '-b', dest='batch_size', type=int, default=32)\npredict_parser.set_defaults(\n func=predict)\n\n# evaluation subcommand -------------------------------------------------------\neval_parser = subparsers.add_parser(\n 'evaluate', help='Evaluate a trained model on a labelled dataset',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\neval_parser.add_argument(\n '--model-path', '-mp', dest='model_path', type=str, required=True,\n help=\"Path of trained model (before the dot '.')\")\neval_parser.add_argument(\n '--model-type', '-mt', dest='model_type', type=str, required=False,\n choices=model_index.keys(), default='CNN', help='Type of model to load.')\neval_parser.add_argument(\n '--use-h5', '-h5', dest='use_h5', action='store_true')\neval_parser.add_argument(\n '--data-dir', '-d', dest='data_dir', type=str, required=True,\n help='Root data directory for test set.')\neval_parser.add_argument(\n '--batch-size', '-b', dest='batch_size', type=int, required=False,\n default=80, help='Number of images to feed to model at a time.')\neval_parser.set_defaults(\n func=evaluate)\n\n# training subcommand ---------------------------------------------------------\ntrain_parser = subparsers.add_parser(\n 'train', help='Train a new model.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\ntrain_parser.add_argument(\n 
'--verbose', '-v', dest='verbose', type=int, default=1,\n help='verbosity level passed to keras fit function.')\ntrain_parser.add_argument(\n '--processes', '-p', dest='processes', type=int, default=1,\n help='Number of processes used in fetching data.')\ntrain_parser.add_argument(\n '--batch-size', '-b', dest='batch_size', type=int, required=False,\n default=80, help='Number of images to feed to model at a time.')\ntrain_parser.add_argument(\n '--epochs', '-e', dest='epochs', type=int, required=False,\n default=100, help='Max number of epochs to train model.')\ntrain_parser.add_argument(\n '--model-type', '-mt', dest='model_type', type=str, required=False,\n default='CNN', help='Type of model to train.')\ntrain_parser.add_argument(\n '--num-classes', '-n', dest='num_classes', type=int, required=False,\n default='3', help='Number of possible class labels')\ntrain_parser.add_argument(\n '--train-list', dest='train_list', type=str, required=True,\n help='File containing list of images in train set.')\ntrain_parser.add_argument(\n '--val-list', dest='val_list', type=str, required=True,\n help='File containing list of images in val set.')\ntrain_parser.add_argument(\n '--train-tfrec-list', dest='train_tfrec_list', type=str, required=True,\n help='File containing list of train tfrecord paths (s3 or local).')\ntrain_parser.add_argument(\n '--val-tfrec-list', dest='val_tfrec_list', type=str, required=True,\n help='File containing list of val tfrecord paths (s3 or local).')\ntrain_parser.add_argument(\n '--learning-rate', '-lr', dest='lr', type=float, required=False,\n default=1e-4, help='Learning rate for optimizer.')\ntrain_parser.add_argument(\n '--momentum', '-mom', dest='momentum', type=float, required=False,\n default=0.9, help='Momentum term in SGD optimizer.')\ntrain_parser.add_argument(\n '--weight-decay', '-w', dest='weight_decay', type=float, required=False,\n default=0, help='Weight decay strength')\ntrain_parser.add_argument(\n '--label-smoothing', '-ls', dest='label_smoothing', type=float, required=False,\n default=0.0, help='Strength of label smoothing (0-1).')\ntrain_parser.add_argument(\n '--save-to', '-s', dest='save_to', type=str, required=False,\n default=None, help='filename if you want to save your trained model.')\ntrain_parser.set_defaults(\n func=train)\n\nargs = parser.parse_args()\n\nif len(sys.argv) == 1:\n parser.print_help()\n parser.exit()\n\nargs.func(args)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":10066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"285326938","text":"from django.http.response import HttpResponse\nimport jwt\n\n# Django imports\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.models import update_last_login\nfrom django.conf import settings\n\n# Third Party Imports\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n# Local Imports\nfrom users.serializers import (\n UserSignupSerializer,\n PasswordSerializer,\n SigninSerializer,\n UserSerializer,\n)\nfrom users.models import User\nfrom users.utils import generate_email, user_tokens\n\n# Create your views here.\n\n\ndef home(request):\n return HttpResponse(\"Welcome to TODO API\")\n\n\n@api_view((\"POST\",))\ndef signup(request):\n # custom serializer to validate user data\n serializer = UserSignupSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n validated_data = 
serializer.validated_data\n\n email = validated_data.get(\"email\")\n try:\n user = User.objects.get(email=email)\n except User.DoesNotExist:\n user = serializer.create(validated_data)\n\n if user.is_active is True:\n content = {\"message\": f\"{email} already active\"}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n generate_email(\n user=user,\n to_email=user.email,\n subject=\"Verification Email - Example\",\n )\n content = {\"message\": f\"Activation link send to {email}\"}\n return Response(content, status=status.HTTP_201_CREATED)\n\n\n@api_view((\"POST\",))\ndef resend_verification_email(request):\n email = request.POST.get(\"email\")\n try:\n user = User.objects.get(email=email)\n except User.DoesNotExist:\n content = {\"message\": f\"{email} not exist\"}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n if user.is_active is True:\n content = {\"message\": f\"{email} already active\"}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n generate_email(\n user,\n to_email=user.email,\n subject=\"Verification Email - Example\",\n )\n content = {\"message\": f\"Activation link send to {email}\"}\n return Response(content, status=status.HTTP_200_OK)\n\n\n@api_view((\"PUT\",))\ndef verify_user(request, token):\n try:\n payload = jwt.decode(token, settings.SECRET_KEY, algorithms=\"HS256\")\n\n except jwt.DecodeError as identifier:\n return Response(\n {\"message\": \"Activation link invalid\"}, status=status.HTTP_400_BAD_REQUEST\n )\n except jwt.ExpiredSignatureError as identifier:\n return Response(\n {\"message\": \"Activation link expired\"}, status=status.HTTP_400_BAD_REQUEST\n )\n\n user = User.objects.get(id=payload[\"user_id\"])\n if user.is_active is True:\n content = {\"message\": f\"{user.email} already active,Sign In\"}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n serializer = PasswordSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n validated_data = serializer.validated_data\n # create the user with requested password\n user = serializer.create(validated_data, user)\n # activate the user if password has been created\n user.is_active = True\n user.save()\n\n response = {\n \"message\": f\"{user.email} is now active.\",\n \"User\": UserSerializer(user).data,\n \"tokens\": user_tokens(user),\n }\n return Response(data=response, status=status.HTTP_200_OK)\n\n\n@api_view((\"POST\",))\ndef signin(request):\n # custom serializer to validate user data\n serializer = SigninSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n validated_data = serializer.validated_data\n\n email = validated_data.get(\"email\")\n password = validated_data.get(\"password\")\n try:\n user = User.objects.get(email=email)\n except User.DoesNotExist:\n content = {\"message\": f\"{email} not exist\"}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n if user.is_active is False:\n content = {\"message\": f\"{email} not active, Sign Up\"}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n user = authenticate(email=email, password=password)\n if user:\n update_last_login(None, user)\n data = {\"user\": UserSerializer(user).data, \"tokens\": user_tokens(user)}\n return Response(data)\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"579385038","text":"# Operadores Aritméticos\n# Soma +\nsomando = 10 + 10\n\n# Subtração 
–\nsubtraindo = 10 - 2\n\n# Multiplicação *\nmultiplicando = 10 * 5\n\n# Divisão /\ndividindo = 10 / 2\n\n# Potencia **\npotencia = 2 ** 4\n\n# Resto de uma divisão %\nresto1 = 15 % 2\nresto2 = 15 % 5\n\nn1 = 7\nn2 = 9\nn3 = 8\nn4 = 9\nmedia = (n1 + n2 + n3 + n4) / 4\n\nprint('Somando 10 + 10 =', somando)\nprint('Subtraindo 10 - 2 =', subtraindo)\nprint('Multiplicando 10 * 5 =', multiplicando)\nprint('Dividindo 10 / 2 =', dividindo)\nprint('2 elevado a 4 =', potencia)\nprint('O resto da divisao entre 15 por 2 é:', resto1)\nprint('O resto da divisao entre 15 por 5 é:', resto2)\n","sub_path":"aprendendo_a_sintaxe/operadores_aritmeticos.py","file_name":"operadores_aritmeticos.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"412205573","text":"import os, csv, random\nimport requests\nimport urllib.request\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nfrom colorama import init, Fore, Back, Style\n\n\ndef assortment_status():\n\tlines()\n\tprint(to_green(\"\\tЩо робимо?\"))\n\tlines()\n\tprint(\" 1. Список товарiв на складi.\")\n\tprint(\" 2. Поповнити кiлькiсть товарiв.\")\n\tprint(\" 3. Змiнити цiну товару.\")\n\tprint(\" 4. Додати новий товар до ассортименту.\")\n\tprint(\" 5. Видалити товар з ассортименту.\")\n\tprint(\" 6. Завершити роботу.\")\n\tlines()\n\t\ndef wrong(text):\n\tlines()\n\tprint(Fore.RED + text + Fore.RESET)\n\tlines()\n\ndef to_green(text):\n\ta = Fore.GREEN + str(text) + Fore.RESET\n\treturn a\n\ndef to_red(text):\n\ta = Fore.RED + str(text) + Fore.RESET\n\treturn a\n\ndef to_violet(text):\n\ta = Fore.MAGENTA + str(text) + Fore.RESET\n\treturn a\n\ndef to_yellow(text):\n\ta = Fore.YELLOW + str(text) + Fore.RESET\n\treturn a\n\ndef assortment_list():\n\tos.system(\"cls\")\n\twith open(\"data/storage_and_assortment.csv\", \"r\", encoding=\"utf-8\", newline=\"\") as file:\n\t\treader = csv.reader(file)\n\t\tprint(Fore.MAGENTA + \" Код\" + Fore.CYAN + \"\\t| \" + Fore.MAGENTA + \"К-сть\" + Fore.CYAN + \"\\t| \" + Fore.MAGENTA + \"Цiна\"+ Fore.CYAN + \"\\t| \\t\" + Fore.MAGENTA + \"Назва\" + Fore.RESET)\n\t\tlines()\n\t\tfor ind, val in enumerate(reader):\n\t\t\tif ind > 0:\n\t\t\t\tprint(Fore.GREEN + val[0] + Fore.CYAN + \"\\t| \" + Fore.YELLOW + val[4] + Fore.CYAN + \"\\t| \" + Fore.GREEN + val[5] + Fore.CYAN + \" \\t| \" + Fore.RESET + val[1])\n\t\tlines()\n\ndef yes_or_no():\n\tlines()\n\tprint(Fore.MAGENTA + \"\\t\\t\\t Пiдтвердити операцiю?\")\n\tprint(\"\\n\\n\\t\\t\" + Fore.GREEN + \" Tак\\t\\t\\t\\t\\t\" + Fore.RED + \"Нi\\n\\n\\t\\t\" + Fore.GREEN + \"Enter\\t\\t\\t\\t\" + Fore.RED + \"будь-що + Enter\" + Fore.RESET)\n\tarrow()\n\ta = input()\n\treturn True if a == \"\" else False\n\ndef press_enter():\n\tprint(Fore.MAGENTA)\n\tinput(\"\\n\\t\\t\\tНатисни Enter, щоб продовжити\")\n\tprint(Fore.RESET)\n\ndef arrow():\n\tprint(Fore.GREEN + \"\\n-->\" + Fore.RESET, end=\" \")\n\ndef lines():\n\tprint(Fore.CYAN + \"-\"*79 + Fore.RESET)\n\ndef to_green(text):\n\ta = Fore.GREEN + text + Fore.RESET\n\treturn a\n\ndef reader_checker(number_of_column, value_to_compare):\n\tchecker = 0\n\twith open(\"data/storage_and_assortment.csv\", \"r\", encoding=\"utf-8\", newline=\"\") as file:\n\t\treader = csv.reader(file)\n\t\tfor row in reader:\n\t\t\tif row[number_of_column] == value_to_compare:\n\t\t\t\tchecker += 1\n\treturn checker\n\ndef add_exemplar():\n\t# Початок генерації і записування даних\n\n\t# Код товару\n\twhile 
True:\n\t\tos.system(\"cls\")\n\t\tunit_code_pre = \"\"\n\t\tlines()\n\t\tprint(to_green(\" Код товару\") + \" (6 цифр)\\n(пусте поле, щоб згенерувати випадковий)\\n(щоб вийти в головне меню без збереження змiн, введи - )\")\n\t\tlines()\n\t\tarrow()\n\t\tsome = input()\n\t\tif some == \"-\":\n\t\t\treturn True\n\t\tif some == \"\":\n\t\t\tfor i in range(6):\n\t\t\t\tunit_code_pre += str(random.randint(0, 9))\n\t\telif len(some) == 6 and some.isdigit():\n\t\t\tunit_code_pre = some\n\t\telse:\n\t\t\twrong(\" Щось неправильно, спробуй ще.\")\n\t\t\tpress_enter()\n\t\t\tcontinue\n\n\t\tif reader_checker(0, unit_code_pre) == 0:\n\t\t\tunit_code = unit_code_pre\n\t\t\tbreak\n\t\telse:\n\t\t\twrong(\" Такий код вже зайнято. Потрiбна повторна генерацiя, або ж напиши свiй.\")\n\t\t\tpress_enter()\n\t\t\tcontinue\n\n\t#Ім`я товару\n\twhile True:\n\t\tos.system(\"cls\")\n\t\tlines()\n\t\tprint(to_green(\" Назва товару\") + \" (10 i бiльше символiв)\\n(щоб вийти в головне меню без збереження змiн, введи - ):\")\n\t\tlines()\n\t\tarrow()\n\t\tsome = input()\n\t\tif some == \"-\":\n\t\t\treturn True\n\t\tif len(some) >= 10:\n\t\t\tif reader_checker(1, some) == 0:\n\t\t\t\tunit_name = some\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\twrong(\" Така назва вже зайнята. Спробуй iншу.\")\n\t\t\t\tpress_enter()\n\t\t\t\tcontinue\n\t\telse:\n\t\t\twrong(\" Назва закоротка.\")\n\t\t\tpress_enter()\n\t\t\tcontinue\n\t\n\t#Посилання на торгову площадку\t\n\twhile True:\n\t\tos.system(\"cls\")\n\t\tlines()\n\t\tprint(to_green(\" Посилання на торгову площадку\") + \":\\n(щоб вийти в головне меню без збереження змiн, введи - )\")\n\t\tlines()\n\t\tarrow()\n\t\tsome = input()\n\t\tif some == \"-\":\n\t\t\treturn True\n\t\tif \"http\" in some:\n\t\t\tif reader_checker(2, some) == 0:\n\t\t\t\tlink_to_shop = some\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\twrong(\" Таке посилання вже є у iншого товару. Добавити це посилання(так), чи спробувати iнше(нi)?\")\n\t\t\t\tif yes_or_no():\n\t\t\t\t\tlink_to_shop = some\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\telse:\n\t\t\twrong(\" Посилання невiрне. 
Воно повинно починатись з \\\"http\\\"\")\n\t\t\tpress_enter()\n\t\t\tcontinue\n\n\t#Фото\n\tphoto_in_system = \"\"\n\tif \"aliexpress\" in link_to_shop:\n\t\tos.system(\"cls\")\n\t\tlines()\n\t\tprint(Fore.MAGENTA + \"Йде завантаження фото\" + Fore.RESET)\n\t\tlines()\n\t\tpage = requests.get(link_to_shop)\n\t\tif page.status_code >= 200 and page.status_code < 300:\n\t\t\tsoup = BeautifulSoup(page.text, \"html.parser\")\n\t\t\tphoto_link = str(soup.find_all(\"meta\", property=\"og:image\"))[16:-24]\n\t\t\tphoto_in_system = \"images/\" + unit_code + \".\" + photo_link.split(\".\")[-1]\n\t\t\turllib.request.urlretrieve(photo_link, photo_in_system)\n\t\t\tos.system(\"cls\")\n\t\t\tlines()\n\t\t\tprint(Fore.MAGENTA + \"Фото успiшно завантажено\" + Fore.RESET)\n\t\t\tlines()\n\t\t\tpress_enter()\n\t\telse:\n\t\t\tos.system(\"cls\")\n\t\t\twrong(\" Нажаль, фото не вдалось завантажити.\")\n\t\t\tprint(\" Ви можете завантажити його вручну, а потiм помiстити у папку \" + Fore.MAGENTA + \"/images\" + Fore.RESET)\n\t\t\tprint(\" Iм`я повинне бути: номер_товару.jpg\")\n\t\t\tpress_enter()\n\telse:\n\t\tos.system(\"cls\")\n\t\twrong(\" Нажаль, фото не вдалось завантажити.\")\n\t\tprint(\" Ви можете завантажити його вручну, а потiм помiстити у папку \" + Fore.MAGENTA + \"/images\" + Fore.RESET)\n\t\tprint(\" Iм`я повинне бути: номер_товару.jpg\")\n\t\tpress_enter()\n\n\t# Кількість товару на складі\n\twhile True:\n\t\tos.system(\"cls\")\n\t\tlines()\n\t\tprint(to_green(\"Кількість товару, яка прибула:\"))\n\t\tlines()\n\t\tprint(\" пусте поле, якщо \" + to_violet(\"0\"))\n\t\tlines()\n\t\tarrow()\n\t\tsome = input()\n\t\tif some == \"\":\n\t\t\tquantity_in_stock = 0\n\t\t\tbreak\n\t\telse:\n\t\t\tif some.isdigit():\n\t\t\t\tquantity_in_stock = int(some)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\twrong(\"Це повинно бути число\")\n\t\t\t\tpress_enter()\n\n\n\t# Ціна для продажу: \n\n\twhile True:\n\t\tos.system(\"cls\")\n\t\tlines()\n\t\tprint(to_green(\"Цiна для продажу (грн):\"))\n\t\tlines()\n\t\tarrow()\n\t\tselling_price = input()\n\t\tif selling_price.isdigit():\n\t\t\tbreak\n\t\telse:\n\t\t\twrong(\"Це має бути число.\")\n\t\t\tpress_enter()\n\t\t\tcontinue\n\t# Кінець генерації і записування даних\n\n\t# Додаємо дані до ассортименту\n\twith open(\"data/storage_and_assortment.csv\", \"a\", encoding=\"utf-8\", newline=\"\") as file:\n\t\twriter = csv.writer(file)\n\t\twriter.writerow([\n\t\t\tunit_code,\n\t\t\tunit_name,\n\t\t\tlink_to_shop,\n\t\t\tphoto_in_system,\n\t\t\tquantity_in_stock,\n\t\t\tselling_price\n\t\t])\n\tsave_and_load()\n\tos.system(\"cls\")\n\tlines()\n\tprint(Fore.MAGENTA + \"Товар додано до асортименту:\" + Fore.RESET)\n\tlines()\n\tprint(Fore.CYAN + unit_code + \" | \" + unit_name + \" | \" + photo_in_system + Fore.RESET)\n\n\tpress_enter()\n\n\ndef delete_exemplar():\n\twhile True:\n\t\tassortment_list()\n\t\tlines()\n\t\tprint(to_green(\"Номер товару для видалення:\\n\") + \"(порожнє поле, щоб вийти в головне меню)\\n(добав в кiнцi - ,щоб звiрити код товару з фото)\")\n\t\tlines()\n\t\tarrow()\n\t\tcode_to_delete = input()\n\t\tif code_to_delete == \"\":\n\t\t\tbreak\n\t\tfuture_list = []\n\t\tif code_to_delete.isdigit() and len(code_to_delete) == 6 or len(code_to_delete) == 7 and code_to_delete[-1] == \"-\" and code_to_delete[0:6].isdigit():\n\t\t\tif reader_checker(0, code_to_delete[0:6]) > 0:\n\t\t\t\twith open(\"data/storage_and_assortment.csv\", \"r\", encoding=\"utf-8\", newline=\"\") as file:\n\t\t\t\t\treader = csv.reader(file)\n\t\t\t\t\tfor row in reader:\n\t\t\t\t\t\tif 
row[0] != code_to_delete[0:6]:\n\t\t\t\t\t\t\tfuture_list.append(row)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif \"-\" in code_to_delete:\n\t\t\t\t\t\t\t\tos.startfile(row[3].replace(\"/\", \"\\\\\"))\n\t\t\t\t\t\t\tstuff_to_delete = row[0] + \" | \" + row[1] + \" | \" + row[2]\n\t\t\t\t\t\t\tphoto_to_delete = row[3]\n\t\t\t\tlines()\n\t\t\t\tos.system(\"cls\")\n\t\t\t\tlines()\n\t\t\t\tprint(Fore.MAGENTA + \"Видалити цей товар?\\n\" + Fore.YELLOW + stuff_to_delete + Fore.RESET)\n\t\t\t\tlines()\n\t\t\t\tif yes_or_no():\n\t\t\t\t\twith open(\"data/storage_and_assortment.csv\", \"w\", encoding=\"utf-8\", newline=\"\") as file:\n\t\t\t\t\t\twriter = csv.writer(file)\n\t\t\t\t\t\tfor i in future_list:\n\t\t\t\t\t\t\twriter.writerow(i)\n\t\t\t\t\tos.remove(photo_to_delete)\n\t\t\t\t\tsave_and_load()\n\t\t\t\t\tos.system(\"cls\")\n\t\t\t\t\tlines()\n\t\t\t\t\tprint(Fore.MAGENTA + \"Товар успiшно видалено.\" + Fore.RESET)\n\t\t\t\t\tlines()\n\t\t\t\t\tpress_enter()\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tlines()\n\t\t\t\tprint(Fore.RED + \"Товару з таким кодом не iснує.\" + Fore.RESET)\n\t\t\t\tlines()\n\t\t\t\tpress_enter()\n\t\telse:\n\t\t\tlines()\n\t\t\tprint(Fore.RED + \"Невiрне значення.\" + Fore.RESET)\n\t\t\tlines()\n\t\t\tpress_enter()\n\ndef add_quantity():\n\twhile True:\n\t\tfuture_list = []\n\t\tassortment_list()\n\t\tlines()\n\t\tprint(to_green(\"Який товар потрiбно поповнити? (номер товару)\\n\") + \"(порожнє поле, щоб вийти в головне меню)\\n(добав в кiнцi - ,щоб звiрити код товару з фото)\")\n\t\tlines()\n\t\tarrow()\n\t\tcode_to_quantify = input()\n\t\tif code_to_quantify == \"\":\n\t\t\tbreak\n\t\tif code_to_quantify.isdigit() and len(code_to_quantify) == 6 or len(code_to_quantify) == 7 and code_to_quantify[-1] == \"-\" and code_to_quantify[0:6].isdigit():\n\t\t\tif reader_checker(0, code_to_quantify[0:6]) > 0:\n\t\t\t\twith open(\"data/storage_and_assortment.csv\", \"r\", encoding=\"utf-8\", newline=\"\") as file:\n\t\t\t\t\treader = csv.reader(file)\n\t\t\t\t\tfor row in reader:\n\t\t\t\t\t\tif row[0] != code_to_quantify[0:6]:\n\t\t\t\t\t\t\tfuture_list.append(row)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif \"-\" in code_to_quantify:\n\t\t\t\t\t\t\t\tos.startfile(row[3].replace(\"/\", \"\\\\\"))\n\t\t\t\t\t\t\twhile True:\n\t\t\t\t\t\t\t\tos.system(\"cls\")\n\t\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\t\tprint(Fore.YELLOW + row[0] + \" | \" + row[1] + Fore.RESET)\n\t\t\t\t\t\t\t\tprint(to_green(\"Скiльки товару прибуло?\\n(пусту поле, щоб вийти в головне меню.)\"))\n\t\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\t\tarrow()\n\t\t\t\t\t\t\t\tquantity_to_add = input()\n\t\t\t\t\t\t\t\tif quantity_to_add == \"\":\n\t\t\t\t\t\t\t\t\treturn True\n\t\t\t\t\t\t\t\telif quantity_to_add.isdigit() or quantity_to_add[0] == \"-\" and quantity_to_add[1:].isdigit():\n\t\t\t\t\t\t\t\t\tquantity_to_add = int(quantity_to_add)\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\t\t\tprint(Fore.RED + \"Це повинно бути число.\" + Fore.RESET)\n\t\t\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\t\t\tpress_enter()\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tos.system(\"cls\")\n\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\tprint(Fore.YELLOW + row[0] + \" | \" + row[1] + Fore.RESET)\n\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\tprint(Fore.RED + str(row[4]) + Fore.MAGENTA + \" --> \" + Fore.GREEN + str(int(quantity_to_add) + int(row[4])) + Fore.RESET)\n\t\t\t\t\t\t\tif yes_or_no():\n\t\t\t\t\t\t\t\trow[4] = int(row[4]) + 
quantity_to_add\n\t\t\t\t\t\t\t\tfuture_list.append(row)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tfuture_list.append(row)\n\t\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\t\tprint(Fore.RED + \"Операцiю вiдмiнено.\" + Fore.RESET)\n\t\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\t\tpress_enter()\n\t\t\t\t\t\t\t\treturn True\n\t\t\t\t\twith open(\"data/storage_and_assortment.csv\", \"w\", encoding=\"utf-8\", newline=\"\") as file:\n\t\t\t\t\t\twriter = csv.writer(file)\n\t\t\t\t\t\tfor i in future_list:\n\t\t\t\t\t\t\twriter.writerow(i)\t\t\n\t\t\t\t\tsave_and_load()\n\t\t\t\t\tos.system(\"cls\")\n\t\t\t\t\tlines()\n\t\t\t\t\tprint(Fore.MAGENTA + \"Товар успiшно поповнено\" + Fore.RESET)\n\t\t\t\t\tlines()\n\t\t\t\t\tpress_enter()\n\t\t\telse:\n\t\t\t\tlines()\n\t\t\t\tprint(Fore.RED + \"Такого коду не iснує.\" + Fore.RESET)\n\t\t\t\tlines()\n\t\t\t\tpress_enter()\n\t\telse:\n\t\t\tlines()\n\t\t\tprint(Fore.RED + \"Невiрний код.\" + Fore.RESET)\n\t\t\tlines()\n\t\t\tpress_enter()\n\ndef change_price():\n\twhile True:\n\t\tfuture_list = []\n\t\tassortment_list()\n\t\tlines()\n\t\tprint(to_green(\"Для якого товару мiняємо цiну? (номер товару)\\n\") + \"(порожнє поле, щоб вийти в головне меню)\\n(добав в кiнцi - ,щоб звiрити код товару з фото)\")\n\t\tlines()\n\t\tarrow()\n\t\tcode_to_price = input()\n\t\tif code_to_price == \"\":\n\t\t\tbreak\n\t\tif code_to_price.isdigit() and len(code_to_price) == 6 or len(code_to_price) == 7 and code_to_price[-1] == \"-\" and code_to_price[0:6].isdigit():\n\t\t\tif reader_checker(0, code_to_price[0:6]) > 0:\n\t\t\t\twith open(\"data/storage_and_assortment.csv\", \"r\", encoding=\"utf-8\", newline=\"\") as file:\n\t\t\t\t\treader = csv.reader(file)\n\t\t\t\t\tfor row in reader:\n\t\t\t\t\t\tif row[0] != code_to_price[0:6]:\n\t\t\t\t\t\t\tfuture_list.append(row)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif \"-\" in code_to_price:\n\t\t\t\t\t\t\t\tos.startfile(row[3].replace(\"/\", \"\\\\\"))\n\t\t\t\t\t\t\twhile True:\n\t\t\t\t\t\t\t\tos.system(\"cls\")\n\t\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\t\tprint(Fore.YELLOW + row[0] + Fore.CYAN + \" | \" + Fore.YELLOW + row[1] + Fore.RESET + \"\\nПоточна цiна на товар: \" + row[5])\n\t\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\t\tprint(to_green(\"Нова цiна на товар:\\n\") + \"(пусте поле, щоб вийти в головне меню.)\")\n\t\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\t\tarrow()\n\t\t\t\t\t\t\t\tprice_to_be = input()\n\t\t\t\t\t\t\t\tif price_to_be == \"\":\n\t\t\t\t\t\t\t\t\treturn True\n\t\t\t\t\t\t\t\telif price_to_be.isdigit():\n\t\t\t\t\t\t\t\t\tprice_to_be = int(price_to_be)\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\t\t\tprint(Fore.RED + \"Це повинно бути число.\" + Fore.RESET)\n\t\t\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\t\t\tpress_enter()\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tos.system(\"cls\")\n\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\tprint(Fore.YELLOW + row[0] + \" | \" + row[1] + Fore.RESET)\n\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\tprint(Fore.RED + str(row[5]) + Fore.MAGENTA + \" --> \" + Fore.GREEN + str(price_to_be) + Fore.RESET)\n\t\t\t\t\t\t\tif yes_or_no():\n\t\t\t\t\t\t\t\trow[5] = price_to_be\n\t\t\t\t\t\t\t\tfuture_list.append(row)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tfuture_list.append(row)\n\t\t\t\t\t\t\t\tlines()\n\t\t\t\t\t\t\t\tprint(Fore.RED + \"Операцiю вiдмiнено.\" + Fore.RESET)\n\t\t\t\t\t\t\t\treturn True\n\t\t\t\t\twith open(\"data/storage_and_assortment.csv\", \"w\", encoding=\"utf-8\", newline=\"\") as file:\n\t\t\t\t\t\twriter = csv.writer(file)\n\t\t\t\t\t\tfor i in 
future_list:\n\t\t\t\t\t\t\twriter.writerow(i)\n\t\t\t\t\tsave_and_load()\n\t\t\t\t\tos.system(\"cls\")\n\t\t\t\t\tlines()\n\t\t\t\t\tprint(Fore.MAGENTA + \"Цiну товару успiшно змiнено.\" + Fore.RESET)\n\t\t\t\t\tlines()\n\t\t\t\t\tpress_enter()\n\t\t\telse:\n\t\t\t\twrong(\"Такого коду не iснує.\")\n\t\t\t\tpress_enter()\n\t\t\t\tcontinue\n\t\telse:\n\t\t\twrong(\"Невiрний код.\")\n\t\t\tpress_enter()\n\t\t\tcontinue\n\ndef watch_foto():\n\tlines()\n\tprint(Fore.MAGENTA + \"Введи номер товару, щоб переглянути фото\" + Fore.RESET + \"\\n(порожнє поле, щоб вийти у головне меню)\")\n\tlines()\n\tarrow()\n\tsome = input()\n\tsome_to_show = some + \".\" + \"jpg\"\n\tif some == \"\":\n\t\treturn True\n\tif some.isdigit() and len(some) == 6:\n\t\tif reader_checker(0, some) > 0:\n\t\t\tif some_to_show in os.listdir(\"images/\"):\n\t\t\t\twith open(\"data/storage_and_assortment.csv\", \"r\", encoding=\"utf-8\", newline=\"\") as file:\n\t\t\t\t\treader = csv.reader(file)\n\t\t\t\t\tfor row in reader:\n\t\t\t\t\t\tif row[0] == some:\n\t\t\t\t\t\t\tos.startfile(row[3].replace(\"/\", \"\\\\\"))\n\t\t\t\t\t\t\tassortment_list()\n\t\t\telse:\n\t\t\t\twrong(\"Зображення з таким кодом не iснує.\")\n\t\t\t\tpress_enter()\n\t\telse:\n\t\t\twrong(\"Зображення з таким кодом не iснує.\")\n\t\t\tpress_enter()\n\telse:\n\t\twrong(\"Невiрне значення.\")\n\t\tpress_enter()\n\ndef save_and_load():\n\tos.system(\"cls\")\n\tos.system(\"git pull\")\n\tos.system(\"git add --all\")\n\tcommit = \"git commit -m \\\"\" + str(datetime.today())[0:16] + \"\\\"\"\n\tos.system(commit)\n\tos.system(\"git push\")\n\nos.system(\"git pull\")\n\ninit()\n\nif not os.path.exists(\"images\"):\n\tos.makedirs(\"images\")\nif not os.path.exists(\"data\"):\n\tos.makedirs(\"data\")\nif \"storage_and_assortment.csv\" not in os.listdir(\"data/\"):\n\twith open(\"data/storage_and_assortment.csv\", \"w\", encoding=\"utf-8\", newline=\"\") as file:\n\t\twriter = csv.writer(file)\n\t\twriter.writerow([\"Код товару\", \"Ім`я товару\", \"Посилання на торгову площадку\", \"Шлях до фото\", \"Кількість на складі\", \"Цiна\"])\n\nwhile True:\n\tos.system(\"cls\")\n\tassortment_status()\n\tarrow()\n\tnext_to_do = input()\n\tif next_to_do == \"1\":\n\t\twhile True:\n\t\t\tassortment_list()\n\t\t\tif watch_foto():\n\t\t\t\tbreak\n\telif next_to_do == \"2\":\n\t\tadd_quantity()\n\telif next_to_do == \"3\":\n\t\tchange_price()\n\telif next_to_do == \"4\":\n\t\tadd_exemplar()\n\telif next_to_do == \"5\":\n\t\tdelete_exemplar()\n\telif next_to_do == \"6\":\n\t\tos.system(\"cls\")\n\t\texit()","sub_path":"kaori_storage.py","file_name":"kaori_storage.py","file_ext":"py","file_size_in_byte":17207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"345747694","text":"from Player import Player\nfrom Board import Board\nimport numpy as np\nimport IPython.core.debugger\ndbg = IPython.core.debugger.Pdb()\n\n\nclass Value_Iteration_AI(Player):\n LOSING_STATES = -100\n WINNING_STATES = 100\n\n def __init__(self, opponent, player_id=1, discount_factor=0.5, board=None):\n self.player_id = player_id\n self.discount_factor = discount_factor\n self.states = []\n self.value_function = []\n self.policy = []\n self.opponent = opponent\n self.board = board\n\n if self.board is None:\n self.board = Board()\n\n self.value_iteration()\n\n def reward_function(self, state_info1, state_info2):\n if self.board.is_game_over():\n if state_info2[1] == 0 and state_info2[3] == 0: # winning state\n return 100\n elif state_info2[0] == 0 
and state_info2[2] == 0: # losing state\n return -100\n else:\n return 0 # draw state\n else:\n # if my player eats a opponent's piece, gain 5. if my player eats a opponent's king, gain 10\n gained_reward = 5*(state_info2[0] - state_info1[0]) + 10 * (state_info2[2] - state_info1[2])\n # if opponent eats my piece, punish -5. if opponent eats my king, punish -10\n lost_reward = 5*(state_info2[1] - state_info1[1]) + 10 * (state_info2[3] - state_info1[3])\n\n return gained_reward - lost_reward\n\n def get_reward(self, current_spots, next_spots):\n current_status = self.board.get_states_from_boards_spots([current_spots])\n next_status = self.board.get_states_from_boards_spots([next_spots])\n\n return self.reward_function(current_status[0], next_status[0])\n\n def get_transition_probabilities(self, actions, opponent_action):\n # the probability of taking the action is calculated by 1 / (number of actions x number of opponent actions)\n return 1 / (len(actions) * len(opponent_action))\n\n def get_value(self, state):\n try: # if the state has already observed, find state's index and state's value and return them\n index = self.states.index(state)\n\n return self.value_function[index], index\n except ValueError: # if the state has not been observed yet, create a new state and add it to states array\n self.states.append(state)\n self.value_function.append(0)\n index = len(self.value_function) - 1\n\n return 0, index\n\n def calculate_value_of_action(self, state, possible_moves, opponent_moves):\n next_state = self.board.spots # determine next state\n next_state_value = self.get_value(next_state)[0] # obtain value of next state. If the state is not in the states array, this function creates the state and adds to the array\n reward = self.get_reward(state, next_state)\n prob = self.get_transition_probabilities(possible_moves, opponent_moves)\n\n return prob * (reward + self.discount_factor * next_state_value)\n\n def calculate_expected_value(self, state):\n if self.board.is_game_over():\n return [self.LOSING_STATES]\n\n possible_moves = self.board.get_possible_next_moves()\n expected_value = np.zeros(len(possible_moves))\n\n for i in range(len(possible_moves)):\n move = possible_moves[i]\n self.board.set_spots(state) # recover board to state condition\n\n self.board.make_move(move) # make my move\n opponent_moves = [self.opponent.get_next_move()] # determine possible opponent's moves\n\n if self.board.is_game_over():\n expected_value[i] = self.calculate_value_of_action(state, possible_moves, opponent_moves)\n self.board.switch_turn()\n\n continue\n\n for opp_move in opponent_moves: # maybe there can be more than one opponent moves\n self.board.make_move(opp_move) # make opponent move to obtain next state\n expected_value[i] += self.calculate_value_of_action(state, possible_moves, opponent_moves)\n\n return expected_value\n\n def value_iteration(self, theta=0.0001):\n self.states.append(self.board.spots)\n self.value_function.append(0)\n\n while True:\n delta = 0\n\n for state in self.states:\n self.board.set_spots(state) # make the board look like same as the state\n\n v, index = self.get_value(state)\n\n expected_value = self.calculate_expected_value(state)\n\n self.value_function[index] = np.max(expected_value)\n\n delta = max(delta, np.abs(v - self.value_function[index]))\n\n if delta < theta:\n break\n\n self.board.reset_board()\n self.calculate_policy()\n\n def calculate_policy(self):\n for state in self.states:\n expected_value = self.calculate_expected_value(state) # get values of actions\n 
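            # Annotation: `self.policy` is created as a plain list in __init__, so the
            # assignments below (`self.policy[state] = ...` and, two lines later,
            # `self.policy[state, best_action] = 1.0`) raise TypeError: a Python list
            # cannot be indexed by a board state. A minimal repair, assuming the board
            # spots can be encoded as a hashable key (and `self.policy = {}` in
            # __init__), is a dict of one-hot action rows:
            #
            #     key = tuple(map(tuple, state))             # hashable board encoding
            #     one_hot = np.zeros(len(expected_value))
            #     one_hot[int(np.argmax(expected_value))] = 1.0
            #     self.policy[key] = one_hot
            #
            # get_next_move would then look the current board up with the same key.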
self.policy[state] = [0 for i in expected_value] # init policy's values for this state\n\n best_action = np.argmax(expected_value) # find best action in this state\n self.policy[state, best_action] = 1.0 # assign best action to 1\n\n def game_completed(self):\n pass\n\n def get_next_move(self):\n \"\"\"\n Gets the desired next move from the AI.\n \"\"\"\n current_state = self.board.spots # determine current state\n determine_policies = np.array(self.policy[current_state]) # obtain policy array for current state\n possible_actions = self.board.get_possible_next_moves() # obtain available actions\n\n return possible_actions[determine_policies == 1] # return selected action whose value is 1\n\n","sub_path":"Value_Iteration_AI.py","file_name":"Value_Iteration_AI.py","file_ext":"py","file_size_in_byte":5999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"427931207","text":"from util import *\n\n\n@apply\ndef apply(given):\n ((f, (z, xi, direction)), _f), (_xi, a, b) = given.of(All[Equal[Limit]])\n assert direction == 0\n assert xi == _xi\n assert _f == f._subs(z, xi)\n assert b >= a\n return Any[xi:a:b](All[z:a:b](f >= _f))\n\n\n@prove\ndef prove(Eq):\n from axiom import calculus\n\n a = Symbol(real=True)\n b = Symbol(real=True, domain=Interval(a, oo, left_open=True))\n f = Function(real=True)\n from axiom.calculus.all_eq.imply.all_any_eq.intermediate_value_theorem import is_continuous\n Eq << apply(is_continuous(f, a, b))\n\n\nif __name__ == '__main__':\n run()\n# created on 2020-06-13\n","sub_path":"axiom/calculus/is_continuous/imply/any_all_ge/extreme_value_theorem.py","file_name":"extreme_value_theorem.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"218399992","text":"# Work with Python 3.6\r\nfrom discord.ext.commands import Bot\r\nimport discord\r\nfrom collections import defaultdict\r\n\r\nimport xlrd\r\nfrom xlrd import open_workbook\r\nfrom xlutils.copy import copy\r\nimport xlwt\r\nfrom xlwt import Workbook\r\n\r\nTOKEN = 'NjQwNjczMTQzNDg4MzgwOTI4.Xb-CLA.r-15GdxMJ8Ewv5hxCgWB1yMqsOI'\r\n\r\nloc = (\"JerkData.xls\")\r\n\r\nclient = discord.Client()\r\n\r\nbot = Bot('!')\r\npoints = defaultdict(int)\r\n\r\n@client.event\r\nasync def on_ready():\r\n game = discord.Game(\"with Myself\")\r\n await client.change_presence(status=discord.Status.idle, activity=game)\r\n print(\"Logged in as: {0.user}\".format(client))\r\n\r\n@client.event\r\nasync def on_message(message):\r\n onSheet = 0;\r\n # we do not want the bot to reply to itself\r\n if message.author == client.user:\r\n return\r\n\r\n wb = xlrd.open_workbook(loc)\r\n sheet = wb.sheet_by_index(0);\r\n mentionUser = message.author.mention;\r\n rows = sheet.nrows;\r\n userRow = 0;\r\n userCount = 0;\r\n\r\n for i in range(0, rows):\r\n if sheet.cell_value(i, 0) == message.author.name:\r\n userRow = i;\r\n userCount = sheet.cell_value(i, 1);\r\n onSheet = 1;\r\n\r\n wb = copy(wb);\r\n sheet = wb.get_sheet('Sheet 1');\r\n if onSheet == 0:\r\n sheet.write(rows, 0, message.author.name);\r\n userRow = rows;\r\n\r\n if message.content.startswith('jerk'):\r\n userCount += 1.0;\r\n sheet.write(userRow, 1, userCount);\r\n await message.channel.send(\"{} has jerked off {} time.\".format(mentionUser, userCount));\r\n\r\n\r\n wb.save('JerkData.xls') 
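    # Annotation: the xlrd -> xlutils.copy -> xlwt round trip used in this handler
    # is the usual way to "edit" a legacy .xls file, because xlrd workbooks are
    # read-only. A self-contained sketch of just that pattern (the cell coordinates
    # and the increment are illustrative, not taken from the bot):
    #
    #     import xlrd
    #     from xlutils.copy import copy
    #
    #     rb = xlrd.open_workbook('JerkData.xls')          # read-only view
    #     count = rb.sheet_by_index(0).cell_value(0, 1)    # read one cell
    #     wb = copy(rb)                                    # writable xlwt clone
    #     wb.get_sheet(0).write(0, 1, count + 1)           # update the cell
    #     wb.save('JerkData.xls')                          # write back in place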
\r\nclient.run(TOKEN)","sub_path":"JerkBot/JerkBot.py","file_name":"JerkBot.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"484520111","text":"import pygame\nimport sys\nimport math\nimport time\nimport random\n\ndef dreptunghi(screen, x, y, l, w, color):\n pygame.draw.line(screen, color, [x, y], [x + l, y])\n pygame.draw.line(screen, color, [x+ l,y], [x + l,y + w])\n pygame.draw.line(screen, color, [x + l,y + w], [x ,y + w])\n pygame.draw.line(screen, color, [x ,y + w],[x ,y ])\n \ndef paleta(screen, X, Y, L, W,color):\n pygame.draw.line(screen, color, [X, Y], [X + L, Y])\n pygame.draw.line(screen, color, [X + L,Y],[X +L,Y+ W])\n pygame.draw.line(screen, color, [X + L,Y + W], [X,Y + W])\n pygame.draw.line(screen, color, [X,Y + W],[X,Y])\n \ndef minge(screen, x, y, r, color, w):\n pygame.draw.circle(screen, color,[x, y], r, w)\n \ndef intersects(mx, my, r, left, top, right, bottom):\n if mx < left:\n closestX = left\n else:\n if mx > right:\n closestX = right\n else:\n closestX = mx\n\n if my < top:\n closestY = top\n else:\n if my > bottom:\n closestY = bottom\n else:\n closestY = my\n\n dx = closestX - mx\n dy = closestY - my\n\n return (dx * dx + dy * dy) <= r * r \n\n#culorile sunt RED,GREEN,BLUE \nRED = (255 ,0, 0)\nWHITE=(255,255,255)\nBLUE=(0,0,255)\nGREEN=(0,255,0)\nBLACK=(0,0,0)\npygame.init()\n\nscore=0\n\nsize=[500, 350]\nscreen=pygame.display.set_mode(size)\n\npx=200\npy=280\n\nr=7\n\nmx=250\nmy=50\n\n#ui=random.randint(0,360)* math.pi/180\nui=0\n\npdir=0\n\ncycle=0\n\nkeypush= -1\n\ncaramizi=[1,1,1,1,1,1,1,1,1]\n\nwon=False\nlose=False\n\nlives=3\ntextcount=0\n\npygame.display.set_caption(\"PRO-LEVEL BREAK-OUT\")\nfont=pygame.font.SysFont('Calibri',25,True,False)\n\n\nmdir=2\ndone=False\nclock=pygame.time.Clock()\n\npygame.key.set_repeat(1,10)\n\nwhile not done: \n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n done=True\n keys = pygame.key.get_pressed()\n \n if keys[pygame.K_RIGHT]:\n px=px + 5\n if pdir<0:\n pdir=0\n pdir=pdir + 1\n if pdir>5:\n pdir=5\n keypush=cycle\n if px+100>=500:\n px=400\n \n if keys[pygame.K_LEFT]: \n px=px - 5\n if pdir>0:\n pdir=0\n pdir=pdir - 1\n if pdir<-5:\n pdir=-5\n keypush=cycle\n if px<=0:\n px=0\n \n \n if abs(keypush-cycle) >15:\n pdir=0\n \n \n \n screen.fill(WHITE)\n dreptunghi(screen,0,300,500,5,BLACK)\n textscore= font.render(\"score: \" + str(score),True,BLACK)\n screen.blit(textscore,[225,325])\n textlives= font.render(\"lives: \"+ str(lives),True,BLACK)\n screen.blit(textlives,[10,325])\n \n \n \n if score==90:\n wintext=font.render(\"YOU WIN! ;)\",True,GREEN)\n screen.blit(wintext,[225,175]) \n done=True\n won=True\n \n if lives==0:\n losetext=font.render(\"YOU LOSE! 
:(\",True,RED)\n screen.blit(losetext,[225,175])\n done=True\n lose=True\n \n if my + r >= 300 :\n lives=lives-1\n textcount=200\n \n if textcount>0:\n textcount=textcount-1 \n textliveslost=font.render(\"- 1 LIFE\",True,BLACK)\n screen.blit(textliveslost,[400,325])\n mx=250\n my=50\n \n for i in range(9):\n if caramizi[i]==1:\n dreptunghi(screen, 25 + i*50, 10, 50, 25, RED)\n \n paleta(screen, px, py,100,10,BLUE)\n \n mx=mx + mdir * math.sin(ui) \n if mx <= 0 + r:\n ui= - ui\n \n if mx>= 500 - r:\n ui= - ui\n my=my + mdir * math.cos(ui) \n \n if my <= 0 + r:\n ui= math.pi - ui\n \n #AICI E PALETA\n if intersects(mx, my, r, px,py,px+ 100,py+10) : \n # if pdir!=0:\n # ui=math.pi -ui + pdir * math.pi/16\n #else:\n # ui=math.pi -ui\n #ui=math.pi - math.atan2(my,mx) \n #ui=math.pi/2 + math.atan2(my,mx)\n #ui=3*math.pi/2 - math.atan2(my,mx)\n if px <= mx <= px + 50:\n ui= 3*math.pi/2 - math.atan2(my,mx)\n else:\n ui=math.pi/2 + math.atan2(my,mx)\n \n\n \n if my>= 300 - r : \n ui= math.pi -ui\n # AICI E CARAMIDA \n for i in range(9):\n \n #if caramizi[i]==1 and (10 +25+r -2<=int(my)<=10 + 25 + r+2) and (25 + i*50<=mx<=25 + i*50 + 50) :\n #caramizi[i]=0\n #score=score+10\n #ui=math.pi - ui\n if caramizi[i]==1 and intersects(mx,my,r,25+i*50,10,25+i*50+50,25):\n caramizi[i]=0\n score=score+10\n ui=math.pi - math.atan2(my,mx)\n if caramizi[i]==1 and (10-2<=my <= 10 +25+2) and (mx + r-2<=25+i*50<=mx+r+2):\n caramizi[i]=0\n ui= -ui\n score=score+10\n if caramizi[i]==1 and (10-2<=my <= 10 +25+2) and (mx + r-2<=25+50+i*50<=mx+r+2):\n caramizi[i]=0\n ui= -ui\n score=score+10\n \n minge(screen,int(mx),int(my), r,GREEN, 0)\n \n pygame.display.flip()\n \n clock.tick(100)\n cycle=cycle + 1\n cycle=cycle %60 \n \nif won:\n time.sleep(5)\nif lose:\n time.sleep(5)\n \npygame.quit()\nsys.exit() ","sub_path":"breakout4.py","file_name":"breakout4.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"387325622","text":"from eduvpn.utils import logger\nimport gi\ngi.require_version('Gtk', '3.0') # noqa: E402\nfrom gi.repository import Gtk, GObject\n\n\n# ui thread\ndef error_helper(parent: GObject, # type: ignore\n msg_big: str,\n msg_small: str) -> None:\n \"\"\"\n Shows a GTK error message dialog.\n args:\n parent (GObject): A GTK Window\n msg_big (str): the big string\n msg_small (str): the small string\n \"\"\"\n logger.error(f\"{msg_big}: {msg_small}\")\n error_dialog = Gtk.MessageDialog( # type: ignore\n parent,\n 0,\n Gtk.MessageType.ERROR, # type: ignore\n Gtk.ButtonsType.OK, # type: ignore\n str(msg_big),\n )\n error_dialog.format_secondary_text(str(msg_small)) # type: ignore\n error_dialog.run() # type: ignore\n error_dialog.hide() # type: ignore\n\n\ndef show_ui_component(builder, component: str, show: bool):\n \"\"\"\n Set the visibility of a UI component.\n \"\"\"\n component = builder.get_object(component)\n if show:\n component.show() # type: ignore\n else:\n component.hide() # type: ignore\n\n\ndef link_markup(link: str) -> str:\n try:\n scheme, rest = link.split(':', 1)\n if rest.startswith('//'):\n rest = rest[2:]\n except ValueError:\n return link\n else:\n return f'{rest}'\n","sub_path":"eduvpn/ui/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"160824229","text":"'''\nAggregate and summarize 33 million rows of accelerator and gyroscope\nsensor data from smart watches and 
mobile phones. Output processed data\nto HDF files to preserve metadata.\n'''\n# Join and summarize 33 million rows of smart watch and phone sensor\n# with data Pandas using MapReduce paradigm.\n# output to HDF5 files\n\n# file streaming. chunk as generator object.\n#\n# Group and summarize activity 33 million rows of sensor data by activity,\n# user, device and sensor type. Concatenate all summarized data into dataframe.\n# Output to HDF5 format (binary format for preserve metadata.\n#\n# Project assumes that that column names are consistent across CSVs. MapReduce\n# paradigm/approach to calculate mean, standard deviation and variance.\n#\n# Acceleration: rate of change of the velocity of an object.\n#\n# Gyroscope: measure rotational motion\n#\n# 3-axis: x left/right, y forward/backward, z up/down\n#\n# pandas, sensor data, aggregation, HDF5, python, multiindex\n#\n# More than 80% of the processing time is concentrated in grouping and\n# aggregating the data frame chunks. __next__, get_chunk, read functions\n\n\nimport os\n\nimport numpy as np\nimport pandas as pd\n\n\nclass SensorInput:\n '''Read CSV data into data frame chunks.'''\n\n def __init__(self, dir_data, csv_file, column_names, data_types,\n chunk_size):\n self.dir_data = dir_data\n self.csv_file = csv_file\n self.column_names = column_names\n self.data_types = data_types\n self.chunk_size = chunk_size\n\n def read_data(self):\n '''Read CSV data into data frame chunks.\n\n Returns\n -------\n df: pandas.io.parsers.TextFileReader\n '''\n csv_path = os.path.join(self.dir_data, self.csv_file)\n df = pd.read_csv(csv_path, usecols=self.column_names,\n dtype=self.data_types, chunksize=self.chunk_size)\n return df\n\n\nclass SensorProcess:\n '''Process data frame chunks into aggregated and summarized data.'''\n\n def __init__(self, agg_funcs, cols_axes, group_by):\n self.agg_funcs = agg_funcs\n self.cols_axes = cols_axes\n self.group_by = group_by\n\n @staticmethod\n def _reduce_mean(count, mean):\n '''Reduce mapped means.\n\n Parameters\n ----------\n count : pandas.Series\n mean : pandas.Series\n\n Returns\n -------\n float\n '''\n return count.multiply(mean).sum() / count.sum()\n\n def _reduce_variance(self, count, mean, variance):\n '''Reduce mapped variances.\n\n Parameters\n ----------\n count : pandas.Series\n mean : pandas.Series\n variance : pandas.Series\n\n Returns\n -------\n reduced_variance : float\n '''\n reduced_mean = self._reduce_mean(count, mean)\n delta = mean.subtract(reduced_mean).pow(2)\n reduced_variance = ((count.multiply(variance).sum() +\n count.multiply(delta).sum()) /\n count.sum())\n return reduced_variance\n\n def _reduce_summary_statistics(self, s, g, cols):\n '''Calculate summary statistics.\n\n Parameters\n ----------\n s : dict\n g : pandas.DataFrame\n Data frame of grouped data.\n cols : list\n List of summary statistics columns names.\n\n Returns\n -------\n s : dict\n '''\n count, max_, mean, min_, std, sum_, var = cols\n s[count] = g[count].sum()\n s[sum_] = g[sum_].sum()\n s[mean] = self._reduce_mean(g[count], g[mean])\n s[min_] = g[min_].min()\n s[max_] = g[max_].max()\n s[var] = self._reduce_variance(g[count], g[mean], g[var])\n s[std] = np.sqrt(s[var])\n return s\n\n def _reduce_groups(self, group, cols_stats):\n '''Apply summary statistics function to subsets of columns.\n\n The summary statistics columns (e.g. count, sum, etc.) are\n repeated for each x, y and z direction. 
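        Annotation: the chunk-combining identities implemented by _reduce_mean
        and _reduce_variance above are the standard pooled formulas. With
        per-chunk counts n_i, means m_i and variances v_i:

            mean = sum(n_i * m_i) / sum(n_i)
            var  = (sum(n_i * v_i) + sum(n_i * (m_i - mean)**2)) / sum(n_i)

        A quick self-contained check on toy numbers (not project data):

            import numpy as np
            x = np.arange(10.0)
            chunks = [x[:4], x[4:]]
            n = np.array([len(c) for c in chunks])
            m = np.array([c.mean() for c in chunks])
            v = np.array([c.var() for c in chunks])
            mean = (n * m).sum() / n.sum()
            var = ((n * v).sum() + (n * (m - mean) ** 2).sum()) / n.sum()
            assert np.isclose(mean, x.mean()) and np.isclose(var, x.var())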
Apply the summary\n statistics function to each direction subgroup.\n\n Parameters\n ----------\n group : pandas.DataFrame\n Data frame of grouped data.\n cols_stats : list\n List of data frame column names.\n\n Returns\n -------\n pandas.Series\n '''\n series = {}\n\n for i in range(len(self.cols_axes)):\n cols_direction = [col for col in cols_stats\n if col.startswith(self.cols_axes[i])]\n cols_direction.sort()\n series = self._reduce_summary_statistics(series, group,\n cols_direction)\n\n index = cols_stats\n return pd.Series(series, index=index)\n\n def _reduce_chunks(self, df):\n '''Apply reduce functions to grouped data frame.\n\n Parameters\n ----------\n df : pandas.DataFrame\n Data frame of aggregated data.\n\n Returns\n -------\n df_reduced : pandas.DataFrame\n '''\n std_col_names = zip(self.cols_axes, ['std'] * len(self.cols_axes))\n cols_std = [''.join(s) for s in std_col_names]\n cols_stats = df.columns.tolist() + cols_std\n\n df_reduced = (df.groupby(self.group_by)\n .apply(lambda group: self._reduce_groups(group,\n cols_stats)))\n df_reduced.sort_index(axis='columns', inplace=True)\n df_reduced.drop(self.group_by, axis='columns', inplace=True)\n return df_reduced\n\n def _aggregate_chunks(self, df):\n '''Group and aggregate TextFileReader chunks.\n\n Parameters\n ----------\n df : pandas.io.parsers.TextFileReader\n\n Returns\n -------\n df_agg : pandas.DataFrame\n '''\n intermediate_stats = []\n\n for chunk in df:\n chunk = (chunk.groupby(self.group_by)\n .aggregate(self.agg_funcs)\n .reset_index())\n chunk.columns = ['_'.join(col) if col[1] else col[0] for col\n in chunk.columns]\n intermediate_stats.append(chunk)\n\n df_agg = (pd.concat(intermediate_stats, axis='index')\n .reset_index(drop=True))\n return df_agg\n\n def process_sensor(self, df):\n '''Run the processing functions.\n\n Parameters\n ----------\n df : pandas.io.parsers.TextFileReader\n\n Returns\n -------\n df : pandas.DataFrame\n '''\n df = self._aggregate_chunks(df)\n df = self._reduce_chunks(df)\n return df\n\n\nclass SensorOutput:\n '''Output aggregated and summarized data to HDF to preserve metadata.'''\n\n def __init__(self, dir_output, output_file_name):\n self.dir_output = dir_output\n self.output_file_name = output_file_name\n\n def to_hdf(self, df):\n '''Write summarized data frame to HDF.\n\n Parameters\n ----------\n df : pandas.DataFrame\n\n Returns\n -------\n None\n '''\n path_output = os.path.join(self.dir_output, self.output_file_name)\n hdf_key = self.output_file_name.split('.')[0]\n df.to_hdf(path_output, key=hdf_key, format='table', mode='w')\n\n\ndef main():\n '''Run processing program.'''\n\n dir_data = os.path.abspath('data/')\n dir_output = os.path.abspath('output/')\n csv_files = [\n 'Phones_accelerometer.csv',\n 'Phones_gyroscope.csv',\n 'Watch_accelerometer.csv',\n 'Watch_gyroscope.csv'\n ]\n hdf_files = [\n 'phone_accel.h5',\n 'phone_gyro.h5',\n 'watch_accel.h5',\n 'watch_gyro.h5'\n ]\n\n column_names = ('x', 'y', 'z', 'User', 'Model', 'Device', 'gt')\n data_types = {\n 'x': np.float32,\n 'y': np.float32,\n 'z': np.float32,\n 'User': 'category',\n 'Model': 'category',\n 'Device': 'category',\n 'gt': 'category'\n }\n chunk_size = 100000\n\n agg_funcs = ['count', 'sum', 'mean', 'min', 'max', 'var']\n group_by = ['gt', 'User', 'Device']\n cols_axes = ('x_', 'y_', 'z_')\n\n input_output = zip(csv_files, hdf_files)\n\n for input_file, output_file in input_output:\n\n sensor_input = SensorInput(dir_data, input_file, column_names,\n data_types, chunk_size)\n df = 
sensor_input.read_data()\n\n sensor_process = SensorProcess(agg_funcs, cols_axes, group_by)\n df = sensor_process.process_sensor(df)\n\n sensor_output = SensorOutput(dir_output, output_file)\n sensor_output.to_hdf(df)\n\n\nif __name__ == '__main__':\n\n main()\n","sub_path":"activity-sensors-batch-processing/batch_process_activity_sensors.py","file_name":"batch_process_activity_sensors.py","file_ext":"py","file_size_in_byte":8666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"387335695","text":"l1 = int(input())\r\nw1 = int(input())\r\nh1 = int(input())\r\nl2 = int(input())\r\nw2 = int(input())\r\nh2 = int(input())\r\nlc = int(input())\r\nwc = int(input())\r\nhc = int(input())\r\nnumber2 = (lc >= l1 + l2) and (wc >= w1) and (wc >= w2)\r\nnumber3 = (lc >= l1 + w2) and (wc >= w1) and (wc >= l2)\r\nnumber4 = (lc >= w1 + w2) and (wc >= l1) and (wc >= l2)\r\nnumber5 = (lc >= w1 + l2) and (wc >= w2) and (wc >= l1)\r\nnumber6 = (wc >= l1 + l2) and (lc >= w1) and (lc >= w2)\r\nnumber7 = (wc >= l1 + w2) and (lc >= w1) and (lc >= l2)\r\nnumber8 = (wc >= w1 + w2) and (lc >= l1) and (lc >= l2)\r\nnumber9 = (wc >= w1 + l2) and (lc >= w2) and (lc >= l1)\r\nnumber0 = (lc >= l1) and (wc >= w1)\r\nnumber11 = (lc >= l2) and (wc >= w2)\r\nnumber12 = (lc >= w1) and (wc >= l1)\r\nnumber13 = (lc >= w2) and (wc >= l2)\r\nif (hc < h1) or (hc < h2):\r\n print('NO')\r\nelif (number11 or number13) and (number12 or number13):\r\n if hc >= (h1 + h2):\r\n print('YES')\r\n elif number2 or number3 or number4 or number5:\r\n print('YES')\r\n elif number6 or number7 or number8 or number9:\r\n print('YES')\r\n else:\r\n print('NO')\r\nelse:\r\n print('NO')\r\n","sub_path":"2_упаковка3.py","file_name":"2_упаковка3.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"560382207","text":"import TagScriptEngine\nfrom unittest import TestCase\n\nclass test_edgecase_functionality(TestCase):\n\n def setUp(self):\n \"\"\" Sets up engine and other variables that might be needed between tests \"\"\"\n self.engine = TagScriptEngine.Engine()\n def tearDown(self):\n \"\"\" Cleans the plate to make tests consistent \"\"\"\n self.engine.Clear_Variables()\n self.engine = None\n\n # Actual tests below\n # ======\n def test_edgecase_missing_var_in_math(self):\n\n self.engine.Add_Variable(\"1\", \"30\")\n self.engine.Add_Variable(\"2\", \"2\")\n #self.engine.Add_Variable(\"3\", \"0\")\n trouble = self.engine.Process(\"m{($1+1+0$3=)*$2}\")\n\n self.assertEqual(\"62\", trouble)\n\n def test_edgecase_missing_var_in_math_substitute(self):\n\n self.engine.Add_Variable(\"1\", \"30\")\n self.engine.Add_Variable(\"2\", \"2\")\n #self.engine.Add_Variable(\"3\", \"0\")\n trouble = self.engine.Process(\"m{($1+1+0$3=0)*$2}\")\n\n self.assertEqual(\"62\", trouble)\n\n def test_edgecase_variable_in_variable_assignment_isnt_replaced(self):\n self.engine.Add_Variable(\"user\", \"Carl#0001\")\n\n trouble = self.engine.Process(\"!{f=#{hello $user~hello $user}}$f\")\n\n self.assertEqual(\"hello Carl#0001\", trouble)\n \n def test_edgecase_reusable_list_in_var_assignement(self):\n seen_yen = False\n seen_carl = False\n seen_py = False\n for _ in range(150):\n phrase = self.engine.Process(\"#user{carl,yenni,pysnow} !{player1=$user} $player1\").strip(' ')\n if \"pysnow\" in phrase:\n seen_py = True\n if \"carl\" in phrase:\n seen_carl = True\n if \"yenni\" in phrase:\n seen_yen = True\n self.assertTrue(seen_carl 
and seen_py and seen_yen)\n ","sub_path":"TagScriptTests/test_edgecases.py","file_name":"test_edgecases.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"461178383","text":"import hashlib,os,pickle\r\nclass HashType:\r\n MD5=0\r\n SHA1=1\r\n SHA256=2\r\n SHA384=3\r\n SHA512=4\r\nclass HashItem:\r\n def __init__(self,path,hashType,hashValue):\r\n self.meth=path\r\n self.tipp=hashType\r\n self.val=hashValue\r\n def equals(self,otherGuy):\r\n if type(otherGuy)==type(self):\r\n if otherGuy.val:return self.val==otherGuy.val\r\n else:return False\r\n elif type(otherGuy)==bytes:\r\n return self.val==otherGuy.val\r\ndef GetHashObjFromType(heshType):\r\n if heshType==HashType.MD5:return hashlib.md5()\r\n elif heshType==HashType.SHA1:return hashlib.sha1()\r\n elif heshType==HashType.SHA256:return hashlib.sha256()\r\n elif heshType==HashType.SHA384:return hashlib.sha384()\r\n elif heshType==HashType.SHA512:return hashlib.sha512()\r\n else:return None\r\ndef GetHashObjOfFyle(puth,heshType=HashType.MD5,buffSize=1024*1024*10):\r\n hesh=GetHashObjFromType(heshType)\r\n with open(puth,'rb') as inp:\r\n data=inp.read(buffSize)\r\n while len(data)>0:\r\n hesh.update(data)\r\n data=inp.read(buffSize)\r\n return hesh\r\ndef GetHashDigest(put,heshType=HashType.MD5,buffSize=1024*1024*10):\r\n return GetHashObjOfFyle(put,heshType,buffSize).digest()\r\ndef GetHexDigest(puth,heshType=HashType.MD5,buffSize=1024*1024*10):\r\n return GetHashObjOfFyle(puth,heshType,buffSize).hexdigest()\r\nclass HashShite:\r\n def __init__(self):\r\n self.items=list()\r\n def leard(self,peth,heshType=HashType.MD5,bufferSize=1024*1024*512,debug=True):\r\n for root,dirs,fils in os.walk(peth):\r\n for f in fils:\r\n if debug:print(\"Getting hash value for %s\" % os.path.join(root,f))\r\n hashVal=GetHashDigest(os.path.join(root,f),heshType,bufferSize)\r\n if debug:print(hashVal)\r\n self.items.append(HashItem(os.path.join(root,f),heshType,hashVal))\r\nheshShit=HashShite()\r\nheshShit.leard(\"Grand Theft Auto V\")\r\nwith open(\"heshes.shite\",'wb') as out:\r\n pickle.dump(heshShit,out)\r\n","sub_path":"unsorted_guff/Hash Check/getHashes.py","file_name":"getHashes.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"392395397","text":"import re\n\n\nclass ShowInfo:\n\n parser = re.compile(\n r\"^(?P\\d+)$|^(?P\\d+)\\+$|^(?P^\\d+)\\-(?P\\d+)$\")\n\n def __init__(self, begin=1, end=None, min_steps=None):\n assert begin >= 1\n assert end is None or end >= 1\n self.begin = begin\n self.end = end\n if min_steps:\n self._min_steps = min_steps\n else:\n self._min_steps = max(begin, end if end else 1)\n\n @classmethod\n def parse(cls, obj):\n if obj is None:\n return ShowInfo()\n if isinstance(obj, int):\n return ShowInfo(obj, obj)\n if isinstance(obj, str):\n m = cls.parser.match(obj)\n if m is None:\n raise Exception(\"Invalid format of 'show' string: {!r}\"\n .format(obj))\n m = m.groupdict()\n\n if m[\"exact\"] is not None:\n exact = int(m[\"exact\"])\n return ShowInfo(exact, exact)\n\n if m[\"from\"] is not None:\n return ShowInfo(int(m[\"from\"]))\n return ShowInfo(int(m[\"begin\"]), int(m[\"end\"]))\n else:\n raise Exception(\"Invalid show argument\")\n\n def min_steps(self):\n return self._min_steps\n\n def ensure_steps(self, steps):\n min_steps = max(self._min_steps, steps)\n return ShowInfo(self.begin, self.end, min_steps)\n\n def 
is_visible(self, step):\n return self.begin <= step and (self.end is None or step <= self.end)\n\n def __repr__(self):\n return \"\".format(\n self.begin, self.end, self._min_steps)\n","sub_path":"elsie/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"551144003","text":"import numpy as np\n\nclass Genome:\n def __init__(self, x_dim, y_dim, random_weights=True):\n self.genome_type = \"survived\"\n self.x_dim = x_dim\n self.y_dim = y_dim\n self.h_dim = 1\n self.score = 0\n self.fitness = 0\n # fixed bias for simplicity\n # for optimal search, inputs should be normalized in advance\n self.bias = 1\n\n if random_weights:\n self.w1 = np.random.random((\n self.h_dim, self.x_dim + 1\n )) * 2 - 1\n self.w2 = np.random.random((\n self.y_dim, self.h_dim\n )) * 2 - 1\n else:\n self.w1 = np.zeros((self.h_dim, self.x_dim + 1))\n self.w2 = np.zeros((self.y_dim, self.h_dim))\n return\n\n # x, h, and y are input vector, hidden vector, and output vector respectively\n def predict(self, x):\n # append bias to inputs\n x = np.append(self.bias, x)\n\n # multiply by weight and push to hidden layer\n h = np.dot(self.w1, x.reshape(-1, 1))\n # h = self.layer_output(x, self.w1)\n\n # apply relu activation to h\n # sigmoid activation commented out below\n # h = 1 / (1 + np.exp(-1 * h))\n h = h * (h > 0)\n\n # multiply by weight and push to output\n y = np.dot(self.w2, h.reshape(-1, 1))\n\n # return formatted output\n y = np.ndarray.flatten(y > 0)\n return y\n","sub_path":"neat/genome.py","file_name":"genome.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"293337066","text":"from pycompss.api.task import task\nfrom pycompss.api.api import compss_wait_on\n\n\n@task(returns=1)\ndef create_greeting(message, use_storage):\n \"\"\"\n Instantiates a persistent object and populates it with the received\n message.\n :param message: String with the information to store in the psco.\n :return: The populated persistent object.\n \"\"\"\n if use_storage:\n from storage_model.classes import hello\n else:\n from model.classes import hello\n hi = hello()\n hi.message = message\n if use_storage:\n hi.make_persistent(\"greet\")\n return hi\n\n\n@task(returns=1)\ndef greet(greetings):\n \"\"\"\n Retrieves the information contained in the given persistent object.\n :param greetings: Persistent object.\n :return: String with the psco content.\n \"\"\"\n content = greetings.message\n return content\n\n\n@task(returns=1)\ndef check_greeting(content, message):\n \"\"\"\n Checcks that the given content is equal to the given message.\n :param content: String with content.\n :param message: String with message.\n :return: Boolean (True if equal, False otherwise).\n \"\"\"\n return content == message\n\n\ndef parse_arguments():\n \"\"\"\n Parse command line arguments. 
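    Annotation: --use_storage is declared below with argparse's store_true
    action, so the flag's mere presence sets it; a minimal reproduction:

        import argparse
        p = argparse.ArgumentParser()
        p.add_argument('--use_storage', action='store_true')
        assert p.parse_args(['--use_storage']).use_storage is True
        assert p.parse_args([]).use_storage is False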
Make the program generate\n a help message in case of wrong usage.\n :return: Parsed arguments\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Hello world.')\n parser.add_argument('--use_storage', action='store_true',\n help='Use storage?')\n return parser.parse_args()\n\n\ndef main(use_storage):\n message = \"Hello world\"\n greeting = create_greeting(message, use_storage)\n content = greet(greeting)\n result = check_greeting(content, message)\n result_wrong = check_greeting(content, message + \"!!!\")\n result = compss_wait_on(result)\n result_wrong = compss_wait_on(result_wrong)\n if result != result_wrong:\n print(\"THE RESULT IS OK\")\n else:\n msg = \"SOMETHING FAILED!!!\"\n print(msg)\n raise Exception(msg)\n\n\nif __name__ == \"__main__\":\n options = parse_arguments()\n main(**vars(options))\n","sub_path":"python/examples_with_persistent_storage/hello_world/src/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"422161841","text":"class Solution:\n # @param A, a list of integers\n # @return a boolean\n # Method 1: > O(n):\n \"\"\"\n def canJump(A):\n record = [0 for value in range(0, len(A))]\n record[0] = 1\n for i in range(0, len(A)-1):\n if A[i] > 0:\n if i + A[i] >= len(A)-1:\n return True\n else:\n for j in range(1, A[i]+1):\n record[i+j] += 1\n if record[len(A)-1] == 0:\n return False \n \"\"\"\n # Method 2: record the max steps\n def canJump(self, A):\n max_steps = 0\n for i in range(0, len(A)):\n if i <= max_steps:\n max_steps = max(max_steps, i + A[i])\n if max_steps >= len(A)-1:\n return True\n else:\n return False\n return True\n \n \n \n","sub_path":"Python/jump_game.py","file_name":"jump_game.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"12947433","text":"#!/usr/bin/env python\n\nimport mimetypes, os, sys, time, subprocess, datetime, boto, urllib.parse, math\nfrom .lib import S3 as S3lib\nfrom io import StringIO\nfrom multiprocessing import Pool\nfrom housepy import config, log\n\n\ndef upload(source, dest=None, num_processes=2, split=50, force=True, reduced_redundancy=False, verbose=False):\n if dest is None:\n dest = source\n log.info(\"s3.upload %s to %s/%s...\" % (source, config['s3']['bucket'], dest))\n src = open(source)\n s3 = boto.connect_s3(config['s3']['access_key_id'], config['s3']['secret_access_key'])\n bucket = s3.lookup(config['s3']['bucket'])\n key = bucket.get_key(dest)\n if key is not None:\n if not force:\n raise ValueError(\"--> '%s' already exists\" % dest)\n part_size = max(5 * 1024 * 1024, 1024 * 1024 * split)\n src.seek(0,2)\n size = src.tell()\n num_parts = int(math.ceil(size / part_size))\n if size < 5 * 1024 * 1024:\n src.seek(0)\n t1 = time.time()\n k = boto.s3.key.Key(bucket, dest)\n k.set_contents_from_file(src)\n t2 = time.time() - t1\n s = size/1024./1024.\n log.info(\"--> finished uploading %0.2fM in %0.2fs (%0.2fMbps)\" % (s, t2, s/t2))\n return\n mpu = bucket.initiate_multipart_upload(dest, reduced_redundancy=reduced_redundancy)\n log.info(\"--> initialized upload: %s\" % mpu.id)\n def gen_args(num_parts, fold_last):\n for i in range(num_parts+1):\n part_start = part_size*i\n if i == (num_parts-1) and fold_last is True:\n yield (bucket.name, mpu.id, src.name, i, part_start, part_size*2)\n break\n else:\n yield (bucket.name, mpu.id, src.name, i, part_start, part_size)\n 
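    # Annotation: every part of an S3 multipart upload except the last must be at
    # least 5 MiB, so a small final remainder is "folded" into the previous part
    # (gen_args uploads that part with twice part_size and stops). One porting
    # hazard worth flagging: under Python 3's true division,
    # int(math.ceil(size / part_size)) above already counts the remainder part,
    # so gen_args' range(num_parts + 1) can emit a start offset past EOF whenever
    # the remainder is >= 5 MiB, which do_part_upload then rejects as an empty
    # chunk. Floor division restores the Python-2 arithmetic this logic appears
    # to assume:
    #
    #     num_parts = size // part_size   # the +1 in range() covers the remainder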
fold_last = ((size % part_size) < 5*1024*1024)\n try:\n pool = Pool(processes=num_processes)\n t1 = time.time()\n pool.map_async(do_part_upload, gen_args(num_parts, fold_last)).get(9999999)\n t2 = time.time() - t1\n s = size/1024./1024.\n src.close()\n mpu.complete_upload()\n log.info(\"--> finished uploading %0.2fM in %0.2fs (%0.2fMbps)\" % (s, t2, s/t2))\n return True\n except Exception as err:\n log.error(\"--> encountered an error, canceling upload\") \n log.error(log.exc(err))\n mpu.cancel_upload()\n return False\n \n\n\ndef do_part_upload(args):\n \"\"\"\n Upload a part of a MultiPartUpload\n\n Open the target file and read in a chunk. Since we can't pickle\n S3Connection or MultiPartUpload objects, we have to reconnect and lookup\n the MPU object with each part upload.\n\n :type args: tuple of (string, string, string, int, int, int)\n :param args: The actual arguments of this method. Due to lameness of\n multiprocessing, we have to extract these outside of the\n function definition.\n\n The arguments are: S3 Bucket name, MultiPartUpload id, file\n name, the part number, part offset, part size\n \"\"\"\n\n bucket_name, mpu_id, fname, i, start, size = args\n s3 = boto.connect_s3(config['s3']['access_key_id'], config['s3']['secret_access_key'])\n bucket = s3.lookup(bucket_name)\n mpu = None\n for mp in bucket.list_multipart_uploads():\n if mp.id == mpu_id:\n mpu = mp\n break\n if mpu is None:\n raise Exception(\"--> could not find MultiPartUpload %s\" % mpu_id)\n fp = open(fname, 'rb')\n fp.seek(start)\n data = fp.read(size)\n fp.close()\n if not data:\n raise Exception(\"--> unexpectedly tried to read an empty chunk\")\n def progress(x,y):\n log.info(\"Part %d: %0.2f%%\" % (i+1, 1.*x/y))\n t1 = time.time()\n mpu.upload_part_from_file(StringIO(data), i+1, cb=progress)\n t2 = time.time() - t1\n s = len(data)/1024./1024.\n log.info(\"--> uploaded part %s (%0.2fM) in %0.2fs at %0.2fMbps\" % (i+1, s, t2, s/t2))\n\n\ndef delete(path):\n log.info(\"s3.delete\") \n conn = S3lib.AWSAuthConnection(config['s3']['access_key_id'], config['s3']['secret_access_key'])\n log.info(\"--> deleting %s/%s\" % (config['s3']['bucket'], path)) \n try:\n response = conn.delete(config['s3']['bucket'], path)\n except Exception as e:\n log.error(\"--> failed: %s\" % log.exc(e))\n return False\n log.info(\"--> %s\" % response.message)\n return True\n \ndef list_contents():\n log.info(\"s3.list\")\n connection = boto.connect_s3(config['s3']['access_key_id'], config['s3']['secret_access_key'])\n log.info(\"--> listing %s\" % (config['s3']['bucket'])) \n try:\n bucket = connection.get_bucket(config['s3']['bucket'])\n contents = [key.name for key in bucket.list()]\n except Exception as e:\n log.error(\"--> failed: %s\" % log.exc(e))\n return False\n log.info(\"--> %s\" % contents)\n return contents\n\ndef download(path, destination=None):\n if destination is None:\n destination = path\n log.info(\"s3.download\") \n connection = boto.connect_s3(config['s3']['access_key_id'], config['s3']['secret_access_key'])\n log.info(\"--> downloading %s/%s\" % (config['s3']['bucket'], path)) \n try:\n bucket = connection.get_bucket(config['s3']['bucket'])\n key = bucket.get_key(path) \n key.get_contents_to_filename(destination)\n except Exception as e:\n log.error(\"--> failed: %s\" % log.exc(e))\n return False\n log.info(\"--> successfully wrote %s\" % destination)\n return True\n 
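# Annotation: a minimal usage sketch for the helpers above; the paths are
# illustrative, and config.json must supply the s3 bucket and credentials that
# housepy's config exposes:
#
#     upload("clip.mp4", "backups/clip.mp4", num_processes=4, split=50)
#     print(list_contents())
#     download("backups/clip.mp4", "restored.mp4")
#     delete("backups/clip.mp4")
#
# Porting note: do_part_upload wraps its bytes chunk in StringIO, which raises
# TypeError under Python 3 (initial_value must be str); io.BytesIO is the
# matching buffer for data read in 'rb' mode.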
\n","sub_path":"s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"197970600","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Distributed under the terms of the GNU General Public License v2\n#\n# I'm very _very_ lazy.\n#\n# Typing: repoman commit -m \"cat/pkg: msg\" is a massive PITA to me.\n# This script is a convenient helper so that I don't have to invoke repoman and\n# feed him with the \"cat/pkg\" prefix every time.\n# It works out the prefix from the CWD.\n#\n# That's it really.\nimport os, sys, subprocess\n\ndef is_git(cwd):\n git_repository = '{0}{1}/.git'.format(os.sep, os.sep.join(cwd[:-2]))\n return os.path.isdir(git_repository)\n\ndef format_commit_message(directory, msg):\n return '{0}: {1}'.format(directory, msg)\n\ndef repoman_commit(msg):\n subprocess.call(['repoman', 'commit', '-m', msg])\n\ndef main(msg):\n cwd = os.getcwd().split(os.sep)[1:]\n if not is_git(cwd):\n print('gcommit: Please cd into your Gentoo git repository. Aborting commit.')\n sys.exit(1)\n\n repoman_commit(format_commit_message(os.sep.join(cwd[-2:]), msg))\n\nif __name__ == '__main__':\n try:\n main(sys.argv[1])\n except IndexError:\n print('gcommit: Please specify a commit message!')\n sys.exit(0)\n","sub_path":"gcommit.py","file_name":"gcommit.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"369556236","text":"import unittest\n\nfrom sort import mergesort\n\n\nclass SortingCorrectnessTest(unittest.TestCase):\n known_values = (\n (\n [42, 23, 16, 15, 8, 4],\n [4, 8, 15, 16, 23, 42]\n ),\n (\n [],\n []\n ),\n (\n [1],\n [1]\n ),\n (\n ['h', 'b', 'o'],\n ['b', 'h', 'o']\n ),\n )\n\n def test_sorting_correctness(self):\n \"\"\"should give known result with known input\"\"\"\n\n for input_value, output_value in self.known_values:\n \"\"\"Python Built-in sorting\"\"\"\n self.assertEqual(\n output_value,\n sorted(input_value)\n )\n\n \"\"\"Merge Sort\"\"\"\n self.assertEqual(\n output_value,\n mergesort(input_value)\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"sort/sort_test.py","file_name":"sort_test.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"340013055","text":"import sys\n\npossibilites = [\" SET \", \" TO \", \" IF \", \" THEN \", \" ALSO IF \", \" OTHERWISE \", \" WHILE \", \" FOR \", \" IN \", \" ENDLOOP \", \" EXIT \", \" SAY \", \" GIVEBACK \", \" MAKEAFUNCTION \", \" WITHARGUMENTS \", \" CALL \", \" WITHPARAMETERS \"]\nreplacements = [\"# = \", \"#\\n\", \"if #:\\n\\t\", \"\", \"\\belif #:\\n\\t\", \"\\belse:\\n\\t#\\n\\b\", \"while #:\\n\\t\", \"for #\", \"in #:\\n\\t\", \"\\b\", \"break\\n\\b\", \"print ('#')\\n\", \"return #\\n\", \"def # ( \",\" # ):\\n\\t\", \"#(\", \"#)\"]\n\n\n\ndef syntaxify(s, possibilities, replacements):\n s = \" \"+s\n output = \"\"\n locations = []\n for possibility in possibilities:\n length = 0\n while length < len(s):\n length = s.find(possibility, length)\n if length == -1:\n break\n else:\n locations.append(length)\n length += 1\n locationstep = 0\n operatorsplit = []\n locations.sort()\n #print(locations)\n for location in locations:\n if len(locations)>(locationstep+1):\n newstr=s[location:locations[locationstep+1]]\n #print(\"newstr:\" + newstr)\n else:\n newstr=s[location:]\n count = 0\n cap = False\n 
'''\n for char in newstr.split():\n while cap != False:\n cap = char.isupper()\n operatorsplit.append(char)\n count += 1\n operator = ''.join(operatorsplit)\n operatorlocation = possibilities.index(operator)\n outputinitial = replacements[operatorlocation]\n '''\n operator = \" \" + newstr.split()[0] + \" \"\n possloc=possibilities.index(operator)\n outputinitial = replacements[possloc]\n #print (outputinitial)\n output = output + outputinitial.replace(\"#\", newstr.split()[1])\n #print (output)\n locationstep += 1\n return output\n #if end != \"quit\":\n #output = output+syntaxify(end, possibilites, replacements);\n ##print (output)\n #else:\n #print (\"Have a nice day!\");\n\n'''\nif __name__ == \"__main__\":\n if '-' in sys.argv:\n read_from_stdin()\n else:\n prompt_user()\n'''\n\n\nfoutput = \"\"\ns = input(\"Hey! Start typing your code (but make sure to have the right easySyntax):\")\n\noutput = syntaxify(s, possibilites, replacements)\nfoutput = foutput + output\nprint(\"Current Output:\\n\" + foutput)\nend = \"\"\nwhile end != 'quit':\n end = input(\"Anything else? (type code/'quit')\")\n output = syntaxify(end, possibilites, replacements)\n foutput = foutput + output\n print(\"Current Output:\\n\" + foutput)\n\nprint(\"Output:\\n\" + foutput)\nmycode = open('mycode.py', 'a')\nmycode.write(foutput)\nprint(\"File Made.\")\nprint('Have a nice day!')\n# syntaxify(s, possibilites, replacements)\n","sub_path":"coding.py","file_name":"coding.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"594193591","text":"from __future__ import absolute_import\n\nimport sys\n\nif sys.version_info[:2] < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\nfrom enstaller.main import main\n\nfrom .common import without_any_configuration\n\nclass TestEnstallerMainActions(unittest.TestCase):\n @without_any_configuration\n def test_print_version(self):\n # XXX: this is lousy test: we'd like to at least ensure we're printing\n # the correct version, but capturing the stdout is a bit tricky. Once\n # we replace print by proper logging, we should be able to do better.\n try:\n main([\"--version\"])\n except SystemExit as e:\n self.assertEqual(e.code, 0)\n\n @without_any_configuration\n def test_help_runs_and_exits_correctly(self):\n try:\n main([\"--help\"])\n except SystemExit as e:\n self.assertEqual(e.code, 0)\n\n @without_any_configuration\n def test_print_env(self):\n try:\n main([\"--env\"])\n except SystemExit as e:\n self.assertEqual(e.code, 0)\n","sub_path":"tests/functional/test_simple.py","file_name":"test_simple.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"488440612","text":"import json\nimport os\nimport mmap\nimport pickle\nimport random\nimport time\nimport torch\n\nfrom threading import Lock\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.sampler import RandomSampler, BatchSampler\nfrom itertools import accumulate\n\nclass LazyDataset(Dataset):\n \"\"\"A dataset for lazy loading from disk.\n \"\"\"\n\n def __init__(self, path, use_mmap=True):\n self.path = path\n self.use_mmap = use_mmap\n\n if not os.path.exists(path):\n raise ValueError('File `%s` does not exist.' 
% path)\n\n self.lazy_dir = self.get_lazy_dir()\n self.lazy_file = os.path.join(self.lazy_dir, 'data')\n self.lazy_index = os.path.join(self.lazy_dir, 'index.pkl')\n if not os.path.exists(self.lazy_dir):\n self.create_index()\n\n self.file = open(self.lazy_file, 'rb')\n if self.use_mmap:\n print('Memory map `%s`' % path)\n # In distributed setting, it's useful to cache file in ramdisk\n self.file = mmap.mmap(self.file.fileno(), 0, prot=mmap.PROT_READ)\n\n self.read_lock = Lock()\n\n with open(self.lazy_index, 'rb') as f_index:\n self.offsets = pickle.load(f_index)\n\n def get_lazy_dir(self):\n return os.path.splitext(self.path)[0]+'_lazy'\n\n def create_index(self):\n # In distributed setting, only first process creates the index..\n # Note that we assume that the file is stored on a shared file system\n if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:\n os.makedirs(self.lazy_dir)\n print(\"Creating index for lazy loading of `%s`\" % self.path)\n offsets = list()\n cnt = 0\n with open(self.path, 'rb') as f_in, open(self.lazy_file, 'wb') as f_out:\n for line in f_in:\n f_out.write(line)\n cnt += len(line)\n offsets.append(cnt)\n with open(self.lazy_index, 'wb') as f_index:\n pickle.dump(offsets, f_index)\n else:\n while not os.path.exists(self.lazy_index):\n time.sleep(1)\n\n\n def __getitem__(self, index):\n start = 0 if index == 0 else self.offsets[index-1]\n end = self.offsets[index] - 1\n\n self.read_lock.acquire()\n self.file.seek(start)\n txt = self.file.read(end - start)\n self.read_lock.release()\n return txt.decode('utf-8')\n\n def __len__(self):\n return len(self.offsets)\n\n\nclass JSONDataset(Dataset):\n\n def __init__(self, dataset, key=None):\n self.dataset = dataset\n self.key = key\n\n def __getitem__(self, index):\n item = json.loads(self.dataset[index])\n if self.key:\n item = item[self.key]\n return item\n\n def __len__(self):\n return len(self.dataset)\n\nclass BertDataset(Dataset):\n\n def __init__(self, dataset, tokenizer, max_seq_length=512, short_seq_prob=0.1,\n masked_lm_prob=0.15, max_predictions_per_seq=80):\n self.dataset = dataset\n self.tokenizer = tokenizer\n self.max_seq_length = max_seq_length\n self.short_seq_prob = short_seq_prob\n self.masked_lm_prob = masked_lm_prob\n self.max_predictions_per_seq = max_predictions_per_seq\n\n\n def __getitem__(self, index):\n # get rng state corresponding to index (allows deterministic random pair)\n rng = random.Random(index)\n # get seq length, subtract [CLS], .., [SEP], .. [SEP] tokens\n target_seq_length = self.max_seq_length - 3\n\n if rng.random() < self.short_seq_prob:\n target_seq_length = rng.randint(2, target_seq_length)\n\n # get sentence pair and label\n tokens_a, tokens_b, is_random_next = self.create_random_instance(target_seq_length, rng)\n self.truncate_seq_pair(tokens_a, tokens_b, target_seq_length, rng)\n\n # [CLS], tokens_a.. [SEP] tokens_b... 
[SEP]\n        tokens = list()\n        segment_ids = list()\n        tokens.append('[CLS]')\n        for tok in tokens_a:\n            tokens.append(tok)\n            segment_ids.append(0)\n\n        tokens.append('[SEP]')\n        segment_ids.append(0)\n\n        for tok in tokens_b:\n            tokens.append(tok)\n            segment_ids.append(1)\n\n        tokens.append(\"[SEP]\")\n        segment_ids.append(1)\n\n        # pad sequences to max length (use the configured length, not a hard-coded 512)\n        attention_mask = [1]*self.max_seq_length\n        attention_mask[len(tokens):] = [0]*(self.max_seq_length-len(tokens))\n        self.pad_seq(tokens, self.max_seq_length, '[PAD]')\n        self.pad_seq(segment_ids, self.max_seq_length, 0)\n\n        # randomly mask inputs\n        input_tokens, lm_labels = self.create_masked_lm_predictions(tokens, rng)\n\n        input_tokens = self.tokenizer.convert_tokens_to_ids(input_tokens)\n\n        sample = dict()\n        sample['input_tokens'] = torch.LongTensor(input_tokens)\n        sample['segment_ids'] = torch.LongTensor(segment_ids)\n        sample['attention_mask'] = torch.LongTensor(attention_mask)\n        sample['lm_labels'] = torch.LongTensor(lm_labels)\n        sample['is_random_next'] = torch.LongTensor([int(is_random_next)])\n        return sample\n\n    def pad_seq(self, seq, max_len, val):\n        while len(seq) < max_len:\n            seq.append(val)\n\n    def sentence_split(self, document):\n        \"\"\"split document into sentences\"\"\"\n        return [line for line in document.split('\\n') if line]\n\n    def get_random_doc(self, rng):\n        doc_idx = rng.randint(0, len(self.dataset) - 1)\n        doc = self.sentence_split(self.dataset[doc_idx])\n        doc = [self.tokenizer.tokenize(sentence) for sentence in doc]\n        return doc, doc_idx\n\n    def remaining_doc_length(self, doc):\n        segment_lengths = [len(sentence) for sentence in doc]\n        return list(accumulate(reversed(segment_lengths)))[::-1]\n\n    def create_random_instance(self, target_seq_length, rng):\n        \"\"\"Fetches a random sentence pair corresponding to rng state similar to\n        https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L248-L294\"\"\"\n\n        # sample random doc, make sure it contains more than `target_seq_length` tokens\n        doc_a, doc_a_idx = self.get_random_doc(rng)\n        remaining_doc_length = self.remaining_doc_length(doc_a)\n        len_doc_a = remaining_doc_length[0]\n        while len_doc_a < target_seq_length:\n            doc_a, doc_a_idx = self.get_random_doc(rng)\n            remaining_doc_length = self.remaining_doc_length(doc_a)\n            len_doc_a = remaining_doc_length[0]\n\n        # start at random segment (but ensure that we keep more than `target_seq_length` tokens)\n        segment_id = rng.choice([i for i, l in enumerate(remaining_doc_length) if l >= target_seq_length])\n        segments = list()\n        num_tokens = 0\n        for segment in doc_a[segment_id:]:\n            segments.append(segment)\n            num_tokens += len(segment)\n            if num_tokens >= target_seq_length:\n                break\n\n        a_end = 1\n        if len(segments) > 1:\n            a_end = rng.randint(1, len(segments) - 1)\n\n        a_tokens = list()\n        for seg in segments[:a_end]:\n            a_tokens.extend(seg)\n\n        # draw from rng, not the module-level random, so the pair stays deterministic per index\n        if len(segments) == 1 or rng.random() > 0.5:\n            is_random_next = True\n            target_b_length = target_seq_length - len(a_tokens)\n\n            # ensure that doc_a != doc_b\n            doc_b, doc_b_idx = self.get_random_doc(rng)\n            remaining_doc_b_length = self.remaining_doc_length(doc_b)\n            while doc_a_idx == doc_b_idx or remaining_doc_b_length[0] < target_b_length:\n                doc_b, doc_b_idx = self.get_random_doc(rng)\n                remaining_doc_b_length = self.remaining_doc_length(doc_b)\n\n            segment_id = rng.choice([i for i, l in enumerate(remaining_doc_b_length) if l >= target_b_length])\n\n            b_tokens = list()\n            for seg in doc_b[segment_id:]:\n                b_tokens.extend(seg)\n                if len(b_tokens) >= target_b_length:\n                    
break\n else:\n is_random_next = False\n b_tokens = list()\n for seg in segments[a_end:]:\n b_tokens.extend(seg)\n\n return a_tokens, b_tokens, is_random_next\n\n def truncate_seq_pair(self, tokens_a, tokens_b, max_num_tokens, rng):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()\n\n def create_masked_lm_predictions(self, tokens, rng):\n cand_indexes = [i for i, tok in enumerate(tokens) if tok not in ('[SEP]', '[CLS]', '[PAD]')]\n rng.shuffle(cand_indexes)\n output_tokens = list(tokens)\n num_to_predict = min(self.max_predictions_per_seq,\n max(1, int(round(len(cand_indexes) * self.masked_lm_prob))))\n\n lm_labels = [-1]*len(tokens)\n for index in cand_indexes[:num_to_predict]:\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n random_tok_ind = rng.randint(0, len(self.tokenizer.vocab) - 1)\n masked_token = self.tokenizer.ids_to_tokens[random_tok_ind]\n\n output_tokens[index] = masked_token\n lm_labels[index] = self.tokenizer.vocab[tokens[index]]\n\n return output_tokens, lm_labels\n\n def __len__(self):\n return len(self.dataset)\n\nclass PreprocessedBertDataset(Dataset):\n\n def __init__(self, dataset, tokenizer, max_seq_length=512,\n masked_lm_prob=0.15, max_predictions_per_seq=80):\n self.dataset = dataset\n self.tokenizer = tokenizer\n self.max_seq_length = max_seq_length\n self.masked_lm_prob = masked_lm_prob\n self.max_predictions_per_seq = max_predictions_per_seq\n\n def __getitem__(self, index):\n # random.seed(index)\n sample = self.dataset[index]\n tokens_a = sample['tokens_a']\n tokens_b = sample['tokens_b']\n is_random_next = sample['is_random_next']\n target_seq_length = sample['target_seq_length']\n\n self.truncate_seq_pair(tokens_a, tokens_b, target_seq_length, random)\n\n # [CLS], tokens_a.. [SEP] tokens_b... 
[SEP]\n tokens = list()\n segment_ids = list()\n tokens.append('[CLS]')\n for tok in tokens_a:\n tokens.append(tok)\n segment_ids.append(0)\n\n tokens.append('[SEP]')\n segment_ids.append(0)\n\n for tok in tokens_b:\n tokens.append(tok)\n segment_ids.append(1)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n # pad sequences to max length\n attention_mask = [1] * self.max_seq_length\n attention_mask[len(tokens):] = [0] * (self.max_seq_length - len(tokens))\n self.pad_seq(tokens, self.max_seq_length, '[PAD]')\n self.pad_seq(segment_ids, self.max_seq_length, 0)\n\n # randomly mask inputs\n input_tokens, lm_labels = self.create_masked_lm_predictions(tokens, random)\n\n input_tokens = self.tokenizer.convert_tokens_to_ids(input_tokens)\n\n sample = dict()\n sample['input_tokens'] = torch.LongTensor(input_tokens)\n sample['segment_ids'] = torch.LongTensor(segment_ids)\n sample['attention_mask'] = torch.LongTensor(attention_mask)\n sample['lm_labels'] = torch.LongTensor(lm_labels)\n sample['is_random_next'] = torch.LongTensor([int(is_random_next)])\n return sample\n\n def truncate_seq_pair(self, tokens_a, tokens_b, max_num_tokens, rng):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()\n\n def create_masked_lm_predictions(self, tokens, rng):\n cand_indexes = [i for i, tok in enumerate(tokens) if tok not in ('[SEP]', '[CLS]', '[PAD]')]\n rng.shuffle(cand_indexes)\n output_tokens = list(tokens)\n num_to_predict = min(self.max_predictions_per_seq,\n max(1, int(round(len(cand_indexes) * self.masked_lm_prob))))\n lm_labels = [-1]*len(tokens)\n for index in cand_indexes[:num_to_predict]:\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n random_tok_ind = rng.randint(0, len(self.tokenizer.vocab) - 1)\n masked_token = self.tokenizer.ids_to_tokens[random_tok_ind]\n\n output_tokens[index] = masked_token\n lm_labels[index] = self.tokenizer.vocab[tokens[index]]\n\n return output_tokens, lm_labels\n\n def pad_seq(self, seq, max_len, val):\n while len(seq) < max_len:\n seq.append(val)\n\n def __len__(self):\n return len(self.dataset)\n\nif __name__ == '__main__':\n from pytorch_pretrained_bert.tokenization import BertTokenizer\n\n tokenizer = BertTokenizer.from_pretrained('bert-large-uncased', cache_dir='./data')\n\n dataset = JSONDataset(LazyDataset('/home/hdvries/data/prep_128.txt'))\n dataset = PreprocessedBertDataset(dataset, tokenizer, max_seq_length=128)\n\n\n # wiki_dataset = LazyDataset('/home/nathan/data/wiki/enwiki.txt', use_mmap=True)\n # wiki_dataset = JSONDataset(wiki_dataset, key='text')\n #\n # dataset = BertDataset(wiki_dataset, tokenizer)\n sampler = BatchSampler(RandomSampler(dataset), 4, True)\n\n iterator = DataLoader(dataset, batch_sampler=sampler, num_workers=1, pin_memory=True)\n\n for batch in iterator:\n 
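# each sample is a dict of LongTensors; the shape check below confirms (batch_size, max_seq_length)\n        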
print(batch['input_tokens'].shape)","sub_path":"pytorch_pretrained_bert/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":15095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"595124078","text":"# pylint: disable=undefined-variable\nimport logging\n\n\nfrom telegram import Update\nfrom telegram.ext import CallbackContext\nfrom telegram.chataction import ChatAction\nfrom telegram.message import ParseMode\nfrom telegram.inline.inlinequeryresultarticle import InlineQueryResultArticle\nfrom telegram.inline.inputtextmessagecontent import InputTextMessageContent\n\nfrom telegram.ext.dispatcher import run_async\n\nfrom utils import validUrl, name_and_id, update_object_type, players\nfrom replies import error, info, reply_markup\nfrom db import data\nlogging.basicConfig(\n    handlers=(\n        logging.FileHandler('command_logs.log'),\n        logging.StreamHandler()),\n    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n    level=logging.INFO)\n\nwelcome_text = (\"Minecraft Server Status Bot\\n\"\n\"\\n\"\n\"To check players online on the server use @msmpbot in the text input line in any Telegram chat.\\n\"\n\"\\n\"\n\"Commands:\\n\"\n\"/status - Server info\\n\"\n\"/players - Online players on the server\\n\"\n\"\\n\"\n\"To change the monitored server send me its address.\")\n\n# ==========================\n# Inline Query Handler\n# ==========================\n\ndef inline_status(update: Update, context: CallbackContext):\n    address = context.user_data['url']\n\n    q = list()\n\n    q.append(InlineQueryResultArticle(\n        id='1', title=_('Address: ') + address,\n        input_message_content=InputTextMessageContent(\n            message_text=_('Address: ') + address))\n    )\n\n    online, str_players = players(address, True)\n\n    text = ngettext('{0} player online', '{0} players online', online).format(online)\n\n    q.append(InlineQueryResultArticle(\n        id='2', title=text,\n        description=str_players,\n        input_message_content=InputTextMessageContent(\n            message_text=text if online == 0 else text + ': ' + str_players)))\n\n    update.inline_query.answer(q, cache_time=60)\n    logging.info(\"inline answer sent\")\n\n\n# ==========================\n# Message Handler\n# ==========================\n\ndef message(update: Update, context: CallbackContext):\n    \"\"\"Usage: send a language code (\"ru\"/\"en\") or a server address\"\"\"\n    user_data = context.user_data\n\n    if not update.message.text: return\n\n    text = update.message.text.lower()\n    # accept either supported language code\n    if text in (\"ru\", \"en\"):\n        user_data['lang'] = text\n        return\n\n    if text != 'default':\n        if not validUrl(text):\n            logging.info(\"Invalid URL, too long\")\n            return\n\n        data(update.effective_user.id, text)\n\n    user_data['url'] = data(update.effective_user.id)\n\n    update.message.reply_chat_action(ChatAction.TYPING)\n    update.message.reply_text(_('Server address changed to ') + user_data['url'], ParseMode.MARKDOWN)\n\n\n# ==========================\n# Commands\n# ==========================\n\ndef cmd_start(update: Update, context: CallbackContext):\n    \"\"\"Usage: /start\"\"\"\n    message = update.message\n    message.reply_chat_action(ChatAction.TYPING)\n    message.reply_text(_(welcome_text), ParseMode.MARKDOWN)\n\n\ndef cmd_status(update: Update, context: CallbackContext):\n    \"\"\"Usage: /status\"\"\"\n    update.message.reply_chat_action(ChatAction.TYPING)\n\n    url = context.user_data['url']\n\n    update.message.reply_text(\n        text=info.status(url),\n        reply_markup=reply_markup(),\n        parse_mode=ParseMode.MARKDOWN)\n\n    logging.info(\"/status %s online\" % url)\n\n
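\n# Illustrative sketch (not part of the original module): these handlers would\n# typically be registered with python-telegram-bot v12/13 roughly like this;\n# BOT_TOKEN and the wiring below are assumptions, not project code.\n#\n#     from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n#     updater = Updater(BOT_TOKEN, use_context=True)\n#     updater.dispatcher.add_handler(CommandHandler('status', cmd_status))\n#     updater.dispatcher.add_handler(CommandHandler('players', cmd_players))\n#     updater.dispatcher.add_handler(MessageHandler(Filters.text, message))\n\n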
def cmd_players(update: Update, context: CallbackContext):\n    \"\"\"Usage: /players\"\"\"\n    update.message.reply_chat_action(ChatAction.TYPING)\n\n    url = context.user_data['url']\n\n    update.message.reply_text(\n        text=info.players(url),\n        reply_markup=reply_markup(),\n        parse_mode=ParseMode.MARKDOWN)\n\n    logging.info(\"/players %s online\" % url)\n\n\n# ==========================\n# CallBacks\n# ==========================\n\ndef cb_status(update: Update, context: CallbackContext):\n    update.callback_query.message.edit_text(\n        text=info.status(context.user_data['url']),\n        reply_markup=reply_markup(),\n        parse_mode=ParseMode.MARKDOWN\n    )\n\n\ndef cb_players(update: Update, context: CallbackContext):\n    update.callback_query.message.edit_text(\n        text=info.players(context.user_data['url']),\n        reply_markup=reply_markup(),\n        parse_mode=ParseMode.MARKDOWN\n    )\n\n\ndef cb_about(update: Update, context: CallbackContext):\n    update.callback_query.message.edit_text(\n        text=_(welcome_text),\n        reply_markup=reply_markup(),\n        parse_mode=ParseMode.MARKDOWN\n    )\n\n# ==========================\n# Before all handlers\n# ==========================\n\nfrom os import getenv\nimport gettext\ndomain = getenv(\"HEROKU_APP_NAME\")\nlang_code = None\n\ndef check_database(update : Update, context: CallbackContext):\n    user_data = context.user_data\n\n    try:\n        user_data['url']\n\n    except KeyError:\n        user_data['url'] = data(update.effective_user.id)\n\n    global lang_code\n    lc = update.effective_user.language_code\n\n    user_data['lang'] = (\n        lc\n        if lc and gettext.find(domain, \"locale\", [lc])\n        else 'en'\n    )\n\n    if lang_code != user_data['lang']:\n        lang_code = user_data['lang']\n\n        lang = gettext.translation(domain, \"locale\", [lang_code])\n        lang.install(['ngettext'])\n\n    logging.info(update_object_type(update).__class__.__name__ + \" received from \" + name_and_id(update.effective_user))\n\n# ==========================\n# Error handler\n# ==========================\n\nfrom socket import timeout as TimeoutError\nfrom telegram.error import BadRequest\n\ndef error_handler(update : Update, context : CallbackContext):\n    if type(context.error) is TimeoutError:\n        logging.error(\"Timeout error\")\n    else:\n        logging.exception(context.error)\n\n    if type(context.error) is BadRequest: return\n\n    if update.inline_query:\n        error.inline(context.bot, update.inline_query.id)\n    elif update.message:\n        error.message(context.bot, update.message.chat_id, context.user_data['url'])\n    elif update.callback_query:\n        error.callback(context.bot, update.callback_query.message, context.user_data['url'])\n","sub_path":"commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":6257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"335780796","text":"class Solution:\n    def longestCommonPrefix(self, strs):\n        if len(strs) == 0:\n            return \"\"\n        j = len(strs[0])\n        # find the shortest word\n        for word in strs:\n            if len(word) < j:\n                j = len(word)\n        for i in range(0, j):\n            for w in range(0, len(strs)-1):\n                if strs[w][i] != strs[w+1][i]:\n                    return strs[0][:i]\n        return strs[0][:j]\n","sub_path":"0. Easy/0014. 
Longest Common Prefix/longest_prefix.py","file_name":"longest_prefix.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"222585734","text":"#This is a List of Items program.\n\n#add each item to the inventory dict, counting duplicates\ndef addToInventory(inventory, addedItems):\n\n    for item in addedItems:\n        inventory.setdefault(item, 0)\n        inventory[item] += 1\n\n    return inventory\n\n#print the inventory and the total item count\ndef displayInventory(inventory):\n    print('Inventory:')\n    item_total = 0\n    for k,v in inventory.items():\n        print(str(v) + ' ' + k)\n        item_total += v\n    print('\\nTotal number of items: ' + str(item_total))\n\ndragonLoot = ['gold coin','dagger','gold coin','gold coin','ruby']\ninv = {'gold coin':42, 'rope':1}\ninv = addToInventory(inv,dragonLoot)\ndisplayInventory(inv)","sub_path":"PrintListItems.py","file_name":"PrintListItems.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"132903587","text":"import shodan\nimport time\nimport pickle\nfrom tqdm import tqdm\n\napi = shodan.Shodan('APIKEY')\n\nip_list = []\nprocessed_ips = set()\n\ntry:\n    with open(\"processed_ips\", \"rb\") as fp: # for handling many IPs: processed IPs are stored here and reloaded on restart\n        processed_ips = pickle.load(fp)\nexcept FileNotFoundError:\n    print(\"There aren't any stored IPs.\")\n\nwith open(\"ip_list\", \"r\", encoding=\"utf-8\") as fp: # input file: one entry per line, the IP being the second space-separated field\n    for line in fp:\n        ip_list.append(line.split(\" \")[1].strip())\n\nwith open(\"output_file\", \"a\", encoding=\"UTF-8\") as fp: # output file\n    for ip in tqdm(ip_list):\n        if ip in processed_ips:\n            continue\n        try:\n            info = api.host(ip)\n            time.sleep(5)\n            # parsed = json.dumps(info)\n            # parsed = json.loads(parsed)\n            # print(json.dumps(parsed, indent=4, sort_keys=True))\n            try:\n                vulnerabilities = info[\"vulns\"]\n            except KeyError as e:\n                print(\"No vulns for {}\".format(info[\"ip_str\"]))\n                vulnerabilities = [\"None\"]\n            try:\n                asn = info[\"asn\"]\n            except KeyError as e:\n                asn = \"none\"\n            # \"os\" can be missing from a host record, so use .get() instead of indexing\n            fp.write(\"{},{},{},{},{}\\n\".format(info[\"ip_str\"],\n                                               info.get(\"os\"),\n                                               asn,\n                                               \" \".join(str(port) for port in info[\"ports\"]),\n                                               \" \".join(vulnerabilities)))\n            with open(\"processed_ips\", \"wb\") as pfp:\n                processed_ips.add(ip)\n                pickle.dump(processed_ips, pfp)\n        except shodan.exception.APIError as e:\n            time.sleep(5)\n            with open(\"processed_ips\", \"wb\") as pfp:\n                processed_ips.add(ip)\n                pickle.dump(processed_ips, pfp)\n            print(\"For IP:\" + ip + \" this error is produced:\" + str(e))\n","sub_path":"shodan_bulk_search.py","file_name":"shodan_bulk_search.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"365255306","text":"import requests\nimport re\nimport sys\n\n#payload for the POST request in requests.post\npayload = {\n    'sessionID':'0',\n    'requestID':'0',\n    'type_origin':'any',\n    #from e.g. \"Mozarthaus/Kolping\"\n    'name_origin': sys.argv[1],\n    'type_destination':'any',\n    #to \"Königsplatz\"\n    'name_destination':'101',\n    'maxChanges':'0'\n}\n\n#strip everything inside <...> (HTML tags) and (...) from the response text\ndef a(test_str):\n    ret = ''\n    skip1c = 0\n    skip2c = 0\n    for i in test_str:\n        if i == '<':\n            skip1c += 1\n        elif i == '(':\n            skip2c += 1\n        elif i == '>' and skip1c > 0:\n            skip1c -= 1\n        elif i == ')' and skip2c > 0:\n            skip2c -= 1\n        elif skip1c == 0 and skip2c == 0:\n            ret += i\n    return ret\n
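\n# Quick illustration (hypothetical input, not from the live AVV response):\n#     a('<b>Abfahrt</b> 12:34 (Gleis 1)')  ->  'Abfahrt 12:34 '\n# tags and the parenthesised part are dropped, plain text is kept\n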
\n#post the request; the result comes back as one big HTML page\nr = requests.post('https://efa.avv-augsburg.de/stawa2/XSLT_TRIP_REQUEST2', data=payload)\nr = r.text\n\n#apply the function to remove all html tags\no = a(r)\n\ndetpos = o.find('Detail')\n\n#collect the departure time (6 characters) printed after each \"N. Fahrt\" label\nfahrten = []\nfor n in range(1, 5):\n    fahrtpos = o.find('%d. Fahrt' % n, detpos)\n    fahrten.append(o[fahrtpos+8:fahrtpos+14])\n\nfor fahrt in fahrten:\n    print(fahrt)\nprint('--------')\nprint(o)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"186558754","text":"# model settings\n_base_ = [\n    \"htc_without_semantic_r50_fpn_1x_coco.py\",\n    \"../../helper/dataset.py\",\n    \"../../helper/runtime.py\",\n    \"../../helper/schedule.py\",\n]\n\n# backbone\npretrained = (\n    \"https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth\"  # noqa\n)\nmodel = dict(\n    backbone=dict(\n        _delete_=True,\n        type=\"SwinTransformer\",\n        pretrain_img_size=384,\n        embed_dims=192,\n        depths=[2, 2, 18, 2],\n        num_heads=[6, 12, 24, 48],\n        window_size=12,\n        mlp_ratio=4,\n        qkv_bias=True,\n        qk_scale=None,\n        drop_rate=0.0,\n        attn_drop_rate=0.0,\n        drop_path_rate=0.2,\n        patch_norm=True,\n        out_indices=(0, 1, 2, 3),\n        with_cp=False,\n        convert_weights=True,\n        init_cfg=dict(type=\"Pretrained\", checkpoint=pretrained),\n    ),\n    neck=dict(in_channels=[192, 384, 768, 1536]),\n)\n\n\n# work_dir, wandb exp name\nexp = \"htc_swin_l\"\nwork_dir = f\"./work_dirs/{exp}\"\n\n# Wandb Log\nlog_config = dict(\n    hooks=[\n        dict(type=\"TextLoggerHook\"),\n        dict(\n            type=\"WandbLoggerHook\",\n            init_kwargs=dict(\n                project=\"object-detection-recycling-trash\", entity=\"boostcamp-2th-cv-02team\", name=f\"{exp}\"\n            ),\n        ),\n    ]\n)\n\nlr_config = dict(step=[8, 12])\nrunner = dict(type=\"EpochBasedRunner\", max_epochs=15)\n","sub_path":"template/mmdetection/configs/custom/models/htc/htc_swin_l.py","file_name":"htc_swin_l.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"261072570","text":"# Sieve of Eratosthenes\n#\n# Uses a bitmask sieve (vol. 2, p. 597)\n#\nfrom math import sqrt\n\nM, N = map(int, input().split())\nsieve = [1] * int((N + 7) / 8 + 1)\n\n\ndef isPrime(k):\n    global sieve\n    return sieve[k >> 3] & (1 << (k & 7))\n\n\ndef setComposite(k):\n    global sieve\n    sieve[k >> 3] &= ~(1 << (k & 7)) % 256\n\n\ndef eratosthenes():\n    for i in range(2, int(sqrt(N)) + 1):\n        if isPrime(i):\n            for j in range(i * i, N + 1, i):\n                setComposite(j)\n\n\neratosthenes()\n# print every prime in [M, N]; 0 and 1 are never prime\nfor i in range(max(M, 2), N + 1):\n    if isPrime(i):\n        print(i)\n","sub_path":"SsangWoo/python/baekjoon/1929.py","file_name":"1929.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"105880255","text":"import re\nimport string\n\nwith open(\"../../../data/2018/5/data.txt\") as f:\n    x = f.read().strip()\n\nrepat = '|'.join([''.join(i) for i in zip(list(string.ascii_uppercase), list(string.ascii_lowercase))] + [''.join(i) for i in zip(list(string.ascii_lowercase), list(string.ascii_uppercase))])\npattern = 
re.compile(repat)\nlengths = []\nfor letter in string.ascii_uppercase:\n y = re.sub(\"%s|%s\" % (letter, letter.lower()), \"\", x)\n while True:\n if pattern.findall(y):\n y = pattern.sub(\"\", y)\n else:\n lengths.append(len(y))\n break\n\nprint(min(lengths))\n","sub_path":"python/2018/5/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"311401122","text":"expected_output = {\n \"trustpoints\": {\n \"CISCO_IDEVID_SUDI\": {\n \"associated_trustpoints\": {\n \"certificate\": {\n \"status\": \"Available\",\n \"serial_number_in_hex\": \"793B572700000003750B\",\n \"subject\": {\n \"name\": \"WS-C3850-24P-0057D21BC800\",\n \"pid\": \"WS-C3850-24P\",\n \"cn\": \"WS-C3850-24P-0057D21BC800\",\n \"serial_number\": \"FCW1947C0GF\",\n },\n \"issuer\": {\"cn\": \"Cisco Manufacturing CA SHA2\", \"o\": \"Cisco\"},\n \"crl_distribution_points\": \"http://www.cisco.com/security/pki/crl/cmca2.crl\",\n \"usage\": \"General Purpose\",\n \"validity_date\": {\n \"start_date\": \"00:34:52 UTC Nov 20 2015\",\n \"end_date\": \"00:44:52 UTC Nov 20 2025\",\n },\n },\n \"ca_certificate\": {\n \"status\": \"Available\",\n \"serial_number_in_hex\": \"02\",\n \"subject\": {\"cn\": \"Cisco Manufacturing CA SHA2\", \"o\": \"Cisco\"},\n \"issuer\": {\"cn\": \"Cisco Root CA M2\", \"o\": \"Cisco\"},\n \"crl_distribution_points\": \"http://www.cisco.com/security/pki/crl/crcam2.crl\",\n \"usage\": \"Signature\",\n \"validity_date\": {\n \"start_date\": \"13:50:58 UTC Nov 12 2012\",\n \"end_date\": \"13:00:17 UTC Nov 12 2037\",\n },\n },\n }\n }\n }\n}\n","sub_path":"src/genie/libs/parser/iosxe/tests/ShowCryptoPkiCertificates/cli/equal/golden_output_c3850_expected.py","file_name":"golden_output_c3850_expected.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"269024896","text":"import numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef main():\n\n\tN = 500\n\n\tnpzfiles = ['Q_output.npz','SARSA_output.npz','NN_output.npz','Optimal_output.npz','Random_output.npz']\n\tlegend_labels = ['Q Learning', 'SARSA', 'NN Q Learning', 'Optimal Strategy','Random Agent']\n\n\tfig1, ax1 = plt.subplots()\n\tfor ind,npzfile in enumerate(npzfiles):\n\t\tprint(npzfile)\n\n\t\tnpzdict = np.load('output/'+npzfile)\n\t\trewards = npzdict['rewards']\n\n\t\titerations = npzdict['iterations']\n\t\titerations_vec = np.array(range(N,iterations+1))\n\n\t\trunning_mean_reward = running_mean(x=rewards,N=N)\n\t\tax1.plot(iterations_vec, running_mean_reward, label=str(legend_labels[ind]))\n\n\tax1.grid()\n\tax1.set(xlabel = 'Iteration', ylabel = 'Mean reward over last 500 iterations', title='Various Agents - OpenAI Gym Nchain ', xlim=(0,iterations))\n\tax1.legend()\n\tfig1.tight_layout()\n\tfig1.savefig(\"output/reward.png\", dpi = 300)\n\ndef running_mean(x, N):\n\t#x is data, N is window size.\n cumsum = np.cumsum(np.insert(x, 0, 0)) \n return (cumsum[N:] - cumsum[:-N]) / float(N)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"nchain/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"587620999","text":"import glob\r\nimport pandas as pd\r\nimport chardet\r\nimport codecs\r\nimport sys,xlrd\r\nfrom xlrd import open_workbook\r\nimport os.path\r\n\r\nvar = 
pd.DataFrame()\r\nmyvar = pd.DataFrame()\r\nrootDir = 'C:/Users/vinicius.dourado.MPT/Dropbox/tabnet/spabr/'\r\ncount = 1  # counts files across all directories so data from earlier directories is not overwritten\r\nfor dirName, subdirList, fileList in os.walk(rootDir, topdown=False):\r\n    print('Found directory: %s' % dirName)\r\n    for fname in fileList:\r\n        filepath = os.path.join(rootDir, dirName, fname)\r\n        myvar = pd.read_csv(filepath, sep=\";\")\r\n        if count == 1:\r\n            var = myvar\r\n        else:\r\n            var = pd.concat([var, myvar], axis=0)\r\n        count += 1\r\n\r\nvar = var[~(var['Municipio'].str.contains('Total|&'))]\r\n\r\nvar[\"cod_municipio\"], var[\"desc_municipio\"] = zip(*var[\"Municipio\"].str.split(' ',1).tolist())\r\ndel var['Municipio']\r\nvar[\"cod_municipio\"] = var.cod_municipio.astype(int)\r\n\r\nvar[\"mes\"], var[\"ano\"] = zip(*var[\"periodo\"].str.split('_',1).tolist())\r\ndel var['periodo']\r\n\r\nvar['Quantidade aprovada'] = var['Quantidade aprovada'].replace(['-'], '0')\r\nvar['Valor aprovado'] = var['Valor aprovado'].replace(['-'], '0')\r\n\r\nvar.to_csv('C:/Users/vinicius.dourado.MPT/Dropbox/tabnet/spabr/Dados detalhados das AIH - por local internação - Brasil_2008emdiante.csv',sep=\";\" , columns=[\"cod_municipio\",\"desc_municipio\",\"Quantidade aprovada\",\"Valor aprovado\",\"mes\",\"ano\"], index=False)\r\n\r\n\r\n","sub_path":"agragacaodoscsv.py","file_name":"agragacaodoscsv.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"336300276","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# Date: 2018/8/2\n\n\"\"\"\n    Entry point for the Alibaba API calls.\n\n    Wraps them in an easier-to-use API.\n\"\"\"\n\nfrom .pay.basic import AliPay\nfrom .yun.basic import AliYunClient\n\nfrom .config import Config\n\n\nclass AliApi(object):\n\n    config_class = Config\n\n    default_config = {\n        \"DEBUG\": True,\n        \"CLOUD\": {\n            \"APP_ID\": None,\n            \"SECRET\": None\n        },\n        \"PAY\": {\n            \"APP_ID\": None,\n            \"PUBLIC_KEY_PATH\": None,\n            \"PRIVATE_KEY_PATH\": None,\n            \"NOTIFY_URL\": None,\n            \"RETURN_URL\": None\n        }\n    }\n\n    def __init__(self):\n        self.config = self.make_config()\n\n    def make_config(self):\n        defaults = dict(self.default_config)\n        return self.config_class(defaults)\n\n    @property\n    def pay(self):\n\n        pay_config = self.config[\"PAY\"]\n        app_id = pay_config[\"APP_ID\"]\n        public_key_path = pay_config[\"PUBLIC_KEY_PATH\"]\n        private_key_path = pay_config[\"PRIVATE_KEY_PATH\"]\n        notify_url = pay_config[\"NOTIFY_URL\"]\n        return_url = pay_config[\"RETURN_URL\"]\n\n        return AliPay(\n            app_id=app_id, public_key_path=public_key_path,\n            private_key_path=private_key_path, notify_url=notify_url,\n            return_url=return_url, debug=self.config[\"DEBUG\"]\n        )\n\n    @property\n    def yun(self):\n\n        cloud_config = self.config[\"CLOUD\"]\n        app_id = cloud_config[\"APP_ID\"]\n        secret = cloud_config[\"SECRET\"]\n\n        return AliYunClient(\n            app_id=app_id, secret=secret\n        )\n","sub_path":"aliopenapi/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"18316662","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.generic import (ListView, DetailView, View, TemplateView)\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login, logout\nfrom django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.http import 
HttpResponseRedirect, HttpResponse, JsonResponse\nfrom django.conf import settings\nfrom .models import Group, Membership, Post\nfrom .forms import GroupForm, PostForm\nfrom django.utils import timezone\nfrom django.template.response import TemplateResponse\n\n\nimport pdb\n\n\ndef home_page(request):\n return render(request, template_name='homepage.html')\n\ndef user_login(request):\n if request.method == \"GET\":\n return render(request, template_name='login.html')\n elif request.method == \"POST\":\n username = request.POST.get('username')\n password = request.POST.get('password')\n try:\n user = User.objects.get(username=username)\n if user.check_password(password):\n login(request, user)\n return HttpResponseRedirect('/')\n else:\n return render(request, 'login.html', {'error': settings.LOGIN_ERROR_MESSAGE}, status=401)\n # return HttpResponse(settings.LOGIN_ERROR_MESSAGE, status=401)\n # return TemplateResponse(request, 'login.html',\n # context={'error': settings.LOGIN_ERROR_MESSAGE},\n # status=400)\n except User.DoesNotExist:\n return HttpResponse(settings.LOGIN_ERROR_MESSAGE, status=401)\n\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect('/')\n\ndef user_registration(request):\n if request.method == \"GET\":\n return render(request, template_name='registration.html')\n elif request.method == \"POST\":\n username = request.POST.get('username')\n email = request.POST.get('email')\n password1 = request.POST.get('password1')\n password2 = request.POST.get('password2')\n try:\n user = User.objects.get(username=username)\n return HttpResponse(settings.REGISTRATION_USER_EXISTS_ERROR_MESSAGE,\n status=400)\n except User.DoesNotExist:\n if password1 == password2:\n user = User.objects.create_user(username, email)\n user.set_password(password1)\n user.save()\n return HttpResponseRedirect('/')\n else:\n return HttpResponse(settings.REGISTRATION_PASSWORDS_ERROR_MESSAGE,\n status=400)\n\n\nclass GroupsList(LoginRequiredMixin, ListView):\n\n model = Group\n template_name = 'groups/groups_list.html'\n\n\nclass GroupPage(LoginRequiredMixin, TemplateView):\n\n def get(self, request, group_id):\n template_name = 'groups/group_info.html'\n group = get_object_or_404(Group, pk=group_id)\n post_list = Post.objects.filter(group=group_id).values()\n posts = [elem for elem in post_list]\n user = request.user\n is_member = self.is_member(user, group)\n is_creator = self.is_creator(user.pk, group_id)\n return render(request, template_name, {'is_member': is_member,\n 'is_creator': is_creator,\n 'group': group,\n 'posts': posts})\n\n def post(self, request, group_id):\n group = get_object_or_404(Group, pk=group_id)\n user = request.user\n is_member = self.is_member(user, group)\n if is_member:\n membership = Membership.objects.get(group=group, user=user)\n membership.delete()\n else:\n Membership.objects.create(group=group, user=user,\n date_joined=timezone.now())\n return HttpResponseRedirect(f'/groups/{group_id}/')\n\n\n @staticmethod\n def is_member(user, group):\n try:\n Membership.objects.get(group=group, user=user)\n is_member = True\n except Membership.DoesNotExist:\n is_member = False\n return is_member\n\n @staticmethod\n def is_creator(user_id, group_id):\n user = User.objects.get(pk=user_id)\n group = Group.objects.get(pk=group_id)\n creator = group.creator\n if creator.pk == user.pk:\n return True\n return False\n\n\nclass GroupCreate(LoginRequiredMixin, TemplateView):\n\n def get(self, request):\n template_name = \"groups/group_create.html\"\n form = GroupForm()\n 
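# unbound form: GET only renders the empty create-group page; submission is handled in post() below\n        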
return render(request, template_name, {'form': form})\n\n def post(self, request):\n\n name = request.POST.get('name')\n theme = request.POST.get('theme')\n is_private = request.POST.get('private')\n\n creator = request.user\n data = {'name': name,\n 'theme': theme,\n 'creator': creator.pk}\n form = GroupForm(data)\n if form.is_valid():\n group = form.instance\n if is_private is None:\n group.save()\n else:\n group.is_private = True\n group.save()\n membership = Membership(user=creator, group=group, date_joined=timezone.now())\n membership.save()\n return HttpResponseRedirect('/groups/')\n return HttpResponse(\"Data is not valid\", status=400)\n\n\nclass GroupUpdate(LoginRequiredMixin, TemplateView):\n\n def get(self, request, group_id):\n group = Group.objects.get(pk=group_id)\n form = GroupForm(instance=group)\n user = request.user\n template_name = \"groups/group_form.html\"\n if GroupPage.is_creator(user.pk, group_id):\n return render(request, template_name, {'form': form})\n return render(request, template_name, {'form': form,\n 'error': \"Only creator is allowed to update the group\"})\n\n def post(self, request, group_id):\n user = request.user\n group = get_object_or_404(Group, id=group_id)\n if GroupPage.is_creator(user.pk, group_id):\n name = request.POST.get('name')\n theme = request.POST.get('theme')\n data = {'name': name, 'theme': theme, 'creator': group.creator.pk}\n form = GroupForm(data=data, instance=group)\n if form.is_valid():\n form.save()\n # return render(request, \"groups/group_info.html\")\n return HttpResponseRedirect(f'/groups/{group_id}/')\n return render(request, \"groups/group_form.html\", {'error': \"Only creator is allowed to update the group\"})\n\n\nclass GroupDelete(LoginRequiredMixin, TemplateView):\n template_name = 'groups/group_form.html'\n\n def post(self, request, group_id):\n group = get_object_or_404(Group, pk=group_id)\n try:\n group.delete()\n return HttpResponseRedirect('/groups/')\n except:\n return HttpResponse(\"Couldn't delete\", status=400)\n\n\n@login_required\ndef invite(request, group_id):\n if request.method == \"POST\":\n user_current = request.user\n username = request.POST.get('invited_user')\n group = get_object_or_404(Group, pk=group_id)\n is_creator = GroupPage.is_creator(user_current.pk, group.pk)\n data = {'group': group,\n 'is_member': True,\n 'is_creator': is_creator}\n try:\n user = User.objects.get(username=username)\n try:\n member = Membership.objects.get(user=user, group=group)\n return render(request, 'groups/group_info.html',\n {**data,'message': \"User is a member already\"})\n except Membership.DoesNotExist:\n membership = Membership(user=user, group=group, date_joined=timezone.now())\n membership.save()\n return render(request, 'groups/group_info.html', {**data,'message': \"User was invited\"} )\n\n except User.DoesNotExist:\n return render(request, 'groups/group_info.html', {**data, 'message': \"User doesn't exist\",})\n\n\n\n# class PostPage(LoginRequiredMixin, TemplateView):\n#\n# def get(self, request, **args):\n# template_name = 'posts/posts_list.html'\n#\n#\n\nclass PostsList(LoginRequiredMixin, ListView):\n\n model = Post\n template_name = 'posts/posts_list.html'\n\nclass PostInfo(LoginRequiredMixin, DetailView):\n\n model = Post\n template_name = 'posts/post_info.html'\n\nclass PostCreate(LoginRequiredMixin, TemplateView):\n\n def get(self, request, group_id):\n template_name = 'posts/post_create.html'\n form = PostForm()\n creator = request.user\n group = get_object_or_404(Group, pk=group_id)\n if 
GroupPage.is_member(creator, group):\n            return render(request, template_name, {'form': form})\n        return render(request, 'posts/post_create.html',\n                      {'error': \"You must join the group to create posts\"})\n\n    def post(self, request, group_id):\n        title = request.POST.get('title')\n        text = request.POST.get('text')\n        creator = request.user\n        publish = request.POST.get('publish')\n        group = get_object_or_404(Group, pk=group_id)\n        if GroupPage.is_member(creator, group):\n            data = {'title': title,\n                    'text': text,\n                    'creator': creator.pk,\n                    'group': group.pk}\n            form = PostForm(data)\n            if form.is_valid():\n                post = form.instance\n                # only stamp a creation date when \"publish\" was ticked;\n                # posts without a date stay drafts (see DraftsList below)\n                if publish is not None:\n                    post.date_created = timezone.now()\n                post.save()\n                if group.is_private == True:\n                    post.is_private = True\n                    post.save()\n                return HttpResponseRedirect('/posts/')\n        return render(request, 'posts/post_create.html', {'error': \"You must join the group to create posts\"})\n\nclass PostUpdate(LoginRequiredMixin, TemplateView):\n\n    @staticmethod\n    def is_creator(user_id, post_id):\n        user = User.objects.get(pk=user_id)\n        post = Post.objects.get(pk=post_id)\n        creator = post.creator\n        if creator.pk == user.pk:\n            return True\n        return False\n\n    def get(self, request, post_id):\n        user_id = request.user.pk\n        template_name = 'posts/post_form.html'\n        post = get_object_or_404(Post, pk=post_id)\n        form = PostForm(instance=post)\n        if self.is_creator(user_id, post_id):\n            return render(request, template_name, {'form': form})\n        else:\n            return render(request, template_name, {'form': form, 'error': \"Only the creator is allowed to update the post\"})\n\n\n    def post(self, request, post_id):\n        user_id = request.user.pk\n        if self.is_creator(user_id, post_id):\n            title = request.POST.get('title')\n            text = request.POST.get('text')\n            post = get_object_or_404(Post, pk=post_id)\n            data = {'title': title, 'text': text, 'creator': post.creator.pk,\n                    'group': post.group.pk}\n            form = PostForm(data=data, instance=post)\n            if form.is_valid():\n                form.save()\n                return HttpResponseRedirect(f'/posts/{post_id}')\n        return render(request, 'posts/post_form.html', {'error': \"Only the creator is allowed to update the post\"})\n\nclass PostDelete(LoginRequiredMixin, TemplateView):\n\n    def post(self, request, post_id):\n        post = get_object_or_404(Post, pk=post_id)\n        try:\n            post.delete()\n            return HttpResponseRedirect('/posts/')\n        except Exception:\n            return HttpResponse(\"Couldn't delete\", status=400)\n\n\nclass DraftsList(LoginRequiredMixin, ListView):\n\n    def get(self, request):\n        user = request.user\n        posts = Post.objects.filter(creator=user, date_created=None).values()\n        drafts = [elem for elem in posts]\n        template_name = 'posts/drafts_list.html'\n        return render(request, template_name, {'drafts': drafts})\n\n\n@login_required\ndef publish(request, draft_id):\n    if request.method == \"POST\":\n        post = Post.objects.get(pk=draft_id)\n        post.date_created = timezone.now()\n        post.save()\n    return HttpResponseRedirect('/drafts/')\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"548670481","text":"from core.test_ft import Fine_tuning as test_ft\nfrom scipy import misc\nimport numpy as np\n\nfile_name = 'cman'\nfile_name_clean = file_name + '_clean.png'\nfile_path = './data/'\n\nnoise_mean = 0\nnoise_sigma = 30\n\nif noise_sigma not in [15, 25, 30, 50, 75]:\n    print ('No weight file')\n    exit()\n\nclean_image = misc.imread(file_path + file_name_clean)\nnoisy_image = clean_image 
+ np.random.normal(noise_mean, noise_sigma, clean_image.shape)\n\nt_ft = test_ft(clean_image, noisy_image, noise_sigma)\ndenoised_img, psnr, ssim = t_ft.fine_tuning()\n\nmisc.imsave(file_name +'_denoised_ft.png', denoised_img)\n\nprint ('PSNR : ' + str(round(psnr,2)) + '\\nSSIM : ' + str(round(ssim,4)))","sub_path":"test_fc_aide_ft.py","file_name":"test_fc_aide_ft.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"142581384","text":"# -*- coding: utf-8 -*-\nimport json\nimport traceback\n\nimport requests\n\n\nclass WikiDataItem:\n def __init__(self, wd_item_id, init_at_once=True):\n self.wd_item_id = wd_item_id\n self.property_name_list = None\n self.data_dict = None\n self.relation_property_name_list = None\n self.source_wd_dict_json = None\n self.is_init = False\n if init_at_once:\n self.init_wikidata_item(self.wd_item_id)\n\n def get_en_name(self):\n if self.source_wd_dict_json:\n if 'labels' in self.source_wd_dict_json.keys() and \"en\" in self.source_wd_dict_json['labels'].keys():\n return self.source_wd_dict_json['labels'][\"en\"][\"value\"]\n return None\n\n @staticmethod\n def __merge_value_from_dict_list(dict_list, value_key='value'):\n values = []\n for d in dict_list:\n if value_key in d:\n values.append(d[value_key])\n # return ',,'.join(values)\n return values\n\n def __parse_wd_json_to_dict(self, wd_item_json):\n wd_item_property_dict = {}\n self.__extract_muilti_language_property(wd_item_json, 'aliases', wd_item_property_dict)\n self.extract_muilti_language_property_from_dict(wd_item_json, 'labels', wd_item_property_dict)\n self.extract_muilti_language_property_from_dict(wd_item_json, 'descriptions', wd_item_property_dict)\n\n self.__extract_sitelinks(wd_item_json, wd_item_property_dict)\n property_name_list, relation_property_name_list = WikiDataItem.__extract_claims(wd_item_json=wd_item_json,\n wd_item_property_dict=wd_item_property_dict)\n if 'labels_en' in wd_item_property_dict:\n wd_item_property_dict['name'] = wd_item_property_dict['labels_en']\n return wd_item_property_dict, property_name_list, relation_property_name_list\n\n @staticmethod\n def __extract_metadata(entity_metadata):\n metadata_dict = {}\n metadata_dict['wd_item_id'] = entity_metadata['id']\n metadata_dict['lastrevid'] = entity_metadata['lastrevid']\n metadata_dict['modified'] = entity_metadata['modified']\n return metadata_dict\n\n @staticmethod\n def __extract_sitelinks(wd_item_json, wd_item_property_dict):\n if 'sitelinks' in wd_item_json.keys():\n sitelinks = wd_item_json['sitelinks']\n if not sitelinks:\n return\n for k, v in sitelinks.items():\n k_title = \"site:\" + k\n wd_item_property_dict[k_title] = v['url']\n\n @staticmethod\n def __extract_claims(wd_item_json, wd_item_property_dict):\n claims_dict = wd_item_json['claims']\n if not claims_dict:\n return [], []\n property_name_list = []\n relation_property_name_list = []\n for k, v in claims_dict.items():\n property_name_list.append(k)\n wd_item_property_dict[k] = WikiDataItem.__extract_claims_property_from_value_list(v)\n if v and WikiDataItem.is_property_relation(v[0]):\n relation_property_name_list.append(k)\n return property_name_list, relation_property_name_list\n\n @staticmethod\n def __extract_claims_property_from_value_list(value_list):\n extract_values = []\n for value in value_list:\n t = WikiDataItem.__extract_claims_property_item_from_value_list(value)\n if t:\n extract_values.append(t)\n # return ',,'.join(extract_values)\n return 
extract_values\n\n @staticmethod\n def is_property_relation(value_dict):\n try:\n mainsnak = value_dict['mainsnak']\n if mainsnak['snaktype'] == 'novalue' or mainsnak['snaktype'] == 'somevalue':\n return False\n data_type = mainsnak['datatype']\n if data_type == 'wikibase-item':\n return True\n else:\n return False\n except Exception as e:\n traceback.print_exc()\n return False\n\n @staticmethod\n def __extract_claims_property_item_from_value_list(value_dict):\n mainsnak = value_dict['mainsnak']\n if mainsnak['snaktype'] == 'novalue' or mainsnak['snaktype'] == 'somevalue':\n return None\n data_type = mainsnak['datatype']\n value = ''\n\n string_value_type = ['string', 'external-id', 'math', 'url', 'commonsMedia', 'geo-shape']\n\n if data_type in string_value_type:\n value = mainsnak['datavalue']['value']\n if data_type == 'wikibase-item':\n value = mainsnak['datavalue']['value']['id']\n if data_type == 'wikibase-property':\n value = 'P' + str(mainsnak['datavalue']['value']['numeric-id'])\n if data_type == 'time':\n value = mainsnak['datavalue']['value']['time']\n if data_type == 'monolingualtext':\n value = mainsnak['datavalue']['value']['text']\n if data_type == 'quantity':\n value = mainsnak['datavalue']['value']['amount']\n if data_type == 'globe-coordinate':\n value = str(mainsnak['datavalue']['value']['latitude']) + ',' + str(\n mainsnak['datavalue']['value']['longitude'])\n\n return value\n\n def __extract_muilti_language_property(self, wd_item_json, key, wd_item_property_dict):\n language_dict = wd_item_json[key]\n if not language_dict:\n return\n for k, v in language_dict.items():\n try:\n full_property_name = key + '_' + k\n wd_item_property_dict[full_property_name] = self.__merge_value_from_dict_list(v)\n except Exception as error:\n print(error)\n\n @staticmethod\n def extract_muilti_language_property_from_dict(wd_item_json, key, wd_item_property_dict):\n language_dict = wd_item_json[key]\n if not language_dict:\n return\n for k, v in language_dict.items():\n try:\n full_property_name = key + '_' + k\n wd_item_property_dict[full_property_name] = v['value']\n except Exception as error:\n print(error)\n\n def init_wikidata_item_from_json_string(self, data_json_string):\n try:\n\n dict_json = self.parse_illegal_json_string(data_json_string)\n\n self.source_wd_dict_json = dict_json\n self.wd_item_id = dict_json[\"id\"]\n self.__parse()\n self.is_init = True\n return self\n except Exception as error:\n traceback.print_exc()\n self.source_wd_dict_json = None\n self.is_init = False\n return self\n\n def init_wikidata_item(self, wd_item_id):\n try:\n r = requests.get(\n \"https://www.wikidata.org/wiki/Special:EntityData/{wd_item_id}.json\".format(wd_item_id=wd_item_id))\n json_response = r.content.decode(encoding='utf-8')\n dict_json = json.loads(json_response, encoding='utf-8')\n self.source_wd_dict_json = dict_json[\"entities\"][wd_item_id]\n self.wd_item_id = wd_item_id\n self.__parse()\n self.is_init = True\n return self\n except Exception as error:\n traceback.print_exc()\n self.source_wd_dict_json = None\n self.is_init = False\n return self\n\n def init_wikidata_item_from_wikipedia_title(self, wikipedia_title):\n try:\n r = requests.get(\n \"https://en.wikipedia.org/w/api.php?action=query&prop=pageprops&ppprop=wikibase_item&redirects=1&format=json&titles=\" + wikipedia_title)\n\n json_response = r.content.decode(encoding='utf-8')\n query_json = json.loads(json_response, encoding='utf-8')\n wd_item_id = None\n pages = query_json[\"query\"][\"pages\"]\n for page_index, page in 
pages.items():\n if page_index == \"-1\":\n break\n wd_item_id = page[\"pageprops\"][\"wikibase_item\"]\n self.wd_item_id = wd_item_id\n if wd_item_id is None:\n self.is_init = False\n return self\n return self.init_wikidata_item(wd_item_id=wd_item_id)\n\n except Exception as error:\n traceback.print_exc()\n self.source_wd_dict_json = None\n self.is_init = False\n return self\n\n def init_wikidata_item_from_wikipedia_url(self, wikipedia_url):\n wikipedia_title = wikipedia_url.replace(\"https://en.wikipedia.org/wiki/\", \"\")\n return self.init_wikidata_item_from_wikipedia_title(wikipedia_title)\n\n def __parse_wikidata_item_to_dict(self):\n property_dict, property_name_list, relation_property_name_list = self.__parse_wd_json_to_dict(\n self.source_wd_dict_json)\n metadata_dict = self.__extract_metadata(self.source_wd_dict_json)\n\n return dict(property_dict, **metadata_dict), property_name_list, relation_property_name_list\n\n def __parse(self):\n data_dict, property_name_list, relation_property_name_list = self.__parse_wikidata_item_to_dict()\n self.data_dict = data_dict\n self.property_name_list = property_name_list\n self.relation_property_name_list = relation_property_name_list\n\n def get_wikidata_item_property_dict(self):\n return self.data_dict\n\n def get_relation_property_name_list(self):\n return self.relation_property_name_list\n\n def get_wikidata_item_property_name_list(self):\n return self.property_name_list\n\n def exist(self):\n return self.is_init\n\n def get_en_wiki_url(self):\n pass\n\n def get_en_wiki_title(self):\n try:\n if self.source_wd_dict_json:\n if \"sitelinks\" in self.source_wd_dict_json.keys() and \"enwiki\" in self.source_wd_dict_json[\n 'sitelinks'].keys():\n return self.source_wd_dict_json['sitelinks'][\"enwiki\"][\"title\"]\n except Exception:\n traceback.print_exc()\n return None\n\n @staticmethod\n def parse_illegal_json_string(data_json_string):\n\n if data_json_string == None or data_json_string == \"\":\n return None\n\n try:\n try:\n json_instance = json.loads(data_json_string, encoding='utf-8')\n return json_instance\n except Exception:\n data_json_string = json.dumps(eval(data_json_string))\n\n json_instance = json.loads(data_json_string, encoding='utf-8')\n return json_instance\n except Exception:\n traceback.print_exc()\n return None\n\n @staticmethod\n def is_valid_json_string(data_json_string):\n try:\n json_instance = WikiDataItem.parse_illegal_json_string(data_json_string)\n if json_instance == None:\n return False\n if json_instance[\"claims\"] == {}:\n return False\n return True\n except Exception:\n traceback.print_exc()\n return False\n","sub_path":"venv/Lib/site-packages/sekg/wiki/WikiDataItem.py","file_name":"WikiDataItem.py","file_ext":"py","file_size_in_byte":11106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"564746556","text":"#!/usr/bin/env python3\n\n#\n# Python script that parses a yacpm.json and downloads necessary libraries into\n# yacpkgs/ directory.\n#\n\nfrom io import TextIOWrapper\nfrom typing import Any, Union, Tuple\nfrom copy import deepcopy\nimport json\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport urllib.error\nimport urllib.request\n\nYACPM_BRANCH = \"main\"\n\n# global variables (do not touch lines above [not including imports] or merge conflict will happen)\nDIR_ARG = sys.argv[1] if len(sys.argv) > 1 else None\nTOP_LEVEL_CMAKE_DIR = os.path.abspath(DIR_ARG or os.getcwd())\n\n# utility functions\n\ndef dict_try_get(value, key: 
str, return_val_instead: bool = False) -> Any:\n return_val = value if return_val_instead else None\n return value.get(key) if isinstance(value, dict) else return_val\n\ndef dict_get_set(dict_input: dict, key: str, set_value):\n if key not in dict_input:\n dict_input[key] = set_value\n return dict_input[key]\n\ndef get_include_list(dictionary: dict):\n array = dictionary.get(\"include\", [])\n include_list = \"\"\n for item in array:\n include_list += f' \"{item}\"'\n return include_list\n\ndef error(msg: str, print_wrapper: bool = True):\n text = f\"==== YACPM ERROR: {msg}\" if print_wrapper else msg \n print(text, file=sys.stderr)\n exit(1)\n\ndef info(msg: str, print_wrapper: bool = True):\n msg = msg.strip()\n text = f\"==== {msg}\" if print_wrapper else msg\n # normal printing doesn't update realtime with cmake\n subprocess.run(f\"\\\"{sys.executable}\\\" -c \\\"print('''{text}''')\\\"\", shell=True)\n\ndef open_read_write(filename: str, parse_json: bool = False) -> Tuple[TextIOWrapper, Any]:\n file = open(filename, \"r+\")\n content = json.load(file) if parse_json else file.read()\n file.seek(0)\n return (file, content)\n\ndef write_json(data: dict, file: TextIOWrapper):\n json.dump(data, file, indent=4)\n file.truncate()\n file.close()\n\ndef download_if_missing(path: str, outfile: str) -> bool:\n if not os.path.exists(outfile):\n if path.startswith(\"http\"):\n urllib.request.urlretrieve(path, outfile)\n else:\n file_path = os.path.join(TOP_LEVEL_CMAKE_DIR, path)\n shutil.copyfile(file_path, outfile)\n return True\n else:\n return False\n\ndef exec_shell(command: str) -> str:\n proc = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if proc.returncode != 0:\n error(proc.stderr.decode(\"utf-8\"), False)\n\n stdout = proc.stdout.decode(\"utf-8\")\n \n if verbose:\n info(f\"> {command}\", False)\n if stdout: \n info(stdout, False)\n\n return stdout\n\n# Main functions\n\ndef parse_package_version(package_version: str, package_repository: str) -> str:\n git_ref = package_version.replace(\"+\", \"\")\n # get default branch if no version specifed\n if git_ref == \"\":\n result = exec_shell(f\"git remote show {package_repository}\")\n git_ref = re.findall(\"(?<=HEAD branch: ).+\", result)[0]\n\n # fetch minimal info from repo with filter and depth 1 \n exec_shell(f\"git fetch --depth=1 --filter=blob:none origin {git_ref}\")\n exec_shell(\"git sparse-checkout init\")\n exec_shell(\"git checkout FETCH_HEAD\")\n\n # freeze if not starting with +\n if not package_version.startswith(\"+\"):\n rev_name = exec_shell(\"git name-rev HEAD\").strip()\n # ref-name/version is a branch\n if not rev_name.endswith(\"undefined\"):\n # get commit hash\n package_version = exec_shell(\"git rev-parse HEAD\").strip()\n # don't set default branch if it's ++\n elif not package_version.startswith(\"++\"):\n package_version = \"+\" + git_ref\n\n return package_version\n\n# returns remote that was downloaded from (if actually did download)\ndef download_package_metadata(remotes: set, package_name: str) -> Union[str, None]:\n for remote in remotes:\n if remote == \"DEFAULT_REMOTE\":\n remote = f\"https://github.com/Calbabreaker/yacpm/raw/{YACPM_BRANCH}/packages\"\n\n package_path = f\"{remote}/{package_name}\"\n try:\n did_download = download_if_missing(f\"{package_path}/yacpkg.json\", \"yacpkg.json\")\n did_download = download_if_missing(f\"{package_path}/CMakeLists.txt\", \"CMakeLists-downloaded.txt\")\n # try next remote if fail to download\n except (urllib.error.HTTPError, 
FileNotFoundError) as err:\n            if isinstance(err, FileNotFoundError) or err.code == 404:\n                continue\n            else:\n                raise\n\n        # else return successfully\n        return remote if did_download else None\n\n    error(f\"{package_name} was not found on {', '.join(remotes)}!\")\n\ndef generate_cmake_variables(package_info: Union[str, dict]) -> str:\n    cmake_variables = \"\"\n    if isinstance(package_info, dict):\n        # set cmake variables using CACHE FORCE to configure package\n        for variable, value in package_info.get(\"variables\", {}).items():\n            if isinstance(value, bool):\n                value = \"ON\" if value else \"OFF\"\n                type_str = \"BOOL\"\n            elif isinstance(value, str):\n                value = f'\"{value}\"'\n                type_str = \"STRING\"\n            else:\n                error(f\"{variable} needs to be a string or boolean!\")\n\n            if variable == \"BUILD_SHARED_LIBS\":\n                cmake_variables += f\"set({variable} {value})\\n\"\n            else:\n                cmake_variables += f'set({variable} {value} CACHE {type_str} \"\" FORCE)\\n'\n    return cmake_variables\n\n# calculate the sparse-checkout list and download the necessary package sources\ndef download_package_files(yacpkg: dict, package_info: Union[dict, str], progress_print: str):\n    # get the include lists from the yacpm.json package declaration and the\n    # yacpkg.json package config and combine them\n    sparse_checkout_list = \"\"\n    sparse_checkout_list += get_include_list(yacpkg)\n    if isinstance(package_info, dict):\n        sparse_checkout_list += get_include_list(package_info)\n\n    if yacpkg.get(\"^sparse_checkout_list\") != sparse_checkout_list:\n        info(progress_print)\n        exec_shell(f\"git sparse-checkout set {sparse_checkout_list}\")\n        yacpkg[\"^sparse_checkout_list\"] = sparse_checkout_list\n\n# gets all packages inside a yacpm.json and puts them in a combined package\n# dependencies dict, merging all the includes, variables, etc.\ndef get_package_dependencies(package_deps_combined: dict, remotes: set, name_to_dependent: dict, dependent_name: str):\n    package_yacpm = json.load(open(\"yacpm.json\"))\n\n    for package_name, package_info in package_yacpm[\"packages\"].items():\n        package_in_combined = package_deps_combined.get(package_name)\n        if not isinstance(package_in_combined, dict):\n            package_in_combined = {}\n            package_deps_combined[package_name] = package_in_combined\n\n        dependents = package_in_combined.get(\"dependents\", [])\n        if not isinstance(dependents, set):\n            package_in_combined[\"dependents\"] = set(dependents)\n            package_in_combined[\"dependents_left\"] = set(dependents)\n\n        if not package_in_combined.get(\"version\"):\n            package_in_combined[\"version\"] = dict_try_get(package_info, \"version\", True)\n\n        if isinstance(package_info, dict):\n            dict_get_set(package_in_combined, \"include\", []).extend(package_info.get(\"include\", []))\n            variables = dict_get_set(package_in_combined, \"variables\", {})\n            for key, value in package_info.get(\"variables\", {}).items():\n                variables[key] = value\n\n        package_in_combined[\"dependents\"].add(dependent_name)\n        package_in_combined[\"dependents_left\"].discard(dependent_name)\n\n        dict_get_set(name_to_dependent, package_name, []).append(dependent_name)\n\n    # add only unique remotes from yacpm.json\n    remotes |= set(package_yacpm.get(\"remotes\", []))\n
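\n# Illustrative sketch (hypothetical package names, not from a real yacpm.json):\n# if dependents \"A\" and \"B\" both declare \"glfw\", the merged entry ends up as\n#   package_deps_combined[\"glfw\"] == {\n#       \"version\": <whichever dependent declared it first>,\n#       \"include\": <A's include list followed by B's>,\n#       \"variables\": <union of both, later dependents winning on conflicts>,\n#       \"dependents\": {\"A\", \"B\"}, \"dependents_left\": set(),\n#   }\n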
dependents config yet\n dependents_left = dict_try_get(package_info, \"dependents_left\")\n if dependents_left and len(dependents_left) != 0:\n continue\n\n progress_indicator = f\"[{i + 1}/{len(package_names)}]\"\n\n output_dir = f\"yacpkgs/{package_name}\"\n # make the package output dir (repository dir as well for later use)\n os.makedirs(f\"{output_dir}/repository\", exist_ok=True) \n os.chdir(output_dir)\n\n package_version = dict_try_get(package_info, \"version\", True)\n package_repository = dict_try_get(package_info, \"repository\")\n specified_cmake_file = dict_try_get(package_info, \"cmake\") \n\n if specified_cmake_file != None:\n download_if_missing(specified_cmake_file, \"CMakeLists-downloaded.txt\")\n\n # if the user has specifed both the package repo and CMakeLists then we can\n # just use that instead downloading the package metadata\n if specified_cmake_file == None or package_repository == None:\n remote_used = download_package_metadata(remotes, package_name)\n if remote_used:\n info(f\"{progress_indicator} Downloaded {package_name} package metadata from {remote_used}\")\n\n if not os.path.exists(\"yacpkg.json\"):\n open(\"yacpkg.json\", \"w\").write(\"{}\")\n\n yacpkg_file, yacpkg = open_read_write(\"yacpkg.json\", True)\n\n package_repository = package_repository or yacpkg[\"repository\"]\n os.chdir(\"repository\")\n\n # initialize git repository\n if not os.path.exists(\".git\"):\n exec_shell(\"git init\")\n exec_shell(f\"git remote add origin {package_repository}\")\n yacpkg[\"^current_version\"] = None\n\n # all keys with ^ at the front was created by this script\n if yacpkg.get(\"^current_version\") != package_version:\n info(f\"{progress_indicator} Fetching {package_name}@{package_version} at {package_repository}\")\n\n # freeze package versions that use commit hashes\n package_version = parse_package_version(package_version, package_repository)\n\n if isinstance(package_info, str):\n package_list[package_name] = package_version\n else:\n package_info[\"version\"] = package_version\n\n if package_name in package_deps_combined:\n package_deps_combined[package_name][\"version\"] = package_version\n\n yacpkg[\"^current_version\"] = package_version\n yacpkg[\"^sparse_checkout_list\"] = \"\"\n\n prepend_cmake = generate_cmake_variables(package_info)\n\n cmake_lists_content = open(\"../CMakeLists-downloaded.txt\").read()\n open(\"../CMakeLists.txt\", \"w\").write(prepend_cmake + cmake_lists_content)\n\n download_print = f\"{progress_indicator} Downloading files for {package_name}\"\n if p_name_to_dependent and package_name in p_name_to_dependent:\n download_print += f\" (required by {', '.join(p_name_to_dependent[package_name])})\"\n download_package_files(yacpkg, package_info, download_print)\n write_json(yacpkg, yacpkg_file)\n\n # run potential yacpm config inside the yacpkgs config\n if \"yacpm\" in yacpkg:\n json.dump(yacpkg[\"yacpm\"], open(\"yacpm.json\", \"w\"))\n exec_shell(f\"\\\"{sys.executable}\\\" {__file__} {TOP_LEVEL_CMAKE_DIR}\")\n\n if os.path.isfile(\"yacpm.json\"):\n get_package_dependencies(package_deps_combined, remotes, name_to_dependent, package_name)\n\n os.chdir(TOP_LEVEL_CMAKE_DIR)\n\n # use package_dep_names since package_deps_combined is a combination of all\n # iteration while package_dep_names contains package names only from this iteration\n if name_to_dependent:\n info(f\"Calculating dependencies: {', '.join(name_to_dependent.keys())}\")\n get_packages(package_deps_combined, remotes, package_deps_combined, name_to_dependent)\n\ndef 
update_package_list_deps(dependency_packages: dict, package_list: dict, package_deps_combined: dict):\n    for package_name, package_info in package_deps_combined.items():\n        # remove missing dependents\n        dependents = package_info[\"dependents\"]\n        has_parsed_dep = isinstance(dependents, set)\n        if has_parsed_dep:\n            dependents.difference_update(package_info[\"dependents_left\"])\n\n        # if no package depends on this package move it back to the normal package list\n        if len(dependents) == 0 or not has_parsed_dep:\n            package_list[package_name] = dependency_packages[package_name]\n            dependency_packages.pop(package_name)\n            continue\n\n        if package_name in package_list:\n            # move package from yacpm.packages to the dependency packages list\n            pkg_list_pkg = package_list[package_name]\n            dependency_packages[package_name] = { \"version\": pkg_list_pkg } if isinstance(pkg_list_pkg, str) else pkg_list_pkg\n            package_list.pop(package_name)\n        elif not isinstance(dependency_packages.get(package_name), dict):\n            dependency_packages[package_name] = { \"version\": package_info[\"version\"] }\n\n        dependency_packages[package_name][\"dependents\"] = list(dependents)\n\nif __name__ == \"__main__\":\n    # load yacpm.json\n    yacpm_file, yacpm = open_read_write(\"yacpm.json\", True)\n    verbose = yacpm.get(\"verbose\")\n\n    package_list = yacpm[\"packages\"]\n    if not isinstance(package_list, dict):\n        error(\"Expected yacpm.json to have a packages field that is an object!\")\n\n    if not os.path.isdir(\"yacpkgs\"):\n        os.mkdir(\"yacpkgs\")\n\n    all_package_names = list(package_list.keys())\n\n    # only do this if we are the top level yacpm or if the top level yacpm.json\n    # doesn't exist, in order to handle multiple packages using the same package\n    if TOP_LEVEL_CMAKE_DIR == os.getcwd() or not os.path.isfile(f\"{TOP_LEVEL_CMAKE_DIR}/yacpm.json\"):\n        remotes = set(yacpm.get(\"remotes\", [\"DEFAULT_REMOTE\"]))\n        dependency_packages = yacpm.get(\"dependency_packages\", {})\n        package_deps_combined = deepcopy(dependency_packages)\n        get_packages(package_list, remotes, package_deps_combined)\n\n        if package_deps_combined:\n            update_package_list_deps(dependency_packages, package_list, package_deps_combined)\n            all_package_names.extend(dependency_packages.keys())\n            if \"dependency_packages\" not in yacpm:\n                yacpm[\"dependency_packages\"] = dependency_packages\n\n        write_json(yacpm, yacpm_file)\n\n        # prune unused packages in yacpkgs\n        for directory in next(os.walk(\"yacpkgs\"))[1]:\n            if directory not in package_list and directory not in dependency_packages:\n                info(f\"Removing unused package {directory}\")\n                shutil.rmtree(f\"yacpkgs/{directory}\")\n\n    # write yacpkgs/packages.cmake\n    packages_cmake_output = f\"set(YACPM_PKGS {' '.join(all_package_names)})\\n\\n\"\n    for name in all_package_names:\n        packages_cmake_output += f\"if(NOT TARGET {name})\\n\"\n        packages_cmake_output += f\"    add_subdirectory(${{CMAKE_SOURCE_DIR}}/yacpkgs/{name} yacpkgs/{name})\\n\"\n        packages_cmake_output += \"endif()\\n\"\n    open(\"yacpkgs/packages.cmake\", \"w\").write(packages_cmake_output)\n","sub_path":"yacpm.py","file_name":"yacpm.py","file_ext":"py","file_size_in_byte":15497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"207206827","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\n\n\ndef hourglassSum(arr):\n    # scan every 3x3 hourglass and keep the largest sum\n    max_sum = None\n    for i in range(len(arr) - 2):\n        for j in range(len(arr[i]) - 2):\n            total = sum(arr[i][j:j + 3]) + arr[i + 1][j + 1] + sum(arr[i + 2][j:j + 3])\n            if max_sum is None or total > max_sum:\n                max_sum = total\n\n    return max_sum\n\nif __name__ == '__main__':\n    arr = [\n        [1, 1, 1, 0, 0, 0],\n        [0, 1, 0, 0, 0, 0],\n        [1, 1, 1, 0, 0, 0],\n        [0, 0, 2, 4, 4, 0],\n        [0, 0, 0, 
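# (illustrative note: with the corrected hourglassSum above, the\n        # maximum hourglass sum for this sample grid works out to 19)\n        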
2, 0, 0],\n [0, 0, 1, 2, 4, 0],\n ]\n result = hourglassSum(arr)\n print(result)","sub_path":"Arrays/Hour Glass/HourGlass.py","file_name":"HourGlass.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"322298712","text":"\nfrom prompt_toolkit import PromptSession\nfrom prompt_toolkit.completion import WordCompleter\n\nfrom concurrent.futures import ThreadPoolExecutor\nimport logging\nimport os\nimport shlex\n\nfrom state import State\nfrom completer import ShellCompleter\nfrom handler import meta_handlers, task_handlers, load_handlers\nimport config\nimport constants\nimport mlogger\n\n# Configurations\nload_handlers()\n\ndef handle_command(command_with_args, state, logger):\n command, *args = command_with_args\n \n if command in task_handlers:\n try:\n with ThreadPoolExecutor(max_workers=len(state.browsers), thread_name_prefix=\"thread\") as executor:\n for browser in state.get_target_browser_names():\n executor.submit(task_handlers[command],\n browser,\n state.browsers[browser],\n logger,\n args)\n logger.info(\"Finished.\")\n except Exception as e:\n logger.error(\"An error occured while executing {} in {} with arguments {}.\".format(command, state.browsers, args))\n logger.debug(\"{}\".format(e))\n\n elif command in meta_handlers:\n meta_handlers[command](state, logger, args)\n else:\n logger.error(\"Command not found: {}\".format(command))\n \n# Act as an interpreter until EOF.\ndef interpreter():\n logger = mlogger.getLogger(__name__)\n state = State(logger)\n \n command_completer = ShellCompleter(state)\n psession = PromptSession(completer=command_completer)\n\n\n if config.is_state_saving_enabled:\n if os.path.exists(config.default_path_to_state_file):\n logger.info(\"Restoring state ...\")\n state.restore(config.default_path_to_state_file)\n \n while True:\n try:\n input_raw = psession.prompt(\"[{}]> \".format(state.current_target))\n try:\n tokens = list(shlex.shlex(input_raw, punctuation_chars=True))\n colon_indices = [i for i in range(0, len(tokens)) if tokens[i] == \";\"]\n for start_index, end_index in zip([0] + list(map(lambda x: x+1, colon_indices)), colon_indices + [len(tokens)]):\n command_with_args = [t[1:-1] if t.startswith('\"') and t.endswith('\"') else t\n for t in tokens[start_index:end_index]]\n handle_command(command_with_args, state, logger)\n except Exception as e:\n logger.error(\"An error ocurred when handling commands.\")\n logger.debug(\"{}\".format(e))\n if not state.is_running:\n break \n except KeyboardInterrupt:\n continue\n \n except EOFError:\n break\n \n except Exception as e:\n logger.error(\"An unknown error occured :-(\")\n logger.debug(\"{}\".format(e))\n break\n\n logger.info(\"Saving state...\")\n state.save(config.default_path_to_state_file)\n logger.info(\"Done. Disconnecting...\")\n handle_command([\"disconnect\", \"all\"], state, logger)\n logger.info(\"Done. Bye! 
:-)\")\n\n\nif __name__ == '__main__':\n interpreter()\n","sub_path":"src/interpreter/interpreter.py","file_name":"interpreter.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"339649883","text":"import keras\nfrom keras.models import Sequential, load_model\nfrom keras.optimizers import Adam\nfrom keras.layers import Dense, Dropout, LSTM, Flatten, BatchNormalization, TimeDistributed, Activation\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nimport numpy as np\nfrom log import log\n\nX = np.load(\"gesture_data/x.npy\")\ny = np.load(\"gesture_data/y.npy\")\n\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 4)\n\nx_train = keras.utils.normalize(x_train, axis = 1)\nx_test = keras.utils.normalize(x_test, axis = 1)\n\nepochs = 200\nbatch_size = 67\n_dropout = 0.5\n_activation='relu'\n_optimizer='adam'\ninput_shape = x_train.shape[1], x_train.shape[2]\nmodel = Sequential()\nmodel.add(LSTM(128, input_shape=input_shape, return_sequences = True))\nmodel.add(Dropout(_dropout)) \nmodel.add(BatchNormalization())\n\nmodel.add(LSTM(128, input_shape=input_shape, return_sequences = True))\nmodel.add(Dropout(_dropout)) \nmodel.add(BatchNormalization())\n\nmodel.add(LSTM(64, input_shape=input_shape, return_sequences = True))\nmodel.add(Dropout(_dropout)) \nmodel.add(BatchNormalization())\n\nmodel.add(LSTM(32, input_shape=input_shape, return_sequences = True))\nmodel.add(Dropout(_dropout)) \nmodel.add(BatchNormalization())\n\nmodel.add(TimeDistributed(Dense(3))) \n\nmodel.add(Flatten())\nmodel.add(Dense(y_train.shape[1], activation = tf.nn.softmax))\nmodel.add(Activation(_activation))\n\nmodel.compile(optimizer=_optimizer,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\nmodel.fit(x_train, y_train, epochs=epochs,batch_size=batch_size)\nmodel.summary()\nloss, acc = model.evaluate(x_test, y_test,\n batch_size=batch_size)\n\nlog('Loss: {:.3}'.format(loss))\nlog('Acc: {:.3}'.format(acc))\n\nmodel.save('gesture_data/pointing.h5')\n","sub_path":"vision/gesture/gesture_data/rnn_lstm_classifier.py","file_name":"rnn_lstm_classifier.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"602894267","text":"valor=input(\"Qual o valor da casa?\")\nsalario=input(\"Qual o valor do seu salário?\")\nanos=input(\"Em quantos anos planeja pagar?\")\nmeses=anos*12\nprestacaomensal=valor/meses\nmaximo=salario*0,3\nif prestacaomensal\" and pads sentences using\n \"\" token. Config parameters can have ``preprocessor`` property which\n is used to preprocess the item passed and ``max_length`` property which\n points to maximum length of the sentence/tokens which can be convert to\n indices. If the length is smaller, the sentence will be padded. 
Parameters\n    for \"vocab\" must be passed.\n\n    **Key**: vocab\n\n    Example Config::\n\n        task_attributes:\n            vqa:\n                vqa2:\n                    processors:\n                      text_processor:\n                        type: vocab\n                        params:\n                          max_length: 14\n                          vocab:\n                            type: intersected\n                            embedding_name: glove.6B.300d\n                            vocab_file: vocabs/vocabulary_100k.txt\n\n    Args:\n        vocab (dict): config node used to build the vocab object.\n        preprocessor (dict): config node used to build the sentence\n                             preprocessor.\n\n    Attributes:\n        vocab (Vocab): Vocab class object which is an abstraction over the\n                       vocab file passed.\n    \"\"\"\n\n    MAX_LENGTH_DEFAULT = 50\n    PAD_TOKEN = '<pad>'\n    PAD_INDEX = 0\n\n    def __init__(self,\n                 vocab=dict(\n                     type='IntersectedVocab',\n                     vocab_file='textvqa/defaults/extras/vocabs/vocabulary_100k.txt',\n                     embedding_name='glove.6B.300d'),\n                 preprocessor=dict(type='SimpleSentenceProcessor'),\n                 *args,\n                 **kwargs):\n\n        # self.vocab = Vocab(*args, **config.vocab, **kwargs)\n        self.vocab = build_vocab(vocab)\n        self.max_length = self.MAX_LENGTH_DEFAULT\n        self.preprocessor = build_preprocessor(preprocessor)\n\n        # self._init_extras(config)\n\n    # def _init_extras(self, config, *args, **kwargs):\n    #     self.writer = registry.get(\"writer\")\n    #     self.preprocessor = None\n    #\n    #     if hasattr(config, \"max_length\"):\n    #         self.max_length = config.max_length\n    #     else:\n    #         warnings.warn(\n    #             \"No 'max_length' parameter in Processor's \"\n    #             \"configuration. Setting to {}.\".format(self.MAX_LENGTH_DEFAULT)\n    #         )\n    #         self.max_length = self.MAX_LENGTH_DEFAULT\n    #\n    #     if \"preprocessor\" in config:\n    #         self.preprocessor = Processor(config.preprocessor, *args, **kwargs)\n    #\n    #     if self.preprocessor is None:\n    #         raise ValueError(\n    #             f\"No text processor named {config.preprocessor} is defined.\"\n    #         )\n\n    def __call__(self, item):\n        \"\"\"Call requires item to have either a \"tokens\" key or a\n        \"text\" key. 
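A typical (hypothetical) argument\n        looks like {\"text\": \"what color is it?\"} or {\"tokens\": [\"what\",\n        \"color\", \"is\", \"it\", \"?\"]}. 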
If \"text\" is present, it will tokenized using the\n preprocessor.\n\n Args:\n item (Dict): Dict containing the \"text\" or \"tokens\".\n\n Returns:\n Dict: Dict containing indices in \"text\" key, \"tokens\" in \"tokens\"\n key and \"length\" of the string in \"length\" key.\n \"\"\"\n indices = None\n if not isinstance(item, dict):\n raise TypeError('Argument passed to the processor must be '\n \"a dict with either 'text' or 'tokens' as \"\n 'keys')\n if 'tokens' in item:\n tokens = item['tokens']\n indices = self._map_strings_to_indices(item['tokens'])\n elif 'text' in item:\n if self.preprocessor is None:\n raise AssertionError('If tokens are not provided, a text ' 'processor must be defined in the config')\n\n tokens = self.preprocessor({'text': item['text']})['text']\n indices = self._map_strings_to_indices(tokens)\n else:\n raise AssertionError(\"A dict with either 'text' or 'tokens' keys \" 'must be passed to the processor')\n\n tokens, length = self._pad_tokens(tokens)\n\n return {'text': indices, 'tokens': tokens, 'length': length}\n\n def _pad_tokens(self, tokens):\n padded_tokens = [self.PAD_TOKEN] * self.max_length\n token_length = min(len(tokens), self.max_length)\n padded_tokens[:token_length] = tokens[:token_length]\n token_length = torch.tensor(token_length, dtype=torch.long)\n return padded_tokens, token_length\n\n def get_pad_index(self):\n \"\"\"Get index of padding token in vocabulary.\n\n Returns:\n int: index of the padding token.\n \"\"\"\n return self.vocab.get_pad_index()\n\n def get_vocab_size(self):\n \"\"\"Get size of the vocabulary.\n\n Returns:\n int: size of the vocabulary.\n \"\"\"\n return self.vocab.get_size()\n\n def _map_strings_to_indices(self, tokens):\n length = min(len(tokens), self.max_length)\n tokens = tokens[:length]\n\n output = torch.zeros(self.max_length, dtype=torch.long)\n output.fill_(self.vocab.get_pad_index())\n\n for idx, token in enumerate(tokens):\n output[idx] = self.vocab.stoi[token]\n\n return output\n","sub_path":"imix/data/vqadata/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"476758633","text":"import argparse\nimport numpy as np\nimport threading\n\nfrom copy import deepcopy\nfrom os import system\n\nimport rospy\n\nimport cv2\n\nfrom cv2 import cv\nfrom cv_bridge import CvBridge\n\nclass image_test():\n def __init__(self, image):\n pass\n\n def _show_image(self):\n self.subLock.acquire(True)\n local_image = deepcopy(self._np_image)\n self.subLock.release()\n\n for idx, points in enumerate(self._roi_points):\n cv2.circle(local_image, (points[0], points[1]), 5, (255, 0, 0), 2)\n\n cv2.polylines(local_image, np.int32([np.array(self._roi_points)]),\n 1, (0, 255, 0), 2)\n\n cv2.imshow(\"Connect Four RGB\", local_image)\n\n cv.SetMouseCallback(\"Connect Four RGB\", self._on_mouse_click, 0)\n cv.CreateTrackbar(\"Gain\", \"Connect Four RGB\", self._gain_slider,\n 100, self._on_gain_slider)\n cv.CreateTrackbar(\"Red Threshold\", \"Connect Four RGB\",\n self._red_thresh, 500, self._on_red_slider)\n cv.CreateTrackbar(\"Yellow Threshold\", \"Connect Four RGB\",\n self._yellow_thresh, 500, self._on_yellow_slider)\n cv.WaitKey(3)\n\n def _on_gain_slider(self, pos):\n self._gain_slider = pos\n self._gain_set = True\n self._slider_time = rospy.Time.now()\n\n def _on_red_slider(self, pos):\n self._red_thresh = pos\n\n def _on_yellow_slider(self, pos):\n self._yellow_thresh = pos\n\n def 
_on_mouse_click(self, event, x, y, flags, param):\n if event == cv.CV_EVENT_LBUTTONDOWN:\n width = self.cv_image.shape[0]\n height = self.cv_image.shape[1]\n for idx, points in enumerate(self._roi_points):\n if (x <= points[0] + 5 and x >= points[0] - 5\n and y <= points[1] + 5 and y >= points[1] - 5):\n self._roi_move = True\n self._point_selected = idx\n\n elif event == cv.CV_EVENT_MOUSEMOVE and self._roi_move:\n self._roi_points[self._point_selected] = [x, y]\n\n elif event == cv.CV_EVENT_LBUTTONUP and self._roi_move:\n self._roi_move = False\n","sub_path":"temp/open_cv_slider_example.py","file_name":"open_cv_slider_example.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"524989712","text":"from django.shortcuts import render\nfrom robust.checker import *\nfrom team.models import *\nfrom utilities.common import current_time, encode\nfrom django.http import HttpResponse\n\n\ndef push_msg(request):\n data = Checker.request(request, ['user', 'content'])\n user = Member.objects.get(name=data['user'])\n msg = Message()\n msg.user = user\n msg.content = data['content']\n msg.time = current_time()\n msg.save()\n return HttpResponse(encode({\"code\": 100, \"log\": \"Normal\", \"data\": None}))\n\n\ndef pull_msg(request):\n #now = current_time()\n msgs = Message.objects.all()\n #later = lambda t: t >= now\n dict_m = lambda m: {'id': m.user.name, 'time': m.time[0:10]+' '+m.time[11:16], 'content': m.content}\n msg_q = [dict_m(m) for m in msgs]\n msg_q.sort(key=lambda m: m['time'], reverse=True)\n return HttpResponse(encode(msg_q))\n\n\ndef pull_bug(request):\n bugs = Bug.objects.all()\n dict_b = lambda b: {'src': b.source.name, 'desc': b.description,\n 'reptr': b.reporter.name, 'checked': b.checked,\n 'time': b.time, 'id': b.id}\n bug_q = [dict_b(b) for b in bugs if not (b.checked and b.dealt)]\n bug_q.sort(key=lambda b: b['time'], reverse=True)\n return HttpResponse(encode(bug_q))\n\n\ndef report_bug(request):\n data = Checker.request(request, ['src', 'desc', 'reptr'])\n bug = Bug()\n bug.source = Api.objects.get(name=data['src'])\n bug.reporter = Member.objects.get(name=data['reptr'])\n bug.description = data['desc']\n bug.time = current_time()\n bug.checked = True\n bug.save()\n return HttpResponse(encode({\"code\":100, \"log\": \"Normal\", \"data\": None}))\n\n\"\"\"\ndef check_bug(request):\n data = Checker.request(request, ['id'])\n bug = Bug.objects.get(id=data['id'])\n bug.checked = True\n\"\"\"\n\n\ndef deal_bug(request):\n data = Checker.request(request, ['id'])\n bug = Bug.objects.get(id=data['id'])\n bug.checked = True\n bug.dealt = True\n return HttpResponse(encode({\"code\": 100, \"log\": \"Normal\", \"data\": None}))\n\n\ndef update_api(request):\n data = Checker.request(request, ['name', 'digest', 'url', 'response', 'request'])\n api = Api()\n api.request = data['request']\n api.name = data['name']\n api.digest = data['digest']\n api.url = data['url']\n api.response = data['response']\n api.save()\n return HttpResponse(encode({\"code\": 100, \"log\": \"Normal\", \"data\": None}))\n\n\ndef get_api(request):\n data = Checker.request(request, ['name'])\n api = Api.objects.get(name=data['name'])\n return HttpResponse(encode({'url': api.url,\n 'name': api.name,\n 'digest': api.digest,\n 'request': api.request,\n 'response': 
api.response}))","sub_path":"team/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"142807629","text":"import logging\n\nfrom pajbot.managers import AdminLogManager\nfrom pajbot.models.command import Command\nfrom pajbot.modules import BaseModule\nfrom pajbot.modules import ModuleType\nfrom pajbot.modules.basic import BasicCommandsModule\n\nlog = logging.getLogger(__name__)\n\n\nclass AdminCommandsModule(BaseModule):\n ID = __name__.split('.')[-1]\n NAME = 'Basic admin commands'\n DESCRIPTION = 'All miscellaneous admin commands'\n CATEGORY = 'Feature'\n ENABLED_DEFAULT = True\n MODULE_TYPE = ModuleType.TYPE_ALWAYS_ENABLED\n PARENT_MODULE = BasicCommandsModule\n\n def whisper(self, **options):\n message = options['message']\n bot = options['bot']\n\n if message:\n msg_args = message.split(' ')\n if len(msg_args) > 1:\n username = msg_args[0]\n rest = ' '.join(msg_args[1:])\n bot.whisper(username, rest)\n\n def level(self, **options):\n message = options['message']\n bot = options['bot']\n source = options['source']\n\n if message:\n msg_args = message.split(' ')\n if len(msg_args) > 1:\n username = msg_args[0].lower()\n new_level = int(msg_args[1])\n if new_level >= source.level:\n bot.whisper(source.username, 'You cannot promote someone to the same or higher level as you ({0}).'.format(source.level))\n return False\n\n # We create the user if the user didn't already exist in the database.\n user = bot.users[username]\n\n old_level = user.level\n user.level = new_level\n\n log_msg = '{}\\'s user level changed from {} to {}'.format(\n user.username_raw,\n old_level,\n new_level)\n\n bot.whisper(source.username, log_msg)\n\n AdminLogManager.add_entry('Userlevel edited', source, log_msg)\n\n return True\n\n bot.whisper(source.username, 'Usage: !level USERNAME NEW_LEVEL')\n return False\n\n def load_commands(self, **options):\n self.commands['w'] = Command.raw_command(self.whisper,\n level=2000,\n description='Send a whisper from the bot')\n self.commands['level'] = Command.raw_command(self.level,\n level=1000,\n description='Set a users level')\n","sub_path":"pajbot/modules/basic/admincommands.py","file_name":"admincommands.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"110146009","text":"#!/usr/bin/env python3\ntry:\n import sys\n import re\n import json\n import argparse\n import time\n import requests\n import html2text\n import hashlib\n import difflib\n import sqlalchemy\n from sqlalchemy import Column, Integer, String, Table, MetaData\n from sqlalchemy.ext.declarative import declarative_base\n from sqlalchemy.orm import sessionmaker\nexcept ImportError:\n print(\"\"\"Import failed make sure you have set up the virtual enviroment.\npython3 -m venv venv\nsource venv/bin/activate\npip install -r requirements.txt\"\"\")\n exit(1)\n\ndef get_text(html):\n \"\"\"\n Input html. 
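(As a rough illustration, html such as\n    \"<p>Hi <a href='x'>there</a></p>\" should come back as something like\n    \"Hi there\".) 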
Returns utf-8 markdown without links\n\n requests.get().text will be used as the input data\n html2text will be used to remove most of the changing parts of the response\n links will be ignored since most large sites have dynamic links\n if you want to closely monitor a basic site it is probably better to hash\n requests.get().content and not bother stripping the html\n \"\"\"\n h = html2text.HTML2Text()\n h.ignore_links = True\n return h.handle(html)\n\ndef get_md5(html):\n \"\"\"\n Input html. Returns MD5 hash of the text.\n \"\"\"\n return hashlib.md5(get_text(html).encode('utf-8')).hexdigest()\n\ndef failed_connection(check, session):\n current_time = time.time()\n if not check.failed_since:\n check.failed_since = current_time\n session.commit()\n if current_time - check.failed_since >= check.max_down_time:\n print('Warning: Can\\'t connect to {}'.format(check.url))\n\n return ''\n\ndef check_if_recovered(check, session):\n if not check.failed_since:\n return ''\n check.failed_since = 0\n session.commit()\n last_run = check.run_after - check.check_frequency\n if last_run - check.failed_since >= check.max_down_time:\n print('Reastablished connection to {}'.format(check.url))\n\n return ''\n\ndef run_checks():\n \"\"\"Perform hash, string, difference and raw checks for all stored url's\"\"\"\n for check in session.query(MD5Check).filter(MD5Check.run_after <\n time.time()).order_by(MD5Check.id):\n check.run_after = time.time() + check.check_frequency\n session.commit()\n try:\n url_content = requests.get(check.url, timeout=check.check_timeout)\n except requests.exceptions.ConnectionError:\n failed_connection(check, session)\n continue\n\n if url_content.status_code != 200:\n failed_connection(check, session)\n continue\n\n check_if_recovered(check, session)\n try:\n new_hash = get_md5(url_content.text)\n except:\n print('Error: Failed to hash response from {}'.format(check.url))\n continue\n\n if new_hash != check.current_hash:\n if new_hash == check.old_hash:\n print('The md5 for {} has been reverted'.format(check.url))\n else:\n print('The md5 for {} has changed'.format(check.url))\n\n check.old_hash = check.current_hash\n check.current_hash = new_hash\n session.commit()\n\n for check in session.query(StringCheck).filter(StringCheck.run_after <\n time.time()).order_by(StringCheck.id):\n check.run_after = time.time() + check.check_frequency\n session.commit()\n try:\n url_content = requests.get(check.url, timeout=check.check_timeout)\n except requests.exceptions.ConnectionError:\n failed_connection(check, session)\n continue\n\n if url_content.status_code != 200:\n failed_connection(check, session)\n continue\n\n check_if_recovered(check, session)\n string_found = check.string_to_match in get_text(url_content.text)\n if string_found != check.present:\n if check.present:\n print('{} is no longer present on {}'.format(\n check.string_to_match,\n check.url))\n check.present = 0\n else:\n print('{} is now present on {}'.format(check.string_to_match,\n check.url))\n check.present = 1\n\n session.commit()\n\n for check in session.query(DiffCheck).filter(DiffCheck.run_after <\n time.time()).order_by(DiffCheck.id):\n check.run_after = time.time() + check.check_frequency\n session.commit()\n try:\n url_content = requests.get(check.url, timeout=check.check_timeout)\n except requests.exceptions.ConnectionError:\n failed_connection(check, session)\n continue\n\n if url_content.status_code != 200:\n failed_connection(check, session)\n continue\n\n check_if_recovered(check, session)\n text = 
get_text(url_content.text)\n        if text != check.current_content:\n            for line in difflib.context_diff(check.current_content.split('\\n'),\n                    text.split('\\n'),\n                    fromfile='Old content for {}'.format(check.url),\n                    tofile='New content for {}'.format(check.url)):\n                print(line)\n            check.current_content = text\n            session.commit()\n\n    for check in session.query(RawCheck).filter(RawCheck.run_after <\n            time.time()).order_by(RawCheck.id):\n        check.run_after = time.time() + check.check_frequency\n        session.commit()\n        try:\n            url_content = requests.get(check.url, timeout=check.check_timeout)\n        except requests.exceptions.ConnectionError:\n            failed_connection(check, session)\n            continue\n\n        if url_content.status_code != 200:\n            failed_connection(check, session)\n            continue\n\n        check_if_recovered(check, session)\n        try:\n            new_hash = hashlib.md5(url_content.text.encode('utf-8')).hexdigest()\n        except:\n            print('Error: Failed to hash response from {}'.format(check.url))\n            continue\n\n        if new_hash == check.current_hash:\n            continue\n\n        check.old_hash = check.current_hash\n        session.commit()\n        try:\n            m = re.search(check.expression, url_content.text, re.S)\n        except:\n            # I couldn't catch the sre_constants.error I'm looking for so...\n            print('Error: invalid regular expression')\n            continue\n\n        try:\n            capture_groups = m.groups()\n        except AttributeError:\n            print('Error: no matches for regular expression on {}'.format(check.url))\n            continue\n\n        try:\n            old_capture_groups = tuple(json.loads(check.capture_groups))\n        except:\n            print('Error: could not retrieve data for raw check of {}'.format(\n                check.url))\n            continue\n\n        if capture_groups == old_capture_groups:\n            continue\n\n        print('RawCheck with expression {} changed for {}'.format(\n            check.expression,\n            check.url))\n        for count, old_capture_group in enumerate(old_capture_groups):\n            if old_capture_group != capture_groups[count]:\n                print('{} has been changed to {}'.format(old_capture_group,\n                    capture_groups[count]))\n\n        check.capture_groups = json.dumps(capture_groups)\n        session.commit()\n\n    return ''\n\ndef validate_input(max_down_time, check_frequency, check_timeout):\n    \"\"\"\n    Checks that integers are given and that check_timeout is positive.\n\n    Negative max_down_time and check_frequency values have no purpose but are\n    still a valid input. The check would run each time the script is called and\n    alert if a connection failed; values of 0 will have the same effect.\n    \"\"\"\n    try:\n        max_down_time = int(max_down_time)\n    except ValueError:\n        print('Error: max_down_time {} given, must be an integer'.format(\n            max_down_time))\n        exit(1)\n\n    try:\n        check_frequency = int(check_frequency)\n    except ValueError:\n        print('Error: check_frequency {} given, must be an integer'.format(\n            check_frequency))\n        exit(1)\n\n    try:\n        check_timeout = int(check_timeout)\n    except ValueError:\n        print('Error: check_timeout {} given, must be an integer'.format(\n            check_timeout))\n        exit(1)\n\n    if not check_timeout > 0:\n        print('Error: check-timeout {} given, must be greater than 0'.format(\n            check_timeout))\n        exit(1)\n\n    return (max_down_time, check_frequency, check_timeout)\n\ndef add_md5(url, max_down_time, check_frequency, check_timeout):\n    \"\"\"\n    Add a database entry for a url to monitor the md5 hash of. 
Returns message\n relating to success.\n \"\"\"\n max_down_time, check_frequency, check_timeout = validate_input(\n max_down_time, check_frequency, check_timeout)\n try:\n url_content = requests.get(url, timeout=check_timeout)\n except requests.exceptions.ConnectionError:\n return 'Error: Could not connect to chosen url {}'.format(url)\n except requests.exceptions.MissingSchema as e:\n return e\n except requests.exceptions.InvalidSchema as e:\n return e\n\n if url_content.status_code != 200:\n return 'Error: {} code from server'.format(url_content.status_code)\n\n try:\n current_hash = get_md5(url_content.text)\n except:\n return 'Error: Failed to hash response from {}'.format(url)\n check = MD5Check(url=url,\n current_hash=current_hash,\n failed_since=0,\n max_down_time=max_down_time,\n run_after=0,\n check_frequency=check_frequency,\n check_timeout=check_timeout)\n session.add(check)\n try:\n session.commit()\n except sqlalchemy.exc.IntegrityError:\n session.rollback()\n return 'Error: An entry for {} is already in database'.format(url)\n else:\n return 'Added MD5 Check for {}'.format(url)\n\ndef add_string(url, string, max_down_time, check_frequency, check_timeout):\n \"\"\"\n Add a database entry for a url to monitor for a string. Returns message\n relating to success.\n \"\"\"\n max_down_time, check_frequency, check_timeout = validate_input(\n max_down_time, check_frequency, check_timeout)\n try:\n url_content = requests.get(url, timeout=check_timeout)\n except requests.exceptions.ConnectionError:\n return 'Error: Could not connect to chosen url {}'.format(url)\n except requests.exceptions.MissingSchema as e:\n return e\n except requests.exceptions.InvalidSchema as e:\n return e\n\n if url_content.status_code != 200:\n return 'Error: {} code from server'.format(url_content.status_code)\n\n string_exists = 0\n if string in get_text(url_content.text):\n string_exists = 1\n\n check = StringCheck(url=url,\n string_to_match=string,\n present=string_exists,\n failed_since=0,\n max_down_time=max_down_time,\n run_after= 0,\n check_frequency=check_frequency,\n check_timeout=check_timeout)\n session.add(check)\n try:\n session.commit()\n except sqlalchemy.exc.IntegrityError:\n session.rollback()\n return 'Error: An entry for {} is already in database'.format(url)\n else:\n if string_exists:\n print('{} is currently present, will alert if this changes'.format(\n string))\n else:\n print('{} is currently not present, will alert if this changes'\n.format(string))\n\n return 'Added String Check for {}'.format(url)\n\ndef add_diff(url, max_down_time, check_frequency, check_timeout):\n \"\"\"\n Add a database entry for a url to monitor for any text changes.\n Returns message relating to success.\n \"\"\"\n max_down_time, check_frequency, check_timeout = validate_input(\n max_down_time, check_frequency, check_timeout)\n try:\n url_content = requests.get(url, timeout=check_timeout)\n except requests.exceptions.ConnectionError:\n return 'Error: Could not connect to chosen url {}'.format(url)\n except requests.exceptions.MissingSchema as e:\n return e\n except requests.exceptions.InvalidSchema as e:\n return e\n\n if url_content.status_code != 200:\n return 'Error: {} code from server'.format(url_content.status_code)\n\n check = DiffCheck(url=url,\n current_content=get_text(url_content.text),\n failed_since=0,\n max_down_time=max_down_time,\n run_after=0,\n check_frequency=check_frequency,\n check_timeout=check_timeout)\n session.add(check)\n try:\n session.commit()\n except 
sqlalchemy.exc.IntegrityError:\n session.rollback()\n return 'Error: An entry for {} is already in database'.format(url)\n else:\n return 'Added Diff Check for {}'.format(url)\n\ndef add_raw(url, expression, max_down_time, check_frequency, check_timeout):\n \"\"\"\n Add a database entry for a url to monitor for a change using regex.\n Returns message relating to success.\n \"\"\"\n max_down_time, check_frequency, check_timeout = validate_input(\n max_down_time, check_frequency, check_timeout)\n try:\n url_content = requests.get(url, timeout=check_timeout)\n except requests.exceptions.ConnectionError:\n return 'Error: Could not connect to chosen url {}'.format(url)\n except requests.exceptions.MissingSchema as e:\n return e\n except requests.exceptions.InvalidSchema as e:\n return e\n\n if url_content.status_code != 200:\n return 'Error: {} code from server'.format(url_content.status_code)\n\n try:\n # Not sure if I want to hash .text.encode('utf-8') or .content\n current_hash = hashlib.md5(url_content.text.encode('utf-8')).hexdigest()\n except:\n return 'Error: Failed to hash response from {}'.format(url)\n\n try:\n m = re.search(expression, url_content.text, re.S)\n # This regex is too expensive\n #\n # Maybe I can remove the multi line, it is making it harder to match\n # the exact bit I am interested in\n #\n # Maybe the regex should be matched as many times as possible but\n # encourage a simpler check\n #\n # I feel this currently is encouraging an expression like\n # Constant Title(\\w*).*Title tens of lines\n # further down the page(.*?)\n # which is a pain to write and runs for way too long\n #\n # Also when do people care about the html, there should probably at\n # least be an option to have it stripped out all it's doing is making\n # a mess of my regex or putting .* and .*? 
everywhere\n except:\n # I couldn't catch the sre_constants.error I'm looking for so...\n return 'Error: invalid regular expression'\n\n try:\n capture_groups = m.groups()\n except AttributeError:\n return 'Error: no matches for regular expression on {}'.format(url)\n\n json_capture_groups = json.dumps(capture_groups)\n check = RawCheck(url=url,\n expression=expression,\n current_hash=current_hash,\n capture_groups=json_capture_groups,\n failed_since=0,\n max_down_time=max_down_time,\n run_after=0,\n check_frequency=check_frequency,\n check_timeout=check_timeout)\n session.add(check)\n try:\n session.commit()\n except sqlalchemy.exc.IntegrityError:\n session.rollback()\n return 'Error: An entry for {} is already in database'.format(url)\n else:\n for count, capture_group in enumerate(capture_groups):\n print('{} matched capture group {}, will alert if this changes'.format(\n capture_group,\n count))\n\n return 'Added Raw Check for {}'.format(url)\n\ndef get_longest_md5():\n longest_url = 3\n longest_current_hash = 12\n longest_old_hash = 8\n longest_failed_since = 12\n longest_max_down_time = 14\n longest_run_after = 9\n longest_check_frequency = 15\n longest_check_timeout = 13\n for check in session.query(MD5Check).order_by(MD5Check.id):\n if len(str(check.url)) > longest_url:\n longest_url = len(str(check.url))\n if len(str(check.current_hash)) > longest_current_hash:\n longest_current_hash = len(str(check.current_hash))\n if len(str(check.old_hash)) > longest_old_hash:\n longest_old_hash = len(str(check.old_hash))\n if len(str(check.failed_since)) > longest_failed_since:\n longest_failed_since = len(str(check.failed_since))\n if len(str(check.max_down_time)) > longest_max_down_time:\n longest_max_down_time = len(str(check.max_down_time))\n if len(str(check.run_after)) > longest_run_after:\n longest_run_after = len(str(check.run_after))\n if len(str(check.check_frequency)) > longest_check_frequency:\n longest_check_frequency = len(str(check.check_frequency))\n if len(str(check.check_timeout)) > longest_check_timeout:\n longest_check_timeout = len(str(check.check_timeout))\n\n return (('url', longest_url),\n ('current_hash', longest_current_hash),\n ('old_hash', longest_old_hash),\n ('failed_since', longest_failed_since),\n ('max_down_time', longest_max_down_time),\n ('run_after', longest_run_after),\n ('check_frequency', longest_check_frequency),\n ('check_timeout', longest_check_timeout))\n\ndef get_longest_string():\n longest_url = 3\n longest_string_to_match = 15\n longest_present = 7\n longest_failed_since = 12\n longest_max_down_time = 14\n longest_run_after = 9\n longest_check_frequency = 15\n longest_check_timeout = 13\n for check in session.query(StringCheck).order_by(StringCheck.id):\n if len(str(check.url)) > longest_url:\n longest_url = len(str(check.url))\n if len(str(check.string_to_match)) > longest_string_to_match:\n longest_string_to_match = len(str(check.string_to_match))\n if len(str(check.present)) > longest_present:\n longest_present = len(str(check.present))\n if len(str(check.failed_since)) > longest_failed_since:\n longest_failed_since = len(str(check.failed_since))\n if len(str(check.max_down_time)) > longest_max_down_time:\n longest_max_down_time = len(str(check.max_down_time))\n if len(str(check.run_after)) > longest_run_after:\n longest_run_after = len(str(check.run_after))\n if len(str(check.check_frequency)) > longest_check_frequency:\n longest_check_frequency = len(str(check.check_frequency))\n if len(str(check.check_timeout)) > longest_check_timeout:\n 
longest_check_timeout = len(str(check.check_timeout))\n\n return (('url', longest_url),\n ('string_to_match', longest_string_to_match),\n ('present', longest_present),\n ('failed_since', longest_failed_since),\n ('max_down_time', longest_max_down_time),\n ('run_after', longest_run_after),\n ('check_frequency', longest_check_frequency),\n ('check_timeout', longest_check_timeout))\n\ndef get_longest_diff():\n \"\"\"\n Called by list_checks to check how much to pad the tables.\n \"\"\"\n longest_url = 3\n longest_current_content = 15\n longest_failed_since = 12\n longest_max_down_time = 14\n longest_run_after = 9\n longest_check_frequency = 15\n longest_check_timeout = 13\n for check in session.query(DiffCheck).order_by(DiffCheck.id):\n if len(str(check.url)) > longest_url:\n longest_url = len(str(check.url))\n # Not checking how long current_content is since it will be long and\n # make the table look silly\n #if len(str(check.current_content)) > longest_current_content:\n # longest_current_content = len(str(check.current_content))\n if len(str(check.failed_since)) > longest_failed_since:\n longest_failed_since = len(str(check.failed_since))\n if len(str(check.max_down_time)) > longest_max_down_time:\n longest_max_down_time = len(str(check.max_down_time))\n if len(str(check.run_after)) > longest_run_after:\n longest_run_after = len(str(check.run_after))\n if len(str(check.check_frequency)) > longest_check_frequency:\n longest_check_frequency = len(str(check.check_frequency))\n if len(str(check.check_timeout)) > longest_check_timeout:\n longest_check_timeout = len(str(check.check_timeout))\n\n return (('url', longest_url),\n ('current_content', longest_current_content),\n ('failed_since', longest_failed_since),\n ('max_down_time', longest_max_down_time),\n ('run_after', longest_run_after),\n ('check_frequency', longest_check_frequency),\n ('check_timeout', longest_check_timeout))\n\ndef get_longest_raw():\n longest_url = 3\n longest_expression = 10\n longest_current_hash = 12\n longest_capture_groups = 14\n longest_failed_since = 12\n longest_max_down_time = 14\n longest_run_after = 9\n longest_check_frequency = 15\n longest_check_timeout = 13\n for check in session.query(RawCheck).order_by(RawCheck.id):\n if len(str(check.url)) > longest_url:\n longest_url = len(str(check.url))\n if len(str(check.expression)) > longest_expression:\n longest_expression = len(str(check.expression))\n if len(str(check.current_hash)) > longest_current_hash:\n longest_current_hash = len(str(check.current_hash))\n if len(str(check.capture_groups)) > longest_capture_groups:\n longest_capture_groups = len(str(check.capture_groups))\n if len(str(check.failed_since)) > longest_failed_since:\n longest_failed_since = len(str(check.failed_since))\n if len(str(check.max_down_time)) > longest_max_down_time:\n longest_max_down_time = len(str(check.max_down_time))\n if len(str(check.run_after)) > longest_run_after:\n longest_run_after = len(str(check.run_after))\n if len(str(check.check_frequency)) > longest_check_frequency:\n longest_check_frequency = len(str(check.check_frequency))\n if len(str(check.check_timeout)) > longest_check_timeout:\n longest_check_timeout = len(str(check.check_timeout))\n\n return (('url', longest_url),\n ('expression', longest_expression),\n ('current_hash', longest_current_hash),\n ('capture_groups', longest_capture_groups),\n ('failed_since', longest_failed_since),\n ('max_down_time', longest_max_down_time),\n ('run_after', longest_run_after),\n ('check_frequency', longest_check_frequency),\n 
('check_timeout', longest_check_timeout))\n\ndef list_checks():\n \"\"\"\n List all of the checks from the database in a table like format.\n \"\"\"\n table_skel = '|'\n columns = []\n arguments = []\n for column, longest_entry in get_longest_md5():\n table_skel += (' {{: <{}}} |'.format(longest_entry))\n columns.append(column)\n arguments.append('row.{}'.format(column))\n\n print('{} Checks:'.format('MD5Check'))\n print(table_skel.format(*columns))\n for check in session.query(MD5Check).order_by(MD5Check.id):\n print(table_skel.format(str(check.url),\n str(check.current_hash),\n str(check.old_hash),\n str(check.failed_since),\n str(check.max_down_time),\n str(check.run_after),\n str(check.check_frequency),\n str(check.check_timeout)))\n\n table_skel = '|'\n columns = []\n arguments = []\n for column, longest_entry in get_longest_string():\n table_skel += (' {{: <{}}} |'.format(longest_entry))\n columns.append(column)\n arguments.append('row.{}'.format(column))\n\n print('{} Checks:'.format('StringCheck'))\n print(table_skel.format(*columns))\n for check in session.query(StringCheck).order_by(StringCheck.id):\n print(table_skel.format(str(check.url),\n str(check.string_to_match),\n str(check.present),\n str(check.failed_since),\n str(check.max_down_time),\n str(check.run_after),\n str(check.check_frequency),\n str(check.check_timeout)))\n\n table_skel = '|'\n columns = []\n arguments = []\n for column, longest_entry in get_longest_diff():\n table_skel += (' {{: <{}}} |'.format(longest_entry))\n columns.append(column)\n arguments.append('row.{}'.format(column))\n\n print('{} Checks:'.format('DiffCheck'))\n print(table_skel.format(*columns))\n for check in session.query(DiffCheck).order_by(DiffCheck.id):\n print(table_skel.format(str(check.url),\n str(check.current_content),\n str(check.failed_since),\n str(check.max_down_time),\n str(check.run_after),\n str(check.check_frequency),\n str(check.check_timeout)))\n\n table_skel = '|'\n columns = []\n arguments = []\n for column, longest_entry in get_longest_raw():\n table_skel += (' {{: <{}}} |'.format(longest_entry))\n columns.append(column)\n arguments.append('row.{}'.format(column))\n\n print('{} Checks:'.format('RawCheck'))\n print(table_skel.format(*columns))\n for check in session.query(RawCheck).order_by(RawCheck.id):\n print(table_skel.format(str(check.url),\n str(check.expression),\n str(check.current_hash),\n str(check.capture_groups),\n str(check.failed_since),\n str(check.max_down_time),\n str(check.run_after),\n str(check.check_frequency),\n str(check.check_timeout)))\n\n return ''\n\ndef delete_check(check_type, url):\n if check_type == 'md5':\n check = session.query(MD5Check).filter(MD5Check.url == url)\n elif check_type == 'string':\n check = session.query(StringCheck).filter(StringCheck.url == url)\n elif check_type == 'diff':\n check = session.query(DiffCheck).filter(DiffCheck.url == url)\n elif check_type == 'raw':\n check = session.query(RawCheck).filter(RawCheck.url == url)\n else:\n return 'Chose either md5, string, diff or raw check'\n\n if check.delete():\n session.commit()\n return '{} check for {} removed'.format(check_type, url)\n\n return 'There is no {} check for {}'.format(check_type, url)\n\ndef import_from_file(import_file):\n \"\"\"\n Add's new database entrys from a file\n \"\"\"\n error_message = 'Import failed: {} is not formatted correctly'\n with open(import_file, 'r') as f:\n for line in f:\n line = line.split('#', 1)[0].rstrip()\n if not line:\n continue\n try:\n check_type, data = line.split('|', 1)\n 
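# caution: a '|' inside a raw-check regular expression survives this\n            # first split but is consumed by the later field split, so such\n            # lines fail to parse\n            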
except ValueError:\n return error_message.format(line)\n\n max_down_time = default_max_down_time\n check_frequency = default_check_frequency\n check_timeout = default_check_timeout\n if check_type == 'md5':\n # There are two accepted line formats:\n # check_type|url|max_down_time|check_frequency|check_timeout\n # and check_type|url\n if '|' in data:\n try:\n url, max_down_time, check_frequency, check_timeout\\\n = data.split('|')\n except ValueError:\n return error_message.format(line)\n\n else:\n url = data\n\n print(add_md5(url, max_down_time, check_frequency,\n check_timeout))\n elif check_type == 'string':\n # There are two accepted line formats:\n # check_type|url|string_to_check|max_down_time|check_frequency\n # |check_timeout\n # and check_type|url\n try:\n string_to_check, data = data.split('|', 1)\n except ValueError:\n return error_message.format(line)\n if '|' in data:\n try:\n url, max_down_time, check_frequency, check_timeout\\\n = data.split('|')\n except ValueError:\n return error_message.format(line)\n\n else:\n url = data\n\n print(add_string(url, string_to_check, max_down_time,\n check_frequency, check_timeout))\n elif check_type == 'diff':\n # There are two accepted line formats:\n # check_type|url|max_down_time|check_frequency|check_timeout\n # and check_type|url\n if '|' in data:\n try:\n url, max_down_time, check_frequency, check_timeout\\\n = data.split('|')\n except ValueError:\n return error_message.format(line)\n\n else:\n url = data\n\n print(add_diff(url, max_down_time, check_frequency,\n check_timeout))\n elif check_type == 'raw':\n try:\n expression, data = data.split('|', 1)\n except ValueError:\n return error_message.format(line)\n if '|' in data:\n try:\n url, max_down_time, check_frequency, check_timeout\\\n = data.split('|')\n except ValueError:\n return error_message.format(line)\n\n else:\n url = data\n\n print(add_raw(url, expression, max_down_time,\n check_frequency, check_timeout))\n else:\n return error_message.format(line)\n\n return ''\n\n\nif __name__ == '__main__':\n default_max_down_time = 86400\n default_check_frequency = 3600\n default_check_timeout = 30\n default_database_location = 'web_checks.db'\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--check', action='store_true',\n help='Run checks against all monitored urls')\n parser.add_argument('-l', '--list', action='store_true',\n help='Maximum number of set string that can occur')\n parser.add_argument('-d', '--delete', nargs=2,\n help='The entry to delete id must be used')\n parser.add_argument('-a', '--add', nargs='+',\n help='The type of check to setup and what url to check against')\n parser.add_argument('--max-down-time', type=int,\n default=default_max_down_time,\n help='Number of seconds a site can be down for before warning')\n parser.add_argument('--check-frequency', type=int,\n default=default_check_frequency,\n help='Specify the number of seconds to check after')\n parser.add_argument('--check-timeout', type=int,\n default=default_check_timeout,\n help='Specify the number of seconds to check_timeout after')\n parser.add_argument('--database-location',\n default=default_database_location,\n help='Specify a database name and location')\n parser.add_argument('--import-file',\n help='Chose a file to populate the database from')\n parser.allow_abbrev = False\n args = parser.parse_args()\n\n engine = sqlalchemy.create_engine('sqlite:///{}'.format(\n args.database_location))\n Base = declarative_base()\n metadata = MetaData()\n\n class MD5Check(Base):\n __tablename__ 
= 'md5s'\n id = Column(Integer, primary_key=True)\n url = Column(String, unique=True)\n current_hash = Column(String)\n old_hash = Column(String)\n failed_since = Column(Integer)\n max_down_time = Column(Integer)\n run_after = Column(Integer)\n check_frequency = Column(Integer)\n check_timeout = Column(Integer)\n def __repr__(self):\n return ''.format(\n self.url,\n self.current_hash,\n self.old_hash,\n self.failed_since,\n self.max_down_time,\n self.run_after,\n self.check_frequency,\n self.check_timeout)\n\n class StringCheck(Base):\n __tablename__ = 'strings'\n id = Column(Integer, primary_key=True)\n url = Column(String)\n string_to_match = Column(String)\n present = Column(Integer)\n failed_since = Column(Integer)\n max_down_time = Column(Integer)\n run_after = Column(Integer)\n check_frequency = Column(Integer)\n check_timeout = Column(Integer)\n def __repr__(self):\n return ''.format(\n self.url,\n self.string_to_match,\n self.present,\n self.failed_since,\n self.max_down_time,\n self.run_after,\n self.check_frequency,\n self.check_timeout)\n\n class DiffCheck(Base):\n __tablename__ = 'diffs'\n id = Column(Integer, primary_key=True)\n url = Column(String)\n current_content = Column(String)\n failed_since = Column(Integer)\n max_down_time = Column(Integer)\n run_after = Column(Integer)\n check_frequency = Column(Integer)\n check_timeout = Column(Integer)\n def __repr__(self):\n return ''.format(\n self.url,\n self.string_to_match,\n self.failed_since,\n self.max_down_time,\n self.run_after,\n self.check_frequency,\n self.check_timeout)\n\n class RawCheck(Base):\n __tablename__ = 'raws'\n id = Column(Integer, primary_key=True)\n url = Column(String)\n expression = Column(String)\n current_hash = Column(String)\n capture_groups = Column(String)\n failed_since = Column(Integer)\n max_down_time = Column(Integer)\n run_after = Column(Integer)\n check_frequency = Column(Integer)\n check_timeout = Column(Integer)\n def __repr__(self):\n return ''.format(\n self.url,\n self.expression,\n self.current_hash,\n self.capture_groups,\n self.failed_since,\n self.max_down_time,\n self.run_after,\n self.check_frequency,\n self.check_timeout)\n\n MD5Check.__table__\n Table('md5s', metadata,\n Column('id', Integer(), primary_key=True, nullable=False),\n Column('url', String(), unique=True),\n Column('current_hash', String()),\n Column('old_hash', String()),\n Column('failed_since', Integer()),\n Column('max_down_time', Integer()),\n Column('run_after', Integer()),\n Column('check_frequency', Integer()),\n Column('check_timeout', Integer()), schema=None)\n\n StringCheck.__table__\n Table('strings', metadata,\n Column('id', Integer(), primary_key=True, nullable=False),\n Column('url', String(), unique=True),\n Column('string_to_match', String()),\n Column('present', Integer()),\n Column('failed_since', Integer()),\n Column('max_down_time', Integer()),\n Column('run_after', Integer()),\n Column('check_frequency', Integer()),\n Column('check_timeout', Integer()), schema=None)\n\n DiffCheck.__table__\n Table('diffs', metadata,\n Column('id', Integer(), primary_key=True, nullable=False),\n Column('url', String(), unique=True),\n Column('current_content', String()),\n Column('failed_since', Integer()),\n Column('max_down_time', Integer()),\n Column('run_after', Integer()),\n Column('check_frequency', Integer()),\n Column('check_timeout', Integer()), schema=None)\n\n RawCheck.__table__\n Table('raws', metadata,\n Column('id', Integer(), primary_key=True, nullable=False),\n Column('url', String(), 
unique=True),\n Column('expression', String()),\n Column('current_hash', String()),\n Column('capture_groups', String()),\n Column('failed_since', Integer()),\n Column('max_down_time', Integer()),\n Column('run_after', Integer()),\n Column('check_frequency', Integer()),\n Column('check_timeout', Integer()), schema=None)\n\n try:\n metadata.create_all(engine)\n except sqlalchemy.exc.OperationalError:\n print('Could not create or connect to database at {}'.format(\n args.database_location))\n exit(1)\n\n Session = sessionmaker(bind=engine)\n session = Session()\n\n if args.check:\n run_checks()\n elif args.list:\n list_checks()\n elif args.add:\n if args.add[0] == 'md5':\n if len(args.add) != 2:\n print('call as -a \\'md5\\' \\'url-to-check\\'')\n exit(1)\n\n print(add_md5(args.add[1], args.max_down_time, args.check_frequency,\n args.check_timeout))\n elif args.add[0] == 'string':\n if len(args.add) != 3:\n print('call as -a \\'string\\' string-to-check \\'url-to-check\\'')\n exit(1)\n\n print(add_string(args.add[2], args.add[1], args.max_down_time,\n args.check_frequency, args.check_timeout))\n elif args.add[0] == 'diff':\n if len(args.add) != 2:\n print('call as -a \\'diff\\' \\'url-to-check\\'')\n exit(1)\n\n print(add_diff(args.add[1], args.max_down_time,\n args.check_frequency, args.check_timeout))\n elif args.add[0] == 'raw':\n if len(args.add) != 3:\n print('call as -a \\'raw\\' \\'expression\\' \\'url-to-check\\'')\n exit(1)\n\n print(add_raw(args.add[2], args.add[1], args.max_down_time,\n args.check_frequency, args.check_timeout))\n else:\n print('Choose either md5, string, diff or raw.')\n\n elif args.delete:\n if len(args.delete) != 2:\n print('call as -d \\'check_type\\' \\'url-to-remove\\'')\n exit(1)\n\n print(delete_check(args.delete[0], args.delete[1]))\n elif args.import_file:\n error = import_from_file(args.import_file)\n if error:\n print(error)\n exit(1)\n else:\n print(\"\"\"\\\nArguments:\n -h/--help\\t\\tShow the help message and exit\n -c/--check\\t\\tRun checks against all monitored urls\n -l/--list\\t\\tList stored checks from the database\n -a/--add\\t\\tAdds a check to the database:\n \\t\\t\\t\\t-a md5 [url]\n \\t\\t\\t\\t-a string [string] [url]\n \\t\\t\\t\\t-a diff [url]\n \\t\\t\\t\\t-a raw [expression] [url]\n -d/--delete\\t\\tDelete a check:\n \\t\\t\\t\\t-d [check_type] [url]\n --max-down-time\\t\\tNumber of seconds a site can be down for before warning\n --check-frequency\\tNumber of seconds to wait between checks\n --check-timeout\\t\\tNumber of seconds to check_timeout after\n --database-location\\tSpecify a database name and location\n --import-file\\t\\tSpecify a file to populate the database from\\\n \"\"\")\n","sub_path":"web-check.py","file_name":"web-check.py","file_ext":"py","file_size_in_byte":41497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"326157262","text":"#-*- coding:UTF-8 -*-\nimport requests\nimport smtplib\nimport ctypes\nimport os\nif __name__=='__main__':\n\turls = [\n\t\t'http://graduate.dufe.edu.cn/zsgz/sszs/'\n\t]\n\ti=1\n\thttpsession = requests.session()\n\twhile(True):\n\t\tfor url in urls:\n\t\t\tresponse = httpsession.get(url)\n\t\t\tif('调剂' in str(response.text)):\n\t\t\t\tctypes.windll.user32.MessageBoxA(0,u\"有调剂消息\".encode('gb2312'),u' 
信息'.encode('gb2312'),0)\n\t\t\telse:\n\t\t\t\tprint(\"第\",i,'次')\n\t\t\ti=i+1\n\n\t\t\t\t#os.system(r'C:\\Workflow\\GitHub\\python\\kaoyan\\test.bat')","sub_path":"kaoyan/tiaoji.py","file_name":"tiaoji.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"651219150","text":"#! usr/bin/env python3\n\n# Original GHS algorithm\nimport csv\nimport random, math\nimport os\n\ndef objective_function(vector, problem_size):\n sum = 0\n for val in vector:\n sum += val ** 2 - 10*math.cos(2*math.pi*val)\n sum += 10*problem_size\n return sum\n\ndef rand_in_bounds(minimum, maximum):\n return minimum + ((maximum - minimum) * random.random())\n\ndef random_vector(search_space):\n i = 0\n limit = len(search_space)\n random_vector = [0 for i in range(limit)]\n \n for i in range(limit):\n random_vector[i] = rand_in_bounds(search_space[i][0], search_space[i][1]) \n return random_vector\n\ndef create_random_harmony(search_space, problem_size):\n harmony = {}\n harmony[\"vector\"] = random_vector(search_space)\n harmony[\"fitness\"] = objective_function(harmony[\"vector\"], problem_size)\n return harmony\n\ndef initialize_harmony_memory(search_space, HMS, problem_size, factor=3):\n memory = [create_random_harmony(search_space, problem_size) for i in range(HMS*factor)]\n #memory = sorted(memory, key=itemgetter('fitness')) \n memory = sorted(memory, key= lambda k: k[\"fitness\"])\n return [memory[i] for i in range(HMS)]\n\ndef create_harmony(search_space, memory, best, HMCR, PARmin, PARmax, max_iter, gn):\n limit = len(search_space)\n vector = [0 for i in range(limit)]\n\n PAR = PARmin + ((PARmax - PARmin)/max_iter)*gn\n\n for i in range(limit):\n if random.random() < HMCR:\n value = memory[random.randint(0, len(memory)-1)][\"vector\"][i]\n if random.random() < PAR:\n value = best[\"vector\"][random.randint(0, limit-1)]\n if value < search_space[i][0]:\n value = search_space[i][0]\n if value > search_space[i][1]:\n value = search_space[i][1]\n\n vector[i] = value\n else:\n vector[i] = rand_in_bounds(search_space[i][0], search_space[i][1])\n \n return {\"vector\": vector}\n\ndef search(search_space, max_iter, HMS, HMCR, PARmin, PARmax, problem_size, total):\n memory = initialize_harmony_memory(search_space, HMS, problem_size)\n best = memory[0]\n for i in range(max_iter):\n harm = create_harmony(search_space, memory, best, HMCR, PARmin, PARmax, max_iter, i)\n harm[\"fitness\"] = objective_function(harm[\"vector\"], problem_size)\n if harm[\"fitness\"] < best[\"fitness\"]:\n best = harm\n memory.append(harm)\n memory = sorted(memory, key= lambda k: k[\"fitness\"])\n del memory[-1]\n\n # write fitness\n if not os.path.exists(os.path.join(os.getcwd(), 'data', 'ghs_cont_ra')):\n os.mkdir(os.path.join(os.getcwd(), 'data', 'ghs_cont_ra'))\n path0 = os.path.join(os.getcwd(), 'data', 'ghs_cont_ra', 'fitness')\n if not os.path.exists(path0):\n os.mkdir(path0)\n path = os.path.join(path0, str(total))\n if not os.path.exists(path):\n os.mkdir(path)\n with open(os.path.join(path, 'fitness.csv'), 'a', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n spamwriter.writerow([i,best['fitness']])\n\n return best\n","sub_path":"1-continuous-opt-benchmark/modules/ghs_cont_ra.py","file_name":"ghs_cont_ra.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"170794084","text":"# 
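# (Added usage sketch for the GHS search() defined in ghs_cont_ra.py above;
# parameter values are illustrative, and the data/ output directories are
# created relative to the working directory:)
# search_space = [[-5.12, 5.12] for _ in range(10)]   # Rastrigin bounds per dimension
# best = search(search_space, max_iter=500, HMS=10, HMCR=0.9,
#               PARmin=0.01, PARmax=0.99, problem_size=10, total=0)
# print(best["fitness"], best["vector"])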
Class Definition of Energy Conversion Technology\n# Version 1.2, Edward, 181004\n\n__version__ = 1.2\n\nif __name__ == '__main__':\n print('!!! energy_conver_tech.py is being run by itself.\\n')\nelse:\n print('!!! energy_conver_tech.py is imported from another file.\\n')\n\n\nclass EnergyConverTech:\n \"\"\"Class Definition of Energy Conversion Technology\"\"\"\n number_ect = 0\n\n def __init__(self, identifier, name, output):\n self.id = identifier\n self.name = name\n self.type = output # if \"1\", Energy2Elec; if \"2\", Energy2Heat\n EnergyConverTech.number_ect += 1\n\n def print(self):\n \"\"\"Print the common feature of energy conversion technology\"\"\"\n type_text = [\"Energy to Elec\", \"Energy to Heat\", \"Energy to Elec and Heat\"]\n print('Energy Conversion Technology: {}, type: {}, id: {},'\n .format(self.name, type_text[self.type], self.id), end=\" \")\n\n @classmethod\n def print_number(cls):\n print('Database have {} type(s) of energy conversion technology'.format(EnergyConverTech.number_ect))\n\n\nclass Energy2ElecConverter(EnergyConverTech):\n \"\"\"Class Definition of Energy to Electricity Technology in Energy Conversion Technology\"\"\"\n number = 0\n\n def __init__(self, identifier, name, output, efficiency):\n EnergyConverTech.__init__(self, identifier, name, output)\n self.efficiency = efficiency\n Energy2ElecConverter.number += 1\n print('You have profiled a new energy to electricity technology. \\n')\n Energy2ElecConverter.print_number()\n\n def print(self):\n \"\"\"Print the feature of energy to electricity conversion technology\"\"\"\n EnergyConverTech.print(self)\n print('efficiency: {}.\\n'.format(self.efficiency))\n\n @classmethod\n def print_number(cls):\n print('Number of profiled energy to electricity converters is {}.'.format(Energy2ElecConverter.number))\n\n @classmethod\n def create_input(cls, identifier_ect):\n print('You are profiling a new product, which can convert energy to electricity.')\n return cls(identifier_ect,\n input('Name: '),\n int(1),\n int(input('Efficiency(%): '))\n )\n\n\nclass Energy2HeatConverter(EnergyConverTech):\n \"\"\"Class Definition of Energy to Heat Technology in Energy Conversion Technology\"\"\"\n number = 0\n\n def __init__(self, identifier, name, output, efficiency):\n EnergyConverTech.__init__(self, identifier, name, output)\n self.efficiency = efficiency\n Energy2HeatConverter.number += 1\n print('You have profiled a new energy to heat tech. \\n'\n 'Number of profiled energy to heat technology is {}. \\n'.format(Energy2HeatConverter.number))\n\n def print(self):\n \"\"\"Print the feature of energy to heat conversion technology\"\"\"\n EnergyConverTech.print(self)\n print('efficiency: {}.\\n'.format(self.efficiency))\n\n @classmethod\n def create_input(cls, identifier_ect):\n print('You are profiling a new product, which can convert energy to heat.')\n return cls(identifier_ect,\n input('Name: '),\n int(2),\n int(input('Efficiency(%): '))\n )\n\n\nclass Energy2ElecHeatConverter(EnergyConverTech):\n \"\"\"Class Definition of Energy to Electricity & Heat Technology in Energy Conversion Technology\"\"\"\n number = 0\n\n def __init__(self, identifier, name, output, efficiency_e2e, efficiency_e2h):\n EnergyConverTech.__init__(self, identifier, name, output)\n self.efficiency_e2e = efficiency_e2e\n self.efficiency_e2h = efficiency_e2h\n Energy2ElecHeatConverter.number += 1\n print('You have profiled a new energy to electricity and heat technology. \\n'\n 'Number of profiled energy to electricity and heat technology is {}. 
\\n'\n .format(Energy2ElecHeatConverter.number))\n\n def print(self):\n \"\"\"Print the feature of energy to electricity conversion technology\"\"\"\n EnergyConverTech.print(self)\n print('e2e efficiency: {}, e2h: efficiency {}. \\n'.format(self.efficiency_e2e, self.efficiency_e2h))\n\n @classmethod\n def create_input(cls, identifier_ect):\n print('You are profiling a new product, which can convert energy to electricity and heat.')\n return cls(identifier_ect,\n input('Name: '),\n int(3),\n int(input('Energy to Electricity Efficiency(%): ')),\n int(input('Energy to Heat Efficiency(%): '))\n )\n\n\n# Initialize default energy technology.\nidentifier_ect = 'eg_001'\neg_001 = Energy2ElecConverter(identifier_ect, 'Electric Generator 001', 1, 50)\nelectricGenerators = {'eg_001': eg_001}\nidentifier_ect = 'he_001'\nhe_001 = Energy2HeatConverter(identifier_ect, 'Heat Exchanger 001', 2, 50)\nheatExchangers = {'he_001': he_001}\nidentifier_ect = 'chp_001'\nchp_001 = Energy2ElecHeatConverter(identifier_ect, 'Combined Heat Power 001', 3, 20, 30)\ncombinedHeatPowers = {'chp_001': chp_001}\nidentifier_ect = 'exit' # Initialize the value of identifier_ect\n\n\ndef check_identifier_ect(identifier_ect):\n \"\"\"Check if the input identifier already exits.\"\"\"\n operand_ect_m = 1\n if identifier_ect == 'exit':\n operand_ect_m = 0\n else:\n for all_identifier_ect in {**electricGenerators, **heatExchangers, **combinedHeatPowers}:\n if identifier_ect == all_identifier_ect:\n operand_ect_m = 2\n break\n return operand_ect_m\n\n\ndef profile_ect():\n \"\"\"Input energy conversion technology from terminal.\"\"\"\n operand_ect_i = 1\n operand_ect_j = 1\n while operand_ect_i <= 3:\n identifier_ect = input('Do you want profile a new model for energy conversion technology? \\n'\n 'Input ID first, or input \"exit\" to main menu: \\n')\n operand_ect_m = check_identifier_ect(identifier_ect)\n if operand_ect_m == 1:\n while operand_ect_j != 0 and operand_ect_j <= 10:\n operand_ect_n = input('Then, input number with following reference to profile '\n 'a certain type of technology:\\n'\n 'Or input \"exit\" to exit to main menu. \\n'\n '1. Profile a product can converse energy to electricity. \\n'\n ' 1.1 Electric Generator \\n'\n '2. Profile a product can converse energy to heat. \\n'\n ' 2.1 Heat Exchanger \\n'\n '3. Profile a product can converse energy to electricity and heat. \\n'\n ' 3.1 Combined Heat and Power Plant \\n')\n if operand_ect_n == '1.1':\n electricGenerators[identifier_ect] = Energy2ElecConverter.create_input(identifier_ect)\n print('The energy to electricity converter profiled is a electric generator.\\n')\n electricGenerators[identifier_ect].print()\n for electricGenerator in electricGenerators.items():\n print(electricGenerator)\n break\n elif operand_ect_n == '2.1':\n heatExchangers[identifier_ect] = Energy2HeatConverter.create_input(identifier_ect)\n print('You have profiled a new heat exchanger.\\n')\n heatExchangers[identifier_ect].print()\n break\n elif operand_ect_n == '3.1':\n combinedHeatPowers[identifier_ect] = Energy2ElecHeatConverter.create_input(identifier_ect)\n print('You have profiled a new combine heat and power.\\n')\n combinedHeatPowers[identifier_ect].print()\n break\n elif operand_ect_n == 'exit':\n operand_ect_i = 0\n break\n else:\n print('\\n! 
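# (Added usage sketch for the converter classes above; note that print() indexes
# type_text with self.type, so an output code of 1 selects the second entry:)
# gen = Energy2ElecConverter('eg_002', 'Electric Generator 002', 1, 45)
# gen.print()
# EnergyConverTech.print_number()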
Error, following the instruction below: \\n')\n elif operand_ect_m == 2:\n print('The ID you input is already used.')\n energyConverTechs = {**electricGenerators, **heatExchangers, **combinedHeatPowers}\n energyConverTechs[identifier_ect].print()\n print('---------- Please input another identifier ---------- \\n')\n elif operand_ect_m == 0:\n print('---------- Return to Main Menu ---------- \\n')\n break\n operand_ect_i += 1\n if operand_ect_m != 0:\n print('---------- Too many operations. Return to Main Menu ---------- \\n')\n","sub_path":"energy_conver_tech.py","file_name":"energy_conver_tech.py","file_ext":"py","file_size_in_byte":8696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"86152352","text":"# Time: O(m * k), where m is string length, n is dictionary size, k is word length\n# Space: O(n)\n\n# 30\n# You are given a string, s, and a list of words, words,\n# that are all of the same length. Find all starting indices of substring(s)\n# in s that is a concatenation of each word in words exactly once and\n# without any intervening characters.\n#\n# For example, given:\n# s: \"barfoothefoobarman\"\n# words: [\"foo\", \"bar\"]\n#\n# You should return the indices: [0,9].\n# (order does not matter).\n\n\nimport collections\n\n# Sliding window solution:\n# since the length of all words is same and fixed, treat each word as a UNIT\n# we can convert the problem like: s = 'acxbccacy', find substrings containing ['a', 'b', 'c', 'c']\n# obvious we can use sliding window to solve it in O(len(s)).\n# Now each UNIT is a word, we should try starting from each char in a word in O(len(word[0])).\nclass Solution(object):\n def findSubstring(self, s, words): # USE THIS: much better time complexity\n \"\"\"\n :type s: str\n :type words: List[str]\n :rtype: List[int]\n \"\"\"\n ans, cnt, wlen = [], len(words), len(words[0])\n if len(s) < cnt * wlen:\n return ans\n\n expect = collections.Counter(words)\n for i in range(wlen): # Time: O(k), test i in 0,1,2,..k-1 where k is word length\n # for each i, do sliding window with word treating as a UNIT\n start, curCnt = i, 0\n actual = collections.defaultdict(int)\n for cur in range(i, len(s)-wlen+1, wlen): # Time: O(m / k)\n sub = s[cur : cur+wlen] # Time: O(k)\n if sub not in expect:\n start, curCnt = cur + wlen, 0\n actual = collections.defaultdict(int)\n else:\n actual[sub] += 1\n curCnt += 1\n while actual[sub] > expect[sub]:\n actual[s[start : start+wlen]] -= 1\n curCnt -= 1\n start += wlen\n if curCnt == cnt:\n ans.append(start)\n return ans\n\n\n# Time: O(m * n * k), where m is string length, n is dictionary size, k is word length\n# Space: O(n * k)\nclass Solution2(object):\n def findSubstring(self, s, words):\n \"\"\"\n :type s: str\n :type words: List[str]\n :rtype: List[int]\n \"\"\"\n ans = []\n cnt, wlen = len(words), len(words[0])\n expect = collections.Counter(words)\n for start in range(len(s) - cnt * wlen + 1): # O(m)\n actual = collections.defaultdict(int)\n for i in range(cnt): # O(n)\n sub = s[start+i*wlen : start+(i+1)*wlen] # O(k)\n if sub not in expect or actual[sub] + 1 > expect[sub]:\n break\n actual[sub] += 1\n else: # finish iteration of cnt, no break\n ans.append(start)\n return ans\n\n\n # similar to the above, just not that Pythonic\n def findSubstring2(self, s, words):\n ans, m, n, k = [], len(s), len(words), len(words[0])\n if m < n*k:\n return ans\n\n expect = collections.defaultdict(int)\n for i in words:\n expect[i] += 1 # Space: O(n * k)\n\n for i in xrange(m+1-k*n): # 
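# (Added worked trace for the sliding-window logic above: for s="barfoobar",
# words=["foo","bar"], wlen=3 and offsets i=0..2; at i=0 the window first
# completes at start=0, then after evicting the leading "bar" it completes
# again at start=3, so findSubstring("barfoobar", ["foo","bar"]) returns [0, 3].)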
Time: O(m)\n actual, j = collections.defaultdict(int), 0\n while j < n: # Time: O(n)\n word = s[i+j*k : i+j*k+k] # Time: O(k)\n if word not in expect or actual[word] + 1 > expect[word]:\n break\n actual[word] += 1\n j += 1\n if j == n:\n ans.append(i)\n\n return ans\n\nif __name__ == \"__main__\":\n print(Solution().findSubstring(\"barfoothefoobarman\", [\"foo\", \"bar\"])) # [0, 9]\n","sub_path":"Python/substring-with-concatenation-of-all-words.py","file_name":"substring-with-concatenation-of-all-words.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"476857557","text":"import random\n\ncardfaces=[]\ntrap=[\"naval mine\",\"smokescreen\",\"sabotage\"]\nperk=[\"flak armor\",\"far sight\",\"aluminium hull\"]\noffense=[\"FMJ upgrade\",\"Rifling\",\"Advanced Rifling\",\"EMP upgrade\"]\ndefensive=[\"Reinforced hull\",\"Sonar\"]\nhelp=[\"Backup\",\"Extra fuel\",\"Extra fuel 2\",\"Rally\",\"Adrinaline Rush\",]\nspecial=[\"Repair\",\"Hack intel\",\"Jack sparrow\"]\ndeck=[]\nfor i in range(4):\n cardfaces.append(help[i]+\" (help card)\")\n\nfor h in range(4):\n cardfaces.append(offense[h]+\" (offense card)\")\n\nfor q in range(2):\n cardfaces.append(defensive[q]+\" (defensive card)\")\n\nfor a in range(3):\n cardfaces.append(special[a]+\" (special card)\")\n\nfor j in range(3):\n cardfaces.append(perk[j]+ \" (Special perk card)\")\n\nfor k in range(3):\n if trap==\"naval mine\":\n cardfaces.append(trap[k]+\" offensive trap card\")\n elif trap==\"smokescreen\" or \"sabotage\":\n cardfaces.append(trap[k]+\" (Defensive trap card)\")\n else:\n cardfaces.append(trap[k]+\" (trap card)\")\n\n\n\n#print(cardfaces)\n\n\nfor l in range(19):\n card =(cardfaces[l])\n deck.append(card)\n\n\nrandom.shuffle(deck)\n\nfor m in range(19):\n print(deck[m])\n","sub_path":"cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"181360343","text":"def Crypt(phrase, key, alpha):\n res = \"\"\n for i in range(len(phrase)) :\n for j in range (len(alpha)):\n if (phrase[i] == alpha[j]):\n res += key[j]\n #endif\n #endfor\n #endfor\n return res\n#enddef\n\nkey = \"HYLUJPVREAKBNDOFSQZCWMGITX\"\nkeyP = [str(c) for c in str(key)]\n\nalpha = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nalphaP = [str(c) for c in str(alpha)]\n\nphrase = input(\"Saisir une phrase : \")\n\nprint(Crypt(phrase.upper(),keyP, alphaP))","sub_path":"Cnam-Algo/Algo Simple/10.3.py","file_name":"10.3.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"167502466","text":"from Descriptor.Atoms import *\r\n\r\n# (cal/mol-1, 300K)\r\n__Amino_acid_conformational_entropy = {\"LYS\": -189, \"ARG\": -188, \"GLN\": -173, \"MET\": -146, \"GLU\": -146,\r\n \"ILE\": -76, \"LEU\": -71, \"ASN\": -103, \"THR\": -108, \"VAL\": -43,\r\n \"TYR\": -113, \"SER\": -111, \"HIS\": -95, \"ASP\": -78, \"CYS\": -85,\r\n \"TRP\": -99, \"PHE\": -62, \"ALA\": -0, \"PRO\": -6, \"GLY\": 0}\r\n\r\n# read protein atoms and create class ResidueAtom for each atom\r\ndef get_pdb_atoms(pdbfile):\r\n atoms = []\r\n with open(pdbfile, 'r') as pdb:\r\n lines = tuple(atom.strip() for atom in pdb.readlines()[0: -2])\r\n for atom in lines:\r\n identifier = atom[0: 6].strip()\r\n if identifier == 'ATOM':\r\n atom_type = atom[12: 16].strip()\r\n residue = atom[17: 20].strip()\r\n index = atom[22: 
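# (Added inverse sketch for Crypt in 10.3.py above, assuming the same key/alpha
# lists: map each ciphertext character back through key -> alpha:)
# def Decrypt(phrase, key, alpha):
#     res = ""
#     for ch in phrase:
#         for j in range(len(key)):
#             if ch == key[j]:
#                 res += alpha[j]
#     return res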
26].strip()\r\n atoms.append(ResidueAtom(atom_type=atom_type,\r\n coordinate=tuple(map(float, (atom[30: 38].strip(),\r\n atom[38: 46].strip(),\r\n atom[46: 54].strip()))),\r\n index=index, residue=residue))\r\n return tuple(atoms)\r\n\r\n# read ligand atoms and create class LigandAtom for each atom\r\ndef get_sdf_atoms(sdffile, single=True):\r\n atom_types = []\r\n atoms = []\r\n if single:\r\n with open(sdffile, 'r') as sdf:\r\n lines = tuple(sdf.readlines())\r\n else:\r\n lines = sdffile\r\n split_line = lines[3].strip().split()\r\n natoms = int(split_line[0])\r\n nbonds = int(split_line[1])\r\n if natoms > len(lines):\r\n count = len(split_line[0])\r\n while natoms > len(lines):\r\n natoms = int(split_line[0][:count])\r\n count -= 1\r\n nbonds = int(split_line[0][count + 1:])\r\n atom_lines = lines[4: 4 + natoms]\r\n property_lines = lines[5 + natoms + nbonds:]\r\n\r\n for i, line in enumerate(property_lines):\r\n index = i\r\n while line == \"> \\n\":\r\n next_line = property_lines[index + 1]\r\n if next_line == \"\\n\":\r\n break\r\n atom_types.extend(next_line.strip().split())\r\n index += 1\r\n\r\n for i, atom in enumerate(atom_lines):\r\n split_atom = atom.strip().split()\r\n atom_type = atom_types[i]\r\n coordinate = tuple(map(float, split_atom[0: 3]))\r\n atoms.append(LigandAtom(atom_type=atom_type, coordinate=coordinate))\r\n\r\n return tuple(atoms)\r\n\r\n\"\"\" function of lost water entropy and side chain conformational\r\nentropy calculation\"\"\"\r\ndef cal_lw_fs(ligand, protein):\r\n ligand_protein_contacts = {}\r\n total_flexible_entropy = 0\r\n total_protein_water_loss = 0\r\n total_ligand_water_loss = 0\r\n deduct_water = 1\r\n protein_contacted_water = {}\r\n contacted_atom = set()\r\n # obtain contacted atoms in protein \r\n # contacted atom distance is set to 3.5\r\n for ligand_atom in ligand:\r\n ligand_atom_coordinate = ligand_atom.get_coordinate()\r\n ligand_protein_contacts[ligand_atom] = set()\r\n for residue_atom in protein:\r\n residue_atom_coordinate = residue_atom.get_coordinate()\r\n dist = ligand_atom_coordinate.cal_dist(residue_atom_coordinate)\r\n if dist > 3.5:\r\n pass\r\n else:\r\n ligand_protein_contacts[ligand_atom].add(residue_atom)\r\n\r\n for latm in ligand_protein_contacts:\r\n\r\n # calculate ligand water loss\r\n if len(ligand_protein_contacts[latm]) >= latm.get_water_count():\r\n ligand_water_loss = latm.get_water_count()\r\n total_ligand_water_loss += ligand_water_loss\r\n else:\r\n total_ligand_water_loss += len(ligand_protein_contacts[latm])\r\n\r\n for atm in ligand_protein_contacts[latm]:\r\n # calculate flexibility entropy\r\n flexible_entropy = 0\r\n index, residue = atm.get_residue()\r\n if index not in contacted_atom:\r\n contacted_atom.add(index)\r\n flexible_entropy = __Amino_acid_conformational_entropy[residue]\r\n total_flexible_entropy += flexible_entropy\r\n\r\n # calculate protein water loss\r\n if atm not in protein_contacted_water:\r\n protein_contacted_water[atm] = atm.get_water_count()\r\n original_water = protein_contacted_water[atm]\r\n if original_water == 0:\r\n pass\r\n else:\r\n left_water = original_water - deduct_water\r\n total_protein_water_loss += deduct_water\r\n protein_contacted_water.update({atm: left_water})\r\n total_water_loss = total_ligand_water_loss + total_protein_water_loss\r\n water_entropy = total_water_loss * 10 * 300 * 0.001\r\n return water_entropy, 
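# (Added driver sketch for cal_lw_fs above; the file names are placeholders and
# the ResidueAtom/LigandAtom classes come from Descriptor.Atoms in this repo:)
# protein = get_pdb_atoms('complex_protein.pdb')
# ligand = get_sdf_atoms('ligand.sdf')
# water_dS, side_chain_dS = cal_lw_fs(ligand, protein)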
total_flexible_entropy\r\n","sub_path":"SampleCode/Descriptor/calEntropy.py","file_name":"calEntropy.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"326819207","text":"import os\n#\nrootdir = \"F:\\\\work\\\\workspace\\\\workspace1.1\\\\file\\\\IT\\\\项目级\\\\TMS-周迭代\\\\3.0.31\\\\\"\nquery = \"SETTLE_DT\"\n\ndef walk_all_files(rootdir ,query):\n print(os.walk(rootdir))\n for parent,dirnames,filenames in os.walk(rootdir):\n # for dirname in dirnames:\n # print(\"parent is :\"+parent)\n # print(\"dirname is:\"+dirname)\n # pass\n ##print(filenames)\n for filename in filenames:\n #print(\"fileName:\"+filename)\n is_file_contain_word(os.path.join(parent,filename),query)\n\ndef is_file_contain_word(file_,query_word):\n #print(\"search file:\" + file_)\n file_suffix = os.path.splitext(file_)[1]\n if file_suffix != '.sql':\n #print(\"file_suffix:\"+file_suffix)\n return\n\n if query_word in open(file_,encoding='UTF-8').read():\n print (file_)\n filecontext = open(file_,encoding='UTF-8').read()\n lines = filecontext.split('\\n')\n for line in lines:\n if query_word in line:\n print(line)\n\nwalk_all_files(rootdir,query.upper())\n\nprint(\"done\")","sub_path":"SearchFile2.py","file_name":"SearchFile2.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"53980861","text":"# class type Player that holds multiple functions\nclass Player(object):\n \"\"\" Adds players and gives access to view the team \"\"\"\n# function that contains variables that is unique to the input, meaning each input has its own name, age, and amount of goals\n def __init__(self, name, age, goals):\n self.name = name\n self.age = age\n self.goals = goals\n# function that displays the player's name, age, and amount of goals\n def getStats(self):\n summary = \"Player: \" + self.name + \"\\n\"\n summary = summary + \"Age: \" + str(self.age) + \"\\n\"\n summary = summary + \"Goals: \" + str(self.goals) + \"\\n\"\n return summary\n\n\n\n# execution starts here\n\n# empty list that is used to add the players beng inputed \nmyPlayers = []\n\n\n\nkeepRunning = True\n# while the code runs, the prompts below appear \nwhile keepRunning:\n print(\"What would you like to do? Enter your choice and press 'enter'.\")\n print(\"(1) Add players.\")\n print(\"(2) View team.\")\n print(\"(0) Exit.\")\n # the user's input is labeled as \"response\" in order to use their responses for other functions\n response = input()\n# user inputs \"0\", the code ends\n if response == \"0\":\n keepRunning = False\n# if user inputs \"1\", then they are asked to input the new player's name, age, and score. 
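# (Added single-pass variant of is_file_contain_word from SearchFile2.py above;
# same output, but each file is opened once instead of twice:)
# def is_file_contain_word(file_, query_word):
#     if os.path.splitext(file_)[1] != '.sql':
#         return
#     with open(file_, encoding='UTF-8') as fh:
#         matching = [l.rstrip('\n') for l in fh if query_word in l]
#     if matching:
#         print(file_)
#         for line in matching:
#             print(line)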
Then the player's information is added to the empty list containing the team\n elif response == \"1\":\n print(\"what is the player's name?\")\n playerName = input()\n print(\"What is the player's age?\")\n playerAge = int(input())\n print(\"How many goals have they scored?\")\n playerGoals = int(input())\n newPlayer = Player(playerName, playerAge, playerGoals)\n myPlayers.append(newPlayer)\n# if the user inputs \"2\", the list is accessed and a summary of each player is displayed\n elif response == \"2\":\n for p in myPlayers:\n print(p.getStats())\n \n\n \n \n \n ","sub_path":"ps6/jose.py","file_name":"jose.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"21902137","text":"#!/usr/bin/env python3\n'''\nusage: hzip.py [-h] [-o OUTFILE] [-s SUFFIX] [-f] [-l LEVEL] [-a] infile\n\nCompress files using the hzlib module.\n\npositional arguments:\n infile\n\noptional arguments:\n -h, --help show this help message and exit\n -o OUTFILE, --outfile OUTFILE\n Name of output file\n -s SUFFIX, --suffix SUFFIX\n Suffix to use instead of .hz\n -f, --force Force compression and overwrite output file if it\n exists\n -l LEVEL, --level LEVEL\n Maximum levels of compression\n -a, --alwayscompress Compress to max level even if it would make output\n larger\n\nFormat of saved file is the following:\nThe string of bytes MAGIC from hzlib, followed by one byte containing the\ncompression level of the data, followed by the data.\n\nCompression level 0 is the raw input. The data used in compression level\nn+1 is the result of compressing the result provided by compression\nlevel n. Note that each level includes its codebook in its data, but does\nnot include the magic number.\n'''\nfrom hzlib import *\n\nDEFAULT_EXTENSION = '.hz'\nMAX_COMPRESSION_LEVEL = 255\nMIN_COMPRESSION_LEVEL = 0\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(\n description='Compress files using the hzlib module.')\n## parser.add_argument(\"infile\")\n parser.add_argument(\"-o\", \"--outfile\", type=str, default=None,\n help='Name of output file')\n parser.add_argument(\"-s\", \"--suffix\", type=str, default=DEFAULT_EXTENSION,\n help=('Suffix to use instead of ' +\n DEFAULT_EXTENSION))\n parser.add_argument(\"-f\", \"--force\", action='store_true',\n help=('Force compression and overwrite output ' +\n 'file if it exists'))\n parser.add_argument(\"-l\", \"--level\", type=int,\n default=MAX_COMPRESSION_LEVEL,\n help='Maximum levels of compression')\n parser.add_argument(\"-a\", \"--alwayscompress\", action='store_true',\n help=('Compress to max level even if it would ' +\n 'make output larger'))\n args = parser.parse_args()\n\n \n args.outfile = \"ziptest\"\n args.infile = \"unziptest\"\n args.level = 5\n args.alwayscompress = True\n\n\n # open files\n level = args.level\n with open(args.outfile + args.suffix, \"wb\") as outfile:\n with open(args.infile, \"rb\") as data_file:\n data = data_file.read()\n\n # code it once for each level\n for level in range(1, args.level+1):\n codebook = build_canonical_codebook(\n build_codebook(make_huffman_tree(\n symbol_count(data))))\n code = bytearray((byte for byte in join(pad(compress(\n data, codebook)), codebook)))\n\n # If you do not want to overwrite and\n # the code contains more bytes\n # use last data.\n if not args.alwayscompress and len(data) <= len(code):\n level -= 1\n break\n data = code\n\n # If compression is 0 return it as is\n if level == 0:\n start = b\"\"\n\n # 
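# (Added quick check for the Player class from jose.py above:)
# p = Player("Ana", 21, 7)
# print(p.getStats())   # prints the name, age and goal count on three lines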
Otherwise add the starting chars to it\n else:\n start = MAGIC + bytearray([level])\n if type(data) != bytearray:\n outfile.write(data)\n else:\n start = bytearray(start)\n start.extend(data)\n outfile.write(start)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ex09/testings/hzip.py","file_name":"hzip.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"578750775","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 18 13:07:35 2020\n\n@author: hemma\n\"\"\"\n\nimport random \nfrom funcs import *\n\nwith open('names/first_names.txt', 'r') as first, \\\n open('names/last_names.txt', 'r') as last, \\\n open('names/movies.txt', 'r') as movies:\n # first = first.read().splitlines()\n # last = last.read().splitlines()\n f = first.read().splitlines()\n l = last.read().splitlines()\n m = movies.read().splitlines()\n for i in range(2):\n \n a = random.randint(0, 20)\n if a < 5:\n movie = m[0]\n elif a < 8:\n movie = m[1]\n elif a < 9:\n movie = m[2]\n elif a < 11:\n movie = m[3]\n elif a < 12:\n movie = m[4]\n elif a < 15:\n movie = m[5]\n else:\n movie = m[6]\n \n\n text = \"\"\n text = text + \"Name \" + random.choice(f) + \" \" + random.choice(l) + \"\\n\"\n text = text + \"Customer number \" + \"\".join([str(r) for r in random.sample(range(10), 10)]) + \"\\n\"\n text = text + \"Movie title \" + movie + \"\\n\"\n text = text + \"Adult \" + str(random.randint(0, 5)) + \"\\n\"\n \n if movie==m[0]:\n text = text + \"Child \" + str(random.randint(0, 5)) + \"\\n\"\n else:\n text = text + \"Child \" + str(0) + \"\\n\"\n \n text = text + \"Concession \" + str(random.randint(0, 3)) + \"\\n\"\n \n encrypt\n text = encrypt(random.randint(1, 25), text)\n \n \n with open(\"sample_data/file_\" + str(i) + \".txt\", \"w\") as file:\n file.write(text)\n \n print(\"\")\n \n \n \n \n \n \n \n \n \n \n ","sub_path":"assignment_2020_V2/supplemetary_files/Intro_to_Python_Assignment_2020/sample_data_generator.py","file_name":"sample_data_generator.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"613444904","text":"import openpyxl\nimport Functions\n\nEntry = openpyxl.load_workbook('BookForProgramDirector.xlsx')\nData = openpyxl.load_workbook('BookForPython.xlsx')\n\nDUTY_SCHEDULE_SHEET = Entry[\"Duty Schedule\"]\nFULL_DAY_SHEET = Entry[\"Full Day\"]\nHALF_DAY_SHEET = Entry[\"Half Day\"]\nSESSION_DATES_SHEET = Entry[\"Session Dates\"]\n\nB1_START = (SESSION_DATES_SHEET.cell(row=4, column=2)).value\nC_START = (SESSION_DATES_SHEET.cell(row=6, column=2)).value\n\nDUTY_SCHEDULE_NAME_COLUMN = 2\nDAY_OFF_DATES_COLUMN = 2\n\nEND_DATE = B1_START\n\n# Print A session day offs to full day and half day sheets\n\ndate_row_acc = 3\nwhile (FULL_DAY_SHEET.cell(row=date_row_acc, column=DAY_OFF_DATES_COLUMN)).value != END_DATE:\n\n date_column_acc = date_row_acc + 1\n\n name_row_acc = 3\n half_day_off_slot_acc = 3\n full_day_off_slot_acc = 3\n while (DUTY_SCHEDULE_SHEET.cell(row=name_row_acc, column=DUTY_SCHEDULE_NAME_COLUMN)).value:\n\n name = (DUTY_SCHEDULE_SHEET.cell(row=name_row_acc, column=DUTY_SCHEDULE_NAME_COLUMN)).value\n\n if (DUTY_SCHEDULE_SHEET.cell(row=name_row_acc, column=date_column_acc)).value == \"F\":\n FULL_DAY_SHEET.cell(row=date_row_acc, column=full_day_off_slot_acc, value=name)\n full_day_off_slot_acc += 1\n\n elif (DUTY_SCHEDULE_SHEET.cell(row=name_row_acc, 
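# (Added reader sketch for the hzip.py container format described above,
# assuming MAGIC is the bytes constant exported by hzlib: level-0 files are
# raw data, otherwise a one-byte level follows MAGIC:)
# def split_header(blob):
#     if not blob.startswith(MAGIC):
#         return 0, blob
#     return blob[len(MAGIC)], blob[len(MAGIC) + 1:]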
column=date_column_acc)).value in [\"H\", \"T/H\"]:\n HALF_DAY_SHEET.cell(row=date_row_acc, column=half_day_off_slot_acc, value=name)\n half_day_off_slot_acc += 1\n\n name_row_acc += 1\n\n date_row_acc += 1\n\nEntry.save('BookForProgramDirector.xlsx')\n","sub_path":"FillDayOffSheets.py","file_name":"FillDayOffSheets.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"156378429","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n\r\n\r\nfrom saenopy import FiniteBodyForces\r\n \r\n# initialize the object\r\nM = FiniteBodyForces()\r\n\r\n\r\nfrom saenopy.materials import SemiAffineFiberMaterial\r\n\r\n# provide a material model\r\nmaterial = SemiAffineFiberMaterial(1645, 0.0008, 0.0075, 0.033)\r\nM.setMaterialModel(material)\r\n\r\n\r\nimport numpy as np\r\n\r\n# define the coordinates of the nodes of the mesh\r\n# the array has to have the shape N_v x 3\r\nR = np.array([[0., 0., 0.], # 0\r\n [0., 1., 0.], # 1\r\n [1., 1., 0.], # 2\r\n [1., 0., 0.], # 3\r\n [0., 0., 1.], # 4\r\n [1., 0., 1.], # 5\r\n [1., 1., 1.], # 6\r\n [0., 1., 1.]]) # 7\r\n\r\n# define the tetrahedra of the mesh\r\n# the array has to have the shape N_t x 4\r\n# every entry is an index referencing a verces in R (indices start with 0)\r\nT = np.array([[0, 1, 7, 2],\r\n [0, 2, 5, 3],\r\n [0, 4, 5, 7],\r\n [2, 5, 6, 7],\r\n [0, 7, 5, 2]])\r\n\r\n\r\n# provide the node data\r\nM.setNodes(R)\r\n# and the tetrahedron data\r\nM.setTetrahedra(T)\r\n\r\n\r\n# the displacements of the nodes which shall be fitted\r\n# during the solving\r\nU = np.array([[0 , 0, 0], # 0\r\n [0 , 0, 0], # 1\r\n [0.01, 0, 0], # 2\r\n [0.01, 0, 0], # 3\r\n [0 , 0, 0], # 4\r\n [0.01, 0, 0], # 5\r\n [0.01, 0, 0], # 6\r\n [0 , 0, 0]]) # 7\r\n\r\n# hand the displacements over to the class instance\r\nM.setTargetDisplacements(U)\r\n\r\n\r\n# relax the mesh and move the \"varible\" nodes\r\nM.regularize(stepper=0.1, alpha=0.001)\r\n\r\n\r\nresults = M.computeForceMoments(rmax=100e-6)\r\n\r\n# store the forces of the nodes\r\nM.storeF(\"F.dat\")\r\n# store the positions and the displacements\r\nM.storeRAndU(\"R.dat\", \"U.dat\")\r\n# store the center of each tetrahedron and a combined list with energies and volumina of the tetrahedrons\r\nM.storeEandV(\"RR.dat\", \"EV.dat\")\r\n\r\nM.U\r\n\r\nM.viewMesh(50, 1)\r\n\r\n","sub_path":"docs/regularization.py","file_name":"regularization.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"72812207","text":"from geopy.geocoders import Bing\nimport ssl\nimport numpy as np\nimport pandas as pd\nimport os\nimport datetime\nfrom dateutil.relativedelta import relativedelta\nimport urllib\nfrom flask import Flask, flash, redirect, render_template, request, session, abort, make_response, url_for\n\napp = Flask(__name__)\n\n# appened static content with version number to overcome caching\n\n\n@app.context_processor\ndef override_url_for():\n return dict(url_for=dated_url_for)\n\n\ndef dated_url_for(endpoint, **values):\n if endpoint == 'static':\n filename = values.get('filename', None)\n if filename:\n file_path = os.path.join(app.root_path,\n endpoint, filename)\n values['q'] = int(os.stat(file_path).st_mtime)\n return url_for(endpoint, **values)\n\n\n# need this to avoid ssl certificate error\nssl._create_default_https_context = ssl._create_unverified_context\n\n# dont need to update this because gov.uk returns whatever the 
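# (Added openpyxl round-trip sketch mirroring the cell access pattern used in
# FillDayOffSheets.py above; the workbook and sheet here are hypothetical:)
# wb = openpyxl.Workbook()
# ws = wb.active
# ws.cell(row=1, column=1, value="name")
# print(ws.cell(row=1, column=1).value)   # -> "name"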
most recently uploaded file is\nurl = 'https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/762070/Monthly_museums_and_galleries_October_2018.csv'\n# url = 'Monthly_museums_and_galleries_October_2018.csv'\ndata = pd.read_csv(url, encoding='latin1')\n\n# data checks\ndata.columns\ndata.museum.unique()\ndata.year.unique()\ndata.month.unique()\ndata.visits.unique()\ndata.dtypes\n\n\n\n# data.to_csv('data.csv', index=False)\n# data = pd.read_csv('data.csv')\n\n# convert column names to lower case\nmycols = data.columns.tolist()\nmycols = [x.lower().strip() for x in mycols]\ndata.columns = mycols\n\ndata['day'] = 1\ndata['date'] = pd.to_datetime(data[['month', 'year', 'day']])\n\n\n# remove any records for months after the current one\n# set current date\nmyfilename = urllib.request.urlopen(urllib.request.Request(url, method='HEAD')).info().get_filename()\nfilename_date = myfilename[46:-4].lower()\n\nif filename_date != '':\n current_month_string = filename_date[:-5]\n current_month = datetime.datetime.strptime(current_month_string, '%B').month\n current_year = int(filename_date[-4:])\n current_date = pd.datetime(current_year, current_month, 1)\n\n nextMonth = current_date + relativedelta(months=1)\n nextMonthLastYear = nextMonth - relativedelta(years=1)\n\n new_month = current_date.strftime(\"%b %Y\")\n old_month = nextMonthLastYear.strftime(\"%b %Y\")\n\n data = data.loc[data.date <= current_date, ]\nelse:\n current_date = pd.datetime(2019, 3, 1)\n nextMonth = current_date + relativedelta(months=1)\n nextMonthLastYear = nextMonth - relativedelta(years=1)\n new_month = current_date.strftime(\"%b %Y\")\n old_month = nextMonthLastYear.strftime(\"%b %Y\")\n\n# check for NaNs\ndata.isnull().any()\n\n# null_mus = data.loc[data.visits.isnull(), ].index.copy()\n# data.loc[null_mus, ]\n\n# check for duplicates\n# as of october 2018, there is an exact duplicate of ROYAL MUSEUMS GREENWICH Sep 2018, and dupes with missing visits value for NATURAL HISTORY MUSEUM (TRING) and TATE BRITAIN Jan 2018\ndata.shape\ndata.loc[data.duplicated(['museum', 'year', 'month'])]\n\n# remove duplicates\ndata = data.loc[~(data.duplicated(['museum', 'year', 'month']))]\n\n# delete previous records with missing values\n# data['visits'] = data['visits'].replace(np.nan, 0)\n\n# check values in visits column\ndata.visits.unique().size\ndata.visits.isnull().sum()\ndata[data['visits'] > 0].head()\n\n# clean up string numbers in visits column and convert to int\n# replace - with 0 and convert visit number strings to numeric - should fix this in CSV\ndata.loc[data.visits.isin(['-']), 'visits'] = '0'\ndata.loc[data.visits.isin([' - ']), 'visits'] = '0'\n# data['visits'] = data['visits'].str.replace(',', '')\ndata['visits'] = data['visits'].fillna(0)\ndata.dtypes\n\ndata['visits'] = data['visits'].astype(int)\ndata.loc[data['visits'] == 0, 'visits'] = np.nan\n\ncurrent_museums = data.loc[(data['date'] == current_date) & ~(pd.isna(data['visits'])), 'museum'].copy()\nall_museums = data.loc[:, 'museum'].copy()\nnolongersponsored = all_museums[~all_museums.isin(current_museums)].unique()\n\n#locale.setlocale(locale.LC_NUMERIC, '')\n#data['visits'] = pd.DataFrame({'temp': data.visits}).applymap(atof)\n\n# museums_list = data.museum.unique().tolist()\n# years_list = data.year.unique().tolist()\nmuseums_list = data.museum.unique()\nyears_list = data.year.unique()\n\nraw_data = data.copy()\n\n# pivot data to create column for each series (museum)\ndata = data.pivot(index='date', columns='museum', 
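# (Added note, not in the original: pivot() here reshapes the long-format
# records into one date-indexed column per museum, which is the layout that
# rolling(window=12) needs for the moving averages computed just below.)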
values='visits')\ndata = data.reset_index()\n\n# for each museum create moving average column\nfor col in museums_list:\n data[col + '_MA'] = data.rolling(window=12)[col].mean()\n\ndata['date'] = data['date'].dt.strftime('%Y-%m-%d')\ndata = data.transpose().reset_index()\ndata = data.to_dict(orient='split')['data']\n\nseasonal_data = {}\nfor mus in museums_list:\n df = raw_data.copy()\n df['date'] = df['date'].dt.strftime('%Y-%m-%d')\n # data = data.transpose().reset_index()\n df = df.loc[df['museum'] == mus]\n df['day'] = 1\n df['backupyear'] = df['year'].copy()\n df['year'] = 2018\n df['month'] = pd.to_datetime(df[['month', 'year', 'day']])\n df['year'] = df['backupyear'].copy()\n df['year'] = df['year'].apply(str)\n df['month'] = df['month'].dt.strftime('%Y-%m-%d')\n df = df.pivot(index='month',columns='year',values='visits')\n df = df.reset_index()\n df = df.transpose().reset_index()\n df = df.to_dict(orient='split')['data']\n seasonal_data[mus] = df.copy()\n\n\n# [x for x in museums_list if 'TOTAL' not in x]\nmuseums_list_individual = {\n 'BRITISH MUSEUM': ['WC1B 3DG', 51.518970, -0.126500],\n 'GEFFRYE MUSEUM': ['E2 8EA', 51.531556, -0.076271],\n 'HORNIMAN MUSEUM (Excluding visits to the Garden)': ['SE23 3PQ', 51.441131, -0.060762],\n 'IMPERIAL WAR MUSEUM LONDON': ['SE1 6HZ', 51.496008, -0.108353],\n 'HMS BELFAST (IMPERIAL WAR MUSEUM)': ['SE1 2JH', 51.506048, -0.081481],\n 'CHURCHILL WAR ROOMS (IMPERIAL WAR MUSEUM)': ['SW1A 2AQ', 51.501764, -0.129108],\n 'IMPERIAL WAR MUSEUM DUXFORD ': ['CB22 4QR', 52.094764, 0.128180],\n 'IMPERIAL WAR MUSEUM NORTH': ['M17 1TZ', 53.469713, -2.298734],\n 'NATIONAL GALLERY': ['WC2N 5DN', 51.509097, -0.127683],\n '(NHM) SOUTH KENSINGTON': ['SW7 5BD', 51.496563, -0.176892],\n '(NHM) TRING': ['HP23 6AP', 51.791524, -0.660652],\n 'ROYAL MUSEUMS GREENWICH ': ['SE10 9NF', 51.481154, -0.003746],\n 'NATIONAL MUSEUMS LIVERPOOL': ['L3 1DG', 53.406165, -2.995119],\n 'NATIONAL COAL MINING MUSEUM FOR ENGLAND': ['WF4 4RH', 53.643479, -1.619416],\n 'SCIENCE MUSEUM GROUP SOUTH KENSINGTON ': ['SW7 2DD', 51.497295, -0.176503],\n 'SCIENCE MUSEUM GROUP NATIONAL MEDIA MUSEUM': ['BD1 1NQ', 53.790557, -1.756460],\n 'SCIENCE MUSEUM GROUP NATIONAL RAILWAY MUSEUM': ['YO26 4XJ', 53.960767, -1.096551],\n 'SCIENCE MUSEUM GROUP LOCOMOTION AT SHILDON': ['DL4 2RE'],\n 'SCIENCE MUSEUM GROUP MUSEUM OF SCIENCE AND INDUSTRY, MANCHESTER': ['M3 4FP'],\n 'SCIENCE MUSEUM GROUP SWINDON (WROUGHTON)': ['SN4 9LT'],\n 'NATIONAL PORTRAIT GALLERY': ['WC2H 0HE'],\n '(RA) LEEDS': ['LS10 1LT'],\n '(RA) FORT NELSON ': ['PO17 6AN'],\n '(RA) WHITE TOWER (BASED AT THE TOWER OF LONDON) ': ['EC3N 4AB'],\n \"SIR JOHN SOANE'S MUSEUM\": ['WC2A 3BP'],\n 'TATE BRITAIN ': ['SW1P 4RG'],\n 'TATE MODERN ': ['SE1 9TG'],\n 'TATE LIVERPOOL': ['L3 4BB'],\n 'TATE ST IVES': ['TR26 1TG'],\n '(V&A) SOUTH KENSINGTON': ['SW7 2RL'],\n '(V&A) MUSEUM OF CHILDHOOD, BETHNAL GREEN': ['E2 9PA'],\n '(V&A) BLYTHE HOUSE': ['W14 0QX'],\n 'WALLACE COLLECTION': ['W1U 3BN'],\n '(T&W) ARBEIA': ['NE33 2BB'],\n '(T&W) DISCOVERY': ['NE1 4JA'],\n '(T&W) GREAT NORTH MUSEUM': ['NE2 4PT'],\n '(T&W) LAING': ['NE1 8AG'],\n '(T&W) WASHINGTON F PIT': ['NE37 1BN'],\n '(T&W) SEGEDUNUM': ['NE28 6HR'],\n '(T&W) SHIPLEY': ['NE8 4JB'],\n '(T&W) SOUTH SHIELDS': ['NE33 2JA'],\n '(T&W) HATTON GALLERY': ['NE1 7RU'],\n '(T&W) STEPHENSON ': ['NE29 8DX'],\n 'MUSEUM OF LONDON': ['EC2Y 5HN'],\n 'MUSEUM IN DOCKLANDS': ['E14 4AL'],\n}\n\n# add lon lat to postcodes\ngeolocator = Bing(\n api_key='As--8aijMhZO5UZPjONPEaePK5nn16TgjznsOZEYbDTEwgsaL3C364fnOIwzEg8N')\n\n# 
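# (Added one-off geocoding sketch matching the commented-out calls nearby; it
# needs a valid Bing Maps key in the Bing() constructor above and network access:)
# loc = geolocator.geocode("WC1B 3DG")
# print(loc.latitude, loc.longitude)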
from geopy.geocoders import GoogleV3\n# geolocator = GoogleV3()\n\n# from geopy.geocoders import Nominatim\n# geolocator = Nominatim(user_agent=\"ithinkthiscanbeanything\")\n\n##api_key\n#location = geolocator.geocode(\"BRITISH MUSEUM\")\n#print((location.latitude, location.longitude))\n#geolocator.geocode(\"WC1B 3DG\").latitude\n#geolocator.geocode(\"YO26 4XJ\").latitude\n#geolocator.geocode(\"HORNIMAN MUSEUM (Excluding visits to the Garden)\").longitude\n\n# mus_locs = pd.DataFrame.from_dict(museums_list_individual, orient='index')\n# mus_locs['location'] = mus_locs[0].apply(geolocator.geocode)\n# mus_locs['lon'] = mus_locs['location'].apply(lambda x: x.longitude)\n# mus_locs['lat'] = mus_locs['location'].apply(lambda x: x.latitude)\n#mus_locs.to_csv('mus_locs.csv')\n# mus_locs = pd.read_csv('museum_locations.csv', index_col=0)\n\n\nmus_locs = pd.read_csv('museum_locations.csv')\nmus_locs_json = mus_locs.to_dict('records')\n\n\n# define number format function\ndef format_nums(num):\n if num >= 999000:\n fnum = str(round(num / 1000000, 1)) + 'M'\n elif num >= 1000:\n fnum = str(int(round(num / 1000, 0))) + 'k'\n else:\n fnum = str(int(num))\n return(fnum)\n\n\n# kpi data\ndf = pd.read_csv('museum-dashboard-kpi2.csv')\ndf = df.set_index(['Museum', 'Year'])\ndf = df.stack().reset_index()\ndf.columns = ['Museum', 'Year', 'Indicator', 'Value']\nkpi_list = df.Indicator.unique()\nkpi_museums_list = df.Museum.unique()\n# kpi_list = df.Indicator.unique().tolist()\n# kpi_museums_list = df.Museum.unique().tolist()\n\nkpi_raw = df.copy()\n\nkpi_data = {}\nfor kpi in kpi_list:\n df = kpi_raw.copy()\n df = df.loc[df['Indicator'] == kpi]\n df = df.pivot(index='Year',columns='Museum',values='Value')\n df = df.reset_index()\n df = df.transpose().reset_index()\n df = df.to_dict(orient='split')['data']\n kpi_data[kpi] = df.copy()\n\n\n\n# for j in indicator:\n# j2 = j\n# if j == 'Facilitated and self directed visits by visitors under 18 years old and in formal education':\n# j2 = 'Formal Education'\n# if j == 'Number of instances where visitors under 18 years old participated in on-site activites':\n# j2 = 'On-site activities'\n\n# leaderboard df\n\n# summarise total visits in last 12 months\nmusy = raw_data.copy()\ntoday = max(musy.date)\n# only keep records from previous year\nmusy = musy.loc[(musy['date'] > (today - pd.DateOffset(years=1)))\n & (musy['date'] <= today)]\nlast_year = musy.groupby(['museum']).sum()\nlast_year = last_year.reset_index()\nlast_year = last_year.loc[last_year['visits'] > 0]\nlast_year['visits_format'] = last_year['visits'].apply(format_nums)\n\n# IMPERIAL WAR MUSEUM (TOTAL) is no longer IMPERIAL WAR MUSEUM TOTAL\n# need to keep the museum names consistent otherwise wont be able to match to postcodes\n\nleaderboard_mus = ['BRITISH MUSEUM',\n 'IMPERIAL WAR MUSEUM TOTAL',\n 'NHM TOTAL',\n 'SCIENCE MUSEUM GROUP TOTAL',\n '(RA) TOTAL',\n 'TATE TOTAL',\n '(V&A) TOTAL',\n 'TYNE & WEAR TOTAL',\n 'HORNIMAN MUSEUM',\n 'NATIONAL GALLERY',\n 'NATIONAL PORTRAIT GALLERY']\n# leaderboard_df = last_year.loc[last_year['museum'].isin(leaderboard_mus)]\n# leaderboard_df = leaderboard_df.sort_values(by='visits', ascending=True)\n\nexcl_mus = ['TOTAL VISITOR FIGURES', 'TATE MODERN ', '(NHM) SOUTH KENSINGTON', 'SCIENCE MUSEUM GROUP SOUTH KENSINGTON ',\n '(V&A) SOUTH KENSINGTON', '(RA) WHITE TOWER (BASED AT THE TOWER OF LONDON) ', 'TATE BRITAIN ']\nleaderboard_df = last_year.sort_values(by='visits', ascending=False)\nleaderboard_df = leaderboard_df.loc[~last_year['museum'].isin(excl_mus)]\n# 
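# (Added sanity checks for format_nums above:)
# assert format_nums(1300000) == '1.3M'
# assert format_nums(45300) == '45k'
# assert format_nums(512) == '512'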
leaderboard_df = leaderboard_df.tail(10)\nleaderboard_df = leaderboard_df.transpose()\nleaderboard_df = leaderboard_df.reset_index()\nleaderboard_json = leaderboard_df.to_dict(orient='split')['data']\n\nprint('starting')\nprint(app.static_folder)\nprint(app.static_url_path)\nprint(app.template_folder)\n\n\n@app.route('/')\ndef index():\n \"\"\"Render home page.\"\"\"\n return 'hi there' # we can render templates as usual\n\n@app.route('/data-tools/museum-visits')\ndef index_with_path():\n \"\"\"Render home page.\"\"\"\n print(app.static_folder)\n print(app.static_url_path)\n print(app.template_folder)\n return render_template('index.html', leaderboard_data=leaderboard_json, map_data=mus_locs_json, ts_data=data, museums_list=museums_list, years_list=years_list, seasonal_data=seasonal_data, kpi_data=kpi_data, kpi_list=kpi_list, kpi_museums_list=kpi_museums_list, old_month=old_month, new_month=new_month, nolongersponsored=nolongersponsored) # we can render templates as usual\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"414085489","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nimport arrow\r\nimport csv\r\nimport calendar\r\nimport math\r\n\r\nwin = tk.Tk()\r\nwin.title(\"Budgeting App\")\r\n\r\nTAX_PERSONAL_ALLOWANCE = 456\r\nTAX_BASE_LIMIT = 1327\r\nTAX_HIGHER_LIMIT = 5770\r\nNATIONAL_INSURANCE_ALLOWANCE = 324\r\nTAX_BASE_RATE = 0.2\r\nTAX_HIGHER_RATE = 0.4\r\nTAX_ADDITIONAL_RATE = 0.45\r\nNATIONAL_INSURANCE_RATE = 0.12\r\nDAYS_BETWEEN_PAY = 14\r\nSAVINGS = 200\r\n\r\nexp = []\r\nnames = []\r\ncosts = []\r\ndates = []\r\n\r\n\r\ndef radCall():\r\n pass\r\n\r\n\r\ndef taxCalc():\r\n\r\n money = int(pay.get())\r\n\r\n if money >= TAX_HIGHER_LIMIT:\r\n addTaxAmt = money - TAX_HIGHER_LIMIT\r\n higTaxAmt = TAX_HIGHER_LIMIT - TAX_BASE_LIMIT\r\n basTaxAmt = TAX_BASE_LIMIT - TAX_PERSONAL_ALLOWANCE\r\n tax = math.floor((addTaxAmt * TAX_ADDITIONAL_RATE) + (higTaxAmt * TAX_HIGHER_RATE) + (basTaxAmt * TAX_BASE_RATE))\r\n elif money >= TAX_BASE_LIMIT:\r\n higTaxAmt = money - TAX_BASE_LIMIT\r\n basTaxAmt = TAX_BASE_LIMIT - TAX_PERSONAL_ALLOWANCE\r\n tax = math.floor((higTaxAmt * TAX_HIGHER_RATE) + (basTaxAmt * TAX_BASE_RATE))\r\n elif money >= TAX_PERSONAL_ALLOWANCE:\r\n tax = math.floor(((money - TAX_PERSONAL_ALLOWANCE) * TAX_BASE_RATE))\r\n else:\r\n pass\r\n\r\n if money >= NATIONAL_INSURANCE_ALLOWANCE:\r\n NI = math.floor((money - NATIONAL_INSURANCE_ALLOWANCE) * NATIONAL_INSURANCE_RATE)\r\n else:\r\n pass\r\n\r\n money -= tax\r\n exp.append(['Tax', str(tax), str(money)])\r\n ttk.Label(win, text=\"Tax: £\" + str(tax) + \", £\" + str(money)).grid(column=0, row=8, columnspan=3)\r\n money -= NI\r\n exp.append(['National Insurance', str(NI), str(money)])\r\n ttk.Label(win, text='National Insurance: £' + str(NI) + \", £\" + str(money)).grid(column=0, row=9, columnspan=3)\r\n\r\n save(money)\r\n\r\n\r\ndef restBudget(m):\r\n\r\n a = arrow.utcnow()\r\n month = a.month\r\n days = 0\r\n rent = 60\r\n\r\n with open('expenses.txt', 'r') as csvfile:\r\n readers = csv.reader(csvfile, delimiter=\",\")\r\n for row1 in readers:\r\n names.append(row1[0])\r\n costs.append(row1[1])\r\n dates.append(row1[2])\r\n\r\n rows = 10\r\n\r\n if (month == '2') and (a.year // 4) == 0:\r\n month = 'February2'\r\n elif month == '2':\r\n month = 'February1'\r\n else:\r\n month = calendar.month_name[month]\r\n 
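# (Added worked example for the taxCalc brackets above: for pay = 2000 the
# second branch applies, so tax = floor((2000-1327)*0.4 + (1327-456)*0.2)
# = floor(443.4) = 443, and NI = floor((2000-324)*0.12) = 201.)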
print(month)\r\n\r\n with open('months.txt', 'r') as csvfile:\r\n readers = csv.reader(csvfile, delimiter=\",\")\r\n for row in readers:\r\n if row[0] == month:\r\n days = row[1]\r\n else:\r\n pass\r\n\r\n x = 0\r\n days = int(days)\r\n nextpay = int(date.get()) + DAYS_BETWEEN_PAY\r\n\r\n while x != len(names):\r\n if int(date.get()) <= days:\r\n if int(dates[x]) >= int(date.get()) and int(dates[x]) < nextpay:\r\n rows += 1\r\n m -= int(costs[x])\r\n ttk.Label(win, text=names[x] + \": £\" + costs[x] + \", £\" + str(m)).grid(column=0, row=rows, columnspan=3)\r\n else:\r\n pass\r\n else:\r\n daysend = days - int(date.get())\r\n payday = DAYS_BETWEEN_PAY - daysend\r\n print(payday)\r\n if dates[x] < nextpay:\r\n rows += 1\r\n m -= int(costs[x])\r\n ttk.Label(win, text=names[x] + \": £\" + costs[x] + \", £\" + str(m)).grid(column=0, row=rows, columnspan=3)\r\n else:\r\n pass\r\n x += 1\r\n\r\n m -= rent\r\n ttk.Label(win, text=\"Rent: £\" + str(rent) + \", £\" + str(m)).grid(column=0, row=rows + 1, columnspan=3)\r\n ttk.Label(win, text=\"Remaining Money: £\" + str(m)).grid(column=0, row=7, columnspan=3)\r\n\r\n\r\ndef save(money):\r\n\r\n if savings.get() == 1:\r\n money -= SAVINGS\r\n exp.append([\"Savings\", str(SAVINGS), str(money)])\r\n print(exp)\r\n restBudget(money)\r\n ttk.Label(win, text=\"Savings: £\" + str(SAVINGS) + \", £\" + str(money)).grid(column=0, row=10, columnspan=3)\r\n else:\r\n restBudget(money)\r\n\r\n\r\n# adding a label for instructions\r\nttk.Label(win, text=\"Enter Date number of the Pay Day\").grid(column=0, row=0, columnspan=3)\r\n\r\n# adding another entry for the date\r\ndate = tk.StringVar()\r\ndateEntered = ttk.Entry(win, width=12, textvariable=date)\r\ndateEntered.grid(column=0, row=1, columnspan=3)\r\n\r\n# Adding a label for instructions\r\nttk.Label(win, text='Enter the amount to be paid before tax').grid(column=0, row=2, columnspan=3)\r\n\r\n# Adding a Textbox Entry Widget to enter the amount\r\npay = tk.StringVar()\r\nnameEntered = ttk.Entry(win, width=12, textvariable=pay)\r\nnameEntered.grid(column=0, row=3, columnspan=3)\r\n\r\n# Adding a check button for saving\r\nsavings = tk.IntVar()\r\ncheck1 = tk.Checkbutton(win, text='Saving', variable=savings).grid(column=0, row=4, columnspan=3)\r\n\r\n# Adding a button to workout the budget\r\nworking = ttk.Button(win, text=\"Workout the Budget\", command=taxCalc).grid(column=0, row=6, columnspan=3)\r\n\r\nwin.mainloop()\r\n","sub_path":"Budgeter/bugeterv2.py","file_name":"bugeterv2.py","file_ext":"py","file_size_in_byte":4963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"231024146","text":"# The MIT License\n#\n# Copyright 2016-2017 UB Dortmund .\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
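# (Added note on restBudget in bugeterv2.py above: dates[] holds strings, so the
# else-branch comparison needs the same cast as the first branch, i.e.
# `if int(dates[x]) < nextpay:`.)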
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport babelfish\nimport bibtexparser\nfrom bibtexparser.bibdatabase import BibDatabase\nimport datetime\nimport requests\nimport simplejson as json\nimport uuid\n\ntry:\n import local_secrets as secrets\nexcept ImportError:\n import secrets\n\n\nORCID_PUBTYPES = {\n 'Monograph': 'BOOK',\n 'AudioBook': 'BOOK',\n 'Chapter': 'BOOK_CHAPTER',\n 'ChapterInLegalCommentary': 'BOOK_CHAPTER',\n 'ChapterInMonograph': 'BOOK_CHAPTER',\n 'review': 'BOOK_REVIEW',\n 'Thesis': 'DISSERTATION',\n 'Conference': 'CONFERENCE_PAPER',\n 'Collection': 'EDITED_BOOK',\n 'ArticleJournal': 'JOURNAL_ARTICLE',\n 'SpecialIssue': 'JOURNAL_ISSUE',\n 'InternetDocument': 'ONLINE_RESOURCE',\n 'ArticleNewspaper': 'NEWSPAPER_ARTICLE',\n 'Report': 'REPORT',\n 'ResearchData': 'DATA_SET',\n 'Patent': 'PATENT',\n 'Lecture': 'LECTURE_SPEECH',\n 'Standard': 'STANDARDS_AND_POLICY'\n}\n\nWTF_PUBTYPES = {\n 'OTHER': 'Other',\n 'REPORT': 'Report',\n 'JOURNAL_ARTICLE': 'ArticleJournal',\n 'DATA_SET': 'ResearchData',\n 'DISSERTATION': 'Thesis',\n 'EDITED_BOOK': 'Collection',\n 'BOOK_CHAPTER': 'Chapter'\n}\n\nBIBTEX_PUBTYPES = {\n 'Monograph': 'book',\n 'AudioBook': 'book',\n 'Chapter': 'inbook',\n 'ChapterInLegalCommentary': 'inbook',\n 'ChapterInMonograph': 'inbook',\n 'review': 'misc',\n 'Thesis': 'phdthesis',\n 'Conference': 'proceedings',\n 'Collection': 'book',\n 'ArticleJournal': 'article',\n 'SpecialIssue': 'misc',\n 'InternetDocument': 'misc',\n 'ArticleNewspaper': 'article',\n 'Report': 'unpublished',\n 'ResearchData': 'misc',\n 'Patent': 'misc',\n 'Lecture': 'misc',\n 'Standard': 'misc'\n}\n\n\ndef orcid2mms(orcid_id='', orcid_work_record=None):\n\n mms_json = {}\n\n if orcid_work_record and (orcid_work_record.get('visibility') == 'PUBLIC' or orcid_work_record.get('visibility') == 'LIMITED'):\n mms_json['id'] = str(uuid.uuid4())\n\n orcid_sync = {\n 'orcid_id': orcid_id,\n 'orcid_put_code': '',\n 'orcid_visibility': str(orcid_work_record.get('visibility'))\n }\n mms_json['orcid_sync'] = [orcid_sync]\n for extid in orcid_work_record.get('external-ids').get('external-id'):\n if extid.get('external-id-type') == 'doi':\n # print('\\tadd scopus_id \"%s\"' % extid.get('external-id-value'))\n mms_json['DOI'] = [extid.get('external-id-value')]\n if extid.get('external-id-type') == 'eid':\n # print('\\tadd scopus_id \"%s\"' % extid.get('external-id-value'))\n mms_json['scopus_id'] = extid.get('external-id-value')\n if extid.get('external-id-type') == 'wosuid':\n # print('\\tadd wosuid \"%s\"' % extid.get('external-id-value'))\n mms_json['WOSID'] = extid.get('external-id-value').replace('WOS:', '')\n if extid.get('external-id-type') == 'pmid':\n # print('\\tadd pmid \"%s\"' % extid.get('external-id-value'))\n mms_json['PMID'] = extid.get('external-id-value')\n if extid.get('external-id-type') == 'urn':\n # print('\\tadd pmid \"%s\"' % extid.get('external-id-value'))\n mms_json['uri'] = [extid.get('external-id-value')]\n\n mms_json['title'] = orcid_work_record.get('title').get('title').get('value')\n if orcid_work_record.get('title').get('subtitle'):\n mms_json['subtitle'] = orcid_work_record.get('title').get('subtitle').get('value')\n\n mms_json['pubtype'] = WTF_PUBTYPES.get(orcid_work_record.get('type'))\n if not mms_json['pubtype']:\n WTF_PUBTYPES.get('OTHER')\n\n 
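# (Added illustration for the mapping tables above; they are not exact inverses:)
# WTF_PUBTYPES.get(ORCID_PUBTYPES['Chapter'])   -> 'Chapter'
# WTF_PUBTYPES.get(ORCID_PUBTYPES['Monograph']) -> None, hence the 'OTHER' fallback
# (note: the fallback in orcid2mms above calls WTF_PUBTYPES.get('OTHER') without
# assigning the result back to mms_json['pubtype'])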
issued = orcid_work_record.get('publication-date').get('year').get('value')\n if orcid_work_record.get('publication-date').get('month'):\n issued += '-%s' % orcid_work_record.get('publication-date').get('month').get('value')\n if orcid_work_record.get('publication-date').get('day'):\n issued += '-%s' % orcid_work_record.get('publication-date').get('day').get('value')\n mms_json['issued'] = issued\n\n mms_json['editorial_status'] = 'new'\n mms_json['note'] = 'added by ORCID synchronization'\n mms_json['created'] = timestamp()\n mms_json['changed'] = timestamp()\n mms_json['owner'] = ['daten.ub@tu-dortmund.de']\n\n return mms_json\n\n\ndef mms2orcid(affiliation='', mms_records=None):\n orcid_records = []\n\n # logging.info('wtf_records: %s' % wtf_records)\n if mms_records is None:\n mms_records = []\n\n if len(mms_records) > 0:\n for record in mms_records:\n\n orcid_record = {}\n db = BibDatabase()\n db.entries = []\n bibtex_entry = {}\n\n # work type\n orcid_type = ORCID_PUBTYPES.get(record.get('pubtype'))\n if orcid_type is None:\n orcid_type = 'OTHER'\n orcid_record.setdefault('type', orcid_type)\n\n bibtex_type = BIBTEX_PUBTYPES.get(record.get('pubtype'))\n if bibtex_type is None:\n bibtex_type = 'misc'\n bibtex_entry.setdefault('ENTRYTYPE', bibtex_type)\n\n external_ids = {}\n external_id = []\n # ids - record id (source-work-id)\n ext_id = {}\n ext_id.setdefault('external-id-type', 'source-work-id')\n ext_id.setdefault('external-id-value', record.get('id'))\n ext_id.setdefault('external-id-relationship', 'SELF')\n if affiliation and affiliation in secrets.AFFILIATION_URL.keys():\n ext_id.setdefault('external-id-url', '%s%s/%s' % (secrets.AFFILIATION_URL.get(affiliation), record.get('pubtype'), record.get('id')))\n external_id.append(ext_id)\n bibtex_entry.setdefault('ID', record.get('id'))\n\n # ids - ISBN (isbn)\n if record.get('ISBN'):\n for isbn in record.get('ISBN'):\n if isbn:\n ext_id = {}\n ext_id.setdefault('external-id-type', 'isbn')\n ext_id.setdefault('external-id-value', isbn)\n ext_id.setdefault('external-id-relationship', 'SELF')\n external_id.append(ext_id)\n\n # ids - ISSN (issn)\n if record.get('ISSN'):\n for issn in record.get('ISSN'):\n if issn:\n ext_id = {}\n ext_id.setdefault('external-id-type', 'issn')\n ext_id.setdefault('external-id-value', issn)\n ext_id.setdefault('external-id-relationship', 'SELF')\n external_id.append(ext_id)\n\n # ids - ZDB (other-id)\n if record.get('ZDBID'):\n for zdbid in record.get('ZDBID'):\n if zdbid:\n ext_id = {}\n ext_id.setdefault('external-id-type', 'other-id')\n ext_id.setdefault('external-id-value', zdbid)\n ext_id.setdefault('external-id-url', 'http://ld.zdb-services.de/resource/%s' % zdbid)\n ext_id.setdefault('external-id-relationship', 'SELF')\n external_id.append(ext_id)\n\n # ids - PMID (pmc)\n if record.get('PMID'):\n ext_id = {}\n ext_id.setdefault('external-id-type', 'pmid')\n ext_id.setdefault('external-id-value', record.get('PMID'))\n ext_id.setdefault('external-id-url', 'http://www.ncbi.nlm.nih.gov/pubmed/%s' % record.get('PMID'))\n ext_id.setdefault('external-id-relationship', 'SELF')\n external_id.append(ext_id)\n\n # ids - WOS-ID (wosuid)\n if record.get('WOSID'):\n ext_id = {}\n ext_id.setdefault('external-id-type', 'doi')\n ext_id.setdefault('external-id-value', record.get('WOSID'))\n ext_id.setdefault('external-id-url', 'http://ws.isiknowledge.com/cps/openurl/service?url_ver=Z39.88-2004&rft_id=info:ut/%s' % record.get('WOSID'))\n ext_id.setdefault('external-id-relationship', 'SELF')\n 
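# (Added shape produced by the external-id loop above, e.g. for a DOI; the value
# is illustrative:)
# {"external-id-type": "doi",
#  "external-id-value": "10.1000/xyz123",
#  "external-id-url": "https://doi.org/10.1000/xyz123",
#  "external-id-relationship": "SELF"}
# (note: the WOS-ID branch above sets external-id-type to 'doi'; 'wosuid' is what
# the comment and the orcid2mms parser expect)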
external_id.append(ext_id)\n\n            # ids - doi\n            if record.get('DOI'):\n                for doi in record.get('DOI'):\n                    if doi:\n                        ext_id = {}\n                        ext_id.setdefault('external-id-type', 'doi')\n                        ext_id.setdefault('external-id-value', doi)\n                        ext_id.setdefault('external-id-url', 'https://doi.org/%s' % doi)\n                        ext_id.setdefault('external-id-relationship', 'SELF')\n                        external_id.append(ext_id)\n                bibtex_entry.setdefault('doi', record.get('DOI')[0])\n\n            if external_id:\n                external_ids.setdefault('external-id', external_id)\n\n            orcid_record.setdefault('external-ids', external_ids)\n\n            # titles\n            title = {}\n            title.setdefault('title', record.get('title'))\n            if record.get('subtitle'):\n                title.setdefault('subtitle', record.get('subtitle'))\n            orcid_record.setdefault('title', title)\n\n            title = record.get('title')\n            if record.get('subtitle'):\n                title += ': %s' % record.get('subtitle')\n            bibtex_entry.setdefault('title', title)\n\n            # issued\n            if record.get('issued'):\n                publication_date = {}\n                date_parts = []\n                for date_part in str(record.get('issued')).replace('[', '').replace(']', '').split('-'):\n                    date_parts.append(date_part)\n                publication_date.setdefault('year', int(date_parts[0]))\n                bibtex_entry.setdefault('year', date_parts[0])\n                if len(date_parts) > 1:\n                    publication_date.setdefault('month', int(date_parts[1]))\n                    bibtex_entry.setdefault('month', date_parts[1])\n                if len(date_parts) > 2:\n                    publication_date.setdefault('day', int(date_parts[2]))\n                    bibtex_entry.setdefault('day', date_parts[2])\n                orcid_record.setdefault('publication-date', publication_date)\n\n            # contributors\n            contributors = {}\n            contributor = []\n            author_str = ''\n            for author in record.get('person'):\n                if 'aut' in author.get('role'):\n                    con = {}\n                    con.setdefault('credit-name', author.get('name'))\n                    if author.get('orcid'):\n                        con.setdefault('contributor-orcid', {'uri': 'http://orcid.org/%s' % author.get('orcid')})\n                    contributor_attributes = {}\n                    contributor_attributes.setdefault('contributor-role', 'AUTHOR')\n                    con.setdefault('contributor-attributes', contributor_attributes)\n                    contributor.append(con)\n                    if author_str != '':\n                        author_str += ' and '\n                    author_str += author.get('name')\n            contributors.setdefault('contributor', contributor)\n            orcid_record.setdefault('contributors', contributors)\n\n            bibtex_entry.setdefault('author', author_str)\n\n            # language\n            if record.get('language') and record.get('language')[0] and record.get('language')[0] != 'None':\n                lang_code = ''\n                try:\n                    lang_code = str(babelfish.Language.fromalpha3b(record.get('language')[0]))\n                except babelfish.exceptions.LanguageReverseError:\n                    pass\n                if lang_code:\n                    orcid_record.setdefault('language-code', lang_code)\n\n            # is_part_of\n            hosts = []\n            if record.get('is_part_of'):\n                hosts = record.get('is_part_of')\n\n            for host in hosts:\n                if host.get('is_part_of') != '':\n                    try:\n                        response = requests.get('%s/%s/%s' % (secrets.MMS_API, 'work',\n                                                              host.get('is_part_of')),\n                                                headers={'Accept': 'application/json'},\n                                                )\n                        status = response.status_code\n                        if status == 200:\n                            myjson = json.loads(response.content.decode('utf8')).get('wtf_json')\n\n                            title = myjson.get('title')\n                            if myjson.get('subtitle'):\n                                title += ': %s' % myjson.get('subtitle')\n                            orcid_record.setdefault('journal-title', title)\n                            if bibtex_entry.get('ENTRYTYPE') == 'article':\n                                bibtex_entry.setdefault('journal', title)\n                            elif bibtex_entry.get('ENTRYTYPE') == 'inbook':\n                                bibtex_entry.setdefault('booktitle', title)\n                            elif bibtex_entry.get('ENTRYTYPE') == 'inproceedings':\n                                bibtex_entry.setdefault('booktitle', title)\n                            elif bibtex_entry.get('ENTRYTYPE') == 
'incollection':\n                                bibtex_entry.setdefault('booktitle', title)\n                            else:\n                                bibtex_entry.setdefault('series', title)\n                    except AttributeError as e:\n                        print('ERROR: %s' % e)\n                elif host.get('host_title'):\n                    orcid_record.setdefault('journal-title', host.get('host_title'))\n\n                if host.get('volume') != '':\n                    bibtex_entry.setdefault('volume', host.get('volume'))\n                else:\n                    bibtex_entry.setdefault('volume', '')\n\n            if bibtex_entry:\n                db.entries.append(bibtex_entry)\n\n            citation = {}\n            citation.setdefault('citation-type', 'BIBTEX')\n            citation.setdefault('citation-value', bibtexparser.dumps(db))\n            orcid_record.setdefault('citation', citation)\n\n            orcid_records.append(orcid_record)\n\n    return orcid_records\n\n\ndef timestamp():\n    date_string = str(datetime.datetime.now())[:-3]\n    if date_string.endswith('0'):\n        date_string = '%s1' % date_string[:-1]\n\n    return date_string\n","sub_path":"orcid_mms.py","file_name":"orcid_mms.py","file_ext":"py","file_size_in_byte":15698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"291857365","text":"# Feestlunch\n\nAantalCroissantjes = 17\nAantalStokbroden = 2 \nAantalKorting = 3\n\nPrijsCroissantjes = 0.39\nPrijsStokbroden = 2.78\nPrijsKorting = -0.50\n\nTotaalprijsCroissantjes = AantalCroissantjes * PrijsCroissantjes\nTotaalprijsStokbroden = AantalStokbroden * PrijsStokbroden\nKorting = AantalKorting * PrijsKorting\n\ntotaalprijs = TotaalprijsCroissantjes + TotaalprijsStokbroden + Korting\n\nprint(\"De feestlunch kost je bij de bakker €\" + str(totaalprijs) + \" voor de 17 croissantjes en de 2 stokbroden als de 3 kortingsbonnen nog geldig zijn\")","sub_path":"Feestlunch.py","file_name":"Feestlunch.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"632522357","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 18 12:07:50 2018\n\n@author: yumi.zhang\n\"\"\"\n\nimport datetime\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom tabledef import *\n\nengine = create_engine('sqlite:///tutorial.db', echo = True)\n\nSession = sessionmaker(bind = engine)\nsession = Session()\n\n#manually add users information into the database\nuser = User(\"1\", \"1\")\nsession.add(user)\n\nuser2 = User(\"2\", \"2\")\nsession.add(user2)\n\nuser3 = User(\"3\", \"3\")\nsession.add(user3)\n\nsession.commit()\nsession.commit()","sub_path":"dummy.py","file_name":"dummy.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"447424907","text":"import sys\nimport os\n\nimport csv\nfrom lxml import etree\n\n\ndef apply_translations(csv_path, xml_path, from_lang_key, to_lang_key):\n    # Reads csv\n    csv_file = open(csv_path, \"r\")\n    reader = csv.DictReader(csv_file, delimiter=\";\")\n\n    # Verifies both languages are present in read csv\n    if from_lang_key not in reader.fieldnames:\n        print(f\"'{from_lang_key}' not found in '{os.path.basename(csv_path)}'\")\n        return\n\n    if to_lang_key not in reader.fieldnames:\n        print(f\"'{to_lang_key}' not found in '{os.path.basename(csv_path)}'\")\n        return\n\n    # Parses xml\n    tree = etree.parse(xml_path)\n    context_list = tree.findall(\"context\")\n\n    for context in context_list:\n        if context.find(\"name\").text == \"CardsData\":\n            # Saves all message elements for faster lookup\n            messages = {}\n            for message in context.findall(\"message\"):\n                text = 
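For reference, a minimal sketch (not part of the orcid_mms.py record above) of the external-id entry that mms2orcid assembles for a single DOI; the DOI value here is a made-up example, and the field names follow the ORCID message layout used in the record.

doi = '10.1000/xyz123'  # hypothetical DOI, for illustration only
ext_id = {
    'external-id-type': 'doi',
    'external-id-value': doi,
    'external-id-url': 'https://doi.org/%s' % doi,
    'external-id-relationship': 'SELF',
}
external_ids = {'external-id': [ext_id]}
assert external_ids['external-id'][0]['external-id-url'].endswith(doi)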
\"\\\\n\".join(message.find(\"source\").text.split(\"\\n\")).strip()\n messages[text] = message\n break\n\n for line in reader:\n # Skips empty lines\n if line[from_lang_key] == \"\":\n continue\n\n from_text = line[from_lang_key].strip()\n to_text = line[to_lang_key].strip()\n\n message = messages.get(from_text)\n\n if message is not None:\n tr = message.find(\"translation\")\n tr.text = to_text.replace(\"\\\\n\", \"\\n\")\n # Marks current translation as done\n tr.attrib.pop(\"type\")\n\n tree.write(xml_path, encoding=\"utf-8\", pretty_print=True)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 5:\n print(\"Usage:\")\n print(\"babel-tower.py \")\n print(\" and must be the name of columns contained in \")\n exit()\n\n apply_translations(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])\n","sub_path":"babel-tower.py","file_name":"babel-tower.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"86285027","text":"from flask import Flask, jsonify, request\nfrom pymongo import MongoClient\nimport os\nimport base64\nfrom userScraper import user_scraping\nfrom evaluate import eval\nimport shutil\nfrom create_model import text_pre_process\n\nclient = MongoClient('localhost', 27017)\ndatabase = client.ReachOut\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home_function():\n return \"This is the home route request on a different one!\"\n\n@app.route('/register', methods = [\"POST\"])\ndef register_function():\n try:\n creds = request.get_json()\n collection = database.register\n collection.insert_one(creds)\n return \"Done\"\n except:\n return \"Failed\"\n\n@app.route('/login', methods = [\"POST\"])\ndef login_function():\n try:\n log_creds = request.get_json()\n search = {\"email\" : log_creds[\"email\"]}\n collection = database.register\n search_creds = collection.find_one(search)\n if search_creds is not None:\n if(log_creds[\"password\"] == search_creds[\"password\"]):\n return search_creds[\"name\"]\n else:\n return \"NF\"\n else:\n return \"NF\"\n except:\n return \"NF\"\n\n@app.route('/analyze', methods = [\"POST\"])\ndef analyze_function():\n try:\n analyze_creds = request.get_json()\n result = dict()\n result[\"audio\"] = \"none\"\n result[\"class\"] = \"none\"\n path = \"data/\" + analyze_creds[\"email\"]\n if(analyze_creds[\"audio\"] != \"\"):\n audioString = analyze_creds[\"audio\"]\n audioString = bytes(audioString, encoding=\"utf-8\")\n if not os.path.exists(path):\n os.makedirs(path)\n file = path + \"/temp.wav\"\n with open(file, \"wb+\") as f:\n f.write(base64.decodebytes(audioString))\n result[\"audio\"] = eval(file, 1)\n\n print(analyze_creds[\"link\"])\n if(analyze_creds[\"link\"] != \"\"):\n user_scraping(analyze_creds[\"link\"], path)\n\n file = path + \"/twitter_output.txt\"\n print(analyze_creds[\"messages\"])\n if(analyze_creds[\"messages\"] != \"\"):\n if not os.path.exists(path):\n os.makedirs(path)\n lines = analyze_creds[\"messages\"].split(\";\")\n with open(file, \"a+\") as f:\n for i in lines:\n f.write(i)\n f.write(\"\\n\")\n if os.path.exists(file):\n result[\"class\"] = eval(file, 2)\n result[\"class\"] = round(result[\"class\"], 2)\n print(result)\n result_creds = jsonify(result)\n shutil.rmtree(path)\n return result_creds\n except Exception as e:\n print(e)\n return \"NF\"\n\n\nif __name__ == \"__main__\":\n app.run(port=8000, debug=True, host=\"0.0.0.0\")\n","sub_path":"Flask 
Server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"190878832","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport re\ntarget_file = sys.argv[1]\nvocab_file = sys.argv[2]\nall_words = []\nvocab = []\nwith open(target_file,\"r\",encoding=\"utf-8\") as f:\n\tfor line in f:\n\t\tif line != \"\\n\":\n\t\t\tline = re.sub(\"\\n\",\"\",line)\n\t\t\tline = line.split(\" \")\n\t\t\tall_words.extend(line)\n\nall_words = set(all_words)\nwith open(vocab_file,\"r\",encoding=\"utf-8\") as g:\n\tfor line in g:\n\t\tline = re.sub(\"\\n\",\"\",line)\n\t\tvocab.append(line)\nvocab = set(vocab)\nintersection = all_words & vocab\ndifference = all_words - vocab\nprint(difference)\ncoverage = len(intersection)/len(all_words)\ntag = sys.argv[3]\n\nwith open(\"result_coverage.txt\",\"a\",encoding=\"utf-8\") as h:\n\tresult = tag +\": \"+str(coverage)+\"\\n\"\n\th.write(result)\n\n\n\n","sub_path":"check_info/check_coverage_for_NE.py","file_name":"check_coverage_for_NE.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"569200068","text":"'''\nCreated on Aug 8, 2011\n\n@package: ally core\n@copyright: 2011 Sourcefabric o.p.s.\n@license: http://www.gnu.org/licenses/gpl-3.0.txt\n@author: Gabriel Nistor\n\nProvides the integration of the additional arguments into the main arguments.\n'''\n\nfrom ally.api.type import Input, typeFor\nfrom ally.core.spec.resources import Invoker, Path\nfrom ally.design.processor.attribute import requires, defines\nfrom ally.design.processor.context import Context\nfrom ally.design.processor.handler import HandlerProcessorProceed\n\n# --------------------------------------------------------------------\n\nclass RequestProvide(Context):\n '''\n The request context.\n '''\n # ---------------------------------------------------------------- Defined\n arguments = defines(dict, doc='''\n @rtype: dictionary{string, object}\n The dictionary containing the arguments that will be passes to the invoker that provides the response object.\n ''')\n argumentsOfType = defines(dict, doc='''\n @rtype: dictionary{Type, object}\n A dictionary containing as a key the argument type, this dictionary needs to be populated by the \n processors with any system values that might be used for invoking, the actual use of this arguments depends\n ''')\n\n# --------------------------------------------------------------------\n\nclass ArgumentsPrepareHandler(HandlerProcessorProceed):\n '''\n Implementation for a processor that provides the integration of the additional arguments into the invoke arguments.\n This processor will provide the argument by type.\n '''\n\n def process(self, request:RequestProvide, **keyargs):\n '''\n @see: HandlerProcessorProceed.process\n \n Provides the additional arguments by type to be populated.\n '''\n assert isinstance(request, RequestProvide), 'Invalid request %s' % request\n\n request.argumentsOfType = {}\n request.arguments = {}\n\n# --------------------------------------------------------------------\n\nclass Request(Context):\n '''\n The request context.\n '''\n # ---------------------------------------------------------------- Required\n path = requires(Path)\n invoker = requires(Invoker)\n argumentsOfType = requires(dict)\n arguments = requires(dict)\n\n# --------------------------------------------------------------------\n\nclass 
ArgumentsBuildHandler(HandlerProcessorProceed):\n '''\n Implementation for a processor that provides the integration of the additional arguments into the invoke arguments.\n '''\n\n def process(self, request:Request, **keyargs):\n '''\n @see: HandlerProcessorProceed.process\n \n Transpose the additional arguments into the main arguments.\n '''\n assert isinstance(request, Request), 'Invalid request %s' % request\n if request.invoker is None: return # If there is no invoker it means that no arguments need to be processed\n assert isinstance(request.path, Path), 'Invalid request path %s' % request.path\n assert isinstance(request.invoker, Invoker), 'Invalid request invoker %s' % request.invoker\n\n if request.argumentsOfType:\n for inp in request.invoker.inputs:\n assert isinstance(inp, Input), 'Invalid input %s' % inp\n\n if inp.name in request.arguments: continue\n\n for argType, value in request.argumentsOfType.items():\n if typeFor(argType) == inp.type:\n if inp.name not in request.arguments: request.arguments[inp.name] = value\n break\n\n request.arguments.update(request.path.toArguments(request.invoker))\n","sub_path":"components/ally-core/ally/core/impl/processor/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"404850833","text":"from MarkdownTestCase import *\n\nclass XSSTestCase(MarkdownTestCase):\n\n\tdef test_preventImgSrcError(self):\n\t\tinputText = [\n\t\t\t'',\n\t\t]\n\t\t\n\t\texpectedResult = [\n\t\t\t'
<img src="xss.png" onerror="alert("xss")">\\n'\n\t\t]\n\t\t\n\t\tfor i in range(0, len(inputText)):\n\t\t\tactualResult = parser.parse(inputText[i])\n\t\t\tself.assertEqual(expectedResult[i], actualResult)\n\t\n\tdef test_blockScriptTag(self):\n\t\tinputText = [\n\t\t\t'',\n\t\t\t'',\n\t\t]\n\t\t\n\t\texpectedResult = [\n\t\t\t'',\n\t\t\t'',\n\t\t]\n\t\t\n\t\tfor i in range(0, len(inputText)):\n\t\t\tactualResult = parser.parse(inputText[i])\n\t\t\tself.assertEqual(expectedResult[i], actualResult)\n\t\t\t\n\tdef test_rejectsRawHTML(self):\n\t\tinputText = [\n\t\t\t'',\n\t\t\t'
xss
'\n\t\t]\n\t\t\n\t\texpectedResult = [\n\t\t\t'',\n\t\t\t'',\n\t\t]\n\t\t\n\t\tfor i in range(0, len(inputText)):\n\t\t\tactualResult = parser.parse(inputText[i])\n\t\t\tself.assertEqual(expectedResult[i], actualResult)\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t","sub_path":"markdown/tests/test_XSS.py","file_name":"test_XSS.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"299148472","text":"from queue import Queue\n\nimport scrapy\nfrom scrapy.crawler import CrawlerProcess\n\n\nclass Reddit(scrapy.Spider):\n name = 'reddit'\n queue = None\n\n def parse(self, response):\n for thing in response.css('div.thing'):\n upvotes = int(thing.css('::attr(data-score)').extract_first())\n if upvotes > 5000:\n params = {\n 'subreddit': response.request.url.rsplit('/', 2)[1],\n 'title': thing.css('a.title::text').extract_first(),\n 'votes': upvotes,\n 'thread_link': thing.css('a.title::attr(href)').extract_first(),\n 'comments_link': thing.css('ul.buttons a.comments::attr(href)').extract_first(),\n }\n if params['thread_link'] in params['comments_link']:\n params['thread_link'] = params['comments_link']\n self.queue.put(params)\n\n response.css(\"div.quote\")\n for href in response.css('span.next-button a::attr(href)'):\n yield response.follow(href, self.parse)\n\n\ndef crawler(subreddits):\n \"\"\"\n Scrapy reddit crawler.\n \"\"\"\n queue = Queue()\n urls = ['https://old.reddit.com/r/' + subreddit for subreddit in subreddits.split(';')]\n\n process = CrawlerProcess({'LOG_ENABLED': False})\n spider = Reddit\n spider.queue = queue\n spider.start_urls = urls\n process.crawl(spider)\n process.start()\n\n return queue\n\n\ndef format_thread(data):\n for d in sorted(data, key=lambda x: x['votes'], reverse=True):\n yield ('SubReddit: {subreddit} \\n'\n 'Title: {title} \\n'\n 'Votes: {votes} \\n'\n 'Thread link: {thread_link} \\n'\n 'Comments link: {comments_link} \\n'\n ''.format(subreddit=d['subreddit'], title=d['title'],\n votes=d['votes'], thread_link=d['thread_link'],\n comments_link=d['comments_link']))\n","sub_path":"spider/subreddit.py","file_name":"subreddit.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"119453126","text":"\"\"\" Functions used for generating and interpreting typical filename patterns\nused in rockstar hlist analysis.\n\"\"\"\nimport string\nimport os\nimport fnmatch\nimport numpy as np\n\n\n__all__ = ('base_10_signed_int_to_base_n_signed_int', 'tree_subvol_substring_from_int')\n\n\ndef base_10_signed_int_to_base_n_signed_int(i, n):\n digs = string.digits + string.letters\n\n if i < 0:\n sign = -1\n elif i == 0:\n return digs[0]\n else:\n sign = 1\n i *= sign\n\n digits = []\n while i:\n digits.append(digs[i % n])\n i /= n\n\n if sign < 0:\n digits.append('-')\n\n digits.reverse()\n\n return int(''.join(digits))\n\n\ndef tree_subvol_substring_from_int(i, n):\n \"\"\" From any non-negative integer i, and n subdivisions per dimension,\n return the substring designating the corresponding subvolume.\n\n For an explicit example, the following (i, n) pairs yield the values below:\n\n (2, 5) --> '0_0_2'\n (5, 5) --> '0_1_0'\n (24, 5) --> '0_4_4'\n (25, 5) --> '1_0_0'\n \"\"\"\n error_msg = (\"The `tree_subvol_substring_from_int` function \"\n \"is only intended \\nto work with 3d subvolumes \"\n \"with at most 10 subdivisions per dimension.\\n\"\n \"You selected n = {0} subdivisions, for which there are 
\"\n \"at most {1} different subvolumes, \\n\"\n \"exceeding your request for file number i = {2}.\".format(n, n**3, i))\n\n s = str(base_10_signed_int_to_base_n_signed_int(i, n))\n if len(s) == 1:\n return '0_0_'+s\n elif len(s) == 2:\n return '0_'+s[0]+'_'+s[1]\n elif len(s) == 3:\n return '_'.join(s)\n else:\n raise ValueError(error_msg)\n\n\ndef _binary_fname_from_structured_arr_column(arr, colname):\n \"\"\" For column ``colname`` of an input structured array ``arr``,\n use the dtype to create a string that will be used as the basename\n of the file storing a Numpy binary of the data for that column.\n \"\"\"\n msg = \"Column name ``{0}`` does not appear in input array\".format(colname)\n assert colname in arr.dtype.names, msg\n\n type_string = str(arr[colname].dtype.type.__name__)\n return colname + '_data_' + type_string\n\n\ndef fname_generator(input_dirname, filepat):\n \"\"\" Yield all the files in ``input_dirname`` with basenames matching\n the specified file pattern.\n \"\"\"\n for path, dirlist, filelist in os.walk(input_dirname):\n for name in fnmatch.filter(filelist, filepat):\n yield os.path.join(path, name)\n","sub_path":"filename_utils.py","file_name":"filename_utils.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"396609035","text":"from Projetos.models import Materia, Usuario, Atividade, Gradeestudo, Horarioestudo\r\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\r\nfrom django.views import generic\r\nfrom django.shortcuts import render, redirect, get_object_or_404\r\nfrom .forms import atividades_form, horarios_form\r\n\r\ndef Principal(request):\r\n usuario = Usuario.objects.get(id=1)\r\n\r\n form = atividades_form(request.POST or None)\r\n\r\n if form.is_valid():\r\n m = form.cleaned_data\r\n mm = Materia.objects.filter(nome = m[\"materia\"])\r\n cc = m[\"conteudo\"]\r\n Atividade(aluno = usuario, conteudo = cc, materia = mm[0]).save()\r\n\r\n estudos = Horarioestudo.objects.filter(aluno = usuario).values_list('materias', 'horario')\r\n materias = Materia.objects.filter(id__in=Gradeestudo.objects.filter(aluno=usuario).values_list('materias')).order_by('horario')\r\n atividades = Atividade.objects.filter(aluno=usuario).values_list('materia', 'conteudo')\r\n lista_atividades = []\r\n sla = [] #vai guardar o nome da materia e o conteudo da atividade, respectivamente\r\n lista_estudos = []\r\n for e in estudos:\r\n for m in materias:\r\n if(e[0] == m.id):\r\n sla.append(m.nome)\r\n sla.append(e[1])\r\n lista_estudos.append(sla)\r\n sla = []\r\n\r\n for x in atividades:\r\n a = x[0]\r\n c = Materia.objects.filter(id=a)\r\n if (len(c)>0):\r\n sla.append(c[0].nome)\r\n sla.append(x[1])\r\n lista_atividades.append(sla)\r\n sla = []\r\n return render(request, 'Projetos/principal.html', locals())\r\n\r\ndef horarios_auto(request):\r\n usuario = Usuario.objects.get(id=1)\r\n try:\r\n choice = request.GET[\"escolha\"] # estudar ou revisar. 
retorna o id das opções cujo nome é \"escolha\"\r\n except:\r\n choice = \"\"\r\n estudos = Horarioestudo.objects.filter(aluno = usuario)\r\n materias = Materia.objects.filter(id__in=Gradeestudo.objects.filter(aluno=usuario).values_list('materias')).order_by('horario')\r\n sla = ''\r\n k = 0\r\n materias_sem_h = Materia.objects.exclude(id__in=Horarioestudo.objects.filter(aluno=usuario).values_list('materias'))\r\n if choice == \"estudar\":\r\n for x in ['1','2','3','4','5','6']:\r\n dia = Materia.objects.filter(id__in=Gradeestudo.objects.filter(aluno=usuario).values_list('materias'), horario__startswith = x).order_by('horario') # separando as matérias por dia\r\n if len(dia) > 0 and dia[k] in materias_sem_h: #verifica se\r\n print(\"X:\" + x + \"\\n\")\r\n for y in [\"12\",\"34\",\"56\"]:\r\n if len(Horarioestudo.objects.filter(aluno=usuario, horario = x+\"M\"+y)) == 0: #verifica se ja não existe nada naquele horário\r\n q = Horarioestudo(aluno=usuario, materias=dia[k], horario = x+\"M\"+y) # criando novo horário de estudo\r\n q.save()\r\n k+=1\r\n if k>len(dia)-1:\r\n k = 0\r\n break\r\n elif choice == \"revisar\":\r\n for x in ['1','2','3','4','5','6']:\r\n dia = Materia.objects.filter(id__in=Gradeestudo.objects.filter(aluno=usuario).values_list('materias'), horario__startswith = x).order_by('horario') # separando as matérias por dia\r\n if len(dia) > 0 and dia[k] in materias_sem_h: #verifica se\r\n for y in [\"12\",\"34\",\"56\"]:\r\n if len(Horarioestudo.objects.filter(aluno=usuario, horario = str(int(x)+1)+\"M\"+y)) == 0: #verifica se ja não existe nada naquele horário\r\n q = Horarioestudo(aluno=usuario, materias=dia[k], horario = str(int(x)+1)+\"M\"+y) # criando novo horário de estudo\r\n q.save()\r\n k+=1\r\n if k>len(dia)-1:\r\n k = 0\r\n break\r\n else:\r\n erro = \"Você não fez uma escolha\"\r\n return Principal(request)\r\n\r\ndef horarios_manual(request):\r\n usuario = Usuario.objects.get(id=1)\r\n estudos = Horarioestudo.objects.filter(aluno = usuario).values_list('id', 'materias', 'horario').order_by('horario')\r\n lista_estudos = []\r\n sla = []\r\n erro = \"\"\r\n for e in estudos:\r\n m = Materia.objects.filter(id=e[1])\r\n sla.append(e[0])\r\n sla.append(m[0].nome)\r\n sla.append(e[2])\r\n lista_estudos.append(sla)# sla = [id do horario de estudo, nome da materia que esta estudando, horario da materia]\r\n sla=[]\r\n\r\n try:\r\n apagar = request.POST.getlist(\"apagar\")\r\n except:\r\n apagar = []\r\n erro = \"Não deu certo\"\r\n if len(apagar) > 0:\r\n for x in apagar:\r\n Horarioestudo.objects.filter(aluno=usuario, id=int(x)).delete()\r\n form = horarios_form(request.POST or None)\r\n \r\n if form.is_valid():\r\n m = form.cleaned_data\r\n ida = m['id']\r\n hh = m['horario']\r\n if len(Horarioestudo.objects.filter(id=ida, aluno=usuario)) > 0: # editando o horario de uma materia\r\n h = Horarioestudo.objects.get(id=ida, aluno = usuario)\r\n if len(hh) != 4 and len(hh) != 9 and len(hh) != 0:\r\n erro = \"o horario preenchido não tem 4 ou 9 caracteres\"\r\n else:\r\n if len(Materia.objects.filter(nome = m['materia'], id__in=Gradeestudo.objects.filter(aluno=usuario).values_list('materias'))) > 0: #se mandar nulo, o index range é menor que 0:\r\n mm = Materia.objects.filter(nome = m['materia'], id__in=Gradeestudo.objects.filter(aluno=usuario).values_list('materias')) # recebe a nova materia\r\n else:\r\n mm = Materia.objects.filter(id=h.materias.id, id__in=Gradeestudo.objects.filter(aluno=usuario).values_list('materias')) # recebe a materia que ja estava lá\r\n\r\n 
h.delete()\r\n h.horario = hh\r\n h.id = ida\r\n h.save()\r\n\r\n else: #criando um novo horario de estudo\r\n # {'id': 1, 'horario': '1M12', 'materia': 'Inglês Técnico'}\r\n if len(Horarioestudo.objects.filter(horario=hh, aluno = usuario)) > 0: # verifica se o horario de estudo está preenchido\r\n erro = \"horario já preenchido\"\r\n elif len(hh) != 4 and len(hh) != 9:\r\n erro = \"o horario preenchido não tem 4 caracteres\"\r\n else:\r\n mm = Materia.objects.filter(nome = m['materia'], id__in=Gradeestudo.objects.filter(aluno=usuario).values_list('materias'))\r\n if (len(mm) > 0):\r\n if (len(hh) == 4):\r\n if len(Horarioestudo.objects.filter(horario=hh, aluno = usuario)) > 0:\r\n erro = \"Horário já ocupado\"\r\n else:\r\n Horarioestudo(aluno = usuario, materias=mm[0], horario = hh).save()\r\n elif (len(hh) == 9):\r\n #str = 1M12 2M12\r\n a = hh[:4] #1M12\r\n b = hh[5:] #2M12\r\n if len(Horarioestudo.objects.filter(horario__contains=a, aluno = usuario)) > 0 or len(Horarioestudo.objects.filter(horario__contains=b, aluno = usuario)) > 0:\r\n erro = \"Horario ja ocupado\"\r\n else:\r\n Horarioestudo(aluno = usuario, materias=mm[0], horario = hh).save()\r\n else:\r\n erro = \"Você não esta cadastrado nessa matéria\"\r\n \r\n return render(request, 'Projetos/horarios.html', locals())\r\n\r\ndef apagar_horarios(request):\r\n usuario = Usuario.objects.get(id=1)\r\n estudos = Horarioestudo.objects.filter(aluno = usuario)\r\n materias = Materia.objects.filter(id__in=Gradeestudo.objects.filter(aluno=usuario).values_list('materias')).order_by('horario')\r\n for x in estudos:\r\n x.delete()\r\n return Principal(request)\r\n\r\ndef editar_atividades(request):\r\n lista_atividades = []\r\n sla = []\r\n form = atividades_form(request.POST or None)\r\n usuario = Usuario.objects.get(id=1)\r\n materias = Materia.objects.filter(id__in=Gradeestudo.objects.filter(aluno=usuario).values_list('materias')).order_by('horario')\r\n atividades = Atividade.objects.filter(aluno=usuario).values_list('materia', 'conteudo', 'id')\r\n erro = \"\"\r\n\r\n try:\r\n apagar = request.POST.getlist(\"apagar\")\r\n except:\r\n apagar = []\r\n erro = \"Não deu certo\"\r\n if len(apagar) > 0:\r\n for x in apagar:\r\n Atividade.objects.filter(aluno=usuario, id=int(x)).delete()\r\n else:\r\n try:\r\n choice = request.POST[\"escolha\"] # estudar ou revisar. 
retorna o id das opções cujo nome é \"escolha\"\r\n # Aqui foi usado post por causa no templat, que no form está descrito que o method é um post\r\n except:\r\n choice = \"\"\r\n\r\n if choice == \"apagar_tudo\":\r\n for x in Atividade.objects.filter(aluno=usuario):\r\n x.delete()\r\n elif form.is_valid():\r\n m = form.cleaned_data\r\n cc = m[\"conteudo\"]\r\n mm = Materia.objects.filter(nome = m[\"materia\"], id__in=Gradeestudo.objects.filter(aluno=usuario).values_list('materias'))\r\n if choice == \"apagar_materia\":\r\n for x in Atividade.objects.filter(materia=mm[0],aluno=usuario):\r\n x.delete()\r\n else:\r\n if cc != \"\":\r\n Atividade(materia=mm[0], conteudo=cc, aluno = usuario).save()\r\n \r\n for x in atividades:\r\n a = x[0]\r\n c = Materia.objects.filter(id=a)\r\n if (len(c)>0):\r\n sla.append(c[0].nome) # matéria\r\n sla.append(x[1]) #conteúdo\r\n sla.append(str(x[2])); #id\r\n lista_atividades.append(sla)\r\n sla = []\r\n\r\n return render(request, 'Projetos/atividades.html', locals())","sub_path":"Projetos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"247971102","text":"#!/usr/bin/env python3\nclass CycleGetter():\n def __init__(self, max_time, lift: \"list or func\", start = 0, decrement = True):\n \"\"\"\n :param max_time: 移動回数\n :param start: 初期条件\n :param lift: 遷移の関数(リストの場合はlift[i])\n :return res: max_time 回進んだあとの場所\n front: cycleまでの要素のリスト\n cycle: cycle内の要素のリスト\n end: cycle後の余った部分の要素のリスト\n cnt: cycle回数\n \"\"\"\n self.max_time = max_time\n if hasattr(lift, \"__getitem__\"):\n LIFT = lift\n if decrement:\n LIFT = [None] + LIFT\n max_time += 1\n lift = lambda x: LIFT[x]\n p = start\n front, cycle, end = [], [], []\n cnt = 0\n visit = {p:0}\n L, R = max_time, -1\n P = [p]\n for i in range(1, max_time):\n p = lift(p)\n if p in visit:\n # (L, R) = (サイクルに入るまでに移動した回数, サイクルの終端に着くまでに移動した回数)\n L, R = visit[p], i\n period = R - L\n break\n visit[p] = i\n P.append(p)\n front = P[:L]\n if L != max_time:\n cycle, end = P[L : R], P[L : L + (max_time - L) % period]\n cnt = (max_time - L) // period\n self.front, self.cycle, self.end, self.cnt = front, cycle, end, cnt\n\n def __call__(self):\n return self.front, self.cycle, self.end, self.cnt # self.end[-1] if self.end else self.cycle[-1], \n \n def apply(self, time = None):\n \"\"\"\n :param time: 進む回数\n :return: 進み終わったときの場所\n \"\"\"\n if time is None:\n time = self.max_time\n if time < len(self.front):\n return self.front[time]\n else:\n time -= len(self.front)\n if self.cycle:\n time %= len(self.cycle)\n return self.cycle[time]\n else:\n return self.end()\n \n def sum(self):\n return sum(self.front) + sum(self.cycle) * self.cnt + sum(self.end)\n\nn, x, m = map(int, input().split())\nprint(CycleGetter(n, lambda a: pow(a, 2, m), x).sum())","sub_path":"Contest/ABC179/e/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"171094573","text":"import logging\n\nfrom typing import List\n\nlogger = logging.getLogger(__name__)\n\n\ndef construct_zkp(zero_knowledge_proof: List[dict], schema_id: str) -> list:\n if zero_knowledge_proof == [{}]:\n return []\n req_preds = []\n [\n req_preds.append(\n {\n \"name\": item[\"name\"],\n \"p_type\": item[\"p_type\"],\n \"p_value\": item[\"p_value\"],\n \"restrictions\": [{\"schema_id\": schema_id}],\n }\n )\n for item in zero_knowledge_proof\n ]\n 
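The front/cycle/end decomposition documented by CycleGetter in main.py above can be exercised on a tiny lift table; this check assumes the class is in scope and passes decrement=False so the list is indexed as-is.

lift = [1, 2, 3, 1]                     # lift[i] is the successor of state i
cg = CycleGetter(10, lift, start=0, decrement=False)
front, cycle, end, cnt = cg()
assert front == [0] and cycle == [1, 2, 3] and end == [] and cnt == 3
assert cg.apply(10) == 1                # state reached after 10 steps
assert cg.sum() == 0 + (1 + 2 + 3) * 3  # sum over all 10 visited states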
return req_preds\n\n\ndef construct_indy_proof_request(\n name_proof_request: str, schema_id: str, attr_req, req_preds\n):\n indy_proof_request = {\n \"name\": name_proof_request,\n \"version\": schema_id.split(\":\")[-1],\n \"requested_attributes\": {\n f\"0_{req_attr['name']}_uuid\": req_attr for req_attr in attr_req\n },\n \"requested_predicates\": {\n f\"0_{req_pred['name']}_GE_uuid\": req_pred for req_pred in req_preds\n },\n }\n return indy_proof_request\n","sub_path":"app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"172870306","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 7 18:43:00 2020\n\n@author: kerui\n\"\"\"\n\nimport os\nimport os.path\nimport glob\nimport fnmatch # pattern matching\nimport numpy as np\nfrom numpy import linalg as LA\nfrom random import choice\nfrom PIL import Image\nimport torch\nimport torch.utils.data as data\nimport cv2\nfrom dataloaders import transforms\nfrom dataloaders.pose_estimator import get_pose_pnp\nimport skimage\nimport collections\n\ninput_options = ['d', 'rgb', 'rgbd', 'g', 'gd']\n\n\ndef load_calib():\n \"\"\"\n Temporarily hardcoding the calibration matrix using calib file from 2011_09_26\n \"\"\"\n calib = open(\"dataloaders/calib_cam_to_cam.txt\", \"r\")\n lines = calib.readlines()\n P_rect_line = lines[25]\n\n Proj_str = P_rect_line.split(\":\")[1].split(\" \")[1:]\n Proj = np.reshape(np.array([float(p) for p in Proj_str]),\n (3, 4)).astype(np.float32)\n K = Proj[:3, :3] # camera matrix\n\n # note: we will take the center crop of the images during augmentation\n # that changes the optical centers, but not focal lengths\n K[0, 2] = K[\n 0,\n 2] - 13 # from width = 1242 to 1216, with a 13-pixel cut on both sides\n K[1, 2] = K[\n 1,\n 2] - 11.5 # from width = 375 to 352, with a 11.5-pixel cut on both sides\n return K\n\n\ndef get_paths_and_transform(split, args):\n assert (args.use_d or args.use_rgb\n or args.use_g), 'no proper input selected'\n \n if split == \"train\":\n transform = train_transform\n \n # 1\n glob_pc = os.path.join(\n args.data_folder,\n #'data_depth_velodyne/train/*_sync/proj_depth/velodyne_raw/image_0[2,3]/*.png'\n #'audi_dataset/knn_pc/*.png'\n 'audi_dataset/knn_pc/*.png'\n )\n \n # 2、用于道路分割的label\n glob_road_label = os.path.join(\n args.data_folder,\n #'data_depth_annotated/train/*_sync/proj_depth/groundtruth/image_0[2,3]/*.png'\n #'audi_dataset/road_label/*.png'\n 'audi_dataset/road_label/*.png'\n )\n \n # 3、用于车道线分割的label\n glob_lane_label = os.path.join(\n args.data_folder,\n #'data_depth_annotated/train/*_sync/proj_depth/groundtruth/image_0[2,3]/*.png'\n #'audi_dataset/lane_label/*.png'\n 'audi_dataset/lane_label/*.png'\n )\n \n # 4、\n glob_rgb = os.path.join(\n args.data_folder,\n #'audi_dataset/train_image_2_lane/*.png'\n 'audi_dataset/train_image_2_lane/*.png'\n )\n\n def get_rgb_paths(p):\n rgb_path = '../data/data_rgb/train/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/'\n \n ps = p.split('\\\\')\n# pnew = '/'.join([args.data_folder] + ['data_rgb'] + ps[-6:-4] +\n# ps[-2:-1] + ['data'] + ps[-1:])\n \n pnew = os.path.join(rgb_path, ps[-1])\n \n #print('-------------------1-------------------\\n')\n return pnew\n elif split == \"val\":\n if args.val == \"select\":\n transform = no_transform\n \n # 1\n glob_pc = os.path.join(\n args.data_folder,\n #'data_depth_velodyne/train/*_sync/proj_depth/velodyne_raw/image_0[2,3]/*.png'\n 
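The two helpers in app/utils.py above compose as follows; the schema id is a made-up example in the usual did:2:name:version layout, and both functions are assumed to be imported from that module.

zkp = [{'name': 'age', 'p_type': '>=', 'p_value': 18}]
schema_id = 'WgWxqztrNooG92RXvxSTWv:2:degree_schema:1.0'  # hypothetical id
req_preds = construct_zkp(zkp, schema_id)
proof_request = construct_indy_proof_request('proof_of_age', schema_id, [], req_preds)
assert proof_request['version'] == '1.0'          # taken from the schema id
assert '0_age_GE_uuid' in proof_request['requested_predicates']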
#'audi_dataset/test/knn_pc/*.png'\n 'audi_dataset/test/knn_pc/*.png'\n )\n \n # 2、用于道路分割的label\n glob_road_label = os.path.join(\n args.data_folder,\n #'data_depth_annotated/train/*_sync/proj_depth/groundtruth/image_0[2,3]/*.png'\n #'audi_dataset/test/road_label/*.png'\n 'audi_dataset/test/road_label/*.png'\n )\n \n # 3、用于车道线分割的label\n glob_lane_label = os.path.join(\n args.data_folder,\n #'data_depth_annotated/train/*_sync/proj_depth/groundtruth/image_0[2,3]/*.png'\n #'audi_dataset/test/lane_label/*.png'\n 'audi_dataset/test/lane_label/*.png'\n )\n \n # 4、\n glob_rgb = os.path.join(\n args.data_folder,\n #'audi_dataset/test/train_image_2_lane/*.png'\n 'audi_dataset/test/train_image_2_lane/*.png'\n ) \n \n # 测试, 输入为点云强度和rgb\n elif split == 'test_road_lane_segmentation':\n transform = no_transform\n glob_pc = os.path.join(\n args.data_folder,\n 'reflectance/*.png'\n )\n glob_road_label = None\n glob_lane_label = None\n glob_rgb = os.path.join(\n args.data_folder,\n \"train_image_2_lane/*.png\")\n\n else:\n raise ValueError(\"Unrecognized split \" + str(split))\n\n if glob_lane_label is not None:\n # train or val-full or val-select\n # 点云:原始+强度+高度\n paths_pc = sorted(glob.glob(glob_pc)) \n paths_rgb = sorted(glob.glob(glob_rgb))\n paths_road_label = sorted(glob.glob(glob_road_label))\n paths_lane_label = sorted(glob.glob(glob_lane_label))\n else: \n # test only has d or rgb\n paths_rgb = sorted(glob.glob(glob_rgb))\n paths_road_label = [None]*len(paths_rgb)\n paths_lane_label = [None]*len(paths_rgb)\n paths_pc = sorted(glob.glob(glob_pc))\n \n\n if len(paths_pc) == 0 and len(paths_rgb) == 0 and len(paths_lane_label) == 0 and len(paths_road_label) == 0:\n raise (RuntimeError(\"Found 0 images under {}\".format(paths_lane_label)))\n if len(paths_pc) == 0 and args.use_d:\n raise (RuntimeError(\"Requested sparse depth but none was found\"))\n if len(paths_rgb) == 0 and args.use_rgb:\n raise (RuntimeError(\"Requested rgb images but none was found\"))\n if len(paths_rgb) == 0 and args.use_g:\n raise (RuntimeError(\"Requested gray images but no rgb was found\"))\n if len(paths_rgb) != len(paths_pc) or len(paths_rgb) != len(paths_lane_label):\n raise (RuntimeError(\"Produced different sizes for datasets\"))\n\n paths = {\"rgb\": paths_rgb, \"pc\": paths_pc, \"lane_label\": paths_lane_label, \"road_label\": paths_road_label}\n return paths, transform\n\n#oheight, owidth = 352, 1216\n\ndef rgb_read(filename, args):\n assert os.path.exists(filename), \"file not found: {}\".format(filename)\n img_file = Image.open(filename)\n \n #img_file = img_file.resize((400, 416))\n #img_file = img_file.resize((args.image_height, args.image_width))\n img_file = img_file.resize((args.image_width, args.image_height))\n # 将读入的数据统一成该原始代码所使用的尺寸 oheight, owidth = 352, 1216\n #img_file = img_file.resize((owidth, oheight))\n \n # rgb_png = np.array(img_file, dtype=float) / 255.0 # scale pixels to the range [0,1]\n rgb_png = np.array(img_file, dtype='uint8') # in the range [0,255]\n img_file.close()\n return rgb_png\n\n\ndef depth_read(filename, args):\n # loads depth map D from png file\n # and returns it as a numpy array,\n # for details see readme.txt\n assert os.path.exists(filename), \"file not found: {}\".format(filename)\n img_file = Image.open(filename)\n \n #img_file = img_file.resize((400, 416))\n #img_file = img_file.resize((args.image_height, args.image_width))\n img_file = img_file.resize((args.image_width, args.image_height))\n # 将读入的数据统一成该原始代码所使用的尺寸 oheight, owidth = 352, 1216\n #img_file = 
img_file.resize((owidth, oheight))\n \n depth_png = np.array(img_file, dtype=int)\n img_file.close()\n # make sure we have a proper 16bit depth map here.. not 8bit!\n assert np.max(depth_png) > 255, \\\n \"np.max(depth_png)={}, path={}\".format(np.max(depth_png),filename)\n\n depth = depth_png.astype(np.float) / 256.\n # depth[depth_png == 0] = -1.\n depth = np.expand_dims(depth, -1)\n return depth\n\n\ndef label_read(filename, args):\n # loads depth map D from png file\n # and returns it as a numpy array,\n # for details see readme.txt\n assert os.path.exists(filename), \"file not found: {}\".format(filename)\n img_file = Image.open(filename)\n \n #img_file = img_file.resize((400, 416))\n #img_file = img_file.resize((args.image_height, args.image_width))\n img_file = img_file.resize((args.image_width, args.image_height))\n # 将读入的数据统一成该原始代码所使用的尺寸 oheight, owidth = 352, 1216\n #img_file = img_file.resize((owidth, oheight))\n \n label_png = np.array(img_file, dtype=int)\n img_file.close()\n # make sure we have a proper 16bit depth map here.. not 8bit!\n assert np.max(label_png) < 2, \\\n \"np.max(label_png)={}, path={}\".format(np.max(label_png),filename)\n\n label = label_png.astype(np.float)\n # depth[depth_png == 0] = -1.\n #label = np.expand_dims(label, -1)\n return label\n\n\noheight, owidth = 352, 1216\n\n\ndef drop_depth_measurements(depth, prob_keep):\n mask = np.random.binomial(1, prob_keep, depth.shape)\n depth *= mask\n return depth\n\ndef train_transform(rgb, sparse, target, segmentation_label, args):\n # s = np.random.uniform(1.0, 1.5) # random scaling\n # angle = np.random.uniform(-5.0, 5.0) # random rotation degrees\n do_flip = np.random.uniform(0.0, 1.0) < 0.5 # random horizontal flip\n\n transform_geometric = transforms.Compose([\n # transforms.Rotate(angle),\n # transforms.Resize(s),\n # 将读入的数据统一改成原始代码所使用的尺寸 oheight, owidth = 352, 1216\n #transforms.BottomCrop((oheight, owidth)),\n #transforms.BottomCrop((args.image_height, args.image_width)),\n #Resize((args.image_height, args.image_width)),\n \n transforms.HorizontalFlip(do_flip)\n ])\n if sparse is not None:\n sparse = transform_geometric(sparse)\n target = transform_geometric(target)\n segmentation_label = transform_geometric(segmentation_label)\n if rgb is not None:\n brightness = np.random.uniform(max(0, 1 - args.jitter),\n 1 + args.jitter)\n contrast = np.random.uniform(max(0, 1 - args.jitter), 1 + args.jitter)\n saturation = np.random.uniform(max(0, 1 - args.jitter),\n 1 + args.jitter)\n transform_rgb = transforms.Compose([\n transforms.ColorJitter(brightness, contrast, saturation, 0),\n transform_geometric\n ])\n rgb = transform_rgb(rgb)\n # sparse = drop_depth_measurements(sparse, 0.9)\n\n return rgb, sparse, target, segmentation_label\n\n\ndef val_transform(rgb, sparse, target, segmentation_label, args):\n transform = transforms.Compose([\n #transforms.BottomCrop((oheight, owidth)),\n #transforms.BottomCrop((args.image_height, args.image_width)),\n # 将读入的数据统一改成原始代码所使用的尺寸 oheight, owidth = 352, 1216\n #transforms.BottomCrop((oheight, owidth)),\n #Resize((args.image_height, args.image_width)),\n ])\n if rgb is not None:\n rgb = transform(rgb)\n if sparse is not None:\n sparse = transform(sparse)\n if target is not None:\n target = transform(target)\n \n if segmentation_label is not None:\n segmentation_label = transform(segmentation_label)\n return rgb, sparse, target, segmentation_label\n\n\ndef no_transform(rgb, sparse, target, segmentation_label, args):\n return rgb, sparse, target, 
segmentation_label\n\n\nto_tensor = transforms.ToTensor()\nto_float_tensor = lambda x: to_tensor(x).float()\n\n\ndef handle_gray(rgb, args):\n if rgb is None:\n return None, None\n if not args.use_g:\n return rgb, None\n else:\n img = np.array(Image.fromarray(rgb).convert('L'))\n img = np.expand_dims(img, -1)\n if not args.use_rgb:\n rgb_ret = None\n else:\n rgb_ret = rgb\n return rgb_ret, img\n\nclass KittiDepth(data.Dataset):\n \"\"\"A data loader for the Kitti dataset\n \"\"\"\n def __init__(self, split, args):\n self.args = args\n self.split = split\n paths, transform = get_paths_and_transform(split, args)\n self.paths = paths\n self.transform = transform\n self.K = load_calib()\n self.threshold_translation = 0.1\n\n def __getraw__(self, index):\n # rgb图像\n rgb = rgb_read(self.paths['rgb'][index], self.args) if \\\n (self.paths['rgb'][index] is not None and (self.args.use_rgb or self.args.use_g)) else None\n \n # 点云:原始+强度+高度\n point_cloud = rgb_read(self.paths['pc'][index], self.args) if \\\n (self.paths['pc'][index] is not None and self.args.use_d) else None\n \n # 车道标签\n road_label = label_read(self.paths['road_label'][index], self.args) if \\\n self.paths['road_label'][index] is not None else None\n \n # 车道线标签\n lane_label = label_read(self.paths['lane_label'][index], self.args) if \\\n self.paths['lane_label'][index] is not None else None\n \n return rgb, point_cloud, road_label, lane_label\n\n def __getitem__(self, index):\n rgb, point_cloud, road_label, lane_label = self.__getraw__(index)\n rgb, point_cloud, road_label, lane_label = self.transform(rgb, point_cloud, road_label,lane_label\n ,self.args)\n\n rgb, gray = handle_gray(rgb, self.args)\n #gray = None\n candidates = {\"rgb\":rgb, \"pc\":point_cloud, \"road_label\":road_label, \"lane_label\":lane_label ,\\\n \"g\":gray}\n items = {\n key: to_float_tensor(val)\n for key, val in candidates.items() if val is not None\n }\n\n return items\n\n def __len__(self):\n return len(self.paths['road_label'])\n","sub_path":"v3/dataloaders/completion_segmentation_loader_new_.py","file_name":"completion_segmentation_loader_new_.py","file_ext":"py","file_size_in_byte":14015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"81940545","text":"\nfrom discord.ext import commands\nimport discord\nimport asyncio\nimport platform\nimport logging\n\n\nclass moderation():\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(brief='Creates a guild invite')\n @commands.has_permissions(create_instant_invite=True)\n async def invite(self, ctx):\n \"\"\"Used to create a guild invite in the current channel.\"\"\"\n invite = await ctx.channel.create_invite()\n await ctx.send(invite)\n\n @commands.command(brief='Kicks the specified user')\n @commands.has_permissions(kick_members=True)\n async def kick(self, ctx, userName: discord.User):\n \"\"\"Kicks the specified user from the guild!\"\"\"\n await ctx.guild.kick(userName)\n\n @commands.command(brief='Bans the specified user')\n @commands.has_permissions(ban_members=True)\n async def ban(self, ctx, userName: discord.User, *, stor=None):\n \"\"\"Bans the specified user from the guild!\"\"\"\n await ctx.guild.ban(userName, reason=stor, delete_message_days=1)\n\n @commands.command(brief='Softbans the specified user')\n @commands.has_permissions(ban_members=True)\n async def softban(self, ctx, userName: discord.User, *, stor=None):\n \"\"\"Softbans the specified user from the guild! 
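depth_read in the loader above relies on the KITTI convention that metric depth is stored in a 16-bit PNG as depth * 256, with 0 marking missing measurements; a minimal round-trip sketch of that encoding:

import numpy as np

depth_m = np.array([[0.0, 1.5], [20.25, 0.0]])   # depth in metres
png16 = (depth_m * 256.0).astype(np.uint16)      # what the PNG stores
decoded = png16.astype(np.float64) / 256.0       # what depth_read recovers
assert np.allclose(decoded, depth_m)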
Also deletes their messages from the last 24 hours!\"\"\"\n await ctx.guild.ban(userName, reason=stor, delete_message_days=1)\n await ctx.guild.unban(userName)\n await ctx.send(embed=discord.Embed(title=f'```{userName.name} was softbanned from the guild! Their messages from the last 24 hours have been purged!```', color=0xc71a))\n\n @commands.group(brief='Channel related commands')\n @commands.has_permissions(manage_channels=True)\n async def channel(self, ctx):\n \"\"\"Usage is {prefix}channel >command<\\nChannel commands are as follows:\"\"\"\n pass\n @channel.command(name='create', brief='Creates a channel')\n async def c1(self, ctx, channel):\n \"\"\"Used to create a channel.\"\"\"\n if channel in [I.name for I in ctx.guild.channels]:\n output = 'That channel already exists!'\n else:\n channel = await ctx.guild.create_text_channel(channel)\n output = f\"The channel '{channel}' has been created!\"\n await ctx.send(embed=discord.Embed(title=output, color=0xc71a))\n @channel.command(name='delete', brief='Deletes a channel')\n async def c2(self, ctx, channel: discord.TextChannel):\n \"\"\"Used to delete a channel!\"\"\"\n if channel in [I for I in ctx.guild.channels]:\n channel = await channel.delete()\n output = f\"The channel '{channel}' has been deleted!\"\n await ctx.send(embed=discord.Embed(title=output, color=0xc71a))\n\n @commands.group(brief='Role related commands')\n @commands.has_permissions(manage_roles=True)\n async def role(self, ctx):\n \"\"\"Usage is {prefix}role >command<\\nRole commands are as follows:\"\"\"\n pass\n @role.command(brief='Creates a role')\n async def create(self, ctx, *, crole: str):\n \"\"\"Used to create a role. Role created will have all default permissions\"\"\"\n if crole in [I.name for I in ctx.guild.roles]:\n output = 'That role already exists!'\n else:\n role = await ctx.guild.create_role(name=crole)\n output = f'{role} created successfully!'\n await ctx.send(embed=discord.Embed(title=output, color=0xc71a))\n @role.command(brief='Deletes a role')\n async def delete(self, ctx, role: discord.Role):\n \"\"\"Used to delete a role.\"\"\"\n if role in ctx.guild.roles:\n await role.delete()\n output = f'{role} deleted successfully!'\n else:\n output = 'No such role exists!'\n await ctx.send(embed=discord.Embed(title=output, color=0xc71a))\n @role.command(brief='Adds a role to a user')\n async def add(self, ctx, username:discord.Member, *, role: discord.Role):\n \"\"\"Used to add a role to a user.\"\"\"\n if role in username.roles:\n output = 'The user specified already has that role!'\n else:\n await username.add_roles(role)\n output = f'{role} added to {username}!'\n await ctx.send(embed=discord.Embed(title=output, color=0xc71a))\n @role.command(brief='Removes a role from a user')\n async def remove(self, ctx, modrole, *, username:discord.Member):\n \"\"\"Used to remove a role from a user.\"\"\"\n role = discord.utils.get(ctx.guild.roles, name=f'{modrole}')\n if role in username.roles:\n await username.remove_roles(role)\n output = f'{role} removed from {username}!'\n else:\n output = 'The user specified does not have that role!'\n await ctx.send(embed=discord.Embed(title=output, color=0xc71a))\n\n @commands.command(brief='Deletes a specified number of messages')\n @commands.has_permissions(manage_messages=True)\n async def purge(self, ctx, num=None):\n \"\"\"Purges a specified number of messages from the current channel.\"\"\"\n await ctx.message.delete()\n username = ctx.message.mentions\n try:\n purge = int(num)\n except (ValueError, TypeError):\n 
purge = 100\n        if len(username) == 0:\n            if num == None:\n                deleted = await ctx.channel.purge(limit=100, check=lambda m: m.author == ctx.bot.user)\n            else:\n                deleted = await ctx.channel.purge(limit=int(purge))\n        else:\n            deleted = await ctx.channel.purge(limit=int(purge), check=lambda m: m.author == username[0])\n        await ctx.send(embed=discord.Embed(title=f'Deleted {len(deleted)} message(s)', color=0xc71a), delete_after=10)\n\n    @commands.command(brief='Changes a users nickname')\n    @commands.has_permissions(manage_nicknames=True)\n    async def setnick(self, ctx, userName: discord.Member, *, nick):\n        \"\"\"Changes the nickname of the specified user.\"\"\"\n        await userName.edit(nick=nick)\n        await ctx.send(embed=discord.Embed(title=f\"Changed {userName.name}'s nickname to {nick}\", color=0xc71a))\n\n    \n    @commands.command(brief='Sets/creates a muterole')\n    @commands.has_permissions(manage_roles=True)\n    async def setmute(self, ctx, role=None):\n        async with self.bot.pool.acquire() as conn:\n            prefix, muterole = await conn.fetchrow('SELECT prefix, muterole FROM guilds WHERE guild = ($1)', ctx.guild.id)\n        check = False\n        if muterole == 0:\n            for rle in ctx.guild.roles:\n                if rle.name == role:\n                    role = rle\n                    output = f\"I will now use '{role}' when muting users!\"\n                    check = True\n                    break\n            if check is False:\n                if role is None:\n                    role = await ctx.guild.create_role(name='Mute')\n                    output = \"A mute role has been created!\"\n                else:\n                    role = await ctx.guild.create_role(name=role)\n                    output = f\"A new mute role has been created! I will now use '{role}' when muting users!\"\n                for channel in ctx.guild.channels:\n                    await channel.set_permissions(role, send_messages=False)\n            role = role.id\n        else:\n            try:\n                for role in ctx.guild.roles:\n                    if role.id == muterole:\n                        await role.delete()\n                        break\n                output = f'Mute role has been deleted! You will need to set the mute role again using \"{prefix}setmute [role]\"'\n            except:\n                pass\n                output = f'I was unable to delete the mute role! The role will need to be deleted manually if you choose to do so! You will need to set the mute role again using \"{prefix}setmute [role]\"'\n            role = 0\n        await conn.execute('UPDATE guilds SET muterole = ($1) WHERE guild = ($2)', role, ctx.guild.id)\n        await ctx.send(embed=discord.Embed(description=output, color=0x9013fe))\n\n\n    @commands.command(brief='Punishes a user and cleans up the channel')\n    @commands.has_permissions(manage_roles=True)\n    async def subdue(self, ctx, user: discord.Member):\n        \"\"\"Mutes the specified user and deletes their last 100 messages!\"\"\"\n        async with self.bot.pool.acquire() as conn:\n            prefix, muterole = await conn.fetchrow('SELECT prefix, muterole FROM guilds WHERE guild = ($1)', ctx.guild.id)\n        async with ctx.typing():\n            for _ in range(100):\n                try: \n                    msg = await ctx.channel.history().get(author__name=user.name) \n                    await msg.delete()\n                except:\n                    break\n        if muterole != 0:\n            for role in ctx.guild.roles:\n                if role.id == muterole:\n                    await user.add_roles(role)\n            output = f'{user.mention} has been muted! Their last 100 messages have been deleted!'\n        else:\n            output = f'There is currently no mute role set! Use \"{prefix}setmute [role]\" to set a mute role! 
{user.mention} has had their last 100 messages deleted!'\n        await ctx.send(embed=discord.Embed(description=output, color=0x9013fe))\n\n\n    @commands.command(brief='Mutes a specified user')\n    @commands.has_permissions(manage_roles=True)\n    async def mute(self, ctx, user: discord.Member):\n        \"\"\"Mutes the specified user\"\"\"\n        async with self.bot.pool.acquire() as conn:\n            prefix, muterole = await conn.fetchrow('SELECT prefix, muterole FROM guilds WHERE guild = ($1)', ctx.guild.id)\n        if muterole != 0:\n            for role in ctx.guild.roles:\n                if role.id == muterole:\n                    if role not in user.roles:\n                        await user.add_roles(role)\n                        output = f'{user.mention} has been muted!'\n                        break\n                    else:\n                        output = f'That user is already muted! Use \"{prefix}unmute [user]\" to unmute the user!'\n                        break\n        else:\n            output = f'There is currently no mute role set! Use \"{prefix}setmute [role]\" to set a mute role!'\n        try:\n            await ctx.send(embed=discord.Embed(description=output, color=0x9013fe))\n        except:\n            pass\n\n    \n    @commands.command(brief='Unmutes a specified user')\n    @commands.has_permissions(manage_roles=True)\n    async def unmute(self, ctx, user: discord.Member):\n        \"\"\"Unmutes the specified user\"\"\"\n        async with self.bot.pool.acquire() as conn:\n            prefix, muterole = await conn.fetchrow('SELECT prefix, muterole FROM guilds WHERE guild = ($1)', ctx.guild.id)\n        if muterole != 0:\n            for role in ctx.guild.roles:\n                if role.id == muterole:\n                    if role in user.roles:\n                        await user.remove_roles(role)\n                        output = f'{user.mention} has been unmuted!'\n                        break\n                    else:\n                        output = f'That user is not currently muted! Use \"{prefix}mute [user]\" to mute the user!'\n        else:\n            output = f'There is currently no mute role set! Use \"{prefix}setmute [role]\" to set a mute role!'\n        try:\n            await ctx.send(embed=discord.Embed(description=output, color=0x9013fe))\n        except:\n            pass\n\n\n    @commands.command(brief='Causes the bot to leave the guild')\n    @commands.has_permissions(manage_guild=True)\n    async def exit(self, ctx):\n        \"\"\"Causes the bot to exit the guild! Must have permission MANAGE_GUILD\"\"\"\n        await ctx.send('Leaving...')\n        await ctx.guild.leave()\n\ndef setup(bot):\n    bot.add_cog(moderation(bot))\n","sub_path":"src/modules/moderation.py","file_name":"moderation.py","file_ext":"py","file_size_in_byte":11873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"442876904","text":"\r\n# Author: tHaninG\r\n# Email: tHan.ninG0@outlook.com\r\n# Date: 2017-10-22 21:55:00\r\n# Last Modified time: 2017-11-06 14:19:26\r\n# -*- coding: utf-8 -*-\r\n\r\nprint('作者: tHaninG\\n邮箱: tHan.ninG@outlook.com\\n有任何问题请联系我. 
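Every command in the moderation cog above is gated by commands.has_permissions; a small sketch of the flag objects it compares against (discord.py raises MissingPermissions at invoke time when a required flag is absent).

import discord

perms = discord.Permissions(manage_roles=True, manage_messages=True)
assert perms.manage_roles and perms.manage_messages
assert not perms.administrator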
\\n载入中......')\r\n\r\nfrom os import system\r\nfrom tkinter import Tk\r\nfrom tkinter import filedialog\r\nfrom openpyxl import load_workbook\r\n\r\n# set time format Y-M-D-T\r\ntime_format = '%Y-%m-%d'\r\n\r\n# initialize Tk()\r\nroot = Tk()\r\nroot.withdraw()\r\n\r\n# get chart's file path\r\nsystem(\"cls\")\r\ninput('按 Enter 选择文件\\n')\r\nfpath = filedialog.askopenfilename()\r\nrecord = load_workbook(fpath) # load xlsx\r\nsheet_name = record.get_sheet_names() # get sheet names\r\n\r\n\r\nn_sheet = len(sheet_name) # number of sheets\r\n\r\nif n_sheet == 1:\r\n sheet = record.get_sheet_by_name(sheet_name[0]) # load sheet\r\nelse:\r\n option = []\r\n brace1 = ''\r\n brace2 = ''\r\n for i in range(1, n_sheet + 1):\r\n option.append(chr(64 + i))\r\n # left-justified\r\n brace1 = brace1 + ' {}'.ljust(2 + len(sheet_name[i - 1]))\r\n brace2 = brace2 + ' {}'\r\n while(1):\r\n print('\\n' + brace1[1:].format(*option) +\r\n '\\n' + brace2[1:].format(*sheet_name))\r\n chose_sheet = str.upper(input('输入字母选择表格:\\n'))\r\n system(\"cls\")\r\n if chose_sheet in option:\r\n sheet = record.get_sheet_by_name(\r\n sheet_name[ord(chose_sheet) - 65]) # lshee\r\n break\r\n else:\r\n print('\\nERROR:Invalid Input\\n请重试')\r\n\r\n\r\n# Check all rows and update given ID's record\r\ndef update(ID, c_IDname, c_time, c_name, sheet):\r\n \"\"\"\r\n Update record of student ID\r\n\r\n Arguments:\r\n ID -- student ID ('2017222010032')\r\n c_IDname -- the column name of ID ('B')\r\n c_time -- the column name of times ('J')\r\n c_name -- the column name of student name\r\n sheet -- data sheet\r\n\r\n Return:\r\n 0/1 -- whether update success or not\r\n \"\"\"\r\n n = 0\r\n backup = []\r\n produceID = None\r\n\r\n # find the position of ID\r\n for rowNum in range(2, sheet.max_row):\r\n produceID = sheet[c_IDname + str(rowNum)].value\r\n if str(produceID).find(ID) >= 0:\r\n n = n + 1\r\n # ensure the ID is unique\r\n if n == 1:\r\n sheet[c_time + str(rowNum)].value = chr(10003) # check\r\n backup = str(rowNum)\r\n else:\r\n sheet[c_time + backup].value = None\r\n break\r\n\r\n if n == 0:\r\n print('\\nERROR:can\\'t find this ID!!! \\n请重试')\r\n return 0\r\n\r\n elif n == 1:\r\n print('\\n%s 更新成功' %\r\n sheet[c_name + backup].value)\r\n\r\n # prevent wrong ID\r\n while (1):\r\n opt = input(\r\n '\\n按 Enter 确认\\n(输入 C 取消)\\n')\r\n if opt == 'C':\r\n sheet[c_time + backup].value = None\r\n print('\\n更新取消')\r\n return 0\r\n elif opt == '':\r\n system(\"cls\")\r\n return 1\r\n else:\r\n print('\\nERROR:Invalid Input\\n请重试')\r\n\r\n else:\r\n print('\\nERROR:ID is not unique!!! \\n按 Enter 重试')\r\n return 0\r\n\r\n\r\n# Check all rows and update given ID's record and grade\r\ndef update_grade(ID, c_IDname, c_time, c_name, sheet):\r\n \"\"\"\r\n Update grade of student ID\r\n\r\n Arguments:\r\n ID -- student ID ('2017222010032')\r\n c_IDname -- the column name of ID ('B')\r\n c_time -- the column name of times ('J')\r\n c_name -- the column name of student name\r\n sheet -- data sheet\r\n\r\n Return:\r\n 0/1 -- whether update success or not\r\n \"\"\"\r\n n = 0\r\n backup = []\r\n produceID = None\r\n\r\n # find the position of ID\r\n for rowNum in range(2, sheet.max_row):\r\n produceID = sheet[c_IDname + str(rowNum)].value\r\n if str(produceID).find(ID) >= 0:\r\n n = n + 1\r\n # ensure the ID is unique\r\n if n == 1:\r\n backup = str(rowNum)\r\n else:\r\n break\r\n if n == 0:\r\n print('\\nERROR:can\\'t find this ID!!! 
\\n请重试')\r\n return 0\r\n\r\n elif n == 1:\r\n while (1):\r\n print('\\n请输入 %s 的成绩:' %\r\n sheet[c_name + backup].value)\r\n print('(输入 C 取消)')\r\n grade_input = input()\r\n try:\r\n grade_bu = int(grade_input)\r\n if grade_bu >= 0 and grade_bu <= 100:\r\n sheet[c_time + str(backup)].value = str(grade_bu)\r\n system(\"cls\")\r\n print('%s 更新成功\\n' %\r\n sheet[c_name + backup].value)\r\n return 1\r\n else:\r\n print('\\nERROR:Invalid Input\\n请重试')\r\n except:\r\n if grade_input == 'C':\r\n print('\\n更新取消')\r\n return 0\r\n else:\r\n print('\\nERROR:Invalid Input\\n请重试')\r\n\r\n else:\r\n print('\\nERROR:ID is not unique!!! \\n请重试')\r\n return 0\r\n\r\n# Check all rows and update given ID's record and rank\r\n\r\n\r\ndef update_rank(ID, c_IDname, c_time, c_name, sheet):\r\n \"\"\"\r\n Update rank of student ID\r\n\r\n Arguments:\r\n ID -- student ID ('2017222010032')\r\n c_IDname -- the column name of ID ('B')\r\n c_time -- the column name of times ('J')\r\n c_name -- the column name of student name\r\n sheet -- data sheet\r\n\r\n Return:\r\n 0/1 -- whether update success or not\r\n \"\"\"\r\n n = 0\r\n backup = []\r\n produceID = None\r\n\r\n # find the position of ID\r\n for rowNum in range(2, sheet.max_row):\r\n produceID = sheet[c_IDname + str(rowNum)].value\r\n if str(produceID).find(ID) >= 0:\r\n n = n + 1\r\n # ensure the ID is unique\r\n if n == 1:\r\n backup = str(rowNum)\r\n else:\r\n break\r\n if n == 0:\r\n print('\\nERROR:can\\'t find this ID!!! \\n请重试')\r\n return 0\r\n\r\n elif n == 1:\r\n while (1):\r\n print('\\n请输入 %s 的等级:' %\r\n sheet[c_name + backup].value)\r\n print('(输入 CC 取消)')\r\n rank_input = str.upper(input())\r\n\r\n if 'ABCD+-'.find(rank_input) >= 0:\r\n sheet[c_time + str(backup)].value = rank_input\r\n system(\"cls\")\r\n print('%s 更新成功\\n' %\r\n sheet[c_name + backup].value)\r\n return 1\r\n elif rank_input=='CC':\r\n return 0\r\n else:\r\n print('\\nERROR:Invalid Input\\n请重试')\r\n\r\n else:\r\n print('\\nERROR:ID is not unique!!! 
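# update(), update_grade() and update_rank() in this record repeat the same
# scan-for-a-unique-ID loop, and each stops at range(2, sheet.max_row), which
# skips the last data row. A shared helper sketch (the name and the return
# convention are assumptions, not from the original file):
def find_unique_row(ID, c_IDname, sheet):
    # Returns the row number holding ID, or None when missing or duplicated.
    hits = []
    for rowNum in range(2, sheet.max_row + 1):   # include the final row
        if str(sheet[c_IDname + str(rowNum)].value).find(ID) >= 0:
            hits.append(rowNum)
    return hits[0] if len(hits) == 1 else None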
\\n请重试')\r\n return 0\r\n\r\n\r\n# chose record what\r\nwhile(1):\r\n while(1):\r\n rg = str.upper(input('A:成绩 B:等级(A,B,C) C:签到\\n选择任务:\\n'))\r\n if 'ABC'.find(rg)>=0:\r\n opt = input('\\n任务为 '+str(rg)+'\\n按 Enter 确定\\n(输入 \\'CC\\' 取消)\\n')\r\n if opt=='':\r\n system(\"cls\")\r\n break\r\n elif opt=='CC':\r\n pass\r\n else:\r\n print('\\nERROR:Invalid Input\\n请重试')\r\n else:\r\n print('\\nERROR:Invalid Input\\n请重试')\r\n\r\n # input index of ID,name,...\r\n index_name = ['学号列号',\r\n '时间列号', '姓名列号']\r\n index = ['','','']\r\n num = -1\r\n for i in index_name:\r\n num = num + 1\r\n while(1):\r\n index[num]=str.upper(input('输入 ' + str(i) + ':\\n'))\r\n while(1):\r\n opt = input('\\n按 Enter 确定\\n(输入 \\'CC\\' 取消)\\n')\r\n if opt=='' or opt=='CC':\r\n break\r\n else:\r\n print('\\nERROR:Invalid Input\\n请重试')\r\n if opt=='':\r\n system(\"cls\")\r\n break\r\n\r\n\r\n # record check\r\n if rg == 'C':\r\n # update all data\r\n k = 0\r\n while(1):\r\n # input student ID\r\n ID = str(\r\n input('请输入 学号:\\n(输入 stop 退出更新)\\n'))\r\n if ID == 'stop':\r\n system(\"cls\")\r\n break\r\n\r\n # update data\r\n k = k + update(ID, index[0], index[1], index[2], sheet)\r\n break\r\n\r\n # record grade\r\n elif rg == 'A':\r\n # update all data\r\n k = 0\r\n while(1):\r\n # input student ID\r\n ID = str(\r\n input('请输入 学号:\\n(输入 stop 退出更新)\\n'))\r\n if ID == 'stop':\r\n system(\"cls\")\r\n break\r\n\r\n # update data\r\n k = k + update_grade(ID, index[0], index[1], index[2], sheet)\r\n break\r\n\r\n # record rank\r\n elif rg == 'B':\r\n # update all data\r\n k = 0\r\n while(1):\r\n # input student ID\r\n ID = str(\r\n input('请输入 学号:\\n(输入 stop 退出更新)\\n'))\r\n if ID == 'stop':\r\n system(\"cls\")\r\n break\r\n\r\n # update data\r\n k = k + update_rank(ID, index[0], index[1], index[2], sheet)\r\n break\r\n\r\n else:\r\n print('\\nERROR:Invalid Input\\n请重试')\r\n\r\n\r\n# save updated recrodchart\r\nwhile(1):\r\n sv = str.upper(input('是否保存? Y/N\\n'))\r\n if sv == 'Y':\r\n while(1):\r\n cover = str.upper(input(\r\n '\\n是否覆盖原文件? Y/N\\n(输入 \\'N\\' 将在原文件的路径保存一个备份)\\n'))\r\n if cover == 'Y':\r\n try:\r\n record.save(fpath)\r\n except:\r\n record.save(fpath[:-5] + '(备份)' + '.xlsx')\r\n print('\\n无法覆盖原文件\\n已保存为备份')\r\n break\r\n elif cover == 'N':\r\n record.save(fpath[:-5] + '(备份)' + '.xlsx')\r\n break\r\n else:\r\n print('\\nERROR:Invalid Input\\n请重试')\r\n\r\n system(\"cls\")\r\n print('任务完成!!!' 
+\r\n '\\n更新数据总数: ' + str(k))\r\n input('\\n按 Enter 退出\\n')\r\n break\r\n\r\n elif sv == 'N':\r\n system(\"cls\")\r\n print('任务取消!!!')\r\n input('\\n按 Enter 退出\\n')\r\n break\r\n else:\r\n print('\\nERROR:Invalid Input\\n请重试\\n')\r\n","sub_path":"python/recordchart/recordchart2.0.py","file_name":"recordchart2.0.py","file_ext":"py","file_size_in_byte":10447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"302608631","text":"import multiprocessing as mp\nimport re\nfrom random import choice\n\nimport dateparser\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef _get_proxy():\n html = requests.get('https://free-proxy-list.net/').text\n soup = BeautifulSoup(html, 'lxml')\n trs = soup.find('table', id='proxylisttable').find_all('tr')[1:100]\n\n proxies = []\n\n for tr in trs:\n\n tds = tr.find_all('td')\n if 'yes' in tds[6].text.strip():\n ip = tds[0].text.strip()\n port = tds[1].text.strip()\n schema = 'https'\n proxy = {'schema': schema, 'address': ip + ':' + port}\n proxies.append(proxy)\n\n return choice(proxies)\n\n\ndef _get_html(url: str):\n p = _get_proxy()\n proxy = {p['schema']: p['address']}\n try:\n r = requests.get(url, proxies=proxy, timeout=5)\n r.encoding = 'cp1251'\n if r.ok:\n return r.text\n return None\n except (requests.ConnectTimeout, requests.ConnectionError):\n _get_html(url)\n\n\ndef _get_datetime_seance(date_str: str):\n\n date_str = date_str.replace('\\n', '')\n date_str = ' '.join(date_str.split())\n date_str = date_str.replace('начало в', 'в')\n\n return dateparser.parse(date_str)\n\n\ndef get_seances(url: str, seance_list: list):\n\n html = _get_html(url)\n if not html:\n return seance_list\n # with open('index.html', 'rb') as f: # for offline test\n # html = f.read()\n soup = BeautifulSoup(html, 'lxml')\n regex = re.compile('.*Repertoire.*')\n seances = soup.find_all('div', {'class': regex})\n for seance in seances:\n event = (seance.find('div', class_='LongerName') or\n seance.find('div', class_='ShortName') or\n seance.find('div', class_='LongName'))\n try:\n event_dict = {\n 'event_name': event.text.strip().strip('\\n'),\n 'event_url': event.find('a')['href'],\n 'starts_at': _get_datetime_seance(seance.find('div', class_='DateTime').text),\n }\n seance_list.append(event_dict)\n except Exception as e:\n print(seance)\n print(e)\n continue\n\n\ndef _get_pages(url: str):\n\n urls = [url]\n next_page = 'https://www.bigteatr.com/cat/229/page/{}/'\n for i in range(2, 20):\n url = next_page.format(i)\n text = _get_html(url)\n if not text:\n break\n # with open('index.html', 'rb') as f: # for offline test\n # text = f.read()\n soup = BeautifulSoup(text, 'lxml')\n check_page_exists = soup.find('div', class_='RepertoireLight') or soup.find('div', class_='RepertoireDark')\n\n if not check_page_exists:\n break\n\n urls.append(url)\n\n return urls\n\n\ndef main():\n\n urls = _get_pages('https://www.bigteatr.com/afisha/')\n pool = mp.Pool(processes=10)\n manager = mp.Manager()\n seance_list = manager.list()\n [pool.apply_async(get_seances, args=[url, seance_list]) for url in urls]\n pool.close()\n pool.join()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"bigteatr.py","file_name":"bigteatr.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"234847851","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport sys, os, re, datetime\nimport getpass\nimport argparse\nimport cx_Oracle\nimport mysql.connector 
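# In the bigteatr record above, _get_html retries itself on connection errors
# but discards the recursive result (`_get_html(url)` without `return`), so a
# retried fetch always yields None. A bounded-retry sketch of the same idea
# (the helper name and the attempt count are illustrative assumptions):
import requests

def get_html_with_retries(url, attempts=3):
    for _ in range(attempts):
        p = _get_proxy()   # proxy picker defined in the record above
        try:
            r = requests.get(url, proxies={p['schema']: p['address']}, timeout=5)
            r.encoding = 'cp1251'
            if r.ok:
                return r.text
        except (requests.ConnectTimeout, requests.ConnectionError):
            continue
    return None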
\nfrom mysql.connector import Error\nimport requests\nfrom configparser import ConfigParser\nimport multimodule\nfrom multiprocessing import Pool\nfrom acds import configuration\n\ndef main(args_temp):\n\taction = (''.join(list(args_temp.keys())))\n\targs = {action: {'ip': None, 'hostname': None, 'sd': None, 'uplink': None, 'office': None, 'structura': None, 'classid': None, 'vendor': None, 'model': None, 'serial': None, 'status': None}}\n\tfor k in args_temp[action]:\n\t\tif k in args[action].keys():\n\t\t\targs[action].update ({\n\t\t\t\tk: args_temp[action][k],\n\t\t\t})\n\n\tuser = getattr(configuration, 'ASTU_USER')\n\ttry:\n\t\tastu_password = getattr(configuration, 'ASTU_PASS')\n\texcept:\n\t\tprint (f\"add ASTU_PASSWORN in configuration file\")\n\t\tsys.exit()\n\n\tos.environ['LINES'] = \"100\"\n\tos.environ['COLUMNS'] = \"80\"\n\n\tdate_now = (datetime.datetime.now().strftime(\"%d.%m.%Y\"))\n\tnodeinfo = 'none'\n\tt = multimodule.FastModulAut()\n\n\tif args[action]['ip'] != None:\n\t\tip = args[action]['ip']\n\telse:\n\t\tprint ('Введите IP --ip')\n\t\tsys.exit()\n\n\thostname = args[action]['hostname']\n\tsd = args[action]['sd']\n\tuplink = args[action]['uplink']\n\toffice = args[action]['office']\n\tstructura = args[action]['structura']\n\tclassid = args[action]['classid']\n\tvendor = args[action]['vendor']\n\tmodel = args[action]['model']\n\tserial = args[action]['serial']\n\tstatus = args[action]['status']\n\n\toptions = {}\n\toptions[action] = {}\n\n\tt.sql_connect('connect')\n\tt.sql_update(f\"INSERT INTO guspk.logs (scr_name, DEVICEID, who, message) VALUES ('astu_data', '{ip}', '{user}', '{action}') \")\n\tif model == None and action == 'insert':\n\t\tif structura != None and vendor != None:\n\t\t\tquery = \"SELECT DEVICEMODELNAME, MODELID FROM guspk.host_model WHERE TYPE_ID LIKE '{}' AND VENDORID LIKE '{}'\".format(classid, vendor)\n\t\t\t# print (query)\n\t\t\tcursor = t.sql_select(query, 'full')\n\t\t\tprint ('Available models list')\n\t\t\tfor models in cursor:\n\t\t\t\tprint (\"{:25}{:5}\".format(models[0], models[1]))\n\t\t\tsys.exit()\n\t\telse:\n\t\t\tprint('Please select only --str and --vendor. 
And I show you available models list.')\n\t\t\tsys.exit()\n\n\tif action == 'update':\n\t\tuplink = None\n\t\tdevice_data = t.sql_select(f\"SELECT DEVICEID FROM guspk.host WHERE IPADDMGM like '{ip}' or NETWORKNAME like '{ip}'\", 'full')\n\t\tdeviceid = device_data[0][0]\n\t\tif uplink != None:\n\t\t\tquery = f\"SELECT NODEID, BUILDING, NODENAME FROM guspk.host_node WHERE NODEID LIKE '{uplink}'\"\n\t\telse:\n\t\t\tquery = f\"SELECT b.NODEID, b.BUILDING, b.NODENAME FROM guspk.host a, guspk.host_node b WHERE a.IPADDMGM LIKE '{ip}' AND a.NODEID = b.NODEID\"\n\t\toptions[action].update ({\n\t\t\t'id': deviceid,\n\t\t\t'deviceid': deviceid,\n\t\t})\n\n\tif action == 'insert':\n\t\tif uplink != None:\n\t\t\tquery = f\"SELECT NODEID, BUILDING, NODENAME FROM guspk.host_node WHERE NODEID LIKE '{uplink}'\"\n\t\telse:\n\t\t\tprint ('Input --uplink')\n\t\t\tsys.exit()\n\n\t# print (query)\n\tcursor = t.sql_select(query, 'full') \n\t# print (cursor)\n\tt.sql_connect('disconnect')\n\tfor (nodeid, nodebuilding, nodename) in cursor:\n\t\tnodeinfo = (nodeid, nodebuilding, nodename)\n\t# print (nodeinfo)\n\tif nodeinfo == 'none':\n\t\tprint ('Uplink not founded in astu')\n\t\tsys.exit()\n\toptions[action].update({\n\t\t'user':user, \n\t\t'password':astu_password, \n\t\t'ipmgm':ip, \n\t\t'networkname':hostname, \n\t\t'devicedescr':sd, \n\t\t'nodeid':nodeid, \n\t\t'nodename':nodename, \n\t\t'parentid':nodeid, \n\t\t'parenttype':nodeid, \n\t\t'nodebuilding':nodebuilding, \n\t\t'dateinstall':date_now, \n\t\t'datazip':date_now, \n\t\t'strukturalevelid':structura, \n\t\t'objectclassidid':classid, \n\t\t'objectclassid':classid, \n\t\t'vendorid':vendor, \n\t\t'objectmodelid':model, \n\t\t'serialnumber':serial, \n\t\t'devicestatus':status,\n\t\t'office':office,\n\t})\n\n\n\n\tif action == 'update':\n\t\tsave = update(options, action)\n\t\tif save == '':\n\t\t\t# save = 'OK'\n\t\t\tprint ('OK')\n\t\telse:\n\t\t\tprint (save)\n\tif action == 'insert':\n\t\tsave = insert(options, action)\n\t\t# print (save)\n\t\tif save == '':\n\t\t\t# save = 'OK'\n\t\t\tprint ('OK')\n\t\telse:\n\t\t\tprint (save)\n\n\treturn save\n\n\ndef update(options, action):\n\tlogpass = {}\n\tlogpass['user_login'] = options[action]['user']\n\tlogpass['user_pwd'] = options[action]['password']\n\t\n\tdevice_edit = { \n\t\t\"tree\" : \"mdevice\",\n\t}\n\tdevice_edit['id'] = options[action]['id']\n\n\tcheck_uniq = {}\n\tdevicefield_save = {}\n\tif options[action]['networkname'] != None:\n\t\t# check_uniq['networkname'] = options[action]['networkname']\n\t\tdevicefield_save['networkname'] = options[action]['networkname']\n\tif options[action]['deviceid'] != None:\n\t\tcheck_uniq['deviceid'] = options[action]['deviceid']\n\t\tdevicefield_save['deviceid'] = options[action]['deviceid']\n\tif options[action]['ipmgm'] != None:\n\t\tcheck_uniq['ipmgm'] = options[action]['ipmgm']\n\t\tdevicefield_save['ipmgm'] = options[action]['ipmgm']\n\tif \toptions[action]['devicedescr'] != None:\n\t\tcheck_uniq['devicedescr'] = options[action]['devicedescr']\n\t\tdevicefield_save['devicedescr'] = options[action]['devicedescr']\n\tif options[action]['objectclassidid'] != None:\n\t\tcheck_uniq['objectclassidid'] = options[action]['objectclassidid']\n\t\tdevicefield_save['objectclassidid'] = options[action]['objectclassidid']\n\tif options[action]['strukturalevelid'] != None:\n\t\tcheck_uniq['strukturalevelid'] = options[action]['strukturalevelid']\n\t\tdevicefield_save['strukturalevelid'] = options[action]['strukturalevelid']\n\tif options[action]['vendorid'] != 
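# The astu_data queries above interpolate ip, classid and vendor directly
# into SQL strings, which invites quoting bugs and SQL injection. A
# parameterized sketch using DB-API placeholders (%s for mysql.connector,
# :name style for cx_Oracle; the cursor behind t.sql_select is assumed here):
query = "SELECT DEVICEID FROM guspk.host WHERE IPADDMGM LIKE %s OR NETWORKNAME LIKE %s"
cursor.execute(query, (ip, ip))   # the driver quotes the values safely
row = cursor.fetchone()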
None:\n\t\tcheck_uniq['vendorid'] = options[action]['vendorid']\n\t\tdevicefield_save['vendorid'] = options[action]['vendorid']\n\tif options[action]['objectmodelid'] != None:\n\t\tcheck_uniq['objectmodelid'] = options[action]['objectmodelid']\n\t\tdevicefield_save['objectmodelid'] = options[action]['objectmodelid']\n\tif options[action]['serialnumber'] != None:\n\t\tcheck_uniq['serialnumber'] = options[action]['serialnumber']\n\t\tdevicefield_save['serialnumber'] = options[action]['serialnumber']\n\tif options[action]['devicestatus'] != None:\n\t\tcheck_uniq['devicestatus'] = options[action]['devicestatus']\n\t\tdevicefield_save['devicestatus'] = options[action]['devicestatus']\n\tif options[action]['parentid'] != None:\n\t\tcheck_uniq['parentid'] = options[action]['parentid']\n\t\tdevicefield_save['parentid'] = options[action]['parentid']\n\t\tdevicefield_save['nodeid'] = options[action]['nodeid']\n\tif options[action]['parenttype'] != None:\n\t\tcheck_uniq['parenttype'] = options[action]['parenttype']\n\t\tdevicefield_save['parenttype'] = options[action]['parenttype']\n\tif options[action]['office'] != None:\n\t\tdevicefield_save['office'] = options[action]['office']\n\tcheck_uniq['user_login_saved'] = options[action]['user']\n\t\n\t\n\n\twith requests.Session() as s:\n\t\tauth = s.post('http://10.184.67.68/', data = logpass) #If auth ok, return '1' # , headers = headers\n\t\tcookie_phpid = {'PHPSESSID': requests.utils.dict_from_cookiejar(s.cookies)['PHPSESSID']}\n\t\tcheck_uniq['PHPSESSID'] = cookie_phpid['PHPSESSID']\n\t\tdevice_edit = s.post('http://10.184.67.68/device/edit', data = device_edit)\n\t\tdevicefield_save = s.post('http://10.184.67.68/device/device_field/save', data = devicefield_save)\n\t\tdevicefield_get = s.get('http://10.184.67.68/device/device_field/get')\n\t\tcheck_uniq = s.post('http://10.184.67.68/device/check_uniq', data = check_uniq)\n\t\tsave = s.post('http://10.184.67.68/device/save')\n\n\tresult = 'ok'\n\tif save.text:\n\t\tresult = save.text\n\treturn result\n\ndef insert(options, action):\n\tlogpass = {}\n\tlogpass['user_login'] = options[action]['user']\n\tlogpass['user_pwd'] = options[action]['password']\n\n\tdevice_edit = { \n\t\t\"parent\" : \"node\",\n\t\t\"tree\" : \"mdevice\",\n\t}\n\tdevice_edit['parentid'] = options[action]['nodeid']\n\n\tdevicefield_save_b = { \n\t\t\"deviceid\" : \"\",\n\t\t\"devicetype\" : \"device\",\n\t\t\"office\" : \"\",\n\t\t\"numinstall\" : \"\",\n\t\t\"sysname\" : \"\",\n\t\t\"ipmgmid\" : \"\",\n\t\t\"qosid\" : \"0\",\n\t\t\"forvlan\" : \"0\",\n\t\t\"foragg\" : \"0\",\n\t\t\"timeout\" : \"0\",\n\t\t\"port\" : \"\",\n\t\t\"mezhregion\" : \"0\",\n\t\t\"deviceelectricpoint\" : \"0\",\n\t\t\"avalmodeid\" : \"1\",\n\t\t\"meid\" : \"\",\n\t}\n\tdevicefield_save_b['parentid'] = options[action]['parentid']\n\tdevicefield_save_b['parenttype'] = options[action]['parenttype']\n\tdevicefield_save_b['nodeid'] = options[action]['nodeid']\n\tdevicefield_save_b['dateinstall'] = options[action]['dateinstall']\n\tdevicefield_save_b['networkname'] = options[action]['networkname']\n\tdevicefield_save_b['ipmgm'] = options[action]['ipmgm']\n\tdevicefield_save_b['devicedescr'] = options[action]['devicedescr']\n\tdevicefield_save_b['strukturalevelid'] = options[action]['strukturalevelid']\n\n\tfizdevice_edit = { \n\t\t\"tree\" : \"mdevice\",\n\t}\n\tvendor_load = {}\n\tvendor_load['objectclassid'] = options[action]['objectclassid']\n\t\n\tmodel_load = {}\n\tmodel_load['vendor'] = options[action]['vendorid']\n\tmodel_load['objectclassid'] = 
options[action]['objectclassid']\n\t\n\tdevicefield_save_a = {\n\t\t\"deviceid\" : \"\",\n\t\t\"devicetype\" : \"device\",\n\t\t\"zipregionid\" : \"\",\n\t\t\"zipnodeid\" : \"\",\n\t\t\"deviceinvnumber\" : \"\",\n\t\t\"kategoriyaid\" : \"4\",\n\t\t\"bssid\" : \"\",\n\t\t\"macadr\" : \"\",\n\t\t\"covertype\" : \"\",\n\t\t\"lastmileid\" : \"\",\n\t}\n\tdevicefield_save_a['parentid'] = options[action]['parentid']\n\tdevicefield_save_a['parenttype'] = options[action]['parenttype']\n\tdevicefield_save_a['objectclassidid'] = options[action]['objectclassidid']\n\tdevicefield_save_a['datazip'] = options[action]['datazip']\n\tdevicefield_save_a['vendorid'] = options[action]['vendorid']\n\tdevicefield_save_a['objectmodelid'] = options[action]['objectmodelid']\n\tdevicefield_save_a['serialnumber'] = options[action]['serialnumber']\n\tdevicefield_save_a['devicestatus'] = options[action]['devicestatus']\n\tdevicefield_save_a['office'] = options[action]['office']\n\n\tcheck_uniq = {\n\t\t\"cityname\" : \"null\",\n\t\t\"streetname\" : \"null\",\n\t\t\"nodeoffice\" : \"null\",\n\t\t\"region\" : \"null\",\n\t\t\"avalmodeid\" : \"1\",\t#24*7,\n\t\t\"forvlan\" : \"0\",\n\t\t\"foragg\" : \"0\",\n\t\t\"timeout\" : \"0\",\n\t\t\"treeinfo\" : \"null\",\n\t\t\"deviceid\" : \"false\",\n\t\t\"devicetype\" : \"device\",\n\t\t# \"office\" : \"false\",\n\t\t\"numinstall\" : \"false\",\n\t\t\"sysname\" : \"false\",\n\t\t\"ipmgmid\" : \"false\",\n\t\t\"qosid\" : \"false\",\n\t\t\"port\" : \"false\",\n\t\t\"mezhregion\" : \"0\",\n\t\t\"deviceelectricpoint\" : \"false\",\n\t\t\"meid\" : \"false\",\n\t\t\"user_login_saved\" : \"yuzhakov-da\",\t\t\t#login\n\t\t\"zipregionid\" : \"false\",\n\t\t\"zipnodeid\" : \"false\",\n\t\t\"deviceinvnumber\" : \"false\",\n\t\t\"kategoriyaid\" : \"4\",\n\t\t\"bssid\" : \"false\",\n\t\t\"macadr\" : \"false\",\n\t\t\"covertype\" : \"false\",\n\t\t\"lastmileid\" : \"false\",\n\t}\n\tcheck_uniq['nodename'] = options[action]['nodename']\n\tcheck_uniq['nodebuilding'] = options[action]['nodebuilding']\n\tcheck_uniq['nodeid'] = options[action]['nodeid']\n\tcheck_uniq['networkname'] = options[action]['networkname']\n\tcheck_uniq['dateinstall'] = options[action]['dateinstall']\n\tcheck_uniq['parentid'] = options[action]['parentid']\n\tcheck_uniq['parenttype'] = options[action]['parenttype']\n\tcheck_uniq['ipmgm'] = options[action]['ipmgm']\n\tcheck_uniq['devicedescr'] = options[action]['devicedescr']\n\tcheck_uniq['strukturalevelid'] = options[action]['strukturalevelid']\n\tcheck_uniq['objectclassidid'] = options[action]['objectclassidid']\n\tcheck_uniq['datazip'] = options[action]['datazip']\n\tcheck_uniq['vendorid'] = options[action]['vendorid']\n\tcheck_uniq['objectmodelid'] = options[action]['objectmodelid']\n\tcheck_uniq['serialnumber'] = options[action]['serialnumber']\n\tcheck_uniq['devicestatus'] = options[action]['devicestatus']\n\tcheck_uniq['office'] = options[action]['office']\n\n\twith requests.Session() as s:\n\t\tauth = s.post('http://10.184.67.68/', data = logpass) #If auth ok, return '1' # , headers = headers\n\t\tcookie_phpid = {'PHPSESSID': requests.utils.dict_from_cookiejar(s.cookies)['PHPSESSID']}\n\t\tcheck_uniq['PHPSESSID'] = cookie_phpid['PHPSESSID']\n\t\n\t\tdevice_edit = s.post('http://10.184.67.68/device/edit', data = device_edit) #, headers = header_devicelist\n\t\tdevicefield_save_b = s.post('http://10.184.67.68/device/device_field/save', data = devicefield_save_b) #, headers = headersssave\n\t\tfizdevice_edit = s.post('http://10.184.67.68/device/fizdevice/edit', data = 
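# update() and insert() in this record repeat the same login-then-post dance
# against http://10.184.67.68. A sketch of a shared session helper (the
# function name is an assumption; the endpoint and form fields come from the
# record itself):
import requests

def open_astu_session(user, password):
    s = requests.Session()
    s.post('http://10.184.67.68/', data={'user_login': user, 'user_pwd': password})
    phpsessid = requests.utils.dict_from_cookiejar(s.cookies).get('PHPSESSID')
    return s, phpsessid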
fizdevice_edit)\n\t\tvendor_load = s.post('http://10.184.67.68/vendor/load', data = vendor_load)\n\t\tmodel_load = s.post('http://10.184.67.68/model/load', data = model_load)\n\t\tdevicefield_save_a = s.post('http://10.184.67.68/device/device_field/save', data = devicefield_save_a)\n\t\tdevicefield_get = s.get('http://10.184.67.68/device/device_field/get')\n\t\n\t\tcheck_uniq = s.post('http://10.184.67.68/device/check_uniq', data = check_uniq) #, headers = headers\n\t\tsave = s.post('http://10.184.67.68/device/save') #, headers = headersssave\n\n\tresult = 'ok'\n\tif save.text:\n\t\tresult = save.text\n\treturn result\n\n\n\ndef start():\n\tparser = createParser()\n\tnamespace = parser.parse_args()\n\targs = {}\n\targs[namespace.req] = {\n\t\t'action' : namespace.req,\n\t\t'ip' : namespace.ip,\n\t\t'hostname' : namespace.hostname,\n\t\t'sd' : namespace.sd,\n\t\t'uplink' : namespace.uplink,\n\t\t'office' : namespace.office,\n\t\t'structura' : namespace.str,\n\t\t'classid' : namespace.classid,\n\t\t'vendor' : namespace.vendor,\n\t\t'model' : namespace.model,\n\t\t'serial' : namespace.serial,\n\t\t'status' : namespace.status,\n\t}\n\tresult = main(args)\n\n\nif __name__ == \"__main__\":\n\tstart()","sub_path":"scripts/astu_data.py","file_name":"astu_data.py","file_ext":"py","file_size_in_byte":12991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"411379447","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\n################################################################################\n##\n## BFS\n##\n################################################################################\nfrom collections import deque\nclass Solution:\n def longestConsecutive(self, root: Optional[TreeNode]) -> int:\n queue = deque([(root,1)])\n max_len = 0\n while queue:\n curr, path_len = queue.popleft()\n max_len = max(max_len,path_len)\n if curr.left:\n if curr.left.val ==curr.val+1:\n left_path_len = path_len+1\n else:\n left_path_len = 1\n queue.append((curr.left,left_path_len))\n if curr.right:\n if curr.right.val ==curr.val+1:\n right_path_len = path_len+1\n else:\n right_path_len = 1\n queue.append((curr.right,right_path_len))\n return max_len\n\n################################################################################\n##\n## Recursion\n##\n################################################################################\nclass Solution:\n def longestConsecutive(self, root: Optional[TreeNode]) -> int:\n def helper(root):\n nonlocal max_len\n if not root:\n return 0\n left_len = helper(root.left)\n right_len = helper(root.right)\n if root.left and root.left.val == root.val+1:\n left_len += 1\n else:\n left_len = 1\n if root.right and root.right.val == root.val+1:\n right_len += 1\n else:\n right_len = 1\n max_len = max(max_len, left_len,right_len)\n return max(left_len,right_len)\n max_len =0\n helper(root)\n return max_len\n","sub_path":"Problem298_Binary_Tree_Longest_Consecutive_Sequence.py","file_name":"Problem298_Binary_Tree_Longest_Consecutive_Sequence.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"376824101","text":"import numpy as np\nfrom math import sin, cos\n\npoint = {'type':'point', 'color':(255,0,0), 'lw':1, 'body':(10,10)}\npoints = {'type':'points', 'color':(255,0,0), 'lw':1, 
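# Both solutions in the Problem298 record above assume a non-empty tree: the
# BFS version seeds deque([(root, 1)]) and immediately touches curr.left. A
# guarded sketch of the same BFS (the function name is illustrative):
from collections import deque

def longest_consecutive(root):
    if root is None:            # LeetCode passes None for an empty tree
        return 0
    queue, best = deque([(root, 1)]), 1
    while queue:
        node, length = queue.popleft()
        best = max(best, length)
        for child in (node.left, node.right):
            if child:
                queue.append((child, length + 1 if child.val == node.val + 1 else 1))
    return best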
'body':[(10,10),(100,200)]}\nline = {'type':'line', 'color':(255,0,0), 'lw':1, 'style':'-', 'body':[(10,10),(100,200),(200,200)]}\nlines = {'type':'lines', 'color':(255,0,0), 'lw':1, 'style':'-', 'body':[[(10,10),(100,200),(200,200)],[(150,10),(50,250)]]}\npolygon = {'type':'polygon', 'color':(255,0,0), 'fcolor':(255,255,0), 'lw':1, 'style':'o', 'body':[(10,10),(100,200),(200,200)]}\npolygons = {'type':'polygons', 'color':(255,0,0), 'fcolor':(255,255,0,30), 'fill':False, 'lw':1, 'style':'o', 'body':[[(10,10),(100,200),(200,200)],[(150,10),(50,250),(288,0)]]}\ncircle = {'type':'circle', 'color':(255,0,0), 'fcolor':(255,255,0), 'fill':False, 'body':(100,100,50)}\ncircles = {'type':'circles', 'color':(255,0,0), 'fcolor':(255,255,0), 'fill':False, 'body':[(100,100,50),(300,300,100)]}\nellipse = {'type':'ellipse', 'color':(255,0,0), 'fcolor':(255,255,0), 'fill':False, 'body':(100,100,100,50,1)}\nellipses = {'type':'ellipses', 'color':(255,0,0), 'fcolor':(255,255,0), 'fill':False, 'body':[(100,100,100,50,1),(200,250,50,100,3.14)]}\nrectangle = {'type':'rectangle', 'color':(255,0,0), 'fcolor':(255,255,0), 'fill':True, 'body':(100,100,80,50)}\nrectangles = {'type':'rectangles', 'color':(255,0,0), 'fcolor':(255,255,0), 'fill':False, 'body':[(100,100,80,50),(200,200,80,100)]}\ntext = {'type':'text', 'color':(255,255,0), 'fcolor':(0,0,0), 'size':8, 'pt':True, 'body':(100,200,'id=0')}\ntexts = {'type':'texts', 'color':(255,255,0), 'fcolor':(0,0,0), 'size':8, 'pt':True, 'body':[(100,200,'id=0'),(180,250,'id=1')]}\n\nlayer = {'type':'layer', 'num':-1, 'clolor':(255,255,0), 'fcolor':(255,255,255), 'fill':False, \n\t\t\t'body':[point, points, line, lines, polygon, polygons, circle, circles, ellipse, ellipses, rectangle, rectangles, text, texts]}\n\t\t\nlayers = {'type':'layers', 'num':-1, 'clolor':(255,255,0), 'fcolor':(255,255,255), 'fill':False, \n\t'body':{1:points, 2:line, 3:layer}}\n\ndef plot(pts, dc, f, **key):\n\tpen, brush = dc.GetPen(), dc.GetBrush()\n\twidth, color = pen.GetWidth(), pen.GetColour()\n\tfcolor, style = brush.GetColour(), brush.GetStyle()\n\t\n\tif 'color' in pts: \n\t\tpen.SetColour(pts['color'])\n\tif 'fcolor' in pts:\n\t\tbrush.SetColour(pts['fcolor'])\n\tif 'lw' in pts:\n\t\tpen.SetWidth(pts['lw'])\n\tif 'fill' in pts:\n\t\tbrush.SetStyle((106,100)[pts['fill']])\n\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\t\n\tif pts['type'] == 'point':\n\t\tpen.SetWidth(1)\n\t\tbrush.SetStyle(100)\n\t\tbrush.SetColour(pen.GetColour())\n\t\tdc.SetPen(pen)\n\t\tdc.SetBrush(brush)\n\t\tr = pts['r'] if 'r' in pts else 2\n\t\tx, y = f(*pts['body'])\n\t\tdc.DrawEllipse (x-r,y-r,r*2,r*2)\n\t\tpen.SetWidth(pts['lw'] if 'lw' in pts else width)\n\t\tbrush.SetStyle((106,100)[pts['fill']] if 'fill' in pts else style)\n\t\tbrush.SetColour(pts['fc'] if 'fc' in pts else fcolor)\n\t\tdc.SetPen(pen)\n\t\tdc.SetBrush(brush)\n\telif pts['type'] in {'points','line','polygon'}:\n\t\tlst, plst = [], []\n\t\tr = pts['r'] if 'r' in pts else 2\n\t\tfor p in pts['body']:\n\t\t\tx, y = f(*p)\n\t\t\tlst.append((x-r,y-r,r*2,r*2))\n\t\t\tplst.append((x,y))\n\t\tisline = 'style' in pts and '-' in pts['style']\n\t\tispoint = 'style' in pts and 'o' in pts['style']\n\t\tif pts['type'] == 'polygon':\n\t\t\tdc.DrawPolygon(plst)\n\t\t\n\t\tif isline or pts['type'] == 'line':\n\t\t\tdc.DrawLines(plst)\n\t\t\n\t\tif pts['type']=='points' or 
ispoint:\n\t\t\tpen.SetWidth(1)\n\t\t\tbrush.SetStyle(100)\n\t\t\tbrush.SetColour(pen.GetColour())\n\t\t\tdc.SetPen(pen)\n\t\t\tdc.SetBrush(brush)\n\t\t\tdc.DrawEllipseList(lst)\n\t\t\tpen.SetWidth(pts['lw'] if 'lw' in pts else width)\n\t\t\tbrush.SetStyle((106,100)[pts['fill']] if 'fill' in pts else style)\n\t\t\tbrush.SetColour(pts['fc'] if 'fc' in pts else fcolor)\n\t\t\tdc.SetPen(pen)\n\t\t\tdc.SetBrush(brush)\n\telif pts['type'] in {'lines','polygons'}:\n\t\tlst, plst = [], []\n\t\tr = pts['r'] if 'r' in pts else 2\n\t\tfor i in pts['body']:\n\t\t\tline = []\n\t\t\tfor p in i:\n\t\t\t\tx, y = f(*p)\n\t\t\t\tlst.append((x-r,y-r,r*2,r*2))\n\t\t\t\tline.append((x,y))\n\t\t\tplst.append(line)\n\t\tisline = 'style' in pts and '-' in pts['style']\n\t\tispoint = 'style' in pts and 'o' in pts['style']\n\t\tif pts['type'] == 'polygons':\n\t\t\tdc.DrawPolygonList(plst)\n\t\t\n\t\tif isline or pts['type'] == 'lines':\n\t\t\tfor line in plst:\n\t\t\t\tdc.DrawLines(line)\n\t\t\n\t\tif pts['type']=='points' or ispoint:\n\t\t\tpen.SetWidth(1)\n\t\t\tbrush.SetStyle(100)\n\t\t\tbrush.SetColour(pen.GetColour())\n\t\t\tdc.SetPen(pen)\n\t\t\tdc.SetBrush(brush)\n\t\t\tdc.DrawEllipseList(lst)\n\t\t\tpen.SetWidth(pts['lw'] if 'lw' in pts else width)\n\t\t\tbrush.SetStyle((106,100)[pts['fill']] if 'fill' in pts else style)\n\t\t\tbrush.SetColour(pts['fc'] if 'fc' in pts else fcolor)\n\t\t\tdc.SetPen(pen)\n\t\t\tdc.SetBrush(brush)\n\n\tpen.SetWidth(width)\n\tpen.SetColour(color)\n\tbrush.SetColour(fcolor)\n\tbrush.SetStyle(style)\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\ndef draw_circle(pts, dc, f, **key):\n\tpen, brush = dc.GetPen(), dc.GetBrush()\n\twidth, color = pen.GetWidth(), pen.GetColour()\n\tfcolor, style = brush.GetColour(), brush.GetStyle()\n\t\n\tif 'color' in pts: \n\t\tpen.SetColour(pts['color'])\n\tif 'fcolor' in pts:\n\t\tbrush.SetColour(pts['fcolor'])\n\tif 'lw' in pts:\n\t\tpen.SetWidth(pts['lw'])\n\tif 'fill' in pts:\n\t\tbrush.SetStyle((106,100)[pts['fill']])\n\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\n\tif pts['type'] == 'circle':\n\t\tx, y ,r = pts['body']\n\t\tx, y = f(x, y)\n\t\tdc.DrawCircle(x, y, r*key['k'])\n\tif pts['type'] == 'circles':\n\t\tlst = []\n\t\tfor x, y ,r in pts['body']:\n\t\t\tx, y = f(x, y)\n\t\t\tr *= key['k']\n\t\t\tlst.append((x-r,y-r,r*2,r*2))\n\t\tdc.DrawEllipseList(lst)\n\n\tpen.SetWidth(width)\n\tpen.SetColour(color)\n\tbrush.SetColour(fcolor)\n\tbrush.SetStyle(style)\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\ndef make_ellipse(l1, l2, ang):\n\tm = np.array([[l1*cos(-ang),-l2*sin(-ang)],\n\t\t\t\t [l1*sin(-ang),l2*cos(-ang)]])\n\ta = np.linspace(0, np.pi*2, 36)\n\txys = np.array((np.cos(a), np.sin(a)))\n\treturn np.dot(m, xys).T\n\ndef draw_ellipse(pts, dc, f, **key):\n\tpen, brush = dc.GetPen(), dc.GetBrush()\n\twidth, color = pen.GetWidth(), pen.GetColour()\n\tfcolor, style = brush.GetColour(), brush.GetStyle()\n\t\n\tif 'color' in pts: \n\t\tpen.SetColour(pts['color'])\n\tif 'fcolor' in pts:\n\t\tbrush.SetColour(pts['fcolor'])\n\tif 'lw' in pts:\n\t\tpen.SetWidth(pts['lw'])\n\tif 'fill' in pts:\n\t\tbrush.SetStyle((106,100)[pts['fill']])\n\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\n\tif pts['type'] == 'ellipse':\n\t\tx, y ,l1, l2, a = pts['body']\n\t\telp = make_ellipse(l1,l2,a)\n\t\telp = elp*key['k']+f(x,y)\n\t\tdc.DrawPolygon(elp)\n\tif pts['type'] == 'ellipses':\n\t\tlst = []\n\t\tfor x, y, l1, l2, a in pts['body']:\n\t\t\telp = 
make_ellipse(l1,l2,a)\n\t\t\tlst.append(elp*key['k']+f(x,y))\n\t\tdc.DrawPolygonList(lst)\n\t\t\n\n\tpen.SetWidth(width)\n\tpen.SetColour(color)\n\tbrush.SetColour(fcolor)\n\tbrush.SetStyle(style)\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\ndef draw_rectangle(pts, dc, f, **key):\n\tpen, brush = dc.GetPen(), dc.GetBrush()\n\twidth, color = pen.GetWidth(), pen.GetColour()\n\tfcolor, style = brush.GetColour(), brush.GetStyle()\n\t\n\tif 'color' in pts: \n\t\tpen.SetColour(pts['color'])\n\tif 'fcolor' in pts:\n\t\tbrush.SetColour(pts['fcolor'])\n\tif 'lw' in pts:\n\t\tpen.SetWidth(pts['lw'])\n\tif 'fill' in pts:\n\t\tbrush.SetStyle((106,100)[pts['fill']])\n\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\n\tif pts['type'] == 'rectangle':\n\t\tx, y, w, h = pts['body']\n\t\tx, y = f(x, y)\n\t\tw, h = w*key['k'], h*key['k']\n\t\tdc.DrawRectangle(x-w/2, y-h/2, w, h)\n\tif pts['type'] == 'rectangles':\n\t\tlst = []\n\t\tfor x, y, w, h in pts['body']:\n\t\t\tx, y = f(x, y)\n\t\t\tw, h = w*key['k'], h*key['k']\n\t\t\tlst.append((x-w/2, y-h/2, w, h))\n\t\tdc.DrawRectangleList(lst)\n\n\tpen.SetWidth(width)\n\tpen.SetColour(color)\n\tbrush.SetColour(fcolor)\n\tbrush.SetStyle(style)\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\ndef draw_text(pts, dc, f, **key):\n\tpen, brush, font = dc.GetPen(), dc.GetBrush(), dc.GetFont()\n\twidth, color = pen.GetWidth(), pen.GetColour()\n\tfcolor, style = brush.GetColour(), brush.GetStyle()\n\tsize = font.GetPointSize()\n\ttcolor = dc.GetTextForeground()\n\tbcolor = dc.GetTextBackground()\n\t\n\tif 'color' in pts: \n\t\tpen.SetColour(pts['color'])\n\t\tdc.SetTextForeground(pts['color'])\n\tbrush.SetColour(pen.GetColour())\n\tbrush.SetStyle(100)\n\tif 'fcolor' in pts:\n\t\tprint('hahaha')\n\t\tdc.SetTextBackground(pts['fcolor'])\n\tif 'size' in pts:\n\t\tfont.SetPointSize(pts['size'])\n\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\tdc.SetFont(font)\n\n\tif pts['type'] == 'text':\n\t\tx, y, text = pts['body']\n\t\tx, y = f(x, y)\n\t\tdc.DrawText(text, x+3, y+3)\n\t\tif not 'pt' in pts or pts['pt']:\n\t\t\tdc.DrawEllipse(x-2,y-2,4,4)\n\tif pts['type'] == 'texts':\n\t\ttlst, clst, elst = [], [], []\n\t\tfor x, y, text in pts['body']:\n\t\t\tx, y = f(x, y)\n\t\t\ttlst.append(text)\n\t\t\tclst.append((x+3, y+3))\n\t\t\telst.append((x-2, y-2, 4, 4))\n\t\tdc.DrawTextList(tlst, clst)\n\t\tif not 'pt' in pts or pts['pt']:\n\t\t\tdc.DrawEllipseList(elst)\n\n\tfont.SetPointSize(size)\n\tpen.SetColour(color)\n\tbrush.SetColour(fcolor)\n\tbrush.SetStyle(style)\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\tdc.SetFont(font)\n\tdc.SetTextForeground(tcolor)\n\tdc.SetTextBackground(bcolor)\n\ndraw_dic = {'points':plot, 'point':plot, 'line':plot, 'polygon':plot, 'lines':plot, 'polygons':plot,\n\t\t\t'circle':draw_circle, 'circles':draw_circle, 'ellipse':draw_ellipse, 'ellipses':draw_ellipse,\n\t\t\t'rectangle':draw_rectangle, 'rectangles':draw_rectangle, 'text':draw_text, 'texts':draw_text}\n\ndef draw(obj, dc, f, **key): draw_dic[obj['type']](obj, dc, f, **key)\n\ndef draw_layer(pts, dc, f, **key):\n\tpen, brush = dc.GetPen(), dc.GetBrush()\n\twidth, color = pen.GetWidth(), pen.GetColour()\n\tfcolor, style = brush.GetColour(), brush.GetStyle()\n\t\n\tif 'color' in pts: \n\t\tpen.SetColour(pts['color'])\n\tif 'fcolor' in pts:\n\t\tbrush.SetColour(pts['fcolor'])\n\tif 'lw' in pts:\n\t\tpen.SetWidth(pts['lw'])\n\tif 'fill' in pts:\n\t\tbrush.SetStyle((106,100)[pts['fill']])\n\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\n\tfor i in pts['body']:draw(i, dc, f, 
**key)\n\n\tpen.SetWidth(width)\n\tpen.SetColour(color)\n\tbrush.SetColour(fcolor)\n\tbrush.SetStyle(style)\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\ndraw_dic['layer'] = draw_layer\n\ndef draw_layers(pts, dc, f, **key):\n\tpen, brush = dc.GetPen(), dc.GetBrush()\n\twidth, color = pen.GetWidth(), pen.GetColour()\n\tfcolor, style = brush.GetColour(), brush.GetStyle()\n\t\n\tif 'color' in pts: \n\t\tpen.SetColour(pts['color'])\n\tif 'fcolor' in pts:\n\t\tbrush.SetColour(pts['fcolor'])\n\tif 'lw' in pts:\n\t\tpen.SetWidth(pts['lw'])\n\tif 'fill' in pts:\n\t\tbrush.SetStyle((106,100)[pts['fill']])\n\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\tif key['cur'] in pts['body']:\n\t\tdraw(pts['body'][key['cur']], dc, f, **key)\n\n\tpen.SetWidth(width)\n\tpen.SetColour(color)\n\tbrush.SetColour(fcolor)\n\tbrush.SetStyle(style)\n\tdc.SetPen(pen)\n\tdc.SetBrush(brush)\n\ndraw_dic['layers'] = draw_layers\n\ndefault_color = (255, 255, 0)\ndefault_face = (255, 255, 255)\ndefault_fill = True\ndefault_lw = 1\ndefault_tcolor = (255, 0, 0)\ndefault_tsize = 8\n\ndef drawmark(dc, f, body, **key):\n\tpen, brush, font = dc.GetPen(), dc.GetBrush(), dc.GetFont()\n\tpen.SetColour(default_color or (255,255,0))\n\tbrush.SetColour(default_face or (255,255,255))\n\tbrush.SetStyle((106,100)[default_fill or False])\n\tpen.SetWidth(default_lw or 1)\n\tdc.SetTextForeground(default_tcolor or (255,0,0))\n\tfont.SetPointSize(default_tsize or 8)\n\tdc.SetPen(pen); dc.SetBrush(brush); dc.SetFont(font);\n\tdraw(body, dc, f, **key)\n\nclass GeometryMark:\n\tdef __init__(self, body):\n\t\tself.body = body\n\n\tdef draw(self, dc, f, **key):\n\t\tdrawmark(dc, f, self.body, key)\n\nif __name__ == '__main__':\n\tpass\n\t# print(make_ellipse(0,0,2,1,0))\n","sub_path":"imagepy/ui/canvas/mark.py","file_name":"mark.py","file_ext":"py","file_size_in_byte":11319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"71178","text":"import argparse,re,os\n\nclass Parser:\n def __init__(self,desc):\n # Adding arguments\n self._parser = argparse.ArgumentParser(description=desc)\n self._parser.add_argument('target', help='List one IP Addresses, URL or Hash to query or pass the filename of a file containing IP Addresses, URL or Hash to query each separated by a newline.')\n self._parser.add_argument('-o', '--output', help='This option will output the results to a file.')\n self._parser.add_argument('-w', '--web', help='This option will output the results to an HTML file.')\n self._parser.add_argument('-c', '--csv', help='This option will output the results to a CSV file.')\n self._parser.add_argument('-d', '--delay', type=int, default=2, help='This will change the delay to the inputted seconds. Default is 2.')\n self._parser.add_argument('-s', '--source', help='This option will only run the target against a specific source engine to pull associated domains. Options are defined in the name attribute of the site element in the XML configuration file')\n self._parser.add_argument('--p', action=\"store_true\", help='This option tells the program to post information to sites that allow posting. 
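# Every draw_* function in the mark.py record above opens by saving the DC's
# pen/brush state and closes by restoring it. A context-manager sketch that
# factors out that bookkeeping (the helper is new; the Get/Set calls mirror
# the ones the record already uses):
from contextlib import contextmanager

@contextmanager
def preserved_dc_state(dc):
    pen, brush = dc.GetPen(), dc.GetBrush()
    width, color = pen.GetWidth(), pen.GetColour()
    fcolor, style = brush.GetColour(), brush.GetStyle()
    try:
        yield dc
    finally:
        pen.SetWidth(width); pen.SetColour(color)
        brush.SetColour(fcolor); brush.SetStyle(style)
        dc.SetPen(pen); dc.SetBrush(brush)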
By default the program will NOT post to sites that require a post.')\n self.args = self._parser.parse_args() \n\n def hasHTMLOutFile(self):\n if self.args.web:\n return True\n else:\n return False\n \n @property \n def HTMLOutFile(self):\n if self.hasHTMLOutFile():\n return self.args.web\n else:\n return None\n \n def hasTextOutFile(self):\n if self.args.output:\n return True\n else:\n return False\n \n @property \n def TextOutFile(self):\n if self.hasTextOutFile():\n return self.args.output\n else:\n return None\n \n def hasCSVOutSet(self):\n if self.args.csv:\n return True\n else:\n return False\n \n @property\n def CSVOutFile(self):\n if self.hasCSVOutSet():\n return self.args.csv\n else:\n return None \n \n @property\n def Delay(self):\n return self.args.delay\n \n def print_help(self):\n self._parser.print_help()\n \n def hasTarget(self):\n if self.args.target == None:\n return False\n else:\n return True\n \n def hasNoTarget(self):\n return not(self.hasTarget())\n \n @property\n def Target(self):\n if self.hasNoTarget():\n return None\n else:\n return self.args.target\n\n def hasInputFile(self):\n if os.path.exists(self.args.target) and os.path.isfile(self.args.target):\n return True\n else:\n return False\n \n @property \n def Source(self):\n if self.hasSource():\n return self.args.source\n else:\n return None\n \n def hasSource(self):\n if self.args.source:\n return True\n else:\n return False\n \n def hasPost(self):\n if self.args.p:\n return True\n else:\n return False\n \n @property \n def InputFile(self):\n if self.hasNoTarget():\n return None\n elif self.hasInputFile():\n return self.Target\n else:\n return None\n \nclass IPWrapper(object):\n \n @classmethod\n def isIPorIPList(self,target):\n #IP Address range using prefix syntax\n ipRangePrefix = re.compile('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\/\\d{1,2}', re.IGNORECASE)\n ipRgeFind = re.findall(ipRangePrefix,target)\n ipRangeDash = re.compile('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}-\\d{1,3}', re.IGNORECASE)\n ipRgeDashFind = re.findall(ipRangeDash,target)\n ipAddress = re.compile('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', re.IGNORECASE)\n ipFind = re.findall(ipAddress,target)\n if ((ipRgeFind is None or len(ipRgeFind) == 0) and (ipRgeDashFind is None or len(ipRgeDashFind) == 0) and (ipFind is None and len(ipFind) == 0)):\n return False\n else:\n return True\n \n @classmethod\n def getTarget(self,target):\n #IP Address range using prefix syntax\n ipRangePrefix = re.compile('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\/\\d{1,2}', re.IGNORECASE)\n ipRgeFind = re.findall(ipRangePrefix,target)\n ipRangeDash = re.compile('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}-\\d{1,3}', re.IGNORECASE)\n ipRgeDashFind = re.findall(ipRangeDash,target)\n ipAddress = re.compile('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', re.IGNORECASE)\n ipFind = re.findall(ipAddress,target) \n if ipRgeFind is not None and len(ipRgeFind) > 0:\n #this can be used if we ever get bigger than a class C\n #but truthfully we don't need to split the whole address\n #since we'll only be using the last octet.\n iplist = target[:target.index(\"/\")].split(\".\")\n ipprefix=givenipprefix=target[target.index(\"/\")+1:]\n #create a bytearry to hold the one byte\n #this would be 4 bytes for IPv4 and gives us the capability to grow\n #if we ever want to go larger than a class C\n bytearr = bytearray(2)\n bytearr[0] = int(iplist[3])\n #prefix must be class C or larger\n if int(givenipprefix) < 24:\n ipprefix = 24\n if int(givenipprefix) > 32 or int(givenipprefix) == 
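# isIPorIPList above can never report False: its final clause tests
# `ipFind is None and len(ipFind) == 0`, which is False for every list that
# re.findall returns (the sibling clauses use `or`), so the all-empty
# condition never holds. The stdlib ipaddress module gives a sturdier check;
# a sketch (the function name is illustrative):
import ipaddress

def looks_like_ip_or_range(target):
    base = target.split('/')[0].split('-')[0]
    try:
        ipaddress.ip_address(base)
        return True
    except ValueError:
        return False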
31:\n                bytearr[1]=0\n            else:\n                bytearr[1]=pow(2,32-int(ipprefix))#-1\n            \n            if bytearr[0]>bytearr[1]: \n                start=bytearr[0]\n                last=bytearr[0]^bytearr[1]\n            else:\n                start=bytearr[0]\n                last=bytearr[1] \n            if start == last:\n                yield target[:target.rindex(\".\")+1]+str(start)\n            if start < last:\n                for i in range(start, last+1):\n                    yield target[:target.rindex(\".\")+1]+str(i)\n        elif ipRgeDashFind is not None and len(ipRgeDashFind) > 0:\n            iplist = target[:target.index(\"-\")].split(\".\")\n            iplast = target[target.index(\"-\")+1:]\n            if int(iplist[3]) < int(iplast):\n                for i in range(int(iplist[3]), int(iplast)+1):\n                    yield target[:target.rindex(\".\")+1]+str(i)\n            else:\n                yield target[:target.rindex(\".\")+1]+str(iplist[3])\n        else:\n            yield target\n    url(r'^detail/(?P<course_id>\\d+)/$',CourseDetailView.as_view(),name=\"course_detail\"),\n\n    # 课程详情页面\n    url(r'^info/(?P<course_id>\\d+)/$', CourseInfoView.as_view(), name=\"course_info\"),\n\n    # 课程评论页面\n    url(r'^comment/(?P<course_id>\\d+)/$', CourseCommentView.as_view(), name=\"course_comment\"),\n\n    # 添加课程评论\n    url(r'^add_comment/$', AddCommentsView.as_view(), name=\"add_comment\"),\n\n    # 课程视频播放\n    url(r'^video/(?P<video_id>\\d+)/$', VideoPlayView.as_view(), name=\"course_video\"),\n\n    ]\n\n","sub_path":"apps/courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"518951881","text":"#!/usr/bin/env python3\n\n\nfrom abc import ABC\n\n\nclass Book(ABC):\n    def check_for_fine(self, ISBN, title, subject, publisher, language, number_of_pages):\n        self.__ISBN = ISBN\n        self.__title = title\n        self.__subject = subject\n        self.__publisher = publisher\n        self.__language = language\n        self.__number_of_pages = number_of_pages\n        self.__authors = []\n","sub_path":"library/book/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"607986753","text":"import datetime\n\nconfig = {\n    \"access\": \"public\",\n    \"help\": \".mariafuckingwhere || .mariafuckingwhere || Tells you where Maria fucking is\",\n    \"reversible\": False\n}\n\ndef command(self, user, channel, msg):\n    dt = datetime.date\n    times = {\"8\":dt(2013,1,6),\"9\":dt(2013,5,2),\"10\":dt(2013,8,31),\"11\":dt(2014,1,5),\"12\":dt(2014,5,18),\"13\":dt(2014,10,11)}\n    show = self.factory.resolve(\"Maria\", channel)\n    if show is None:\n        return\n    ep = int(show[\"current_ep\"]) + 1\n    if ep > 12:\n        self.msg(channel, \"Maria fucking here! http://www.nyaa.eu/?page=view&tid=418999\")\n    else:\n        when = times[str(ep)] - dt.today()\n        self.msg(channel, \"%s %d will be released in %d days\" % (show[\"series\"], ep, when.days))","sub_path":"commands/mariafuckingwhere.py","file_name":"mariafuckingwhere.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
{"seq_id":"333258124","text":"# I take a dictionary as input\n# I also take as input a string of three words\n# I want to use the third word, the second and the third words, and all three\n\t# words to predict a possible fourth word.\n\n# Given third word. What is the most frequent next word? What is its probability?\n# I need to loop through all of the keys in a dictionary. 
I need to find the\n# key with the highest count_key and the value for the count_key.\n\ndef mostFrequentKey(dic):\n\tfrequentKey = \"thisflatoutdidntwork\"\n\tmaxCount= 0\n\ttotalCount = dic['count_key']\n\tfor key in dic.keys():\n\t\tif key != 'count_key':\n\t\t\tcurrentCount = dic[key]['count_key']\n\t\t\tif currentCount > maxCount:\n\t\t\t\tmaxCount = currentCount\n\t\t\t\tfrequentKey = key\n\tprobability = float(maxCount) / totalCount\n\treturn (frequentKey, probability)\n\n# given \"word1 word2 word3\" I will try to predict word4\n\ndef predictWord4(wordString, dic):\n\twordList = wordString.lower().split()\n\tif wordList[2] in dic:\n\t\tonePredict = mostFrequentKey(dic[wordList[2]])\n\tif wordList[1] in dic and wordList[2] in dic[wordList[1]]:\n\t\ttwoPredict = mostFrequentKey(dic[wordList[1]][wordList[2]])\n\tif wordList[0] in dic and wordList[1] in dic[wordList[0]] and wordList[2] in dic[wordList[0]][wordList[1]]:\n\t\tthreePredict = mostFrequentKey(dic[wordList[0]][wordList[1]][wordList[2]])\n\treturn (onePredict, twoPredict, threePredict)\n\n\n","sub_path":"MakePredictions.py","file_name":"MakePredictions.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"506361837","text":"import datajoint as dj\r\nimport re\r\nimport pathlib\r\n\r\nfrom datajoint_utilities.dj_data_copy import diagram_restriction, db_migration\r\n\r\nfrom .tables_for_bk import tables_for_bk\r\n\r\n\r\n# ---- reconstruct schema/table code for the selected tables ----\r\nschema_prefix_update_mapper = {\r\n 'prod_mlims_data': 'group_shared_topopaper_mlims',\r\n 'group_imaging_1b': 'group_shared_topopaper_main_imaging',\r\n 'user_horsto_imaging': 'group_shared_topopaper_horst_imaging',\r\n 'user_horsto_borderscore': 'group_shared_topopaper_borderscore'\r\n }\r\n\r\n\r\ndef main():\r\n all_tables = []\r\n for schema_name, selected_table_names in tables_for_bk.items():\r\n all_tables.extend([f'`{schema_name}`.`{dj.utils.from_camel_case(t).replace(\".\", \"__\")}`' for t in selected_table_names])\r\n\r\n schemas_code, schemas_table_definition = diagram_restriction.generate_schemas_definition_code(\r\n all_tables, schema_prefix_update_mapper,\r\n verbose=True, save_dir=None)\r\n\r\n # ---- replace blob@store with longblob ----\r\n for k, code in schemas_code.items():\r\n schemas_code[k] = re.sub('(blob@\\w+)\\s', 'longblob', code)\r\n\r\n # ---- write code into .py files ----\r\n save_dir = pathlib.Path('./clone_pipeline/pipeline_code').absolute()\r\n save_dir.mkdir(exist_ok=True, parents=True)\r\n for cloned_schema_name, schema_definition_str in schemas_code.items():\r\n with open(save_dir / f'{cloned_schema_name}.py', 'wt') as f:\r\n f.write(schema_definition_str)\r\n\r\n # ---- execute the pipeline codes ----\r\n # this is where you review the .py files, and run them manually once\r\n # to get the cloned pipeline initiated\r\n # (we can code this up, but this should be a manual step with careful inspection)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"clone_pipeline/generate_pipeline.py","file_name":"generate_pipeline.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"639250236","text":"import tensorflow as tf\nimport numpy as np\nfrom random import shuffle\n\ndef read_process(filname, sep=\",\"):\n df = []\n with tf.gfile.FastGFile(filname) as f:\n for line in f.readlines() :\n 
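# predictWord4 above only binds onePredict/twoPredict/threePredict when the
# matching context exists in the dictionary, so an unseen trigram raises
# NameError at the return. A sketch with None defaults (mostFrequentKey is
# the function defined in the record above):
def predictWord4_safe(wordString, dic):
    w1, w2, w3 = wordString.lower().split()
    onePredict = twoPredict = threePredict = None
    if w3 in dic:
        onePredict = mostFrequentKey(dic[w3])
    if w2 in dic and w3 in dic[w2]:
        twoPredict = mostFrequentKey(dic[w2][w3])
    if w1 in dic and w2 in dic[w1] and w3 in dic[w1][w2]:
        threePredict = mostFrequentKey(dic[w1][w2][w3])
    return (onePredict, twoPredict, threePredict)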
df.append([int(row.replace('\"','')) for row in line.split(sep)[1:3]])\n return df\n\ndef getData(datas):\n datas = np.array(datas)\n users = np.unique(datas[:][:,0])\n books = np.unique(datas[:][:,1])\n usersId2Index = dict()\n booksId2Index = dict()\n usersIndex2Id = dict()\n booksIndex2Id = dict()\n for user in users :\n index = len(usersIndex2Id)\n usersIndex2Id[index]=user\n usersId2Index[user]=index\n for book in books:\n index = len(booksIndex2Id)\n booksIndex2Id[index] = book\n booksId2Index[book] = index\n ds = []\n for data in datas :\n ds.append([usersId2Index[data[0]],booksId2Index[data[1]],1])\n return np.array(ds),usersId2Index,booksId2Index,usersIndex2Id,booksIndex2Id\n\ndef getDataSet(datas, tranrate):\n rows = len(datas)\n split_index = int(rows * tranrate)\n indexs = np.random.permutation(rows)\n result = np.array([datas[i] for i in indexs])\n train = result[:split_index]\n test = result[split_index:]\n return train, test\n\n\nclass DataSet(object):\n def __init__(self,datas,usersLen,booksLen):\n df = np.zeros([usersLen, booksLen])\n for data in datas:\n df[data[0],data[1]] = data[2]\n self._df = df\n self._usersLen=usersLen\n self._booksLen=booksLen\n\n @property\n def df(self):\n return self._df\n\n @property\n def usersLen(self):\n return self._usersLen\n\n @property\n def booksLen(self):\n return self._booksLen\n\n\n# datas = read_process(\"/Users/liuyang/ml/data/huabenshelf/huaben-user-shelf.csv\")\n# print(datas[:10])\n# dataset = DataSet([[1,2],[3,4],[2,1],[1,2]])\n# print(dataset.df)\n# shuffle(dataset.df)\n# print(dataset.df)","sub_path":"huaben/bookrecommend/data_set.py","file_name":"data_set.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"175690909","text":"from datetime import datetime\n\n\nclass WMSLayerSummary():\n\n def __init__(self, title=\"\", name=\"\"):\n self.name = name\n self.title = title\n self.layers = []\n self.referenceSystems = []\n\n @staticmethod\n def fromElement(element):\n wmsls = WMSLayerSummary()\n for child in element:\n tag = child.tag.lower()\n if tag == \"name\":\n wmsls.name = child.text\n elif tag == \"title\":\n wmsls.title = child.text\n elif tag == \"srs\":\n wmsls.referenceSystems.append(child.text)\n elif tag == \"layer\":\n wmsls.layers.append(WMSLayerSummary.fromElement(child))\n return wmsls\n\n\nclass WMSServerSummary(WMSLayerSummary):\n\n def __init__(self, title=\"\", name=\"\"):\n WMSLayerSummary.__init__(self, title, name)\n self.timestamp = datetime.now(None)\n\n @staticmethod\n def fromElement(element):\n wmsss = WMSServerSummary()\n for child in element:\n tag = child.tag.lower()\n if tag == \"name\":\n wmsss.name = child.text\n elif tag == \"title\":\n wmsss.title = child.text\n elif tag == \"layer\":\n wmsss.layers.append(WMSLayerSummary.fromElement(child))\n return wmsss\n","sub_path":"ymac/gis/qgis/data/wms/wmsserver.py","file_name":"wmsserver.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"534904816","text":"import asyncio\nimport httpx\nimport time\n\n\nasync def get_book_name(index: int, isbn: int):\n async with httpx.AsyncClient() as client:\n response = await client.get(f\"https://www.googleapis.com/books/v1/volumes?q=isbn:{isbn}\")\n response_dict = response.json()\n title = response_dict[\"items\"][0][\"volumeInfo\"][\"title\"]\n print(f\"{index}: {title}\")\n\n\nasync def main():\n isbn_list = [\n 
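# A toy round-trip for the data_set helpers above (the numbers are invented
# purely for illustration): getData densely re-indexes raw (user, book) ids,
# getDataSet shuffles and splits, and DataSet pivots the triples into a dense
# user-by-book matrix.
raw = [[101, 7], [101, 9], [205, 7]]
ds, users_id2idx, books_id2idx, users_idx2id, books_idx2id = getData(raw)
train, test = getDataSet(ds, 0.8)
matrix = DataSet(ds, len(users_id2idx), len(books_id2idx)).df   # shape (2, 2)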
9780007355143,\n 9780008108298,\n 9780547249643,\n 9781405882583,\n 9780316095860,\n 9780930289232,\n 9780486415871,\n ]\n\n task_list = []\n for index, isbn in enumerate(isbn_list):\n task_list.append(get_book_name(index, isbn))\n await asyncio.gather(*task_list)\n\n\n\nif __name__ == \"__main__\":\n start_time = time.monotonic()\n asyncio.run(main())\n print(f\"Time Taken:{time.monotonic() - start_time}\")\n","sub_path":"async/isbn.py","file_name":"isbn.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"571950827","text":"# -*- coding: utf-8 -*-\n'''\nManage dingtalk work flow process.\nDocument available at\nhttps://ding-doc.dingtalk.com/doc#/serverapi2/ca8r99\nand\nhttps://ding-doc.dingtalk.com/doc#/serverapi2/civf9v\n'''\nimport time\nfrom ..util.api import check_none_params, client\nfrom ..util.conf import get_config\nfrom .models import convert_to_dict,\\\n FormComponent,\\\n FormComponentProp,\\\n ProcessInstanceApprover\n\n\ndef process_save(agent_id=None,\n process_code=None,\n name='',\n description='',\n form_component_list=None):\n if agent_id is None:\n agent_id = get_config().get('agent_id')\n request = {\n \"agentid\": agent_id,\n \"process_code\": process_code,\n \"name\": name,\n \"description\": description,\n \"fake_mode\": True\n }\n request['form_component_list'] = convert_to_dict(form_component_list)\n resp = client.call('POST', '/topapi/process/save',\n json={\"saveProcessRequest\": request})\n return resp.json()\n\n\ndef process_delete(agent_id=None,\n process_code=None):\n if not process_code:\n raise Exception(\"Must provide a process_code.\")\n if agent_id is None:\n agent_id = get_config().get('agent_id')\n data = {\n \"agentid\": agent_id,\n \"process_code\": process_code\n }\n resp = client.call('POST', '/topapi/process/delete',\n json=data)\n return resp.json()\n\n\ndef process_workrecord_create(agent_id=None,\n process_code=None,\n originator_user_id=None,\n title=None,\n form_component_values=None,\n url=None):\n if not (process_code and originator_user_id and url):\n raise Exception(\"Must provide a process_code.\")\n if agent_id is None:\n agent_id = get_config().get('agent_id')\n request = {\n \"agentid\": agent_id,\n \"process_code\": process_code,\n \"originator_user_id\": originator_user_id,\n \"title\": title,\n \"form_component_values\": form_component_values,\n \"url\": url\n }\n resp = client.call('POST', '/topapi/process/workrecord/create',\n json={\"request\": request})\n return resp.json()\n\n\ndef process_workrecord_update(agent_id=None,\n process_instance_id=None,\n status=None,\n result=None):\n if not (process_instance_id and status):\n raise Exception(\"Must provide a process_instance_id and status.\")\n if not status in ('COMPLETED', 'TERMINATED'):\n raise Exception(\"Illegal status.\")\n if agent_id is None:\n agent_id = get_config().get('agent_id')\n request = {\n \"agentid\": agent_id,\n \"process_instance_id\": process_instance_id,\n \"status\": status,\n \"result\": result\n }\n resp = client.call('POST', '/topapi/process/workrecord/update',\n json={\"request\": request})\n return resp.json()\n\n\ndef process_workrecord_task_create(agent_id=None,\n process_instance_id=None,\n activity_id=None,\n tasks=None):\n if not (process_instance_id and status):\n raise Exception(\"Must provide a process_instance_id and status.\")\n if agent_id is None:\n agent_id = get_config().get('agent_id')\n if tasks is None:\n tasks = []\n request = {\n 
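# The gather() in the isbn record above fires one Google Books request per
# ISBN simultaneously. A sketch that caps concurrency with a semaphore (the
# limit of 3 is an arbitrary illustration; get_book_name is the coroutine
# defined in the record):
import asyncio

async def fetch_all(isbns, limit=3):
    sem = asyncio.Semaphore(limit)

    async def bounded(index, isbn):
        async with sem:
            await get_book_name(index, isbn)

    await asyncio.gather(*(bounded(i, n) for i, n in enumerate(isbns)))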
\"agentid\": agent_id,\n \"process_instance_id\": process_instance_id,\n \"activity_id\": activity_id,\n \"tasks\": tasks\n }\n resp = client.call('POST', '/topapi/process/workrecord/task/create',\n json={\"request\": request})\n return resp.json()\n\n\ndef process_workrecord_task_update(agent_id=None,\n process_instance_id=None,\n tasks=None):\n if not process_instance_id:\n raise Exception(\"Must provide the process_instance_id.\")\n if agent_id is None:\n agent_id = get_config().get('agent_id')\n request = {\n \"agentid\": agent_id,\n \"process_instance_id\": process_instance_id,\n \"tasks\": tasks\n }\n resp = client.call('POST', '/topapi/process/workrecord/task/update',\n json={\"request\": request})\n return resp.json()\n \n\ndef process_workrecord_taskgroup_cancel(agent_id=None,\n process_instance_id=None,\n activity_id=None):\n if not (process_instance_id and activity_id):\n raise Exception(\"Must provide the process_instance_id and activity_id.\")\n if agent_id is None:\n agent_id = get_config().get('agent_id')\n request = {\n \"agentid\": agent_id,\n \"process_instance_id\": process_instance_id,\n \"activity_id\": activity_id\n }\n resp = client.call('POST', '/topapi/process/workrecord/taskgroup/cancel',\n json={\"request\": request})\n return resp.json()\n\n\ndef processinstance_create(agent_id=None,\n process_code=None,\n originator_user_id=None,\n dept_id=None,\n approvers=None,\n approvers_v2=None,\n cc_list=None,\n cc_position=None,\n form_component_values=None):\n check_none_params(process_code=process_code,\n originator_user_id=originator_user_id,\n dept_id=dept_id,\n form_component_values=form_component_values)\n if agent_id is None:\n agent_id = get_config().get('agent_id')\n data = {\n 'agent_id': agent_id,\n 'process_code': process_code,\n 'originator_user_id': originator_user_id,\n 'dept_id': dept_id,\n 'approvers': approvers,\n 'approvers_v2': convert_to_dict(approvers_v2),\n 'cc_list': cc_list,\n 'cc_position': cc_position,\n 'form_component_values': convert_to_dict(form_component_values)\n }\n resp = client.call('POST', '/topapi/processinstance/create',\n json=data)\n return resp.json()\n\n\ndef processinstnce_listids(process_code=None,\n start_time=None,\n end_time=None,\n size=20,\n cursor=0,\n userid_list=None):\n check_none_params(process_code=process_code,\n start_time=start_time)\n data = {\n \"process_code\": process_code,\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"size\": size,\n \"cursor\": cursor,\n \"userid_list\": userid_list\n }\n resp = client.call('POST',\n '/topapi/processinstance/listids',\n json=data)\n return resp.json()\n\n\ndef processinstance_get(process_instance_id):\n resp = client.call('POST', '/topapi/processinstance/get',\n json={'process_instance_id': process_instance_id})\n return resp.json()\n\n\ndef process_gettodonum(userid):\n data = {\"userid\": userid}\n resp = client.call('POST',\n '/topapi/process/gettodonum',\n json=data)\n return resp.json()\n\n\ndef process_listbyuserid(userid=None,\n offset=0,\n size=100):\n check_none_params(userid=userid)\n data = {\n \"userid\": userid,\n \"offset\": offset,\n \"size\": size\n }\n resp = client.call('POST',\n '/topapi/process/listbyuserid',\n json=data)\n return resp.json()\n\n\ndef processinstance_cspace_info(user_id=None):\n check_none_params(user_id=user_id)\n data = {'user_id': user_id}\n resp = client.call('POST', '/topapi/processinstance/cspace/info',\n json=data)\n return 
resp.json()\n","sub_path":"dingle/dingtalk/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"135692495","text":"# -*- coding: utf-8 -*-\r\n\r\nimport modules.var as v\r\nimport numpy as np\r\nfrom scipy.integrate import ode\r\n\r\n\r\ndef update():\r\n\r\n # calculate water depth as difference between hydraulic head and surface\r\n depoN = np.zeros((v.nNode, v.nClsMat), dtype=np.float64)\r\n\r\n for node in range(v.nNode):\r\n totalDepo = 0.0\r\n aix = v.wDepth[node]-v.wMin\r\n for mat in range(v.nTotalMat):\r\n totalDepo = totalDepo+v.depo[node][mat]\r\n if totalDepo <= 0.0 or aix <= 0.0:\r\n continue\r\n elif totalDepo > aix:\r\n x = aix/totalDepo\r\n for mat in range(v.nTotalMat):\r\n if mat < v.nClsMat:\r\n depoN[node][mat] = v.depo[node][mat]*x\r\n # s'ha d'actualitzar conClast\r\n retorn = v.depo[node][mat]-depoN[node][mat]\r\n v.conClast[node][mat] = v.conClast[node][mat]+retorn\r\n v.depo[node][mat] = depoN[node][mat]\r\n else:\r\n v.depo[node][mat] = v.depo[node][mat]*x\r\n\r\n for mat in range(v.nTotalMat):\r\n # reduce surface\r\n v.surface[node] = v.surface[node]+v.depo[node][mat]\r\n # update thickness of clastic sed deposited (grid array)\r\n v.grid[node][v.jti-1][mat] = v.grid[node][v.jti-1][mat]+v.depo[node][mat]\r\n\r\n v.depo = np.zeros((v.nNode, v.nTotalMat), dtype=np.float64)\r\n\r\n\r\ndef dn_dt(t, y, eF, iM, cboP, imp):\r\n ans = 0.0\r\n eq = []\r\n for i in range(v.nCboMat):\r\n for j in range(v.nCboMat):\r\n ans += iM[i][j]*y[i]*y[j]\r\n eq.append(eF[i]*y[i]+ans)\r\n for i in range(v.nCboMat):\r\n if imp[i] != 0.0:\r\n eq.append(cboP[i]*y[i]/imp[i])\r\n else:\r\n eq.append(cboP[i]*y[i]*imp[i])\r\n return eq\r\n\r\n\r\ndef carbonate():\r\n ti = 0.0\r\n tf = v.dt\r\n iM = v.interMatrix\r\n # backend = 'vode'\r\n backend = 'dopri5'\r\n # backend = 'dop853'\r\n\r\n # calculate growthrate of species as function of environmental parameters\r\n for node in range(v.nNode):\r\n if v.wDepth[node] > v.wMin:\r\n eF = np.zeros(v.nCboMat, dtype=np.float64)\r\n invmp = np.zeros(v.nCboMat, dtype=np.float64)\r\n for i in range(v.nCboMat):\r\n v.factorWD[node][v.jti-1][i] = np.interp(v.wDepth[node], v.deptFactorX[i][:], v.deptFactorY[i][:])\r\n v.factorVL[node][v.jti-1][i] = np.interp(v.veloNode[node]/365.25, v.flowFactorX[i][:], v.flowFactorY[i][:])\r\n v.factorSL[node][v.jti-1][i] = np.interp(v.slopeNode[v.jti-1][node], v.slopeFactorX[i][:],\r\n v.slopeFactorY[i][:])\r\n v.factorNU[node][v.jti-1][i] = 1.0\r\n\r\n # factorNU=np.interp(v.nutriConc[node],v.nutrFactorX[i][:],v.nutrFactorY[i][:])\r\n for j in range(v.nClsMat):\r\n v.factorCL[node][v.jti-1][i][j] = np.interp(v.conClast[node][j], v.clsFactorX[i][j][:],\r\n v.clsFactorY[i][j][:])\r\n if v.factorType == 2:\r\n fcl = np.amin(v.factorCL)\r\n v.envFactor[node][v.jti-1][i] = min([v.factorWD[node][v.jti-1][i], v.factorVL[node][v.jti-1][i],\r\n v.factorSL[node][v.jti-1][i], v.factorNU[node][v.jti-1][i],\r\n fcl])\r\n elif v.factorType == 1:\r\n fcl = 1.0\r\n for j in range(v.nClsMat):\r\n fcl = fcl*v.factorCL[node][i][j]\r\n v.envFactor[node][v.jti-1][i] = v.factorWD[node][v.jti-1][i]*v.factorVL[node][v.jti-1][i]\\\r\n * v.factorSL[node][v.jti-1][i]*v.factorNU[node][v.jti-1][i]*fcl\r\n else:\r\n v.envFactor[node][v.jti-1][i] = 1.0\r\n\r\n eF[i] = v.birth[i]*v.envFactor[node][v.jti-1][i]\r\n\r\n if v.specPop[node][v.jti-1][i] < v.specPopMin[i]:\r\n v.specPop[node][v.jti-1][i] = 
v.specPopMin[i]\r\n\r\n ixx = 0.0\r\n for j in range(v.nCboMat):\r\n if i != j:\r\n ixx += v.interMatrix[i][j]\r\n if ixx != 0.0 and eF[i] > 0.0:\r\n ainv = np.linalg.inv(v.interMatrix)\r\n invmp = np.dot(ainv, -eF)\r\n elif ixx == 0.0 and eF[i] > 0.0:\r\n invmp[i] = -eF[i]/v.interMatrix[i][i]\r\n else:\r\n invmp[i] = 0.0\r\n if any(invmp) > 0.0:\r\n y0 = np.zeros((2*v.nCboMat), dtype=np.float64)\r\n for ix in range(v.nCboMat):\r\n y0[ix] = v.specPop[node][v.jti-1][ix]\r\n y0[v.nCboMat+ix] = 0.0\r\n solver = ode(dn_dt)\r\n solver.set_integrator(backend)\r\n solver.set_initial_value(y0, ti)\r\n solver.set_f_params(eF, iM, v.cboProd, invmp)\r\n solt = []\r\n soly = []\r\n while solver.t < tf:\r\n solver.integrate(tf, step=True)\r\n solt.append(solver.t)\r\n soly.append(solver.y)\r\n # print soly\r\n solyp = np.asarray(soly)\r\n # if (solyp.shape[0])>1:\r\n # print solyp\r\n # print solt\r\n # plt.plot(solt, solyp[:,0])\r\n # plt.plot(solt, solyp[:,1])\r\n # plt.show()\r\n # quit()\r\n for i in range(v.nCboMat):\r\n v.specPop[node][v.jti-1][i] = solyp[solyp.shape[0]-1][i]\r\n if solyp[solyp.shape[0]-1][v.nCboMat+i] > 0.0:\r\n v.depo[node][v.nClsMat+i] = solyp[solyp.shape[0]-1][v.nCboMat+i]\r\n else:\r\n for i in range(v.nCboMat):\r\n v.specPop[node][v.jti-1][i] = 0.0\r\n else:\r\n for i in range(v.nCboMat):\r\n v.specPop[node][v.jti-1][i] = 0.0\r\n\r\n\r\ndef clastic():\r\n # It calculates the clastic sedimentation\r\n for mat in range(v.nClsMat):\r\n for node in range(v.nNode):\r\n # check if velocity is below critical value for sedim type\r\n # velo comes in m/y, vCritic im m/day, set velo to m/day\r\n\r\n if v.veloNode[node]/365.25 <= v.vCritic[mat] and (v.wDepth[node]-v.head[node]) > v.wMin and v.conClast[node][mat] > 0.0:\r\n\r\n # clastmass is suspended mass (in m thickness of sediment)\r\n clastMass = (v.conClast[node][mat]*v.wDepth[node])/v.density[mat]\r\n # calculate settle velocity (comes in m/day)\r\n factor = (v.vCritic[mat]-(v.veloNode[node]/365.25))/v.vCritic[mat]\r\n if factor > 1:\r\n factor = 1.0\r\n if factor < 0:\r\n factor = 0.0\r\n # calculate settle distance for dt\r\n sediPortion = v.setClast[mat]*365.25*factor*v.dt/v.wDepth[node]\r\n if sediPortion > 1.0:\r\n sediPortion = 1.0\r\n # calculate mass deposited in dt\r\n v.depo[node][mat] = clastMass*sediPortion\r\n # calculate new concentration after deposition\r\n clastMass = clastMass-v.depo[node][mat]\r\n v.conClast[node][mat] = (v.density[mat]*clastMass)/(v.wDepth[node]-v.depo[node][mat])\r\n","sub_path":"modules/sedimentation.py","file_name":"sedimentation.py","file_ext":"py","file_size_in_byte":7562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"98327558","text":"\n#!/usr/bin/env python\n\n\nimport smbus #import SMBus module of I2C\nimport time\nimport array as arr\n\nimport statistics as st\nimport math as m\n\n\nPWR_MGMT_1 = 0x6B\nSMPLRT_DIV = 0x19\nCONFIG = 0x1A\nGYRO_CONFIG = 0x1B\nINT_ENABLE = 0x38\nACCEL_XOUT_H = 0x3B\nACCEL_YOUT_H = 0x3D\nACCEL_ZOUT_H = 0x3F\nGYRO_XOUT_H = 0x43\nGYRO_YOUT_H = 0x45\nGYRO_ZOUT_H = 0x47\n\npi = 3.1415926\nto_deg = 180/pi\nto_rad = pi/180\n\ngyro_rate_adjust = 1 # not used\nacc_rate_adjust = 1 # not used\n\nalpha=0.95\nacc_ref = 16834.0\ngyro_ref = 131.0\n\ngyro_angle=[0,0,0]\nprev_acc = [0,0,0]\ncombined_angle = [0,0,0]\n\ngyro_offset_x,gyro_offset_y,gyro_offset_z=0,0,0\n\n\ndef offset_calculation():\n gyro_x_check,gyro_y_check,gyro_z_check = [],[],[]\n global gyro_offset_x,gyro_offset_y,gyro_offset_z\n \n #looping to 
collect the calibration samples\n for i in range(1000):\n gyro_x_check.append(read_raw_data(GYRO_XOUT_H))\n gyro_y_check.append(read_raw_data(GYRO_YOUT_H))\n gyro_z_check.append(read_raw_data(GYRO_ZOUT_H))\n \n #offset calculation through mean of 1000 samples\n gyro_offset_x = st.mean(gyro_x_check)\n gyro_offset_y = st.mean(gyro_y_check)\n gyro_offset_z = st.mean(gyro_z_check)\n\n\ndef MPU_Init():\n #write to sample rate register\n bus.write_byte_data(Device_Address, SMPLRT_DIV, 7)\n \n #Write to power management register\n bus.write_byte_data(Device_Address, PWR_MGMT_1, 1)\n \n #Write to Configuration register\n bus.write_byte_data(Device_Address, CONFIG, 0)\n \n #Write to Gyro configuration register\n bus.write_byte_data(Device_Address, GYRO_CONFIG, 0)\n \n #Write to interrupt enable register\n bus.write_byte_data(Device_Address, INT_ENABLE, 1)\n\n\ndef read_raw_data(addr):\n #Accelero and Gyro values are 16-bit\n high = bus.read_byte_data(Device_Address, addr)\n low = bus.read_byte_data(Device_Address, addr+1)\n\n #concatenate higher and lower bytes\n value = ((high << 8) | low)\n \n #to get signed value from mpu6050 (0x8000 and above wraps negative)\n if value >= 32768:\n value = value - 65536\n return value\n\n\ndef euler_to_quaternion(yaw, pitch, roll):\n qx = m.sin(roll/2) * m.cos(pitch/2) * m.cos(yaw/2) - m.cos(roll/2) * m.sin(pitch/2) * m.sin(yaw/2)\n qy = m.cos(roll/2) * m.sin(pitch/2) * m.cos(yaw/2) + m.sin(roll/2) * m.cos(pitch/2) * m.sin(yaw/2)\n qz = m.cos(roll/2) * m.cos(pitch/2) * m.sin(yaw/2) - m.sin(roll/2) * m.sin(pitch/2) * m.cos(yaw/2)\n qw = m.cos(roll/2) * m.cos(pitch/2) * m.cos(yaw/2) + m.sin(roll/2) * m.sin(pitch/2) * m.sin(yaw/2)\n\n return qx, qy, qz, qw\n\n\ndef read_raw_values():\n global gyro_offset_x,gyro_offset_y,gyro_offset_z\n #read accelerometer raw values\n acc_x_raw = read_raw_data(ACCEL_XOUT_H)\n acc_y_raw = read_raw_data(ACCEL_YOUT_H)\n acc_z_raw = read_raw_data(ACCEL_ZOUT_H)\n #read gyroscope raw values and remove the calibration offsets\n gyro_x=read_raw_data(GYRO_XOUT_H) - gyro_offset_x\n gyro_y=read_raw_data(GYRO_YOUT_H) - gyro_offset_y\n gyro_z=read_raw_data(GYRO_ZOUT_H) - gyro_offset_z\n\n return [acc_x_raw, acc_y_raw, acc_z_raw], [gyro_x, gyro_y, gyro_z]\n\n\ndef normalize_acc_data(acc_raw):\n global acc_ref\n #normalize acc values\n a_norm_x = acc_raw[0]/acc_ref\n a_norm_y = acc_raw[1]/acc_ref\n a_norm_z = acc_raw[2]/acc_ref\n \n return [a_norm_x, a_norm_y, a_norm_z]\n\n\ndef normalize_gyro_data(gyro_raw):\n global gyro_ref\n #normalize gyro values\n g_norm_x = gyro_raw[0]/gyro_ref\n g_norm_y = gyro_raw[1]/gyro_ref\n g_norm_z = gyro_raw[2]/gyro_ref\n\n return [g_norm_x, g_norm_y, g_norm_z]\n\n\ndef get_angle_gyro(gyro_angle, gyro, dt):\n #integrate the angular rate to get the angle change due to the gyro\n gyro_angle_x = gyro_angle[0] + gyro[0]*dt\n gyro_angle_y = gyro_angle[1] + gyro[1]*dt\n gyro_angle_z = gyro_angle[2] + gyro[2]*dt\n \n return [gyro_angle_x,gyro_angle_y,gyro_angle_z]\n\n\ndef get_median(acc_x, acc_y, acc_z):\n a_x=st.median(acc_x)\n a_y=st.median(acc_y)\n a_z=st.median(acc_z)\n \n return [a_x, a_y, a_z]\n\n\ndef get_angle_acc(acc):\n try:\n #acc_angle_x = m.atan2(acc[1], acc[2]) * to_deg\n #acc_angle_y = m.atan2((-1*acc[0]), m.sqrt(acc[1]**2 + acc[2]**2)) * to_deg\n #acc_angle_z = m.acos(acc[2] / m.sqrt(acc[0]**2 + acc[1]**2 + acc[2]**2)) * to_deg\n acc_angle_x = m.asin(acc[1] / m.sqrt(acc[0]**2 + acc[1]**2 + acc[2]**2)) * to_deg\n acc_angle_y = m.asin(-acc[0] / m.sqrt(acc[0]**2 + acc[1]**2 + acc[2]**2)) * to_deg\n acc_angle_z = m.acos(acc[2] / m.sqrt(acc[0]**2 + acc[1]**2 + acc[2]**2)) * to_deg\n except Exception:\n #sensor noise can push the asin/acos argument just outside [-1, 1];\n #returning a level attitude avoids an unbound-variable error below\n return [0.0, 0.0, 0.0]\n \n 
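#a clamped variant (editor's sketch, not in the original) avoids the exception path\n #entirely by saturating the normalized component before calling asin/acos:\n #ratio = max(-1.0, min(1.0, acc[1] / m.sqrt(acc[0]**2 + acc[1]**2 + acc[2]**2)))\n #acc_angle_x = m.asin(ratio) * to_deg\n 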
return [acc_angle_x, acc_angle_y, acc_angle_z]\n\n\ndef get_alpha(prev_acc, acc):\n # Complementary filter is applied on basis of rate of accelrometer measurement\n alpha_x = abs((prev_acc[0] - acc[0]))\n alpha_y = abs((prev_acc[1] - acc[1]))\n alpha_z = abs((prev_acc[2] - acc[2]))\n alpha = max(alpha_x, alpha_y, alpha_z)\n #alpha = alpha * 10\n if alpha > 1:\n alpha = 1\n \n return alpha\n\n\ndef get_combined_angle(acc_angle, gyro_angle, alpha):\n print(alpha)\n combined_angle_x = alpha*gyro_angle[0] + (1-alpha)*acc_angle[0]\n combined_angle_y = alpha*gyro_angle[1] + (1-alpha)*acc_angle[1]\n combined_angle_z = gyro_angle[2]\n \n return [combined_angle_x, combined_angle_y, combined_angle_z]\n\n\ndef get_velocity(vel_prev, acc_norm, dt):\n # accc in m/s2\n acc_actual = [acc_norm[0] * 9.8, acc_norm[1] * 9.8, acc_norm[2] * 9.8]\n vel_x = vel_prev[0] + acc_actual[0]*dt\n vel_y = vel_prev[1] + acc_actual[1]*dt\n vel_z = vel_prev[2] + acc_actual[2]*dt\n \n return [vel_x, vel_y, vel_z]\n\n\ndef get_position(prev_position, prev_velocity, dt):\n # accc in m/s2\n pos_x = prev_position[0] + prev_velocity[0]*dt\n pos_y = prev_position[1] + prev_velocity[1]*dt\n pos_z = prev_position[2] + prev_velocity[2]*dt\n \n return [pos_x, pos_y, pos_z]\n\n\n\ndef gravity_compensation(acc_norm, combined_angle):\n x_acc_linear = acc_norm[0] + m.sin(combined_angle[1] * to_rad); #16384 height latitude compensation\n y_acc_linear = acc_norm[1] - m.sin(combined_angle[0] * to_rad); #16384\n z_acc_linear = acc_norm[2] - m.cos(combined_angle[2] * to_rad); #16384\n \n return [x_acc_linear, y_acc_linear, z_acc_linear]\n\n\nprev_velocity = [0,0,0]\nprev_velocity_raw = [0,0,0]\nprev_position = [0,0,0]\nacc_accumulation_x = []\nacc_accumulation_y = []\nacc_accumulation_z = []\ncount = 0\nno_of_acc_samples = 5\ndef get_data(dt):\n global gyro_angle, prev_acc, prev_velocity, prev_position, combined_angle\n global alpha, prev_velocity_raw, count, acc_accumulation, no_of_acc_samples\n \n gyro_angle = combined_angle\n acc_raw, gyro_raw = read_raw_values()\n \n gyro_norm = normalize_gyro_data(gyro_raw)\n acc_norm = normalize_acc_data(acc_raw)\n \n count = count+1\n acc_accumulation_x.append(acc_raw[0])\n acc_accumulation_y.append(acc_raw[1])\n acc_accumulation_z.append(acc_raw[2])\n \n if count >= no_of_acc_samples: \n acc_median_x = st.median(acc_accumulation_x)\n acc_median_y = st.median(acc_accumulation_y)\n acc_median_z = st.median(acc_accumulation_z)\n \n acc_median = [acc_median_x, acc_median_y, acc_median_z]\n acc_norm = normalize_acc_data(acc_median)\n count = 0\n acc_accumulation_x.clear()\n acc_accumulation_y.clear()\n acc_accumulation_z.clear()\n \n gyro_angle = get_angle_gyro(gyro_angle, gyro_norm, dt)\n else:\n return\n \n acc_angle = get_angle_acc(acc_norm)\n print(\"Accelerometer Angle:\")\n print(\"\\tx: \",acc_angle[0])\n print(\"\\ty: \",acc_angle[1])\n print(\"\\tz: \",acc_angle[2])\n\n gyro_angle = get_angle_gyro(gyro_angle, gyro_norm, dt)\n #print(\"gyro_angle\",gyro_angle)\n print(\"Gyroscope Angle:\")\n print(\"\\tx: \",gyro_angle[0])\n print(\"\\ty: \",gyro_angle[1])\n print(\"\\tz: \",gyro_angle[2])\n\n velocity_raw = get_velocity(prev_velocity_raw, acc_norm, dt)\n\n #alpha = get_alpha(prev_acc, acc_norm)\n #alpha = get_alpha(prev_velocity_raw, velocity_raw)\n print(\"Alpha: \",alpha)\n \n combined_angle = get_combined_angle(acc_angle, gyro_angle, alpha)\n #print(\"Combined angle: \",combined_angle)\n \n angle_for_compensation = [combined_angle[0], combined_angle[1], acc_angle[2]]\n 
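#gravity_compensation below subtracts the gravity vector predicted from these angles;\n #e.g. for a perfectly level, static sensor the residual should vanish (a hypothetical\n #check, not in the original):\n #assert abs(gravity_compensation([0.0, 0.0, 1.0], [0.0, 0.0, 0.0])[2]) < 1e-6\n 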
#print(angle_for_compensation)\n print(\"Angle for Compensation:\")\n print(\"\\tx: \",angle_for_compensation[0])\n print(\"\\ty: \",angle_for_compensation[1])\n print(\"\\tz: \",angle_for_compensation[2])\n\n acc_norm_compensated = gravity_compensation(acc_norm, angle_for_compensation)\n #print(\"Raw\",acc_norm)\n #print(\"Compensated\",acc_norm_compensated)\n print(\"Accelerometer Norm:\")\n print(\"\\tx: \",acc_norm[0])\n print(\"\\ty: \",acc_norm[1])\n print(\"\\tz: \",acc_norm[2])\n\n print(\"Compensated Accelerometer Normal:\")\n print(\"\\tx: \",acc_norm_compensated[0])\n print(\"\\ty: \",acc_norm_compensated[1])\n print(\"\\tz: \",acc_norm_compensated[2])\n \n velocity = get_velocity(prev_velocity, acc_norm_compensated, dt)\n #print(\"Velocity: \",velocity)\n print(\"Velocity:\")\n print(\"\\tx: \",velocity[0])\n print(\"\\ty: \",velocity[1])\n print(\"\\tz: \",velocity[2])\n \n position = get_position(prev_position, prev_velocity, dt)\n #print(\"Position: \",position)\n print(\"Position from origin:\")\n print(\"\\tx: \",position[0])\n print(\"\\ty: \",position[1])\n print(\"\\tz: \",position[2])\n print(\"\")\n\n prev_velocity_raw = velocity_raw \n prev_acc = acc_norm\n prev_velocity = velocity\n prev_position = position\n \n #combined_angle_rad = list_to_radian(combined_angle)\n \n x, y, z, w = euler_to_quaternion(combined_angle[0]*to_rad, combined_angle[1]*to_rad, combined_angle[2]*to_rad)\n\n\n\ndef direct_acc_angle():\n acc_raw, gyro_raw = read_raw_values()\n acc_norm = normalize_acc_data(acc_raw)\n acc_angle = get_angle_acc(acc_norm)\n acc_angle[2] = 0\n \n return acc_angle\n\nbus = smbus.SMBus(1) # or bus = smbus.SMBus(0) for older version boards\nDevice_Address = 0x68 # MPU6050 device address\n\nif __name__==\"__main__\":\n MPU_Init()\n offset_calculation()\n print (\" Reading Data of Gyroscope and Accelerometer\")\n t1 = time.time()\n combined_angle = direct_acc_angle()\n \n while True:\n try:\n dt=time.time()-t1\n t1=time.time()\n \n get_data(dt)\n #time.sleep(0.1)\n except Exception as e:\n print(e)\n #pass\n\n\n","sub_path":"imu_temp_sampling_acc_noob.py","file_name":"imu_temp_sampling_acc_noob.py","file_ext":"py","file_size_in_byte":10430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"370707924","text":"# coding = \"utf-8\"\n\n# 爬取所有歌曲的链接信息,写文件代码在获得数据后注销,防止误操作\n\nfrom selenium import webdriver\nimport time\n\nsongFilePath = \"F://RescueMrZhou/Corpus/songSum.txt\"\n\nif __name__==\"__main__\":\n # 启动浏览器\n browser = webdriver.Chrome()\n # 打开要抓取的歌手页面\n browser.get('http://www.kuwo.cn/artist/content?name=周杰伦')\n # 点击歌曲按钮,获取歌曲列表\n songButton = browser.find_element_by_xpath('//div[@class=\"tab\"]//li[@id=\"tab_music\"]//span')\n songButton.click()\n time.sleep(3)\n print(\"songButton\",songButton)\n # songFile = open(songFilePath,\"w\",encoding=\"utf-8\")\n pageNum=1\n while(pageNum<65):\n songList = browser.find_elements_by_xpath('//div[@id=\"song\"]//ul[@class=\"listMusic\"]//li//div[@class=\"name\"]/a')\n for i in songList:\n print(i.text,i.get_attribute(\"href\"))\n # songFile.write(i.text)\n # songFile.write(\":\")\n # songFile.write(i.get_attribute(\"href\"))\n # songFile.write(\"\\n\")\n # 找到下页链接\n next = browser.find_element_by_xpath('//div[@class=\"page\"]//a[@class=\"next\"]')\n browser.execute_script(\"arguments[0].scrollIntoView()\", next)\n next.click()\n time.sleep(10)\n\n # songFile.close()\n 
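# an explicit wait (editor's sketch; assumes the usual selenium support imports) is\n # more robust than the fixed time.sleep(10) used in the loop above:\n # from selenium.webdriver.support.ui import WebDriverWait\n # from selenium.webdriver.support import expected_conditions as EC\n # from selenium.webdriver.common.by import By\n # WebDriverWait(browser, 15).until(EC.presence_of_element_located((By.CLASS_NAME, 'next')))\n 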
browser.close()\n\n","sub_path":"data_crawling/crawlLyricSum.py","file_name":"crawlLyricSum.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"189217478","text":"\"\"\"Contains serializers for models.\"\"\"\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.utils import timezone\nfrom rest_framework import serializers\nfrom .models import Game, Player, User\n\n\nclass UserReadOnlySerializer(serializers.ModelSerializer):\n \"\"\"A serializer for a user.\"\"\"\n\n class Meta:\n model = User\n fields = [\"username\", \"date_joined\", \"last_login\", \"is_staff\"]\n\n\nclass PlayerSerializer(serializers.ModelSerializer):\n \"\"\"A serializer for a player.\"\"\"\n\n user = serializers.SlugRelatedField(\n queryset=User.objects.all(),\n slug_field=\"username\",\n required=False,\n allow_null=True,\n )\n\n class Meta:\n model = Player\n fields = [\"id\", \"name\", \"user\"]\n\n\nclass GameSerializer(serializers.ModelSerializer):\n \"\"\"A serializer for a game.\"\"\"\n\n datetime_played = serializers.DateTimeField(\n default=lambda: timezone.localtime().strftime(\n settings.REST_FRAMEWORK[\"DATETIME_FORMAT\"]\n ),\n initial=lambda: timezone.localtime().strftime(\n settings.REST_FRAMEWORK[\"DATETIME_FORMAT\"]\n ),\n )\n winner = serializers.SlugRelatedField(\n queryset=Player.objects.all(), slug_field=\"name\"\n )\n loser = serializers.SlugRelatedField(\n queryset=Player.objects.all(), slug_field=\"name\"\n )\n winner_score = serializers.IntegerField(min_value=0, initial=8)\n loser_score = serializers.IntegerField(min_value=0, initial=0)\n submitted_by = serializers.SlugRelatedField(\n queryset=User.objects.all(), slug_field=\"username\"\n )\n\n class Meta:\n model = Game\n fields = [\n \"id\",\n \"datetime_played\",\n \"winner\",\n \"loser\",\n \"winner_score\",\n \"loser_score\",\n \"submitted_by\",\n ]\n\n def validate(self, attrs):\n \"\"\"Call model's clean method.\"\"\"\n attrs = super().validate(attrs)\n\n try:\n Game(**attrs).clean()\n except ValidationError as e:\n raise serializers.ValidationError(str(e))\n\n return attrs\n","sub_path":"backend/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"184506644","text":"town = input().lower()\nsales = float(input())\n\ncommissions = {}\nhas_errors = False\n\nif sales >= 0 and sales <= 500:\n commissions = {\n \"sofia\": 0.05,\n \"varna\": 0.045,\n \"plovdiv\": 0.055\n }\nelif sales > 500 and sales <= 1000:\n commissions = {\n \"sofia\": 0.07,\n \"varna\": 0.075,\n \"plovdiv\": 0.08\n }\nelif sales > 1000 and sales <= 10_000:\n commissions = {\n \"sofia\": 0.08,\n \"varna\": 0.10,\n \"plovdiv\": 0.12\n }\nelif sales > 10_000:\n commissions = {\n \"sofia\": 0.12,\n \"varna\": 0.13,\n \"plovdiv\": 0.145\n }\n\nif not town in commissions:\n has_errors = True\n\nif has_errors:\n print(\"error\")\nelse:\n commission_percent = commissions[town]\n earned = commission_percent * sales\n print(\"{0:.2f}\".format(earned))\n","sub_path":"PythonBasics/04. 
ComplexConditionalStatements/08.TradeCommissions.py","file_name":"08.TradeCommissions.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"379459967","text":"from azure.storage.blob import BlobServiceClient,ContentSettings\r\nimport requests\r\nimport json\r\nimport os\r\nimport re\r\nfrom datetime import datetime\r\nimport uuid\r\nimport azure.functions as func\r\n\r\nclass EndpointsClient:\r\n # Azure Public\r\n url = \"https://www.microsoft.com/en-us/download/confirmation.aspx?id=56519\"\r\n # Azure Government\r\n #url = \"https://www.microsoft.com/en-us/download/confirmation.aspx?id=57063\"\r\n # Azure China\r\n #url = \"https://www.microsoft.com/en-us/download/confirmation.aspx?id=57062\"\r\n # Azure Germany\r\n #url = \"https://www.microsoft.com/en-us/download/confirmation.aspx?id=57064\"\r\n RE_PATTERN = 'https:\\\\/\\\\/download\\\\.microsoft\\\\.com\\\\/download\\\\/[a-zA-Z0-9\\\\/\\\\-\\\\_\\\\.]+'\r\n\r\n def __init__(self, storage_connection_string, storage_container_name, working_path):\r\n service_client = BlobServiceClient.from_connection_string(storage_connection_string)\r\n self.client = service_client.get_container_client(storage_container_name)\r\n self.uuid = str(uuid.uuid4())\r\n self.container_name = storage_container_name\r\n self.main_page = 'main.html'\r\n self.out_path = 'artifacts'\r\n self.artifacts_path = working_path + '/' + self.out_path\r\n self.main_page_path = working_path + '/' + self.main_page\r\n if not os.path.exists(self.artifacts_path):\r\n os.mkdir(self.artifacts_path)\r\n def get_service_endpoints(self):\r\n '''\r\n Get Azure Service endpoint IP addresses\r\n '''\r\n regex = re.compile(EndpointsClient.RE_PATTERN)\r\n r = requests.get(EndpointsClient.url)\r\n self.article_text = r.text\r\n m = regex.findall(self.article_text)\r\n r = requests.get(m[0], stream=True)\r\n response = r.raw\r\n self.service_tags = json.load(response)\r\n def export_locally(self,prepend_value=''):\r\n '''\r\n Store obtained data locally\r\n '''\r\n for key in self.service_tags['values']:\r\n with open(f\"{self.artifacts_path}/{prepend_value}{key['id']}.txt\", 'w') as out_file:\r\n for item in key['properties']['addressPrefixes']:\r\n out_file.write(\"%s\\n\" % item)\r\n def new_main_page(self):\r\n '''\r\n Generate main webpage\r\n '''\r\n artifacts_files = os.listdir(self.artifacts_path)\r\n artifacts_files.sort(reverse=True)\r\n main_page_content = '\\n\\n\\n\\n Generated date:
' + str(datetime.now()) + '<br/><br/>Generated list:<br/>'\r\n for item in artifacts_files:\r\n print(item)\r\n main_page_content = main_page_content + '<a href=\"' + item + '\">' + item + '</a><br/>\\n
'\r\n main_page_content += ''\r\n with open(self.main_page_path,'w') as out_file:\r\n out_file.write(\"%s\" % main_page_content)\r\n def upload_main_page(self):\r\n '''\r\n Upload main webpage\r\n '''\r\n self.upload_file(self.main_page_path,self.main_page)\r\n def upload(self, source, dest):\r\n '''\r\n Upload a file or directory to a path inside the container\r\n '''\r\n if (os.path.isdir(source)):\r\n self.upload_dir(source, dest)\r\n else:\r\n self.upload_file(source, dest)\r\n def upload_file(self, source, dest):\r\n '''\r\n Upload a single file to a path inside the container\r\n '''\r\n content_settings=ContentSettings(content_type='text/html')\r\n print(f'Uploading {source} to {dest}')\r\n with open(source, 'rb') as data:\r\n self.client.upload_blob(name=dest, data=data, content_settings=content_settings, overwrite=True)\r\n def upload_dir(self, source='', dest=''):\r\n '''\r\n Upload a directory to a path inside the container\r\n '''\r\n if not source:\r\n source = self.artifacts_path\r\n prefix = '' if dest == '' else dest + '/'\r\n prefix += os.path.basename(source) + '/'\r\n for root, dirs, files in os.walk(source):\r\n for name in files:\r\n dir_part = os.path.relpath(root, source)\r\n dir_part = '' if dir_part == '.' else dir_part + '/'\r\n file_path = os.path.join(root, name)\r\n blob_path = prefix + dir_part + name\r\n self.upload_file(file_path, blob_path)\r\n def download(self, source, dest):\r\n '''\r\n Download a file or directory to a path on the local filesystem\r\n '''\r\n if not dest:\r\n raise Exception('A destination must be provided')\r\n blobs = self.ls_files(source, recursive=True)\r\n if blobs:\r\n # if source is a directory, dest must also be a directory\r\n if not source == '' and not source.endswith('/'):\r\n source += '/'\r\n if not dest.endswith('/'):\r\n dest += '/'\r\n # append the directory name from source to the destination\r\n dest += os.path.basename(os.path.normpath(source)) + '/'\r\n blobs = [source + blob for blob in blobs]\r\n for blob in blobs:\r\n blob_dest = dest + os.path.relpath(blob, source)\r\n self.download_file(blob, blob_dest)\r\n else:\r\n self.download_file(source, dest)\r\n def download_file(self, source, dest):\r\n '''\r\n Download a single file to a path on the local filesystem\r\n '''\r\n # dest is a directory if ending with '/' or '.', otherwise it's a file\r\n if dest.endswith('.'):\r\n dest += '/'\r\n blob_dest = dest + os.path.basename(source) if dest.endswith('/') else dest\r\n print(f'Downloading {source} to {blob_dest}')\r\n os.makedirs(os.path.dirname(blob_dest), exist_ok=True)\r\n bc = self.client.get_blob_client(blob=source)\r\n with open(blob_dest, 'wb') as file:\r\n data = bc.download_blob()\r\n file.write(data.readall())\r\n def ls_files(self, path, recursive=False):\r\n '''\r\n List files under a path, optionally recursively\r\n '''\r\n if not path == '' and not path.endswith('/'):\r\n path += '/'\r\n blob_iter = self.client.list_blobs(name_starts_with=path)\r\n files = []\r\n for blob in blob_iter:\r\n relative_path = os.path.relpath(blob.name, path)\r\n if recursive or not '/' in relative_path:\r\n files.append(relative_path)\r\n return files\r\n\r\ndef main(mytimer: func.TimerRequest) -> None:\r\n client = EndpointsClient(storage_connection_string=os.environ['AzureWebJobsStorage'], storage_container_name='$web',working_path='/tmp')\r\n client.get_service_endpoints()\r\n client.export_locally()\r\n client.upload_dir()\r\n client.new_main_page()\r\n 
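# note (editor): new_main_page() only renders links for files already present in the\r\n # local artifacts directory, so it must run after export_locally() and before the\r\n # main-page upload below\r\n 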
client.upload_main_page()\r\n","sub_path":"TimerTrigger/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"221008606","text":"import os\nimport sys\nimport re\nimport glob\nimport logging\nimport subprocess\nimport numpy as np\nfrom itertools import groupby\nfrom collections import namedtuple\nfrom nest.parsers.fastq import Fastq\n\nclass Identifier:\n \"\"\" The Identifier class contains a set of modules that recognize the\n type of fastq file from the fastq the fastq header. The class is initialzed\n by passing a record to the constructor\"\"\"\n\n def __init__(self, record):\n \"\"\"Initialize the Identifier class from a fastq record\"\"\"\n self.rec = record\n\n\n def isIlluminaOld(self):\n \"\"\"Identify fastq headers in the following format\n @HWUSI-EAS100R:6:73:941:1973#0/1\"\"\"\n header_regex = re.compile('@\\w+-?\\w+:\\d+:\\d+:\\d+:\\d+#\\d*')\n match = re.fullmatch(header_regex, self.rec.header)\n if match != None:\n return(True)\n else:\n return(False)\n\n\n def isIlluminaNew(self):\n \"\"\"Idetify fastq headers in the following format\n #@D00468:24:H8ELMADXX:1:1101:1470:2237 1:N:0:2\"\"\"\n header_regex = re.compile('@\\w+-?\\w+:\\d+:\\w+-?\\w+:\\d+:\\d+:\\d+:\\d+\\s\\d:\\w+:\\w+:\\w*')\n match = re.fullmatch(header_regex, self.rec.header)\n if match != None:\n return(True)\n else:\n return(False)\n\n\n def isSraOld(self):\n \"\"\"Identify fastq headers in the following format\n #@SRR037455.1 HWI-E4_6_30ACL:4:1:0:29 length=35\n #@SRR902931.1 HWI-ST1384:61:D1DJ4ACXX:8:1101:1240:2015 length=50\"\"\"\n header_regex = re.compile('@\\w+\\.?\\w+ \\w+-?\\w+:\\d+:\\d+:\\d+:\\d+ length=\\d+')\n match = re.fullmatch(header_regex, self.rec.header)\n if match != None:\n return(True)\n else:\n return(False)\n\n def isSraNew(self):\n \"\"\"Identify fastq headers in the following format\n #@SRR1873770.5 DH1DQQN1:437:HACT2ADXX:1:2204:8270:58140 length=150\"\"\"\n header_regex = re.compile('@\\w+\\.?\\w+ \\w+-?\\w+:\\d+:\\w+:\\d+:\\d+:\\d+:\\d+ length=\\d+')\n match = re.fullmatch(header_regex, self.rec.header)\n if match != None:\n return(True)\n else:\n return(False)\n\n def isENA(self):\n \"\"\"Identify fastq headers in the following format\n # ERR161234.14 14 length=100\"\"\"\n header_regex = re.compile('@[\\w\\.]+ \\d+ length=\\d+')\n match = re.fullmatch(header_regex, self.rec.header)\n if match != None:\n return(True)\n else:\n return(False)\n\n def isPacbio(self):\n \"\"\"Identify fastq headers in the following format\n #@m160113_152755_42135_c100906712550000001823199104291667_s1_p0/15/7044_26271\"\"\"\n header_regex = re.compile('@\\w+/\\d+/\\d+_\\d+')\n match = re.fullmatch(header_regex, self.rec.header)\n if match != None:\n return(True)\n else:\n return(False)\n\n def isFastq(self):\n \"\"\"Identify fastq with any other header format\n @\\w+\"\"\"\n header_regex = re.compile('@.+')\n match = re.fullmatch(header_regex, self.rec.header)\n if match != None:\n return(True)\n else:\n return(False)\n\n\nclass Metrics:\n \"\"\"The Metrics class calculates read length and quality metrics for a given\n fastq file. The class is initialized with the following inputs:\n 1. 
fastq (str): Path to the fastq file\"\"\"\n\n def __init__(self, fastq):\n self.fastq = fastq\n\n def avgReadLen(self):\n \"\"\"Given a fastq file, calculate average read length\"\"\"\n fastq_reader = Fastq(self.fastq, './', 'phred33')\n total_length = 0\n total_reads = 0\n for lines in fastq_reader.read():\n total_length += len(lines.seq)\n total_reads += 1\n if total_reads >= 100:\n break\n\n avg_length = total_length/float(total_reads)\n return(avg_length)\n\nclass Prepper:\n \"\"\"Prepper class creates the config dictionary for a given study. The class\n constructor takes the following input parameters:\n 1. input_path (str) : Path to input directory or sra accession list\n 2. sra_path (str) : Path to fastq-dump executable \"\"\"\n\n def __init__(self, input_path, sra_path):\n self.input_path = os.path.abspath(input_path)\n self.sra_path = sra_path\n self.logger = logging.getLogger('NeST.prepInputs')\n\n def downloadSRA(self):\n \"\"\"Given an SRA accession list, download all the associated fastq files\"\"\"\n out_dir = os.path.dirname(self.input_path)\n sra_list = open(self.input_path)\n for accessions in sra_list:\n self.logger.debug('Downloading : {0}'.format(accessions))\n accessions = accessions.strip()\n fqd_cmd = [self.sra_path, '--gzip', '--split-3', '-O', out_dir,\n '-A', accessions]\n fqd_run = subprocess.Popen(fqd_cmd, shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n fqd_run.wait()\n if fqd_run.returncode != 0:\n self.logger.error('Could not download {0}'.format(accessions))\n self.logger.error(' '.join(fqd_cmd))\n else:\n self.logger.info('Download complete: {0}'.format(accessions))\n return(out_dir)\n\n def getFastqPaths(self):\n \"\"\"Given a directory path, extract all the fastq file names\"\"\"\n filenames = list()\n for subdir, dirname, files in os.walk(self.input_path):\n for filename in files:\n if ('.fastq' in filename or '.fastq.gz' in filename or\n 'fq' in filename or 'fq.gz' in filename):\n filepath = subdir + os.sep + filename\n filenames.append(filepath)\n return(filenames)\n\n def getReadPresence(self, file_name, minimum=1):\n \"\"\"Given a fastq file, check for the presence of a minimum number of\n reads\"\"\"\n reader = Fastq(file_name, None, 'phred33')\n read_number = 0\n for rec in reader.read():\n read_number += 1\n if read_number >= minimum:\n return(True)\n else:\n return(False)\n\n def parseMaRS(self, file_name):\n \"\"\"Given a fastq file, check if the file name follows the MaRS regex:\n <Year>{2}<Country>{2}<Site>{2}<DT>{2}<Treatment>{1}\n <SID>{4}<GS>{2}<ST>{1}<Markers>{3}<Rep>{1}\n If the regex is found, the module decodes the sample information and\n returns a namedtuple for the sample\"\"\"\n\n mars_regex = ('(?P<Year>[0-9x]{2})(?P<Country>[A-Zx]{2})'\n '(?P<Site>[A-Zx]{2})(?P<DT>[0-9]{2})'\n '(?P<Treatment>[A-Kx]{1})(?P<SID>[0-9]{4})'\n '(?P<GS>[a-zA-Z]{2})(?P<ST>[BFPTSx]{1})'\n '(?P<Markers>[0-9]{3})(?P<Rep>[0-9]{1})')\n mars_groups = re.match(mars_regex, file_name)\n sample_info = namedtuple('Sample', ['Year', 'Country', 'Site',\n 'TreatmentDay', 'Treatment', 'ID', 'Genus', 'Type', 'Markers',\n 'Replicate'])\n\n if not mars_groups:\n marker_list = np.array(['PfK13', 'PfCRT', 'PfMDR', 'MT', 'CytB',\n 'PfDHPS', 'PfDHFR'])\n sample = sample_info(np.nan, 'xx', 'xx', 'xx', 'xx', 'xxxx', 'xx',\n 'x', marker_list, 1)\n return(sample)\n if mars_groups.group('Year') == 'xx':\n year = np.nan\n else:\n year = int('20' + mars_groups.group('Year'))\n country = mars_groups.group('Country')\n site = mars_groups.group('Site')\n treatDate = mars_groups.group('DT')\n treatment = mars_groups.group('Treatment')\n sampleID = mars_groups.group('SID')\n genusSpecies = mars_groups.group('GS')\n type_dict = {'B': 'Blood', 'F': 'Filter blood spots', 'P': 'Plasma',\n 'T': 'Tissue', 'S': 'Stool', 'x': 'Unknown'}\n type = type_dict[mars_groups.group('ST')]\n marker_list = np.array(['PfK13', 'PfCRT', 'PfMDR', 'MT', 'CytB',\n 'PfDHPS', 'PfDHFR'])\n marker_found = list()\n markers = int(mars_groups.group('Markers'))\n while len(marker_found) < 8:\n markers, rem = divmod(markers, 2)\n marker_found.append(rem)\n\n marker_found = np.array(marker_found[::-1][1:])\n marker_list = np.extract(marker_found, marker_list)\n rep = int(mars_groups.group('Rep'))\n sample = sample_info(year, country, site, treatDate, treatment,\n sampleID, genusSpecies, type, marker_list, rep)\n return(sample)\n\n def prepInputs(self):\n \"\"\"Given an input directory path or SRA accession list, iterate through\n the list and create a sample record for each file. Each sample record is\n added to a dictionary with the sample name as the key and the sample record\n as the value\"\"\"\n if os.path.isfile(self.input_path):\n self.logger.info('Found SRA accession list, '\n 'will download files from SRA')\n self.input_path = self.downloadSRA()\n files = self.getFastqPaths()\n experiment = dict()\n for fastq in files:\n reader = Fastq(fastq, './', 'phred33')\n Sample = namedtuple('Sample', ['sample', 'libname', 'library',\n 'files', 'prep', 'paired', 'year', 'country', 'site',\n 'treatmentDay', 'treatment', 'iD', 'genus', 'type', 'markers',\n 'replicate'])\n readPresence = self.getReadPresence(fastq)\n if not readPresence:\n self.logger.warning('Sample doesn\\'t contain minimum number of required reads; skipping sample : {0}'.format(fastq))\n continue\n rec = next(reader.read())\n identifier = Identifier(rec)\n metric = Metrics(fastq)\n isIllOld = identifier.isIlluminaOld()\n isIllNew = identifier.isIlluminaNew()\n isSraOld = identifier.isSraOld()\n isSraNew = identifier.isSraNew()\n isPac = identifier.isPacbio()\n isENA = identifier.isENA()\n isFastq = identifier.isFastq()\n seqType = ''\n libType = ''\n sample_regex = re.compile('_r1|_r2|_?l001|_?l002|_?l003|_?l004|_R1|_R2|_L001|_?L002|_L003|_L004|_1|_2') #|L001|L002|L003|L004')\n sample = sample_regex.split(os.path.basename(fastq))[0]\n sample_info = self.parseMaRS(sample)\n year = sample_info.Year\n country = sample_info.Country\n site = sample_info.Site\n td = sample_info.TreatmentDay\n treatment = sample_info.Treatment\n sid = sample_info.ID\n gs = sample_info.Genus\n stype = sample_info.Type\n markers = sample_info.Markers\n replicate = sample_info.Replicate\n if isIllOld:\n paired_regex = re.compile('@\\w+-?\\w+:\\d+:\\d+:\\d+:\\d+#\\d')\n lib = re.findall(paired_regex, rec.header)[0]\n paired = False\n seqType = 
'Illumina'\n #if metric.avgReadLen(): removing unnecessary call for the time being ##01/24/19\n libType = 'Short'\n elif isIllNew:\n paired_regex = re.compile('@\\w+-?\\w+:\\d+:\\w+-?\\w+:\\d+:\\d+:\\d+:\\d+\\s')\n lib = re.findall(paired_regex, rec.header)[0]\n paired = False\n seqType = 'Illumina'\n #if metric.avgReadLen():\n libType = 'Short'\n elif isSraOld:\n paired_regex = re.compile('@\\w+\\.?\\w+ \\w+-?\\w+:\\d+:\\d+:\\d+:\\d+ length=\\d+')\n lib = re.findall(paired_regex, rec.header)[0]\n paired = False\n seqType = 'Illumina'\n #if metric.avgReadLen():\n libType = 'Short'\n elif isSraNew:\n paired_regex = re.compile('@\\w+\\.?\\w+ \\w+-?\\w+:\\d+:\\w+:\\d+:\\d+:\\d+:\\d+ length=\\d+')\n lib = re.findall(paired_regex, rec.header)[0]\n paired = False\n seqType = 'Illumina'\n #if metric.avgReadLen():\n libType = 'Short'\n elif isENA:\n paired_regex = re.compile('@[\\w\\.]+ \\d+ length=\\d+')\n lib = re.findall(paired_regex, rec.header)[0]\n paired = False\n seqType = 'Illumina'\n #if metric.avgReadLen():\n libType = 'Short'\n elif isPac:\n lib_regex = re.compile('@\\w+_\\d+_\\d+_\\w+')\n lib = re.findall(lib_regex, rec.header)[0]\n paired = False\n seqType = 'Pacbio'\n #if metric.avgReadLen():\n libType = 'Long'\n elif isFastq:\n lib_regex = re.compile('@.+')\n lib = re.findall(lib_regex, rec.header)[0]\n paired = False\n seqType = 'Unknown'\n #if metric.avgReadLen():\n libType = 'Short'\n else:\n self.logger.warning('Read from {0} with header : {1} does not follow any defined fastq header format.Please correct it'.format(fastq, rec.header))\n try:\n paired = True\n experiment[sample] = Sample(sample, lib, seqType,\n [experiment[sample].files[0],fastq], libType, paired,\n year, country, site, td, treatment, sid, gs, stype, markers,\n replicate)\n except (KeyError, AttributeError):\n experiment[sample] = Sample(sample, lib, seqType, [fastq],\n libType, paired, year, country, site, td, treatment, sid, gs,\n stype, markers, replicate)\n self.logger.info('A total of {0} libraries were identified from the given folder {1}'.format(len(experiment), self.input_path))\n self.logger.debug('The following libraries were detected in the given folder : {0}'.format(self.input_path))\n for sample, values in experiment.items():\n self.logger.debug('Sample : {0}; Library: {1} ; Sequence type: {2} ; Files: {3} ; Library type: {4} ; Paired: {5}'.format(\n values.sample, values.libname, values.library, ''.join(values.files), values.prep, values.paired))\n for samples, info in experiment.items():\n if not info.paired:\n self.logger.warning('NeST does not currently support single end runs; skipping sample : {0}'.format(samples))\n experiment.pop(samples)\n return(experiment)\n\nif __name__ == '__main__':\n path = os.path.abspath(sys.argv[1])\n prepper = Prepper(path)\n experiment = prepper.prepInputs()\n rone = list()\n rtwo = list()\n for study in experiment:\n for files in experiment[study].files:\n if re.findall('.*_R1.*', files):\n rone.append(files)\n else:\n rtwo.append(files)\n print(rone)\n print(rtwo)\n","sub_path":"nest/prepinputs.py","file_name":"prepinputs.py","file_ext":"py","file_size_in_byte":15062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"481068993","text":"import matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport matplotlib.image as mpimg\nimport matplotlib.animation as animation\nimport math\nfrom template_config import *\n\n# SPECIFY CONFIG FILE HERE\nfrom config.paper_config import *\n\nfig = plt.figure() # 
(figsize=(9,7))\n\nif do_densanim :\n\tfileprefix = densanim_direction\n\twritename = densanim_direction + '_dens_'\nelif do_bernoulli :\n\tfileprefix = 'bern'\n\twritename = 'bern_'\n\ndef animate(i):\n\tplt.clf()\n\tdataset = i * frameskip + startingset\n\tfile = fileprefix + '_snap_' + simname + '_ds' + str(dataset) + '.png'\n\timg = mpimg.imread( framepath + file )\n\timgplot = plt.imshow(img)\n\tplt.axis('off')\n\t# plt.tight_layout()\n\tprint('mergeanim: Added file ' + file)\n\treturn imgplot\n\nanim = animation.FuncAnimation(fig, animate, frames = nframes, interval = period, repeat = False)\nsaveas = writepath + writename + simname + '.mp4'\nanim.save( saveas, dpi=500 )\nprint('mergeanim: Saved animation ' + saveas)\n","sub_path":"mergeanim.py","file_name":"mergeanim.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"97974976","text":"'''\r\n相机:0\r\n估计相对位置由无人机相机高度、云台角度和像素点坐标偏移量计算 *通过微调云台角��使目标中心点坐标基本保持在图像的中心位置\r\n融合位置信息是由无人机三维坐标(GPS)和估计相对位置简单加权融合得到\r\n无人机基本到达降落平台正上方时只根据像素点坐标来调整\r\nCamera:0\r\nThe estimated relative position is calculated by the height of the UAV camera, PTZ angles and the pixel coordinate offsets *By fine-tuning the angles of the PTZ, the coordinate of the target center point is basically kept at the center of the image\r\nThe fusion position information is obtained by simple weighted fusion of the UAV three-dimensional coordinate (GPS) and the estimated relative position\r\nWhen the UAV basically reaches the landing platform, it is only adjusted according to the pixel coordinate\r\n'''\r\nimport airsim\r\nimport time\r\nimport math\r\nimport pprint\r\nimport os\r\nimport tempfile\r\nimport numpy as np\r\nimport cv2\r\nimport cv2.aruco as aruco\r\nimport xlwt\r\n\r\nwb = xlwt.Workbook(encoding='utf-8')\r\nws = wb.add_sheet('data')\r\nws.write(0, 0, label = 'UAV_position_x_val')\r\nws.write(0, 1, label = 'UAV_position_y_val')\r\nws.write(0, 2, label = 'UAV_position_z_val')\r\nws.write(0, 3, label = 'estimate_x')\r\nws.write(0, 4, label = 'estimate_y')\r\nws.write(0, 5, label = 'fusion_x')\r\nws.write(0, 6, label = 'fusion_y')\r\nws.write(0, 7, label = 'camera_yaw')\r\nws.write(0, 8, label = 'camera_pitch')\r\nws.write(0, 9, label = 'vx')\r\nws.write(0, 10, label = 'vy')\r\nws.write(0, 11, label = 'vz')\r\n\r\n#连接到Airsim模拟器\r\n#connect to Airsim simulator\r\nclient = airsim.MultirotorClient()\r\nclient.confirmConnection()\r\nclient.enableApiControl(True)\r\nclient.armDisarm(True)\r\n\r\n#相机图片大小\r\n#camera picture size\r\nimage_width = 1080\r\nimage_height = 720\r\n\r\n#设定水平最大速度、最大降落速度、安全降落高度\r\n#set the horizontal maximum speed, the maximum landing speed and the safe landing height\r\nvmax = 5\r\nvzmax = 2\r\nh = 3\r\n\r\n#起飞,飞到目标点附近某位置\r\n#take off and fly to a position near the target point\r\nairsim.wait_key('Press any key to takeoff')\r\nclient.takeoffAsync().join()\r\nprint(\"already flying...\")\r\nclient.hoverAsync().join()\r\nclient.moveToPositionAsync(-15, 12, -16, 2).join()\r\ntime.sleep(10)\r\n\r\n#获取无人机当前位置\r\n#get current location of the UAV\r\nUAV_position_x_val = client.getMultirotorState().kinematics_estimated.position.x_val\r\nUAV_position_y_val = client.getMultirotorState().kinematics_estimated.position.y_val\r\nUAV_position_z_val = -client.getMultirotorState().kinematics_estimated.position.z_val\r\nif UAV_position_x_val > 0:\r\n sgn_x = 1\r\nelse:\r\n sgn_x = -1\r\nif UAV_position_y_val > 0:\r\n sgn_y = 1\r\nelse:\r\n sgn_y = 
-1\r\n\r\n#无人机与相机高度不同,读取相机高度\r\n#the height of the UAV and the camera is different, read the height of the camera\r\ncamara_height = -client.simGetCameraInfo(0).pose.position.z_val\r\n\r\n#计算相机初始角度,转动相机对准目标点\r\n#calculate the initial angles of the camera, rotate the camera to aim at the target point\r\nx = UAV_position_x_val\r\ny = UAV_position_y_val\r\nz = camara_height\r\ncamera_yaw = math.asin(-y/math.sqrt(x*x + y*y))\r\ncamera_pitch = math.asin(-z/math.sqrt(x*x + y*y + z*z))\r\n#print(camera_yaw)\r\n#print(camera_pitch)\r\nclient.simSetCameraOrientation(0, airsim.to_quaternion(camera_pitch, 0, camera_yaw))\r\n\r\n#计算和比较最小水平移动时间t1和最小降落时间t2,决定降落方案,设定初始速度\r\n#calculate and compare the minimum horizontal moving time t1 and the minimum landing time t2, determine the landing scheme, and set the initial speeds\r\nt1 = math.sqrt(x*x + y*y)/vmax\r\nt2 = UAV_position_z_val/vzmax\r\nif t1 > t2:\r\n vx = -x/math.sqrt(x*x + y*y)*vmax\r\n vy = -y/math.sqrt(x*x + y*y)*vmax\r\n vz = (UAV_position_z_val - h)/t1\r\nelse:\r\n vx = -x/math.sqrt(x*x + y*y)*(t1/t2)*vmax\r\n vy = -y/math.sqrt(x*x + y*y)*(t1/t2)*vmax\r\n vz = vzmax\r\n\r\n\r\n#设定执行周期dt\r\n#set the execution cycle dt\r\ndt = 0.5\r\n\r\n#GPS信号标志位\r\n#the GPS signal marker\r\nGPS_signal = 1\r\n\r\n#GPS信息与视觉信息权重\r\n#the weights of the GPS information and the visual information\r\nGPS_weight = 0.9\r\nvisual_weight = 0.1\r\n\r\n#PID参数\r\n#the PID parameters\r\nKp = 1\r\nKi = 0\r\nKd = 0\r\n\r\nrow = 1\r\n\t\r\n#降落\r\n#landing\r\nwhile UAV_position_z_val > h:\r\n client.moveByVelocityAsync(vx, vy, vz, dt, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False, 0)).join\r\n time.sleep(10)\r\n #if abs(UAV_position_x_val) > 0.5 and abs(UAV_position_y_val) > 0.5:\r\n # time.sleep(10)\r\n #else:\r\n\t# time.sleep(0.5)\r\n\t\r\n\t#保存相机图片\r\n\t#save the camera images\r\n responses = client.simGetImages([\r\n airsim.ImageRequest(\"0\", airsim.ImageType.Scene),\r\n airsim.ImageRequest(\"3\", airsim.ImageType.Scene), \r\n airsim.ImageRequest(\"4\", airsim.ImageType.Scene)]) \r\n print('Retrieved images: %d' % len(responses))\r\n\t\r\n tmp_dir = os.path.join(tempfile.gettempdir(), \"airsim_drone\")\r\n print (\"Saving images to %s\" % tmp_dir)\r\n try:\r\n os.makedirs(tmp_dir)\r\n except OSError:\r\n if not os.path.isdir(tmp_dir):\r\n raise\r\n\t\r\n for idx, response in enumerate(responses):\r\n\r\n filename = os.path.join(tmp_dir, str(idx))\r\n \r\n if response.pixels_as_float:\r\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_float)))\r\n airsim.write_pfm(os.path.normpath(filename + '.pfm'), airsim.get_pfm_array(response))\r\n elif response.compress:\r\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_uint8)))\r\n airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)\r\n else:\r\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_uint8)))\r\n img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) \r\n img_rgb = img1d.reshape(response.height, response.width, 3)\r\n cv2.imwrite(os.path.normpath(filename + '.png'), img_rgb)\r\n\t\r\n\t#识别ArUco\r\n\t#detect the ArUco code\r\n if abs(UAV_position_x_val) > 1 or abs(UAV_position_y_val) > 1:\r\n img = cv2.imread('../Appdata/Local/Temp/airsim_drone/0.png')\r\n else:\r\n\t img = cv2.imread('../Appdata/Local/Temp/airsim_drone/1.png')\r\n aruco_dict = cv2.aruco_Dictionary.get(aruco.DICT_6X6_250)\r\n parameters = cv2.aruco_DetectorParameters.create()\r\n corners, ids, rejectedImgPoints = 
cv2.aruco.detectMarkers(img, aruco_dict, parameters=parameters)\r\n \r\n\t#计算目标坐标偏移量\r\n\t#calculate the target coordinate offsets\r\n x1 = corners[0][0][0][0]\r\n y1 = corners[0][0][0][1]\r\n x2 = corners[0][0][1][0]\r\n y2 = corners[0][0][1][1]\r\n x3 = corners[0][0][2][0]\r\n y3 = corners[0][0][2][1]\r\n x4 = corners[0][0][3][0]\r\n y4 = corners[0][0][3][1]\r\n center_x = (x1 + x2 + x3 + x4)/4\r\n center_y = (y1 + y2 + y3 + y4)/4\r\n print(center_x,center_y)\r\n dx = center_x - image_width/2\r\n dy = center_y - image_height/2\r\n\t\r\n\t#粗调\r\n\t#coarse adjustment\r\n while ((abs(UAV_position_x_val) > 1 or abs(UAV_position_y_val) > 1) and (abs(dx) > 2 or abs(dy) > 2)):\r\n\t #根据偏移调整相机角度\r\n\t\t#adjust the camera angles according to the offsets\r\n if dx > 0:\r\n camera_yaw = camera_yaw + 0.001\r\n else:\r\n camera_yaw = camera_yaw - 0.001\r\n if dy > 0:\r\n camera_pitch = camera_pitch - 0.002\r\n else:\r\n camera_pitch = camera_pitch + 0.002\r\n \r\n #重新调整相机\r\n\t\t#readjust the camera\r\n client.simSetCameraOrientation(0, airsim.to_quaternion(camera_pitch, 0, camera_yaw))\r\n\t\t\r\n\t\t#保存相机图片\r\n\t\t#save the camera images\r\n responses = client.simGetImages([\r\n airsim.ImageRequest(\"0\", airsim.ImageType.Scene),\r\n airsim.ImageRequest(\"3\", airsim.ImageType.Scene), \r\n airsim.ImageRequest(\"4\", airsim.ImageType.Scene)]) \r\n print('Retrieved images: %d' % len(responses))\r\n\t\r\n tmp_dir = os.path.join(tempfile.gettempdir(), \"airsim_drone\")\r\n print (\"Saving images to %s\" % tmp_dir)\r\n try:\r\n os.makedirs(tmp_dir)\r\n except OSError:\r\n if not os.path.isdir(tmp_dir):\r\n raise\r\n\t\r\n for idx, response in enumerate(responses):\r\n\r\n filename = os.path.join(tmp_dir, str(idx))\r\n \r\n if response.pixels_as_float:\r\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_float)))\r\n airsim.write_pfm(os.path.normpath(filename + '.pfm'), airsim.get_pfm_array(response))\r\n elif response.compress:\r\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_uint8)))\r\n airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)\r\n else:\r\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_uint8)))\r\n img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) \r\n img_rgb = img1d.reshape(response.height, response.width, 3)\r\n cv2.imwrite(os.path.normpath(filename + '.png'), img_rgb)\r\n \r\n #识别ArUco\r\n\t\t#detect the ArUco code\r\n img = cv2.imread('../Appdata/Local/Temp/airsim_drone/0.png')\r\n aruco_dict = cv2.aruco_Dictionary.get(aruco.DICT_6X6_250)\r\n parameters = cv2.aruco_DetectorParameters.create()\r\n corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(img, aruco_dict, parameters=parameters)\r\n \r\n\t #计算目标坐标偏移量\r\n\t\t#calculate the target coordinate offsets\r\n x1 = corners[0][0][0][0]\r\n y1 = corners[0][0][0][1]\r\n x2 = corners[0][0][1][0]\r\n y2 = corners[0][0][1][1]\r\n x3 = corners[0][0][2][0]\r\n y3 = corners[0][0][2][1]\r\n x4 = corners[0][0][3][0]\r\n y4 = corners[0][0][3][1]\r\n center_x = (x1 + x2 + x3 + x4)/4\r\n center_y = (y1 + y2 + y3 + y4)/4\r\n print(center_x,center_y)\r\n dx = center_x - image_width/2\r\n dy = center_y - image_height/2\t\r\n\t\r\n\t#细调\r\n\t#fine adjustment\r\n while ((abs(UAV_position_x_val) > 1 or abs(UAV_position_y_val) > 1) and (abs(dx) > 1 or abs(dy) > 1)):\r\n\t #根据偏移调整相机角度\r\n\t\t#adjust the camera angles according to the offsets\r\n if dx > 0:\r\n camera_yaw = camera_yaw + 0.0005\r\n else:\r\n camera_yaw = 
camera_yaw - 0.0005\r\n if dy > 0:\r\n camera_pitch = camera_pitch - 0.001\r\n else:\r\n camera_pitch = camera_pitch + 0.001\r\n \r\n #重新调整相机\r\n\t\t#readjust the camera\r\n client.simSetCameraOrientation(0, airsim.to_quaternion(camera_pitch, 0, camera_yaw))\r\n\t\t\r\n\t\t#保存相机图片\r\n\t\t#save the camera images\r\n responses = client.simGetImages([\r\n airsim.ImageRequest(\"0\", airsim.ImageType.Scene),\r\n airsim.ImageRequest(\"3\", airsim.ImageType.Scene), \r\n airsim.ImageRequest(\"4\", airsim.ImageType.Scene)]) \r\n print('Retrieved images: %d' % len(responses))\r\n\t\r\n tmp_dir = os.path.join(tempfile.gettempdir(), \"airsim_drone\")\r\n print (\"Saving images to %s\" % tmp_dir)\r\n try:\r\n os.makedirs(tmp_dir)\r\n except OSError:\r\n if not os.path.isdir(tmp_dir):\r\n raise\r\n\t\r\n for idx, response in enumerate(responses):\r\n\r\n filename = os.path.join(tmp_dir, str(idx))\r\n \r\n if response.pixels_as_float:\r\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_float)))\r\n airsim.write_pfm(os.path.normpath(filename + '.pfm'), airsim.get_pfm_array(response))\r\n elif response.compress:\r\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_uint8)))\r\n airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)\r\n else:\r\n print(\"Type %d, size %d\" % (response.image_type, len(response.image_data_uint8)))\r\n img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) \r\n img_rgb = img1d.reshape(response.height, response.width, 3)\r\n cv2.imwrite(os.path.normpath(filename + '.png'), img_rgb)\r\n \r\n #识别ArUco\r\n\t\t#detect the ArUco code\r\n img = cv2.imread('../Appdata/Local/Temp/airsim_drone/0.png')\r\n aruco_dict = cv2.aruco_Dictionary.get(aruco.DICT_6X6_250)\r\n parameters = cv2.aruco_DetectorParameters.create()\r\n corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(img, aruco_dict, parameters=parameters)\r\n \r\n\t #计算目标坐标偏移量\r\n\t\t#calculate the target coordinate offsets\r\n x1 = corners[0][0][0][0]\r\n y1 = corners[0][0][0][1]\r\n x2 = corners[0][0][1][0]\r\n y2 = corners[0][0][1][1]\r\n x3 = corners[0][0][2][0]\r\n y3 = corners[0][0][2][1]\r\n x4 = corners[0][0][3][0]\r\n y4 = corners[0][0][3][1]\r\n center_x = (x1 + x2 + x3 + x4)/4\r\n center_y = (y1 + y2 + y3 + y4)/4\r\n print(center_x,center_y)\r\n dx = center_x - image_width/2\r\n dy = center_y - image_height/2\t\t\r\n\t\t\r\n\r\n\t#更新无人机当前位置\r\n\t#update current location of the UAV\r\n UAV_position_x_val = client.getMultirotorState().kinematics_estimated.position.x_val\r\n UAV_position_y_val = client.getMultirotorState().kinematics_estimated.position.y_val\r\n UAV_position_z_val = -client.getMultirotorState().kinematics_estimated.position.z_val\r\n ws.write(row, 0, label = UAV_position_x_val)\r\n ws.write(row, 1, label = UAV_position_y_val)\r\n ws.write(row, 2, label = UAV_position_z_val)\r\n\t\r\n\t#更新相机高度\r\n\t#update the camera height\r\n camara_height = -client.simGetCameraInfo(0).pose.position.z_val\r\n\t\r\n #推算相对位置,计算期望角度\r\n\t#calculate the relative position and the expected angles\r\n estimate_s = camara_height*math.tan(math.pi/2-abs(camera_pitch))\r\n estimate_x = -estimate_s*math.cos(camera_yaw)*(1-dx*2/image_height)\r\n estimate_y = -estimate_s*math.sin(camera_yaw)*(1-dy*2/image_width)\r\n #print(UAV_position_x_val,UAV_position_y_val)\r\n #print(estimate_x,estimate_y)\r\n #estimate_yaw = math.asin(-estimate_y/math.sqrt(estimate_y*estimate_y + camara_height*camara_height))\r\n #estimate_pitch = 
math.asin(-estimate_x/math.sqrt(estimate_x*estimate_x + estimate_y*estimate_y + camara_height*camara_height))\r\n ws.write(row, 3, label = estimate_x)\r\n ws.write(row, 4, label = estimate_y)\r\n\t\r\n #GPS信息与视觉位置信息做融合,更新相机角度\r\n\t#the GPS information and the visual position information are integrated to update the camera angles\r\n x = ((visual_weight*estimate_x) + (GPS_weight*UAV_position_x_val*GPS_signal))/(visual_weight + GPS_weight*GPS_signal)\r\n y = ((visual_weight*estimate_y) + (GPS_weight*UAV_position_y_val*GPS_signal))/(visual_weight + GPS_weight*GPS_signal)\r\n z = camara_height\r\n print(UAV_position_x_val,UAV_position_y_val,UAV_position_z_val)\r\n print(estimate_s,estimate_x,estimate_y)\r\n print(x,y)\r\n ws.write(row, 5, label = x)\r\n ws.write(row, 6, label = y)\r\n ws.write(row, 7, label = camera_yaw)\r\n ws.write(row, 8, label = math.pi/2 + camera_pitch)\r\n '''\r\n\t#更新无人机速度(仿真)\r\n\t#update the UAV speeds (simulation)\r\n t1 = math.sqrt(x*x + y*y)/vmax\r\n t2 = UAV_position_z_val/vzmax\r\n if abs(UAV_position_x_val) > 0.5 and abs(UAV_position_y_val) > 0.5:\r\n vx = -x/math.sqrt(x*x + y*y)*vmax\r\n vy = -y/math.sqrt(x*x + y*y)*vmax\r\n else:\r\n vx = -x/math.sqrt(x*x + y*y)*vmax*(t1/t2)\r\n vy = -y/math.sqrt(x*x + y*y)*vmax*(t1/t2)\r\n if t1 > t2:\r\n vz = (UAV_position_z_val - h)/t1\r\n else:\r\n vz = vzmax\r\n print(vx,vy,vz)\r\n ws.write(row, 9, label = vx)\r\n ws.write(row, 10, label = vy)\r\n ws.write(row, 11, label = vz)\r\n\t'''\r\n #更新无人机速度(实验)\r\n\t#update the UAV speeds (experiment)\r\n t1 = math.sqrt(x*x + y*y)/vmax\r\n t2 = UAV_position_z_val/vzmax\r\n if abs(UAV_position_x_val) > 1 or abs(UAV_position_y_val) > 1:\r\n vx = -x/math.sqrt(x*x + y*y)*vmax\r\n vy = -y/math.sqrt(x*x + y*y)*vmax\r\n else:\r\n vx = -dy*0.05\r\n vy = dx*0.05\r\n if t1 > t2:\r\n vz = (UAV_position_z_val - h)/t1\r\n else:\r\n vz = vzmax\r\n print(vx,vy,vz)\r\n ws.write(row, 9, label = vx)\r\n ws.write(row, 10, label = vy)\r\n ws.write(row, 11, label = vz)\r\n\t\r\n row = row+1\r\n\r\n\t\t\r\nwb.save('data.xls')\t\r\nclient.landAsync().join()\r\ntime.sleep(5)\r\n\r\n\r\nclient.armDisarm(False)\r\nclient.enableApiControl(False)\r\n","sub_path":"landing/autoland_v2.py","file_name":"autoland_v2.py","file_ext":"py","file_size_in_byte":16818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"607518656","text":"#!/usr/bin/python\n\n# [Icarus] settings.py\n#\n# Mike Bonnington \n# (c) 2015-2019\n#\n# Modular settings editor dialog.\n# Provides a skeleton dialog which can be extended with load-in panels, each\n# with their own UI file and helper module (if required).\n\n\nimport os\nimport sys\n\nfrom Qt import QtCompat, QtCore, QtWidgets\n\n# Import custom modules\nimport ui_template as UI\n\nfrom shared import verbose\n\n# ----------------------------------------------------------------------------\n# Configuration\n# ----------------------------------------------------------------------------\n\ncfg = {}\n\n# Set window title and object names\ncfg['window_title'] = \"Settings\"\ncfg['window_object'] = \"settingsUI\"\n\n# Set the UI and the stylesheet\ncfg['ui_file'] = 'settings.ui'\ncfg['stylesheet'] = 'style.qss' # Set to None to use the parent app's stylesheet\n\n# Other options\ncfg['store_window_geometry'] = True\n\n# ----------------------------------------------------------------------------\n# Main dialog class\n# ----------------------------------------------------------------------------\n\nclass SettingsDialog(QtWidgets.QDialog, 
UI.TemplateUI):\n\t\"\"\" Settings editor dialog class.\n\t\"\"\"\n\tdef __init__(self, parent=None):\n\t\tsuper(SettingsDialog, self).__init__(parent)\n\t\tself.parent = parent\n\n\t\tself.setupUI(**cfg)\n\n\t\t# Set window flags\n\t\tself.setWindowFlags(QtCore.Qt.Dialog)\n\t\tself.setWindowIcon(self.iconSet('icon_settings.png', tintNormal=False))\n\n\t\t# Set other Qt attributes\n\t\tself.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)\n\n\t\t# Connect signals & slots\n\t\tself.ui.categories_listWidget.currentItemChanged.connect(lambda current: self.openProperties(current.text()))\n\t\t# self.ui.categories_listWidget.itemActivated.connect(lambda item: self.openProperties(current.text()))\n\n\t\tself.ui.settings_buttonBox.button(QtWidgets.QDialogButtonBox.Reset).clicked.connect(self.removeOverrides)\n\t\tself.ui.settings_buttonBox.button(QtWidgets.QDialogButtonBox.Save).clicked.connect(self.saveAllAndClose)\n\t\tself.ui.settings_buttonBox.button(QtWidgets.QDialogButtonBox.Cancel).clicked.connect(self.reject)\n\n\n\tdef display(self, \n\t\tsettings_type=\"Generic\",\n\t\tself_name=\"\", \n\t\tcategory_list=[], \n\t\tstart_panel=None, \n\t\tprefs_file=None, \n\t\tinherit=None, \n\t\tautofill=False):\n\t\t\"\"\" Display the dialog.\n\n\t\t\t'settings_type' is the name given to the settings dialog.\n\t\t\t'self_name' e.g. the name of the job or shot.\n\t\t\t'category_list' is a list of categories, should correspond to a\n\t\t\tpage of properties defined by a .ui file.\n\t\t\t'start_panel' if set will jump straight to the named panel.\n\t\t\t'prefs_file' is the path to the file storing the settings. Can be\n\t\t\teither an XML or a JSON file.\n\t\t\t'inherit' whether to inherit any values. This should be in the\n\t\t\tform of a path just like the 'prefs_file' argument.\n\t\t\t'autofill' when true, attempt to fill some fields automatically.\n\t\t\"\"\"\n\t\tif start_panel:\n\t\t\tverbose.debug(\"Start Panel: \" + start_panel)\n\t\t\tself.currentCategory = start_panel\n\t\telse:\n\t\t\tself.currentCategory = \"\"\n\n\t\tself.settings_type = settings_type\n\t\tself.self_name = self_name\n\t\tself.category_list = category_list\n\t\tself.prefs_file = prefs_file\n\t\tself.inherit = inherit\n\t\tself.autofill = autofill\n\n\t\tself.reset()\n\n\t\t# Set window title\n\t\tif self_name != \"\":\n\t\t\tself.setWindowTitle(\n\t\t\t\t\"%s %s: %s\" % (settings_type, cfg['window_title'], self_name))\n\t\telse:\n\t\t\tself.setWindowTitle(\n\t\t\t\t\"%s %s\" % (settings_type, cfg['window_title']))\n\n\t\treturn self.exec_()\n\n\n\tdef reset(self):\n\t\t\"\"\" Initialise or reset by reloading data.\n\t\t\"\"\"\n\t\t# self.ui.categories_listWidget.blockSignals(True)\n\n\t\t# Instantiate preferences data class(es)\n\t\tif self.prefs_file is not None:\n\t\t\tself.prefs = self.createPrefs(self.prefs_file)\n\t\t\tif self.inherit:\n\t\t\t\tself.prefs_inherited = self.createPrefs(self.inherit)\n\t\t\telse:\n\t\t\t\tself.prefs_inherited = None\n\n\t\t# Populate categories\n\t\tif self.category_list:\n\t\t\tself.ui.categories_listWidget.clear()\n\n\t\t\tfor cat in self.category_list:\n\t\t\t\tself.ui.categories_listWidget.addItem(cat)\n\n\t\t\t# Set the maximum size of the list widget\n\t\t\tself.ui.categories_listWidget.setMaximumWidth(self.ui.categories_listWidget.sizeHintForColumn(0)*2)\n\n\t\t\t# Select the first item & show the appropriate settings panel\n\t\t\tif self.currentCategory == \"\":\n\t\t\t\tcurrentItem = self.ui.categories_listWidget.item(0)\n\t\t\telse:\n\t\t\t\tcurrentItem = 
self.ui.categories_listWidget.findItems(self.currentCategory, QtCore.Qt.MatchExactly)[0]\n\n\t\t\tcurrentItem.setSelected(True)\n\t\t\t# self.openProperties(currentItem.text())\n\n\t\t\t# Hide category list if there's only one item\n\t\t\tif len(self.category_list) <= 1:\n\t\t\t\t# self.ui.categories_listWidget.hide() # doesn't work\n\t\t\t\tself.ui.categories_listWidget.setMaximumWidth(0)\n\n\t\t# self.ui.categories_listWidget.blockSignals(False)\n\n\n\tdef openProperties(self, category, storeProperties=True):\n\t\t\"\"\" Open properties panel for selected settings category. Loads UI\n\t\t\tfile and sets up widgets.\n\t\t\"\"\"\n\t\tself.currentCategory = category\n\n\t\t# Show panel & load values into form widgets\n\t\tif self.loadPanel(category):\n\t\t\tif (self.inherit is not None) \\\n\t\t\tand self.ui.settings_frame.property('inheritable'):\n\t\t\t\tverbose.print_(\"Category: %s (values inheritable)\" % category)\n\t\t\t\tself.setupWidgets(\n\t\t\t\t\tself.ui.settings_frame, \n\t\t\t\t\tforceCategory=category, \n\t\t\t\t\tinherit=self.prefs_inherited, \n\t\t\t\t\tstoreProperties=False)\n\t\t\telse:\n\t\t\t\tverbose.print_(\"Category: %s\" % category)\n\t\t\t\tself.setupWidgets(\n\t\t\t\t\tself.ui.settings_frame, \n\t\t\t\t\tforceCategory=category, \n\t\t\t\t\tstoreProperties=storeProperties)\n\n\n\tdef loadPanel(self, category):\n\t\t\"\"\" Load the panel UI (and helper module if required).\n\t\t\tThe exec function is called here to avoid the error: 'unqualified\n\t\t\texec is not allowed in function because it contains a nested\n\t\t\tfunction with free variables' with Python 2.x.\n\t\t\"\"\"\n\t\tui_file = \"settings_%s.ui\" % category\n\t\thelper_module = 'settings_%s' % category\n\t\tpanel_ui_loaded = False\n\t\thelper_module_loaded = False\n\n\t\t# Create new frame to hold properties UI & load into frame\n\t\tself.ui.settings_frame.close()\n\t\ttry:\n\t\t\tuifile = os.path.join(os.environ['IC_FORMSDIR'], ui_file)\n\t\t\tself.ui.settings_frame = QtCompat.loadUi(uifile)\n\t\t\tself.ui.settings_verticalLayout.addWidget(self.ui.settings_frame)\n\t\t\tpanel_ui_loaded = True\n\t\texcept FileNotFoundError:\n\t\t\tmessage = \"Could not open '%s' properties panel UI. \" % category\n\t\t\tverbose.error(message)\n\n\t\t# Load helper module\n\t\ttry:\n\t\t\texec_str = 'from . import %s as sh; helper = sh.helper(self, self.ui.settings_frame)' % helper_module\n\t\t\t# print(exec_str)\n\t\t\texec(exec_str)\n\t\t\thelper_module_loaded = True\n\t\texcept ImportError:\n\t\t\tmessage = \"Could not import '%s' module. \" % helper_module\n\t\t\tverbose.debug(message)\n\n\t\tif panel_ui_loaded: # and helper_module_loaded:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\tdef removeOverrides(self):\n\t\t\"\"\" Remove stored values and instead inherit defaults for widgets on\n\t\t\tthe current panel.\n\t\t\"\"\"\n\t\tfor widget in self.ui.settings_frame.findChildren(QtWidgets.QWidget):\n\t\t\tattr = widget.property('xmlTag')\n\t\t\tif attr:\n\t\t\t\tself.prefs.remove_attr(self.currentCategory, attr)\n\n\t\tself.openProperties(self.currentCategory, storeProperties=False)\n\n\n\tdef saveAllAndClose(self):\n\t\t\"\"\" Save settings and close the dialog.\n\t\t\"\"\"\n\t\t# Store the values from widgets on all pages\n\t\tfor category in self.category_list:\n\t\t\tself.openProperties(category, storeProperties=True)\n\n\t\t# There's a bug where all property panel widgets become visible if a\n\t\t# save fails. 
As a quick dodgy workaround we exit so we don't see it\n\t\t# happen.\n\t\tif self.save():\n\t\t\tself.accept()\n\t\telse:\n\t\t\t# self.close()\n\t\t\tself.reject()\n\n\n\tdef keyPressEvent(self, event):\n\t\t\"\"\" Override function to prevent Enter / Esc keypresses triggering\n\t\t\tOK / Cancel buttons.\n\t\t\"\"\"\n\t\tif event.key() == QtCore.Qt.Key_Return \\\n\t\tor event.key() == QtCore.Qt.Key_Enter:\n\t\t\treturn\n\n\n\tdef hideEvent(self, event):\n\t\t\"\"\" Event handler for when window is hidden.\n\t\t\"\"\"\n\t\tself.storeWindow() # Store window geometry\n\n\n\t# def closeEvent(self, event):\n\t# \t\"\"\" Event handler for when window is closed.\n\t# \t\"\"\"\n\t# \t#self.save() # Save settings\n\t# \t#self.storeWindow() # Store window geometry\n","sub_path":"tools/settings/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":8121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"603159756","text":"\"\"\"\n Autograde Lesson 8 assignment\n\n\"\"\"\n\nimport csv\nimport os\nfrom pathlib import Path\nimport pytest\nfrom loguru import logger\nimport inventory as l\n\n\ndef test_add_furniture():\n my_path = Path('test.txt')\n logger.debug('If the test.txt file already exists, remove it.')\n if my_path.is_file():\n os.remove(my_path)\n logger.debug('Make sure the file is gone.')\n assert not my_path.is_file()\n logger.debug('Test file creation on first addition')\n l.add_furniture(\n 'test.txt',\n 'Jim',\n '001',\n 'spaceship',\n 2000\n )\n assert my_path.is_file()\n logger.debug('Test subsequent additions.')\n l.add_furniture(\n 'test.txt',\n 'Naomi',\n '002',\n 'antimatter',\n 12000\n )\n l.add_furniture(\n 'test.txt',\n 'Amos',\n '003',\n 'wrench',\n 11\n )\n test_data = []\n with open(my_path) as f:\n reader = csv.reader(f)\n for line in reader:\n test_data.append(line)\n assert test_data[0] == ['Jim', '001', 'spaceship', '2000']\n assert test_data[1] == ['Naomi', '002', 'antimatter', '12000']\n assert test_data[2] == ['Amos', '003', 'wrench', '11']\n\n logger.debug('Cleaning up test.txt')\n if my_path.is_file():\n os.remove(my_path)\n\ndef test_single_customer():\n my_path = Path('test.txt')\n test_result = [\n ['Alex', 'LR01', 'Small lamp', '7.50'],\n ['Alex', 'LR02', 'Television', '28.00'],\n ['Alex', 'BR07', 'LED lamp', '5.50'],\n ['Alex', 'KT08', 'Basic refrigerator', '40.00']\n ]\n logger.debug('If test.txt exists, delete it.')\n if my_path.is_file():\n os.remove(my_path)\n test_func = l.single_customer('Alex', 'test.txt')\n test_func('../data/test_items.csv')\n test_data = []\n with open(my_path) as f:\n reader = csv.reader(f)\n for line in reader:\n test_data.append(line)\n assert test_data == test_result\n","sub_path":"students/rachel_s/lesson08/assignment/tests/test_inventory.py","file_name":"test_inventory.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"255746015","text":"\nimport os\nimport json\nimport logging\nfrom os.path import join\n\nimport pandas as pd\n\n\n__author__ = 'dh8835'\n__email__ = 'dasha.herrmannova@open.ac.uk'\n\n\nclass ResultsParser(object):\n\n def __init__(self, output_dir: str) -> None:\n \"\"\"Initialize class.\n \n :param output_dir: directory for storing results\n :type output_dir: str\n :return: none\n :rtype: None\n \"\"\"\n self._logger = logging.getLogger(__name__)\n self._output_dir = output_dir\n\n def parse_results(self) -> None:\n \"\"\"Parse all results 
files and put all results in one CSV file.\n \n :return: None\n :rtype: None\n \"\"\" \n experiment_dir = join(self._output_dir, 'experiments')\n all_results = []\n self._logger.info('Parsing all results in %s', experiment_dir)\n for dir_name in os.listdir(experiment_dir):\n dir_path = join(experiment_dir, dir_name)\n if os.path.isdir(dir_path):\n res_path = join(dir_path, 'results.json')\n cfg_path = join(dir_path, 'model_config.json')\n if os.path.exists(res_path) and os.path.exists(cfg_path):\n with open(res_path) as fp:\n results = json.load(fp)\n with open(cfg_path) as fp:\n cfg = json.load(fp)\n results['name'] = cfg['name']\n results['description'] = cfg['description']\n results['dir'] = dir_name\n all_results.append(results)\n self._logger.info('Found %d experiments', len(all_results))\n output_file = join(self._output_dir, 'results.csv')\n self._logger.info('Storing all results in %s', output_file)\n pd.DataFrame(all_results).to_csv(output_file)\n","sub_path":"WP3/Task3.3/src/evaluation/results_parser.py","file_name":"results_parser.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"563522334","text":"# Trapping rain water\n# Two pointers: the trapped water level is bounded by the shorter of the two sides\ndef trap(height):\n \"\"\"\n :type height: List[int]\n :rtype: int\n \"\"\"\n left = 0\n right = len(height)-1\n left_max = 0\n right_max = 0\n ans = 0\n while left < right:\n if height[left] < height[right]:\n if height[left] >= left_max:\n left_max = height[left]\n else:\n ans = ans + left_max - height[left]\n left = left + 1\n else:\n if height[right] >= right_max:\n right_max = height[right]\n else:\n ans = ans + right_max - height[right]\n right = right - 1\n\n return ans\n\n\nif __name__ == \"__main__\":\n print(trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]))\n","sub_path":"Q42.py","file_name":"Q42.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"127163154","text":"#encoding=utf-8\n\nfrom django.contrib import admin\nfrom django.forms.extras import widgets\n\nfrom django.utils.translation import ugettext as _\n\nfrom mptt.admin import MPTTModelAdmin\nfrom mptt.fields import TreeForeignKey, TreeManyToManyField\n#from mptt.forms import MPTTAdminForm, TreeNodeChoiceField\n\nfrom General.models import * # Tree, Human, Adress, Region, Concept, Type, Being_Type\n\n#class CustomMPTTModelAdmin(MPTTModelAdmin):\n # specify pixel amount for this ModelAdmin only:\n #fields = ['name']\n# mptt_level_indent = 20\n# mptt_indent_field = \"name\"\n\n\nfrom itertools import chain\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.admin import widgets\nfrom django.utils.encoding import smart_unicode, force_unicode\nfrom django.utils.safestring import mark_safe\nfrom django.utils.html import escape, conditional_escape\n\n\n\nclass ProjectAdmin(MPTTModelAdmin): # admin.ModelAdmin):\n #class Media:\n # js = ('mselect-to-mcheckbox.js', 'jquery-ui-1.10.2.custom.js',)\n # css = {\n # 'all': ('css/mselect-to-mcheckbox.css',)\n # }\n\n fieldsets = (\n (None, {\n 'fields':(('name', 'nickname'), ('website', 'socialweb'), ('being_type', 'parent'), ('email', 'email2', 'telephone'))\n }),\n (_(u\"Membres de referencia\"), {\n 'classes': ('collapse',),\n 'fields':('ref_members',),\n #'filter_horizontal': ('members', 'ref_members',)\n }),\n (_(u\"Adreçes\"), {\n 'classes': ('collapse',),\n 'fields': ('addresses',)\n }),\n (_(\"Arts\"), {\n 'classes': 
('collapse',),\n 'fields': ('jobs',)\n }),\n (_(u\"Dates inici/fi\"), {\n 'classes': ('collapse',),\n 'fields': (('birth_date', 'dead_date'),)\n })\n )\n filter_horizontal = ('ref_members', 'addresses', 'jobs')\n\n\n\nclass PersonAdmin(admin.ModelAdmin):\n\n #class Media:\n # js = ('mselect-to-mcheckbox.js', 'jquery-ui-1.10.2.custom.js',)\n # css = {\n # 'all': ('css/mselect-to-mcheckbox.css',)\n # }\n\n\n fieldsets = (\n (None, {\n 'fields':(('name', 'surnames', 'id_card'), ('nickname', 'nickname2'), ('email', 'email2'), ('website', 'telephone'))\n }),\n (_(u\"Adreçes\"), {\n 'classes': ('collapse',),\n 'fields': ('addresses',)\n }),\n (_(\"Arts\"), {\n 'classes': ('collapse',),\n 'fields': ('jobs',)\n }),\n (_(u\"Projectes\"), {\n 'classes': ('collapse',),\n 'fields': ('projects',)\n }),\n (_(u\"Dates naixement/mort\"), {\n 'classes': ('collapse',),\n 'fields': (('birth_date', 'dead_date'),)\n })\n )\n filter_horizontal = ('addresses', 'jobs', 'projects',)\n\n\nclass CompanyAdmin(admin.ModelAdmin): # admin.ModelAdmin):\n #class Media:\n # js = ('mselect-to-mcheckbox.js', 'jquery-ui-1.10.2.custom.js',)\n # css = {\n # 'all': ('css/mselect-to-mcheckbox.css',)\n # }\n\n fieldsets = (\n (None, {\n 'fields':(('name', 'nickname'), ('legal_name', 'vat_number'), ('being_type', 'website'), ('email', 'telephone'))\n }),\n (_(u\"Membres de referencia\"), {\n 'classes': ('collapse',),\n 'fields':('ref_members',),\n #'filter_horizontal': ('members', 'ref_members',)\n }),\n (_(u\"Adreçes\"), {\n 'classes': ('collapse',),\n 'fields': ('addresses',)\n }),\n (_(\"Arts\"), {\n 'classes': ('collapse',),\n 'fields': ('jobs',)\n }),\n (_(u\"Dates inici/fi\"), {\n 'classes': ('collapse',),\n 'fields': (('birth_date', 'dead_date'),)\n })\n )\n filter_horizontal = ('ref_members', 'addresses', 'jobs')\n\n\n\n\n# Register your models here.\n\n#admin.site.register(Tree)\n\n#admin.site.register(Being)\n#admin.site.register(Being_Type, MPTTModelAdmin) # Comment this line after creating 'Human', then 'Person', 'Project' and 'Company' under Human, inside Being_Types.\n#admin.site.register(Human)\nadmin.site.register(Person, PersonAdmin)\n\nadmin.site.register(Project, ProjectAdmin)\nadmin.site.register(Project_Type, MPTTModelAdmin)\n\nadmin.site.register(Company, CompanyAdmin)\nadmin.site.register(Company_Type, MPTTModelAdmin)\n\n\n#admin.site.register(Art, MPTTModelAdmin) # Comment this line after creating 'Relation' and 'Job' inside Arts.\nadmin.site.register(Relation, MPTTModelAdmin)\nadmin.site.register(Job, MPTTModelAdmin)\n\n\n#admin.site.register(Artwork)\n#admin.site.register(Artwork_Type, MPTTModelAdmin) # Comment this line after creating 'Unit', 'Record', 'Material' and 'Nonmaterial' inside Artwork_Types\nadmin.site.register(Unit)\nadmin.site.register(Unit_Type, MPTTModelAdmin)\nadmin.site.register(UnitRatio)\n\nadmin.site.register(Nonmaterial)\nadmin.site.register(Nonmaterial_Type, MPTTModelAdmin)\n\nadmin.site.register(Material)\nadmin.site.register(Material_Type, MPTTModelAdmin)\n\nadmin.site.register(Record)\nadmin.site.register(Record_Type, MPTTModelAdmin)\nadmin.site.register(AccountCes)\nadmin.site.register(AccountBank)\n\n\n#admin.site.register(Space)\n#admin.site.register(Space_Type, MPTTModelAdmin) # Comment this line after creating 'Address' and 'Region' inside Space_Types\nadmin.site.register(Address)\nadmin.site.register(Address_Type, MPTTModelAdmin)\n\nadmin.site.register(Region, MPTTModelAdmin)\nadmin.site.register(Region_Type, MPTTModelAdmin)\n\nadmin.site.register(Concept, 
MPTTModelAdmin)\nadmin.site.register(Type, MPTTModelAdmin) # Comment this line whenever you don't need to edit the main whole Types tree\n","sub_path":"General/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"463672793","text":"from airfoil_NUFT import *\nimport time\nimport csv\n\n# Get list of airfoils to process\nairfoil_list = []\nwith open(\"CaseList.txt\") as list_file:\n\treader = csv.reader(list_file, delimiter = \",\")\n\tline = 0\n\tfor row in reader:\n\t\tif line == 0:\n\t\t\tline += 1\n\t\telif not len(row) == 0: \n\t\t\tif airfoil_list.count(row[0]) == 0:\n\t\t\t\tairfoil_list.append(row[0])\n\nprint('Will process {} airfoils'.format(len(airfoil_list)))\n\t\n\t\n\n\n# Convert all airfoils in airfoil-data\n \ni=0\nstart=time.time()\nfor airfoil in airfoil_list:\n\tif not os.path.exists('data/'+airfoil+'/seligdatfile'):\n\t\tprint('WARNING: Selig .dat file for '+airfoil+' not found. Airfoil not converted and saved.')\n\telse: \n\t\t# Create save file name\n\t\tsave_file='processed_data/'+airfoil+'.pt'\n\t\tif not os.path.exists(save_file):\n\t\t\tairfoil_phys(airfoil, res=(224,224), save_name=save_file, device=\"cpu\", grad=False)\n\t\ti=i+1\n\n\t\tif (i+1)%100==0:\n\t\t\tend=time.time()\n\t\t\tprint(str(i+1)+' airfoils processed! Time elapsed: '+str(end-start))\n\t\t\tstart=time.time()\n \nprint('Processing complete! Processed {} airfoils'.format(i))\n","sub_path":"process_airfoil_shapes_from_csv.py","file_name":"process_airfoil_shapes_from_csv.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"497681844","text":"from __future__ import print_function\nimport sys\nimport os\nsys.path.append(os.path.abspath(\"C:\\\\Users\\\\genia\\\\Documents\\\\Source\\\\Repos\\\\vs_drl_bootcamp1\"))\nsys.path.append(os.path.abspath(\"C:\\\\Program Files (x86)\\\\Microsoft Visual Studio\\\\Shared\\Anaconda3_64\\\\Lib\\\\site-packages\"))\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Net(nn.Module):\n\n def __init__(self, n_states, n_actions, n_hidden=64):\n super(Net, self).__init__()\n\n n_hidden_1 = n_hidden\n n_hidden_2 = n_hidden\n #n_hidden_3 = 64\n self.fc1 = nn.Linear(n_states, n_hidden_1)\n self.fc2 = nn.Linear(n_hidden_1, n_hidden_2)\n #self.fc3 = nn.Linear(n_hidden_2, n_hidden_3)\n self.outmu1 = nn.Linear(n_hidden_2, n_actions)\n torch.nn.init.kaiming_uniform_(self.fc2.weight.data)\n torch.nn.init.kaiming_uniform_(self.fc1.weight.data)\n torch.nn.init.kaiming_uniform_(self.outmu1.weight.data)\n self.outmu1.weight.data.mul_(0.01)\n\n self.logstd = nn.Parameter(torch.add(torch.zeros(n_actions), 0))\n\n def forward(self, input):\n x = F.tanh(self.fc1(input))\n x = F.tanh(self.fc2(x))\n #x = F.tanh(self.fc3(x))\n mu = F.tanh(self.outmu1(x))*1\n logstd = self.logstd.expand_as(mu)\n std = torch.exp(logstd)\n\n return mu, std\n\n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n","sub_path":"Box2dEnv/BackupNewer/Curious_net_actor_cont_2.py","file_name":"Curious_net_actor_cont_2.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"603921514","text":"from rest_framework import status\nfrom rest_framework import 
viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\nfrom apps.shared.serializers import DeleteShareSerializer\nfrom apps.shared.utils import custom_retrieve\nfrom .models import Building, BuildingOwners\nfrom .serializers import (\n BuildingRetrieveSerializer, BuildingBaseSerializer,\n CreateUpdateShareSerializer\n)\n\n\nclass BuildingViewSet(viewsets.ModelViewSet):\n \"\"\"\n ViewSet to handle all Building CRUD actions.\n Shareholders is accessible via Retrieve action.\n Also here is 2 custom actions for creating, updating, deleting shares.\n \"\"\"\n queryset = Building.objects.filter()\n\n def get_serializer_class(self):\n if self.action == 'retrieve':\n return BuildingRetrieveSerializer\n return BuildingBaseSerializer\n\n def retrieve(self, request, *args, **kwargs):\n response = super().retrieve(request, *args, **kwargs)\n custom_retrieve(kwargs, request, response)\n return response\n\n @action(detail=False, name='update', methods=['post', 'put'])\n def update_share(self, request, *args, **kwargs):\n \"\"\"\n Custom action to handle shares create and update actions\n \"\"\"\n serializer = CreateUpdateShareSerializer(data=request.data)\n valid = serializer.is_valid()\n if valid:\n serializer.save()\n # solution to make update and create in single action endpoint\n elif 'non_field_errors' in serializer.errors:\n if serializer.errors.get('non_field_errors')[0] == (\n 'The fields shareholder, shared_object must make a unique set.'):\n BuildingOwners.objects.filter(\n shared_object=serializer.initial_data.get('shared_object'),\n shareholder=serializer.initial_data.get('shareholder'))\\\n .update(share=serializer.initial_data.get('share'))\n else:\n serializer.is_valid(raise_exception=True)\n headers = self.get_success_headers(serializer.validated_data)\n return Response(serializer.data, status=status.HTTP_200_OK,\n headers=headers)\n\n @action(detail=False, name='delete', methods=['delete'])\n def delete_share(self, request, *args, **kwargs):\n \"\"\"\n Custom action to handle shares delete action\n \"\"\"\n serializer = DeleteShareSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n deleted = BuildingOwners.objects.filter(\n shared_object=serializer.initial_data.get('shared_object'),\n shareholder=serializer.initial_data.get('shareholder')).delete()\n if deleted[0]:\n return Response({\"success\": \"Successfully deleted\"},\n status=status.HTTP_204_NO_CONTENT)\n elif not deleted[0]:\n return Response({\"error\": \"No such share found\"},\n status=status.HTTP_404_NOT_FOUND)\n","sub_path":"apps/building/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"100489829","text":"# Copyright 2020, Google, Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport time\nimport uuid\n\nfrom google.api_core import exceptions\n\nimport pytest\n\nimport create_cluster\nimport 
create_realm\nimport delete_cluster\nimport delete_realm\nimport get_cluster\nimport get_realm\nimport list_clusters\nimport list_realms\nimport update_cluster\nimport update_realm\n\nPROJECT_ID = \"python-docs-samples-tests\"\nREALM_LOCATION = \"global\"\nCLUSTER_ID = \"my-cluster\"\nGKE_CLUSTER_NAME = \"projects/gcgs-client-lib-samples/locations/us-central1/clusters/gke-shared-default\"\n\n\n# The format of realm ID. This is used in the unit tests and cleanup below.\nrealm_id_format = 'test-realm-{}-{}'\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef clean_up_old_realms():\n all_realms = list_realms.list_realms(PROJECT_ID, REALM_LOCATION)\n for realm in all_realms:\n realm_name = realm.name\n realm_id = realm_name[realm_name.rfind('/') + 1: len(realm_name)]\n if realm_id.find('test-realm-') == 0:\n time_str = realm_id[realm_id.rfind('-') + 1: len(realm_id)]\n test_date = datetime.datetime.utcfromtimestamp(int(time_str))\n now_date = datetime.datetime.utcfromtimestamp(int(time.time()))\n difftime = now_date - test_date\n\n # *NOTE* Restrict to realms used in the tests older than 2 days\n # to prevent thrashing in the case of async tests\n if (difftime.days > 2):\n print(f\"Cleaning up old realm {realm_id} and its clusters, difftime: {difftime}\")\n clean_up_realm_and_clusters(realm_id)\n\n\n@pytest.fixture(scope=\"function\")\ndef test_realm():\n realm_id = realm_id_format.format(uuid.uuid4().hex, int(time.time()))\n\n print(f\"Creating realm {realm_id} in location {REALM_LOCATION} in project {PROJECT_ID}\")\n create_realm.create_realm(PROJECT_ID, REALM_LOCATION, realm_id)\n\n yield realm_id\n\n print(f\"Cleaning up realm {realm_id} in teardown\")\n clean_up_realm(realm_id)\n\n\ndef clean_up_realm(realm_id):\n # Delete the realm if it still exists.\n print(f\"Deleting realm: {realm_id}\")\n try:\n delete_realm.delete_realm(PROJECT_ID, REALM_LOCATION, realm_id)\n except exceptions.NotFound: # Ignore the non-existent realm\n return\n\n\n@pytest.fixture(scope=\"function\")\ndef test_realm_with_cluster():\n realm_id = realm_id_format.format(uuid.uuid4().hex, int(time.time()))\n\n print(f\"Creating realm {realm_id} in location {REALM_LOCATION} in project {PROJECT_ID}\")\n create_realm.create_realm(PROJECT_ID, REALM_LOCATION, realm_id)\n\n print(f\"Creating cluster {CLUSTER_ID} in realm {realm_id} in project {PROJECT_ID}\")\n create_cluster.create_cluster(PROJECT_ID, REALM_LOCATION, realm_id, CLUSTER_ID, GKE_CLUSTER_NAME)\n\n yield realm_id\n\n print(f\"Cleaning up realm {realm_id} in teardown\")\n clean_up_realm_and_clusters(realm_id)\n\n\ndef clean_up_realm_and_clusters(realm_id):\n # Delete the realm and the game server clusters in the realm.\n try:\n get_realm.get_realm(PROJECT_ID, REALM_LOCATION, realm_id)\n except exceptions.NotFound: # Ignore the non-existent realm\n return\n\n clusters = list_clusters.list_clusters(PROJECT_ID, REALM_LOCATION, realm_id)\n for cluster in clusters:\n cluster_id = cluster.name.rsplit('/', 1)[-1]\n print(f\"Deleting cluster {cluster_id} in realm {realm_id}\")\n try:\n delete_cluster.delete_cluster(PROJECT_ID, REALM_LOCATION, realm_id, cluster_id)\n except exceptions.NotFound: # Ignore the non-existent cluster\n return\n\n print(f\"Deleting realm: {realm_id}\")\n try:\n delete_realm.delete_realm(PROJECT_ID, REALM_LOCATION, realm_id)\n except exceptions.NotFound: # Ignore the non-existent realm\n return\n\n\ndef test_create_realm(test_realm):\n print(f\"Created realm {test_realm} in project {PROJECT_ID}\")\n\n\ndef test_get_realm(test_realm):\n 
realm = get_realm.get_realm(PROJECT_ID, REALM_LOCATION, test_realm)\n assert realm.name == f\"projects/{PROJECT_ID}/locations/{REALM_LOCATION}/realms/{test_realm}\"\n\n\ndef test_list_realms(test_realm):\n realms = list_realms.list_realms(PROJECT_ID, REALM_LOCATION)\n\n realm_name_list = []\n for realm in realms:\n realm_name_list.append(realm.name)\n\n realm_name = f\"projects/{PROJECT_ID}/locations/{REALM_LOCATION}/realms/{test_realm}\"\n assert realm_name in realm_name_list\n\n\ndef test_update_realm(test_realm):\n update_realm.update_realm(PROJECT_ID, REALM_LOCATION, test_realm)\n realm = get_realm.get_realm(PROJECT_ID, REALM_LOCATION, test_realm)\n assert realm.labels == {\"label-key-1\": \"label-value-1\", \"label-key-2\": \"label-value-2\"}\n\n\ndef test_delete_realm(test_realm):\n delete_realm.delete_realm(PROJECT_ID, REALM_LOCATION, test_realm)\n with pytest.raises(exceptions.NotFound):\n get_realm.get_realm(PROJECT_ID, REALM_LOCATION, test_realm)\n\n\ndef test_create_cluster(test_realm_with_cluster):\n print(f\"Created cluster {CLUSTER_ID} in realm {test_realm_with_cluster} in project {PROJECT_ID}\")\n\n\ndef test_get_cluster(test_realm_with_cluster):\n cluster = get_cluster.get_cluster(PROJECT_ID, REALM_LOCATION, test_realm_with_cluster, CLUSTER_ID)\n assert cluster.name == f\"projects/{PROJECT_ID}/locations/{REALM_LOCATION}/realms/{test_realm_with_cluster}/gameServerClusters/{CLUSTER_ID}\"\n\n\ndef test_list_clusters(test_realm_with_cluster):\n clusters = list_clusters.list_clusters(PROJECT_ID, REALM_LOCATION, test_realm_with_cluster)\n\n cluster_name_list = []\n for cluster in clusters:\n cluster_name_list.append(cluster.name)\n\n cluster_name = f\"projects/{PROJECT_ID}/locations/{REALM_LOCATION}/realms/{test_realm_with_cluster}/gameServerClusters/{CLUSTER_ID}\"\n assert cluster_name in cluster_name_list\n\n\ndef test_update_cluster(test_realm_with_cluster):\n update_cluster.update_cluster(PROJECT_ID, REALM_LOCATION, test_realm_with_cluster, CLUSTER_ID)\n cluster = get_cluster.get_cluster(PROJECT_ID, REALM_LOCATION, test_realm_with_cluster, CLUSTER_ID)\n assert cluster.labels == {\"label-key-1\": \"label-value-1\", \"label-key-2\": \"label-value-2\"}\n\n\ndef test_delete_cluster(test_realm_with_cluster):\n delete_cluster.delete_cluster(PROJECT_ID, REALM_LOCATION, test_realm_with_cluster, CLUSTER_ID)\n with pytest.raises(exceptions.NotFound):\n get_cluster.get_cluster(PROJECT_ID, REALM_LOCATION, test_realm_with_cluster, CLUSTER_ID)\n","sub_path":"samples/snippets/realm_and_cluster_test.py","file_name":"realm_and_cluster_test.py","file_ext":"py","file_size_in_byte":6938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"172123578","text":"import unittest\r\nimport itertools\r\n\r\n\r\n\r\n#Question 1\r\n\r\ndef memoize(func):\r\n cache = {}\r\n def f(*args):\r\n if args not in cache:\r\n v = func(*args)\r\n cache[args] = v\r\n else:\r\n v = cache[args]\r\n return v\r\n return f\r\n\r\n@memoize\r\ndef sum2(x, y):\r\n print('calling sum2')\r\n return x + y\r\n\r\n\r\n#Question 2\r\n\r\nclass Rational:\r\n max = None\r\n \r\n @staticmethod\r\n def gcd(x,y):\r\n while y!=0:\r\n (x,y) = (y,x%y)\r\n return x\r\n \r\n def __init__(self, numerator, denominator=1):\r\n if (denominator<0):\r\n self.numerator = numerator*-1\r\n self.denominator = denominator*-1\r\n else:\r\n self.numerator = numerator\r\n self.denominator = denominator\r\n if Rational.max is None or self > Rational.max:\r\n Rational.max=self\r\n \r\n\r\n \r\n def 
__add__(self, other):\r\n mult1 = other.denominator\r\n mult2 = self.denominator\r\n num1 = self.numerator*mult1\r\n num2 = other.numerator*mult2\r\n return Rational(num1+num2,mult1*mult2)\r\n \r\n def __sub__(self,other):\r\n return self+Rational(other.numerator*-1,other.denominator)\r\n \r\n def __mul__(self,other):\r\n return Rational(self.numerator*other.numerator,self.denominator*other.denominator)\r\n \r\n def __truediv__(self,other):\r\n return self*Rational(other.denominator,other.numerator)\r\n\r\n def __gt__(self,other):\r\n temp1 = self.numerator * other.denominator\r\n temp2 = self.denominator * other.numerator\r\n return temp1 > temp2\r\n \r\n def __ge__(self,other): \r\n temp1 = self.numerator * other.denominator\r\n temp2 = self.denominator * other.numerator\r\n return temp1 >= temp2\r\n \r\n def __lt__(self,other):\r\n return other > self\r\n\r\n \r\n def __le__(self,other):\r\n return other >= self\r\n\r\n \r\n def __eq__(self,other):\r\n return self.numerator*other.denominator == self.denominator*other.numerator\r\n\r\n \r\n def __ne__(self,other):\r\n return self.numerator*other.denominator!=self.denominator*other.numerator\r\n\r\n def __str__(self):\r\n gcd = Rational.gcd(self.numerator,self.denominator)\r\n newNum = self.numerator//gcd\r\n newDen = self.denominator//gcd\r\n return \"%s/%s\" % (newNum,newDen)\r\n \r\n def __repr__(self):\r\n gcd = Rational.gcd(self.numerator,self.denominator)\r\n newNum = self.numerator//gcd\r\n newDen = self.denominator//gcd\r\n return \"Rational(%r,%r)\" % (newNum,newDen)\r\n\r\n \r\n#Question 3\r\n \r\ndef myZip(iterable1, iterable2, fill=None):\r\n it1 = iter(iterable1)\r\n it2 = iter(iterable2)\r\n elem1 = next(it1,fill)\r\n elem2 = next(it2,fill)\r\n while elem1 is not fill or elem2 is not fill:\r\n result = []\r\n if elem1 is not None and elem2 is not None:\r\n result.append(elem1)\r\n result.append(elem2)\r\n yield tuple(result) \r\n elem1 = next(it1,fill)\r\n elem2 = next(it2,fill)\r\n \r\n\r\n#Question 4\r\n \r\nclass MyZip:\r\n\r\n def __init__(self, iterable1,iterable2,fill=None):\r\n self.iterable1=iterable1\r\n self.iterable2=iterable2\r\n self.fill=fill\r\n\r\n def __iter__(self):\r\n \r\n class Iterator:\r\n \r\n def __init__(self,iterable1,iterable2,fill):\r\n self.it1 = iter(iterable1)\r\n self.it2 = iter(iterable2)\r\n self.fill=fill\r\n \r\n def __iter__(self):\r\n return self\r\n \r\n def __next__(self):\r\n elem1 = next(self.it1,self.fill)\r\n elem2 = next(self.it2,self.fill)\r\n result = [] \r\n if elem1 is not self.fill or elem2 is not self.fill:\r\n if elem1 is not None and elem2 is not None:\r\n result.append(elem1)\r\n result.append(elem2)\r\n return tuple(result)\r\n else:\r\n raise StopIteration\r\n\r\n else:\r\n raise StopIteration\r\n \r\n return Iterator(self.iterable1,self.iterable2,self.fill)\r\n\r\n \r\n#Question 5\r\n \r\ndef anagram(word):\r\n list = []\r\n fin = open('words.txt')\r\n permutations_of_word = [''.join(permutation) for permutation in itertools.permutations(word)]\r\n for line in fin:\r\n line = line.strip()\r\n if line in permutations_of_word:\r\n list.append(line)\r\n fin.close()\r\n return sorted(list) \r\n\r\n#Question 6 \r\nclass VariableDescriptor:\r\n\r\n\r\n def __set__(self,instance,value):\r\n instance._value=value\r\n instance._history.append(value)\r\n instance.wCount +=1\r\n\r\n def __get__(self,instance,owner):\r\n instance.rCount+= 1\r\n return instance._value\r\n\r\nclass Variable:\r\n\r\n def __init__(self,value):\r\n self.wCount=0\r\n self.rCount=0\r\n self._history = 
[]\r\n self.value=value\r\n\r\n def readCounter(self):\r\n return self.rCount\r\n\r\n def writeCounter(self):\r\n return self.wCount\r\n\r\n def history(self):\r\n return self._history\r\n\r\n value = VariableDescriptor()\r\n\r\n# TESTS\r\n\r\nclass TestExercise2(unittest.TestCase):\r\n\r\n def test_decorator(self):\r\n print(sum2(1,3))\r\n print(sum2(1,3))\r\n print(sum2(1,4))\r\n\r\n def test_rational(self):\r\n half = Rational(5,10)\r\n third = Rational(1,3)\r\n self.assertEqual(repr(half),\"Rational(1,2)\")\r\n self.assertEqual(str(half),\"1/2\")\r\n self.assertEqual(half < third, False)\r\n self.assertEqual(half <= third, False)\r\n self.assertEqual(half > third, True)\r\n self.assertEqual(half >= third, True)\r\n self.assertEqual(half != third, True)\r\n self.assertEqual(half == third, False)\r\n half2 = Rational(10,20)\r\n self.assertEqual(half == half2, True)\r\n self.assertEqual(half+third, Rational(5,6))\r\n self.assertEqual(half-third, Rational(1,6))\r\n self.assertEqual(half*third, Rational(1,6))\r\n self.assertEqual(half/third, Rational(3,2))\r\n\r\n\r\n def test_myZip(self):\r\n g = (3*i for i in range(5))\r\n mzip = myZip(g, [\"a\", \"b\", \"c\"], fill=\"bye\")\r\n self.assertEqual(mzip.__next__(), (0, 'a'))\r\n self.assertEqual(mzip.__next__(), (3, 'b'))\r\n\r\n\r\n def test_MyZip(self):\r\n try1 = MyZip([1,2],[\"a\",\"b\",\"c\"],\"bye\")\r\n it1 = iter(try1)\r\n self.assertEqual(it1.__next__(),(1,\"a\"))\r\n self.assertEqual(it1.__next__(),(2,\"b\"))\r\n self.assertEqual(it1.__next__(),(\"bye\",\"c\"))\r\n try2 = MyZip([1,2],[\"a\",\"b\",\"c\"])\r\n it2 = iter(try2)\r\n self.assertEqual(it2.__next__(),(1,\"a\"))\r\n self.assertEqual(it2.__next__(),(2,\"b\")) \r\n\r\n\r\n def test_descriptor(self):\r\n v = Variable(8)\r\n self.assertEqual(v.history(),[8])\r\n v.value=3\r\n self.assertEqual(v.writeCounter(),2)\r\n self.assertEqual(v.readCounter(),0)\r\n self.assertEqual(v.value,3)\r\n self.assertEqual(v.readCounter(),1)\r\n\r\n def test_anagram(self):\r\n self.assertEqual(anagram('restful'),['fluster', 'fluters', 'restful'])\r\n self.assertEqual(anagram('rrao'),['orra', 'roar'])\r\n \r\n \r\n\r\n \r\n \r\n \r\n\r\n\r\nunittest.main() \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n","sub_path":"ex2 .py","file_name":"ex2 .py","file_ext":"py","file_size_in_byte":7675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"269945081","text":"\"\"\"\r\nProject name: №59\r\nVersion: 1.0\r\nFile name: practicum-1(№59).py\r\nAuthor: 2020 © N.D. Kislitsyn, Chelyabinsk\r\nLicense: CC BY-NC 4.0 (https://creativecommons.org/licenses/by-nc/4.0/deed.ru)\r\nCreated: 18/12/2020\r\nLast modified: 18/12/2020\r\nDescription: Solution to problem № 59 of practicum № 1\r\ndescription: M lines are entered from the keyboard. Count the number of spaces in each line.\r\n#Python version: 3.6\r\n\"\"\"\r\n\r\n\"\"\"\r\nOriginal publication: Practicum №1 - getting our hands in (Business Informatics 2020)\r\n\"\"\"\r\n\r\nimport re\r\nM = 2\r\n\r\nlist_strings = []\r\nfor i in range(0, M):\r\n print(\"Enter a line:\", end=' ')\r\n list_strings.append(input())\r\n\r\nfor string in list_strings:\r\n count_spaces = len(re.findall(r'\\s', string))\r\n print(\"Line \\\"%s\\\" contains %s spaces\" % (string, count_spaces))\r\n","sub_path":"№59.py","file_name":"№59.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"55535721","text":"#!/usr/bin/env python3\n\nfrom sys import argv\n\n'''Quick script to explain the addition of binary numbers'''\n\ndef _convert_to_binary_list(s):\n out = []\n for i in range(len(s)):\n value = int(s[i])\n if 0 <= value <= 1:\n out.append(int(s[i]))\n else:\n raise ValueError('Invalid binary number!')\n return out\n\ndef add_binary_strings(a,b,debug=False):\n ans = []\n digits = max(len(a),len(b))\n a = _convert_to_binary_list(a.zfill(digits))\n b = _convert_to_binary_list(b.zfill(digits))\n carry = 0\n for i in range(digits-1,-1,-1):\n column_value = a[i] + b[i] + carry\n if column_value == 0:\n value = 0\n carry = 0\n elif column_value == 1:\n value = 1\n carry = 0\n elif column_value == 2:\n value = 0\n carry = 1\n elif column_value == 3:\n value = 1\n carry = 1\n ans.insert(0,value)\n if debug:\n print('a[{0}] = {1}, b[{0}] = {2}, value = {4}, carry = {3}'.format(\n i,a[i],b[i],carry,value))\n if debug:\n print('{:>36s} = {}'.format('carry',carry))\n if carry:\n ans.insert(0,1)\n return ''.join([str(digit) for digit in ans])\n\n\nif __name__ == '__main__':\n assert isinstance(add_binary_strings('1010','1101'),str), 'Answer must be of type string'\n assert add_binary_strings('1010','1101') == '10111', 'Binary add gave incorrect answer'\n\n a = argv[1]\n b = argv[2]\n pad = max(len(a),len(b)) + 1\n try:\n ans = add_binary_strings(a,b,True)\n print(' A = {{:>{:d}s}}'.format(pad).format(a))\n print(' B = {{:>{:d}s}}'.format(pad).format(b))\n print('(A+B) = {{:>{:d}s}}'.format(pad).format(ans))\n except ValueError as e:\n print(e)\n","sub_path":"binaryAdd.py","file_name":"binaryAdd.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"93637269","text":"import os\nfrom datetime import datetime\nfrom pytz import timezone\nfrom django.db.models import Q\n\nfrom django.contrib.auth.admin import User\n\nfrom getperson.models import Timeput\nfrom getperson.myfunc import ids_list, datetimeToWeek\nfrom lish import settings\n\n\ntemp_url = \"/media/templates\"\n\n\ndef tp_to_enabletp(tps, u):\n u_and_fr_u_ids = [i.id_friend for i in u.userfriends_set.all()]\n u_and_fr_u_ids.append(u.id)\n u_and_fr_us = User.objects.filter(id__in=u_and_fr_u_ids)\n uet_ids = [i.tp_id for i in u.userenabletp_set.all()]\n\n tp_objs = tps.filter(Q(public=\"2\") | (Q(public=\"1\") & Q(user__in=u_and_fr_us))\\\n | Q(public=\"3\") & (Q(id__in=uet_ids) | Q(user=u)))\n\n pub_response = tp_to_response(u, tp_objs)\n\n response = pub_response\n return response\n\n\n# used throughout the API\ndef tp_to_response(u_obj, tp):\n p_obj = u_obj.profile\n pin_events = ids_list(p_obj.pin_event)\n res_events = ids_list(p_obj.reserve_event)\n\n response = []\n fav_state = False\n res_state = False\n\n for i in tp:\n is_self = False\n if i.id in pin_events:\n fav_state = 
True\n if i.id in res_events:\n res_state = True\n if i.user.id == u_obj.id:\n is_self = True\n\n if not i.begin_time == None:\n if not i.finish_time == None:\n # convert the stored time to local time (Asia/Tokyo)\n i.begin_time = i.begin_time.astimezone(timezone('Asia/Tokyo'))\n i.finish_time = i.finish_time.astimezone(timezone('Asia/Tokyo'))\n # convert begin_time to the display format used in posts\n begin_dotw = \"({0})\".format(datetimeToWeek(i.begin_time))\n begin_datetime = i.begin_time.strftime(\"%m月%d日\")\n begin_time = i.begin_time.strftime(\"%H:%M\")\n finish_dotw = \"({0})\".format(datetimeToWeek(i.finish_time))\n finish_datetime = i.finish_time.strftime(\"%m月%d日\")\n finish_time = i.finish_time.strftime(\"%H:%M\")\n else:\n # convert the stored time to local time (Asia/Tokyo)\n i.begin_time = i.begin_time.astimezone(timezone('Asia/Tokyo'))\n # convert begin_time to the display format used in posts\n begin_dotw = \"({0})\".format(datetimeToWeek(i.begin_time))\n begin_datetime = i.begin_time.strftime(\"%m月%d日\")\n begin_time = i.begin_time.strftime(\"%H:%M\")\n finish_dotw = \"\"\n finish_datetime = \"終了時間未定\"\n finish_time = \"\"\n elif i.finish_time == None:\n begin_dotw = \"\"\n begin_datetime = \"開始時間未定\"\n begin_time = \"\"\n \n finish_dotw = \"\"\n finish_datetime = \"終了時間未定\"\n finish_time = \"\"\n else:\n i.finish_time = i.finish_time.astimezone(timezone('Asia/Tokyo'))\n begin_dotw = \"\"\n begin_datetime = \"開始時間未定\"\n begin_time = \"\"\n finish_dotw = \"({0})\".format(datetimeToWeek(i.finish_time))\n finish_datetime = i.finish_time.strftime(\"%m月%d日\")\n finish_time = i.finish_time.strftime(\"%H:%M\")\n\n # decide whether to use the user-uploaded image or one of the template images\n if i.img:\n tp_img = i.img.url\n else:\n tp_img = os.path.join(temp_url, \"{0}.png\".format(i.temp_flag))\n\n response.append({\n \"userId\": i.user.id,\n \"username\": i.user.profile.disp_name,\n \"profileImg\": i.user.profile.profile_img.url,\n \"tpImg\": tp_img,\n \"begin_time\": begin_time,\n \"begin_datetime\": begin_datetime,\n \"begin_dotw\": begin_dotw,\n \"finish_time\": finish_time,\n \"finish_datetime\": finish_datetime,\n \"finish_dotw\": finish_dotw,\n \"title\": i.title,\n \"memo\": i.memo,\n \"location\": i.place,\n \"tpId\":i.id,\n \"fav_state\": fav_state,\n \"res_state\": res_state,\n \"is_self\": is_self,\n })\n fav_state = False\n res_state = False\n return response\n","sub_path":"getperson/myfuncs/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"314119756","text":"from ftw.solr.interfaces import ISolrSearch\nfrom opengever.base.solr.contentlisting import OGSolrContentListing # noqa\nfrom opengever.base.solr.contentlisting import OGSolrContentListingObject # noqa\nfrom opengever.base.solr.contentlisting import OGSolrDocument\nfrom zope.component import getUtility\n\n\ndef solr_doc_from_uuid(uuid, fields):\n solr = getUtility(ISolrSearch)\n resp = solr.search(filters=(\"UID:{}\".format(uuid)), fl=fields)\n if not resp.docs:\n return None\n return OGSolrDocument(resp.docs[0])\n\n\ndef batched_solr_results(**kwargs):\n \"\"\"Returns all Solr results in batches.\n \"\"\"\n solr = getUtility(ISolrSearch)\n last_batch = False\n start = 0\n rows = kwargs.get('rows', 1000)\n while not last_batch:\n resp = solr.search(start=start, **kwargs)\n yield resp.docs\n if start + len(resp.docs) >= resp.num_found or not resp.docs:\n last_batch = True\n start += 
rows\n","sub_path":"opengever/base/solr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"505078940","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n\n\nfrom datetime import datetime\nfrom jieba import analyse\n\nimport pymysql\n\n\n\n\nclass NewschinaPipeline(object):\n def __init__(self):\n self.conn = pymysql.connect(\n host='.......',\n port=3306,\n database='news_China',\n user='....',\n password='.....',\n charset='utf8'\n )\n # 实例一个游标\n self.cursor = self.conn.cursor()\n\n def process_item(self, item, spider):\n sql = \"\"\"\n insert into ChinaNews(ID, 标题, 关键词, 正文, 数据来源,报道时间,抓取时间)\n values (%s, %s, %s, %s, %s,%s, %s);\"\"\"\n\n values = [\n item['title'],\n item['keywords'],\n item['content'],\n item['source'],\n# item['reported_time']\n item['time']\n ]\n#\n self.cursor.execute(sql, values)\n self.conn.commit()\n\n return item\n\n def close_spider(self, spider):\n self.cursor.close()\n self.conn.close()","sub_path":"mySpider/mySpider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"162848537","text":"import unittest\nfrom apple import *\n\nclass ValidateAppleClass(unittest.TestCase):\n \"\"\"Tests for `apple.py`.\"\"\"\n\n def test_apple_diameter(self):\n \"\"\"When you create an Apple, it will have a diameter greater than 0\"\"\"\n apple = Apple()\n self.assertGreater(apple.diameter, 0)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"apple_spec.py","file_name":"apple_spec.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"205558593","text":"from django.shortcuts import get_object_or_404, render\r\nfrom django.http import HttpResponseRedirect\r\nfrom django.core.urlresolvers import reverse\r\n\r\nfrom .models import Choice\r\n\r\n\r\n# Create your views here.\r\nfrom .models import Question\r\n\r\n\r\ndef index(request):\r\n latest_question_list = Question.objects.order_by('-pub_date')[:5]\r\n context = {'latest_question_list': latest_question_list}\r\n return render(request, 'polls/index.html', context)\r\n\r\n\r\ndef detail(request, poll_id):\r\n context = get_object_or_404(Question, pk=poll_id)\r\n return render(request, 'polls/detail.html', {'question': context})\r\n\r\n\r\ndef results(request, poll_id):\r\n poll = get_object_or_404(Question, pk=poll_id)\r\n return render(request, 'polls/results.html', {'poll': poll})\r\n\r\n\r\ndef vote(request, poll_id):\r\n p = get_object_or_404(Question, pk=poll_id)\r\n try:\r\n selected_choice = p.choice_set.get(pk=request.POST['choice'])\r\n except (KeyError, Choice.DoesNotExist):\r\n # Redisplay the question voting form.\r\n return render(request, 'polls/detail.html', {\r\n 'question': p,\r\n 'error_message': \"You didn't select a choice.\",\r\n })\r\n else:\r\n selected_choice.votes += 1\r\n selected_choice.save()\r\n return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))\r\n\r\n","sub_path":"mysite/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"201287140","text":"\n\n#\n# 
READ a CSV em python\n#\n#\nimport csv \nimport sys\n \nf = open('/home/jadson/Documentos/temp_files/test.csv', 'rt')\nreader = csv.reader(f, delimiter=';')\nfor row in reader:\n\tprint ('row:')\n\tprint (row)\n\tprint ('--- coluns ---')\n\tfor col in row:\n\t\tprint (col)\n\n\tprint ('--- read especific column number ---')\t\n\tcolnum = 0\t\n\tfor col in row:\n\t\tprint ('reading colum '+str(colnum)+': '+row[colnum])\n\t\tcolnum += 1\t\n \nf.close()","sub_path":"python/read_csv.py","file_name":"read_csv.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"342044175","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n# log_format ui_short '$remote_addr $remote_user $http_x_real_ip [$time_local] \"$request\" '\n# '$status $body_bytes_sent \"$http_referer\" '\n# '\"$http_user_agent\" \"$http_x_forwarded_for\" \"$http_X_REQUEST_ID\" \"$http_X_RB_USER\" '\n# '$request_time';\n\nimport argparse\nfrom datetime import datetime\nimport gzip\nimport json\nimport logging\nimport os\nimport re\nimport sys\n\nconfig = {\n \"REPORT_SIZE\": 1000,\n \"REPORT_DIR\": \"./reports\",\n \"LOG_DIR\": \"./log\",\n \"SELF_LOG_FILE\": \"./tmp/log_analyzer.log\",\n \"TS_FILE\": \"./tmp/log_analyzer.ts\",\n \"REPORT_TEMPLATE\": \"./reports/report.html\"\n}\n\n\ndef merge_configs(config, config_file):\n\n try:\n if os.stat(config_file).st_size == 0:\n return config\n with open(config_file) as f:\n conf_from_file = json.load(f)\n except:\n logging.error(\"Error reading config file\")\n sys.exit(1)\n\n for key in conf_from_file.keys():\n config[key] = conf_from_file[key]\n\n return config\n\n\ndef choose_log_file(log_dir, ts_file):\n LOG_SAMPLE = \"nginx-access-ui.log-\"\n comp = re.compile(r'%s[0-9]{8}' % LOG_SAMPLE)\n max_date = datetime.strptime(\"19010101\", \"%Y%m%d\")\n\n for f in os.listdir(log_dir):\n tmp = comp.match(f)\n if tmp is not None:\n date = datetime.strptime(tmp.string[len(LOG_SAMPLE):len(LOG_SAMPLE)+8], \"%Y%m%d\")\n max_date = date if date > max_date else max_date\n\n if max_date == datetime.strptime(\"19010101\", \"%Y%m%d\"):\n logging.info(\"Nothing to parse: exiting\")\n return None, None\n\n if LOG_SAMPLE + max_date.strftime(\"%Y%m%d\") + \".gz\" in os.listdir(log_dir):\n log_file = LOG_SAMPLE + max_date.strftime(\"%Y%m%d\") + \".gz\"\n else:\n log_file = LOG_SAMPLE + max_date.strftime(\"%Y%m%d\")\n\n mtime = os.path.getmtime(log_dir + \"/\" + log_file)\n\n try:\n with open(ts_file) as f:\n ts = f.readline()\n ts_mtime = float(ts)\n except:\n return log_file, max_date.strftime(\"%Y%m%d\")\n\n if ts_mtime > mtime:\n logging.info(\"Nothing to parse: exiting\")\n return None, None\n else:\n return log_file, max_date.strftime(\"%Y%m%d\")\n\n\ndef parse_log(log_file, log_dir, report_size):\n\n def parse_line(line):\n\n try:\n tmp_line = line.split('] \"')[1]\n '''\n # tmp_line = GET /api/v2/internal/banner/24288647/info HTTP/1.1\"\n 200 351 \"-\" \"-\" \"-\" \"1498697423-2539198130-4708-9752780\"\n \"89f7f1be37d\" 0.072\n '''\n request_time = float(tmp_line.split('\" ')[-1])\n request = tmp_line.split('\" ')[0]\n url = str(request.split(' ')[1])\n\n return url, request_time\n\n except:\n return None, None\n\n def calc_stats(numbers):\n numbers.sort()\n count = len(numbers)\n time_sum = 0\n for n in numbers:\n time_sum += n\n time_avg = time_sum*1.0/count\n time_max = numbers[-1]\n time_med = numbers[int(count/2)]\n\n return count, \\\n round(time_sum, 3), \\\n round(time_avg, 3), \\\n 
round(time_max, 3), \\\n round(time_med, 3)\n\n try:\n log_path = log_dir+'/'+log_file\n if log_path.endswith(\".gz\"):\n f = gzip.open(log_path, 'rt')\n else:\n f = open(log_path)\n except:\n logging.error(\"Error opening log file\")\n sys.exit(1)\n\n ERROR_LEVEL = 0.1 # acceptable error level during log parsing\n total_requests = 0\n total_time = 0\n lines_processed = 0\n errors = 0\n raw_data = {}\n\n for line in f:\n url, time = parse_line(line)\n if url is None or time is None:\n errors += 1\n if lines_processed > 100 and \\\n errors*1.0/lines_processed > ERROR_LEVEL: # UGLY, just for PEP8\n logging.error(\"Too much errors, during parsing log file\")\n sys.exit(1)\n else:\n total_requests += 1\n total_time += time\n if url not in raw_data.keys():\n raw_data[url] = []\n raw_data[url].append(time)\n f.close()\n\n statistics = []\n time_sums = []\n\n for k in raw_data.keys():\n d = {}\n d['url'] = k\n d['count'], d['time_sum'], d['time_avg'], d['time_max'], d['time_med'] = calc_stats(raw_data[k])\n d['count_perc'] = round(d['count']*100/total_requests, 3)\n d['time_perc'] = round(d['time_sum']*100/total_requests, 3)\n time_sums.append(d['time_sum'])\n statistics.append(d)\n if len(statistics) < report_size:\n return statistics\n else:\n time_sums.sort(reverse=True)\n timesum_border = time_sums[report_size]\n\n result = []\n for s in statistics:\n if s['time_sum'] >= timesum_border:\n result.append(s)\n return result\n\n\ndef create_report(report_dir, template, date, statistics):\n\n try:\n with open(template) as f:\n html = f.read()\n except:\n logging.error(\"Error reading report template file\")\n sys.exit(1)\n\n html = html.replace('$table_json', str(statistics))\n outfile_path = report_dir + \"/report-\" + date + \".html\"\n\n try:\n with open(outfile_path, \"w\") as f:\n f.write(html)\n except:\n logging.error(\"Error writing report file\")\n sys.exit(1)\n\n return outfile_path\n\n\ndef update_ts(ts_file, time):\n try:\n with open(ts_file, \"w\") as f:\n f.write(str(time))\n except:\n logging.error(\"Error writing timestamp to file\")\n sys.exit(1)\n\n\ndef main():\n '''\n 1. Read config file\n 1.1 if fail - exit with error\n 1.2 else - merge with config var\n 2. Get log file\n 2.1 if fail - exit\n 2.2 else - pass to parser\n 3. Parse file, count statistics, including error stat\n 3.1 if error level > acceptable - exit\n 4. Generate html report\n 5. 
Update/create ts-file\n '''\n\n parser = argparse.ArgumentParser(description='Log Analyzer')\n parser.add_argument('-c', '--config',\n dest='config',\n default=\"./config/log_analyzer.conf\",\n required=False,\n help='Path to config file')\n args = parser.parse_args()\n\n conf = merge_configs(config, args.config)\n\n if \"SELF_LOG_FILE\" not in conf.keys():\n conf[\"SELF_LOG_FILE\"] = None\n else:\n try:\n f = open(conf[\"SELF_LOG_FILE\"], 'r')\n f.close()\n except IOError:\n f = open(conf[\"SELF_LOG_FILE\"], 'w')\n f.close()\n\n logging.basicConfig(format='[%(asctime)s] %(levelname).1s %(message)s',\n datefmt='%Y.%m.%d %H:%m:%S',\n filename=conf[\"SELF_LOG_FILE\"])\n\n log_file, date = choose_log_file(conf[\"LOG_DIR\"], conf[\"TS_FILE\"])\n\n if log_file is None:\n return 0\n\n statistics = parse_log(log_file, conf[\"LOG_DIR\"], conf[\"REPORT_SIZE\"])\n\n report_file = create_report(conf[\"REPORT_DIR\"],\n conf[\"REPORT_TEMPLATE\"],\n date,\n statistics)\n\n mtime = os.path.getmtime(report_file)\n update_ts(conf[\"TS_FILE\"], mtime)\n\n logging.info(\"Report file: %s succcessfully created\" % report_file)\n return 0\n\nif __name__ == \"__main__\":\n\n logging.basicConfig(format='[%(asctime)s] %(levelname).1s %(message)s',\n datefmt='%Y.%m.%d %H:%m:%S',\n filename=config[\"SELF_LOG_FILE\"])\n\n try:\n main()\n except:\n logging.exception(\"Unexpected exception occured\")\n sys.exit(1)\n","sub_path":"hw1/log_analyzer/log_analyzer.py","file_name":"log_analyzer.py","file_ext":"py","file_size_in_byte":7802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"303423148","text":"from ml import NLOneNeuron, NLTwoLayer, NLThreeLayer\nfrom ml import case1\nimport numpy as np\n\n\ndef main():\n training_inputs = np.array([[0, 0, 1],\n [1, 1, 1],\n [1, 0, 1],\n [0, 1, 1]])\n training_outputs = np.array([[0, 1, 1, 0]]).T\n my_nl_one = NLOneNeuron(training_inputs=training_inputs, training_outputs=training_outputs)\n new_inputs = np.array([0, 0, 1]) # new situation\n my_nl_one.calculate(input_values=new_inputs)\n\n training_inputs = np.array([[0, 0, 1],\n [1, 1, 1],\n [1, 0, 1],\n [0, 0, 1]])\n training_outputs = np.array([[0, 1, 1, 0]]).T\n\n my_nl_two = NLTwoLayer(training_inputs=training_inputs, training_outputs=training_outputs, hidden_neurons=4)\n my_nl_two.calculate(input_values=new_inputs)\n\n my_nl_three = NLThreeLayer(training_inputs=training_inputs, training_outputs=training_outputs,\n first_layer_neurons=4, second_layer_neurons=4)\n my_nl_three.calculate(input_values=new_inputs)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"428901156","text":"# 使用python 操作mongodb\n# 因为mongodb 是面向对象编程,所以很友好\nimport pymongo\n# api 文档地址 https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html\n# create to mongoclinet\n# 1:链接数据库\nclient=pymongo.MongoClient(host='192.168.5.147',port=27017)\n#2:创建数据库/指定数据库\nmydb=client['lktbz']\n\n# 判断创建的数据库是否存在\n# fordbs=client.list_database_names()\n# if 'lktbz' in fordbs:\n# print(\"数据存在\")\n\n#指定集合\ncollection=mydb.students\n#准备插入数据\nstudent={\n 'id': '20170101',\n 'name': 'Jordan',\n 'age': 20,\n 'gender': 'male'\n}\n\n# result=collection.insert(student)\n# print(result)\n\n# 查询\nresult=collection.find_one({'id':201702023})\nprint(result)\n\n# 多次插入\nstudent_list=[]\n\nfor i in range(1,10):\n student2 = {\n 'id': 
\n# insert multiple documents\nstudent_list=[]\n\nfor i in range(1,10):\n    student2 = {\n        'id': '20170202'+str(i),\n        'name': 'Mike',\n        'age': 21+i,\n        'gender': 'male'\n    }\n    student_list.append(student2)\n\n# result=collection.insert_many(student_list)\n# print(result)\n\n# query all documents\n# result=collection.find()\n# for x in result:\n#     print(x['id'])\n\n\n# query on a specific field\n# try:\n#     result=collection.find({'age': 20})\n#     for res in result:\n#         print(res)\n# except:\n#     print(\"no matching documents found\")\n\n# query documents with age greater than 22\n# try:\n#     myquery = {\"age\":{\"$gt\":22}}\n#     result=collection.find(myquery)\n#     for res in result:\n#         print(res)\n# except:\n#     print(\"no matching documents found\")\n\n","sub_path":"01improve/16mangodb学习.py","file_name":"16mangodb学习.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"273141264","text":"import sys\nimport os\n\nfrom collections import Counter\nfrom collections import defaultdict\n\n\ndef get_pics_per_tags(pics):\n    \"\"\"\n    Format the dict of pics to a dict with tags as keys and set of Picture as values\n    \"\"\"\n    pictures = pics.values()\n    pictures_per_tags = defaultdict(set)\n\n    for pic in pictures:\n        for tag in pic.tags:\n            pictures_per_tags[tag].add(pic.id)\n\n    # From default dict to dict\n    pictures_per_tags = dict(pictures_per_tags)\n\n    return pictures_per_tags\n\n\nif __name__ == '__main__':\n\n    from config import INPUT_DIRECTORY\n    from config import COUNTER_DIRECTORY\n\n    from InputReader import InputReader\n\n\n    def write_counter(counter, filename):\n        with open(filename, 'w') as f:\n            f.write('%s %s\\n' % (len(counter), sum(counter.values())))\n            for tag, count in counter.most_common():\n                f.write('%s %s\\n' % (tag, count))\n\n\n    # Get input file from command line arg list\n    input_file = sys.argv[1]\n    file_path = os.path.join(INPUT_DIRECTORY, input_file)\n    assert os.path.exists(file_path)\n\n    # Get objects from input file\n    inputReader = InputReader(file_path)\n\n    counter = Counter()\n    for i, pic in inputReader.photos.items():\n        current_counter = Counter(pic.tags)\n        counter.update(current_counter)\n\n    # Save result to output file\n    output_file = os.path.splitext(input_file)[0] + '.count'\n    out_path = os.path.join(COUNTER_DIRECTORY, output_file)\n\n    write_counter(counter, out_path)\n","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"217281968","text":"def solution(rows, columns, queries):\n    field = [[(c + 1) + r * columns for c in range(columns)] for r in range(rows)]\n    answer = []\n\n    for query in queries:\n        y1, x1, y2, x2 = map(lambda x: x - 1, query)\n        min_val = 0xffffff\n        idx_list = []\n        val_list = []\n        for a in range(x1, x2 + 1):\n            idx_list.append((y1, a))\n            val_list.append(field[y1][a])\n            if min_val > field[y1][a]:\n                min_val = field[y1][a]\n        for b in range(y1 + 1, y2 + 1):\n            idx_list.append((b, x2))\n            val_list.append(field[b][x2])\n            if min_val > field[b][x2]:\n                min_val = field[b][x2]\n        for c in range(x2 - 1, x1 - 1, -1):\n            idx_list.append((y2, c))\n            val_list.append(field[y2][c])\n            if min_val > field[y2][c]:\n                min_val = field[y2][c]\n        for d in range(y2 - 1, y1, -1):\n            idx_list.append((d, x1))\n            val_list.append(field[d][x1])\n            if min_val > field[d][x1]:\n                min_val = field[d][x1]\n\n        for i in range(len(idx_list)):\n            c_y, c_x = idx_list[i]\n            field[c_y][c_x] = val_list[i - 1]\n\n        answer.append(min_val)\n\n    return answer","sub_path":"정준현/12_프로그래머스_1/행렬 테두리 
회전하기.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"267178035","text":"from pwn import *\r\n\r\np = process('./pwn4')\r\ne = ELF('./pwn4')\r\n\r\nsystem = e.plt['system']\r\nbinsh = 0x0804a038\r\n\r\npayload = 'A' * 32\r\n\r\npayload += p32(system)\r\npayload += 'AAAA'\r\npayload += p32(binsh)\r\npayload += '\\n'\r\n\r\np.send(payload)\r\n\r\np.interactive()\r\n","sub_path":"tamu_pwn4exp.py","file_name":"tamu_pwn4exp.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"258218342","text":"# Copyright (c) 2014-present PlatformIO \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom platformio.package.exception import UnknownPackageError\nfrom platformio.package.meta import PackageSpec\n\n\nclass PlatformPackagesMixin(object):\n def get_package_spec(self, name, version=None):\n return PackageSpec(\n owner=self.packages[name].get(\"owner\"),\n name=name,\n requirements=version or self.packages[name].get(\"version\"),\n )\n\n def get_package(self, name, spec=None):\n if not name:\n return None\n return self.pm.get_package(spec or self.get_package_spec(name))\n\n def get_package_dir(self, name):\n pkg = self.get_package(name)\n return pkg.path if pkg else None\n\n def get_package_version(self, name):\n pkg = self.get_package(name)\n return str(pkg.metadata.version) if pkg else None\n\n def get_installed_packages(self, with_optional=False):\n result = []\n for name, options in self.packages.items():\n versions = [options.get(\"version\")]\n if with_optional:\n versions.extend(options.get(\"optionalVersions\", []))\n for version in versions:\n if not version:\n continue\n pkg = self.get_package(name, self.get_package_spec(name, version))\n if pkg:\n result.append(pkg)\n return result\n\n def dump_used_packages(self):\n result = []\n for name, options in self.packages.items():\n if options.get(\"optional\"):\n continue\n pkg = self.get_package(name)\n if not pkg or not pkg.metadata:\n continue\n item = {\"name\": pkg.metadata.name, \"version\": str(pkg.metadata.version)}\n if pkg.metadata.spec.external:\n item[\"src_url\"] = pkg.metadata.spec.url\n result.append(item)\n return result\n\n def autoinstall_runtime_packages(self):\n for name, options in self.packages.items():\n if options.get(\"optional\", False):\n continue\n if self.get_package(name):\n continue\n self.pm.install(self.get_package_spec(name))\n return True\n\n def install_packages( # pylint: disable=too-many-arguments\n self,\n with_packages=None,\n without_packages=None,\n skip_default_package=False,\n silent=False,\n force=False,\n ):\n with_packages = set(self._find_pkg_names(with_packages or []))\n without_packages = set(self._find_pkg_names(without_packages or []))\n\n upkgs = with_packages | without_packages\n ppkgs = set(self.packages)\n if not upkgs.issubset(ppkgs):\n raise UnknownPackageError(\", \".join(upkgs - ppkgs))\n\n for 
name, options in self.packages.items():\n            if name in without_packages:\n                continue\n            if name in with_packages or not (\n                skip_default_package or options.get(\"optional\", False)\n            ):\n                self.pm.install(self.get_package_spec(name), silent=silent, force=force)\n\n        return True\n\n    def _find_pkg_names(self, candidates):\n        result = []\n        for candidate in candidates:\n            found = False\n\n            # lookup by package types\n            for _name, _opts in self.packages.items():\n                if _opts.get(\"type\") == candidate:\n                    result.append(_name)\n                    found = True\n\n            if (\n                self.frameworks\n                and candidate.startswith(\"framework-\")\n                and candidate[10:] in self.frameworks\n            ):\n                result.append(self.frameworks[candidate[10:]][\"package\"])\n                found = True\n\n            if not found:\n                result.append(candidate)\n\n        return result\n\n    def update_packages(self, only_check=False):\n        for pkg in self.get_installed_packages():\n            self.pm.update(\n                pkg,\n                to_spec=self.get_package_spec(pkg.metadata.name),\n                only_check=only_check,\n                show_incompatible=False,\n            )\n\n    def are_outdated_packages(self):\n        for pkg in self.get_installed_packages():\n            if self.pm.outdated(\n                pkg, self.get_package_spec(pkg.metadata.name)\n            ).is_outdated(allow_incompatible=False):\n                return True\n        return False\n","sub_path":"platformio/platform/_packages.py","file_name":"_packages.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"93911911","text":"# -*- coding: utf-8 -*-\n\n\nimport os\nimport sys\nimport simplejson as json\nfrom jinja2 import Environment, FileSystemLoader\n\n\ndef write_file(rendered, filename):\n    # text mode: Jinja2 renders to a str, not bytes\n    with open(filename, 'w') as f:\n        f.write(rendered)\n\n\ndef gen_spider(kwargs):\n    # validate parameters\n    assert 'class_name' in kwargs\n    assert 'spider_name' in kwargs\n    assert 'domains' in kwargs\n    assert 'start_urls' in kwargs\n    assert 'rules' in kwargs\n    assert 'fields' in kwargs\n    assert 'producer_name' in kwargs\n    assert 'brand_name' in kwargs\n    assert kwargs['domains'] is not None\n    assert kwargs['domains'] != []\n    assert kwargs['start_urls'] is not None\n    assert kwargs['start_urls'] != []\n    assert kwargs['rules'] is not None\n    assert kwargs['rules'] != []\n    assert kwargs['fields'] is not None\n    assert kwargs['fields'] != []\n\n    env = Environment(\n        autoescape=False,\n        loader=FileSystemLoader(\n            os.path.join(os.path.dirname(__file__), 'templates')))\n\n    tmpl = env.get_template('spider.html')\n    rendered = tmpl.render(**kwargs)\n    write_file(rendered, ''.join([kwargs['spider_name'], '.py']))\n\n\nif __name__ == '__main__':\n    conf_files = sys.argv[1:]\n    confs = list()\n\n    for conf_file in conf_files:\n        with open(conf_file, 'r') as conf:\n            con_str = conf.read()\n            confs.append(json.loads(con_str))\n\n    for conf in confs:\n        gen_spider(conf)\n","sub_path":"spider_gen.py","file_name":"spider_gen.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
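Editorial aside on spider_gen.py above: the generator is driven entirely by the keys it asserts on, so a minimal conforming config can be read off those assertions. The sketch below is illustrative only; every value in it is invented rather than taken from the project:

# Hypothetical config for gen_spider(); all field values are made up for illustration.
example_conf = {
    "class_name": "ExampleSpider",
    "spider_name": "example",
    "domains": ["example.com"],
    "start_urls": ["http://example.com/list"],
    "rules": [{"allow": "/item/"}],
    "fields": ["title", "price"],
    "producer_name": "Example Producer",
    "brand_name": "Example Brand",
}
# gen_spider(example_conf) would render templates/spider.html into example.py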
+{"seq_id":"529288627","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 12 13:55:06 2019\n\n@author: Nout\n\"\"\"\nimport numpy as np\ndef Flightdata():\n    #### RETURNS ALL ANGLE DATA IN RADIANS ####\n    import csv\n    with open('flightdata.txt', 'rt') as f:\n        reader = csv.reader(f, delimiter=',', skipinitialspace=True)\n        lineData = list()\n        cols = next(reader)\n        for col in cols:\n            # Create a list in lineData for each column of data.\n            lineData.append(list())\n\n        for line in reader:\n            for i in range(0, len(lineData)):\n                # Copy the data from the line into the correct columns.\n                lineData[i].append(line[i])\n\n    data = dict()\n    for i in range(0, len(cols)):\n        # Create each key in the dict with the data in its column.\n        data[cols[i]] = lineData[i]\n\n\n    # store all parameters in lists\n    AOA_deg = np.array(data[\"AOA_deg\"])\n    elevator_dte_deg = np.array(data[\"elevator_dte_deg\"])\n    column_fe_N = np.array(data[\"column_fe_N\"])\n    FF_L = np.array(data[\"FF_L_lbs\"])\n    FF_R = np.array(data[\"FF_R_lbs\"])\n    aileron_def_deg = np.array(data[\"aileron_def_deg\"])\n    elevator_def_deg = np.array(data[\"elevator_def_deg\"])\n    rudder_def_deg = np.array(data[\"rudder_def_deg\"])\n    roll_angle_deg = np.array(data[\"roll_angle_deg\"])\n    pitch_angle_deg = data[\"pitch_angle_deg\"]\n    stat_airtemp_C = np.array(data[\"static_airtemp_C\"])\n    pressure_altitude = np.array(data[\"pressure_altitude_ft\"])\n    mach = np.array(data[\"mach_number\"])\n    cas_airspeed = np.array(data[\"cas_airspeed_kts\"])\n    tas_airspeed_kts = np.array(data[\"tas_airspeed_kts\"])\n    time_sec = data[\"time_sec\"]\n    roll_rate_deg_s = np.array(data[\"roll_rate\"])\n    pitch_rate_deg_s = np.array(data[\"pitch_rate\"])\n    yaw_rate_deg_s = np.array(data[\"yaw_rate\"])\n    delta_e_deg = np.array(data[\"delta_e\"])\n    delta_a_deg = np.array(data[\"delta_a\"])\n    delta_r_deg = np.array(data[\"delta_r\"])\n    F_used_L_lbs = np.array(data[\"F_used_L_lbs\"])\n    F_used_R_lbs = np.array(data[\"F_used_R_lbs\"])\n\n\n    #### MAKE DATA INTO FLOATS AND CONVERT THEM TO SI UNITS & RADIANS\n    AOA = []\n    elevator_dte = []\n    Fe_N = []\n    FF_L_lbs = []\n    FF_R_lbs = []\n    aileron_def = []\n    elevator_def = []\n    rudder_def = []\n    roll_angle = []\n    pitch_angle =[]\n    static_airtemp_C = []\n    pressure_altitude_ft = []\n    mach_number = []\n    cas_airspeed_kts = []\n    Vtrue =[]\n    time = []\n    roll_rate = []\n    pitch_rate = []\n    yaw_rate = []\n    delta_e = []\n    delta_a = []\n    delta_r = []\n    F_used = []\n\n    ### Convert strings to floats and collect them in lists so they can be turned into arrays ###\n    for i in range(len(AOA_deg)):\n        AOA.append(float(AOA_deg[i])*np.pi/180) #[rad]\n        elevator_dte.append(float(elevator_dte_deg[i])*np.pi/180.) #[rad]\n        Fe_N.append(float(column_fe_N[i])) #[N]\n        FF_L_lbs.append(float(FF_L[i])) #[lbs] not converted as this will be done in floyds program\n        FF_R_lbs.append(float(FF_R[i])) #[lbs] not converted as this will be done in floyds program\n        aileron_def.append(float(aileron_def_deg[i])*np.pi/180.) #[rad]\n        elevator_def.append(float(elevator_def_deg[i])*np.pi/180.) #[rad]\n        rudder_def.append(float(rudder_def_deg[i])*np.pi/180.) #[rad]\n        roll_angle.append(float(roll_angle_deg[i])*np.pi/180.) #[rad]\n        pitch_angle.append(float(pitch_angle_deg[i])*np.pi/180.) #[rad]\n        static_airtemp_C.append(float(stat_airtemp_C[i])) #[degree C]\n        pressure_altitude_ft.append(float(pressure_altitude[i])) #[ft] not converted to [m] as this will be done in floyd's program\n        mach_number.append(float(mach[i])) #[-]\n        cas_airspeed_kts.append(float(cas_airspeed[i])) #[kts] not converted to [m/s] as this will be done in floyd's program\n        Vtrue.append(float(tas_airspeed_kts[i])*0.51444444444) #[m/s]\n        time.append(float(time_sec[i])) #[sec]\n        roll_rate.append(float(roll_rate_deg_s[i])*np.pi/180.) #[rad/s]\n        pitch_rate.append(float(pitch_rate_deg_s[i])*np.pi/180.) #[rad/s]\n        yaw_rate.append(float(yaw_rate_deg_s[i])*np.pi/180.) #[rad/s]\n        delta_e.append(float(delta_e_deg[i])*np.pi/180.) #[rad]\n        delta_a.append(float(delta_a_deg[i])*np.pi/180.) #[rad]\n        delta_r.append(float(delta_r_deg[i])*np.pi/180.) 
#[rad]\n F_used.append((float(F_used_L_lbs[i]) + float(F_used_R_lbs[i]))*0.45359237) #[kg]\n \n \n AOA = np.array(AOA)\n elevator_dte =np.array(elevator_dte)\n Fe_N = np.array(Fe_N)\n FF_L_lbs = np.array(FF_L_lbs)\n FF_R_lbs = np.array(FF_R_lbs)\n aileron_def = np.array(aileron_def)\n elevator_def = np.array(elevator_def)\n rudder_def = np.array(rudder_def)\n roll_angle = np.array(roll_angle)\n pitch_angle =np.array(pitch_angle)\n static_airtemp_C = np.array(static_airtemp_C)\n pressure_altitude_ft = np.array(pressure_altitude_ft)\n mach_number = np.array(mach_number)\n cas_airspeed_kts = np.array(cas_airspeed_kts)\n Vtrue = np.array(Vtrue)\n time = np.array(time)\n roll_rate = np.array(roll_rate)\n pitch_rate = np.array(pitch_rate)\n yaw_rate = np.array(yaw_rate)\n delta_e = np.array(delta_e)\n delta_a = np.array(delta_a)\n delta_r = np.array(delta_r)\n F_used = np.array(F_used)\n \n #### RETURNS ALL DATA IN SI UNITS AND RADIANS Except for few parameters ####\n return AOA,elevator_dte,Fe_N,FF_L_lbs, FF_R_lbs,aileron_def,elevator_def,rudder_def,roll_angle,pitch_angle,static_airtemp_C,pressure_altitude_ft,mach_number,cas_airspeed_kts,Vtrue,time,roll_rate,pitch_rate,yaw_rate,delta_e,delta_a,delta_r, F_used\n\n\n\n\n\n\n\n\n\n \n\n","sub_path":"SVV_FlightDynamics_Python/SVV_FlightdataImporter.py","file_name":"SVV_FlightdataImporter.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"587352238","text":"'''\nObserver pattern lab\n\"Lab: Watching a File Change Size\"\n\nFrom Aaron Maxwell's awesome class \"Python: Beyond Basics\"\n'''\n\nimport os\nimport time\n\nclass FileWatcher(object):\n def __init__(self, path_of_file_to_watch):\n self.path = path_of_file_to_watch\n self.observers_for_channel = {\n 'increase': set(),\n 'decrease': set(),\n }\n self._last_size = 0\n\n def register(self, channel, observer):\n self.observers_for_channel[channel].add(observer)\n def unregister(self, channel, observer):\n self.observers_for_channel[channel].discard(observer)\n\n def check_forever(self):\n while True:\n self.check_file()\n time.sleep(0.1)\n\n def check_file(self):\n size = os.stat(self.path).st_size\n if size != self._last_size:\n if size > self._last_size:\n channel = 'increase'\n else:\n channel = 'decrease'\n self._last_size = size\n self.dispatch(channel, size)\n\n def dispatch(self, channel, size):\n for observer in self.observers_for_channel[channel]:\n observer.update(size)\n\nclass FileObserver(object):\n def __init__(self, name):\n self.name = name\n def update(self, size):\n print('{} noticed that the file is now {} bytes'.format(self.name, size))\n\nbob = FileObserver('Bob')\njohn = FileObserver('John')\nstacy = FileObserver('Stacy')\n\nwatcher = FileWatcher('data/watched.txt')\nwatcher.register('increase', bob)\nwatcher.register('decrease', john)\nwatcher.register('increase', stacy)\nwatcher.register('decrease', stacy)\n\nwatcher.check_forever()\n\n","sub_path":"myapps/serving/watcher/watcher.py","file_name":"watcher.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"306908025","text":"year = 2008\ntransmission = 'automatic'\nstate = 'OK'\ncolor = 'silver'\nmiles = 85000\nmpg = 22\naccident = 'no'\n\n# Criterion a\nnot (state=='IL' or state=='MN' or state=='WI')\n# Criterion b\n(miles < 80000) and (mpg > 24)\n# Criterion c\n(color == 'silver' or color == 'white') and (transmission == 
'automatic')\n# Criterion d\naccident == 'no' and year >= 2010\n","sub_path":"LogicalOp/UsedCar.py","file_name":"UsedCar.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"318251077","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n#    OpenERP, Open Source Management Solution\n#    Copyright (C) 2014-Today BrowseInfo ().\n#\n#    This program is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU Affero General Public License as\n#    published by the Free Software Foundation, either version 3 of the\n#    License, or (at your option) any later version.\n#\n#    This program is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n#    GNU Affero General Public License for more details.\n#\n#    You should have received a copy of the GNU Affero General Public License\n#    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nimport time\nfrom openerp.osv import osv\nfrom openerp.tools.translate import _\nfrom openerp.report import report_sxw\nfrom openerp.tools import ustr\nfrom openerp.tools.amount_to_text_en import amount_to_text\nimport util\nfrom datetime import datetime\n#from num2words import num2words\nfrom openerp.tools import amount_to_text_en\nfrom openerp.modules.module import get_module_resource\n\n\nclass invoice_custom(report_sxw.rml_parse):\n\n\n    def __init__(self, cr, uid, name, context=None):\n        super(invoice_custom, self).__init__(cr, uid, name, context=context)\n        self.init_bal_sum = 0.0\n        self.ucluler=[\"\",\"bin\",\"milyon\",\"milyar\",\"trilyon\",\"katrilyon\",\"kentilyon\",\n              \"sekstilyon\",\"oktilyon\",\"nonilyon\",\"desilyon\"]\n        self.localcontext.update({\n\n            'time': time,\n            'convert': self.convert,\n            'get_date':self.get_date,\n        })\n\n\n    def get_date(self,date):\n        if date:\n            date_list = date.split(' ')\n            date1 = datetime.strptime(date_list[0], '%Y-%m-%d')\n            date1 = date1.strftime('%d-%b-%y')\n            return date1\n\n    def convert(self, sayi, currency):\n        sayi = str(sayi).split('.')\n        translation1 = self.return_number(float(sayi[0]))\n        if sayi[1] == '00':\n            translation2 = ustr('sıfır')\n        else:\n            translation2 = self.return_number(float(sayi[1]))\n        translation = translation1 + translation2\n        if currency.name == 'EUR':\n            translation = translation1 + 'AVRO' + translation2 + 'SENT'\n        if currency.name == 'USD':\n            translation = translation1 + 'DOLAR' + translation2 + 'SENT'\n        if currency.name == 'TRY':\n            translation = translation1 + 'TL' + translation2 + 'KR'\n        return translation\n\n    def ucluyuVer(self,sayi):\n        birler = [\"\",\"bir\",\"iki\",ustr(\"üç\"),ustr(\"dört\"),\"bes\",\"alti\",\"yedi\",\"sekiz\",\"dokuz\"]\n        onlar = [\"\",\"on\",\"yirmi\",\"otuz\",\"kirk\",\"elli\",ustr(\"altmış\"),\"yetmis\",\"seksen\",\"doksan\"]\n        yuzler = [i+ustr(\"yüz\") for i in birler]\n        yuzler[1] = ustr(\"yüz\")\n\n        basamaklar = [birler,onlar,yuzler]\n\n        sayi = sayi[::-1]\n        yazi,bs = [],0\n        for i in sayi:\n            rakam = sayi[bs]\n            bs += 1\n            if rakam != \"0\":\n                yazi.append(basamaklar[bs-1][int(rakam)])\n        return \"\".join(reversed(yazi))\n\n    def return_number(self,sayi):\n        sayi = '{:,}'.format(int(sayi))\n        haneler = reversed(sayi.split(\",\"))\n        uclus,sonuc = 0,[]\n        for hane in haneler:\n            uclu = self.ucluyuVer(hane)\n            if uclu != \"\":\n                
sonuc.append(uclu+\"\"+ self.ucluler[uclus])\n uclus+=1\n son = \"\".join(reversed(sonuc))\n if son.startswith('birbin'):\n son = son[3:]\n return(son.strip())\n\nclass test_report_template_id(osv.AbstractModel):\n _name = 'report.custom_invoice_report.invoice_report_template_id'\n _inherit = 'report.abstract_report'\n _template = 'custom_invoice_report.invoice_report_template_id'\n _wrapped_report_class = invoice_custom\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"custom_invoice_report/report/invoice_report_custom.py","file_name":"invoice_report_custom.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"519371324","text":"import argparse\n\nparser = argparse.ArgumentParser(description=\"Make prediction object detection on input images\")\nrequiredArgs = parser.add_argument_group(\"required argument\")\nrequiredArgs.add_argument(\"--input\", dest=\"input\", type=str,\n help=\"path to image for prediciton\",\n required = True)\nargs = parser.parse_args()\ninput = args.input\n\nprint(input)","sub_path":".ipynb_checkpoints/predict-checkpoint.py","file_name":"predict-checkpoint.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"219334348","text":"# 1.write a python program to get a string which is n(non-negative integer) copies of a given string.\r\ns = input(\"enter a string:\")\r\nnew_string = \"\"\r\nn = int(input(\"enter the number of times you want to copy:\"))\r\nfor i in range(n):\r\n new_string = new_string + s + \" \"\r\nprint(new_string)\r\n\r\n# 2.write a python program to get the n copies of the first 2 characters of a given string.Return the n copies of the whole string if the length is less than 2.\r\ndef string_copies(string,n):\r\n new_string_2 = \" \"\r\n if len(string) < 2:\r\n for i in range(n):\r\n new_string_2 = new_string_2 + string\r\n else:\r\n string2 = string[0] + string[1]\r\n for x in range(n):\r\n new_string_2 = new_string_2 + string2\r\n print(new_string_2)\r\nstring_copies(\"s\",2)\r\nstring_copies(\"sweety\",2)\r\n\r\n# 3.write a python program to test whether a passed letter is a vowel or not.\r\ndef check_vowels(letter):\r\n all_vowels = \"aeiouAEIOU\"\r\n return letter in all_vowels\r\nprint(check_vowels(\"F\"))\r\n\r\n# 4.write a python program to check whether a specified value is contained in a group of values\r\ndef list_check(number):\r\n list_ = [1, 5, 8, 3]\r\n return number in list_\r\nprint(list_check(9))\r\n\r\n# 5.write a python program to create a histogram from a given list of integers\r\ndef histogram(items):\r\n for n in items:\r\n output = \"\"\r\n while n > 0:\r\n output += \"@\"\r\n n = n-1\r\n print(output)\r\nhistogram([1,3,5,7])\r\n# 6.write a python program to concatenate all elements in a list into a string and return it.\r\ndef str_concatenate(words):\r\n new_string_ = \"\"\r\n for word in words:\r\n new_string_ += str(word)\r\n print(new_string_)\r\nstr_concatenate([1,2,3,4])\r\n\r\n# 7.write a python program to print all even numbers from a given numbers list in the same order and stop the printing if any numbers that come after 237 in the sequence.\r\nthe_list = [386,462,47,418,907,344,236,375,823,566,597,978,328,615,953,345,399,162,758,219,918,237,412,566,248,333,73]\r\ndef check_even(numbers):\r\n for i in numbers:\r\n if i == 237:\r\n break\r\n elif i%2 == 0:\r\n 
print(i)\r\ncheck_even(the_list)\r\n# 8.write a python program to print out a set containing all the colors from color_list_1 which are not present in color_list_2.\r\ncolor_list_1 = set(['white','Black','Red'])\r\ncolor_list_2 = set(['Red','Green'])\r\nprint(color_list_1.difference(color_list_2))\r\n\r\n# 9.write a python program that will accept the base and height of a triangle and compute the area.\r\nbase = float(input(\"enter the base:\"))\r\nheight = float(input(\"enter the height:\"))\r\narea = 0.5*(base * height)\r\nprint(\"area of the triangle is:\", area)\r\n# 10.write a python program to compute the greatest common divisor(GCD) of two positive integers\r\ndef compute_gcd(w,z):\r\n if w>z:\r\n smaller = z\r\n else:\r\n smaller = w\r\n for p in range(1,smaller+1):\r\n if((w%p == 0) and (z%p == 0)):\r\n hcf = p\r\n return hcf\r\nprint(\"the HCF is:\",compute_gcd(10,15))\r\n# or\r\nimport math\r\nprint(\"the HCF is: \",end = \"\")\r\nprint(math.gcd(60,48))","sub_path":"Basics_of_Python_3.py","file_name":"Basics_of_Python_3.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"17803386","text":"import requests\nfrom dataclasses import dataclass, asdict\nimport logging\nimport time\n\ntry:\n from weather_station.anemometer import Anemometer\n from weather_station.temp_probe import take_spot_reading\nexcept ImportError:\n from anemometer import Anemometer\n from temp_probe import take_spot_reading\n\nURL = \"http://gotwind.live/data\"\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass(frozen=True)\nclass Payload:\n windspeed: float\n temperature: float\n humidity: float\n\n def post(self):\n try:\n payload = asdict(self)\n res = requests.post(URL, json=payload)\n if res.status_code == 200:\n logger.info(f\"posted {payload}\")\n else:\n logger.error(f\"something went wrong. 
{res.status_code}\")\n        except Exception:\n            logger.error(\"something went very wrong\")\n\n\ndef poll(sleep: int):\n    anemometer = Anemometer()\n    while True:\n        anemometer.get_reading()\n        temp, humidity = take_spot_reading()\n        payload = Payload(\n            windspeed=anemometer.wind_speed_kn,\n            temperature=temp,\n            humidity=humidity,\n        )\n        payload.post()\n        time.sleep(sleep)\n","sub_path":"weather_station/data_handler.py","file_name":"data_handler.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"120706889","text":"class Enemy:\n    def __init__(self, name, hp, atk, defence):\n        self.name = name\n        self.hp = hp\n        self.atk = atk\n        self.defence = defence\n\n    def __str__(self):\n        return \"name:%s, hp:%s, atk:%s, defence:%s\" % (self.name, self.hp, self.atk, self.defence)\n\n\nlist01 = [\n    Enemy(\"毛玉\", 0, 15, 10),\n    Enemy(\"小妖精\", 120, 20, 5),\n    Enemy(\"大妖精\", 400, 30, 20),\n    Enemy(\"灭霸\", 5000, 100, 200)\n]\n\n\ndef fun01():\n    list02 = []\n    for item in list01:\n        list02.append((item.name))\n    return list02\n\n\nre = fun01()\nprint(re)\n\n\ndef fun02():\n    list02 = []\n    for item in list01:\n        list02.append((item.name, item.hp))\n    return list02\n\n\nre = fun02()\nprint(re)\nprint(\"----------------------------\")\n\n\ndef select(list_target, func_handle):\n    list_temp = []\n    for item in list_target:\n        list_temp.append(func_handle(item))\n    return list_temp\n\n\ndef func_handle(item):\n    return item.name, item.hp\n\n\nre = select(list01, func_handle)\nprint(re)\nprint('-------------------------')\n\nre = select(list01, lambda item: (item.name, item.atk))\nprint(re)\nprint('-------------------------')\n\nfrom list_helper import *\n\nre = ListHelper.select(list01, lambda item: (item.name, item.atk))\nfor item in re:\n    print(item)\n","sub_path":"part_01_python_base/python_pro/day18/exercise02.py","file_name":"exercise02.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"159163897","text":"'''\r\n----------------- AUTOMATED SEARCH FOR MULTIPOLYGON ERRORS ----------------------\r\n\r\nAuthor: Samuel Amico\r\nDate: 20/01/2020\r\nProject: NetMapBR\r\nGoal: Find false polygons and report the names of the cities that have this problem\r\n\r\n------------------------------------------------------------------------------------\r\n\r\n'''\r\n\r\nimport json\r\nimport glob\r\n\r\nLista_json = glob.glob('*.json')\r\n\r\n# For each state, find the incorrect cities:\r\n\r\nfor estado in Lista_json:\r\n    numero_estado = estado.split('-')[1]\r\n\r\n    # Read the JSON file and load it into the data variable\r\n    json_data = open(estado).read()\r\n    data = json.loads(json_data)\r\n\r\n    # Variables: city counter and dict of incorrect cities\r\n    cnt = 0\r\n    cidades_erradas = {}\r\n\r\n    # iterate over every existing city\r\n    for json_cidades in data[\"features\"]:\r\n        cnt += 1\r\n        # Vector holding the coordinates; it may be a single LineString or multiple LineStrings\r\n        vector =(json_cidades[\"geometry\"][\"coordinates\"])\r\n        # Check whether there is more than one polygon\r\n        if(len(vector) > 1):\r\n            # Dict recording the city name and how many polygons it has:\r\n            cidade_errada = json_cidades[\"properties\"][\"name\"]\r\n            poligonos = len(vector)\r\n            cidades_erradas[str(cidade_errada)] = poligonos\r\n\r\n    print((\" State = {} , Total cities = {} , Incorrect cities and their polygon counts: {}\").format(numero_estado,cnt,cidades_erradas))\r\n\r\n\r\n","sub_path":"Verificador.py","file_name":"Verificador.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
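Editorial aside on Verificador.py above, assuming the inputs are standard GeoJSON: on a Polygon geometry, len(coordinates) > 1 can also mean interior rings (holes) rather than extra polygons, so checking the declared geometry type is the more robust test. A hedged sketch of that check, reusing the script's own names:

# Sketch only, not part of the original script; assumes standard GeoJSON fields.
geom = json_cidades["geometry"]
if geom["type"] == "MultiPolygon" and len(geom["coordinates"]) > 1:
    cidades_erradas[str(json_cidades["properties"]["name"])] = len(geom["coordinates"])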
{}\").format(numero_estado,cnt,cidades_erradas) ) \r\n\r\n\r\n","sub_path":"Verificador.py","file_name":"Verificador.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"} +{"seq_id":"429598895","text":"from tkinter import *\r\nroot = Tk()\r\ndef key (event):\r\n print(\"Pressed\",repr(event.char))\r\ndef pushb(event):\r\n frame.focus_set()\r\n print(\"Clicked at\",event.x,event.y)\r\nframe = Frame(root,width = 200, height = 200)\r\nframe.bind(\"\",key)\r\nframe.bind(\"